Subversion Repositories: KolibriOS


Rev 5270 → Rev 6082 (x86 processor.h header)

--- Rev 5270
+++ Rev 6082
@@ -4,22 +4,23 @@
 #include
 
 /* Forward declaration, a strange C thing */
 struct task_struct;
 struct mm_struct;
+struct vm86;
 
-#include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 
 #include
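
The hunk above drops an #include in favour of a forward declaration of struct vm86. A forward declaration is enough when a header only ever refers to the type through a pointer; a minimal sketch of why (hypothetical names, not from this header):

struct widget;				/* incomplete type: definition lives elsewhere */

struct holder {
	struct widget *w;		/* fine: pointer size is known */
};

/* Not allowed until the full definition is in scope:
 *	struct bad { struct widget w; };
 */
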
@@ -50,8 +51,13 @@
 	asm volatile("mov $1f, %0; 1:":"=r" (pc));
 
 	return pc;
 }
 
+/*
+ * These alignment constraints are for performance in the vSMP case,
+ * but in the task_struct case we must also meet hardware imposed
+ * alignment requirements of the FPU state:
+ */
 #ifdef CONFIG_X86_VSMP
 # define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
 # define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
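
For context, the surrounding function here is current_text_addr(): "mov $1f, %0" stores the address of the local label "1:" that immediately follows, so the function returns roughly its own instruction pointer. A user-space sketch of the same trick (GCC/Clang inline asm; a position-independent 64-bit build would use a RIP-relative lea instead):

#include <stdio.h>

static inline void *text_addr(void)
{
	void *pc;

	/* Load the address of local label "1:" (the next instruction). */
	asm volatile("mov $1f, %0; 1:" : "=r" (pc));
	return pc;
}

int main(void)
{
	printf("executing near %p\n", text_addr());
	return 0;
}
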
@@ -107,10 +113,13 @@
 	char			x86_vendor_id[16];
 	char			x86_model_id[64];
 	/* in KB - valid for CPUS which support this call: */
 	int			x86_cache_size;
 	int			x86_cache_alignment;	/* In bytes */
+	/* Cache QoS architectural values: */
+	int			x86_cache_max_rmid;	/* max index */
+	int			x86_cache_occ_scale;	/* scale to bytes */
 	int			x86_power;
 	unsigned long		loops_per_jiffy;
 	/* cpuid returned max cores value: */
 	u16			 x86_max_cores;
 	u16			apicid;
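
The two fields added here back Intel's cache-occupancy monitoring: per its comment, x86_cache_max_rmid is the highest valid resource monitoring ID, and raw hardware occupancy counts are multiplied by x86_cache_occ_scale to convert them to bytes. A sketch of the conversion (helper name is illustrative):

/* Convert a raw occupancy reading to bytes using the CPUID-reported
 * scale factor. */
static inline unsigned long long occupancy_bytes(unsigned long long raw,
						 int occ_scale)
{
	return raw * occ_scale;
}
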
@@ -158,11 +167,8 @@
 #define cpu_data(cpu)		boot_cpu_data
 #endif
 
 extern const struct seq_operations cpuinfo_op;
 
-#define cache_line_size()   (x86_cache_alignment)
-
 extern void cpu_detect(struct cpuinfo_x86 *c);
-extern void fpu_detect(struct cpuinfo_x86 *c);
 
 extern void early_cpu_init(void);
@@ -208,12 +214,27 @@
 struct x86_hw_tss {
 	unsigned short		back_link, __blh;
 	unsigned long		sp0;
 	unsigned short		ss0, __ss0h;
 	unsigned long		sp1;
-	/* ss1 caches MSR_IA32_SYSENTER_CS: */
-	unsigned short		ss1, __ss1h;
+
+	/*
+	 * We don't use ring 1, so ss1 is a convenient scratch space in
+	 * the same cacheline as sp0.  We use ss1 to cache the value in
+	 * MSR_IA32_SYSENTER_CS.  When we context switch
+	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
+	 * written matches ss1, and, if it's not, then we wrmsr the new
+	 * value and update ss1.
+	 *
+	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
+	 * that we set it to zero in vm86 tasks to avoid corrupting the
+	 * stack if we were to go through the sysenter path from vm86
+	 * mode.
+	 */
+	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */
+
+	unsigned short		__ss1h;
 	unsigned long		sp2;
 	unsigned short		ss2, __ss2h;
 	unsigned long		__cr3;
 	unsigned long		ip;
 	unsigned long		flags;
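
The long comment added above describes a compare-before-write cache: ss1 mirrors MSR_IA32_SYSENTER_CS so the expensive wrmsr can be skipped when the value is unchanged. A condensed sketch of that pattern (the helper name is illustrative, not this header's code):

static void update_sysenter_cs(struct x86_hw_tss *tss, unsigned short cs)
{
	/* ss1 caches the last value written to MSR_IA32_SYSENTER_CS. */
	if (tss->ss1 == cs)
		return;			/* unchanged: skip the slow wrmsr */

	tss->ss1 = cs;
	wrmsr(MSR_IA32_SYSENTER_CS, cs, 0);
}
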
@@ -274,141 +295,23 @@
 	 * be within the limit.
 	 */
 	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];
 
 	/*
-	 * .. and then another 0x100 bytes for the emergency kernel stack:
+	 * Space for the temporary SYSENTER stack:
 	 */
-	unsigned long		stack[64];
+	unsigned long		SYSENTER_stack[64];
 
 } ____cacheline_aligned;
 
-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
+
+#ifdef CONFIG_X86_32
+DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
+#endif
 
 /*
  * Save the original ist values for checking stack pointers during debugging
  */
 struct orig_ist {
 	unsigned long		ist[7];
 };
-
-#define	MXCSR_DEFAULT		0x1f80
-
-struct i387_fsave_struct {
-	u32			cwd;	/* FPU Control Word		*/
-	u32			swd;	/* FPU Status Word		*/
-	u32			twd;	/* FPU Tag Word			*/
-	u32			fip;	/* FPU IP Offset		*/
-	u32			fcs;	/* FPU IP Selector		*/
-	u32			foo;	/* FPU Operand Pointer Offset	*/
-	u32			fos;	/* FPU Operand Pointer Selector	*/
-
-	/* 8*10 bytes for each FP-reg = 80 bytes:			*/
-	u32			st_space[20];
-
-	/* Software status information [not touched by FSAVE ]:		*/
-	u32			status;
-};
-
-struct i387_fxsave_struct {
-	u16			cwd; /* Control Word			*/
-	u16			swd; /* Status Word			*/
-	u16			twd; /* Tag Word			*/
-	u16			fop; /* Last Instruction Opcode		*/
-	union {
-		struct {
-			u64	rip; /* Instruction Pointer		*/
-			u64	rdp; /* Data Pointer			*/
-		};
-		struct {
-			u32	fip; /* FPU IP Offset			*/
-			u32	fcs; /* FPU IP Selector			*/
-			u32	foo; /* FPU Operand Offset		*/
-			u32	fos; /* FPU Operand Selector		*/
-		};
-	};
-	u32			mxcsr;		/* MXCSR Register State */
-	u32			mxcsr_mask;	/* MXCSR Mask		*/
-
-	/* 8*16 bytes for each FP-reg = 128 bytes:			*/
-	u32			st_space[32];
-
-	/* 16*16 bytes for each XMM-reg = 256 bytes:			*/
-	u32			xmm_space[64];
-
-	u32			padding[12];
-
-	union {
-		u32		padding1[12];
-		u32		sw_reserved[12];
-	};
-
-} __attribute__((aligned(16)));
-
-struct i387_soft_struct {
-	u32			cwd;
-	u32			swd;
-	u32			twd;
-	u32			fip;
-	u32			fcs;
-	u32			foo;
-	u32			fos;
-	/* 8*10 bytes for each FP-reg = 80 bytes: */
-	u32			st_space[20];
-	u8			ftop;
-	u8			changed;
-	u8			lookahead;
-	u8			no_update;
-	u8			rm;
-	u8			alimit;
-	struct math_emu_info	*info;
-	u32			entry_eip;
-};
-
-struct ymmh_struct {
-	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
-	u32 ymmh_space[64];
-};
-
-/* We don't support LWP yet: */
-struct lwp_struct {
-	u8 reserved[128];
-};
-
-struct bndreg {
-	u64 lower_bound;
-	u64 upper_bound;
-} __packed;
-
-struct bndcsr {
-	u64 bndcfgu;
-	u64 bndstatus;
-} __packed;
-
-struct xsave_hdr_struct {
-	u64 xstate_bv;
-	u64 xcomp_bv;
-	u64 reserved[6];
-} __attribute__((packed));
-
-struct xsave_struct {
-	struct i387_fxsave_struct i387;
-	struct xsave_hdr_struct xsave_hdr;
-	struct ymmh_struct ymmh;
-	struct lwp_struct lwp;
-	struct bndreg bndreg[4];
-	struct bndcsr bndcsr;
-	/* new processor state extensions will go here */
-} __attribute__ ((packed, aligned (64)));
-
-union thread_xstate {
-	struct i387_fsave_struct	fsave;
-	struct i387_fxsave_struct	fxsave;
-	struct i387_soft_struct		soft;
-	struct xsave_struct		xsave;
-};
-
-struct fpu {
-	unsigned int last_cpu;
-	unsigned int has_fpu;
-	union thread_xstate *state;
-};
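
This revision removes the whole FPU/XSAVE state family from the header (in mainline kernels of this vintage these definitions moved into the dedicated FPU headers). One property of the deleted xsave_struct worth noting is the aligned(64): XSAVE/XRSTOR fault on a save area that is not 64-byte aligned. A standalone mock of the layout and its size arithmetic (stand-in types, not kernel code):

#include <stdint.h>

struct mock_xsave {
	uint8_t legacy[512];	/* i387_fxsave_struct: legacy FXSAVE image */
	uint8_t header[64];	/* xsave_hdr_struct: xstate_bv, xcomp_bv, ... */
	uint8_t ymmh[256];	/* 16 YMM high halves, 16 bytes each */
} __attribute__((packed, aligned(64)));

_Static_assert(sizeof(struct mock_xsave) == 512 + 64 + 256,
	       "832 bytes, already a multiple of 64");
_Static_assert(_Alignof(struct mock_xsave) == 64,
	       "XSAVE requires a 64-byte aligned area");
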
@@ -459,9 +362,7 @@
 DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
 DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
 #endif	/* X86_64 */
 
 extern unsigned int xstate_size;
-extern void free_thread_xstate(struct task_struct *);
-extern struct kmem_cache *task_xstate_cachep;
 
 struct perf_event;
@@ -472,11 +373,10 @@
 	unsigned long		sp0;
 	unsigned long		sp;
 #ifdef CONFIG_X86_32
 	unsigned long		sysenter_cs;
 #else
-	unsigned long		usersp;	/* Copy from PDA */
 	unsigned short		es;
 	unsigned short		ds;
 	unsigned short		fsindex;
 	unsigned short		gsindex;
 #endif
@@ -485,45 +385,36 @@
 #endif
 #ifdef CONFIG_X86_64
 	unsigned long		fs;
 #endif
 	unsigned long		gs;
+
 	/* Save middle states of ptrace breakpoints */
 	struct perf_event	*ptrace_bps[HBP_NUM];
 	/* Debug status used for traps, single steps, etc... */
 	unsigned long           debugreg6;
 	/* Keep track of the exact dr7 value set by the user */
 	unsigned long           ptrace_dr7;
 	/* Fault info: */
 	unsigned long		cr2;
 	unsigned long		trap_nr;
 	unsigned long		error_code;
-	/* floating point and extended processor state */
-	struct fpu		fpu;
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_VM86
 	/* Virtual 86 mode info */
-	struct vm86_struct __user *vm86_info;
-	unsigned long		screen_bitmap;
-	unsigned long		v86flags;
-	unsigned long		v86mask;
-	unsigned long		saved_sp0;
-	unsigned int		saved_fs;
-	unsigned int		saved_gs;
+	struct vm86		*vm86;
 #endif
 	/* IO permissions: */
 	unsigned long		*io_bitmap_ptr;
 	unsigned long		iopl;
 	/* Max allowed port in the bitmap, in bytes: */
 	unsigned		io_bitmap_max;
+
+	/* Floating point and extended processor state */
+	struct fpu		fpu;
 	/*
-	 * fpu_counter contains the number of consecutive context switches
-	 * that the FPU is used. If this is over a threshold, the lazy fpu
-	 * saving becomes unlazy to save the trap. This is an unsigned char
-	 * so that after 256 times the counter wraps and the behavior turns
-	 * lazy again; this to deal with bursty apps that only use FPU for
-	 * a short time
+	 * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
+	 * the end.
 	 */
-	unsigned char fpu_counter;
 };
 
 /*
  * Set IOPL bits in EFLAGS from given mask
562
#ifdef CONFIG_X86_64
453
#ifdef CONFIG_X86_64
563
	asm volatile("swapgs" ::: "memory");
454
	asm volatile("swapgs" ::: "memory");
564
#endif
455
#endif
565
}
456
}
Line -... Line 457...
-
 
457
 
566
 
458
 
567
#ifdef CONFIG_PARAVIRT
459
#ifdef CONFIG_PARAVIRT
568
#include 
460
#include 
569
#else
461
#else
570
#define __cpuid			native_cpuid
462
#define __cpuid			native_cpuid
-
 
463
#define paravirt_enabled()	0
Line 571... Line 464...
571
#define paravirt_enabled()	0
464
#define paravirt_has(x) 	0
572
 
465
 
573
static inline void load_sp0(struct tss_struct *tss,
466
static inline void load_sp0(struct tss_struct *tss,
574
			    struct thread_struct *thread)
467
			    struct thread_struct *thread)
575
{
468
{
Line 576... Line 469...
576
	native_load_sp0(tss, thread);
469
	native_load_sp0(tss, thread);
577
}
470
}
Line 578... Line -...
578
 
-
 
579
#define set_iopl_mask native_set_iopl_mask
-
 
580
#endif /* CONFIG_PARAVIRT */
-
 
581
 
-
 
582
/*
-
 
583
 * Save the cr4 feature set we're using (ie
-
 
584
 * Pentium 4MB enable and PPro Global page
-
 
585
 * enable), so that any CPU's that boot up
-
 
586
 * after us can get the correct flags.
-
 
587
 */
-
 
588
extern unsigned long mmu_cr4_features;
-
 
589
extern u32 *trampoline_cr4_features;
-
 
590
 
-
 
591
static inline void set_in_cr4(unsigned long mask)
-
 
592
{
-
 
593
	unsigned long cr4;
-
 
594
 
-
 
595
	mmu_cr4_features |= mask;
-
 
596
	if (trampoline_cr4_features)
-
 
597
		*trampoline_cr4_features = mmu_cr4_features;
-
 
598
	cr4 = read_cr4();
-
 
599
	cr4 |= mask;
-
 
600
	write_cr4(cr4);
-
 
601
}
-
 
602
 
-
 
603
static inline void clear_in_cr4(unsigned long mask)
-
 
604
{
-
 
605
	unsigned long cr4;
-
 
606
 
-
 
607
	mmu_cr4_features &= ~mask;
-
 
608
	if (trampoline_cr4_features)
-
 
609
		*trampoline_cr4_features = mmu_cr4_features;
-
 
610
	cr4 = read_cr4();
-
 
611
	cr4 &= ~mask;
471
 
612
	write_cr4(cr4);
472
#define set_iopl_mask native_set_iopl_mask
613
}
473
#endif /* CONFIG_PARAVIRT */
Line 684... Line 544...
684
 
544
 
685
	return edx;
545
	return edx;
Line 686... Line 546...
686
}
546
}
687
 
547
 
688
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
548
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
689
static inline void rep_nop(void)
549
static __always_inline void rep_nop(void)
690
{
550
{
Line 691... Line 551...
691
	asm volatile("rep; nop" ::: "memory");
551
	asm volatile("rep; nop" ::: "memory");
692
}
552
}
693
 
553
 
694
static inline void cpu_relax(void)
554
static __always_inline void cpu_relax(void)
Line 695... Line 555...
695
{
555
{
Line 773... Line 633...
773
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
633
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
774
}
634
}
Line 775... Line 635...
775
 
635
 
Line 776... Line -...
776
extern void set_task_blockstep(struct task_struct *task, bool on);
-
 
777
 
-
 
778
/*
-
 
779
 * from system description table in BIOS. Mostly for MCA use, but
-
 
780
 * others may find it useful:
-
 
781
 */
-
 
782
extern unsigned int		machine_id;
-
 
783
extern unsigned int		machine_submodel_id;
-
 
784
extern unsigned int		BIOS_revision;
636
extern void set_task_blockstep(struct task_struct *task, bool on);
785
 
637
 
786
/* Boot loader type from the setup header: */
638
/* Boot loader type from the setup header: */
Line 787... Line 639...
787
extern int			bootloader_type;
639
extern int			bootloader_type;
Line 792... Line 644...
792
#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
644
#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
793
#define ARCH_HAS_PREFETCHW
645
#define ARCH_HAS_PREFETCHW
794
#define ARCH_HAS_SPINLOCK_PREFETCH
646
#define ARCH_HAS_SPINLOCK_PREFETCH
Line 795... Line 647...
795
 
647
 
796
#ifdef CONFIG_X86_32
648
#ifdef CONFIG_X86_32
797
# define BASE_PREFETCH		ASM_NOP4
649
# define BASE_PREFETCH		""
798
# define ARCH_HAS_PREFETCH
650
# define ARCH_HAS_PREFETCH
799
#else
651
#else
800
# define BASE_PREFETCH		"prefetcht0 (%1)"
652
# define BASE_PREFETCH		"prefetcht0 %P1"
Line 801... Line 653...
801
#endif
653
#endif
802
 
654
 
803
/*
655
/*
Line 830... Line 682...
830
static inline void spin_lock_prefetch(const void *x)
682
static inline void spin_lock_prefetch(const void *x)
831
{
683
{
832
	prefetchw(x);
684
	prefetchw(x);
833
}
685
}
Line -... Line 686...
-
 
686
 
-
 
687
#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
-
 
688
			   TOP_OF_KERNEL_STACK_PADDING)
834
 
689
 
835
#ifdef CONFIG_X86_32
690
#ifdef CONFIG_X86_32
836
/*
691
/*
837
 * User space process size: 3GB (default).
692
 * User space process size: 3GB (default).
838
 */
693
 */
839
#define TASK_SIZE		PAGE_OFFSET
694
#define TASK_SIZE		PAGE_OFFSET
840
#define TASK_SIZE_MAX		TASK_SIZE
695
#define TASK_SIZE_MAX		TASK_SIZE
841
#define STACK_TOP		TASK_SIZE
696
#define STACK_TOP		TASK_SIZE
Line 842... Line 697...
842
#define STACK_TOP_MAX		STACK_TOP
697
#define STACK_TOP_MAX		STACK_TOP
843
 
-
 
844
#define INIT_THREAD  {							  \
698
 
845
	.sp0			= sizeof(init_stack) + (long)&init_stack, \
699
#define INIT_THREAD  {							  \
846
	.vm86_info		= NULL,					  \
700
	.sp0			= TOP_OF_INIT_STACK,			  \
847
	.sysenter_cs		= __KERNEL_CS,				  \
701
	.sysenter_cs		= __KERNEL_CS,				  \
Line 848... Line -...
848
	.io_bitmap_ptr		= NULL,					  \
-
 
849
}
-
 
850
 
-
 
851
/*
-
 
852
 * Note that the .io_bitmap member must be extra-big. This is because
-
 
853
 * the CPU will access an additional byte beyond the end of the IO
-
 
854
 * permission bitmap. The extra byte must be all 1 bits, and must
-
 
855
 * be within the limit.
-
 
856
 */
-
 
857
#define INIT_TSS  {							  \
-
 
858
	.x86_tss = {							  \
-
 
859
		.sp0		= sizeof(init_stack) + (long)&init_stack, \
-
 
860
		.ss0		= __KERNEL_DS,				  \
-
 
861
		.ss1		= __KERNEL_CS,				  \
-
 
862
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		  \
-
 
863
	 },								  \
-
 
864
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },	  \
702
	.io_bitmap_ptr		= NULL,					  \
Line 865... Line -...
865
}
-
 
866
 
-
 
867
extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 
868
 
-
 
869
#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
-
 
870
#define KSTK_TOP(info)                                                 \
-
 
871
({                                                                     \
-
 
872
       unsigned long *__ptr = (unsigned long *)(info);                 \
703
}
873
       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
704
 
874
})
705
extern unsigned long thread_saved_pc(struct task_struct *tsk);
875
 
706
 
876
/*
707
/*
877
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
708
 * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
878
 * This is necessary to guarantee that the entire "struct pt_regs"
709
 * This is necessary to guarantee that the entire "struct pt_regs"
879
 * is accessible even if the CPU haven't stored the SS/ESP registers
710
 * is accessible even if the CPU haven't stored the SS/ESP registers
880
 * on the stack (interrupt gate does not save these registers
711
 * on the stack (interrupt gate does not save these registers
881
 * when switching to the same priv ring).
712
 * when switching to the same priv ring).
882
 * Therefore beware: accessing the ss/esp fields of the
713
 * Therefore beware: accessing the ss/esp fields of the
883
 * "struct pt_regs" is possible, but they may contain the
714
 * "struct pt_regs" is possible, but they may contain the
884
 * completely wrong values.
715
 * completely wrong values.
885
 */
716
 */
886
#define task_pt_regs(task)                                             \
717
#define task_pt_regs(task) \
887
({                                                                     \
718
({									\
Line 888... Line 719...
888
       struct pt_regs *__regs__;                                       \
719
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
Line 889... Line 720...
889
       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
720
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
Line 917... Line 748...
917
 
748
 
918
#define STACK_TOP		TASK_SIZE
749
#define STACK_TOP		TASK_SIZE
Line 919... Line 750...
919
#define STACK_TOP_MAX		TASK_SIZE_MAX
750
#define STACK_TOP_MAX		TASK_SIZE_MAX
920
 
-
 
921
#define INIT_THREAD  { \
-
 
922
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
-
 
923
}
751
 
924
 
-
 
925
#define INIT_TSS  { \
752
#define INIT_THREAD  { \
Line 926... Line 753...
926
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
753
	.sp0 = TOP_OF_INIT_STACK \
927
}
754
}
928
 
755
 
Line 933... Line 760...
933
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))
760
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))
Line 934... Line 761...
934
 
761
 
935
#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
762
#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
Line 936... Line -...
936
extern unsigned long KSTK_ESP(struct task_struct *task);
-
 
937
 
-
 
938
/*
-
 
939
 * User space RSP while inside the SYSCALL fast path
-
 
940
 */
-
 
941
DECLARE_PER_CPU(unsigned long, old_rsp);
763
extern unsigned long KSTK_ESP(struct task_struct *task);
Line 942... Line 764...
942
 
764
 
943
#endif /* CONFIG_X86_64 */
765
#endif /* CONFIG_X86_64 */
Line 959... Line 781...
959
 
781
 
960
extern int get_tsc_mode(unsigned long adr);
782
extern int get_tsc_mode(unsigned long adr);
Line 961... Line 783...
961
extern int set_tsc_mode(unsigned int val);
783
extern int set_tsc_mode(unsigned int val);
962
 
784
 
963
/* Register/unregister a process' MPX related resource */
785
/* Register/unregister a process' MPX related resource */
Line 964... Line 786...
964
#define MPX_ENABLE_MANAGEMENT(tsk)	mpx_enable_management((tsk))
786
#define MPX_ENABLE_MANAGEMENT()	mpx_enable_management()
965
#define MPX_DISABLE_MANAGEMENT(tsk)	mpx_disable_management((tsk))
787
#define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()
966
 
788
 
967
#ifdef CONFIG_X86_INTEL_MPX
789
#ifdef CONFIG_X86_INTEL_MPX
968
extern int mpx_enable_management(struct task_struct *tsk);
790
extern int mpx_enable_management(void);
969
extern int mpx_disable_management(struct task_struct *tsk);
791
extern int mpx_disable_management(void);
970
#else
792
#else
971
static inline int mpx_enable_management(struct task_struct *tsk)
793
static inline int mpx_enable_management(void)
972
{
794
{
973
	return -EINVAL;
795
	return -EINVAL;
974
}
796
}
975
static inline int mpx_disable_management(struct task_struct *tsk)
797
static inline int mpx_disable_management(void)
976
{
798
{
Line 977... Line 799...
977
	return -EINVAL;
799
	return -EINVAL;
-
 
800
}
Line 978... Line 801...
978
}
801
#endif /* CONFIG_X86_INTEL_MPX */
979
#endif /* CONFIG_X86_INTEL_MPX */
802
 
980
 
803
extern u16 amd_get_nb_id(int cpu);
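
Dropping the task_struct argument reflects that MPX management now acts only on the calling task. Userspace reaches these hooks through prctl(); a sketch assuming the MPX-era constants (PR_MPX_ENABLE_MANAGEMENT and its DISABLE counterpart):

#include <sys/prctl.h>

/* Returns 0 on success; -1 with errno set (EINVAL when the kernel was
 * built without CONFIG_X86_INTEL_MPX, matching the stubs above). */
static int mpx_enable(void)
{
	return prctl(PR_MPX_ENABLE_MANAGEMENT, 0, 0, 0, 0);
}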