
  1. #ifndef _ASM_X86_PROCESSOR_H
  2. #define _ASM_X86_PROCESSOR_H
  3.  
  4. #include <asm/processor-flags.h>
  5.  
  6. /* Forward declaration, a strange C thing */
  7. struct task_struct;
  8. struct mm_struct;
  9. struct vm86;
  10.  
  11. #include <asm/math_emu.h>
  12. #include <asm/segment.h>
  13. #include <asm/types.h>
  14. #include <uapi/asm/sigcontext.h>
  15. #include <asm/current.h>
  16. #include <asm/cpufeatures.h>
  17. #include <asm/page.h>
  18. #include <asm/pgtable_types.h>
  19. #include <asm/percpu.h>
  20. #include <asm/msr.h>
  21. #include <asm/desc_defs.h>
  22. #include <asm/nops.h>
  23. #include <asm/special_insns.h>
  24. #include <asm/fpu/types.h>
  25.  
  26. #include <linux/personality.h>
  27. #include <linux/cache.h>
  28. #include <linux/threads.h>
  29. #include <linux/math64.h>
  30. #include <linux/err.h>
  31. #include <linux/irqflags.h>
  32.  
  33. /*
  34.  * We handle most unaligned accesses in hardware.  On the other hand,
  35.  * unaligned DMA can be quite expensive on some Nehalem processors.
  36.  *
  37.  * Based on this, we disable the IP header alignment in network drivers.
  38.  */
  39. #define NET_IP_ALIGN    0
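
/*
 * Illustrative sketch, not part of the original header: network drivers
 * typically add NET_IP_ALIGN to their receive-buffer headroom so the IP
 * header ends up naturally aligned.  On x86 the value is 0, so the
 * skb_reserve() below compiles to a no-op.  The function name is made up
 * for the example.
 */
#if 0   /* example only */
static struct sk_buff *example_rx_alloc(struct net_device *dev, unsigned int len)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

        if (skb)
                skb_reserve(skb, NET_IP_ALIGN);         /* align the IP header */
        return skb;
}
#endif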
  40.  
  41. #define HBP_NUM 4
  42. /*
  43.  * Default implementation of macro that returns current
  44.  * instruction pointer ("program counter").
  45.  */
  46. static inline void *current_text_addr(void)
  47. {
  48.         void *pc;
  49.  
  50.         asm volatile("mov $1f, %0; 1:":"=r" (pc));
  51.  
  52.         return pc;
  53. }
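
/*
 * Illustrative sketch, not part of the original header: the helper simply
 * returns the address of the local label emitted right after the mov, i.e.
 * an address "near here".  Function name is made up for the example.
 */
#if 0   /* example only */
static void example_report_pc(void)
{
        printk(KERN_DEBUG "executing near %p\n", current_text_addr());
}
#endif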
  54.  
  55. /*
  56.  * These alignment constraints are for performance in the vSMP case,
  57.  * but in the task_struct case we must also meet hardware imposed
  58.  * alignment requirements of the FPU state:
  59.  */
  60. #ifdef CONFIG_X86_VSMP
  61. # define ARCH_MIN_TASKALIGN             (1 << INTERNODE_CACHE_SHIFT)
  62. # define ARCH_MIN_MMSTRUCT_ALIGN        (1 << INTERNODE_CACHE_SHIFT)
  63. #else
  64. # define ARCH_MIN_TASKALIGN             16
  65. # define ARCH_MIN_MMSTRUCT_ALIGN        0
  66. #endif
  67.  
  68. enum tlb_infos {
  69.         ENTRIES,
  70.         NR_INFO
  71. };
  72.  
  73. extern u16 __read_mostly tlb_lli_4k[NR_INFO];
  74. extern u16 __read_mostly tlb_lli_2m[NR_INFO];
  75. extern u16 __read_mostly tlb_lli_4m[NR_INFO];
  76. extern u16 __read_mostly tlb_lld_4k[NR_INFO];
  77. extern u16 __read_mostly tlb_lld_2m[NR_INFO];
  78. extern u16 __read_mostly tlb_lld_4m[NR_INFO];
  79. extern u16 __read_mostly tlb_lld_1g[NR_INFO];
  80.  
  81. /*
  82.  *  CPU type and hardware bug flags. Kept separately for each CPU.
  83.  *  Members of this structure are referenced in head.S, so think twice
  84.  *  before touching them. [mj]
  85.  */
  86.  
  87. struct cpuinfo_x86 {
  88.         __u8                    x86;            /* CPU family */
  89.         __u8                    x86_vendor;     /* CPU vendor */
  90.         __u8                    x86_model;
  91.         __u8                    x86_mask;
  92. #ifdef CONFIG_X86_32
  93.         char                    wp_works_ok;    /* It doesn't on 386's */
  94.  
  95.         /* Problems on some 486Dx4's and old 386's: */
  96.         char                    rfu;
  97.         char                    pad0;
  98.         char                    pad1;
  99. #else
  100.         /* Number of 4K pages in DTLB/ITLB combined: */
  101.         int                     x86_tlbsize;
  102. #endif
  103.         __u8                    x86_virt_bits;
  104.         __u8                    x86_phys_bits;
  105.         /* CPUID returned core id bits: */
  106.         __u8                    x86_coreid_bits;
  107.         /* Max extended CPUID function supported: */
  108.         __u32                   extended_cpuid_level;
  109.         /* Maximum supported CPUID level, -1=no CPUID: */
  110.         int                     cpuid_level;
  111.         __u32                   x86_capability[NCAPINTS + NBUGINTS];
  112.         char                    x86_vendor_id[16];
  113.         char                    x86_model_id[64];
  114.         /* in KB - valid for CPUs which support this call: */
  115.         int                     x86_cache_size;
  116.         int                     x86_cache_alignment;    /* In bytes */
  117.         /* Cache QoS architectural values: */
  118.         int                     x86_cache_max_rmid;     /* max index */
  119.         int                     x86_cache_occ_scale;    /* scale to bytes */
  120.         int                     x86_power;
  121.         unsigned long           loops_per_jiffy;
  122.         /* cpuid returned max cores value: */
  123.         u16                     x86_max_cores;
  124.         u16                     apicid;
  125.         u16                     initial_apicid;
  126.         u16                     x86_clflush_size;
  127.         /* number of cores as seen by the OS: */
  128.         u16                     booted_cores;
  129.         /* Physical processor id: */
  130.         u16                     phys_proc_id;
  131.         /* Logical processor id: */
  132.         u16                     logical_proc_id;
  133.         /* Core id: */
  134.         u16                     cpu_core_id;
  135.         /* Index into per_cpu list: */
  136.         u16                     cpu_index;
  137.         u32                     microcode;
  138. };
  139.  
  140. #define X86_VENDOR_INTEL        0
  141. #define X86_VENDOR_CYRIX        1
  142. #define X86_VENDOR_AMD          2
  143. #define X86_VENDOR_UMC          3
  144. #define X86_VENDOR_CENTAUR      5
  145. #define X86_VENDOR_TRANSMETA    7
  146. #define X86_VENDOR_NSC          8
  147. #define X86_VENDOR_NUM          9
  148.  
  149. #define X86_VENDOR_UNKNOWN      0xff
  150.  
  151. /*
  152.  * capabilities of CPUs
  153.  */
  154. extern struct cpuinfo_x86       boot_cpu_data;
  155. extern struct cpuinfo_x86       new_cpu_data;
  156.  
  157. extern struct tss_struct        doublefault_tss;
  158. extern __u32                    cpu_caps_cleared[NCAPINTS];
  159. extern __u32                    cpu_caps_set[NCAPINTS];
  160.  
  161. #ifdef CONFIG_SMP
  162. DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
  163. #define cpu_data(cpu)           per_cpu(cpu_info, cpu)
  164. #else
  165. #define cpu_info                boot_cpu_data
  166. #define cpu_data(cpu)           boot_cpu_data
  167. #endif
  168.  
  169. extern const struct seq_operations cpuinfo_op;
  170.  
  171. extern void cpu_detect(struct cpuinfo_x86 *c);
  172.  
  173. extern void early_cpu_init(void);
  174. extern void identify_boot_cpu(void);
  175. extern void identify_secondary_cpu(struct cpuinfo_x86 *);
  176. extern void print_cpu_info(struct cpuinfo_x86 *);
  177. void print_cpu_msr(struct cpuinfo_x86 *);
  178. extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
  179. extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
  180. extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
  181.  
  182. extern void detect_extended_topology(struct cpuinfo_x86 *c);
  183. extern void detect_ht(struct cpuinfo_x86 *c);
  184.  
  185. #ifdef CONFIG_X86_32
  186. extern int have_cpuid_p(void);
  187. #else
  188. static inline int have_cpuid_p(void)
  189. {
  190.         return 1;
  191. }
  192. #endif
  193. static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
  194.                                 unsigned int *ecx, unsigned int *edx)
  195. {
  196.         /* ecx is often an input as well as an output. */
  197.         asm volatile("cpuid"
  198.             : "=a" (*eax),
  199.               "=b" (*ebx),
  200.               "=c" (*ecx),
  201.               "=d" (*edx)
  202.             : "0" (*eax), "2" (*ecx)
  203.             : "memory");
  204. }
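
/*
 * Illustrative sketch, not part of the original header: leaf 0 returns the
 * 12-byte vendor string in EBX, EDX, ECX (in that order); EAX and ECX are
 * inputs as well as outputs, so both are initialized.  Function name is
 * made up for the example.
 */
#if 0   /* example only */
static void example_read_vendor(char vendor[13])
{
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

        native_cpuid(&eax, &ebx, &ecx, &edx);
        memcpy(vendor + 0, &ebx, 4);
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);
        vendor[12] = '\0';
}
#endif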
  205.  
  206. static inline void load_cr3(pgd_t *pgdir)
  207. {
  208.         write_cr3(__pa(pgdir));
  209. }
  210.  
  211. #ifdef CONFIG_X86_32
  212. /* This is the TSS defined by the hardware. */
  213. struct x86_hw_tss {
  214.         unsigned short          back_link, __blh;
  215.         unsigned long           sp0;
  216.         unsigned short          ss0, __ss0h;
  217.         unsigned long           sp1;
  218.  
  219.         /*
  220.          * We don't use ring 1, so ss1 is a convenient scratch space in
  221.          * the same cacheline as sp0.  We use ss1 to cache the value in
  222.          * MSR_IA32_SYSENTER_CS.  When we context switch
  223.          * MSR_IA32_SYSENTER_CS, we first check if the new value being
  224.          * written matches ss1, and, if it's not, then we wrmsr the new
  225.          * value and update ss1.
  226.          *
  227.          * The only reason we context switch MSR_IA32_SYSENTER_CS is
  228.          * that we set it to zero in vm86 tasks to avoid corrupting the
  229.          * stack if we were to go through the sysenter path from vm86
  230.          * mode.
  231.          */
  232.         unsigned short          ss1;    /* MSR_IA32_SYSENTER_CS */
  233.  
  234.         unsigned short          __ss1h;
  235.         unsigned long           sp2;
  236.         unsigned short          ss2, __ss2h;
  237.         unsigned long           __cr3;
  238.         unsigned long           ip;
  239.         unsigned long           flags;
  240.         unsigned long           ax;
  241.         unsigned long           cx;
  242.         unsigned long           dx;
  243.         unsigned long           bx;
  244.         unsigned long           sp;
  245.         unsigned long           bp;
  246.         unsigned long           si;
  247.         unsigned long           di;
  248.         unsigned short          es, __esh;
  249.         unsigned short          cs, __csh;
  250.         unsigned short          ss, __ssh;
  251.         unsigned short          ds, __dsh;
  252.         unsigned short          fs, __fsh;
  253.         unsigned short          gs, __gsh;
  254.         unsigned short          ldt, __ldth;
  255.         unsigned short          trace;
  256.         unsigned short          io_bitmap_base;
  257.  
  258. } __attribute__((packed));
  259. #else
  260. struct x86_hw_tss {
  261.         u32                     reserved1;
  262.         u64                     sp0;
  263.         u64                     sp1;
  264.         u64                     sp2;
  265.         u64                     reserved2;
  266.         u64                     ist[7];
  267.         u32                     reserved3;
  268.         u32                     reserved4;
  269.         u16                     reserved5;
  270.         u16                     io_bitmap_base;
  271.  
  272. } __attribute__((packed)) ____cacheline_aligned;
  273. #endif
  274.  
  275. /*
  276.  * IO-bitmap sizes:
  277.  */
  278. #define IO_BITMAP_BITS                  65536
  279. #define IO_BITMAP_BYTES                 (IO_BITMAP_BITS/8)
  280. #define IO_BITMAP_LONGS                 (IO_BITMAP_BYTES/sizeof(long))
  281. #define IO_BITMAP_OFFSET                offsetof(struct tss_struct, io_bitmap)
  282. #define INVALID_IO_BITMAP_OFFSET        0x8000
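
/*
 * Worked out, for illustration: 65536 I/O ports -> 65536/8 = 8192 bytes ->
 * 8192/sizeof(long) entries, i.e. 1024 longs on 64-bit or 2048 on 32-bit,
 * plus the one extra terminating entry in struct tss_struct below.
 */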
  283.  
  284. struct tss_struct {
  285.         /*
  286.          * The hardware state:
  287.          */
  288.         struct x86_hw_tss       x86_tss;
  289.  
  290.         /*
  291.          * The extra 1 is there because the CPU will access an
  292.          * additional byte beyond the end of the IO permission
  293.          * bitmap. The extra byte must be all 1 bits, and must
  294.          * be within the limit.
  295.          */
  296.         unsigned long           io_bitmap[IO_BITMAP_LONGS + 1];
  297.  
  298. #ifdef CONFIG_X86_32
  299.         /*
  300.          * Space for the temporary SYSENTER stack.
  301.          */
  302.         unsigned long           SYSENTER_stack_canary;
  303.         unsigned long           SYSENTER_stack[64];
  304. #endif
  305.  
  306. } ____cacheline_aligned;
  307.  
  308. DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
  309.  
  310. #ifdef CONFIG_X86_32
  311. DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
  312. #endif
  313.  
  314. /*
  315.  * Save the original ist values for checking stack pointers during debugging
  316.  */
  317. struct orig_ist {
  318.         unsigned long           ist[7];
  319. };
  320.  
  321. #ifdef CONFIG_X86_64
  322. DECLARE_PER_CPU(struct orig_ist, orig_ist);
  323.  
  324. union irq_stack_union {
  325.         char irq_stack[IRQ_STACK_SIZE];
  326.         /*
  327.          * GCC hardcodes the stack canary as %gs:40.  Since the
  328.          * irq_stack is the object at %gs:0, we reserve the bottom
  329.          * 48 bytes of the irq stack for the canary.
  330.          */
  331.         struct {
  332.                 char gs_base[40];
  333.                 unsigned long stack_canary;
  334.         };
  335. };
  336.  
  337. DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
  338. DECLARE_INIT_PER_CPU(irq_stack_union);
  339.  
  340. DECLARE_PER_CPU(char *, irq_stack_ptr);
  341. DECLARE_PER_CPU(unsigned int, irq_count);
  342. extern asmlinkage void ignore_sysret(void);
  343. #else   /* X86_64 */
  344. #ifdef CONFIG_CC_STACKPROTECTOR
  345. /*
  346.  * Make sure the stack canary segment base is cache-line aligned:
  347.  *   "For Intel Atom processors, avoid non zero segment base address
  348.  *    that is not aligned to cache line boundary at all cost."
  349.  * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
  350.  */
  351. struct stack_canary {
  352.         char __pad[20];         /* canary at %gs:20 */
  353.         unsigned long canary;
  354. };
  355. DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
  356. #endif
  357. /*
  358.  * per-CPU IRQ handling stacks
  359.  */
  360. struct irq_stack {
  361.         u32                     stack[THREAD_SIZE/sizeof(u32)];
  362. } __aligned(THREAD_SIZE);
  363.  
  364. DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
  365. DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
  366. #endif  /* X86_64 */
  367.  
  368. extern unsigned int xstate_size;
  369.  
  370. struct perf_event;
  371.  
  372. struct thread_struct {
  373.         /* Cached TLS descriptors: */
  374.         struct desc_struct      tls_array[GDT_ENTRY_TLS_ENTRIES];
  375.         unsigned long           sp0;
  376.         unsigned long           sp;
  377. #ifdef CONFIG_X86_32
  378.         unsigned long           sysenter_cs;
  379. #else
  380.         unsigned short          es;
  381.         unsigned short          ds;
  382.         unsigned short          fsindex;
  383.         unsigned short          gsindex;
  384. #endif
  385. #ifdef CONFIG_X86_32
  386.         unsigned long           ip;
  387. #endif
  388. #ifdef CONFIG_X86_64
  389.         unsigned long           fs;
  390. #endif
  391.         unsigned long           gs;
  392.  
  393.         /* Save middle states of ptrace breakpoints */
  394.         struct perf_event       *ptrace_bps[HBP_NUM];
  395.         /* Debug status used for traps, single steps, etc... */
  396.         unsigned long           debugreg6;
  397.         /* Keep track of the exact dr7 value set by the user */
  398.         unsigned long           ptrace_dr7;
  399.         /* Fault info: */
  400.         unsigned long           cr2;
  401.         unsigned long           trap_nr;
  402.         unsigned long           error_code;
  403. #ifdef CONFIG_VM86
  404.         /* Virtual 86 mode info */
  405.         struct vm86             *vm86;
  406. #endif
  407.         /* IO permissions: */
  408.         unsigned long           *io_bitmap_ptr;
  409.         unsigned long           iopl;
  410.         /* Max allowed port in the bitmap, in bytes: */
  411.         unsigned                io_bitmap_max;
  412.  
  413.         /* Floating point and extended processor state */
  414.         struct fpu              fpu;
  415.         /*
  416.          * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
  417.          * the end.
  418.          */
  419. };
  420.  
  421. /*
  422.  * Set IOPL bits in EFLAGS from given mask
  423.  */
  424. static inline void native_set_iopl_mask(unsigned mask)
  425. {
  426. #ifdef CONFIG_X86_32
  427.         unsigned int reg;
  428.  
  429.         asm volatile ("pushfl;"
  430.                       "popl %0;"
  431.                       "andl %1, %0;"
  432.                       "orl %2, %0;"
  433.                       "pushl %0;"
  434.                       "popfl"
  435.                       : "=&r" (reg)
  436.                       : "i" (~X86_EFLAGS_IOPL), "r" (mask));
  437. #endif
  438. }
  439.  
  440. static inline void
  441. native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
  442. {
  443.         tss->x86_tss.sp0 = thread->sp0;
  444. #ifdef CONFIG_X86_32
  445.         /* Only happens when SEP is enabled, no need to test "SEP"arately: */
  446.         if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
  447.                 tss->x86_tss.ss1 = thread->sysenter_cs;
  448.                 wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
  449.         }
  450. #endif
  451. }
  452.  
  453. static inline void native_swapgs(void)
  454. {
  455. #ifdef CONFIG_X86_64
  456.         asm volatile("swapgs" ::: "memory");
  457. #endif
  458. }
  459.  
  460.  
  461. #ifdef CONFIG_PARAVIRT
  462. #include <asm/paravirt.h>
  463. #else
  464. #define __cpuid                 native_cpuid
  465. #define paravirt_enabled()      0
  466. #define paravirt_has(x)         0
  467.  
  468. static inline void load_sp0(struct tss_struct *tss,
  469.                             struct thread_struct *thread)
  470. {
  471.         native_load_sp0(tss, thread);
  472. }
  473.  
  474. #define set_iopl_mask native_set_iopl_mask
  475. #endif /* CONFIG_PARAVIRT */
  476.  
  477. typedef struct {
  478.         unsigned long           seg;
  479. } mm_segment_t;
  480.  
  481.  
  482. /* Free all resources held by a thread. */
  483. extern void release_thread(struct task_struct *);
  484.  
  485. unsigned long get_wchan(struct task_struct *p);
  486.  
  487. /*
  488.  * Generic CPUID function
  489.  * clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
  490.  * resulting in stale register contents being returned.
  491.  */
  492. static inline void cpuid(unsigned int op,
  493.                          unsigned int *eax, unsigned int *ebx,
  494.                          unsigned int *ecx, unsigned int *edx)
  495. {
  496.         *eax = op;
  497.         *ecx = 0;
  498.         __cpuid(eax, ebx, ecx, edx);
  499. }
  500.  
  501. /* Some CPUID calls want 'count' to be placed in ecx */
  502. static inline void cpuid_count(unsigned int op, int count,
  503.                                unsigned int *eax, unsigned int *ebx,
  504.                                unsigned int *ecx, unsigned int *edx)
  505. {
  506.         *eax = op;
  507.         *ecx = count;
  508.         __cpuid(eax, ebx, ecx, edx);
  509. }
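
/*
 * Illustrative sketch, not part of the original header: CPUID leaf 4 takes
 * a sub-leaf index in ECX, which is exactly what cpuid_count() is for.
 * Walking the sub-leaves until the cache-type field (EAX[4:0]) reads 0
 * counts the cache levels the CPU describes.  Function name is made up
 * for the example.
 */
#if 0   /* example only */
static int example_count_cache_leaves(void)
{
        unsigned int eax, ebx, ecx, edx;
        int index = 0;

        do {
                cpuid_count(4, index, &eax, &ebx, &ecx, &edx);
                index++;
        } while (eax & 0x1f);           /* cache type 0 == no more caches */

        return index - 1;
}
#endif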
  510.  
  511. /*
  512.  * CPUID functions returning a single datum
  513.  */
  514. static inline unsigned int cpuid_eax(unsigned int op)
  515. {
  516.         unsigned int eax, ebx, ecx, edx;
  517.  
  518.         cpuid(op, &eax, &ebx, &ecx, &edx);
  519.  
  520.         return eax;
  521. }
  522.  
  523. static inline unsigned int cpuid_ebx(unsigned int op)
  524. {
  525.         unsigned int eax, ebx, ecx, edx;
  526.  
  527.         cpuid(op, &eax, &ebx, &ecx, &edx);
  528.  
  529.         return ebx;
  530. }
  531.  
  532. static inline unsigned int cpuid_ecx(unsigned int op)
  533. {
  534.         unsigned int eax, ebx, ecx, edx;
  535.  
  536.         cpuid(op, &eax, &ebx, &ecx, &edx);
  537.  
  538.         return ecx;
  539. }
  540.  
  541. static inline unsigned int cpuid_edx(unsigned int op)
  542. {
  543.         unsigned int eax, ebx, ecx, edx;
  544.  
  545.         cpuid(op, &eax, &ebx, &ecx, &edx);
  546.  
  547.         return edx;
  548. }
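
/*
 * Illustrative sketch, not part of the original header: the single-datum
 * helpers are convenient for simple limits, e.g. the highest extended
 * CPUID leaf the CPU implements.  Function name is made up for the example.
 */
#if 0   /* example only */
static unsigned int example_max_extended_leaf(void)
{
        return cpuid_eax(0x80000000);
}
#endif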
  549.  
  550. /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
  551. static __always_inline void rep_nop(void)
  552. {
  553.         asm volatile("rep; nop" ::: "memory");
  554. }
  555.  
  556. static __always_inline void cpu_relax(void)
  557. {
  558.         rep_nop();
  559. }
  560.  
  561. #define cpu_relax_lowlatency() cpu_relax()
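
/*
 * Illustrative sketch, not part of the original header: a typical
 * spin-until-set loop places cpu_relax() in the body so the PAUSE hint
 * saves power and lets a sibling hyperthread run.  Function name is made
 * up for the example.
 */
#if 0   /* example only */
static void example_wait_for_flag(volatile int *flag)
{
        while (!*flag)
                cpu_relax();
}
#endif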
  562.  
  563. /* Stop speculative execution and prefetching of modified code. */
  564. static inline void sync_core(void)
  565. {
  566.         int tmp;
  567.  
  568. #ifdef CONFIG_M486
  569.         /*
  570.          * Do a CPUID if available, otherwise do a jump.  The jump
  571.          * can conveniently enough be the jump around CPUID.
  572.          */
  573.         asm volatile("cmpl %2,%1\n\t"
  574.                      "jl 1f\n\t"
  575.                      "cpuid\n"
  576.                      "1:"
  577.                      : "=a" (tmp)
  578.                      : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
  579.                      : "ebx", "ecx", "edx", "memory");
  580. #else
  581.         /*
  582.          * CPUID is a barrier to speculative execution.
  583.          * Prefetched instructions are automatically
  584.          * invalidated when modified.
  585.          */
  586.         asm volatile("cpuid"
  587.                      : "=a" (tmp)
  588.                      : "0" (1)
  589.                      : "ebx", "ecx", "edx", "memory");
  590. #endif
  591. }
  592.  
  593. extern void select_idle_routine(const struct cpuinfo_x86 *c);
  594. extern void init_amd_e400_c1e_mask(void);
  595.  
  596. extern unsigned long            boot_option_idle_override;
  597. extern bool                     amd_e400_c1e_detected;
  598.  
  599. enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
  600.                          IDLE_POLL};
  601.  
  602. extern void enable_sep_cpu(void);
  603. extern int sysenter_setup(void);
  604.  
  605. extern void early_trap_init(void);
  606. void early_trap_pf_init(void);
  607.  
  608. /* Defined in head.S */
  609. extern struct desc_ptr          early_gdt_descr;
  610.  
  611. extern void cpu_set_gdt(int);
  612. extern void switch_to_new_gdt(int);
  613. extern void load_percpu_segment(int);
  614. extern void cpu_init(void);
  615.  
  616. static inline unsigned long get_debugctlmsr(void)
  617. {
  618.         unsigned long debugctlmsr = 0;
  619.  
  620. #ifndef CONFIG_X86_DEBUGCTLMSR
  621.         if (boot_cpu_data.x86 < 6)
  622.                 return 0;
  623. #endif
  624.         rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
  625.  
  626.         return debugctlmsr;
  627. }
  628.  
  629. static inline void update_debugctlmsr(unsigned long debugctlmsr)
  630. {
  631. #ifndef CONFIG_X86_DEBUGCTLMSR
  632.         if (boot_cpu_data.x86 < 6)
  633.                 return;
  634. #endif
  635.         wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
  636. }
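
/*
 * Illustrative sketch, not part of the original header: the usual
 * read-modify-write pattern on DEBUGCTL, here toggling the BTF
 * (single-step on branches) bit.  DEBUGCTLMSR_BTF comes from
 * <asm/msr-index.h>; the function name is made up for the example.
 */
#if 0   /* example only */
static void example_set_branch_step(bool on)
{
        unsigned long debugctl = get_debugctlmsr();

        if (on)
                debugctl |= DEBUGCTLMSR_BTF;
        else
                debugctl &= ~DEBUGCTLMSR_BTF;
        update_debugctlmsr(debugctl);
}
#endif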
  637.  
  638. extern void set_task_blockstep(struct task_struct *task, bool on);
  639.  
  640. /* Boot loader type from the setup header: */
  641. extern int                      bootloader_type;
  642. extern int                      bootloader_version;
  643.  
  644. extern char                     ignore_fpu_irq;
  645.  
  646. #define HAVE_ARCH_PICK_MMAP_LAYOUT 1
  647. #define ARCH_HAS_PREFETCHW
  648. #define ARCH_HAS_SPINLOCK_PREFETCH
  649.  
  650. #ifdef CONFIG_X86_32
  651. # define BASE_PREFETCH          ""
  652. # define ARCH_HAS_PREFETCH
  653. #else
  654. # define BASE_PREFETCH          "prefetcht0 %P1"
  655. #endif
  656.  
  657. /*
  658.  * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
  659.  *
  660.  * It's not worth caring about 3dnow prefetches for the K6
  661.  * because they are microcoded there and very slow.
  662.  */
  663. static inline void prefetch(const void *x)
  664. {
  665.         alternative_input(BASE_PREFETCH, "prefetchnta %P1",
  666.                           X86_FEATURE_XMM,
  667.                           "m" (*(const char *)x));
  668. }
  669.  
  670. /*
  671.  * 3dnow prefetch to get an exclusive cache line.
  672.  * Useful for spinlocks to avoid one state transition in the
  673.  * cache coherency protocol:
  674.  */
  675. static inline void prefetchw(const void *x)
  676. {
  677.         alternative_input(BASE_PREFETCH, "prefetchw %P1",
  678.                           X86_FEATURE_3DNOWPREFETCH,
  679.                           "m" (*(const char *)x));
  680. }
  681.  
  682. static inline void spin_lock_prefetch(const void *x)
  683. {
  684.         prefetchw(x);
  685. }
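
/*
 * Illustrative sketch, not part of the original header: prefetch() is
 * usually issued for the *next* element while the current one is being
 * processed, hiding the miss latency behind useful work.  The struct and
 * function names are made up for the example.
 */
#if 0   /* example only */
struct example_node {
        struct example_node     *next;
        int                     payload;
};

static int example_sum_list(struct example_node *n)
{
        int sum = 0;

        for (; n; n = n->next) {
                if (n->next)
                        prefetch(n->next);
                sum += n->payload;
        }
        return sum;
}
#endif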
  686.  
  687. #define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
  688.                            TOP_OF_KERNEL_STACK_PADDING)
  689.  
  690. #ifdef CONFIG_X86_32
  691. /*
  692.  * User space process size: 3GB (default).
  693.  */
  694. #define TASK_SIZE               PAGE_OFFSET
  695. #define TASK_SIZE_MAX           TASK_SIZE
  696. #define STACK_TOP               TASK_SIZE
  697. #define STACK_TOP_MAX           STACK_TOP
  698.  
  699. #define INIT_THREAD  {                                                    \
  700.         .sp0                    = TOP_OF_INIT_STACK,                      \
  701.         .sysenter_cs            = __KERNEL_CS,                            \
  702.         .io_bitmap_ptr          = NULL,                                   \
  703. }
  704.  
  705. extern unsigned long thread_saved_pc(struct task_struct *tsk);
  706.  
  707. /*
  708.  * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
  709.  * This is necessary to guarantee that the entire "struct pt_regs"
  710.  * is accessible even if the CPU hasn't stored the SS/ESP registers
  711.  * on the stack (an interrupt gate does not save these registers
  712.  * when switching to the same privilege ring).
  713.  * Therefore beware: accessing the ss/esp fields of
  714.  * "struct pt_regs" is possible, but they may contain
  715.  * completely wrong values.
  716.  */
  717. #define task_pt_regs(task) \
  718. ({                                                                      \
  719.         unsigned long __ptr = (unsigned long)task_stack_page(task);     \
  720.         __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;             \
  721.         ((struct pt_regs *)__ptr) - 1;                                  \
  722. })
  723.  
  724. #define KSTK_ESP(task)          (task_pt_regs(task)->sp)
  725.  
  726. #else
  727. /*
  728.  * User space process size: 47 bits minus one guard page.  The guard
  729.  * page is necessary on Intel CPUs: if a SYSCALL instruction is at
  730.  * the highest possible canonical userspace address, then that
  731.  * syscall will enter the kernel with a non-canonical return
  732.  * address, and SYSRET will explode dangerously.  We avoid this
  733.  * particular problem by preventing anything from being mapped
  734.  * at the maximum canonical address.
  735.  */
  736. #define TASK_SIZE_MAX   ((1UL << 47) - PAGE_SIZE)
  737.  
  738. /* This decides where the kernel will search for a free chunk of vm
  739.  * space during mmap's.
  740.  */
  741. #define IA32_PAGE_OFFSET        ((current->personality & ADDR_LIMIT_3GB) ? \
  742.                                         0xc0000000 : 0xFFFFe000)
  743.  
  744. #define TASK_SIZE               (test_thread_flag(TIF_ADDR32) ? \
  745.                                         IA32_PAGE_OFFSET : TASK_SIZE_MAX)
  746. #define TASK_SIZE_OF(child)     ((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
  747.                                         IA32_PAGE_OFFSET : TASK_SIZE_MAX)
  748.  
  749. #define STACK_TOP               TASK_SIZE
  750. #define STACK_TOP_MAX           TASK_SIZE_MAX
  751.  
  752. #define INIT_THREAD  { \
  753.         .sp0 = TOP_OF_INIT_STACK \
  754. }
  755.  
  756. /*
  757.  * Return the saved PC of a blocked thread.
  758.  * What is this good for? It will always be the scheduler or ret_from_fork.
  759.  */
  760. #define thread_saved_pc(t)      READ_ONCE_NOCHECK(*(unsigned long *)((t)->thread.sp - 8))
  761.  
  762. #define task_pt_regs(tsk)       ((struct pt_regs *)(tsk)->thread.sp0 - 1)
  763. extern unsigned long KSTK_ESP(struct task_struct *task);
  764.  
  765. #endif /* CONFIG_X86_64 */
  766.  
  767. extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
  768.                                                unsigned long new_sp);
  769.  
  770. /*
  771.  * This decides where the kernel will search for a free chunk of vm
  772.  * space during mmap's.
  773.  */
  774. #define TASK_UNMAPPED_BASE      (PAGE_ALIGN(TASK_SIZE / 3))
  775.  
  776. #define KSTK_EIP(task)          (task_pt_regs(task)->ip)
  777.  
  778. /* Get/set a process' ability to use the timestamp counter instruction */
  779. #define GET_TSC_CTL(adr)        get_tsc_mode((adr))
  780. #define SET_TSC_CTL(val)        set_tsc_mode((val))
  781.  
  782. extern int get_tsc_mode(unsigned long adr);
  783. extern int set_tsc_mode(unsigned int val);
  784.  
  785. /* Register/unregister a process' MPX related resource */
  786. #define MPX_ENABLE_MANAGEMENT() mpx_enable_management()
  787. #define MPX_DISABLE_MANAGEMENT()        mpx_disable_management()
  788.  
  789. #ifdef CONFIG_X86_INTEL_MPX
  790. extern int mpx_enable_management(void);
  791. extern int mpx_disable_management(void);
  792. #else
  793. static inline int mpx_enable_management(void)
  794. {
  795.         return -EINVAL;
  796. }
  797. static inline int mpx_disable_management(void)
  798. {
  799.         return -EINVAL;
  800. }
  801. #endif /* CONFIG_X86_INTEL_MPX */
  802.  
  803. extern u16 amd_get_nb_id(int cpu);
  804. extern u32 amd_get_nodes_per_socket(void);
  805.  
  806. static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
  807. {
  808.         uint32_t base, eax, signature[3];
  809.  
  810.         for (base = 0x40000000; base < 0x40010000; base += 0x100) {
  811.                 cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);
  812.  
  813.                 if (!memcmp(sig, signature, 12) &&
  814.                     (leaves == 0 || ((eax - base) >= leaves)))
  815.                         return base;
  816.         }
  817.  
  818.         return 0;
  819. }
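
/*
 * Illustrative sketch, not part of the original header: hypervisors place
 * a 12-byte signature in the 0x40000000 CPUID range; KVM, for example,
 * advertises "KVMKVMKVM\0\0\0".  Function name is made up for the example.
 */
#if 0   /* example only */
static bool example_running_on_kvm(void)
{
        return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0) != 0;
}
#endif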
  820.  
  821. extern unsigned long arch_align_stack(unsigned long sp);
  822. extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
  823.  
  824. void default_idle(void);
  825. #ifdef  CONFIG_XEN
  826. bool xen_set_default_idle(void);
  827. #else
  828. #define xen_set_default_idle 0
  829. #endif
  830.  
  831. void stop_this_cpu(void *dummy);
  832. void df_debug(struct pt_regs *regs, long error_code);
  833. #endif /* _ASM_X86_PROCESSOR_H */
  834.