/*
 * CPU detection code, extracted from mmx.h
 * (c)1997-99 by H. Dietz and R. Fisher
 * Converted to C and improved by Fabrice Bellard.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>

#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavutil/cpu.h"
#include "libavutil/cpu_internal.h"

#if HAVE_YASM

#define cpuid(index, eax, ebx, ecx, edx)        \
    ff_cpu_cpuid(index, &eax, &ebx, &ecx, &edx)

#define xgetbv(index, eax, edx)                 \
    ff_cpu_xgetbv(index, &eax, &edx)

#elif HAVE_INLINE_ASM

/* ebx saving is necessary for PIC. gcc seems unable to see it alone */
#define cpuid(index, eax, ebx, ecx, edx)                        \
    __asm__ volatile (                                          \
        "mov    %%"REG_b", %%"REG_S" \n\t"                      \
        "cpuid                       \n\t"                      \
        "xchg   %%"REG_b", %%"REG_S                             \
        : "=a" (eax), "=S" (ebx), "=c" (ecx), "=d" (edx)        \
        : "0" (index))
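/* REG_b and REG_S (from libavutil/x86/asm.h) expand to the native ebx/esi or
 * rbx/rsi register names, so EBX, which holds the GOT pointer in x86_32 PIC
 * code, is preserved across the CPUID instruction. */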

#define xgetbv(index, eax, edx)                                 \
    __asm__ (".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c" (index))
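/* 0x0f 0x01 0xd0 is the raw encoding of XGETBV, emitted as bytes so the file
 * also builds with assemblers that do not know the mnemonic; it reads the
 * extended control register selected by ECX into EDX:EAX. */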

#define get_eflags(x)                           \
    __asm__ volatile ("pushfl     \n"           \
                      "pop    %0  \n"           \
                      : "=r"(x))

#define set_eflags(x)                           \
    __asm__ volatile ("push    %0 \n"           \
                      "popfl      \n"           \
                      :: "r"(x))

#endif /* HAVE_INLINE_ASM */

#if ARCH_X86_64

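/* Every x86-64 CPU implements CPUID, so no runtime check is needed. */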
#define cpuid_test() 1

#elif HAVE_YASM

#define cpuid_test ff_cpu_cpuid_test

#elif HAVE_INLINE_ASM

static int cpuid_test(void)
{
    x86_reg a, c;

    /* Check if CPUID is supported by attempting to toggle the ID bit in
     * the EFLAGS register. */
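    /* 0x200000 is bit 21 of EFLAGS, the ID flag. */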
    get_eflags(a);
    set_eflags(a ^ 0x200000);
    get_eflags(c);

    return a != c;
}
#endif

/* Function to test if multimedia instructions are supported...  */
int ff_get_cpu_flags_x86(void)
{
    int rval = 0;

#ifdef cpuid

    int eax, ebx, ecx, edx;
    int max_std_level, max_ext_level, std_caps = 0, ext_caps = 0;
    int family = 0, model = 0;
    union { int i[3]; char c[12]; } vendor;

    if (!cpuid_test())
        return 0; /* CPUID not supported */

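    /* Leaf 0: EAX returns the highest supported standard leaf, and the 12-byte
     * vendor string comes back in EBX, EDX, ECX order; the i[0]/i[1]/i[2]
     * placement below reassembles it as e.g. "GenuineIntel" or "AuthenticAMD". */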
    cpuid(0, max_std_level, vendor.i[0], vendor.i[2], vendor.i[1]);

    if (max_std_level >= 1) {
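        /* Leaf 1: EAX packs family/model/stepping (the displayed family adds
         * the extended family field, the displayed model prepends the extended
         * model field); EDX (std_caps) and ECX carry the standard feature bits
         * tested below. */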
        cpuid(1, eax, ebx, ecx, std_caps);
        family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
        model  = ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0);
        if (std_caps & (1 << 15))
            rval |= AV_CPU_FLAG_CMOV;
        if (std_caps & (1 << 23))
            rval |= AV_CPU_FLAG_MMX;
        if (std_caps & (1 << 25))
            rval |= AV_CPU_FLAG_MMXEXT;
#if HAVE_SSE
        if (std_caps & (1 << 25))
            rval |= AV_CPU_FLAG_SSE;
        if (std_caps & (1 << 26))
            rval |= AV_CPU_FLAG_SSE2;
        if (ecx & 1)
            rval |= AV_CPU_FLAG_SSE3;
        if (ecx & 0x00000200)
            rval |= AV_CPU_FLAG_SSSE3;
        if (ecx & 0x00080000)
            rval |= AV_CPU_FLAG_SSE4;
        if (ecx & 0x00100000)
            rval |= AV_CPU_FLAG_SSE42;
#if HAVE_AVX
        /* Check OSXSAVE and AVX bits */
        if ((ecx & 0x18000000) == 0x18000000) {
            /* Check for OS support */
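            /* XGETBV with ECX = 0 reads XCR0; bits 1 and 2 (mask 0x6) must both
             * be set, i.e. the OS saves and restores SSE and AVX state. */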
            xgetbv(0, eax, edx);
            if ((eax & 0x6) == 0x6)
                rval |= AV_CPU_FLAG_AVX;
        }
#if HAVE_AVX2
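        /* Leaf 7 reports structured extended features; EBX bit 5 is AVX2. */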
        if (max_std_level >= 7) {
            cpuid(7, eax, ebx, ecx, edx);
            if (ebx & 0x00000020)
                rval |= AV_CPU_FLAG_AVX2;
            /* TODO: BMI1/2 */
        }
#endif /* HAVE_AVX2 */
#endif /* HAVE_AVX */
#endif /* HAVE_SSE */
    }

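    /* Leaf 0x80000000: EAX returns the highest supported extended leaf. */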
    cpuid(0x80000000, max_ext_level, ebx, ecx, edx);

    if (max_ext_level >= 0x80000001) {
        cpuid(0x80000001, eax, ebx, ecx, ext_caps);
        if (ext_caps & (1U << 31))
            rval |= AV_CPU_FLAG_3DNOW;
        if (ext_caps & (1 << 30))
            rval |= AV_CPU_FLAG_3DNOWEXT;
        if (ext_caps & (1 << 23))
            rval |= AV_CPU_FLAG_MMX;
        if (ext_caps & (1 << 22))
            rval |= AV_CPU_FLAG_MMXEXT;

        /* Allow for selectively disabling SSE2 functions on AMD processors
           with SSE2 support but not SSE4a. This includes Athlon64, some
           Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster
           than SSE2 often enough to utilize this special-case flag.
           AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case
           so that SSE2 is used unless explicitly disabled by checking
           AV_CPU_FLAG_SSE2SLOW. */
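        /* In extended leaf 0x80000001, ECX bit 6 is the SSE4a flag. */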
        if (!strncmp(vendor.c, "AuthenticAMD", 12) &&
            rval & AV_CPU_FLAG_SSE2 && !(ecx & 0x00000040)) {
            rval |= AV_CPU_FLAG_SSE2SLOW;
        }

        /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
         * used unless the OS has AVX support. */
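        /* In extended leaf 0x80000001, ECX bit 11 is XOP and bit 16 is FMA4. */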
        if (rval & AV_CPU_FLAG_AVX) {
            if (ecx & 0x00000800)
                rval |= AV_CPU_FLAG_XOP;
            if (ecx & 0x00010000)
                rval |= AV_CPU_FLAG_FMA4;
        }
    }

    if (!strncmp(vendor.c, "GenuineIntel", 12)) {
        if (family == 6 && (model == 9 || model == 13 || model == 14)) {
            /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
             * 6/14 (core1 "yonah") theoretically support sse2, but it's
             * usually slower than mmx, so let's just pretend they don't.
             * AV_CPU_FLAG_SSE2 is disabled and AV_CPU_FLAG_SSE2SLOW is
             * enabled so that SSE2 is not used unless explicitly enabled
             * by checking AV_CPU_FLAG_SSE2SLOW. The same situation
             * applies for AV_CPU_FLAG_SSE3 and AV_CPU_FLAG_SSE3SLOW. */
            if (rval & AV_CPU_FLAG_SSE2)
                rval ^= AV_CPU_FLAG_SSE2SLOW | AV_CPU_FLAG_SSE2;
            if (rval & AV_CPU_FLAG_SSE3)
                rval ^= AV_CPU_FLAG_SSE3SLOW | AV_CPU_FLAG_SSE3;
        }
        /* The Atom processor has SSSE3 support, which is useful in many cases.
         * However, an SSSE3 code path is sometimes slower than its SSE2
         * equivalent on the Atom, even though it is generally faster on other
         * SSSE3-capable processors. This flag allows certain SSSE3 functions
         * to be disabled selectively on the Atom. */
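        /* Family 6, model 28 is the first-generation (Bonnell) Atom core. */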
        if (family == 6 && model == 28)
            rval |= AV_CPU_FLAG_ATOM;
    }

#endif /* cpuid */

    return rval;
}
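
/* Usage note: callers are expected to reach this code through the public
 * libavutil API rather than calling ff_get_cpu_flags_x86() directly; a
 * minimal sketch:
 *
 *     #include "libavutil/cpu.h"
 *
 *     int flags      = av_get_cpu_flags();
 *     int have_ssse3 = !!(flags & AV_CPU_FLAG_SSSE3);
 */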