/*
 * simple math operations
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
 
#ifndef AVCODEC_ARM_MATHOPS_H
#define AVCODEC_ARM_MATHOPS_H

#include <stdint.h>
#include "config.h"
#include "libavutil/common.h"
 
#if HAVE_INLINE_ASM

#if HAVE_ARMV6_INLINE
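/* signed 32x32 -> 64 multiply, returning only the high 32 bits (SMMUL) */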
#define MULH MULH
static inline av_const int MULH(int a, int b)
{
    int r;
    __asm__ ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

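/* fast a/b using the ff_inverse reciprocal table: the high 32 bits of
 * a * ff_inverse[b] give the quotient; b <= 2 falls back to a plain shift */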
#define FASTDIV FASTDIV
static av_always_inline av_const int FASTDIV(int a, int b)
{
    int r;
    __asm__ ("cmp     %2, #2               \n\t"
             "ldr     %0, [%3, %2, lsl #2] \n\t"
             "ite     le                   \n\t"
             "lsrle   %0, %1, #1           \n\t"
             "smmulgt %0, %0, %1           \n\t"
             : "=&r"(r) : "r"(a), "r"(b), "r"(ff_inverse) : "cc");
    return r;
}
 
#else /* HAVE_ARMV6_INLINE */

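/* pre-ARMv6 fallback: UMULL forms the full 64-bit product of a and
 * ff_inverse[b]; its high word is the quotient */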
#define FASTDIV FASTDIV
static av_always_inline av_const int FASTDIV(int a, int b)
{
    int r, t;
    __asm__ ("umull %1, %0, %2, %3"
             : "=&r"(r), "=&r"(t) : "r"(a), "r"(ff_inverse[b]));
    return r;
}
#endif

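/* 64-bit multiply-subtract: d -= a * b, built on the generic MAC64
 * multiply-accumulate */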
#define MLS64(d, a, b) MAC64(d, -(a), b)

#if HAVE_ARMV5TE_INLINE
 
/* signed 16x16 -> 32 multiply add accumulate */
#   define MAC16(rt, ra, rb)                                            \
    __asm__ ("smlabb %0, %1, %2, %0" : "+r"(rt) : "r"(ra), "r"(rb));
 
/* signed 16x16 -> 32 multiply */
#   define MUL16 MUL16
static inline av_const int MUL16(int ra, int rb)
{
    int rt;
    __asm__ ("smulbb %0, %1, %2" : "=r"(rt) : "r"(ra), "r"(rb));
    return rt;
}

#endif

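/* return the median of a, b and c, branch-free via conditional moves
 * (used e.g. for motion vector and sample prediction) */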
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
    int m;
    __asm__ (
        "mov   %0, %2  \n\t"
        "cmp   %1, %2  \n\t"
        "itt   gt      \n\t"
        "movgt %0, %1  \n\t"
        "movgt %1, %2  \n\t"
        "cmp   %1, %3  \n\t"
        "it    le      \n\t"
        "movle %1, %3  \n\t"
        "cmp   %0, %1  \n\t"
        "it    gt      \n\t"
        "movgt %0, %1  \n\t"
        : "=&r"(m), "+r"(a)
        : "r"(b), "r"(c)
        : "cc");
    return m;
}
 
#endif /* HAVE_INLINE_ASM */

#endif /* AVCODEC_ARM_MATHOPS_H */