/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_INTREADWRITE_H
#define AVUTIL_INTREADWRITE_H

#include <stdint.h>
#include "libavutil/avconfig.h"
#include "attributes.h"
#include "bswap.h"

typedef union {
    uint64_t u64;
    uint32_t u32[2];
    uint16_t u16[4];
    uint8_t  u8 [8];
    double   f64;
    float    f32[2];
} av_alias av_alias64;

typedef union {
    uint32_t u32;
    uint16_t u16[2];
    uint8_t  u8 [4];
    float    f32;
} av_alias av_alias32;

typedef union {
    uint16_t u16;
    uint8_t  u8 [2];
} av_alias av_alias16;
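
/*
 * These unions carry the av_alias attribute (mapped to GCC's may_alias
 * in attributes.h where the compiler supports it), so reading and
 * writing memory through them is exempt from strict-aliasing
 * assumptions.
 */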

/*
 * Arch-specific headers can provide any combination of
 * AV_[RW][BLN](16|24|32|48|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.
 * Preprocessor symbols must be defined, even if these are implemented
 * as inline functions.
 */

#ifdef HAVE_AV_CONFIG_H

#include "config.h"

#if   ARCH_ARM
#   include "arm/intreadwrite.h"
#elif ARCH_AVR32
#   include "avr32/intreadwrite.h"
#elif ARCH_MIPS
#   include "mips/intreadwrite.h"
#elif ARCH_PPC
#   include "ppc/intreadwrite.h"
#elif ARCH_TOMI
#   include "tomi/intreadwrite.h"
#elif ARCH_X86
#   include "x86/intreadwrite.h"
#endif

#endif /* HAVE_AV_CONFIG_H */
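
/*
 * A minimal sketch (not taken from any of the arch headers above) of how
 * such a header can plug in an optimized accessor: it implements the
 * accessor and also defines the matching preprocessor symbol so the
 * generic fallbacks below are skipped.
 *
 *     static av_always_inline uint32_t AV_RN32(const void *p)
 *     {
 *         uint32_t v;
 *         __builtin_memcpy(&v, p, sizeof(v));
 *         return v;
 *     }
 *     #define AV_RN32 AV_RN32
 */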

/*
 * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.
 */

#if AV_HAVE_BIGENDIAN

#   if    defined(AV_RN16) && !defined(AV_RB16)
#       define AV_RB16(p) AV_RN16(p)
#   elif !defined(AV_RN16) &&  defined(AV_RB16)
#       define AV_RN16(p) AV_RB16(p)
#   endif

#   if    defined(AV_WN16) && !defined(AV_WB16)
#       define AV_WB16(p, v) AV_WN16(p, v)
#   elif !defined(AV_WN16) &&  defined(AV_WB16)
#       define AV_WN16(p, v) AV_WB16(p, v)
#   endif

#   if    defined(AV_RN24) && !defined(AV_RB24)
#       define AV_RB24(p) AV_RN24(p)
#   elif !defined(AV_RN24) &&  defined(AV_RB24)
#       define AV_RN24(p) AV_RB24(p)
#   endif

#   if    defined(AV_WN24) && !defined(AV_WB24)
#       define AV_WB24(p, v) AV_WN24(p, v)
#   elif !defined(AV_WN24) &&  defined(AV_WB24)
#       define AV_WN24(p, v) AV_WB24(p, v)
#   endif

#   if    defined(AV_RN32) && !defined(AV_RB32)
#       define AV_RB32(p) AV_RN32(p)
#   elif !defined(AV_RN32) &&  defined(AV_RB32)
#       define AV_RN32(p) AV_RB32(p)
#   endif

#   if    defined(AV_WN32) && !defined(AV_WB32)
#       define AV_WB32(p, v) AV_WN32(p, v)
#   elif !defined(AV_WN32) &&  defined(AV_WB32)
#       define AV_WN32(p, v) AV_WB32(p, v)
#   endif

#   if    defined(AV_RN48) && !defined(AV_RB48)
#       define AV_RB48(p) AV_RN48(p)
#   elif !defined(AV_RN48) &&  defined(AV_RB48)
#       define AV_RN48(p) AV_RB48(p)
#   endif

#   if    defined(AV_WN48) && !defined(AV_WB48)
#       define AV_WB48(p, v) AV_WN48(p, v)
#   elif !defined(AV_WN48) &&  defined(AV_WB48)
#       define AV_WN48(p, v) AV_WB48(p, v)
#   endif

#   if    defined(AV_RN64) && !defined(AV_RB64)
#       define AV_RB64(p) AV_RN64(p)
#   elif !defined(AV_RN64) &&  defined(AV_RB64)
#       define AV_RN64(p) AV_RB64(p)
#   endif

#   if    defined(AV_WN64) && !defined(AV_WB64)
#       define AV_WB64(p, v) AV_WN64(p, v)
#   elif !defined(AV_WN64) &&  defined(AV_WB64)
#       define AV_WN64(p, v) AV_WB64(p, v)
#   endif

#else /* AV_HAVE_BIGENDIAN */

#   if    defined(AV_RN16) && !defined(AV_RL16)
#       define AV_RL16(p) AV_RN16(p)
#   elif !defined(AV_RN16) &&  defined(AV_RL16)
#       define AV_RN16(p) AV_RL16(p)
#   endif

#   if    defined(AV_WN16) && !defined(AV_WL16)
#       define AV_WL16(p, v) AV_WN16(p, v)
#   elif !defined(AV_WN16) &&  defined(AV_WL16)
#       define AV_WN16(p, v) AV_WL16(p, v)
#   endif

#   if    defined(AV_RN24) && !defined(AV_RL24)
#       define AV_RL24(p) AV_RN24(p)
#   elif !defined(AV_RN24) &&  defined(AV_RL24)
#       define AV_RN24(p) AV_RL24(p)
#   endif

#   if    defined(AV_WN24) && !defined(AV_WL24)
#       define AV_WL24(p, v) AV_WN24(p, v)
#   elif !defined(AV_WN24) &&  defined(AV_WL24)
#       define AV_WN24(p, v) AV_WL24(p, v)
#   endif

#   if    defined(AV_RN32) && !defined(AV_RL32)
#       define AV_RL32(p) AV_RN32(p)
#   elif !defined(AV_RN32) &&  defined(AV_RL32)
#       define AV_RN32(p) AV_RL32(p)
#   endif

#   if    defined(AV_WN32) && !defined(AV_WL32)
#       define AV_WL32(p, v) AV_WN32(p, v)
#   elif !defined(AV_WN32) &&  defined(AV_WL32)
#       define AV_WN32(p, v) AV_WL32(p, v)
#   endif

#   if    defined(AV_RN48) && !defined(AV_RL48)
#       define AV_RL48(p) AV_RN48(p)
#   elif !defined(AV_RN48) &&  defined(AV_RL48)
#       define AV_RN48(p) AV_RL48(p)
#   endif

#   if    defined(AV_WN48) && !defined(AV_WL48)
#       define AV_WL48(p, v) AV_WN48(p, v)
#   elif !defined(AV_WN48) &&  defined(AV_WL48)
#       define AV_WN48(p, v) AV_WL48(p, v)
#   endif

#   if    defined(AV_RN64) && !defined(AV_RL64)
#       define AV_RL64(p) AV_RN64(p)
#   elif !defined(AV_RN64) &&  defined(AV_RL64)
#       define AV_RN64(p) AV_RL64(p)
#   endif

#   if    defined(AV_WN64) && !defined(AV_WL64)
#       define AV_WL64(p, v) AV_WN64(p, v)
#   elif !defined(AV_WN64) &&  defined(AV_WL64)
#       define AV_WN64(p, v) AV_WL64(p, v)
#   endif

#endif /* !AV_HAVE_BIGENDIAN */
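
/*
 * Worked example: on a big-endian target whose arch header defines only
 * AV_RB32, the block above supplies the missing native-endian alias as
 *
 *     #define AV_RN32(p) AV_RB32(p)
 *
 * and the reverse mapping is generated when only AV_RN32 is provided.
 */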

/*
 * Define AV_[RW]N helper macros to simplify definitions not provided
 * by per-arch headers.
 */

#if defined(__GNUC__) && !defined(__TI_COMPILER_VERSION__)

union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias;
union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias;
union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;

#   define AV_RN(s, p) (((const union unaligned_##s *) (p))->l)
#   define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v))

#elif defined(__DECC)

#   define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))
#   define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))

#elif AV_HAVE_FAST_UNALIGNED

#   define AV_RN(s, p) (((const av_alias##s*)(p))->u##s)
#   define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v))

#else

#ifndef AV_RB16
#   define AV_RB16(x)                           \
    ((((const uint8_t*)(x))[0] << 8) |          \
      ((const uint8_t*)(x))[1])
#endif
#ifndef AV_WB16
#   define AV_WB16(p, darg) do {                \
        unsigned d = (darg);                    \
        ((uint8_t*)(p))[1] = (d);               \
        ((uint8_t*)(p))[0] = (d)>>8;            \
    } while(0)
#endif

#ifndef AV_RL16
#   define AV_RL16(x)                           \
    ((((const uint8_t*)(x))[1] << 8) |          \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL16
#   define AV_WL16(p, darg) do {                \
        unsigned d = (darg);                    \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
    } while(0)
#endif

#ifndef AV_RB32
#   define AV_RB32(x)                                \
    (((uint32_t)((const uint8_t*)(x))[0] << 24) |    \
               (((const uint8_t*)(x))[1] << 16) |    \
               (((const uint8_t*)(x))[2] <<  8) |    \
                ((const uint8_t*)(x))[3])
#endif
#ifndef AV_WB32
#   define AV_WB32(p, darg) do {                \
        unsigned d = (darg);                    \
        ((uint8_t*)(p))[3] = (d);               \
        ((uint8_t*)(p))[2] = (d)>>8;            \
        ((uint8_t*)(p))[1] = (d)>>16;           \
        ((uint8_t*)(p))[0] = (d)>>24;           \
    } while(0)
#endif

#ifndef AV_RL32
#   define AV_RL32(x)                                \
    (((uint32_t)((const uint8_t*)(x))[3] << 24) |    \
               (((const uint8_t*)(x))[2] << 16) |    \
               (((const uint8_t*)(x))[1] <<  8) |    \
                ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL32
#   define AV_WL32(p, darg) do {                \
        unsigned d = (darg);                    \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[2] = (d)>>16;           \
        ((uint8_t*)(p))[3] = (d)>>24;           \
    } while(0)
#endif

#ifndef AV_RB64
#   define AV_RB64(x)                                   \
    (((uint64_t)((const uint8_t*)(x))[0] << 56) |       \
     ((uint64_t)((const uint8_t*)(x))[1] << 48) |       \
     ((uint64_t)((const uint8_t*)(x))[2] << 40) |       \
     ((uint64_t)((const uint8_t*)(x))[3] << 32) |       \
     ((uint64_t)((const uint8_t*)(x))[4] << 24) |       \
     ((uint64_t)((const uint8_t*)(x))[5] << 16) |       \
     ((uint64_t)((const uint8_t*)(x))[6] <<  8) |       \
      (uint64_t)((const uint8_t*)(x))[7])
#endif
#ifndef AV_WB64
#   define AV_WB64(p, darg) do {                \
        uint64_t d = (darg);                    \
        ((uint8_t*)(p))[7] = (d);               \
        ((uint8_t*)(p))[6] = (d)>>8;            \
        ((uint8_t*)(p))[5] = (d)>>16;           \
        ((uint8_t*)(p))[4] = (d)>>24;           \
        ((uint8_t*)(p))[3] = (d)>>32;           \
        ((uint8_t*)(p))[2] = (d)>>40;           \
        ((uint8_t*)(p))[1] = (d)>>48;           \
        ((uint8_t*)(p))[0] = (d)>>56;           \
    } while(0)
#endif

#ifndef AV_RL64
#   define AV_RL64(x)                                   \
    (((uint64_t)((const uint8_t*)(x))[7] << 56) |       \
     ((uint64_t)((const uint8_t*)(x))[6] << 48) |       \
     ((uint64_t)((const uint8_t*)(x))[5] << 40) |       \
     ((uint64_t)((const uint8_t*)(x))[4] << 32) |       \
     ((uint64_t)((const uint8_t*)(x))[3] << 24) |       \
     ((uint64_t)((const uint8_t*)(x))[2] << 16) |       \
     ((uint64_t)((const uint8_t*)(x))[1] <<  8) |       \
      (uint64_t)((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL64
#   define AV_WL64(p, darg) do {                \
        uint64_t d = (darg);                    \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[2] = (d)>>16;           \
        ((uint8_t*)(p))[3] = (d)>>24;           \
        ((uint8_t*)(p))[4] = (d)>>32;           \
        ((uint8_t*)(p))[5] = (d)>>40;           \
        ((uint8_t*)(p))[6] = (d)>>48;           \
        ((uint8_t*)(p))[7] = (d)>>56;           \
    } while(0)
#endif

#if AV_HAVE_BIGENDIAN
#   define AV_RN(s, p)    AV_RB##s(p)
#   define AV_WN(s, p, v) AV_WB##s(p, v)
#else
#   define AV_RN(s, p)    AV_RL##s(p)
#   define AV_WN(s, p, v) AV_WL##s(p, v)
#endif

#endif /* AV_HAVE_FAST_UNALIGNED */
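
/*
 * For instance, on the GCC path above, the generic AV_RN32(p) defined
 * below as AV_RN(32, p) expands to
 *
 *     ((const union unaligned_32 *) (p))->l
 *
 * so the compiler is free to use whatever unaligned 32-bit load the
 * target supports.
 */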

#ifndef AV_RN16
#   define AV_RN16(p) AV_RN(16, p)
#endif

#ifndef AV_RN32
#   define AV_RN32(p) AV_RN(32, p)
#endif

#ifndef AV_RN64
#   define AV_RN64(p) AV_RN(64, p)
#endif

#ifndef AV_WN16
#   define AV_WN16(p, v) AV_WN(16, p, v)
#endif

#ifndef AV_WN32
#   define AV_WN32(p, v) AV_WN(32, p, v)
#endif

#ifndef AV_WN64
#   define AV_WN64(p, v) AV_WN(64, p, v)
#endif

#if AV_HAVE_BIGENDIAN
#   define AV_RB(s, p)    AV_RN##s(p)
#   define AV_WB(s, p, v) AV_WN##s(p, v)
#   define AV_RL(s, p)    av_bswap##s(AV_RN##s(p))
#   define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v))
#else
#   define AV_RB(s, p)    av_bswap##s(AV_RN##s(p))
#   define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v))
#   define AV_RL(s, p)    AV_RN##s(p)
#   define AV_WL(s, p, v) AV_WN##s(p, v)
#endif

#define AV_RB8(x)     (((const uint8_t*)(x))[0])
#define AV_WB8(p, d)  do { ((uint8_t*)(p))[0] = (d); } while(0)

#define AV_RL8(x)     AV_RB8(x)
#define AV_WL8(p, d)  AV_WB8(p, d)
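
/*
 * Byte-exact illustration of the fixed-endian accessors (sketch only;
 * assert() is used purely for exposition):
 *
 *     uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };
 *     assert(AV_RB16(buf) == 0x1234);
 *     assert(AV_RB32(buf) == 0x12345678);
 *     assert(AV_RL32(buf) == 0x78563412);
 *     AV_WB16(buf, 0xABCD);
 *
 * after which buf[0] == 0xAB and buf[1] == 0xCD, independent of host
 * endianness.
 */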

#ifndef AV_RB16
#   define AV_RB16(p)    AV_RB(16, p)
#endif
#ifndef AV_WB16
#   define AV_WB16(p, v) AV_WB(16, p, v)
#endif

#ifndef AV_RL16
#   define AV_RL16(p)    AV_RL(16, p)
#endif
#ifndef AV_WL16
#   define AV_WL16(p, v) AV_WL(16, p, v)
#endif

#ifndef AV_RB32
#   define AV_RB32(p)    AV_RB(32, p)
#endif
#ifndef AV_WB32
#   define AV_WB32(p, v) AV_WB(32, p, v)
#endif

#ifndef AV_RL32
#   define AV_RL32(p)    AV_RL(32, p)
#endif
#ifndef AV_WL32
#   define AV_WL32(p, v) AV_WL(32, p, v)
#endif

#ifndef AV_RB64
#   define AV_RB64(p)    AV_RB(64, p)
#endif
#ifndef AV_WB64
#   define AV_WB64(p, v) AV_WB(64, p, v)
#endif

#ifndef AV_RL64
#   define AV_RL64(p)    AV_RL(64, p)
#endif
#ifndef AV_WL64
#   define AV_WL64(p, v) AV_WL(64, p, v)
#endif

#ifndef AV_RB24
#   define AV_RB24(x)                           \
    ((((const uint8_t*)(x))[0] << 16) |         \
     (((const uint8_t*)(x))[1] <<  8) |         \
      ((const uint8_t*)(x))[2])
#endif
#ifndef AV_WB24
#   define AV_WB24(p, d) do {                   \
        ((uint8_t*)(p))[2] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[0] = (d)>>16;           \
    } while(0)
#endif

#ifndef AV_RL24
#   define AV_RL24(x)                           \
    ((((const uint8_t*)(x))[2] << 16) |         \
     (((const uint8_t*)(x))[1] <<  8) |         \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL24
#   define AV_WL24(p, d) do {                   \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[2] = (d)>>16;           \
    } while(0)
#endif

#ifndef AV_RB48
#   define AV_RB48(x)                                     \
    (((uint64_t)((const uint8_t*)(x))[0] << 40) |         \
     ((uint64_t)((const uint8_t*)(x))[1] << 32) |         \
     ((uint64_t)((const uint8_t*)(x))[2] << 24) |         \
     ((uint64_t)((const uint8_t*)(x))[3] << 16) |         \
     ((uint64_t)((const uint8_t*)(x))[4] <<  8) |         \
      (uint64_t)((const uint8_t*)(x))[5])
#endif
#ifndef AV_WB48
#   define AV_WB48(p, darg) do {                \
        uint64_t d = (darg);                    \
        ((uint8_t*)(p))[5] = (d);               \
        ((uint8_t*)(p))[4] = (d)>>8;            \
        ((uint8_t*)(p))[3] = (d)>>16;           \
        ((uint8_t*)(p))[2] = (d)>>24;           \
        ((uint8_t*)(p))[1] = (d)>>32;           \
        ((uint8_t*)(p))[0] = (d)>>40;           \
    } while(0)
#endif

#ifndef AV_RL48
#   define AV_RL48(x)                                     \
    (((uint64_t)((const uint8_t*)(x))[5] << 40) |         \
     ((uint64_t)((const uint8_t*)(x))[4] << 32) |         \
     ((uint64_t)((const uint8_t*)(x))[3] << 24) |         \
     ((uint64_t)((const uint8_t*)(x))[2] << 16) |         \
     ((uint64_t)((const uint8_t*)(x))[1] <<  8) |         \
      (uint64_t)((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL48
#   define AV_WL48(p, darg) do {                \
        uint64_t d = (darg);                    \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[2] = (d)>>16;           \
        ((uint8_t*)(p))[3] = (d)>>24;           \
        ((uint8_t*)(p))[4] = (d)>>32;           \
        ((uint8_t*)(p))[5] = (d)>>40;           \
    } while(0)
#endif

/*
 * The AV_[RW]NA macros access naturally aligned data
 * in a type-safe way.
 */

#define AV_RNA(s, p)    (((const av_alias##s*)(p))->u##s)
#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v))

#ifndef AV_RN16A
#   define AV_RN16A(p) AV_RNA(16, p)
#endif

#ifndef AV_RN32A
#   define AV_RN32A(p) AV_RNA(32, p)
#endif

#ifndef AV_RN64A
#   define AV_RN64A(p) AV_RNA(64, p)
#endif

#ifndef AV_WN16A
#   define AV_WN16A(p, v) AV_WNA(16, p, v)
#endif

#ifndef AV_WN32A
#   define AV_WN32A(p, v) AV_WNA(32, p, v)
#endif

#ifndef AV_WN64A
#   define AV_WN64A(p, v) AV_WNA(64, p, v)
#endif
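
/*
 * Unlike the plain AV_[RW]N macros, the *A variants may be compiled as
 * ordinary aligned loads/stores, so the pointer has to be naturally
 * aligned for the access size, e.g. (illustrative only):
 *
 *     uint32_t word;
 *     AV_WN32A(&word, 0xDEADBEEF);
 *     assert(AV_RN32A(&word) == 0xDEADBEEF);
 */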

/*
 * The AV_COPYxxU macros are suitable for copying data to/from unaligned
 * memory locations.
 */

#define AV_COPYU(n, d, s) AV_WN##n(d, AV_RN##n(s));

#ifndef AV_COPY16U
#   define AV_COPY16U(d, s) AV_COPYU(16, d, s)
#endif

#ifndef AV_COPY32U
#   define AV_COPY32U(d, s) AV_COPYU(32, d, s)
#endif

#ifndef AV_COPY64U
#   define AV_COPY64U(d, s) AV_COPYU(64, d, s)
#endif

#ifndef AV_COPY128U
#   define AV_COPY128U(d, s)                                    \
    do {                                                        \
        AV_COPY64U(d, s);                                       \
        AV_COPY64U((char *)(d) + 8, (const char *)(s) + 8);     \
    } while(0)
#endif
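
/*
 * Sketch of an unaligned copy (illustrative only): both pointers may be
 * misaligned because the copy goes through AV_RN64/AV_WN64.
 *
 *     uint8_t src[9] = { 0 }, dst[9];
 *     AV_COPY64U(dst + 1, src + 1);
 */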

/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be
 * naturally aligned. They may be implemented using MMX,
 * so emms_c() must be called before using any float code
 * afterwards.
 */

#define AV_COPY(n, d, s) \
    (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n)

#ifndef AV_COPY16
#   define AV_COPY16(d, s) AV_COPY(16, d, s)
#endif

#ifndef AV_COPY32
#   define AV_COPY32(d, s) AV_COPY(32, d, s)
#endif

#ifndef AV_COPY64
#   define AV_COPY64(d, s) AV_COPY(64, d, s)
#endif

#ifndef AV_COPY128
#   define AV_COPY128(d, s)                    \
    do {                                       \
        AV_COPY64(d, s);                       \
        AV_COPY64((char*)(d)+8, (char*)(s)+8); \
    } while(0)
#endif

#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b))

#ifndef AV_SWAP64
#   define AV_SWAP64(a, b) AV_SWAP(64, a, b)
#endif

#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0)

#ifndef AV_ZERO16
#   define AV_ZERO16(d) AV_ZERO(16, d)
#endif

#ifndef AV_ZERO32
#   define AV_ZERO32(d) AV_ZERO(32, d)
#endif

#ifndef AV_ZERO64
#   define AV_ZERO64(d) AV_ZERO(64, d)
#endif

#ifndef AV_ZERO128
#   define AV_ZERO128(d)         \
    do {                         \
        AV_ZERO64(d);            \
        AV_ZERO64((char*)(d)+8); \
    } while(0)
#endif
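
/*
 * Illustrative use of the aligned helpers (sketch only; DECLARE_ALIGNED
 * comes from libavutil/mem.h, and aligned_src stands for any suitably
 * aligned 16-byte source):
 *
 *     DECLARE_ALIGNED(16, uint8_t, block)[16];
 *     AV_ZERO128(block);
 *     AV_COPY128(block, aligned_src);
 *
 * If an arch implements these with MMX, emms_c() must be called before
 * any following float code, as noted above.
 */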
#endif /* AVUTIL_INTREADWRITE_H */