Libav 0.7.1: libavutil/intreadwrite.h
/*
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_INTREADWRITE_H
#define AVUTIL_INTREADWRITE_H

#include <stdint.h>
#include "libavutil/avconfig.h"
#include "attributes.h"
#include "bswap.h"

typedef union {
    uint64_t u64;
    uint32_t u32[2];
    uint16_t u16[4];
    uint8_t  u8 [8];
    double   f64;
    float    f32[2];
} av_alias av_alias64;

typedef union {
    uint32_t u32;
    uint16_t u16[2];
    uint8_t  u8 [4];
    float    f32;
} av_alias av_alias32;

typedef union {
    uint16_t u16;
    uint8_t  u8 [2];
} av_alias av_alias16;

/*
 * Arch-specific headers can provide any combination of
 * AV_[RW][BLN](16|24|32|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.
 * Preprocessor symbols must be defined, even if these are implemented
 * as inline functions.
 */

#ifdef HAVE_AV_CONFIG_H

#include "config.h"

#if   ARCH_ARM
#   include "arm/intreadwrite.h"
#elif ARCH_AVR32
#   include "avr32/intreadwrite.h"
#elif ARCH_MIPS
#   include "mips/intreadwrite.h"
#elif ARCH_PPC
#   include "ppc/intreadwrite.h"
#elif ARCH_TOMI
#   include "tomi/intreadwrite.h"
#elif ARCH_X86
#   include "x86/intreadwrite.h"
#endif

#endif /* HAVE_AV_CONFIG_H */
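/*
 * Illustrative sketch (not part of the original header): the av_alias
 * unions declared above carry the may_alias attribute so that the same
 * bytes can be viewed as integer or floating-point data without
 * violating strict-aliasing rules.  The function name below is
 * hypothetical and the block is excluded from compilation.
 */
#if 0   /* example only */
static uint32_t example_float_to_bits(float f)
{
    av_alias32 a;
    a.f32 = f;          /* store the value as a float ...             */
    return a.u32;       /* ... and read the same bytes as a uint32_t  */
}
#endif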
/*
 * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.
 */

#if AV_HAVE_BIGENDIAN

# if   defined(AV_RN16) && !defined(AV_RB16)
#  define AV_RB16(p) AV_RN16(p)
# elif !defined(AV_RN16) && defined(AV_RB16)
#  define AV_RN16(p) AV_RB16(p)
# endif

# if   defined(AV_WN16) && !defined(AV_WB16)
#  define AV_WB16(p, v) AV_WN16(p, v)
# elif !defined(AV_WN16) && defined(AV_WB16)
#  define AV_WN16(p, v) AV_WB16(p, v)
# endif

# if   defined(AV_RN24) && !defined(AV_RB24)
#  define AV_RB24(p) AV_RN24(p)
# elif !defined(AV_RN24) && defined(AV_RB24)
#  define AV_RN24(p) AV_RB24(p)
# endif

# if   defined(AV_WN24) && !defined(AV_WB24)
#  define AV_WB24(p, v) AV_WN24(p, v)
# elif !defined(AV_WN24) && defined(AV_WB24)
#  define AV_WN24(p, v) AV_WB24(p, v)
# endif

# if   defined(AV_RN32) && !defined(AV_RB32)
#  define AV_RB32(p) AV_RN32(p)
# elif !defined(AV_RN32) && defined(AV_RB32)
#  define AV_RN32(p) AV_RB32(p)
# endif

# if   defined(AV_WN32) && !defined(AV_WB32)
#  define AV_WB32(p, v) AV_WN32(p, v)
# elif !defined(AV_WN32) && defined(AV_WB32)
#  define AV_WN32(p, v) AV_WB32(p, v)
# endif

# if   defined(AV_RN64) && !defined(AV_RB64)
#  define AV_RB64(p) AV_RN64(p)
# elif !defined(AV_RN64) && defined(AV_RB64)
#  define AV_RN64(p) AV_RB64(p)
# endif

# if   defined(AV_WN64) && !defined(AV_WB64)
#  define AV_WB64(p, v) AV_WN64(p, v)
# elif !defined(AV_WN64) && defined(AV_WB64)
#  define AV_WN64(p, v) AV_WB64(p, v)
# endif

#else /* AV_HAVE_BIGENDIAN */

# if   defined(AV_RN16) && !defined(AV_RL16)
#  define AV_RL16(p) AV_RN16(p)
# elif !defined(AV_RN16) && defined(AV_RL16)
#  define AV_RN16(p) AV_RL16(p)
# endif

# if   defined(AV_WN16) && !defined(AV_WL16)
#  define AV_WL16(p, v) AV_WN16(p, v)
# elif !defined(AV_WN16) && defined(AV_WL16)
#  define AV_WN16(p, v) AV_WL16(p, v)
# endif

# if   defined(AV_RN24) && !defined(AV_RL24)
#  define AV_RL24(p) AV_RN24(p)
# elif !defined(AV_RN24) && defined(AV_RL24)
#  define AV_RN24(p) AV_RL24(p)
# endif

# if   defined(AV_WN24) && !defined(AV_WL24)
#  define AV_WL24(p, v) AV_WN24(p, v)
# elif !defined(AV_WN24) && defined(AV_WL24)
#  define AV_WN24(p, v) AV_WL24(p, v)
# endif

# if   defined(AV_RN32) && !defined(AV_RL32)
#  define AV_RL32(p) AV_RN32(p)
# elif !defined(AV_RN32) && defined(AV_RL32)
#  define AV_RN32(p) AV_RL32(p)
# endif

# if   defined(AV_WN32) && !defined(AV_WL32)
#  define AV_WL32(p, v) AV_WN32(p, v)
# elif !defined(AV_WN32) && defined(AV_WL32)
#  define AV_WN32(p, v) AV_WL32(p, v)
# endif

# if   defined(AV_RN64) && !defined(AV_RL64)
#  define AV_RL64(p) AV_RN64(p)
# elif !defined(AV_RN64) && defined(AV_RL64)
#  define AV_RN64(p) AV_RL64(p)
# endif

# if   defined(AV_WN64) && !defined(AV_WL64)
#  define AV_WL64(p, v) AV_WN64(p, v)
# elif !defined(AV_WN64) && defined(AV_WL64)
#  define AV_WN64(p, v) AV_WL64(p, v)
# endif

#endif /* !AV_HAVE_BIGENDIAN */
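/*
 * Usage sketch (illustrative, not part of the original header): once the
 * whole header has been processed, AV_RB32 and AV_RL32 always read a fixed
 * byte order, while AV_RN32 reads host ("native") byte order.  The buffer
 * contents and function name are hypothetical; the block is excluded from
 * compilation.
 */
#if 0   /* example only */
static void example_fixed_vs_native_order(void)
{
    const uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };
    uint32_t be = AV_RB32(buf);   /* 0x12345678 on every host           */
    uint32_t le = AV_RL32(buf);   /* 0x78563412 on every host           */
    uint32_t ne = AV_RN32(buf);   /* equals be or le, depending on host */
    (void)be; (void)le; (void)ne;
}
#endif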
/*
 * Define AV_[RW]N helper macros to simplify definitions not provided
 * by per-arch headers.
 */

#if   defined(__GNUC__) && !defined(__TI_COMPILER_VERSION__)

union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias;
union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias;
union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;

#   define AV_RN(s, p) (((const union unaligned_##s *) (p))->l)
#   define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v))

#elif defined(__DECC)

#   define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))
#   define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))

#elif AV_HAVE_FAST_UNALIGNED

#   define AV_RN(s, p) (((const av_alias##s*)(p))->u##s)
#   define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v))

#else

#ifndef AV_RB16
#   define AV_RB16(x)                      \
    ((((const uint8_t*)(x))[0] << 8) |     \
      ((const uint8_t*)(x))[1])
#endif
#ifndef AV_WB16
#   define AV_WB16(p, d) do {              \
        ((uint8_t*)(p))[1] = (d);          \
        ((uint8_t*)(p))[0] = (d)>>8;       \
    } while(0)
#endif

#ifndef AV_RL16
#   define AV_RL16(x)                      \
    ((((const uint8_t*)(x))[1] << 8) |     \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL16
#   define AV_WL16(p, d) do {              \
        ((uint8_t*)(p))[0] = (d);          \
        ((uint8_t*)(p))[1] = (d)>>8;       \
    } while(0)
#endif

#ifndef AV_RB32
#   define AV_RB32(x)                                \
    (((uint32_t)((const uint8_t*)(x))[0] << 24) |    \
               (((const uint8_t*)(x))[1] << 16) |    \
               (((const uint8_t*)(x))[2] <<  8) |    \
                ((const uint8_t*)(x))[3])
#endif
#ifndef AV_WB32
#   define AV_WB32(p, d) do {              \
        ((uint8_t*)(p))[3] = (d);          \
        ((uint8_t*)(p))[2] = (d)>>8;       \
        ((uint8_t*)(p))[1] = (d)>>16;      \
        ((uint8_t*)(p))[0] = (d)>>24;      \
    } while(0)
#endif

#ifndef AV_RL32
#   define AV_RL32(x)                                \
    (((uint32_t)((const uint8_t*)(x))[3] << 24) |    \
               (((const uint8_t*)(x))[2] << 16) |    \
               (((const uint8_t*)(x))[1] <<  8) |    \
                ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL32
#   define AV_WL32(p, d) do {              \
        ((uint8_t*)(p))[0] = (d);          \
        ((uint8_t*)(p))[1] = (d)>>8;       \
        ((uint8_t*)(p))[2] = (d)>>16;      \
        ((uint8_t*)(p))[3] = (d)>>24;      \
    } while(0)
#endif

#ifndef AV_RB64
#   define AV_RB64(x)                                \
    (((uint64_t)((const uint8_t*)(x))[0] << 56) |    \
     ((uint64_t)((const uint8_t*)(x))[1] << 48) |    \
     ((uint64_t)((const uint8_t*)(x))[2] << 40) |    \
     ((uint64_t)((const uint8_t*)(x))[3] << 32) |    \
     ((uint64_t)((const uint8_t*)(x))[4] << 24) |    \
     ((uint64_t)((const uint8_t*)(x))[5] << 16) |    \
     ((uint64_t)((const uint8_t*)(x))[6] <<  8) |    \
      (uint64_t)((const uint8_t*)(x))[7])
#endif
#ifndef AV_WB64
#   define AV_WB64(p, d) do {              \
        ((uint8_t*)(p))[7] = (d);          \
        ((uint8_t*)(p))[6] = (d)>>8;       \
        ((uint8_t*)(p))[5] = (d)>>16;      \
        ((uint8_t*)(p))[4] = (d)>>24;      \
        ((uint8_t*)(p))[3] = (d)>>32;      \
        ((uint8_t*)(p))[2] = (d)>>40;      \
        ((uint8_t*)(p))[1] = (d)>>48;      \
        ((uint8_t*)(p))[0] = (d)>>56;      \
    } while(0)
#endif

#ifndef AV_RL64
#   define AV_RL64(x)                                \
    (((uint64_t)((const uint8_t*)(x))[7] << 56) |    \
     ((uint64_t)((const uint8_t*)(x))[6] << 48) |    \
     ((uint64_t)((const uint8_t*)(x))[5] << 40) |    \
     ((uint64_t)((const uint8_t*)(x))[4] << 32) |    \
     ((uint64_t)((const uint8_t*)(x))[3] << 24) |    \
     ((uint64_t)((const uint8_t*)(x))[2] << 16) |    \
     ((uint64_t)((const uint8_t*)(x))[1] <<  8) |    \
      (uint64_t)((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL64
#   define AV_WL64(p, d) do {              \
        ((uint8_t*)(p))[0] = (d);          \
        ((uint8_t*)(p))[1] = (d)>>8;       \
        ((uint8_t*)(p))[2] = (d)>>16;      \
        ((uint8_t*)(p))[3] = (d)>>24;      \
        ((uint8_t*)(p))[4] = (d)>>32;      \
        ((uint8_t*)(p))[5] = (d)>>40;      \
        ((uint8_t*)(p))[6] = (d)>>48;      \
        ((uint8_t*)(p))[7] = (d)>>56;      \
    } while(0)
#endif

#if AV_HAVE_BIGENDIAN
#   define AV_RN(s, p)    AV_RB##s(p)
#   define AV_WN(s, p, v) AV_WB##s(p, v)
#else
#   define AV_RN(s, p)    AV_RL##s(p)
#   define AV_WN(s, p, v) AV_WL##s(p, v)
#endif

#endif /* HAVE_FAST_UNALIGNED */
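/*
 * Usage sketch (illustrative, not part of the original header): whichever
 * branch above was taken, a read such as AV_RB32 works at any byte offset,
 * aligned or not, through either the fast-unaligned path or the byte-wise
 * fallback; the big-endian variants are defined directly above or further
 * below.  Function and parameter names are hypothetical; the block is
 * excluded from compilation.
 */
#if 0   /* example only */
static uint32_t example_read_be32_unaligned(const uint8_t *buf, unsigned offset)
{
    /* No alignment requirement on buf + offset. */
    return AV_RB32(buf + offset);
}
#endif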
#ifndef AV_RN16
#   define AV_RN16(p) AV_RN(16, p)
#endif

#ifndef AV_RN32
#   define AV_RN32(p) AV_RN(32, p)
#endif

#ifndef AV_RN64
#   define AV_RN64(p) AV_RN(64, p)
#endif

#ifndef AV_WN16
#   define AV_WN16(p, v) AV_WN(16, p, v)
#endif

#ifndef AV_WN32
#   define AV_WN32(p, v) AV_WN(32, p, v)
#endif

#ifndef AV_WN64
#   define AV_WN64(p, v) AV_WN(64, p, v)
#endif

#if AV_HAVE_BIGENDIAN
#   define AV_RB(s, p)    AV_RN##s(p)
#   define AV_WB(s, p, v) AV_WN##s(p, v)
#   define AV_RL(s, p)    av_bswap##s(AV_RN##s(p))
#   define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v))
#else
#   define AV_RB(s, p)    av_bswap##s(AV_RN##s(p))
#   define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v))
#   define AV_RL(s, p)    AV_RN##s(p)
#   define AV_WL(s, p, v) AV_WN##s(p, v)
#endif

#define AV_RB8(x)     (((const uint8_t*)(x))[0])
#define AV_WB8(p, d)  do { ((uint8_t*)(p))[0] = (d); } while(0)

#define AV_RL8(x)     AV_RB8(x)
#define AV_WL8(p, d)  AV_WB8(p, d)

#ifndef AV_RB16
#   define AV_RB16(p)    AV_RB(16, p)
#endif
#ifndef AV_WB16
#   define AV_WB16(p, v) AV_WB(16, p, v)
#endif

#ifndef AV_RL16
#   define AV_RL16(p)    AV_RL(16, p)
#endif
#ifndef AV_WL16
#   define AV_WL16(p, v) AV_WL(16, p, v)
#endif

#ifndef AV_RB32
#   define AV_RB32(p)    AV_RB(32, p)
#endif
#ifndef AV_WB32
#   define AV_WB32(p, v) AV_WB(32, p, v)
#endif

#ifndef AV_RL32
#   define AV_RL32(p)    AV_RL(32, p)
#endif
#ifndef AV_WL32
#   define AV_WL32(p, v) AV_WL(32, p, v)
#endif

#ifndef AV_RB64
#   define AV_RB64(p)    AV_RB(64, p)
#endif
#ifndef AV_WB64
#   define AV_WB64(p, v) AV_WB(64, p, v)
#endif

#ifndef AV_RL64
#   define AV_RL64(p)    AV_RL(64, p)
#endif
#ifndef AV_WL64
#   define AV_WL64(p, v) AV_WL(64, p, v)
#endif
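/*
 * Usage sketch (illustrative, not part of the original header): the write
 * macros defined above store a value in an explicit byte order, e.g.
 * little-endian fields in a RIFF/WAV header or big-endian fields in network
 * protocols.  The destination buffer and values are hypothetical; the block
 * is excluded from compilation.
 */
#if 0   /* example only */
static void example_explicit_order_writes(uint8_t *dst)
{
    AV_WL16(dst,     1);       /* bytes 01 00: 16-bit little-endian       */
    AV_WB32(dst + 2, 44100);   /* bytes 00 00 AC 44: 32-bit big-endian    */
}
#endif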
#ifndef AV_RB24
#   define AV_RB24(x)                      \
    ((((const uint8_t*)(x))[0] << 16) |    \
     (((const uint8_t*)(x))[1] <<  8) |    \
      ((const uint8_t*)(x))[2])
#endif
#ifndef AV_WB24
#   define AV_WB24(p, d) do {              \
        ((uint8_t*)(p))[2] = (d);          \
        ((uint8_t*)(p))[1] = (d)>>8;       \
        ((uint8_t*)(p))[0] = (d)>>16;      \
    } while(0)
#endif

#ifndef AV_RL24
#   define AV_RL24(x)                      \
    ((((const uint8_t*)(x))[2] << 16) |    \
     (((const uint8_t*)(x))[1] <<  8) |    \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL24
#   define AV_WL24(p, d) do {              \
        ((uint8_t*)(p))[0] = (d);          \
        ((uint8_t*)(p))[1] = (d)>>8;       \
        ((uint8_t*)(p))[2] = (d)>>16;      \
    } while(0)
#endif

/*
 * The AV_[RW]NA macros access naturally aligned data
 * in a type-safe way.
 */

#define AV_RNA(s, p)    (((const av_alias##s*)(p))->u##s)
#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v))

#ifndef AV_RN16A
#   define AV_RN16A(p) AV_RNA(16, p)
#endif

#ifndef AV_RN32A
#   define AV_RN32A(p) AV_RNA(32, p)
#endif

#ifndef AV_RN64A
#   define AV_RN64A(p) AV_RNA(64, p)
#endif

#ifndef AV_WN16A
#   define AV_WN16A(p, v) AV_WNA(16, p, v)
#endif

#ifndef AV_WN32A
#   define AV_WN32A(p, v) AV_WNA(32, p, v)
#endif

#ifndef AV_WN64A
#   define AV_WN64A(p, v) AV_WNA(64, p, v)
#endif

/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be
 * naturally aligned. They may be implemented using MMX,
 * so emms_c() must be called before using any float code
 * afterwards.
 */

#define AV_COPY(n, d, s) \
    (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n)

#ifndef AV_COPY16
#   define AV_COPY16(d, s) AV_COPY(16, d, s)
#endif

#ifndef AV_COPY32
#   define AV_COPY32(d, s) AV_COPY(32, d, s)
#endif

#ifndef AV_COPY64
#   define AV_COPY64(d, s) AV_COPY(64, d, s)
#endif

#ifndef AV_COPY128
#   define AV_COPY128(d, s)                    \
    do {                                       \
        AV_COPY64(d, s);                       \
        AV_COPY64((char*)(d)+8, (char*)(s)+8); \
    } while(0)
#endif

#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b))

#ifndef AV_SWAP64
#   define AV_SWAP64(a, b) AV_SWAP(64, a, b)
#endif

#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0)

#ifndef AV_ZERO16
#   define AV_ZERO16(d) AV_ZERO(16, d)
#endif

#ifndef AV_ZERO32
#   define AV_ZERO32(d) AV_ZERO(32, d)
#endif

#ifndef AV_ZERO64
#   define AV_ZERO64(d) AV_ZERO(64, d)
#endif

#ifndef AV_ZERO128
#   define AV_ZERO128(d)         \
    do {                         \
        AV_ZERO64(d);            \
        AV_ZERO64((char*)(d)+8); \
    } while(0)
#endif

#endif /* AVUTIL_INTREADWRITE_H */
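/*
 * Usage sketch appended after the listing (illustrative, not part of the
 * original header): AV_COPY128 and AV_ZERO128 operate on naturally aligned
 * blocks; declaring the buffers as av_alias64 arrays guarantees 8-byte
 * alignment.  Names and values are hypothetical; the block is excluded
 * from compilation.
 */
#if 0   /* example only */
static void example_copy_and_zero(void)
{
    av_alias64 src[2], dst[2];           /* two 16-byte, 8-byte-aligned blocks */
    src[0].u64 = 0x0123456789abcdefULL;
    src[1].u64 = 0xfedcba9876543210ULL;
    AV_COPY128(dst, src);                /* copy 16 aligned bytes              */
    AV_ZERO128(src);                     /* clear the 16 source bytes          */
    (void)dst;
}
#endif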