/* Build a fixed-point reciprocal of denom: shift is ceil(log2(denom)),
 * ret approximates (1 << 52) / denom and err is the remainder of that division. */
int shift = av_log2(denom - 1) + 1;
uint64_t ret = (1ULL << 52) / denom;
uint64_t err = (1ULL << 52) - ret * denom;
return ret + err / denom;
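/* The next lines appear to be the matching fixed-point multiply: the 32-bit
 * value x is multiplied by a 64-bit mantissa in two 32-bit halves, and a
 * rounding bias is added at the bit position picked by av_log2(). */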
uint64_t l = x * (mantissa & 0xffffffff);
uint64_t h = x * (mantissa >> 32);
l += 1ULL << av_log2(h >> 21); /* 64-bit constant so the shift cannot overflow an int */
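/* The return below looks like a zigzag-style mapping from a signed byte to an
 * unsigned zero-run length: non-negative x maps to 2*x, negative x to -2*x - 1. */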
return (x * 2) ^ (x >> 7); /* multiply rather than left-shift a possibly negative value */
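/* The series[] table and loop below appear to decode a variable-length
 * probability: set bits read from the bitstream add successive series[]
 * entries to the bit count, which is then range-checked. */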
static const uint8_t series[] = { 1, 2, 3, 5, 8, 13, 21 };
for (i = 0; i < 7; i++) {
if (bits < 0 || bits > 31) {
} else if (bits == 0) {
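/* What follows looks like the probability-header setup for the range coder:
 * 257 probability slots are read, the running total is guarded against
 * 32-bit overflow, and the table is then rescaled so that the cumulative
 * probability becomes an exact power of two. */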
int i, j, scale_factor;
unsigned prob, cumulative_target;
unsigned cumul_prob = 0;
unsigned scaled_cumul_prob = 0;
rac->prob[257] = UINT_MAX;
for (i = 1; i < 257; i++) {
if ((uint64_t)cumul_prob + rac->prob[i] > UINT_MAX) {
cumul_prob += rac->prob[i];
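/* A zero probability is presumably followed by a coded count of further zero
 * entries; the j loop below fills that run. */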
for (j = 0; j < prob; j++)
/* Rescale so the cumulative probability is a power of two. */
scale_factor = av_log2(cumul_prob);
if (cumul_prob & (cumul_prob - 1)) {
for (i = 1; i < 257; i++) {
scaled_cumul_prob += rac->prob[i];
cumulative_target = 1U << scale_factor; /* unsigned constant avoids overflow when scale_factor is 31 */
if (scaled_cumul_prob > cumulative_target) {
       "Scaled probabilities are larger than target!\n");
scaled_cumul_prob = cumulative_target - scaled_cumul_prob;
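/* Rounding in the rescale can leave the total short of the target; the loop
 * below appears to hand the shortfall back out one count at a time, cycling
 * over table indices 1..128. */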
for (i = 1; scaled_cumul_prob; i = (i & 0x7f) + 1) {
rac->scale = scale_factor;
for (i = 1; i < 257; i++)
/* Median prediction in the HuffYUV style: the output is the median of left,
 * top and (left + top - topleft) plus the coded difference; note the gradient
 * term is not masked with 0xFF here. */
uint8_t *diff, int w, int *left,
for (i = 0; i < w; i++) {
l = mid_pred(l, src1[i], l + src1[i] - lt) + diff[i];
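/* The two lines below appear to seed the row predictors: L with the last
 * pixel of the previous row, TL with the last pixel two rows up. */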
L = buf[width - stride - 1];
TL = buf[width - (2 * stride) - 1];
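/* The lines below look like part of the per-line range-coder decode: pending
 * zeros from a previous run are flushed with memset(), and once esc_count
 * consecutive zero bytes have been produced a new zero run is decoded. */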
memset(dst + i, 0, count);
if (l->zeros == esc_count) {
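/* What follows appears to be the zero-run-only line decoder: the source is
 * scanned for an escape made of zero bytes, and the scan stops two bytes
 * before the end of the row so the lookahead reads stay within bounds. */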
const uint8_t *src, const uint8_t *src_end,
int width, int esc_count)
uint8_t zero_run = 0;
const uint8_t *src_start = src;
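/* mask1/mask2 use the -(condition) idiom: each is 0xFF when the condition
 * holds and 0x00 otherwise, so the corresponding lookahead byte either takes
 * part in the zero-run test below or is ignored. */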
uint8_t mask1 = -(esc_count < 2);
uint8_t mask2 = -(esc_count < 3);
uint8_t *end = dst + (width - 2);
if (end - dst < count) {
memset(dst, 0, count);
while (!zero_run && dst + i < end) {
if (src + i >= src_end)
    !(src[i] | (src[i + 1] & mask1) | (src[i + 2] & mask2));
return src_start - src;
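/* The fragment below appears to be the per-plane decoder: the first byte of
 * the plane data is an escape count that selects range-coded data, zero-run
 * coding, an uncompressed plane, a solid fill (0xff), or an error for any
 * other value. */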
const uint8_t *src, int src_size)
int esc_count = src[0];
const uint8_t *src_end = src + src_size;
if (esc_count && AV_RL32(src + 1) < length) {
for (i = 0; i < height; i++)
    "Output more bytes than length (%d of %d)\n", read,
} else if (esc_count < 8) {
for (i = 0; i < height; i++) {
    src_end, width, esc_count);
if (src_size < width * height)
/* Plane is stored uncompressed: copy each row verbatim. */
for (i = 0; i < height; i++) {
memcpy(dst + (i * stride), src, width);
} else if (esc_count == 0xff) {
/* Plane is a solid run of the value in src[1]. */
for (i = 0; i < height; i++)
memset(dst + i * stride, src[1], width);
    "Invalid zero run escape code! (%#x)\n", esc_count);
for (i = 0; i < height; i++) {
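/* The remaining lines appear to come from the frame-level decode callback:
 * a frame-type byte and per-plane offsets are read from the packet, solid
 * frames are filled with a single 32-bit value, and for coded frames every
 * plane offset is validated against the packet size before each plane is
 * decoded. */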
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
uint8_t frametype = 0;
uint32_t offset_gu = 0, offset_bv = 0, offset_ry = 9; /* presumably 1-byte frame type + two 32-bit offsets = 9-byte header */
uint8_t *srcs[4], *dst;
int i, j, planes = 3;
for (j = 0; j < avctx->height; j++) {
for (i = 0; i < avctx->width; i++)
AV_WN32(dst + i * 4, offset_gu);
for (i = 0; i < planes; i++)
if (offset_ry >= buf_size ||
    offset_gu >= buf_size ||
    offset_bv >= buf_size ||
    (planes == 4 && offs[3] >= buf_size)) {
    "Invalid frame offsets\n");
for (i = 0; i < planes; i++)
for (i = 0; i < planes; i++)
for (i = 0; i < avctx->width; i++) {
for (i = 0; i < planes; i++)
if (offset_ry >= buf_size ||
    offset_gu >= buf_size ||
    offset_bv >= buf_size) {
    "Invalid frame offsets\n");
    buf_size - offset_ry);
    buf + offset_gu, buf_size - offset_gu);
    buf + offset_bv, buf_size - offset_bv);
    "Unsupported Lagarith frame type: %#x\n", frametype);