Sync with aom branch for ext-refs

Plus a small code cleanup. Compared against the baseline and measured in
Overall PSNR, the EXT_REFS experiment now obtains the following gain on
lowres:
Avg: -5.818; BDRate: -5.653

Compared against the previous EXT_REFS results on lowres, a small additional
gain is obtained:
Avg: -0.047; BDRate: -0.063
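
(For context, the Avg/BDRate numbers above follow the usual Bjøntegaard-style
comparison of two rate/PSNR curves. The sketch below is a simplified,
piecewise-linear approximation of that calculation, not the exact
polynomial-fit procedure used by the test harness; the function and variable
names are hypothetical.)

#include <math.h>

// Piecewise-linear interpolation of log(bitrate) at a given PSNR.
// Assumes psnr[] is sorted in increasing order.
static double interp_log_rate(const double *psnr, const double *rate, int n,
                              double target_psnr) {
  int i;
  for (i = 0; i < n - 1; ++i) {
    if (target_psnr >= psnr[i] && target_psnr <= psnr[i + 1]) {
      const double t = (target_psnr - psnr[i]) / (psnr[i + 1] - psnr[i]);
      return (1.0 - t) * log(rate[i]) + t * log(rate[i + 1]);
    }
  }
  return log(rate[n - 1]);
}

// Approximate BD-rate: average log-rate difference over the PSNR range shared
// by both curves, converted back to a percentage. Negative values mean the
// test configuration needs fewer bits than the baseline for the same quality.
double approx_bd_rate(const double *base_psnr, const double *base_rate,
                      const double *test_psnr, const double *test_rate, int n) {
  const double lo = fmax(base_psnr[0], test_psnr[0]);
  const double hi = fmin(base_psnr[n - 1], test_psnr[n - 1]);
  const int samples = 100;
  double sum = 0.0;
  int i;
  for (i = 0; i < samples; ++i) {
    const double p = lo + (hi - lo) * i / (samples - 1);
    sum += interp_log_rate(test_psnr, test_rate, n, p) -
           interp_log_rate(base_psnr, base_rate, n, p);
  }
  return (exp(sum / samples) - 1.0) * 100.0;
}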

(1) 780952 Add encoder first pass support to bi-prediction in EXT_REFS
(2) f91498 Add pred prob handling for new references in EXT_REFS
(3) e91472 Add decoder support for bi-direct prediction in EXT_REFS
(4) 0dbac9 Add encoder support to new references in EXT_REFS
(5) ad70cc Remove hard-coded number for EXT_REFS
(6) 9c1e2f Add the use of new reference frames at encoder in EXT_REFS
(7) 6d4fde Add the experiment flag of EXT_REFS

Change-Id: I26f7ca45b9ede7579fdb9d0d6a1a91f4334599bd
Zoe Liu 2016-10-18 17:12:11 -07:00
Parent cfc5ac5034
Commit 6cfaff95b7
8 changed files with 125 additions and 177 deletions

View file

@@ -195,13 +195,6 @@ typedef struct AV1Common {
int new_fb_idx;
FRAME_TYPE last_frame_type; /* last frame's frame type for motion search.*/
#if CONFIG_EXT_REFS
// frame type of the frame before last frame
FRAME_TYPE last2_frame_type;
// TODO(zoeliu): To check whether last3_frame_type is still needed.
// frame type of the frame two frames before last frame
FRAME_TYPE last3_frame_type;
#endif // CONFIG_EXT_REFS
FRAME_TYPE frame_type;
int show_frame;

View file

@@ -167,101 +167,60 @@ int av1_get_intra_inter_context(const MACROBLOCKD *xd) {
}
#if CONFIG_EXT_REFS
#define CHECK_BWDREF_OR_ALTREF(ref_frame) \
(((ref_frame) == BWDREF_FRAME) || ((ref_frame) == ALTREF_FRAME))
int av1_get_reference_mode_context(const AV1_COMMON *cm,
const MACROBLOCKD *xd) {
int ctx;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
const int has_above = xd->up_available;
const int has_left = xd->left_available;
(void)cm;
// Note:
// The mode info data structure has a one element border above and to the
// left of the entries corresponding to real macroblocks.
// The prediction flags in these dummy entries are initialized to 0.
if (has_above && has_left) { // both edges available
if (!has_second_ref(above_mbmi) && !has_second_ref(left_mbmi))
// neither edge uses comp pred (0/1)
ctx = CHECK_BWDREF_OR_ALTREF(above_mbmi->ref_frame[0]) ^
CHECK_BWDREF_OR_ALTREF(left_mbmi->ref_frame[0]);
else if (!has_second_ref(above_mbmi))
// one of two edges uses comp pred (2/3)
ctx = 2 + (CHECK_BWDREF_OR_ALTREF(above_mbmi->ref_frame[0]) ||
!is_inter_block(above_mbmi));
else if (!has_second_ref(left_mbmi))
// one of two edges uses comp pred (2/3)
ctx = 2 + (CHECK_BWDREF_OR_ALTREF(left_mbmi->ref_frame[0]) ||
!is_inter_block(left_mbmi));
else // both edges use comp pred (4)
ctx = 4;
} else if (has_above || has_left) { // one edge available
const MB_MODE_INFO *edge_mbmi = has_above ? above_mbmi : left_mbmi;
if (!has_second_ref(edge_mbmi))
// edge does not use comp pred (0/1)
ctx = CHECK_BWDREF_OR_ALTREF(edge_mbmi->ref_frame[0]);
else
// edge uses comp pred (3)
ctx = 3;
} else { // no edges available (1)
ctx = 1;
}
assert(ctx >= 0 && ctx < COMP_INTER_CONTEXTS);
return ctx;
}
#else // CONFIG_EXT_REFS
int av1_get_reference_mode_context(const AV1_COMMON *cm,
const MACROBLOCKD *xd) {
int ctx;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
const int has_above = xd->up_available;
const int has_left = xd->left_available;
// Note:
// The mode info data structure has a one element border above and to the
// left of the entries corresponding to real macroblocks.
// The prediction flags in these dummy entries are initialized to 0.
if (has_above && has_left) { // both edges available
if (!has_second_ref(above_mbmi) && !has_second_ref(left_mbmi))
// neither edge uses comp pred (0/1)
ctx = (above_mbmi->ref_frame[0] == cm->comp_fixed_ref) ^
(left_mbmi->ref_frame[0] == cm->comp_fixed_ref);
else if (!has_second_ref(above_mbmi))
// one of two edges uses comp pred (2/3)
ctx = 2 + (above_mbmi->ref_frame[0] == cm->comp_fixed_ref ||
!is_inter_block(above_mbmi));
else if (!has_second_ref(left_mbmi))
// one of two edges uses comp pred (2/3)
ctx = 2 + (left_mbmi->ref_frame[0] == cm->comp_fixed_ref ||
!is_inter_block(left_mbmi));
else // both edges use comp pred (4)
ctx = 4;
} else if (has_above || has_left) { // one edge available
const MB_MODE_INFO *edge_mbmi = has_above ? above_mbmi : left_mbmi;
if (!has_second_ref(edge_mbmi))
// edge does not use comp pred (0/1)
ctx = edge_mbmi->ref_frame[0] == cm->comp_fixed_ref;
else
// edge uses comp pred (3)
ctx = 3;
} else { // no edges available (1)
ctx = 1;
}
assert(ctx >= 0 && ctx < COMP_INTER_CONTEXTS);
return ctx;
}
#define CHECK_BACKWARD_REFS(ref_frame) \
(((ref_frame) >= BWDREF_FRAME) && ((ref_frame) <= ALTREF_FRAME))
#define IS_BACKWARD_REF_FRAME(ref_frame) CHECK_BACKWARD_REFS(ref_frame)
#else
#define IS_BACKWARD_REF_FRAME(ref_frame) ((ref_frame) == cm->comp_fixed_ref)
#endif // CONFIG_EXT_REFS
int av1_get_reference_mode_context(const AV1_COMMON *cm,
const MACROBLOCKD *xd) {
int ctx;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
const int has_above = xd->up_available;
const int has_left = xd->left_available;
#if CONFIG_EXT_REFS
(void)cm;
#endif // CONFIG_EXT_REFS
// Note:
// The mode info data structure has a one element border above and to the
// left of the entries corresponding to real macroblocks.
// The prediction flags in these dummy entries are initialized to 0.
if (has_above && has_left) { // both edges available
if (!has_second_ref(above_mbmi) && !has_second_ref(left_mbmi))
// neither edge uses comp pred (0/1)
ctx = IS_BACKWARD_REF_FRAME(above_mbmi->ref_frame[0]) ^
IS_BACKWARD_REF_FRAME(left_mbmi->ref_frame[0]);
else if (!has_second_ref(above_mbmi))
// one of two edges uses comp pred (2/3)
ctx = 2 + (IS_BACKWARD_REF_FRAME(above_mbmi->ref_frame[0]) ||
!is_inter_block(above_mbmi));
else if (!has_second_ref(left_mbmi))
// one of two edges uses comp pred (2/3)
ctx = 2 + (IS_BACKWARD_REF_FRAME(left_mbmi->ref_frame[0]) ||
!is_inter_block(left_mbmi));
else // both edges use comp pred (4)
ctx = 4;
} else if (has_above || has_left) { // one edge available
const MB_MODE_INFO *edge_mbmi = has_above ? above_mbmi : left_mbmi;
if (!has_second_ref(edge_mbmi))
// edge does not use comp pred (0/1)
ctx = IS_BACKWARD_REF_FRAME(edge_mbmi->ref_frame[0]);
else
// edge uses comp pred (3)
ctx = 3;
} else { // no edges available (1)
ctx = 1;
}
assert(ctx >= 0 && ctx < COMP_INTER_CONTEXTS);
return ctx;
}
#if CONFIG_EXT_REFS
// TODO(zoeliu): Future work will be conducted to optimize the context design
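
The net effect of the hunk above is that the two near-identical copies of
av1_get_reference_mode_context() collapse into a single body, with the only
config-dependent piece factored into IS_BACKWARD_REF_FRAME. A minimal
standalone illustration of that test (the enum values are a hypothetical
stand-in for the real MV_REFERENCE_FRAME ordering under EXT_REFS):

// Illustration only; not part of this change.
enum {
  LAST_FRAME = 1, LAST2_FRAME, LAST3_FRAME,
  GOLDEN_FRAME, BWDREF_FRAME, ALTREF_FRAME
};

#define CHECK_BACKWARD_REFS(ref_frame) \
  (((ref_frame) >= BWDREF_FRAME) && ((ref_frame) <= ALTREF_FRAME))

// Mirrors the "one edge available, single reference" branch above: a forward
// reference such as GOLDEN_FRAME yields context 0, while BWDREF_FRAME or
// ALTREF_FRAME yields context 1.
static int single_edge_comp_inter_ctx(int edge_ref_frame) {
  return CHECK_BACKWARD_REFS(edge_ref_frame);
}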
@@ -323,8 +282,8 @@ int av1_get_pred_context_comp_ref_p(const AV1_COMMON *cm,
if (frfa == frfl && CHECK_GOLDEN_OR_LAST3(frfa)) {
pred_context = 0;
} else if (l_sg && a_sg) { // single/single
if ((CHECK_BWDREF_OR_ALTREF(frfa) && CHECK_LAST_OR_LAST2(frfl)) ||
(CHECK_BWDREF_OR_ALTREF(frfl) && CHECK_LAST_OR_LAST2(frfa))) {
if ((CHECK_BACKWARD_REFS(frfa) && CHECK_LAST_OR_LAST2(frfl)) ||
(CHECK_BACKWARD_REFS(frfl) && CHECK_LAST_OR_LAST2(frfa))) {
pred_context = 4;
} else if (CHECK_GOLDEN_OR_LAST3(frfa) || CHECK_GOLDEN_OR_LAST3(frfl)) {
pred_context = 1;
@@ -426,7 +385,7 @@ int av1_get_pred_context_comp_ref_p1(const AV1_COMMON *cm,
else if (CHECK_GOLDEN_OR_LAST3(frfa) || CHECK_GOLDEN_OR_LAST3(frfl))
pred_context = 2 + (frfa != frfl);
else if (frfa == frfl ||
(CHECK_BWDREF_OR_ALTREF(frfa) && CHECK_BWDREF_OR_ALTREF(frfl)))
(CHECK_BACKWARD_REFS(frfa) && CHECK_BACKWARD_REFS(frfl)))
pred_context = 3;
else
pred_context = 4;
@@ -527,7 +486,7 @@ int av1_get_pred_context_comp_ref_p2(const AV1_COMMON *cm,
else if (CHECK_LAST_OR_LAST2(frfa) || CHECK_LAST_OR_LAST2(frfl))
pred_context = 2 + (frfa != frfl);
else if (frfa == frfl ||
(CHECK_BWDREF_OR_ALTREF(frfa) && CHECK_BWDREF_OR_ALTREF(frfl)))
(CHECK_BACKWARD_REFS(frfa) && CHECK_BACKWARD_REFS(frfl)))
pred_context = 3;
else
pred_context = 4;
@@ -798,10 +757,10 @@ int av1_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
const MB_MODE_INFO *edge_mbmi = above_intra ? left_mbmi : above_mbmi;
if (!has_second_ref(edge_mbmi))
pred_context = 4 * (!CHECK_BWDREF_OR_ALTREF(edge_mbmi->ref_frame[0]));
pred_context = 4 * (!CHECK_BACKWARD_REFS(edge_mbmi->ref_frame[0]));
else
pred_context = 1 + (!CHECK_BWDREF_OR_ALTREF(edge_mbmi->ref_frame[0]) ||
!CHECK_BWDREF_OR_ALTREF(edge_mbmi->ref_frame[1]));
pred_context = 1 + (!CHECK_BACKWARD_REFS(edge_mbmi->ref_frame[0]) ||
!CHECK_BACKWARD_REFS(edge_mbmi->ref_frame[1]));
} else { // inter/inter
const int above_has_second = has_second_ref(above_mbmi);
const int left_has_second = has_second_ref(left_mbmi);
@@ -812,24 +771,23 @@ int av1_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
const MV_REFERENCE_FRAME left1 = left_mbmi->ref_frame[1];
if (above_has_second && left_has_second) {
pred_context = 1 + (!CHECK_BWDREF_OR_ALTREF(above0) ||
!CHECK_BWDREF_OR_ALTREF(above1) ||
!CHECK_BWDREF_OR_ALTREF(left0) ||
!CHECK_BWDREF_OR_ALTREF(left1));
pred_context =
1 + (!CHECK_BACKWARD_REFS(above0) || !CHECK_BACKWARD_REFS(above1) ||
!CHECK_BACKWARD_REFS(left0) || !CHECK_BACKWARD_REFS(left1));
} else if (above_has_second || left_has_second) {
const MV_REFERENCE_FRAME rfs = !above_has_second ? above0 : left0;
const MV_REFERENCE_FRAME crf1 = above_has_second ? above0 : left0;
const MV_REFERENCE_FRAME crf2 = above_has_second ? above1 : left1;
if (!CHECK_BWDREF_OR_ALTREF(rfs))
pred_context = 3 + (!CHECK_BWDREF_OR_ALTREF(crf1) ||
!CHECK_BWDREF_OR_ALTREF(crf2));
if (!CHECK_BACKWARD_REFS(rfs))
pred_context =
3 + (!CHECK_BACKWARD_REFS(crf1) || !CHECK_BACKWARD_REFS(crf2));
else
pred_context =
!CHECK_BWDREF_OR_ALTREF(crf1) || !CHECK_BWDREF_OR_ALTREF(crf2);
!CHECK_BACKWARD_REFS(crf1) || !CHECK_BACKWARD_REFS(crf2);
} else {
pred_context = 2 * (!CHECK_BWDREF_OR_ALTREF(above0)) +
2 * (!CHECK_BWDREF_OR_ALTREF(left0));
pred_context = 2 * (!CHECK_BACKWARD_REFS(above0)) +
2 * (!CHECK_BACKWARD_REFS(left0));
}
}
} else if (has_above || has_left) { // one edge available
@@ -838,10 +796,10 @@ int av1_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
pred_context = 2;
} else { // inter
if (!has_second_ref(edge_mbmi))
pred_context = 4 * (!CHECK_BWDREF_OR_ALTREF(edge_mbmi->ref_frame[0]));
pred_context = 4 * (!CHECK_BACKWARD_REFS(edge_mbmi->ref_frame[0]));
else
pred_context = 1 + (!CHECK_BWDREF_OR_ALTREF(edge_mbmi->ref_frame[0]) ||
!CHECK_BWDREF_OR_ALTREF(edge_mbmi->ref_frame[1]));
pred_context = 1 + (!CHECK_BACKWARD_REFS(edge_mbmi->ref_frame[0]) ||
!CHECK_BACKWARD_REFS(edge_mbmi->ref_frame[1]));
}
} else { // no edges available
pred_context = 2;
@@ -876,7 +834,7 @@ int av1_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
} else if (above_intra || left_intra) { // intra/inter or inter/intra
const MB_MODE_INFO *edge_mbmi = above_intra ? left_mbmi : above_mbmi;
if (!has_second_ref(edge_mbmi)) {
if (!CHECK_BWDREF_OR_ALTREF(edge_mbmi->ref_frame[0]))
if (!CHECK_BACKWARD_REFS(edge_mbmi->ref_frame[0]))
pred_context = 3;
else
pred_context = 4 * (edge_mbmi->ref_frame[0] == BWDREF_FRAME);
@@ -912,12 +870,12 @@ int av1_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
else
pred_context = 1 + 2 * (crf1 == BWDREF_FRAME || crf2 == BWDREF_FRAME);
} else {
if (!CHECK_BWDREF_OR_ALTREF(above0) && !CHECK_BWDREF_OR_ALTREF(left0)) {
if (!CHECK_BACKWARD_REFS(above0) && !CHECK_BACKWARD_REFS(left0)) {
pred_context = 2 + (above0 == left0);
} else if (!CHECK_BWDREF_OR_ALTREF(above0) ||
!CHECK_BWDREF_OR_ALTREF(left0)) {
} else if (!CHECK_BACKWARD_REFS(above0) ||
!CHECK_BACKWARD_REFS(left0)) {
const MV_REFERENCE_FRAME edge0 =
!CHECK_BWDREF_OR_ALTREF(above0) ? left0 : above0;
!CHECK_BACKWARD_REFS(above0) ? left0 : above0;
pred_context = 4 * (edge0 == BWDREF_FRAME);
} else {
pred_context =
@@ -929,7 +887,7 @@ int av1_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
const MB_MODE_INFO *edge_mbmi = has_above ? above_mbmi : left_mbmi;
if (!is_inter_block(edge_mbmi) ||
(!CHECK_BWDREF_OR_ALTREF(edge_mbmi->ref_frame[0]) &&
(!CHECK_BACKWARD_REFS(edge_mbmi->ref_frame[0]) &&
!has_second_ref(edge_mbmi)))
pred_context = 2;
else if (!has_second_ref(edge_mbmi))
@@ -970,7 +928,7 @@ int av1_get_pred_context_single_ref_p3(const MACROBLOCKD *xd) {
} else if (above_intra || left_intra) { // intra/inter or inter/intra
const MB_MODE_INFO *edge_mbmi = above_intra ? left_mbmi : above_mbmi;
if (!has_second_ref(edge_mbmi)) {
if (CHECK_BWDREF_OR_ALTREF(edge_mbmi->ref_frame[0]))
if (CHECK_BACKWARD_REFS(edge_mbmi->ref_frame[0]))
pred_context = 3;
else
pred_context = 4 * CHECK_LAST_OR_LAST2(edge_mbmi->ref_frame[0]);
@@ -1009,12 +967,11 @@ int av1_get_pred_context_single_ref_p3(const MACROBLOCKD *xd) {
pred_context =
1 + 2 * (CHECK_LAST_OR_LAST2(crf1) || CHECK_LAST_OR_LAST2(crf2));
} else {
if (CHECK_BWDREF_OR_ALTREF(above0) && CHECK_BWDREF_OR_ALTREF(left0)) {
if (CHECK_BACKWARD_REFS(above0) && CHECK_BACKWARD_REFS(left0)) {
pred_context = 2 + (above0 == left0);
} else if (CHECK_BWDREF_OR_ALTREF(above0) ||
CHECK_BWDREF_OR_ALTREF(left0)) {
} else if (CHECK_BACKWARD_REFS(above0) || CHECK_BACKWARD_REFS(left0)) {
const MV_REFERENCE_FRAME edge0 =
CHECK_BWDREF_OR_ALTREF(above0) ? left0 : above0;
CHECK_BACKWARD_REFS(above0) ? left0 : above0;
pred_context = 4 * CHECK_LAST_OR_LAST2(edge0);
} else {
pred_context =
@@ -1026,7 +983,7 @@ int av1_get_pred_context_single_ref_p3(const MACROBLOCKD *xd) {
const MB_MODE_INFO *edge_mbmi = has_above ? above_mbmi : left_mbmi;
if (!is_inter_block(edge_mbmi) ||
(CHECK_BWDREF_OR_ALTREF(edge_mbmi->ref_frame[0]) &&
(CHECK_BACKWARD_REFS(edge_mbmi->ref_frame[0]) &&
!has_second_ref(edge_mbmi)))
pred_context = 2;
else if (!has_second_ref(edge_mbmi))
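
For orientation, the single-reference context functions touched above (p1, p2,
p3) line up with what appears to be the EXT_REFS single-reference coding tree.
The summary below is inferred from the checks used in this file and is an
illustration, not code from this change:

// Assumed EXT_REFS single-reference decision tree (one coded bit per level):
//   p1: {LAST, LAST2, LAST3, GOLDEN} vs {BWDREF, ALTREF}  -- CHECK_BACKWARD_REFS
//   p2: BWDREF vs ALTREF                                  -- backward branch
//   p3: {LAST, LAST2} vs {LAST3, GOLDEN}                  -- CHECK_LAST_OR_LAST2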

View file

@@ -127,7 +127,6 @@ static INLINE aom_prob av1_get_pred_prob_comp_bwdref_p(const AV1_COMMON *cm,
const int pred_context = av1_get_pred_context_comp_bwdref_p(cm, xd);
return cm->fc->comp_bwdref_prob[pred_context][0];
}
#endif // CONFIG_EXT_REFS
int av1_get_pred_context_single_ref_p1(const MACROBLOCKD *xd);
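
As a sketch of how a per-context probability getter such as
av1_get_pred_prob_comp_bwdref_p() is typically consumed when the corresponding
bit is written (the wrapper below is hypothetical; aom_write() is the
boolean-coder entry point from aom_dsp/bitwriter.h):

#include "aom_dsp/bitwriter.h"       // aom_writer, aom_write()
#include "av1/common/pred_common.h"  // av1_get_pred_prob_comp_bwdref_p()

// Hypothetical call site: code one comp_bwdref decision with the
// context-dependent probability looked up from the frame context.
static void write_comp_bwdref_bit(aom_writer *w, const AV1_COMMON *cm,
                                  const MACROBLOCKD *xd, int bit) {
  aom_write(w, bit, av1_get_pred_prob_comp_bwdref_p(cm, xd));
}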

View file

@@ -3254,10 +3254,6 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
RefCntBuffer *const frame_bufs = pool->frame_bufs;
int i, mask, ref_index = 0;
size_t sz;
#if CONFIG_EXT_REFS
cm->last3_frame_type = cm->last2_frame_type;
cm->last2_frame_type = cm->last_frame_type;
#endif // CONFIG_EXT_REFS
cm->last_frame_type = cm->frame_type;
cm->last_intra_only = cm->intra_only;

View file

@@ -300,7 +300,7 @@ static void setup_frame(AV1_COMP *cpi) {
cm->frame_context_idx = ARF_FRAME;
#else
if (cpi->refresh_alt_ref_frame) cm->frame_context_idx = ARF_FRAME;
#endif
#endif // CONFIG_EXT_REFS
else if (cpi->rc.is_src_frame_alt_ref)
cm->frame_context_idx = OVERLAY_FRAME;
else if (cpi->refresh_golden_frame)
@@ -308,7 +308,7 @@ static void setup_frame(AV1_COMP *cpi) {
#if CONFIG_EXT_REFS
else if (cpi->refresh_bwd_ref_frame)
cm->frame_context_idx = BRF_FRAME;
#endif
#endif // CONFIG_EXT_REFS
else
cm->frame_context_idx = REGULAR_FRAME;
}
@@ -4412,7 +4412,8 @@ static void set_arf_sign_bias(AV1_COMP *cpi) {
arf_sign_bias =
(cpi->rc.source_alt_ref_active && !cpi->refresh_alt_ref_frame);
}
#endif
#endif // CONFIG_EXT_REFS
cm->ref_frame_sign_bias[ALTREF_FRAME] = arf_sign_bias;
#if CONFIG_EXT_REFS
cm->ref_frame_sign_bias[BWDREF_FRAME] = cm->ref_frame_sign_bias[ALTREF_FRAME];
@@ -4430,7 +4431,7 @@ static int setup_interp_filter_search_mask(AV1_COMP *cpi) {
// Get which arf is used as ALTREF_FRAME
if (cpi->oxcf.pass == 2)
arf_idx += cpi->twopass.gf_group.arf_ref_idx[cpi->twopass.gf_group.index];
#endif
#endif // CONFIG_EXT_REFS
if (cpi->common.last_frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame)
return mask;
@@ -4446,7 +4447,7 @@ static int setup_interp_filter_search_mask(AV1_COMP *cpi) {
for (ref = LAST_FRAME; ref <= ALTREF_FRAME; ++ref)
for (ifilter = EIGHTTAP_REGULAR; ifilter < SWITCHABLE_FILTERS; ++ifilter)
ref_total[ref] += cpi->interp_filter_selected[ref][ifilter];
#endif
#endif // CONFIG_EXT_REFS
for (ifilter = EIGHTTAP_REGULAR; ifilter < SWITCHABLE_FILTERS; ++ifilter) {
if ((ref_total[LAST_FRAME] &&
@@ -4597,14 +4598,12 @@ static void encode_frame_to_data_rate(AV1_COMP *cpi, size_t *size,
// Update the frame type
cm->last_frame_type = cm->frame_type;
#if CONFIG_EXT_REFS
// Since we allocate a spot for the OVERLAY frame in the gf group, we need
// to do post-encoding update accordingly.
if (cpi->rc.is_src_frame_alt_ref) {
av1_set_target_rate(cpi);
av1_rc_postencode_update(cpi, *size);
}
#endif
cm->last_width = cm->width;
cm->last_height = cm->height;
@@ -4773,10 +4772,6 @@ static void encode_frame_to_data_rate(AV1_COMP *cpi, size_t *size,
cpi->ref_frame_flags = get_ref_frame_flags(cpi);
#endif // !CONFIG_EXT_REFS
#if CONFIG_EXT_REFS
cm->last3_frame_type = cm->last2_frame_type;
cm->last2_frame_type = cm->last_frame_type;
#endif // CONFIG_EXT_REFS
cm->last_frame_type = cm->frame_type;
av1_rc_postencode_update(cpi, *size);
@@ -5041,7 +5036,7 @@ static void check_src_altref(AV1_COMP *cpi,
rc->is_src_frame_alt_ref =
#if CONFIG_EXT_REFS
(gf_group->update_type[gf_group->index] == INTNL_OVERLAY_UPDATE) ||
#endif
#endif // CONFIG_EXT_REFS
(gf_group->update_type[gf_group->index] == OVERLAY_UPDATE);
} else {
rc->is_src_frame_alt_ref =
@@ -5409,7 +5404,8 @@ int av1_get_compressed_data(AV1_COMP *cpi, unsigned int *frame_flags,
cpi->alt_fb_idx = gf_group->arf_ref_idx[gf_group->index];
}
}
#endif
#endif // CONFIG_EXT_REFS
// Start with a 0 size frame.
*size = 0;

View file

@@ -23,6 +23,7 @@
#include "aom_scale/aom_scale.h"
#include "aom_scale/yv12config.h"
#include "aom_dsp/variance.h"
#include "av1/common/entropymv.h"
#include "av1/common/quant_common.h"
#include "av1/common/reconinter.h" // av1_setup_dst_planes()
@@ -37,7 +38,6 @@
#include "av1/encoder/mcomp.h"
#include "av1/encoder/quantize.h"
#include "av1/encoder/rd.h"
#include "aom_dsp/variance.h"
#define OUTPUT_FPF 0
#define ARF_STATS_OUTPUT 0
@@ -1580,10 +1580,6 @@ static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
double modified_err = 0.0;
double err_fraction;
int mid_boost_bits = 0;
#if !CONFIG_EXT_REFS
int mid_frame_idx;
unsigned char arf_buffer_indices[MAX_ACTIVE_ARFS];
#endif
#if CONFIG_EXT_REFS
// The use of bi-predictive frames is only enabled when the following 3
// conditions are met:
@@ -1597,6 +1593,7 @@ static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
(rc->baseline_gf_interval - rc->source_alt_ref_pending);
int bipred_group_end = 0;
int bipred_frame_index = 0;
int arf_pos[MAX_EXT_ARFS + 1];
const unsigned char ext_arf_interval =
(unsigned char)(rc->baseline_gf_interval / (cpi->num_extra_arfs + 1) - 1);
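
A quick worked example of the spacing computed just above, with hypothetical
values:

// Hypothetical: rc->baseline_gf_interval == 16, cpi->num_extra_arfs == 2.
//   ext_arf_interval = 16 / (2 + 1) - 1 = 5 - 1 = 4
// so the extra ARFs split the golden-frame group into sub-groups of roughly
// (ext_arf_interval + 1) = 5 frames each.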
@@ -1605,17 +1602,20 @@ static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
int ext_arf_boost[MAX_EXT_ARFS];
int is_sg_bipred_enabled = is_bipred_enabled;
int accumulative_subgroup_interval = 0;
#else
int mid_frame_idx;
unsigned char arf_buffer_indices[MAX_ACTIVE_ARFS];
#endif // CONFIG_EXT_REFS
#if CONFIG_EXT_REFS
av1_zero_array(ext_arf_boost, MAX_EXT_ARFS);
#endif
#endif // CONFIG_EXT_REFS
key_frame = cpi->common.frame_type == KEY_FRAME;
#if !CONFIG_EXT_REFS
get_arf_buffer_indices(arf_buffer_indices);
#endif
#endif // !CONFIG_EXT_REFS
// For key frames the frame target rate is already set and it
// is also the golden frame.
@@ -1635,7 +1635,7 @@ static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
#else
gf_group->arf_update_idx[frame_index] = arf_buffer_indices[0];
gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[0];
#endif
#endif // CONFIG_EXT_REFS
// Step over the golden frame / overlay frame
if (EOF == input_stats(twopass, &frame_stats)) return;
}
@@ -1667,16 +1667,15 @@ static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
#if CONFIG_EXT_REFS
gf_group->arf_update_idx[frame_index] = 0;
gf_group->arf_ref_idx[frame_index] = 0;
gf_group->bidir_pred_enabled[frame_index] = 0;
gf_group->brf_src_offset[frame_index] = 0;
// NOTE: "bidir_pred_frame_index" stays unchanged for ARF_UPDATE frames.
#else
gf_group->arf_update_idx[frame_index] = arf_buffer_indices[0];
gf_group->arf_ref_idx[frame_index] =
arf_buffer_indices[cpi->multi_arf_last_grp_enabled &&
rc->source_alt_ref_active];
#endif // CONFIG_EXT_REFS
#if CONFIG_EXT_REFS
gf_group->bidir_pred_enabled[frame_index] = 0;
gf_group->brf_src_offset[frame_index] = 0;
// NOTE: "bidir_pred_frame_index" stays unchanged for ARF_UPDATE frames.
#endif // CONFIG_EXT_REFS
#if CONFIG_EXT_REFS
@@ -1730,13 +1729,14 @@ static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
#if !CONFIG_EXT_REFS
// Define middle frame
mid_frame_idx = frame_index + (rc->baseline_gf_interval >> 1) - 1;
#endif
#endif // !CONFIG_EXT_REFS
// Allocate bits to the other frames in the group.
for (i = 0; i < rc->baseline_gf_interval - rc->source_alt_ref_pending; ++i) {
#if !CONFIG_EXT_REFS
int arf_idx = 0;
#endif
#endif // !CONFIG_EXT_REFS
if (EOF == input_stats(twopass, &frame_stats)) break;
modified_err = calculate_modified_err(cpi, twopass, oxcf, &frame_stats);
@@ -1753,8 +1753,9 @@ static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
target_frame_size -= (target_frame_size >> 4);
#if !CONFIG_EXT_REFS
if (frame_index <= mid_frame_idx) arf_idx = 1;
#endif
#endif // !CONFIG_EXT_REFS
}
#if CONFIG_EXT_REFS
gf_group->arf_update_idx[frame_index] = which_arf;
gf_group->arf_ref_idx[frame_index] = which_arf;
@@ -1762,6 +1763,7 @@ static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
gf_group->arf_update_idx[frame_index] = arf_buffer_indices[arf_idx];
gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[arf_idx];
#endif // CONFIG_EXT_REFS
target_frame_size =
clamp(target_frame_size, 0, AOMMIN(max_bits, (int)total_group_bits));
@@ -1773,6 +1775,7 @@ static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
is_bipred_enabled &&
(subgroup_interval[which_arf] > rc->bipred_group_interval);
}
// NOTE: BIDIR_PRED is only enabled when the length of the bi-predictive
// frame group interval is strictly smaller than that of the GOLDEN
// FRAME group interval.
@@ -1841,6 +1844,7 @@ static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
#endif // CONFIG_EXT_REFS
++frame_index;
#if CONFIG_EXT_REFS
// Check if we need to update the ARF
if (cpi->num_extra_arfs && frame_index > arf_pos[which_arf]) {
@@ -1858,7 +1862,7 @@ static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
++frame_index;
}
}
#endif
#endif // CONFIG_EXT_REFS
}
// Note:
@@ -1871,7 +1875,8 @@ static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
#else
gf_group->arf_update_idx[frame_index] = arf_buffer_indices[0];
gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[0];
#endif
#endif // CONFIG_EXT_REFS
if (rc->source_alt_ref_pending) {
gf_group->update_type[frame_index] = OVERLAY_UPDATE;
gf_group->rf_level[frame_index] = INTER_NORMAL;
@@ -1887,8 +1892,7 @@ static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
gf_group->rf_level[arf_pos[i]] = INTER_LOW;
}
}
#endif
#if !CONFIG_EXT_REFS
#else
// Final setup for second arf and its overlay.
if (cpi->multi_arf_enabled) {
gf_group->bit_allocation[2] =
@@ -1896,11 +1900,12 @@ static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
gf_group->update_type[mid_frame_idx] = OVERLAY_UPDATE;
gf_group->bit_allocation[mid_frame_idx] = 0;
}
#endif
#endif // CONFIG_EXT_REFS
} else {
gf_group->update_type[frame_index] = GF_UPDATE;
gf_group->rf_level[frame_index] = GF_ARF_STD;
}
#if CONFIG_EXT_REFS
gf_group->bidir_pred_enabled[frame_index] = 0;
gf_group->brf_src_offset[frame_index] = 0;
@@ -1909,6 +1914,7 @@ static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
// Note whether multi-arf was enabled this group for next time.
cpi->multi_arf_last_grp_enabled = cpi->multi_arf_enabled;
}
// Analyse and define a gf/arf group.
static void define_gf_group(AV1_COMP *cpi, FIRSTPASS_STATS *this_frame) {
AV1_COMMON *const cm = &cpi->common;
@@ -2116,8 +2122,8 @@ static void define_gf_group(AV1_COMP *cpi, FIRSTPASS_STATS *this_frame) {
cpi->num_extra_arfs = get_number_of_extra_arfs(rc->baseline_gf_interval,
rc->source_alt_ref_pending);
// Currently at most two extra ARFs are allowed
assert(cpi->num_extra_arfs <= 2);
#endif
assert(cpi->num_extra_arfs <= MAX_EXT_ARFS);
#endif // CONFIG_EXT_REFS
rc->frames_till_gf_update_due = rc->baseline_gf_interval;

View file

@@ -193,7 +193,7 @@ static INLINE int get_number_of_extra_arfs(int interval, int arf_pending) {
else
return 0;
}
#endif
#endif // CONFIG_EXT_REFS
#ifdef __cplusplus
} // extern "C"

View file

@@ -193,7 +193,7 @@ int av1_rc_clamp_pframe_target_size(const AV1_COMP *const cpi, int target) {
if (cpi->rc.is_src_frame_alt_ref) {
#else
if (cpi->refresh_golden_frame && rc->is_src_frame_alt_ref) {
#endif
#endif // CONFIG_EXT_REFS
// If there is an active ARF at this location, use the minimum
// bits on this frame even if it is a constructed arf.
// The active maximum quantizer ensures that an appropriate
@@ -1189,11 +1189,12 @@ static void update_golden_frame_stats(AV1_COMP *cpi) {
#else
// Update the Golden frame usage counts.
if (cpi->refresh_golden_frame) {
#endif
#endif // CONFIG_EXT_REFS
#if CONFIG_EXT_REFS
// We will not use internal overlay frames to replace the golden frame
if (!rc->is_src_frame_ext_arf)
#endif
#endif // CONFIG_EXT_REFS
// A refresh on this frame means the following frames don't refresh, unless specified by the user.
rc->frames_since_golden = 0;