/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VP9_COMMON_VP9_BLOCKD_H_
#define VP9_COMMON_VP9_BLOCKD_H_

#include "./vpx_config.h"
#include "vpx_scale/yv12config.h"
#include "vp9/common/vp9_convolve.h"
|
2012-11-28 22:41:40 +04:00
|
|
|
#include "vp9/common/vp9_mv.h"
|
|
|
|
#include "vp9/common/vp9_treecoder.h"
|
2010-05-18 19:58:33 +04:00
|
|
|
#include "vpx_ports/mem.h"
|
2012-11-28 22:41:40 +04:00
|
|
|
#include "vp9/common/vp9_common.h"
|
2013-04-10 08:28:27 +04:00
|
|
|
#include "vp9/common/vp9_enums.h"
|
2010-05-18 19:58:33 +04:00
|
|
|
|
2013-06-03 21:39:40 +04:00
|
|
|
#define BLOCK_SIZE_GROUPS 4
|
2013-04-24 16:04:45 +04:00
|
|
|
#define MAX_MB_SEGMENTS 8
|
|
|
|
#define MB_SEG_TREE_PROBS (MAX_MB_SEGMENTS-1)
|
2010-09-01 04:43:14 +04:00
|
|
|
|
2013-06-07 00:44:34 +04:00
|
|
|
#define PREDICTION_PROBS 3
|
2013-04-17 22:45:35 +04:00
|
|
|
|
2012-03-19 22:02:04 +04:00
|
|
|
#define MBSKIP_CONTEXTS 3
|
|
|
|
|
2010-05-18 19:58:33 +04:00
|
|
|
#define MAX_REF_LF_DELTAS 4
|
2013-06-07 09:55:31 +04:00
|
|
|
#define MAX_MODE_LF_DELTAS 2
|
2010-05-18 19:58:33 +04:00
|
|
|
|
2010-10-28 03:04:02 +04:00
|
|
|
/* Segment Feature Masks */
|
2010-05-18 19:58:33 +04:00
|
|
|
#define SEGMENT_DELTADATA 0
|
|
|
|
#define SEGMENT_ABSDATA 1
|
2013-04-19 18:40:36 +04:00
|
|
|
#define MAX_MV_REF_CANDIDATES 2
|
2010-05-18 19:58:33 +04:00
|
|
|
|
2013-06-07 00:44:34 +04:00
|
|
|
#define INTRA_INTER_CONTEXTS 4
|
|
|
|
#define COMP_INTER_CONTEXTS 5
|
|
|
|
#define REF_CONTEXTS 5
|
|
|
|
|
2013-02-20 23:36:31 +04:00
|
|
|
typedef enum {
|
2012-10-15 02:29:56 +04:00
|
|
|
PLANE_TYPE_Y_WITH_DC,
|
2013-02-16 00:09:05 +04:00
|
|
|
PLANE_TYPE_UV,
|
2012-10-15 02:29:56 +04:00
|
|
|
} PLANE_TYPE;
|
2011-02-24 00:37:08 +03:00
|
|
|
|
2010-08-31 18:49:57 +04:00
|
|
|
typedef char ENTROPY_CONTEXT;
|
2010-05-18 19:58:33 +04:00
|
|
|
|
2013-04-23 21:12:18 +04:00
|
|
|
typedef char PARTITION_CONTEXT;
|
|
|
|
|
2013-04-17 02:30:28 +04:00
|
|
|
static INLINE int combine_entropy_contexts(ENTROPY_CONTEXT a,
|
|
|
|
ENTROPY_CONTEXT b) {
|
|
|
|
return (a != 0) + (b != 0);
|
|
|
|
}

typedef enum {
  KEY_FRAME = 0,
  INTER_FRAME = 1,
  NUM_FRAME_TYPES,
} FRAME_TYPE;

typedef enum {
  EIGHTTAP_SMOOTH,
  EIGHTTAP,
  EIGHTTAP_SHARP,
  BILINEAR,
  SWITCHABLE  /* should be the last one */
} INTERPOLATIONFILTERTYPE;

typedef enum {
  DC_PRED,    // Average of above and left pixels
  V_PRED,     // Vertical
  H_PRED,     // Horizontal
  D45_PRED,   // Directional 45 deg = round(arctan(1/1) * 180/pi)
  D135_PRED,  // Directional 135 deg = 180 - 45
  D117_PRED,  // Directional 117 deg = 180 - 63
  D153_PRED,  // Directional 153 deg = 180 - 27
  D27_PRED,   // Directional 27 deg = round(arctan(1/2) * 180/pi)
  D63_PRED,   // Directional 63 deg = round(arctan(2/1) * 180/pi)
  TM_PRED,    // True-motion
  NEARESTMV,
  NEARMV,
  ZEROMV,
  NEWMV,
  MB_MODE_COUNT
} MB_PREDICTION_MODE;

static INLINE int is_inter_mode(MB_PREDICTION_MODE mode) {
  return mode >= NEARESTMV && mode <= NEWMV;
}

// Segment level features.
typedef enum {
  SEG_LVL_ALT_Q = 0,      // Use alternate quantizer
  SEG_LVL_ALT_LF = 1,     // Use alternate loop filter value
  SEG_LVL_REF_FRAME = 2,  // Optional segment reference frame
  SEG_LVL_SKIP = 3,       // Optional segment (0,0) + skip mode
  SEG_LVL_MAX = 4         // Number of MB level features supported
} SEG_LVL_FEATURES;

// Supported transform sizes.
typedef enum {
  TX_4X4 = 0,      // 4x4 dct transform
  TX_8X8 = 1,      // 8x8 dct transform
  TX_16X16 = 2,    // 16x16 dct transform
  TX_32X32 = 3,    // 32x32 dct transform
  TX_SIZE_MAX_SB,  // Number of transforms available to SBs
} TX_SIZE;

typedef enum {
  DCT_DCT = 0,    // DCT in both horizontal and vertical
  ADST_DCT = 1,   // ADST in vertical, DCT in horizontal
  DCT_ADST = 2,   // DCT in vertical, ADST in horizontal
  ADST_ADST = 3   // ADST in both directions
} TX_TYPE;

#define VP9_INTRA_MODES (TM_PRED + 1)

#define VP9_INTER_MODES (1 + NEWMV - NEARESTMV)

#define WHT_UPSCALE_FACTOR 2

#define TX_SIZE_PROBS 6  // (TX_SIZE_MAX_SB * (TX_SIZE_MAX_SB - 1) / 2)

#define get_tx_probs(c, b) ((b) < BLOCK_SIZE_MB16X16 ? \
                            (c)->fc.tx_probs_8x8p :    \
                            (b) < BLOCK_SIZE_SB32X32 ? \
                            (c)->fc.tx_probs_16x16p : (c)->fc.tx_probs_32x32p)
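
// Note (illustrative, not from the original header): get_tx_probs() selects
// the frame-context probability table matching the largest transform a block
// size permits. Assuming BLOCK_SIZE_TYPE values are ordered from smallest to
// largest (as in vp9_enums.h), a block below 16x16 uses fc.tx_probs_8x8p, a
// block of at least 16x16 but below 32x32 uses fc.tx_probs_16x16p, and 32x32
// or larger uses fc.tx_probs_32x32p, e.g.
//   get_tx_probs(cm, BLOCK_SIZE_SB32X32)   // -> (cm)->fc.tx_probs_32x32p
// where `cm` stands for any struct exposing the `fc` member used above.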

/* For keyframes, intra block modes are predicted by the (already decoded)
   modes for the Y blocks to the left and above us; for interframes, there
   is a single probability table. */

union b_mode_info {
  struct {
    MB_PREDICTION_MODE first;
  } as_mode;
  int_mv as_mv[2];  // first, second inter predictor motion vectors
};

typedef enum {
  NONE = -1,
  INTRA_FRAME = 0,
  LAST_FRAME = 1,
  GOLDEN_FRAME = 2,
  ALTREF_FRAME = 3,
  MAX_REF_FRAMES = 4
} MV_REFERENCE_FRAME;

static INLINE int b_width_log2(BLOCK_SIZE_TYPE sb_type) {
  switch (sb_type) {
    case BLOCK_SIZE_SB4X8:
    case BLOCK_SIZE_AB4X4: return 0;
    case BLOCK_SIZE_SB8X4:
    case BLOCK_SIZE_SB8X8:
    case BLOCK_SIZE_SB8X16: return 1;
    case BLOCK_SIZE_SB16X8:
    case BLOCK_SIZE_MB16X16:
    case BLOCK_SIZE_SB16X32: return 2;
    case BLOCK_SIZE_SB32X16:
    case BLOCK_SIZE_SB32X32:
    case BLOCK_SIZE_SB32X64: return 3;
    case BLOCK_SIZE_SB64X32:
    case BLOCK_SIZE_SB64X64: return 4;
    default: assert(0);
      return -1;
  }
}

static INLINE int b_height_log2(BLOCK_SIZE_TYPE sb_type) {
  switch (sb_type) {
    case BLOCK_SIZE_SB8X4:
    case BLOCK_SIZE_AB4X4: return 0;
    case BLOCK_SIZE_SB4X8:
    case BLOCK_SIZE_SB8X8:
    case BLOCK_SIZE_SB16X8: return 1;
    case BLOCK_SIZE_SB8X16:
    case BLOCK_SIZE_MB16X16:
    case BLOCK_SIZE_SB32X16: return 2;
    case BLOCK_SIZE_SB16X32:
    case BLOCK_SIZE_SB32X32:
    case BLOCK_SIZE_SB64X32: return 3;
    case BLOCK_SIZE_SB32X64:
    case BLOCK_SIZE_SB64X64: return 4;
    default: assert(0);
      return -1;
  }
}

static INLINE int mi_width_log2(BLOCK_SIZE_TYPE sb_type) {
  int a = b_width_log2(sb_type) - 1;
  // align 4x4 block to mode_info
  if (a < 0)
    a = 0;
  assert(a >= 0);
  return a;
}

static INLINE int mi_height_log2(BLOCK_SIZE_TYPE sb_type) {
  int a = b_height_log2(sb_type) - 1;
  if (a < 0)
    a = 0;
  assert(a >= 0);
  return a;
}
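
// Worked example (illustrative, not from the original source): the b_* helpers
// measure size in 4x4 units and the mi_* helpers in 8x8 mode_info units, so
// for BLOCK_SIZE_SB64X64, b_width_log2() == 4 (16 4x4 columns) and
// mi_width_log2() == 3 (8 mode_info columns), while for BLOCK_SIZE_MB16X16 the
// values are 2 and 1. Sub-8x8 sizes clamp the mode_info result to 0.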

typedef struct {
  MB_PREDICTION_MODE mode, uv_mode;
  MV_REFERENCE_FRAME ref_frame[2];
  TX_SIZE txfm_size;
  int_mv mv[2];  // for each reference frame used
  int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
  int_mv best_mv, best_second_mv;

  int mb_mode_context[MAX_REF_FRAMES];

  unsigned char mb_skip_coeff;  /* does this mb have coefficients at all?
                                   1 = no coefficients, 0 = tokens need to be
                                   decoded */
  unsigned char segment_id;     // Segment id for current frame

  // Flags used for prediction status of various bitstream signals
  unsigned char seg_id_predicted;

  // Indicates if the mb is part of the image (1) vs border (0)
  // This can be useful in determining whether the MB provides
  // a valid predictor
  unsigned char mb_in_image;

  INTERPOLATIONFILTERTYPE interp_filter;

  BLOCK_SIZE_TYPE sb_type;
} MB_MODE_INFO;

typedef struct {
  MB_MODE_INFO mbmi;
  union b_mode_info bmi[4];
} MODE_INFO;

enum mv_precision {
  MV_PRECISION_Q3,
  MV_PRECISION_Q4
};

#define VP9_REF_SCALE_SHIFT 14
struct scale_factors {
  int x_scale_fp;   // horizontal fixed point scale factor
  int y_scale_fp;   // vertical fixed point scale factor
  int x_offset_q4;
  int x_step_q4;
  int y_offset_q4;
  int y_step_q4;

  int (*scale_value_x)(int val, const struct scale_factors *scale);
  int (*scale_value_y)(int val, const struct scale_factors *scale);
  void (*set_scaled_offsets)(struct scale_factors *scale, int row, int col);
  MV32 (*scale_mv_q3_to_q4)(const MV *mv, const struct scale_factors *scale);
  MV32 (*scale_mv_q4)(const MV *mv, const struct scale_factors *scale);
  convolve_fn_t predict[2][2][2];  // horiz, vert, avg
};
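
// Note (illustrative, not from the original header): x_scale_fp/y_scale_fp are
// Q14 fixed-point ratios between the reference and the current frame
// dimensions, i.e. an unscaled reference carries
// x_scale_fp == 1 << VP9_REF_SCALE_SHIFT, and a reference twice as wide as the
// current frame would, assuming the factors are set up elsewhere as
// (ref_width << VP9_REF_SCALE_SHIFT) / cur_width, carry
// x_scale_fp == 2 << VP9_REF_SCALE_SHIFT. The *_q4 members use 1/16-pel units.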

#if CONFIG_ALPHA
enum { MAX_MB_PLANE = 4 };
#else
enum { MAX_MB_PLANE = 3 };
#endif

struct buf_2d {
  uint8_t *buf;
  int stride;
};

struct macroblockd_plane {
  DECLARE_ALIGNED(16, int16_t, qcoeff[64 * 64]);
  DECLARE_ALIGNED(16, int16_t, dqcoeff[64 * 64]);
  DECLARE_ALIGNED(16, uint16_t, eobs[256]);
  PLANE_TYPE plane_type;
  int subsampling_x;
  int subsampling_y;
  struct buf_2d dst;
  struct buf_2d pre[2];
  int16_t *dequant;
  ENTROPY_CONTEXT *above_context;
  ENTROPY_CONTEXT *left_context;
};

#define BLOCK_OFFSET(x, i, n) ((x) + (i) * (n))
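
// Usage sketch (illustrative, not from the original header): BLOCK_OFFSET()
// simply advances a coefficient pointer by i blocks of n entries, e.g.
//   int16_t *qc = BLOCK_OFFSET(xd->plane[0].qcoeff, block, 16);
// points at the 16 quantized coefficients of 4x4 block `block` within the
// plane's coefficient buffer; `xd` and `block` here are hypothetical names.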

typedef struct macroblockd {
  struct macroblockd_plane plane[MAX_MB_PLANE];
  struct scale_factors scale_factor[2];
  struct scale_factors scale_factor_uv[2];

  MODE_INFO *prev_mode_info_context;
  MODE_INFO *mode_info_context;
  int mode_info_stride;

  FRAME_TYPE frame_type;

  int up_available;
  int left_available;
  int right_available;

  // partition contexts
  PARTITION_CONTEXT *above_seg_context;
  PARTITION_CONTEXT *left_seg_context;

  /* 0 (disable) 1 (enable) segmentation */
  unsigned char segmentation_enabled;

  /* 0 (do not update) 1 (update) the macroblock segmentation map. */
  unsigned char update_mb_segmentation_map;

  /* 0 (do not update) 1 (update) the macroblock segmentation feature data. */
  unsigned char update_mb_segmentation_data;

  /* 0 (delta coded) 1 (absolute values) for the segmentation feature data. */
  unsigned char mb_segment_abs_delta;

  /* Per-frame flags that define which MB level features (such as quantizer or
     loop filter level) are enabled and, when enabled, the probabilities used
     to decode the per-MB flags in MB_MODE_INFO. */

  // Probability Tree used to code Segment number
  vp9_prob mb_segment_tree_probs[MB_SEG_TREE_PROBS];

  // Segment features
  int16_t segment_feature_data[MAX_MB_SEGMENTS][SEG_LVL_MAX];
  unsigned int segment_feature_mask[MAX_MB_SEGMENTS];

  /* mode_based Loop filter adjustment */
  unsigned char mode_ref_lf_delta_enabled;
  unsigned char mode_ref_lf_delta_update;

  /* Delta values have the range +/- MAX_LOOP_FILTER */
  /* 0 = Intra, Last, GF, ARF */
  signed char last_ref_lf_deltas[MAX_REF_LF_DELTAS];
  /* 0 = Intra, Last, GF, ARF */
  signed char ref_lf_deltas[MAX_REF_LF_DELTAS];
  /* 0 = ZERO_MV, MV */
  signed char last_mode_lf_deltas[MAX_MODE_LF_DELTAS];
  /* 0 = ZERO_MV, MV */
  signed char mode_lf_deltas[MAX_MODE_LF_DELTAS];

  /* Distance of MB away from frame edges */
  int mb_to_left_edge;
  int mb_to_right_edge;
  int mb_to_top_edge;
  int mb_to_bottom_edge;

  unsigned int frames_since_golden;
  unsigned int frames_till_alt_ref_frame;

  int lossless;
  /* Inverse transform function pointers. */
  void (*inv_txm4x4_1_add)(int16_t *input, uint8_t *dest, int stride);
  void (*inv_txm4x4_add)(int16_t *input, uint8_t *dest, int stride);
  void (*itxm_add)(int16_t *input, uint8_t *dest, int stride, int eob);
  struct subpix_fn_table subpix;

  int allow_high_precision_mv;

  int corrupted;

  int sb_index;   // index of 32x32 block inside the 64x64 block
  int mb_index;   // index of 16x16 block inside the 32x32 block
  int b_index;    // index of 8x8 block inside the 16x16 block
  int ab_index;   // index of 4x4 block inside the 8x8 block
  int q_index;

} MACROBLOCKD;

static int *get_sb_index(MACROBLOCKD *xd, BLOCK_SIZE_TYPE subsize) {
  switch (subsize) {
    case BLOCK_SIZE_SB64X64:
    case BLOCK_SIZE_SB64X32:
    case BLOCK_SIZE_SB32X64:
    case BLOCK_SIZE_SB32X32:
      return &xd->sb_index;
    case BLOCK_SIZE_SB32X16:
    case BLOCK_SIZE_SB16X32:
    case BLOCK_SIZE_MB16X16:
      return &xd->mb_index;
    case BLOCK_SIZE_SB16X8:
    case BLOCK_SIZE_SB8X16:
    case BLOCK_SIZE_SB8X8:
      return &xd->b_index;
    case BLOCK_SIZE_SB8X4:
    case BLOCK_SIZE_SB4X8:
    case BLOCK_SIZE_AB4X4:
      return &xd->ab_index;
    default:
      assert(0);
      return NULL;
  }
}
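
// Usage sketch (illustrative, not from the original header): callers that walk
// the partition tree record which child of the parent block they are visiting
// by writing through the returned pointer, e.g.
//   *get_sb_index(xd, subsize) = idx;
// so a subsize of BLOCK_SIZE_SB32X32 updates xd->sb_index, a 16x16 subsize
// updates xd->mb_index, and so on; `idx` here is a hypothetical name.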

static INLINE void update_partition_context(MACROBLOCKD *xd,
                                            BLOCK_SIZE_TYPE sb_type,
                                            BLOCK_SIZE_TYPE sb_size) {
  int bsl = b_width_log2(sb_size), bs = (1 << bsl) / 2;
  int bwl = b_width_log2(sb_type);
  int bhl = b_height_log2(sb_type);
  int boffset = b_width_log2(BLOCK_SIZE_SB64X64) - bsl;
  int i;

  // Update the partition context at the end nodes: set the partition bits of
  // block sizes larger than the current one to one, and the partition bits of
  // smaller block sizes to zero.
  if ((bwl == bsl) && (bhl == bsl)) {
    for (i = 0; i < bs; i++)
      xd->left_seg_context[i] = ~(0xf << boffset);
    for (i = 0; i < bs; i++)
      xd->above_seg_context[i] = ~(0xf << boffset);
  } else if ((bwl == bsl) && (bhl < bsl)) {
    for (i = 0; i < bs; i++)
      xd->left_seg_context[i] = ~(0xe << boffset);
    for (i = 0; i < bs; i++)
      xd->above_seg_context[i] = ~(0xf << boffset);
  } else if ((bwl < bsl) && (bhl == bsl)) {
    for (i = 0; i < bs; i++)
      xd->left_seg_context[i] = ~(0xf << boffset);
    for (i = 0; i < bs; i++)
      xd->above_seg_context[i] = ~(0xe << boffset);
  } else if ((bwl < bsl) && (bhl < bsl)) {
    for (i = 0; i < bs; i++)
      xd->left_seg_context[i] = ~(0xe << boffset);
    for (i = 0; i < bs; i++)
      xd->above_seg_context[i] = ~(0xe << boffset);
  } else {
    assert(0);
  }
}

static INLINE int partition_plane_context(MACROBLOCKD *xd,
                                          BLOCK_SIZE_TYPE sb_type) {
  int bsl = mi_width_log2(sb_type), bs = 1 << bsl;
  int above = 0, left = 0, i;
  int boffset = mi_width_log2(BLOCK_SIZE_SB64X64) - bsl;

  assert(mi_width_log2(sb_type) == mi_height_log2(sb_type));
  assert(bsl >= 0);
  assert(boffset >= 0);

  for (i = 0; i < bs; i++)
    above |= (xd->above_seg_context[i] & (1 << boffset));
  for (i = 0; i < bs; i++)
    left |= (xd->left_seg_context[i] & (1 << boffset));

  above = (above > 0);
  left = (left > 0);

  return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
}
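
// Worked example (illustrative, not from the original source): for a
// BLOCK_SIZE_SB32X32 node, bsl == mi_width_log2(BLOCK_SIZE_SB32X32) == 2 and
// boffset == 3 - 2 == 1, so bit 1 of the four covered above/left contexts is
// tested. If both neighbours have that bit set, the function returns
// (1 * 2 + 1) + 2 * PARTITION_PLOFFSET, where PARTITION_PLOFFSET is the
// per-size context stride defined elsewhere (4 at the time of writing).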

static BLOCK_SIZE_TYPE get_subsize(BLOCK_SIZE_TYPE bsize,
                                   PARTITION_TYPE partition) {
  BLOCK_SIZE_TYPE subsize;
  switch (partition) {
    case PARTITION_NONE:
      subsize = bsize;
      break;
    case PARTITION_HORZ:
      if (bsize == BLOCK_SIZE_SB64X64)
        subsize = BLOCK_SIZE_SB64X32;
      else if (bsize == BLOCK_SIZE_SB32X32)
        subsize = BLOCK_SIZE_SB32X16;
      else if (bsize == BLOCK_SIZE_MB16X16)
        subsize = BLOCK_SIZE_SB16X8;
      else if (bsize == BLOCK_SIZE_SB8X8)
        subsize = BLOCK_SIZE_SB8X4;
      else
        assert(0);
      break;
    case PARTITION_VERT:
      if (bsize == BLOCK_SIZE_SB64X64)
        subsize = BLOCK_SIZE_SB32X64;
      else if (bsize == BLOCK_SIZE_SB32X32)
        subsize = BLOCK_SIZE_SB16X32;
      else if (bsize == BLOCK_SIZE_MB16X16)
        subsize = BLOCK_SIZE_SB8X16;
      else if (bsize == BLOCK_SIZE_SB8X8)
        subsize = BLOCK_SIZE_SB4X8;
      else
        assert(0);
      break;
    case PARTITION_SPLIT:
      if (bsize == BLOCK_SIZE_SB64X64)
        subsize = BLOCK_SIZE_SB32X32;
      else if (bsize == BLOCK_SIZE_SB32X32)
        subsize = BLOCK_SIZE_MB16X16;
      else if (bsize == BLOCK_SIZE_MB16X16)
        subsize = BLOCK_SIZE_SB8X8;
      else if (bsize == BLOCK_SIZE_SB8X8)
        subsize = BLOCK_SIZE_AB4X4;
      else
        assert(0);
      break;
    default:
      assert(0);
  }
  return subsize;
}

// transform mapping
static TX_TYPE txfm_map(MB_PREDICTION_MODE bmode) {
  switch (bmode) {
    case TM_PRED:
    case D135_PRED:
      return ADST_ADST;

    case V_PRED:
    case D117_PRED:
    case D63_PRED:
      return ADST_DCT;

    case H_PRED:
    case D153_PRED:
    case D27_PRED:
      return DCT_ADST;

    default:
      return DCT_DCT;
  }
}

static TX_TYPE get_tx_type_4x4(const MACROBLOCKD *xd, int ib) {
  TX_TYPE tx_type;
  MODE_INFO *mi = xd->mode_info_context;
  MB_MODE_INFO *const mbmi = &mi->mbmi;
  if (xd->lossless || mbmi->ref_frame[0] != INTRA_FRAME)
    return DCT_DCT;
  if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
    tx_type = txfm_map(mi->bmi[ib].as_mode.first);
  } else {
    assert(mbmi->mode <= TM_PRED);
    tx_type = txfm_map(mbmi->mode);
  }
  return tx_type;
}

static TX_TYPE get_tx_type_8x8(const MACROBLOCKD *xd, int ib) {
  TX_TYPE tx_type = DCT_DCT;
  if (xd->mode_info_context->mbmi.mode <= TM_PRED) {
    tx_type = txfm_map(xd->mode_info_context->mbmi.mode);
  }
  return tx_type;
}

static TX_TYPE get_tx_type_16x16(const MACROBLOCKD *xd, int ib) {
  TX_TYPE tx_type = DCT_DCT;
  if (xd->mode_info_context->mbmi.mode <= TM_PRED) {
    tx_type = txfm_map(xd->mode_info_context->mbmi.mode);
  }
  return tx_type;
}

void vp9_setup_block_dptrs(MACROBLOCKD *xd,
                           int subsampling_x, int subsampling_y);

static TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi) {
  const TX_SIZE size = mbmi->txfm_size;

  switch (mbmi->sb_type) {
    case BLOCK_SIZE_SB64X64:
      return size;
    case BLOCK_SIZE_SB64X32:
    case BLOCK_SIZE_SB32X64:
    case BLOCK_SIZE_SB32X32:
      if (size == TX_32X32)
        return TX_16X16;
      else
        return size;
    case BLOCK_SIZE_SB32X16:
    case BLOCK_SIZE_SB16X32:
    case BLOCK_SIZE_MB16X16:
      if (size == TX_16X16)
        return TX_8X8;
      else
        return size;
    default:
      return TX_4X4;
  }

  return size;
}

struct plane_block_idx {
  int plane;
  int block;
};

// TODO(jkoleszar): returning a struct so it can be used in a const context,
// expect to refactor this further later.
static INLINE struct plane_block_idx plane_block_idx(int y_blocks,
                                                     int b_idx) {
  const int v_offset = y_blocks * 5 / 4;
  struct plane_block_idx res;

  if (b_idx < y_blocks) {
    res.plane = 0;
    res.block = b_idx;
  } else if (b_idx < v_offset) {
    res.plane = 1;
    res.block = b_idx - y_blocks;
  } else {
    assert(b_idx < y_blocks * 3 / 2);
    res.plane = 2;
    res.block = b_idx - v_offset;
  }
  return res;
}
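
// Worked example (illustrative, not from the original source), assuming 4:2:0
// subsampling so each chroma plane holds a quarter as many 4x4 blocks as luma:
// with y_blocks == 16, the combined index space is 0..23 and v_offset == 20,
// so plane_block_idx(16, 5) gives {plane 0, block 5}, plane_block_idx(16, 18)
// gives {plane 1, block 2}, and plane_block_idx(16, 21) gives
// {plane 2, block 1}.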

static INLINE int plane_block_width(BLOCK_SIZE_TYPE bsize,
                                    const struct macroblockd_plane* plane) {
  return 4 << (b_width_log2(bsize) - plane->subsampling_x);
}

static INLINE int plane_block_height(BLOCK_SIZE_TYPE bsize,
                                     const struct macroblockd_plane* plane) {
  return 4 << (b_height_log2(bsize) - plane->subsampling_y);
}
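
// For instance (illustrative, not from the original source): with 4:2:0 chroma
// (subsampling_x == subsampling_y == 1), BLOCK_SIZE_SB64X64 yields a 64x64
// luma block but 4 << (4 - 1) == 32, i.e. a 32x32 block, per chroma plane.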

typedef void (*foreach_transformed_block_visitor)(int plane, int block,
                                                  BLOCK_SIZE_TYPE bsize,
                                                  int ss_txfrm_size,
                                                  void *arg);

static INLINE void foreach_transformed_block_in_plane(
    const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize, int plane,
    foreach_transformed_block_visitor visit, void *arg) {
  const int bw = b_width_log2(bsize), bh = b_height_log2(bsize);

  // block and transform sizes, in number of 4x4 blocks log 2 ("*_b")
  // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
  // transform size varies per plane, look it up in a common way.
  const MB_MODE_INFO* mbmi = &xd->mode_info_context->mbmi;
  const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi)
                                : mbmi->txfm_size;
  const int block_size_b = bw + bh;
  const int txfrm_size_b = tx_size * 2;

  // subsampled size of the block
  const int ss_sum = xd->plane[plane].subsampling_x
      + xd->plane[plane].subsampling_y;
  const int ss_block_size = block_size_b - ss_sum;

  const int step = 1 << txfrm_size_b;

  int i;

  assert(txfrm_size_b <= block_size_b);
  assert(txfrm_size_b <= ss_block_size);

  // If mb_to_right_edge is < 0 we are in a situation in which
  // the current block size extends into the UMV and we won't
  // visit the sub blocks that are wholly within the UMV.
  if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) {
    int r, c;
    const int sw = bw - xd->plane[plane].subsampling_x;
    const int sh = bh - xd->plane[plane].subsampling_y;
    int max_blocks_wide = 1 << sw;
    int max_blocks_high = 1 << sh;

    // xd->mb_to_right_edge is in units of pixels * 8. This converts
    // it to 4x4 block sizes.
    if (xd->mb_to_right_edge < 0)
      max_blocks_wide +=
          (xd->mb_to_right_edge >> (5 + xd->plane[plane].subsampling_x));

    if (xd->mb_to_bottom_edge < 0)
      max_blocks_high +=
          (xd->mb_to_bottom_edge >> (5 + xd->plane[plane].subsampling_y));

    i = 0;
    // Unlike the normal case, here we have to keep track of the row and
    // column of the blocks we visit, so that we know whether they lie inside
    // the unrestricted motion border.
    for (r = 0; r < (1 << sh); r += (1 << tx_size)) {
      for (c = 0; c < (1 << sw); c += (1 << tx_size)) {
        if (r < max_blocks_high && c < max_blocks_wide)
          visit(plane, i, bsize, txfrm_size_b, arg);
        i += step;
      }
    }
  } else {
    for (i = 0; i < (1 << ss_block_size); i += step) {
      visit(plane, i, bsize, txfrm_size_b, arg);
    }
  }
}
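
// Worked example for the edge-trimming arithmetic above (illustrative, not
// from the original source): mb_to_right_edge is stored in 1/8-pel units, so
// >> 3 converts it to pixels and a further >> 2 to 4x4 columns, hence the
// combined >> (5 + subsampling). A luma block extending 12 pixels past the
// right frame edge has mb_to_right_edge == -96, and -96 >> 5 == -3, trimming
// three 4x4 columns (with 4:2:0 chroma, -96 >> 6 == -2 columns there).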

static INLINE void foreach_transformed_block(
    const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize,
    foreach_transformed_block_visitor visit, void *arg) {
  int plane;

  for (plane = 0; plane < MAX_MB_PLANE; plane++) {
    foreach_transformed_block_in_plane(xd, bsize, plane,
                                       visit, arg);
  }
}

static INLINE void foreach_transformed_block_uv(
    const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize,
    foreach_transformed_block_visitor visit, void *arg) {
  int plane;

  for (plane = 1; plane < MAX_MB_PLANE; plane++) {
    foreach_transformed_block_in_plane(xd, bsize, plane,
                                       visit, arg);
  }
}
// TODO(jkoleszar): In principle, pred_w, pred_h are unnecessary, as we could
// calculate the subsampled BLOCK_SIZE_TYPE, but that type isn't defined for
// sizes smaller than 16x16 yet.
typedef void (*foreach_predicted_block_visitor)(int plane, int block,
                                                BLOCK_SIZE_TYPE bsize,
                                                int pred_w, int pred_h,
                                                void *arg);

static INLINE void foreach_predicted_block_in_plane(
    const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize, int plane,
    foreach_predicted_block_visitor visit, void *arg) {
  int i, x, y;
  // block sizes in number of 4x4 blocks log 2 ("*_b")
  // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
  // subsampled size of the block
  const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
  const int bhl = b_height_log2(bsize) - xd->plane[plane].subsampling_y;
  // size of the predictor to use.
  int pred_w, pred_h;

  if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
    assert(bsize == BLOCK_SIZE_SB8X8);
    pred_w = 0;
    pred_h = 0;
  } else {
    pred_w = bwl;
    pred_h = bhl;
  }
  assert(pred_w <= bwl);
  assert(pred_h <= bhl);

  // visit each subblock in raster order
  i = 0;
  for (y = 0; y < 1 << bhl; y += 1 << pred_h) {
    for (x = 0; x < 1 << bwl; x += 1 << pred_w) {
      visit(plane, i, bsize, pred_w, pred_h, arg);
      i += 1 << pred_w;
    }
    i += (1 << (bwl + pred_h)) - (1 << bwl);
  }
}

static INLINE void foreach_predicted_block(
    const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize,
    foreach_predicted_block_visitor visit, void *arg) {
  int plane;

  for (plane = 0; plane < MAX_MB_PLANE; plane++) {
    foreach_predicted_block_in_plane(xd, bsize, plane, visit, arg);
  }
}

static INLINE void foreach_predicted_block_uv(
    const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize,
    foreach_predicted_block_visitor visit, void *arg) {
  int plane;

  for (plane = 1; plane < MAX_MB_PLANE; plane++) {
    foreach_predicted_block_in_plane(xd, bsize, plane, visit, arg);
  }
}

static int raster_block_offset(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize,
                               int plane, int block, int stride) {
  const int bw = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
  const int y = 4 * (block >> bw), x = 4 * (block & ((1 << bw) - 1));
  return y * stride + x;
}

static int16_t* raster_block_offset_int16(MACROBLOCKD *xd,
                                          BLOCK_SIZE_TYPE bsize,
                                          int plane, int block,
                                          int16_t *base) {
  const int stride = plane_block_width(bsize, &xd->plane[plane]);
  return base + raster_block_offset(xd, bsize, plane, block, stride);
}

static uint8_t* raster_block_offset_uint8(MACROBLOCKD *xd,
                                          BLOCK_SIZE_TYPE bsize,
                                          int plane, int block,
                                          uint8_t *base, int stride) {
  return base + raster_block_offset(xd, bsize, plane, block, stride);
}
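
// Worked example (illustrative, not from the original source): for a luma
// block whose width covers four 4x4 columns (bw == 2), block index 5 sits at
// 4x4 row 1, column 1, so raster_block_offset() returns
// 4 * (5 >> 2) * stride + 4 * (5 & 3) == 4 * stride + 4 entries past `base`
// in the int16/uint8 variants above.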

static int txfrm_block_to_raster_block(MACROBLOCKD *xd,
                                       BLOCK_SIZE_TYPE bsize,
                                       int plane, int block,
                                       int ss_txfrm_size) {
  const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
  const int txwl = ss_txfrm_size / 2;
  const int tx_cols_lg2 = bwl - txwl;
  const int tx_cols = 1 << tx_cols_lg2;
  const int raster_mb = block >> ss_txfrm_size;
  const int x = (raster_mb & (tx_cols - 1)) << (txwl);
  const int y = raster_mb >> tx_cols_lg2 << (txwl);
  return x + (y << bwl);
}

static void txfrm_block_to_raster_xy(MACROBLOCKD *xd,
                                     BLOCK_SIZE_TYPE bsize,
                                     int plane, int block,
                                     int ss_txfrm_size,
                                     int *x, int *y) {
  const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
  const int txwl = ss_txfrm_size / 2;
  const int tx_cols_lg2 = bwl - txwl;
  const int tx_cols = 1 << tx_cols_lg2;
  const int raster_mb = block >> ss_txfrm_size;
  *x = (raster_mb & (tx_cols - 1)) << (txwl);
  *y = raster_mb >> tx_cols_lg2 << (txwl);
}
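
// Worked example (illustrative, not from the original source): a 32x32 luma
// block has bwl == 3 (eight 4x4 columns); with 8x8 transforms,
// ss_txfrm_size == 2, so txwl == 1 and tx_cols == 4. Block index 20 (indices
// advance in steps of 1 << ss_txfrm_size == 4) gives raster_mb == 5, hence
// x == 2, y == 2, and txfrm_block_to_raster_block() returns
// 2 + (2 << 3) == 18, the raster index of the same position in 4x4 units.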

static void extend_for_intra(MACROBLOCKD* const xd, int plane, int block,
                             BLOCK_SIZE_TYPE bsize, int ss_txfrm_size) {
  const int bw = plane_block_width(bsize, &xd->plane[plane]);
  const int bh = plane_block_height(bsize, &xd->plane[plane]);
  int x, y;
  txfrm_block_to_raster_xy(xd, bsize, plane, block, ss_txfrm_size, &x, &y);
  x = x * 4 - 1;
  y = y * 4 - 1;
  // Copy a pixel into the umv if we are in a situation where the block size
  // extends into the UMV.
  // TODO(JBB): Should be able to do the full extend in place so we don't have
  // to do this multiple times.
  if (xd->mb_to_right_edge < 0) {
    int umv_border_start = bw
        + (xd->mb_to_right_edge >> (3 + xd->plane[plane].subsampling_x));

    if (x + bw > umv_border_start)
      vpx_memset(
          xd->plane[plane].dst.buf + y * xd->plane[plane].dst.stride
              + umv_border_start,
          *(xd->plane[plane].dst.buf + y * xd->plane[plane].dst.stride
              + umv_border_start - 1),
          bw);
  }
  if (xd->mb_to_bottom_edge < 0) {
    int umv_border_start = bh
        + (xd->mb_to_bottom_edge >> (3 + xd->plane[plane].subsampling_y));
    int i;
    uint8_t c = *(xd->plane[plane].dst.buf
        + (umv_border_start - 1) * xd->plane[plane].dst.stride + x);

    uint8_t *d = xd->plane[plane].dst.buf
        + umv_border_start * xd->plane[plane].dst.stride + x;

    if (y + bh > umv_border_start)
      for (i = 0; i < bh; i++, d += xd->plane[plane].dst.stride)
        *d = c;
  }
}

static void set_contexts_on_border(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize,
                                   int plane, int ss_tx_size, int eob, int aoff,
                                   int loff, ENTROPY_CONTEXT *A,
                                   ENTROPY_CONTEXT *L) {
  const int bw = b_width_log2(bsize), bh = b_height_log2(bsize);
  const int sw = bw - xd->plane[plane].subsampling_x;
  const int sh = bh - xd->plane[plane].subsampling_y;
  int mi_blocks_wide = 1 << sw;
  int mi_blocks_high = 1 << sh;
  int tx_size_in_blocks = (1 << ss_tx_size);
  int above_contexts = tx_size_in_blocks;
  int left_contexts = tx_size_in_blocks;
  int pt;

  // xd->mb_to_right_edge is in units of pixels * 8. This converts
  // it to 4x4 block sizes.
  if (xd->mb_to_right_edge < 0) {
    mi_blocks_wide += (xd->mb_to_right_edge
        >> (5 + xd->plane[plane].subsampling_x));
  }

  // Avoid writing into contexts that lie outside our border: any entries that
  // would fall outside are cleared to 0 instead.
  if (above_contexts + aoff > mi_blocks_wide)
    above_contexts = mi_blocks_wide - aoff;

  if (xd->mb_to_bottom_edge < 0) {
    mi_blocks_high += (xd->mb_to_bottom_edge
        >> (5 + xd->plane[plane].subsampling_y));
  }
  if (left_contexts + loff > mi_blocks_high) {
    left_contexts = mi_blocks_high - loff;
  }

  for (pt = 0; pt < above_contexts; pt++)
    A[pt] = eob > 0;
  for (pt = above_contexts; pt < (1 << ss_tx_size); pt++)
    A[pt] = 0;
  for (pt = 0; pt < left_contexts; pt++)
    L[pt] = eob > 0;
  for (pt = left_contexts; pt < (1 << ss_tx_size); pt++)
    L[pt] = 0;
}

#endif  // VP9_COMMON_VP9_BLOCKD_H_