Merge branch 'msm-next' of git://people.freedesktop.org/~robclark/linux into drm-next

Main pull for 3.19.  I may have another pull in a few days with some
mdp5 bits (and hopefully mdp5 atomic), but I figured there was no need
to hold up what we have already.  Main highlights so far:

1) a4xx gpu support (userspace gallium bits on mesa master)
2) mdp4/hdmi/core bits for atomic helpers (a rough sketch of the wiring
is included after this list).  Still missing the mdp5 conversion; the
main hold-up there is that the current hard-coded mixer setup isn't
clever enough to deal with disabling the primary plane while the crtc
is active.
3) various other misc cleanup/fixes/etc..
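
For reference on (2): the atomic-helper conversion mostly amounts to routing
the driver's mode_config through the new check/commit hooks in the DRM core.
A minimal sketch of that wiring is below; the msm_atomic_commit entry point
and its prototype are assumed from the "atomic core bits" patch in the
shortlog rather than copied from this diff:

  #include <drm/drm_crtc.h>
  #include <drm/drm_atomic_helper.h>

  /* assumed prototype for the commit hook added in msm_atomic.c: */
  int msm_atomic_commit(struct drm_device *dev,
  		struct drm_atomic_state *state, bool async);

  static const struct drm_mode_config_funcs mode_config_funcs = {
  	/* .fb_create etc. stay as before; the conversion adds: */
  	.atomic_check = drm_atomic_helper_check,	/* core atomic helper */
  	.atomic_commit = msm_atomic_commit,		/* driver-specific commit */
  };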

* 'msm-next' of git://people.freedesktop.org/~robclark/linux: (21 commits)
  drm/msm: a4xx support for msm-drm
  drm/msm: Handle register offset differences between a3xx and a4xx
  drm/msm: small mmap offset cleanups
  drm/msm/mdp4: atomic
  drm/msm/hdmi: atomic
  drm/msm: atomic core bits
  drm/msm: bit of fb error checking
  drm/msm: fb prepare/cleanup
  drm/msm: remove unused compile-test stub
  drm/msm: small fence cleanup
  drm/msm/mdp5: drop attached planes table
  drm/msm/mdp4: drop attached planes table
  drm/msm/mdp4: don't care about fb in crtc
  drm/msm/mdp5: drop private primary ptr
  drm/msm/mdp4: drop private primary ptr
  drm/msm: Fix fbdev for 16- and 24-bit modes.
  drm/msm: Allow exported dma-bufs to be mapped
  drm/msm/hdmi: refactor bind/init
  drm/msm: update generated headers
  drm/msm/adreno: slight init order cleanup
  ...
Dave Airlie 2014-11-17 06:36:53 +10:00
Parents 967b8e04d7 23bd62fd41
Commit d0d6c524bf
41 changed files: 3901 additions and 505 deletions


@ -3,6 +3,7 @@ config DRM_MSM
tristate "MSM DRM"
depends on DRM
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
select REGULATOR
select DRM_KMS_HELPER
select DRM_PANEL
select SHMEM


@ -7,6 +7,7 @@ msm-y := \
adreno/adreno_device.o \
adreno/adreno_gpu.o \
adreno/a3xx_gpu.o \
adreno/a4xx_gpu.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@ -30,6 +31,7 @@ msm-y := \
mdp/mdp5/mdp5_kms.o \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
msm_atomic.o \
msm_drv.o \
msm_fb.o \
msm_gem.o \


@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@ -926,11 +926,11 @@ static inline uint32_t A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size
#define A2XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
#define A2XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
#define A2XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK 0xffff0000
#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16
static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK 0xff000000
#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT 24
static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
{
return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK;
}
#define REG_A2XX_VGT_IMMED_DATA 0x000021fd
@ -1243,13 +1243,13 @@ static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val)
#define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT 0
static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val)
{
return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
}
#define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK 0xffff0000
#define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT 16
static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
{
return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
}
#define REG_A2XX_PA_SU_POINT_MINMAX 0x00002281
@ -1257,13 +1257,13 @@ static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
#define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT 0
static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val)
{
return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
}
#define A2XX_PA_SU_POINT_MINMAX_MAX__MASK 0xffff0000
#define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT 16
static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
{
return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
}
#define REG_A2XX_PA_SU_LINE_CNTL 0x00002282
@ -1271,7 +1271,7 @@ static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
#define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT 0
static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val)
{
return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
}
#define REG_A2XX_PA_SC_LINE_STIPPLE 0x00002283


@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@ -86,6 +86,14 @@ enum a3xx_vtx_fmt {
VFMT_NORM_USHORT_16_16 = 29,
VFMT_NORM_USHORT_16_16_16 = 30,
VFMT_NORM_USHORT_16_16_16_16 = 31,
VFMT_UINT_32 = 32,
VFMT_UINT_32_32 = 33,
VFMT_UINT_32_32_32 = 34,
VFMT_UINT_32_32_32_32 = 35,
VFMT_INT_32 = 36,
VFMT_INT_32_32 = 37,
VFMT_INT_32_32_32 = 38,
VFMT_INT_32_32_32_32 = 39,
VFMT_UBYTE_8 = 40,
VFMT_UBYTE_8_8 = 41,
VFMT_UBYTE_8_8_8 = 42,
@ -112,7 +120,9 @@ enum a3xx_tex_fmt {
TFMT_NORM_USHORT_565 = 4,
TFMT_NORM_USHORT_5551 = 6,
TFMT_NORM_USHORT_4444 = 7,
TFMT_NORM_USHORT_Z16 = 9,
TFMT_NORM_UINT_X8Z24 = 10,
TFMT_FLOAT_Z32 = 11,
TFMT_NORM_UINT_NV12_UV_TILED = 17,
TFMT_NORM_UINT_NV12_Y_TILED = 19,
TFMT_NORM_UINT_NV12_UV = 21,
@ -121,18 +131,38 @@ enum a3xx_tex_fmt {
TFMT_NORM_UINT_I420_U = 26,
TFMT_NORM_UINT_I420_V = 27,
TFMT_NORM_UINT_2_10_10_10 = 41,
TFMT_FLOAT_9_9_9_E5 = 42,
TFMT_FLOAT_10_11_11 = 43,
TFMT_NORM_UINT_A8 = 44,
TFMT_NORM_UINT_L8_A8 = 47,
TFMT_NORM_UINT_8 = 48,
TFMT_NORM_UINT_8_8 = 49,
TFMT_NORM_UINT_8_8_8 = 50,
TFMT_NORM_UINT_8_8_8_8 = 51,
TFMT_NORM_SINT_8_8 = 53,
TFMT_NORM_SINT_8_8_8_8 = 55,
TFMT_UINT_8_8 = 57,
TFMT_UINT_8_8_8_8 = 59,
TFMT_SINT_8_8 = 61,
TFMT_SINT_8_8_8_8 = 63,
TFMT_FLOAT_16 = 64,
TFMT_FLOAT_16_16 = 65,
TFMT_FLOAT_16_16_16_16 = 67,
TFMT_UINT_16 = 68,
TFMT_UINT_16_16 = 69,
TFMT_UINT_16_16_16_16 = 71,
TFMT_SINT_16 = 72,
TFMT_SINT_16_16 = 73,
TFMT_SINT_16_16_16_16 = 75,
TFMT_FLOAT_32 = 84,
TFMT_FLOAT_32_32 = 85,
TFMT_FLOAT_32_32_32_32 = 87,
TFMT_UINT_32 = 88,
TFMT_UINT_32_32 = 89,
TFMT_UINT_32_32_32_32 = 91,
TFMT_SINT_32 = 92,
TFMT_SINT_32_32 = 93,
TFMT_SINT_32_32_32_32 = 95,
};
enum a3xx_tex_fetchsize {
@ -145,19 +175,34 @@ enum a3xx_tex_fetchsize {
};
enum a3xx_color_fmt {
RB_R5G6B5_UNORM = 0,
RB_R5G5B5A1_UNORM = 1,
RB_R4G4B4A4_UNORM = 3,
RB_R8G8B8_UNORM = 4,
RB_R8G8B8A8_UNORM = 8,
RB_Z16_UNORM = 12,
RB_R8G8B8A8_UINT = 10,
RB_R8G8B8A8_SINT = 11,
RB_R8G8_UNORM = 12,
RB_R8_UINT = 14,
RB_R8_SINT = 15,
RB_R10G10B10A2_UNORM = 16,
RB_A8_UNORM = 20,
RB_R8_UNORM = 21,
RB_R16G16B16A16_FLOAT = 27,
RB_R11G11B10_FLOAT = 28,
RB_R16_SINT = 40,
RB_R16G16_SINT = 41,
RB_R16G16B16A16_SINT = 43,
RB_R16_UINT = 44,
RB_R16G16_UINT = 45,
RB_R16G16B16A16_UINT = 47,
RB_R32G32B32A32_FLOAT = 51,
};
enum a3xx_color_swap {
WZYX = 0,
WXYZ = 1,
ZYXW = 2,
XYZW = 3,
RB_R32_SINT = 52,
RB_R32G32_SINT = 53,
RB_R32G32B32A32_SINT = 55,
RB_R32_UINT = 56,
RB_R32G32_UINT = 57,
RB_R32G32B32A32_UINT = 59,
};
enum a3xx_sp_perfcounter_select {
@ -194,6 +239,11 @@ enum a3xx_rb_blend_opcode {
BLEND_MAX_DST_SRC = 4,
};
enum a3xx_intp_mode {
SMOOTH = 0,
FLAT = 1,
};
enum a3xx_tex_filter {
A3XX_TEX_NEAREST = 0,
A3XX_TEX_LINEAR = 1,
@ -536,6 +586,10 @@ enum a3xx_tex_type {
#define REG_A3XX_CP_MEQ_DATA 0x000001db
#define REG_A3XX_CP_WFI_PEND_CTR 0x000001f5
#define REG_A3XX_RBBM_PM_OVERRIDE2 0x0000039d
#define REG_A3XX_CP_PERFCOUNTER_SELECT 0x00000445
#define REG_A3XX_CP_HW_FAULT 0x0000045c
@ -550,6 +604,12 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460
#define REG_A3XX_CP_AHB_FAULT 0x0000054d
#define REG_A3XX_SQ_GPR_MANAGEMENT 0x00000d00
#define REG_A3XX_SQ_INST_STORE_MANAGMENT 0x00000d02
#define REG_A3XX_TP0_CHICKEN 0x00000e1e
#define REG_A3XX_SP_GLOBAL_MEM_SIZE 0x00000e22
#define REG_A3XX_SP_GLOBAL_MEM_ADDR 0x00000e23
@ -632,13 +692,13 @@ static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
#define A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MIN(float val)
{
return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
}
#define A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
#define A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
{
return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
}
#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069
@ -646,7 +706,7 @@ static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
#define A3XX_GRAS_SU_POINT_SIZE__SHIFT 0
static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
{
return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
return ((((int32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
}
#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c
@ -654,7 +714,7 @@ static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0
static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
{
return ((((uint32_t)(val * 28.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
return ((((int32_t)(val * 16384.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
}
#define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d
@ -662,7 +722,7 @@ static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
{
return ((((uint32_t)(val * 28.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
return ((((int32_t)(val * 16384.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
}
#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
@ -673,7 +733,7 @@ static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
{
return ((((uint32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
return ((((int32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
}
#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
@ -863,6 +923,7 @@ static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
}
#define A3XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00004000
#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xfffe0000
#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17
static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
@ -1001,6 +1062,7 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
{
return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
}
#define A3XX_RB_COPY_CONTROL_UNK12 0x00001000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
@ -1079,7 +1141,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
#define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
#define REG_A3XX_RB_DEPTH_INFO 0x00002102
#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000003
#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
{
@ -1265,6 +1327,7 @@ static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_
{
return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
}
#define A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000
#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
#define A3XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
@ -1281,7 +1344,12 @@ static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize
#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
#define A3XX_HLSQ_CONTROL_0_REG_CONSTSWITCHMODE 0x08000000
#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
{
return ((val) << A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
}
#define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
#define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
#define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
@ -1484,6 +1552,8 @@ static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
@ -1537,6 +1607,7 @@ static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
{
return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
}
#define A3XX_VFD_DECODE_INSTR_INT 0x00100000
#define A3XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
#define A3XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
static inline uint32_t A3XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
@ -1604,6 +1675,102 @@ static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + 0x1*i0; }
static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; }
#define A3XX_VPC_VARYING_INTERP_MODE_C0__MASK 0x00000003
#define A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT 0
static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C0(enum a3xx_intp_mode val)
{
return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C0__MASK;
}
#define A3XX_VPC_VARYING_INTERP_MODE_C1__MASK 0x0000000c
#define A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT 2
static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C1(enum a3xx_intp_mode val)
{
return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C1__MASK;
}
#define A3XX_VPC_VARYING_INTERP_MODE_C2__MASK 0x00000030
#define A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT 4
static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C2(enum a3xx_intp_mode val)
{
return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C2__MASK;
}
#define A3XX_VPC_VARYING_INTERP_MODE_C3__MASK 0x000000c0
#define A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT 6
static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C3(enum a3xx_intp_mode val)
{
return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C3__MASK;
}
#define A3XX_VPC_VARYING_INTERP_MODE_C4__MASK 0x00000300
#define A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT 8
static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C4(enum a3xx_intp_mode val)
{
return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C4__MASK;
}
#define A3XX_VPC_VARYING_INTERP_MODE_C5__MASK 0x00000c00
#define A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT 10
static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C5(enum a3xx_intp_mode val)
{
return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C5__MASK;
}
#define A3XX_VPC_VARYING_INTERP_MODE_C6__MASK 0x00003000
#define A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT 12
static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C6(enum a3xx_intp_mode val)
{
return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C6__MASK;
}
#define A3XX_VPC_VARYING_INTERP_MODE_C7__MASK 0x0000c000
#define A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT 14
static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C7(enum a3xx_intp_mode val)
{
return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C7__MASK;
}
#define A3XX_VPC_VARYING_INTERP_MODE_C8__MASK 0x00030000
#define A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT 16
static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C8(enum a3xx_intp_mode val)
{
return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C8__MASK;
}
#define A3XX_VPC_VARYING_INTERP_MODE_C9__MASK 0x000c0000
#define A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT 18
static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C9(enum a3xx_intp_mode val)
{
return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C9__MASK;
}
#define A3XX_VPC_VARYING_INTERP_MODE_CA__MASK 0x00300000
#define A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT 20
static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CA(enum a3xx_intp_mode val)
{
return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CA__MASK;
}
#define A3XX_VPC_VARYING_INTERP_MODE_CB__MASK 0x00c00000
#define A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT 22
static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CB(enum a3xx_intp_mode val)
{
return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CB__MASK;
}
#define A3XX_VPC_VARYING_INTERP_MODE_CC__MASK 0x03000000
#define A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT 24
static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CC(enum a3xx_intp_mode val)
{
return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CC__MASK;
}
#define A3XX_VPC_VARYING_INTERP_MODE_CD__MASK 0x0c000000
#define A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT 26
static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CD(enum a3xx_intp_mode val)
{
return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CD__MASK;
}
#define A3XX_VPC_VARYING_INTERP_MODE_CE__MASK 0x30000000
#define A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT 28
static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CE(enum a3xx_intp_mode val)
{
return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CE__MASK;
}
#define A3XX_VPC_VARYING_INTERP_MODE_CF__MASK 0xc0000000
#define A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT 30
static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CF(enum a3xx_intp_mode val)
{
return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CF__MASK;
}
static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; }
@ -1928,6 +2095,8 @@ static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK;
}
#define A3XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
#define A3XX_SP_FS_MRT_REG_SINT 0x00000400
#define A3XX_SP_FS_MRT_REG_UINT 0x00000800
static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
@ -1947,6 +2116,8 @@ static inline uint32_t A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val)
return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK;
}
#define REG_A3XX_PA_SC_AA_CONFIG 0x00002301
#define REG_A3XX_TPL1_TP_VS_TEX_OFFSET 0x00002340
#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
@ -2297,11 +2468,11 @@ static inline uint32_t A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size
#define A3XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
#define A3XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
#define A3XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK 0xffff0000
#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16
static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK 0xff000000
#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT 24
static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
{
return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK;
}
#define REG_A3XX_VGT_IMMED_DATA 0x000021fd
@ -2347,17 +2518,23 @@ static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val
#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
#define REG_A3XX_TEX_SAMP_1 0x00000001
#define A3XX_TEX_SAMP_1_LOD_BIAS__MASK 0x000007ff
#define A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT 0
static inline uint32_t A3XX_TEX_SAMP_1_LOD_BIAS(float val)
{
return ((((int32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT) & A3XX_TEX_SAMP_1_LOD_BIAS__MASK;
}
#define A3XX_TEX_SAMP_1_MAX_LOD__MASK 0x003ff000
#define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT 12
static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val)
{
return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
}
#define A3XX_TEX_SAMP_1_MIN_LOD__MASK 0xffc00000
#define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT 22
static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
{
return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
}
#define REG_A3XX_TEX_CONST_0 0x00000000
@ -2448,6 +2625,24 @@ static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
}
#define REG_A3XX_TEX_CONST_3 0x00000003
#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x0000000f
#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0
static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val)
{
return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ1__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ1__MASK;
}
#define A3XX_TEX_CONST_3_DEPTH__MASK 0x0ffe0000
#define A3XX_TEX_CONST_3_DEPTH__SHIFT 17
static inline uint32_t A3XX_TEX_CONST_3_DEPTH(uint32_t val)
{
return ((val) << A3XX_TEX_CONST_3_DEPTH__SHIFT) & A3XX_TEX_CONST_3_DEPTH__MASK;
}
#define A3XX_TEX_CONST_3_LAYERSZ2__MASK 0xf0000000
#define A3XX_TEX_CONST_3_LAYERSZ2__SHIFT 28
static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ2(uint32_t val)
{
return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ2__MASK;
}
#endif /* A3XX_XML */


@ -2,6 +2,8 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
@ -406,6 +408,94 @@ static void a3xx_dump(struct msm_gpu *gpu)
gpu_read(gpu, REG_A3XX_RBBM_STATUS));
adreno_dump(gpu);
}
/* Register offset defines for A3XX */
static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG),
REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA),
REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
REG_A3XX_CP_PFP_UCODE_DATA),
REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
REG_A3XX_CP_PFP_UCODE_ADDR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE),
REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ),
REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE),
REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ),
REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR),
REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR),
REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK),
REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA),
REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA),
REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2),
REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA),
REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT),
REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
REG_A3XX_CP_PROTECT_STATUS),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
REG_A3XX_RBBM_PERFCTR_CTL),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
REG_A3XX_RBBM_PERFCTR_LOAD_CMD0),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
REG_A3XX_RBBM_PERFCTR_LOAD_CMD1),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
REG_A3XX_RBBM_PERFCTR_PWR_1_LO),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
REG_A3XX_RBBM_INT_0_STATUS),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
REG_A3XX_RBBM_AHB_ERROR_STATUS),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
REG_A3XX_RBBM_INT_CLEAR_CMD),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL),
REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
REG_A3XX_VPC_VPC_DEBUG_RAM_SEL),
REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
REG_A3XX_VPC_VPC_DEBUG_RAM_READ),
REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
REG_A3XX_VSC_SIZE_ADDRESS),
REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0),
REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX),
REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
REG_A3XX_SP_VS_PVT_MEM_ADDR_REG),
REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
REG_A3XX_SP_FS_PVT_MEM_ADDR_REG),
REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
REG_A3XX_SP_VS_OBJ_START_REG),
REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
REG_A3XX_SP_FS_OBJ_START_REG),
REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2,
REG_A3XX_RBBM_PM_OVERRIDE2),
REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2),
REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT,
REG_A3XX_SQ_GPR_MANAGEMENT),
REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT,
REG_A3XX_SQ_INST_STORE_MANAGMENT),
REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
REG_A3XX_RBBM_SW_RESET_CMD),
REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
REG_A3XX_UCHE_CACHE_INVALIDATE0_REG),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI),
};
static const struct adreno_gpu_funcs funcs = {
.base = {
@ -463,6 +553,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
adreno_gpu->registers = a3xx_registers;
adreno_gpu->reg_offsets = a3xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
if (ret)

The diff for this file is not shown because of its large size.


@ -0,0 +1,604 @@
/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "a4xx_gpu.h"
#ifdef CONFIG_MSM_OCMEM
# include <soc/qcom/ocmem.h>
#endif
#define A4XX_INT0_MASK \
(A4XX_INT0_RBBM_AHB_ERROR | \
A4XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
A4XX_INT0_CP_T0_PACKET_IN_IB | \
A4XX_INT0_CP_OPCODE_ERROR | \
A4XX_INT0_CP_RESERVED_BIT_ERROR | \
A4XX_INT0_CP_HW_FAULT | \
A4XX_INT0_CP_IB1_INT | \
A4XX_INT0_CP_IB2_INT | \
A4XX_INT0_CP_RB_INT | \
A4XX_INT0_CP_REG_PROTECT_FAULT | \
A4XX_INT0_CP_AHB_ERROR_HALT | \
A4XX_INT0_UCHE_OOB_ACCESS)
extern bool hang_debug;
static void a4xx_dump(struct msm_gpu *gpu);
/*
* a4xx_enable_hwcg() - Program the clock control registers
* @device: The adreno device pointer
*/
static void a4xx_enable_hwcg(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
unsigned int i;
for (i = 0; i < 4; i++)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
for (i = 0; i < 4; i++)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
for (i = 0; i < 4; i++)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7);
for (i = 0; i < 4; i++)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TP(i), 0x00111111);
for (i = 0; i < 4; i++)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_SP(i), 0x22222222);
for (i = 0; i < 4; i++)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_SP(i), 0x00222222);
for (i = 0; i < 4; i++)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_SP(i), 0x00000104);
for (i = 0; i < 4; i++)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_SP(i), 0x00000081);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_UCHE, 0x22222222);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_UCHE, 0x02222222);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL3_UCHE, 0x00000000);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL4_UCHE, 0x00000000);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_UCHE, 0x00004444);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_UCHE, 0x00001112);
for (i = 0; i < 4; i++)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_RB(i), 0x22222222);
/* Disable L1 clocking in A420 due to CCU issues with it */
for (i = 0; i < 4; i++) {
if (adreno_is_a420(adreno_gpu)) {
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
0x00002020);
} else {
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
0x00022020);
}
}
for (i = 0; i < 4; i++) {
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
0x00000922);
}
for (i = 0; i < 4; i++) {
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
0x00000000);
}
for (i = 0; i < 4; i++) {
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
0x00000001);
}
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_GPC, 0x04100104);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_GPC, 0x00022222);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM, 0x00000022);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM, 0x0000010F);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM, 0x00000022);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM, 0x00222222);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00004104);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ , 0x00000000);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00020000);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
}
static void a4xx_me_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb;
OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000080);
OUT_RING(ring, 0x00000100);
OUT_RING(ring, 0x00000180);
OUT_RING(ring, 0x00006600);
OUT_RING(ring, 0x00000150);
OUT_RING(ring, 0x0000014e);
OUT_RING(ring, 0x00000154);
OUT_RING(ring, 0x00000001);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
gpu->funcs->flush(gpu);
gpu->funcs->idle(gpu);
}
static int a4xx_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
uint32_t *ptr, len;
int i, ret;
if (adreno_is_a4xx(adreno_gpu)) {
gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
} else {
BUG();
}
/* Make all blocks contribute to the GPU BUSY perf counter */
gpu_write(gpu, REG_A4XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
/* Tune the hystersis counters for SP and CP idle detection */
gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
/* Enable the RBBM error reporting bits */
gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);
/* Enable AHB error reporting*/
gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL1, 0xa6ffffff);
/* Enable power counters*/
gpu_write(gpu, REG_A4XX_RBBM_RBBM_CTL, 0x00000030);
/*
* Turn on hang detection - this spews a lot of useful information
* into the RBBM registers on a hang:
*/
gpu_write(gpu, REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL,
(1 << 30) | 0xFFFF);
gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR,
(unsigned int)(a4xx_gpu->ocmem_base >> 14));
/* Turn on performance counters: */
gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
/* Disable L2 bypass to avoid UCHE out of bounds errors */
gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
(adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
a4xx_enable_hwcg(gpu);
/*
* For A420 set RBBM_CLOCK_DELAY_HLSQ.CGC_HLSQ_TP_EARLY_CYC >= 2
* due to timing issue with HLSQ_TP_CLK_EN
*/
if (adreno_is_a420(adreno_gpu)) {
unsigned int val;
val = gpu_read(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ);
val &= ~A4XX_CGC_HLSQ_EARLY_CYC__MASK;
val |= 2 << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT;
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
}
ret = adreno_hw_init(gpu);
if (ret)
return ret;
/* setup access protection: */
gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);
/* RBBM registers */
gpu_write(gpu, REG_A4XX_CP_PROTECT(0), 0x62000010);
gpu_write(gpu, REG_A4XX_CP_PROTECT(1), 0x63000020);
gpu_write(gpu, REG_A4XX_CP_PROTECT(2), 0x64000040);
gpu_write(gpu, REG_A4XX_CP_PROTECT(3), 0x65000080);
gpu_write(gpu, REG_A4XX_CP_PROTECT(4), 0x66000100);
gpu_write(gpu, REG_A4XX_CP_PROTECT(5), 0x64000200);
/* CP registers */
gpu_write(gpu, REG_A4XX_CP_PROTECT(6), 0x67000800);
gpu_write(gpu, REG_A4XX_CP_PROTECT(7), 0x64001600);
/* RB registers */
gpu_write(gpu, REG_A4XX_CP_PROTECT(8), 0x60003300);
/* HLSQ registers */
gpu_write(gpu, REG_A4XX_CP_PROTECT(9), 0x60003800);
/* VPC registers */
gpu_write(gpu, REG_A4XX_CP_PROTECT(10), 0x61003980);
/* SMMU registers */
gpu_write(gpu, REG_A4XX_CP_PROTECT(11), 0x6e010000);
gpu_write(gpu, REG_A4XX_RBBM_INT_0_MASK, A4XX_INT0_MASK);
ret = adreno_hw_init(gpu);
if (ret)
return ret;
/* Load PM4: */
ptr = (uint32_t *)(adreno_gpu->pm4->data);
len = adreno_gpu->pm4->size / 4;
DBG("loading PM4 ucode version: %u", ptr[0]);
gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0);
for (i = 1; i < len; i++)
gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]);
/* Load PFP: */
ptr = (uint32_t *)(adreno_gpu->pfp->data);
len = adreno_gpu->pfp->size / 4;
DBG("loading PFP ucode version: %u", ptr[0]);
gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0);
for (i = 1; i < len; i++)
gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_DATA, ptr[i]);
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
a4xx_me_init(gpu);
return 0;
}
static void a4xx_recover(struct msm_gpu *gpu)
{
/* dump registers before resetting gpu, if enabled: */
if (hang_debug)
a4xx_dump(gpu);
gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 1);
gpu_read(gpu, REG_A4XX_RBBM_SW_RESET_CMD);
gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 0);
adreno_recover(gpu);
}
static void a4xx_destroy(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
DBG("%s", gpu->name);
adreno_gpu_cleanup(adreno_gpu);
#ifdef CONFIG_MSM_OCMEM
if (a4xx_gpu->ocmem_base)
ocmem_free(OCMEM_GRAPHICS, a4xx_gpu->ocmem_hdl);
#endif
kfree(a4xx_gpu);
}
static void a4xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
adreno_idle(gpu);
/* then wait for GPU to finish: */
if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
A4XX_RBBM_STATUS_GPU_BUSY)))
DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
/* TODO maybe we need to reset GPU here to recover from hang? */
}
static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
{
uint32_t status;
status = gpu_read(gpu, REG_A4XX_RBBM_INT_0_STATUS);
DBG("%s: Int status %08x", gpu->name, status);
gpu_write(gpu, REG_A4XX_RBBM_INT_CLEAR_CMD, status);
msm_gpu_retire(gpu);
return IRQ_HANDLED;
}
static const unsigned int a4xx_registers[] = {
/* RBBM */
0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
/* CP */
0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
0x0578, 0x058F,
/* VSC */
0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
/* GRAS */
0x0C80, 0x0C81, 0x0C88, 0x0C8F,
/* RB */
0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
/* PC */
0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
/* VFD */
0x0E40, 0x0E4A,
/* VPC */
0x0E60, 0x0E61, 0x0E63, 0x0E68,
/* UCHE */
0x0E80, 0x0E84, 0x0E88, 0x0E95,
/* VMIDMT */
0x1000, 0x1000, 0x1002, 0x1002, 0x1004, 0x1004, 0x1008, 0x100A,
0x100C, 0x100D, 0x100F, 0x1010, 0x1012, 0x1016, 0x1024, 0x1024,
0x1027, 0x1027, 0x1100, 0x1100, 0x1102, 0x1102, 0x1104, 0x1104,
0x1110, 0x1110, 0x1112, 0x1116, 0x1124, 0x1124, 0x1300, 0x1300,
0x1380, 0x1380,
/* GRAS CTX 0 */
0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
/* PC CTX 0 */
0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
/* VFD CTX 0 */
0x2200, 0x2204, 0x2208, 0x22A9,
/* GRAS CTX 1 */
0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
/* PC CTX 1 */
0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
/* VFD CTX 1 */
0x2600, 0x2604, 0x2608, 0x26A9,
/* XPU */
0x2C00, 0x2C01, 0x2C10, 0x2C10, 0x2C12, 0x2C16, 0x2C1D, 0x2C20,
0x2C28, 0x2C28, 0x2C30, 0x2C30, 0x2C32, 0x2C36, 0x2C40, 0x2C40,
0x2C50, 0x2C50, 0x2C52, 0x2C56, 0x2C80, 0x2C80, 0x2C94, 0x2C95,
/* VBIF */
0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x301D, 0x3020, 0x3022,
0x3024, 0x3026, 0x3028, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031,
0x3034, 0x3036, 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040,
0x3049, 0x3049, 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068,
0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x330C, 0x330C,
0x3310, 0x3310, 0x3400, 0x3401, 0x3410, 0x3410, 0x3412, 0x3416,
0x341D, 0x3420, 0x3428, 0x3428, 0x3430, 0x3430, 0x3432, 0x3436,
0x3440, 0x3440, 0x3450, 0x3450, 0x3452, 0x3456, 0x3480, 0x3480,
0x3494, 0x3495, 0x4000, 0x4000, 0x4002, 0x4002, 0x4004, 0x4004,
0x4008, 0x400A, 0x400C, 0x400D, 0x400F, 0x4012, 0x4014, 0x4016,
0x401D, 0x401D, 0x4020, 0x4027, 0x4060, 0x4062, 0x4200, 0x4200,
0x4300, 0x4300, 0x4400, 0x4400, 0x4500, 0x4500, 0x4800, 0x4802,
0x480F, 0x480F, 0x4811, 0x4811, 0x4813, 0x4813, 0x4815, 0x4816,
0x482B, 0x482B, 0x4857, 0x4857, 0x4883, 0x4883, 0x48AF, 0x48AF,
0x48C5, 0x48C5, 0x48E5, 0x48E5, 0x4905, 0x4905, 0x4925, 0x4925,
0x4945, 0x4945, 0x4950, 0x4950, 0x495B, 0x495B, 0x4980, 0x498E,
0x4B00, 0x4B00, 0x4C00, 0x4C00, 0x4D00, 0x4D00, 0x4E00, 0x4E00,
0x4E80, 0x4E80, 0x4F00, 0x4F00, 0x4F08, 0x4F08, 0x4F10, 0x4F10,
0x4F18, 0x4F18, 0x4F20, 0x4F20, 0x4F30, 0x4F30, 0x4F60, 0x4F60,
0x4F80, 0x4F81, 0x4F88, 0x4F89, 0x4FEE, 0x4FEE, 0x4FF3, 0x4FF3,
0x6000, 0x6001, 0x6008, 0x600F, 0x6014, 0x6016, 0x6018, 0x601B,
0x61FD, 0x61FD, 0x623C, 0x623C, 0x6380, 0x6380, 0x63A0, 0x63A0,
0x63C0, 0x63C1, 0x63C8, 0x63C9, 0x63D0, 0x63D4, 0x63D6, 0x63D6,
0x63EE, 0x63EE, 0x6400, 0x6401, 0x6408, 0x640F, 0x6414, 0x6416,
0x6418, 0x641B, 0x65FD, 0x65FD, 0x663C, 0x663C, 0x6780, 0x6780,
0x67A0, 0x67A0, 0x67C0, 0x67C1, 0x67C8, 0x67C9, 0x67D0, 0x67D4,
0x67D6, 0x67D6, 0x67EE, 0x67EE, 0x6800, 0x6801, 0x6808, 0x680F,
0x6814, 0x6816, 0x6818, 0x681B, 0x69FD, 0x69FD, 0x6A3C, 0x6A3C,
0x6B80, 0x6B80, 0x6BA0, 0x6BA0, 0x6BC0, 0x6BC1, 0x6BC8, 0x6BC9,
0x6BD0, 0x6BD4, 0x6BD6, 0x6BD6, 0x6BEE, 0x6BEE,
~0 /* sentinel */
};
#ifdef CONFIG_DEBUG_FS
static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
gpu->funcs->pm_resume(gpu);
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A4XX_RBBM_STATUS));
gpu->funcs->pm_suspend(gpu);
adreno_show(gpu, m);
}
#endif
/* Register offset defines for A4XX, in order of enum adreno_regs */
static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_A4XX_CP_DEBUG),
REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_A4XX_CP_ME_RAM_WADDR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_A4XX_CP_ME_RAM_DATA),
REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
REG_A4XX_CP_PFP_UCODE_DATA),
REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
REG_A4XX_CP_PFP_UCODE_ADDR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A4XX_CP_WFI_PEND_CTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A4XX_CP_PROTECT_CTRL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_A4XX_CP_ME_CNTL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_A4XX_CP_IB1_BASE),
REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_A4XX_CP_IB1_BUFSZ),
REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_A4XX_CP_IB2_BASE),
REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_A4XX_CP_IB2_BUFSZ),
REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_A4XX_CP_ME_RAM_RADDR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A4XX_CP_ROQ_ADDR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A4XX_CP_ROQ_DATA),
REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A4XX_CP_MERCIU_ADDR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A4XX_CP_MERCIU_DATA),
REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A4XX_CP_MERCIU_DATA2),
REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A4XX_CP_MEQ_ADDR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A4XX_CP_MEQ_DATA),
REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A4XX_CP_HW_FAULT),
REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
REG_A4XX_CP_PROTECT_STATUS),
REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_A4XX_CP_SCRATCH_ADDR),
REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_A4XX_CP_SCRATCH_UMASK),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A4XX_RBBM_STATUS),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
REG_A4XX_RBBM_PERFCTR_CTL),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
REG_A4XX_RBBM_PERFCTR_LOAD_CMD0),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
REG_A4XX_RBBM_PERFCTR_LOAD_CMD1),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
REG_A4XX_RBBM_PERFCTR_LOAD_CMD2),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
REG_A4XX_RBBM_PERFCTR_PWR_1_LO),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A4XX_RBBM_INT_0_MASK),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
REG_A4XX_RBBM_INT_0_STATUS),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
REG_A4XX_RBBM_AHB_ERROR_STATUS),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A4XX_RBBM_AHB_CMD),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A4XX_RBBM_CLOCK_CTL),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS),
REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
REG_A4XX_VPC_DEBUG_RAM_SEL),
REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
REG_A4XX_VPC_DEBUG_RAM_READ),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
REG_A4XX_RBBM_INT_CLEAR_CMD),
REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
REG_A4XX_VSC_SIZE_ADDRESS),
REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A4XX_VFD_CONTROL_0),
REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
REG_A4XX_SP_VS_PVT_MEM_ADDR),
REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
REG_A4XX_SP_FS_PVT_MEM_ADDR),
REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
REG_A4XX_SP_VS_OBJ_START),
REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
REG_A4XX_SP_FS_OBJ_START),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A4XX_RBBM_RBBM_CTL),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
REG_A4XX_RBBM_SW_RESET_CMD),
REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
REG_A4XX_UCHE_INVALIDATE0),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI),
};
static void a4xx_dump(struct msm_gpu *gpu)
{
adreno_dump(gpu);
printk("status: %08x\n",
gpu_read(gpu, REG_A4XX_RBBM_STATUS));
adreno_dump(gpu);
}
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
.hw_init = a4xx_hw_init,
.pm_suspend = msm_gpu_pm_suspend,
.pm_resume = msm_gpu_pm_resume,
.recover = a4xx_recover,
.last_fence = adreno_last_fence,
.submit = adreno_submit,
.flush = adreno_flush,
.idle = a4xx_idle,
.irq = a4xx_irq,
.destroy = a4xx_destroy,
#ifdef CONFIG_DEBUG_FS
.show = a4xx_show,
#endif
},
};
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
{
struct a4xx_gpu *a4xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
int ret;
if (!pdev) {
dev_err(dev->dev, "no a4xx device\n");
ret = -ENXIO;
goto fail;
}
a4xx_gpu = kzalloc(sizeof(*a4xx_gpu), GFP_KERNEL);
if (!a4xx_gpu) {
ret = -ENOMEM;
goto fail;
}
adreno_gpu = &a4xx_gpu->base;
gpu = &adreno_gpu->base;
a4xx_gpu->pdev = pdev;
gpu->perfcntrs = NULL;
gpu->num_perfcntrs = 0;
adreno_gpu->registers = a4xx_registers;
adreno_gpu->reg_offsets = a4xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
if (ret)
goto fail;
/* if needed, allocate gmem: */
if (adreno_is_a4xx(adreno_gpu)) {
#ifdef CONFIG_MSM_OCMEM
/* TODO this is different/missing upstream: */
struct ocmem_buf *ocmem_hdl =
ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
a4xx_gpu->ocmem_hdl = ocmem_hdl;
a4xx_gpu->ocmem_base = ocmem_hdl->addr;
adreno_gpu->gmem = ocmem_hdl->len;
DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
a4xx_gpu->ocmem_base);
#endif
}
if (!gpu->mmu) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
* limp along with just modesetting. If it turns out
* to not be possible to restrict access, then we must
* implement a cmdstream validator.
*/
dev_err(dev->dev, "No memory protection without IOMMU\n");
ret = -ENXIO;
goto fail;
}
return gpu;
fail:
if (a4xx_gpu)
a4xx_destroy(&a4xx_gpu->base.base);
return ERR_PTR(ret);
}


@ -0,0 +1,34 @@
/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __A4XX_GPU_H__
#define __A4XX_GPU_H__
#include "adreno_gpu.h"
/* arrg, somehow fb.h is getting pulled in: */
#undef ROP_COPY
#undef ROP_XOR
#include "a4xx.xml.h"
struct a4xx_gpu {
struct adreno_gpu base;
struct platform_device *pdev;
/* if OCMEM is used for GMEM: */
uint32_t ocmem_base;
void *ocmem_hdl;
};
#define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base)
#endif /* __A4XX_GPU_H__ */


@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@ -105,6 +105,7 @@ enum adreno_rb_dither_mode {
enum adreno_rb_depth_format {
DEPTHX_16 = 0,
DEPTHX_24_8 = 1,
DEPTHX_32 = 2,
};
enum adreno_rb_copy_control_mode {
@ -132,6 +133,7 @@ enum a3xx_threadmode {
};
enum a3xx_instrbuffermode {
CACHE = 0,
BUFFER = 1,
};
@ -140,6 +142,13 @@ enum a3xx_threadsize {
FOUR_QUADS = 1,
};
enum a3xx_color_swap {
WZYX = 0,
WXYZ = 1,
ZYXW = 2,
XYZW = 3,
};
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1


@ -2,6 +2,8 @@
* Copyright (C) 2013-2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
@ -28,6 +30,7 @@ MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!
module_param_named(hang_debug, hang_debug, bool, 0600);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
static const struct adreno_info gpulist[] = {
{
@ -54,6 +57,14 @@ static const struct adreno_info gpulist[] = {
.pfpfw = "a330_pfp.fw",
.gmem = SZ_1M,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(4, 2, 0, ANY_ID),
.revn = 420,
.name = "A420",
.pm4fw = "a420_pm4.fw",
.pfpfw = "a420_pfp.fw",
.gmem = (SZ_1M + SZ_512K),
.init = a4xx_gpu_init,
},
};
@ -61,6 +72,8 @@ MODULE_FIRMWARE("a300_pm4.fw");
MODULE_FIRMWARE("a300_pfp.fw");
MODULE_FIRMWARE("a330_pm4.fw");
MODULE_FIRMWARE("a330_pfp.fw");
MODULE_FIRMWARE("a420_pm4.fw");
MODULE_FIRMWARE("a420_pfp.fw");
static inline bool _rev_match(uint8_t entry, uint8_t id)
{


@ -2,6 +2,8 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
@ -63,19 +65,21 @@ int adreno_hw_init(struct msm_gpu *gpu)
}
/* Setup REG_CP_RB_CNTL: */
gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
/* size is log2(quad-words): */
AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
/* Setup ringbuffer address: */
gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
rbmemptr(adreno_gpu, rptr));
/* Setup scratch/timestamp: */
gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));
adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_ADDR,
rbmemptr(adreno_gpu, fence));
gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);
adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_UMSK, 0x1);
return 0;
}
@ -151,7 +155,7 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, submit->fence);
if (adreno_is_a3xx(adreno_gpu)) {
if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
/* Flush HLSQ lazy updates to make sure there is nothing
* pending for indirect loads after the timestamp has
* passed:
@ -188,12 +192,13 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
void adreno_flush(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t wptr = get_wptr(gpu->rb);
/* ensure writes to ringbuffer have hit system memory: */
mb();
gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
void adreno_idle(struct msm_gpu *gpu)
@ -319,6 +324,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
RB_SIZE);
if (ret)
return ret;
ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
if (ret) {
dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
@ -333,12 +344,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
RB_SIZE);
if (ret)
return ret;
mmu = gpu->mmu;
if (mmu) {
ret = mmu->funcs->attach(mmu, iommu_ports,


@ -2,6 +2,8 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
@ -25,6 +27,81 @@
#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"
#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
/**
* adreno_regs: List of registers that are used across all
* 3D devices. Each device type has a different offset value for the same
* register, so an array of register offsets is declared for every device
* and is indexed by the enumeration values defined in this enum
* (an illustrative per-device table sketch follows the enum below).
*/
enum adreno_regs {
REG_ADRENO_CP_DEBUG,
REG_ADRENO_CP_ME_RAM_WADDR,
REG_ADRENO_CP_ME_RAM_DATA,
REG_ADRENO_CP_PFP_UCODE_DATA,
REG_ADRENO_CP_PFP_UCODE_ADDR,
REG_ADRENO_CP_WFI_PEND_CTR,
REG_ADRENO_CP_RB_BASE,
REG_ADRENO_CP_RB_RPTR_ADDR,
REG_ADRENO_CP_RB_RPTR,
REG_ADRENO_CP_RB_WPTR,
REG_ADRENO_CP_PROTECT_CTRL,
REG_ADRENO_CP_ME_CNTL,
REG_ADRENO_CP_RB_CNTL,
REG_ADRENO_CP_IB1_BASE,
REG_ADRENO_CP_IB1_BUFSZ,
REG_ADRENO_CP_IB2_BASE,
REG_ADRENO_CP_IB2_BUFSZ,
REG_ADRENO_CP_TIMESTAMP,
REG_ADRENO_CP_ME_RAM_RADDR,
REG_ADRENO_CP_ROQ_ADDR,
REG_ADRENO_CP_ROQ_DATA,
REG_ADRENO_CP_MERCIU_ADDR,
REG_ADRENO_CP_MERCIU_DATA,
REG_ADRENO_CP_MERCIU_DATA2,
REG_ADRENO_CP_MEQ_ADDR,
REG_ADRENO_CP_MEQ_DATA,
REG_ADRENO_CP_HW_FAULT,
REG_ADRENO_CP_PROTECT_STATUS,
REG_ADRENO_SCRATCH_ADDR,
REG_ADRENO_SCRATCH_UMSK,
REG_ADRENO_SCRATCH_REG2,
REG_ADRENO_RBBM_STATUS,
REG_ADRENO_RBBM_PERFCTR_CTL,
REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
REG_ADRENO_RBBM_INT_0_MASK,
REG_ADRENO_RBBM_INT_0_STATUS,
REG_ADRENO_RBBM_AHB_ERROR_STATUS,
REG_ADRENO_RBBM_PM_OVERRIDE2,
REG_ADRENO_RBBM_AHB_CMD,
REG_ADRENO_RBBM_INT_CLEAR_CMD,
REG_ADRENO_RBBM_SW_RESET_CMD,
REG_ADRENO_RBBM_CLOCK_CTL,
REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
REG_ADRENO_VPC_DEBUG_RAM_SEL,
REG_ADRENO_VPC_DEBUG_RAM_READ,
REG_ADRENO_VSC_SIZE_ADDRESS,
REG_ADRENO_VFD_CONTROL_0,
REG_ADRENO_VFD_INDEX_MAX,
REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
REG_ADRENO_SP_VS_OBJ_START_REG,
REG_ADRENO_SP_FS_OBJ_START_REG,
REG_ADRENO_PA_SC_AA_CONFIG,
REG_ADRENO_SQ_GPR_MANAGEMENT,
REG_ADRENO_SQ_INST_STORE_MANAGMENT,
REG_ADRENO_TP0_CHICKEN,
REG_ADRENO_RBBM_RBBM_CTL,
REG_ADRENO_UCHE_INVALIDATE0,
REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
REG_ADRENO_REGISTER_MAX,
};
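
For context, a minimal sketch of how a GPU-specific backend might fill such an offset table and hand it to the common code. The table and helper names are illustrative only; REG_ADRENO_DEFINE, the reg_offsets pointer and the REG_AXXX_* register names are the ones that appear elsewhere in this series.

/* Hypothetical per-device table, indexed by enum adreno_regs.
 * REG_ADRENO_DEFINE stores the real register offset + 1, so an entry
 * left at 0 means "not wired up on this GPU" and trips the BUG() in
 * adreno_reg_check().
 */
static const unsigned int example_register_offsets[REG_ADRENO_REGISTER_MAX] = {
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
};

static void example_gpu_init_offsets(struct adreno_gpu *adreno_gpu)
{
	/* publish the table once, at init time: */
	adreno_gpu->reg_offsets = example_register_offsets;
}
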
struct adreno_rev {
uint8_t core;
uint8_t major;
@ -76,6 +153,13 @@ struct adreno_gpu {
struct adreno_rbmemptrs *memptrs;
struct drm_gem_object *memptrs_bo;
uint32_t memptrs_iova;
/*
* Register offsets are different between some GPUs.
* GPU specific offsets will be exported by GPU specific
* code (a3xx_gpu.c) and stored in this common location.
*/
const unsigned int *reg_offsets;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
@ -128,6 +212,16 @@ static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
}
static inline bool adreno_is_a4xx(struct adreno_gpu *gpu)
{
return (gpu->revn >= 400) && (gpu->revn < 500);
}
static inline int adreno_is_a420(struct adreno_gpu *gpu)
{
return gpu->revn == 420;
}
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_hw_init(struct msm_gpu *gpu);
uint32_t adreno_last_fence(struct msm_gpu *gpu);
@ -171,5 +265,37 @@ OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}
/*
* adreno_reg_check() - Checks the validity of a register enum
* @gpu: Pointer to struct adreno_gpu
* @offset_name: The register enum that is checked
*/
static inline bool adreno_reg_check(struct adreno_gpu *gpu,
enum adreno_regs offset_name)
{
if (offset_name >= REG_ADRENO_REGISTER_MAX ||
!gpu->reg_offsets[offset_name]) {
BUG();
}
return true;
}
static inline u32 adreno_gpu_read(struct adreno_gpu *gpu,
enum adreno_regs offset_name)
{
u32 reg = gpu->reg_offsets[offset_name];
u32 val = 0;
if (adreno_reg_check(gpu, offset_name))
val = gpu_read(&gpu->base, reg - 1);
return val;
}
static inline void adreno_gpu_write(struct adreno_gpu *gpu,
enum adreno_regs offset_name, u32 data)
{
u32 reg = gpu->reg_offsets[offset_name];
if (adreno_reg_check(gpu, offset_name))
gpu_write(&gpu->base, reg - 1, data);
}
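
Callers then only ever use the device-independent names: the accessors subtract the +1 bias added by REG_ADRENO_DEFINE, and a missing table entry is caught by adreno_reg_check() instead of silently aliasing offset 0. A hypothetical caller fragment (register names taken from the enum above, surrounding code assumed):

	uint32_t status = adreno_gpu_read(adreno_gpu, REG_ADRENO_RBBM_STATUS);
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, get_wptr(gpu->rb));
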
#endif /* __ADRENO_GPU_H__ */


@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@ -157,6 +157,7 @@ enum adreno_pm4_type3_packets {
CP_IM_STORE = 44,
CP_SET_DRAW_INIT_FLAGS = 75,
CP_SET_PROTECTED_MODE = 95,
CP_BOOTSTRAP_UCODE = 111,
CP_LOAD_STATE = 48,
CP_COND_INDIRECT_BUFFER_PFE = 58,
CP_COND_INDIRECT_BUFFER_PFD = 50,
@ -278,11 +279,11 @@ static inline uint32_t CP_DRAW_INDX_1_INDEX_SIZE(enum pc_di_index_size val)
#define CP_DRAW_INDX_1_NOT_EOP 0x00001000
#define CP_DRAW_INDX_1_SMALL_INDEX 0x00002000
#define CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
#define CP_DRAW_INDX_1_NUM_INDICES__MASK 0xffff0000
#define CP_DRAW_INDX_1_NUM_INDICES__SHIFT 16
static inline uint32_t CP_DRAW_INDX_1_NUM_INDICES(uint32_t val)
#define CP_DRAW_INDX_1_NUM_INSTANCES__MASK 0xff000000
#define CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT 24
static inline uint32_t CP_DRAW_INDX_1_NUM_INSTANCES(uint32_t val)
{
return ((val) << CP_DRAW_INDX_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_1_NUM_INDICES__MASK;
return ((val) << CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_1_NUM_INSTANCES__MASK;
}
#define REG_CP_DRAW_INDX_2 0x00000002
@ -293,20 +294,20 @@ static inline uint32_t CP_DRAW_INDX_2_NUM_INDICES(uint32_t val)
return ((val) << CP_DRAW_INDX_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_NUM_INDICES__MASK;
}
#define REG_CP_DRAW_INDX_2 0x00000002
#define CP_DRAW_INDX_2_INDX_BASE__MASK 0xffffffff
#define CP_DRAW_INDX_2_INDX_BASE__SHIFT 0
static inline uint32_t CP_DRAW_INDX_2_INDX_BASE(uint32_t val)
#define REG_CP_DRAW_INDX_3 0x00000003
#define CP_DRAW_INDX_3_INDX_BASE__MASK 0xffffffff
#define CP_DRAW_INDX_3_INDX_BASE__SHIFT 0
static inline uint32_t CP_DRAW_INDX_3_INDX_BASE(uint32_t val)
{
return ((val) << CP_DRAW_INDX_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_2_INDX_BASE__MASK;
return ((val) << CP_DRAW_INDX_3_INDX_BASE__SHIFT) & CP_DRAW_INDX_3_INDX_BASE__MASK;
}
#define REG_CP_DRAW_INDX_2 0x00000002
#define CP_DRAW_INDX_2_INDX_SIZE__MASK 0xffffffff
#define CP_DRAW_INDX_2_INDX_SIZE__SHIFT 0
static inline uint32_t CP_DRAW_INDX_2_INDX_SIZE(uint32_t val)
#define REG_CP_DRAW_INDX_4 0x00000004
#define CP_DRAW_INDX_4_INDX_SIZE__MASK 0xffffffff
#define CP_DRAW_INDX_4_INDX_SIZE__SHIFT 0
static inline uint32_t CP_DRAW_INDX_4_INDX_SIZE(uint32_t val)
{
return ((val) << CP_DRAW_INDX_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_2_INDX_SIZE__MASK;
return ((val) << CP_DRAW_INDX_4_INDX_SIZE__SHIFT) & CP_DRAW_INDX_4_INDX_SIZE__MASK;
}
#define REG_CP_DRAW_INDX_2_0 0x00000000
@ -345,11 +346,11 @@ static inline uint32_t CP_DRAW_INDX_2_1_INDEX_SIZE(enum pc_di_index_size val)
#define CP_DRAW_INDX_2_1_NOT_EOP 0x00001000
#define CP_DRAW_INDX_2_1_SMALL_INDEX 0x00002000
#define CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
#define CP_DRAW_INDX_2_1_NUM_INDICES__MASK 0xffff0000
#define CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT 16
static inline uint32_t CP_DRAW_INDX_2_1_NUM_INDICES(uint32_t val)
#define CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK 0xff000000
#define CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT 24
static inline uint32_t CP_DRAW_INDX_2_1_NUM_INSTANCES(uint32_t val)
{
return ((val) << CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INDICES__MASK;
return ((val) << CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK;
}
#define REG_CP_DRAW_INDX_2_2 0x00000002
@ -388,11 +389,11 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum pc_di_index_size va
#define CP_DRAW_INDX_OFFSET_0_NOT_EOP 0x00001000
#define CP_DRAW_INDX_OFFSET_0_SMALL_INDEX 0x00002000
#define CP_DRAW_INDX_OFFSET_0_PRE_DRAW_INITIATOR_ENABLE 0x00004000
#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK 0xffff0000
#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT 16
static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INDICES(uint32_t val)
#define CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__MASK 0xffff0000
#define CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__SHIFT 16
static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES(uint32_t val)
{
return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK;
return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__MASK;
}
#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001
@ -405,20 +406,22 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_2_NUM_INDICES(uint32_t val)
return ((val) << CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK;
}
#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK 0xffffffff
#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT 0
static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_BASE(uint32_t val)
#define REG_CP_DRAW_INDX_OFFSET_3 0x00000003
#define REG_CP_DRAW_INDX_OFFSET_4 0x00000004
#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK 0xffffffff
#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT 0
static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE(uint32_t val)
{
return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK;
return ((val) << CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK;
}
#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK 0xffffffff
#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT 0
static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_SIZE(uint32_t val)
#define REG_CP_DRAW_INDX_OFFSET_5 0x00000005
#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK 0xffffffff
#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT 0
static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val)
{
return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK;
return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK;
}
#define REG_CP_SET_DRAW_STATE_0 0x00000000


@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)


@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)


@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)


@ -68,24 +68,17 @@ void hdmi_destroy(struct kref *kref)
platform_set_drvdata(hdmi->pdev, NULL);
}
/* initialize connector */
struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
/* Construct hdmi at bind/probe time and grab all the resources. If
* we are going to EPROBE_DEFER, we want to do it here rather than
* later, at modeset_init() time.
*/
static struct hdmi *hdmi_init(struct platform_device *pdev)
{
struct hdmi_platform_config *config = pdev->dev.platform_data;
struct hdmi *hdmi = NULL;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->hdmi_pdev;
struct hdmi_platform_config *config;
int i, ret;
if (!pdev) {
dev_err(dev->dev, "no hdmi device\n");
ret = -ENXIO;
goto fail;
}
config = pdev->dev.platform_data;
hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi) {
ret = -ENOMEM;
goto fail;
@ -93,12 +86,8 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
kref_init(&hdmi->refcount);
hdmi->dev = dev;
hdmi->pdev = pdev;
hdmi->config = config;
hdmi->encoder = encoder;
hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
/* not sure about which phy maps to which msm.. probably I'm missing some */
if (config->phy_init)
@ -108,7 +97,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
dev_err(dev->dev, "failed to load phy: %d\n", ret);
dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
hdmi->phy = NULL;
goto fail;
}
@ -127,7 +116,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
config->hpd_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n",
dev_err(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
goto fail;
}
@ -143,7 +132,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
config->pwr_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n",
dev_err(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
goto fail;
}
@ -158,7 +147,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(dev->dev, "failed to get hpd clk: %s (%d)\n",
dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
config->hpd_clk_names[i], ret);
goto fail;
}
@ -173,7 +162,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(dev->dev, "failed to get pwr clk: %s (%d)\n",
dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
config->pwr_clk_names[i], ret);
goto fail;
}
@ -184,11 +173,41 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
hdmi->i2c = hdmi_i2c_init(hdmi);
if (IS_ERR(hdmi->i2c)) {
ret = PTR_ERR(hdmi->i2c);
dev_err(dev->dev, "failed to get i2c: %d\n", ret);
dev_err(&pdev->dev, "failed to get i2c: %d\n", ret);
hdmi->i2c = NULL;
goto fail;
}
return hdmi;
fail:
if (hdmi)
hdmi_destroy(&hdmi->refcount);
return ERR_PTR(ret);
}
/* Second part of initialization: the drm/kms level modeset_init,
* which constructs/initializes mode objects, etc, is called from the
* master driver (not the hdmi sub-device's probe/bind!). A caller
* sketch follows this function.
*
* Any resource (regulator/clk/etc) which could be missing at boot
* should be handled in hdmi_init(), so that the failure happens from
* the hdmi sub-device's probe.
*/
int hdmi_modeset_init(struct hdmi *hdmi,
struct drm_device *dev, struct drm_encoder *encoder)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = hdmi->pdev;
struct hdmi_platform_config *config = pdev->dev.platform_data;
int ret;
hdmi->dev = dev;
hdmi->encoder = encoder;
hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
hdmi->bridge = hdmi_bridge_init(hdmi);
if (IS_ERR(hdmi->bridge)) {
ret = PTR_ERR(hdmi->bridge);
@ -230,19 +249,20 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
platform_set_drvdata(pdev, hdmi);
return hdmi;
return 0;
fail:
if (hdmi) {
/* bridge/connector are normally destroyed by drm: */
if (hdmi->bridge)
if (hdmi->bridge) {
hdmi->bridge->funcs->destroy(hdmi->bridge);
if (hdmi->connector)
hdmi->bridge = NULL;
}
if (hdmi->connector) {
hdmi->connector->funcs->destroy(hdmi->connector);
hdmi_destroy(&hdmi->refcount);
hdmi->connector = NULL;
}
return ERR_PTR(ret);
return ret;
}
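
For illustration, the caller side of this split, modelled directly on the mdp4/mdp5 modeset_init() changes later in this same series (error handling trimmed; the encoder is assumed to have been created just above):

	/* in the master driver's modeset_init(), after the encoder exists: */
	if (priv->hdmi) {
		/* Construct bridge/connector for HDMI: */
		ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
		if (ret) {
			dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
			goto fail;
		}
	}
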
/*
@ -251,11 +271,10 @@ fail:
#include <linux/of_gpio.h>
static void set_hdmi_pdev(struct drm_device *dev,
struct platform_device *pdev)
static void set_hdmi(struct drm_device *dev, struct hdmi *hdmi)
{
struct msm_drm_private *priv = dev->dev_private;
priv->hdmi_pdev = pdev;
priv->hdmi = hdmi;
}
#ifdef CONFIG_OF
@ -279,6 +298,7 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char
static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
static struct hdmi_platform_config config = {};
struct hdmi *hdmi;
#ifdef CONFIG_OF
struct device_node *of_node = dev->of_node;
@ -369,14 +389,17 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
}
#endif
dev->platform_data = &config;
set_hdmi_pdev(dev_get_drvdata(master), to_platform_device(dev));
hdmi = hdmi_init(to_platform_device(dev));
if (IS_ERR(hdmi))
return PTR_ERR(hdmi);
set_hdmi(dev_get_drvdata(master), hdmi);
return 0;
}
static void hdmi_unbind(struct device *dev, struct device *master,
void *data)
{
set_hdmi_pdev(dev_get_drvdata(master), NULL);
set_hdmi(dev_get_drvdata(master), NULL);
}
static const struct component_ops hdmi_ops = {


@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)


@ -401,6 +401,9 @@ static const struct drm_connector_funcs hdmi_connector_funcs = {
.detect = hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = hdmi_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {


@ -510,7 +510,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
#ifdef CONFIG_COMMON_CLK
phy_8960->pll_hw.init = &pll_init;
phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw);
phy_8960->pll = devm_clk_register(&hdmi->pdev->dev, &phy_8960->pll_hw);
if (IS_ERR(phy_8960->pll)) {
ret = PTR_ERR(phy_8960->pll);
phy_8960->pll = NULL;


@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)


@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)


@ -25,8 +25,6 @@
struct mdp4_crtc {
struct drm_crtc base;
char name[8];
struct drm_plane *plane;
struct drm_plane *planes[8];
int id;
int ovlp;
enum mdp4_dma dma;
@ -52,25 +50,11 @@ struct mdp4_crtc {
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
struct msm_fence_cb pageflip_cb;
#define PENDING_CURSOR 0x1
#define PENDING_FLIP 0x2
atomic_t pending;
/* the fb that we logically (from PoV of KMS API) hold a ref
* to. Which we may not yet be scanning out (we may still
* be scanning out previous in case of page_flip while waiting
* for gpu rendering to complete:
*/
struct drm_framebuffer *fb;
/* the fb that we currently hold a scanout ref to: */
struct drm_framebuffer *scanout_fb;
/* for unref'ing framebuffers after scanout completes: */
struct drm_flip_work unref_fb_work;
/* for unref'ing cursor bo's after scanout completes: */
struct drm_flip_work unref_cursor_work;
@ -97,15 +81,14 @@ static void crtc_flush(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
uint32_t i, flush = 0;
struct drm_plane *plane;
uint32_t flush = 0;
for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
struct drm_plane *plane = mdp4_crtc->planes[i];
if (plane) {
for_each_plane_on_crtc(crtc, plane) {
enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
flush |= pipe2flush(pipe_id);
}
}
flush |= ovlp2flush(mdp4_crtc->ovlp);
DBG("%s: flush=%08x", mdp4_crtc->name, flush);
@ -113,47 +96,6 @@ static void crtc_flush(struct drm_crtc *crtc)
mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}
static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_framebuffer *old_fb = mdp4_crtc->fb;
/* grab reference to incoming scanout fb: */
drm_framebuffer_reference(new_fb);
mdp4_crtc->base.primary->fb = new_fb;
mdp4_crtc->fb = new_fb;
if (old_fb)
drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
}
/* unlike update_fb(), take a ref to the new scanout fb *before* updating
* plane, then call this. Needed to ensure we don't unref the buffer that
* is actually still being scanned out.
*
* Note that this whole thing goes away with atomic.. since we can defer
* calling into driver until rendering is done.
*/
static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
/* flush updates, to make sure hw is updated to new scanout fb,
* so that we can safely queue unref to current fb (ie. next
* vblank we know hw is done w/ previous scanout_fb).
*/
crtc_flush(crtc);
if (mdp4_crtc->scanout_fb)
drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
mdp4_crtc->scanout_fb);
mdp4_crtc->scanout_fb = fb;
/* enable vblank to complete flip: */
request_pending(crtc, PENDING_FLIP);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
@ -171,38 +113,13 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
*/
if (!file || (event->base.file_priv == file)) {
mdp4_crtc->event = NULL;
DBG("%s: send event: %p", mdp4_crtc->name, event);
drm_send_vblank_event(dev, mdp4_crtc->id, event);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
static void pageflip_cb(struct msm_fence_cb *cb)
{
struct mdp4_crtc *mdp4_crtc =
container_of(cb, struct mdp4_crtc, pageflip_cb);
struct drm_crtc *crtc = &mdp4_crtc->base;
struct drm_framebuffer *fb = crtc->primary->fb;
if (!fb)
return;
drm_framebuffer_reference(fb);
mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
update_scanout(crtc, fb);
}
static void unref_fb_worker(struct drm_flip_work *work, void *val)
{
struct mdp4_crtc *mdp4_crtc =
container_of(work, struct mdp4_crtc, unref_fb_work);
struct drm_device *dev = mdp4_crtc->base.dev;
mutex_lock(&dev->mode_config.mutex);
drm_framebuffer_unreference(val);
mutex_unlock(&dev->mode_config.mutex);
}
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
struct mdp4_crtc *mdp4_crtc =
@ -218,7 +135,6 @@ static void mdp4_crtc_destroy(struct drm_crtc *crtc)
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
drm_crtc_cleanup(crtc);
drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
kfree(mdp4_crtc);
@ -255,6 +171,7 @@ static void blend_setup(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
struct drm_plane *plane;
int i, ovlp = mdp4_crtc->ovlp;
uint32_t mixer_cfg = 0;
static const enum mdp_mixer_stage_id stages[] = {
@ -284,9 +201,7 @@ static void blend_setup(struct drm_crtc *crtc)
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
struct drm_plane *plane = mdp4_crtc->planes[i];
if (plane) {
for_each_plane_on_crtc(crtc, plane) {
enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
int idx = idxs[pipe_id];
if (idx > 0) {
@ -297,7 +212,6 @@ static void blend_setup(struct drm_crtc *crtc)
mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
pipe_id, stages[idx]);
}
}
/* this shouldn't happen.. and seems to cause underflow: */
WARN_ON(!mixer_cfg);
@ -328,18 +242,18 @@ static void blend_setup(struct drm_crtc *crtc)
mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
}
static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb)
static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
enum mdp4_dma dma = mdp4_crtc->dma;
int ret, ovlp = mdp4_crtc->ovlp;
int ovlp = mdp4_crtc->ovlp;
struct drm_display_mode *mode;
mode = adjusted_mode;
if (WARN_ON(!crtc->state))
return;
mode = &crtc->state->adjusted_mode;
DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
mdp4_crtc->name, mode->base.id, mode->name,
@ -350,28 +264,13 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
/* grab extra ref for update_scanout() */
drm_framebuffer_reference(crtc->primary->fb);
ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->primary->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
if (ret) {
drm_framebuffer_unreference(crtc->primary->fb);
dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
mdp4_crtc->name, ret);
return ret;
}
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
/* take data from pipe: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
crtc->primary->fb->pitches[0]);
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
MDP4_DMA_DST_SIZE_WIDTH(0) |
MDP4_DMA_DST_SIZE_HEIGHT(0));
@ -380,8 +279,7 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
crtc->primary->fb->pitches[0]);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
@ -390,11 +288,6 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
}
update_fb(crtc, crtc->primary->fb);
update_scanout(crtc, crtc->primary->fb);
return 0;
}
static void mdp4_crtc_prepare(struct drm_crtc *crtc)
@ -416,60 +309,51 @@ static void mdp4_crtc_commit(struct drm_crtc *crtc)
drm_crtc_vblank_put(crtc);
}
static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_plane *plane = mdp4_crtc->plane;
struct drm_display_mode *mode = &crtc->mode;
int ret;
/* grab extra ref for update_scanout() */
drm_framebuffer_reference(crtc->primary->fb);
ret = mdp4_plane_mode_set(plane, crtc, crtc->primary->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
if (ret) {
drm_framebuffer_unreference(crtc->primary->fb);
return ret;
}
update_fb(crtc, crtc->primary->fb);
update_scanout(crtc, crtc->primary->fb);
return 0;
}
static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
{
}
static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *new_fb,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags)
static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_gem_object *obj;
unsigned long flags;
DBG("%s: check", mdp4_crtc->name);
if (mdp4_crtc->event) {
dev_err(dev->dev, "already pending flip!\n");
return -EBUSY;
}
obj = msm_framebuffer_bo(new_fb, 0);
// TODO anything else to check?
return 0;
}
static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
DBG("%s: begin", mdp4_crtc->name);
}
static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_device *dev = crtc->dev;
unsigned long flags;
DBG("%s: flush", mdp4_crtc->name);
WARN_ON(mdp4_crtc->event);
spin_lock_irqsave(&dev->event_lock, flags);
mdp4_crtc->event = event;
mdp4_crtc->event = crtc->state->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
update_fb(crtc, new_fb);
return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
blend_setup(crtc);
crtc_flush(crtc);
request_pending(crtc, PENDING_FLIP);
}
static int mdp4_crtc_set_property(struct drm_crtc *crtc,
@ -607,22 +491,29 @@ static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
}
static const struct drm_crtc_funcs mdp4_crtc_funcs = {
.set_config = drm_crtc_helper_set_config,
.set_config = drm_atomic_helper_set_config,
.destroy = mdp4_crtc_destroy,
.page_flip = mdp4_crtc_page_flip,
.page_flip = drm_atomic_helper_page_flip,
.set_property = mdp4_crtc_set_property,
.cursor_set = mdp4_crtc_cursor_set,
.cursor_move = mdp4_crtc_cursor_move,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
.dpms = mdp4_crtc_dpms,
.mode_fixup = mdp4_crtc_mode_fixup,
.mode_set = mdp4_crtc_mode_set,
.mode_set_nofb = mdp4_crtc_mode_set_nofb,
.mode_set = drm_helper_crtc_mode_set,
.mode_set_base = drm_helper_crtc_mode_set_base,
.prepare = mdp4_crtc_prepare,
.commit = mdp4_crtc_commit,
.mode_set_base = mdp4_crtc_mode_set_base,
.load_lut = mdp4_crtc_load_lut,
.atomic_check = mdp4_crtc_atomic_check,
.atomic_begin = mdp4_crtc_atomic_begin,
.atomic_flush = mdp4_crtc_atomic_flush,
};
static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
@ -638,7 +529,6 @@ static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
if (pending & PENDING_FLIP) {
complete_flip(crtc, NULL);
drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
}
if (pending & PENDING_CURSOR) {
@ -663,7 +553,8 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
DBG("cancel: %p", file);
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
DBG("%s: cancel: %p", mdp4_crtc->name, file);
complete_flip(crtc, file);
}
@ -717,35 +608,6 @@ void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}
static void set_attach(struct drm_crtc *crtc, enum mdp4_pipe pipe_id,
struct drm_plane *plane)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
BUG_ON(pipe_id >= ARRAY_SIZE(mdp4_crtc->planes));
if (mdp4_crtc->planes[pipe_id] == plane)
return;
mdp4_crtc->planes[pipe_id] = plane;
blend_setup(crtc);
if (mdp4_crtc->enabled && (plane != mdp4_crtc->plane))
crtc_flush(crtc);
}
void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
{
set_attach(crtc, mdp4_plane_pipe(plane), plane);
}
void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
{
/* don't actually detach our primary plane: */
if (to_mdp4_crtc(crtc)->plane == plane)
return;
set_attach(crtc, mdp4_plane_pipe(plane), NULL);
}
static const char *dma_names[] = {
"DMA_P", "DMA_S", "DMA_E",
};
@ -764,7 +626,6 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
crtc = &mdp4_crtc->base;
mdp4_crtc->plane = plane;
mdp4_crtc->id = id;
mdp4_crtc->ovlp = ovlp_id;
@ -781,17 +642,14 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
spin_lock_init(&mdp4_crtc->cursor.lock);
drm_flip_work_init(&mdp4_crtc->unref_fb_work,
"unref fb", unref_fb_worker);
drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker);
INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs);
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
plane->crtc = crtc;
mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);
mdp4_plane_install_properties(plane, &crtc->base);
return crtc;
}


@ -228,7 +228,6 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
struct drm_encoder *encoder;
struct drm_connector *connector;
struct drm_panel *panel;
struct hdmi *hdmi;
int ret;
/* construct non-private planes: */
@ -326,12 +325,14 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
priv->crtcs[priv->num_crtcs++] = crtc;
priv->encoders[priv->num_encoders++] = encoder;
hdmi = hdmi_init(dev, encoder);
if (IS_ERR(hdmi)) {
ret = PTR_ERR(hdmi);
if (priv->hdmi) {
/* Construct bridge/connector for HDMI: */
ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
if (ret) {
dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
goto fail;
}
}
return 0;
@ -381,6 +382,10 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
if (IS_ERR(mdp4_kms->dsi_pll_vddio))
mdp4_kms->dsi_pll_vddio = NULL;
/* NOTE: driver for this regulator still missing upstream.. use
* _get_exclusive() and ignore the error if it does not exist
* (and hope that the bootloader left it on for us)
*/
mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
if (IS_ERR(mdp4_kms->vdd))
mdp4_kms->vdd = NULL;


@ -194,14 +194,6 @@ uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
void mdp4_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
void mdp4_plane_set_scanout(struct drm_plane *plane,
struct drm_framebuffer *fb);
int mdp4_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
enum mdp4_pipe pipe_id, bool private_plane);
@ -210,8 +202,6 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer);
void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id, int ovlp_id,
enum mdp4_dma dma_id);


@ -98,6 +98,9 @@ static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
.detect = mdp4_lvds_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = mdp4_lvds_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {


@ -31,47 +31,26 @@ struct mdp4_plane {
};
#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
static void mdp4_plane_set_scanout(struct drm_plane *plane,
struct drm_framebuffer *fb);
static int mdp4_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
static struct mdp4_kms *get_kms(struct drm_plane *plane)
{
struct msm_drm_private *priv = plane->dev->dev_private;
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
static int mdp4_plane_update(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
mdp4_plane->enabled = true;
if (plane->fb)
drm_framebuffer_unreference(plane->fb);
drm_framebuffer_reference(fb);
return mdp4_plane_mode_set(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
}
static int mdp4_plane_disable(struct drm_plane *plane)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
DBG("%s: disable", mdp4_plane->name);
if (plane->crtc)
mdp4_crtc_detach(plane->crtc, plane);
return 0;
}
static void mdp4_plane_destroy(struct drm_plane *plane)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
mdp4_plane_disable(plane);
drm_plane_helper_disable(plane);
drm_plane_cleanup(plane);
kfree(mdp4_plane);
@ -92,19 +71,74 @@ int mdp4_plane_set_property(struct drm_plane *plane,
}
static const struct drm_plane_funcs mdp4_plane_funcs = {
.update_plane = mdp4_plane_update,
.disable_plane = mdp4_plane_disable,
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = mdp4_plane_destroy,
.set_property = mdp4_plane_set_property,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
void mdp4_plane_set_scanout(struct drm_plane *plane,
static int mdp4_plane_prepare_fb(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
return msm_framebuffer_prepare(fb, mdp4_kms->id);
}
static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
msm_framebuffer_cleanup(fb, mdp4_kms->id);
}
static int mdp4_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
return 0;
}
static void mdp4_plane_atomic_update(struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
int ret;
ret = mdp4_plane_mode_set(plane,
state->crtc, state->fb,
state->crtc_x, state->crtc_y,
state->crtc_w, state->crtc_h,
state->src_x, state->src_y,
state->src_w, state->src_h);
/* atomic_check should have ensured that this doesn't fail */
WARN_ON(ret < 0);
}
static const struct drm_plane_helper_funcs mdp4_plane_helper_funcs = {
.prepare_fb = mdp4_plane_prepare_fb,
.cleanup_fb = mdp4_plane_cleanup_fb,
.atomic_check = mdp4_plane_atomic_check,
.atomic_update = mdp4_plane_atomic_update,
};
static void mdp4_plane_set_scanout(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
enum mdp4_pipe pipe = mdp4_plane->pipe;
uint32_t iova;
uint32_t iova = msm_framebuffer_iova(fb, mdp4_kms->id, 0);
DBG("%s: set_scanout: %08x (%u)", mdp4_plane->name,
iova, fb->pitches[0]);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
@ -114,7 +148,6 @@ void mdp4_plane_set_scanout(struct drm_plane *plane,
MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
msm_gem_get_iova(msm_framebuffer_bo(fb, 0), mdp4_kms->id, &iova);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova);
plane->fb = fb;
@ -122,7 +155,7 @@ void mdp4_plane_set_scanout(struct drm_plane *plane,
#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000
int mdp4_plane_mode_set(struct drm_plane *plane,
static int mdp4_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
@ -137,6 +170,11 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
if (!(crtc && fb)) {
DBG("%s: disabled!", mdp4_plane->name);
return 0;
}
/* src values are in Q16 fixed point, convert to integer: */
src_x = src_x >> 16;
src_y = src_y >> 16;
@ -197,9 +235,6 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
/* TODO detach from old crtc (if we had more than one) */
mdp4_crtc_attach(crtc, plane);
return 0;
}
@ -239,9 +274,12 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
ARRAY_SIZE(mdp4_plane->formats));
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
mdp4_plane->formats, mdp4_plane->nformats,
type);
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
mdp4_plane->formats, mdp4_plane->nformats, type);
if (ret)
goto fail;
drm_plane_helper_add(plane, &mdp4_plane_helper_funcs);
mdp4_plane_install_properties(plane, &plane->base);


@ -10,14 +10,14 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)


@ -25,8 +25,6 @@
struct mdp5_crtc {
struct drm_crtc base;
char name[8];
struct drm_plane *plane;
struct drm_plane *planes[8];
int id;
bool enabled;
@ -78,15 +76,14 @@ static void crtc_flush(struct drm_crtc *crtc)
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
int id = mdp5_crtc->id;
uint32_t i, flush = 0;
struct drm_plane *plane;
uint32_t flush = 0;
for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
struct drm_plane *plane = mdp5_crtc->planes[i];
if (plane) {
for_each_plane_on_crtc(crtc, plane) {
enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
flush |= pipe2flush(pipe);
}
}
flush |= mixer2flush(mdp5_crtc->id);
flush |= MDP5_CTL_FLUSH_CTL;
@ -142,7 +139,8 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_pending_vblank_event *event;
unsigned long flags, i;
struct drm_plane *plane;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
event = mdp5_crtc->event;
@ -158,12 +156,9 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
}
spin_unlock_irqrestore(&dev->event_lock, flags);
for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
struct drm_plane *plane = mdp5_crtc->planes[i];
if (plane)
for_each_plane_on_crtc(crtc, plane)
mdp5_plane_complete_flip(plane);
}
}
static void pageflip_cb(struct msm_fence_cb *cb)
{
@ -176,7 +171,7 @@ static void pageflip_cb(struct msm_fence_cb *cb)
return;
drm_framebuffer_reference(fb);
mdp5_plane_set_scanout(mdp5_crtc->plane, fb);
mdp5_plane_set_scanout(crtc->primary, fb);
update_scanout(crtc, fb);
}
@ -289,7 +284,7 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
/* grab extra ref for update_scanout() */
drm_framebuffer_reference(crtc->primary->fb);
ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->primary->fb,
ret = mdp5_plane_mode_set(crtc->primary, crtc, crtc->primary->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
@ -330,8 +325,7 @@ static void mdp5_crtc_commit(struct drm_crtc *crtc)
static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_plane *plane = mdp5_crtc->plane;
struct drm_plane *plane = crtc->primary;
struct drm_display_mode *mode = &crtc->mode;
int ret;
@ -504,14 +498,8 @@ static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes));
if (mdp5_crtc->planes[pipe_id] == plane)
return;
mdp5_crtc->planes[pipe_id] = plane;
blend_setup(crtc);
if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
if (mdp5_crtc->enabled && (plane != crtc->primary))
crtc_flush(crtc);
}
@ -523,7 +511,7 @@ void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
{
/* don't actually detach our primary plane: */
if (to_mdp5_crtc(crtc)->plane == plane)
if (crtc->primary == plane)
return;
set_attach(crtc, mdp5_plane_pipe(plane), NULL);
}
@ -541,7 +529,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
crtc = &mdp5_crtc->base;
mdp5_crtc->plane = plane;
mdp5_crtc->id = id;
mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
@ -557,8 +544,9 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
plane->crtc = crtc;
mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base);
mdp5_plane_install_properties(plane, &crtc->base);
return crtc;
}

View file

@ -82,6 +82,7 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
{
struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
struct msm_drm_private *priv = mdp5_kms->dev->dev_private;
uint32_t intr;
intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS);
@ -92,7 +93,7 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
mdp5_irq_mdp(mdp_kms);
if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI)
hdmi_irq(0, mdp5_kms->hdmi);
hdmi_irq(0, priv->hdmi);
return IRQ_HANDLED;
}

View file

@ -324,12 +324,13 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
priv->encoders[priv->num_encoders++] = encoder;
/* Construct bridge/connector for HDMI: */
mdp5_kms->hdmi = hdmi_init(dev, encoder);
if (IS_ERR(mdp5_kms->hdmi)) {
ret = PTR_ERR(mdp5_kms->hdmi);
if (priv->hdmi) {
ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
if (ret) {
dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
goto fail;
}
}
return 0;

View file

@ -71,8 +71,6 @@ struct mdp5_kms {
struct clk *lut_clk;
struct clk *vsync_clk;
struct hdmi *hdmi;
struct mdp_irq error_handler;
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)

View file

@ -0,0 +1,163 @@
/*
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"
struct msm_commit {
struct drm_atomic_state *state;
uint32_t fence;
struct msm_fence_cb fence_cb;
};
static void fence_cb(struct msm_fence_cb *cb);
static struct msm_commit *new_commit(struct drm_atomic_state *state)
{
struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return NULL;
c->state = state;
/* TODO we might need a way to indicate to run the cb on a
* different wq so wait_for_vblanks() doesn't block retiring
* bo's..
*/
INIT_FENCE_CB(&c->fence_cb, fence_cb);
return c;
}
/* The (potentially) asynchronous part of the commit. At this point
* nothing can fail short of armageddon.
*/
static void complete_commit(struct msm_commit *c)
{
struct drm_atomic_state *state = c->state;
struct drm_device *dev = state->dev;
drm_atomic_helper_commit_pre_planes(dev, state);
drm_atomic_helper_commit_planes(dev, state);
drm_atomic_helper_commit_post_planes(dev, state);
drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_state_free(state);
kfree(c);
}
static void fence_cb(struct msm_fence_cb *cb)
{
struct msm_commit *c =
container_of(cb, struct msm_commit, fence_cb);
complete_commit(c);
}
static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
{
struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
}
/**
* msm_atomic_commit - commit validated state object
* @dev: DRM device
* @state: the driver state object
* @async: asynchronous commit
*
* This function commits a state object that has been pre-validated with
* drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
* reservation fails. When @async is set, the commit completes from a fence
* callback instead of blocking on the fence.
*
* RETURNS
* Zero for success or -errno.
*/
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async)
{
struct msm_commit *c;
int nplanes = dev->mode_config.num_total_plane;
int i, ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
return ret;
c = new_commit(state);
/*
* Figure out what fence to wait for:
*/
for (i = 0; i < nplanes; i++) {
struct drm_plane *plane = state->planes[i];
struct drm_plane_state *new_state = state->plane_states[i];
if (!plane)
continue;
if (plane->state->fb != new_state->fb)
add_fb(c, new_state->fb);
}
/*
* This is the point of no return - everything below never fails except
* when the hw goes bonghits. Which means we can commit the new state on
* the software side now.
*/
drm_atomic_helper_swap_state(dev, state);
/*
* Everything below can be run asynchronously without the need to grab
* any modeset locks at all under one condition: It must be guaranteed
* that the asynchronous work has either been cancelled (if the driver
* supports it, which at least requires that the framebuffers get
* cleaned up with drm_atomic_helper_cleanup_planes()) or completed
* before the new state gets committed on the software side with
* drm_atomic_helper_swap_state().
*
* This scheme allows new atomic state updates to be prepared and
* checked in parallel to the asynchronous completion of the previous
* update. Which is important since compositors need to figure out the
* composition of the next frame right after having submitted the
* current layout.
*/
if (async) {
msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
return 0;
}
ret = msm_wait_fence_interruptable(dev, c->fence, NULL);
if (ret) {
WARN_ON(ret); // TODO unswap state back? or??
kfree(c);
return ret;
}
complete_commit(c);
return 0;
}
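To make the fence bookkeeping above concrete: add_fb() accumulates the highest outstanding read-fence over every framebuffer being switched to, and that single number is what the synchronous path waits on and the asynchronous path arms the callback with. A worked example with made-up fence values (fb_a/fb_b are hypothetical, not from the patch):

	/* suppose the GPU last rendered to fb_a's bo at fence 40 and to fb_b's bo at fence 42 */
	c->fence = 0;
	add_fb(c, fb_a);   /* msm_gem_fence(..., MSM_PREP_READ) == 40  ->  c->fence = 40 */
	add_fb(c, fb_b);   /* msm_gem_fence(..., MSM_PREP_READ) == 42  ->  c->fence = 42 */
	/* sync:  msm_wait_fence_interruptable(dev, 42, NULL) blocks until fence 42 retires
	 * async: msm_queue_fence_cb(dev, &c->fence_cb, 42) returns and complete_commit()
	 *        runs later from the fence callback */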

View file

@ -29,6 +29,8 @@ static void msm_fb_output_poll_changed(struct drm_device *dev)
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = msm_framebuffer_create,
.output_poll_changed = msm_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = msm_atomic_commit,
};
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
@ -294,6 +296,8 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
goto fail;
}
drm_mode_config_reset(dev);
#ifdef CONFIG_DRM_MSM_FBDEV
priv->fbdev = msm_fbdev_init(dev);
#endif
@ -619,6 +623,26 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
return ret;
}
int msm_queue_fence_cb(struct drm_device *dev,
struct msm_fence_cb *cb, uint32_t fence)
{
struct msm_drm_private *priv = dev->dev_private;
int ret = 0;
mutex_lock(&dev->struct_mutex);
if (!list_empty(&cb->work.entry)) {
ret = -EINVAL;
} else if (fence > priv->completed_fence) {
cb->fence = fence;
list_add_tail(&cb->work.entry, &priv->fence_cbs);
} else {
queue_work(priv->wq, &cb->work);
}
mutex_unlock(&dev->struct_mutex);
return ret;
}
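The calling pattern msm_atomic_commit() relies on is: initialize the callback once with INIT_FENCE_CB(), then hand it to msm_queue_fence_cb() together with the fence to wait for. The callback is queued on priv->wq immediately if that fence has already completed, otherwise it runs once msm_update_fence() crosses it; re-arming an already-queued callback returns -EINVAL. A minimal sketch (my_done() and wait_for_bo_async() are hypothetical names, not from the patch):

	static void my_done(struct msm_fence_cb *cb)
	{
		/* runs from priv->wq once the fence has retired */
	}

	static int wait_for_bo_async(struct drm_device *dev,
			struct msm_gem_object *msm_obj, struct msm_fence_cb *cb)
	{
		uint32_t fence = msm_gem_fence(msm_obj, MSM_PREP_READ | MSM_PREP_WRITE);

		INIT_FENCE_CB(cb, my_done);
		return msm_queue_fence_cb(dev, cb, fence);   /* 0, or -EINVAL if cb already queued */
	}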
/* called from workqueue */
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
@ -832,6 +856,7 @@ static struct drm_driver msm_driver = {
.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
.gem_prime_vmap = msm_gem_prime_vmap,
.gem_prime_vunmap = msm_gem_prime_vunmap,
.gem_prime_mmap = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
.debugfs_cleanup = msm_debugfs_cleanup,

View file

@ -32,15 +32,6 @@
#include <linux/types.h>
#include <asm/sizes.h>
#if defined(CONFIG_COMPILE_TEST) && !defined(CONFIG_ARCH_QCOM)
/* stubs we need for compile-test: */
static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
{
return NULL;
}
#endif
#ifndef CONFIG_OF
#include <mach/board.h>
#include <mach/socinfo.h>
@ -48,7 +39,10 @@ static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
#endif
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/msm_drm.h>
#include <drm/drm_gem.h>
@ -75,7 +69,12 @@ struct msm_drm_private {
struct msm_kms *kms;
/* subordinate devices, if present: */
struct platform_device *hdmi_pdev, *gpu_pdev;
struct platform_device *gpu_pdev;
/* possibly this should be in the kms component, but it is
* shared by both mdp4 and mdp5..
*/
struct hdmi *hdmi;
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
@ -145,21 +144,29 @@ void __msm_fence_worker(struct work_struct *work);
(_cb)->func = _func; \
} while (0)
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async);
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
struct timespec *timeout);
int msm_queue_fence_cb(struct drm_device *dev,
struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct drm_device *dev, uint32_t fence);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
uint32_t *iova);
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
void msm_gem_put_iova(struct drm_gem_object *obj, int id);
@ -170,6 +177,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *msm_gem_prime_vmap(struct drm_gem_object *obj);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
@ -192,6 +200,9 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
uint32_t size, struct sg_table *sgt);
int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
@ -202,7 +213,8 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
struct hdmi;
struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
struct drm_encoder *encoder);
irqreturn_t hdmi_irq(int irq, void *dev_id);
void __init hdmi_register(void);
void __exit hdmi_unregister(void);

View file

@ -24,7 +24,7 @@
struct msm_framebuffer {
struct drm_framebuffer base;
const struct msm_format *format;
struct drm_gem_object *planes[2];
struct drm_gem_object *planes[3];
};
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
@ -87,6 +87,42 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
}
#endif
/* prepare/pin all the fb's bo's for scanout. Note that it is not valid
* to prepare an fb for multiple different initiator 'id's. But that
* should be fine, since only the scanout (mdpN) side of things needs
* this, the gpu doesn't care about fb's.
*/
int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = drm_format_num_planes(fb->pixel_format);
uint32_t iova;
for (i = 0; i < n; i++) {
ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
}
return 0;
}
void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = drm_format_num_planes(fb->pixel_format);
for (i = 0; i < n; i++)
msm_gem_put_iova(msm_fb->planes[i], id);
}
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
return msm_gem_iova(msm_fb->planes[plane], id);
}
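A plausible scanout-side caller of the three helpers above, assuming the caller knows the kms' initiator id and with write_scanout_addr() standing in for whatever register writes the hardware actually needs (both are assumptions for illustration, not from the patch):

	static int hypothetical_set_scanout(struct drm_plane *plane,
			struct drm_framebuffer *fb, int id)
	{
		int i, ret, n = drm_format_num_planes(fb->pixel_format);

		ret = msm_framebuffer_prepare(fb, id);    /* pin/map every bo of the fb */
		if (ret)
			return ret;

		for (i = 0; i < n; i++)
			write_scanout_addr(plane, i, msm_framebuffer_iova(fb, id, i));

		/* ... and msm_framebuffer_cleanup(fb, id) once the fb is off-screen */
		return 0;
	}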
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
@ -166,6 +202,11 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
msm_fb->format = format;
if (n > ARRAY_SIZE(msm_fb->planes)) {
ret = -EINVAL;
goto fail;
}
for (i = 0; i < n; i++) {
unsigned int width = mode_cmd->width / (i ? hsub : 1);
unsigned int height = mode_cmd->height / (i ? vsub : 1);

View file

@ -93,9 +93,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
uint32_t paddr;
int ret, size;
sizes->surface_bpp = 32;
sizes->surface_depth = 24;
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
sizes->surface_height, sizes->surface_bpp,
sizes->fb_width, sizes->fb_height);

View file

@ -309,6 +309,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
return ret;
}
/* get iova, taking a reference. Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@ -328,6 +329,16 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
return ret;
}
/* get iova without taking a reference, used in places where you have
* already done a 'msm_gem_get_iova()'.
*/
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!msm_obj->domain[id].iova);
return msm_obj->domain[id].iova;
}
void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
// XXX TODO ..
@ -397,23 +408,10 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
struct msm_fence_cb *cb)
{
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
mutex_lock(&dev->struct_mutex);
if (!list_empty(&cb->work.entry)) {
ret = -EINVAL;
} else if (is_active(msm_obj)) {
cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
list_add_tail(&cb->work.entry, &priv->fence_cbs);
} else {
queue_work(priv->wq, &cb->work);
}
mutex_unlock(&dev->struct_mutex);
return ret;
uint32_t fence = msm_gem_fence(msm_obj,
MSM_PREP_READ | MSM_PREP_WRITE);
return msm_queue_fence_cb(obj->dev, cb, fence);
}
void msm_gem_move_to_active(struct drm_gem_object *obj,
@ -452,12 +450,8 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
int ret = 0;
if (is_active(msm_obj)) {
uint32_t fence = 0;
uint32_t fence = msm_gem_fence(msm_obj, op);
if (op & MSM_PREP_READ)
fence = msm_obj->write_fence;
if (op & MSM_PREP_WRITE)
fence = max(fence, msm_obj->read_fence);
if (op & MSM_PREP_NOSYNC)
timeout = NULL;
@ -525,13 +519,11 @@ void msm_gem_free_object(struct drm_gem_object *obj)
for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
struct msm_mmu *mmu = priv->mmus[id];
if (mmu && msm_obj->domain[id].iova) {
uint32_t offset = (uint32_t)mmap_offset(obj);
uint32_t offset = msm_obj->domain[id].iova;
mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
}
}
drm_gem_free_mmap_offset(obj);
if (obj->import_attach) {
if (msm_obj->vaddr)
dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

View file

@ -70,6 +70,19 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
return msm_obj->gpu != NULL;
}
static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
uint32_t op)
{
uint32_t fence = 0;
if (op & MSM_PREP_READ)
fence = msm_obj->write_fence;
if (op & MSM_PREP_WRITE)
fence = max(fence, msm_obj->read_fence);
return fence;
}
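The helper encodes the usual synchronization rule: a new read of the bo only has to wait for the last GPU write, while a new write also has to wait for outstanding reads. With made-up fence values:

	/* assume msm_obj->write_fence == 40 and msm_obj->read_fence == 43 */
	msm_gem_fence(msm_obj, MSM_PREP_READ);                   /* 40: wait for the last writer */
	msm_gem_fence(msm_obj, MSM_PREP_WRITE);                  /* 43: wait for the last reader */
	msm_gem_fence(msm_obj, MSM_PREP_READ | MSM_PREP_WRITE);  /* 43: max of both */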
#define MAX_CMDS 4
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,

View file

@ -37,6 +37,19 @@ void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
/* TODO msm_gem_vunmap() */
}
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
int ret;
mutex_lock(&obj->dev->struct_mutex);
ret = drm_gem_mmap_obj(obj, obj->size, vma);
mutex_unlock(&obj->dev->struct_mutex);
if (ret < 0)
return ret;
return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
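On the userspace side, this is the hook that lets an exported dma-buf fd be mmap()'d directly. A rough libdrm sketch (map_exported_bo() is a hypothetical helper, error handling trimmed, and whether a read-only mapping is enough depends on the use case):

	#include <stdint.h>
	#include <sys/mman.h>
	#include <xf86drm.h>

	static void *map_exported_bo(int drm_fd, uint32_t gem_handle, size_t size)
	{
		int prime_fd;
		void *ptr;

		/* export the GEM handle as a dma-buf file descriptor */
		if (drmPrimeHandleToFD(drm_fd, gem_handle, DRM_CLOEXEC, &prime_fd))
			return NULL;

		/* mmap on the dma-buf fd ends up in msm_gem_prime_mmap() above */
		ptr = mmap(NULL, size, PROT_READ, MAP_SHARED, prime_fd, 0);
		return ptr == MAP_FAILED ? NULL : ptr;
	}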
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{

View file

@ -65,4 +65,9 @@ static inline void msm_kms_init(struct msm_kms *kms,
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
/* TODO move this helper iterator macro somewhere common: */
#define for_each_plane_on_crtc(_crtc, _plane) \
list_for_each_entry((_plane), &(_crtc)->dev->mode_config.plane_list, head) \
if ((_plane)->crtc == (_crtc))
#endif /* __MSM_KMS_H__ */
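The macro simply walks the device-global plane list and filters on plane->crtc, so it is only meaningful where that association is stable (i.e. under the usual modeset locking). A trivial, hypothetical use:

	static int hypothetical_count_planes(struct drm_crtc *crtc)
	{
		struct drm_plane *plane;
		int n = 0;

		for_each_plane_on_crtc(crtc, plane)
			n++;	/* every plane currently bound to this crtc */

		return n;
	}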