Merge branch 'msm-fixes-3.19' of git://people.freedesktop.org/~robclark/linux into drm-fixes
A few msm fixes for 3.19:
 * hdmi regulators fix
 * hdmi fix for spurious HPD interrupts
 * fix for sync atomic update after async update (which could show up with a setcrtc following a pageflip)
 * couple little Coccinelle cleanups

* 'msm-fixes-3.19' of git://people.freedesktop.org/~robclark/linux:
  drm/msm/hdmi: rework HDMI IRQ handler
  drm/msm/hdmi: enable regulators before clocks to avoid warnings
  drm/msm/mdp5: update irqs on crtc<->encoder link change
  drm/msm: block incoming update on pending updates
  drm/msm: Deletion of unnecessary checks before the function call "release_firmware"
  drm/msm: Deletion of unnecessary checks before two function calls
This commit is contained in:
Commit 955f6be8ec
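For context on the "block incoming update on pending updates" change: an incoming atomic commit first waits until none of its CRTCs have an update in flight, marks them pending, and the async completion path clears the mark and wakes waiters (see start_atomic()/end_atomic() in the msm_atomic.c hunks below). As a rough illustration only -- not the kernel code, which uses a wait_queue_head_t and wait_event_interruptible_locked() -- the same wait/mark/clear pattern looks like this in plain pthreads:

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static uint32_t pending_crtcs;  /* bitmask of CRTCs with an update in flight */

/* Block until none of the requested CRTCs are pending, then mark them. */
static void start_atomic(uint32_t crtc_mask)
{
        pthread_mutex_lock(&lock);
        while (pending_crtcs & crtc_mask)
                pthread_cond_wait(&wake, &lock);
        pending_crtcs |= crtc_mask;
        pthread_mutex_unlock(&lock);
}

/* Clear the CRTCs once their update has completed and wake any waiting committer. */
static void end_atomic(uint32_t crtc_mask)
{
        pthread_mutex_lock(&lock);
        pending_crtcs &= ~crtc_mask;
        pthread_cond_broadcast(&wake);
        pthread_mutex_unlock(&lock);
}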
@@ -386,9 +386,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
                 msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
                 drm_gem_object_unreference(gpu->memptrs_bo);
         }
-        if (gpu->pm4)
-                release_firmware(gpu->pm4);
-        if (gpu->pfp)
-                release_firmware(gpu->pfp);
+        release_firmware(gpu->pm4);
+        release_firmware(gpu->pfp);
         msm_gpu_cleanup(&gpu->base);
 }

@@ -141,6 +141,15 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
         uint32_t hpd_ctrl;
         int i, ret;
 
+        for (i = 0; i < config->hpd_reg_cnt; i++) {
+                ret = regulator_enable(hdmi->hpd_regs[i]);
+                if (ret) {
+                        dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
+                                        config->hpd_reg_names[i], ret);
+                        goto fail;
+                }
+        }
+
         ret = gpio_config(hdmi, true);
         if (ret) {
                 dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
@@ -164,15 +173,6 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
                 }
         }
 
-        for (i = 0; i < config->hpd_reg_cnt; i++) {
-                ret = regulator_enable(hdmi->hpd_regs[i]);
-                if (ret) {
-                        dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
-                                        config->hpd_reg_names[i], ret);
-                        goto fail;
-                }
-        }
-
         hdmi_set_mode(hdmi, false);
         phy->funcs->reset(phy);
         hdmi_set_mode(hdmi, true);
@@ -200,7 +200,7 @@ fail:
         return ret;
 }
 
-static int hdp_disable(struct hdmi_connector *hdmi_connector)
+static void hdp_disable(struct hdmi_connector *hdmi_connector)
 {
         struct hdmi *hdmi = hdmi_connector->hdmi;
         const struct hdmi_platform_config *config = hdmi->config;
@@ -212,28 +212,19 @@ static int hdp_disable(struct hdmi_connector *hdmi_connector)
 
         hdmi_set_mode(hdmi, false);
 
-        for (i = 0; i < config->hpd_reg_cnt; i++) {
-                ret = regulator_disable(hdmi->hpd_regs[i]);
-                if (ret) {
-                        dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
-                                        config->hpd_reg_names[i], ret);
-                        goto fail;
-                }
-        }
-
         for (i = 0; i < config->hpd_clk_cnt; i++)
                 clk_disable_unprepare(hdmi->hpd_clks[i]);
 
         ret = gpio_config(hdmi, false);
-        if (ret) {
-                dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
-                goto fail;
-        }
+        if (ret)
+                dev_warn(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
 
-        return 0;
-
-fail:
-        return ret;
+        for (i = 0; i < config->hpd_reg_cnt; i++) {
+                ret = regulator_disable(hdmi->hpd_regs[i]);
+                if (ret)
+                        dev_warn(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
+                                        config->hpd_reg_names[i], ret);
+        }
 }
 
 static void
@@ -260,11 +251,11 @@ void hdmi_connector_irq(struct drm_connector *connector)
                         (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
                 bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);
 
-                DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
-
-                /* ack the irq: */
+                /* ack & disable (temporarily) HPD events: */
                 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
-                                hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
+                        HDMI_HPD_INT_CTRL_INT_ACK);
+
+                DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
 
                 /* detect disconnect if we are connected or visa versa: */
                 hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;

@@ -331,17 +331,8 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
                 struct drm_crtc_state *state)
 {
         struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-        struct drm_device *dev = crtc->dev;
-
         DBG("%s: check", mdp4_crtc->name);
-
-        if (mdp4_crtc->event) {
-                dev_err(dev->dev, "already pending flip!\n");
-                return -EBUSY;
-        }
-
         // TODO anything else to check?
-
         return 0;
 }
 
@@ -357,7 +348,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
         struct drm_device *dev = crtc->dev;
         unsigned long flags;
 
-        DBG("%s: flush", mdp4_crtc->name);
+        DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);
 
         WARN_ON(mdp4_crtc->event);
 

@@ -303,11 +303,6 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 
         DBG("%s: check", mdp5_crtc->name);
 
-        if (mdp5_crtc->event) {
-                dev_err(dev->dev, "already pending flip!\n");
-                return -EBUSY;
-        }
-
         /* request a free CTL, if none is already allocated for this CRTC */
         if (state->enable && !mdp5_crtc->ctl) {
                 mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
@@ -364,7 +359,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
         struct drm_device *dev = crtc->dev;
         unsigned long flags;
 
-        DBG("%s: flush", mdp5_crtc->name);
+        DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);
 
         WARN_ON(mdp5_crtc->event);
 
@@ -460,10 +455,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
         /* now that we know what irq's we want: */
         mdp5_crtc->err.irqmask = intf2err(intf);
         mdp5_crtc->vblank.irqmask = intf2vblank(intf);
-
-        /* when called from modeset_init(), skip the rest until later: */
-        if (!mdp5_kms)
-                return;
+        mdp_irq_update(&mdp5_kms->base);
 
         spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
         intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

@@ -216,17 +216,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
                 goto fail;
         }
 
-        /* NOTE: the vsync and error irq's are actually associated with
-         * the INTF/encoder.. the easiest way to deal with this (ie. what
-         * we do now) is assume a fixed relationship between crtc's and
-         * encoders. I'm not sure if there is ever a need to more freely
-         * assign crtcs to encoders, but if there is then we need to take
-         * care of error and vblank irq's that the crtc has registered,
-         * and also update user-requested vblank_mask.
-         */
-        encoder->possible_crtcs = BIT(0);
-        mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
-
+        encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;;
         priv->encoders[priv->num_encoders++] = encoder;
 
         /* Construct bridge/connector for HDMI: */

@@ -42,7 +42,10 @@ static void update_irq(struct mdp_kms *mdp_kms)
         mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
 }
 
-static void update_irq_unlocked(struct mdp_kms *mdp_kms)
+/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
+ * link changes, this must be called to figure out the new global irqmask
+ */
+void mdp_irq_update(struct mdp_kms *mdp_kms)
 {
         unsigned long flags;
         spin_lock_irqsave(&list_lock, flags);
@@ -122,7 +125,7 @@ void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
         spin_unlock_irqrestore(&list_lock, flags);
 
         if (needs_update)
-                update_irq_unlocked(mdp_kms);
+                mdp_irq_update(mdp_kms);
 }
 
 void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
@@ -141,5 +144,5 @@ void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
         spin_unlock_irqrestore(&list_lock, flags);
 
         if (needs_update)
-                update_irq_unlocked(mdp_kms);
+                mdp_irq_update(mdp_kms);
 }

@@ -75,7 +75,7 @@ void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
 void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
 void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
 void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
-
+void mdp_irq_update(struct mdp_kms *mdp_kms);
 
 /*
  * pixel format helpers:

@@ -23,10 +23,41 @@ struct msm_commit {
         struct drm_atomic_state *state;
         uint32_t fence;
         struct msm_fence_cb fence_cb;
+        uint32_t crtc_mask;
 };
 
 static void fence_cb(struct msm_fence_cb *cb);
 
+/* block until specified crtcs are no longer pending update, and
+ * atomically mark them as pending update
+ */
+static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
+{
+        int ret;
+
+        spin_lock(&priv->pending_crtcs_event.lock);
+        ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
+                        !(priv->pending_crtcs & crtc_mask));
+        if (ret == 0) {
+                DBG("start: %08x", crtc_mask);
+                priv->pending_crtcs |= crtc_mask;
+        }
+        spin_unlock(&priv->pending_crtcs_event.lock);
+
+        return ret;
+}
+
+/* clear specified crtcs (no longer pending update)
+ */
+static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
+{
+        spin_lock(&priv->pending_crtcs_event.lock);
+        DBG("end: %08x", crtc_mask);
+        priv->pending_crtcs &= ~crtc_mask;
+        wake_up_all_locked(&priv->pending_crtcs_event);
+        spin_unlock(&priv->pending_crtcs_event.lock);
+}
+
 static struct msm_commit *new_commit(struct drm_atomic_state *state)
 {
         struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
@@ -58,12 +89,27 @@ static void complete_commit(struct msm_commit *c)
 
         drm_atomic_helper_commit_post_planes(dev, state);
 
+        /* NOTE: _wait_for_vblanks() only waits for vblank on
+         * enabled CRTCs. So we end up faulting when disabling
+         * due to (potentially) unref'ing the outgoing fb's
+         * before the vblank when the disable has latched.
+         *
+         * But if it did wait on disabled (or newly disabled)
+         * CRTCs, that would be racy (ie. we could have missed
+         * the irq. We need some way to poll for pipe shut
+         * down. Or just live with occasionally hitting the
+         * timeout in the CRTC disable path (which really should
+         * not be critical path)
+         */
+
         drm_atomic_helper_wait_for_vblanks(dev, state);
 
         drm_atomic_helper_cleanup_planes(dev, state);
 
         drm_atomic_state_free(state);
 
+        end_atomic(dev->dev_private, c->crtc_mask);
+
         kfree(c);
 }
 
@@ -97,8 +143,9 @@ static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
 int msm_atomic_commit(struct drm_device *dev,
                 struct drm_atomic_state *state, bool async)
 {
-        struct msm_commit *c;
         int nplanes = dev->mode_config.num_total_plane;
+        int ncrtcs = dev->mode_config.num_crtc;
+        struct msm_commit *c;
         int i, ret;
 
         ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -106,6 +153,18 @@ int msm_atomic_commit(struct drm_device *dev,
                 return ret;
 
         c = new_commit(state);
+        if (!c)
+                return -ENOMEM;
+
+        /*
+         * Figure out what crtcs we have:
+         */
+        for (i = 0; i < ncrtcs; i++) {
+                struct drm_crtc *crtc = state->crtcs[i];
+                if (!crtc)
+                        continue;
+                c->crtc_mask |= (1 << drm_crtc_index(crtc));
+        }
 
         /*
          * Figure out what fence to wait for:
@@ -121,6 +180,14 @@ int msm_atomic_commit(struct drm_device *dev,
                         add_fb(c, new_state->fb);
         }
 
+        /*
+         * Wait for pending updates on any of the same crtc's and then
+         * mark our set of crtc's as busy:
+         */
+        ret = start_atomic(dev->dev_private, c->crtc_mask);
+        if (ret)
+                return ret;
+
         /*
          * This is the point of no return - everything below never fails except
          * when the hw goes bonghits. Which means we can commit the new state on

@@ -193,6 +193,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 
         priv->wq = alloc_ordered_workqueue("msm", 0);
         init_waitqueue_head(&priv->fence_event);
+        init_waitqueue_head(&priv->pending_crtcs_event);
 
         INIT_LIST_HEAD(&priv->inactive_list);
         INIT_LIST_HEAD(&priv->fence_cbs);

@@ -96,6 +96,10 @@ struct msm_drm_private {
         /* callbacks deferred until bo is inactive: */
         struct list_head fence_cbs;
 
+        /* crtcs pending async atomic updates: */
+        uint32_t pending_crtcs;
+        wait_queue_head_t pending_crtcs_event;
+
         /* registered MMUs: */
         unsigned int num_mmus;
         struct msm_mmu *mmus[NUM_DOMAINS];

@@ -190,8 +190,7 @@ fail_unlock:
 fail:
 
         if (ret) {
-                if (fbi)
-                        framebuffer_release(fbi);
+                framebuffer_release(fbi);
                 if (fb) {
                         drm_framebuffer_unregister_private(fb);
                         drm_framebuffer_remove(fb);

@@ -535,8 +535,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
                 drm_free_large(msm_obj->pages);
 
         } else {
-                if (msm_obj->vaddr)
-                        vunmap(msm_obj->vaddr);
+                vunmap(msm_obj->vaddr);
                 put_pages(obj);
         }