Samsung SPH-L720 (Sprint) source updates
/drivers/video/msm/mdp4_overlay_dtv.c
blob:4ec45bc46bf58e8179e4270891451884dec4cf51 -> blob:aa0f79d5b96d18a345b95e61a78d24ef63dd5f4c
--- drivers/video/msm/mdp4_overlay_dtv.c
+++ drivers/video/msm/mdp4_overlay_dtv.c
@@ -200,7 +200,6 @@ int mdp4_dtv_pipe_commit(int cndx, int w
vp = &vctrl->vlist[undx];
pipe = vctrl->base_pipe;
mixer = pipe->mixer_num;
- mdp4_overlay_iommu_unmap_freelist(mixer);
mdp_update_pm(vctrl->mfd, vctrl->vsync_time);
@@ -224,6 +223,31 @@ int mdp4_dtv_pipe_commit(int cndx, int w
/* pipe not unset */
mdp4_overlay_vsync_commit(pipe);
}
+ }
+ }
+ mdp4_mixer_stage_commit(mixer);
+
+ /* start timing generator & mmu if they are not started yet */
+ mdp4_overlay_dtv_start();
+
+ /*
+ * There is a possibility that a pipe commit comes very close to the
+ * next vsync. This may cause two consecutive pipe_commits to happen
+ * within the same vsync period, which causes an iommu page fault when
+ * the previous iommu buffer is freed. Set the ION_IOMMU_UNMAP_DELAYED
+ * flag at ion_map_iommu() to delay unmapping the iommu buffer and fix
+ * this problem.
+ * Also, ion_unmap_iommu() may take as long as 9 ms to free an ion
+ * buffer, so mdp4_overlay_iommu_unmap_freelist(mixer) should be called
+ * after stage_commit() to ensure that pipe_commit (up to stage_commit)
+ * completes within the vsync period.
+ */
+
+ /* free iommu buffers from the previous commit back to the pool */
+ mdp4_overlay_iommu_unmap_freelist(mixer);
+
+ pipe = vp->plist;
+ for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
+ if (pipe->pipe_used) {
/* free previous iommu to freelist
* which will be freed at next
* pipe_commit
@@ -232,11 +256,6 @@ int mdp4_dtv_pipe_commit(int cndx, int w
pipe->pipe_used = 0; /* clear */
}
}
- mdp4_mixer_stage_commit(mixer);
-
- /* start timing generator & mmu if they are not started yet */
- mdp4_overlay_dtv_start();
-
pipe = vctrl->base_pipe;
spin_lock_irqsave(&vctrl->spin_lock, flags);
if (pipe->ov_blt_addr) {
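
The reordering above is easier to follow outside the diff. Below is a minimal C sketch of the same delayed-unmap idea; the names (buf_freelist, queue_delayed_unmap, flush_delayed_unmaps) are hypothetical illustrations, not the driver's symbols. Buffers retired by one commit are parked on a per-mixer freelist, and the potentially slow unmap (up to ~9 ms per buffer, per the comment above) only runs on the next commit, after the stages have been committed, so each pipe_commit still finishes inside its vsync period.

/* Hypothetical sketch of the delayed-unmap freelist pattern; not the
 * actual msm_fb code. */
#define FREELIST_MAX 8

struct buf_freelist {
	int count;
	void *handles[FREELIST_MAX];
};

static struct buf_freelist freelist[2];	/* one per mixer (assumption) */

/* Called when a commit retires a buffer: do NOT unmap it yet, just
 * queue it so the expensive unmap happens on the next commit. */
static void queue_delayed_unmap(int mixer, void *handle)
{
	struct buf_freelist *fl = &freelist[mixer];

	if (fl->count < FREELIST_MAX)
		fl->handles[fl->count++] = handle;
}

/* Called on the next commit, after the stages are committed and the
 * hardware no longer references the old buffers; the slow unmap runs
 * here instead of inside the time-critical part of the commit. */
static void flush_delayed_unmaps(int mixer, void (*unmap)(void *))
{
	struct buf_freelist *fl = &freelist[mixer];
	int i;

	for (i = 0; i < fl->count; i++)
		unmap(fl->handles[i]);	/* may take several ms each */
	fl->count = 0;
}

In the patched mdp4_dtv_pipe_commit() the equivalent ordering is: mdp4_mixer_stage_commit(), mdp4_overlay_dtv_start(), then mdp4_overlay_iommu_unmap_freelist(mixer), and only then the second pipe loop that queues the just-retired buffers for the following commit.
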
@@ -371,6 +390,9 @@ ssize_t mdp4_dtv_show_event(struct devic
ssize_t ret = 0;
unsigned long flags;
u64 vsync_tick;
+ ktime_t ctime;
+ u32 ctick, ptick;
+ int diff;
cndx = 0;
vctrl = &vsync_ctrl_db[0];
@@ -379,6 +401,28 @@ ssize_t mdp4_dtv_show_event(struct devic
!external_common_state->hpd_state ||
atomic_read(&vctrl->vsync_resume) == 0)
return 0;
+ /*
+ * The show_event thread keeps spinning on vctrl->vsync_comp. There is
+ * a race condition on x.done if multiple threads are blocked at
+ * wait_for_completion(&vctrl->vsync_comp).
+ *
+ * If the show_event thread wakes up first, it comes back and calls
+ * INIT_COMPLETION(vctrl->vsync_comp), which sets x.done = 0; the
+ * second thread then wakes up, which sets x.done = 0x7ffffffd, and
+ * after that wait_for_completion() will never wait.
+ * To avoid this, force the show_event thread to sleep briefly here
+ * (at most 1 ms, within 5 ms of the previous vsync), since it has a
+ * full vsync period (16.6 ms) in which to wait.
+ */
+ ctime = ktime_get();
+ ctick = (u32)ktime_to_us(ctime);
+ ptick = (u32)ktime_to_us(vctrl->vsync_time);
+ ptick += 5000; /* 5ms */
+ diff = ptick - ctick;
+ if (diff > 0) {
+ if (diff > 1000) /* 1 ms */
+ diff = 1000;
+ usleep(diff);
+ }
spin_lock_irqsave(&vctrl->spin_lock, flags);
if (vctrl->wait_vsync_cnt == 0)
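
The back-off added above is a small piece of time arithmetic; the hypothetical user-space helper below (vsync_throttle is not a driver symbol) restates it: if the caller arrives within 5 ms of the previous vsync it sleeps for the remainder of that window, but never longer than 1 ms, which keeps waiters from piling onto vsync_comp back to back while still leaving most of the 16.6 ms frame for wait_for_completion().

/* Hypothetical user-space sketch of the back-off added to
 * mdp4_dtv_show_event(); not the driver code itself. */
#include <stdint.h>
#include <unistd.h>

#define VSYNC_GUARD_US 5000	/* only throttle within 5 ms of last vsync */
#define MAX_SLEEP_US   1000	/* never sleep longer than 1 ms */

/* now_us / last_vsync_us are microsecond timestamps from a monotonic
 * clock; returns the number of microseconds actually slept. */
static int vsync_throttle(uint32_t now_us, uint32_t last_vsync_us)
{
	int diff = (int)(last_vsync_us + VSYNC_GUARD_US - now_us);

	if (diff <= 0)
		return 0;	/* far enough from the last vsync: no wait */
	if (diff > MAX_SLEEP_US)
		diff = MAX_SLEEP_US;
	usleep(diff);		/* short sleep before re-arming vsync_comp */
	return diff;
}

The driver's version does the same clamp with u32 ticks from ktime_to_us() (ctick, ptick) and a signed diff, so counter wrap-around is tolerated in the same way.
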
@@ -655,6 +699,7 @@ int mdp4_dtv_off(struct platform_device
mdp4_dtv_wait4vsync(cndx);
complete_all(&vctrl->vsync_comp);
+ vctrl->wait_vsync_cnt = 0;
pipe = vctrl->base_pipe;
if (pipe != NULL) {
@@ -893,10 +938,9 @@ void mdp4_external_vsync_dtv(void)
spin_lock(&vctrl->spin_lock);
vctrl->vsync_time = ktime_get();
- if (vctrl->wait_vsync_cnt) {
- complete_all(&vctrl->vsync_comp);
- vctrl->wait_vsync_cnt = 0;
- }
+ complete_all(&vctrl->vsync_comp);
+ vctrl->wait_vsync_cnt = 0;
+
spin_unlock(&vctrl->spin_lock);
}