@@ -27,16 +27,51 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
 	int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
 	int ret;
 	bool unmap = false;
+	bool store_for_later = false;
+	int x2, y2;
+	unsigned long flags;
 
 	obj = afbdev->gfb.obj;
 	bo = gem_to_cirrus_bo(obj);
 
+	/*
+	 * try and reserve the BO, if we fail with busy
+	 * then the BO is being moved and we should
+	 * store up the damage until later.
+	 */
 	ret = cirrus_bo_reserve(bo, true);
 	if (ret) {
-		DRM_ERROR("failed to reserve fb bo\n");
+		if (ret != -EBUSY)
+			return;
+		store_for_later = true;
+	}
+
+	x2 = x + width - 1;
+	y2 = y + height - 1;
+	spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
+	if (afbdev->y1 < y)
+		y = afbdev->y1;
+	if (afbdev->y2 > y2)
+		y2 = afbdev->y2;
+	if (afbdev->x1 < x)
+		x = afbdev->x1;
+	if (afbdev->x2 > x2)
+		x2 = afbdev->x2;
+
+	if (store_for_later) {
+		afbdev->x1 = x;
+		afbdev->x2 = x2;
+		afbdev->y1 = y;
+		afbdev->y2 = y2;
+		spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
 		return;
 	}
 
+	afbdev->x1 = afbdev->y1 = INT_MAX;
+	afbdev->x2 = afbdev->y2 = 0;
+	spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
 	if (!bo->kmap.virtual) {
 		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
 		if (ret) {
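The hunk above implements a defer-and-coalesce pattern: when the BO reservation fails with -EBUSY (the BO is being moved), the damage rectangle is merged into a stored rectangle instead of being written out, and a later update that does get the reservation flushes the union. The following minimal userspace sketch illustrates that pattern in isolation; struct dirty_state, try_reserve() and this dirty_update() are hypothetical stand-ins for cirrus_fbdev, cirrus_bo_reserve() and the real update path, and the single-threaded sketch omits the dirty_lock spinlock the kernel code takes around the merge.

/*
 * Minimal sketch of the defer-and-coalesce pattern from the hunk above.
 * All names here are illustrative stand-ins, not the driver's code.
 */
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct dirty_state {
	int x1, y1, x2, y2;	/* pending damage; x1 == INT_MAX means empty */
};

/* pretend BO reservation: fails with -EBUSY while the BO is moving */
static int try_reserve(bool busy)
{
	return busy ? -EBUSY : 0;
}

static void dirty_update(struct dirty_state *s, bool busy,
			 int x, int y, int width, int height)
{
	bool store_for_later = false;
	int x2 = x + width - 1;
	int y2 = y + height - 1;
	int ret = try_reserve(busy);

	if (ret) {
		if (ret != -EBUSY)
			return;		/* hard failure: give up */
		store_for_later = true;	/* busy: remember the damage */
	}

	/* union the new rectangle with whatever is already stored */
	if (s->y1 < y)
		y = s->y1;
	if (s->y2 > y2)
		y2 = s->y2;
	if (s->x1 < x)
		x = s->x1;
	if (s->x2 > x2)
		x2 = s->x2;

	if (store_for_later) {
		s->x1 = x;
		s->x2 = x2;
		s->y1 = y;
		s->y2 = y2;
		return;
	}

	/* reservation succeeded: reset the store, flush the union */
	s->x1 = s->y1 = INT_MAX;
	s->x2 = s->y2 = 0;
	printf("flush (%d,%d)-(%d,%d)\n", x, y, x2, y2);
}

int main(void)
{
	struct dirty_state s = { INT_MAX, INT_MAX, 0, 0 };

	dirty_update(&s, true, 0, 0, 8, 8);	/* -EBUSY: damage stored */
	dirty_update(&s, false, 16, 16, 4, 4);	/* flushes (0,0)-(19,19) */
	return 0;
}

Note the reset to INT_MAX/0 on the successful path: it makes the stored rectangle an identity element for the min/max union, so the next -EBUSY starts accumulating from scratch.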
@@ -268,6 +303,7 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
 
 	cdev->mode_info.gfbdev = gfbdev;
 	gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
+	spin_lock_init(&gfbdev->dirty_lock);
 
 	ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
 				 cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
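For spin_lock_init(&gfbdev->dirty_lock) and the afbdev->x1..afbdev->y2 bookkeeping in the first hunk to compile, struct cirrus_fbdev needs the new lock and rectangle fields. That companion change belongs in cirrus_drv.h and is not shown in this section; presumably it looks roughly like the following (the surrounding fields are approximate):

/* assumed companion hunk in cirrus_drv.h; not part of this section */
struct cirrus_fbdev {
	struct drm_fb_helper helper;
	struct cirrus_framebuffer gfb;
	void *sysram;
	int size;
	int x1, y1, x2, y2;	/* dirty rect accumulated under dirty_lock */
	spinlock_t dirty_lock;
};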