@@ -656,13 +656,13 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
 		DRM_DEBUG("zone invalid\n");
 		return -EINVAL;
 	}
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (dev->buf_use) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	atomic_inc(&dev->buf_alloc);
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
@@ -805,13 +805,13 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 	total = PAGE_SIZE << page_order;
 
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (dev->buf_use) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	atomic_inc(&dev->buf_alloc);
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
@@ -1015,13 +1015,13 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 		return -EINVAL;
 
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (dev->buf_use) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	atomic_inc(&dev->buf_alloc);
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
@@ -1175,7 +1175,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
  * \param arg pointer to a drm_buf_info structure.
  * \return zero on success or a negative number on failure.
  *
- * Increments drm_device::buf_use while holding the drm_device::count_lock
+ * Increments drm_device::buf_use while holding the drm_device::buf_lock
  * lock, preventing of allocating more buffers after this call. Information
  * about each requested buffer is then copied into user space.
  */
@@ -1196,13 +1196,13 @@ int drm_infobufs(struct drm_device *dev, void *data,
 	if (!dma)
 		return -EINVAL;
 
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (atomic_read(&dev->buf_alloc)) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	++dev->buf_use;		/* Can't allocate more after this call */
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
 		if (dma->bufs[i].buf_count)
@@ -1381,13 +1381,13 @@ int drm_mapbufs(struct drm_device *dev, void *data,
 	if (!dma)
 		return -EINVAL;
 
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (atomic_read(&dev->buf_alloc)) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	dev->buf_use++;		/* Can't allocate more after this call */
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	if (request->count >= dma->buf_count) {
 		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
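
Every hunk above touches the same guard pattern: the drm_addbufs_*() allocators take the (renamed) buf_lock, return -EBUSY if dev->buf_use is already set, bump dev->buf_alloc under the lock, and drop the spinlock before taking the heavyweight struct_mutex; drm_infobufs() and drm_mapbufs() take the same lock, return -EBUSY while an allocation is in flight (buf_alloc non-zero), and set buf_use so no further allocations can start. A minimal userspace sketch of that handshake, using a pthread spinlock and C11 atomics in place of the kernel primitives (the fake_dev struct and sketch_* names are illustrative, not DRM API):

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_dev {
	pthread_spinlock_t buf_lock;	/* stands in for dev->buf_lock */
	int buf_use;			/* non-zero once buffers are handed out */
	atomic_int buf_alloc;		/* allocations currently in flight */
};

/* Allocator side: mirrors the drm_addbufs_*() prologue in the hunks above. */
static int sketch_addbufs(struct fake_dev *dev)
{
	pthread_spin_lock(&dev->buf_lock);
	if (dev->buf_use) {		/* buffers already queried/mapped */
		pthread_spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_fetch_add(&dev->buf_alloc, 1);	/* mark allocation in flight */
	pthread_spin_unlock(&dev->buf_lock);

	/* ... slow allocation work would happen here, outside the spinlock ... */

	atomic_fetch_sub(&dev->buf_alloc, 1);	/* kernel decrements on exit, too */
	return 0;
}

/* Consumer side: mirrors the drm_infobufs()/drm_mapbufs() prologue. */
static int sketch_mapbufs(struct fake_dev *dev)
{
	pthread_spin_lock(&dev->buf_lock);
	if (atomic_load(&dev->buf_alloc)) {	/* allocation still in flight */
		pthread_spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* can't allocate more after this call */
	pthread_spin_unlock(&dev->buf_lock);
	return 0;
}

int main(void)
{
	struct fake_dev dev = { .buf_use = 0, .buf_alloc = 0 };

	pthread_spin_init(&dev.buf_lock, PTHREAD_PROCESS_PRIVATE);
	printf("addbufs: %d\n", sketch_addbufs(&dev));	/* 0 */
	printf("mapbufs: %d\n", sketch_mapbufs(&dev));	/* 0 */
	printf("addbufs: %d\n", sketch_addbufs(&dev));	/* -EBUSY: buf_use is set */
	pthread_spin_destroy(&dev.buf_lock);
	return 0;
}

Once sketch_mapbufs() has run, further sketch_addbufs() calls fail with -EBUSY, mirroring how querying or mapping DRM buffers freezes the buffer configuration; the rename does not change this behavior, only gives the lock a name scoped to what it actually protects.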