@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -29,8 +30,11 @@
  * Based on the size of the attached scanout buffer, a certain # of
  * blocks must be allocated to that client out of the shared pool.
  *
- * For each block, it can be either free, or pending/in-use by a
- * client. The updates happen in three steps:
+ * In some hw, some blocks are statically allocated for certain pipes
+ * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
+ *
+ * For each block that can be dynamically allocated, it can be either
+ * free, or pending/in-use by a client. The updates happen in three steps:
  *
  * 1) mdp5_smp_request():
  *    When plane scanout is setup, calculate required number of
@@ -61,21 +65,64 @@
  * inuse and pending state of all clients..
  */
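
The three-step protocol described above is easier to follow with concrete bitmaps in hand. The following userspace sketch is illustration only, not part of the patch: single-word masks and GCC builtins stand in for mdp5_smp_state_t, the kernel bitmap API and the state_lock, and it walks a client shrinking from 3 blocks to 2.

#include <stdio.h>

static unsigned long state;                       /* like smp->state */
struct client { unsigned long pending, inuse; };  /* like mdp5_client_smp_state */

/* step #1: grow/shrink the pending reservation; a shrink leaves the
 * global bit set until commit, mirroring smp_request_block() */
static void request(struct client *c, int nblks)
{
	while (__builtin_popcountl(c->pending) < nblks) {
		int blk = __builtin_ctzl(~state);      /* find_first_zero_bit() */
		c->pending |= 1UL << blk;
		state |= 1UL << blk;
	}
	while (__builtin_popcountl(c->pending) > nblks) {
		int blk = __builtin_ctzl(c->pending);  /* find_first_bit() */
		c->pending &= ~(1UL << blk);  /* global bit kept until commit */
	}
}

/* step #2: hw is programmed with union(pending, inuse) */
static unsigned long configure(struct client *c)
{
	return c->pending | c->inuse;
}

/* step #3: after vblank, release what is no longer pending */
static void commit(struct client *c)
{
	state &= ~(c->inuse & ~c->pending);  /* bitmap_andnot() of released blks */
	c->inuse = c->pending;
}

int main(void)
{
	struct client a = { 0, 0 };

	request(&a, 3);  /* grow: pending = 0x7, state = 0x7 */
	commit(&a);      /* inuse = 0x7 */
	request(&a, 2);  /* shrink: pending = 0x6, but state still 0x7 */
	printf("hw during transition: %#lx\n", configure(&a));  /* 0x7 */
	commit(&a);      /* state = 0x6: blk 0 now free for other clients */
	printf("state after commit:   %#lx\n", state);
	return 0;
}

It prints 0x7 during the transition frame and 0x6 afterwards, which is exactly why step #2 programs union(pending, inuse): the pipe may still be fetching from the old allocation until the vblank in step #3.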
 
-static DEFINE_SPINLOCK(smp_lock);
+struct mdp5_smp {
+	struct drm_device *dev;
+
+	int blk_cnt;
+	int blk_size;
+
+	spinlock_t state_lock;
+	mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
+
+	struct mdp5_client_smp_state client_state[CID_MAX];
+};
+
+static inline
+struct mdp5_kms *get_kms(struct mdp5_smp *smp)
+{
+	struct msm_drm_private *priv = smp->dev->dev_private;
+
+	return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
+{
+	WARN_ON(plane >= pipe2nclients(pipe));
+	switch (pipe) {
+	case SSPP_VIG0: return CID_VIG0_Y + plane;
+	case SSPP_VIG1: return CID_VIG1_Y + plane;
+	case SSPP_VIG2: return CID_VIG2_Y + plane;
+	case SSPP_RGB0: return CID_RGB0;
+	case SSPP_RGB1: return CID_RGB1;
+	case SSPP_RGB2: return CID_RGB2;
+	case SSPP_DMA0: return CID_DMA0_Y + plane;
+	case SSPP_DMA1: return CID_DMA1_Y + plane;
+	case SSPP_VIG3: return CID_VIG3_Y + plane;
+	case SSPP_RGB3: return CID_RGB3;
+	default: return CID_UNUSED;
+	}
+}
 
 /* step #1: update # of blocks pending for the client: */
-int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
+static int smp_request_block(struct mdp5_smp *smp,
 		enum mdp5_client_id cid, int nblks)
 {
-	struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
-	int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
+	struct mdp5_kms *mdp5_kms = get_kms(smp);
+	struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+	int i, ret = 0, avail, cur_nblks, cnt = smp->blk_cnt;
+	int reserved = mdp5_kms->hw_cfg->smp.reserved[cid];
 	unsigned long flags;
 
-	spin_lock_irqsave(&smp_lock, flags);
+	spin_lock_irqsave(&smp->state_lock, flags);
+
+	nblks -= reserved;
+	if (reserved)
+		DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
 
-	avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
+	avail = cnt - bitmap_weight(smp->state, cnt);
 	if (nblks > avail) {
+		dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
+				nblks, avail);
 		ret = -ENOSPC;
 		goto fail;
 	}
@@ -84,9 +131,9 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
 	if (nblks > cur_nblks) {
 		/* grow the existing pending reservation: */
 		for (i = cur_nblks; i < nblks; i++) {
-			int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt);
+			int blk = find_first_zero_bit(smp->state, cnt);
 			set_bit(blk, ps->pending);
-			set_bit(blk, mdp5_kms->smp_state);
+			set_bit(blk, smp->state);
 		}
 	} else {
 		/* shrink the existing pending reservation: */
@@ -98,15 +145,89 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
 	}
 
 fail:
-	spin_unlock_irqrestore(&smp_lock, flags);
+	spin_unlock_irqrestore(&smp->state_lock, flags);
+	return ret;
+}
+
+static void set_fifo_thresholds(struct mdp5_smp *smp,
+		enum mdp5_pipe pipe, int nblks)
+{
+	struct mdp5_kms *mdp5_kms = get_kms(smp);
+	u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
+	u32 val;
+
+	/* 1/4 of SMP pool that is being fetched */
+	val = (nblks * smp_entries_per_blk) / 4;
+
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+}
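
To make the watermark math concrete, assume a hypothetical 4096-byte MMB (the actual size comes from per-SoC config, not from this patch): each block then holds 4096 / 16 = 256 128-bit entries, so a pipe that owns two blocks gets its three request-priority watermarks at 1/4, 2/4 and 3/4 of its 512 entries.

#include <stdio.h>

int main(void)
{
	int blk_size = 4096;                          /* hypothetical MMB size in bytes */
	int nblks = 2;                                /* blocks owned by one pipe */
	int entries_per_blk = blk_size / (128 / 8);   /* 256 128-bit entries per MMB */
	int val = (nblks * entries_per_blk) / 4;      /* 1/4 of the pipe's pool */

	/* prints: WM_0=128 WM_1=256 WM_2=384 */
	printf("WM_0=%d WM_1=%d WM_2=%d\n", val * 1, val * 2, val * 3);
	return 0;
}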
+
+/*
+ * NOTE: looks like if horizontal decimation is used (if we supported that)
+ * then the width used to calculate SMP block requirements is the post-
+ * decimated width. I.e. SMP buffering sits downstream of decimation (which
+ * presumably happens during the dma from scanout buffer).
+ */
+int mdp5_smp_request(void *handler, enum mdp5_pipe pipe, u32 fmt, u32 width)
+{
+	struct mdp5_smp *smp = handler;
+	struct mdp5_kms *mdp5_kms = get_kms(smp);
+	struct drm_device *dev = mdp5_kms->dev;
+	int i, hsub, nplanes, nlines, nblks, ret;
+
+	nplanes = drm_format_num_planes(fmt);
+	hsub = drm_format_horz_chroma_subsampling(fmt);
+
+	/* different if BWC (compressed framebuffer?) enabled: */
+	nlines = 2;
+
+	for (i = 0, nblks = 0; i < nplanes; i++) {
+		int n, fetch_stride, cpp;
+
+		cpp = drm_format_plane_cpp(fmt, i);
+		fetch_stride = width * cpp / (i ? hsub : 1);
+
+		n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
+
+		/* for hw rev v1.00 */
+		if (mdp5_kms->rev == 0)
+			n = roundup_pow_of_two(n);
+
+		DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
+		ret = smp_request_block(smp, pipe2client(pipe, i), n);
+		if (ret) {
+			dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
+					n, ret);
+			return ret;
+		}
+
+		nblks += n;
+	}
+
+	set_fifo_thresholds(smp, pipe, nblks);
+
 	return 0;
 }
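
A worked example of the loop above, again assuming a hypothetical 4096-byte MMB: NV12 has two planes (cpp of 1 and 2) with horizontal chroma subsampling of 2, so a 1920-wide scanout fetches 1920 bytes per line for each plane and needs one block per plane on hw revs where the v1.00 rounding does not apply.

#include <stdio.h>

/* Walk-through of mdp5_smp_request()'s loop for NV12 at width 1920,
 * assuming blk_size = 4096 and no v1.00 power-of-two rounding. */
int main(void)
{
	int blk_size = 4096, width = 1920, nlines = 2, hsub = 2;
	int cpp[2] = { 1, 2 };  /* Y plane, then interleaved CbCr plane */
	int i, nblks = 0;

	for (i = 0; i < 2; i++) {
		int fetch_stride = width * cpp[i] / (i ? hsub : 1);
		int n = (fetch_stride * nlines + blk_size - 1) / blk_size;  /* DIV_ROUND_UP */

		/* both planes: fetch_stride = 1920 -> 1 block */
		printf("plane %d: fetch_stride=%d -> %d blk(s)\n",
				i, fetch_stride, n);
		nblks += n;
	}
	printf("total: %d blks\n", nblks);  /* 2 */
	return 0;
}

With the v1.00 rounding enabled, each per-plane count would additionally be rounded up to a power of two before being requested.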
 
-static void update_smp_state(struct mdp5_kms *mdp5_kms,
+/* Release SMP blocks for all clients of the pipe */
+void mdp5_smp_release(void *handler, enum mdp5_pipe pipe)
+{
+	struct mdp5_smp *smp = handler;
+	int i, nblks;
+
+	for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
+		smp_request_block(smp, pipe2client(pipe, i), 0);
+	set_fifo_thresholds(smp, pipe, 0);
+}
+
+static void update_smp_state(struct mdp5_smp *smp,
 		enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
 {
-	int cnt = mdp5_kms->smp_blk_cnt;
-	uint32_t blk, val;
+	struct mdp5_kms *mdp5_kms = get_kms(smp);
+	int cnt = smp->blk_cnt;
+	u32 blk, val;
 
 	for_each_set_bit(blk, *assigned, cnt) {
 		int idx = blk / 3;
@@ -135,39 +256,84 @@ static void update_smp_state(struct mdp5_kms *mdp5_kms,
 }
 
 /* step #2: configure hw for union(pending, inuse): */
-void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+void mdp5_smp_configure(void *handler, enum mdp5_pipe pipe)
 {
-	struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
-	int cnt = mdp5_kms->smp_blk_cnt;
+	struct mdp5_smp *smp = handler;
+	int cnt = smp->blk_cnt;
 	mdp5_smp_state_t assigned;
+	int i;
+
+	for (i = 0; i < pipe2nclients(pipe); i++) {
+		enum mdp5_client_id cid = pipe2client(pipe, i);
+		struct mdp5_client_smp_state *ps = &smp->client_state[cid];
 
-	bitmap_or(assigned, ps->inuse, ps->pending, cnt);
-	update_smp_state(mdp5_kms, cid, &assigned);
+		bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+		update_smp_state(smp, cid, &assigned);
+	}
 }
 
 /* step #3: after vblank, copy pending -> inuse: */
-void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+void mdp5_smp_commit(void *handler, enum mdp5_pipe pipe)
 {
-	struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
-	int cnt = mdp5_kms->smp_blk_cnt;
+	struct mdp5_smp *smp = handler;
+	int cnt = smp->blk_cnt;
 	mdp5_smp_state_t released;
+	int i;
+
+	for (i = 0; i < pipe2nclients(pipe); i++) {
+		enum mdp5_client_id cid = pipe2client(pipe, i);
+		struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+		/*
+		 * Figure out if there are any blocks we were previously
+		 * using, which can be released and made available to other
+		 * clients:
+		 */
+		if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&smp->state_lock, flags);
+			/* clear released blocks: */
+			bitmap_andnot(smp->state, smp->state, released, cnt);
+			spin_unlock_irqrestore(&smp->state_lock, flags);
+
+			update_smp_state(smp, CID_UNUSED, &released);
+		}
 
-	/*
-	 * Figure out if there are any blocks we where previously
-	 * using, which can be released and made available to other
-	 * clients:
-	 */
-	if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&smp_lock, flags);
-		/* clear released blocks: */
-		bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
-				released, cnt);
-		spin_unlock_irqrestore(&smp_lock, flags);
-
-		update_smp_state(mdp5_kms, CID_UNUSED, &released);
+		bitmap_copy(ps->inuse, ps->pending, cnt);
 	}
+}
+
+void mdp5_smp_destroy(void *handler)
+{
+	struct mdp5_smp *smp = handler;
+
+	kfree(smp);
+}
+
+void *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
+{
+	struct mdp5_smp *smp = NULL;
+	int ret;
+
+	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
+	if (unlikely(!smp)) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	smp->dev = dev;
+	smp->blk_cnt = cfg->mmb_count;
+	smp->blk_size = cfg->mmb_size;
+
+	/* statically tied MMBs cannot be re-allocated: */
+	bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
+	spin_lock_init(&smp->state_lock);
+
+	return smp;
+fail:
+	if (smp)
+		mdp5_smp_destroy(smp);
 
-	bitmap_copy(ps->inuse, ps->pending, cnt);
+	return ERR_PTR(ret);
 }
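
Finally, a rough sketch of the calling sequence the new interface expects. The caller side here is illustrative (the real callers are the kms/plane code); only the mdp5_smp_* entry points come from this patch.

static int example_smp_flow(struct drm_device *dev,
		const struct mdp5_smp_block *cfg)
{
	void *smp = mdp5_smp_init(dev, cfg);    /* at kms init */
	int ret;

	if (IS_ERR(smp))
		return PTR_ERR(smp);

	/* plane setup: reserve blocks for a 1920-wide NV12 scanout */
	ret = mdp5_smp_request(smp, SSPP_VIG0, DRM_FORMAT_NV12, 1920);
	if (ret)
		goto out;

	mdp5_smp_configure(smp, SSPP_VIG0);     /* step #2, before flush */
	/* ... vblank where the new config takes effect ... */
	mdp5_smp_commit(smp, SSPP_VIG0);        /* step #3 */

	mdp5_smp_release(smp, SSPP_VIG0);       /* on plane disable */
out:
	mdp5_smp_destroy(smp);                  /* at kms teardown */
	return ret;
}

Note that request/configure/commit are per-pipe, and pipe2client() fans each call out to up to three clients for the YUV-capable pipes.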
|