
Merge branch 'linux-4.2' of git://anongit.freedesktop.org/git/nouveau/linux-2.6 into drm-fixes

Various minor fixes all over the place, nothing too scary.

* 'linux-4.2' of git://anongit.freedesktop.org/git/nouveau/linux-2.6:
  drm/nouveau/fbcon/g80: reduce PUSH_SPACE alloc, fire ring on accel init
  drm/nouveau/fbcon/gf100-: reduce RING_SPACE allocation
  drm/nouveau/fbcon/nv11-: correctly account for ring space usage
  drm/nouveau/bios: add proper support for opcode 0x59
  drm/nouveau/bios: add 0x59 and 0x5a opcodes
  drm/nouveau/disp: Use NULL for pointers
  drm/nouveau/pm: fix a potential race condition when creating an engine context
  drm/nouveau/pm: prevent freeing the wrong engine context
  drm/nouveau/gr/gf100: wait for GR idle after GO_IDLE bundle
  drm/nouveau/gr/gf100: wait on bottom half of FE's pipeline
  drm/nouveau/fifo/gk104: kick channels when deactivating them
  drm/nouveau/ibus/gk20a: increase SM wait timeout
  drm/nouveau/platform: fix compile error if !CONFIG_IOMMU
  drm/nouveau: Do not leak client objects
  drm/nouveau/clk/gt215: u32->s32 for difference in req. and set clock
  drm/nouveau/drm/nv04-nv40/instmem: protect access to priv->heap by mutex
  drm/nouveau: hold mutex when calling nouveau_abi16_fini()
Dave Airlie, 10 years ago
parent
commit eb8bb7774e

+ 3 - 0
drivers/gpu/drm/nouveau/nouveau_drm.c

@@ -128,6 +128,7 @@ nouveau_cli_destroy(struct nouveau_cli *cli)
 	nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
 	nvif_client_fini(&cli->base);
 	usif_client_fini(cli);
+	kfree(cli);
 }
 
 static void
@@ -865,8 +866,10 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
 
 	pm_runtime_get_sync(dev->dev);
 
+	mutex_lock(&cli->mutex);
 	if (cli->abi16)
 		nouveau_abi16_fini(cli->abi16);
+	mutex_unlock(&cli->mutex);
 
 	mutex_lock(&drm->client.mutex);
 	list_del(&cli->head);

+ 16 - 0
drivers/gpu/drm/nouveau/nouveau_platform.c

@@ -92,6 +92,8 @@ static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
 	return 0;
 }
 
+#if IS_ENABLED(CONFIG_IOMMU_API)
+
 static void nouveau_platform_probe_iommu(struct device *dev,
 					 struct nouveau_platform_gpu *gpu)
 {
@@ -158,6 +160,20 @@ static void nouveau_platform_remove_iommu(struct device *dev,
 	}
 }
 
+#else
+
+static void nouveau_platform_probe_iommu(struct device *dev,
+					 struct nouveau_platform_gpu *gpu)
+{
+}
+
+static void nouveau_platform_remove_iommu(struct device *dev,
+					  struct nouveau_platform_gpu *gpu)
+{
+}
+
+#endif
+
 static int nouveau_platform_probe(struct platform_device *pdev)
 {
 	struct nouveau_platform_gpu *gpu;

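The !CONFIG_IOMMU compile fix uses the usual Kconfig stub idiom: the real nouveau_platform_probe_iommu()/nouveau_platform_remove_iommu() are only compiled when the IOMMU API is enabled, and empty stubs with identical signatures take their place otherwise, so the callers in the probe/remove paths need no #ifdef of their own. A minimal, self-contained sketch of that pattern, using an invented HAVE_FANCY_FEATURE switch in place of the kernel's Kconfig machinery:

    #include <stdio.h>

    /* Pretend this comes from the build system (e.g. -DHAVE_FANCY_FEATURE=1). */
    #ifndef HAVE_FANCY_FEATURE
    #define HAVE_FANCY_FEATURE 0
    #endif

    #if HAVE_FANCY_FEATURE

    /* Real implementation, only built when the feature is available. */
    static void feature_setup(const char *dev)
    {
            printf("%s: feature enabled\n", dev);
    }

    static void feature_teardown(const char *dev)
    {
            printf("%s: feature disabled\n", dev);
    }

    #else

    /* Empty stubs with identical signatures keep the callers #ifdef-free. */
    static void feature_setup(const char *dev)    { (void)dev; }
    static void feature_teardown(const char *dev) { (void)dev; }

    #endif

    int main(void)
    {
            /* Caller code is identical in both configurations. */
            feature_setup("gpu0");
            feature_teardown("gpu0");
            return 0;
    }

Because the stubs are static and empty, the calls typically compile down to nothing when the feature is disabled, while the call sites stay the same either way.
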
+ 1 - 1
drivers/gpu/drm/nouveau/nv04_fbcon.c

@@ -203,7 +203,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
 	if (ret)
 		return ret;
 
-	if (RING_SPACE(chan, 49)) {
+	if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
 		nouveau_fbcon_gpu_lockup(info);
 		return 0;
 	}

+ 2 - 1
drivers/gpu/drm/nouveau/nv50_fbcon.c

@@ -188,7 +188,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
 	if (ret)
 		return ret;
 
-	ret = RING_SPACE(chan, 59);
+	ret = RING_SPACE(chan, 58);
 	if (ret) {
 		nouveau_fbcon_gpu_lockup(info);
 		return ret;
@@ -252,6 +252,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
 	OUT_RING(chan, info->var.yres_virtual);
 	OUT_RING(chan, upper_32_bits(fb->vma.offset));
 	OUT_RING(chan, lower_32_bits(fb->vma.offset));
+	FIRE_RING(chan);
 
 	return 0;
 }

+ 1 - 1
drivers/gpu/drm/nouveau/nvc0_fbcon.c

@@ -188,7 +188,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
 		return -EINVAL;
 	}
 
-	ret = RING_SPACE(chan, 60);
+	ret = RING_SPACE(chan, 58);
 	if (ret) {
 		WARN_ON(1);
 		nouveau_fbcon_gpu_lockup(info);

+ 1 - 1
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c

@@ -809,7 +809,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
 		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
 		default:
 			nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
-			return 0x0000;
+			return NULL;
 		}
 	}
 

+ 21 - 8
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c

@@ -165,15 +165,31 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
 	return 0;
 }
 
+static int
+gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
+{
+	struct nvkm_object *obj = (void *)chan;
+	struct gk104_fifo_priv *priv = (void *)obj->engine;
+
+	nv_wr32(priv, 0x002634, chan->base.chid);
+	if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) {
+		nv_error(priv, "channel %d [%s] kick timeout\n",
+			 chan->base.chid, nvkm_client_name(chan));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
 static int
 gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
 			  struct nvkm_object *object)
 {
 	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gk104_fifo_priv *priv = (void *)parent->engine;
 	struct gk104_fifo_base *base = (void *)parent->parent;
 	struct gk104_fifo_chan *chan = (void *)parent;
 	u32 addr;
+	int ret;
 
 	switch (nv_engidx(object->engine)) {
 	case NVDEV_ENGINE_SW    : return 0;
@@ -188,13 +204,9 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
 		return -EINVAL;
 	}
 
-	nv_wr32(priv, 0x002634, chan->base.chid);
-	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
-		nv_error(priv, "channel %d [%s] kick timeout\n",
-			 chan->base.chid, nvkm_client_name(chan));
-		if (suspend)
-			return -EBUSY;
-	}
+	ret = gk104_fifo_chan_kick(chan);
+	if (ret && suspend)
+		return ret;
 
 	if (addr) {
 		nv_wo32(base, addr + 0x00, 0x00000000);
@@ -319,6 +331,7 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
 		gk104_fifo_runlist_update(priv, chan->engine);
 	}
 
+	gk104_fifo_chan_kick(chan);
 	nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
 	return nvkm_fifo_channel_fini(&chan->base, suspend);
 }

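The new gk104_fifo_chan_kick() helper factors out a trigger-then-poll sequence: write the channel id to 0x002634, wait for the hardware to drop the busy bit 0x100000, and report -EBUSY on timeout; gk104_fifo_chan_fini() now reuses it so channels are also kicked when they are deactivated, not only on context detach. For illustration only, here is a tiny userspace analogue of that trigger/poll shape, with a fake register and a simple retry budget standing in for nv_wr32()/nv_wait():

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented stand-in for a memory-mapped "kick" register. */
    static uint32_t fake_kick_reg;

    static void reg_write(uint32_t v) { fake_kick_reg = v; }

    static uint32_t reg_read(void)
    {
            /* Pretend the hardware finishes at once: busy bit (1 << 20) clear. */
            return fake_kick_reg & ~(1u << 20);
    }

    /* Trigger the operation, then poll until the busy bit clears or we give up. */
    static int chan_kick(uint32_t chid)
    {
            reg_write(chid);

            for (int tries = 0; tries < 1000; tries++) {
                    if (!(reg_read() & (1u << 20)))
                            return 0;       /* idle: kick completed */
            }

            fprintf(stderr, "channel %u kick timeout\n", chid);
            return -EBUSY;
    }

    int main(void)
    {
            printf("kick -> %d\n", chan_kick(3));
            return 0;
    }
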
+ 38 - 1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c

@@ -663,6 +663,37 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv)
 		gf100_gr_zbc_clear_depth(priv, index);
 }
 
+/**
+ * Wait until GR goes idle. GR is considered idle if it is disabled by the
+ * MC (0x200) register, or GR is not busy and a context switch is not in
+ * progress.
+ */
+int
+gf100_gr_wait_idle(struct gf100_gr_priv *priv)
+{
+	unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
+	bool gr_enabled, ctxsw_active, gr_busy;
+
+	do {
+		/*
+		 * required to make sure FIFO_ENGINE_STATUS (0x2640) is
+		 * up-to-date
+		 */
+		nv_rd32(priv, 0x400700);
+
+		gr_enabled = nv_rd32(priv, 0x200) & 0x1000;
+		ctxsw_active = nv_rd32(priv, 0x2640) & 0x8000;
+		gr_busy = nv_rd32(priv, 0x40060c) & 0x1;
+
+		if (!gr_enabled || (!gr_busy && !ctxsw_active))
+			return 0;
+	} while (time_before(jiffies, end_jiffies));
+
+	nv_error(priv, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
+		 gr_enabled, ctxsw_active, gr_busy);
+	return -EAGAIN;
+}
+
 void
 gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
 {
@@ -699,7 +730,13 @@ gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
 
 		while (addr < next) {
 			nv_wr32(priv, 0x400200, addr);
-			nv_wait(priv, 0x400700, 0x00000002, 0x00000000);
+			/**
+			 * Wait for GR to go idle after submitting a
+			 * GO_IDLE bundle
+			 */
+			if ((addr & 0xffff) == 0xe100)
+				gf100_gr_wait_idle(priv);
+			nv_wait(priv, 0x400700, 0x00000004, 0x00000000);
 			addr += init->pitch;
 		}
 	}

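gf100_gr_wait_idle() is a deadline-based poll: it computes an end time two seconds ahead with msecs_to_jiffies(), re-reads the three status sources on every pass, and succeeds as soon as GR is either disabled in the MC or both not busy and not mid-context-switch, otherwise failing with -EAGAIN and the final state. A compact userspace sketch of the same deadline-and-condition structure, using clock_gettime() in place of jiffies and an invented read_status() callback instead of register reads:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static double now_ms(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
    }

    /* Invented status source; a real driver would read hardware registers here. */
    static void read_status(bool *enabled, bool *ctxsw_active, bool *busy)
    {
            *enabled = true;
            *ctxsw_active = false;
            *busy = false;
    }

    /* Poll until the unit is idle or a 2000 ms deadline expires. */
    static int wait_idle(void)
    {
            double deadline = now_ms() + 2000.0;
            bool enabled, ctxsw_active, busy;

            do {
                    read_status(&enabled, &ctxsw_active, &busy);

                    /* Idle if the unit is disabled, or neither busy nor switching. */
                    if (!enabled || (!busy && !ctxsw_active))
                            return 0;
            } while (now_ms() < deadline);

            fprintf(stderr, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
                    enabled, ctxsw_active, busy);
            return -EAGAIN;
    }

    int main(void)
    {
            printf("wait_idle -> %d\n", wait_idle());
            return 0;
    }

The real helper additionally reads 0x400700 on every iteration purely for its side effect of refreshing FIFO_ENGINE_STATUS, which the sketch has no equivalent for.
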
+ 1 - 0
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h

@@ -181,6 +181,7 @@ struct gf100_gr_oclass {
 	int ppc_nr;
 };
 
+int  gf100_gr_wait_idle(struct gf100_gr_priv *);
 void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
 void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
 void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);

+ 8 - 6
drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c

@@ -332,9 +332,12 @@ static void
 nvkm_perfctx_dtor(struct nvkm_object *object)
 {
 	struct nvkm_pm *ppm = (void *)object->engine;
+	struct nvkm_perfctx *ctx = (void *)object;
+
 	mutex_lock(&nv_subdev(ppm)->mutex);
-	nvkm_engctx_destroy(&ppm->context->base);
-	ppm->context = NULL;
+	nvkm_engctx_destroy(&ctx->base);
+	if (ppm->context == ctx)
+		ppm->context = NULL;
 	mutex_unlock(&nv_subdev(ppm)->mutex);
 }
 
@@ -355,12 +358,11 @@ nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	mutex_lock(&nv_subdev(ppm)->mutex);
 	if (ppm->context == NULL)
 		ppm->context = ctx;
-	mutex_unlock(&nv_subdev(ppm)->mutex);
-
 	if (ctx != ppm->context)
-		return -EBUSY;
+		ret = -EBUSY;
+	mutex_unlock(&nv_subdev(ppm)->mutex);
 
-	return 0;
+	return ret;
 }
 
 struct nvkm_oclass

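Both pm/base.c changes come down to keeping the check and the update of ppm->context inside one critical section: the old constructor dropped the mutex between the "claim if free" assignment and the "did I get it" check, so a second thread could slip in between, and the old destructor cleared ppm->context unconditionally even when it belonged to a different context. A minimal userspace sketch of the corrected claim/release discipline, with a pthread mutex and invented names:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    struct perf_ctx { int id; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct perf_ctx *current_ctx;   /* at most one active context */

    /* Claim the slot: check and assign under the same lock acquisition. */
    static int ctx_claim(struct perf_ctx *ctx)
    {
            int ret = 0;

            pthread_mutex_lock(&lock);
            if (current_ctx == NULL)
                    current_ctx = ctx;
            if (current_ctx != ctx)
                    ret = -EBUSY;           /* someone else already owns the slot */
            pthread_mutex_unlock(&lock);

            return ret;
    }

    /* Release the slot, but only if this context actually owns it. */
    static void ctx_release(struct perf_ctx *ctx)
    {
            pthread_mutex_lock(&lock);
            if (current_ctx == ctx)
                    current_ctx = NULL;
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            struct perf_ctx a = { 1 }, b = { 2 };

            printf("claim a: %d\n", ctx_claim(&a));  /* 0 */
            printf("claim b: %d\n", ctx_claim(&b));  /* -EBUSY */
            ctx_release(&b);                         /* no effect: b is not the owner */
            ctx_release(&a);
            printf("claim b: %d\n", ctx_claim(&b));  /* 0 */
            return 0;
    }
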
+ 40 - 0
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c

@@ -1284,6 +1284,44 @@ init_zm_reg_sequence(struct nvbios_init *init)
 	}
 }
 
+/**
+ * INIT_PLL_INDIRECT - opcode 0x59
+ *
+ */
+static void
+init_pll_indirect(struct nvbios_init *init)
+{
+	struct nvkm_bios *bios = init->bios;
+	u32  reg = nv_ro32(bios, init->offset + 1);
+	u16 addr = nv_ro16(bios, init->offset + 5);
+	u32 freq = (u32)nv_ro16(bios, addr) * 1000;
+
+	trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
+	      reg, addr, freq);
+	init->offset += 7;
+
+	init_prog_pll(init, reg, freq);
+}
+
+/**
+ * INIT_ZM_REG_INDIRECT - opcode 0x5a
+ *
+ */
+static void
+init_zm_reg_indirect(struct nvbios_init *init)
+{
+	struct nvkm_bios *bios = init->bios;
+	u32  reg = nv_ro32(bios, init->offset + 1);
+	u16 addr = nv_ro16(bios, init->offset + 5);
+	u32 data = nv_ro32(bios, addr);
+
+	trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
+	      reg, addr, data);
+	init->offset += 7;
+
+	init_wr32(init, addr, data);
+}
+
 /**
  * INIT_SUB_DIRECT - opcode 0x5b
  *
@@ -2145,6 +2183,8 @@ static struct nvbios_init_opcode {
 	[0x56] = { init_condition_time },
 	[0x57] = { init_ltime },
 	[0x58] = { init_zm_reg_sequence },
+	[0x59] = { init_pll_indirect },
+	[0x5a] = { init_zm_reg_indirect },
 	[0x5b] = { init_sub_direct },
 	[0x5c] = { init_jump },
 	[0x5e] = { init_i2c_if },

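Both new opcodes share the 7-byte encoding visible in the handlers: one opcode byte, a 32-bit register address at offset + 1, and a 16-bit pointer into the VBIOS image at offset + 5. The handler dereferences that pointer (a 16-bit value scaled by 1000 to kHz for 0x59, a raw 32-bit word for 0x5a) and then advances init->offset by 7. A small standalone sketch of decoding that layout from a little-endian byte buffer (the helpers and the sample image are invented, not nouveau's code):

    #include <stdint.h>
    #include <stdio.h>

    /* Little-endian reads from a raw VBIOS-like byte buffer. */
    static uint16_t rd16(const uint8_t *img, uint32_t off)
    {
            return (uint16_t)(img[off] | (img[off + 1] << 8));
    }

    static uint32_t rd32(const uint8_t *img, uint32_t off)
    {
            return (uint32_t)img[off] | (img[off + 1] << 8) |
                   ((uint32_t)img[off + 2] << 16) | ((uint32_t)img[off + 3] << 24);
    }

    int main(void)
    {
            /* Fabricated image: opcode 0x59 at offset 0, data table at offset 0x10. */
            uint8_t img[0x20] = { 0 };
            uint32_t offset = 0;

            img[0] = 0x59;                       /* INIT_PLL_INDIRECT            */
            img[1] = 0x08; img[2] = 0x46;        /* reg = 0x00004608 (LE 32-bit) */
            img[5] = 0x10; img[6] = 0x00;        /* addr = 0x0010 (LE 16-bit)    */
            img[0x10] = 0x9a; img[0x11] = 0x02;  /* table entry: 0x029a = 666    */

            uint32_t reg  = rd32(img, offset + 1);
            uint16_t addr = rd16(img, offset + 5);
            uint32_t freq = (uint32_t)rd16(img, addr) * 1000;
            offset += 7;                          /* opcode + 4-byte reg + 2-byte ptr */

            printf("R[0x%06x] =PLL= VBIOS[%04x] = %ukHz (next opcode at byte %u)\n",
                   (unsigned)reg, (unsigned)addr, (unsigned)freq, (unsigned)offset);
            return 0;
    }
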
+ 2 - 1
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c

@@ -180,7 +180,8 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
 	       struct gt215_clk_info *info)
 {
 	struct gt215_clk_priv *priv = (void *)clock;
-	u32 oclk, sclk, sdiv, diff;
+	u32 oclk, sclk, sdiv;
+	s32 diff;
 
 	info->clk = 0;
 

+ 8 - 0
drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c

@@ -38,6 +38,14 @@ gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
 	nv_wr32(priv, 0x12004c, 0x4);
 	nv_wr32(priv, 0x122204, 0x2);
 	nv_rd32(priv, 0x122204);
+
+	/*
+	 * Bug: increase clock timeout to avoid operation failure at high
+	 * gpcclk rate.
+	 */
+	nv_wr32(priv, 0x122354, 0x800);
+	nv_wr32(priv, 0x128328, 0x800);
+	nv_wr32(priv, 0x124320, 0x800);
 }
 
 static void

+ 8 - 0
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c

@@ -50,7 +50,12 @@ nv04_instobj_dtor(struct nvkm_object *object)
 {
 	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
 	struct nv04_instobj_priv *node = (void *)object;
+	struct nvkm_subdev *subdev = (void *)priv;
+
+	mutex_lock(&subdev->mutex);
 	nvkm_mm_free(&priv->heap, &node->mem);
+	mutex_unlock(&subdev->mutex);
+
 	nvkm_instobj_destroy(&node->base);
 }
 
@@ -62,6 +67,7 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
 	struct nv04_instobj_priv *node;
 	struct nvkm_instobj_args *args = data;
+	struct nvkm_subdev *subdev = (void *)priv;
 	int ret;
 
 	if (!args->align)
@@ -72,8 +78,10 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
 
+	mutex_lock(&subdev->mutex);
 	ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
 			   args->align, &node->mem);
+	mutex_unlock(&subdev->mutex);
 	if (ret)
 		return ret;