
drm/nouveau/falcon: delay construction of falcons to oneinit()

Reading registers at device construction time can be harmful, as there
is no guarantee the underlying engine will be up, or in its runtime
configuration. Defer register reading to the oneinit() hook and update
users accordingly.

Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Alexandre Courbot authored 8 years ago
Commit 9e4397579f
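
The resulting split, in condensed form: the constructor only sets up software state, while the falcon object is created from oneinit(), once DEVINIT has completed and the unit is in its runtime configuration. Below is a minimal sketch of the pattern, modeled on the PMU change further down; struct example, the example_* names, and EXAMPLE_FALCON_BASE are illustrative only, not part of the patch.

/* Sketch only; assumes the usual nvkm headers (core/subdev.h, engine/falcon.h). */
#define EXAMPLE_FALCON_BASE 0x000000	/* placeholder: the unit's falcon base address */

struct example {
	struct nvkm_subdev subdev;
	struct nvkm_falcon *falcon;
};

static int
example_oneinit(struct nvkm_subdev *subdev)
{
	struct example *ex = container_of(subdev, struct example, subdev);

	/* Falcon registers may be touched here: DEVINIT has already run. */
	return nvkm_falcon_v1_new(subdev, "EXAMPLE", EXAMPLE_FALCON_BASE,
				  &ex->falcon);
}

static void *
example_dtor(struct nvkm_subdev *subdev)
{
	struct example *ex = container_of(subdev, struct example, subdev);

	nvkm_falcon_del(&ex->falcon);	/* release the falcon on teardown */
	return ex;
}

static const struct nvkm_subdev_func
example_func = {
	.dtor = example_dtor,
	.oneinit = example_oneinit,	/* falcon construction deferred to here */
};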

drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h (+6, -0)

@@ -46,8 +46,14 @@ struct nvkm_falcon {
 	struct nvkm_engine engine;
 };
 
+/* This constructor must be called from the owner's oneinit() hook and
+ * *not* its constructor.  This is to ensure that DEVINIT has been
+ * completed, and that the device is correctly enabled before we touch
+ * falcon registers.
+ */
 int nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
 		       struct nvkm_falcon **);
+
 void nvkm_falcon_del(struct nvkm_falcon **);
 int nvkm_falcon_get(struct nvkm_falcon *, const struct nvkm_subdev *);
 void nvkm_falcon_put(struct nvkm_falcon *, const struct nvkm_subdev *);
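
The header also declares a small ownership protocol around the falcon, nvkm_falcon_get() and nvkm_falcon_put(): a client acquires the falcon before programming it and releases it when done. A hypothetical usage sketch follows; example_run() is not part of the patch.

/* Sketch only: take exclusive ownership of a falcon, use it, release it. */
static int
example_run(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
{
	int ret;

	ret = nvkm_falcon_get(falcon, user);
	if (ret)
		return ret;

	/* ... load ucode and start the falcon here ... */

	nvkm_falcon_put(falcon, user);
	return 0;
}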

drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c (+14, -15)

@@ -1647,8 +1647,18 @@ static int
 gf100_gr_oneinit(struct nvkm_gr *base)
 {
 	struct gf100_gr *gr = gf100_gr(base);
-	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	int i, j;
+	int ret;
+
+	ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs);
+	if (ret)
+		return ret;
+
+	ret = nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs);
+	if (ret)
+		return ret;
 
 	nvkm_pmu_pgob(device->pmu, false);
 
@@ -1856,24 +1866,13 @@ int
 gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
 	      int index, struct gf100_gr *gr)
 {
-	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
-	int ret;
-
 	gr->func = func;
 	gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
 				    func->fecs.ucode == NULL);
 
-	ret = nvkm_gr_ctor(&gf100_gr_, device, index,
-			   gr->firmware || func->fecs.ucode != NULL,
-			   &gr->base);
-	if (ret)
-		return ret;
-
-	ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs);
-	if (ret)
-		return ret;
-
-	return nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs);
+	return nvkm_gr_ctor(&gf100_gr_, device, index,
+			    gr->firmware || func->fecs.ucode != NULL,
+			    &gr->base);
 }
 
 int

drivers/gpu/drm/nouveau/nvkm/falcon/base.c (+3, -0)

@@ -137,6 +137,9 @@ nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
 void
 nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
 {
+	if (unlikely(!falcon))
+		return;
+
 	mutex_lock(&falcon->mutex);
 	if (falcon->user == user) {
 		nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
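
The guard added above follows from the new falcon lifetime: the owner's falcon pointer now stays NULL until oneinit() has run, so a teardown path that releases the falcon unconditionally must not trip over an object that was never created. A hypothetical illustration, reusing struct example from the first sketch (example_fini() is not part of the patch):

static int
example_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct example *ex = container_of(subdev, struct example, subdev);

	/* Safe even if oneinit() never ran and ex->falcon is still NULL. */
	nvkm_falcon_put(ex->falcon, subdev);
	return 0;
}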

drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c (+9, -1)

@@ -115,6 +115,13 @@ nvkm_pmu_init(struct nvkm_subdev *subdev)
 	return ret;
 }
 
+static int
+nvkm_pmu_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+	return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
+}
+
 static void *
 nvkm_pmu_dtor(struct nvkm_subdev *subdev)
 {
@@ -128,6 +135,7 @@ static const struct nvkm_subdev_func
 nvkm_pmu = {
 	.dtor = nvkm_pmu_dtor,
 	.preinit = nvkm_pmu_preinit,
+	.oneinit = nvkm_pmu_oneinit,
 	.init = nvkm_pmu_init,
 	.fini = nvkm_pmu_fini,
 	.intr = nvkm_pmu_intr,
@@ -141,7 +149,7 @@ nvkm_pmu_ctor(const struct nvkm_pmu_func *func, struct nvkm_device *device,
 	pmu->func = func;
 	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
 	init_waitqueue_head(&pmu->recv.wait);
-	return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
+	return 0;
 }
 
 int