
drm/nouveau/pmu: move ucode handling into gt215 implementation

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Ben Skeggs, 8 years ago
commit da7d2062fc

+ 37 - 184
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c

@@ -32,227 +32,80 @@ nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 		pmu->func->pgob(pmu, enable);
 }
 
-int
-nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
-	      u32 process, u32 message, u32 data0, u32 data1)
-{
-	struct nvkm_subdev *subdev = &pmu->subdev;
-	struct nvkm_device *device = subdev->device;
-	u32 addr;
-
-	mutex_lock(&subdev->mutex);
-	/* wait for a free slot in the fifo */
-	addr  = nvkm_rd32(device, 0x10a4a0);
-	if (nvkm_msec(device, 2000,
-		u32 tmp = nvkm_rd32(device, 0x10a4b0);
-		if (tmp != (addr ^ 8))
-			break;
-	) < 0) {
-		mutex_unlock(&subdev->mutex);
-		return -EBUSY;
-	}
-
-	/* we currently only support a single process at a time waiting
-	 * on a synchronous reply, take the PMU mutex and tell the
-	 * receive handler what we're waiting for
-	 */
-	if (reply) {
-		pmu->recv.message = message;
-		pmu->recv.process = process;
-	}
-
-	/* acquire data segment access */
-	do {
-		nvkm_wr32(device, 0x10a580, 0x00000001);
-	} while (nvkm_rd32(device, 0x10a580) != 0x00000001);
-
-	/* write the packet */
-	nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
-				pmu->send.base));
-	nvkm_wr32(device, 0x10a1c4, process);
-	nvkm_wr32(device, 0x10a1c4, message);
-	nvkm_wr32(device, 0x10a1c4, data0);
-	nvkm_wr32(device, 0x10a1c4, data1);
-	nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);
-
-	/* release data segment access */
-	nvkm_wr32(device, 0x10a580, 0x00000000);
-
-	/* wait for reply, if requested */
-	if (reply) {
-		wait_event(pmu->recv.wait, (pmu->recv.process == 0));
-		reply[0] = pmu->recv.data[0];
-		reply[1] = pmu->recv.data[1];
-	}
-
-	mutex_unlock(&subdev->mutex);
-	return 0;
-}
-
 static void
 nvkm_pmu_recv(struct work_struct *work)
 {
-	struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
-	struct nvkm_subdev *subdev = &pmu->subdev;
-	struct nvkm_device *device = subdev->device;
-	u32 process, message, data0, data1;
-
-	/* nothing to do if GET == PUT */
-	u32 addr =  nvkm_rd32(device, 0x10a4cc);
-	if (addr == nvkm_rd32(device, 0x10a4c8))
-		return;
-
-	/* acquire data segment access */
-	do {
-		nvkm_wr32(device, 0x10a580, 0x00000002);
-	} while (nvkm_rd32(device, 0x10a580) != 0x00000002);
-
-	/* read the packet */
-	nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
-				pmu->recv.base));
-	process = nvkm_rd32(device, 0x10a1c4);
-	message = nvkm_rd32(device, 0x10a1c4);
-	data0   = nvkm_rd32(device, 0x10a1c4);
-	data1   = nvkm_rd32(device, 0x10a1c4);
-	nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);
-
-	/* release data segment access */
-	nvkm_wr32(device, 0x10a580, 0x00000000);
-
-	/* wake process if it's waiting on a synchronous reply */
-	if (pmu->recv.process) {
-		if (process == pmu->recv.process &&
-		    message == pmu->recv.message) {
-			pmu->recv.data[0] = data0;
-			pmu->recv.data[1] = data1;
-			pmu->recv.process = 0;
-			wake_up(&pmu->recv.wait);
-			return;
-		}
-	}
+	struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work);
+	return pmu->func->recv(pmu);
+}
 
-	/* right now there's no other expected responses from the engine,
-	 * so assume that any unexpected message is an error.
-	 */
-	nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
-		  (char)((process & 0x000000ff) >>  0),
-		  (char)((process & 0x0000ff00) >>  8),
-		  (char)((process & 0x00ff0000) >> 16),
-		  (char)((process & 0xff000000) >> 24),
-		  process, message, data0, data1);
+int
+nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
+	      u32 process, u32 message, u32 data0, u32 data1)
+{
+	if (!pmu || !pmu->func->send)
+		return -ENODEV;
+	return pmu->func->send(pmu, reply, process, message, data0, data1);
 }
 
 static void
 nvkm_pmu_intr(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
-	struct nvkm_device *device = pmu->subdev.device;
-	u32 disp = nvkm_rd32(device, 0x10a01c);
-	u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);
-
-	if (intr & 0x00000020) {
-		u32 stat = nvkm_rd32(device, 0x10a16c);
-		if (stat & 0x80000000) {
-			nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
-				   stat & 0x00ffffff,
-				   nvkm_rd32(device, 0x10a168));
-			nvkm_wr32(device, 0x10a16c, 0x00000000);
-			intr &= ~0x00000020;
-		}
-	}
-
-	if (intr & 0x00000040) {
-		schedule_work(&pmu->recv.work);
-		nvkm_wr32(device, 0x10a004, 0x00000040);
-		intr &= ~0x00000040;
-	}
-
-	if (intr & 0x00000080) {
-		nvkm_info(subdev, "wr32 %06x %08x\n",
-			  nvkm_rd32(device, 0x10a7a0),
-			  nvkm_rd32(device, 0x10a7a4));
-		nvkm_wr32(device, 0x10a004, 0x00000080);
-		intr &= ~0x00000080;
-	}
-
-	if (intr) {
-		nvkm_error(subdev, "intr %08x\n", intr);
-		nvkm_wr32(device, 0x10a004, intr);
-	}
+	if (!pmu->func->intr)
+		return;
+	pmu->func->intr(pmu);
 }
 
 static int
 nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
 {
 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
-	struct nvkm_device *device = pmu->subdev.device;
 
-	nvkm_wr32(device, 0x10a014, 0x00000060);
+	if (pmu->func->fini)
+		pmu->func->fini(pmu);
+
 	flush_work(&pmu->recv.work);
 	return 0;
 }
 
 static int
-nvkm_pmu_init(struct nvkm_subdev *subdev)
+nvkm_pmu_reset(struct nvkm_pmu *pmu)
 {
-	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
 	struct nvkm_device *device = pmu->subdev.device;
-	int i;
 
-	/* prevent previous ucode from running, wait for idle, reset */
-	nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
+	if (!(nvkm_rd32(device, 0x000200) & 0x00002000))
+		return 0;
+
+	/* Inhibit interrupts, and wait for idle. */
+	nvkm_wr32(device, 0x10a014, 0x0000ffff);
 	nvkm_msec(device, 2000,
 		if (!nvkm_rd32(device, 0x10a04c))
 			break;
 	);
-	nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
-	nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
-	nvkm_rd32(device, 0x000200);
+
+	/* Reset. */
+	pmu->func->reset(pmu);
+
+	/* Wait for IMEM/DMEM scrubbing to be complete. */
 	nvkm_msec(device, 2000,
 		if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
 			break;
 	);
 
-	/* upload data segment */
-	nvkm_wr32(device, 0x10a1c0, 0x01000000);
-	for (i = 0; i < pmu->func->data.size / 4; i++)
-		nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);
-
-	/* upload code segment */
-	nvkm_wr32(device, 0x10a180, 0x01000000);
-	for (i = 0; i < pmu->func->code.size / 4; i++) {
-		if ((i & 0x3f) == 0)
-			nvkm_wr32(device, 0x10a188, i >> 6);
-		nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
-	}
-
-	/* start it running */
-	nvkm_wr32(device, 0x10a10c, 0x00000000);
-	nvkm_wr32(device, 0x10a104, 0x00000000);
-	nvkm_wr32(device, 0x10a100, 0x00000002);
-
-	/* wait for valid host->pmu ring configuration */
-	if (nvkm_msec(device, 2000,
-		if (nvkm_rd32(device, 0x10a4d0))
-			break;
-	) < 0)
-		return -EBUSY;
-	pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
-	pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;
-
-	/* wait for valid pmu->host ring configuration */
-	if (nvkm_msec(device, 2000,
-		if (nvkm_rd32(device, 0x10a4dc))
-			break;
-	) < 0)
-		return -EBUSY;
-	pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
-	pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;
-
-	nvkm_wr32(device, 0x10a010, 0x000000e0);
 	return 0;
 }
 
+static int
+nvkm_pmu_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+	int ret = nvkm_pmu_reset(pmu);
+	if (ret == 0 && pmu->func->init)
+		ret = pmu->func->init(pmu);
+	return ret;
+}
+
 static void *
 nvkm_pmu_dtor(struct nvkm_subdev *subdev)
 {

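For illustration only (not part of this commit): a minimal sketch of how a caller might use the dispatching nvkm_pmu_send() above. EXAMPLE_PROC_ID and EXAMPLE_MSG_INFO are hypothetical placeholders, not identifiers from the driver; real callers use the IDs defined alongside the PMU ucode. The point is that send is now a per-chipset hook, so callers should expect -ENODEV on implementations that do not provide one.

/* Hypothetical caller sketch -- the process/message IDs are made-up
 * placeholders, not values from this commit.
 */
static int
example_query_pmu(struct nvkm_pmu *pmu)
{
	u32 reply[2];
	int ret;

	ret = nvkm_pmu_send(pmu, reply, EXAMPLE_PROC_ID, EXAMPLE_MSG_INFO,
			    0x00000000, 0x00000000);
	if (ret == -ENODEV)
		return ret;	/* this chipset provides no message interface */
	if (ret)
		return ret;

	/* reply[0]/reply[1] hold the two data words returned by the PMU. */
	return 0;
}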
+ 6 - 0
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c

@@ -30,6 +30,12 @@ gf100_pmu = {
 	.code.size = sizeof(gf100_pmu_code),
 	.data.data = gf100_pmu_data,
 	.data.size = sizeof(gf100_pmu_data),
+	.reset = gt215_pmu_reset,
+	.init = gt215_pmu_init,
+	.fini = gt215_pmu_fini,
+	.intr = gt215_pmu_intr,
+	.send = gt215_pmu_send,
+	.recv = gt215_pmu_recv,
 };
 
 int

+ 6 - 0
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c

@@ -30,6 +30,12 @@ gf119_pmu = {
 	.code.size = sizeof(gf119_pmu_code),
 	.data.data = gf119_pmu_data,
 	.data.size = sizeof(gf119_pmu_data),
+	.reset = gt215_pmu_reset,
+	.init = gt215_pmu_init,
+	.fini = gt215_pmu_fini,
+	.intr = gt215_pmu_intr,
+	.send = gt215_pmu_send,
+	.recv = gt215_pmu_recv,
 };
 
 int

+ 6 - 0
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c

@@ -109,6 +109,12 @@ gk104_pmu = {
 	.code.size = sizeof(gk104_pmu_code),
 	.data.data = gk104_pmu_data,
 	.data.size = sizeof(gk104_pmu_data),
+	.reset = gt215_pmu_reset,
+	.init = gt215_pmu_init,
+	.fini = gt215_pmu_fini,
+	.intr = gt215_pmu_intr,
+	.send = gt215_pmu_send,
+	.recv = gt215_pmu_recv,
 	.pgob = gk104_pmu_pgob,
 };
 

+ 6 - 0
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c

@@ -88,6 +88,12 @@ gk110_pmu = {
 	.code.size = sizeof(gk110_pmu_code),
 	.data.data = gk110_pmu_data,
 	.data.size = sizeof(gk110_pmu_data),
+	.reset = gt215_pmu_reset,
+	.init = gt215_pmu_init,
+	.fini = gt215_pmu_fini,
+	.intr = gt215_pmu_intr,
+	.send = gt215_pmu_send,
+	.recv = gt215_pmu_recv,
 	.pgob = gk110_pmu_pgob,
 };
 

+ 6 - 0
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c

@@ -30,6 +30,12 @@ gk208_pmu = {
 	.code.size = sizeof(gk208_pmu_code),
 	.data.data = gk208_pmu_data,
 	.data.size = sizeof(gk208_pmu_data),
+	.reset = gt215_pmu_reset,
+	.init = gt215_pmu_init,
+	.fini = gt215_pmu_fini,
+	.intr = gt215_pmu_intr,
+	.send = gt215_pmu_send,
+	.recv = gt215_pmu_recv,
 	.pgob = gk110_pmu_pgob,
 };
 

+ 6 - 0
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c

@@ -32,6 +32,12 @@ gm107_pmu = {
 	.code.size = sizeof(gm107_pmu_code),
 	.data.data = gm107_pmu_data,
 	.data.size = sizeof(gm107_pmu_data),
+	.reset = gt215_pmu_reset,
+	.init = gt215_pmu_init,
+	.fini = gt215_pmu_fini,
+	.intr = gt215_pmu_intr,
+	.send = gt215_pmu_send,
+	.recv = gt215_pmu_recv,
 };
 
 int

+ 217 - 0
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c

@@ -24,12 +24,229 @@
 #include "priv.h"
 #include "fuc/gt215.fuc3.h"
 
+#include <subdev/timer.h>
+
+int
+gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
+	       u32 process, u32 message, u32 data0, u32 data1)
+{
+	struct nvkm_subdev *subdev = &pmu->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 addr;
+
+	mutex_lock(&subdev->mutex);
+	/* wait for a free slot in the fifo */
+	addr  = nvkm_rd32(device, 0x10a4a0);
+	if (nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, 0x10a4b0);
+		if (tmp != (addr ^ 8))
+			break;
+	) < 0) {
+		mutex_unlock(&subdev->mutex);
+		return -EBUSY;
+	}
+
+	/* we currently only support a single process at a time waiting
+	 * on a synchronous reply, take the PMU mutex and tell the
+	 * receive handler what we're waiting for
+	 */
+	if (reply) {
+		pmu->recv.message = message;
+		pmu->recv.process = process;
+	}
+
+	/* acquire data segment access */
+	do {
+		nvkm_wr32(device, 0x10a580, 0x00000001);
+	} while (nvkm_rd32(device, 0x10a580) != 0x00000001);
+
+	/* write the packet */
+	nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
+				pmu->send.base));
+	nvkm_wr32(device, 0x10a1c4, process);
+	nvkm_wr32(device, 0x10a1c4, message);
+	nvkm_wr32(device, 0x10a1c4, data0);
+	nvkm_wr32(device, 0x10a1c4, data1);
+	nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);
+
+	/* release data segment access */
+	nvkm_wr32(device, 0x10a580, 0x00000000);
+
+	/* wait for reply, if requested */
+	if (reply) {
+		wait_event(pmu->recv.wait, (pmu->recv.process == 0));
+		reply[0] = pmu->recv.data[0];
+		reply[1] = pmu->recv.data[1];
+	}
+
+	mutex_unlock(&subdev->mutex);
+	return 0;
+}
+
+void
+gt215_pmu_recv(struct nvkm_pmu *pmu)
+{
+	struct nvkm_subdev *subdev = &pmu->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 process, message, data0, data1;
+
+	/* nothing to do if GET == PUT */
+	u32 addr =  nvkm_rd32(device, 0x10a4cc);
+	if (addr == nvkm_rd32(device, 0x10a4c8))
+		return;
+
+	/* acquire data segment access */
+	do {
+		nvkm_wr32(device, 0x10a580, 0x00000002);
+	} while (nvkm_rd32(device, 0x10a580) != 0x00000002);
+
+	/* read the packet */
+	nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
+				pmu->recv.base));
+	process = nvkm_rd32(device, 0x10a1c4);
+	message = nvkm_rd32(device, 0x10a1c4);
+	data0   = nvkm_rd32(device, 0x10a1c4);
+	data1   = nvkm_rd32(device, 0x10a1c4);
+	nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);
+
+	/* release data segment access */
+	nvkm_wr32(device, 0x10a580, 0x00000000);
+
+	/* wake process if it's waiting on a synchronous reply */
+	if (pmu->recv.process) {
+		if (process == pmu->recv.process &&
+		    message == pmu->recv.message) {
+			pmu->recv.data[0] = data0;
+			pmu->recv.data[1] = data1;
+			pmu->recv.process = 0;
+			wake_up(&pmu->recv.wait);
+			return;
+		}
+	}
+
+	/* right now there's no other expected responses from the engine,
+	 * so assume that any unexpected message is an error.
+	 */
+	nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
+		  (char)((process & 0x000000ff) >>  0),
+		  (char)((process & 0x0000ff00) >>  8),
+		  (char)((process & 0x00ff0000) >> 16),
+		  (char)((process & 0xff000000) >> 24),
+		  process, message, data0, data1);
+}
+
+void
+gt215_pmu_intr(struct nvkm_pmu *pmu)
+{
+	struct nvkm_subdev *subdev = &pmu->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 disp = nvkm_rd32(device, 0x10a01c);
+	u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);
+
+	if (intr & 0x00000020) {
+		u32 stat = nvkm_rd32(device, 0x10a16c);
+		if (stat & 0x80000000) {
+			nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
+				   stat & 0x00ffffff,
+				   nvkm_rd32(device, 0x10a168));
+			nvkm_wr32(device, 0x10a16c, 0x00000000);
+			intr &= ~0x00000020;
+		}
+	}
+
+	if (intr & 0x00000040) {
+		schedule_work(&pmu->recv.work);
+		nvkm_wr32(device, 0x10a004, 0x00000040);
+		intr &= ~0x00000040;
+	}
+
+	if (intr & 0x00000080) {
+		nvkm_info(subdev, "wr32 %06x %08x\n",
+			  nvkm_rd32(device, 0x10a7a0),
+			  nvkm_rd32(device, 0x10a7a4));
+		nvkm_wr32(device, 0x10a004, 0x00000080);
+		intr &= ~0x00000080;
+	}
+
+	if (intr) {
+		nvkm_error(subdev, "intr %08x\n", intr);
+		nvkm_wr32(device, 0x10a004, intr);
+	}
+}
+
+void
+gt215_pmu_fini(struct nvkm_pmu *pmu)
+{
+	nvkm_wr32(pmu->subdev.device, 0x10a014, 0x00000060);
+}
+
+void
+gt215_pmu_reset(struct nvkm_pmu *pmu)
+{
+	struct nvkm_device *device = pmu->subdev.device;
+	nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
+	nvkm_rd32(device, 0x000200);
+}
+
+int
+gt215_pmu_init(struct nvkm_pmu *pmu)
+{
+	struct nvkm_device *device = pmu->subdev.device;
+	int i;
+
+	/* upload data segment */
+	nvkm_wr32(device, 0x10a1c0, 0x01000000);
+	for (i = 0; i < pmu->func->data.size / 4; i++)
+		nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);
+
+	/* upload code segment */
+	nvkm_wr32(device, 0x10a180, 0x01000000);
+	for (i = 0; i < pmu->func->code.size / 4; i++) {
+		if ((i & 0x3f) == 0)
+			nvkm_wr32(device, 0x10a188, i >> 6);
+		nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
+	}
+
+	/* start it running */
+	nvkm_wr32(device, 0x10a10c, 0x00000000);
+	nvkm_wr32(device, 0x10a104, 0x00000000);
+	nvkm_wr32(device, 0x10a100, 0x00000002);
+
+	/* wait for valid host->pmu ring configuration */
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x10a4d0))
+			break;
+	) < 0)
+		return -EBUSY;
+	pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
+	pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;
+
+	/* wait for valid pmu->host ring configuration */
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x10a4dc))
+			break;
+	) < 0)
+		return -EBUSY;
+	pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
+	pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;
+
+	nvkm_wr32(device, 0x10a010, 0x000000e0);
+	return 0;
+}
+
 static const struct nvkm_pmu_func
 gt215_pmu = {
 	.code.data = gt215_pmu_code,
 	.code.size = sizeof(gt215_pmu_code),
 	.data.data = gt215_pmu_data,
 	.data.size = sizeof(gt215_pmu_data),
+	.reset = gt215_pmu_reset,
+	.init = gt215_pmu_init,
+	.fini = gt215_pmu_fini,
+	.intr = gt215_pmu_intr,
+	.send = gt215_pmu_send,
+	.recv = gt215_pmu_recv,
 };
 
 int

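As an aside (not part of the commit), the framing used by gt215_pmu_send() and gt215_pmu_recv() above can be modelled as a fixed four-word ring slot. The struct below is only an illustrative sketch of that layout; no such type exists in the driver.

/* Illustrative model of one ring slot as streamed through 0x10a1c4:
 * four 32-bit words per message.  This struct is documentation only.
 */
struct example_gt215_pmu_msg {
	u32 process;	/* process id the message is addressed to/from */
	u32 message;	/* message id within that process */
	u32 data0;	/* first payload word */
	u32 data1;	/* second payload word */
};

/* The ring indices advance with "(addr + 1) & 0x0f" while only the low
 * three bits select a slot ("(addr & 0x07) << 4"), so bit 3 appears to
 * act as a lap bit -- which would explain the "tmp != (addr ^ 8)"
 * free-slot test in gt215_pmu_send().
 */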
+ 14 - 0
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h

@@ -18,8 +18,22 @@ struct nvkm_pmu_func {
 		u32  size;
 	} data;
 
+	void (*reset)(struct nvkm_pmu *);
+	int (*init)(struct nvkm_pmu *);
+	void (*fini)(struct nvkm_pmu *);
+	void (*intr)(struct nvkm_pmu *);
+	int (*send)(struct nvkm_pmu *, u32 reply[2], u32 process,
+		    u32 message, u32 data0, u32 data1);
+	void (*recv)(struct nvkm_pmu *);
 	void (*pgob)(struct nvkm_pmu *, bool);
 };
 
+void gt215_pmu_reset(struct nvkm_pmu *);
+int gt215_pmu_init(struct nvkm_pmu *);
+void gt215_pmu_fini(struct nvkm_pmu *);
+void gt215_pmu_intr(struct nvkm_pmu *);
+void gt215_pmu_recv(struct nvkm_pmu *);
+int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
+
 void gk110_pmu_pgob(struct nvkm_pmu *, bool);
 #endif
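To illustrate the point of the split (a hypothetical sketch, not code from this commit): a later chipset whose PMU firmware is managed elsewhere could fill in nvkm_pmu_func without the gt215 hooks. With the base code above treating them as optional, init/fini/intr become no-ops and nvkm_pmu_send() returns -ENODEV.

/* Hypothetical example only; "example_pmu" is a made-up name.  Hooks
 * that are left unset are simply skipped by the common base.c code.
 */
static const struct nvkm_pmu_func
example_pmu = {
	.reset = gt215_pmu_reset,
	/* no .init/.fini/.intr/.send/.recv and no embedded ucode */
};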