@@ -21,7 +21,8 @@
  *
  * Authors: Ben Skeggs
  */
-#include "gf100.h"
+#define gf100_ram(p) container_of((p), struct gf100_ram, base)
+#include "ram.h"
 #include "ramfuc.h"
 
 #include <core/option.h>
@@ -107,7 +108,7 @@ static void
 gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
 {
 	struct gf100_ram *ram = container_of(fuc, typeof(*ram), fuc);
-	struct nvkm_fb *fb = nvkm_fb(ram);
+	struct nvkm_fb *fb = ram->base.fb;
 	struct nvkm_device *device = fb->subdev.device;
 	u32 part = nvkm_rd32(device, 0x022438), i;
 	u32 mask = nvkm_rd32(device, 0x022554);
@@ -124,14 +125,14 @@ gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
 }
 
 static int
-gf100_ram_calc(struct nvkm_fb *fb, u32 freq)
+gf100_ram_calc(struct nvkm_ram *base, u32 freq)
 {
-	struct nvkm_subdev *subdev = &fb->subdev;
+	struct gf100_ram *ram = gf100_ram(base);
+	struct gf100_ramfuc *fuc = &ram->fuc;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_clk *clk = device->clk;
 	struct nvkm_bios *bios = device->bios;
-	struct gf100_ram *ram = (void *)fb->ram;
-	struct gf100_ramfuc *fuc = &ram->fuc;
 	struct nvbios_ramcfg cfg;
 	u8 ver, cnt, len, strap;
 	struct {
@@ -152,7 +153,7 @@ gf100_ram_calc(struct nvkm_fb *fb, u32 freq)
 	}
 
 	/* locate specific data set for the attached memory */
-	strap = nvbios_ramcfg_index(nv_subdev(fb));
+	strap = nvbios_ramcfg_index(subdev);
 	if (strap >= cnt) {
 		nvkm_error(subdev, "invalid ramcfg strap\n");
 		return -EINVAL;
@@ -177,7 +178,7 @@ gf100_ram_calc(struct nvkm_fb *fb, u32 freq)
 		timing.data = 0;
 	}
 
-	ret = ram_init(fuc, fb);
+	ret = ram_init(fuc, ram->base.fb);
 	if (ret)
 		return ret;
 
@@ -212,8 +213,8 @@ gf100_ram_calc(struct nvkm_fb *fb, u32 freq)
 
 	if (mode == 1 && from == 0) {
 		/* calculate refpll */
-		ret = gt215_pll_calc(nv_subdev(fb), &ram->refpll,
-				     ram->mempll.refclk, &N1, NULL, &M1, &P);
+		ret = gt215_pll_calc(subdev, &ram->refpll, ram->mempll.refclk,
+				     &N1, NULL, &M1, &P);
 		if (ret <= 0) {
 			nvkm_error(subdev, "unable to calc refpll\n");
 			return ret ? ret : -ERANGE;
@@ -227,7 +228,7 @@ gf100_ram_calc(struct nvkm_fb *fb, u32 freq)
 	ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
 
 	/* calculate mempll */
-	ret = gt215_pll_calc(nv_subdev(fb), &ram->mempll, freq,
+	ret = gt215_pll_calc(subdev, &ram->mempll, freq,
 			     &N1, NULL, &M1, &P);
 	if (ret <= 0) {
 		nvkm_error(subdev, "unable to calc refpll\n");
@@ -404,49 +405,48 @@ gf100_ram_calc(struct nvkm_fb *fb, u32 freq)
 }
 
 static int
-gf100_ram_prog(struct nvkm_fb *fb)
+gf100_ram_prog(struct nvkm_ram *base)
 {
-	struct nvkm_device *device = nv_device(fb);
-	struct gf100_ram *ram = (void *)fb->ram;
-	struct gf100_ramfuc *fuc = &ram->fuc;
-	ram_exec(fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
+	struct gf100_ram *ram = gf100_ram(base);
+	struct nvkm_device *device = ram->base.fb->subdev.device;
+	ram_exec(&ram->fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
 	return 0;
 }
 
 static void
-gf100_ram_tidy(struct nvkm_fb *fb)
+gf100_ram_tidy(struct nvkm_ram *base)
 {
-	struct gf100_ram *ram = (void *)fb->ram;
-	struct gf100_ramfuc *fuc = &ram->fuc;
-	ram_exec(fuc, false);
+	struct gf100_ram *ram = gf100_ram(base);
+	ram_exec(&ram->fuc, false);
 }
 
 extern const u8 gf100_pte_storage_type_map[256];
 
 void
-gf100_ram_put(struct nvkm_fb *fb, struct nvkm_mem **pmem)
+gf100_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
 {
-	struct nvkm_ltc *ltc = nvkm_ltc(fb);
+	struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
 	struct nvkm_mem *mem = *pmem;
 
 	*pmem = NULL;
 	if (unlikely(mem == NULL))
 		return;
 
-	mutex_lock(&fb->subdev.mutex);
+	mutex_lock(&ram->fb->subdev.mutex);
 	if (mem->tag)
 		ltc->tags_free(ltc, &mem->tag);
-	__nv50_ram_put(fb, mem);
-	mutex_unlock(&fb->subdev.mutex);
+	__nv50_ram_put(ram, mem);
+	mutex_unlock(&ram->fb->subdev.mutex);
 
 	kfree(mem);
 }
 
 int
-gf100_ram_get(struct nvkm_fb *fb, u64 size, u32 align, u32 ncmin,
+gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 	      u32 memtype, struct nvkm_mem **pmem)
 {
-	struct nvkm_mm *mm = &fb->vram;
+	struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
+	struct nvkm_mm *mm = &ram->vram;
 	struct nvkm_mm_node *r;
 	struct nvkm_mem *mem;
 	int type = (memtype & 0x0ff);
@@ -454,9 +454,9 @@ gf100_ram_get(struct nvkm_fb *fb, u64 size, u32 align, u32 ncmin,
 	const bool comp = gf100_pte_storage_type_map[type] != type;
 	int ret;
 
-	size  >>= 12;
-	align >>= 12;
-	ncmin >>= 12;
+	size  >>= NVKM_RAM_MM_SHIFT;
+	align >>= NVKM_RAM_MM_SHIFT;
+	ncmin >>= NVKM_RAM_MM_SHIFT;
 	if (!ncmin)
 		ncmin = size;
 
@@ -467,12 +467,10 @@ gf100_ram_get(struct nvkm_fb *fb, u64 size, u32 align, u32 ncmin,
 	INIT_LIST_HEAD(&mem->regions);
 	mem->size = size;
 
-	mutex_lock(&fb->subdev.mutex);
+	mutex_lock(&ram->fb->subdev.mutex);
 	if (comp) {
-		struct nvkm_ltc *ltc = nvkm_ltc(fb);
-
 		/* compression only works with lpages */
-		if (align == (1 << (17 - 12))) {
+		if (align == (1 << (17 - NVKM_RAM_MM_SHIFT))) {
 			int n = size >> 5;
 			ltc->tags_alloc(ltc, n, &mem->tag);
 		}
@@ -488,157 +486,158 @@ gf100_ram_get(struct nvkm_fb *fb, u64 size, u32 align, u32 ncmin,
 		else
 			ret = nvkm_mm_head(mm, 0, 1, size, ncmin, align, &r);
 		if (ret) {
-			mutex_unlock(&fb->subdev.mutex);
-			fb->ram->put(fb, &mem);
+			mutex_unlock(&ram->fb->subdev.mutex);
+			ram->func->put(ram, &mem);
 			return ret;
 		}
 
 		list_add_tail(&r->rl_entry, &mem->regions);
 		size -= r->length;
 	} while (size);
-	mutex_unlock(&fb->subdev.mutex);
+	mutex_unlock(&ram->fb->subdev.mutex);
 
 	r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
-	mem->offset = (u64)r->offset << 12;
+	mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
 	*pmem = mem;
 	return 0;
 }
 
+static int
+gf100_ram_init(struct nvkm_ram *base)
+{
+	static const u8  train0[] = {
+		0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
+		0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
+	};
+	static const u32 train1[] = {
+		0x00000000, 0xffffffff,
+		0x55555555, 0xaaaaaaaa,
+		0x33333333, 0xcccccccc,
+		0xf0f0f0f0, 0x0f0f0f0f,
+		0x00ff00ff, 0xff00ff00,
+		0x0000ffff, 0xffff0000,
+	};
+	struct gf100_ram *ram = gf100_ram(base);
+	struct nvkm_device *device = ram->base.fb->subdev.device;
+	int i;
+
+	switch (ram->base.type) {
+	case NVKM_RAM_TYPE_GDDR5:
+		break;
+	default:
+		return 0;
+	}
+
+	/* prepare for ddr link training, and load training patterns */
+	for (i = 0; i < 0x30; i++) {
+		nvkm_wr32(device, 0x10f968, 0x00000000 | (i << 8));
+		nvkm_wr32(device, 0x10f96c, 0x00000000 | (i << 8));
+		nvkm_wr32(device, 0x10f920, 0x00000100 | train0[i % 12]);
+		nvkm_wr32(device, 0x10f924, 0x00000100 | train0[i % 12]);
+		nvkm_wr32(device, 0x10f918,              train1[i % 12]);
+		nvkm_wr32(device, 0x10f91c,              train1[i % 12]);
+		nvkm_wr32(device, 0x10f920, 0x00000000 | train0[i % 12]);
+		nvkm_wr32(device, 0x10f924, 0x00000000 | train0[i % 12]);
+		nvkm_wr32(device, 0x10f918,              train1[i % 12]);
+		nvkm_wr32(device, 0x10f91c,              train1[i % 12]);
+	}
+
+	return 0;
+}
+
+static const struct nvkm_ram_func
+gf100_ram_func = {
+	.init = gf100_ram_init,
+	.get = gf100_ram_get,
+	.put = gf100_ram_put,
+	.calc = gf100_ram_calc,
+	.prog = gf100_ram_prog,
+	.tidy = gf100_ram_tidy,
+};
+
 int
-gf100_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, u32 maskaddr, int size,
-		  void **pobject)
+gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
+	       u32 maskaddr, struct nvkm_ram *ram)
 {
-	struct nvkm_fb *fb = nvkm_fb(parent);
 	struct nvkm_subdev *subdev = &fb->subdev;
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_bios *bios = device->bios;
-	struct nvkm_ram *ram;
-	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+	const u32 rsvd_head = ( 256 * 1024); /* vga memory */
+	const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
 	u32 parts = nvkm_rd32(device, 0x022438);
 	u32 pmask = nvkm_rd32(device, maskaddr);
-	u32 bsize = nvkm_rd32(device, 0x10f20c);
-	u32 offset, length;
+	u64 bsize = (u64)nvkm_rd32(device, 0x10f20c) << 20;
+	u64 psize, size = 0;
+	enum nvkm_ram_type type = nvkm_fb_bios_memtype(bios);
 	bool uniform = true;
-	int ret, part;
-
-	ret = nvkm_ram_create_(parent, engine, oclass, size, pobject);
-	ram = *pobject;
-	if (ret)
-		return ret;
+	int ret, i;
 
 	nvkm_debug(subdev, "100800: %08x\n", nvkm_rd32(device, 0x100800));
 	nvkm_debug(subdev, "parts %08x mask %08x\n", parts, pmask);
 
-	ram->type = nvkm_fb_bios_memtype(bios);
-	ram->ranks = (nvkm_rd32(device, 0x10f200) & 0x00000004) ? 2 : 1;
-
 	/* read amount of vram attached to each memory controller */
-	for (part = 0; part < parts; part++) {
-		if (!(pmask & (1 << part))) {
-			u32 size = nvkm_rd32(device, 0x11020c + (part * 0x1000));
-			if (size != bsize) {
-				if (size < bsize)
-					bsize = size;
-				uniform = false;
-			}
-
-			nvkm_debug(subdev, "%d: size %08x\n", part, size);
-			ram->size += (u64)size << 20;
+	for (i = 0; i < parts; i++) {
+		if (pmask & (1 << i))
+			continue;
+
+		psize = (u64)nvkm_rd32(device, 0x11020c + (i * 0x1000)) << 20;
+		if (psize != bsize) {
+			if (psize < bsize)
+				bsize = psize;
+			uniform = false;
 		}
+
+		nvkm_debug(subdev, "%d: %d MiB\n", i, (u32)(psize >> 20));
+		size += psize;
 	}
 
+	ret = nvkm_ram_ctor(func, fb, type, size, 0, ram);
+	if (ret)
+		return ret;
+
+	nvkm_mm_fini(&ram->vram);
+
 	/* if all controllers have the same amount attached, there's no holes */
 	if (uniform) {
-		offset = rsvd_head;
-		length = (ram->size >> 12) - rsvd_head - rsvd_tail;
-		ret = nvkm_mm_init(&fb->vram, offset, length, 1);
+		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+				   (size - rsvd_head - rsvd_tail) >>
+				   NVKM_RAM_MM_SHIFT, 1);
+		if (ret)
+			return ret;
 	} else {
 		/* otherwise, address lowest common amount from 0GiB */
-		ret = nvkm_mm_init(&fb->vram, rsvd_head,
-				   (bsize << 8) * parts - rsvd_head, 1);
+		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+				   ((bsize * parts) - rsvd_head) >>
+				   NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
 
 		/* and the rest starting from (8GiB + common_size) */
-		offset = (0x0200000000ULL >> 12) + (bsize << 8);
-		length = (ram->size >> 12) - ((bsize * parts) << 8) - rsvd_tail;
-
-		ret = nvkm_mm_init(&fb->vram, offset, length, 1);
+		ret = nvkm_mm_init(&ram->vram, (0x0200000000ULL + bsize) >>
+				   NVKM_RAM_MM_SHIFT,
+				   (size - (bsize * parts) - rsvd_tail) >>
+				   NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
-			nvkm_mm_fini(&fb->vram);
-	}
-
-	if (ret)
-		return ret;
-
-	ram->get = gf100_ram_get;
-	ram->put = gf100_ram_put;
-	return 0;
-}
-
-static int
-gf100_ram_init(struct nvkm_object *object)
-{
-	struct nvkm_fb *fb = (void *)object->parent;
-	struct nvkm_device *device = fb->subdev.device;
-	struct gf100_ram *ram = (void *)object;
-	int ret, i;
-
-	ret = nvkm_ram_init(&ram->base);
-	if (ret)
-		return ret;
-
-	/* prepare for ddr link training, and load training patterns */
-	switch (ram->base.type) {
-	case NV_MEM_TYPE_GDDR5: {
-		static const u8  train0[] = {
-			0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
-			0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
-		};
-		static const u32 train1[] = {
-			0x00000000, 0xffffffff,
-			0x55555555, 0xaaaaaaaa,
-			0x33333333, 0xcccccccc,
-			0xf0f0f0f0, 0x0f0f0f0f,
-			0x00ff00ff, 0xff00ff00,
-			0x0000ffff, 0xffff0000,
-		};
-
-		for (i = 0; i < 0x30; i++) {
-			nvkm_wr32(device, 0x10f968, 0x00000000 | (i << 8));
-			nvkm_wr32(device, 0x10f96c, 0x00000000 | (i << 8));
-			nvkm_wr32(device, 0x10f920, 0x00000100 | train0[i % 12]);
-			nvkm_wr32(device, 0x10f924, 0x00000100 | train0[i % 12]);
-			nvkm_wr32(device, 0x10f918, train1[i % 12]);
-			nvkm_wr32(device, 0x10f91c, train1[i % 12]);
-			nvkm_wr32(device, 0x10f920, 0x00000000 | train0[i % 12]);
-			nvkm_wr32(device, 0x10f924, 0x00000000 | train0[i % 12]);
-			nvkm_wr32(device, 0x10f918, train1[i % 12]);
-			nvkm_wr32(device, 0x10f91c, train1[i % 12]);
-		}
-	} break;
-	default:
-		break;
+			return ret;
 	}
 
+	ram->ranks = (nvkm_rd32(device, 0x10f200) & 0x00000004) ? 2 : 1;
 	return 0;
 }
 
-static int
-gf100_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *fb = nvkm_fb(parent);
 	struct nvkm_subdev *subdev = &fb->subdev;
 	struct nvkm_bios *bios = subdev->device->bios;
 	struct gf100_ram *ram;
 	int ret;
 
-	ret = gf100_ram_create(parent, engine, oclass, 0x022554, &ram);
-	*pobject = nv_object(ram);
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
+
+	ret = gf100_ram_ctor(&gf100_ram_func, fb, 0x022554, &ram->base);
 	if (ret)
 		return ret;
 
@@ -654,17 +653,6 @@ gf100_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		return ret;
 	}
 
-	switch (ram->base.type) {
-	case NV_MEM_TYPE_GDDR5:
-		ram->base.calc = gf100_ram_calc;
-		ram->base.prog = gf100_ram_prog;
-		ram->base.tidy = gf100_ram_tidy;
-		break;
-	default:
-		nvkm_warn(subdev, "reclocking of this ram type unsupported\n");
-		return 0;
-	}
-
 	ram->fuc.r_0x10fe20 = ramfuc_reg(0x10fe20);
 	ram->fuc.r_0x10fe24 = ramfuc_reg(0x10fe24);
 	ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
@@ -725,14 +713,3 @@ gf100_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
 	return 0;
 }
-
-struct nvkm_oclass
-gf100_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ram_ctor,
-		.dtor = _nvkm_ram_dtor,
-		.init = gf100_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};