@@ -15,7 +15,6 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/hash.h>
-#include <linux/pmem.h>
 #include <linux/sort.h>
 #include <linux/io.h>
 #include <linux/nd.h>
@@ -169,6 +168,11 @@ bool is_nd_blk(struct device *dev)
 	return dev ? dev->type == &nd_blk_device_type : false;
 }
 
+bool is_nd_volatile(struct device *dev)
+{
+	return dev ? dev->type == &nd_volatile_device_type : false;
+}
+
 struct nd_region *to_nd_region(struct device *dev)
 {
 	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);
@@ -215,7 +219,7 @@ EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);
  */
 int nd_region_to_nstype(struct nd_region *nd_region)
 {
-	if (is_nd_pmem(&nd_region->dev)) {
+	if (is_memory(&nd_region->dev)) {
 		u16 i, alias;
 
 		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
@@ -243,7 +247,7 @@ static ssize_t size_show(struct device *dev,
 	struct nd_region *nd_region = to_nd_region(dev);
 	unsigned long long size = 0;
 
-	if (is_nd_pmem(dev)) {
+	if (is_memory(dev)) {
 		size = nd_region->ndr_size;
 	} else if (nd_region->ndr_mappings == 1) {
 		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
@@ -309,7 +313,7 @@ static ssize_t set_cookie_show(struct device *dev,
 	struct nd_interleave_set *nd_set = nd_region->nd_set;
 	ssize_t rc = 0;
 
-	if (is_nd_pmem(dev) && nd_set)
+	if (is_memory(dev) && nd_set)
 		/* pass, should be precluded by region_visible */;
 	else
 		return -ENXIO;
@@ -363,7 +367,7 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
 		if (!ndd)
 			return 0;
 
-		if (is_nd_pmem(&nd_region->dev)) {
+		if (is_memory(&nd_region->dev)) {
 			available += nd_pmem_available_dpa(nd_region,
 					nd_mapping, &overlap);
 			if (overlap > blk_max_overlap) {
@@ -549,10 +553,10 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
 	struct nd_interleave_set *nd_set = nd_region->nd_set;
 	int type = nd_region_to_nstype(nd_region);
 
-	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
+	if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
 		return 0;
 
-	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
+	if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
 		return 0;
 
 	if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
@@ -580,7 +584,7 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
 			|| type == ND_DEVICE_NAMESPACE_BLK)
 			&& a == &dev_attr_available_size.attr)
 		return a->mode;
-	else if (is_nd_pmem(dev) && nd_set)
+	else if (is_memory(dev) && nd_set)
 		return a->mode;
 
 	return 0;
@@ -637,7 +641,7 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
 {
 	struct nd_region *nd_region;
 
-	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
+	if (!probe && is_nd_region(dev)) {
 		int i;
 
 		nd_region = to_nd_region(dev);
@@ -655,12 +659,8 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
 			if (ndd)
 				atomic_dec(&nvdimm->busy);
 		}
-
-		if (is_nd_pmem(dev))
-			return;
 	}
-	if (dev->parent && (is_nd_blk(dev->parent) || is_nd_pmem(dev->parent))
-			&& probe) {
+	if (dev->parent && is_nd_region(dev->parent) && probe) {
 		nd_region = to_nd_region(dev->parent);
 		nvdimm_bus_lock(dev);
 		if (nd_region->ns_seed == dev)
@@ -1048,8 +1048,8 @@ void nvdimm_flush(struct nd_region *nd_region)
 	 * The first wmb() is needed to 'sfence' all previous writes
 	 * such that they are architecturally visible for the platform
 	 * buffer flush. Note that we've already arranged for pmem
-	 * writes to avoid the cache via arch_memcpy_to_pmem(). The
-	 * final wmb() ensures ordering for the NVDIMM flush write.
+	 * writes to avoid the cache via memcpy_flushcache(). The final
+	 * wmb() ensures ordering for the NVDIMM flush write.
 	 */
 	wmb();
 	for (i = 0; i < nd_region->ndr_mappings; i++)
@@ -1071,8 +1071,9 @@ int nvdimm_has_flush(struct nd_region *nd_region)
 {
 	int i;
 
-	/* no nvdimm == flushing capability unknown */
-	if (nd_region->ndr_mappings == 0)
+	/* no nvdimm or pmem api == flushing capability unknown */
+	if (nd_region->ndr_mappings == 0
+			|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
 		return -ENXIO;
 
 	for (i = 0; i < nd_region->ndr_mappings; i++) {
@@ -1092,6 +1093,12 @@ int nvdimm_has_flush(struct nd_region *nd_region)
 }
 EXPORT_SYMBOL_GPL(nvdimm_has_flush);
 
+int nvdimm_has_cache(struct nd_region *nd_region)
+{
+	return is_nd_pmem(&nd_region->dev);
+}
+EXPORT_SYMBOL_GPL(nvdimm_has_cache);
+
 void __exit nd_region_devs_exit(void)
 {
 	ida_destroy(&region_ida);
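
Usage note (illustrative, not part of the patch): the helpers touched above are meant to let region consumers pick a flush strategy. The sketch below is a minimal, hypothetical caller; example_flush_policy() and its policy are assumptions, while nvdimm_has_flush(), nvdimm_has_cache(), and nvdimm_flush() are the interfaces from this file.

/*
 * Hypothetical consumer sketch: decide whether writes need CPU cache
 * management and whether to kick the NVDIMM write-pending-queue
 * flush after I/O completes.
 */
static void example_flush_policy(struct nd_region *nd_region)
{
	/* < 0: capability unknown; 0: no hints; > 0: flush hints exist */
	int has_flush = nvdimm_has_flush(nd_region);

	if (has_flush > 0)
		nvdimm_flush(nd_region);	/* write-pending-queue flush */

	/* pmem regions need cache flushes for persistence, volatile ones do not */
	if (nvdimm_has_cache(nd_region))
		pr_debug("region requires cache flushes for persistence\n");
}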