@@ -78,7 +78,7 @@ static const char *rproc_crash_to_string(enum rproc_crash_type type)
  * will try to access an unmapped device address.
  */
 static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
-		unsigned long iova, int flags, void *token)
+			     unsigned long iova, int flags, void *token)
 {
 	struct rproc *rproc = token;
 
@@ -236,8 +236,8 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
 	}
 	notifyid = ret;
 
-	dev_dbg(dev, "vring%d: va %p dma %llx size %x idr %d\n", i, va,
-		(unsigned long long)dma, size, notifyid);
+	dev_dbg(dev, "vring%d: va %p dma %pad size 0x%x idr %d\n",
+		i, va, &dma, size, notifyid);
 
 	rvring->va = va;
 	rvring->dma = dma;
@@ -263,19 +263,13 @@ rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
 	struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
 	struct rproc_vring *rvring = &rvdev->vring[i];
 
-	dev_dbg(dev, "vdev rsc: vring%d: da %x, qsz %d, align %d\n",
-		i, vring->da, vring->num, vring->align);
-
-	/* make sure reserved bytes are zeroes */
-	if (vring->reserved) {
-		dev_err(dev, "vring rsc has non zero reserved bytes\n");
-		return -EINVAL;
-	}
+	dev_dbg(dev, "vdev rsc: vring%d: da 0x%x, qsz %d, align %d\n",
+		i, vring->da, vring->num, vring->align);
 
 	/* verify queue size and vring alignment are sane */
 	if (!vring->num || !vring->align) {
 		dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
-				vring->num, vring->align);
+			vring->num, vring->align);
 		return -EINVAL;
 	}
 
@@ -330,7 +324,7 @@ void rproc_free_vring(struct rproc_vring *rvring)
  * Returns 0 on success, or an appropriate error code otherwise
  */
 static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
-							int offset, int avail)
+			     int offset, int avail)
 {
 	struct device *dev = &rproc->dev;
 	struct rproc_vdev *rvdev;
@@ -349,7 +343,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
 		return -EINVAL;
 	}
 
-	dev_dbg(dev, "vdev rsc: id %d, dfeatures %x, cfg len %d, %d vrings\n",
+	dev_dbg(dev, "vdev rsc: id %d, dfeatures 0x%x, cfg len %d, %d vrings\n",
 		rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);
 
 	/* we currently support only two vrings per rvdev */
@@ -358,7 +352,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
 		return -EINVAL;
 	}
 
-	rvdev = kzalloc(sizeof(struct rproc_vdev), GFP_KERNEL);
+	rvdev = kzalloc(sizeof(*rvdev), GFP_KERNEL);
 	if (!rvdev)
 		return -ENOMEM;
 
@@ -407,7 +401,7 @@ free_rvdev:
  * Returns 0 on success, or an appropriate error code otherwise
  */
 static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
-							int offset, int avail)
+			      int offset, int avail)
 {
 	struct rproc_mem_entry *trace;
 	struct device *dev = &rproc->dev;
@@ -455,8 +449,8 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
 
 	rproc->num_traces++;
 
-	dev_dbg(dev, "%s added: va %p, da 0x%x, len 0x%x\n", name, ptr,
-		rsc->da, rsc->len);
+	dev_dbg(dev, "%s added: va %p, da 0x%x, len 0x%x\n",
+		name, ptr, rsc->da, rsc->len);
 
 	return 0;
 }
@@ -487,7 +481,7 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
  * are outside those ranges.
  */
 static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
-							int offset, int avail)
+			       int offset, int avail)
 {
 	struct rproc_mem_entry *mapping;
 	struct device *dev = &rproc->dev;
@@ -530,7 +524,7 @@ static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
 	list_add_tail(&mapping->node, &rproc->mappings);
 
 	dev_dbg(dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
-			rsc->pa, rsc->da, rsc->len);
+		rsc->pa, rsc->da, rsc->len);
 
 	return 0;
 
@@ -558,9 +552,8 @@ out:
  * pressure is important; it may have a substantial impact on performance.
  */
 static int rproc_handle_carveout(struct rproc *rproc,
-					struct fw_rsc_carveout *rsc,
-					int offset, int avail)
-
+				 struct fw_rsc_carveout *rsc,
+				 int offset, int avail)
 {
 	struct rproc_mem_entry *carveout, *mapping;
 	struct device *dev = &rproc->dev;
@@ -579,8 +572,8 @@ static int rproc_handle_carveout(struct rproc *rproc,
 		return -EINVAL;
 	}
 
-	dev_dbg(dev, "carveout rsc: da %x, pa %x, len %x, flags %x\n",
-		rsc->da, rsc->pa, rsc->len, rsc->flags);
+	dev_dbg(dev, "carveout rsc: name: %s, da 0x%x, pa 0x%x, len 0x%x, flags 0x%x\n",
+		rsc->name, rsc->da, rsc->pa, rsc->len, rsc->flags);
 
 	carveout = kzalloc(sizeof(*carveout), GFP_KERNEL);
 	if (!carveout)
@@ -588,13 +581,14 @@ static int rproc_handle_carveout(struct rproc *rproc,
 
 	va = dma_alloc_coherent(dev->parent, rsc->len, &dma, GFP_KERNEL);
 	if (!va) {
-		dev_err(dev->parent, "dma_alloc_coherent err: %d\n", rsc->len);
+		dev_err(dev->parent,
+			"failed to allocate dma memory: len 0x%x\n", rsc->len);
 		ret = -ENOMEM;
 		goto free_carv;
 	}
 
-	dev_dbg(dev, "carveout va %p, dma %llx, len 0x%x\n", va,
-		(unsigned long long)dma, rsc->len);
+	dev_dbg(dev, "carveout va %p, dma %pad, len 0x%x\n",
+		va, &dma, rsc->len);
 
 	/*
 	 * Ok, this is non-standard.
@@ -616,13 +610,12 @@ static int rproc_handle_carveout(struct rproc *rproc,
 	if (rproc->domain) {
 		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
 		if (!mapping) {
-			dev_err(dev, "kzalloc mapping failed\n");
 			ret = -ENOMEM;
 			goto dma_free;
 		}
 
 		ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len,
-								rsc->flags);
+				rsc->flags);
 		if (ret) {
 			dev_err(dev, "iommu_map failed: %d\n", ret);
 			goto free_mapping;
@@ -639,8 +632,8 @@ static int rproc_handle_carveout(struct rproc *rproc,
 		mapping->len = rsc->len;
 		list_add_tail(&mapping->node, &rproc->mappings);
 
-		dev_dbg(dev, "carveout mapped 0x%x to 0x%llx\n",
-			rsc->da, (unsigned long long)dma);
+		dev_dbg(dev, "carveout mapped 0x%x to %pad\n",
+			rsc->da, &dma);
 	}
 
 	/*
@@ -697,17 +690,13 @@ static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
 	[RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
 	[RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
 	[RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
-	[RSC_VDEV] = NULL, /* VDEVs were handled upon registrarion */
+	[RSC_VDEV] = (rproc_handle_resource_t)rproc_count_vrings,
 };
 
 static rproc_handle_resource_t rproc_vdev_handler[RSC_LAST] = {
 	[RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev,
 };
 
-static rproc_handle_resource_t rproc_count_vrings_handler[RSC_LAST] = {
-	[RSC_VDEV] = (rproc_handle_resource_t)rproc_count_vrings,
-};
-
 /* handle firmware resource entries before booting the remote processor */
 static int rproc_handle_resources(struct rproc *rproc, int len,
 				  rproc_handle_resource_t handlers[RSC_LAST])
@@ -757,6 +746,7 @@ static int rproc_handle_resources(struct rproc *rproc, int len,
 static void rproc_resource_cleanup(struct rproc *rproc)
 {
 	struct rproc_mem_entry *entry, *tmp;
+	struct rproc_vdev *rvdev, *rvtmp;
 	struct device *dev = &rproc->dev;
 
 	/* clean up debugfs trace entries */
@@ -775,7 +765,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
 		if (unmapped != entry->len) {
 			/* nothing much to do besides complaining */
 			dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
-								unmapped);
+				unmapped);
 		}
 
 		list_del(&entry->node);
@@ -789,6 +779,10 @@ static void rproc_resource_cleanup(struct rproc *rproc)
 		list_del(&entry->node);
 		kfree(entry);
 	}
+
+	/* clean up remote vdev entries */
+	list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node)
+		rproc_remove_virtio_dev(rvdev);
 }
 
 /*
@@ -801,9 +795,6 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
 	struct resource_table *table, *loaded_table;
 	int ret, tablesz;
 
-	if (!rproc->table_ptr)
-		return -ENOMEM;
-
 	ret = rproc_fw_sanity_check(rproc, fw);
 	if (ret)
 		return ret;
@@ -830,9 +821,25 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
 		goto clean_up;
 	}
 
-	/* Verify that resource table in loaded fw is unchanged */
-	if (rproc->table_csum != crc32(0, table, tablesz)) {
-		dev_err(dev, "resource checksum failed, fw changed?\n");
+	/*
+	 * Create a copy of the resource table. When a virtio device starts
+	 * and calls vring_new_virtqueue() the address of the allocated vring
+	 * will be stored in the cached_table. Before the device is started,
+	 * cached_table will be copied into device memory.
+	 */
+	rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
+	if (!rproc->cached_table)
+		goto clean_up;
+
+	rproc->table_ptr = rproc->cached_table;
+
+	/* reset max_notifyid */
+	rproc->max_notifyid = -1;
+
+	/* look for virtio devices and register them */
+	ret = rproc_handle_resources(rproc, tablesz, rproc_vdev_handler);
+	if (ret) {
+		dev_err(dev, "Failed to handle vdev resources: %d\n", ret);
 		goto clean_up;
 	}
 
@@ -840,49 +847,50 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
 	ret = rproc_handle_resources(rproc, tablesz, rproc_loading_handlers);
 	if (ret) {
 		dev_err(dev, "Failed to process resources: %d\n", ret);
-		goto clean_up;
+		goto clean_up_resources;
 	}
 
 	/* load the ELF segments to memory */
 	ret = rproc_load_segments(rproc, fw);
 	if (ret) {
 		dev_err(dev, "Failed to load program segments: %d\n", ret);
-		goto clean_up;
+		goto clean_up_resources;
 	}
 
 	/*
 	 * The starting device has been given the rproc->cached_table as the
 	 * resource table. The address of the vring along with the other
 	 * allocated resources (carveouts etc) is stored in cached_table.
-	 * In order to pass this information to the remote device we must
-	 * copy this information to device memory.
+	 * In order to pass this information to the remote device we must copy
+	 * this information to device memory. We also update the table_ptr so
+	 * that any subsequent changes will be applied to the loaded version.
 	 */
 	loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
-	if (loaded_table)
+	if (loaded_table) {
 		memcpy(loaded_table, rproc->cached_table, tablesz);
+		rproc->table_ptr = loaded_table;
+	}
 
 	/* power up the remote processor */
 	ret = rproc->ops->start(rproc);
 	if (ret) {
 		dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
-		goto clean_up;
+		goto clean_up_resources;
 	}
 
-	/*
-	 * Update table_ptr so that all subsequent vring allocations and
-	 * virtio fields manipulation update the actual loaded resource table
-	 * in device memory.
-	 */
-	rproc->table_ptr = loaded_table;
-
 	rproc->state = RPROC_RUNNING;
 
 	dev_info(dev, "remote processor %s is now up\n", rproc->name);
 
 	return 0;
 
-clean_up:
+clean_up_resources:
 	rproc_resource_cleanup(rproc);
+clean_up:
+	kfree(rproc->cached_table);
+	rproc->cached_table = NULL;
+	rproc->table_ptr = NULL;
+
 	rproc_disable_iommu(rproc);
 	return ret;
 }
@@ -898,42 +906,11 @@ clean_up:
 static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
 {
 	struct rproc *rproc = context;
-	struct resource_table *table;
-	int ret, tablesz;
-
-	if (rproc_fw_sanity_check(rproc, fw) < 0)
-		goto out;
-
-	/* look for the resource table */
-	table = rproc_find_rsc_table(rproc, fw, &tablesz);
-	if (!table)
-		goto out;
-
-	rproc->table_csum = crc32(0, table, tablesz);
-
-	/*
-	 * Create a copy of the resource table. When a virtio device starts
-	 * and calls vring_new_virtqueue() the address of the allocated vring
-	 * will be stored in the cached_table. Before the device is started,
-	 * cached_table will be copied into devic memory.
-	 */
-	rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
-	if (!rproc->cached_table)
-		goto out;
-
-	rproc->table_ptr = rproc->cached_table;
 
-	/* count the number of notify-ids */
-	rproc->max_notifyid = -1;
-	ret = rproc_handle_resources(rproc, tablesz,
-				     rproc_count_vrings_handler);
-	if (ret)
-		goto out;
-
-	/* look for virtio devices and register them */
-	ret = rproc_handle_resources(rproc, tablesz, rproc_vdev_handler);
+	/* if rproc is marked always-on, request it to boot */
+	if (rproc->auto_boot)
+		rproc_boot_nowait(rproc);
 
-out:
 	release_firmware(fw);
 	/* allow rproc_del() contexts, if any, to proceed */
 	complete_all(&rproc->firmware_loading_complete);
@@ -969,7 +946,7 @@ static int rproc_add_virtio_devices(struct rproc *rproc)
  * rproc_trigger_recovery() - recover a remoteproc
  * @rproc: the remote processor
  *
- * The recovery is done by reseting all the virtio devices, that way all the
+ * The recovery is done by resetting all the virtio devices, that way all the
  * rpmsg drivers will be reseted along with the remote processor making the
  * remoteproc functional again.
  *
@@ -977,23 +954,23 @@ static int rproc_add_virtio_devices(struct rproc *rproc)
  */
 int rproc_trigger_recovery(struct rproc *rproc)
 {
-	struct rproc_vdev *rvdev, *rvtmp;
-
 	dev_err(&rproc->dev, "recovering %s\n", rproc->name);
 
 	init_completion(&rproc->crash_comp);
 
-	/* clean up remote vdev entries */
-	list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node)
-		rproc_remove_virtio_dev(rvdev);
+	/* shut down the remote */
+	/* TODO: make sure this works with rproc->power > 1 */
+	rproc_shutdown(rproc);
 
 	/* wait until there is no more rproc users */
 	wait_for_completion(&rproc->crash_comp);
 
-	/* Free the copy of the resource table */
-	kfree(rproc->cached_table);
+	/*
+	 * boot the remote processor up again
+	 */
+	rproc_boot(rproc);
 
-	return rproc_add_virtio_devices(rproc);
+	return 0;
 }
 
 /**
@@ -1058,20 +1035,6 @@ static int __rproc_boot(struct rproc *rproc, bool wait)
 		return ret;
 	}
 
-	/* loading a firmware is required */
-	if (!rproc->firmware) {
-		dev_err(dev, "%s: no firmware to load\n", __func__);
-		ret = -EINVAL;
-		goto unlock_mutex;
-	}
-
-	/* prevent underlying implementation from being removed */
-	if (!try_module_get(dev->parent->driver->owner)) {
-		dev_err(dev, "%s: can't get owner\n", __func__);
-		ret = -EINVAL;
-		goto unlock_mutex;
-	}
-
 	/* skip the boot process if rproc is already powered up */
 	if (atomic_inc_return(&rproc->power) > 1) {
 		ret = 0;
@@ -1096,10 +1059,8 @@ static int __rproc_boot(struct rproc *rproc, bool wait)
 	release_firmware(firmware_p);
 
 downref_rproc:
-	if (ret) {
-		module_put(dev->parent->driver->owner);
+	if (ret)
 		atomic_dec(&rproc->power);
-	}
 unlock_mutex:
 	mutex_unlock(&rproc->lock);
 	return ret;
@@ -1173,8 +1134,10 @@ void rproc_shutdown(struct rproc *rproc)
 
 	rproc_disable_iommu(rproc);
 
-	/* Give the next start a clean resource table */
-	rproc->table_ptr = rproc->cached_table;
+	/* Free the copy of the resource table */
+	kfree(rproc->cached_table);
+	rproc->cached_table = NULL;
+	rproc->table_ptr = NULL;
 
 	/* if in crash state, unlock crash handler */
 	if (rproc->state == RPROC_CRASHED)
@@ -1186,8 +1149,6 @@ void rproc_shutdown(struct rproc *rproc)
 
 out:
 	mutex_unlock(&rproc->lock);
-	if (!ret)
-		module_put(dev->parent->driver->owner);
 }
 EXPORT_SYMBOL(rproc_shutdown);
 
@@ -1216,6 +1177,12 @@ struct rproc *rproc_get_by_phandle(phandle phandle)
 	mutex_lock(&rproc_list_mutex);
 	list_for_each_entry(r, &rproc_list, node) {
 		if (r->dev.parent && r->dev.parent->of_node == np) {
+			/* prevent underlying implementation from being removed */
+			if (!try_module_get(r->dev.parent->driver->owner)) {
+				dev_err(&r->dev, "can't get owner\n");
+				break;
+			}
+
 			rproc = r;
 			get_device(&rproc->dev);
 			break;
@@ -1335,11 +1302,11 @@ static struct device_type rproc_type = {
  * On success the new rproc is returned, and on failure, NULL.
  *
  * Note: _never_ directly deallocate @rproc, even if it was not registered
- * yet. Instead, when you need to unroll rproc_alloc(), use rproc_put().
+ * yet. Instead, when you need to unroll rproc_alloc(), use rproc_free().
  */
 struct rproc *rproc_alloc(struct device *dev, const char *name,
-				const struct rproc_ops *ops,
-				const char *firmware, int len)
+			  const struct rproc_ops *ops,
+			  const char *firmware, int len)
 {
 	struct rproc *rproc;
 	char *p, *template = "rproc-%s-fw";
@@ -1359,7 +1326,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
 	 */
 	name_len = strlen(name) + strlen(template) - 2 + 1;
 
-	rproc = kzalloc(sizeof(struct rproc) + len + name_len, GFP_KERNEL);
+	rproc = kzalloc(sizeof(*rproc) + len + name_len, GFP_KERNEL);
 	if (!rproc)
 		return NULL;
 
@@ -1374,6 +1341,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
 	rproc->name = name;
 	rproc->ops = ops;
 	rproc->priv = &rproc[1];
+	rproc->auto_boot = true;
 
 	device_initialize(&rproc->dev);
 	rproc->dev.parent = dev;
@@ -1413,7 +1381,22 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
 EXPORT_SYMBOL(rproc_alloc);
 
 /**
- * rproc_put() - unroll rproc_alloc()
+ * rproc_free() - unroll rproc_alloc()
+ * @rproc: the remote processor handle
+ *
+ * This function decrements the rproc dev refcount.
+ *
+ * If no one holds any reference to rproc anymore, then its refcount would
+ * now drop to zero, and it would be freed.
+ */
+void rproc_free(struct rproc *rproc)
+{
+	put_device(&rproc->dev);
+}
+EXPORT_SYMBOL(rproc_free);
+
+/**
+ * rproc_put() - release rproc reference
  * @rproc: the remote processor handle
  *
  * This function decrements the rproc dev refcount.
@@ -1423,6 +1406,7 @@ EXPORT_SYMBOL(rproc_alloc);
  */
 void rproc_put(struct rproc *rproc)
 {
+	module_put(rproc->dev.parent->driver->owner);
 	put_device(&rproc->dev);
 }
 EXPORT_SYMBOL(rproc_put);
@@ -1438,7 +1422,7 @@ EXPORT_SYMBOL(rproc_put);
  *
  * After rproc_del() returns, @rproc isn't freed yet, because
  * of the outstanding reference created by rproc_alloc. To decrement that
- * one last refcount, one still needs to call rproc_put().
+ * one last refcount, one still needs to call rproc_free().
  *
  * Returns 0 on success and -EINVAL if @rproc isn't valid.
 */
@@ -1452,13 +1436,15 @@ int rproc_del(struct rproc *rproc)
 	/* if rproc is just being registered, wait */
 	wait_for_completion(&rproc->firmware_loading_complete);
 
+	/* if rproc is marked always-on, rproc_add() booted it */
+	/* TODO: make sure this works with rproc->power > 1 */
+	if (rproc->auto_boot)
+		rproc_shutdown(rproc);
+
 	/* clean up remote vdev entries */
 	list_for_each_entry_safe(rvdev, tmp, &rproc->rvdevs, node)
 		rproc_remove_virtio_dev(rvdev);
 
-	/* Free the copy of the resource table */
-	kfree(rproc->cached_table);
-
 	/* the rproc is downref'ed as soon as it's removed from the klist */
 	mutex_lock(&rproc_list_mutex);
 	list_del(&rproc->node);
|