
Merged TI feature platform_base into ti-linux-4.19.y

* 'platform-ti-linux-4.19.y' of ssh://bitbucket.itg.ti.com/lcpdpublicdom/platform:
  arm64: dts: ti: k3-j721e-main: Add ti,notdpkt flag to sa2ul psil-config
  arm64: dts: ti: k3-am65-main: Add ti,notdpkt flag to sa2ul psil-config
  crypto: sa2ul: Export function to enable trng
  crypto: sa2ul: Add a member to store base address
  char: hw_random: Kconfig: For ARCH_K3 add a dependency on SA2UL
  dmaengine: ti: k3-udma: Add support for suppressing TDC message
  dt-bindings: dma: ti: k3-udma: Add option to suppress TDCM
  dmaengine: ti: k3-udma: Helper function to reset udma_chan parameters
  dmaengine: ti: k3-udma: Use different byte counters in tx_status
  dmaengine: ti: k3-udma: In case of error in filter_fn, reset the direction
  dmaengine: ti: k3-udma: rework rflow management code
  dmaengine: ti: k3-navss-udma: drop skip_psil cfg option
  dmaengine: ti: k3-udma: print board res ranges
  dmaengine: ti: k3-navss-udma: improve err print on rflow reservation
  soc: ti: k3-ringacc: fix k3_ringacc_ring_reset_dma
  arm64: dts: k3-j721e: jailhouse: Disable SW10 from root cell
  arm64: dts: k3-j721e: Add gpio-keys on common processor board

Signed-off-by: LCPD Auto Merger <lcpd_integration@list.ti.com>
LCPD Auto Merger, 6 years ago
parent
commit 6af0adc4de

+ 2 - 0
Documentation/devicetree/bindings/dma/ti/k3-udma.txt

@@ -82,6 +82,8 @@ Configuration node Optional properties:
 			descriptor.
 - ti,psd-size:		Size of the Protocol Specific Data section of the
 			descriptor.
+- ti,notdpkt:		The Teardown Completion Message on the thread must be
+			suppressed.
 
 Example:
 

+ 3 - 0
arch/arm64/boot/dts/ti/k3-am65-main.dtsi

@@ -474,18 +474,21 @@
 			linux,udma-mode = <UDMA_PKT_MODE>;
 			ti,needs-epib;
 			ti,psd-size = <64>;
+			ti,notdpkt;
 		};
 
 		ti,psil-config1 {
 			linux,udma-mode = <UDMA_PKT_MODE>;
 			ti,needs-epib;
 			ti,psd-size = <64>;
+			ti,notdpkt;
 		};
 
 		ti,psil-config2 {
 			linux,udma-mode = <UDMA_PKT_MODE>;
 			ti,needs-epib;
 			ti,psd-size = <64>;
+			ti,notdpkt;
 		};
 
 		eip76d_trng: trng@4e10000 {

+ 4 - 0
arch/arm64/boot/dts/ti/k3-j721e-common-proc-board-jailhouse.dtso

@@ -59,6 +59,10 @@
 	status = "disabled";
 };
 
+&sw10 {
+	status = "disabled";
+};
+
 &dss {
 	power-domains = <&k3_pds 152 TI_SCI_PD_SHARED>; /* share IP among VMs and RTOS */
 

+ 32 - 0
arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts

@@ -7,6 +7,7 @@
 
 #include "k3-j721e-som-p0.dtsi"
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
 #include <dt-bindings/net/ti-dp83867.h>
 #include <dt-bindings/pci/pci.h>
 #include <dt-bindings/sound/ti-mcasp.h>
@@ -124,6 +125,25 @@
 		};
 	};
 
+	gpio_keys: gpio-keys {
+		compatible = "gpio-keys";
+		autorepeat;
+		pinctrl-names = "default";
+		pinctrl-0 = <&sw10_button_pins_default &sw11_button_pins_default>;
+
+		sw10: sw10 {
+			label = "GPIO Key USER1";
+			linux,code = <BTN_0>;
+			gpios = <&main_gpio0 0 GPIO_ACTIVE_LOW>;
+		};
+
+		sw11: sw11 {
+			label = "GPIO Key USER2";
+			linux,code = <BTN_1>;
+			gpios = <&wkup_gpio0 7 GPIO_ACTIVE_LOW>;
+		};
+	};
+
 	vdd_mmc1: fixedregulator-sd {
 		compatible = "regulator-fixed";
 		regulator-name = "vdd_mmc1";
@@ -183,6 +203,12 @@
 			J721E_WKUP_IOPAD(0x38, PIN_INPUT, 0) /* (A23) MCU_OSPI1_LBCLKO */
 		>;
 	};
+
+	sw11_button_pins_default: sw11_button_pins_default {
+		pinctrl-single,pins = <
+			J721E_WKUP_IOPAD(0xcc, PIN_INPUT, 7) /* (G28) WKUP_GPIO0_7 */
+		>;
+	};
 };
 
 &wkup_uart0 {
@@ -335,6 +361,12 @@
 			J721E_IOPAD(0x1c4, PIN_INPUT, 5) /* SPI0_CS1.DP0_HPD */
 		>;
 	};
+
+	sw10_button_pins_default: sw10_button_pins_default {
+		pinctrl-single,pins = <
+			J721E_IOPAD(0x0, PIN_INPUT, 7) /* (AC18) EXTINTn.GPIO0_0 */
+		>;
+	};
 };
 
 &dss {

+ 3 - 0
arch/arm64/boot/dts/ti/k3-j721e-main.dtsi

@@ -531,18 +531,21 @@
 			linux,udma-mode = <UDMA_PKT_MODE>;
 			ti,needs-epib;
 			ti,psd-size = <64>;
+			ti,notdpkt;
 		};
 
 		ti,psil-config1 {
 			linux,udma-mode = <UDMA_PKT_MODE>;
 			ti,needs-epib;
 			ti,psd-size = <64>;
+			ti,notdpkt;
 		};
 
 		ti,psil-config2 {
 			linux,udma-mode = <UDMA_PKT_MODE>;
 			ti,needs-epib;
 			ti,psd-size = <64>;
+			ti,notdpkt;
 		};
 
 		main_eip76d_trng: trng@4e10000 {

+ 1 - 1
drivers/char/hw_random/Kconfig

@@ -154,7 +154,7 @@ config HW_RANDOM_IXP4XX
 
 config HW_RANDOM_OMAP
 	tristate "OMAP Random Number Generator support"
-	depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU || ARCH_K3
+	depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU || (ARCH_K3 && CRYPTO_DEV_SA2UL)
 	default HW_RANDOM
  	---help---
  	  This driver provides kernel-side support for the Random Number

+ 6 - 0
drivers/char/hw_random/omap-rng.c

@@ -29,6 +29,7 @@
 #include <linux/of_address.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
+#include <linux/k3_sa2ul.h>
 
 #include <asm/io.h>
 
@@ -440,6 +441,11 @@ static int omap_rng_probe(struct platform_device *pdev)
 	if (!priv)
 		return -ENOMEM;
 
+#if defined(CONFIG_CRYPTO_DEV_SA2UL_MODULE) || defined(CONFIG_CRYPTO_DEV_SA2UL)
+	ret = sa2ul_trng_enable(dev->parent);
+	if (ret)
+		return ret;
+#endif
 	priv->rng.read = omap_rng_do_read;
 	priv->rng.init = omap_rng_init;
 	priv->rng.cleanup = omap_rng_cleanup;

+ 20 - 1
drivers/crypto/sa2ul.c

@@ -2155,6 +2155,24 @@ err_dma_tx:
 	return ret;
 }
 
+int sa2ul_trng_enable(struct device *dev)
+{
+	struct sa_crypto_data *dev_data;
+	u32 val;
+
+	if (!dev)
+		return -ENODEV;
+
+	dev_data = dev_get_drvdata(dev);
+
+	val = readl_relaxed(dev_data->base + SA_ENGINE_ENABLE_CONTROL);
+	writel_relaxed(val | SA_EEC_TRNG_EN, dev_data->base +
+		       SA_ENGINE_ENABLE_CONTROL);
+
+	return 0;
+}
+EXPORT_SYMBOL(sa2ul_trng_enable);
+
 static int sa_ul_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -2183,8 +2201,9 @@ static int sa_ul_probe(struct platform_device *pdev)
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	saul_base = devm_ioremap_resource(dev, res);
 
+	dev_data->base = saul_base;
 	val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
-	    SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN | SA_EEC_TRNG_EN;
+	    SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN;
 
 	writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
 

+ 2 - 0
drivers/crypto/sa2ul.h

@@ -157,6 +157,7 @@ struct sa_tfm_ctx;
 
 /**
  * struct sa_crypto_data - Crypto driver instance data
+ * @base: Base address of the register space
  * @pdev: Platform device pointer
  * @sc_pool: security context pool
  * @dev: Device pointer
@@ -171,6 +172,7 @@ struct sa_tfm_ctx;
  * @dma_tx: Pointer to DMA TX channel
  */
 struct sa_crypto_data {
+	void __iomem *base;
 	struct platform_device	*pdev;
 	struct dma_pool		*sc_pool;
 	struct device *dev;

+ 42 - 23
drivers/dma/ti/k3-navss-udma.c

@@ -757,6 +757,35 @@ static void k3_nav_udmax_dump_rx_rt_chn(struct k3_nav_udmax_rx_channel *chn,
 		xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_SBCNT_REG));
 }
 
+static int
+k3_nav_udmax_allocate_rx_flows(struct k3_nav_udmax_rx_channel *rx_chn,
+			       struct k3_nav_udmax_rx_channel_cfg *cfg)
+{
+	int ret;
+
+	/* default rflow */
+	if (cfg->flow_id_use_rxchan_id)
+		return 0;
+
+	/* not a GP rflows */
+	if (rx_chn->flow_id_base != -1 &&
+	    !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
+		return 0;
+
+	/* Allocate range of GP rflows */
+	ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
+					 rx_chn->flow_id_base,
+					 rx_chn->flow_num);
+	if (ret < 0) {
+		dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
+			rx_chn->flow_id_base, rx_chn->flow_num, ret);
+		return ret;
+	}
+	rx_chn->flow_id_base = ret;
+
+	return 0;
+}
+
 struct k3_nav_udmax_rx_channel *k3_nav_udmax_request_rx_chn(struct device *dev,
 		const char *name, struct k3_nav_udmax_rx_channel_cfg *cfg)
 {
@@ -810,17 +839,9 @@ struct k3_nav_udmax_rx_channel *k3_nav_udmax_request_rx_chn(struct device *dev,
 		goto err;
 	}
 
-	/* Reserve range of RX flows */
-	if (!cfg->flow_id_use_rxchan_id) {
-		ret = xudma_reserve_rflow_range(rx_chn->common.udmax,
-						rx_chn->flow_id_base,
-						rx_chn->flow_num);
-		if (ret < 0) {
-			dev_err(dev, "UDMAX reserve_rflow get err %d\n", ret);
-			goto err;
-		}
-		rx_chn->flow_id_base = ret;
-	}
+	ret = k3_nav_udmax_allocate_rx_flows(rx_chn, cfg);
+	if (ret)
+		goto err;
 
 	for (i = 0; i < rx_chn->flow_num; i++)
 		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
@@ -845,18 +866,16 @@ struct k3_nav_udmax_rx_channel *k3_nav_udmax_request_rx_chn(struct device *dev,
 			goto err;
 	}
 
-	if (!cfg->skip_psil) {
-		ret = xudma_navss_psil_pair(rx_chn->common.udmax,
-					    rx_chn->common.src_thread,
-					    rx_chn->common.dst_thread);
-		if (ret) {
-			dev_err(dev, "PSI-L request err %d\n", ret);
-			goto err;
-		}
-
-		rx_chn->psil_paired = true;
+	ret = xudma_navss_psil_pair(rx_chn->common.udmax,
+				    rx_chn->common.src_thread,
+				    rx_chn->common.dst_thread);
+	if (ret) {
+		dev_err(dev, "PSI-L request err %d\n", ret);
+		goto err;
 	}
 
+	rx_chn->psil_paired = true;
+
 	/* reset RX RT registers */
 	k3_nav_udmax_disable_rx_chn(rx_chn);
 
@@ -890,8 +909,8 @@ void k3_nav_udmax_release_rx_chn(struct k3_nav_udmax_rx_channel *rx_chn)
 	if (rx_chn->need_tisci_free)
 		rx_chn->need_tisci_free = false;
 
-	xudma_free_rflow_range(rx_chn->common.udmax,
-			       rx_chn->flow_id_base, rx_chn->flow_num);
+	xudma_free_gp_rflow_range(rx_chn->common.udmax,
+				  rx_chn->flow_id_base, rx_chn->flow_num);
 
 	if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
 		xudma_rchan_put(rx_chn->common.udmax,

+ 24 - 7
drivers/dma/ti/k3-udma-private.c

@@ -69,17 +69,23 @@ struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud)
 }
 EXPORT_SYMBOL(xudma_dev_get_tisci_rm);
 
-int xudma_reserve_rflow_range(struct udma_dev *ud, int from, int cnt)
+int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
 {
-	return __udma_reserve_rflow_range(ud, from, cnt);
+	return __udma_alloc_gp_rflow_range(ud, from, cnt);
 }
-EXPORT_SYMBOL(xudma_reserve_rflow_range);
+EXPORT_SYMBOL(xudma_alloc_gp_rflow_range);
 
-int xudma_free_rflow_range(struct udma_dev *ud, int from, int cnt)
+int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
 {
-	return __udma_free_rflow_range(ud, from, cnt);
+	return __udma_free_gp_rflow_range(ud, from, cnt);
 }
-EXPORT_SYMBOL(xudma_free_rflow_range);
+EXPORT_SYMBOL(xudma_free_gp_rflow_range);
+
+bool xudma_rflow_is_gp(struct udma_dev *ud, int id)
+{
+	return !test_bit(id, ud->rflow_gp_map);
+}
+EXPORT_SYMBOL(xudma_rflow_is_gp);
 
 #define XUDMA_GET_PUT_RESOURCE(res)					\
 struct udma_##res *xudma_##res##_get(struct udma_dev *ud, int id)	\
@@ -95,7 +101,18 @@ void xudma_##res##_put(struct udma_dev *ud, struct udma_##res *p)	\
 EXPORT_SYMBOL(xudma_##res##_put)
 XUDMA_GET_PUT_RESOURCE(tchan);
 XUDMA_GET_PUT_RESOURCE(rchan);
-XUDMA_GET_PUT_RESOURCE(rflow);
+
+struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id)
+{
+	return __udma_get_rflow(ud, id);
+}
+EXPORT_SYMBOL(xudma_rflow_get);
+
+void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p)
+{
+	__udma_put_rflow(ud, p);
+}
+EXPORT_SYMBOL(xudma_rflow_put);
 
 #define XUDMA_GET_RESOURCE_ID(res)					\
 int xudma_##res##_get_id(struct udma_##res *p)				\

+ 96 - 69
drivers/dma/ti/k3-udma.c

@@ -110,8 +110,9 @@ struct udma_dev {
 	int rflow_cnt;
 	unsigned long *tchan_map;
 	unsigned long *rchan_map;
-	unsigned long *rflow_map;
-	unsigned long *rflow_map_reserved;
+	unsigned long *rflow_gp_map;
+	unsigned long *rflow_gp_map_allocated;
+	unsigned long *rflow_in_use;
 
 	struct udma_tchan *tchans;
 	struct udma_rchan *rchans;
@@ -194,6 +195,7 @@ struct udma_chan {
 	u32 psd_size; /* size of Protocol Specific Data */
 	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
 	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
+	bool notdpkt; /* Suppress sending TDC packet */
 	int remote_thread_id;
 	u32 src_thread;
 	u32 dst_thread;
@@ -337,6 +339,22 @@ static inline char *udma_get_dir_text(enum dma_transfer_direction dir)
 	return "invalid";
 }
 
+static void udma_reset_uchan(struct udma_chan *uc)
+{
+	uc->state = UDMA_CHAN_IS_IDLE;
+	uc->remote_thread_id = -1;
+	uc->dir = DMA_MEM_TO_MEM;
+	uc->pkt_mode = false;
+	uc->static_tr_type = 0;
+	uc->enable_acc32 = 0;
+	uc->enable_burst = 0;
+	uc->channel_tpl = 0;
+	uc->psd_size = 0;
+	uc->metadata_size = 0;
+	uc->hdesc_size = 0;
+	uc->notdpkt = 0;
+}
+
 static inline void udma_dump_chan_stdata(struct udma_chan *uc)
 {
 	struct device *dev = uc->ud->dev;
@@ -719,6 +737,7 @@ static inline int udma_reset_chan(struct udma_chan *uc, bool hard)
 		uc->psd_size = uc_backup.psd_size;
 		uc->metadata_size = uc_backup.metadata_size;
 		uc->hdesc_size = uc_backup.hdesc_size;
+		uc->notdpkt = uc_backup.notdpkt;
 
 		ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
 		if (ret)
@@ -1057,12 +1076,12 @@ static irqreturn_t udma_udma_irq_handler(int irq, void *data)
 }
 
 /**
- * __udma_reserve_rflow_range - reserve range of flow ids
+ * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
  * @ud: UDMA device
  * @from: Start the search from this flow id number
  * @cnt: Number of consecutive flow ids to allocate
  *
- * Reserve range of flow ids for future use, those flows can be allocated
+ * Allocate range of RX flow ids for future use, those flows can be requested
  * only using explicit flow id number. if @from is set to -1 it will try to find
  * first free range. if @from is positive value it will force allocation only
  * of the specified range of flows.
@@ -1072,7 +1091,7 @@ static irqreturn_t udma_udma_irq_handler(int irq, void *data)
  * -EINVAL if wrong input values passed.
  * Returns flow id on success.
  */
-static int __udma_reserve_rflow_range(struct udma_dev *ud, int from, int cnt)
+static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
 {
 	int start, tmp_from;
 	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
@@ -1080,14 +1099,14 @@ static int __udma_reserve_rflow_range(struct udma_dev *ud, int from, int cnt)
 	tmp_from = from;
 	if (tmp_from < 0)
 		tmp_from = ud->rchan_cnt;
-	/* default flows can't be reserved and accessible only by id */
+	/* default flows can't be allocated and accessible only by id */
 	if (tmp_from < ud->rchan_cnt)
 		return -EINVAL;
 
 	if (tmp_from + cnt > ud->rflow_cnt)
 		return -EINVAL;
 
-	bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
+	bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
 		  ud->rflow_cnt);
 
 	start = bitmap_find_next_zero_area(tmp,
@@ -1099,44 +1118,57 @@ static int __udma_reserve_rflow_range(struct udma_dev *ud, int from, int cnt)
 	if (from >= 0 && start != from)
 		return -EEXIST;
 
-	bitmap_set(ud->rflow_map_reserved, start, cnt);
+	bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
 	return start;
 }
 
-static int __udma_free_rflow_range(struct udma_dev *ud, int from, int cnt)
+static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
 {
 	if (from < ud->rchan_cnt)
 		return -EINVAL;
 	if (from + cnt > ud->rflow_cnt)
 		return -EINVAL;
 
-	bitmap_clear(ud->rflow_map_reserved, from, cnt);
+	bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
 	return 0;
 }
 
-static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud,
-					       enum udma_tp_level tpl, int id)
+static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
 {
-	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
+	/*
+	 * Attempt to request rflow by ID can be made for any rflow
+	 * if not in use, with the assumption that the caller knows what it's
+	 * doing. TI-SCI FW will perform an additional permission check
+	 * anyway, so it's safe.
+	 */
 
-	if (id >= 0) {
-		if (test_bit(id, ud->rflow_map)) {
-			dev_err(ud->dev, "rflow%d is in use\n", id);
-			return ERR_PTR(-ENOENT);
-		}
-	} else {
-		bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
-			  ud->rflow_cnt);
+	if (id < 0 || id >= ud->rflow_cnt)
+		return ERR_PTR(-ENOENT);
 
-		id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
-		if (id >= ud->rflow_cnt)
-			return ERR_PTR(-ENOENT);
-	}
+	if (test_bit(id, ud->rflow_in_use))
+		return ERR_PTR(-ENOENT);
+
+	/* GP rflow has to be allocated first */
+	if (!test_bit(id, ud->rflow_gp_map) &&
+	    !test_bit(id, ud->rflow_gp_map_allocated))
+		return ERR_PTR(-EINVAL);
 
-	set_bit(id, ud->rflow_map);
+	dev_dbg(ud->dev, "get rflow%d\n", id);
+	set_bit(id, ud->rflow_in_use);
 	return &ud->rflows[id];
 }
 
+static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
+{
+	if (!test_bit(rflow->id, ud->rflow_in_use)) {
+		dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
+		return;
+	}
+
+	dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
+	clear_bit(rflow->id, ud->rflow_in_use);
+}
+
 #define UDMA_RESERVE_RESOURCE(res)					\
 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
 					       enum udma_tp_level tpl,	\
@@ -1259,7 +1291,7 @@ static int udma_get_rflow(struct udma_chan *uc, int flow_id)
 	if (!uc->rchan)
 		dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);
 
-	uc->rflow = __udma_reserve_rflow(ud, uc->channel_tpl, flow_id);
+	uc->rflow = __udma_get_rflow(ud, flow_id);
 	if (IS_ERR(uc->rflow))
 		return PTR_ERR(uc->rflow);
 
@@ -1297,7 +1329,7 @@ static void udma_put_rflow(struct udma_chan *uc)
 	if (uc->rflow) {
 		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
 			uc->rflow->id);
-		clear_bit(uc->rflow->id, ud->rflow_map);
+		__udma_put_rflow(ud, uc->rflow);
 		uc->rflow = NULL;
 	}
 }
@@ -1580,7 +1612,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
 		req_tx.tx_filt_einfo = 0;
 		req_tx.tx_filt_pswords = 0;
 		req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
-		req_tx.tx_supr_tdpkt = 0;
+		req_tx.tx_supr_tdpkt = uc->notdpkt;
 		req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
 		req_tx.txcq_qnum = tc_ring;
 
@@ -1651,7 +1683,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
 			req_tx.tx_filt_einfo = 0;
 			req_tx.tx_filt_pswords = 0;
 			req_tx.tx_chan_type = mode;
-			req_tx.tx_supr_tdpkt = 0;
+			req_tx.tx_supr_tdpkt = uc->notdpkt;
 			req_tx.tx_fetch_size = fetch_size >> 2;
 			req_tx.txcq_qnum = tc_ring;
 
@@ -1845,16 +1877,7 @@ err_chan_free:
 err_res_free:
 	udma_free_tx_resources(uc);
 	udma_free_rx_resources(uc);
-	uc->remote_thread_id = -1;
-	uc->dir = DMA_MEM_TO_MEM;
-	uc->pkt_mode = false;
-	uc->static_tr_type = 0;
-	uc->enable_acc32 = 0;
-	uc->enable_burst = 0;
-	uc->channel_tpl = 0;
-	uc->psd_size = 0;
-	uc->metadata_size = 0;
-	uc->hdesc_size = 0;
+	udma_reset_uchan(uc);
 
 	if (uc->use_dma_pool) {
 		dma_pool_destroy(uc->hdesc_pool);
@@ -2618,7 +2641,7 @@ static enum dma_status udma_tx_status(struct dma_chan *chan,
 
 		if (uc->desc->dir == DMA_MEM_TO_DEV) {
 			bcnt = udma_tchanrt_read(uc->tchan,
-						 UDMA_TCHAN_RT_BCNT_REG);
+						 UDMA_TCHAN_RT_SBCNT_REG);
 			pdma_bcnt = udma_tchanrt_read(uc->tchan,
 						UDMA_TCHAN_RT_PEER_BCNT_REG);
 			pcnt = udma_tchanrt_read(uc->tchan,
@@ -2628,7 +2651,7 @@ static enum dma_status udma_tx_status(struct dma_chan *chan,
 				delay = bcnt - pdma_bcnt;
 		} else if (uc->desc->dir == DMA_DEV_TO_MEM) {
 			bcnt = udma_rchanrt_read(uc->rchan,
-						 UDMA_RCHAN_RT_SBCNT_REG);
+						 UDMA_RCHAN_RT_BCNT_REG);
 			pdma_bcnt = udma_rchanrt_read(uc->rchan,
 						UDMA_RCHAN_RT_PEER_BCNT_REG);
 			pcnt = udma_rchanrt_read(uc->rchan,
@@ -2640,7 +2663,7 @@ static enum dma_status udma_tx_status(struct dma_chan *chan,
 			u32 sbcnt;
 
 			sbcnt = udma_tchanrt_read(uc->tchan,
-						  UDMA_TCHAN_RT_SBCNT_REG);
+						  UDMA_TCHAN_RT_BCNT_REG);
 			bcnt = udma_tchanrt_read(uc->tchan,
 						 UDMA_TCHAN_RT_PEER_BCNT_REG);
 			pcnt = udma_tchanrt_read(uc->tchan,
@@ -2895,17 +2918,7 @@ static void udma_free_chan_resources(struct dma_chan *chan)
 
 	udma_free_tx_resources(uc);
 	udma_free_rx_resources(uc);
-
-	uc->remote_thread_id = -1;
-	uc->dir = DMA_MEM_TO_MEM;
-	uc->pkt_mode = false;
-	uc->static_tr_type = 0;
-	uc->enable_acc32 = 0;
-	uc->enable_burst = 0;
-	uc->channel_tpl = 0;
-	uc->psd_size = 0;
-	uc->metadata_size = 0;
-	uc->hdesc_size = 0;
+	udma_reset_uchan(uc);
 
 	if (uc->use_dma_pool) {
 		dma_pool_destroy(uc->hdesc_pool);
@@ -2943,11 +2956,13 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
 	slave_node = of_find_node_by_phandle(args[0]);
 	if (!slave_node) {
 		dev_err(ud->dev, "Slave node is missing\n");
+		uc->dir = DMA_MEM_TO_MEM;
 		return false;
 	}
 
 	if (of_property_read_u32(slave_node, "ti,psil-base", &val)) {
 		dev_err(ud->dev, "ti,psil-base is missing\n");
+		uc->dir = DMA_MEM_TO_MEM;
 		return false;
 	}
 
@@ -2958,6 +2973,7 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
 	chconf_node = of_find_node_by_name(slave_node, prop);
 	if (!chconf_node) {
 		dev_err(ud->dev, "Channel configuration node is missing\n");
+		uc->dir = DMA_MEM_TO_MEM;
 		uc->remote_thread_id = -1;
 		return false;
 	}
@@ -2980,6 +2996,8 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
 	if (!of_property_read_u32(chconf_node, "ti,channel-tpl", &val))
 		uc->channel_tpl = val;
 
+	uc->notdpkt = of_property_read_bool(chconf_node, "ti,notdpkt");
+
 	uc->needs_epib = of_property_read_bool(chconf_node, "ti,needs-epib");
 	if (!of_property_read_u32(chconf_node, "ti,psd-size", &val))
 		uc->psd_size = val;
@@ -3133,17 +3151,22 @@ static int udma_setup_resources(struct udma_dev *ud)
 					   sizeof(unsigned long), GFP_KERNEL);
 	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
 				  GFP_KERNEL);
-	ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
-					   sizeof(unsigned long), GFP_KERNEL);
-	ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
+	ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
 					      sizeof(unsigned long),
 					      GFP_KERNEL);
+	ud->rflow_gp_map_allocated = devm_kcalloc(dev,
+						  BITS_TO_LONGS(ud->rflow_cnt),
+						  sizeof(unsigned long),
+						  GFP_KERNEL);
+	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
+					sizeof(unsigned long),
+					GFP_KERNEL);
 	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
 				  GFP_KERNEL);
 
-	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
-	    !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
-	    !ud->rflows)
+	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
+	    !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
+	    !ud->rflows || !ud->rflow_in_use)
 		return -ENOMEM;
 
 	/*
@@ -3151,7 +3174,10 @@ static int udma_setup_resources(struct udma_dev *ud)
 	 * as default flows if remote HW can't generate flow_ids. Those
 	 * RX flows can be requested only explicitly by id.
 	 */
-	bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);
+	bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
+
+	/* by default no GP rflows are assigned to Linux */
+	bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
 
 	/* Get resource ranges from tisci */
 	for (i = 0; i < RM_RANGE_LAST; i++)
@@ -3170,6 +3196,8 @@ static int udma_setup_resources(struct udma_dev *ud)
 			rm_desc = &rm_res->desc[i];
 			bitmap_clear(ud->tchan_map, rm_desc->start,
 				     rm_desc->num);
+			dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
+				rm_desc->start, rm_desc->num);
 		}
 	}
 
@@ -3177,31 +3205,30 @@ static int udma_setup_resources(struct udma_dev *ud)
 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
 	if (IS_ERR(rm_res)) {
 		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
-		bitmap_zero(ud->rflow_map, ud->rchan_cnt);
 	} else {
 		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
-		bitmap_fill(ud->rflow_map, ud->rchan_cnt);
 		for (i = 0; i < rm_res->sets; i++) {
 			rm_desc = &rm_res->desc[i];
 			bitmap_clear(ud->rchan_map, rm_desc->start,
 				     rm_desc->num);
-			bitmap_clear(ud->rflow_map, rm_desc->start,
-				     rm_desc->num);
+			dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
+				rm_desc->start, rm_desc->num);
 		}
 	}
 
 	/* GP rflow ranges */
 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
 	if (IS_ERR(rm_res)) {
-		bitmap_clear(ud->rflow_map, ud->rchan_cnt,
+		/* all gp flows are assigned exclusively to Linux */
+		bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
 			     ud->rflow_cnt - ud->rchan_cnt);
 	} else {
-		bitmap_set(ud->rflow_map, ud->rchan_cnt,
-			   ud->rflow_cnt - ud->rchan_cnt);
 		for (i = 0; i < rm_res->sets; i++) {
 			rm_desc = &rm_res->desc[i];
-			bitmap_clear(ud->rflow_map, rm_desc->start,
+			bitmap_clear(ud->rflow_gp_map, rm_desc->start,
 				     rm_desc->num);
+			dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
+				rm_desc->start, rm_desc->num);
 		}
 	}
 

+ 3 - 2
drivers/dma/ti/k3-udma.h

@@ -140,8 +140,8 @@ void xudma_dev_put(struct udma_dev *ud);
 u32 xudma_dev_get_psil_base(struct udma_dev *ud);
 struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud);
 
-int xudma_reserve_rflow_range(struct udma_dev *ud, int from, int cnt);
-int xudma_free_rflow_range(struct udma_dev *ud, int from, int cnt);
+int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt);
+int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt);
 
 struct udma_tchan *xudma_tchan_get(struct udma_dev *ud, int id);
 struct udma_rchan *xudma_rchan_get(struct udma_dev *ud, int id);
@@ -159,5 +159,6 @@ u32 xudma_tchanrt_read(struct udma_tchan *tchan, int reg);
 void xudma_tchanrt_write(struct udma_tchan *tchan, int reg, u32 val);
 u32 xudma_rchanrt_read(struct udma_rchan *rchan, int reg);
 void xudma_rchanrt_write(struct udma_rchan *rchan, int reg, u32 val);
+bool xudma_rflow_is_gp(struct udma_dev *ud, int id);
 
 #endif /* K3_UDMA_H_ */

+ 3 - 1
drivers/soc/ti/k3-ringacc.c

@@ -391,8 +391,10 @@ void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
 	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
 		return;
 
-	if (!ring->parent->dma_ring_reset_quirk)
+	if (!ring->parent->dma_ring_reset_quirk) {
+		k3_ringacc_ring_reset(ring);
 		return;
+	}
 
 	if (!occ)
 		occ = dbg_readl(&ring->rt->occ);

+ 2 - 3
include/linux/dma/k3-navss-udma.h

@@ -86,8 +86,8 @@ struct k3_nav_udmax_rx_flow_cfg {
  *
  * @psdata_size:	SW Data is present in Host PD of @swdata_size bytes
  * @flow_id_base:	first flow_id used by channel.
- *			if @flow_id_base = -1 - flow ids range will be allocated
- *			dynamically.
+ *			if @flow_id_base = -1 - range of GP rflows will be
+ *			allocated dynamically.
  * @flow_id_num:	number of RX flows used by channel
  * @flow_id_use_rxchan_id:	use RX channel id as flow id,
  *				used only if @flow_id_num = 1
@@ -99,7 +99,6 @@ struct k3_nav_udmax_rx_channel_cfg {
 	int  flow_id_base;
 	int  flow_id_num;
 	bool flow_id_use_rxchan_id;
-	bool skip_psil;
 
 	struct k3_nav_udmax_rx_flow_cfg *def_flow_cfg;
 };
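
With skip_psil gone and flow_id_base = -1 now meaning "allocate a GP rflow range dynamically", a client-side sketch looks roughly as follows. This is a hedged illustration only: the "rx" channel name, the single-flow setup and the ERR_PTR return convention are assumptions, and def_flow_cfg handling is omitted:

	struct k3_nav_udmax_rx_channel_cfg cfg = { 0 };
	struct k3_nav_udmax_rx_channel *rx_chn;

	cfg.flow_id_base = -1;		/* let the driver pick a GP rflow range */
	cfg.flow_id_num = 1;
	cfg.flow_id_use_rxchan_id = false;
	/* PSI-L pairing now always happens inside the request call */

	rx_chn = k3_nav_udmax_request_rx_chn(dev, "rx", &cfg);
	if (IS_ERR(rx_chn))
		return PTR_ERR(rx_chn);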

+ 7 - 0
include/linux/k3_sa2ul.h

@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef	__K3_SA2UL_H
+#define	__K3_SA2UL_H
+
+int sa2ul_trng_enable(struct device *dev);
+
+#endif /* __K3_SA2UL_H */