
Merge tag 'thunderbolt-for-v4.17' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into char-misc-next

Mika writes:

thunderbolt: Changes for v4.17 merge window

New features:

  - Intel Titan Ridge Thunderbolt 3 controller support
  - Preboot ACL support, allowing a more secure way to boot from
    Thunderbolt devices
  - New "USB only" security level

In addition there are a couple of fixes: increasing the timeouts used
when authenticating the ICM firmware and when reading the root switch
config space, and preventing a crash on certain Lenovo systems where the
ICM firmware for some reason does not always start up properly.
Greg Kroah-Hartman, 7 years ago
commit 6f005302a2

+ 33 - 0
Documentation/ABI/testing/sysfs-bus-thunderbolt

@@ -1,3 +1,26 @@
+What: /sys/bus/thunderbolt/devices/.../domainX/boot_acl
+Date:		Jun 2018
+KernelVersion:	4.17
+Contact:	thunderbolt-software@lists.01.org
+Description:	Holds a comma-separated list of device unique_ids that
+		are allowed to be connected automatically during system
+		startup (e.g. boot devices). The list always contains the
+		maximum supported number of unique_ids, with unused
+		entries left empty. This allows the userspace software
+		to determine how many entries the controller supports.
+		If there are multiple controllers, each controller has
+		its own ACL list, and its size may differ between
+		controllers.
+
+		System BIOS may have an option "Preboot ACL" or similar
+		that needs to be selected before this list is taken into
+		consideration.
+
+		Software always updates the full list on each write.
+
+		If a device is authorized automatically during boot, its
+		boot attribute is set to 1.
+
 What: /sys/bus/thunderbolt/devices/.../domainX/security
 Date:		Sep 2017
 KernelVersion:	4.13
@@ -12,6 +35,9 @@ Description:	This attribute holds current Thunderbolt security level
 			minimum. User needs to authorize each device.
 		dponly: Automatically tunnel Display port (and USB). No
 			PCIe tunnels are created.
+		usbonly: Automatically tunnel USB controller of the
+			 connected Thunderbolt dock (and Display Port). All
+			 PCIe links downstream of the dock are removed.
 
 What: /sys/bus/thunderbolt/devices/.../authorized
 Date:		Sep 2017
@@ -38,6 +64,13 @@ Description:	This attribute is used to authorize Thunderbolt devices
 		   the device did not contain a key at all, and
 		   EKEYREJECTED if the challenge response did not match.
 
+What: /sys/bus/thunderbolt/devices/.../boot
+Date:		Jun 2018
+KernelVersion:	4.17
+Contact:	thunderbolt-software@lists.01.org
+Description:	This attribute contains 1 if the Thunderbolt device was
+		already authorized on boot, and 0 otherwise.
+
 What: /sys/bus/thunderbolt/devices/.../key
 Date:		Sep 2017
 KernelVersion:	4.13
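
To make the new boot_acl format above concrete, here is a minimal userspace
sketch in C. It is an illustration only: the path (domain0) and the entry
count of 16 are assumptions, and the UUID written below is a placeholder for
the unique_id of a device actually being whitelisted; in practice the number
of slots should be derived from the commas in the value read back. The sketch
reads the current ACL and then writes back a full list with one entry set and
the remaining slots left empty, as the attribute requires.

/*
 * Hypothetical userspace sketch for the boot_acl attribute described
 * above.  Assumes a single domain (domain0) and 16 supported entries;
 * a real tool would count the commas in the value it reads back.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define ACL_PATH	"/sys/bus/thunderbolt/devices/domain0/boot_acl"
#define ACL_SLOTS	16	/* assumption, not queried from the kernel */

int main(void)
{
	char cur[4096], next[4096];
	ssize_t n;
	int fd, i;

	fd = open(ACL_PATH, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Unused entries show up as empty strings between the commas. */
	n = read(fd, cur, sizeof(cur) - 1);
	if (n < 0) {
		perror("read");
		close(fd);
		return 1;
	}
	cur[n] = '\0';
	printf("current boot_acl: %s", cur);

	/*
	 * The whole list has to be written in one go: one unique_id per
	 * slot, slots separated by commas, unused slots left empty.  The
	 * UUID here is a placeholder; use the unique_id of a real device.
	 */
	strcpy(next, "c0ffee00-dead-beef-0123-456789abcdef");
	for (i = 1; i < ACL_SLOTS; i++)
		strcat(next, ",");

	lseek(fd, 0, SEEK_SET);
	if (write(fd, next, strlen(next)) < 0)
		perror("write");

	close(fd);
	return 0;
}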

+ 10 - 5
Documentation/admin-guide/thunderbolt.rst

@@ -21,11 +21,11 @@ vulnerable to DMA attacks.
 Security levels and how to use them
 -----------------------------------
 Starting with Intel Falcon Ridge Thunderbolt controller there are 4
-security levels available. The reason for these is the fact that the
-connected devices can be DMA masters and thus read contents of the host
-memory without CPU and OS knowing about it. There are ways to prevent
-this by setting up an IOMMU but it is not always available for various
-reasons.
+security levels available. Intel Titan Ridge added one more security level
+(usbonly). The reason for these is the fact that the connected devices can
+be DMA masters and thus read contents of the host memory without CPU and OS
+knowing about it. There are ways to prevent this by setting up an IOMMU but
+it is not always available for various reasons.
 
 The security levels are as follows:
 
@@ -52,6 +52,11 @@ The security levels are as follows:
     USB. No PCIe tunneling is done. In BIOS settings this is
     typically called *Display Port Only*.
 
+  usbonly
+    The firmware automatically creates tunnels for the USB controller and
+    Display Port in a dock. All PCIe links downstream of the dock are
+    removed.
+
 The current security level can be read from
 ``/sys/bus/thunderbolt/devices/domainX/security`` where ``domainX`` is
 the Thunderbolt domain the host controller manages. There is typically
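
As a small illustration of the paragraph above, the following hedged C sketch
(assuming the first domain is domain0) reads the security attribute and checks
for the new usbonly level:

/* Hypothetical sketch: read the current security level of domain0. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char level[32];
	FILE *f = fopen("/sys/bus/thunderbolt/devices/domain0/security", "r");

	if (!f) {
		perror("security");
		return 1;
	}
	if (!fgets(level, sizeof(level), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* Strip the trailing newline the attribute ends with. */
	level[strcspn(level, "\n")] = '\0';
	printf("security level: %s\n", level);
	if (!strcmp(level, "usbonly"))
		printf("firmware only tunnels USB (and DP) for connected docks\n");
	return 0;
}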

+ 13 - 15
drivers/thunderbolt/dma_port.c

@@ -170,24 +170,22 @@ static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
 
 static int dma_find_port(struct tb_switch *sw)
 {
-	int port, ret;
-	u32 type;
+	static const int ports[] = { 3, 5, 7 };
+	int i;
 
 	/*
-	 * The DMA (NHI) port is either 3 or 5 depending on the
-	 * controller. Try both starting from 5 which is more common.
+	 * The DMA (NHI) port is either 3, 5 or 7 depending on the
+	 * controller. Try all of them.
 	 */
-	port = 5;
-	ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), port, 2, 1,
-			    DMA_PORT_TIMEOUT);
-	if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
-		return port;
-
-	port = 3;
-	ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), port, 2, 1,
-			    DMA_PORT_TIMEOUT);
-	if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
-		return port;
+	for (i = 0; i < ARRAY_SIZE(ports); i++) {
+		u32 type;
+		int ret;
+
+		ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), ports[i],
+				    2, 1, DMA_PORT_TIMEOUT);
+		if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
+			return ports[i];
+	}
 
 	return -ENODEV;
 }

+ 129 - 1
drivers/thunderbolt/domain.c

@@ -117,23 +117,151 @@ static const char * const tb_security_names[] = {
 	[TB_SECURITY_USER] = "user",
 	[TB_SECURITY_SECURE] = "secure",
 	[TB_SECURITY_DPONLY] = "dponly",
+	[TB_SECURITY_USBONLY] = "usbonly",
 };
 
+static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct tb *tb = container_of(dev, struct tb, dev);
+	uuid_t *uuids;
+	ssize_t ret;
+	int i;
+
+	uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
+	if (!uuids)
+		return -ENOMEM;
+
+	if (mutex_lock_interruptible(&tb->lock)) {
+		ret = -ERESTARTSYS;
+		goto out;
+	}
+	ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
+	if (ret) {
+		mutex_unlock(&tb->lock);
+		goto out;
+	}
+	mutex_unlock(&tb->lock);
+
+	for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
+		if (!uuid_is_null(&uuids[i]))
+			ret += snprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
+					&uuids[i]);
+
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s",
+			       i < tb->nboot_acl - 1 ? "," : "\n");
+	}
+
+out:
+	kfree(uuids);
+	return ret;
+}
+
+static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct tb *tb = container_of(dev, struct tb, dev);
+	char *str, *s, *uuid_str;
+	ssize_t ret = 0;
+	uuid_t *acl;
+	int i = 0;
+
+	/*
+	 * Make sure the value is not bigger than tb->nboot_acl * UUID
+	 * length + commas and optional "\n". Also the smallest allowable
+	 * string is tb->nboot_acl * ",".
+	 */
+	if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
+		return -EINVAL;
+	if (count < tb->nboot_acl - 1)
+		return -EINVAL;
+
+	str = kstrdup(buf, GFP_KERNEL);
+	if (!str)
+		return -ENOMEM;
+
+	acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
+	if (!acl) {
+		ret = -ENOMEM;
+		goto err_free_str;
+	}
+
+	uuid_str = strim(str);
+	while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
+		size_t len = strlen(s);
+
+		if (len) {
+			if (len != UUID_STRING_LEN) {
+				ret = -EINVAL;
+				goto err_free_acl;
+			}
+			ret = uuid_parse(s, &acl[i]);
+			if (ret)
+				goto err_free_acl;
+		}
+
+		i++;
+	}
+
+	if (s || i < tb->nboot_acl) {
+		ret = -EINVAL;
+		goto err_free_acl;
+	}
+
+	if (mutex_lock_interruptible(&tb->lock)) {
+		ret = -ERESTARTSYS;
+		goto err_free_acl;
+	}
+	ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
+	mutex_unlock(&tb->lock);
+
+err_free_acl:
+	kfree(acl);
+err_free_str:
+	kfree(str);
+
+	return ret ?: count;
+}
+static DEVICE_ATTR_RW(boot_acl);
+
 static ssize_t security_show(struct device *dev, struct device_attribute *attr,
 			     char *buf)
 {
 	struct tb *tb = container_of(dev, struct tb, dev);
+	const char *name = "unknown";
 
-	return sprintf(buf, "%s\n", tb_security_names[tb->security_level]);
+	if (tb->security_level < ARRAY_SIZE(tb_security_names))
+		name = tb_security_names[tb->security_level];
+
+	return sprintf(buf, "%s\n", name);
 }
 static DEVICE_ATTR_RO(security);
 
 static struct attribute *domain_attrs[] = {
+	&dev_attr_boot_acl.attr,
 	&dev_attr_security.attr,
 	NULL,
 };
 
+static umode_t domain_attr_is_visible(struct kobject *kobj,
+				      struct attribute *attr, int n)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct tb *tb = container_of(dev, struct tb, dev);
+
+	if (attr == &dev_attr_boot_acl.attr) {
+		if (tb->nboot_acl &&
+		    tb->cm_ops->get_boot_acl &&
+		    tb->cm_ops->set_boot_acl)
+			return attr->mode;
+		return 0;
+	}
+
+	return attr->mode;
+}
+
 static struct attribute_group domain_attr_group = {
+	.is_visible = domain_attr_is_visible,
 	.attrs = domain_attrs,
 };
 

+ 685 - 79
drivers/thunderbolt/icm.c

@@ -41,7 +41,8 @@
 #define PHY_PORT_CS1_LINK_STATE_MASK	GENMASK(29, 26)
 #define PHY_PORT_CS1_LINK_STATE_SHIFT	26
 
-#define ICM_TIMEOUT			5000 /* ms */
+#define ICM_TIMEOUT			5000	/* ms */
+#define ICM_APPROVE_TIMEOUT		10000	/* ms */
 #define ICM_MAX_LINK			4
 #define ICM_MAX_DEPTH			6
 
@@ -55,9 +56,11 @@
  * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
  *	     (only set when @upstream_port is not %NULL)
  * @safe_mode: ICM is in safe mode
+ * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
  * @is_supported: Checks if we can support ICM on this controller
  * @get_mode: Read and return the ICM firmware mode (optional)
  * @get_route: Find a route string for given switch
+ * @driver_ready: Send driver ready message to ICM
  * @device_connected: Handle device connected ICM message
  * @device_disconnected: Handle device disconnected ICM message
  * @xdomain_connected - Handle XDomain connected ICM message
@@ -67,11 +70,15 @@ struct icm {
 	struct mutex request_lock;
 	struct delayed_work rescan_work;
 	struct pci_dev *upstream_port;
+	size_t max_boot_acl;
 	int vnd_cap;
 	bool safe_mode;
 	bool (*is_supported)(struct tb *tb);
 	int (*get_mode)(struct tb *tb);
 	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
+	int (*driver_ready)(struct tb *tb,
+			    enum tb_security_level *security_level,
+			    size_t *nboot_acl);
 	void (*device_connected)(struct tb *tb,
 				 const struct icm_pkg_header *hdr);
 	void (*device_disconnected)(struct tb *tb,
@@ -111,6 +118,12 @@ static inline u64 get_route(u32 route_hi, u32 route_lo)
 	return (u64)route_hi << 32 | route_lo;
 }
 
+static inline u64 get_parent_route(u64 route)
+{
+	int depth = tb_route_length(route);
+	return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
+}
+
 static bool icm_match(const struct tb_cfg_request *req,
 		      const struct ctl_pkg *pkg)
 {
@@ -245,6 +258,28 @@ err_free:
 	return ret;
 }
 
+static int
+icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+		    size_t *nboot_acl)
+{
+	struct icm_fr_pkg_driver_ready_response reply;
+	struct icm_pkg_driver_ready request = {
+		.hdr.code = ICM_DRIVER_READY,
+	};
+	int ret;
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (security_level)
+		*security_level = reply.security_level & ICM_FR_SLEVEL_MASK;
+
+	return 0;
+}
+
 static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
 {
 	struct icm_fr_pkg_approve_device request;
@@ -260,7 +295,7 @@ static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
 	memset(&reply, 0, sizeof(reply));
 	/* Use larger timeout as establishing tunnels can take some time */
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, 10000);
+			  1, ICM_APPROVE_TIMEOUT);
 	if (ret)
 		return ret;
 
@@ -374,6 +409,59 @@ static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
 	return 0;
 }
 
+static void add_switch(struct tb_switch *parent_sw, u64 route,
+		       const uuid_t *uuid, u8 connection_id, u8 connection_key,
+		       u8 link, u8 depth, enum tb_security_level security_level,
+		       bool authorized, bool boot)
+{
+	struct tb_switch *sw;
+
+	sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
+	if (!sw)
+		return;
+
+	sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
+	sw->connection_id = connection_id;
+	sw->connection_key = connection_key;
+	sw->link = link;
+	sw->depth = depth;
+	sw->authorized = authorized;
+	sw->security_level = security_level;
+	sw->boot = boot;
+
+	/* Link the two switches now */
+	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
+	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
+
+	if (tb_switch_add(sw)) {
+		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
+		tb_switch_put(sw);
+		return;
+	}
+}
+
+static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
+			  u64 route, u8 connection_id, u8 connection_key,
+			  u8 link, u8 depth, bool boot)
+{
+	/* Disconnect from parent */
+	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
+	/* Re-connect via updated port */
+	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
+
+	/* Update with the new addressing information */
+	sw->config.route_hi = upper_32_bits(route);
+	sw->config.route_lo = lower_32_bits(route);
+	sw->connection_id = connection_id;
+	sw->connection_key = connection_key;
+	sw->link = link;
+	sw->depth = depth;
+	sw->boot = boot;
+
+	/* This switch still exists */
+	sw->is_unplugged = false;
+}
+
 static void remove_switch(struct tb_switch *sw)
 {
 	struct tb_switch *parent_sw;
@@ -383,15 +471,52 @@ static void remove_switch(struct tb_switch *sw)
 	tb_switch_remove(sw);
 }
 
+static void add_xdomain(struct tb_switch *sw, u64 route,
+			const uuid_t *local_uuid, const uuid_t *remote_uuid,
+			u8 link, u8 depth)
+{
+	struct tb_xdomain *xd;
+
+	xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
+	if (!xd)
+		return;
+
+	xd->link = link;
+	xd->depth = depth;
+
+	tb_port_at(route, sw)->xdomain = xd;
+
+	tb_xdomain_add(xd);
+}
+
+static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
+{
+	xd->link = link;
+	xd->route = route;
+	xd->is_unplugged = false;
+}
+
+static void remove_xdomain(struct tb_xdomain *xd)
+{
+	struct tb_switch *sw;
+
+	sw = tb_to_switch(xd->dev.parent);
+	tb_port_at(xd->route, sw)->xdomain = NULL;
+	tb_xdomain_remove(xd);
+}
+
 static void
 icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
 {
 	const struct icm_fr_event_device_connected *pkg =
 		(const struct icm_fr_event_device_connected *)hdr;
+	enum tb_security_level security_level;
 	struct tb_switch *sw, *parent_sw;
 	struct icm *icm = tb_priv(tb);
 	bool authorized = false;
+	struct tb_xdomain *xd;
 	u8 link, depth;
+	bool boot;
 	u64 route;
 	int ret;
 
@@ -399,6 +524,15 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
 	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
 		ICM_LINK_INFO_DEPTH_SHIFT;
 	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
+	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
+			 ICM_FLAGS_SLEVEL_SHIFT;
+	boot = pkg->link_info & ICM_LINK_INFO_BOOT;
+
+	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
+		tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
+			link, depth);
+		return;
+	}
 
 	ret = icm->get_route(tb, link, depth, &route);
 	if (ret) {
@@ -425,16 +559,8 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
 		 */
 		if (sw->depth == depth && sw_phy_port == phy_port &&
 		    !!sw->authorized == authorized) {
-			tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
-			tb_port_at(route, parent_sw)->remote =
-				   tb_upstream_port(sw);
-			sw->config.route_hi = upper_32_bits(route);
-			sw->config.route_lo = lower_32_bits(route);
-			sw->connection_id = pkg->connection_id;
-			sw->connection_key = pkg->connection_key;
-			sw->link = link;
-			sw->depth = depth;
-			sw->is_unplugged = false;
+			update_switch(parent_sw, sw, route, pkg->connection_id,
+				      pkg->connection_key, link, depth, boot);
 			tb_switch_put(sw);
 			return;
 		}
@@ -467,6 +593,13 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
 		tb_switch_put(sw);
 	}
 
+	/* Remove existing XDomain connection if found */
+	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
+	if (xd) {
+		remove_xdomain(xd);
+		tb_xdomain_put(xd);
+	}
+
 	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
 	if (!parent_sw) {
 		tb_err(tb, "failed to find parent switch for %u.%u\n",
@@ -474,30 +607,10 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
 		return;
 	}
 
-	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
-	if (!sw) {
-		tb_switch_put(parent_sw);
-		return;
-	}
-
-	sw->uuid = kmemdup(&pkg->ep_uuid, sizeof(pkg->ep_uuid), GFP_KERNEL);
-	sw->connection_id = pkg->connection_id;
-	sw->connection_key = pkg->connection_key;
-	sw->link = link;
-	sw->depth = depth;
-	sw->authorized = authorized;
-	sw->security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
-				ICM_FLAGS_SLEVEL_SHIFT;
-
-	/* Link the two switches now */
-	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
-	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
+	add_switch(parent_sw, route, &pkg->ep_uuid, pkg->connection_id,
+		   pkg->connection_key, link, depth, security_level,
+		   authorized, boot);
 
-	ret = tb_switch_add(sw);
-	if (ret) {
-		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
-		tb_switch_put(sw);
-	}
 	tb_switch_put(parent_sw);
 }
 
@@ -529,15 +642,6 @@ icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
 	tb_switch_put(sw);
 }
 
-static void remove_xdomain(struct tb_xdomain *xd)
-{
-	struct tb_switch *sw;
-
-	sw = tb_to_switch(xd->dev.parent);
-	tb_port_at(xd->route, sw)->xdomain = NULL;
-	tb_xdomain_remove(xd);
-}
-
 static void
 icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
 {
@@ -577,9 +681,7 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
 		phy_port = phy_port_from_route(route, depth);
 
 		if (xd->depth == depth && xd_phy_port == phy_port) {
-			xd->link = link;
-			xd->route = route;
-			xd->is_unplugged = false;
+			update_xdomain(xd, route, link);
 			tb_xdomain_put(xd);
 			return;
 		}
@@ -629,19 +731,8 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
 		return;
 	}
 
-	xd = tb_xdomain_alloc(sw->tb, &sw->dev, route,
-			      &pkg->local_uuid, &pkg->remote_uuid);
-	if (!xd) {
-		tb_switch_put(sw);
-		return;
-	}
-
-	xd->link = link;
-	xd->depth = depth;
-
-	tb_port_at(route, sw)->xdomain = xd;
-
-	tb_xdomain_add(xd);
+	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
+		    depth);
 	tb_switch_put(sw);
 }
 
@@ -664,6 +755,351 @@ icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
 	}
 }
 
+static int
+icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+		    size_t *nboot_acl)
+{
+	struct icm_tr_pkg_driver_ready_response reply;
+	struct icm_pkg_driver_ready request = {
+		.hdr.code = ICM_DRIVER_READY,
+	};
+	int ret;
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, 20000);
+	if (ret)
+		return ret;
+
+	if (security_level)
+		*security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK;
+	if (nboot_acl)
+		*nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
+				ICM_TR_INFO_BOOT_ACL_SHIFT;
+	return 0;
+}
+
+static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
+{
+	struct icm_tr_pkg_approve_device request;
+	struct icm_tr_pkg_approve_device reply;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+	request.hdr.code = ICM_APPROVE_DEVICE;
+	request.route_lo = sw->config.route_lo;
+	request.route_hi = sw->config.route_hi;
+	request.connection_id = sw->connection_id;
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_APPROVE_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
+		tb_warn(tb, "PCIe tunnel creation failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
+{
+	struct icm_tr_pkg_add_device_key_response reply;
+	struct icm_tr_pkg_add_device_key request;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+	request.hdr.code = ICM_ADD_DEVICE_KEY;
+	request.route_lo = sw->config.route_lo;
+	request.route_hi = sw->config.route_hi;
+	request.connection_id = sw->connection_id;
+	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
+		tb_warn(tb, "Adding key to switch failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
+				       const u8 *challenge, u8 *response)
+{
+	struct icm_tr_pkg_challenge_device_response reply;
+	struct icm_tr_pkg_challenge_device request;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+	request.hdr.code = ICM_CHALLENGE_DEVICE;
+	request.route_lo = sw->config.route_lo;
+	request.route_hi = sw->config.route_hi;
+	request.connection_id = sw->connection_id;
+	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR)
+		return -EKEYREJECTED;
+	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
+		return -ENOKEY;
+
+	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);
+
+	return 0;
+}
+
+static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	struct icm_tr_pkg_approve_xdomain_response reply;
+	struct icm_tr_pkg_approve_xdomain request;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	request.hdr.code = ICM_APPROVE_XDOMAIN;
+	request.route_hi = upper_32_bits(xd->route);
+	request.route_lo = lower_32_bits(xd->route);
+	request.transmit_path = xd->transmit_path;
+	request.transmit_ring = xd->transmit_ring;
+	request.receive_path = xd->receive_path;
+	request.receive_ring = xd->receive_ring;
+	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR)
+		return -EIO;
+
+	return 0;
+}
+
+static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
+				    int stage)
+{
+	struct icm_tr_pkg_disconnect_xdomain_response reply;
+	struct icm_tr_pkg_disconnect_xdomain request;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	request.hdr.code = ICM_DISCONNECT_XDOMAIN;
+	request.stage = stage;
+	request.route_hi = upper_32_bits(xd->route);
+	request.route_lo = lower_32_bits(xd->route);
+	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR)
+		return -EIO;
+
+	return 0;
+}
+
+static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	int ret;
+
+	ret = icm_tr_xdomain_tear_down(tb, xd, 1);
+	if (ret)
+		return ret;
+
+	usleep_range(10, 50);
+	return icm_tr_xdomain_tear_down(tb, xd, 2);
+}
+
+static void
+icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	const struct icm_tr_event_device_connected *pkg =
+		(const struct icm_tr_event_device_connected *)hdr;
+	enum tb_security_level security_level;
+	struct tb_switch *sw, *parent_sw;
+	struct tb_xdomain *xd;
+	bool authorized, boot;
+	u64 route;
+
+	/*
+	 * Currently we don't use the QoS information coming with the
+	 * device connected message so simply just ignore that extra
+	 * packet for now.
+	 */
+	if (pkg->hdr.packet_id)
+		return;
+
+	/*
+	 * After NVM upgrade adding root switch device fails because we
+	 * initiated reset. During that time ICM might still send device
+	 * connected message which we ignore here.
+	 */
+	if (!tb->root_switch)
+		return;
+
+	route = get_route(pkg->route_hi, pkg->route_lo);
+	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
+	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
+			 ICM_FLAGS_SLEVEL_SHIFT;
+	boot = pkg->link_info & ICM_LINK_INFO_BOOT;
+
+	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
+		tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
+			route);
+		return;
+	}
+
+	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
+	if (sw) {
+		/* Update the switch if it is still in the same place */
+		if (tb_route(sw) == route && !!sw->authorized == authorized) {
+			parent_sw = tb_to_switch(sw->dev.parent);
+			update_switch(parent_sw, sw, route, pkg->connection_id,
+				      0, 0, 0, boot);
+			tb_switch_put(sw);
+			return;
+		}
+
+		remove_switch(sw);
+		tb_switch_put(sw);
+	}
+
+	/* Another switch with the same address */
+	sw = tb_switch_find_by_route(tb, route);
+	if (sw) {
+		remove_switch(sw);
+		tb_switch_put(sw);
+	}
+
+	/* XDomain connection with the same address */
+	xd = tb_xdomain_find_by_route(tb, route);
+	if (xd) {
+		remove_xdomain(xd);
+		tb_xdomain_put(xd);
+	}
+
+	parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
+	if (!parent_sw) {
+		tb_err(tb, "failed to find parent switch for %llx\n", route);
+		return;
+	}
+
+	add_switch(parent_sw, route, &pkg->ep_uuid, pkg->connection_id,
+		   0, 0, 0, security_level, authorized, boot);
+
+	tb_switch_put(parent_sw);
+}
+
+static void
+icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	const struct icm_tr_event_device_disconnected *pkg =
+		(const struct icm_tr_event_device_disconnected *)hdr;
+	struct tb_switch *sw;
+	u64 route;
+
+	route = get_route(pkg->route_hi, pkg->route_lo);
+
+	sw = tb_switch_find_by_route(tb, route);
+	if (!sw) {
+		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
+		return;
+	}
+
+	remove_switch(sw);
+	tb_switch_put(sw);
+}
+
+static void
+icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	const struct icm_tr_event_xdomain_connected *pkg =
+		(const struct icm_tr_event_xdomain_connected *)hdr;
+	struct tb_xdomain *xd;
+	struct tb_switch *sw;
+	u64 route;
+
+	if (!tb->root_switch)
+		return;
+
+	route = get_route(pkg->local_route_hi, pkg->local_route_lo);
+
+	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
+	if (xd) {
+		if (xd->route == route) {
+			update_xdomain(xd, route, 0);
+			tb_xdomain_put(xd);
+			return;
+		}
+
+		remove_xdomain(xd);
+		tb_xdomain_put(xd);
+	}
+
+	/* An existing xdomain with the same address */
+	xd = tb_xdomain_find_by_route(tb, route);
+	if (xd) {
+		remove_xdomain(xd);
+		tb_xdomain_put(xd);
+	}
+
+	/*
+	 * If the user disconnected a switch during suspend and
+	 * connected another host to the same port, remove the switch
+	 * first.
+	 */
+	sw = get_switch_at_route(tb->root_switch, route);
+	if (sw)
+		remove_switch(sw);
+
+	sw = tb_switch_find_by_route(tb, get_parent_route(route));
+	if (!sw) {
+		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
+		return;
+	}
+
+	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0);
+	tb_switch_put(sw);
+}
+
+static void
+icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	const struct icm_tr_event_xdomain_disconnected *pkg =
+		(const struct icm_tr_event_xdomain_disconnected *)hdr;
+	struct tb_xdomain *xd;
+	u64 route;
+
+	route = get_route(pkg->route_hi, pkg->route_lo);
+
+	xd = tb_xdomain_find_by_route(tb, route);
+	if (xd) {
+		remove_xdomain(xd);
+		tb_xdomain_put(xd);
+	}
+}
+
 static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
 {
 	struct pci_dev *parent;
@@ -728,14 +1164,14 @@ static bool icm_ar_is_supported(struct tb *tb)
 static int icm_ar_get_mode(struct tb *tb)
 {
 	struct tb_nhi *nhi = tb->nhi;
-	int retries = 5;
+	int retries = 60;
 	u32 val;
 
 	do {
 		val = ioread32(nhi->iobase + REG_FW_STS);
 		if (val & REG_FW_STS_NVM_AUTH_DONE)
 			break;
-		msleep(30);
+		msleep(50);
 	} while (--retries);
 
 	if (!retries) {
@@ -746,6 +1182,30 @@ static int icm_ar_get_mode(struct tb *tb)
 	return nhi_mailbox_mode(nhi);
 }
 
+static int
+icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+		    size_t *nboot_acl)
+{
+	struct icm_ar_pkg_driver_ready_response reply;
+	struct icm_pkg_driver_ready request = {
+		.hdr.code = ICM_DRIVER_READY,
+	};
+	int ret;
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (security_level)
+		*security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK;
+	if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
+		*nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
+				ICM_AR_INFO_BOOT_ACL_SHIFT;
+	return 0;
+}
+
 static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
 {
 	struct icm_ar_pkg_get_route_response reply;
@@ -768,6 +1228,87 @@ static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
 	return 0;
 }
 
+static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
+{
+	struct icm_ar_pkg_preboot_acl_response reply;
+	struct icm_ar_pkg_preboot_acl request = {
+		.hdr = { .code = ICM_PREBOOT_ACL },
+	};
+	int ret, i;
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR)
+		return -EIO;
+
+	for (i = 0; i < nuuids; i++) {
+		u32 *uuid = (u32 *)&uuids[i];
+
+		uuid[0] = reply.acl[i].uuid_lo;
+		uuid[1] = reply.acl[i].uuid_hi;
+
+		if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) {
+			/* Map empty entries to null UUID */
+			uuid[0] = 0;
+			uuid[1] = 0;
+		} else {
+			/* Upper two DWs are always ones */
+			uuid[2] = 0xffffffff;
+			uuid[3] = 0xffffffff;
+		}
+	}
+
+	return ret;
+}
+
+static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
+			       size_t nuuids)
+{
+	struct icm_ar_pkg_preboot_acl_response reply;
+	struct icm_ar_pkg_preboot_acl request = {
+		.hdr = {
+			.code = ICM_PREBOOT_ACL,
+			.flags = ICM_FLAGS_WRITE,
+		},
+	};
+	int ret, i;
+
+	for (i = 0; i < nuuids; i++) {
+		const u32 *uuid = (const u32 *)&uuids[i];
+
+		if (uuid_is_null(&uuids[i])) {
+			/*
+			 * Map null UUID to the empty (all ones) entries
+			 * for ICM.
+			 */
+			request.acl[i].uuid_lo = 0xffffffff;
+			request.acl[i].uuid_hi = 0xffffffff;
+		} else {
+			/* Two high DWs need to be set to all one */
+			if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff)
+				return -EINVAL;
+
+			request.acl[i].uuid_lo = uuid[0];
+			request.acl[i].uuid_hi = uuid[1];
+		}
+	}
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR)
+		return -EIO;
+
+	return 0;
+}
+
 static void icm_handle_notification(struct work_struct *work)
 {
 	struct icm_notification *n = container_of(work, typeof(*n), work);
@@ -814,23 +1355,18 @@ static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
 }
 
 static int
-__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level)
+__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+		   size_t *nboot_acl)
 {
-	struct icm_pkg_driver_ready_response reply;
-	struct icm_pkg_driver_ready request = {
-		.hdr.code = ICM_DRIVER_READY,
-	};
-	unsigned int retries = 10;
+	struct icm *icm = tb_priv(tb);
+	unsigned int retries = 50;
 	int ret;
 
-	memset(&reply, 0, sizeof(reply));
-	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-			  1, ICM_TIMEOUT);
-	if (ret)
+	ret = icm->driver_ready(tb, security_level, nboot_acl);
+	if (ret) {
+		tb_err(tb, "failed to send driver ready to ICM\n");
 		return ret;
-
-	if (security_level)
-		*security_level = reply.security_level & 0xf;
+	}
 
 	/*
 	 * Hold on here until the switch config space is accessible so
@@ -848,6 +1384,7 @@ __icm_driver_ready(struct tb *tb, enum tb_security_level *security_level)
 		msleep(50);
 	} while (--retries);
 
+	tb_err(tb, "failed to read root switch config space, giving up\n");
 	return -ETIMEDOUT;
 }
 
@@ -915,6 +1452,9 @@ static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
 	struct icm *icm = tb_priv(tb);
 	u32 val;
 
+	if (!icm->upstream_port)
+		return -ENODEV;
+
 	/* Put ARC to wait for CIO reset event to happen */
 	val = ioread32(nhi->iobase + REG_FW_STS);
 	val |= REG_FW_STS_CIO_RESET_REQ;
@@ -1054,6 +1594,9 @@ static int icm_firmware_init(struct tb *tb)
 			break;
 
 		default:
+			if (ret < 0)
+				return ret;
+
 			tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
 			return -ENODEV;
 		}
@@ -1089,7 +1632,18 @@ static int icm_driver_ready(struct tb *tb)
 		return 0;
 	}
 
-	return __icm_driver_ready(tb, &tb->security_level);
+	ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl);
+	if (ret)
+		return ret;
+
+	/*
+	 * Make sure the number of supported preboot ACL matches what we
+	 * expect or disable the whole feature.
+	 */
+	if (tb->nboot_acl > icm->max_boot_acl)
+		tb->nboot_acl = 0;
+
+	return 0;
 }
 
 static int icm_suspend(struct tb *tb)
@@ -1185,7 +1739,7 @@ static void icm_complete(struct tb *tb)
 	 * Now all existing children should be resumed, start events
 	 * from ICM to get updated status.
 	 */
-	__icm_driver_ready(tb, NULL);
+	__icm_driver_ready(tb, NULL, NULL);
 
 	/*
 	 * We do not get notifications of devices that have been
@@ -1238,7 +1792,7 @@ static int icm_disconnect_pcie_paths(struct tb *tb)
 	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
 }
 
-/* Falcon Ridge and Alpine Ridge */
+/* Falcon Ridge */
 static const struct tb_cm_ops icm_fr_ops = {
 	.driver_ready = icm_driver_ready,
 	.start = icm_start,
@@ -1254,6 +1808,42 @@ static const struct tb_cm_ops icm_fr_ops = {
 	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
 };
 
+/* Alpine Ridge */
+static const struct tb_cm_ops icm_ar_ops = {
+	.driver_ready = icm_driver_ready,
+	.start = icm_start,
+	.stop = icm_stop,
+	.suspend = icm_suspend,
+	.complete = icm_complete,
+	.handle_event = icm_handle_event,
+	.get_boot_acl = icm_ar_get_boot_acl,
+	.set_boot_acl = icm_ar_set_boot_acl,
+	.approve_switch = icm_fr_approve_switch,
+	.add_switch_key = icm_fr_add_switch_key,
+	.challenge_switch_key = icm_fr_challenge_switch_key,
+	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
+	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
+	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
+};
+
+/* Titan Ridge */
+static const struct tb_cm_ops icm_tr_ops = {
+	.driver_ready = icm_driver_ready,
+	.start = icm_start,
+	.stop = icm_stop,
+	.suspend = icm_suspend,
+	.complete = icm_complete,
+	.handle_event = icm_handle_event,
+	.get_boot_acl = icm_ar_get_boot_acl,
+	.set_boot_acl = icm_ar_set_boot_acl,
+	.approve_switch = icm_tr_approve_switch,
+	.add_switch_key = icm_tr_add_switch_key,
+	.challenge_switch_key = icm_tr_challenge_switch_key,
+	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
+	.approve_xdomain_paths = icm_tr_approve_xdomain_paths,
+	.disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
+};
+
 struct tb *icm_probe(struct tb_nhi *nhi)
 {
 	struct icm *icm;
@@ -1272,6 +1862,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
 		icm->is_supported = icm_fr_is_supported;
 		icm->get_route = icm_fr_get_route;
+		icm->driver_ready = icm_fr_driver_ready;
 		icm->device_connected = icm_fr_device_connected;
 		icm->device_disconnected = icm_fr_device_disconnected;
 		icm->xdomain_connected = icm_fr_xdomain_connected;
@@ -1284,14 +1875,29 @@ struct tb *icm_probe(struct tb_nhi *nhi)
 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
+		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
 		icm->is_supported = icm_ar_is_supported;
 		icm->get_mode = icm_ar_get_mode;
 		icm->get_route = icm_ar_get_route;
+		icm->driver_ready = icm_ar_driver_ready;
 		icm->device_connected = icm_fr_device_connected;
 		icm->device_disconnected = icm_fr_device_disconnected;
 		icm->xdomain_connected = icm_fr_xdomain_connected;
 		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
-		tb->cm_ops = &icm_fr_ops;
+		tb->cm_ops = &icm_ar_ops;
+		break;
+
+	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
+	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
+		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
+		icm->is_supported = icm_ar_is_supported;
+		icm->get_mode = icm_ar_get_mode;
+		icm->driver_ready = icm_tr_driver_ready;
+		icm->device_connected = icm_tr_device_connected;
+		icm->device_disconnected = icm_tr_device_disconnected;
+		icm->xdomain_connected = icm_tr_xdomain_connected;
+		icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
+		tb->cm_ops = &icm_tr_ops;
 		break;
 	}
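
One dense spot in the icm.c diff above is get_parent_route(): it drops the
deepest hop from a route string to obtain the parent switch's route. The
standalone sketch below mirrors that arithmetic under the assumption that a
route string packs one 8-bit port number per hop (TB_ROUTE_SHIFT) starting
from the low byte, and that the route length is derived from the highest set
bit as in the kernel's tb_route_length():

/*
 * Standalone sketch of the route-string arithmetic used by
 * get_parent_route().  Assumes 8 bits per hop and a route length based
 * on the highest set bit; port numbers are never zero, so counting
 * non-zero bytes gives the same result here.
 */
#include <stdint.h>
#include <stdio.h>

#define ROUTE_SHIFT 8

static int route_length(uint64_t route)
{
	int len = 0;

	while (route) {
		route >>= ROUTE_SHIFT;
		len++;
	}
	return len;
}

static uint64_t parent_route(uint64_t route)
{
	int depth = route_length(route);

	if (!depth)
		return 0;
	/* Clear the deepest hop byte; what remains is the parent's route. */
	return route & ~(0xffULL << (depth - 1) * ROUTE_SHIFT);
}

int main(void)
{
	/* Device behind port 3 of the root switch, then port 5 one level down. */
	uint64_t route = 0x0503;

	printf("route %#llx -> parent %#llx\n",
	       (unsigned long long)route,
	       (unsigned long long)parent_route(route));
	/* Prints: route 0x503 -> parent 0x3 */
	return 0;
}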
 

+ 4 - 1
drivers/thunderbolt/nhi.c

@@ -1036,7 +1036,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		 */
 		tb_domain_put(tb);
 		nhi_shutdown(nhi);
-		return -EIO;
+		return res;
 	}
 	pci_set_drvdata(pdev, tb);
 
@@ -1064,6 +1064,7 @@ static const struct dev_pm_ops nhi_pm_ops = {
 					    * we just disable hotplug, the
 					    * pci-tunnels stay alive.
 					    */
+	.thaw_noirq = nhi_resume_noirq,
 	.restore_noirq = nhi_resume_noirq,
 	.suspend = nhi_suspend,
 	.freeze = nhi_suspend,
@@ -1110,6 +1111,8 @@ static struct pci_device_id nhi_ids[] = {
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
 
 	{ 0,}
 };

+ 5 - 0
drivers/thunderbolt/nhi.h

@@ -45,5 +45,10 @@ enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi);
 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI	0x15dc
 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI	0x15dd
 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI	0x15de
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE	0x15e7
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI		0x15e8
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE	0x15ea
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI		0x15eb
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE	0x15ef
 
 #endif

+ 60 - 1
drivers/thunderbolt/switch.c

@@ -716,6 +716,13 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
 	if (sw->authorized)
 		goto unlock;
 
+	/*
+	 * Make sure there is no PCIe rescan ongoing when a new PCIe
+	 * tunnel is created. Otherwise the PCIe rescan code might find
+	 * the new tunnel too early.
+	 */
+	pci_lock_rescan_remove();
+
 	switch (val) {
 	/* Approve switch */
 	case 1:
@@ -735,6 +742,8 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
 		break;
 	}
 
+	pci_unlock_rescan_remove();
+
 	if (!ret) {
 		sw->authorized = val;
 		/* Notify status change to the userspace */
@@ -766,6 +775,15 @@ static ssize_t authorized_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(authorized);
 
+static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	return sprintf(buf, "%u\n", sw->boot);
+}
+static DEVICE_ATTR_RO(boot);
+
 static ssize_t device_show(struct device *dev, struct device_attribute *attr,
 			   char *buf)
 {
@@ -942,6 +960,7 @@ static DEVICE_ATTR_RO(unique_id);
 
 static struct attribute *switch_attrs[] = {
 	&dev_attr_authorized.attr,
+	&dev_attr_boot.attr,
 	&dev_attr_device.attr,
 	&dev_attr_device_name.attr,
 	&dev_attr_key.attr,
@@ -970,6 +989,10 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
 		if (sw->dma_port)
 			return attr->mode;
 		return 0;
+	} else if (attr == &dev_attr_boot.attr) {
+		if (tb_route(sw))
+			return attr->mode;
+		return 0;
 	}
 
 	return sw->safe_mode ? 0 : attr->mode;
@@ -1028,6 +1051,9 @@ static int tb_switch_get_generation(struct tb_switch *sw)
 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
 		return 3;
 
 	default:
@@ -1470,6 +1496,7 @@ struct tb_sw_lookup {
 	u8 link;
 	u8 depth;
 	const uuid_t *uuid;
+	u64 route;
 };
 
 static int tb_switch_match(struct device *dev, void *data)
@@ -1485,6 +1512,11 @@ static int tb_switch_match(struct device *dev, void *data)
 	if (lookup->uuid)
 		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
 
+	if (lookup->route) {
+		return sw->config.route_lo == lower_32_bits(lookup->route) &&
+		       sw->config.route_hi == upper_32_bits(lookup->route);
+	}
+
 	/* Root switch is matched only by depth */
 	if (!lookup->depth)
 		return !sw->depth;
@@ -1519,7 +1551,7 @@ struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
 }
 
 /**
- * tb_switch_find_by_link_depth() - Find switch by UUID
+ * tb_switch_find_by_uuid() - Find switch by UUID
  * @tb: Domain the switch belongs
  * @uuid: UUID to look for
  *
@@ -1542,6 +1574,33 @@ struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
 	return NULL;
 }
 
+/**
+ * tb_switch_find_by_route() - Find switch by route string
+ * @tb: Domain the switch belongs
+ * @route: Route string to look for
+ *
+ * Returned switch has reference count increased so the caller needs to
+ * call tb_switch_put() when done with the switch.
+ */
+struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
+{
+	struct tb_sw_lookup lookup;
+	struct device *dev;
+
+	if (!route)
+		return tb_switch_get(tb->root_switch);
+
+	memset(&lookup, 0, sizeof(lookup));
+	lookup.tb = tb;
+	lookup.route = route;
+
+	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
+	if (dev)
+		return tb_to_switch(dev);
+
+	return NULL;
+}
+
 void tb_switch_exit(void)
 {
 	ida_destroy(&nvm_ida);

+ 14 - 0
drivers/thunderbolt/tb.h

@@ -66,6 +66,7 @@ struct tb_switch_nvm {
  * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
  * @no_nvm_upgrade: Prevent NVM upgrade of this switch
  * @safe_mode: The switch is in safe-mode
+ * @boot: Whether the switch was already authorized on boot or not
  * @authorized: Whether the switch is authorized by user or policy
  * @work: Work used to automatically authorize a switch
  * @security_level: Switch supported security level
@@ -99,6 +100,7 @@ struct tb_switch {
 	struct tb_switch_nvm *nvm;
 	bool no_nvm_upgrade;
 	bool safe_mode;
+	bool boot;
 	unsigned int authorized;
 	struct work_struct work;
 	enum tb_security_level security_level;
@@ -198,6 +200,8 @@ struct tb_path {
  * @suspend: Connection manager specific suspend
  * @complete: Connection manager specific complete
  * @handle_event: Handle thunderbolt event
+ * @get_boot_acl: Get boot ACL list
+ * @set_boot_acl: Set boot ACL list
  * @approve_switch: Approve switch
  * @add_switch_key: Add key to switch
  * @challenge_switch_key: Challenge switch using key
@@ -215,6 +219,8 @@ struct tb_cm_ops {
 	void (*complete)(struct tb *tb);
 	void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type,
 			     const void *buf, size_t size);
+	int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids);
+	int (*set_boot_acl)(struct tb *tb, const uuid_t *uuids, size_t nuuids);
 	int (*approve_switch)(struct tb *tb, struct tb_switch *sw);
 	int (*add_switch_key)(struct tb *tb, struct tb_switch *sw);
 	int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
@@ -386,6 +392,14 @@ struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
 					       u8 depth);
 struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
+struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route);
+
+static inline struct tb_switch *tb_switch_get(struct tb_switch *sw)
+{
+	if (sw)
+		get_device(&sw->dev);
+	return sw;
+}
 
 static inline void tb_switch_put(struct tb_switch *sw)
 {

+ 179 - 1
drivers/thunderbolt/tb_msgs.h

@@ -102,6 +102,8 @@ enum icm_pkg_code {
 	ICM_ADD_DEVICE_KEY = 0x6,
 	ICM_GET_ROUTE = 0xa,
 	ICM_APPROVE_XDOMAIN = 0x10,
+	ICM_DISCONNECT_XDOMAIN = 0x11,
+	ICM_PREBOOT_ACL = 0x18,
 };
 
 enum icm_event_code {
@@ -122,18 +124,23 @@ struct icm_pkg_header {
 #define ICM_FLAGS_NO_KEY		BIT(1)
 #define ICM_FLAGS_SLEVEL_SHIFT		3
 #define ICM_FLAGS_SLEVEL_MASK		GENMASK(4, 3)
+#define ICM_FLAGS_WRITE			BIT(7)
 
 struct icm_pkg_driver_ready {
 	struct icm_pkg_header hdr;
 };
 
-struct icm_pkg_driver_ready_response {
+/* Falcon Ridge only messages */
+
+struct icm_fr_pkg_driver_ready_response {
 	struct icm_pkg_header hdr;
 	u8 romver;
 	u8 ramver;
 	u16 security_level;
 };
 
+#define ICM_FR_SLEVEL_MASK		0xf
+
 /* Falcon Ridge & Alpine Ridge common messages */
 
 struct icm_fr_pkg_get_topology {
@@ -176,6 +183,8 @@ struct icm_fr_event_device_connected {
 #define ICM_LINK_INFO_DEPTH_SHIFT	4
 #define ICM_LINK_INFO_DEPTH_MASK	GENMASK(7, 4)
 #define ICM_LINK_INFO_APPROVED		BIT(8)
+#define ICM_LINK_INFO_REJECTED		BIT(9)
+#define ICM_LINK_INFO_BOOT		BIT(10)
 
 struct icm_fr_pkg_approve_device {
 	struct icm_pkg_header hdr;
@@ -270,6 +279,18 @@ struct icm_fr_pkg_approve_xdomain_response {
 
 /* Alpine Ridge only messages */
 
+struct icm_ar_pkg_driver_ready_response {
+	struct icm_pkg_header hdr;
+	u8 romver;
+	u8 ramver;
+	u16 info;
+};
+
+#define ICM_AR_INFO_SLEVEL_MASK		GENMASK(3, 0)
+#define ICM_AR_INFO_BOOT_ACL_SHIFT	7
+#define ICM_AR_INFO_BOOT_ACL_MASK	GENMASK(11, 7)
+#define ICM_AR_INFO_BOOT_ACL_SUPPORTED	BIT(13)
+
 struct icm_ar_pkg_get_route {
 	struct icm_pkg_header hdr;
 	u16 reserved;
@@ -284,6 +305,163 @@ struct icm_ar_pkg_get_route_response {
 	u32 route_lo;
 };
 
+struct icm_ar_boot_acl_entry {
+	u32 uuid_lo;
+	u32 uuid_hi;
+};
+
+#define ICM_AR_PREBOOT_ACL_ENTRIES	16
+
+struct icm_ar_pkg_preboot_acl {
+	struct icm_pkg_header hdr;
+	struct icm_ar_boot_acl_entry acl[ICM_AR_PREBOOT_ACL_ENTRIES];
+};
+
+struct icm_ar_pkg_preboot_acl_response {
+	struct icm_pkg_header hdr;
+	struct icm_ar_boot_acl_entry acl[ICM_AR_PREBOOT_ACL_ENTRIES];
+};
+
+/* Titan Ridge messages */
+
+struct icm_tr_pkg_driver_ready_response {
+	struct icm_pkg_header hdr;
+	u16 reserved1;
+	u16 info;
+	u32 nvm_version;
+	u16 device_id;
+	u16 reserved2;
+};
+
+#define ICM_TR_INFO_SLEVEL_MASK		GENMASK(2, 0)
+#define ICM_TR_INFO_BOOT_ACL_SHIFT	7
+#define ICM_TR_INFO_BOOT_ACL_MASK	GENMASK(12, 7)
+
+struct icm_tr_event_device_connected {
+	struct icm_pkg_header hdr;
+	uuid_t ep_uuid;
+	u32 route_hi;
+	u32 route_lo;
+	u8 connection_id;
+	u8 reserved;
+	u16 link_info;
+	u32 ep_name[55];
+};
+
+struct icm_tr_event_device_disconnected {
+	struct icm_pkg_header hdr;
+	u32 route_hi;
+	u32 route_lo;
+};
+
+struct icm_tr_event_xdomain_connected {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+	uuid_t remote_uuid;
+	uuid_t local_uuid;
+	u32 local_route_hi;
+	u32 local_route_lo;
+	u32 remote_route_hi;
+	u32 remote_route_lo;
+};
+
+struct icm_tr_event_xdomain_disconnected {
+	struct icm_pkg_header hdr;
+	u32 route_hi;
+	u32 route_lo;
+	uuid_t remote_uuid;
+};
+
+struct icm_tr_pkg_approve_device {
+	struct icm_pkg_header hdr;
+	uuid_t ep_uuid;
+	u32 route_hi;
+	u32 route_lo;
+	u8 connection_id;
+	u8 reserved1[3];
+};
+
+struct icm_tr_pkg_add_device_key {
+	struct icm_pkg_header hdr;
+	uuid_t ep_uuid;
+	u32 route_hi;
+	u32 route_lo;
+	u8 connection_id;
+	u8 reserved[3];
+	u32 key[8];
+};
+
+struct icm_tr_pkg_challenge_device {
+	struct icm_pkg_header hdr;
+	uuid_t ep_uuid;
+	u32 route_hi;
+	u32 route_lo;
+	u8 connection_id;
+	u8 reserved[3];
+	u32 challenge[8];
+};
+
+struct icm_tr_pkg_approve_xdomain {
+	struct icm_pkg_header hdr;
+	u32 route_hi;
+	u32 route_lo;
+	uuid_t remote_uuid;
+	u16 transmit_path;
+	u16 transmit_ring;
+	u16 receive_path;
+	u16 receive_ring;
+};
+
+struct icm_tr_pkg_disconnect_xdomain {
+	struct icm_pkg_header hdr;
+	u8 stage;
+	u8 reserved[3];
+	u32 route_hi;
+	u32 route_lo;
+	uuid_t remote_uuid;
+};
+
+struct icm_tr_pkg_challenge_device_response {
+	struct icm_pkg_header hdr;
+	uuid_t ep_uuid;
+	u32 route_hi;
+	u32 route_lo;
+	u8 connection_id;
+	u8 reserved[3];
+	u32 challenge[8];
+	u32 response[8];
+};
+
+struct icm_tr_pkg_add_device_key_response {
+	struct icm_pkg_header hdr;
+	uuid_t ep_uuid;
+	u32 route_hi;
+	u32 route_lo;
+	u8 connection_id;
+	u8 reserved[3];
+};
+
+struct icm_tr_pkg_approve_xdomain_response {
+	struct icm_pkg_header hdr;
+	u32 route_hi;
+	u32 route_lo;
+	uuid_t remote_uuid;
+	u16 transmit_path;
+	u16 transmit_ring;
+	u16 receive_path;
+	u16 receive_ring;
+};
+
+struct icm_tr_pkg_disconnect_xdomain_response {
+	struct icm_pkg_header hdr;
+	u8 stage;
+	u8 reserved[3];
+	u32 route_hi;
+	u32 route_lo;
+	uuid_t remote_uuid;
+};
+
 /* XDomain messages */
 
 struct tb_xdomain_header {

+ 35 - 12
drivers/thunderbolt/xdomain.c

@@ -1255,6 +1255,7 @@ struct tb_xdomain_lookup {
 	const uuid_t *uuid;
 	u8 link;
 	u8 depth;
+	u64 route;
 };
 
 static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
@@ -1275,9 +1276,13 @@ static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
 			if (lookup->uuid) {
 				if (uuid_equal(xd->remote_uuid, lookup->uuid))
 					return xd;
-			} else if (lookup->link == xd->link &&
+			} else if (lookup->link &&
+				   lookup->link == xd->link &&
 				   lookup->depth == xd->depth) {
 				return xd;
+			} else if (lookup->route &&
+				   lookup->route == xd->route) {
+				return xd;
 			}
 		} else if (port->remote) {
 			xd = switch_find_xdomain(port->remote->sw, lookup);
@@ -1313,12 +1318,7 @@ struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
 	lookup.uuid = uuid;
 
 	xd = switch_find_xdomain(tb->root_switch, &lookup);
-	if (xd) {
-		get_device(&xd->dev);
-		return xd;
-	}
-
-	return NULL;
+	return tb_xdomain_get(xd);
 }
 EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
 
@@ -1349,13 +1349,36 @@ struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
 	lookup.depth = depth;
 
 	xd = switch_find_xdomain(tb->root_switch, &lookup);
-	if (xd) {
-		get_device(&xd->dev);
-		return xd;
-	}
+	return tb_xdomain_get(xd);
+}
 
-	return NULL;
+/**
+ * tb_xdomain_find_by_route() - Find an XDomain by route string
+ * @tb: Domain where the XDomain belongs to
+ * @route: XDomain route string
+ *
+ * Finds XDomain by walking through the Thunderbolt topology below @tb.
+ * The returned XDomain will have its reference count increased so the
+ * caller needs to call tb_xdomain_put() when it is done with the
+ * object.
+ *
+ * This will find all XDomains including the ones that are not yet added
+ * to the bus (handshake is still in progress).
+ *
+ * The caller needs to hold @tb->lock.
+ */
+struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
+{
+	struct tb_xdomain_lookup lookup;
+	struct tb_xdomain *xd;
+
+	memset(&lookup, 0, sizeof(lookup));
+	lookup.route = route;
+
+	xd = switch_find_xdomain(tb->root_switch, &lookup);
+	return tb_xdomain_get(xd);
 }
+EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);
 
 bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
 			       const void *buf, size_t size)

+ 19 - 0
include/linux/thunderbolt.h

@@ -45,12 +45,16 @@ enum tb_cfg_pkg_type {
  * @TB_SECURITY_USER: User approval required at minimum
  * @TB_SECURITY_SECURE: One time saved key required at minimum
  * @TB_SECURITY_DPONLY: Only tunnel Display port (and USB)
+ * @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected
+ *			 Thunderbolt dock (and Display Port). All PCIe
+ *			 links downstream of the dock are removed.
  */
 enum tb_security_level {
 	TB_SECURITY_NONE,
 	TB_SECURITY_USER,
 	TB_SECURITY_SECURE,
 	TB_SECURITY_DPONLY,
+	TB_SECURITY_USBONLY,
 };
 
 /**
@@ -65,6 +69,7 @@ enum tb_security_level {
  * @cm_ops: Connection manager specific operations vector
  * @index: Linux assigned domain number
  * @security_level: Current security level
+ * @nboot_acl: Number of boot ACLs the domain supports
  * @privdata: Private connection manager specific data
  */
 struct tb {
@@ -77,6 +82,7 @@ struct tb {
 	const struct tb_cm_ops *cm_ops;
 	int index;
 	enum tb_security_level security_level;
+	size_t nboot_acl;
 	unsigned long privdata[0];
 };
 
@@ -237,6 +243,7 @@ int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
 			    u16 receive_ring);
 int tb_xdomain_disable_paths(struct tb_xdomain *xd);
 struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
+struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);
 
 static inline struct tb_xdomain *
 tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
@@ -250,6 +257,18 @@ tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
 	return xd;
 }
 
+static inline struct tb_xdomain *
+tb_xdomain_find_by_route_locked(struct tb *tb, u64 route)
+{
+	struct tb_xdomain *xd;
+
+	mutex_lock(&tb->lock);
+	xd = tb_xdomain_find_by_route(tb, route);
+	mutex_unlock(&tb->lock);
+
+	return xd;
+}
+
 static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd)
 {
 	if (xd)