@@ -5716,6 +5716,7 @@ static int sd_degenerate(struct sched_domain *sd)
 			 SD_BALANCE_FORK |
 			 SD_BALANCE_EXEC |
 			 SD_SHARE_CPUCAPACITY |
+			 SD_ASYM_CPUCAPACITY |
 			 SD_SHARE_PKG_RESOURCES |
 			 SD_SHARE_POWERDOMAIN)) {
 		if (sd->groups != sd->groups->next)
@@ -5746,6 +5747,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 				SD_BALANCE_NEWIDLE |
 				SD_BALANCE_FORK |
 				SD_BALANCE_EXEC |
+				SD_ASYM_CPUCAPACITY |
 				SD_SHARE_CPUCAPACITY |
 				SD_SHARE_PKG_RESOURCES |
 				SD_PREFER_SIBLING |
@@ -6363,6 +6365,7 @@ static int sched_domains_curr_level;
  * SD_SHARE_PKG_RESOURCES - describes shared caches
  * SD_NUMA                - describes NUMA topologies
  * SD_SHARE_POWERDOMAIN   - describes shared power domain
+ * SD_ASYM_CPUCAPACITY   - describes mixed capacity topologies
  *
  * Odd one out, which beside describing the topology has a quirk also
  * prescribes the desired behaviour that goes along with it:
@@ -6374,6 +6377,7 @@ static int sched_domains_curr_level;
 	 SD_SHARE_PKG_RESOURCES |	\
 	 SD_NUMA |			\
 	 SD_ASYM_PACKING |		\
+	 SD_ASYM_CPUCAPACITY |		\
 	 SD_SHARE_POWERDOMAIN)
 
 static struct sched_domain *