@@ -2206,12 +2206,16 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
  * list of requested migratetype, possibly along with other pages from the same
  * block, depending on fragmentation avoidance heuristics. Returns true if
  * fallback was found so that __rmqueue_smallest() can grab it.
+ *
+ * The use of signed ints for order and current_order is a deliberate
+ * deviation from the rest of this file, to make the for loop
+ * condition simpler.
  */
 static inline bool
-__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
+__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 {
 	struct free_area *area;
-	unsigned int current_order;
+	int current_order;
 	struct page *page;
 	int fallback_mt;
 	bool can_steal;
@@ -2221,8 +2225,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 	 * approximates finding the pageblock with the most free pages, which
 	 * would be too costly to do exactly.
 	 */
-	for (current_order = MAX_ORDER-1;
-	     current_order >= order && current_order <= MAX_ORDER-1;
+	for (current_order = MAX_ORDER - 1; current_order >= order;
 	     --current_order) {
 		area = &(zone->free_area[current_order]);
 		fallback_mt = find_suitable_fallback(area, current_order,