@@ -268,6 +268,7 @@ static inline void *mchunk_memmove(struct z3fold_header *zhdr,
 		       zhdr->middle_chunks << CHUNK_SHIFT);
 }
 
+#define BIG_CHUNK_GAP	3
 /* Has to be called with lock held */
 static int z3fold_compact_page(struct z3fold_header *zhdr)
 {
@@ -286,8 +287,31 @@ static int z3fold_compact_page(struct z3fold_header *zhdr)
 		zhdr->middle_chunks = 0;
 		zhdr->start_middle = 0;
 		zhdr->first_num++;
+		return 1;
 	}
-	return 1;
+
+	/*
+	 * moving data is expensive, so let's only do that if
+	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
+	 */
+	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
+	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
+			BIG_CHUNK_GAP) {
+		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
+		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
+		return 1;
+	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
+		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
+					+ zhdr->middle_chunks) >=
+			BIG_CHUNK_GAP) {
+		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
+			zhdr->middle_chunks;
+		mchunk_memmove(zhdr, new_start);
+		zhdr->start_middle = new_start;
+		return 1;
+	}
+
+	return 0;
 }
 
 /**
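
For reference, a minimal standalone sketch of the gating logic this hunk introduces: compaction only pays for the memmove when the reclaimable gap is at least BIG_CHUNK_GAP chunks. The values given here for ZHDR_CHUNKS and TOTAL_CHUNKS are illustrative stand-ins, not the real z3fold constants, and worth_compacting() is a hypothetical helper, not kernel code:

	#include <stdio.h>

	#define ZHDR_CHUNKS	1	/* chunks taken by the header (assumed value) */
	#define TOTAL_CHUNKS	64	/* chunks per page (assumed value) */
	#define BIG_CHUNK_GAP	3

	struct hdr {
		unsigned short first_chunks;
		unsigned short middle_chunks;
		unsigned short last_chunks;
		unsigned short start_middle;
	};

	/* Nonzero if moving the middle object would reclaim a big enough gap. */
	static int worth_compacting(const struct hdr *h)
	{
		/* gap between the first object and the middle one */
		if (h->first_chunks != 0 && h->last_chunks == 0)
			return h->start_middle - (h->first_chunks + ZHDR_CHUNKS) >=
					BIG_CHUNK_GAP;
		/* gap between the middle object and the last one */
		if (h->last_chunks != 0 && h->first_chunks == 0)
			return TOTAL_CHUNKS - (h->last_chunks + h->start_middle +
					h->middle_chunks) >= BIG_CHUNK_GAP;
		return 0;
	}

	int main(void)
	{
		struct hdr h = { 4, 8, 0, 10 };

		/* gap left of the middle object: 10 - (4 + 1) = 5 >= 3 */
		printf("worth compacting: %d\n", worth_compacting(&h));
		return 0;
	}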