@@ -1510,10 +1510,16 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
  */
 #define PART_DESC_ALLOC_STEP 32
 
+struct part_desc_seq_scan_data {
+	struct udf_vds_record rec;
+	u32 partnum;
+};
+
 struct desc_seq_scan_data {
 	struct udf_vds_record vds[VDS_POS_LENGTH];
 	unsigned int size_part_descs;
-	struct udf_vds_record *part_descs_loc;
+	unsigned int num_part_descs;
+	struct part_desc_seq_scan_data *part_descs_loc;
 };
 
 static struct udf_vds_record *handle_partition_descriptor(
@@ -1522,10 +1528,14 @@ static struct udf_vds_record *handle_partition_descriptor(
 {
 	struct partitionDesc *desc = (struct partitionDesc *)bh->b_data;
 	int partnum;
+	int i;
 
 	partnum = le16_to_cpu(desc->partitionNumber);
-	if (partnum >= data->size_part_descs) {
-		struct udf_vds_record *new_loc;
+	for (i = 0; i < data->num_part_descs; i++)
+		if (partnum == data->part_descs_loc[i].partnum)
+			return &(data->part_descs_loc[i].rec);
+	if (data->num_part_descs >= data->size_part_descs) {
+		struct part_desc_seq_scan_data *new_loc;
 		unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
 
 		new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
@@ -1537,7 +1547,7 @@ static struct udf_vds_record *handle_partition_descriptor(
 		data->part_descs_loc = new_loc;
 		data->size_part_descs = new_size;
 	}
-	return &(data->part_descs_loc[partnum]);
+	return &(data->part_descs_loc[data->num_part_descs++].rec);
 }
 
 
@@ -1587,6 +1597,7 @@ static noinline int udf_process_sequence(
 
 	memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
 	data.size_part_descs = PART_DESC_ALLOC_STEP;
+	data.num_part_descs = 0;
 	data.part_descs_loc = kcalloc(data.size_part_descs,
 				      sizeof(*data.part_descs_loc),
 				      GFP_KERNEL);
@@ -1598,7 +1609,6 @@ static noinline int udf_process_sequence(
 	 * are in it.
 	 */
 	for (; (!done && block <= lastblock); block++) {
-
 		bh = udf_read_tagged(sb, block, block, &ident);
 		if (!bh)
 			break;
@@ -1670,13 +1680,10 @@ static noinline int udf_process_sequence(
 	}
 
 	/* Now handle prevailing Partition Descriptors */
-	for (i = 0; i < data.size_part_descs; i++) {
-		if (data.part_descs_loc[i].block) {
-			ret = udf_load_partdesc(sb,
-						data.part_descs_loc[i].block);
-			if (ret < 0)
-				return ret;
-		}
+	for (i = 0; i < data.num_part_descs; i++) {
+		ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
+		if (ret < 0)
+			return ret;
 	}
 
 	return 0;
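
For context, here is a minimal standalone sketch (userspace C, not the patched kernel code) of the lookup-or-append scheme that the new struct part_desc_seq_scan_data enables: a record is found by the partition number stored alongside it instead of being indexed by partnum directly, so sparse or non-consecutive partition numbers can no longer index past the allocated array. struct udf_vds_record is reduced to its block field, kcalloc() and the kernel's copy/grow logic are replaced by realloc() with a simplified growth step, and error handling is kept minimal, so treat this as an illustration of the pattern rather than the driver's implementation.

/*
 * Standalone sketch of the lookup-or-append pattern from the patch.
 * Names mirror the kernel structs; helpers are plain libc.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define PART_DESC_ALLOC_STEP 32

struct udf_vds_record { uint32_t block; };	/* simplified stand-in */

struct part_desc_seq_scan_data {
	struct udf_vds_record rec;
	uint32_t partnum;
};

struct desc_seq_scan_data {
	unsigned int size_part_descs;	/* allocated slots */
	unsigned int num_part_descs;	/* slots in use */
	struct part_desc_seq_scan_data *part_descs_loc;
};

/*
 * Return the record for 'partnum': reuse an existing slot when this
 * partition number was already seen (so a later descriptor prevails),
 * otherwise grow the array if needed and append a new slot.
 */
static struct udf_vds_record *get_part_rec(struct desc_seq_scan_data *data,
					   uint32_t partnum)
{
	unsigned int i;

	for (i = 0; i < data->num_part_descs; i++)
		if (data->part_descs_loc[i].partnum == partnum)
			return &data->part_descs_loc[i].rec;

	if (data->num_part_descs >= data->size_part_descs) {
		unsigned int new_size = data->size_part_descs +
					PART_DESC_ALLOC_STEP;
		struct part_desc_seq_scan_data *new_loc;

		new_loc = realloc(data->part_descs_loc,
				  new_size * sizeof(*new_loc));
		if (!new_loc)
			return NULL;
		data->part_descs_loc = new_loc;
		data->size_part_descs = new_size;
	}
	data->part_descs_loc[data->num_part_descs].partnum = partnum;
	return &data->part_descs_loc[data->num_part_descs++].rec;
}

int main(void)
{
	struct desc_seq_scan_data data = { 0 };
	unsigned int i;

	/* Sparse, non-consecutive partition numbers stay within bounds. */
	get_part_rec(&data, 0)->block = 100;
	get_part_rec(&data, 4096)->block = 200;
	get_part_rec(&data, 0)->block = 150;	/* reuses slot 0, prevails */

	for (i = 0; i < data.num_part_descs; i++)
		printf("partnum %u -> block %u\n",
		       (unsigned int)data.part_descs_loc[i].partnum,
		       (unsigned int)data.part_descs_loc[i].rec.block);
	free(data.part_descs_loc);
	return 0;
}

The detail mirrored from the patch is that the returned slot is addressed by the count of records seen so far (num_part_descs), not by partnum itself, and that a repeated partition number reuses its existing slot rather than allocating a new one.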