VirtualBox

source: vbox/trunk/src/VBox/Storage/VMDK.cpp@ 107672

Last change on this file since 107672 was 107672, checked in by vboxsync, 4 months ago

Storage/VMDK.cpp: Fix unused variable parfait warnings + small cleanup, bugref:3409

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 378.4 KB
Line 
1/* $Id: VMDK.cpp 107672 2025-01-10 15:41:35Z vboxsync $ */
2/** @file
3 * VMDK disk image, core code.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.215389.xyz.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_VD_VMDK
33#include <VBox/log.h> /* before VBox/vd-ifs.h */
34#include <VBox/vd-plugin.h>
35#include <VBox/err.h>
36
37#include <iprt/assert.h>
38#include <iprt/alloc.h>
39#include <iprt/base64.h>
40#include <iprt/ctype.h>
41#include <iprt/crc.h>
42#include <iprt/dvm.h>
43#include <iprt/uuid.h>
44#include <iprt/path.h>
45#include <iprt/rand.h>
46#include <iprt/sg.h>
47#include <iprt/sort.h>
48#include <iprt/string.h>
49#include <iprt/zip.h>
50#include <iprt/asm.h>
51#include <iprt/zero.h>
52#ifdef RT_OS_WINDOWS
53# include <iprt/utf16.h>
54# include <iprt/uni.h>
55# include <iprt/uni.h>
56# include <iprt/nt/nt-and-windows.h>
57# include <winioctl.h>
58#endif
59#ifdef RT_OS_LINUX
60# include <errno.h>
61# include <sys/stat.h>
62# include <iprt/dir.h>
63# include <iprt/symlink.h>
64# include <iprt/linux/sysfs.h>
65#endif
66#ifdef RT_OS_FREEBSD
67#include <libgeom.h>
68#include <sys/stat.h>
69#include <stdlib.h>
70#endif
71#ifdef RT_OS_SOLARIS
72#include <sys/dkio.h>
73#include <sys/vtoc.h>
74#include <sys/efi_partition.h>
75#include <unistd.h>
76#include <errno.h>
77#endif
78#ifdef RT_OS_DARWIN
79# include <sys/stat.h>
80# include <sys/disk.h>
81# include <errno.h>
/* The following structure and IOCTLs are defined in xnu bsd/sys/disk.h but
   inside KERNEL ifdefs and thus stripped from the SDK edition of the header.
   While we could try include the header from the Kernel.framework, it's a lot
   easier to just add the structure and 4 defines here. */
typedef struct
{
    uint64_t offset;            /**< Byte offset of the extent on the physical device. */
    uint64_t length;            /**< Length of the extent in bytes. */
    uint8_t reserved0128[12];   /**< Reserved padding (per the kernel header). */
    dev_t dev;                  /**< Device the extent resides on. */
} dk_physical_extent_t;
# define DKIOCGETBASE _IOR( 'd', 73, uint64_t)
# define DKIOCLOCKPHYSICALEXTENTS _IO( 'd', 81)
# define DKIOCGETPHYSICALEXTENT _IOWR('d', 82, dk_physical_extent_t)
# define DKIOCUNLOCKPHYSICALEXTENTS _IO( 'd', 83)
97#endif /* RT_OS_DARWIN */
98
99#include "VDBackends.h"
100
101
102/*********************************************************************************************************************************
103* Constants And Macros, Structures and Typedefs *
104*********************************************************************************************************************************/
105
106/** Maximum encoded string size (including NUL) we allow for VMDK images.
107 * Deliberately not set high to avoid running out of descriptor space. */
108#define VMDK_ENCODED_COMMENT_MAX 1024
109
110/** VMDK descriptor DDB entry for PCHS cylinders. */
111#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
112
113/** VMDK descriptor DDB entry for PCHS heads. */
114#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
115
116/** VMDK descriptor DDB entry for PCHS sectors. */
117#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
118
119/** VMDK descriptor DDB entry for LCHS cylinders. */
120#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
121
122/** VMDK descriptor DDB entry for LCHS heads. */
123#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
124
125/** VMDK descriptor DDB entry for LCHS sectors. */
126#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
127
128/** VMDK descriptor DDB entry for image UUID. */
129#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
130
131/** VMDK descriptor DDB entry for image modification UUID. */
132#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
133
134/** VMDK descriptor DDB entry for parent image UUID. */
135#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
136
137/** VMDK descriptor DDB entry for parent image modification UUID. */
138#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
139
140/** No compression for streamOptimized files. */
141#define VMDK_COMPRESSION_NONE 0
142
143/** Deflate compression for streamOptimized files. */
144#define VMDK_COMPRESSION_DEFLATE 1
145
146/** Marker that the actual GD value is stored in the footer. */
147#define VMDK_GD_AT_END 0xffffffffffffffffULL
148
149/** Marker for end-of-stream in streamOptimized images. */
150#define VMDK_MARKER_EOS 0
151
152/** Marker for grain table block in streamOptimized images. */
153#define VMDK_MARKER_GT 1
154
155/** Marker for grain directory block in streamOptimized images. */
156#define VMDK_MARKER_GD 2
157
158/** Marker for footer in streamOptimized images. */
159#define VMDK_MARKER_FOOTER 3
160
161/** Marker for unknown purpose in streamOptimized images.
162 * Shows up in very recent images created by vSphere, but only sporadically.
163 * They "forgot" to document that one in the VMDK specification. */
164#define VMDK_MARKER_UNSPECIFIED 4
165
166/** Dummy marker for "don't check the marker value". */
167#define VMDK_MARKER_IGNORE 0xffffffffU
168
169/**
170 * Magic number for hosted images created by VMware Workstation 4, VMware
171 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
172 */
173#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
174
175/** VMDK sector size in bytes. */
176#define VMDK_SECTOR_SIZE 512
177/** Max string buffer size for uint64_t with null term */
178#define UINT64_MAX_BUFF_SIZE 21
179/** Grain directory entry size in bytes */
180#define VMDK_GRAIN_DIR_ENTRY_SIZE 4
181/** Grain table size in bytes */
182#define VMDK_GRAIN_TABLE_SIZE 2048
183
/**
 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
 * this header is also used for monolithic flat images.
 */
#pragma pack(1)
typedef struct SparseExtentHeader
{
    uint32_t magicNumber;       /**< VMDK_SPARSE_MAGICNUMBER ('VMDK'). */
    uint32_t version;           /**< Format version of this extent. */
    uint32_t flags;             /**< Feature flags. */
    uint64_t capacity;          /**< Capacity of the extent, in sectors. */
    uint64_t grainSize;         /**< Grain size, in sectors. */
    uint64_t descriptorOffset;  /**< Start of the embedded descriptor, in sectors (0 if none). */
    uint64_t descriptorSize;    /**< Size of the embedded descriptor, in sectors. */
    uint32_t numGTEsPerGT;      /**< Number of entries per grain table. */
    uint64_t rgdOffset;         /**< Start of the redundant grain directory, in sectors. */
    uint64_t gdOffset;          /**< Start of the grain directory, in sectors
                                 *   (VMDK_GD_AT_END if stored in the footer). */
    uint64_t overHead;          /**< Total metadata overhead, in sectors. */
    bool uncleanShutdown;       /**< Set while the extent is open for writing. */
    char singleEndLineChar;     /**< Line-ending detection char ('\n'). */
    char nonEndLineChar;        /**< Line-ending detection char (' '). */
    char doubleEndLineChar1;    /**< Line-ending detection char ('\r'). */
    char doubleEndLineChar2;    /**< Line-ending detection char ('\n'). */
    uint16_t compressAlgorithm; /**< VMDK_COMPRESSION_NONE or VMDK_COMPRESSION_DEFLATE. */
    uint8_t pad[433];           /**< Padding up to a full 512 byte sector. */
} SparseExtentHeader;
#pragma pack()
211
212/** The maximum allowed descriptor size in the extent header in sectors. */
213#define VMDK_SPARSE_DESCRIPTOR_SIZE_MAX UINT64_C(20480) /* 10MB */
214
215/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
216 * divisible by the default grain size (64K) */
217#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
218
/** VMDK streamOptimized file format marker. The type field may or may not
 * be actually valid, but there's always data to read there. */
#pragma pack(1)
typedef struct VMDKMARKER
{
    uint64_t uSector;   /**< LBA of the grain (little endian on disk). */
    uint32_t cbSize;    /**< Size of the compressed data following the marker,
                         *   or 0 for metadata markers (then uType is valid). */
    uint32_t uType;     /**< One of the VMDK_MARKER_XXX values (metadata markers only). */
} VMDKMARKER, *PVMDKMARKER;
#pragma pack()
229
230
231/** Convert sector number/size to byte offset/size. */
232#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
233
234/** Convert byte offset/size to sector number/size. */
235#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
236
/**
 * VMDK extent type.
 */
typedef enum VMDKETYPE
{
    /** Hosted sparse extent. */
    VMDKETYPE_HOSTED_SPARSE = 1,
    /** Flat extent. */
    VMDKETYPE_FLAT,
    /** Zero extent. */
    VMDKETYPE_ZERO,
    /** VMFS extent, used by ESX. */
    VMDKETYPE_VMFS
} VMDKETYPE, *PVMDKETYPE;
251
/**
 * VMDK access type for an extent.
 */
typedef enum VMDKACCESS
{
    /** No access allowed. */
    VMDKACCESS_NOACCESS = 0,
    /** Read-only access. */
    VMDKACCESS_READONLY,
    /** Read-write access. */
    VMDKACCESS_READWRITE
} VMDKACCESS, *PVMDKACCESS;
264
265/** Forward declaration for PVMDKIMAGE. */
266typedef struct VMDKIMAGE *PVMDKIMAGE;
267
/**
 * Extents files entry. Used for opening a particular file only once.
 *
 * Entries form a doubly linked list anchored at VMDKIMAGE::pFiles and are
 * reference counted; see vmdkFileOpen() / vmdkFileClose().
 */
typedef struct VMDKFILE
{
    /** Pointer to file path. Local copy. */
    const char *pszFilename;
    /** Pointer to base name. Local copy. */
    const char *pszBasename;
    /** File open flags for consistency checking. */
    unsigned fOpen;
    /** Handle for sync/async file abstraction.*/
    PVDIOSTORAGE pStorage;
    /** Reference counter. */
    unsigned uReferences;
    /** Flag whether the file should be deleted on last close. */
    bool fDelete;
    /** Pointer to the image we belong to (for debugging purposes). */
    PVMDKIMAGE pImage;
    /** Pointer to next file descriptor. */
    struct VMDKFILE *pNext;
    /** Pointer to the previous file descriptor. */
    struct VMDKFILE *pPrev;
} VMDKFILE, *PVMDKFILE;
292
/**
 * VMDK extent data structure.
 */
typedef struct VMDKEXTENT
{
    /** File handle. */
    PVMDKFILE pFile;
    /** Base name of the image extent. */
    const char *pszBasename;
    /** Full name of the image extent. */
    const char *pszFullname;
    /** Number of sectors in this extent. */
    uint64_t cSectors;
    /** Number of sectors per block (grain in VMDK speak). */
    uint64_t cSectorsPerGrain;
    /** Starting sector number of descriptor. */
    uint64_t uDescriptorSector;
    /** Size of descriptor in sectors. */
    uint64_t cDescriptorSectors;
    /** Starting sector number of grain directory. */
    uint64_t uSectorGD;
    /** Starting sector number of redundant grain directory. */
    uint64_t uSectorRGD;
    /** Total number of metadata sectors. */
    uint64_t cOverheadSectors;
    /** Nominal size (i.e. as described by the descriptor) of this extent. */
    uint64_t cNominalSectors;
    /** Sector offset (i.e. as described by the descriptor) of this extent. */
    uint64_t uSectorOffset;
    /** Number of entries in a grain table. */
    uint32_t cGTEntries;
    /** Number of sectors reachable via a grain directory entry. */
    uint32_t cSectorsPerGDE;
    /** Number of entries in the grain directory. */
    uint32_t cGDEntries;
    /** Pointer to the next free sector. Legacy information. Do not use. */
    uint32_t uFreeSector;
    /** Number of this extent in the list of images. */
    uint32_t uExtent;
    /** Pointer to the descriptor (NULL if no descriptor in this extent). */
    char *pDescData;
    /** Pointer to the grain directory. */
    uint32_t *pGD;
    /** Pointer to the redundant grain directory. */
    uint32_t *pRGD;
    /** VMDK version of this extent. 1=1.0/1.1 */
    uint32_t uVersion;
    /** Type of this extent. */
    VMDKETYPE enmType;
    /** Access to this extent. */
    VMDKACCESS enmAccess;
    /** Flag whether this extent is marked as unclean. */
    bool fUncleanShutdown;
    /** Flag whether the metadata in the extent header needs to be updated. */
    bool fMetaDirty;
    /** Flag whether there is a footer in this extent. */
    bool fFooter;
    /** Compression type for this extent. */
    uint16_t uCompression;
    /** Append position for writing new grain. Only for sparse extents. */
    uint64_t uAppendPosition;
    /** Last grain which was accessed. Only for streamOptimized extents. */
    uint32_t uLastGrainAccess;
    /** Starting sector corresponding to the grain buffer. */
    uint32_t uGrainSectorAbs;
    /** Grain number corresponding to the grain buffer. */
    uint32_t uGrain;
    /** Actual size of the compressed data, only valid for reading. */
    uint32_t cbGrainStreamRead;
    /** Size of compressed grain buffer for streamOptimized extents. */
    size_t cbCompGrain;
    /** Compressed grain buffer for streamOptimized extents, with marker. */
    void *pvCompGrain;
    /** Decompressed grain buffer for streamOptimized extents. */
    void *pvGrain;
    /** Reference to the image in which this extent is used. Do not use this
     * on a regular basis to avoid passing pImage references to functions
     * explicitly. */
    struct VMDKIMAGE *pImage;
} VMDKEXTENT, *PVMDKEXTENT;
373
374/**
375 * Grain table cache size. Allocated per image.
376 */
377#define VMDK_GT_CACHE_SIZE 256
378
379/**
380 * Grain table block size. Smaller than an actual grain table block to allow
381 * more grain table blocks to be cached without having to allocate excessive
382 * amounts of memory for the cache.
383 */
384#define VMDK_GT_CACHELINE_SIZE 128
385
386
387/**
388 * Maximum number of lines in a descriptor file. Not worth the effort of
389 * making it variable. Descriptor files are generally very short (~20 lines),
390 * with the exception of sparse files split in 2G chunks, which need for the
391 * maximum size (almost 2T) exactly 1025 lines for the disk database.
392 */
393#define VMDK_DESCRIPTOR_LINES_MAX 1100U
394
/**
 * Parsed descriptor information. Allows easy access and update of the
 * descriptor (whether separate file or not). Free form text files suck.
 */
typedef struct VMDKDESCRIPTOR
{
    /** Line number of first entry of the disk descriptor. */
    unsigned uFirstDesc;
    /** Line number of first entry in the extent description. */
    unsigned uFirstExtent;
    /** Line number of first disk database entry. */
    unsigned uFirstDDB;
    /** Total number of lines. */
    unsigned cLines;
    /** Total amount of memory available for the descriptor. */
    size_t cbDescAlloc;
    /** Set if descriptor has been changed and not yet written to disk. */
    bool fDirty;
    /** Array of pointers to the data in the descriptor. */
    char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
    /** Array of line indices pointing to the next non-comment line. */
    unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
418
419
/**
 * Cache entry for translating extent/sector to a sector number in that
 * extent.
 */
typedef struct VMDKGTCACHEENTRY
{
    /** Extent number for which this entry is valid. */
    uint32_t uExtent;
    /** GT data block number. */
    uint64_t uGTBlock;
    /** Data part of the cache entry (one cache line of grain table entries). */
    uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
433
/**
 * Cache data structure for blocks of grain table entries. For now this is a
 * fixed size direct mapping cache, but this should be adapted to the size of
 * the sparse image and maybe converted to a set-associative cache. The
 * implementation below implements a write-through cache with write allocate.
 */
typedef struct VMDKGTCACHE
{
    /** Cache entries. */
    VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
    /** Number of cache entries (currently unused). */
    unsigned cEntries;
} VMDKGTCACHE, *PVMDKGTCACHE;
447
/**
 * Complete VMDK image data structure. Mainly a collection of extents and a few
 * extra global data fields.
 */
typedef struct VMDKIMAGE
{
    /** Image name. */
    const char *pszFilename;
    /** Descriptor file if applicable. */
    PVMDKFILE pFile;

    /** Pointer to the per-disk VD interface list. */
    PVDINTERFACE pVDIfsDisk;
    /** Pointer to the per-image VD interface list. */
    PVDINTERFACE pVDIfsImage;

    /** Error interface. */
    PVDINTERFACEERROR pIfError;
    /** I/O interface. */
    PVDINTERFACEIOINT pIfIo;


    /** Pointer to the image extents. */
    PVMDKEXTENT pExtents;
    /** Number of image extents. */
    unsigned cExtents;
    /** Pointer to the files list, for opening a file referenced multiple
     * times only once (happens mainly with raw partition access). */
    PVMDKFILE pFiles;

    /**
     * Pointer to an array of segment entries for async I/O.
     * This is an optimization because the task number to submit is not known
     * and allocating/freeing an array in the read/write functions every time
     * is too expensive.
     */
    PRTSGSEG paSegments;
    /** Entries available in the segments array. */
    unsigned cSegments;

    /** Open flags passed by VBoxHD layer. */
    unsigned uOpenFlags;
    /** Image flags defined during creation or determined during open. */
    unsigned uImageFlags;
    /** Total size of the image. */
    uint64_t cbSize;
    /** Physical geometry of this image. */
    VDGEOMETRY PCHSGeometry;
    /** Logical geometry of this image. */
    VDGEOMETRY LCHSGeometry;
    /** Image UUID. */
    RTUUID ImageUuid;
    /** Image modification UUID. */
    RTUUID ModificationUuid;
    /** Parent image UUID. */
    RTUUID ParentUuid;
    /** Parent image modification UUID. */
    RTUUID ParentModificationUuid;

    /** Pointer to grain table cache, if this image contains sparse extents. */
    PVMDKGTCACHE pGTCache;
    /** Pointer to the descriptor (NULL if no separate descriptor file). */
    char *pDescData;
    /** Allocation size of the descriptor file. */
    size_t cbDescAlloc;
    /** Parsed descriptor file content. */
    VMDKDESCRIPTOR Descriptor;
    /** The static region list. */
    VDREGIONLIST RegionList;
} VMDKIMAGE;
518
519
/** State for the input/output callout of the inflate reader/deflate writer. */
typedef struct VMDKCOMPRESSIO
{
    /** Image this operation relates to. */
    PVMDKIMAGE pImage;
    /** Current read/write position within the compressed grain buffer;
     * negative (-1) until the zlib type byte has been injected/skipped. */
    ssize_t iOffset;
    /** Size of the compressed grain buffer (available data). */
    size_t cbCompGrain;
    /** Pointer to the compressed grain buffer. */
    void *pvCompGrain;
} VMDKCOMPRESSIO;
532
533
/** Tracks async grain allocation. */
typedef struct VMDKGRAINALLOCASYNC
{
    /** Flag whether the allocation failed. */
    bool fIoErr;
    /** Current number of transfers pending.
     * If reached 0 and there is an error the old state is restored. */
    unsigned cIoXfersPending;
    /** Sector number */
    uint64_t uSector;
    /** Flag whether the grain table needs to be updated. */
    bool fGTUpdateNeeded;
    /** Extent the allocation happens. */
    PVMDKEXTENT pExtent;
    /** Position of the new grain, required for the grain table update. */
    uint64_t uGrainOffset;
    /** Grain table sector. */
    uint64_t uGTSector;
    /** Backup grain table sector. */
    uint64_t uRGTSector;
} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
555
/**
 * State information for vmdkRename() and helpers.
 */
typedef struct VMDKRENAMESTATE
{
    /** Array of old filenames. */
    char **apszOldName;
    /** Array of new filenames. */
    char **apszNewName;
    /** Array of new lines in the extent descriptor. */
    char **apszNewLines;
    /** Name of the old descriptor file if not a sparse image. */
    char *pszOldDescName;
    /** Flag whether we called vmdkFreeImage(). */
    bool fImageFreed;
    /** Flag whether the descriptor is embedded in the image (sparse) or
     * in a separate file. */
    bool fEmbeddedDesc;
    /** Number of extents in the image. */
    unsigned cExtents;
    /** New base filename. */
    char *pszNewBaseName;
    /** The old base filename. */
    char *pszOldBaseName;
    /** New full filename. */
    char *pszNewFullName;
    /** Old full filename. */
    char *pszOldFullName;
    /** The old image name. */
    const char *pszOldImageName;
    /** Copy of the original VMDK descriptor. */
    VMDKDESCRIPTOR DescriptorCopy;
    /** Copy of the extent state for sparse images. */
    VMDKEXTENT ExtentCopy;
} VMDKRENAMESTATE;
/** Pointer to a VMDK rename state. */
typedef VMDKRENAMESTATE *PVMDKRENAMESTATE;
593
594
595/*********************************************************************************************************************************
596* Static Variables *
597*********************************************************************************************************************************/
598
/** NULL-terminated array of supported file extensions. */
static const VDFILEEXTENSION s_aVmdkFileExtensions[] =
{
    {"vmdk", VDTYPE_HDD},
    {NULL, VDTYPE_INVALID}   /* terminator */
};
605
/** NULL-terminated array of configuration option. */
static const VDCONFIGINFO s_aVmdkConfigInfo[] =
{
    /* Options for VMDK raw disks */
    { "RawDrive", NULL, VDCFGVALUETYPE_STRING, 0 },
    { "Partitions", NULL, VDCFGVALUETYPE_STRING, 0 },
    { "BootSector", NULL, VDCFGVALUETYPE_BYTES, 0 },
    { "Relative", NULL, VDCFGVALUETYPE_INTEGER, 0 },

    /* End of options list */
    { NULL, NULL, VDCFGVALUETYPE_INTEGER, 0 }
};
618
619
620/*********************************************************************************************************************************
621* Internal Functions *
622*********************************************************************************************************************************/
623
624static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent);
625static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
626 bool fDelete);
627
628static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
629static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx);
630static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
631static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush);
632
633static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx,
634 void *pvUser, int rcReq);
635
636/**
637 * Internal: open a file (using a file descriptor cache to ensure each file
638 * is only opened once - anything else can cause locking problems).
639 */
640static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
641 const char *pszBasename, const char *pszFilename, uint32_t fOpen)
642{
643 int rc = VINF_SUCCESS;
644 PVMDKFILE pVmdkFile;
645
646 for (pVmdkFile = pImage->pFiles;
647 pVmdkFile != NULL;
648 pVmdkFile = pVmdkFile->pNext)
649 {
650 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
651 {
652 Assert(fOpen == pVmdkFile->fOpen);
653 pVmdkFile->uReferences++;
654
655 *ppVmdkFile = pVmdkFile;
656
657 return rc;
658 }
659 }
660
661 /* If we get here, there's no matching entry in the cache. */
662 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
663 if (!pVmdkFile)
664 {
665 *ppVmdkFile = NULL;
666 return VERR_NO_MEMORY;
667 }
668
669 pVmdkFile->pszFilename = RTStrDup(pszFilename);
670 if (!pVmdkFile->pszFilename)
671 {
672 RTMemFree(pVmdkFile);
673 *ppVmdkFile = NULL;
674 return VERR_NO_MEMORY;
675 }
676
677 if (pszBasename)
678 {
679 pVmdkFile->pszBasename = RTStrDup(pszBasename);
680 if (!pVmdkFile->pszBasename)
681 {
682 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
683 RTMemFree(pVmdkFile);
684 *ppVmdkFile = NULL;
685 return VERR_NO_MEMORY;
686 }
687 }
688
689 pVmdkFile->fOpen = fOpen;
690
691 rc = vdIfIoIntFileOpen(pImage->pIfIo, pszFilename, fOpen,
692 &pVmdkFile->pStorage);
693 if (RT_SUCCESS(rc))
694 {
695 pVmdkFile->uReferences = 1;
696 pVmdkFile->pImage = pImage;
697 pVmdkFile->pNext = pImage->pFiles;
698 if (pImage->pFiles)
699 pImage->pFiles->pPrev = pVmdkFile;
700 pImage->pFiles = pVmdkFile;
701 *ppVmdkFile = pVmdkFile;
702 }
703 else
704 {
705 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
706 RTMemFree(pVmdkFile);
707 *ppVmdkFile = NULL;
708 }
709
710 return rc;
711}
712
/**
 * Internal: close a file, updating the file descriptor cache.
 *
 * Drops one reference on *ppVmdkFile. On the last reference the entry is
 * unlinked from the image's file list, the storage handle is closed and the
 * entry is freed. The underlying file is only physically deleted if deletion
 * was requested (by this or an earlier close) and the basename looks safe to
 * delete: no path component and a well-known extension (guards against
 * deleting raw disks/partitions). *ppVmdkFile is always NULL on return.
 */
static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
{
    int rc = VINF_SUCCESS;
    PVMDKFILE pVmdkFile = *ppVmdkFile;

    AssertPtr(pVmdkFile);

    pVmdkFile->fDelete |= fDelete;   /* a delete request is sticky across references */
    Assert(pVmdkFile->uReferences);
    pVmdkFile->uReferences--;
    if (pVmdkFile->uReferences == 0)
    {
        PVMDKFILE pPrev;
        PVMDKFILE pNext;

        /* Unchain the element from the list. */
        pPrev = pVmdkFile->pPrev;
        pNext = pVmdkFile->pNext;

        if (pNext)
            pNext->pPrev = pPrev;
        if (pPrev)
            pPrev->pNext = pNext;
        else
            pImage->pFiles = pNext;

        rc = vdIfIoIntFileClose(pImage->pIfIo, pVmdkFile->pStorage);

        /* Refuse deletion unless the basename is path-free and carries one of
         * the extensions VMDK images may legitimately own. */
        bool fFileDel = pVmdkFile->fDelete;
        if (   pVmdkFile->pszBasename
            && fFileDel)
        {
            const char *pszSuffix = RTPathSuffix(pVmdkFile->pszBasename);
            if (   RTPathHasPath(pVmdkFile->pszBasename)
                || !pszSuffix
                || (   strcmp(pszSuffix, ".vmdk")
                    && strcmp(pszSuffix, ".bin")
                    && strcmp(pszSuffix, ".img")))
                fFileDel = false;
        }

        if (fFileDel)
        {
            int rc2 = vdIfIoIntFileDelete(pImage->pIfIo, pVmdkFile->pszFilename);
            if (RT_SUCCESS(rc))
                rc = rc2;
        }
        else if (pVmdkFile->fDelete)
            LogRel(("VMDK: Denying deletion of %s\n", pVmdkFile->pszBasename));
        RTStrFree((char *)(void *)pVmdkFile->pszFilename);
        if (pVmdkFile->pszBasename)
            RTStrFree((char *)(void *)pVmdkFile->pszBasename);
        RTMemFree(pVmdkFile);
    }

    *ppVmdkFile = NULL;
    return rc;
}
774
775/*#define VMDK_USE_BLOCK_DECOMP_API - test and enable */
776#ifndef VMDK_USE_BLOCK_DECOMP_API
777static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
778{
779 VMDKCOMPRESSIO *pInflateState = (VMDKCOMPRESSIO *)pvUser;
780 size_t cbInjected = 0;
781
782 Assert(cbBuf);
783 if (pInflateState->iOffset < 0)
784 {
785 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
786 pvBuf = (uint8_t *)pvBuf + 1;
787 cbBuf--;
788 cbInjected = 1;
789 pInflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
790 }
791 if (!cbBuf)
792 {
793 if (pcbBuf)
794 *pcbBuf = cbInjected;
795 return VINF_SUCCESS;
796 }
797 cbBuf = RT_MIN(cbBuf, pInflateState->cbCompGrain - pInflateState->iOffset);
798 memcpy(pvBuf,
799 (uint8_t *)pInflateState->pvCompGrain + pInflateState->iOffset,
800 cbBuf);
801 pInflateState->iOffset += cbBuf;
802 Assert(pcbBuf);
803 *pcbBuf = cbBuf + cbInjected;
804 return VINF_SUCCESS;
805}
806#endif
807
808/**
809 * Internal: read from a file and inflate the compressed data,
810 * distinguishing between async and normal operation
811 */
812DECLINLINE(int) vmdkFileInflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
813 uint64_t uOffset, void *pvBuf,
814 size_t cbToRead, const void *pcvMarker,
815 uint64_t *puLBA, uint32_t *pcbMarkerData)
816{
817 int rc;
818#ifndef VMDK_USE_BLOCK_DECOMP_API
819 PRTZIPDECOMP pZip = NULL;
820#endif
821 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
822 size_t cbCompSize, cbActuallyRead;
823
824 if (!pcvMarker)
825 {
826 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
827 uOffset, pMarker, RT_UOFFSETOF(VMDKMARKER, uType));
828 if (RT_FAILURE(rc))
829 return rc;
830 }
831 else
832 {
833 memcpy(pMarker, pcvMarker, RT_UOFFSETOF(VMDKMARKER, uType));
834 /* pcvMarker endianness has already been partially transformed, fix it */
835 pMarker->uSector = RT_H2LE_U64(pMarker->uSector);
836 pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize);
837 }
838
839 cbCompSize = RT_LE2H_U32(pMarker->cbSize);
840 if (cbCompSize == 0)
841 {
842 AssertMsgFailed(("VMDK: corrupted marker\n"));
843 return VERR_VD_VMDK_INVALID_FORMAT;
844 }
845
846 /* Sanity check - the expansion ratio should be much less than 2. */
847 Assert(cbCompSize < 2 * cbToRead);
848 if (cbCompSize >= 2 * cbToRead)
849 return VERR_VD_VMDK_INVALID_FORMAT;
850
851 /* Compressed grain marker. Data follows immediately. */
852 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
853 uOffset + RT_UOFFSETOF(VMDKMARKER, uType),
854 (uint8_t *)pExtent->pvCompGrain
855 + RT_UOFFSETOF(VMDKMARKER, uType),
856 RT_ALIGN_Z( cbCompSize
857 + RT_UOFFSETOF(VMDKMARKER, uType),
858 512)
859 - RT_UOFFSETOF(VMDKMARKER, uType));
860
861 if (puLBA)
862 *puLBA = RT_LE2H_U64(pMarker->uSector);
863 if (pcbMarkerData)
864 *pcbMarkerData = RT_ALIGN( cbCompSize
865 + RT_UOFFSETOF(VMDKMARKER, uType),
866 512);
867
868#ifdef VMDK_USE_BLOCK_DECOMP_API
869 rc = RTZipBlockDecompress(RTZIPTYPE_ZLIB, 0 /*fFlags*/,
870 pExtent->pvCompGrain, cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType), NULL,
871 pvBuf, cbToRead, &cbActuallyRead);
872#else
873 VMDKCOMPRESSIO InflateState;
874 InflateState.pImage = pImage;
875 InflateState.iOffset = -1;
876 InflateState.cbCompGrain = cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType);
877 InflateState.pvCompGrain = pExtent->pvCompGrain;
878
879 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
880 if (RT_FAILURE(rc))
881 return rc;
882 rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
883 RTZipDecompDestroy(pZip);
884#endif /* !VMDK_USE_BLOCK_DECOMP_API */
885 if (RT_FAILURE(rc))
886 {
887 if (rc == VERR_ZIP_CORRUPTED)
888 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Compressed image is corrupted '%s'"), pExtent->pszFullname);
889 return rc;
890 }
891 if (cbActuallyRead != cbToRead)
892 rc = VERR_VD_VMDK_INVALID_FORMAT;
893 return rc;
894}
895
896static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
897{
898 VMDKCOMPRESSIO *pDeflateState = (VMDKCOMPRESSIO *)pvUser;
899
900 Assert(cbBuf);
901 if (pDeflateState->iOffset < 0)
902 {
903 pvBuf = (const uint8_t *)pvBuf + 1;
904 cbBuf--;
905 pDeflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
906 }
907 if (!cbBuf)
908 return VINF_SUCCESS;
909 if (pDeflateState->iOffset + cbBuf > pDeflateState->cbCompGrain)
910 return VERR_BUFFER_OVERFLOW;
911 memcpy((uint8_t *)pDeflateState->pvCompGrain + pDeflateState->iOffset,
912 pvBuf, cbBuf);
913 pDeflateState->iOffset += cbBuf;
914 return VINF_SUCCESS;
915}
916
917/**
918 * Internal: deflate the uncompressed data and write to a file,
919 * distinguishing between async and normal operation
920 */
921DECLINLINE(int) vmdkFileDeflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
922 uint64_t uOffset, const void *pvBuf,
923 size_t cbToWrite, uint64_t uLBA,
924 uint32_t *pcbMarkerData)
925{
926 int rc;
927 PRTZIPCOMP pZip = NULL;
928 VMDKCOMPRESSIO DeflateState;
929
930 DeflateState.pImage = pImage;
931 DeflateState.iOffset = -1;
932 DeflateState.cbCompGrain = pExtent->cbCompGrain;
933 DeflateState.pvCompGrain = pExtent->pvCompGrain;
934
935 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper,
936 RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
937 if (RT_FAILURE(rc))
938 return rc;
939 rc = RTZipCompress(pZip, pvBuf, cbToWrite);
940 if (RT_SUCCESS(rc))
941 rc = RTZipCompFinish(pZip);
942 RTZipCompDestroy(pZip);
943 if (RT_SUCCESS(rc))
944 {
945 Assert( DeflateState.iOffset > 0
946 && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain);
947
948 /* pad with zeroes to get to a full sector size */
949 uint32_t uSize = DeflateState.iOffset;
950 if (uSize % 512)
951 {
952 uint32_t uSizeAlign = RT_ALIGN(uSize, 512);
953 memset((uint8_t *)pExtent->pvCompGrain + uSize, '\0',
954 uSizeAlign - uSize);
955 uSize = uSizeAlign;
956 }
957
958 if (pcbMarkerData)
959 *pcbMarkerData = uSize;
960
961 /* Compressed grain marker. Data follows immediately. */
962 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
963 pMarker->uSector = RT_H2LE_U64(uLBA);
964 pMarker->cbSize = RT_H2LE_U32( DeflateState.iOffset
965 - RT_UOFFSETOF(VMDKMARKER, uType));
966 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
967 uOffset, pMarker, uSize);
968 if (RT_FAILURE(rc))
969 return rc;
970 }
971 return rc;
972}
973
974
975/**
976 * Internal: check if all files are closed, prevent leaking resources.
977 */
978static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
979{
980 int rc = VINF_SUCCESS, rc2;
981 PVMDKFILE pVmdkFile;
982
983 Assert(pImage->pFiles == NULL);
984 for (pVmdkFile = pImage->pFiles;
985 pVmdkFile != NULL;
986 pVmdkFile = pVmdkFile->pNext)
987 {
988 LogRel(("VMDK: leaking reference to file \"%s\"\n",
989 pVmdkFile->pszFilename));
990 pImage->pFiles = pVmdkFile->pNext;
991
992 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
993
994 if (RT_SUCCESS(rc))
995 rc = rc2;
996 }
997 return rc;
998}
999
1000/**
1001 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
1002 * critical non-ASCII characters.
1003 */
1004static char *vmdkEncodeString(const char *psz)
1005{
1006 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
1007 char *pszDst = szEnc;
1008
1009 AssertPtr(psz);
1010
1011 for (; *psz; psz = RTStrNextCp(psz))
1012 {
1013 char *pszDstPrev = pszDst;
1014 RTUNICP Cp = RTStrGetCp(psz);
1015 if (Cp == '\\')
1016 {
1017 pszDst = RTStrPutCp(pszDst, Cp);
1018 pszDst = RTStrPutCp(pszDst, Cp);
1019 }
1020 else if (Cp == '\n')
1021 {
1022 pszDst = RTStrPutCp(pszDst, '\\');
1023 pszDst = RTStrPutCp(pszDst, 'n');
1024 }
1025 else if (Cp == '\r')
1026 {
1027 pszDst = RTStrPutCp(pszDst, '\\');
1028 pszDst = RTStrPutCp(pszDst, 'r');
1029 }
1030 else
1031 pszDst = RTStrPutCp(pszDst, Cp);
1032 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
1033 {
1034 pszDst = pszDstPrev;
1035 break;
1036 }
1037 }
1038 *pszDst = '\0';
1039 return RTStrDup(szEnc);
1040}
1041
1042/**
1043 * Internal: decode a string and store it into the specified string.
1044 */
1045static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
1046{
1047 int rc = VINF_SUCCESS;
1048 char szBuf[4];
1049
1050 if (!cb)
1051 return VERR_BUFFER_OVERFLOW;
1052
1053 AssertPtr(psz);
1054
1055 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
1056 {
1057 char *pszDst = szBuf;
1058 RTUNICP Cp = RTStrGetCp(pszEncoded);
1059 if (Cp == '\\')
1060 {
1061 pszEncoded = RTStrNextCp(pszEncoded);
1062 RTUNICP CpQ = RTStrGetCp(pszEncoded);
1063 if (CpQ == 'n')
1064 RTStrPutCp(pszDst, '\n');
1065 else if (CpQ == 'r')
1066 RTStrPutCp(pszDst, '\r');
1067 else if (CpQ == '\0')
1068 {
1069 rc = VERR_VD_VMDK_INVALID_HEADER;
1070 break;
1071 }
1072 else
1073 RTStrPutCp(pszDst, CpQ);
1074 }
1075 else
1076 pszDst = RTStrPutCp(pszDst, Cp);
1077
1078 /* Need to leave space for terminating NUL. */
1079 if ((size_t)(pszDst - szBuf) + 1 >= cb)
1080 {
1081 rc = VERR_BUFFER_OVERFLOW;
1082 break;
1083 }
1084 memcpy(psz, szBuf, pszDst - szBuf);
1085 psz += pszDst - szBuf;
1086 }
1087 *psz = '\0';
1088 return rc;
1089}
1090
1091/**
1092 * Internal: free all buffers associated with grain directories.
1093 */
1094static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
1095{
1096 if (pExtent->pGD)
1097 {
1098 RTMemFree(pExtent->pGD);
1099 pExtent->pGD = NULL;
1100 }
1101 if (pExtent->pRGD)
1102 {
1103 RTMemFree(pExtent->pRGD);
1104 pExtent->pRGD = NULL;
1105 }
1106}
1107
1108/**
1109 * Internal: allocate the compressed/uncompressed buffers for streamOptimized
1110 * images.
1111 */
1112static int vmdkAllocStreamBuffers(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1113{
1114 int rc = VINF_SUCCESS;
1115
1116 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1117 {
1118 /* streamOptimized extents need a compressed grain buffer, which must
1119 * be big enough to hold uncompressible data (which needs ~8 bytes
1120 * more than the uncompressed data), the marker and padding. */
1121 pExtent->cbCompGrain = RT_ALIGN_Z( VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
1122 + 8 + sizeof(VMDKMARKER), 512);
1123 pExtent->pvCompGrain = RTMemAlloc(pExtent->cbCompGrain);
1124 if (RT_LIKELY(pExtent->pvCompGrain))
1125 {
1126 /* streamOptimized extents need a decompressed grain buffer. */
1127 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1128 if (!pExtent->pvGrain)
1129 rc = VERR_NO_MEMORY;
1130 }
1131 else
1132 rc = VERR_NO_MEMORY;
1133 }
1134
1135 if (RT_FAILURE(rc))
1136 vmdkFreeStreamBuffers(pExtent);
1137 return rc;
1138}
1139
1140/**
1141 * Internal: allocate all buffers associated with grain directories.
1142 */
1143static int vmdkAllocGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1144{
1145 RT_NOREF1(pImage);
1146 int rc = VINF_SUCCESS;
1147 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1148
1149 pExtent->pGD = (uint32_t *)RTMemAllocZ(cbGD);
1150 if (RT_LIKELY(pExtent->pGD))
1151 {
1152 if (pExtent->uSectorRGD)
1153 {
1154 pExtent->pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1155 if (RT_UNLIKELY(!pExtent->pRGD))
1156 rc = VERR_NO_MEMORY;
1157 }
1158 }
1159 else
1160 rc = VERR_NO_MEMORY;
1161
1162 if (RT_FAILURE(rc))
1163 vmdkFreeGrainDirectory(pExtent);
1164 return rc;
1165}
1166
1167/**
1168 * Converts the grain directory from little to host endianess.
1169 *
1170 * @param pGD The grain directory.
1171 * @param cGDEntries Number of entries in the grain directory to convert.
1172 */
1173DECLINLINE(void) vmdkGrainDirectoryConvToHost(uint32_t *pGD, uint32_t cGDEntries)
1174{
1175 uint32_t *pGDTmp = pGD;
1176
1177 for (uint32_t i = 0; i < cGDEntries; i++, pGDTmp++)
1178 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1179}
1180
1181/**
1182 * Read the grain directory and allocated grain tables verifying them against
1183 * their back up copies if available.
1184 *
1185 * @returns VBox status code.
1186 * @param pImage Image instance data.
1187 * @param pExtent The VMDK extent.
1188 */
1189static int vmdkReadGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1190{
1191 int rc = VINF_SUCCESS;
1192 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1193
1194 AssertReturn(( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
1195 && pExtent->uSectorGD != VMDK_GD_AT_END
1196 && pExtent->uSectorRGD != VMDK_GD_AT_END), VERR_INTERNAL_ERROR);
1197
1198 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1199 if (RT_SUCCESS(rc))
1200 {
1201 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1202 * but in reality they are not compressed. */
1203 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1204 VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1205 pExtent->pGD, cbGD);
1206 if (RT_SUCCESS(rc))
1207 {
1208 vmdkGrainDirectoryConvToHost(pExtent->pGD, pExtent->cGDEntries);
1209
1210 if ( pExtent->uSectorRGD
1211 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS))
1212 {
1213 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1214 * but in reality they are not compressed. */
1215 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1216 VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1217 pExtent->pRGD, cbGD);
1218 if (RT_SUCCESS(rc))
1219 {
1220 vmdkGrainDirectoryConvToHost(pExtent->pRGD, pExtent->cGDEntries);
1221
1222 /* Check grain table and redundant grain table for consistency. */
1223 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1224 size_t cbGTBuffers = cbGT; /* Start with space for one GT. */
1225 size_t cbGTBuffersMax = _1M;
1226
1227 uint32_t *pTmpGT1 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1228 uint32_t *pTmpGT2 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1229
1230 if ( !pTmpGT1
1231 || !pTmpGT2)
1232 rc = VERR_NO_MEMORY;
1233
1234 size_t i = 0;
1235 uint32_t *pGDTmp = pExtent->pGD;
1236 uint32_t *pRGDTmp = pExtent->pRGD;
1237
1238 /* Loop through all entries. */
1239 while (i < pExtent->cGDEntries)
1240 {
1241 uint32_t uGTStart = *pGDTmp;
1242 uint32_t uRGTStart = *pRGDTmp;
1243 size_t cbGTRead = cbGT;
1244
1245 /* If no grain table is allocated skip the entry. */
1246 if (*pGDTmp == 0 && *pRGDTmp == 0)
1247 {
1248 i++;
1249 continue;
1250 }
1251
1252 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1253 {
1254 /* Just one grain directory entry refers to a not yet allocated
1255 * grain table or both grain directory copies refer to the same
1256 * grain table. Not allowed. */
1257 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1258 N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1259 break;
1260 }
1261
1262 i++;
1263 pGDTmp++;
1264 pRGDTmp++;
1265
1266 /*
1267 * Read a few tables at once if adjacent to decrease the number
1268 * of I/O requests. Read at maximum 1MB at once.
1269 */
1270 while ( i < pExtent->cGDEntries
1271 && cbGTRead < cbGTBuffersMax)
1272 {
1273 /* If no grain table is allocated skip the entry. */
1274 if (*pGDTmp == 0 && *pRGDTmp == 0)
1275 {
1276 i++;
1277 continue;
1278 }
1279
1280 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1281 {
1282 /* Just one grain directory entry refers to a not yet allocated
1283 * grain table or both grain directory copies refer to the same
1284 * grain table. Not allowed. */
1285 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1286 N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1287 break;
1288 }
1289
1290 /* Check that the start offsets are adjacent.*/
1291 if ( VMDK_SECTOR2BYTE(uGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pGDTmp)
1292 || VMDK_SECTOR2BYTE(uRGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pRGDTmp))
1293 break;
1294
1295 i++;
1296 pGDTmp++;
1297 pRGDTmp++;
1298 cbGTRead += cbGT;
1299 }
1300
1301 /* Increase buffers if required. */
1302 if ( RT_SUCCESS(rc)
1303 && cbGTBuffers < cbGTRead)
1304 {
1305 uint32_t *pTmp;
1306 pTmp = (uint32_t *)RTMemRealloc(pTmpGT1, cbGTRead);
1307 if (pTmp)
1308 {
1309 pTmpGT1 = pTmp;
1310 pTmp = (uint32_t *)RTMemRealloc(pTmpGT2, cbGTRead);
1311 if (pTmp)
1312 pTmpGT2 = pTmp;
1313 else
1314 rc = VERR_NO_MEMORY;
1315 }
1316 else
1317 rc = VERR_NO_MEMORY;
1318
1319 if (rc == VERR_NO_MEMORY)
1320 {
1321 /* Reset to the old values. */
1322 rc = VINF_SUCCESS;
1323 i -= cbGTRead / cbGT;
1324 cbGTRead = cbGT;
1325
1326 /* Don't try to increase the buffer again in the next run. */
1327 cbGTBuffersMax = cbGTBuffers;
1328 }
1329 }
1330
1331 if (RT_SUCCESS(rc))
1332 {
1333 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1334 * but in reality they are not compressed. */
1335 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1336 VMDK_SECTOR2BYTE(uGTStart),
1337 pTmpGT1, cbGTRead);
1338 if (RT_FAILURE(rc))
1339 {
1340 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1341 N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1342 break;
1343 }
1344 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1345 * but in reality they are not compressed. */
1346 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1347 VMDK_SECTOR2BYTE(uRGTStart),
1348 pTmpGT2, cbGTRead);
1349 if (RT_FAILURE(rc))
1350 {
1351 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1352 N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1353 break;
1354 }
1355 if (memcmp(pTmpGT1, pTmpGT2, cbGTRead))
1356 {
1357 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1358 N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1359 break;
1360 }
1361 }
1362 } /* while (i < pExtent->cGDEntries) */
1363
1364 /** @todo figure out what to do for unclean VMDKs. */
1365 if (pTmpGT1)
1366 RTMemFree(pTmpGT1);
1367 if (pTmpGT2)
1368 RTMemFree(pTmpGT2);
1369 }
1370 else
1371 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1372 N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1373 }
1374 }
1375 else
1376 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1377 N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname, rc);
1378 }
1379
1380 if (RT_FAILURE(rc))
1381 vmdkFreeGrainDirectory(pExtent);
1382 return rc;
1383}
1384
1385/**
1386 * Creates a new grain directory for the given extent at the given start sector.
1387 *
1388 * @returns VBox status code.
1389 * @param pImage Image instance data.
1390 * @param pExtent The VMDK extent.
1391 * @param uStartSector Where the grain directory should be stored in the image.
1392 * @param fPreAlloc Flag whether to pre allocate the grain tables at this point.
1393 */
1394static int vmdkCreateGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
1395 uint64_t uStartSector, bool fPreAlloc)
1396{
1397 int rc = VINF_SUCCESS;
1398 unsigned i;
1399 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1400 size_t cbGDRounded = RT_ALIGN_64(cbGD, 512);
1401 size_t cbGTRounded;
1402 uint64_t cbOverhead;
1403
1404 if (fPreAlloc)
1405 {
1406 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1407 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded + cbGTRounded;
1408 }
1409 else
1410 {
1411 /* Use a dummy start sector for layout computation. */
1412 if (uStartSector == VMDK_GD_AT_END)
1413 uStartSector = 1;
1414 cbGTRounded = 0;
1415 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded;
1416 }
1417
1418 /* For streamOptimized extents there is only one grain directory,
1419 * and for all others take redundant grain directory into account. */
1420 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1421 {
1422 cbOverhead = RT_ALIGN_64(cbOverhead,
1423 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1424 }
1425 else
1426 {
1427 cbOverhead += cbGDRounded + cbGTRounded;
1428 cbOverhead = RT_ALIGN_64(cbOverhead,
1429 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1430 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pExtent->pFile->pStorage, cbOverhead);
1431 }
1432
1433 if (RT_SUCCESS(rc))
1434 {
1435 pExtent->uAppendPosition = cbOverhead;
1436 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1437
1438 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1439 {
1440 pExtent->uSectorRGD = 0;
1441 pExtent->uSectorGD = uStartSector;
1442 }
1443 else
1444 {
1445 pExtent->uSectorRGD = uStartSector;
1446 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1447 }
1448
1449 rc = vmdkAllocStreamBuffers(pImage, pExtent);
1450 if (RT_SUCCESS(rc))
1451 {
1452 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1453 if ( RT_SUCCESS(rc)
1454 && fPreAlloc)
1455 {
1456 uint32_t uGTSectorLE;
1457 uint64_t uOffsetSectors;
1458
1459 if (pExtent->pRGD)
1460 {
1461 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1462 for (i = 0; i < pExtent->cGDEntries; i++)
1463 {
1464 pExtent->pRGD[i] = uOffsetSectors;
1465 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1466 /* Write the redundant grain directory entry to disk. */
1467 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1468 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1469 &uGTSectorLE, sizeof(uGTSectorLE));
1470 if (RT_FAILURE(rc))
1471 {
1472 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1473 break;
1474 }
1475 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1476 }
1477 }
1478
1479 if (RT_SUCCESS(rc))
1480 {
1481 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1482 for (i = 0; i < pExtent->cGDEntries; i++)
1483 {
1484 pExtent->pGD[i] = uOffsetSectors;
1485 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1486 /* Write the grain directory entry to disk. */
1487 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1488 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1489 &uGTSectorLE, sizeof(uGTSectorLE));
1490 if (RT_FAILURE(rc))
1491 {
1492 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1493 break;
1494 }
1495 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1496 }
1497 }
1498 }
1499 }
1500 }
1501
1502 if (RT_FAILURE(rc))
1503 vmdkFreeGrainDirectory(pExtent);
1504 return rc;
1505}
1506
1507/**
1508 * Unquotes the given string returning the result in a separate buffer.
1509 *
1510 * @returns VBox status code.
1511 * @param pImage The VMDK image state.
1512 * @param pszStr The string to unquote.
1513 * @param ppszUnquoted Where to store the return value, use RTMemTmpFree to
1514 * free.
1515 * @param ppszNext Where to store the pointer to any character following
1516 * the quoted value, optional.
1517 */
1518static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1519 char **ppszUnquoted, char **ppszNext)
1520{
1521 const char *pszStart = pszStr;
1522 char *pszQ;
1523 char *pszUnquoted;
1524
1525 /* Skip over whitespace. */
1526 while (*pszStr == ' ' || *pszStr == '\t')
1527 pszStr++;
1528
1529 if (*pszStr != '"')
1530 {
1531 pszQ = (char *)pszStr;
1532 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1533 pszQ++;
1534 }
1535 else
1536 {
1537 pszStr++;
1538 pszQ = (char *)strchr(pszStr, '"');
1539 if (pszQ == NULL)
1540 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s' (raw value %s)"),
1541 pImage->pszFilename, pszStart);
1542 }
1543
1544 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1545 if (!pszUnquoted)
1546 return VERR_NO_MEMORY;
1547 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1548 pszUnquoted[pszQ - pszStr] = '\0';
1549 *ppszUnquoted = pszUnquoted;
1550 if (ppszNext)
1551 *ppszNext = pszQ + 1;
1552 return VINF_SUCCESS;
1553}
1554
1555static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1556 const char *pszLine)
1557{
1558 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1559 ssize_t cbDiff = strlen(pszLine) + 1;
1560
1561 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1562 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1563 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1564
1565 memcpy(pEnd, pszLine, cbDiff);
1566 pDescriptor->cLines++;
1567 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1568 pDescriptor->fDirty = true;
1569
1570 return VINF_SUCCESS;
1571}
1572
1573static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1574 const char *pszKey, const char **ppszValue)
1575{
1576 size_t cbKey = strlen(pszKey);
1577 const char *pszValue;
1578
1579 while (uStart != 0)
1580 {
1581 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1582 {
1583 /* Key matches, check for a '=' (preceded by whitespace). */
1584 pszValue = pDescriptor->aLines[uStart] + cbKey;
1585 while (*pszValue == ' ' || *pszValue == '\t')
1586 pszValue++;
1587 if (*pszValue == '=')
1588 {
1589 *ppszValue = pszValue + 1;
1590 break;
1591 }
1592 }
1593 uStart = pDescriptor->aNextLines[uStart];
1594 }
1595 return !!uStart;
1596}
1597
/**
 * Sets the value for the given key in the descriptor section chain starting
 * at uStart: replaces an existing value, appends a new "key=value" line, or
 * removes the line when pszValue is NULL.
 *
 * @returns VBox status code.
 * @retval  VERR_BUFFER_OVERFLOW when the descriptor buffer or line table is full.
 * @param   pImage       The VMDK image state (for error reporting).
 * @param   pDescriptor  The descriptor to modify.
 * @param   uStart       Index of the first line of the section's line chain.
 * @param   pszKey       The key to set.
 * @param   pszValue     The new value, or NULL to remove the key.
 */
static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                          unsigned uStart,
                          const char *pszKey, const char *pszValue)
{
    char *pszTmp = NULL; /* (MSC naturally cannot figure this isn't used uninitialized) */
    size_t cbKey = strlen(pszKey);
    unsigned uLast = 0;

    /* Walk the section chain; on a match pszTmp ends up pointing at the
     * start of the existing value. uLast remembers the chain's last line
     * for the append case. */
    while (uStart != 0)
    {
        if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
        {
            /* Key matches, check for a '=' (preceded by whitespace). */
            pszTmp = pDescriptor->aLines[uStart] + cbKey;
            while (*pszTmp == ' ' || *pszTmp == '\t')
                pszTmp++;
            if (*pszTmp == '=')
            {
                pszTmp++;
                /** @todo r=bird: Doesn't skipping trailing blanks here just cause unecessary
                 *        bloat and potentially out of space error? */
                while (*pszTmp == ' ' || *pszTmp == '\t')
                    pszTmp++;
                break;
            }
        }
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }
    if (uStart)
    {
        if (pszValue)
        {
            /* Key already exists, replace existing value. */
            size_t cbOldVal = strlen(pszTmp);
            size_t cbNewVal = strlen(pszValue);
            ssize_t cbDiff = cbNewVal - cbOldVal;
            /* Check for buffer overflow. */
            if (   pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[0]
                > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
                return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

            /* Shift the buffer tail by the size delta, splice in the new
             * value (incl. terminator) and rebase following line pointers. */
            memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
                    pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
            memcpy(pszTmp, pszValue, cbNewVal + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
                pDescriptor->aLines[i] += cbDiff;
        }
        else
        {
            /* pszValue == NULL: remove the whole line and close the gap in
             * both the line pointer table and the chain links. */
            memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
                    pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            {
                pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
                if (pDescriptor->aNextLines[i])
                    pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
                else
                    pDescriptor->aNextLines[i-1] = 0;
            }
            pDescriptor->cLines--;
            /* Adjust starting line numbers of following descriptor sections. */
            if (uStart < pDescriptor->uFirstExtent)
                pDescriptor->uFirstExtent--;
            if (uStart < pDescriptor->uFirstDDB)
                pDescriptor->uFirstDDB--;
        }
    }
    else
    {
        /* Key doesn't exist, append after the last entry in this category. */
        if (!pszValue)
        {
            /* Key doesn't exist, and it should be removed. Simply a no-op. */
            return VINF_SUCCESS;
        }
        cbKey = strlen(pszKey);
        size_t cbValue = strlen(pszValue);
        ssize_t cbDiff = cbKey + 1 + cbValue + 1;
        /* Check for buffer overflow. */
        if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
            || (  pDescriptor->aLines[pDescriptor->cLines]
                - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
            return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
        /* Open a slot in the line table/chain directly after uLast. */
        for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
        {
            pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
            if (pDescriptor->aNextLines[i - 1])
                pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
            else
                pDescriptor->aNextLines[i] = 0;
        }
        uStart = uLast + 1;
        pDescriptor->aNextLines[uLast] = uStart;
        pDescriptor->aNextLines[uStart] = 0;
        pDescriptor->cLines++;
        pszTmp = pDescriptor->aLines[uStart];
        /* Shift the buffer tail and write "key=value\0" into the gap. */
        memmove(pszTmp + cbDiff, pszTmp,
                pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
        memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
        pDescriptor->aLines[uStart][cbKey] = '=';
        memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
        for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            pDescriptor->aLines[i] += cbDiff;

        /* Adjust starting line numbers of following descriptor sections. */
        if (uStart <= pDescriptor->uFirstExtent)
            pDescriptor->uFirstExtent++;
        if (uStart <= pDescriptor->uFirstDDB)
            pDescriptor->uFirstDDB++;
    }
    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1713
1714static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1715 uint32_t *puValue)
1716{
1717 const char *pszValue;
1718
1719 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1720 &pszValue))
1721 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1722 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1723}
1724
1725/**
1726 * Returns the value of the given key as a string allocating the necessary memory.
1727 *
1728 * @returns VBox status code.
1729 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1730 * @param pImage The VMDK image state.
1731 * @param pDescriptor The descriptor to fetch the value from.
1732 * @param pszKey The key to get the value from.
1733 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1734 * free.
1735 */
1736static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1737 const char *pszKey, char **ppszValue)
1738{
1739 const char *pszValue;
1740 char *pszValueUnquoted;
1741
1742 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1743 &pszValue))
1744 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1745 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1746 if (RT_FAILURE(rc))
1747 return rc;
1748 *ppszValue = pszValueUnquoted;
1749 return rc;
1750}
1751
1752static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1753 const char *pszKey, const char *pszValue)
1754{
1755 char *pszValueQuoted;
1756
1757 RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1758 if (!pszValueQuoted)
1759 return VERR_NO_STR_MEMORY;
1760 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1761 pszValueQuoted);
1762 RTStrFree(pszValueQuoted);
1763 return rc;
1764}
1765
/**
 * Removes the first extent line (the dummy entry) from the descriptor,
 * compacting the text buffer and shifting the line table/chain down by one.
 */
static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
                                   PVMDKDESCRIPTOR pDescriptor)
{
    RT_NOREF1(pImage);
    unsigned uEntry = pDescriptor->uFirstExtent;
    ssize_t cbDiff;

    /* Nothing to do when there is no extent section. */
    if (!uEntry)
        return;

    cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
    /* Move everything including \0 in the entry marking the end of buffer. */
    memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
            pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
    /* Rebase the line pointers past the removed text and shift the chain
     * links down by one line. */
    for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
    {
        pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
        if (pDescriptor->aNextLines[i])
            pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
        else
            pDescriptor->aNextLines[i - 1] = 0;
    }
    pDescriptor->cLines--;
    /* The DDB section follows the extent list, so its start line moves up. */
    if (pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB--;

    return;
}
1794
/**
 * Removes the extent description at the given descriptor line, compacting
 * the text buffer and shifting the line table/chain down by one.
 */
static void vmdkDescExtRemoveByLine(PVMDKIMAGE pImage,
                                    PVMDKDESCRIPTOR pDescriptor, unsigned uLine)
{
    RT_NOREF1(pImage);
    unsigned uEntry = uLine;
    ssize_t cbDiff;
    /* Line 0 is the end-of-chain marker, never a removable entry. */
    if (!uEntry)
        return;
    cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
    /* Move everything including \0 in the entry marking the end of buffer. */
    memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
            pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
    /* Rebase following line pointers (the removed line's own slot keeps its
     * pointer) and shift chain links down by one line. */
    for (unsigned i = uEntry; i <= pDescriptor->cLines; i++)
    {
        if (i != uEntry)
            pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
        if (pDescriptor->aNextLines[i])
            pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
        else
            pDescriptor->aNextLines[i - 1] = 0;
    }
    pDescriptor->cLines--;
    /* The DDB section follows the extent list, so its start line moves up. */
    if (pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB--;
    return;
}
1821
/**
 * Appends a new extent description line ("<access> <sectors> <type>
 * [\"basename\"] [offset]") after the last extent entry in the descriptor.
 *
 * @returns VBox status code.
 * @retval  VERR_BUFFER_OVERFLOW when the descriptor cannot take another line.
 * @param   pImage           The VMDK image state.
 * @param   pDescriptor      The descriptor to modify.
 * @param   enmAccess        Access mode of the extent.
 * @param   cNominalSectors  Nominal size of the extent in sectors.
 * @param   enmType          Extent type (SPARSE/FLAT/ZERO/VMFS).
 * @param   pszBasename      Extent file name (unused for ZERO extents).
 * @param   uSectorOffset    Start offset within the file (FLAT extents only).
 */
static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                             VMDKACCESS enmAccess, uint64_t cNominalSectors,
                             VMDKETYPE enmType, const char *pszBasename,
                             uint64_t uSectorOffset)
{
    static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
    static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
    char *pszTmp;
    unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
    char szExt[1024];
    ssize_t cbDiff;

    Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
    Assert((unsigned)enmType < RT_ELEMENTS(apszType));

    /* Find last entry in extent description. */
    while (uStart)
    {
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }

    /* Format the new extent line; ZERO extents carry no file name and only
     * FLAT extents carry a start offset. */
    if (enmType == VMDKETYPE_ZERO)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
                    cNominalSectors, apszType[enmType]);
    }
    else if (enmType == VMDKETYPE_FLAT)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename, uSectorOffset);
    }
    else
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename);
    }
    cbDiff = strlen(szExt) + 1;

    /* Check for buffer overflow. */
    if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
        || (  pDescriptor->aLines[pDescriptor->cLines]
            - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
    {
        if ((pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
             && !(pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1))
        {
            /* NOTE(review): only the size accounting is doubled here; no
             * reallocation of the backing buffer is visible in this function.
             * Verify the descriptor data buffer is actually grown elsewhere
             * before the extra space is written to. */
            pImage->cbDescAlloc *= 2;
            pDescriptor->cbDescAlloc *= 2;
        }
        else
            return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
    }

    /* Open a slot in the line table/chain directly after the last extent. */
    for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
    {
        pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
        if (pDescriptor->aNextLines[i - 1])
            pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
        else
            pDescriptor->aNextLines[i] = 0;
    }
    uStart = uLast + 1;
    pDescriptor->aNextLines[uLast] = uStart;
    pDescriptor->aNextLines[uStart] = 0;
    pDescriptor->cLines++;
    pszTmp = pDescriptor->aLines[uStart];
    /* Shift the buffer tail and copy the new line into the gap. */
    memmove(pszTmp + cbDiff, pszTmp,
            pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
    memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
    for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
        pDescriptor->aLines[i] += cbDiff;

    /* Adjust starting line numbers of following descriptor sections. */
    if (uStart <= pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB++;

    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1905
1906/**
1907 * Returns the value of the given key from the DDB as a string allocating
1908 * the necessary memory.
1909 *
1910 * @returns VBox status code.
1911 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1912 * @param pImage The VMDK image state.
1913 * @param pDescriptor The descriptor to fetch the value from.
1914 * @param pszKey The key to get the value from.
1915 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1916 * free.
1917 */
1918static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1919 const char *pszKey, char **ppszValue)
1920{
1921 const char *pszValue;
1922 char *pszValueUnquoted;
1923
1924 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1925 &pszValue))
1926 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1927 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1928 if (RT_FAILURE(rc))
1929 return rc;
1930 *ppszValue = pszValueUnquoted;
1931 return rc;
1932}
1933
1934static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1935 const char *pszKey, uint32_t *puValue)
1936{
1937 const char *pszValue;
1938 char *pszValueUnquoted;
1939
1940 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1941 &pszValue))
1942 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1943 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1944 if (RT_FAILURE(rc))
1945 return rc;
1946 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1947 RTMemTmpFree(pszValueUnquoted);
1948 return rc;
1949}
1950
1951static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1952 const char *pszKey, PRTUUID pUuid)
1953{
1954 const char *pszValue;
1955 char *pszValueUnquoted;
1956
1957 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1958 &pszValue))
1959 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1960 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1961 if (RT_FAILURE(rc))
1962 return rc;
1963 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1964 RTMemTmpFree(pszValueUnquoted);
1965 return rc;
1966}
1967
1968static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1969 const char *pszKey, const char *pszVal)
1970{
1971 int rc;
1972 char *pszValQuoted;
1973
1974 if (pszVal)
1975 {
1976 RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1977 if (!pszValQuoted)
1978 return VERR_NO_STR_MEMORY;
1979 }
1980 else
1981 pszValQuoted = NULL;
1982 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1983 pszValQuoted);
1984 if (pszValQuoted)
1985 RTStrFree(pszValQuoted);
1986 return rc;
1987}
1988
1989static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1990 const char *pszKey, PCRTUUID pUuid)
1991{
1992 char *pszUuid;
1993
1994 RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1995 if (!pszUuid)
1996 return VERR_NO_STR_MEMORY;
1997 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1998 pszUuid);
1999 RTStrFree(pszUuid);
2000 return rc;
2001}
2002
2003static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
2004 const char *pszKey, uint32_t uValue)
2005{
2006 char *pszValue;
2007
2008 RTStrAPrintf(&pszValue, "\"%d\"", uValue);
2009 if (!pszValue)
2010 return VERR_NO_STR_MEMORY;
2011 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
2012 pszValue);
2013 RTStrFree(pszValue);
2014 return rc;
2015}
2016
2017/**
2018 * Splits the descriptor data into individual lines checking for correct line
2019 * endings and descriptor size.
2020 *
2021 * @returns VBox status code.
2022 * @param pImage The image instance.
2023 * @param pDesc The descriptor.
2024 * @param pszTmp The raw descriptor data from the image.
2025 */
2026static int vmdkDescSplitLines(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDesc, char *pszTmp)
2027{
2028 unsigned cLine = 0;
2029 int rc = VINF_SUCCESS;
2030
2031 while ( RT_SUCCESS(rc)
2032 && *pszTmp != '\0')
2033 {
2034 pDesc->aLines[cLine++] = pszTmp;
2035 if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
2036 {
2037 vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
2038 rc = VERR_VD_VMDK_INVALID_HEADER;
2039 break;
2040 }
2041
2042 while (*pszTmp != '\0' && *pszTmp != '\n')
2043 {
2044 if (*pszTmp == '\r')
2045 {
2046 if (*(pszTmp + 1) != '\n')
2047 {
2048 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
2049 break;
2050 }
2051 else
2052 {
2053 /* Get rid of CR character. */
2054 *pszTmp = '\0';
2055 }
2056 }
2057 pszTmp++;
2058 }
2059
2060 if (RT_FAILURE(rc))
2061 break;
2062
2063 /* Get rid of LF character. */
2064 if (*pszTmp == '\n')
2065 {
2066 *pszTmp = '\0';
2067 pszTmp++;
2068 }
2069 }
2070
2071 if (RT_SUCCESS(rc))
2072 {
2073 pDesc->cLines = cLine;
2074 /* Pointer right after the end of the used part of the buffer. */
2075 pDesc->aLines[cLine] = pszTmp;
2076 }
2077
2078 return rc;
2079}
2080
/**
 * Performs the initial pass over a freshly read descriptor: splits it into
 * lines, verifies the signature comment on the first line and locates the
 * three sections (header keys, extent list, disk data base), recording the
 * index of the first line of each and chaining the non-empty lines of each
 * section together through pDescriptor->aNextLines.
 *
 * @returns VBox status code.
 * @param   pImage          The image instance (for error reporting).
 * @param   pDescData       The raw descriptor text; modified in place by the
 *                          line splitting.
 * @param   cbDescData      Size of the descriptor buffer in bytes.
 * @param   pDescriptor     The descriptor state to fill in.
 */
static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
                                    size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
{
    pDescriptor->cbDescAlloc = cbDescData;
    int rc = vmdkDescSplitLines(pImage, pDescriptor, pDescData);
    if (RT_SUCCESS(rc))
    {
        /* Accept the known signature spellings (VMware is not consistent). */
        if (    strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
            &&  strcmp(pDescriptor->aLines[0], "# Disk Descriptor File")
            &&  strcmp(pDescriptor->aLines[0], "#Disk Descriptor File")
            &&  strcmp(pDescriptor->aLines[0], "#Disk DescriptorFile"))
            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                           N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
        else
        {
            unsigned uLastNonEmptyLine = 0;

            /* Initialize those, because we need to be able to reopen an image. */
            pDescriptor->uFirstDesc = 0;
            pDescriptor->uFirstExtent = 0;
            pDescriptor->uFirstDDB = 0;
            for (unsigned i = 0; i < pDescriptor->cLines; i++)
            {
                /* Skip comments and empty lines; they belong to no section. */
                if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
                {
                    /* Classify the line by its prefix. The sections must
                     * appear in the fixed order: header, extents, DDB. */
                    if (    !strncmp(pDescriptor->aLines[i], "RW", 2)
                        ||  !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
                        ||  !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
                    {
                        /* An extent descriptor. */
                        if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstExtent)
                        {
                            pDescriptor->uFirstExtent = i;
                            /* New section: restart the aNextLines chain. */
                            uLastNonEmptyLine = 0;
                        }
                    }
                    else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
                    {
                        /* A disk database entry. */
                        if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstDDB)
                        {
                            pDescriptor->uFirstDDB = i;
                            uLastNonEmptyLine = 0;
                        }
                    }
                    else
                    {
                        /* A normal entry. */
                        if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstDesc)
                        {
                            pDescriptor->uFirstDesc = i;
                            uLastNonEmptyLine = 0;
                        }
                    }
                    /* Link the previous non-empty line of this section to
                     * the current one (a zero value terminates a chain). */
                    if (uLastNonEmptyLine)
                        pDescriptor->aNextLines[uLastNonEmptyLine] = i;
                    uLastNonEmptyLine = i;
                }
            }
        }
    }

    return rc;
}
2166
2167static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
2168 PCVDGEOMETRY pPCHSGeometry)
2169{
2170 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2171 VMDK_DDB_GEO_PCHS_CYLINDERS,
2172 pPCHSGeometry->cCylinders);
2173 if (RT_FAILURE(rc))
2174 return rc;
2175 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2176 VMDK_DDB_GEO_PCHS_HEADS,
2177 pPCHSGeometry->cHeads);
2178 if (RT_FAILURE(rc))
2179 return rc;
2180 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2181 VMDK_DDB_GEO_PCHS_SECTORS,
2182 pPCHSGeometry->cSectors);
2183 return rc;
2184}
2185
2186static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
2187 PCVDGEOMETRY pLCHSGeometry)
2188{
2189 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2190 VMDK_DDB_GEO_LCHS_CYLINDERS,
2191 pLCHSGeometry->cCylinders);
2192 if (RT_FAILURE(rc))
2193 return rc;
2194 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2195 VMDK_DDB_GEO_LCHS_HEADS,
2196
2197 pLCHSGeometry->cHeads);
2198 if (RT_FAILURE(rc))
2199 return rc;
2200 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2201 VMDK_DDB_GEO_LCHS_SECTORS,
2202 pLCHSGeometry->cSectors);
2203 return rc;
2204}
2205
2206static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
2207 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
2208{
2209 pDescriptor->uFirstDesc = 0;
2210 pDescriptor->uFirstExtent = 0;
2211 pDescriptor->uFirstDDB = 0;
2212 pDescriptor->cLines = 0;
2213 pDescriptor->cbDescAlloc = cbDescData;
2214 pDescriptor->fDirty = false;
2215 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
2216 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
2217
2218 int rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
2219 if (RT_SUCCESS(rc))
2220 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
2221 if (RT_SUCCESS(rc))
2222 {
2223 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
2224 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2225 }
2226 if (RT_SUCCESS(rc))
2227 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
2228 if (RT_SUCCESS(rc))
2229 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
2230 if (RT_SUCCESS(rc))
2231 {
2232 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
2233 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2234 }
2235 if (RT_SUCCESS(rc))
2236 {
2237 /* The trailing space is created by VMware, too. */
2238 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
2239 }
2240 if (RT_SUCCESS(rc))
2241 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
2242 if (RT_SUCCESS(rc))
2243 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2244 if (RT_SUCCESS(rc))
2245 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
2246 if (RT_SUCCESS(rc))
2247 {
2248 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2249
2250 /* Now that the framework is in place, use the normal functions to insert
2251 * the remaining keys. */
2252 char szBuf[9];
2253 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2254 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2255 "CID", szBuf);
2256 }
2257 if (RT_SUCCESS(rc))
2258 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2259 "parentCID", "ffffffff");
2260 if (RT_SUCCESS(rc))
2261 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2262
2263 return rc;
2264}
2265
/**
 * Parses a preprocessed descriptor: checks the format version, derives the
 * image flags from the createType key, parses each extent line into
 * pImage->pExtents, reads the PCHS/LCHS geometry hints and loads (or, for
 * writable images, creates and stores) the image/modification/parent UUIDs
 * from the disk data base.
 *
 * @returns VBox status code.
 * @param   pImage      The VMDK image state to fill in.
 * @param   pDescData   The raw descriptor text; modified in place by the
 *                      line splitting.
 * @param   cbDescData  Size of the descriptor buffer in bytes.
 */
static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData, size_t cbDescData)
{
    int rc;
    unsigned cExtents;
    unsigned uLine;
    unsigned i;

    rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
                                  &pImage->Descriptor);
    if (RT_FAILURE(rc))
        return rc;

    /* Check version, must be 1. */
    uint32_t uVersion;
    rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
    if (uVersion != 1)
        return vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);

    /* Get image creation type and determine image flags. */
    char *pszCreateType = NULL;   /* initialized to make gcc shut up */
    rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
                            &pszCreateType);
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
    /* Unrecognized createType values simply leave uImageFlags untouched. */
    if (    !strcmp(pszCreateType, "twoGbMaxExtentSparse")
        ||  !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
    else if (   !strcmp(pszCreateType, "partitionedDevice")
             || !strcmp(pszCreateType, "fullDevice"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
    else if (!strcmp(pszCreateType, "streamOptimized"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
    else if (!strcmp(pszCreateType, "vmfs"))
        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
    RTMemTmpFree(pszCreateType);

    /* Count the number of extent config entries. */
    for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
         uLine != 0;
         uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
        /* nothing */;

    if (!pImage->pDescData && cExtents != 1)
    {
        /* Monolithic image, must have only one extent (already opened). */
        return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
    }

    if (pImage->pDescData)
    {
        /* Non-monolithic image, extents need to be allocated. */
        rc = vmdkCreateExtents(pImage, cExtents);
        if (RT_FAILURE(rc))
            return rc;
    }

    /* Parse each extent line of the form:
     *   <access> <size in sectors> <type> ["<basename>" [<offset>]] */
    for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
         i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
    {
        char *pszLine = pImage->Descriptor.aLines[uLine];

        /* Access type of the extent. */
        if (!strncmp(pszLine, "RW", 2))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
            pszLine += 2;
        }
        else if (!strncmp(pszLine, "RDONLY", 6))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "NOACCESS", 8))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
            pszLine += 8;
        }
        else
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Nominal size of the extent. */
        rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                             &pImage->pExtents[i].cNominalSectors);
        if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Type of the extent. */
        if (!strncmp(pszLine, "SPARSE", 6))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "FLAT", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "ZERO", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "VMFS", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
            pszLine += 4;
        }
        else
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
        {
            /* This one has no basename or offset. */
            if (*pszLine == ' ')
                pszLine++;
            if (*pszLine != '\0')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
            pImage->pExtents[i].pszBasename = NULL;
        }
        else
        {
            /* All other extent types have basename and optional offset. */
            if (*pszLine++ != ' ')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

            /* Basename of the image. Surrounded by quotes. */
            char *pszBasename;
            rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
            if (RT_FAILURE(rc))
                return rc;
            pImage->pExtents[i].pszBasename = pszBasename;
            if (*pszLine == ' ')
            {
                pszLine++;
                if (*pszLine != '\0')
                {
                    /* Optional offset in extent specified. */
                    rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                                         &pImage->pExtents[i].uSectorOffset);
                    if (RT_FAILURE(rc))
                        return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
                }
            }

            if (*pszLine != '\0')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        }
    }

    /* Determine PCHS geometry (autogenerate if necessary). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_CYLINDERS,
                           &pImage->PCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_HEADS,
                           &pImage->PCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_SECTORS,
                           &pImage->PCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* Reject out-of-range BIOS geometry (max 16 heads, 63 sectors). */
    if (    pImage->PCHSGeometry.cCylinders == 0
        ||  pImage->PCHSGeometry.cHeads == 0
        ||  pImage->PCHSGeometry.cHeads > 16
        ||  pImage->PCHSGeometry.cSectors == 0
        ||  pImage->PCHSGeometry.cSectors > 63)
    {
        /* Mark PCHS geometry as not yet valid (can't do the calculation here
         * as the total image size isn't known yet). */
        pImage->PCHSGeometry.cCylinders = 0;
        pImage->PCHSGeometry.cHeads = 16;
        pImage->PCHSGeometry.cSectors = 63;
    }

    /* Determine LCHS geometry (set to 0 if not specified). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_CYLINDERS,
                           &pImage->LCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_HEADS,
                           &pImage->LCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_SECTORS,
                           &pImage->LCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* The LCHS geometry is all-or-nothing: any missing value voids it. */
    if (    pImage->LCHSGeometry.cCylinders == 0
        ||  pImage->LCHSGeometry.cHeads == 0
        ||  pImage->LCHSGeometry.cSectors == 0)
    {
        pImage->LCHSGeometry.cCylinders = 0;
        pImage->LCHSGeometry.cHeads = 0;
        pImage->LCHSGeometry.cSectors = 0;
    }

    /* Get image UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
                            &pImage->ImageUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ImageUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_MODIFICATION_UUID,
                                    &pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get UUID of parent image. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
                            &pImage->ParentUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentUuid);
        else
        {
            /* A zero parent UUID is stored deliberately - no parent known. */
            rc = RTUuidClear(&pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get parent image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentModificationUuid);
        else
        {
            RTUuidClear(&pImage->ParentModificationUuid);
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_MODIFICATION_UUID,
                                    &pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    return VINF_SUCCESS;
}
2587
2588/**
2589 * Internal : Prepares the descriptor to write to the image.
2590 */
2591static int vmdkDescriptorPrepare(PVMDKIMAGE pImage, uint64_t cbLimit,
2592 void **ppvData, size_t *pcbData)
2593{
2594 int rc = VINF_SUCCESS;
2595
2596 /*
2597 * Allocate temporary descriptor buffer.
2598 * In case there is no limit allocate a default
2599 * and increase if required.
2600 */
2601 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2602 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2603 size_t offDescriptor = 0;
2604
2605 if (!pszDescriptor)
2606 return VERR_NO_MEMORY;
2607
2608 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2609 {
2610 const char *psz = pImage->Descriptor.aLines[i];
2611 size_t cb = strlen(psz);
2612
2613 /*
2614 * Increase the descriptor if there is no limit and
2615 * there is not enough room left for this line.
2616 */
2617 if (offDescriptor + cb + 1 > cbDescriptor)
2618 {
2619 if (cbLimit)
2620 {
2621 rc = vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2622 break;
2623 }
2624 else
2625 {
2626 char *pszDescriptorNew = NULL;
2627 LogFlow(("Increasing descriptor cache\n"));
2628
2629 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2630 if (!pszDescriptorNew)
2631 {
2632 rc = VERR_NO_MEMORY;
2633 break;
2634 }
2635 pszDescriptor = pszDescriptorNew;
2636 cbDescriptor += cb + 4 * _1K;
2637 }
2638 }
2639
2640 if (cb > 0)
2641 {
2642 memcpy(pszDescriptor + offDescriptor, psz, cb);
2643 offDescriptor += cb;
2644 }
2645
2646 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2647 offDescriptor++;
2648 }
2649
2650 if (RT_SUCCESS(rc))
2651 {
2652 *ppvData = pszDescriptor;
2653 *pcbData = offDescriptor;
2654 }
2655 else if (pszDescriptor)
2656 RTMemFree(pszDescriptor);
2657
2658 return rc;
2659}
2660
2661/**
2662 * Internal: write/update the descriptor part of the image.
2663 */
2664static int vmdkWriteDescriptor(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2665{
2666 int rc = VINF_SUCCESS;
2667 uint64_t cbLimit;
2668 uint64_t uOffset;
2669 PVMDKFILE pDescFile;
2670 void *pvDescriptor = NULL;
2671 size_t cbDescriptor;
2672
2673 if (pImage->pDescData)
2674 {
2675 /* Separate descriptor file. */
2676 uOffset = 0;
2677 cbLimit = 0;
2678 pDescFile = pImage->pFile;
2679 }
2680 else
2681 {
2682 /* Embedded descriptor file. */
2683 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2684 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2685 pDescFile = pImage->pExtents[0].pFile;
2686 }
2687 /* Bail out if there is no file to write to. */
2688 if (pDescFile == NULL)
2689 return VERR_INVALID_PARAMETER;
2690
2691 rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor);
2692 if (RT_SUCCESS(rc))
2693 {
2694 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pDescFile->pStorage,
2695 uOffset, pvDescriptor,
2696 cbLimit ? cbLimit : cbDescriptor,
2697 pIoCtx, NULL, NULL);
2698 if ( RT_FAILURE(rc)
2699 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
2700 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2701 }
2702
2703 if (RT_SUCCESS(rc) && !cbLimit)
2704 {
2705 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pDescFile->pStorage, cbDescriptor);
2706 if (RT_FAILURE(rc))
2707 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2708 }
2709
2710 if (RT_SUCCESS(rc))
2711 pImage->Descriptor.fDirty = false;
2712
2713 if (pvDescriptor)
2714 RTMemFree(pvDescriptor);
2715 return rc;
2716
2717}
2718
2719/**
2720 * Internal: validate the consistency check values in a binary header.
2721 */
2722static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2723{
2724 int rc = VINF_SUCCESS;
2725 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2726 {
2727 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2728 return rc;
2729 }
2730 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2731 {
2732 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2733 return rc;
2734 }
2735 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2736 && ( pHeader->singleEndLineChar != '\n'
2737 || pHeader->nonEndLineChar != ' '
2738 || pHeader->doubleEndLineChar1 != '\r'
2739 || pHeader->doubleEndLineChar2 != '\n') )
2740 {
2741 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2742 return rc;
2743 }
2744 if (RT_LE2H_U64(pHeader->descriptorSize) > VMDK_SPARSE_DESCRIPTOR_SIZE_MAX)
2745 {
2746 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor size out of bounds (%llu vs %llu) '%s'"),
2747 pExtent->pszFullname, RT_LE2H_U64(pHeader->descriptorSize), VMDK_SPARSE_DESCRIPTOR_SIZE_MAX);
2748 return rc;
2749 }
2750 return rc;
2751}
2752
2753/**
2754 * Internal: read metadata belonging to an extent with binary header, i.e.
2755 * as found in monolithic files.
2756 */
2757static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2758 bool fMagicAlreadyRead)
2759{
2760 SparseExtentHeader Header;
2761 int rc;
2762
2763 if (!fMagicAlreadyRead)
2764 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 0,
2765 &Header, sizeof(Header));
2766 else
2767 {
2768 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2769 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2770 RT_UOFFSETOF(SparseExtentHeader, version),
2771 &Header.version,
2772 sizeof(Header)
2773 - RT_UOFFSETOF(SparseExtentHeader, version));
2774 }
2775
2776 if (RT_SUCCESS(rc))
2777 {
2778 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2779 if (RT_SUCCESS(rc))
2780 {
2781 uint64_t cbFile = 0;
2782
2783 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17))
2784 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2785 pExtent->fFooter = true;
2786
2787 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2788 || ( pExtent->fFooter
2789 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2790 {
2791 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);
2792 if (RT_FAILURE(rc))
2793 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2794 }
2795
2796 if (RT_SUCCESS(rc))
2797 {
2798 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
2799 pExtent->uAppendPosition = RT_ALIGN_64(cbFile, 512);
2800
2801 if ( pExtent->fFooter
2802 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2803 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2804 {
2805 /* Read the footer, which comes before the end-of-stream marker. */
2806 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2807 cbFile - 2*512, &Header,
2808 sizeof(Header));
2809 if (RT_FAILURE(rc))
2810 {
2811 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2812 rc = VERR_VD_VMDK_INVALID_HEADER;
2813 }
2814
2815 if (RT_SUCCESS(rc))
2816 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2817 /* Prohibit any writes to this extent. */
2818 pExtent->uAppendPosition = 0;
2819 }
2820
2821 if (RT_SUCCESS(rc))
2822 {
2823 pExtent->uVersion = RT_LE2H_U32(Header.version);
2824 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2825 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2826 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2827 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2828 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2829 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2830 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2831 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2832 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2833 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2834 {
2835 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2836 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2837 }
2838 else
2839 {
2840 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2841 pExtent->uSectorRGD = 0;
2842 }
2843
2844 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2845 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2846 N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2847
2848 if ( RT_SUCCESS(rc)
2849 && ( pExtent->uSectorGD == VMDK_GD_AT_END
2850 || pExtent->uSectorRGD == VMDK_GD_AT_END)
2851 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2852 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2853 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2854 N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2855
2856 if (RT_SUCCESS(rc))
2857 {
2858 uint64_t cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2859 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2860 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2861 N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2862 else
2863 {
2864 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2865 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2866
2867 /* Fix up the number of descriptor sectors, as some flat images have
2868 * really just one, and this causes failures when inserting the UUID
2869 * values and other extra information. */
2870 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2871 {
2872 /* Do it the easy way - just fix it for flat images which have no
2873 * other complicated metadata which needs space too. */
2874 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2875 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2876 pExtent->cDescriptorSectors = 4;
2877 }
2878 }
2879 }
2880 }
2881 }
2882 }
2883 }
2884 else
2885 {
2886 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2887 rc = VERR_VD_VMDK_INVALID_HEADER;
2888 }
2889
2890 if (RT_FAILURE(rc))
2891 vmdkFreeExtentData(pImage, pExtent, false);
2892
2893 return rc;
2894}
2895
2896/**
2897 * Internal: read additional metadata belonging to an extent. For those
2898 * extents which have no additional metadata just verify the information.
2899 */
2900static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2901{
2902 int rc = VINF_SUCCESS;
2903
2904/* disabled the check as there are too many truncated vmdk images out there */
2905#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2906 uint64_t cbExtentSize;
2907 /* The image must be a multiple of a sector in size and contain the data
2908 * area (flat images only). If not, it means the image is at least
2909 * truncated, or even seriously garbled. */
2910 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbExtentSize);
2911 if (RT_FAILURE(rc))
2912 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2913 else if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2914 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2915 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2916 N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2917#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2918 if ( RT_SUCCESS(rc)
2919 && pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
2920 {
2921 /* The spec says that this must be a power of two and greater than 8,
2922 * but probably they meant not less than 8. */
2923 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2924 || pExtent->cSectorsPerGrain < 8)
2925 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2926 N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2927 else
2928 {
2929 /* This code requires that a grain table must hold a power of two multiple
2930 * of the number of entries per GT cache entry. */
2931 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2932 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2933 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2934 N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2935 else
2936 {
2937 rc = vmdkAllocStreamBuffers(pImage, pExtent);
2938 if (RT_SUCCESS(rc))
2939 {
2940 /* Prohibit any writes to this streamOptimized extent. */
2941 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2942 pExtent->uAppendPosition = 0;
2943
2944 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2945 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2946 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
2947 rc = vmdkReadGrainDirectory(pImage, pExtent);
2948 else
2949 {
2950 pExtent->uGrainSectorAbs = pExtent->cOverheadSectors;
2951 pExtent->cbGrainStreamRead = 0;
2952 }
2953 }
2954 }
2955 }
2956 }
2957
2958 if (RT_FAILURE(rc))
2959 vmdkFreeExtentData(pImage, pExtent, false);
2960
2961 return rc;
2962}
2963
2964/**
2965 * Internal: write/update the metadata for a sparse extent.
2966 */
2967static int vmdkWriteMetaSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2968 uint64_t uOffset, PVDIOCTX pIoCtx)
2969{
2970 SparseExtentHeader Header;
2971
2972 memset(&Header, '\0', sizeof(Header));
2973 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2974 Header.version = RT_H2LE_U32(pExtent->uVersion);
2975 Header.flags = RT_H2LE_U32(RT_BIT(0));
2976 if (pExtent->pRGD)
2977 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2978 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2979 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2980 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2981 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2982 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2983 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2984 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2985 if (pExtent->fFooter && uOffset == 0)
2986 {
2987 if (pExtent->pRGD)
2988 {
2989 Assert(pExtent->uSectorRGD);
2990 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2991 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2992 }
2993 else
2994 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2995 }
2996 else
2997 {
2998 if (pExtent->pRGD)
2999 {
3000 Assert(pExtent->uSectorRGD);
3001 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
3002 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
3003 }
3004 else
3005 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
3006 }
3007 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
3008 Header.uncleanShutdown = pExtent->fUncleanShutdown;
3009 Header.singleEndLineChar = '\n';
3010 Header.nonEndLineChar = ' ';
3011 Header.doubleEndLineChar1 = '\r';
3012 Header.doubleEndLineChar2 = '\n';
3013 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
3014
3015 int rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
3016 uOffset, &Header, sizeof(Header),
3017 pIoCtx, NULL, NULL);
3018 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
3019 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
3020 return rc;
3021}
3022
3023/**
3024 * Internal: free the buffers used for streamOptimized images.
3025 */
3026static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent)
3027{
3028 if (pExtent->pvCompGrain)
3029 {
3030 RTMemFree(pExtent->pvCompGrain);
3031 pExtent->pvCompGrain = NULL;
3032 }
3033 if (pExtent->pvGrain)
3034 {
3035 RTMemFree(pExtent->pvGrain);
3036 pExtent->pvGrain = NULL;
3037 }
3038}
3039
3040/**
3041 * Internal: free the memory used by the extent data structure, optionally
3042 * deleting the referenced files.
3043 *
3044 * @returns VBox status code.
3045 * @param pImage Pointer to the image instance data.
3046 * @param pExtent The extent to free.
3047 * @param fDelete Flag whether to delete the backing storage.
3048 */
3049static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
3050 bool fDelete)
3051{
3052 int rc = VINF_SUCCESS;
3053
3054 vmdkFreeGrainDirectory(pExtent);
3055 if (pExtent->pDescData)
3056 {
3057 RTMemFree(pExtent->pDescData);
3058 pExtent->pDescData = NULL;
3059 }
3060 if (pExtent->pFile != NULL)
3061 {
3062 /* Do not delete raw extents, these have full and base names equal. */
3063 rc = vmdkFileClose(pImage, &pExtent->pFile,
3064 fDelete
3065 && pExtent->pszFullname
3066 && pExtent->pszBasename
3067 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
3068 }
3069 if (pExtent->pszBasename)
3070 {
3071 RTMemTmpFree((void *)pExtent->pszBasename);
3072 pExtent->pszBasename = NULL;
3073 }
3074 if (pExtent->pszFullname)
3075 {
3076 RTStrFree((char *)(void *)pExtent->pszFullname);
3077 pExtent->pszFullname = NULL;
3078 }
3079 vmdkFreeStreamBuffers(pExtent);
3080
3081 return rc;
3082}
3083
3084/**
3085 * Internal: allocate grain table cache if necessary for this image.
3086 */
3087static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
3088{
3089 PVMDKEXTENT pExtent;
3090
3091 /* Allocate grain table cache if any sparse extent is present. */
3092 for (unsigned i = 0; i < pImage->cExtents; i++)
3093 {
3094 pExtent = &pImage->pExtents[i];
3095 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
3096 {
3097 /* Allocate grain table cache. */
3098 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
3099 if (!pImage->pGTCache)
3100 return VERR_NO_MEMORY;
3101 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
3102 {
3103 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
3104 pGCE->uExtent = UINT32_MAX;
3105 }
3106 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
3107 break;
3108 }
3109 }
3110
3111 return VINF_SUCCESS;
3112}
3113
3114/**
3115 * Internal: allocate the given number of extents.
3116 */
3117static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
3118{
3119 int rc = VINF_SUCCESS;
3120 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
3121 if (pExtents)
3122 {
3123 for (unsigned i = 0; i < cExtents; i++)
3124 {
3125 pExtents[i].pFile = NULL;
3126 pExtents[i].pszBasename = NULL;
3127 pExtents[i].pszFullname = NULL;
3128 pExtents[i].pGD = NULL;
3129 pExtents[i].pRGD = NULL;
3130 pExtents[i].pDescData = NULL;
3131 pExtents[i].uVersion = 1;
3132 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
3133 pExtents[i].uExtent = i;
3134 pExtents[i].pImage = pImage;
3135 }
3136 pImage->pExtents = pExtents;
3137 pImage->cExtents = cExtents;
3138 }
3139 else
3140 rc = VERR_NO_MEMORY;
3141
3142 return rc;
3143}
3144
3145/**
3146 * Internal: Create an additional file backed extent in split images.
3147 * Supports split sparse and flat images.
3148 *
3149 * @returns VBox status code.
3150 * @param pImage VMDK image instance.
3151 * @param cbSize Desiried size in bytes of new extent.
3152 */
3153static int vmdkAddFileBackedExtent(PVMDKIMAGE pImage, uint64_t cbSize)
3154{
3155 int rc = VINF_SUCCESS;
3156 unsigned uImageFlags = pImage->uImageFlags;
3157
3158 /* Check for unsupported image type. */
3159 if ((uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3160 || (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3161 || (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK))
3162 {
3163 return VERR_NOT_SUPPORTED;
3164 }
3165
3166 /* Allocate array of extents and copy existing extents to it. */
3167 PVMDKEXTENT pNewExtents = (PVMDKEXTENT)RTMemAllocZ((pImage->cExtents + 1) * sizeof(VMDKEXTENT));
3168 if (!pNewExtents)
3169 {
3170 return VERR_NO_MEMORY;
3171 }
3172
3173 memcpy(pNewExtents, pImage->pExtents, pImage->cExtents * sizeof(VMDKEXTENT));
3174
3175 /* Locate newly created extent and populate default metadata. */
3176 PVMDKEXTENT pExtent = &pNewExtents[pImage->cExtents];
3177
3178 pExtent->pFile = NULL;
3179 pExtent->pszBasename = NULL;
3180 pExtent->pszFullname = NULL;
3181 pExtent->pGD = NULL;
3182 pExtent->pRGD = NULL;
3183 pExtent->pDescData = NULL;
3184 pExtent->uVersion = 1;
3185 pExtent->uCompression = VMDK_COMPRESSION_NONE;
3186 pExtent->uExtent = pImage->cExtents;
3187 pExtent->pImage = pImage;
3188 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3189 pExtent->enmAccess = VMDKACCESS_READWRITE;
3190 pExtent->uSectorOffset = 0;
3191 pExtent->fMetaDirty = true;
3192
3193 /* Apply image type specific meta data. */
3194 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3195 {
3196 pExtent->enmType = VMDKETYPE_FLAT;
3197 }
3198 else
3199 {
3200 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
3201 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
3202 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
3203 pExtent->cGTEntries = 512;
3204
3205 uint64_t const cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3206 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3207 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3208 }
3209
3210 /* Allocate and set file name for extent. */
3211 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3212 AssertPtr(pszBasenameSubstr);
3213
3214 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
3215 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
3216 RTPathStripSuffix(pszBasenameBase);
3217 char *pszTmp;
3218 size_t cbTmp;
3219
3220 if (pImage->uImageFlags & VD_IMAGE_FLAGS_FIXED)
3221 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
3222 pExtent->uExtent + 1, pszBasenameSuff);
3223 else
3224 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, pExtent->uExtent + 1,
3225 pszBasenameSuff);
3226
3227 RTStrFree(pszBasenameBase);
3228 if (!pszTmp)
3229 return VERR_NO_STR_MEMORY;
3230 cbTmp = strlen(pszTmp) + 1;
3231 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
3232 if (!pszBasename)
3233 {
3234 RTStrFree(pszTmp);
3235 return VERR_NO_MEMORY;
3236 }
3237
3238 memcpy(pszBasename, pszTmp, cbTmp);
3239 RTStrFree(pszTmp);
3240
3241 pExtent->pszBasename = pszBasename;
3242
3243 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3244 if (!pszBasedirectory)
3245 return VERR_NO_STR_MEMORY;
3246 RTPathStripFilename(pszBasedirectory);
3247 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
3248 RTStrFree(pszBasedirectory);
3249 if (!pszFullname)
3250 return VERR_NO_STR_MEMORY;
3251 pExtent->pszFullname = pszFullname;
3252
3253 /* Create file for extent. */
3254 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3255 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3256 true /* fCreate */));
3257 if (RT_FAILURE(rc))
3258 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3259
3260 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3261 {
3262 /* For flat images: Pre allocate file space. */
3263 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbSize,
3264 0 /* fFlags */, NULL, 0, 0);
3265 if (RT_FAILURE(rc))
3266 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
3267 }
3268 else
3269 {
3270 /* For sparse images: Allocate new grain directories/tables. */
3271 /* fPreAlloc should never be false because VMware can't use such images. */
3272 rc = vmdkCreateGrainDirectory(pImage, pExtent,
3273 RT_MAX( pExtent->uDescriptorSector
3274 + pExtent->cDescriptorSectors,
3275 1),
3276 true /* fPreAlloc */);
3277 if (RT_FAILURE(rc))
3278 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
3279 }
3280
3281 /* Insert new extent into descriptor file. */
3282 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
3283 pExtent->cNominalSectors, pExtent->enmType,
3284 pExtent->pszBasename, pExtent->uSectorOffset);
3285 if (RT_FAILURE(rc))
3286 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
3287
3288 pImage->pExtents = pNewExtents;
3289 pImage->cExtents++;
3290
3291 return rc;
3292}
3293
/**
 * Reads and processes the descriptor embedded in sparse images.
 *
 * @returns VBox status code.
 * @param   pImage  VMDK image instance.
 * @param   pFile   The sparse file handle.  On success ownership moves to the
 *                  single extent and pImage->pFile is cleared.
 */
static int vmdkDescriptorReadSparse(PVMDKIMAGE pImage, PVMDKFILE pFile)
{
    /* It's a hosted single-extent image. */
    int rc = vmdkCreateExtents(pImage, 1);
    if (RT_SUCCESS(rc))
    {
        /* The opened file is passed to the extent. No separate descriptor
         * file, so no need to keep anything open for the image. */
        PVMDKEXTENT pExtent = &pImage->pExtents[0];
        pExtent->pFile = pFile;
        pImage->pFile = NULL;
        pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
        if (RT_LIKELY(pExtent->pszFullname))
        {
            /* As we're dealing with a monolithic image here, there must
             * be a descriptor embedded in the image file. */
            rc = vmdkReadBinaryMetaExtent(pImage, pExtent, true /* fMagicAlreadyRead */);
            if (   RT_SUCCESS(rc)
                && pExtent->uDescriptorSector
                && pExtent->cDescriptorSectors)
            {
                /* HACK: extend the descriptor if it is unusually small and it fits in
                 * the unused space after the image header. Allows opening VMDK files
                 * with extremely small descriptor in read/write mode.
                 *
                 * The previous version introduced a possible regression for VMDK stream
                 * optimized images from VMware which tend to have only a single sector sized
                 * descriptor. Increasing the descriptor size resulted in adding the various uuid
                 * entries required to make it work with VBox but for stream optimized images
                 * the updated binary header wasn't written to the disk creating a mismatch
                 * between advertised and real descriptor size.
                 *
                 * The descriptor size will be increased even if opened readonly now if there
                 * enough room but the new value will not be written back to the image.
                 */
                if (   pExtent->cDescriptorSectors < 3
                    && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
                    && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
                {
                    uint64_t cDescriptorSectorsOld = pExtent->cDescriptorSectors;

                    pExtent->cDescriptorSectors = 4;
                    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
                    {
                        /*
                         * Update the on disk number now to make sure we don't introduce inconsistencies
                         * in case of stream optimized images from VMware where the descriptor is just
                         * one sector big (the binary header is not written to disk for complete
                         * stream optimized images in vmdkFlushImage()).
                         */
                        uint64_t u64DescSizeNew = RT_H2LE_U64(pExtent->cDescriptorSectors);
                        rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pFile->pStorage,
                                                    RT_UOFFSETOF(SparseExtentHeader, descriptorSize),
                                                    &u64DescSizeNew, sizeof(u64DescSizeNew));
                        if (RT_FAILURE(rc))
                        {
                            LogFlowFunc(("Increasing the descriptor size failed with %Rrc\n", rc));
                            /* Restore the old size and carry on. */
                            pExtent->cDescriptorSectors = cDescriptorSectorsOld;
                        }
                    }
                }
                /* Read the descriptor from the extent. */
                pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
                if (RT_LIKELY(pExtent->pDescData))
                {
                    rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                               VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
                                               pExtent->pDescData,
                                               VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
                    if (RT_SUCCESS(rc))
                    {
                        rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
                                                 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
                        /* streamOptimized images are rejected below when async
                         * I/O is requested (VERR_NOT_SUPPORTED). */
                        if (   RT_SUCCESS(rc)
                            && (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                                || !(pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)))
                        {
                            rc = vmdkReadMetaExtent(pImage, pExtent);
                            if (RT_SUCCESS(rc))
                            {
                                /* Mark the extent as unclean if opened in read-write mode. */
                                if (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                                    && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
                                {
                                    pExtent->fUncleanShutdown = true;
                                    pExtent->fMetaDirty = true;
                                }
                            }
                        }
                        else if (RT_SUCCESS(rc))
                            rc = VERR_NOT_SUPPORTED;
                    }
                    else
                        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
                }
                else
                    rc = VERR_NO_MEMORY;
            }
            else if (RT_SUCCESS(rc))
                rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
        }
        else
            rc = VERR_NO_MEMORY;
    }

    return rc;
}
3409
/**
 * Reads the descriptor from a pure text file.
 *
 * @returns VBox status code.
 * @param   pImage  VMDK image instance.
 * @param   pFile   The descriptor file handle.
 */
static int vmdkDescriptorReadAscii(PVMDKIMAGE pImage, PVMDKFILE pFile)
{
    /* Allocate at least 10K, and make sure that there is 5K free space
     * in case new entries need to be added to the descriptor. Never
     * allocate more than 128K, because that's no valid descriptor file
     * and will result in the correct "truncated read" error handling. */
    uint64_t cbFileSize;
    int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pFile->pStorage, &cbFileSize);
    if (   RT_SUCCESS(rc)
        && cbFileSize >= 50) /* anything shorter cannot be a valid descriptor */
    {
        /* Round the allocation up to the next 5K boundary, leaving at least
         * 5K of headroom, and clamp to the 128K maximum. */
        uint64_t cbSize = cbFileSize;
        if (cbSize % VMDK_SECTOR2BYTE(10))
            cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
        else
            cbSize += VMDK_SECTOR2BYTE(10);
        cbSize = RT_MIN(cbSize, _128K);
        pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
        pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
        if (RT_LIKELY(pImage->pDescData))
        {
            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0, pImage->pDescData,
                                       RT_MIN(pImage->cbDescAlloc, cbFileSize));
            if (RT_SUCCESS(rc))
            {
#if 0 /** @todo Revisit */
                cbRead += sizeof(u32Magic);
                if (cbRead == pImage->cbDescAlloc)
                {
                    /* Likely the read is truncated. Better fail a bit too early
                     * (normally the descriptor is much smaller than our buffer). */
                    rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
#endif
                rc = vmdkParseDescriptor(pImage, pImage->pDescData,
                                         pImage->cbDescAlloc);
                if (RT_SUCCESS(rc))
                {
                    /* Open every extent listed in the descriptor and read its
                     * metadata; stop at the first failure. */
                    for (unsigned i = 0; i < pImage->cExtents && RT_SUCCESS(rc); i++)
                    {
                        PVMDKEXTENT pExtent = &pImage->pExtents[i];
                        if (pExtent->pszBasename)
                        {
                            /* Hack to figure out whether the specified name in the
                             * extent descriptor is absolute. Doesn't always work, but
                             * should be good enough for now. */
                            char *pszFullname;
                            /** @todo implement proper path absolute check. */
                            if (pExtent->pszBasename[0] == RTPATH_SLASH)
                            {
                                pszFullname = RTStrDup(pExtent->pszBasename);
                                if (!pszFullname)
                                {
                                    rc = VERR_NO_MEMORY;
                                    break;
                                }
                            }
                            else
                            {
                                /* Relative name: resolve against the directory
                                 * of the descriptor file. */
                                char *pszDirname = RTStrDup(pImage->pszFilename);
                                if (!pszDirname)
                                {
                                    rc = VERR_NO_MEMORY;
                                    break;
                                }
                                RTPathStripFilename(pszDirname);
                                pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
                                RTStrFree(pszDirname);
                                if (!pszFullname)
                                {
                                    rc = VERR_NO_STR_MEMORY;
                                    break;
                                }
                            }
                            pExtent->pszFullname = pszFullname;
                        }
                        else
                            pExtent->pszFullname = NULL;

                        /* Force read-only access on extents marked RDONLY in the descriptor. */
                        unsigned uOpenFlags = pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0);
                        switch (pExtent->enmType)
                        {
                            case VMDKETYPE_HOSTED_SPARSE:
                                rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
                                                  VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
                                if (RT_FAILURE(rc))
                                {
                                    /* Do NOT signal an appropriate error here, as the VD
                                     * layer has the choice of retrying the open if it
                                     * failed. */
                                    break;
                                }
                                rc = vmdkReadBinaryMetaExtent(pImage, pExtent,
                                                              false /* fMagicAlreadyRead */);
                                if (RT_FAILURE(rc))
                                    break;
                                rc = vmdkReadMetaExtent(pImage, pExtent);
                                if (RT_FAILURE(rc))
                                    break;

                                /* Mark extent as unclean if opened in read-write mode. */
                                if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
                                {
                                    pExtent->fUncleanShutdown = true;
                                    pExtent->fMetaDirty = true;
                                }
                                break;
                            case VMDKETYPE_VMFS:
                            case VMDKETYPE_FLAT:
                                rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
                                                  VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
                                if (RT_FAILURE(rc))
                                {
                                    /* Do NOT signal an appropriate error here, as the VD
                                     * layer has the choice of retrying the open if it
                                     * failed. */
                                    break;
                                }
                                break;
                            case VMDKETYPE_ZERO:
                                /* Nothing to do. */
                                break;
                            default:
                                AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
                        }
                    }
                }
            }
            else
                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    else if (RT_SUCCESS(rc))
        rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor in '%s' is too short"), pImage->pszFilename);

    return rc;
}
3557
3558/**
3559 * Read and process the descriptor based on the image type.
3560 *
3561 * @returns VBox status code.
3562 * @param pImage VMDK image instance.
3563 * @param pFile VMDK file handle.
3564 */
3565static int vmdkDescriptorRead(PVMDKIMAGE pImage, PVMDKFILE pFile)
3566{
3567 uint32_t u32Magic;
3568
3569 /* Read magic (if present). */
3570 int rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0,
3571 &u32Magic, sizeof(u32Magic));
3572 if (RT_SUCCESS(rc))
3573 {
3574 /* Handle the file according to its magic number. */
3575 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3576 rc = vmdkDescriptorReadSparse(pImage, pFile);
3577 else
3578 rc = vmdkDescriptorReadAscii(pImage, pFile);
3579 }
3580 else
3581 {
3582 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3583 rc = VERR_VD_VMDK_INVALID_HEADER;
3584 }
3585
3586 return rc;
3587}
3588
/**
 * Internal: Open an image, constructing all necessary data structures.
 *
 * @returns VBox status code.
 * @param   pImage      The image instance to initialize.
 * @param   uOpenFlags  VD_OPEN_FLAGS_XXX controlling access mode.
 */
static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
{
    pImage->uOpenFlags = uOpenFlags;
    pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
    pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
    AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);

    /*
     * Open the image.
     * We don't have to check for asynchronous access because
     * we only support raw access and the opened file is a description
     * file where no data is stored.
     */
    PVMDKFILE pFile;
    int rc = vmdkFileOpen(pImage, &pFile, NULL, pImage->pszFilename,
                          VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
    if (RT_SUCCESS(rc))
    {
        pImage->pFile = pFile;

        rc = vmdkDescriptorRead(pImage, pFile);
        if (RT_SUCCESS(rc))
        {
            /* Determine PCHS geometry if not set. */
            if (pImage->PCHSGeometry.cCylinders == 0)
            {
                /* NOTE(review): assumes cHeads and cSectors are non-zero at
                 * this point (presumably initialized while parsing the
                 * descriptor) — division by zero otherwise; verify against
                 * vmdkParseDescriptor. */
                uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
                                    / pImage->PCHSGeometry.cHeads
                                    / pImage->PCHSGeometry.cSectors;
                /* 16383 is the conventional ATA cylinder-count cap. */
                pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
                if (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                    && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
                {
                    rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
                    AssertRC(rc);
                }
            }

            /* Update the image metadata now in case it has changed. */
            rc = vmdkFlushImage(pImage, NULL);
            if (RT_SUCCESS(rc))
            {
                /* Figure out a few per-image constants from the extents. */
                pImage->cbSize = 0;
                for (unsigned i = 0; i < pImage->cExtents; i++)
                {
                    PVMDKEXTENT pExtent = &pImage->pExtents[i];
                    if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
                    {
                        /* Here used to be a check whether the nominal size of an extent
                         * is a multiple of the grain size. The spec says that this is
                         * always the case, but unfortunately some files out there in the
                         * wild violate the spec (e.g. ReactOS 0.3.1). */
                    }
                    else if (   pExtent->enmType == VMDKETYPE_FLAT
                             || pExtent->enmType == VMDKETYPE_ZERO)
                        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;

                    pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
                }

                /* No grain table cache needed for sequential read-only
                 * streamOptimized access. */
                if (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                    || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
                    rc = vmdkAllocateGrainTableCache(pImage);
            }
        }
    }
    /* else: Do NOT signal an appropriate error here, as the VD layer has the
     *       choice of retrying the open if it failed. */

    if (RT_SUCCESS(rc))
    {
        /* Publish the single raw 512-byte-sector region covering the disk. */
        PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
        pImage->RegionList.fFlags   = 0;
        pImage->RegionList.cRegions = 1;

        pRegion->offRegion            = 0; /* Disk start. */
        pRegion->cbBlock              = 512;
        pRegion->enmDataForm          = VDREGIONDATAFORM_RAW;
        pRegion->enmMetadataForm      = VDREGIONMETADATAFORM_NONE;
        pRegion->cbData               = 512;
        pRegion->cbMetadata           = 0;
        pRegion->cRegionBlocksOrBytes = pImage->cbSize;
    }
    else
        vmdkFreeImage(pImage, false, false /*fFlush*/); /* Don't try to flush anything if opening failed. */
    return rc;
}
3681
3682/**
3683 * Frees a raw descriptor.
3684 * @internal
3685 */
3686static int vmdkRawDescFree(PVDISKRAW pRawDesc)
3687{
3688 if (!pRawDesc)
3689 return VINF_SUCCESS;
3690
3691 RTStrFree(pRawDesc->pszRawDisk);
3692 pRawDesc->pszRawDisk = NULL;
3693
3694 /* Partitions: */
3695 for (unsigned i = 0; i < pRawDesc->cPartDescs; i++)
3696 {
3697 RTStrFree(pRawDesc->pPartDescs[i].pszRawDevice);
3698 pRawDesc->pPartDescs[i].pszRawDevice = NULL;
3699
3700 RTMemFree(pRawDesc->pPartDescs[i].pvPartitionData);
3701 pRawDesc->pPartDescs[i].pvPartitionData = NULL;
3702 }
3703
3704 RTMemFree(pRawDesc->pPartDescs);
3705 pRawDesc->pPartDescs = NULL;
3706
3707 RTMemFree(pRawDesc);
3708 return VINF_SUCCESS;
3709}
3710
3711/**
3712 * Helper that grows the raw partition descriptor table by @a cToAdd entries,
3713 * returning the pointer to the first new entry.
3714 * @internal
3715 */
3716static int vmdkRawDescAppendPartDesc(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint32_t cToAdd, PVDISKRAWPARTDESC *ppRet)
3717{
3718 uint32_t const cOld = pRawDesc->cPartDescs;
3719 uint32_t const cNew = cOld + cToAdd;
3720 PVDISKRAWPARTDESC paNew = (PVDISKRAWPARTDESC)RTMemReallocZ(pRawDesc->pPartDescs,
3721 cOld * sizeof(pRawDesc->pPartDescs[0]),
3722 cNew * sizeof(pRawDesc->pPartDescs[0]));
3723 if (paNew)
3724 {
3725 pRawDesc->cPartDescs = cNew;
3726 pRawDesc->pPartDescs = paNew;
3727
3728 *ppRet = &paNew[cOld];
3729 return VINF_SUCCESS;
3730 }
3731 *ppRet = NULL;
3732 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
3733 N_("VMDK: Image path: '%s'. Out of memory growing the partition descriptors (%u -> %u)."),
3734 pImage->pszFilename, cOld, cNew);
3735}
3736
3737/**
3738 * @callback_method_impl{FNRTSORTCMP}
3739 */
3740static DECLCALLBACK(int) vmdkRawDescPartComp(void const *pvElement1, void const *pvElement2, void *pvUser)
3741{
3742 RT_NOREF(pvUser);
3743 int64_t const iDelta = ((PVDISKRAWPARTDESC)pvElement1)->offStartInVDisk - ((PVDISKRAWPARTDESC)pvElement2)->offStartInVDisk;
3744 return iDelta < 0 ? -1 : iDelta > 0 ? 1 : 0;
3745}
3746
/**
 * Post processes the partition descriptors.
 *
 * Sorts them and check that they don't overlap.
 *
 * @returns VBox status code (VERR_FILESYSTEM_CORRUPT on bogus descriptors).
 * @param   pImage      For error reporting.
 * @param   pRawDesc    The raw descriptor whose partition list to check.
 * @param   cbSize      Total size of the virtual disk in bytes.
 */
static int vmdkRawDescPostProcessPartitions(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint64_t cbSize)
{
    /*
     * Sort data areas in ascending order of start.
     */
    RTSortShell(pRawDesc->pPartDescs, pRawDesc->cPartDescs, sizeof(pRawDesc->pPartDescs[0]), vmdkRawDescPartComp, NULL);

    /*
     * Check that we don't have overlapping descriptors. If we do, that's an
     * indication that the drive is corrupt or that the RTDvm code is buggy.
     */
    VDISKRAWPARTDESC const *paPartDescs = pRawDesc->pPartDescs;
    for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
    {
        /* offLast is first the exclusive end; a wrap or zero-size entry makes
         * it not greater than the start, which is rejected here. */
        uint64_t offLast = paPartDescs[i].offStartInVDisk + paPartDescs[i].cbData;
        if (offLast <= paPartDescs[i].offStartInVDisk)
            return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
                             N_("VMDK: Image path: '%s'. Bogus partition descriptor #%u (%#RX64 LB %#RX64%s): Wrap around or zero"),
                             pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
                             paPartDescs[i].pvPartitionData ? " (data)" : "");
        /* Turn offLast into the inclusive last byte of the area. */
        offLast -= 1;

        /* The list is sorted, so overlap can only occur with the next entry. */
        if (i + 1 < pRawDesc->cPartDescs && offLast >= paPartDescs[i + 1].offStartInVDisk)
            return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
                             N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) overlaps with the next (%#RX64 LB %#RX64%s)"),
                             pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
                             paPartDescs[i].pvPartitionData ? " (data)" : "", paPartDescs[i + 1].offStartInVDisk,
                             paPartDescs[i + 1].cbData, paPartDescs[i + 1].pvPartitionData ? " (data)" : "");
        if (offLast >= cbSize)
            return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
                             N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) goes beyond the end of the drive (%#RX64)"),
                             pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
                             paPartDescs[i].pvPartitionData ? " (data)" : "", cbSize);
    }

    return VINF_SUCCESS;
}
3789
3790
#ifdef RT_OS_LINUX
/**
 * Searches the dir specified in @a pszBlockDevDir for subdirectories with a
 * 'dev' file matching @a uDevToLocate.
 *
 * This is used both for locating the whole-drive directory under /sys/block/
 * and for locating the partition subdirectory beneath it (see the Linux part
 * of the partition path verification code, which calls this twice).
 *
 * @returns IPRT status code, errors have been reported properly.
 * @param   pImage          For error reporting.
 * @param   pszBlockDevDir  Input: Path to the directory search under.
 *                          Output: Path to the directory containing information
 *                          for @a uDevToLocate.
 * @param   cbBlockDevDir   The size of the buffer @a pszBlockDevDir points to.
 * @param   uDevToLocate    The device number of the block device info dir to
 *                          locate.
 * @param   pszDevToLocate  For error reporting.
 */
static int vmdkFindSysBlockDevPath(PVMDKIMAGE pImage, char *pszBlockDevDir, size_t cbBlockDevDir,
                                   dev_t uDevToLocate, const char *pszDevToLocate)
{
    /* Make sure the base path ends with a separator so entry names can be appended at cchDir. */
    size_t const cchDir = RTPathEnsureTrailingSeparator(pszBlockDevDir, cbBlockDevDir);
    AssertReturn(cchDir > 0, VERR_BUFFER_OVERFLOW);

    RTDIR hDir = NIL_RTDIR;
    int rc = RTDirOpen(&hDir, pszBlockDevDir);
    if (RT_SUCCESS(rc))
    {
        for (;;)
        {
            RTDIRENTRY Entry;
            rc = RTDirRead(hDir, &Entry, NULL);
            if (RT_SUCCESS(rc))
            {
                /* We're interested in directories and symlinks. */
                if (   Entry.enmType == RTDIRENTRYTYPE_DIRECTORY
                    || Entry.enmType == RTDIRENTRYTYPE_SYMLINK
                    || Entry.enmType == RTDIRENTRYTYPE_UNKNOWN)
                {
                    /* Append the entry name after the trailing separator, forming '<dir>/<entry>'. */
                    rc = RTStrCopy(&pszBlockDevDir[cchDir], cbBlockDevDir - cchDir, Entry.szName);
                    AssertContinue(RT_SUCCESS(rc)); /* should not happen! */

                    /* Seed with a value that can never equal uDevToLocate, so a failed
                       read of the '<entry>/dev' file cannot produce a false match. */
                    dev_t uThisDevNo = ~uDevToLocate;
                    rc = RTLinuxSysFsReadDevNumFile(&uThisDevNo, "%s/dev", pszBlockDevDir);
                    if (RT_SUCCESS(rc) && uThisDevNo == uDevToLocate)
                        break; /* Found it; pszBlockDevDir now holds the matching path. */
                }
            }
            else
            {
                /* Enumeration ended (or failed): restore the base path for the error message. */
                pszBlockDevDir[cchDir] = '\0';
                if (rc == VERR_NO_MORE_FILES)
                    rc = vdIfError(pImage->pIfError, VERR_NOT_FOUND, RT_SRC_POS,
                                   N_("VMDK: Image path: '%s'. Failed to locate device corresponding to '%s' under '%s'"),
                                   pImage->pszFilename, pszDevToLocate, pszBlockDevDir);
                else
                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                                   N_("VMDK: Image path: '%s'. RTDirRead failed enumerating '%s': %Rrc"),
                                   pImage->pszFilename, pszBlockDevDir, rc);
                break;
            }
        }
        RTDirClose(hDir);
    }
    else
        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                       N_("VMDK: Image path: '%s'. Failed to open dir '%s' for listing: %Rrc"),
                       pImage->pszFilename, pszBlockDevDir, rc);
    return rc;
}
#endif /* RT_OS_LINUX */
3861
3862#ifdef RT_OS_FREEBSD
3863
3864
3865/**
3866 * Reads the config data from the provider and returns offset and size
3867 *
3868 * @return IPRT status code
3869 * @param pProvider GEOM provider representing partition
3870 * @param pcbOffset Placeholder for the offset of the partition
3871 * @param pcbSize Placeholder for the size of the partition
3872 */
3873static int vmdkReadPartitionsParamsFromProvider(gprovider *pProvider, uint64_t *pcbOffset, uint64_t *pcbSize)
3874{
3875 gconfig *pConfEntry;
3876 int rc = VERR_NOT_FOUND;
3877
3878 /*
3879 * Required parameters are located in the list containing key/value pairs.
3880 * Both key and value are in text form. Manuals tells nothing about the fact
3881 * that the both parameters should be present in the list. Thus, there are
3882 * cases when only one parameter is presented. To handle such cases we treat
3883 * absent params as zero allowing the caller decide the case is either correct
3884 * or an error.
3885 */
3886 uint64_t cbOffset = 0;
3887 uint64_t cbSize = 0;
3888 LIST_FOREACH(pConfEntry, &pProvider->lg_config, lg_config)
3889 {
3890 if (RTStrCmp(pConfEntry->lg_name, "offset") == 0)
3891 {
3892 cbOffset = RTStrToUInt64(pConfEntry->lg_val);
3893 rc = VINF_SUCCESS;
3894 }
3895 else if (RTStrCmp(pConfEntry->lg_name, "length") == 0)
3896 {
3897 cbSize = RTStrToUInt64(pConfEntry->lg_val);
3898 rc = VINF_SUCCESS;
3899 }
3900 }
3901 if (RT_SUCCESS(rc))
3902 {
3903 *pcbOffset = cbOffset;
3904 *pcbSize = cbSize;
3905 }
3906 return rc;
3907}
3908
3909
3910/**
3911 * Searches the partition specified by name and calculates its size and absolute offset.
3912 *
3913 * @return IPRT status code.
3914 * @param pParentClass Class containing pParentGeom
3915 * @param pszParentGeomName Name of the parent geom where we are looking for provider
3916 * @param pszProviderName Name of the provider we are looking for
3917 * @param pcbAbsoluteOffset Placeholder for the absolute offset of the partition, i.e. offset from the beginning of the disk
3918 * @param psbSize Placeholder for the size of the partition.
3919 */
3920static int vmdkFindPartitionParamsByName(gclass *pParentClass, const char *pszParentGeomName, const char *pszProviderName,
3921 uint64_t *pcbAbsoluteOffset, uint64_t *pcbSize)
3922{
3923 AssertReturn(pParentClass, VERR_INVALID_PARAMETER);
3924 AssertReturn(pszParentGeomName, VERR_INVALID_PARAMETER);
3925 AssertReturn(pszProviderName, VERR_INVALID_PARAMETER);
3926 AssertReturn(pcbAbsoluteOffset, VERR_INVALID_PARAMETER);
3927 AssertReturn(pcbSize, VERR_INVALID_PARAMETER);
3928
3929 ggeom *pParentGeom;
3930 int rc = VERR_NOT_FOUND;
3931 LIST_FOREACH(pParentGeom, &pParentClass->lg_geom, lg_geom)
3932 {
3933 if (RTStrCmp(pParentGeom->lg_name, pszParentGeomName) == 0)
3934 {
3935 rc = VINF_SUCCESS;
3936 break;
3937 }
3938 }
3939 if (RT_FAILURE(rc))
3940 return rc;
3941
3942 gprovider *pProvider;
3943 /*
3944 * First, go over providers without handling EBR or BSDLabel
3945 * partitions for case when looking provider is child
3946 * of the givng geom, to reduce searching time
3947 */
3948 LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider)
3949 {
3950 if (RTStrCmp(pProvider->lg_name, pszProviderName) == 0)
3951 return vmdkReadPartitionsParamsFromProvider(pProvider, pcbAbsoluteOffset, pcbSize);
3952 }
3953
3954 /*
3955 * No provider found. Go over the parent geom again
3956 * and make recursions if geom represents EBR or BSDLabel.
3957 * In this case given parent geom contains only EBR or BSDLabel
3958 * partition itself and their own partitions are in the separate
3959 * geoms. Also, partition offsets are relative to geom, so
3960 * we have to add offset from child provider with parent geoms
3961 * provider
3962 */
3963
3964 LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider)
3965 {
3966 uint64_t cbOffset = 0;
3967 uint64_t cbSize = 0;
3968 rc = vmdkReadPartitionsParamsFromProvider(pProvider, &cbOffset, &cbSize);
3969 if (RT_FAILURE(rc))
3970 return rc;
3971
3972 uint64_t cbProviderOffset = 0;
3973 uint64_t cbProviderSize = 0;
3974 rc = vmdkFindPartitionParamsByName(pParentClass, pProvider->lg_name, pszProviderName, &cbProviderOffset, &cbProviderSize);
3975 if (RT_SUCCESS(rc))
3976 {
3977 *pcbAbsoluteOffset = cbOffset + cbProviderOffset;
3978 *pcbSize = cbProviderSize;
3979 return rc;
3980 }
3981 }
3982
3983 return VERR_NOT_FOUND;
3984}
3985#endif
3986
3987
3988/**
3989 * Attempts to verify the raw partition path.
3990 *
3991 * We don't want to trust RTDvm and the partition device node morphing blindly.
3992 */
3993static int vmdkRawDescVerifyPartitionPath(PVMDKIMAGE pImage, PVDISKRAWPARTDESC pPartDesc, uint32_t idxPartition,
3994 const char *pszRawDrive, RTFILE hRawDrive, uint32_t cbSector, RTDVMVOLUME hVol)
3995{
3996 RT_NOREF(pImage, pPartDesc, idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
3997
3998 /*
3999 * Try open the raw partition device.
4000 */
4001 RTFILE hRawPart = NIL_RTFILE;
4002 int rc = RTFileOpen(&hRawPart, pPartDesc->pszRawDevice, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
4003 if (RT_FAILURE(rc))
4004 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4005 N_("VMDK: Image path: '%s'. Failed to open partition #%u on '%s' via '%s' (%Rrc)"),
4006 pImage->pszFilename, idxPartition, pszRawDrive, pPartDesc->pszRawDevice, rc);
4007
4008 /*
4009 * Compare the partition UUID if we can get it.
4010 */
4011#ifdef RT_OS_WINDOWS
4012 DWORD cbReturned;
4013
4014 /* 1. Get the device numbers for both handles, they should have the same disk. */
4015 STORAGE_DEVICE_NUMBER DevNum1;
4016 RT_ZERO(DevNum1);
4017 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
4018 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum1, sizeof(DevNum1), &cbReturned, NULL /*pOverlapped*/))
4019 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4020 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
4021 pImage->pszFilename, pszRawDrive, GetLastError());
4022
4023 STORAGE_DEVICE_NUMBER DevNum2;
4024 RT_ZERO(DevNum2);
4025 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawPart), IOCTL_STORAGE_GET_DEVICE_NUMBER,
4026 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum2, sizeof(DevNum2), &cbReturned, NULL /*pOverlapped*/))
4027 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4028 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
4029 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError());
4030 if ( RT_SUCCESS(rc)
4031 && ( DevNum1.DeviceNumber != DevNum2.DeviceNumber
4032 || DevNum1.DeviceType != DevNum2.DeviceType))
4033 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4034 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x)"),
4035 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4036 DevNum1.DeviceNumber, DevNum2.DeviceNumber, DevNum1.DeviceType, DevNum2.DeviceType);
4037 if (RT_SUCCESS(rc))
4038 {
4039 /* Get the partitions from the raw drive and match up with the volume info
4040 from RTDvm. The partition number is found in DevNum2. */
4041 DWORD cbNeeded = 0;
4042 if ( DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
4043 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, NULL, 0, &cbNeeded, NULL /*pOverlapped*/)
4044 || cbNeeded < RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[1]))
4045 cbNeeded = RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[64]);
4046 cbNeeded += sizeof(PARTITION_INFORMATION_EX) * 2; /* just in case */
4047 DRIVE_LAYOUT_INFORMATION_EX *pLayout = (DRIVE_LAYOUT_INFORMATION_EX *)RTMemTmpAllocZ(cbNeeded);
4048 if (pLayout)
4049 {
4050 cbReturned = 0;
4051 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
4052 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, pLayout, cbNeeded, &cbReturned, NULL /*pOverlapped*/))
4053 {
4054 /* Find the entry with the given partition number (it's not an index, array contains empty MBR entries ++). */
4055 unsigned iEntry = 0;
4056 while ( iEntry < pLayout->PartitionCount
4057 && pLayout->PartitionEntry[iEntry].PartitionNumber != DevNum2.PartitionNumber)
4058 iEntry++;
4059 if (iEntry < pLayout->PartitionCount)
4060 {
4061 /* Compare the basics */
4062 PARTITION_INFORMATION_EX const * const pLayoutEntry = &pLayout->PartitionEntry[iEntry];
4063 if (pLayoutEntry->StartingOffset.QuadPart != (int64_t)pPartDesc->offStartInVDisk)
4064 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4065 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': StartingOffset %RU64, expected %RU64"),
4066 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4067 pLayoutEntry->StartingOffset.QuadPart, pPartDesc->offStartInVDisk);
4068 else if (pLayoutEntry->PartitionLength.QuadPart != (int64_t)pPartDesc->cbData)
4069 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4070 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionLength %RU64, expected %RU64"),
4071 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4072 pLayoutEntry->PartitionLength.QuadPart, pPartDesc->cbData);
4073 /** @todo We could compare the MBR type, GPT type and ID. */
4074 RT_NOREF(hVol);
4075 }
4076 else
4077 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4078 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionCount (%#x vs %#x)"),
4079 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4080 DevNum2.PartitionNumber, pLayout->PartitionCount);
4081# ifndef LOG_ENABLED
4082 if (RT_FAILURE(rc))
4083# endif
4084 {
4085 LogRel(("VMDK: Windows reports %u partitions for '%s':\n", pLayout->PartitionCount, pszRawDrive));
4086 PARTITION_INFORMATION_EX const *pEntry = &pLayout->PartitionEntry[0];
4087 for (DWORD i = 0; i < pLayout->PartitionCount; i++, pEntry++)
4088 {
4089 LogRel(("VMDK: #%u/%u: %016RU64 LB %016RU64 style=%d rewrite=%d",
4090 i, pEntry->PartitionNumber, pEntry->StartingOffset.QuadPart, pEntry->PartitionLength.QuadPart,
4091 pEntry->PartitionStyle, pEntry->RewritePartition));
4092 if (pEntry->PartitionStyle == PARTITION_STYLE_MBR)
4093 LogRel((" type=%#x boot=%d rec=%d hidden=%u\n", pEntry->Mbr.PartitionType, pEntry->Mbr.BootIndicator,
4094 pEntry->Mbr.RecognizedPartition, pEntry->Mbr.HiddenSectors));
4095 else if (pEntry->PartitionStyle == PARTITION_STYLE_GPT)
4096 LogRel((" type=%RTuuid id=%RTuuid aatrib=%RX64 name=%.36ls\n", &pEntry->Gpt.PartitionType,
4097 &pEntry->Gpt.PartitionId, pEntry->Gpt.Attributes, &pEntry->Gpt.Name[0]));
4098 else
4099 LogRel(("\n"));
4100 }
4101 LogRel(("VMDK: Looked for partition #%u (%u, '%s') at %RU64 LB %RU64\n", DevNum2.PartitionNumber,
4102 idxPartition, pPartDesc->pszRawDevice, pPartDesc->offStartInVDisk, pPartDesc->cbData));
4103 }
4104 }
4105 else
4106 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4107 N_("VMDK: Image path: '%s'. IOCTL_DISK_GET_DRIVE_LAYOUT_EX failed on '%s': %u (cb %u, cbRet %u)"),
4108 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError(), cbNeeded, cbReturned);
4109 RTMemTmpFree(pLayout);
4110 }
4111 else
4112 rc = VERR_NO_TMP_MEMORY;
4113 }
4114
4115#elif defined(RT_OS_LINUX)
4116 RT_NOREF(hVol);
4117
4118 /* Stat the two devices first to get their device numbers. (We probably
4119 could make some assumptions here about the major & minor number assignments
4120 for legacy nodes, but it doesn't hold up for nvme, so we'll skip that.) */
4121 struct stat StDrive, StPart;
4122 if (fstat((int)RTFileToNative(hRawDrive), &StDrive) != 0)
4123 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4124 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
4125 else if (fstat((int)RTFileToNative(hRawPart), &StPart) != 0)
4126 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4127 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pPartDesc->pszRawDevice, errno);
4128 else
4129 {
4130 /* Scan the directories immediately under /sys/block/ for one with a
4131 'dev' file matching the drive's device number: */
4132 char szSysPath[RTPATH_MAX];
4133 rc = RTLinuxConstructPath(szSysPath, sizeof(szSysPath), "block/");
4134 AssertRCReturn(rc, rc); /* this shall not fail */
4135 if (RTDirExists(szSysPath))
4136 {
4137 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StDrive.st_rdev, pszRawDrive);
4138
4139 /* Now, scan the directories under that again for a partition device
4140 matching the hRawPart device's number: */
4141 if (RT_SUCCESS(rc))
4142 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StPart.st_rdev, pPartDesc->pszRawDevice);
4143
4144 /* Having found the /sys/block/device/partition/ path, we can finally
4145 read the partition attributes and compare with hVol. */
4146 if (RT_SUCCESS(rc))
4147 {
4148 /* partition number: */
4149 int64_t iLnxPartition = 0;
4150 rc = RTLinuxSysFsReadIntFile(10, &iLnxPartition, "%s/partition", szSysPath);
4151 if (RT_SUCCESS(rc) && iLnxPartition != idxPartition)
4152 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4153 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Partition number %RI64, expected %RU32"),
4154 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, iLnxPartition, idxPartition);
4155 /* else: ignore failure? */
4156
4157 /* start offset: */
4158 uint32_t const cbLnxSector = 512; /* It's hardcoded in the Linux kernel */
4159 if (RT_SUCCESS(rc))
4160 {
4161 int64_t offLnxStart = -1;
4162 rc = RTLinuxSysFsReadIntFile(10, &offLnxStart, "%s/start", szSysPath);
4163 offLnxStart *= cbLnxSector;
4164 if (RT_SUCCESS(rc) && offLnxStart != (int64_t)pPartDesc->offStartInVDisk)
4165 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4166 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"),
4167 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, offLnxStart, pPartDesc->offStartInVDisk);
4168 /* else: ignore failure? */
4169 }
4170
4171 /* the size: */
4172 if (RT_SUCCESS(rc))
4173 {
4174 int64_t cbLnxData = -1;
4175 rc = RTLinuxSysFsReadIntFile(10, &cbLnxData, "%s/size", szSysPath);
4176 cbLnxData *= cbLnxSector;
4177 if (RT_SUCCESS(rc) && cbLnxData != (int64_t)pPartDesc->cbData)
4178 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4179 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RI64, expected %RU64"),
4180 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbLnxData, pPartDesc->cbData);
4181 /* else: ignore failure? */
4182 }
4183 }
4184 }
4185 /* else: We've got nothing to work on, so only do content comparison. */
4186 }
4187
4188#elif defined(RT_OS_FREEBSD)
4189 char szDriveDevName[256];
4190 char* pszDevName = fdevname_r(RTFileToNative(hRawDrive), szDriveDevName, 256);
4191 if (pszDevName == NULL)
4192 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4193 N_("VMDK: Image path: '%s'. '%s' is not a drive path"), pImage->pszFilename, pszRawDrive);
4194 char szPartDevName[256];
4195 if (RT_SUCCESS(rc))
4196 {
4197 pszDevName = fdevname_r(RTFileToNative(hRawPart), szPartDevName, 256);
4198 if (pszDevName == NULL)
4199 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4200 N_("VMDK: Image path: '%s'. '%s' is not a partition path"), pImage->pszFilename, pPartDesc->pszRawDevice);
4201 }
4202 if (RT_SUCCESS(rc))
4203 {
4204 gmesh geomMesh;
4205 int err = geom_gettree(&geomMesh);
4206 if (err == 0)
4207 {
4208 /* Find root class containg partitions info */
4209 gclass* pPartClass;
4210 LIST_FOREACH(pPartClass, &geomMesh.lg_class, lg_class)
4211 {
4212 if (RTStrCmp(pPartClass->lg_name, "PART") == 0)
4213 break;
4214 }
4215 if (pPartClass == NULL || RTStrCmp(pPartClass->lg_name, "PART") != 0)
4216 rc = vdIfError(pImage->pIfError, VERR_GENERAL_FAILURE, RT_SRC_POS,
4217 N_("VMDK: Image path: '%s'. 'PART' class not found in the GEOM tree"), pImage->pszFilename);
4218
4219
4220 if (RT_SUCCESS(rc))
4221 {
4222 /* Find provider representing partition device */
4223 uint64_t cbOffset;
4224 uint64_t cbSize;
4225 rc = vmdkFindPartitionParamsByName(pPartClass, szDriveDevName, szPartDevName, &cbOffset, &cbSize);
4226 if (RT_SUCCESS(rc))
4227 {
4228 if (cbOffset != pPartDesc->offStartInVDisk)
4229 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4230 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RU64, expected %RU64"),
4231 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
4232 if (cbSize != pPartDesc->cbData)
4233 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4234 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RU64, expected %RU64"),
4235 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
4236 }
4237 else
4238 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4239 N_("VMDK: Image path: '%s'. Error getting geom provider for the partition '%s' of the drive '%s' in the GEOM tree: %Rrc"),
4240 pImage->pszFilename, pPartDesc->pszRawDevice, pszRawDrive, rc);
4241 }
4242
4243 geom_deletetree(&geomMesh);
4244 }
4245 else
4246 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(err), RT_SRC_POS,
4247 N_("VMDK: Image path: '%s'. geom_gettree failed: %d"), pImage->pszFilename, err);
4248 }
4249
4250#elif defined(RT_OS_SOLARIS)
4251 RT_NOREF(hVol);
4252
4253 dk_cinfo dkiDriveInfo;
4254 dk_cinfo dkiPartInfo;
4255 if (ioctl(RTFileToNative(hRawDrive), DKIOCINFO, (caddr_t)&dkiDriveInfo) == -1)
4256 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4257 N_("VMDK: Image path: '%s'. DKIOCINFO failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
4258 else if (ioctl(RTFileToNative(hRawPart), DKIOCINFO, (caddr_t)&dkiPartInfo) == -1)
4259 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4260 N_("VMDK: Image path: '%s'. DKIOCINFO failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
4261 else if ( dkiDriveInfo.dki_ctype != dkiPartInfo.dki_ctype
4262 || dkiDriveInfo.dki_cnum != dkiPartInfo.dki_cnum
4263 || dkiDriveInfo.dki_addr != dkiPartInfo.dki_addr
4264 || dkiDriveInfo.dki_unit != dkiPartInfo.dki_unit
4265 || dkiDriveInfo.dki_slave != dkiPartInfo.dki_slave)
4266 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4267 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x || %#x != %#x || %#x != %#x || %#x != %#x)"),
4268 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4269 dkiDriveInfo.dki_ctype, dkiPartInfo.dki_ctype, dkiDriveInfo.dki_cnum, dkiPartInfo.dki_cnum,
4270 dkiDriveInfo.dki_addr, dkiPartInfo.dki_addr, dkiDriveInfo.dki_unit, dkiPartInfo.dki_unit,
4271 dkiDriveInfo.dki_slave, dkiPartInfo.dki_slave);
4272 else
4273 {
4274 uint64_t cbOffset = 0;
4275 uint64_t cbSize = 0;
4276 dk_gpt *pEfi = NULL;
4277 int idxEfiPart = efi_alloc_and_read(RTFileToNative(hRawPart), &pEfi);
4278 if (idxEfiPart >= 0)
4279 {
4280 if ((uint32_t)dkiPartInfo.dki_partition + 1 == idxPartition)
4281 {
4282 cbOffset = pEfi->efi_parts[idxEfiPart].p_start * pEfi->efi_lbasize;
4283 cbSize = pEfi->efi_parts[idxEfiPart].p_size * pEfi->efi_lbasize;
4284 }
4285 else
4286 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4287 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s' (%#x != %#x)"),
4288 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4289 idxPartition, (uint32_t)dkiPartInfo.dki_partition + 1);
4290 efi_free(pEfi);
4291 }
4292 else
4293 {
4294 /*
4295 * Manual says the efi_alloc_and_read returns VT_EINVAL if no EFI partition table found.
4296 * Actually, the function returns any error, e.g. VT_ERROR. Thus, we are not sure, is it
4297 * real error or just no EFI table found. Therefore, let's try to obtain partition info
4298 * using another way. If there is an error, it returns errno which will be handled below.
4299 */
4300
4301 uint32_t numPartition = (uint32_t)dkiPartInfo.dki_partition;
4302 if (numPartition > NDKMAP)
4303 numPartition -= NDKMAP;
4304 if (numPartition != idxPartition)
4305 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4306 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s' (%#x != %#x)"),
4307 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4308 idxPartition, numPartition);
4309 else
4310 {
4311 dk_minfo_ext mediaInfo;
4312 if (ioctl(RTFileToNative(hRawPart), DKIOCGMEDIAINFOEXT, (caddr_t)&mediaInfo) == -1)
4313 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4314 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s'. Can not obtain partition info: %d"),
4315 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4316 else
4317 {
4318 extpart_info extPartInfo;
4319 if (ioctl(RTFileToNative(hRawPart), DKIOCEXTPARTINFO, (caddr_t)&extPartInfo) != -1)
4320 {
4321 cbOffset = (uint64_t)extPartInfo.p_start * mediaInfo.dki_lbsize;
4322 cbSize = (uint64_t)extPartInfo.p_length * mediaInfo.dki_lbsize;
4323 }
4324 else
4325 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4326 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s'. Can not obtain partition info: %d"),
4327 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4328 }
4329 }
4330 }
4331 if (RT_SUCCESS(rc) && cbOffset != pPartDesc->offStartInVDisk)
4332 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4333 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"),
4334 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
4335
4336 if (RT_SUCCESS(rc) && cbSize != pPartDesc->cbData)
4337 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4338 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RI64, expected %RU64"),
4339 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
4340 }
4341
4342#elif defined(RT_OS_DARWIN)
4343 /* Stat the drive get its device number. */
4344 struct stat StDrive;
4345 if (fstat((int)RTFileToNative(hRawDrive), &StDrive) != 0)
4346 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4347 N_("VMDK: Image path: '%s'. fstat failed on '%s' (errno=%d)"), pImage->pszFilename, pszRawDrive, errno);
4348 else
4349 {
4350 if (ioctl(RTFileToNative(hRawPart), DKIOCLOCKPHYSICALEXTENTS, NULL) == -1)
4351 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4352 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to lock the partition (errno=%d)"),
4353 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4354 else
4355 {
4356 uint32_t cbBlockSize = 0;
4357 uint64_t cbOffset = 0;
4358 uint64_t cbSize = 0;
4359 if (ioctl(RTFileToNative(hRawPart), DKIOCGETBLOCKSIZE, (caddr_t)&cbBlockSize) == -1)
4360 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4361 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the sector size of the partition (errno=%d)"),
4362 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4363 else if (ioctl(RTFileToNative(hRawPart), DKIOCGETBASE, (caddr_t)&cbOffset) == -1)
4364 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4365 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the start offset of the partition (errno=%d)"),
4366 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4367 else if (ioctl(RTFileToNative(hRawPart), DKIOCGETBLOCKCOUNT, (caddr_t)&cbSize) == -1)
4368 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4369 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the size of the partition (errno=%d)"),
4370 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4371 else
4372 {
4373 cbSize *= (uint64_t)cbBlockSize;
4374 dk_physical_extent_t dkPartExtent = {0};
4375 dkPartExtent.offset = 0;
4376 dkPartExtent.length = cbSize;
4377 if (ioctl(RTFileToNative(hRawPart), DKIOCGETPHYSICALEXTENT, (caddr_t)&dkPartExtent) == -1)
4378 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4379 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain partition info (errno=%d)"),
4380 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4381 else
4382 {
4383 if (dkPartExtent.dev != StDrive.st_rdev)
4384 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4385 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Drive does not contain the partition"),
4386 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive);
4387 else if (cbOffset != pPartDesc->offStartInVDisk)
4388 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4389 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RU64, expected %RU64"),
4390 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
4391 else if (cbSize != pPartDesc->cbData)
4392 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4393 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RU64, expected %RU64"),
4394 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
4395 }
4396 }
4397
4398 if (ioctl(RTFileToNative(hRawPart), DKIOCUNLOCKPHYSICALEXTENTS, NULL) == -1)
4399 {
4400 int rc2 = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4401 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to unlock the partition (errno=%d)"),
4402 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4403 if (RT_SUCCESS(rc))
4404 rc = rc2;
4405 }
4406 }
4407 }
4408
4409#else
4410 RT_NOREF(hVol); /* PORTME */
4411 rc = VERR_NOT_SUPPORTED;
4412#endif
4413 if (RT_SUCCESS(rc))
4414 {
4415 /*
4416 * Compare the first 32 sectors of the partition.
4417 *
4418 * This might not be conclusive, but for partitions formatted with the more
4419 * common file systems it should be as they have a superblock copy at or near
4420 * the start of the partition (fat, fat32, ntfs, and ext4 does at least).
4421 */
4422 size_t const cbToCompare = (size_t)RT_MIN(pPartDesc->cbData / cbSector, 32) * cbSector;
4423 uint8_t *pbSector1 = (uint8_t *)RTMemTmpAlloc(cbToCompare * 2);
4424 if (pbSector1 != NULL)
4425 {
4426 uint8_t *pbSector2 = pbSector1 + cbToCompare;
4427
4428 /* Do the comparing, we repeat if it fails and the data might be volatile. */
4429 uint64_t uPrevCrc1 = 0;
4430 uint64_t uPrevCrc2 = 0;
4431 uint32_t cStable = 0;
4432 for (unsigned iTry = 0; iTry < 256; iTry++)
4433 {
4434 rc = RTFileReadAt(hRawDrive, pPartDesc->offStartInVDisk, pbSector1, cbToCompare, NULL);
4435 if (RT_SUCCESS(rc))
4436 {
4437 rc = RTFileReadAt(hRawPart, pPartDesc->offStartInDevice, pbSector2, cbToCompare, NULL);
4438 if (RT_SUCCESS(rc))
4439 {
4440 if (memcmp(pbSector1, pbSector2, cbToCompare) != 0)
4441 {
4442 rc = VERR_MISMATCH;
4443
4444 /* Do data stability checks before repeating: */
4445 uint64_t const uCrc1 = RTCrc64(pbSector1, cbToCompare);
4446 uint64_t const uCrc2 = RTCrc64(pbSector2, cbToCompare);
4447 if ( uPrevCrc1 != uCrc1
4448 || uPrevCrc2 != uCrc2)
4449 cStable = 0;
4450 else if (++cStable > 4)
4451 break;
4452 uPrevCrc1 = uCrc1;
4453 uPrevCrc2 = uCrc2;
4454 continue;
4455 }
4456 rc = VINF_SUCCESS;
4457 }
4458 else
4459 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4460 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
4461 pImage->pszFilename, cbToCompare, pPartDesc->pszRawDevice, pPartDesc->offStartInDevice, rc);
4462 }
4463 else
4464 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4465 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
4466 pImage->pszFilename, cbToCompare, pszRawDrive, pPartDesc->offStartInVDisk, rc);
4467 break;
4468 }
4469 if (rc == VERR_MISMATCH)
4470 {
4471 /* Find the first mismatching bytes: */
4472 size_t offMissmatch = 0;
4473 while (offMissmatch < cbToCompare && pbSector1[offMissmatch] == pbSector2[offMissmatch])
4474 offMissmatch++;
4475 int cbSample = (int)RT_MIN(cbToCompare - offMissmatch, 16);
4476
4477 if (cStable > 0)
4478 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4479 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (cStable=%d @%#zx: %.*Rhxs vs %.*Rhxs)"),
4480 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cStable,
4481 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]);
4482 else
4483 {
4484 LogRel(("VMDK: Image path: '%s'. Partition #%u path ('%s') verification undecided on '%s' because of unstable data! (@%#zx: %.*Rhxs vs %.*Rhxs)\n",
4485 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4486 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]));
4487 rc = -rc;
4488 }
4489 }
4490
4491 RTMemTmpFree(pbSector1);
4492 }
4493 else
4494 rc = vdIfError(pImage->pIfError, VERR_NO_TMP_MEMORY, RT_SRC_POS,
4495 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for a temporary read buffer\n"),
4496 pImage->pszFilename, cbToCompare * 2);
4497 }
4498 RTFileClose(hRawPart);
4499 return rc;
4500}
4501
#ifdef RT_OS_WINDOWS
/**
 * Construct the device name for the given partition number.
 *
 * Queries the storage device number of the whole drive and formats the
 * classic \\.\HarddiskNPartitionM path for the requested partition.
 *
 * @returns VBox status code, error message set on failure.
 * @param   pImage              The VMDK image (for error reporting).
 * @param   pszRawDrive         The raw drive path (for error reporting).
 * @param   hRawDrive           Handle to the whole-drive device.
 * @param   idxPartition        The one-based partition number.
 * @param   ppszRawPartition    Where to return the allocated device path.
 */
static int vmdkRawDescWinMakePartitionName(PVMDKIMAGE pImage, const char *pszRawDrive, RTFILE hRawDrive, uint32_t idxPartition,
                                           char **ppszRawPartition)
{
    STORAGE_DEVICE_NUMBER DevNum;
    RT_ZERO(DevNum);
    DWORD cbReturned = 0;
    if (!DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
                         NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum, sizeof(DevNum), &cbReturned, NULL /*pOverlapped*/))
        return vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
                         N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
                         pImage->pszFilename, pszRawDrive, GetLastError());

    /* Note! The RTStrAPrintf result is intentionally ignored here, matching the
             historical behavior; the caller asserts the pointer is non-NULL. */
    RTStrAPrintf(ppszRawPartition, "\\\\.\\Harddisk%uPartition%u", DevNum.DeviceNumber, idxPartition);
    return VINF_SUCCESS;
}
#endif /* RT_OS_WINDOWS */
4523
4524/**
4525 * Worker for vmdkMakeRawDescriptor that adds partition descriptors when the
4526 * 'Partitions' configuration value is present.
4527 *
4528 * @returns VBox status code, error message has been set on failure.
4529 *
4530 * @note Caller is assumed to clean up @a pRawDesc and release
4531 * @a *phVolToRelease.
4532 * @internal
4533 */
4534static int vmdkRawDescDoPartitions(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
4535 RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector,
4536 uint32_t fPartitions, uint32_t fPartitionsReadOnly, bool fRelative,
4537 PRTDVMVOLUME phVolToRelease)
4538{
4539 *phVolToRelease = NIL_RTDVMVOLUME;
4540
4541 /* Check sanity/understanding. */
4542 Assert(fPartitions);
4543 Assert((fPartitions & fPartitionsReadOnly) == fPartitionsReadOnly); /* RO should be a sub-set */
4544
4545 /*
4546 * Allocate on descriptor for each volume up front.
4547 */
4548 uint32_t const cVolumes = RTDvmMapGetValidVolumes(hVolMgr);
4549
4550 PVDISKRAWPARTDESC paPartDescs = NULL;
4551 int rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, cVolumes, &paPartDescs);
4552 AssertRCReturn(rc, rc);
4553
4554 /*
4555 * Enumerate the partitions (volumes) on the disk and create descriptors for each of them.
4556 */
4557 uint32_t fPartitionsLeft = fPartitions;
4558 RTDVMVOLUME hVol = NIL_RTDVMVOLUME; /* the current volume, needed for getting the next. */
4559 for (uint32_t i = 0; i < cVolumes; i++)
4560 {
4561 /*
4562 * Get the next/first volume and release the current.
4563 */
4564 RTDVMVOLUME hVolNext = NIL_RTDVMVOLUME;
4565 if (i == 0)
4566 rc = RTDvmMapQueryFirstVolume(hVolMgr, &hVolNext);
4567 else
4568 rc = RTDvmMapQueryNextVolume(hVolMgr, hVol, &hVolNext);
4569 if (RT_FAILURE(rc))
4570 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4571 N_("VMDK: Image path: '%s'. Volume enumeration failed at volume #%u on '%s' (%Rrc)"),
4572 pImage->pszFilename, i, pszRawDrive, rc);
4573 uint32_t cRefs = RTDvmVolumeRelease(hVol);
4574 Assert(cRefs != UINT32_MAX); RT_NOREF(cRefs);
4575 *phVolToRelease = hVol = hVolNext;
4576
4577 /*
4578 * Depending on the fPartitions selector and associated read-only mask,
4579 * the guest either gets read-write or read-only access (bits set)
4580 * or no access (selector bit clear, access directed to the VMDK).
4581 */
4582 paPartDescs[i].cbData = RTDvmVolumeGetSize(hVol);
4583
4584 uint64_t offVolumeEndIgnored = 0;
4585 rc = RTDvmVolumeQueryRange(hVol, &paPartDescs[i].offStartInVDisk, &offVolumeEndIgnored);
4586 if (RT_FAILURE(rc))
4587 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4588 N_("VMDK: Image path: '%s'. Failed to get location of volume #%u on '%s' (%Rrc)"),
4589 pImage->pszFilename, i, pszRawDrive, rc);
4590 Assert(paPartDescs[i].cbData == offVolumeEndIgnored + 1 - paPartDescs[i].offStartInVDisk);
4591
4592 /* Note! The index must match IHostDrivePartition::number. */
4593 uint32_t idxPartition = RTDvmVolumeGetIndex(hVol, RTDVMVOLIDX_HOST);
4594 if ( idxPartition < 32
4595 && (fPartitions & RT_BIT_32(idxPartition)))
4596 {
4597 fPartitionsLeft &= ~RT_BIT_32(idxPartition);
4598 if (fPartitionsReadOnly & RT_BIT_32(idxPartition))
4599 paPartDescs[i].uFlags |= VDISKRAW_READONLY;
4600
4601 if (!fRelative)
4602 {
4603 /*
4604 * Accessing the drive thru the main device node (pRawDesc->pszRawDisk).
4605 */
4606 paPartDescs[i].offStartInDevice = paPartDescs[i].offStartInVDisk;
4607 paPartDescs[i].pszRawDevice = RTStrDup(pszRawDrive);
4608 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4609 }
4610 else
4611 {
4612 /*
4613 * Relative means access the partition data via the device node for that
4614 * partition, allowing the sysadmin/OS to allow a user access to individual
4615 * partitions without necessarily being able to compromise the host OS.
4616 * Obviously, the creation of the VMDK requires read access to the main
4617 * device node for the drive, but that's a one-time thing and can be done
4618 * by the sysadmin. Here data starts at offset zero in the device node.
4619 */
4620 paPartDescs[i].offStartInDevice = 0;
4621
4622#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
4623 /* /dev/rdisk1 -> /dev/rdisk1s2 (s=slice) */
4624 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition);
4625#elif defined(RT_OS_LINUX)
4626 /* Two naming schemes here: /dev/nvme0n1 -> /dev/nvme0n1p1; /dev/sda -> /dev/sda1 */
4627 RTStrAPrintf(&paPartDescs[i].pszRawDevice,
4628 RT_C_IS_DIGIT(pszRawDrive[strlen(pszRawDrive) - 1]) ? "%sp%u" : "%s%u", pszRawDrive, idxPartition);
4629#elif defined(RT_OS_WINDOWS)
4630 rc = vmdkRawDescWinMakePartitionName(pImage, pszRawDrive, hRawDrive, idxPartition, &paPartDescs[i].pszRawDevice);
4631 AssertRCReturn(rc, rc);
4632#elif defined(RT_OS_SOLARIS)
4633 if (pRawDesc->enmPartitioningType == VDISKPARTTYPE_MBR)
4634 {
4635 /*
4636 * MBR partitions have device nodes in form /dev/(r)dsk/cXtYdZpK
4637 * where X is the controller,
4638 * Y is target (SCSI device number),
4639 * Z is disk number,
4640 * K is partition number,
4641 * where p0 is the whole disk
4642 * p1-pN are the partitions of the disk
4643 */
4644 const char *pszRawDrivePath = pszRawDrive;
4645 char szDrivePath[RTPATH_MAX];
4646 size_t cbRawDrive = strlen(pszRawDrive);
4647 if ( cbRawDrive > 1 && strcmp(&pszRawDrive[cbRawDrive - 2], "p0") == 0)
4648 {
4649 memcpy(szDrivePath, pszRawDrive, cbRawDrive - 2);
4650 szDrivePath[cbRawDrive - 2] = '\0';
4651 pszRawDrivePath = szDrivePath;
4652 }
4653 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%sp%u", pszRawDrivePath, idxPartition);
4654 }
4655 else /* GPT */
4656 {
4657 /*
4658 * GPT partitions have device nodes in form /dev/(r)dsk/cXtYdZsK
4659 * where X is the controller,
4660 * Y is target (SCSI device number),
4661 * Z is disk number,
4662 * K is partition number, zero based. Can be only from 0 to 6.
4663 * Thus, only partitions numbered 0 through 6 have device nodes.
4664 */
4665 if (idxPartition > 7)
4666 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4667 N_("VMDK: Image path: '%s'. the partition #%u on '%s' has no device node and can not be specified with 'Relative' property"),
4668 pImage->pszFilename, idxPartition, pszRawDrive);
4669 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition - 1);
4670 }
4671#else
4672 AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* The option parsing code should have prevented this - PORTME */
4673#endif
4674 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4675
4676 rc = vmdkRawDescVerifyPartitionPath(pImage, &paPartDescs[i], idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
4677 AssertRCReturn(rc, rc);
4678 }
4679 }
4680 else
4681 {
4682 /* Not accessible to the guest. */
4683 paPartDescs[i].offStartInDevice = 0;
4684 paPartDescs[i].pszRawDevice = NULL;
4685 }
4686 } /* for each volume */
4687
4688 RTDvmVolumeRelease(hVol);
4689 *phVolToRelease = NIL_RTDVMVOLUME;
4690
4691 /*
4692 * Check that we found all the partitions the user selected.
4693 */
4694 if (fPartitionsLeft)
4695 {
4696 char szLeft[3 * sizeof(fPartitions) * 8];
4697 size_t cchLeft = 0;
4698 for (unsigned i = 0; i < sizeof(fPartitions) * 8; i++)
4699 if (fPartitionsLeft & RT_BIT_32(i))
4700 cchLeft += RTStrPrintf(&szLeft[cchLeft], sizeof(szLeft) - cchLeft, cchLeft ? "%u" : ",%u", i);
4701 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4702 N_("VMDK: Image path: '%s'. Not all the specified partitions for drive '%s' was found: %s"),
4703 pImage->pszFilename, pszRawDrive, szLeft);
4704 }
4705
4706 return VINF_SUCCESS;
4707}
4708
4709/**
4710 * Worker for vmdkMakeRawDescriptor that adds partition descriptors with copies
4711 * of the partition tables and associated padding areas when the 'Partitions'
4712 * configuration value is present.
4713 *
4714 * The guest is not allowed access to the partition tables, however it needs
4715 * them to be able to access the drive. So, create descriptors for each of the
4716 * tables and attach the current disk content. vmdkCreateRawImage() will later
4717 * write the content to the VMDK. Any changes the guest later makes to the
4718 * partition tables will then go to the VMDK copy, rather than the host drive.
4719 *
4720 * @returns VBox status code, error message has been set on failure.
4721 *
4722 * @note Caller is assumed to clean up @a pRawDesc
4723 * @internal
4724 */
4725static int vmdkRawDescDoCopyPartitionTables(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
4726 const char *pszRawDrive, RTFILE hRawDrive, void *pvBootSector, size_t cbBootSector)
4727{
4728 /*
4729 * Query the locations.
4730 */
4731 /* Determin how many locations there are: */
4732 size_t cLocations = 0;
4733 int rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, NULL, 0, &cLocations);
4734 if (rc != VERR_BUFFER_OVERFLOW)
4735 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4736 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4737 pImage->pszFilename, pszRawDrive, rc);
4738 AssertReturn(cLocations > 0 && cLocations < _16M, VERR_INTERNAL_ERROR_5);
4739
4740 /* We can allocate the partition descriptors here to save an intentation level. */
4741 PVDISKRAWPARTDESC paPartDescs = NULL;
4742 rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, (uint32_t)cLocations, &paPartDescs);
4743 AssertRCReturn(rc, rc);
4744
4745 /* Allocate the result table and repeat the location table query: */
4746 PRTDVMTABLELOCATION paLocations = (PRTDVMTABLELOCATION)RTMemAllocZ(sizeof(paLocations[0]) * cLocations);
4747 if (!paLocations)
4748 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS, N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes"),
4749 pImage->pszFilename, sizeof(paLocations[0]) * cLocations);
4750 rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, paLocations, cLocations, NULL);
4751 if (RT_SUCCESS(rc))
4752 {
4753 /*
4754 * Translate them into descriptors.
4755 *
4756 * We restrict the amount of partition alignment padding to 4MiB as more
4757 * will just be a waste of space. The use case for including the padding
4758 * are older boot loaders and boot manager (including one by a team member)
4759 * that put data and code in the 62 sectors between the MBR and the first
4760 * partition (total of 63). Later CHS was abandond and partition started
4761 * being aligned on power of two sector boundraries (typically 64KiB or
4762 * 1MiB depending on the media size).
4763 */
4764 for (size_t i = 0; i < cLocations && RT_SUCCESS(rc); i++)
4765 {
4766 Assert(paLocations[i].cb > 0);
4767 if (paLocations[i].cb <= _64M)
4768 {
4769 /* Create the partition descriptor entry: */
4770 //paPartDescs[i].pszRawDevice = NULL;
4771 //paPartDescs[i].offStartInDevice = 0;
4772 //paPartDescs[i].uFlags = 0;
4773 paPartDescs[i].offStartInVDisk = paLocations[i].off;
4774 paPartDescs[i].cbData = paLocations[i].cb;
4775 if (paPartDescs[i].cbData < _4M)
4776 paPartDescs[i].cbData = RT_MIN(paPartDescs[i].cbData + paLocations[i].cbPadding, _4M);
4777 paPartDescs[i].pvPartitionData = RTMemAllocZ((size_t)paPartDescs[i].cbData);
4778 if (paPartDescs[i].pvPartitionData)
4779 {
4780 /* Read the content from the drive: */
4781 rc = RTFileReadAt(hRawDrive, paPartDescs[i].offStartInVDisk, paPartDescs[i].pvPartitionData,
4782 (size_t)paPartDescs[i].cbData, NULL);
4783 if (RT_SUCCESS(rc))
4784 {
4785 /* Do we have custom boot sector code? */
4786 if (pvBootSector && cbBootSector && paPartDescs[i].offStartInVDisk == 0)
4787 {
4788 /* Note! Old code used to quietly drop the bootsector if it was considered too big.
4789 Instead we fail as we weren't able to do what the user requested us to do.
4790 Better if the user knows than starts questioning why the guest isn't
4791 booting as expected. */
4792 if (cbBootSector <= paPartDescs[i].cbData)
4793 memcpy(paPartDescs[i].pvPartitionData, pvBootSector, cbBootSector);
4794 else
4795 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4796 N_("VMDK: Image path: '%s'. The custom boot sector is too big: %zu bytes, %RU64 bytes available"),
4797 pImage->pszFilename, cbBootSector, paPartDescs[i].cbData);
4798 }
4799 }
4800 else
4801 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4802 N_("VMDK: Image path: '%s'. Failed to read partition at off %RU64 length %zu from '%s' (%Rrc)"),
4803 pImage->pszFilename, paPartDescs[i].offStartInVDisk,
4804 (size_t)paPartDescs[i].cbData, pszRawDrive, rc);
4805 }
4806 else
4807 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4808 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for copying the partition table at off %RU64"),
4809 pImage->pszFilename, (size_t)paPartDescs[i].cbData, paPartDescs[i].offStartInVDisk);
4810 }
4811 else
4812 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4813 N_("VMDK: Image path: '%s'. Partition table #%u at offset %RU64 in '%s' is to big: %RU64 bytes"),
4814 pImage->pszFilename, i, paLocations[i].off, pszRawDrive, paLocations[i].cb);
4815 }
4816 }
4817 else
4818 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4819 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4820 pImage->pszFilename, pszRawDrive, rc);
4821 RTMemFree(paLocations);
4822 return rc;
4823}
4824
4825/**
4826 * Opens the volume manager for the raw drive when in selected-partition mode.
4827 *
4828 * @param pImage The VMDK image (for errors).
4829 * @param hRawDrive The raw drive handle.
4830 * @param pszRawDrive The raw drive device path (for errors).
4831 * @param cbSector The sector size.
4832 * @param phVolMgr Where to return the handle to the volume manager on
4833 * success.
4834 * @returns VBox status code, errors have been reported.
4835 * @internal
4836 */
4837static int vmdkRawDescOpenVolMgr(PVMDKIMAGE pImage, RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector, PRTDVM phVolMgr)
4838{
4839 *phVolMgr = NIL_RTDVM;
4840
4841 RTVFSFILE hVfsFile = NIL_RTVFSFILE;
4842 int rc = RTVfsFileFromRTFile(hRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE, true /*fLeaveOpen*/, &hVfsFile);
4843 if (RT_FAILURE(rc))
4844 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4845 N_("VMDK: Image path: '%s'. RTVfsFileFromRTFile failed for '%s' handle (%Rrc)"),
4846 pImage->pszFilename, pszRawDrive, rc);
4847
4848 RTDVM hVolMgr = NIL_RTDVM;
4849 rc = RTDvmCreate(&hVolMgr, hVfsFile, cbSector, 0 /*fFlags*/);
4850
4851 RTVfsFileRelease(hVfsFile);
4852
4853 if (RT_FAILURE(rc))
4854 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4855 N_("VMDK: Image path: '%s'. Failed to create volume manager instance for '%s' (%Rrc)"),
4856 pImage->pszFilename, pszRawDrive, rc);
4857
4858 rc = RTDvmMapOpen(hVolMgr);
4859 if (RT_SUCCESS(rc))
4860 {
4861 *phVolMgr = hVolMgr;
4862 return VINF_SUCCESS;
4863 }
4864 RTDvmRelease(hVolMgr);
4865 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Image path: '%s'. RTDvmMapOpen failed for '%s' (%Rrc)"),
4866 pImage->pszFilename, pszRawDrive, rc);
4867}
4868
4869/**
4870 * Opens the raw drive device and get the sizes for it.
4871 *
4872 * @param pImage The image (for error reporting).
4873 * @param pszRawDrive The device/whatever to open.
4874 * @param phRawDrive Where to return the file handle.
4875 * @param pcbRawDrive Where to return the size.
4876 * @param pcbSector Where to return the sector size.
4877 * @returns IPRT status code, errors have been reported.
4878 * @internal
4879 */
4880static int vmkdRawDescOpenDevice(PVMDKIMAGE pImage, const char *pszRawDrive,
4881 PRTFILE phRawDrive, uint64_t *pcbRawDrive, uint32_t *pcbSector)
4882{
4883 /*
4884 * Open the device for the raw drive.
4885 */
4886 RTFILE hRawDrive = NIL_RTFILE;
4887 int rc = RTFileOpen(&hRawDrive, pszRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
4888 if (RT_FAILURE(rc))
4889 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4890 N_("VMDK: Image path: '%s'. Failed to open the raw drive '%s' for reading (%Rrc)"),
4891 pImage->pszFilename, pszRawDrive, rc);
4892
4893 /*
4894 * Get the sector size.
4895 */
4896 uint32_t cbSector = 0;
4897 rc = RTFileQuerySectorSize(hRawDrive, &cbSector);
4898 if (RT_SUCCESS(rc))
4899 {
4900 /* sanity checks */
4901 if ( cbSector >= 512
4902 && cbSector <= _64K
4903 && RT_IS_POWER_OF_TWO(cbSector))
4904 {
4905 /*
4906 * Get the size.
4907 */
4908 uint64_t cbRawDrive = 0;
4909 rc = RTFileQuerySize(hRawDrive, &cbRawDrive);
4910 if (RT_SUCCESS(rc))
4911 {
4912 /* Check whether cbSize is actually sensible. */
4913 if (cbRawDrive > cbSector && (cbRawDrive % cbSector) == 0)
4914 {
4915 *phRawDrive = hRawDrive;
4916 *pcbRawDrive = cbRawDrive;
4917 *pcbSector = cbSector;
4918 return VINF_SUCCESS;
4919 }
4920 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4921 N_("VMDK: Image path: '%s'. Got a bogus size for the raw drive '%s': %RU64 (sector size %u)"),
4922 pImage->pszFilename, pszRawDrive, cbRawDrive, cbSector);
4923 }
4924 else
4925 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4926 N_("VMDK: Image path: '%s'. Failed to query size of the drive '%s' (%Rrc)"),
4927 pImage->pszFilename, pszRawDrive, rc);
4928 }
4929 else
4930 rc = vdIfError(pImage->pIfError, VERR_OUT_OF_RANGE, RT_SRC_POS,
4931 N_("VMDK: Image path: '%s'. Unsupported sector size for '%s': %u (%#x)"),
4932 pImage->pszFilename, pszRawDrive, cbSector, cbSector);
4933 }
4934 else
4935 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4936 N_("VMDK: Image path: '%s'. Failed to get the sector size for '%s' (%Rrc)"),
4937 pImage->pszFilename, pszRawDrive, rc);
4938 RTFileClose(hRawDrive);
4939 return rc;
4940}
4941
4942/**
4943 * Reads the raw disk configuration, leaving initalization and cleanup to the
4944 * caller (regardless of return status).
4945 *
4946 * @returns VBox status code, errors properly reported.
4947 * @internal
4948 */
4949static int vmdkRawDescParseConfig(PVMDKIMAGE pImage, char **ppszRawDrive,
4950 uint32_t *pfPartitions, uint32_t *pfPartitionsReadOnly,
4951 void **ppvBootSector, size_t *pcbBootSector, bool *pfRelative,
4952 char **ppszFreeMe)
4953{
4954 PVDINTERFACECONFIG pImgCfg = VDIfConfigGet(pImage->pVDIfsImage);
4955 if (!pImgCfg)
4956 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4957 N_("VMDK: Image path: '%s'. Getting config interface failed"), pImage->pszFilename);
4958
4959 /*
4960 * RawDrive = path
4961 */
4962 int rc = VDCFGQueryStringAlloc(pImgCfg, "RawDrive", ppszRawDrive);
4963 if (RT_FAILURE(rc))
4964 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4965 N_("VMDK: Image path: '%s'. Getting 'RawDrive' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4966 AssertPtrReturn(*ppszRawDrive, VERR_INTERNAL_ERROR_3);
4967
4968 /*
4969 * Partitions=n[r][,...]
4970 */
4971 uint32_t const cMaxPartitionBits = sizeof(*pfPartitions) * 8 /* ASSUMES 8 bits per char */;
4972 *pfPartitions = *pfPartitionsReadOnly = 0;
4973
4974 rc = VDCFGQueryStringAlloc(pImgCfg, "Partitions", ppszFreeMe);
4975 if (RT_SUCCESS(rc))
4976 {
4977 char *psz = *ppszFreeMe;
4978 while (*psz != '\0')
4979 {
4980 char *pszNext;
4981 uint32_t u32;
4982 rc = RTStrToUInt32Ex(psz, &pszNext, 0, &u32);
4983 if (rc == VWRN_NUMBER_TOO_BIG || rc == VWRN_NEGATIVE_UNSIGNED)
4984 rc = -rc;
4985 if (RT_FAILURE(rc))
4986 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4987 N_("VMDK: Image path: '%s'. Parsing 'Partitions' config value failed. Incorrect value (%Rrc): %s"),
4988 pImage->pszFilename, rc, psz);
4989 if (u32 >= cMaxPartitionBits)
4990 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4991 N_("VMDK: Image path: '%s'. 'Partitions' config sub-value out of range: %RU32, max %RU32"),
4992 pImage->pszFilename, u32, cMaxPartitionBits);
4993 *pfPartitions |= RT_BIT_32(u32);
4994 psz = pszNext;
4995 if (*psz == 'r')
4996 {
4997 *pfPartitionsReadOnly |= RT_BIT_32(u32);
4998 psz++;
4999 }
5000 if (*psz == ',')
5001 psz++;
5002 else if (*psz != '\0')
5003 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5004 N_("VMDK: Image path: '%s'. Malformed 'Partitions' config value, expected separator: %s"),
5005 pImage->pszFilename, psz);
5006 }
5007
5008 RTStrFree(*ppszFreeMe);
5009 *ppszFreeMe = NULL;
5010 }
5011 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
5012 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5013 N_("VMDK: Image path: '%s'. Getting 'Partitions' configuration failed (%Rrc)"), pImage->pszFilename, rc);
5014
5015 /*
5016 * BootSector=base64
5017 */
5018 rc = VDCFGQueryStringAlloc(pImgCfg, "BootSector", ppszFreeMe);
5019 if (RT_SUCCESS(rc))
5020 {
5021 ssize_t cbBootSector = RTBase64DecodedSize(*ppszFreeMe, NULL);
5022 if (cbBootSector < 0)
5023 return vdIfError(pImage->pIfError, VERR_INVALID_BASE64_ENCODING, RT_SRC_POS,
5024 N_("VMDK: Image path: '%s'. BASE64 decoding failed on the custom bootsector for '%s'"),
5025 pImage->pszFilename, *ppszRawDrive);
5026 if (cbBootSector == 0)
5027 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5028 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is zero bytes big"),
5029 pImage->pszFilename, *ppszRawDrive);
5030 if (cbBootSector > _4M) /* this is just a preliminary max */
5031 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5032 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is way too big: %zu bytes, max 4MB"),
5033 pImage->pszFilename, *ppszRawDrive, cbBootSector);
5034
5035 /* Refuse the boot sector if whole-drive. This used to be done quietly,
5036 however, bird disagrees and thinks the user should be told that what
5037 he/she/it tries to do isn't possible. There should be less head
5038 scratching this way when the guest doesn't do the expected thing. */
5039 if (!*pfPartitions)
5040 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5041 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is not supported for whole-drive configurations, only when selecting partitions"),
5042 pImage->pszFilename, *ppszRawDrive);
5043
5044 *pcbBootSector = (size_t)cbBootSector;
5045 *ppvBootSector = RTMemAlloc((size_t)cbBootSector);
5046 if (!*ppvBootSector)
5047 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
5048 N_("VMDK: Image path: '%s'. Failed to allocate %zd bytes for the custom bootsector for '%s'"),
5049 pImage->pszFilename, cbBootSector, *ppszRawDrive);
5050
5051 rc = RTBase64Decode(*ppszFreeMe, *ppvBootSector, cbBootSector, NULL /*pcbActual*/, NULL /*ppszEnd*/);
5052 if (RT_FAILURE(rc))
5053 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
5054 N_("VMDK: Image path: '%s'. Base64 decoding of the custom boot sector for '%s' failed (%Rrc)"),
5055 pImage->pszFilename, *ppszRawDrive, rc);
5056
5057 RTStrFree(*ppszFreeMe);
5058 *ppszFreeMe = NULL;
5059 }
5060 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
5061 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5062 N_("VMDK: Image path: '%s'. Getting 'BootSector' configuration failed (%Rrc)"), pImage->pszFilename, rc);
5063
5064 /*
5065 * Relative=0/1
5066 */
5067 *pfRelative = false;
5068 rc = VDCFGQueryBool(pImgCfg, "Relative", pfRelative);
5069 if (RT_SUCCESS(rc))
5070 {
5071 if (!*pfPartitions && *pfRelative != false)
5072 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5073 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported for whole-drive configurations, only when selecting partitions"),
5074 pImage->pszFilename);
5075#if !defined(RT_OS_DARWIN) && !defined(RT_OS_LINUX) && !defined(RT_OS_FREEBSD) && !defined(RT_OS_WINDOWS) && !defined(RT_OS_SOLARIS) /* PORTME */
5076 if (*pfRelative == true)
5077 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5078 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported on this host OS"),
5079 pImage->pszFilename);
5080#endif
5081 }
5082 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
5083 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5084 N_("VMDK: Image path: '%s'. Getting 'Relative' configuration failed (%Rrc)"), pImage->pszFilename, rc);
5085 else
5086#ifdef RT_OS_DARWIN /* different default on macOS, see ticketref:1461 (comment 20). */
5087 *pfRelative = true;
5088#else
5089 *pfRelative = false;
5090#endif
5091
5092 return VINF_SUCCESS;
5093}
5094
5095/**
5096 * Creates a raw drive (nee disk) descriptor.
5097 *
5098 * This was originally done in VBoxInternalManage.cpp, but was copied (not move)
5099 * here much later. That's one of the reasons why we produce a descriptor just
5100 * like it does, rather than mixing directly into the vmdkCreateRawImage code.
5101 *
5102 * @returns VBox status code.
5103 * @param pImage The image.
5104 * @param ppRaw Where to return the raw drive descriptor. Caller must
5105 * free it using vmdkRawDescFree regardless of the status
5106 * code.
5107 * @internal
5108 */
5109static int vmdkMakeRawDescriptor(PVMDKIMAGE pImage, PVDISKRAW *ppRaw)
5110{
5111 /* Make sure it's NULL. */
5112 *ppRaw = NULL;
5113
5114 /*
5115 * Read the configuration.
5116 */
5117 char *pszRawDrive = NULL;
5118 uint32_t fPartitions = 0; /* zero if whole-drive */
5119 uint32_t fPartitionsReadOnly = 0; /* (subset of fPartitions) */
5120 void *pvBootSector = NULL;
5121 size_t cbBootSector = 0;
5122 bool fRelative = false;
5123 char *pszFreeMe = NULL; /* lazy bird cleanup. */
5124 int rc = vmdkRawDescParseConfig(pImage, &pszRawDrive, &fPartitions, &fPartitionsReadOnly,
5125 &pvBootSector, &cbBootSector, &fRelative, &pszFreeMe);
5126 RTStrFree(pszFreeMe);
5127 if (RT_SUCCESS(rc))
5128 {
5129 /*
5130 * Open the device, getting the sector size and drive size.
5131 */
5132 uint64_t cbSize = 0;
5133 uint32_t cbSector = 0;
5134 RTFILE hRawDrive = NIL_RTFILE;
5135 rc = vmkdRawDescOpenDevice(pImage, pszRawDrive, &hRawDrive, &cbSize, &cbSector);
5136 if (RT_SUCCESS(rc))
5137 {
5138 pImage->cbSize = cbSize;
5139 /*
5140 * Create the raw-drive descriptor
5141 */
5142 PVDISKRAW pRawDesc = (PVDISKRAW)RTMemAllocZ(sizeof(*pRawDesc));
5143 if (pRawDesc)
5144 {
5145 pRawDesc->szSignature[0] = 'R';
5146 pRawDesc->szSignature[1] = 'A';
5147 pRawDesc->szSignature[2] = 'W';
5148 //pRawDesc->szSignature[3] = '\0';
5149 if (!fPartitions)
5150 {
5151 /*
5152 * It's simple for when doing the whole drive.
5153 */
5154 pRawDesc->uFlags = VDISKRAW_DISK;
5155 rc = RTStrDupEx(&pRawDesc->pszRawDisk, pszRawDrive);
5156 }
5157 else
5158 {
5159 /*
5160 * In selected partitions mode we've got a lot more work ahead of us.
5161 */
5162 pRawDesc->uFlags = VDISKRAW_NORMAL;
5163 //pRawDesc->pszRawDisk = NULL;
5164 //pRawDesc->cPartDescs = 0;
5165 //pRawDesc->pPartDescs = NULL;
5166
5167 /* We need to parse the partition map to complete the descriptor: */
5168 RTDVM hVolMgr = NIL_RTDVM;
5169 rc = vmdkRawDescOpenVolMgr(pImage, hRawDrive, pszRawDrive, cbSector, &hVolMgr);
5170 if (RT_SUCCESS(rc))
5171 {
5172 RTDVMFORMATTYPE enmFormatType = RTDvmMapGetFormatType(hVolMgr);
5173 if ( enmFormatType == RTDVMFORMATTYPE_MBR
5174 || enmFormatType == RTDVMFORMATTYPE_GPT)
5175 {
5176 pRawDesc->enmPartitioningType = enmFormatType == RTDVMFORMATTYPE_MBR
5177 ? VDISKPARTTYPE_MBR : VDISKPARTTYPE_GPT;
5178
5179 /* Add copies of the partition tables: */
5180 rc = vmdkRawDescDoCopyPartitionTables(pImage, hVolMgr, pRawDesc, pszRawDrive, hRawDrive,
5181 pvBootSector, cbBootSector);
5182 if (RT_SUCCESS(rc))
5183 {
5184 /* Add descriptors for the partitions/volumes, indicating which
5185 should be accessible and how to access them: */
5186 RTDVMVOLUME hVolRelease = NIL_RTDVMVOLUME;
5187 rc = vmdkRawDescDoPartitions(pImage, hVolMgr, pRawDesc, hRawDrive, pszRawDrive, cbSector,
5188 fPartitions, fPartitionsReadOnly, fRelative, &hVolRelease);
5189 RTDvmVolumeRelease(hVolRelease);
5190
5191 /* Finally, sort the partition and check consistency (overlaps, etc): */
5192 if (RT_SUCCESS(rc))
5193 rc = vmdkRawDescPostProcessPartitions(pImage, pRawDesc, cbSize);
5194 }
5195 }
5196 else
5197 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
5198 N_("VMDK: Image path: '%s'. Unsupported partitioning for the disk '%s': %s"),
5199 pImage->pszFilename, pszRawDrive, RTDvmMapGetFormatType(hVolMgr));
5200 RTDvmRelease(hVolMgr);
5201 }
5202 }
5203 if (RT_SUCCESS(rc))
5204 {
5205 /*
5206 * We succeeded.
5207 */
5208 *ppRaw = pRawDesc;
5209 Log(("vmdkMakeRawDescriptor: fFlags=%#x enmPartitioningType=%d cPartDescs=%u pszRawDisk=%s\n",
5210 pRawDesc->uFlags, pRawDesc->enmPartitioningType, pRawDesc->cPartDescs, pRawDesc->pszRawDisk));
5211 if (pRawDesc->cPartDescs)
5212 {
5213 Log(("# VMDK offset Length Device offset PartDataPtr Device\n"));
5214 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
5215 Log(("%2u %14RU64 %14RU64 %14RU64 %#18p %s\n", i, pRawDesc->pPartDescs[i].offStartInVDisk,
5216 pRawDesc->pPartDescs[i].cbData, pRawDesc->pPartDescs[i].offStartInDevice,
5217 pRawDesc->pPartDescs[i].pvPartitionData, pRawDesc->pPartDescs[i].pszRawDevice));
5218 }
5219 }
5220 else
5221 vmdkRawDescFree(pRawDesc);
5222 }
5223 else
5224 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
5225 N_("VMDK: Image path: '%s'. Failed to allocate %u bytes for the raw drive descriptor"),
5226 pImage->pszFilename, sizeof(*pRawDesc));
5227 RTFileClose(hRawDrive);
5228 }
5229 }
5230 RTStrFree(pszRawDrive);
5231 RTMemFree(pvBootSector);
5232 return rc;
5233}
5234
5235/**
5236 * Internal: create VMDK images for raw disk/partition access.
5237 */
5238static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVDISKRAW pRaw,
5239 uint64_t cbSize)
5240{
5241 int rc = VINF_SUCCESS;
5242 PVMDKEXTENT pExtent;
5243
5244 if (pRaw->uFlags & VDISKRAW_DISK)
5245 {
5246 /* Full raw disk access. This requires setting up a descriptor
5247 * file and open the (flat) raw disk. */
5248 rc = vmdkCreateExtents(pImage, 1);
5249 if (RT_FAILURE(rc))
5250 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5251 pExtent = &pImage->pExtents[0];
5252 /* Create raw disk descriptor file. */
5253 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
5254 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5255 true /* fCreate */));
5256 if (RT_FAILURE(rc))
5257 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
5258
5259 /* Set up basename for extent description. Cannot use StrDup. */
5260 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
5261 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
5262 if (!pszBasename)
5263 return VERR_NO_MEMORY;
5264 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
5265 pExtent->pszBasename = pszBasename;
5266 /* For raw disks the full name is identical to the base name. */
5267 pExtent->pszFullname = RTStrDup(pszBasename);
5268 if (!pExtent->pszFullname)
5269 return VERR_NO_MEMORY;
5270 pExtent->enmType = VMDKETYPE_FLAT;
5271 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
5272 pExtent->uSectorOffset = 0;
5273 pExtent->enmAccess = (pRaw->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
5274 pExtent->fMetaDirty = false;
5275
5276 /* Open flat image, the raw disk. */
5277 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5278 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
5279 false /* fCreate */));
5280 if (RT_FAILURE(rc))
5281 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
5282 }
5283 else
5284 {
5285 /* Raw partition access. This requires setting up a descriptor
5286 * file, write the partition information to a flat extent and
5287 * open all the (flat) raw disk partitions. */
5288
5289 /* First pass over the partition data areas to determine how many
5290 * extents we need. One data area can require up to 2 extents, as
5291 * it might be necessary to skip over unpartitioned space. */
5292 unsigned cExtents = 0;
5293 uint64_t uStart = 0;
5294 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
5295 {
5296 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
5297 if (uStart > pPart->offStartInVDisk)
5298 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5299 N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
5300
5301 if (uStart < pPart->offStartInVDisk)
5302 cExtents++;
5303 uStart = pPart->offStartInVDisk + pPart->cbData;
5304 cExtents++;
5305 }
5306 /* Another extent for filling up the rest of the image. */
5307 if (uStart != cbSize)
5308 cExtents++;
5309
5310 rc = vmdkCreateExtents(pImage, cExtents);
5311 if (RT_FAILURE(rc))
5312 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5313
5314 /* Create raw partition descriptor file. */
5315 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
5316 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5317 true /* fCreate */));
5318 if (RT_FAILURE(rc))
5319 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
5320
5321 /* Create base filename for the partition table extent. */
5322 /** @todo remove fixed buffer without creating memory leaks. */
5323 char pszPartition[1024];
5324 const char *pszBase = RTPathFilename(pImage->pszFilename);
5325 const char *pszSuff = RTPathSuffix(pszBase);
5326 if (pszSuff == NULL)
5327 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
5328 char *pszBaseBase = RTStrDup(pszBase);
5329 if (!pszBaseBase)
5330 return VERR_NO_MEMORY;
5331 RTPathStripSuffix(pszBaseBase);
5332 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
5333 pszBaseBase, pszSuff);
5334 RTStrFree(pszBaseBase);
5335
5336 /* Second pass over the partitions, now define all extents. */
5337 uint64_t uPartOffset = 0;
5338 cExtents = 0;
5339 uStart = 0;
5340 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
5341 {
5342 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
5343 pExtent = &pImage->pExtents[cExtents++];
5344
5345 if (uStart < pPart->offStartInVDisk)
5346 {
5347 pExtent->pszBasename = NULL;
5348 pExtent->pszFullname = NULL;
5349 pExtent->enmType = VMDKETYPE_ZERO;
5350 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->offStartInVDisk - uStart);
5351 pExtent->uSectorOffset = 0;
5352 pExtent->enmAccess = VMDKACCESS_READWRITE;
5353 pExtent->fMetaDirty = false;
5354 /* go to next extent */
5355 pExtent = &pImage->pExtents[cExtents++];
5356 }
5357 uStart = pPart->offStartInVDisk + pPart->cbData;
5358
5359 if (pPart->pvPartitionData)
5360 {
5361 /* Set up basename for extent description. Can't use StrDup. */
5362 size_t cbBasename = strlen(pszPartition) + 1;
5363 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
5364 if (!pszBasename)
5365 return VERR_NO_MEMORY;
5366 memcpy(pszBasename, pszPartition, cbBasename);
5367 pExtent->pszBasename = pszBasename;
5368
5369 /* Set up full name for partition extent. */
5370 char *pszDirname = RTStrDup(pImage->pszFilename);
5371 if (!pszDirname)
5372 return VERR_NO_STR_MEMORY;
5373 RTPathStripFilename(pszDirname);
5374 char *pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
5375 RTStrFree(pszDirname);
5376 if (!pszFullname)
5377 return VERR_NO_STR_MEMORY;
5378 pExtent->pszFullname = pszFullname;
5379 pExtent->enmType = VMDKETYPE_FLAT;
5380 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
5381 pExtent->uSectorOffset = uPartOffset;
5382 pExtent->enmAccess = VMDKACCESS_READWRITE;
5383 pExtent->fMetaDirty = false;
5384
5385 /* Create partition table flat image. */
5386 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5387 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
5388 true /* fCreate */));
5389 if (RT_FAILURE(rc))
5390 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
5391 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5392 VMDK_SECTOR2BYTE(uPartOffset),
5393 pPart->pvPartitionData,
5394 pPart->cbData);
5395 if (RT_FAILURE(rc))
5396 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
5397 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
5398 }
5399 else
5400 {
5401 if (pPart->pszRawDevice)
5402 {
5403 /* Set up basename for extent descr. Can't use StrDup. */
5404 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
5405 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
5406 if (!pszBasename)
5407 return VERR_NO_MEMORY;
5408 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
5409 pExtent->pszBasename = pszBasename;
5410 /* For raw disks full name is identical to base name. */
5411 pExtent->pszFullname = RTStrDup(pszBasename);
5412 if (!pExtent->pszFullname)
5413 return VERR_NO_MEMORY;
5414 pExtent->enmType = VMDKETYPE_FLAT;
5415 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
5416 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->offStartInDevice);
5417 pExtent->enmAccess = (pPart->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
5418 pExtent->fMetaDirty = false;
5419
5420 /* Open flat image, the raw partition. */
5421 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5422 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
5423 false /* fCreate */));
5424 if (RT_FAILURE(rc))
5425 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
5426 }
5427 else
5428 {
5429 pExtent->pszBasename = NULL;
5430 pExtent->pszFullname = NULL;
5431 pExtent->enmType = VMDKETYPE_ZERO;
5432 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
5433 pExtent->uSectorOffset = 0;
5434 pExtent->enmAccess = VMDKACCESS_READWRITE;
5435 pExtent->fMetaDirty = false;
5436 }
5437 }
5438 }
5439 /* Another extent for filling up the rest of the image. */
5440 if (uStart != cbSize)
5441 {
5442 pExtent = &pImage->pExtents[cExtents++];
5443 pExtent->pszBasename = NULL;
5444 pExtent->pszFullname = NULL;
5445 pExtent->enmType = VMDKETYPE_ZERO;
5446 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
5447 pExtent->uSectorOffset = 0;
5448 pExtent->enmAccess = VMDKACCESS_READWRITE;
5449 pExtent->fMetaDirty = false;
5450 }
5451 }
5452
5453 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5454 (pRaw->uFlags & VDISKRAW_DISK) ?
5455 "fullDevice" : "partitionedDevice");
5456 if (RT_FAILURE(rc))
5457 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5458 return rc;
5459}
5460
5461/**
5462 * Internal: create a regular (i.e. file-backed) VMDK image.
5463 */
5464static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
5465 unsigned uImageFlags, PVDINTERFACEPROGRESS pIfProgress,
5466 unsigned uPercentStart, unsigned uPercentSpan)
5467{
5468 int rc = VINF_SUCCESS;
5469 unsigned cExtents = 1;
5470 uint64_t cbOffset = 0;
5471 uint64_t cbRemaining = cbSize;
5472
5473 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
5474 {
5475 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
5476 /* Do proper extent computation: need one smaller extent if the total
5477 * size isn't evenly divisible by the split size. */
5478 if (cbSize % VMDK_2G_SPLIT_SIZE)
5479 cExtents++;
5480 }
5481 rc = vmdkCreateExtents(pImage, cExtents);
5482 if (RT_FAILURE(rc))
5483 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5484
5485 /* Basename strings needed for constructing the extent names. */
5486 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
5487 AssertPtr(pszBasenameSubstr);
5488 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
5489
5490 /* Create separate descriptor file if necessary. */
5491 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
5492 {
5493 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
5494 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5495 true /* fCreate */));
5496 if (RT_FAILURE(rc))
5497 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
5498 }
5499 else
5500 pImage->pFile = NULL;
5501
5502 /* Set up all extents. */
5503 for (unsigned i = 0; i < cExtents; i++)
5504 {
5505 PVMDKEXTENT pExtent = &pImage->pExtents[i];
5506 uint64_t cbExtent = cbRemaining;
5507
5508 /* Set up fullname/basename for extent description. Cannot use StrDup
5509 * for basename, as it is not guaranteed that the memory can be freed
5510 * with RTMemTmpFree, which must be used as in other code paths
5511 * StrDup is not usable. */
5512 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5513 {
5514 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
5515 if (!pszBasename)
5516 return VERR_NO_MEMORY;
5517 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
5518 pExtent->pszBasename = pszBasename;
5519 }
5520 else
5521 {
5522 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
5523 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
5524 RTPathStripSuffix(pszBasenameBase);
5525 char *pszTmp;
5526 size_t cbTmp;
5527 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5528 {
5529 if (cExtents == 1)
5530 RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
5531 pszBasenameSuff);
5532 else
5533 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
5534 i+1, pszBasenameSuff);
5535 }
5536 else
5537 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
5538 pszBasenameSuff);
5539 RTStrFree(pszBasenameBase);
5540 if (!pszTmp)
5541 return VERR_NO_STR_MEMORY;
5542 cbTmp = strlen(pszTmp) + 1;
5543 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
5544 if (!pszBasename)
5545 {
5546 RTStrFree(pszTmp);
5547 return VERR_NO_MEMORY;
5548 }
5549 memcpy(pszBasename, pszTmp, cbTmp);
5550 RTStrFree(pszTmp);
5551 pExtent->pszBasename = pszBasename;
5552 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
5553 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
5554 }
5555 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
5556 if (!pszBasedirectory)
5557 return VERR_NO_STR_MEMORY;
5558 RTPathStripFilename(pszBasedirectory);
5559 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
5560 RTStrFree(pszBasedirectory);
5561 if (!pszFullname)
5562 return VERR_NO_STR_MEMORY;
5563 pExtent->pszFullname = pszFullname;
5564
5565 /* Create file for extent. */
5566 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5567 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5568 true /* fCreate */));
5569 if (RT_FAILURE(rc))
5570 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
5571 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5572 {
5573 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbExtent,
5574 0 /* fFlags */, pIfProgress,
5575 uPercentStart + cbOffset * uPercentSpan / cbSize,
5576 cbExtent * uPercentSpan / cbSize);
5577 if (RT_FAILURE(rc))
5578 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
5579 }
5580
5581 /* Place descriptor file information (where integrated). */
5582 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5583 {
5584 pExtent->uDescriptorSector = 1;
5585 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
5586 /* The descriptor is part of the (only) extent. */
5587 pExtent->pDescData = pImage->pDescData;
5588 pImage->pDescData = NULL;
5589 }
5590
5591 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5592 {
5593 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
5594 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, _64K));
5595 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
5596 pExtent->cGTEntries = 512;
5597
5598 uint64_t const cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
5599 pExtent->cSectorsPerGDE = cSectorsPerGDE;
5600 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
5601 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5602 {
5603 /* The spec says version is 1 for all VMDKs, but the vast
5604 * majority of streamOptimized VMDKs actually contain
5605 * version 3 - so go with the majority. Both are accepted. */
5606 pExtent->uVersion = 3;
5607 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
5608 }
5609 }
5610 else
5611 {
5612 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5613 pExtent->enmType = VMDKETYPE_VMFS;
5614 else
5615 pExtent->enmType = VMDKETYPE_FLAT;
5616 }
5617
5618 pExtent->enmAccess = VMDKACCESS_READWRITE;
5619 pExtent->fUncleanShutdown = true;
5620 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
5621 pExtent->uSectorOffset = 0;
5622 pExtent->fMetaDirty = true;
5623
5624 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5625 {
5626 /* fPreAlloc should never be false because VMware can't use such images. */
5627 rc = vmdkCreateGrainDirectory(pImage, pExtent,
5628 RT_MAX( pExtent->uDescriptorSector
5629 + pExtent->cDescriptorSectors,
5630 1),
5631 true /* fPreAlloc */);
5632 if (RT_FAILURE(rc))
5633 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5634 }
5635
5636 cbOffset += cbExtent;
5637
5638 if (RT_SUCCESS(rc))
5639 vdIfProgress(pIfProgress, uPercentStart + cbOffset * uPercentSpan / cbSize);
5640
5641 cbRemaining -= cbExtent;
5642 }
5643
5644 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5645 {
5646 /* VirtualBox doesn't care, but VMWare ESX freaks out if the wrong
5647 * controller type is set in an image. */
5648 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
5649 if (RT_FAILURE(rc))
5650 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
5651 }
5652
5653 const char *pszDescType = NULL;
5654 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5655 {
5656 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5657 pszDescType = "vmfs";
5658 else
5659 pszDescType = (cExtents == 1)
5660 ? "monolithicFlat" : "twoGbMaxExtentFlat";
5661 }
5662 else
5663 {
5664 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5665 pszDescType = "streamOptimized";
5666 else
5667 {
5668 pszDescType = (cExtents == 1)
5669 ? "monolithicSparse" : "twoGbMaxExtentSparse";
5670 }
5671 }
5672 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5673 pszDescType);
5674 if (RT_FAILURE(rc))
5675 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5676 return rc;
5677}
5678
5679/**
5680 * Internal: Create a real stream optimized VMDK using only linear writes.
5681 */
5682static int vmdkCreateStreamImage(PVMDKIMAGE pImage, uint64_t cbSize)
5683{
5684 int rc = vmdkCreateExtents(pImage, 1);
5685 if (RT_FAILURE(rc))
5686 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5687
5688 /* Basename strings needed for constructing the extent names. */
5689 const char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
5690 AssertPtr(pszBasenameSubstr);
5691 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
5692
5693 /* No separate descriptor file. */
5694 pImage->pFile = NULL;
5695
5696 /* Set up all extents. */
5697 PVMDKEXTENT pExtent = &pImage->pExtents[0];
5698
5699 /* Set up fullname/basename for extent description. Cannot use StrDup
5700 * for basename, as it is not guaranteed that the memory can be freed
5701 * with RTMemTmpFree, which must be used as in other code paths
5702 * StrDup is not usable. */
5703 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
5704 if (!pszBasename)
5705 return VERR_NO_MEMORY;
5706 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
5707 pExtent->pszBasename = pszBasename;
5708
5709 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
5710 RTPathStripFilename(pszBasedirectory);
5711 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
5712 RTStrFree(pszBasedirectory);
5713 if (!pszFullname)
5714 return VERR_NO_STR_MEMORY;
5715 pExtent->pszFullname = pszFullname;
5716
5717 /* Create file for extent. Make it write only, no reading allowed. */
5718 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5719 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5720 true /* fCreate */)
5721 & ~RTFILE_O_READ);
5722 if (RT_FAILURE(rc))
5723 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
5724
5725 /* Place descriptor file information. */
5726 pExtent->uDescriptorSector = 1;
5727 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
5728 /* The descriptor is part of the (only) extent. */
5729 pExtent->pDescData = pImage->pDescData;
5730 pImage->pDescData = NULL;
5731
5732 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
5733 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
5734 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
5735 pExtent->cGTEntries = 512;
5736
5737 uint64_t const cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
5738 pExtent->cSectorsPerGDE = cSectorsPerGDE;
5739 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
5740
5741 /* The spec says version is 1 for all VMDKs, but the vast
5742 * majority of streamOptimized VMDKs actually contain
5743 * version 3 - so go with the majority. Both are accepted. */
5744 pExtent->uVersion = 3;
5745 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
5746 pExtent->fFooter = true;
5747
5748 pExtent->enmAccess = VMDKACCESS_READONLY;
5749 pExtent->fUncleanShutdown = false;
5750 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
5751 pExtent->uSectorOffset = 0;
5752 pExtent->fMetaDirty = true;
5753
5754 /* Create grain directory, without preallocating it straight away. It will
5755 * be constructed on the fly when writing out the data and written when
5756 * closing the image. The end effect is that the full grain directory is
5757 * allocated, which is a requirement of the VMDK specs. */
5758 rc = vmdkCreateGrainDirectory(pImage, pExtent, VMDK_GD_AT_END,
5759 false /* fPreAlloc */);
5760 if (RT_FAILURE(rc))
5761 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5762
5763 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5764 "streamOptimized");
5765 if (RT_FAILURE(rc))
5766 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5767
5768 return rc;
5769}
5770
5771/**
5772 * Initializes the UUID fields in the DDB.
5773 *
5774 * @returns VBox status code.
5775 * @param pImage The VMDK image instance.
5776 */
5777static int vmdkCreateImageDdbUuidsInit(PVMDKIMAGE pImage)
5778{
5779 int rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
5780 if (RT_SUCCESS(rc))
5781 {
5782 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
5783 if (RT_SUCCESS(rc))
5784 {
5785 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_MODIFICATION_UUID,
5786 &pImage->ModificationUuid);
5787 if (RT_SUCCESS(rc))
5788 {
5789 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_MODIFICATION_UUID,
5790 &pImage->ParentModificationUuid);
5791 if (RT_FAILURE(rc))
5792 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5793 N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5794 }
5795 else
5796 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5797 N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5798 }
5799 else
5800 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5801 N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
5802 }
5803 else
5804 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5805 N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
5806
5807 return rc;
5808}
5809
/**
 * Internal: The actual code for creating any VMDK variant currently in
 * existence on hosted environments.
 *
 * Dispatches to the raw, stream-optimized or regular creation helper, then
 * fills in the descriptor (extent list, geometry, UUIDs, comment), flushes
 * the metadata and initializes the single RAW region descriptor.
 *
 * @returns VBox status code.
 * @param   pImage          The VMDK image instance to create.
 * @param   cbSize          Requested disk size in bytes (0 for raw images
 *                          means "use the detected raw device size").
 * @param   uImageFlags     VD_IMAGE_FLAGS_* / VD_VMDK_IMAGE_FLAGS_* variant.
 * @param   pszComment      Optional image comment (may be NULL).
 * @param   pPCHSGeometry   Physical CHS geometry; all-zero means "unset".
 * @param   pLCHSGeometry   Logical CHS geometry; all-zero means "unset".
 * @param   pUuid           The image UUID to store.
 * @param   pIfProgress     Optional progress interface.
 * @param   uPercentStart   Start of the progress range.
 * @param   uPercentSpan    Span of the progress range.
 */
static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
                           unsigned uImageFlags, const char *pszComment,
                           PCVDGEOMETRY pPCHSGeometry,
                           PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
                           PVDINTERFACEPROGRESS pIfProgress,
                           unsigned uPercentStart, unsigned uPercentSpan)
{
    pImage->uImageFlags = uImageFlags;

    pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
    pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
    AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);

    int rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
                                  &pImage->Descriptor);
    if (RT_SUCCESS(rc))
    {
        if (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
        {
            /* Raw disk image (includes raw partition). */
            PVDISKRAW pRaw = NULL;
            rc = vmdkMakeRawDescriptor(pImage, &pRaw);
            if (RT_FAILURE(rc))
                /* Note: early return without the vmdkFreeImage cleanup done
                 * on the common failure path at the bottom. */
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create raw descriptor for '%s'"),
                                 pImage->pszFilename);
            if (!cbSize)
                cbSize = pImage->cbSize;    /* Use the size detected from the raw device. */

            rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
            vmdkRawDescFree(pRaw);
        }
        else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
        {
            /* Stream optimized sparse image (monolithic). */
            rc = vmdkCreateStreamImage(pImage, cbSize);
        }
        else
        {
            /* Regular fixed or sparse image (monolithic or split).
             * Reserve the last 5% of the span for the finishing steps below. */
            rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
                                        pIfProgress, uPercentStart,
                                        uPercentSpan * 95 / 100);
        }

        if (RT_SUCCESS(rc))
        {
            vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 98 / 100);

            pImage->cbSize = cbSize;

            /* Mirror the in-memory extent array into descriptor extent lines. */
            for (unsigned i = 0; i < pImage->cExtents; i++)
            {
                PVMDKEXTENT pExtent = &pImage->pExtents[i];

                rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
                                       pExtent->cNominalSectors, pExtent->enmType,
                                       pExtent->pszBasename, pExtent->uSectorOffset);
                if (RT_FAILURE(rc))
                {
                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
                    break;
                }
            }

            if (RT_SUCCESS(rc))
                vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);

            pImage->LCHSGeometry = *pLCHSGeometry;
            pImage->PCHSGeometry = *pPCHSGeometry;

            if (RT_SUCCESS(rc))
            {
                if (    pPCHSGeometry->cCylinders != 0
                    &&  pPCHSGeometry->cHeads != 0
                    &&  pPCHSGeometry->cSectors != 0)
                    rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
                else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
                {
                    /* No geometry given for a raw disk: synthesize a standard
                     * 16 heads / 63 sectors geometry, cylinders capped at the
                     * conventional ATA limit of 16383. */
                    VDGEOMETRY RawDiskPCHSGeometry;
                    RawDiskPCHSGeometry.cCylinders = (uint32_t)RT_MIN(pImage->cbSize / 512 / 16 / 63, 16383);
                    RawDiskPCHSGeometry.cHeads = 16;
                    RawDiskPCHSGeometry.cSectors = 63;
                    rc = vmdkDescSetPCHSGeometry(pImage, &RawDiskPCHSGeometry);
                }
            }

            /* Logical geometry is only stored when fully specified. */
            if (   RT_SUCCESS(rc)
                && pLCHSGeometry->cCylinders != 0
                && pLCHSGeometry->cHeads != 0
                && pLCHSGeometry->cSectors != 0)
                rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);

            /* A freshly created image has no parent and no modifications yet. */
            pImage->ImageUuid = *pUuid;
            RTUuidClear(&pImage->ParentUuid);
            RTUuidClear(&pImage->ModificationUuid);
            RTUuidClear(&pImage->ParentModificationUuid);

            if (RT_SUCCESS(rc))
                rc = vmdkCreateImageDdbUuidsInit(pImage);

            if (RT_SUCCESS(rc))
                rc = vmdkAllocateGrainTableCache(pImage);

            if (RT_SUCCESS(rc))
            {
                rc = vmdkSetImageComment(pImage, pszComment);
                if (RT_FAILURE(rc))
                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
            }

            if (RT_SUCCESS(rc))
            {
                vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 99 / 100);

                if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                {
                    /* streamOptimized is a bit special, we cannot trigger the flush
                     * until all data has been written. So we write the necessary
                     * information explicitly. */
                    pImage->pExtents[0].cDescriptorSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(  pImage->Descriptor.aLines[pImage->Descriptor.cLines]
                                                                                        - pImage->Descriptor.aLines[0], 512));
                    rc = vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0, NULL);
                    if (RT_SUCCESS(rc))
                    {
                        rc = vmdkWriteDescriptor(pImage, NULL);
                        if (RT_FAILURE(rc))
                            rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK descriptor in '%s'"), pImage->pszFilename);
                    }
                    else
                        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK header in '%s'"), pImage->pszFilename);
                }
                else
                    rc = vmdkFlushImage(pImage, NULL);
            }
        }
    }
    else
        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);


    if (RT_SUCCESS(rc))
    {
        /* Single RAW region covering the whole disk with 512-byte blocks. */
        PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
        pImage->RegionList.fFlags   = 0;
        pImage->RegionList.cRegions = 1;

        pRegion->offRegion            = 0; /* Disk start. */
        pRegion->cbBlock              = 512;
        pRegion->enmDataForm          = VDREGIONDATAFORM_RAW;
        pRegion->enmMetadataForm      = VDREGIONMETADATAFORM_NONE;
        pRegion->cbData               = 512;
        pRegion->cbMetadata           = 0;
        pRegion->cRegionBlocksOrBytes = pImage->cbSize;

        vdIfProgress(pIfProgress, uPercentStart + uPercentSpan);
    }
    else
        /* Keep the files around if creation failed because they already
         * existed, so an existing image is not deleted by accident. */
        vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS, false /*fFlush*/);
    return rc;
}
5974
5975/**
5976 * Internal: Update image comment.
5977 */
5978static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
5979{
5980 char *pszCommentEncoded = NULL;
5981 if (pszComment)
5982 {
5983 pszCommentEncoded = vmdkEncodeString(pszComment);
5984 if (!pszCommentEncoded)
5985 return VERR_NO_MEMORY;
5986 }
5987
5988 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
5989 "ddb.comment", pszCommentEncoded);
5990 if (pszCommentEncoded)
5991 RTStrFree(pszCommentEncoded);
5992 if (RT_FAILURE(rc))
5993 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
5994 return VINF_SUCCESS;
5995}
5996
5997/**
5998 * Internal. Clear the grain table buffer for real stream optimized writing.
5999 */
6000static void vmdkStreamClearGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
6001{
6002 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
6003 for (uint32_t i = 0; i < cCacheLines; i++)
6004 memset(&pImage->pGTCache->aGTCache[i].aGTData[0], '\0',
6005 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
6006}
6007
/**
 * Internal. Flush the grain table buffer for real stream optimized writing.
 *
 * Writes the buffered grain table for the given grain directory entry to the
 * extent file at the current append position (preceded by a GT marker) and
 * records the table's sector in the grain directory.  Completely empty grain
 * tables are skipped, mimicking VMware's behavior.
 *
 * @returns VBox status code.
 * @param   pImage      The VMDK image instance (owns the GT cache buffer).
 * @param   pExtent     The stream optimized extent being written.
 * @param   uGDEntry    Grain directory entry the buffered table belongs to.
 */
static int vmdkStreamFlushGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                             uint32_t uGDEntry)
{
    int rc = VINF_SUCCESS;
    uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;

    /* VMware does not write out completely empty grain tables in the case
     * of streamOptimized images, which according to my interpretation of
     * the VMDK 1.1 spec is bending the rules. Since they do it and we can
     * handle it without problems do it the same way and save some bytes. */
    bool fAllZero = true;
    for (uint32_t i = 0; i < cCacheLines; i++)
    {
        /* Scan each cache line for a non-zero entry; stop at the first hit. */
        uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
        for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
            if (*pGTTmp)
            {
                fAllZero = false;
                break;
            }
        if (!fAllZero)
            break;
    }
    if (fAllZero)
        return VINF_SUCCESS;    /* Nothing allocated in this GT - skip it entirely. */

    uint64_t uFileOffset = pExtent->uAppendPosition;
    if (!uFileOffset)
        return VERR_INTERNAL_ERROR; /* Append position must have been set up earlier. */
    /* Align to sector, as the previous write could have been any size. */
    uFileOffset = RT_ALIGN_64(uFileOffset, 512);

    /* Grain table marker: one 512-byte sector announcing the GT that follows. */
    uint8_t aMarker[512];
    PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
    memset(pMarker, '\0', sizeof(aMarker));
    pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR((uint64_t)pExtent->cGTEntries * sizeof(uint32_t)));
    pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GT);
    rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
                                aMarker, sizeof(aMarker));
    AssertRC(rc);
    uFileOffset += 512;

    /* Sanity: the GD must exist and this entry must not have been flushed yet. */
    if (!pExtent->pGD || pExtent->pGD[uGDEntry])
        return VERR_INTERNAL_ERROR;

    pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset);

    for (uint32_t i = 0; i < cCacheLines; i++)
    {
        /* Convert the grain table to little endian in place, as it will not
         * be used at all after this function has been called. */
        uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
        for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
            *pGTTmp = RT_H2LE_U32(*pGTTmp);

        rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
                                    &pImage->pGTCache->aGTCache[i].aGTData[0],
                                    VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
        uFileOffset += VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
        if (RT_FAILURE(rc))
            break;
    }
    Assert(!(uFileOffset % 512));
    /* Remember where the next linear write must continue. */
    pExtent->uAppendPosition = RT_ALIGN_64(uFileOffset, 512);
    return rc;
}
6080
/**
 * Internal. Free all allocated space for representing an image, and optionally
 * delete the image from disk.
 *
 * @returns VBox status code.
 * @param   pImage  The image instance to destroy.  NULL is tolerated and not
 *                  treated as an error (e.g. when an open failed early).
 * @param   fDelete Whether the backing files are deleted when closing.
 * @param   fFlush  Whether to flush pending metadata for non-streamOptimized
 *                  images before closing.
 */
static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush)
{
    int rc = VINF_SUCCESS;

    /* Freeing a never allocated image (e.g. because the open failed) is
     * not signalled as an error. After all nothing bad happens. */
    if (pImage)
    {
        if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
        {
            if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
            {
                /* Check if all extents are clean. */
                for (unsigned i = 0; i < pImage->cExtents; i++)
                {
                    Assert(!pImage->pExtents[i].fUncleanShutdown);
                }
            }
            else
            {
                /* Mark all extents as clean. */
                for (unsigned i = 0; i < pImage->cExtents; i++)
                {
                    if (   pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
                        && pImage->pExtents[i].fUncleanShutdown)
                    {
                        pImage->pExtents[i].fUncleanShutdown = false;
                        pImage->pExtents[i].fMetaDirty = true;
                    }

                    /* From now on it's not safe to append any more data. */
                    pImage->pExtents[i].uAppendPosition = 0;
                }
            }
        }

        if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
        {
            /* No need to write any pending data if the file will be deleted
             * or if the new file wasn't successfully created. */
            if (   !fDelete && pImage->pExtents
                && pImage->pExtents[0].cGTEntries
                && pImage->pExtents[0].uAppendPosition)
            {
                /* Flush the last (possibly partially filled) grain table,
                 * then write out empty grain tables for all remaining GD
                 * entries, followed by GD marker + GD, footer marker +
                 * footer and the end-of-stream marker. */
                PVMDKEXTENT pExtent = &pImage->pExtents[0];
                uint32_t uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
                rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
                AssertRC(rc);
                vmdkStreamClearGT(pImage, pExtent);
                for (uint32_t i = uLastGDEntry + 1; i < pExtent->cGDEntries; i++)
                {
                    rc = vmdkStreamFlushGT(pImage, pExtent, i);
                    AssertRC(rc);
                }

                uint64_t uFileOffset = pExtent->uAppendPosition;
                if (!uFileOffset)
                    /* NOTE(review): this early return skips the extent/file
                     * cleanup below — verify this is intentional. */
                    return VERR_INTERNAL_ERROR;
                uFileOffset = RT_ALIGN_64(uFileOffset, 512);

                /* From now on it's not safe to append any more data. */
                pExtent->uAppendPosition = 0;

                /* Grain directory marker. */
                uint8_t aMarker[512];
                PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
                memset(pMarker, '\0', sizeof(aMarker));
                /* NOTE(review): RT_H2LE_U64 is applied before RT_ALIGN_64 and
                 * VMDK_BYTE2SECTOR, and the final value is not byte-swapped as
                 * a whole; this is only correct on little endian hosts —
                 * confirm (compare vmdkStreamFlushGT's marker setup). */
                pMarker->uSector = VMDK_BYTE2SECTOR(RT_ALIGN_64(RT_H2LE_U64((uint64_t)pExtent->cGDEntries * sizeof(uint32_t)), 512));
                pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GD);
                rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
                                            aMarker, sizeof(aMarker));
                AssertRC(rc);
                uFileOffset += 512;

                /* Write grain directory in little endian style. The array will
                 * not be used after this, so convert in place. */
                uint32_t *pGDTmp = pExtent->pGD;
                for (uint32_t i = 0; i < pExtent->cGDEntries; i++, pGDTmp++)
                    *pGDTmp = RT_H2LE_U32(*pGDTmp);
                rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                            uFileOffset, pExtent->pGD,
                                            pExtent->cGDEntries * sizeof(uint32_t));
                AssertRC(rc);

                pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset);
                pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset);
                uFileOffset = RT_ALIGN_64(  uFileOffset
                                          + pExtent->cGDEntries * sizeof(uint32_t),
                                          512);

                /* Footer marker. */
                memset(pMarker, '\0', sizeof(aMarker));
                pMarker->uSector = VMDK_BYTE2SECTOR(512);
                pMarker->uType = RT_H2LE_U32(VMDK_MARKER_FOOTER);
                rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                            uFileOffset, aMarker, sizeof(aMarker));
                AssertRC(rc);

                uFileOffset += 512;
                rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset, NULL);
                AssertRC(rc);

                uFileOffset += 512;
                /* End-of-stream marker. */
                memset(pMarker, '\0', sizeof(aMarker));
                rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                            uFileOffset, aMarker, sizeof(aMarker));
                AssertRC(rc);
            }
        }
        else if (!fDelete && fFlush)
            vmdkFlushImage(pImage, NULL);

        /* Free/close all extents, keeping the first error but continuing the
         * cleanup regardless. */
        if (pImage->pExtents != NULL)
        {
            for (unsigned i = 0 ; i < pImage->cExtents; i++)
            {
                int rc2 = vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
                if (RT_SUCCESS(rc))
                    rc = rc2; /* Propagate any error when closing the file. */
            }
            RTMemFree(pImage->pExtents);
            pImage->pExtents = NULL;
        }
        pImage->cExtents = 0;
        if (pImage->pFile != NULL)
        {
            int rc2 = vmdkFileClose(pImage, &pImage->pFile, fDelete);
            if (RT_SUCCESS(rc))
                rc = rc2; /* Propagate any error when closing the file. */
        }
        int rc2 = vmdkFileCheckAllClose(pImage);
        if (RT_SUCCESS(rc))
            rc = rc2; /* Propagate any error when closing the file. */

        if (pImage->pGTCache)
        {
            RTMemFree(pImage->pGTCache);
            pImage->pGTCache = NULL;
        }
        if (pImage->pDescData)
        {
            RTMemFree(pImage->pDescData);
            pImage->pDescData = NULL;
        }
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6235
6236/**
6237 * Internal. Flush image data (and metadata) to disk.
6238 */
6239static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
6240{
6241 PVMDKEXTENT pExtent;
6242 int rc = VINF_SUCCESS;
6243
6244 /* Update descriptor if changed. */
6245 if (pImage->Descriptor.fDirty)
6246 rc = vmdkWriteDescriptor(pImage, pIoCtx);
6247
6248 if (RT_SUCCESS(rc))
6249 {
6250 for (unsigned i = 0; i < pImage->cExtents; i++)
6251 {
6252 pExtent = &pImage->pExtents[i];
6253 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
6254 {
6255 switch (pExtent->enmType)
6256 {
6257 case VMDKETYPE_HOSTED_SPARSE:
6258 if (!pExtent->fFooter)
6259 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0, pIoCtx);
6260 else
6261 {
6262 uint64_t uFileOffset = pExtent->uAppendPosition;
6263 /* Simply skip writing anything if the streamOptimized
6264 * image hasn't been just created. */
6265 if (!uFileOffset)
6266 break;
6267 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
6268 rc = vmdkWriteMetaSparseExtent(pImage, pExtent,
6269 uFileOffset, pIoCtx);
6270 }
6271 break;
6272 case VMDKETYPE_VMFS:
6273 case VMDKETYPE_FLAT:
6274 /* Nothing to do. */
6275 break;
6276 case VMDKETYPE_ZERO:
6277 default:
6278 AssertMsgFailed(("extent with type %d marked as dirty\n",
6279 pExtent->enmType));
6280 break;
6281 }
6282 }
6283
6284 if (RT_FAILURE(rc))
6285 break;
6286
6287 switch (pExtent->enmType)
6288 {
6289 case VMDKETYPE_HOSTED_SPARSE:
6290 case VMDKETYPE_VMFS:
6291 case VMDKETYPE_FLAT:
6292 /** @todo implement proper path absolute check. */
6293 if ( pExtent->pFile != NULL
6294 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6295 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
6296 rc = vdIfIoIntFileFlush(pImage->pIfIo, pExtent->pFile->pStorage, pIoCtx,
6297 NULL, NULL);
6298 break;
6299 case VMDKETYPE_ZERO:
6300 /* No need to do anything for this extent. */
6301 break;
6302 default:
6303 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
6304 break;
6305 }
6306 }
6307 }
6308
6309 return rc;
6310}
6311
6312/**
6313 * Internal. Find extent corresponding to the sector number in the disk.
6314 */
6315static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
6316 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
6317{
6318 PVMDKEXTENT pExtent = NULL;
6319 int rc = VINF_SUCCESS;
6320
6321 for (unsigned i = 0; i < pImage->cExtents; i++)
6322 {
6323 if (offSector < pImage->pExtents[i].cNominalSectors)
6324 {
6325 pExtent = &pImage->pExtents[i];
6326 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
6327 break;
6328 }
6329 offSector -= pImage->pExtents[i].cNominalSectors;
6330 }
6331
6332 if (pExtent)
6333 *ppExtent = pExtent;
6334 else
6335 rc = VERR_IO_SECTOR_NOT_FOUND;
6336
6337 return rc;
6338}
6339
6340/**
6341 * Internal. Hash function for placing the grain table hash entries.
6342 */
6343static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
6344 unsigned uExtent)
6345{
6346 /** @todo this hash function is quite simple, maybe use a better one which
6347 * scrambles the bits better. */
6348 return (uSector + uExtent) % pCache->cEntries;
6349}
6350
/**
 * Internal. Get sector number in the extent file from the relative sector
 * number in the extent.
 *
 * @returns VBox status code.
 * @param   pImage          The VMDK image instance (provides the GT cache).
 * @param   pIoCtx          The I/O context, used for the grain table metadata
 *                          read on a cache miss.
 * @param   pExtent         The extent to translate the sector in.
 * @param   uSector         Sector number relative to the start of the extent.
 * @param   puExtentSector  Where to store the sector number in the extent
 *                          file; set to 0 if the grain is not allocated.
 */
static int vmdkGetSector(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
                         PVMDKEXTENT pExtent, uint64_t uSector,
                         uint64_t *puExtentSector)
{
    PVMDKGTCACHE pCache = pImage->pGTCache;
    uint64_t uGDIndex, uGTSector, uGTBlock;
    uint32_t uGTHash, uGTBlockIndex;
    PVMDKGTCACHEENTRY pGTCacheEntry;
    uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    int rc;

    /* For newly created and readonly/sequentially opened streamOptimized
     * images this must be a no-op, as the grain directory is not there. */
    if (   (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
            && pExtent->uAppendPosition)
        || (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
            && pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
            && pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
    {
        *puExtentSector = 0;
        return VINF_SUCCESS;
    }

    uGDIndex = uSector / pExtent->cSectorsPerGDE;
    if (uGDIndex >= pExtent->cGDEntries)
        return VERR_OUT_OF_RANGE;
    uGTSector = pExtent->pGD[uGDIndex];
    if (!uGTSector)
    {
        /* There is no grain table referenced by this grain directory
         * entry. So there is absolutely no data in this area. */
        *puExtentSector = 0;
        return VINF_SUCCESS;
    }

    uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    pGTCacheEntry = &pCache->aGTCache[uGTHash];
    if (   pGTCacheEntry->uExtent != pExtent->uExtent
        || pGTCacheEntry->uGTBlock != uGTBlock)
    {
        /* Cache miss, fetch data from disk. */
        PVDMETAXFER pMetaXfer;
        rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                   VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                   aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
        if (RT_FAILURE(rc))
            return rc;
        /* We can release the metadata transfer immediately. */
        vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
        pGTCacheEntry->uExtent = pExtent->uExtent;
        pGTCacheEntry->uGTBlock = uGTBlock;
        /* On-disk grain table entries are little endian; cache host endian. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    }
    uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
    if (uGrainSector)
        *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
    else
        *puExtentSector = 0;
    return VINF_SUCCESS;
}
6418
/**
 * Internal. Writes the grain and also if necessary the grain tables.
 * Uses the grain table cache as a true grain table.
 *
 * Stream optimized images are strictly append-only: grains must be written
 * in increasing order, aligned to grain boundaries, and each grain exactly
 * once.  Grain tables are flushed whenever the write crosses into a new
 * grain directory entry.
 *
 * @returns VBox status code.
 * @param   pImage      The VMDK image instance.
 * @param   pExtent     The (stream optimized) extent to append the grain to.
 * @param   uSector     First sector (extent relative) of the write.
 * @param   pIoCtx      The I/O context holding the data (must be synchronous).
 * @param   cbWrite     Number of bytes to write.
 */
static int vmdkStreamAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                uint64_t uSector, PVDIOCTX pIoCtx,
                                uint64_t cbWrite)
{
    uint32_t uGrain;
    uint32_t uGDEntry, uLastGDEntry;
    uint32_t cbGrain = 0;
    uint32_t uCacheLine, uCacheEntry;
    const void *pData;
    int rc;

    /* Very strict requirements: always write at least one full grain, with
     * proper alignment. Everything else would require reading of already
     * written data, which we don't support for obvious reasons. The only
     * exception is the last grain, and only if the image size specifies
     * that only some portion holds data. In any case the write must be
     * within the image limits, no "overshoot" allowed. */
    if (   cbWrite == 0
        || (   cbWrite < VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
            && pExtent->cNominalSectors - uSector >= pExtent->cSectorsPerGrain)
        || uSector % pExtent->cSectorsPerGrain
        || uSector + VMDK_BYTE2SECTOR(cbWrite) > pExtent->cNominalSectors)
        return VERR_INVALID_PARAMETER;

    /* Clip write range to at most the rest of the grain. */
    cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain));

    /* Do not allow to go back. */
    uGrain = uSector / pExtent->cSectorsPerGrain;
    uCacheLine = uGrain % pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
    uCacheEntry = uGrain % VMDK_GT_CACHELINE_SIZE;
    uGDEntry = uGrain / pExtent->cGTEntries;
    uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
    if (uGrain < pExtent->uLastGrainAccess)
        return VERR_VD_VMDK_INVALID_WRITE;

    /* Zero byte write optimization. Since we don't tell VBoxHDD that we need
     * to allocate something, we also need to detect the situation ourself. */
    if (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_ZEROES)
        && vdIfIoIntIoCtxIsZero(pImage->pIfIo, pIoCtx, cbWrite, true /* fAdvance */))
        return VINF_SUCCESS;

    /* Crossing into a new grain directory entry: flush the current grain
     * table and write out empty tables for any skipped GD entries. */
    if (uGDEntry != uLastGDEntry)
    {
        rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
        if (RT_FAILURE(rc))
            return rc;
        vmdkStreamClearGT(pImage, pExtent);
        for (uint32_t i = uLastGDEntry + 1; i < uGDEntry; i++)
        {
            rc = vmdkStreamFlushGT(pImage, pExtent, i);
            if (RT_FAILURE(rc))
                return rc;
        }
    }

    uint64_t uFileOffset;
    uFileOffset = pExtent->uAppendPosition;
    if (!uFileOffset)
        return VERR_INTERNAL_ERROR;
    /* Align to sector, as the previous write could have been any size. */
    uFileOffset = RT_ALIGN_64(uFileOffset, 512);

    /* Paranoia check: extent type, grain table buffer presence and
     * grain table buffer space. Also grain table entry must be clear. */
    if (   pExtent->enmType != VMDKETYPE_HOSTED_SPARSE
        || !pImage->pGTCache
        || pExtent->cGTEntries > VMDK_GT_CACHE_SIZE * VMDK_GT_CACHELINE_SIZE
        || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry])
        return VERR_INTERNAL_ERROR;

    /* Update grain table entry. */
    pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset);

    if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
    {
        /* Partial (last) grain: copy into the grain buffer and zero-pad it
         * up to full grain size before compressing. */
        vdIfIoIntIoCtxCopyFrom(pImage->pIfIo, pIoCtx, pExtent->pvGrain, cbWrite);
        memset((char *)pExtent->pvGrain + cbWrite, '\0',
               VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite);
        pData = pExtent->pvGrain;
    }
    else
    {
        /* Full grain: compress straight out of the I/O context buffer. */
        RTSGSEG Segment;
        unsigned cSegments = 1;

        size_t cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
                                                    &cSegments, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
        Assert(cbSeg == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)); RT_NOREF(cbSeg);
        pData = Segment.pvSeg;
    }
    rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, pData,
                             VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                             uSector, &cbGrain);
    if (RT_FAILURE(rc))
    {
        pExtent->uGrainSectorAbs = 0;
        AssertRC(rc);
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
    }
    pExtent->uLastGrainAccess = uGrain;
    pExtent->uAppendPosition += cbGrain;

    return rc;
}
6528
/**
 * Internal: Updates the grain table during grain allocation.
 *
 * May be called twice for the same allocation: if the grain table read goes
 * asynchronous, it returns VERR_VD_ASYNC_IO_IN_PROGRESS with
 * fGTUpdateNeeded set and is re-invoked from vmdkAllocGrainComplete() once
 * the read has finished.
 *
 * @returns VBox status code (VERR_VD_ASYNC_IO_IN_PROGRESS while transfers
 *          are outstanding).
 * @param   pImage      The VMDK image instance.
 * @param   pExtent     The extent the grain was allocated in.
 * @param   pIoCtx      The I/O context.
 * @param   pGrainAlloc The allocation tracking structure (carries GT/RGT
 *                      sectors, the grain offset and the pending-transfer
 *                      counter).
 */
static int vmdkAllocGrainGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
                                  PVMDKGRAINALLOCASYNC pGrainAlloc)
{
    int rc = VINF_SUCCESS;
    PVMDKGTCACHE pCache = pImage->pGTCache;
    uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    uint32_t uGTHash, uGTBlockIndex;
    uint64_t uGTSector, uRGTSector, uGTBlock;
    uint64_t uSector = pGrainAlloc->uSector;
    PVMDKGTCACHEENTRY pGTCacheEntry;

    LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
                 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));

    uGTSector = pGrainAlloc->uGTSector;
    uRGTSector = pGrainAlloc->uRGTSector;
    LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));

    /* Update the grain table (and the cache). */
    uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    pGTCacheEntry = &pCache->aGTCache[uGTHash];
    if (   pGTCacheEntry->uExtent != pExtent->uExtent
        || pGTCacheEntry->uGTBlock != uGTBlock)
    {
        /* Cache miss, fetch data from disk. */
        LogFlow(("Cache miss, fetch data from disk\n"));
        PVDMETAXFER pMetaXfer = NULL;
        rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                   VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                   aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
                                   &pMetaXfer, vmdkAllocGrainComplete, pGrainAlloc);
        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        {
            pGrainAlloc->cIoXfersPending++;
            pGrainAlloc->fGTUpdateNeeded = true;
            /* Leave early, we will be called again after the read completed. */
            LogFlowFunc(("Metadata read in progress, leaving\n"));
            return rc;
        }
        else if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
        vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
        pGTCacheEntry->uExtent = pExtent->uExtent;
        pGTCacheEntry->uGTBlock = uGTBlock;
        /* On-disk entries are little endian; cache host endian. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    }
    else
    {
        /* Cache hit. Convert grain table block back to disk format, otherwise
         * the code below will write garbage for all but the updated entry. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
    }
    pGrainAlloc->fGTUpdateNeeded = false;
    uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset));
    pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset);
    /* Update grain table on disk. */
    rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
                                vmdkAllocGrainComplete, pGrainAlloc);
    if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        pGrainAlloc->cIoXfersPending++;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
    if (pExtent->pRGD)
    {
        /* Update backup grain table on disk. */
        rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                    VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                    aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
                                    vmdkAllocGrainComplete, pGrainAlloc);
        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            pGrainAlloc->cIoXfersPending++;
        else if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
    }

    LogFlowFunc(("leaving rc=%Rrc\n", rc));
    return rc;
}
6616
6617/**
6618 * Internal - complete the grain allocation by updating disk grain table if required.
6619 */
6620static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
6621{
6622 RT_NOREF1(rcReq);
6623 int rc = VINF_SUCCESS;
6624 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6625 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
6626
6627 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
6628 pBackendData, pIoCtx, pvUser, rcReq));
6629
6630 pGrainAlloc->cIoXfersPending--;
6631 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
6632 rc = vmdkAllocGrainGTUpdate(pImage, pGrainAlloc->pExtent, pIoCtx, pGrainAlloc);
6633
6634 if (!pGrainAlloc->cIoXfersPending)
6635 {
6636 /* Grain allocation completed. */
6637 RTMemFree(pGrainAlloc);
6638 }
6639
6640 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
6641 return rc;
6642}
6643
6644/**
6645 * Internal. Allocates a new grain table (if necessary).
6646 */
6647static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
6648 uint64_t uSector, uint64_t cbWrite)
6649{
6650 PVMDKGTCACHE pCache = pImage->pGTCache; NOREF(pCache);
6651 uint64_t uGDIndex, uGTSector, uRGTSector;
6652 uint64_t uFileOffset;
6653 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
6654 int rc;
6655
6656 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
6657 pCache, pExtent, pIoCtx, uSector, cbWrite));
6658
6659 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
6660 if (!pGrainAlloc)
6661 return VERR_NO_MEMORY;
6662
6663 pGrainAlloc->pExtent = pExtent;
6664 pGrainAlloc->uSector = uSector;
6665
6666 uGDIndex = uSector / pExtent->cSectorsPerGDE;
6667 if (uGDIndex >= pExtent->cGDEntries)
6668 {
6669 RTMemFree(pGrainAlloc);
6670 return VERR_OUT_OF_RANGE;
6671 }
6672 uGTSector = pExtent->pGD[uGDIndex];
6673 if (pExtent->pRGD)
6674 uRGTSector = pExtent->pRGD[uGDIndex];
6675 else
6676 uRGTSector = 0; /**< avoid compiler warning */
6677 if (!uGTSector)
6678 {
6679 LogFlow(("Allocating new grain table\n"));
6680
6681 /* There is no grain table referenced by this grain directory
6682 * entry. So there is absolutely no data in this area. Allocate
6683 * a new grain table and put the reference to it in the GDs. */
6684 uFileOffset = pExtent->uAppendPosition;
6685 if (!uFileOffset)
6686 {
6687 RTMemFree(pGrainAlloc);
6688 return VERR_INTERNAL_ERROR;
6689 }
6690 Assert(!(uFileOffset % 512));
6691
6692 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
6693 uGTSector = VMDK_BYTE2SECTOR(uFileOffset);
6694
6695 /* Normally the grain table is preallocated for hosted sparse extents
6696 * that support more than 32 bit sector numbers. So this shouldn't
6697 * ever happen on a valid extent. */
6698 if (uGTSector > UINT32_MAX)
6699 {
6700 RTMemFree(pGrainAlloc);
6701 return VERR_VD_VMDK_INVALID_HEADER;
6702 }
6703
6704 /* Write grain table by writing the required number of grain table
6705 * cache chunks. Allocate memory dynamically here or we flood the
6706 * metadata cache with very small entries. */
6707 size_t cbGTDataTmp = pExtent->cGTEntries * sizeof(uint32_t);
6708 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);
6709
6710 if (!paGTDataTmp)
6711 {
6712 RTMemFree(pGrainAlloc);
6713 return VERR_NO_MEMORY;
6714 }
6715
6716 memset(paGTDataTmp, '\0', cbGTDataTmp);
6717 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6718 VMDK_SECTOR2BYTE(uGTSector),
6719 paGTDataTmp, cbGTDataTmp, pIoCtx,
6720 vmdkAllocGrainComplete, pGrainAlloc);
6721 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6722 pGrainAlloc->cIoXfersPending++;
6723 else if (RT_FAILURE(rc))
6724 {
6725 RTMemTmpFree(paGTDataTmp);
6726 RTMemFree(pGrainAlloc);
6727 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
6728 }
6729 pExtent->uAppendPosition = RT_ALIGN_64( pExtent->uAppendPosition
6730 + cbGTDataTmp, 512);
6731
6732 if (pExtent->pRGD)
6733 {
6734 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
6735 uFileOffset = pExtent->uAppendPosition;
6736 if (!uFileOffset)
6737 return VERR_INTERNAL_ERROR;
6738 Assert(!(uFileOffset % 512));
6739 uRGTSector = VMDK_BYTE2SECTOR(uFileOffset);
6740
6741 /* Normally the redundant grain table is preallocated for hosted
6742 * sparse extents that support more than 32 bit sector numbers. So
6743 * this shouldn't ever happen on a valid extent. */
6744 if (uRGTSector > UINT32_MAX)
6745 {
6746 RTMemTmpFree(paGTDataTmp);
6747 return VERR_VD_VMDK_INVALID_HEADER;
6748 }
6749
6750 /* Write grain table by writing the required number of grain table
6751 * cache chunks. Allocate memory dynamically here or we flood the
6752 * metadata cache with very small entries. */
6753 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6754 VMDK_SECTOR2BYTE(uRGTSector),
6755 paGTDataTmp, cbGTDataTmp, pIoCtx,
6756 vmdkAllocGrainComplete, pGrainAlloc);
6757 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6758 pGrainAlloc->cIoXfersPending++;
6759 else if (RT_FAILURE(rc))
6760 {
6761 RTMemTmpFree(paGTDataTmp);
6762 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
6763 }
6764
6765 pExtent->uAppendPosition = pExtent->uAppendPosition + cbGTDataTmp;
6766 }
6767
6768 RTMemTmpFree(paGTDataTmp);
6769
6770 /* Update the grain directory on disk (doing it before writing the
6771 * grain table will result in a garbled extent if the operation is
6772 * aborted for some reason. Otherwise the worst that can happen is
6773 * some unused sectors in the extent. */
6774 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
6775 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6776 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
6777 &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
6778 vmdkAllocGrainComplete, pGrainAlloc);
6779 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6780 pGrainAlloc->cIoXfersPending++;
6781 else if (RT_FAILURE(rc))
6782 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
6783 if (pExtent->pRGD)
6784 {
6785 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
6786 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6787 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
6788 &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
6789 vmdkAllocGrainComplete, pGrainAlloc);
6790 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6791 pGrainAlloc->cIoXfersPending++;
6792 else if (RT_FAILURE(rc))
6793 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
6794 }
6795
6796 /* As the final step update the in-memory copy of the GDs. */
6797 pExtent->pGD[uGDIndex] = uGTSector;
6798 if (pExtent->pRGD)
6799 pExtent->pRGD[uGDIndex] = uRGTSector;
6800 }
6801
6802 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
6803 pGrainAlloc->uGTSector = uGTSector;
6804 pGrainAlloc->uRGTSector = uRGTSector;
6805
6806 uFileOffset = pExtent->uAppendPosition;
6807 if (!uFileOffset)
6808 return VERR_INTERNAL_ERROR;
6809 Assert(!(uFileOffset % 512));
6810
6811 pGrainAlloc->uGrainOffset = uFileOffset;
6812
6813 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6814 {
6815 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6816 ("Accesses to stream optimized images must be synchronous\n"),
6817 VERR_INVALID_STATE);
6818
6819 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
6820 return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname);
6821
6822 /* Invalidate cache, just in case some code incorrectly allows mixing
6823 * of reads and writes. Normally shouldn't be needed. */
6824 pExtent->uGrainSectorAbs = 0;
6825
6826 /* Write compressed data block and the markers. */
6827 uint32_t cbGrain = 0;
6828 RTSGSEG Segment;
6829 unsigned cSegments = 1;
6830
6831 size_t cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
6832 &cSegments, cbWrite);
6833 Assert(cbSeg == cbWrite); RT_NOREF(cbSeg);
6834
6835 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset,
6836 Segment.pvSeg, cbWrite, uSector, &cbGrain);
6837 if (RT_FAILURE(rc))
6838 {
6839 AssertRC(rc);
6840 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
6841 }
6842 pExtent->uLastGrainAccess = uSector / pExtent->cSectorsPerGrain;
6843 pExtent->uAppendPosition += cbGrain;
6844 }
6845 else
6846 {
6847 /* Write the data. Always a full grain, or we're in big trouble. */
6848 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
6849 uFileOffset, pIoCtx, cbWrite,
6850 vmdkAllocGrainComplete, pGrainAlloc);
6851 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6852 pGrainAlloc->cIoXfersPending++;
6853 else if (RT_FAILURE(rc))
6854 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
6855
6856 pExtent->uAppendPosition += cbWrite;
6857 }
6858
6859 rc = vmdkAllocGrainGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);
6860
6861 if (!pGrainAlloc->cIoXfersPending)
6862 {
6863 /* Grain allocation completed. */
6864 RTMemFree(pGrainAlloc);
6865 }
6866
6867 LogFlowFunc(("leaving rc=%Rrc\n", rc));
6868
6869 return rc;
6870}
6871
/**
 * Internal. Reads the contents by sequentially going over the compressed
 * grains (hoping that they are in sequence).
 *
 * Stream optimized extents are consumed strictly forward and synchronously.
 * The extent keeps a single-grain decompression buffer (pvGrain) together
 * with bookkeeping state (uGrain, uGrainSectorAbs, cbGrainStreamRead) which
 * this function advances while scanning the marker stream.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_BLOCK_FREE if the next grain in the stream lies beyond
 *          the requested sector (i.e. the requested area holds no data).
 * @retval  VERR_VD_VMDK_INVALID_STATE on backward access, corrupt markers,
 *          or after a previous error (uGrainSectorAbs == 0 marks that).
 * @param   pImage      Image instance data.
 * @param   pExtent     The stream optimized extent to read from.
 * @param   uSector     Extent-relative sector to start reading from.
 * @param   pIoCtx      I/O context (must be synchronous) receiving the data.
 * @param   cbRead      Number of bytes to read.
 */
static int vmdkStreamReadSequential(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                    uint64_t uSector, PVDIOCTX pIoCtx,
                                    uint64_t cbRead)
{
    int rc;

    LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pIoCtx=%#p cbRead=%llu\n",
                 pImage, pExtent, uSector, pIoCtx, cbRead));

    AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
                    ("Async I/O not supported for sequential stream optimized images\n"),
                    VERR_INVALID_STATE);

    /* Do not allow to go back. */
    uint32_t uGrain = uSector / pExtent->cSectorsPerGrain;
    if (uGrain < pExtent->uLastGrainAccess)
        return VERR_VD_VMDK_INVALID_STATE;
    pExtent->uLastGrainAccess = uGrain;

    /* After a previous error do not attempt to recover, as it would need
     * seeking (in the general case backwards which is forbidden). */
    if (!pExtent->uGrainSectorAbs)
        return VERR_VD_VMDK_INVALID_STATE;

    /* Check if we need to read something from the image or if what we have
     * in the buffer is good to fulfill the request. */
    if (!pExtent->cbGrainStreamRead || uGrain > pExtent->uGrain)
    {
        /* First sector of the next marker, right behind the grain data that
         * was read last time (cbGrainStreamRead bytes of stream data). */
        uint32_t uGrainSectorAbs = pExtent->uGrainSectorAbs
                                 + VMDK_BYTE2SECTOR(pExtent->cbGrainStreamRead);

        /* Get the marker from the next data block - and skip everything which
         * is not a compressed grain. If it's a compressed grain which is for
         * the requested sector (or after), read it. */
        VMDKMARKER Marker;
        do
        {
            RT_ZERO(Marker);
            /* Read only the uSector/cbSize fields here; the marker type is
             * fetched separately below when cbSize indicates a non-grain marker. */
            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                       VMDK_SECTOR2BYTE(uGrainSectorAbs),
                                       &Marker, RT_UOFFSETOF(VMDKMARKER, uType));
            if (RT_FAILURE(rc))
                return rc;
            Marker.uSector = RT_LE2H_U64(Marker.uSector);
            Marker.cbSize = RT_LE2H_U32(Marker.cbSize);

            if (Marker.cbSize == 0)
            {
                /* A marker for something else than a compressed grain. */
                rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                           VMDK_SECTOR2BYTE(uGrainSectorAbs)
                                           + RT_UOFFSETOF(VMDKMARKER, uType),
                                           &Marker.uType, sizeof(Marker.uType));
                if (RT_FAILURE(rc))
                    return rc;
                Marker.uType = RT_LE2H_U32(Marker.uType);
                switch (Marker.uType)
                {
                    case VMDK_MARKER_EOS:
                        uGrainSectorAbs++;
                        /* Read (or mostly skip) to the end of file. Uses the
                         * Marker (LBA sector) as it is unused anyway. This
                         * makes sure that really everything is read in the
                         * success case. If this read fails it means the image
                         * is truncated, but this is harmless so ignore. */
                        vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                              VMDK_SECTOR2BYTE(uGrainSectorAbs)
                                              + 511,
                                              &Marker.uSector, 1);
                        break;
                    case VMDK_MARKER_GT:
                        /* Skip marker sector plus the grain table payload. */
                        uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
                        break;
                    case VMDK_MARKER_GD:
                        /* Skip marker sector plus the (sector padded) grain directory. */
                        uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(RT_ALIGN(pExtent->cGDEntries * sizeof(uint32_t), 512));
                        break;
                    case VMDK_MARKER_FOOTER:
                        uGrainSectorAbs += 2;
                        break;
                    case VMDK_MARKER_UNSPECIFIED:
                        /* Skip over the contents of the unspecified marker
                         * type 4 which exists in some vSphere created files. */
                        /** @todo figure out what the payload means. */
                        uGrainSectorAbs += 1;
                        break;
                    default:
                        AssertMsgFailed(("VMDK: corrupted marker, type=%#x\n", Marker.uType));
                        pExtent->uGrainSectorAbs = 0;
                        return VERR_VD_VMDK_INVALID_STATE;
                }
                pExtent->cbGrainStreamRead = 0;
            }
            else
            {
                /* A compressed grain marker. If it is at/after what we're
                 * interested in read and decompress data. */
                /* NOTE(review): the skip test below uses '>' rather than '>=';
                 * for uSector exactly one grain past Marker.uSector this grain
                 * is decompressed even though it precedes the request — confirm
                 * whether the later uGrain check fully compensates. */
                if (uSector > Marker.uSector + pExtent->cSectorsPerGrain)
                {
                    /* Stored grain size is padded up to full sectors. */
                    uGrainSectorAbs += VMDK_BYTE2SECTOR(RT_ALIGN(Marker.cbSize + RT_UOFFSETOF(VMDKMARKER, uType), 512));
                    continue;
                }
                uint64_t uLBA = 0;
                uint32_t cbGrainStreamRead = 0;
                rc = vmdkFileInflateSync(pImage, pExtent,
                                         VMDK_SECTOR2BYTE(uGrainSectorAbs),
                                         pExtent->pvGrain,
                                         VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                         &Marker, &uLBA, &cbGrainStreamRead);
                if (RT_FAILURE(rc))
                {
                    pExtent->uGrainSectorAbs = 0;
                    return rc;
                }
                /* Grain LBAs must be strictly increasing within the stream. */
                if (   pExtent->uGrain
                    && uLBA / pExtent->cSectorsPerGrain <= pExtent->uGrain)
                {
                    pExtent->uGrainSectorAbs = 0;
                    return VERR_VD_VMDK_INVALID_STATE;
                }
                pExtent->uGrain = uLBA / pExtent->cSectorsPerGrain;
                pExtent->cbGrainStreamRead = cbGrainStreamRead;
                break;
            }
        } while (Marker.uType != VMDK_MARKER_EOS);

        pExtent->uGrainSectorAbs = uGrainSectorAbs;

        if (!pExtent->cbGrainStreamRead && Marker.uType == VMDK_MARKER_EOS)
        {
            pExtent->uGrain = UINT32_MAX;
            /* Must set a non-zero value for pExtent->cbGrainStreamRead or
             * the next read would try to get more data, and we're at EOF. */
            pExtent->cbGrainStreamRead = 1;
        }
    }

    if (pExtent->uGrain > uSector / pExtent->cSectorsPerGrain)
    {
        /* The next data block we have is not for this area, so just return
         * that there is no data. */
        LogFlowFunc(("returns VERR_VD_BLOCK_FREE\n"));
        return VERR_VD_BLOCK_FREE;
    }

    /* Serve the request from the decompressed grain buffer. */
    uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain;
    vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
                         (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain),
                         cbRead);
    LogFlowFunc(("returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
7027
7028/**
7029 * Replaces a fragment of a string with the specified string.
7030 *
7031 * @returns Pointer to the allocated UTF-8 string.
7032 * @param pszWhere UTF-8 string to search in.
7033 * @param pszWhat UTF-8 string to search for.
7034 * @param pszByWhat UTF-8 string to replace the found string with.
7035 *
7036 * @note r=bird: This is only used by vmdkRenameWorker(). The first use is
7037 * for updating the base name in the descriptor, the second is for
7038 * generating new filenames for extents. This code borked when
7039 * RTPathAbs started correcting the driver letter case on windows,
7040 * when strstr failed because the pExtent->pszFullname was not
7041 * subjected to RTPathAbs but while pExtent->pszFullname was. I fixed
7042 * this by apply RTPathAbs to the places it wasn't applied.
7043 *
7044 * However, this highlights some undocumented ASSUMPTIONS as well as
7045 * terrible short commings of the approach.
7046 *
7047 * Given the right filename, it may also screw up the descriptor. Take
7048 * the descriptor text 'RW 2048 SPARSE "Test0.vmdk"' for instance,
7049 * we'll be asked to replace "Test0" with something, no problem. No,
7050 * imagine 'RW 2048 SPARSE "SPARSE.vmdk"', 'RW 2048 SPARSE "RW.vmdk"'
7051 * or 'RW 2048 SPARSE "2048.vmdk"', and the strstr approach falls on
7052 * its bum. The descriptor string must be parsed and reconstructed,
7053 * the lazy strstr approach doesn't cut it.
7054 *
7055 * I'm also curious as to what would be the correct escaping of '"' in
7056 * the file name and how that is supposed to be handled, because it
7057 * needs to be or such names must be rejected in several places (maybe
7058 * they are, I didn't check).
7059 *
7060 * When this function is used to replace the start of a path, I think
7061 * the assumption from the prep/setup code is that we kind of knows
7062 * what we're working on (I could be wrong). However, using strstr
7063 * instead of strncmp/RTStrNICmp makes no sense and isn't future proof.
7064 * Especially on unix systems, weird stuff could happen if someone
7065 * unwittingly tinkers with the prep/setup code. What should really be
7066 * done here is using a new RTPathStartEx function that (via flags)
7067 * allows matching partial final component and returns the length of
7068 * what it matched up (in case it skipped slashes and '.' components).
7069 *
7070 */
7071static char *vmdkStrReplace(const char *pszWhere, const char *pszWhat,
7072 const char *pszByWhat)
7073{
7074 AssertPtr(pszWhere);
7075 AssertPtr(pszWhat);
7076 AssertPtr(pszByWhat);
7077 const char *pszFoundStr = strstr(pszWhere, pszWhat);
7078 if (!pszFoundStr)
7079 {
7080 LogFlowFunc(("Failed to find '%s' in '%s'!\n", pszWhat, pszWhere));
7081 return NULL;
7082 }
7083 size_t cbFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
7084 char *pszNewStr = RTStrAlloc(cbFinal);
7085 if (pszNewStr)
7086 {
7087 char *pszTmp = pszNewStr;
7088 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
7089 pszTmp += pszFoundStr - pszWhere;
7090 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
7091 pszTmp += strlen(pszByWhat);
7092 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
7093 }
7094 return pszNewStr;
7095}
7096
7097
7098/** @copydoc VDIMAGEBACKEND::pfnProbe */
7099static DECLCALLBACK(int) vmdkProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
7100 PVDINTERFACE pVDIfsImage, VDTYPE enmDesiredType, VDTYPE *penmType)
7101{
7102 RT_NOREF(enmDesiredType);
7103 LogFlowFunc(("pszFilename=\"%s\" pVDIfsDisk=%#p pVDIfsImage=%#p penmType=%#p\n",
7104 pszFilename, pVDIfsDisk, pVDIfsImage, penmType));
7105 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7106 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7107
7108 int rc = VINF_SUCCESS;
7109 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
7110 if (RT_LIKELY(pImage))
7111 {
7112 pImage->pszFilename = pszFilename;
7113 pImage->pFile = NULL;
7114 pImage->pExtents = NULL;
7115 pImage->pFiles = NULL;
7116 pImage->pGTCache = NULL;
7117 pImage->pDescData = NULL;
7118 pImage->pVDIfsDisk = pVDIfsDisk;
7119 pImage->pVDIfsImage = pVDIfsImage;
7120 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
7121 * much as possible in vmdkOpenImage. */
7122 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
7123 vmdkFreeImage(pImage, false, false /*fFlush*/);
7124 RTMemFree(pImage);
7125
7126 if (RT_SUCCESS(rc))
7127 *penmType = VDTYPE_HDD;
7128 }
7129 else
7130 rc = VERR_NO_MEMORY;
7131
7132 LogFlowFunc(("returns %Rrc\n", rc));
7133 return rc;
7134}
7135
7136/** @copydoc VDIMAGEBACKEND::pfnOpen */
7137static DECLCALLBACK(int) vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
7138 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
7139 VDTYPE enmType, void **ppBackendData)
7140{
7141 RT_NOREF1(enmType); /**< @todo r=klaus make use of the type info. */
7142
7143 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p enmType=%u ppBackendData=%#p\n",
7144 pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, enmType, ppBackendData));
7145 int rc;
7146
7147 /* Check open flags. All valid flags are supported. */
7148 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
7149 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7150 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7151
7152
7153 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
7154 if (RT_LIKELY(pImage))
7155 {
7156 pImage->pszFilename = pszFilename;
7157 pImage->pFile = NULL;
7158 pImage->pExtents = NULL;
7159 pImage->pFiles = NULL;
7160 pImage->pGTCache = NULL;
7161 pImage->pDescData = NULL;
7162 pImage->pVDIfsDisk = pVDIfsDisk;
7163 pImage->pVDIfsImage = pVDIfsImage;
7164
7165 rc = vmdkOpenImage(pImage, uOpenFlags);
7166 if (RT_SUCCESS(rc))
7167 *ppBackendData = pImage;
7168 else
7169 RTMemFree(pImage);
7170 }
7171 else
7172 rc = VERR_NO_MEMORY;
7173
7174 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
7175 return rc;
7176}
7177
/** @copydoc VDIMAGEBACKEND::pfnCreate */
static DECLCALLBACK(int) vmdkCreate(const char *pszFilename, uint64_t cbSize,
                                    unsigned uImageFlags, const char *pszComment,
                                    PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
                                    PCRTUUID pUuid, unsigned uOpenFlags,
                                    unsigned uPercentStart, unsigned uPercentSpan,
                                    PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
                                    PVDINTERFACE pVDIfsOperation, VDTYPE enmType,
                                    void **ppBackendData)
{
    LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p enmType=%u ppBackendData=%#p\n",
                 pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData));
    int rc;

    /* Check the VD container type and image flags. */
    if (   enmType != VDTYPE_HDD
        || (uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0)
        return VERR_VD_INVALID_TYPE;

    /* Check size. Maximum 256TB-64K for sparse images, otherwise unlimited. */
    if (   !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
        && (   !cbSize
            || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K)))
        return VERR_VD_INVALID_SIZE;

    /* Check image flags for invalid combinations. */
    /* Stream optimized may only be combined with the DIFF flag. */
    if (   (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
        && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF)))
        return VERR_INVALID_PARAMETER;

    /* Check open flags. All valid flags are supported. */
    AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
    AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
    AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
    AssertPtrReturn(pPCHSGeometry, VERR_INVALID_POINTER);
    AssertPtrReturn(pLCHSGeometry, VERR_INVALID_POINTER);
    /* ESX variant requires a fixed size image. */
    AssertReturn(!(   uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
                   && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)),
                 VERR_INVALID_PARAMETER);

    /* Allocate the instance together with one region entry (RegionList). */
    PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
    if (RT_LIKELY(pImage))
    {
        PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);

        pImage->pszFilename = pszFilename;
        pImage->pFile = NULL;
        pImage->pExtents = NULL;
        pImage->pFiles = NULL;
        pImage->pGTCache = NULL;
        pImage->pDescData = NULL;
        pImage->pVDIfsDisk = pVDIfsDisk;
        pImage->pVDIfsImage = pVDIfsImage;
        /* Descriptors for split images can be pretty large, especially if the
         * filename is long. So prepare for the worst, and allocate quite some
         * memory for the descriptor in this case. */
        if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
            pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
        else
            pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
        pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
        if (RT_LIKELY(pImage->pDescData))
        {
            rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
                                 pPCHSGeometry, pLCHSGeometry, pUuid,
                                 pIfProgress, uPercentStart, uPercentSpan);
            if (RT_SUCCESS(rc))
            {
                /* So far the image is opened in read/write mode. Make sure the
                 * image is opened in read-only mode if the caller requested that. */
                if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
                {
                    vmdkFreeImage(pImage, false, true /*fFlush*/);
                    rc = vmdkOpenImage(pImage, uOpenFlags);
                }

                if (RT_SUCCESS(rc))
                    *ppBackendData = pImage;
            }

            /* On any failure release whatever descriptor buffer is current. */
            if (RT_FAILURE(rc))
                RTMemFree(pImage->pDescData);
        }
        else
            rc = VERR_NO_MEMORY;

        if (RT_FAILURE(rc))
            RTMemFree(pImage);
    }
    else
        rc = VERR_NO_MEMORY;

    /* NOTE(review): *ppBackendData is logged even on failure paths where it
     * was never written, i.e. indeterminate caller memory is read for the
     * log statement — harmless in practice but worth confirming. */
    LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
    return rc;
}
7273
/**
 * Prepares the state for renaming a VMDK image, setting up the state and allocating
 * memory.
 *
 * Everything recorded in @a pRenameState is released by
 * vmdkRenameStateDestroy(), which the caller must invoke regardless of the
 * return code (the AssertReturn paths below rely on that for cleanup).
 *
 * @returns VBox status code.
 * @param pImage VMDK image instance.
 * @param pRenameState The state to initialize.
 * @param pszFilename The new filename.
 */
static int vmdkRenameStatePrepare(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
{
    /* The new name must contain a filename component. */
    AssertReturn(RTPathFilename(pszFilename) != NULL, VERR_INVALID_PARAMETER);

    int rc = VINF_SUCCESS;

    memset(&pRenameState->DescriptorCopy, 0, sizeof(pRenameState->DescriptorCopy));

    /*
     * Allocate an array to store both old and new names of renamed files
     * in case we have to roll back the changes. Arrays are initialized
     * with zeros. We actually save stuff when and if we change it.
     */
    pRenameState->cExtents = pImage->cExtents;
    /* The extra (+ 1) slot is reserved for the descriptor file's names. */
    pRenameState->apszOldName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
    pRenameState->apszNewName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
    pRenameState->apszNewLines = (char **)RTMemTmpAllocZ(pRenameState->cExtents * sizeof(char *));
    if (   pRenameState->apszOldName
        && pRenameState->apszNewName
        && pRenameState->apszNewLines)
    {
        /* Save the descriptor size and position. */
        if (pImage->pDescData)
        {
            /* Separate descriptor file. */
            pRenameState->fEmbeddedDesc = false;
        }
        else
        {
            /* Embedded descriptor file. */
            pRenameState->ExtentCopy = pImage->pExtents[0];
            pRenameState->fEmbeddedDesc = true;
        }

        /* Save the descriptor content. */
        pRenameState->DescriptorCopy.cLines = pImage->Descriptor.cLines;
        for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
        {
            pRenameState->DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
            if (!pRenameState->DescriptorCopy.aLines[i])
            {
                rc = VERR_NO_MEMORY;
                break;
            }
        }

        if (RT_SUCCESS(rc))
        {
            /* Prepare both old and new base names used for string replacement. */
            pRenameState->pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
            AssertReturn(pRenameState->pszNewBaseName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszNewBaseName);

            pRenameState->pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
            AssertReturn(pRenameState->pszOldBaseName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszOldBaseName);

            /* Prepare both old and new full names used for string replacement.
               Note! Must abspath the stuff here, so the strstr weirdness later in
                     the renaming process get a match against abspath'ed extent paths.
                     See RTPathAbsDup call in vmdkDescriptorReadSparse(). */
            pRenameState->pszNewFullName = RTPathAbsDup(pszFilename);
            AssertReturn(pRenameState->pszNewFullName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszNewFullName);

            pRenameState->pszOldFullName = RTPathAbsDup(pImage->pszFilename);
            AssertReturn(pRenameState->pszOldFullName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszOldFullName);

            /* Save the old name for easy access to the old descriptor file. */
            pRenameState->pszOldDescName = RTStrDup(pImage->pszFilename);
            AssertReturn(pRenameState->pszOldDescName, VERR_NO_STR_MEMORY);

            /* Save old image name (pointer only, not duplicated). */
            pRenameState->pszOldImageName = pImage->pszFilename;
        }
    }
    else
        rc = VERR_NO_TMP_MEMORY;

    return rc;
}
7365
7366/**
7367 * Destroys the given rename state, freeing all allocated memory.
7368 *
7369 * @param pRenameState The rename state to destroy.
7370 */
7371static void vmdkRenameStateDestroy(PVMDKRENAMESTATE pRenameState)
7372{
7373 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
7374 if (pRenameState->DescriptorCopy.aLines[i])
7375 RTStrFree(pRenameState->DescriptorCopy.aLines[i]);
7376 if (pRenameState->apszOldName)
7377 {
7378 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
7379 if (pRenameState->apszOldName[i])
7380 RTStrFree(pRenameState->apszOldName[i]);
7381 RTMemTmpFree(pRenameState->apszOldName);
7382 }
7383 if (pRenameState->apszNewName)
7384 {
7385 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
7386 if (pRenameState->apszNewName[i])
7387 RTStrFree(pRenameState->apszNewName[i]);
7388 RTMemTmpFree(pRenameState->apszNewName);
7389 }
7390 if (pRenameState->apszNewLines)
7391 {
7392 for (unsigned i = 0; i < pRenameState->cExtents; i++)
7393 if (pRenameState->apszNewLines[i])
7394 RTStrFree(pRenameState->apszNewLines[i]);
7395 RTMemTmpFree(pRenameState->apszNewLines);
7396 }
7397 if (pRenameState->pszOldDescName)
7398 RTStrFree(pRenameState->pszOldDescName);
7399 if (pRenameState->pszOldBaseName)
7400 RTStrFree(pRenameState->pszOldBaseName);
7401 if (pRenameState->pszNewBaseName)
7402 RTStrFree(pRenameState->pszNewBaseName);
7403 if (pRenameState->pszOldFullName)
7404 RTStrFree(pRenameState->pszOldFullName);
7405 if (pRenameState->pszNewFullName)
7406 RTStrFree(pRenameState->pszNewFullName);
7407}
7408
/**
 * Rolls back the rename operation to the original state.
 *
 * Moves any renamed files back to their recorded old names, restores the
 * saved descriptor under the original descriptor file name, and finally
 * re-opens the image under its original name. Best effort: individual
 * failures are asserted but do not abort the rollback.
 *
 * @returns VBox status code.
 * @param pImage VMDK image instance.
 * @param pRenameState The rename state.
 */
static int vmdkRenameRollback(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState)
{
    int rc = VINF_SUCCESS;

    if (!pRenameState->fImageFreed)
    {
        /*
         * Some extents may have been closed, close the rest. We will
         * re-open the whole thing later.
         */
        vmdkFreeImage(pImage, false, true /*fFlush*/);
    }

    /* Rename files back. Only entries for which an old name was recorded
     * were actually moved by the worker. */
    for (unsigned i = 0; i <= pRenameState->cExtents; i++)
    {
        if (pRenameState->apszOldName[i])
        {
            rc = vdIfIoIntFileMove(pImage->pIfIo, pRenameState->apszNewName[i], pRenameState->apszOldName[i], 0);
            AssertRC(rc);
        }
    }
    /* Restore the old descriptor. */
    PVMDKFILE pFile;
    rc = vmdkFileOpen(pImage, &pFile, NULL, pRenameState->pszOldDescName,
                      VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_NORMAL,
                                                 false /* fCreate */));
    AssertRC(rc);
    if (pRenameState->fEmbeddedDesc)
    {
        /* Descriptor lives inside the first extent; temporarily implant the
         * saved extent copy so vmdkWriteDescriptor() has a target. */
        pRenameState->ExtentCopy.pFile = pFile;
        pImage->pExtents = &pRenameState->ExtentCopy;
    }
    else
    {
        /* Shouldn't be null for separate descriptor.
         * There will be no access to the actual content.
         */
        pImage->pDescData = pRenameState->pszOldDescName;
        pImage->pFile = pFile;
    }
    pImage->Descriptor = pRenameState->DescriptorCopy;
    vmdkWriteDescriptor(pImage, NULL);
    vmdkFileClose(pImage, &pFile, false);
    /* Get rid of the stuff we implanted. */
    pImage->pExtents = NULL;
    pImage->pFile = NULL;
    pImage->pDescData = NULL;
    /* Re-open the image back. */
    pImage->pszFilename = pRenameState->pszOldImageName;
    rc = vmdkOpenImage(pImage, pImage->uOpenFlags);

    return rc;
}
7470
7471/**
7472 * Rename worker doing the real work.
7473 *
7474 * @returns VBox status code.
7475 * @param pImage VMDK image instance.
7476 * @param pRenameState The rename state.
7477 * @param pszFilename The new filename.
7478 */
7479static int vmdkRenameWorker(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
7480{
7481 int rc = VINF_SUCCESS;
7482 unsigned i, line;
7483
7484 /* Update the descriptor with modified extent names. */
7485 for (i = 0, line = pImage->Descriptor.uFirstExtent;
7486 i < pRenameState->cExtents;
7487 i++, line = pImage->Descriptor.aNextLines[line])
7488 {
7489 /* Update the descriptor. */
7490 pRenameState->apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
7491 pRenameState->pszOldBaseName,
7492 pRenameState->pszNewBaseName);
7493 if (!pRenameState->apszNewLines[i])
7494 {
7495 rc = VERR_NO_MEMORY;
7496 break;
7497 }
7498 pImage->Descriptor.aLines[line] = pRenameState->apszNewLines[i];
7499 }
7500
7501 if (RT_SUCCESS(rc))
7502 {
7503 /* Make sure the descriptor gets written back. */
7504 pImage->Descriptor.fDirty = true;
7505 /* Flush the descriptor now, in case it is embedded. */
7506 vmdkFlushImage(pImage, NULL);
7507
7508 /* Close and rename/move extents. */
7509 for (i = 0; i < pRenameState->cExtents; i++)
7510 {
7511 PVMDKEXTENT pExtent = &pImage->pExtents[i];
7512 /* Compose new name for the extent. */
7513 pRenameState->apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
7514 pRenameState->pszOldFullName,
7515 pRenameState->pszNewFullName);
7516 if (!pRenameState->apszNewName[i])
7517 {
7518 rc = VERR_NO_MEMORY;
7519 break;
7520 }
7521 /* Close the extent file. */
7522 rc = vmdkFileClose(pImage, &pExtent->pFile, false);
7523 if (RT_FAILURE(rc))
7524 break;;
7525
7526 /* Rename the extent file. */
7527 rc = vdIfIoIntFileMove(pImage->pIfIo, pExtent->pszFullname, pRenameState->apszNewName[i], 0);
7528 if (RT_FAILURE(rc))
7529 break;
7530 /* Remember the old name. */
7531 pRenameState->apszOldName[i] = RTStrDup(pExtent->pszFullname);
7532 }
7533
7534 if (RT_SUCCESS(rc))
7535 {
7536 /* Release all old stuff. */
7537 rc = vmdkFreeImage(pImage, false, true /*fFlush*/);
7538 if (RT_SUCCESS(rc))
7539 {
7540 pRenameState->fImageFreed = true;
7541
7542 /* Last elements of new/old name arrays are intended for
7543 * storing descriptor's names.
7544 */
7545 pRenameState->apszNewName[pRenameState->cExtents] = RTStrDup(pszFilename);
7546 /* Rename the descriptor file if it's separate. */
7547 if (!pRenameState->fEmbeddedDesc)
7548 {
7549 rc = vdIfIoIntFileMove(pImage->pIfIo, pImage->pszFilename, pRenameState->apszNewName[pRenameState->cExtents], 0);
7550 if (RT_SUCCESS(rc))
7551 {
7552 /* Save old name only if we may need to change it back. */
7553 pRenameState->apszOldName[pRenameState->cExtents] = RTStrDup(pszFilename);
7554 }
7555 }
7556
7557 /* Update pImage with the new information. */
7558 pImage->pszFilename = pszFilename;
7559
7560 /* Open the new image. */
7561 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
7562 }
7563 }
7564 }
7565
7566 return rc;
7567}
7568
7569/** @copydoc VDIMAGEBACKEND::pfnRename */
7570static DECLCALLBACK(int) vmdkRename(void *pBackendData, const char *pszFilename)
7571{
7572 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
7573
7574 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7575 VMDKRENAMESTATE RenameState;
7576
7577 memset(&RenameState, 0, sizeof(RenameState));
7578
7579 /* Check arguments. */
7580 AssertPtrReturn(pImage, VERR_INVALID_POINTER);
7581 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7582 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7583 AssertReturn(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK), VERR_INVALID_PARAMETER);
7584
7585 int rc = vmdkRenameStatePrepare(pImage, &RenameState, pszFilename);
7586 if (RT_SUCCESS(rc))
7587 {
7588 /* --- Up to this point we have not done any damage yet. --- */
7589
7590 rc = vmdkRenameWorker(pImage, &RenameState, pszFilename);
7591 /* Roll back all changes in case of failure. */
7592 if (RT_FAILURE(rc))
7593 {
7594 int rrc = vmdkRenameRollback(pImage, &RenameState);
7595 AssertRC(rrc);
7596 }
7597 }
7598
7599 vmdkRenameStateDestroy(&RenameState);
7600 LogFlowFunc(("returns %Rrc\n", rc));
7601 return rc;
7602}
7603
7604/** @copydoc VDIMAGEBACKEND::pfnClose */
7605static DECLCALLBACK(int) vmdkClose(void *pBackendData, bool fDelete)
7606{
7607 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
7608 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7609
7610 int rc = vmdkFreeImage(pImage, fDelete, true /*fFlush*/);
7611 RTMemFree(pImage);
7612
7613 LogFlowFunc(("returns %Rrc\n", rc));
7614 return rc;
7615}
7616
/** @copydoc VDIMAGEBACKEND::pfnRead */
static DECLCALLBACK(int) vmdkRead(void *pBackendData, uint64_t uOffset, size_t cbToRead,
                                  PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToRead % 512 == 0);
    AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER);
    AssertReturn(cbToRead, VERR_INVALID_PARAMETER);
    AssertReturn(uOffset + cbToRead <= pImage->cbSize, VERR_INVALID_PARAMETER);

    /* Find the extent and check access permissions as defined in the extent descriptor. */
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;
    int rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                            &pExtent, &uSectorExtentRel);
    if (   RT_SUCCESS(rc)
        && pExtent->enmAccess != VMDKACCESS_NOACCESS)
    {
        /* Clip read range to remain in this extent. */
        cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

        /* Handle the read according to the current extent type. */
        switch (pExtent->enmType)
        {
            case VMDKETYPE_HOSTED_SPARSE:
            {
                uint64_t uSectorExtentAbs;

                /* Translate the extent-relative sector into the absolute
                 * position in the extent file; 0 means an unallocated grain. */
                rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
                if (RT_FAILURE(rc))
                    break;
                /* Clip read range to at most the rest of the grain. */
                cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
                Assert(!(cbToRead % 512));
                if (uSectorExtentAbs == 0)
                {
                    /* Unallocated grain: normally report a free block; for a
                     * read-only, sequentially accessed stream optimized image
                     * try scanning forward in the compressed grain stream. */
                    if (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                        || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                        || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
                        rc = VERR_VD_BLOCK_FREE;
                    else
                        rc = vmdkStreamReadSequential(pImage, pExtent,
                                                      uSectorExtentRel,
                                                      pIoCtx, cbToRead);
                }
                else
                {
                    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    {
                        AssertMsg(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
                                  ("Async I/O is not supported for stream optimized VMDK's\n"));

                        /* Decompress the whole grain into the extent's cache
                         * buffer, unless the wanted grain is already cached. */
                        uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                        uSectorExtentAbs -= uSectorInGrain;
                        if (pExtent->uGrainSectorAbs != uSectorExtentAbs)
                        {
                            uint64_t uLBA = 0; /* gcc maybe uninitialized */
                            rc = vmdkFileInflateSync(pImage, pExtent,
                                                     VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                     pExtent->pvGrain,
                                                     VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                                     NULL, &uLBA, NULL);
                            if (RT_FAILURE(rc))
                            {
                                /* Invalidate the grain cache on error. */
                                pExtent->uGrainSectorAbs = 0;
                                break;
                            }
                            pExtent->uGrainSectorAbs = uSectorExtentAbs;
                            pExtent->uGrain = uSectorExtentRel / pExtent->cSectorsPerGrain;
                            Assert(uLBA == uSectorExtentRel);
                        }
                        vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
                                             (uint8_t *)pExtent->pvGrain
                                             + VMDK_SECTOR2BYTE(uSectorInGrain),
                                             cbToRead);
                    }
                    else
                        rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                   VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                   pIoCtx, cbToRead);
                }
                break;
            }
            case VMDKETYPE_VMFS:
            case VMDKETYPE_FLAT:
                /* Flat extents map 1:1 onto the backing file. */
                rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                           VMDK_SECTOR2BYTE(uSectorExtentRel),
                                           pIoCtx, cbToRead);
                break;
            case VMDKETYPE_ZERO:
            {
                /* Zero extents yield zero-filled data without any file I/O. */
                size_t cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbToRead);
                Assert(cbSet == cbToRead); RT_NOREF(cbSet);
                break;
            }
        }
        if (pcbActuallyRead)
            *pcbActuallyRead = cbToRead;
    }
    else if (RT_SUCCESS(rc))
        rc = VERR_VD_VMDK_INVALID_STATE;

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7727
7728/** @copydoc VDIMAGEBACKEND::pfnWrite */
7729static DECLCALLBACK(int) vmdkWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite,
7730 PVDIOCTX pIoCtx, size_t *pcbWriteProcess, size_t *pcbPreRead,
7731 size_t *pcbPostRead, unsigned fWrite)
7732{
7733 LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
7734 pBackendData, uOffset, pIoCtx, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
7735 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7736 int rc;
7737
7738 AssertPtr(pImage);
7739 Assert(uOffset % 512 == 0);
7740 Assert(cbToWrite % 512 == 0);
7741 AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER);
7742 AssertReturn(cbToWrite, VERR_INVALID_PARAMETER);
7743
7744 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7745 {
7746 PVMDKEXTENT pExtent;
7747 uint64_t uSectorExtentRel;
7748 uint64_t uSectorExtentAbs;
7749
7750 /* No size check here, will do that later when the extent is located.
7751 * There are sparse images out there which according to the spec are
7752 * invalid, because the total size is not a multiple of the grain size.
7753 * Also for sparse images which are stitched together in odd ways (not at
7754 * grain boundaries, and with the nominal size not being a multiple of the
7755 * grain size), this would prevent writing to the last grain. */
7756
7757 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
7758 &pExtent, &uSectorExtentRel);
7759 if (RT_SUCCESS(rc))
7760 {
7761 if ( pExtent->enmAccess != VMDKACCESS_READWRITE
7762 && ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7763 && !pImage->pExtents[0].uAppendPosition
7764 && pExtent->enmAccess != VMDKACCESS_READONLY))
7765 rc = VERR_VD_VMDK_INVALID_STATE;
7766 else
7767 {
7768 /* Handle the write according to the current extent type. */
7769 switch (pExtent->enmType)
7770 {
7771 case VMDKETYPE_HOSTED_SPARSE:
7772 rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
7773 if (RT_SUCCESS(rc))
7774 {
7775 if ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
7776 && uSectorExtentRel < (uint64_t)pExtent->uLastGrainAccess * pExtent->cSectorsPerGrain)
7777 rc = VERR_VD_VMDK_INVALID_WRITE;
7778 else
7779 {
7780 /* Clip write range to at most the rest of the grain. */
7781 cbToWrite = RT_MIN(cbToWrite,
7782 VMDK_SECTOR2BYTE( pExtent->cSectorsPerGrain
7783 - uSectorExtentRel % pExtent->cSectorsPerGrain));
7784 if (uSectorExtentAbs == 0)
7785 {
7786 if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7787 {
7788 if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
7789 {
7790 /* Full block write to a previously unallocated block.
7791 * Check if the caller wants to avoid the automatic alloc. */
7792 if (!(fWrite & VD_WRITE_NO_ALLOC))
7793 {
7794 /* Allocate GT and find out where to store the grain. */
7795 rc = vmdkAllocGrain(pImage, pExtent, pIoCtx,
7796 uSectorExtentRel, cbToWrite);
7797 }
7798 else
7799 rc = VERR_VD_BLOCK_FREE;
7800 *pcbPreRead = 0;
7801 *pcbPostRead = 0;
7802 }
7803 else
7804 {
7805 /* Clip write range to remain in this extent. */
7806 cbToWrite = RT_MIN(cbToWrite,
7807 VMDK_SECTOR2BYTE( pExtent->uSectorOffset
7808 + pExtent->cNominalSectors - uSectorExtentRel));
7809 *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
7810 *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
7811 rc = VERR_VD_BLOCK_FREE;
7812 }
7813 }
7814 else
7815 rc = vmdkStreamAllocGrain(pImage, pExtent, uSectorExtentRel,
7816 pIoCtx, cbToWrite);
7817 }
7818 else
7819 {
7820 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7821 {
7822 /* A partial write to a streamOptimized image is simply
7823 * invalid. It requires rewriting already compressed data
7824 * which is somewhere between expensive and impossible. */
7825 rc = VERR_VD_VMDK_INVALID_STATE;
7826 pExtent->uGrainSectorAbs = 0;
7827 AssertRC(rc);
7828 }
7829 else
7830 {
7831 Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
7832 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
7833 VMDK_SECTOR2BYTE(uSectorExtentAbs),
7834 pIoCtx, cbToWrite, NULL, NULL);
7835 }
7836 }
7837 }
7838 }
7839 break;
7840 case VMDKETYPE_VMFS:
7841 case VMDKETYPE_FLAT:
7842 /* Clip write range to remain in this extent. */
7843 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
7844 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
7845 VMDK_SECTOR2BYTE(uSectorExtentRel),
7846 pIoCtx, cbToWrite, NULL, NULL);
7847 break;
7848 case VMDKETYPE_ZERO:
7849 /* Clip write range to remain in this extent. */
7850 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
7851 break;
7852 }
7853 }
7854
7855 if (pcbWriteProcess)
7856 *pcbWriteProcess = cbToWrite;
7857 }
7858 }
7859 else
7860 rc = VERR_VD_IMAGE_READ_ONLY;
7861
7862 LogFlowFunc(("returns %Rrc\n", rc));
7863 return rc;
7864}
7865
7866/** @copydoc VDIMAGEBACKEND::pfnFlush */
7867static DECLCALLBACK(int) vmdkFlush(void *pBackendData, PVDIOCTX pIoCtx)
7868{
7869 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7870
7871 return vmdkFlushImage(pImage, pIoCtx);
7872}
7873
7874/** @copydoc VDIMAGEBACKEND::pfnGetVersion */
7875static DECLCALLBACK(unsigned) vmdkGetVersion(void *pBackendData)
7876{
7877 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7878 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7879
7880 AssertPtrReturn(pImage, 0);
7881
7882 return VMDK_IMAGE_VERSION;
7883}
7884
7885/** @copydoc VDIMAGEBACKEND::pfnGetFileSize */
7886static DECLCALLBACK(uint64_t) vmdkGetFileSize(void *pBackendData)
7887{
7888 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7889 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7890 uint64_t cb = 0;
7891
7892 AssertPtrReturn(pImage, 0);
7893
7894 if (pImage->pFile != NULL)
7895 {
7896 uint64_t cbFile;
7897 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pFile->pStorage, &cbFile);
7898 if (RT_SUCCESS(rc))
7899 cb += cbFile;
7900 }
7901 for (unsigned i = 0; i < pImage->cExtents; i++)
7902 {
7903 if (pImage->pExtents[i].pFile != NULL)
7904 {
7905 uint64_t cbFile;
7906 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pExtents[i].pFile->pStorage, &cbFile);
7907 if (RT_SUCCESS(rc))
7908 cb += cbFile;
7909 }
7910 }
7911
7912 LogFlowFunc(("returns %lld\n", cb));
7913 return cb;
7914}
7915
7916/** @copydoc VDIMAGEBACKEND::pfnGetPCHSGeometry */
7917static DECLCALLBACK(int) vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry)
7918{
7919 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
7920 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7921 int rc = VINF_SUCCESS;
7922
7923 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7924
7925 if (pImage->PCHSGeometry.cCylinders)
7926 *pPCHSGeometry = pImage->PCHSGeometry;
7927 else
7928 rc = VERR_VD_GEOMETRY_NOT_SET;
7929
7930 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7931 return rc;
7932}
7933
7934/** @copydoc VDIMAGEBACKEND::pfnSetPCHSGeometry */
7935static DECLCALLBACK(int) vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
7936{
7937 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n",
7938 pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7939 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7940 int rc = VINF_SUCCESS;
7941
7942 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7943
7944 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7945 {
7946 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7947 {
7948 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
7949 if (RT_SUCCESS(rc))
7950 pImage->PCHSGeometry = *pPCHSGeometry;
7951 }
7952 else
7953 rc = VERR_NOT_SUPPORTED;
7954 }
7955 else
7956 rc = VERR_VD_IMAGE_READ_ONLY;
7957
7958 LogFlowFunc(("returns %Rrc\n", rc));
7959 return rc;
7960}
7961
7962/** @copydoc VDIMAGEBACKEND::pfnGetLCHSGeometry */
7963static DECLCALLBACK(int) vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry)
7964{
7965 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
7966 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7967 int rc = VINF_SUCCESS;
7968
7969 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7970
7971 if (pImage->LCHSGeometry.cCylinders)
7972 *pLCHSGeometry = pImage->LCHSGeometry;
7973 else
7974 rc = VERR_VD_GEOMETRY_NOT_SET;
7975
7976 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7977 return rc;
7978}
7979
7980/** @copydoc VDIMAGEBACKEND::pfnSetLCHSGeometry */
7981static DECLCALLBACK(int) vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
7982{
7983 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n",
7984 pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7985 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7986 int rc = VINF_SUCCESS;
7987
7988 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7989
7990 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7991 {
7992 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7993 {
7994 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
7995 if (RT_SUCCESS(rc))
7996 pImage->LCHSGeometry = *pLCHSGeometry;
7997 }
7998 else
7999 rc = VERR_NOT_SUPPORTED;
8000 }
8001 else
8002 rc = VERR_VD_IMAGE_READ_ONLY;
8003
8004 LogFlowFunc(("returns %Rrc\n", rc));
8005 return rc;
8006}
8007
8008/** @copydoc VDIMAGEBACKEND::pfnQueryRegions */
8009static DECLCALLBACK(int) vmdkQueryRegions(void *pBackendData, PCVDREGIONLIST *ppRegionList)
8010{
8011 LogFlowFunc(("pBackendData=%#p ppRegionList=%#p\n", pBackendData, ppRegionList));
8012 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
8013
8014 AssertPtrReturn(pThis, VERR_VD_NOT_OPENED);
8015
8016 *ppRegionList = &pThis->RegionList;
8017 LogFlowFunc(("returns %Rrc\n", VINF_SUCCESS));
8018 return VINF_SUCCESS;
8019}
8020
8021/** @copydoc VDIMAGEBACKEND::pfnRegionListRelease */
8022static DECLCALLBACK(void) vmdkRegionListRelease(void *pBackendData, PCVDREGIONLIST pRegionList)
8023{
8024 RT_NOREF1(pRegionList);
8025 LogFlowFunc(("pBackendData=%#p pRegionList=%#p\n", pBackendData, pRegionList));
8026 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
8027 AssertPtr(pThis); RT_NOREF(pThis);
8028
8029 /* Nothing to do here. */
8030}
8031
8032/** @copydoc VDIMAGEBACKEND::pfnGetImageFlags */
8033static DECLCALLBACK(unsigned) vmdkGetImageFlags(void *pBackendData)
8034{
8035 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
8036 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8037
8038 AssertPtrReturn(pImage, 0);
8039
8040 LogFlowFunc(("returns %#x\n", pImage->uImageFlags));
8041 return pImage->uImageFlags;
8042}
8043
8044/** @copydoc VDIMAGEBACKEND::pfnGetOpenFlags */
8045static DECLCALLBACK(unsigned) vmdkGetOpenFlags(void *pBackendData)
8046{
8047 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
8048 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8049
8050 AssertPtrReturn(pImage, 0);
8051
8052 LogFlowFunc(("returns %#x\n", pImage->uOpenFlags));
8053 return pImage->uOpenFlags;
8054}
8055
8056/** @copydoc VDIMAGEBACKEND::pfnSetOpenFlags */
8057static DECLCALLBACK(int) vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
8058{
8059 LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
8060 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8061 int rc;
8062
8063 /* Image must be opened and the new flags must be valid. */
8064 if (!pImage || (uOpenFlags & ~( VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO
8065 | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE
8066 | VD_OPEN_FLAGS_SEQUENTIAL | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)))
8067 rc = VERR_INVALID_PARAMETER;
8068 else
8069 {
8070 /* StreamOptimized images need special treatment: reopen is prohibited. */
8071 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
8072 {
8073 if (pImage->uOpenFlags == uOpenFlags)
8074 rc = VINF_SUCCESS;
8075 else
8076 rc = VERR_INVALID_PARAMETER;
8077 }
8078 else
8079 {
8080 /* Implement this operation via reopening the image. */
8081 vmdkFreeImage(pImage, false, true /*fFlush*/);
8082 rc = vmdkOpenImage(pImage, uOpenFlags);
8083 }
8084 }
8085
8086 LogFlowFunc(("returns %Rrc\n", rc));
8087 return rc;
8088}
8089
8090/** @copydoc VDIMAGEBACKEND::pfnGetComment */
8091static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, size_t cbComment)
8092{
8093 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
8094 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8095
8096 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8097
8098 char *pszCommentEncoded = NULL;
8099 int rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
8100 "ddb.comment", &pszCommentEncoded);
8101 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
8102 {
8103 pszCommentEncoded = NULL;
8104 rc = VINF_SUCCESS;
8105 }
8106
8107 if (RT_SUCCESS(rc))
8108 {
8109 if (pszComment && pszCommentEncoded)
8110 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
8111 else if (pszComment)
8112 *pszComment = '\0';
8113
8114 if (pszCommentEncoded)
8115 RTMemTmpFree(pszCommentEncoded);
8116 }
8117
8118 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
8119 return rc;
8120}
8121
8122/** @copydoc VDIMAGEBACKEND::pfnSetComment */
8123static DECLCALLBACK(int) vmdkSetComment(void *pBackendData, const char *pszComment)
8124{
8125 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
8126 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8127 int rc;
8128
8129 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8130
8131 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8132 {
8133 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8134 rc = vmdkSetImageComment(pImage, pszComment);
8135 else
8136 rc = VERR_NOT_SUPPORTED;
8137 }
8138 else
8139 rc = VERR_VD_IMAGE_READ_ONLY;
8140
8141 LogFlowFunc(("returns %Rrc\n", rc));
8142 return rc;
8143}
8144
8145/** @copydoc VDIMAGEBACKEND::pfnGetUuid */
8146static DECLCALLBACK(int) vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
8147{
8148 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8149 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8150
8151 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8152
8153 *pUuid = pImage->ImageUuid;
8154
8155 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8156 return VINF_SUCCESS;
8157}
8158
8159/** @copydoc VDIMAGEBACKEND::pfnSetUuid */
8160static DECLCALLBACK(int) vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
8161{
8162 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8163 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8164 int rc = VINF_SUCCESS;
8165
8166 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8167
8168 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8169 {
8170 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8171 {
8172 pImage->ImageUuid = *pUuid;
8173 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8174 VMDK_DDB_IMAGE_UUID, pUuid);
8175 if (RT_FAILURE(rc))
8176 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
8177 N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
8178 }
8179 else
8180 rc = VERR_NOT_SUPPORTED;
8181 }
8182 else
8183 rc = VERR_VD_IMAGE_READ_ONLY;
8184
8185 LogFlowFunc(("returns %Rrc\n", rc));
8186 return rc;
8187}
8188
8189/** @copydoc VDIMAGEBACKEND::pfnGetModificationUuid */
8190static DECLCALLBACK(int) vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
8191{
8192 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8193 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8194
8195 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8196
8197 *pUuid = pImage->ModificationUuid;
8198
8199 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8200 return VINF_SUCCESS;
8201}
8202
8203/** @copydoc VDIMAGEBACKEND::pfnSetModificationUuid */
8204static DECLCALLBACK(int) vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
8205{
8206 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8207 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8208 int rc = VINF_SUCCESS;
8209
8210 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8211
8212 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8213 {
8214 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8215 {
8216 /* Only touch the modification uuid if it changed. */
8217 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
8218 {
8219 pImage->ModificationUuid = *pUuid;
8220 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8221 VMDK_DDB_MODIFICATION_UUID, pUuid);
8222 if (RT_FAILURE(rc))
8223 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
8224 }
8225 }
8226 else
8227 rc = VERR_NOT_SUPPORTED;
8228 }
8229 else
8230 rc = VERR_VD_IMAGE_READ_ONLY;
8231
8232 LogFlowFunc(("returns %Rrc\n", rc));
8233 return rc;
8234}
8235
8236/** @copydoc VDIMAGEBACKEND::pfnGetParentUuid */
8237static DECLCALLBACK(int) vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
8238{
8239 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8240 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8241
8242 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8243
8244 *pUuid = pImage->ParentUuid;
8245
8246 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8247 return VINF_SUCCESS;
8248}
8249
8250/** @copydoc VDIMAGEBACKEND::pfnSetParentUuid */
8251static DECLCALLBACK(int) vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
8252{
8253 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8254 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8255 int rc = VINF_SUCCESS;
8256
8257 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8258
8259 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8260 {
8261 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8262 {
8263 pImage->ParentUuid = *pUuid;
8264 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8265 VMDK_DDB_PARENT_UUID, pUuid);
8266 if (RT_FAILURE(rc))
8267 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
8268 N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
8269 }
8270 else
8271 rc = VERR_NOT_SUPPORTED;
8272 }
8273 else
8274 rc = VERR_VD_IMAGE_READ_ONLY;
8275
8276 LogFlowFunc(("returns %Rrc\n", rc));
8277 return rc;
8278}
8279
8280/** @copydoc VDIMAGEBACKEND::pfnGetParentModificationUuid */
8281static DECLCALLBACK(int) vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
8282{
8283 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8284 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8285
8286 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8287
8288 *pUuid = pImage->ParentModificationUuid;
8289
8290 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8291 return VINF_SUCCESS;
8292}
8293
8294/** @copydoc VDIMAGEBACKEND::pfnSetParentModificationUuid */
8295static DECLCALLBACK(int) vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
8296{
8297 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8298 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8299 int rc = VINF_SUCCESS;
8300
8301 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8302
8303 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8304 {
8305 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8306 {
8307 pImage->ParentModificationUuid = *pUuid;
8308 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8309 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
8310 if (RT_FAILURE(rc))
8311 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
8312 }
8313 else
8314 rc = VERR_NOT_SUPPORTED;
8315 }
8316 else
8317 rc = VERR_VD_IMAGE_READ_ONLY;
8318
8319 LogFlowFunc(("returns %Rrc\n", rc));
8320 return rc;
8321}
8322
8323/** @copydoc VDIMAGEBACKEND::pfnDump */
8324static DECLCALLBACK(void) vmdkDump(void *pBackendData)
8325{
8326 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8327
8328 AssertPtrReturnVoid(pImage);
8329 vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
8330 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
8331 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
8332 VMDK_BYTE2SECTOR(pImage->cbSize));
8333 vdIfErrorMessage(pImage->pIfError, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
8334 vdIfErrorMessage(pImage->pIfError, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
8335 vdIfErrorMessage(pImage->pIfError, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
8336 vdIfErrorMessage(pImage->pIfError, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
8337}
8338
8339
8340/**
8341 * Returns the size, in bytes, of the sparse extent overhead for
8342 * the number of desired total sectors and based on the current
8343 * sectors of the extent.
8344 *
8345 * @returns uint64_t size of new overhead in bytes.
8346 * @param pExtent VMDK extent instance.
8347 * @param cSectorsNew Number of desired total sectors.
8348 */
8349static uint64_t vmdkGetNewOverhead(PVMDKEXTENT pExtent, uint64_t cSectorsNew)
8350{
8351 uint64_t cNewDirEntries = cSectorsNew / pExtent->cSectorsPerGDE;
8352 if (cSectorsNew % pExtent->cSectorsPerGDE)
8353 cNewDirEntries++;
8354
8355 size_t cbNewGD = cNewDirEntries * sizeof(uint32_t);
8356 uint64_t cbNewDirSize = RT_ALIGN_64(cbNewGD, 512);
8357 uint64_t cbNewAllTablesSize = RT_ALIGN_64(cNewDirEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
8358 uint64_t cbNewOverhead = RT_ALIGN_Z(RT_MAX(pExtent->uDescriptorSector
8359 + pExtent->cDescriptorSectors, 1)
8360 + cbNewDirSize + cbNewAllTablesSize, 512);
8361 cbNewOverhead += cbNewDirSize + cbNewAllTablesSize;
8362 cbNewOverhead = RT_ALIGN_64(cbNewOverhead,
8363 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
8364
8365 return cbNewOverhead;
8366}
8367
8368/**
8369 * Internal: Replaces the size (in sectors) of an extent in the descriptor file.
8370 *
8371 * @returns VBox status code.
8372 * @param pImage VMDK image instance.
8373 * @param pExtent VMDK extent instance.
8374 * @param uLine Line number of descriptor to change.
8375 * @param cSectorsOld Existing number of sectors.
8376 * @param cSectorsNew New number of sectors.
8377 */
8378static int vmdkReplaceExtentSize(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, unsigned uLine, uint64_t cSectorsOld,
8379 uint64_t cSectorsNew)
8380{
8381 char szOldExtentSectors[UINT64_MAX_BUFF_SIZE];
8382 char szNewExtentSectors[UINT64_MAX_BUFF_SIZE];
8383
8384 ssize_t cbWritten = RTStrPrintf2(szOldExtentSectors, sizeof(szOldExtentSectors), "%llu", cSectorsOld);
8385 if (cbWritten <= 0 || cbWritten > (ssize_t)sizeof(szOldExtentSectors))
8386 return VERR_BUFFER_OVERFLOW;
8387
8388 cbWritten = RTStrPrintf2(szNewExtentSectors, sizeof(szNewExtentSectors), "%llu", cSectorsNew);
8389 if (cbWritten <= 0 || cbWritten > (ssize_t)sizeof(szNewExtentSectors))
8390 return VERR_BUFFER_OVERFLOW;
8391
8392 char *pszNewExtentLine = vmdkStrReplace(pImage->Descriptor.aLines[uLine],
8393 szOldExtentSectors,
8394 szNewExtentSectors);
8395
8396 if (RT_UNLIKELY(!pszNewExtentLine))
8397 return VERR_INVALID_PARAMETER;
8398
8399 vmdkDescExtRemoveByLine(pImage, &pImage->Descriptor, uLine);
8400 vmdkDescExtInsert(pImage, &pImage->Descriptor,
8401 pExtent->enmAccess, cSectorsNew,
8402 pExtent->enmType, pExtent->pszBasename, pExtent->uSectorOffset);
8403
8404 RTStrFree(pszNewExtentLine);
8405 pszNewExtentLine = NULL;
8406
8407 pImage->Descriptor.fDirty = true;
8408
8409 return VINF_SUCCESS;
8410}
8411
8412/**
8413 * Moves sectors down to make room for new overhead.
8414 * Used for sparse extent resize.
8415 *
8416 * @returns VBox status code.
8417 * @param pImage VMDK image instance.
8418 * @param pExtent VMDK extent instance.
8419 * @param cSectorsNew Number of sectors after resize.
8420 */
8421static int vmdkRelocateSectorsForSparseResize(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
8422 uint64_t cSectorsNew)
8423{
8424 int rc = VINF_SUCCESS;
8425
8426 uint64_t cbNewOverhead = vmdkGetNewOverhead(pExtent, cSectorsNew);
8427
8428 uint64_t cNewOverheadSectors = VMDK_BYTE2SECTOR(cbNewOverhead);
8429 uint64_t cOverheadSectorDiff = cNewOverheadSectors - pExtent->cOverheadSectors;
8430
8431 uint64_t cbFile = 0;
8432 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);
8433
8434 uint64_t uNewAppendPosition;
8435
8436 /* Calculate how many sectors need to be relocated. */
8437 unsigned cSectorsReloc = cOverheadSectorDiff;
8438 if (cbNewOverhead % VMDK_SECTOR_SIZE)
8439 cSectorsReloc++;
8440
8441 if (cSectorsReloc < pExtent->cSectors)
8442 uNewAppendPosition = RT_ALIGN_Z(cbFile + VMDK_SECTOR2BYTE(cOverheadSectorDiff), 512);
8443 else
8444 uNewAppendPosition = cbFile;
8445
8446 /*
8447 * Get the blocks we need to relocate first, they are appended to the end
8448 * of the image.
8449 */
8450 void *pvBuf = NULL, *pvZero = NULL;
8451 do
8452 {
8453 /* Allocate data buffer. */
8454 pvBuf = RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
8455 if (!pvBuf)
8456 {
8457 rc = VERR_NO_MEMORY;
8458 break;
8459 }
8460
8461 /* Allocate buffer for overwriting with zeroes. */
8462 pvZero = RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
8463 if (!pvZero)
8464 {
8465 RTMemFree(pvBuf);
8466 pvBuf = NULL;
8467
8468 rc = VERR_NO_MEMORY;
8469 break;
8470 }
8471
8472 uint32_t *aGTDataTmp = (uint32_t *)RTMemAllocZ(sizeof(uint32_t) * pExtent->cGTEntries);
8473 if(!aGTDataTmp)
8474 {
8475 RTMemFree(pvBuf);
8476 pvBuf = NULL;
8477
8478 RTMemFree(pvZero);
8479 pvZero = NULL;
8480
8481 rc = VERR_NO_MEMORY;
8482 break;
8483 }
8484
8485 uint32_t *aRGTDataTmp = (uint32_t *)RTMemAllocZ(sizeof(uint32_t) * pExtent->cGTEntries);
8486 if(!aRGTDataTmp)
8487 {
8488 RTMemFree(pvBuf);
8489 pvBuf = NULL;
8490
8491 RTMemFree(pvZero);
8492 pvZero = NULL;
8493
8494 RTMemFree(aGTDataTmp);
8495 aGTDataTmp = NULL;
8496
8497 rc = VERR_NO_MEMORY;
8498 break;
8499 }
8500
8501 /* Search for overlap sector in the grain table. */
8502 for (uint32_t idxGD = 0; idxGD < pExtent->cGDEntries; idxGD++)
8503 {
8504 uint64_t uGTSector = pExtent->pGD[idxGD];
8505 uint64_t uRGTSector = pExtent->pRGD[idxGD];
8506
8507 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8508 VMDK_SECTOR2BYTE(uGTSector),
8509 aGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);
8510
8511 if (RT_FAILURE(rc))
8512 break;
8513
8514 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8515 VMDK_SECTOR2BYTE(uRGTSector),
8516 aRGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);
8517
8518 if (RT_FAILURE(rc))
8519 break;
8520
8521 for (uint32_t idxGT = 0; idxGT < pExtent->cGTEntries; idxGT++)
8522 {
8523 uint64_t aGTEntryLE = RT_LE2H_U64(aGTDataTmp[idxGT]);
8524 uint64_t aRGTEntryLE = RT_LE2H_U64(aRGTDataTmp[idxGT]);
8525
8526 /**
8527 * Check if grain table is valid. If not dump out with an error.
8528 * Shoudln't ever get here (given other checks) but good sanity check.
8529 */
8530 if (aGTEntryLE != aRGTEntryLE)
8531 {
8532 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
8533 N_("VMDK: inconsistent references within grain table in '%s'"), pExtent->pszFullname);
8534 break;
8535 }
8536
8537 if (aGTEntryLE < cNewOverheadSectors
8538 && aGTEntryLE != 0)
8539 {
8540 /* Read data and append grain to the end of the image. */
8541 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8542 VMDK_SECTOR2BYTE(aGTEntryLE), pvBuf,
8543 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
8544 if (RT_FAILURE(rc))
8545 break;
8546
8547 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8548 uNewAppendPosition, pvBuf,
8549 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
8550 if (RT_FAILURE(rc))
8551 break;
8552
8553 /* Zero out the old block area. */
8554 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8555 VMDK_SECTOR2BYTE(aGTEntryLE), pvZero,
8556 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
8557 if (RT_FAILURE(rc))
8558 break;
8559
8560 /* Write updated grain tables to file */
8561 aGTDataTmp[idxGT] = VMDK_BYTE2SECTOR(uNewAppendPosition);
8562 aRGTDataTmp[idxGT] = VMDK_BYTE2SECTOR(uNewAppendPosition);
8563
8564 if (memcmp(aGTDataTmp, aRGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries))
8565 {
8566 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
8567 N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
8568 break;
8569 }
8570
8571 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8572 VMDK_SECTOR2BYTE(uGTSector),
8573 aGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);
8574
8575 if (RT_FAILURE(rc))
8576 break;
8577
8578 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8579 VMDK_SECTOR2BYTE(uRGTSector),
8580 aRGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);
8581
8582 break;
8583 }
8584 }
8585 }
8586
8587 RTMemFree(aGTDataTmp);
8588 aGTDataTmp = NULL;
8589
8590 RTMemFree(aRGTDataTmp);
8591 aRGTDataTmp = NULL;
8592
8593 if (RT_FAILURE(rc))
8594 break;
8595
8596 uNewAppendPosition += VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain);
8597 } while (0);
8598
8599 if (pvBuf)
8600 {
8601 RTMemFree(pvBuf);
8602 pvBuf = NULL;
8603 }
8604
8605 if (pvZero)
8606 {
8607 RTMemFree(pvZero);
8608 pvZero = NULL;
8609 }
8610
8611 // Update append position for extent
8612 pExtent->uAppendPosition = uNewAppendPosition;
8613
8614 return rc;
8615}
8616
8617/**
8618 * Resizes meta/overhead for sparse extent resize.
8619 *
8620 * @returns VBox status code.
8621 * @param pImage VMDK image instance.
8622 * @param pExtent VMDK extent instance.
8623 * @param cSectorsNew Number of sectors after resize.
8624 */
8625static int vmdkResizeSparseMeta(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
8626 uint64_t cSectorsNew)
8627{
8628 int rc = VINF_SUCCESS;
8629 uint32_t cOldGDEntries = pExtent->cGDEntries;
8630
8631 uint64_t cNewDirEntries = cSectorsNew / pExtent->cSectorsPerGDE;
8632 if (cSectorsNew % pExtent->cSectorsPerGDE)
8633 cNewDirEntries++;
8634
8635 size_t cbNewGD = cNewDirEntries * sizeof(uint32_t);
8636
8637 uint64_t cbNewDirSize = RT_ALIGN_64(cbNewGD, 512);
8638 uint64_t cbCurrDirSize = RT_ALIGN_64(pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE, 512);
8639 uint64_t cDirSectorDiff = VMDK_BYTE2SECTOR(cbNewDirSize - cbCurrDirSize);
8640
8641 uint64_t cbNewAllTablesSize = RT_ALIGN_64(cNewDirEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
8642 uint64_t cbCurrAllTablesSize = RT_ALIGN_64(pExtent->cGDEntries * VMDK_GRAIN_TABLE_SIZE, 512);
8643 uint64_t cTableSectorDiff = VMDK_BYTE2SECTOR(cbNewAllTablesSize - cbCurrAllTablesSize);
8644
8645 uint64_t cbNewOverhead = vmdkGetNewOverhead(pExtent, cSectorsNew);
8646 uint64_t cNewOverheadSectors = VMDK_BYTE2SECTOR(cbNewOverhead);
8647 uint64_t cOverheadSectorDiff = cNewOverheadSectors - pExtent->cOverheadSectors;
8648
8649 /*
8650 * Get the blocks we need to relocate first, they are appended to the end
8651 * of the image.
8652 */
8653 void *pvBuf = NULL;
8654 AssertCompile(sizeof(g_abRTZero4K) >= VMDK_GRAIN_TABLE_SIZE);
8655
8656 do
8657 {
8658 /* Allocate data buffer. */
8659 pvBuf = RTMemAllocZ(VMDK_GRAIN_TABLE_SIZE);
8660 if (!pvBuf)
8661 {
8662 rc = VERR_NO_MEMORY;
8663 break;
8664 }
8665
8666 uint32_t uGTStart = VMDK_SECTOR2BYTE(pExtent->uSectorGD) + (cOldGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8667
8668 // points to last element in the grain table
8669 uint32_t uGTTail = uGTStart + (pExtent->cGDEntries * VMDK_GRAIN_TABLE_SIZE) - VMDK_GRAIN_TABLE_SIZE;
8670 uint32_t cbGTOff = RT_ALIGN_Z(VMDK_SECTOR2BYTE(cDirSectorDiff + cTableSectorDiff + cDirSectorDiff), 512);
8671
8672 for (int i = pExtent->cGDEntries - 1; i >= 0; i--)
8673 {
8674 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8675 uGTTail, pvBuf,
8676 VMDK_GRAIN_TABLE_SIZE);
8677 if (RT_FAILURE(rc))
8678 break;
8679
8680 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8681 RT_ALIGN_Z(uGTTail + cbGTOff, 512), pvBuf,
8682 VMDK_GRAIN_TABLE_SIZE);
8683 if (RT_FAILURE(rc))
8684 break;
8685
8686 // This overshoots when i == 0, but we don't need it anymore.
8687 uGTTail -= VMDK_GRAIN_TABLE_SIZE;
8688 }
8689
8690
8691 /* Find the end of the grain directory and start bumping everything down. Update locations of GT entries. */
8692 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8693 VMDK_SECTOR2BYTE(pExtent->uSectorGD), pvBuf,
8694 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8695 if (RT_FAILURE(rc))
8696 break;
8697
8698 int * tmpBuf = (int *)pvBuf;
8699
8700 for (uint32_t i = 0; i < pExtent->cGDEntries; i++)
8701 {
8702 tmpBuf[i] = tmpBuf[i] + VMDK_BYTE2SECTOR(cbGTOff);
8703 pExtent->pGD[i] = pExtent->pGD[i] + VMDK_BYTE2SECTOR(cbGTOff);
8704 }
8705
8706 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8707 RT_ALIGN_Z(VMDK_SECTOR2BYTE(pExtent->uSectorGD + cTableSectorDiff + cDirSectorDiff), 512), pvBuf,
8708 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8709 if (RT_FAILURE(rc))
8710 break;
8711
8712 pExtent->uSectorGD = pExtent->uSectorGD + cDirSectorDiff + cTableSectorDiff;
8713
8714 /* Repeat both steps with the redundant grain table/directory. */
8715
8716 uint32_t uRGTStart = VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + (cOldGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8717
8718 // points to last element in the grain table
8719 uint32_t uRGTTail = uRGTStart + (pExtent->cGDEntries * VMDK_GRAIN_TABLE_SIZE) - VMDK_GRAIN_TABLE_SIZE;
8720 uint32_t cbRGTOff = RT_ALIGN_Z(VMDK_SECTOR2BYTE(cDirSectorDiff), 512);
8721
8722 for (int i = pExtent->cGDEntries - 1; i >= 0; i--)
8723 {
8724 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8725 uRGTTail, pvBuf,
8726 VMDK_GRAIN_TABLE_SIZE);
8727 if (RT_FAILURE(rc))
8728 break;
8729
8730 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8731 RT_ALIGN_Z(uRGTTail + cbRGTOff, 512), pvBuf,
8732 VMDK_GRAIN_TABLE_SIZE);
8733 if (RT_FAILURE(rc))
8734 break;
8735
8736 // This overshoots when i == 0, but we don't need it anymore.
8737 uRGTTail -= VMDK_GRAIN_TABLE_SIZE;
8738 }
8739
8740 /* Update locations of GT entries. */
8741 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8742 VMDK_SECTOR2BYTE(pExtent->uSectorRGD), pvBuf,
8743 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8744 if (RT_FAILURE(rc))
8745 break;
8746
8747 tmpBuf = (int *)pvBuf;
8748
8749 for (uint32_t i = 0; i < pExtent->cGDEntries; i++)
8750 {
8751 tmpBuf[i] = tmpBuf[i] + cDirSectorDiff;
8752 pExtent->pRGD[i] = pExtent->pRGD[i] + cDirSectorDiff;
8753 }
8754
8755 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8756 VMDK_SECTOR2BYTE(pExtent->uSectorRGD), pvBuf,
8757 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8758 if (RT_FAILURE(rc))
8759 break;
8760
8761 pExtent->uSectorRGD = pExtent->uSectorRGD;
8762 pExtent->cOverheadSectors += cOverheadSectorDiff;
8763
8764 } while (0);
8765
8766 if (pvBuf)
8767 {
8768 RTMemFree(pvBuf);
8769 pvBuf = NULL;
8770 }
8771
8772 pExtent->cGDEntries = cNewDirEntries;
8773
8774 // Allocate additional grain dir
8775 pExtent->pGD = (uint32_t *) RTMemReallocZ(pExtent->pGD, pExtent->cGDEntries * sizeof(uint32_t), cbNewGD);
8776 if (RT_LIKELY(pExtent->pGD))
8777 {
8778 if (pExtent->uSectorRGD)
8779 {
8780 pExtent->pRGD = (uint32_t *)RTMemReallocZ(pExtent->pRGD, pExtent->cGDEntries * sizeof(uint32_t), cbNewGD);
8781 if (RT_UNLIKELY(!pExtent->pRGD))
8782 rc = VERR_NO_MEMORY;
8783 }
8784 }
8785 else
8786 return VERR_NO_MEMORY;
8787
8788
8789 uint32_t uTmpDirVal = pExtent->pGD[cOldGDEntries - 1] + VMDK_GRAIN_DIR_ENTRY_SIZE;
8790 for (uint32_t i = cOldGDEntries; i < pExtent->cGDEntries; i++)
8791 {
8792 pExtent->pGD[i] = uTmpDirVal;
8793
8794 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8795 VMDK_SECTOR2BYTE(uTmpDirVal), &g_abRTZero4K[0],
8796 VMDK_GRAIN_TABLE_SIZE);
8797
8798 if (RT_FAILURE(rc))
8799 return rc;
8800
8801 uTmpDirVal += VMDK_GRAIN_DIR_ENTRY_SIZE;
8802 }
8803
8804 uint32_t uRTmpDirVal = pExtent->pRGD[cOldGDEntries - 1] + VMDK_GRAIN_DIR_ENTRY_SIZE;
8805 for (uint32_t i = cOldGDEntries; i < pExtent->cGDEntries; i++)
8806 {
8807 pExtent->pRGD[i] = uRTmpDirVal;
8808
8809 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8810 VMDK_SECTOR2BYTE(uRTmpDirVal), &g_abRTZero4K[0],
8811 VMDK_GRAIN_TABLE_SIZE);
8812
8813 if (RT_FAILURE(rc))
8814 return rc;
8815
8816 uRTmpDirVal += VMDK_GRAIN_DIR_ENTRY_SIZE;
8817 }
8818
8819 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8820 VMDK_SECTOR2BYTE(pExtent->uSectorGD), pExtent->pGD,
8821 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8822 if (RT_FAILURE(rc))
8823 return rc;
8824
8825 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8826 VMDK_SECTOR2BYTE(pExtent->uSectorRGD), pExtent->pRGD,
8827 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8828 if (RT_FAILURE(rc))
8829 return rc;
8830
8831 rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent + pExtent->uExtent,
8832 pExtent->cNominalSectors, cSectorsNew);
8833 if (RT_FAILURE(rc))
8834 return rc;
8835
8836 return rc;
8837}
8838
/** @copydoc VDIMAGEBACKEND::pfnResize
 *
 * Grows (never shrinks) a VMDK image to @a cbSize bytes.  Four layout cases
 * are handled below, selected by the image flags and first-extent type:
 *   - monolithicFlat     (FIXED, not SPLIT_2G): grow the single flat file.
 *   - twoGbMaxExtentFlat (FIXED, SPLIT_2G):     top up the last 2G extent,
 *                                               then append new extent files.
 *   - monolithicSparse   (HOSTED_SPARSE, not SPLIT_2G): relocate data grains
 *                                               and rebuild GD/GT metadata.
 *   - twoGbSparseExtent  (HOSTED_SPARSE, SPLIT_2G):    combination of both.
 */
static DECLCALLBACK(int) vmdkResize(void *pBackendData, uint64_t cbSize,
                                    PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
                                    unsigned uPercentStart, unsigned uPercentSpan,
                                    PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
                                    PVDINTERFACE pVDIfsOperation)
{
    /* NOTE(review): uPercentStart and uPercentSpan ARE used below (passed to
     * vdIfIoIntFileSetAllocationSize in the flat paths), so listing them here
     * is stale.  RT_NOREF* merely casts to void, so this is harmless, but the
     * list should be trimmed to the three interface pointers. */
    RT_NOREF5(uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation);

    // Establish variables and objects needed
    int rc = VINF_SUCCESS;
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    unsigned uImageFlags = pImage->uImageFlags;
    /* All layout decisions below key off the FIRST extent's type; pExtent is
     * later re-pointed at whichever extent is being grown. */
    PVMDKEXTENT pExtent = &pImage->pExtents[0];
    pExtent->fMetaDirty = true;

    /* Round the requested byte size up to whole 512-byte sectors. */
    uint64_t cSectorsNew = cbSize / VMDK_SECTOR_SIZE; /** < New number of sectors in the image after the resize */
    if (cbSize % VMDK_SECTOR_SIZE)
        cSectorsNew++;

    uint64_t cSectorsOld = pImage->cbSize / VMDK_SECTOR_SIZE; /** < Number of sectors before the resize. Only for FLAT images. */
    if (pImage->cbSize % VMDK_SECTOR_SIZE)
        cSectorsOld++;
    unsigned cExtents = pImage->cExtents;

    /* Check size is within min/max bounds.  Raw disks are exempt; dynamic
     * (non-FIXED) images are capped just below 256 TiB. */
    if (   !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
        && (   !cbSize
            || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K)) )
        return VERR_VD_INVALID_SIZE;

    /*
     * Making the image smaller is not supported at the moment.
     */
    /** @todo implement making the image smaller, it is the responsibility of
     * the user to know what they're doing. */
    if (cbSize < pImage->cbSize)
        rc = VERR_VD_SHRINK_NOT_SUPPORTED;
    else if (cbSize > pImage->cbSize)
    {
        /**
         * monolithicFlat. FIXED flag and not split up into 2 GB parts.
         */
        if ((uImageFlags & VD_IMAGE_FLAGS_FIXED) && !(uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
        {
            /** Required space in bytes for the extent after the resize. */
            uint64_t cbSectorSpaceNew = cSectorsNew * VMDK_SECTOR_SIZE;
            pExtent = &pImage->pExtents[0];

            /* Grow the backing file, then patch the extent line in the descriptor. */
            rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbSectorSpaceNew,
                                                0 /* fFlags */, NULL,
                                                uPercentStart, uPercentSpan);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);

            rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent, cSectorsOld, cSectorsNew);
            if (RT_FAILURE(rc))
                return rc;
        }

        /**
         * twoGbMaxExtentFlat. FIXED flag and SPLIT into 2 GB parts.
         */
        if ((uImageFlags & VD_IMAGE_FLAGS_FIXED) && (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
        {
            /* Check to see how much space remains in last extent */
            bool fSpaceAvailible = false;
            uint64_t cLastExtentRemSectors = cSectorsOld % VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
            if (cLastExtentRemSectors)
                fSpaceAvailible = true;

            uint64_t cSectorsNeeded = cSectorsNew - cSectorsOld;

            /** Space remaining in current last extent file that we don't need to create another one. */
            if (fSpaceAvailible && cSectorsNeeded + cLastExtentRemSectors <= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE))
            {
                /* Everything fits into the (partially filled) last extent. */
                pExtent = &pImage->pExtents[cExtents - 1];
                rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage,
                                                    VMDK_SECTOR2BYTE(cSectorsNeeded + cLastExtentRemSectors),
                                                    0 /* fFlags */, NULL, uPercentStart, uPercentSpan);
                if (RT_FAILURE(rc))
                    return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);

                rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent + cExtents - 1,
                                           pExtent->cNominalSectors, cSectorsNeeded + cLastExtentRemSectors);
                if (RT_FAILURE(rc))
                    return rc;
            }
            //** Need more extent files to handle all the requested space. */
            else
            {
                if (fSpaceAvailible)
                {
                    /* First fill the last extent up to the full 2G boundary. */
                    pExtent = &pImage->pExtents[cExtents - 1];
                    rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, VMDK_2G_SPLIT_SIZE,
                                                        0 /* fFlags */, NULL,
                                                        uPercentStart, uPercentSpan);
                    if (RT_FAILURE(rc))
                        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);

                    cSectorsNeeded = cSectorsNeeded - VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE) + cLastExtentRemSectors;

                    rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent + cExtents - 1,
                                               pExtent->cNominalSectors, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE));
                    if (RT_FAILURE(rc))
                        return rc;
                }

                unsigned cNewExtents = VMDK_SECTOR2BYTE(cSectorsNeeded) / VMDK_2G_SPLIT_SIZE;
                /* NOTE(review): units mismatch — cNewExtents is an extent COUNT,
                 * yet it is compared/modded against VMDK_2G_SPLIT_SIZE (a byte
                 * size), so this condition is effectively always true and
                 * cNewExtents is always bumped.  The intended round-up check is
                 * presumably VMDK_SECTOR2BYTE(cSectorsNeeded) % VMDK_2G_SPLIT_SIZE.
                 * Harmless in practice because the loop below also guards on
                 * cSectorsNeeded, but should be confirmed/fixed. */
                if (cNewExtents % VMDK_2G_SPLIT_SIZE || cNewExtents < VMDK_2G_SPLIT_SIZE)
                    cNewExtents++;

                /* Append full 2G extents while at least one whole 2G chunk remains. */
                for (unsigned i = cExtents;
                     i < cExtents + cNewExtents && cSectorsNeeded >= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
                     i++)
                {
                    rc = vmdkAddFileBackedExtent(pImage, VMDK_2G_SPLIT_SIZE);
                    if (RT_FAILURE(rc))
                        return rc;

                    pExtent = &pImage->pExtents[i];

                    pExtent->cSectors = VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
                    cSectorsNeeded -= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
                }

                /* A final, smaller extent picks up the remainder. */
                if (cSectorsNeeded)
                {
                    rc = vmdkAddFileBackedExtent(pImage, VMDK_SECTOR2BYTE(cSectorsNeeded));
                    if (RT_FAILURE(rc))
                        return rc;
                }
            }
        }

        /**
         * monolithicSparse.
         */
        if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE && !(uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
        {
            // 1. Calculate sectors needed for new overhead.

            uint64_t cbNewOverhead = vmdkGetNewOverhead(pExtent, cSectorsNew);
            uint64_t cNewOverheadSectors = VMDK_BYTE2SECTOR(cbNewOverhead);
            /* NOTE(review): unsigned subtraction — if vmdkGetNewOverhead() ever
             * returned a SMALLER overhead this would wrap to a huge value and
             * the `> 0` check below would still pass.  Presumably overhead can
             * only grow on resize; confirm against vmdkGetNewOverhead(). */
            uint64_t cOverheadSectorDiff = cNewOverheadSectors - pExtent->cOverheadSectors;

            // 2. Relocate sectors to make room for new GD/GT, update entries in GD/GT
            if (cOverheadSectorDiff > 0)
            {
                if (pExtent->cSectors > 0)
                {
                    /* Do the relocation. */
                    LogFlow(("Relocating VMDK sectors\n"));
                    rc = vmdkRelocateSectorsForSparseResize(pImage, pExtent, cSectorsNew);
                    if (RT_FAILURE(rc))
                        return rc;

                    /* Flush so the relocated grains hit disk before the metadata rewrite. */
                    rc = vmdkFlushImage(pImage, NULL);
                    if (RT_FAILURE(rc))
                        return rc;
                }

                rc = vmdkResizeSparseMeta(pImage, pExtent, cSectorsNew);
                if (RT_FAILURE(rc))
                    return rc;
            }
        }

        /**
         * twoGbSparseExtent
         */
        if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE && (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
        {
            /* Check to see how much space remains in last extent */
            bool fSpaceAvailible = false;
            uint64_t cLastExtentRemSectors = cSectorsOld % VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
            if (cLastExtentRemSectors)
                fSpaceAvailible = true;

            uint64_t cSectorsNeeded = cSectorsNew - cSectorsOld;

            /* Everything fits into the existing last sparse extent. */
            if (fSpaceAvailible && cSectorsNeeded + cLastExtentRemSectors <= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE))
            {
                pExtent = &pImage->pExtents[cExtents - 1];
                rc = vmdkRelocateSectorsForSparseResize(pImage, pExtent, cSectorsNeeded + cLastExtentRemSectors);
                if (RT_FAILURE(rc))
                    return rc;

                rc = vmdkFlushImage(pImage, NULL);
                if (RT_FAILURE(rc))
                    return rc;

                rc = vmdkResizeSparseMeta(pImage, pExtent, cSectorsNeeded + cLastExtentRemSectors);
                if (RT_FAILURE(rc))
                    return rc;
            }
            else
            {
                if (fSpaceAvailible)
                {
                    /* Grow the last sparse extent up to the full 2G boundary first. */
                    pExtent = &pImage->pExtents[cExtents - 1];
                    rc = vmdkRelocateSectorsForSparseResize(pImage, pExtent, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE));
                    if (RT_FAILURE(rc))
                        return rc;

                    rc = vmdkFlushImage(pImage, NULL);
                    if (RT_FAILURE(rc))
                        return rc;

                    rc = vmdkResizeSparseMeta(pImage, pExtent, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE));
                    if (RT_FAILURE(rc))
                        return rc;

                    cSectorsNeeded = cSectorsNeeded - VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE) + cLastExtentRemSectors;
                }

                unsigned cNewExtents = VMDK_SECTOR2BYTE(cSectorsNeeded) / VMDK_2G_SPLIT_SIZE;
                /* NOTE(review): same units-mismatched round-up condition as in
                 * the twoGbMaxExtentFlat branch above — see the note there. */
                if (cNewExtents % VMDK_2G_SPLIT_SIZE || cNewExtents < VMDK_2G_SPLIT_SIZE)
                    cNewExtents++;

                for (unsigned i = cExtents;
                     i < cExtents + cNewExtents && cSectorsNeeded >= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
                     i++)
                {
                    rc = vmdkAddFileBackedExtent(pImage, VMDK_2G_SPLIT_SIZE);
                    if (RT_FAILURE(rc))
                        return rc;

                    pExtent = &pImage->pExtents[i];

                    rc = vmdkFlushImage(pImage, NULL);
                    if (RT_FAILURE(rc))
                        return rc;

                    pExtent->cSectors = VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
                    cSectorsNeeded -= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
                }

                if (cSectorsNeeded)
                {
                    rc = vmdkAddFileBackedExtent(pImage, VMDK_SECTOR2BYTE(cSectorsNeeded));
                    if (RT_FAILURE(rc))
                        return rc;

                    /* NOTE(review): this indexes one past the last extent just
                     * added if vmdkAddFileBackedExtent() increments cExtents —
                     * TODO confirm against that helper; pExtents[pImage->cExtents - 1]
                     * may be intended. */
                    pExtent = &pImage->pExtents[pImage->cExtents];

                    rc = vmdkFlushImage(pImage, NULL);
                    if (RT_FAILURE(rc))
                        return rc;
                }
            }
        }

        /* Successful resize. Update metadata */
        if (RT_SUCCESS(rc))
        {
            /* Update size and new block count. */
            pImage->cbSize = cbSize;
            /* NOTE(review): pExtent here is whatever extent the branches above
             * last pointed it at (the LAST extent for split images); assigning
             * the TOTAL new sector count to a single extent looks only correct
             * for the monolithic cases — verify for SPLIT_2G layouts. */
            pExtent->cNominalSectors = cSectorsNew;
            pExtent->cSectors = cSectorsNew;

            /* Update geometry. */
            pImage->PCHSGeometry = *pPCHSGeometry;
            pImage->LCHSGeometry = *pLCHSGeometry;
        }

        /* Update header information in base image file. */
        pImage->Descriptor.fDirty = true;
        rc = vmdkWriteDescriptor(pImage, NULL);

        if (RT_SUCCESS(rc))
            rc = vmdkFlushImage(pImage, NULL);
    }
    /* Same size doesn't change the image at all. */

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
9117
/**
 * VMDK image backend descriptor.
 *
 * Registered with the VD framework; each pfnXxx slot below implements the
 * corresponding VDIMAGEBACKEND callback for VMDK images.  NULL entries mark
 * operations this backend does not support (discard, timestamps, parent
 * filename, compact, repair, metadata traversal).
 */
const VDIMAGEBACKEND g_VmdkBackend =
{
    /* u32Version */
    VD_IMGBACKEND_VERSION,
    /* pszBackendName */
    "VMDK",
    /* uBackendCaps */
    VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
    | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC
    | VD_CAP_VFS | VD_CAP_PREFERRED,
    /* paFileExtensions */
    s_aVmdkFileExtensions,
    /* paConfigInfo */
    s_aVmdkConfigInfo,
    /* pfnProbe */
    vmdkProbe,
    /* pfnOpen */
    vmdkOpen,
    /* pfnCreate */
    vmdkCreate,
    /* pfnRename */
    vmdkRename,
    /* pfnClose */
    vmdkClose,
    /* pfnRead */
    vmdkRead,
    /* pfnWrite */
    vmdkWrite,
    /* pfnFlush */
    vmdkFlush,
    /* pfnDiscard */
    NULL,
    /* pfnGetVersion */
    vmdkGetVersion,
    /* pfnGetFileSize */
    vmdkGetFileSize,
    /* pfnGetPCHSGeometry */
    vmdkGetPCHSGeometry,
    /* pfnSetPCHSGeometry */
    vmdkSetPCHSGeometry,
    /* pfnGetLCHSGeometry */
    vmdkGetLCHSGeometry,
    /* pfnSetLCHSGeometry */
    vmdkSetLCHSGeometry,
    /* pfnQueryRegions */
    vmdkQueryRegions,
    /* pfnRegionListRelease */
    vmdkRegionListRelease,
    /* pfnGetImageFlags */
    vmdkGetImageFlags,
    /* pfnGetOpenFlags */
    vmdkGetOpenFlags,
    /* pfnSetOpenFlags */
    vmdkSetOpenFlags,
    /* pfnGetComment */
    vmdkGetComment,
    /* pfnSetComment */
    vmdkSetComment,
    /* pfnGetUuid */
    vmdkGetUuid,
    /* pfnSetUuid */
    vmdkSetUuid,
    /* pfnGetModificationUuid */
    vmdkGetModificationUuid,
    /* pfnSetModificationUuid */
    vmdkSetModificationUuid,
    /* pfnGetParentUuid */
    vmdkGetParentUuid,
    /* pfnSetParentUuid */
    vmdkSetParentUuid,
    /* pfnGetParentModificationUuid */
    vmdkGetParentModificationUuid,
    /* pfnSetParentModificationUuid */
    vmdkSetParentModificationUuid,
    /* pfnDump */
    vmdkDump,
    /* pfnGetTimestamp */
    NULL,
    /* pfnGetParentTimestamp */
    NULL,
    /* pfnSetParentTimestamp */
    NULL,
    /* pfnGetParentFilename */
    NULL,
    /* pfnSetParentFilename */
    NULL,
    /* pfnComposeLocation */
    genericFileComposeLocation,
    /* pfnComposeName */
    genericFileComposeName,
    /* pfnCompact */
    NULL,
    /* pfnResize */
    vmdkResize,
    /* pfnRepair */
    NULL,
    /* pfnTraverseMetadata */
    NULL,
    /* u32VersionEnd */
    VD_IMGBACKEND_VERSION
};
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette