VirtualBox

source: vbox/trunk/src/VBox/Storage/VMDK.cpp@ 107671

Last change on this file since 107671 was 107671, checked in by vboxsync, 4 months ago

Storage/VMDK.cpp: Fix unused variable assignment (VERR_NO_MEMORY would get lost when allocating memory for the zero buffer fails) and replace allocating the buffer with using the zero 4K data object, bugref:3409

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 378.8 KB
Line 
1/* $Id: VMDK.cpp 107671 2025-01-10 15:38:26Z vboxsync $ */
2/** @file
3 * VMDK disk image, core code.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.215389.xyz.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_VD_VMDK
33#include <VBox/log.h> /* before VBox/vd-ifs.h */
34#include <VBox/vd-plugin.h>
35#include <VBox/err.h>
36
37#include <iprt/assert.h>
38#include <iprt/alloc.h>
39#include <iprt/base64.h>
40#include <iprt/ctype.h>
41#include <iprt/crc.h>
42#include <iprt/dvm.h>
43#include <iprt/uuid.h>
44#include <iprt/path.h>
45#include <iprt/rand.h>
46#include <iprt/sg.h>
47#include <iprt/sort.h>
48#include <iprt/string.h>
49#include <iprt/zip.h>
50#include <iprt/asm.h>
51#include <iprt/zero.h>
52#ifdef RT_OS_WINDOWS
53# include <iprt/utf16.h>
54# include <iprt/uni.h>
55# include <iprt/uni.h>
56# include <iprt/nt/nt-and-windows.h>
57# include <winioctl.h>
58#endif
59#ifdef RT_OS_LINUX
60# include <errno.h>
61# include <sys/stat.h>
62# include <iprt/dir.h>
63# include <iprt/symlink.h>
64# include <iprt/linux/sysfs.h>
65#endif
66#ifdef RT_OS_FREEBSD
67#include <libgeom.h>
68#include <sys/stat.h>
69#include <stdlib.h>
70#endif
71#ifdef RT_OS_SOLARIS
72#include <sys/dkio.h>
73#include <sys/vtoc.h>
74#include <sys/efi_partition.h>
75#include <unistd.h>
76#include <errno.h>
77#endif
78#ifdef RT_OS_DARWIN
79# include <sys/stat.h>
80# include <sys/disk.h>
81# include <errno.h>
82/* The following structure and IOCTLs are defined in znu bsd/sys/disk.h but
83 inside KERNEL ifdefs and thus stripped from the SDK edition of the header.
84 While we could try include the header from the Kernel.framework, it's a lot
85 easier to just add the structure and 4 defines here. */
/** Physical extent descriptor as used by DKIOCGETPHYSICALEXTENT (see note above
 * about the KERNEL-only definitions in znu's bsd/sys/disk.h). */
typedef struct
{
    /** Byte offset of the extent. */
    uint64_t offset;
    /** Byte length of the extent. */
    uint64_t length;
    /** Reserved / padding. */
    uint8_t reserved0128[12];
    /** Device the extent lives on. */
    dev_t dev;
} dk_physical_extent_t;
# define DKIOCGETBASE _IOR( 'd', 73, uint64_t)
# define DKIOCLOCKPHYSICALEXTENTS _IO( 'd', 81)
# define DKIOCGETPHYSICALEXTENT _IOWR('d', 82, dk_physical_extent_t)
# define DKIOCUNLOCKPHYSICALEXTENTS _IO( 'd', 83)
97#endif /* RT_OS_DARWIN */
98
99#include "VDBackends.h"
100
101
102/*********************************************************************************************************************************
103* Constants And Macros, Structures and Typedefs *
104*********************************************************************************************************************************/
105
/** Maximum encoded string size (including NUL) we allow for VMDK images.
 * Deliberately not set high to avoid running out of descriptor space. */
#define VMDK_ENCODED_COMMENT_MAX 1024

/** VMDK descriptor DDB entry for PCHS cylinders. */
#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"

/** VMDK descriptor DDB entry for PCHS heads. */
#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"

/** VMDK descriptor DDB entry for PCHS sectors. */
#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"

/** VMDK descriptor DDB entry for LCHS cylinders. */
#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"

/** VMDK descriptor DDB entry for LCHS heads. */
#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"

/** VMDK descriptor DDB entry for LCHS sectors. */
#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"

/** VMDK descriptor DDB entry for image UUID. */
#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"

/** VMDK descriptor DDB entry for image modification UUID. */
#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"

/** VMDK descriptor DDB entry for parent image UUID. */
#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"

/** VMDK descriptor DDB entry for parent image modification UUID. */
#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"

/** No compression for streamOptimized files. */
#define VMDK_COMPRESSION_NONE 0

/** Deflate compression for streamOptimized files. */
#define VMDK_COMPRESSION_DEFLATE 1

/** Marker that the actual GD value is stored in the footer. */
#define VMDK_GD_AT_END 0xffffffffffffffffULL

/** Marker for end-of-stream in streamOptimized images. */
#define VMDK_MARKER_EOS 0

/** Marker for grain table block in streamOptimized images. */
#define VMDK_MARKER_GT 1

/** Marker for grain directory block in streamOptimized images. */
#define VMDK_MARKER_GD 2

/** Marker for footer in streamOptimized images. */
#define VMDK_MARKER_FOOTER 3

/** Marker for unknown purpose in streamOptimized images.
 * Shows up in very recent images created by vSphere, but only sporadically.
 * They "forgot" to document that one in the VMDK specification. */
#define VMDK_MARKER_UNSPECIFIED 4

/** Dummy marker for "don't check the marker value". */
#define VMDK_MARKER_IGNORE 0xffffffffU

/**
 * Magic number for hosted images created by VMware Workstation 4, VMware
 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
 */
#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */

/** VMDK sector size in bytes. */
#define VMDK_SECTOR_SIZE 512
/** Max string buffer size for a decimal uint64_t with NUL terminator (20 digits + NUL). */
#define UINT64_MAX_BUFF_SIZE 21
/** Grain directory entry size in bytes. */
#define VMDK_GRAIN_DIR_ENTRY_SIZE 4
/** Grain table size in bytes. */
#define VMDK_GRAIN_TABLE_SIZE 2048

/**
 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
 * this header is also used for monolithic flat images.
 * On-disk layout; must be packed and exactly 512 bytes (one sector).
 */
#pragma pack(1)
typedef struct SparseExtentHeader
{
    uint32_t magicNumber;
    uint32_t version;
    uint32_t flags;
    uint64_t capacity;
    uint64_t grainSize;
    uint64_t descriptorOffset;
    uint64_t descriptorSize;
    uint32_t numGTEsPerGT;
    uint64_t rgdOffset;
    uint64_t gdOffset;
    uint64_t overHead;
    bool uncleanShutdown;
    char singleEndLineChar;
    char nonEndLineChar;
    char doubleEndLineChar1;
    char doubleEndLineChar2;
    uint16_t compressAlgorithm;
    uint8_t pad[433];
} SparseExtentHeader;
#pragma pack()

/** The maximum allowed descriptor size in the extent header in sectors. */
#define VMDK_SPARSE_DESCRIPTOR_SIZE_MAX UINT64_C(20480) /* 10MB */

/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
 * divisible by the default grain size (64K) */
#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)

/** VMDK streamOptimized file format marker. The type field may or may not
 * be actually valid, but there's always data to read there. */
#pragma pack(1)
typedef struct VMDKMARKER
{
    uint64_t uSector;
    uint32_t cbSize;
    uint32_t uType;
} VMDKMARKER, *PVMDKMARKER;
#pragma pack()


/** Convert sector number/size to byte offset/size. */
#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)

/** Convert byte offset/size to sector number/size. */
#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
236
/**
 * VMDK extent type.
 */
typedef enum VMDKETYPE
{
    /** Hosted sparse extent. */
    VMDKETYPE_HOSTED_SPARSE = 1,
    /** Flat extent. */
    VMDKETYPE_FLAT,
    /** Zero extent. */
    VMDKETYPE_ZERO,
    /** VMFS extent, used by ESX. */
    VMDKETYPE_VMFS
} VMDKETYPE, *PVMDKETYPE;

/**
 * VMDK access type for an extent.
 */
typedef enum VMDKACCESS
{
    /** No access allowed. */
    VMDKACCESS_NOACCESS = 0,
    /** Read-only access. */
    VMDKACCESS_READONLY,
    /** Read-write access. */
    VMDKACCESS_READWRITE
} VMDKACCESS, *PVMDKACCESS;

/** Forward declaration for PVMDKIMAGE. */
typedef struct VMDKIMAGE *PVMDKIMAGE;

/**
 * Extent file entry. Used for opening a particular file only once
 * (entries form a doubly linked list hanging off VMDKIMAGE::pFiles).
 */
typedef struct VMDKFILE
{
    /** Pointer to file path. Local copy. */
    const char *pszFilename;
    /** Pointer to base name. Local copy. */
    const char *pszBasename;
    /** File open flags for consistency checking. */
    unsigned fOpen;
    /** Handle for sync/async file abstraction.*/
    PVDIOSTORAGE pStorage;
    /** Reference counter. */
    unsigned uReferences;
    /** Flag whether the file should be deleted on last close. */
    bool fDelete;
    /** Pointer to the image we belong to (for debugging purposes). */
    PVMDKIMAGE pImage;
    /** Pointer to next file descriptor. */
    struct VMDKFILE *pNext;
    /** Pointer to the previous file descriptor. */
    struct VMDKFILE *pPrev;
} VMDKFILE, *PVMDKFILE;
292
/**
 * VMDK extent data structure.
 */
typedef struct VMDKEXTENT
{
    /** File handle. */
    PVMDKFILE pFile;
    /** Base name of the image extent. */
    const char *pszBasename;
    /** Full name of the image extent. */
    const char *pszFullname;
    /** Number of sectors in this extent. */
    uint64_t cSectors;
    /** Number of sectors per block (grain in VMDK speak). */
    uint64_t cSectorsPerGrain;
    /** Starting sector number of descriptor. */
    uint64_t uDescriptorSector;
    /** Size of descriptor in sectors. */
    uint64_t cDescriptorSectors;
    /** Starting sector number of grain directory. */
    uint64_t uSectorGD;
    /** Starting sector number of redundant grain directory. */
    uint64_t uSectorRGD;
    /** Total number of metadata sectors. */
    uint64_t cOverheadSectors;
    /** Nominal size (i.e. as described by the descriptor) of this extent. */
    uint64_t cNominalSectors;
    /** Sector offset (i.e. as described by the descriptor) of this extent. */
    uint64_t uSectorOffset;
    /** Number of entries in a grain table. */
    uint32_t cGTEntries;
    /** Number of sectors reachable via a grain directory entry. */
    uint32_t cSectorsPerGDE;
    /** Number of entries in the grain directory. */
    uint32_t cGDEntries;
    /** Pointer to the next free sector. Legacy information. Do not use. */
    uint32_t uFreeSector;
    /** Number of this extent in the list of images. */
    uint32_t uExtent;
    /** Pointer to the descriptor (NULL if no descriptor in this extent). */
    char *pDescData;
    /** Pointer to the grain directory. */
    uint32_t *pGD;
    /** Pointer to the redundant grain directory. */
    uint32_t *pRGD;
    /** VMDK version of this extent. 1=1.0/1.1 */
    uint32_t uVersion;
    /** Type of this extent. */
    VMDKETYPE enmType;
    /** Access to this extent. */
    VMDKACCESS enmAccess;
    /** Flag whether this extent is marked as unclean. */
    bool fUncleanShutdown;
    /** Flag whether the metadata in the extent header needs to be updated. */
    bool fMetaDirty;
    /** Flag whether there is a footer in this extent. */
    bool fFooter;
    /** Compression type for this extent. */
    uint16_t uCompression;
    /** Append position for writing new grain. Only for sparse extents. */
    uint64_t uAppendPosition;
    /** Last grain which was accessed. Only for streamOptimized extents. */
    uint32_t uLastGrainAccess;
    /** Starting sector corresponding to the grain buffer. */
    uint32_t uGrainSectorAbs;
    /** Grain number corresponding to the grain buffer. */
    uint32_t uGrain;
    /** Actual size of the compressed data, only valid for reading. */
    uint32_t cbGrainStreamRead;
    /** Size of compressed grain buffer for streamOptimized extents. */
    size_t cbCompGrain;
    /** Compressed grain buffer for streamOptimized extents, with marker. */
    void *pvCompGrain;
    /** Decompressed grain buffer for streamOptimized extents. */
    void *pvGrain;
    /** Reference to the image in which this extent is used. Do not use this
     * on a regular basis to avoid passing pImage references to functions
     * explicitly. */
    struct VMDKIMAGE *pImage;
} VMDKEXTENT, *PVMDKEXTENT;
373
/**
 * Grain table cache size. Allocated per image.
 */
#define VMDK_GT_CACHE_SIZE 256

/**
 * Grain table block size. Smaller than an actual grain table block to allow
 * more grain table blocks to be cached without having to allocate excessive
 * amounts of memory for the cache.
 */
#define VMDK_GT_CACHELINE_SIZE 128


/**
 * Maximum number of lines in a descriptor file. Not worth the effort of
 * making it variable. Descriptor files are generally very short (~20 lines),
 * with the exception of sparse files split in 2G chunks, which need for the
 * maximum size (almost 2T) exactly 1025 lines for the disk database.
 */
#define VMDK_DESCRIPTOR_LINES_MAX 1100U

/**
 * Parsed descriptor information. Allows easy access and update of the
 * descriptor (whether separate file or not). Free form text files suck.
 */
typedef struct VMDKDESCRIPTOR
{
    /** Line number of first entry of the disk descriptor. */
    unsigned uFirstDesc;
    /** Line number of first entry in the extent description. */
    unsigned uFirstExtent;
    /** Line number of first disk database entry. */
    unsigned uFirstDDB;
    /** Total number of lines. */
    unsigned cLines;
    /** Total amount of memory available for the descriptor. */
    size_t cbDescAlloc;
    /** Set if descriptor has been changed and not yet written to disk. */
    bool fDirty;
    /** Array of pointers to the data in the descriptor. */
    char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
    /** Array of line indices pointing to the next non-comment line. */
    unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
418
419
/**
 * Cache entry for translating extent/sector to a sector number in that
 * extent.
 */
typedef struct VMDKGTCACHEENTRY
{
    /** Extent number for which this entry is valid. */
    uint32_t uExtent;
    /** GT data block number. */
    uint64_t uGTBlock;
    /** Data part of the cache entry. */
    uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;

/**
 * Cache data structure for blocks of grain table entries. For now this is a
 * fixed size direct mapping cache, but this should be adapted to the size of
 * the sparse image and maybe converted to a set-associative cache. The
 * implementation below implements a write-through cache with write allocate.
 */
typedef struct VMDKGTCACHE
{
    /** Cache entries. */
    VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
    /** Number of cache entries (currently unused). */
    unsigned cEntries;
} VMDKGTCACHE, *PVMDKGTCACHE;
447
/**
 * Complete VMDK image data structure. Mainly a collection of extents and a few
 * extra global data fields.
 */
typedef struct VMDKIMAGE
{
    /** Image name. */
    const char *pszFilename;
    /** Descriptor file if applicable. */
    PVMDKFILE pFile;

    /** Pointer to the per-disk VD interface list. */
    PVDINTERFACE pVDIfsDisk;
    /** Pointer to the per-image VD interface list. */
    PVDINTERFACE pVDIfsImage;

    /** Error interface. */
    PVDINTERFACEERROR pIfError;
    /** I/O interface. */
    PVDINTERFACEIOINT pIfIo;


    /** Pointer to the image extents. */
    PVMDKEXTENT pExtents;
    /** Number of image extents. */
    unsigned cExtents;
    /** Pointer to the files list, for opening a file referenced multiple
     * times only once (happens mainly with raw partition access). */
    PVMDKFILE pFiles;

    /**
     * Pointer to an array of segment entries for async I/O.
     * This is an optimization because the task number to submit is not known
     * and allocating/freeing an array in the read/write functions every time
     * is too expensive.
     */
    PRTSGSEG paSegments;
    /** Entries available in the segments array. */
    unsigned cSegments;

    /** Open flags passed by VBoxHD layer. */
    unsigned uOpenFlags;
    /** Image flags defined during creation or determined during open. */
    unsigned uImageFlags;
    /** Total size of the image. */
    uint64_t cbSize;
    /** Physical geometry of this image. */
    VDGEOMETRY PCHSGeometry;
    /** Logical geometry of this image. */
    VDGEOMETRY LCHSGeometry;
    /** Image UUID. */
    RTUUID ImageUuid;
    /** Image modification UUID. */
    RTUUID ModificationUuid;
    /** Parent image UUID. */
    RTUUID ParentUuid;
    /** Parent image modification UUID. */
    RTUUID ParentModificationUuid;

    /** Pointer to grain table cache, if this image contains sparse extents. */
    PVMDKGTCACHE pGTCache;
    /** Pointer to the descriptor (NULL if no separate descriptor file). */
    char *pDescData;
    /** Allocation size of the descriptor file. */
    size_t cbDescAlloc;
    /** Parsed descriptor file content. */
    VMDKDESCRIPTOR Descriptor;
    /** The static region list. */
    VDREGIONLIST RegionList;
} VMDKIMAGE;
518
519
/** State for the input/output callout of the inflate reader/deflate writer. */
typedef struct VMDKCOMPRESSIO
{
    /** Image this operation relates to. */
    PVMDKIMAGE pImage;
    /** Current read position; -1 signals "first call" to the helpers. */
    ssize_t iOffset;
    /** Size of the compressed grain buffer (available data). */
    size_t cbCompGrain;
    /** Pointer to the compressed grain buffer. */
    void *pvCompGrain;
} VMDKCOMPRESSIO;


/** Tracks async grain allocation. */
typedef struct VMDKGRAINALLOCASYNC
{
    /** Flag whether the allocation failed. */
    bool fIoErr;
    /** Current number of transfers pending.
     * If reached 0 and there is an error the old state is restored. */
    unsigned cIoXfersPending;
    /** Sector number */
    uint64_t uSector;
    /** Flag whether the grain table needs to be updated. */
    bool fGTUpdateNeeded;
    /** Extent the allocation happens. */
    PVMDKEXTENT pExtent;
    /** Position of the new grain, required for the grain table update. */
    uint64_t uGrainOffset;
    /** Grain table sector. */
    uint64_t uGTSector;
    /** Backup grain table sector. */
    uint64_t uRGTSector;
} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;

/**
 * State information for vmdkRename() and helpers.
 */
typedef struct VMDKRENAMESTATE
{
    /** Array of old filenames. */
    char **apszOldName;
    /** Array of new filenames. */
    char **apszNewName;
    /** Array of new lines in the extent descriptor. */
    char **apszNewLines;
    /** Name of the old descriptor file if not a sparse image. */
    char *pszOldDescName;
    /** Flag whether we called vmdkFreeImage(). */
    bool fImageFreed;
    /** Flag whether the descriptor is embedded in the image (sparse) or
     * in a separate file. */
    bool fEmbeddedDesc;
    /** Number of extents in the image. */
    unsigned cExtents;
    /** New base filename. */
    char *pszNewBaseName;
    /** The old base filename. */
    char *pszOldBaseName;
    /** New full filename. */
    char *pszNewFullName;
    /** Old full filename. */
    char *pszOldFullName;
    /** The old image name. */
    const char *pszOldImageName;
    /** Copy of the original VMDK descriptor. */
    VMDKDESCRIPTOR DescriptorCopy;
    /** Copy of the extent state for sparse images. */
    VMDKEXTENT ExtentCopy;
} VMDKRENAMESTATE;
/** Pointer to a VMDK rename state. */
typedef VMDKRENAMESTATE *PVMDKRENAMESTATE;
593
594
595/*********************************************************************************************************************************
596* Static Variables *
597*********************************************************************************************************************************/
598
/** NULL-terminated array of supported file extensions. */
static const VDFILEEXTENSION s_aVmdkFileExtensions[] =
{
    {"vmdk", VDTYPE_HDD},
    {NULL, VDTYPE_INVALID}
};

/** NULL-terminated array of configuration options. */
static const VDCONFIGINFO s_aVmdkConfigInfo[] =
{
    /* Options for VMDK raw disks */
    { "RawDrive", NULL, VDCFGVALUETYPE_STRING, 0 },
    { "Partitions", NULL, VDCFGVALUETYPE_STRING, 0 },
    { "BootSector", NULL, VDCFGVALUETYPE_BYTES, 0 },
    { "Relative", NULL, VDCFGVALUETYPE_INTEGER, 0 },

    /* End of options list */
    { NULL, NULL, VDCFGVALUETYPE_INTEGER, 0 }
};
618
619
620/*********************************************************************************************************************************
621* Internal Functions *
622*********************************************************************************************************************************/
623
624static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent);
625static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
626 bool fDelete);
627
628static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
629static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx);
630static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
631static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush);
632
633static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx,
634 void *pvUser, int rcReq);
635
636/**
637 * Internal: open a file (using a file descriptor cache to ensure each file
638 * is only opened once - anything else can cause locking problems).
639 */
640static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
641 const char *pszBasename, const char *pszFilename, uint32_t fOpen)
642{
643 int rc = VINF_SUCCESS;
644 PVMDKFILE pVmdkFile;
645
646 for (pVmdkFile = pImage->pFiles;
647 pVmdkFile != NULL;
648 pVmdkFile = pVmdkFile->pNext)
649 {
650 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
651 {
652 Assert(fOpen == pVmdkFile->fOpen);
653 pVmdkFile->uReferences++;
654
655 *ppVmdkFile = pVmdkFile;
656
657 return rc;
658 }
659 }
660
661 /* If we get here, there's no matching entry in the cache. */
662 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
663 if (!pVmdkFile)
664 {
665 *ppVmdkFile = NULL;
666 return VERR_NO_MEMORY;
667 }
668
669 pVmdkFile->pszFilename = RTStrDup(pszFilename);
670 if (!pVmdkFile->pszFilename)
671 {
672 RTMemFree(pVmdkFile);
673 *ppVmdkFile = NULL;
674 return VERR_NO_MEMORY;
675 }
676
677 if (pszBasename)
678 {
679 pVmdkFile->pszBasename = RTStrDup(pszBasename);
680 if (!pVmdkFile->pszBasename)
681 {
682 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
683 RTMemFree(pVmdkFile);
684 *ppVmdkFile = NULL;
685 return VERR_NO_MEMORY;
686 }
687 }
688
689 pVmdkFile->fOpen = fOpen;
690
691 rc = vdIfIoIntFileOpen(pImage->pIfIo, pszFilename, fOpen,
692 &pVmdkFile->pStorage);
693 if (RT_SUCCESS(rc))
694 {
695 pVmdkFile->uReferences = 1;
696 pVmdkFile->pImage = pImage;
697 pVmdkFile->pNext = pImage->pFiles;
698 if (pImage->pFiles)
699 pImage->pFiles->pPrev = pVmdkFile;
700 pImage->pFiles = pVmdkFile;
701 *ppVmdkFile = pVmdkFile;
702 }
703 else
704 {
705 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
706 RTMemFree(pVmdkFile);
707 *ppVmdkFile = NULL;
708 }
709
710 return rc;
711}
712
/**
 * Internal: close a file, updating the file descriptor cache.
 *
 * Drops one reference on the entry. When the last reference is released the
 * storage handle is closed, the entry is unchained from the image's file list
 * and freed. A requested delete-on-close is only honoured for plain base
 * names with a .vmdk/.bin/.img suffix; anything else (paths, raw devices,
 * other suffixes) is refused and logged.
 *
 * @returns VBox status code (first failure of close/delete wins).
 * @param   pImage      The image instance owning the file cache.
 * @param   ppVmdkFile  The entry to release; set to NULL on return.
 * @param   fDelete     Whether the file should be deleted on last close.
 */
static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
{
    int rc = VINF_SUCCESS;
    PVMDKFILE pVmdkFile = *ppVmdkFile;

    AssertPtr(pVmdkFile);

    /* The delete request is sticky across references. */
    pVmdkFile->fDelete |= fDelete;
    Assert(pVmdkFile->uReferences);
    pVmdkFile->uReferences--;
    if (pVmdkFile->uReferences == 0)
    {
        PVMDKFILE pPrev;
        PVMDKFILE pNext;

        /* Unchain the element from the list. */
        pPrev = pVmdkFile->pPrev;
        pNext = pVmdkFile->pNext;

        if (pNext)
            pNext->pPrev = pPrev;
        if (pPrev)
            pPrev->pNext = pNext;
        else
            pImage->pFiles = pNext;

        rc = vdIfIoIntFileClose(pImage->pIfIo, pVmdkFile->pStorage);

        /* Guard against deleting files we did not create: only plain base
         * names ending in .vmdk/.bin/.img may actually be removed. */
        bool fFileDel = pVmdkFile->fDelete;
        if (   pVmdkFile->pszBasename
            && fFileDel)
        {
            const char *pszSuffix = RTPathSuffix(pVmdkFile->pszBasename);
            if (   RTPathHasPath(pVmdkFile->pszBasename)
                || !pszSuffix
                || (   strcmp(pszSuffix, ".vmdk")
                    && strcmp(pszSuffix, ".bin")
                    && strcmp(pszSuffix, ".img")))
                fFileDel = false;
        }

        if (fFileDel)
        {
            int rc2 = vdIfIoIntFileDelete(pImage->pIfIo, pVmdkFile->pszFilename);
            if (RT_SUCCESS(rc))
                rc = rc2;
        }
        else if (pVmdkFile->fDelete)
            LogRel(("VMDK: Denying deletion of %s\n", pVmdkFile->pszBasename));
        RTStrFree((char *)(void *)pVmdkFile->pszFilename);
        if (pVmdkFile->pszBasename)
            RTStrFree((char *)(void *)pVmdkFile->pszBasename);
        RTMemFree(pVmdkFile);
    }

    *ppVmdkFile = NULL;
    return rc;
}
774
/*#define VMDK_USE_BLOCK_DECOMP_API - test and enable */
#ifndef VMDK_USE_BLOCK_DECOMP_API
/**
 * Input callback for RTZipDecompress: feeds the compressed grain buffer to
 * the inflater. On the very first call (iOffset < 0) a single RTZIPTYPE_ZLIB
 * byte is injected in front of the data (the on-disk stream carries no zip
 * type prefix) and the read position is advanced past the marker header.
 */
static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
{
    VMDKCOMPRESSIO *pInflateState = (VMDKCOMPRESSIO *)pvUser;
    size_t cbInjected = 0;

    Assert(cbBuf);
    if (pInflateState->iOffset < 0)
    {
        /* First call: prepend the zip type byte the decompressor expects. */
        *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
        pvBuf = (uint8_t *)pvBuf + 1;
        cbBuf--;
        cbInjected = 1;
        /* Payload starts right after the on-disk marker header. */
        pInflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
    }
    if (!cbBuf)
    {
        if (pcbBuf)
            *pcbBuf = cbInjected;
        return VINF_SUCCESS;
    }
    /* Hand out as much of the remaining compressed data as fits. */
    cbBuf = RT_MIN(cbBuf, pInflateState->cbCompGrain - pInflateState->iOffset);
    memcpy(pvBuf,
           (uint8_t *)pInflateState->pvCompGrain + pInflateState->iOffset,
           cbBuf);
    pInflateState->iOffset += cbBuf;
    Assert(pcbBuf);
    *pcbBuf = cbBuf + cbInjected;
    return VINF_SUCCESS;
}
#endif
807
808/**
809 * Internal: read from a file and inflate the compressed data,
810 * distinguishing between async and normal operation
811 */
812DECLINLINE(int) vmdkFileInflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
813 uint64_t uOffset, void *pvBuf,
814 size_t cbToRead, const void *pcvMarker,
815 uint64_t *puLBA, uint32_t *pcbMarkerData)
816{
817 int rc;
818#ifndef VMDK_USE_BLOCK_DECOMP_API
819 PRTZIPDECOMP pZip = NULL;
820#endif
821 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
822 size_t cbCompSize, cbActuallyRead;
823
824 if (!pcvMarker)
825 {
826 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
827 uOffset, pMarker, RT_UOFFSETOF(VMDKMARKER, uType));
828 if (RT_FAILURE(rc))
829 return rc;
830 }
831 else
832 {
833 memcpy(pMarker, pcvMarker, RT_UOFFSETOF(VMDKMARKER, uType));
834 /* pcvMarker endianness has already been partially transformed, fix it */
835 pMarker->uSector = RT_H2LE_U64(pMarker->uSector);
836 pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize);
837 }
838
839 cbCompSize = RT_LE2H_U32(pMarker->cbSize);
840 if (cbCompSize == 0)
841 {
842 AssertMsgFailed(("VMDK: corrupted marker\n"));
843 return VERR_VD_VMDK_INVALID_FORMAT;
844 }
845
846 /* Sanity check - the expansion ratio should be much less than 2. */
847 Assert(cbCompSize < 2 * cbToRead);
848 if (cbCompSize >= 2 * cbToRead)
849 return VERR_VD_VMDK_INVALID_FORMAT;
850
851 /* Compressed grain marker. Data follows immediately. */
852 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
853 uOffset + RT_UOFFSETOF(VMDKMARKER, uType),
854 (uint8_t *)pExtent->pvCompGrain
855 + RT_UOFFSETOF(VMDKMARKER, uType),
856 RT_ALIGN_Z( cbCompSize
857 + RT_UOFFSETOF(VMDKMARKER, uType),
858 512)
859 - RT_UOFFSETOF(VMDKMARKER, uType));
860
861 if (puLBA)
862 *puLBA = RT_LE2H_U64(pMarker->uSector);
863 if (pcbMarkerData)
864 *pcbMarkerData = RT_ALIGN( cbCompSize
865 + RT_UOFFSETOF(VMDKMARKER, uType),
866 512);
867
868#ifdef VMDK_USE_BLOCK_DECOMP_API
869 rc = RTZipBlockDecompress(RTZIPTYPE_ZLIB, 0 /*fFlags*/,
870 pExtent->pvCompGrain, cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType), NULL,
871 pvBuf, cbToRead, &cbActuallyRead);
872#else
873 VMDKCOMPRESSIO InflateState;
874 InflateState.pImage = pImage;
875 InflateState.iOffset = -1;
876 InflateState.cbCompGrain = cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType);
877 InflateState.pvCompGrain = pExtent->pvCompGrain;
878
879 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
880 if (RT_FAILURE(rc))
881 return rc;
882 rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
883 RTZipDecompDestroy(pZip);
884#endif /* !VMDK_USE_BLOCK_DECOMP_API */
885 if (RT_FAILURE(rc))
886 {
887 if (rc == VERR_ZIP_CORRUPTED)
888 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Compressed image is corrupted '%s'"), pExtent->pszFullname);
889 return rc;
890 }
891 if (cbActuallyRead != cbToRead)
892 rc = VERR_VD_VMDK_INVALID_FORMAT;
893 return rc;
894}
895
/**
 * Output callback for RTZipCompress: stores the produced compressed data in
 * the extent's pvCompGrain buffer. The first byte ever produced (the zip type
 * byte) is dropped and writing starts after the marker header, mirroring
 * vmdkFileInflateHelper.
 */
static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
{
    VMDKCOMPRESSIO *pDeflateState = (VMDKCOMPRESSIO *)pvUser;

    Assert(cbBuf);
    if (pDeflateState->iOffset < 0)
    {
        /* First call: skip the zip type byte, start after the marker header. */
        pvBuf = (const uint8_t *)pvBuf + 1;
        cbBuf--;
        pDeflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
    }
    if (!cbBuf)
        return VINF_SUCCESS;
    if (pDeflateState->iOffset + cbBuf > pDeflateState->cbCompGrain)
        return VERR_BUFFER_OVERFLOW;
    memcpy((uint8_t *)pDeflateState->pvCompGrain + pDeflateState->iOffset,
           pvBuf, cbBuf);
    pDeflateState->iOffset += cbBuf;
    return VINF_SUCCESS;
}
916
/**
 * Internal: deflate the uncompressed data and write to a file,
 * distinguishing between async and normal operation
 *
 * Compresses @a cbToWrite bytes from @a pvBuf into the extent's pvCompGrain
 * buffer, pads to a full sector, fills in the grain marker (LBA + compressed
 * size) and writes marker + data in one go at @a uOffset.
 *
 * @returns VBox status code.
 * @param   pcbMarkerData  Where to return the sector-aligned number of bytes
 *                         written (marker + compressed data), optional.
 */
DECLINLINE(int) vmdkFileDeflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                    uint64_t uOffset, const void *pvBuf,
                                    size_t cbToWrite, uint64_t uLBA,
                                    uint32_t *pcbMarkerData)
{
    int rc;
    PRTZIPCOMP pZip = NULL;
    VMDKCOMPRESSIO DeflateState;

    DeflateState.pImage = pImage;
    DeflateState.iOffset = -1; /* signals "first call" to the helper */
    DeflateState.cbCompGrain = pExtent->cbCompGrain;
    DeflateState.pvCompGrain = pExtent->pvCompGrain;

    rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper,
                         RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
    if (RT_FAILURE(rc))
        return rc;
    rc = RTZipCompress(pZip, pvBuf, cbToWrite);
    if (RT_SUCCESS(rc))
        rc = RTZipCompFinish(pZip);
    RTZipCompDestroy(pZip);
    if (RT_SUCCESS(rc))
    {
        Assert(   DeflateState.iOffset > 0
               && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain);

        /* pad with zeroes to get to a full sector size */
        uint32_t uSize = DeflateState.iOffset;
        if (uSize % 512)
        {
            uint32_t uSizeAlign = RT_ALIGN(uSize, 512);
            memset((uint8_t *)pExtent->pvCompGrain + uSize, '\0',
                   uSizeAlign - uSize);
            uSize = uSizeAlign;
        }

        if (pcbMarkerData)
            *pcbMarkerData = uSize;

        /* Compressed grain marker. Data follows immediately. */
        VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
        pMarker->uSector = RT_H2LE_U64(uLBA);
        pMarker->cbSize = RT_H2LE_U32(  DeflateState.iOffset
                                      - RT_UOFFSETOF(VMDKMARKER, uType));
        rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                    uOffset, pMarker, uSize);
        if (RT_FAILURE(rc))
            return rc;
    }
    return rc;
}
973
974
/**
 * Internal: check if all files are closed, prevent leaking resources.
 *
 * @returns VBox status code (first failure from vmdkFileClose wins).
 * @param   pImage  Image instance whose file list is checked/drained.
 */
static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
{
    int rc = VINF_SUCCESS, rc2;
    PVMDKFILE pVmdkFile;

    /* A non-empty list here is a bug in debug builds; in release builds the
     * loop below logs each leaked reference and closes the file anyway. */
    Assert(pImage->pFiles == NULL);
    for (pVmdkFile = pImage->pFiles;
         pVmdkFile != NULL;
         pVmdkFile = pVmdkFile->pNext)
    {
        LogRel(("VMDK: leaking reference to file \"%s\"\n",
                pVmdkFile->pszFilename));
        /* Unlink before closing so the list stays consistent. */
        pImage->pFiles = pVmdkFile->pNext;

        rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);

        /* Record the first failure; later errors do not overwrite it. */
        if (RT_SUCCESS(rc))
            rc = rc2;
    }
    return rc;
}
999
1000/**
1001 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
1002 * critical non-ASCII characters.
1003 */
1004static char *vmdkEncodeString(const char *psz)
1005{
1006 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
1007 char *pszDst = szEnc;
1008
1009 AssertPtr(psz);
1010
1011 for (; *psz; psz = RTStrNextCp(psz))
1012 {
1013 char *pszDstPrev = pszDst;
1014 RTUNICP Cp = RTStrGetCp(psz);
1015 if (Cp == '\\')
1016 {
1017 pszDst = RTStrPutCp(pszDst, Cp);
1018 pszDst = RTStrPutCp(pszDst, Cp);
1019 }
1020 else if (Cp == '\n')
1021 {
1022 pszDst = RTStrPutCp(pszDst, '\\');
1023 pszDst = RTStrPutCp(pszDst, 'n');
1024 }
1025 else if (Cp == '\r')
1026 {
1027 pszDst = RTStrPutCp(pszDst, '\\');
1028 pszDst = RTStrPutCp(pszDst, 'r');
1029 }
1030 else
1031 pszDst = RTStrPutCp(pszDst, Cp);
1032 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
1033 {
1034 pszDst = pszDstPrev;
1035 break;
1036 }
1037 }
1038 *pszDst = '\0';
1039 return RTStrDup(szEnc);
1040}
1041
1042/**
1043 * Internal: decode a string and store it into the specified string.
1044 */
1045static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
1046{
1047 int rc = VINF_SUCCESS;
1048 char szBuf[4];
1049
1050 if (!cb)
1051 return VERR_BUFFER_OVERFLOW;
1052
1053 AssertPtr(psz);
1054
1055 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
1056 {
1057 char *pszDst = szBuf;
1058 RTUNICP Cp = RTStrGetCp(pszEncoded);
1059 if (Cp == '\\')
1060 {
1061 pszEncoded = RTStrNextCp(pszEncoded);
1062 RTUNICP CpQ = RTStrGetCp(pszEncoded);
1063 if (CpQ == 'n')
1064 RTStrPutCp(pszDst, '\n');
1065 else if (CpQ == 'r')
1066 RTStrPutCp(pszDst, '\r');
1067 else if (CpQ == '\0')
1068 {
1069 rc = VERR_VD_VMDK_INVALID_HEADER;
1070 break;
1071 }
1072 else
1073 RTStrPutCp(pszDst, CpQ);
1074 }
1075 else
1076 pszDst = RTStrPutCp(pszDst, Cp);
1077
1078 /* Need to leave space for terminating NUL. */
1079 if ((size_t)(pszDst - szBuf) + 1 >= cb)
1080 {
1081 rc = VERR_BUFFER_OVERFLOW;
1082 break;
1083 }
1084 memcpy(psz, szBuf, pszDst - szBuf);
1085 psz += pszDst - szBuf;
1086 }
1087 *psz = '\0';
1088 return rc;
1089}
1090
1091/**
1092 * Internal: free all buffers associated with grain directories.
1093 */
1094static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
1095{
1096 if (pExtent->pGD)
1097 {
1098 RTMemFree(pExtent->pGD);
1099 pExtent->pGD = NULL;
1100 }
1101 if (pExtent->pRGD)
1102 {
1103 RTMemFree(pExtent->pRGD);
1104 pExtent->pRGD = NULL;
1105 }
1106}
1107
1108/**
1109 * Internal: allocate the compressed/uncompressed buffers for streamOptimized
1110 * images.
1111 */
1112static int vmdkAllocStreamBuffers(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1113{
1114 int rc = VINF_SUCCESS;
1115
1116 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1117 {
1118 /* streamOptimized extents need a compressed grain buffer, which must
1119 * be big enough to hold uncompressible data (which needs ~8 bytes
1120 * more than the uncompressed data), the marker and padding. */
1121 pExtent->cbCompGrain = RT_ALIGN_Z( VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
1122 + 8 + sizeof(VMDKMARKER), 512);
1123 pExtent->pvCompGrain = RTMemAlloc(pExtent->cbCompGrain);
1124 if (RT_LIKELY(pExtent->pvCompGrain))
1125 {
1126 /* streamOptimized extents need a decompressed grain buffer. */
1127 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1128 if (!pExtent->pvGrain)
1129 rc = VERR_NO_MEMORY;
1130 }
1131 else
1132 rc = VERR_NO_MEMORY;
1133 }
1134
1135 if (RT_FAILURE(rc))
1136 vmdkFreeStreamBuffers(pExtent);
1137 return rc;
1138}
1139
1140/**
1141 * Internal: allocate all buffers associated with grain directories.
1142 */
1143static int vmdkAllocGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1144{
1145 RT_NOREF1(pImage);
1146 int rc = VINF_SUCCESS;
1147 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1148
1149 pExtent->pGD = (uint32_t *)RTMemAllocZ(cbGD);
1150 if (RT_LIKELY(pExtent->pGD))
1151 {
1152 if (pExtent->uSectorRGD)
1153 {
1154 pExtent->pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1155 if (RT_UNLIKELY(!pExtent->pRGD))
1156 rc = VERR_NO_MEMORY;
1157 }
1158 }
1159 else
1160 rc = VERR_NO_MEMORY;
1161
1162 if (RT_FAILURE(rc))
1163 vmdkFreeGrainDirectory(pExtent);
1164 return rc;
1165}
1166
1167/**
1168 * Converts the grain directory from little to host endianess.
1169 *
1170 * @param pGD The grain directory.
1171 * @param cGDEntries Number of entries in the grain directory to convert.
1172 */
1173DECLINLINE(void) vmdkGrainDirectoryConvToHost(uint32_t *pGD, uint32_t cGDEntries)
1174{
1175 uint32_t *pGDTmp = pGD;
1176
1177 for (uint32_t i = 0; i < cGDEntries; i++, pGDTmp++)
1178 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1179}
1180
1181/**
1182 * Read the grain directory and allocated grain tables verifying them against
1183 * their back up copies if available.
1184 *
1185 * @returns VBox status code.
1186 * @param pImage Image instance data.
1187 * @param pExtent The VMDK extent.
1188 */
1189static int vmdkReadGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1190{
1191 int rc = VINF_SUCCESS;
1192 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1193
1194 AssertReturn(( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
1195 && pExtent->uSectorGD != VMDK_GD_AT_END
1196 && pExtent->uSectorRGD != VMDK_GD_AT_END), VERR_INTERNAL_ERROR);
1197
1198 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1199 if (RT_SUCCESS(rc))
1200 {
1201 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1202 * but in reality they are not compressed. */
1203 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1204 VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1205 pExtent->pGD, cbGD);
1206 if (RT_SUCCESS(rc))
1207 {
1208 vmdkGrainDirectoryConvToHost(pExtent->pGD, pExtent->cGDEntries);
1209
1210 if ( pExtent->uSectorRGD
1211 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS))
1212 {
1213 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1214 * but in reality they are not compressed. */
1215 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1216 VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1217 pExtent->pRGD, cbGD);
1218 if (RT_SUCCESS(rc))
1219 {
1220 vmdkGrainDirectoryConvToHost(pExtent->pRGD, pExtent->cGDEntries);
1221
1222 /* Check grain table and redundant grain table for consistency. */
1223 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1224 size_t cbGTBuffers = cbGT; /* Start with space for one GT. */
1225 size_t cbGTBuffersMax = _1M;
1226
1227 uint32_t *pTmpGT1 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1228 uint32_t *pTmpGT2 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1229
1230 if ( !pTmpGT1
1231 || !pTmpGT2)
1232 rc = VERR_NO_MEMORY;
1233
1234 size_t i = 0;
1235 uint32_t *pGDTmp = pExtent->pGD;
1236 uint32_t *pRGDTmp = pExtent->pRGD;
1237
1238 /* Loop through all entries. */
1239 while (i < pExtent->cGDEntries)
1240 {
1241 uint32_t uGTStart = *pGDTmp;
1242 uint32_t uRGTStart = *pRGDTmp;
1243 size_t cbGTRead = cbGT;
1244
1245 /* If no grain table is allocated skip the entry. */
1246 if (*pGDTmp == 0 && *pRGDTmp == 0)
1247 {
1248 i++;
1249 continue;
1250 }
1251
1252 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1253 {
1254 /* Just one grain directory entry refers to a not yet allocated
1255 * grain table or both grain directory copies refer to the same
1256 * grain table. Not allowed. */
1257 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1258 N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1259 break;
1260 }
1261
1262 i++;
1263 pGDTmp++;
1264 pRGDTmp++;
1265
1266 /*
1267 * Read a few tables at once if adjacent to decrease the number
1268 * of I/O requests. Read at maximum 1MB at once.
1269 */
1270 while ( i < pExtent->cGDEntries
1271 && cbGTRead < cbGTBuffersMax)
1272 {
1273 /* If no grain table is allocated skip the entry. */
1274 if (*pGDTmp == 0 && *pRGDTmp == 0)
1275 {
1276 i++;
1277 continue;
1278 }
1279
1280 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1281 {
1282 /* Just one grain directory entry refers to a not yet allocated
1283 * grain table or both grain directory copies refer to the same
1284 * grain table. Not allowed. */
1285 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1286 N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1287 break;
1288 }
1289
1290 /* Check that the start offsets are adjacent.*/
1291 if ( VMDK_SECTOR2BYTE(uGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pGDTmp)
1292 || VMDK_SECTOR2BYTE(uRGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pRGDTmp))
1293 break;
1294
1295 i++;
1296 pGDTmp++;
1297 pRGDTmp++;
1298 cbGTRead += cbGT;
1299 }
1300
1301 /* Increase buffers if required. */
1302 if ( RT_SUCCESS(rc)
1303 && cbGTBuffers < cbGTRead)
1304 {
1305 uint32_t *pTmp;
1306 pTmp = (uint32_t *)RTMemRealloc(pTmpGT1, cbGTRead);
1307 if (pTmp)
1308 {
1309 pTmpGT1 = pTmp;
1310 pTmp = (uint32_t *)RTMemRealloc(pTmpGT2, cbGTRead);
1311 if (pTmp)
1312 pTmpGT2 = pTmp;
1313 else
1314 rc = VERR_NO_MEMORY;
1315 }
1316 else
1317 rc = VERR_NO_MEMORY;
1318
1319 if (rc == VERR_NO_MEMORY)
1320 {
1321 /* Reset to the old values. */
1322 rc = VINF_SUCCESS;
1323 i -= cbGTRead / cbGT;
1324 cbGTRead = cbGT;
1325
1326 /* Don't try to increase the buffer again in the next run. */
1327 cbGTBuffersMax = cbGTBuffers;
1328 }
1329 }
1330
1331 if (RT_SUCCESS(rc))
1332 {
1333 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1334 * but in reality they are not compressed. */
1335 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1336 VMDK_SECTOR2BYTE(uGTStart),
1337 pTmpGT1, cbGTRead);
1338 if (RT_FAILURE(rc))
1339 {
1340 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1341 N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1342 break;
1343 }
1344 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1345 * but in reality they are not compressed. */
1346 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1347 VMDK_SECTOR2BYTE(uRGTStart),
1348 pTmpGT2, cbGTRead);
1349 if (RT_FAILURE(rc))
1350 {
1351 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1352 N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1353 break;
1354 }
1355 if (memcmp(pTmpGT1, pTmpGT2, cbGTRead))
1356 {
1357 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1358 N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1359 break;
1360 }
1361 }
1362 } /* while (i < pExtent->cGDEntries) */
1363
1364 /** @todo figure out what to do for unclean VMDKs. */
1365 if (pTmpGT1)
1366 RTMemFree(pTmpGT1);
1367 if (pTmpGT2)
1368 RTMemFree(pTmpGT2);
1369 }
1370 else
1371 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1372 N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1373 }
1374 }
1375 else
1376 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1377 N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname, rc);
1378 }
1379
1380 if (RT_FAILURE(rc))
1381 vmdkFreeGrainDirectory(pExtent);
1382 return rc;
1383}
1384
1385/**
1386 * Creates a new grain directory for the given extent at the given start sector.
1387 *
1388 * @returns VBox status code.
1389 * @param pImage Image instance data.
1390 * @param pExtent The VMDK extent.
1391 * @param uStartSector Where the grain directory should be stored in the image.
1392 * @param fPreAlloc Flag whether to pre allocate the grain tables at this point.
1393 */
1394static int vmdkCreateGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
1395 uint64_t uStartSector, bool fPreAlloc)
1396{
1397 int rc = VINF_SUCCESS;
1398 unsigned i;
1399 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1400 size_t cbGDRounded = RT_ALIGN_64(cbGD, 512);
1401 size_t cbGTRounded;
1402 uint64_t cbOverhead;
1403
1404 if (fPreAlloc)
1405 {
1406 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1407 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded + cbGTRounded;
1408 }
1409 else
1410 {
1411 /* Use a dummy start sector for layout computation. */
1412 if (uStartSector == VMDK_GD_AT_END)
1413 uStartSector = 1;
1414 cbGTRounded = 0;
1415 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded;
1416 }
1417
1418 /* For streamOptimized extents there is only one grain directory,
1419 * and for all others take redundant grain directory into account. */
1420 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1421 {
1422 cbOverhead = RT_ALIGN_64(cbOverhead,
1423 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1424 }
1425 else
1426 {
1427 cbOverhead += cbGDRounded + cbGTRounded;
1428 cbOverhead = RT_ALIGN_64(cbOverhead,
1429 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1430 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pExtent->pFile->pStorage, cbOverhead);
1431 }
1432
1433 if (RT_SUCCESS(rc))
1434 {
1435 pExtent->uAppendPosition = cbOverhead;
1436 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1437
1438 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1439 {
1440 pExtent->uSectorRGD = 0;
1441 pExtent->uSectorGD = uStartSector;
1442 }
1443 else
1444 {
1445 pExtent->uSectorRGD = uStartSector;
1446 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1447 }
1448
1449 rc = vmdkAllocStreamBuffers(pImage, pExtent);
1450 if (RT_SUCCESS(rc))
1451 {
1452 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1453 if ( RT_SUCCESS(rc)
1454 && fPreAlloc)
1455 {
1456 uint32_t uGTSectorLE;
1457 uint64_t uOffsetSectors;
1458
1459 if (pExtent->pRGD)
1460 {
1461 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1462 for (i = 0; i < pExtent->cGDEntries; i++)
1463 {
1464 pExtent->pRGD[i] = uOffsetSectors;
1465 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1466 /* Write the redundant grain directory entry to disk. */
1467 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1468 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1469 &uGTSectorLE, sizeof(uGTSectorLE));
1470 if (RT_FAILURE(rc))
1471 {
1472 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1473 break;
1474 }
1475 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1476 }
1477 }
1478
1479 if (RT_SUCCESS(rc))
1480 {
1481 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1482 for (i = 0; i < pExtent->cGDEntries; i++)
1483 {
1484 pExtent->pGD[i] = uOffsetSectors;
1485 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1486 /* Write the grain directory entry to disk. */
1487 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1488 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1489 &uGTSectorLE, sizeof(uGTSectorLE));
1490 if (RT_FAILURE(rc))
1491 {
1492 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1493 break;
1494 }
1495 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1496 }
1497 }
1498 }
1499 }
1500 }
1501
1502 if (RT_FAILURE(rc))
1503 vmdkFreeGrainDirectory(pExtent);
1504 return rc;
1505}
1506
1507/**
1508 * Unquotes the given string returning the result in a separate buffer.
1509 *
1510 * @returns VBox status code.
1511 * @param pImage The VMDK image state.
1512 * @param pszStr The string to unquote.
1513 * @param ppszUnquoted Where to store the return value, use RTMemTmpFree to
1514 * free.
1515 * @param ppszNext Where to store the pointer to any character following
1516 * the quoted value, optional.
1517 */
1518static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1519 char **ppszUnquoted, char **ppszNext)
1520{
1521 const char *pszStart = pszStr;
1522 char *pszQ;
1523 char *pszUnquoted;
1524
1525 /* Skip over whitespace. */
1526 while (*pszStr == ' ' || *pszStr == '\t')
1527 pszStr++;
1528
1529 if (*pszStr != '"')
1530 {
1531 pszQ = (char *)pszStr;
1532 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1533 pszQ++;
1534 }
1535 else
1536 {
1537 pszStr++;
1538 pszQ = (char *)strchr(pszStr, '"');
1539 if (pszQ == NULL)
1540 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s' (raw value %s)"),
1541 pImage->pszFilename, pszStart);
1542 }
1543
1544 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1545 if (!pszUnquoted)
1546 return VERR_NO_MEMORY;
1547 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1548 pszUnquoted[pszQ - pszStr] = '\0';
1549 *ppszUnquoted = pszUnquoted;
1550 if (ppszNext)
1551 *ppszNext = pszQ + 1;
1552 return VINF_SUCCESS;
1553}
1554
1555static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1556 const char *pszLine)
1557{
1558 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1559 ssize_t cbDiff = strlen(pszLine) + 1;
1560
1561 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1562 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1563 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1564
1565 memcpy(pEnd, pszLine, cbDiff);
1566 pDescriptor->cLines++;
1567 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1568 pDescriptor->fDirty = true;
1569
1570 return VINF_SUCCESS;
1571}
1572
1573static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1574 const char *pszKey, const char **ppszValue)
1575{
1576 size_t cbKey = strlen(pszKey);
1577 const char *pszValue;
1578
1579 while (uStart != 0)
1580 {
1581 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1582 {
1583 /* Key matches, check for a '=' (preceded by whitespace). */
1584 pszValue = pDescriptor->aLines[uStart] + cbKey;
1585 while (*pszValue == ' ' || *pszValue == '\t')
1586 pszValue++;
1587 if (*pszValue == '=')
1588 {
1589 *ppszValue = pszValue + 1;
1590 break;
1591 }
1592 }
1593 uStart = pDescriptor->aNextLines[uStart];
1594 }
1595 return !!uStart;
1596}
1597
/**
 * Sets, replaces or removes a "key=value" line in a descriptor section.
 *
 * Searches the line chain starting at @a uStart for @a pszKey.  If found, the
 * value is replaced in place (pszValue != NULL) or the whole line is removed
 * (pszValue == NULL).  If not found and a value is given, a new line is
 * appended after the last line of the section.
 *
 * @returns VBox status code, VERR_BUFFER_OVERFLOW if the descriptor is full.
 * @param   pImage       The VMDK image state (for error reporting).
 * @param   pDescriptor  The descriptor to modify.
 * @param   uStart       Line number where the section's line chain starts.
 * @param   pszKey       The key to set.
 * @param   pszValue     The new value, or NULL to delete the key.
 */
static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                          unsigned uStart,
                          const char *pszKey, const char *pszValue)
{
    char *pszTmp = NULL; /* (MSC naturally cannot figure this isn't used uninitialized) */
    size_t cbKey = strlen(pszKey);
    unsigned uLast = 0;

    /* Walk the chain looking for the key; remember the chain's last line in
     * uLast so a new entry can be appended behind it if the key is missing. */
    while (uStart != 0)
    {
        if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
        {
            /* Key matches, check for a '=' (preceded by whitespace). */
            pszTmp = pDescriptor->aLines[uStart] + cbKey;
            while (*pszTmp == ' ' || *pszTmp == '\t')
                pszTmp++;
            if (*pszTmp == '=')
            {
                pszTmp++;
                /** @todo r=bird: Doesn't skipping trailing blanks here just cause unecessary
                 *        bloat and potentially out of space error? */
                while (*pszTmp == ' ' || *pszTmp == '\t')
                    pszTmp++;
                break;
            }
        }
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }
    if (uStart)
    {
        if (pszValue)
        {
            /* Key already exists, replace existing value. */
            size_t cbOldVal = strlen(pszTmp);
            size_t cbNewVal = strlen(pszValue);
            ssize_t cbDiff = cbNewVal - cbOldVal;
            /* Check for buffer overflow. */
            if (   pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[0]
                > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
                return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

            /* Shift the tail of the text buffer, then drop the new value
             * (including its NUL) over the old one and fix up all following
             * line pointers by the size delta. */
            memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
                    pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
            memcpy(pszTmp, pszValue, cbNewVal + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
                pDescriptor->aLines[i] += cbDiff;
        }
        else
        {
            /* Key exists and shall be removed: close the gap in the text
             * buffer and shift the line/next tables down by one entry. */
            memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
                    pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            {
                pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
                if (pDescriptor->aNextLines[i])
                    pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
                else
                    pDescriptor->aNextLines[i-1] = 0;
            }
            pDescriptor->cLines--;
            /* Adjust starting line numbers of following descriptor sections. */
            if (uStart < pDescriptor->uFirstExtent)
                pDescriptor->uFirstExtent--;
            if (uStart < pDescriptor->uFirstDDB)
                pDescriptor->uFirstDDB--;
        }
    }
    else
    {
        /* Key doesn't exist, append after the last entry in this category. */
        if (!pszValue)
        {
            /* Key doesn't exist, and it should be removed. Simply a no-op. */
            return VINF_SUCCESS;
        }
        cbKey = strlen(pszKey);
        size_t cbValue = strlen(pszValue);
        ssize_t cbDiff = cbKey + 1 + cbValue + 1;
        /* Check for buffer overflow. */
        if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
            || (   pDescriptor->aLines[pDescriptor->cLines]
                - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
            return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
        /* Shift the line/next tables up by one to make room behind uLast. */
        for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
        {
            pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
            if (pDescriptor->aNextLines[i - 1])
                pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
            else
                pDescriptor->aNextLines[i] = 0;
        }
        uStart = uLast + 1;
        pDescriptor->aNextLines[uLast] = uStart;
        pDescriptor->aNextLines[uStart] = 0;
        pDescriptor->cLines++;
        /* Open a gap in the text buffer and write "key=value\0" into it. */
        pszTmp = pDescriptor->aLines[uStart];
        memmove(pszTmp + cbDiff, pszTmp,
                pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
        memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
        pDescriptor->aLines[uStart][cbKey] = '=';
        memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
        for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            pDescriptor->aLines[i] += cbDiff;

        /* Adjust starting line numbers of following descriptor sections. */
        if (uStart <= pDescriptor->uFirstExtent)
            pDescriptor->uFirstExtent++;
        if (uStart <= pDescriptor->uFirstDDB)
            pDescriptor->uFirstDDB++;
    }
    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1713
1714static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1715 uint32_t *puValue)
1716{
1717 const char *pszValue;
1718
1719 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1720 &pszValue))
1721 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1722 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1723}
1724
1725/**
1726 * Returns the value of the given key as a string allocating the necessary memory.
1727 *
1728 * @returns VBox status code.
1729 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1730 * @param pImage The VMDK image state.
1731 * @param pDescriptor The descriptor to fetch the value from.
1732 * @param pszKey The key to get the value from.
1733 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1734 * free.
1735 */
1736static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1737 const char *pszKey, char **ppszValue)
1738{
1739 const char *pszValue;
1740 char *pszValueUnquoted;
1741
1742 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1743 &pszValue))
1744 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1745 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1746 if (RT_FAILURE(rc))
1747 return rc;
1748 *ppszValue = pszValueUnquoted;
1749 return rc;
1750}
1751
1752static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1753 const char *pszKey, const char *pszValue)
1754{
1755 char *pszValueQuoted;
1756
1757 RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1758 if (!pszValueQuoted)
1759 return VERR_NO_STR_MEMORY;
1760 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1761 pszValueQuoted);
1762 RTStrFree(pszValueQuoted);
1763 return rc;
1764}
1765
/**
 * Removes the first extent line from the descriptor (used to drop the dummy
 * extent entry), compacting the text buffer and the line/next tables.
 *
 * @param   pImage       The VMDK image state (unused).
 * @param   pDescriptor  The descriptor to modify.
 */
static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
                                   PVMDKDESCRIPTOR pDescriptor)
{
    RT_NOREF1(pImage);
    unsigned uEntry = pDescriptor->uFirstExtent;
    ssize_t cbDiff;

    /* No extent section -> nothing to remove. */
    if (!uEntry)
        return;

    cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
    /* Move everything including \0 in the entry marking the end of buffer. */
    memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
            pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
    /* Shift line pointers down by one slot and by cbDiff bytes, fixing up the
     * next-line chain along the way. */
    for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
    {
        pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
        if (pDescriptor->aNextLines[i])
            pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
        else
            pDescriptor->aNextLines[i - 1] = 0;
    }
    pDescriptor->cLines--;
    /* The DDB section moved up one line. */
    if (pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB--;

    return;
}
1794
/**
 * Removes the extent line with the given line number from the descriptor,
 * compacting the text buffer and the line/next tables.
 *
 * @param   pImage       The VMDK image state (unused).
 * @param   pDescriptor  The descriptor to modify.
 * @param   uLine        Line number of the extent line to remove (0 = no-op).
 */
static void vmdkDescExtRemoveByLine(PVMDKIMAGE pImage,
                                    PVMDKDESCRIPTOR pDescriptor, unsigned uLine)
{
    RT_NOREF1(pImage);
    unsigned uEntry = uLine;
    ssize_t cbDiff;
    if (!uEntry)
        return;
    cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
    /* Move everything including \0 in the entry marking the end of buffer. */
    memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
            pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
    for (unsigned i = uEntry; i <= pDescriptor->cLines; i++)
    {
        /* The removed entry's own aLines slot is overwritten by i+1 below;
         * only shift the pointers of the lines after it. */
        if (i != uEntry)
            pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
        if (pDescriptor->aNextLines[i])
            pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
        else
            pDescriptor->aNextLines[i - 1] = 0;
    }
    pDescriptor->cLines--;
    /* The DDB section moved up one line. */
    if (pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB--;
    return;
}
1821
/**
 * Appends a new extent line ("ACCESS SECTORS TYPE [\"basename\" [offset]]")
 * to the extent section of the descriptor.
 *
 * @returns VBox status code, VERR_BUFFER_OVERFLOW if the descriptor is full.
 * @param   pImage           The VMDK image state.
 * @param   pDescriptor      The descriptor to modify.
 * @param   enmAccess        Access mode of the extent (index into apszAccess).
 * @param   cNominalSectors  Nominal size of the extent in sectors.
 * @param   enmType          Extent type (index into apszType).
 * @param   pszBasename      Extent file name (unused for ZERO extents).
 * @param   uSectorOffset    Start offset in the extent file (FLAT only).
 */
static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                             VMDKACCESS enmAccess, uint64_t cNominalSectors,
                             VMDKETYPE enmType, const char *pszBasename,
                             uint64_t uSectorOffset)
{
    static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
    static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
    char *pszTmp;
    unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
    char szExt[1024];
    ssize_t cbDiff;

    Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
    Assert((unsigned)enmType < RT_ELEMENTS(apszType));

    /* Find last entry in extent description. */
    while (uStart)
    {
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }

    /* Format the new line; ZERO extents have no file, FLAT ones carry an
     * additional start offset. */
    if (enmType == VMDKETYPE_ZERO)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
                    cNominalSectors, apszType[enmType]);
    }
    else if (enmType == VMDKETYPE_FLAT)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename, uSectorOffset);
    }
    else
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename);
    }
    cbDiff = strlen(szExt) + 1;

    /* Check for buffer overflow. */
    if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
        || (   pDescriptor->aLines[pDescriptor->cLines]
            - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
    {
        if ((pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
            && !(pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1))
        {
            /* NOTE(review): this doubles the recorded descriptor capacity but
             * does not reallocate the underlying text buffer here — presumably
             * the buffer is grown elsewhere before it is written back; verify
             * against the descriptor allocation/write-out sites. */
            pImage->cbDescAlloc *= 2;
            pDescriptor->cbDescAlloc *= 2;
        }
        else
            return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
    }

    /* Shift the line/next tables up by one slot to make room behind uLast. */
    for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
    {
        pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
        if (pDescriptor->aNextLines[i - 1])
            pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
        else
            pDescriptor->aNextLines[i] = 0;
    }
    uStart = uLast + 1;
    pDescriptor->aNextLines[uLast] = uStart;
    pDescriptor->aNextLines[uStart] = 0;
    pDescriptor->cLines++;
    /* Open a gap in the text buffer and copy the new line into it. */
    pszTmp = pDescriptor->aLines[uStart];
    memmove(pszTmp + cbDiff, pszTmp,
            pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
    memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
    for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
        pDescriptor->aLines[i] += cbDiff;

    /* Adjust starting line numbers of following descriptor sections. */
    if (uStart <= pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB++;

    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1905
1906/**
1907 * Returns the value of the given key from the DDB as a string allocating
1908 * the necessary memory.
1909 *
1910 * @returns VBox status code.
1911 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1912 * @param pImage The VMDK image state.
1913 * @param pDescriptor The descriptor to fetch the value from.
1914 * @param pszKey The key to get the value from.
1915 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1916 * free.
1917 */
1918static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1919 const char *pszKey, char **ppszValue)
1920{
1921 const char *pszValue;
1922 char *pszValueUnquoted;
1923
1924 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1925 &pszValue))
1926 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1927 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1928 if (RT_FAILURE(rc))
1929 return rc;
1930 *ppszValue = pszValueUnquoted;
1931 return rc;
1932}
1933
1934static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1935 const char *pszKey, uint32_t *puValue)
1936{
1937 const char *pszValue;
1938 char *pszValueUnquoted;
1939
1940 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1941 &pszValue))
1942 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1943 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1944 if (RT_FAILURE(rc))
1945 return rc;
1946 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1947 RTMemTmpFree(pszValueUnquoted);
1948 return rc;
1949}
1950
1951static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1952 const char *pszKey, PRTUUID pUuid)
1953{
1954 const char *pszValue;
1955 char *pszValueUnquoted;
1956
1957 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1958 &pszValue))
1959 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1960 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1961 if (RT_FAILURE(rc))
1962 return rc;
1963 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1964 RTMemTmpFree(pszValueUnquoted);
1965 return rc;
1966}
1967
1968static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1969 const char *pszKey, const char *pszVal)
1970{
1971 int rc;
1972 char *pszValQuoted;
1973
1974 if (pszVal)
1975 {
1976 RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1977 if (!pszValQuoted)
1978 return VERR_NO_STR_MEMORY;
1979 }
1980 else
1981 pszValQuoted = NULL;
1982 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1983 pszValQuoted);
1984 if (pszValQuoted)
1985 RTStrFree(pszValQuoted);
1986 return rc;
1987}
1988
1989static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1990 const char *pszKey, PCRTUUID pUuid)
1991{
1992 char *pszUuid;
1993
1994 RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1995 if (!pszUuid)
1996 return VERR_NO_STR_MEMORY;
1997 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1998 pszUuid);
1999 RTStrFree(pszUuid);
2000 return rc;
2001}
2002
2003static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
2004 const char *pszKey, uint32_t uValue)
2005{
2006 char *pszValue;
2007
2008 RTStrAPrintf(&pszValue, "\"%d\"", uValue);
2009 if (!pszValue)
2010 return VERR_NO_STR_MEMORY;
2011 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
2012 pszValue);
2013 RTStrFree(pszValue);
2014 return rc;
2015}
2016
2017/**
2018 * Splits the descriptor data into individual lines checking for correct line
2019 * endings and descriptor size.
2020 *
2021 * @returns VBox status code.
2022 * @param pImage The image instance.
2023 * @param pDesc The descriptor.
2024 * @param pszTmp The raw descriptor data from the image.
2025 */
2026static int vmdkDescSplitLines(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDesc, char *pszTmp)
2027{
2028 unsigned cLine = 0;
2029 int rc = VINF_SUCCESS;
2030
2031 while ( RT_SUCCESS(rc)
2032 && *pszTmp != '\0')
2033 {
2034 pDesc->aLines[cLine++] = pszTmp;
2035 if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
2036 {
2037 vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
2038 rc = VERR_VD_VMDK_INVALID_HEADER;
2039 break;
2040 }
2041
2042 while (*pszTmp != '\0' && *pszTmp != '\n')
2043 {
2044 if (*pszTmp == '\r')
2045 {
2046 if (*(pszTmp + 1) != '\n')
2047 {
2048 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
2049 break;
2050 }
2051 else
2052 {
2053 /* Get rid of CR character. */
2054 *pszTmp = '\0';
2055 }
2056 }
2057 pszTmp++;
2058 }
2059
2060 if (RT_FAILURE(rc))
2061 break;
2062
2063 /* Get rid of LF character. */
2064 if (*pszTmp == '\n')
2065 {
2066 *pszTmp = '\0';
2067 pszTmp++;
2068 }
2069 }
2070
2071 if (RT_SUCCESS(rc))
2072 {
2073 pDesc->cLines = cLine;
2074 /* Pointer right after the end of the used part of the buffer. */
2075 pDesc->aLines[cLine] = pszTmp;
2076 }
2077
2078 return rc;
2079}
2080
/**
 * Splits the raw descriptor into lines, validates the header comment and
 * locates the three descriptor sections (header, extent list, disk data
 * base), checking that they appear in that order.
 *
 * @returns VBox status code.
 * @param   pImage      The VMDK image state (for error reporting).
 * @param   pDescData   The raw descriptor data, split into lines in place.
 * @param   cbDescData  Size of the descriptor buffer.
 * @param   pDescriptor The descriptor structure to fill in.
 */
static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
                                    size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
{
    pDescriptor->cbDescAlloc = cbDescData;
    int rc = vmdkDescSplitLines(pImage, pDescriptor, pDescData);
    if (RT_SUCCESS(rc))
    {
        /* The first line must be one of the known header comment variants. */
        if (    strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
            && strcmp(pDescriptor->aLines[0], "# Disk Descriptor File")
            && strcmp(pDescriptor->aLines[0], "#Disk Descriptor File")
            && strcmp(pDescriptor->aLines[0], "#Disk DescriptorFile"))
            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                           N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
        else
        {
            unsigned uLastNonEmptyLine = 0;

            /* Initialize those, because we need to be able to reopen an image. */
            pDescriptor->uFirstDesc = 0;
            pDescriptor->uFirstExtent = 0;
            pDescriptor->uFirstDDB = 0;
            /* Classify each non-comment, non-empty line into one of the three
             * sections and remember the first line of each section. */
            for (unsigned i = 0; i < pDescriptor->cLines; i++)
            {
                if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
                {
                    if (    !strncmp(pDescriptor->aLines[i], "RW", 2)
                        || !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
                        || !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
                    {
                        /* An extent descriptor. */
                        if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstExtent)
                        {
                            pDescriptor->uFirstExtent = i;
                            /* Restart the per-section line chain. */
                            uLastNonEmptyLine = 0;
                        }
                    }
                    else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
                    {
                        /* A disk database entry. */
                        if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstDDB)
                        {
                            pDescriptor->uFirstDDB = i;
                            /* Restart the per-section line chain. */
                            uLastNonEmptyLine = 0;
                        }
                    }
                    else
                    {
                        /* A normal entry. */
                        if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstDesc)
                        {
                            pDescriptor->uFirstDesc = i;
                            /* Restart the per-section line chain. */
                            uLastNonEmptyLine = 0;
                        }
                    }
                    /* Link this line to the previous non-empty line of the
                     * same section; the chain restarts at section borders. */
                    if (uLastNonEmptyLine)
                        pDescriptor->aNextLines[uLastNonEmptyLine] = i;
                    uLastNonEmptyLine = i;
                }
            }
        }
    }

    return rc;
}
2166
2167static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
2168 PCVDGEOMETRY pPCHSGeometry)
2169{
2170 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2171 VMDK_DDB_GEO_PCHS_CYLINDERS,
2172 pPCHSGeometry->cCylinders);
2173 if (RT_FAILURE(rc))
2174 return rc;
2175 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2176 VMDK_DDB_GEO_PCHS_HEADS,
2177 pPCHSGeometry->cHeads);
2178 if (RT_FAILURE(rc))
2179 return rc;
2180 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2181 VMDK_DDB_GEO_PCHS_SECTORS,
2182 pPCHSGeometry->cSectors);
2183 return rc;
2184}
2185
2186static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
2187 PCVDGEOMETRY pLCHSGeometry)
2188{
2189 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2190 VMDK_DDB_GEO_LCHS_CYLINDERS,
2191 pLCHSGeometry->cCylinders);
2192 if (RT_FAILURE(rc))
2193 return rc;
2194 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2195 VMDK_DDB_GEO_LCHS_HEADS,
2196
2197 pLCHSGeometry->cHeads);
2198 if (RT_FAILURE(rc))
2199 return rc;
2200 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2201 VMDK_DDB_GEO_LCHS_SECTORS,
2202 pLCHSGeometry->cSectors);
2203 return rc;
2204}
2205
2206static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
2207 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
2208{
2209 pDescriptor->uFirstDesc = 0;
2210 pDescriptor->uFirstExtent = 0;
2211 pDescriptor->uFirstDDB = 0;
2212 pDescriptor->cLines = 0;
2213 pDescriptor->cbDescAlloc = cbDescData;
2214 pDescriptor->fDirty = false;
2215 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
2216 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
2217
2218 int rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
2219 if (RT_SUCCESS(rc))
2220 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
2221 if (RT_SUCCESS(rc))
2222 {
2223 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
2224 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2225 }
2226 if (RT_SUCCESS(rc))
2227 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
2228 if (RT_SUCCESS(rc))
2229 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
2230 if (RT_SUCCESS(rc))
2231 {
2232 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
2233 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2234 }
2235 if (RT_SUCCESS(rc))
2236 {
2237 /* The trailing space is created by VMware, too. */
2238 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
2239 }
2240 if (RT_SUCCESS(rc))
2241 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
2242 if (RT_SUCCESS(rc))
2243 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2244 if (RT_SUCCESS(rc))
2245 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
2246 if (RT_SUCCESS(rc))
2247 {
2248 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2249
2250 /* Now that the framework is in place, use the normal functions to insert
2251 * the remaining keys. */
2252 char szBuf[9];
2253 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2254 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2255 "CID", szBuf);
2256 }
2257 if (RT_SUCCESS(rc))
2258 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2259 "parentCID", "ffffffff");
2260 if (RT_SUCCESS(rc))
2261 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2262
2263 return rc;
2264}
2265
/**
 * Parses the preprocessed descriptor: validates the format version, derives
 * the image flags from the creation type, parses the extent table, reads the
 * PCHS/LCHS geometry and the image/modification/parent UUIDs (creating and
 * storing missing UUIDs when the image is writable).
 *
 * @returns VBox status code.
 * @param   pImage      The VMDK image state to fill in.
 * @param   pDescData   The raw descriptor data (split into lines in place).
 * @param   cbDescData  Size of the descriptor buffer.
 */
static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData, size_t cbDescData)
{
    int rc;
    unsigned cExtents;
    unsigned uLine;
    unsigned i;

    rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
                                  &pImage->Descriptor);
    if (RT_FAILURE(rc))
        return rc;

    /* Check version, must be 1. */
    uint32_t uVersion;
    rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
    if (uVersion != 1)
        return vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);

    /* Get image creation type and determine image flags. */
    char *pszCreateType = NULL; /* initialized to make gcc shut up */
    rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
                            &pszCreateType);
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
    if (    !strcmp(pszCreateType, "twoGbMaxExtentSparse")
        || !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
    else if (    !strcmp(pszCreateType, "partitionedDevice")
             || !strcmp(pszCreateType, "fullDevice"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
    else if (!strcmp(pszCreateType, "streamOptimized"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
    else if (!strcmp(pszCreateType, "vmfs"))
        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
    RTMemTmpFree(pszCreateType);

    /* Count the number of extent config entries. */
    for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
         uLine != 0;
         uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
        /* nothing */;

    if (!pImage->pDescData && cExtents != 1)
    {
        /* Monolithic image, must have only one extent (already opened). */
        return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
    }

    if (pImage->pDescData)
    {
        /* Non-monolithic image, extents need to be allocated. */
        rc = vmdkCreateExtents(pImage, cExtents);
        if (RT_FAILURE(rc))
            return rc;
    }

    /* Parse each extent line: "<ACCESS> <size> <TYPE> ["basename" [offset]]". */
    for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
         i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
    {
        char *pszLine = pImage->Descriptor.aLines[uLine];

        /* Access type of the extent. */
        if (!strncmp(pszLine, "RW", 2))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
            pszLine += 2;
        }
        else if (!strncmp(pszLine, "RDONLY", 6))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "NOACCESS", 8))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
            pszLine += 8;
        }
        else
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Nominal size of the extent. */
        rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                             &pImage->pExtents[i].cNominalSectors);
        if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Type of the extent. */
        if (!strncmp(pszLine, "SPARSE", 6))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "FLAT", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "ZERO", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "VMFS", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
            pszLine += 4;
        }
        else
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
        {
            /* This one has no basename or offset. */
            if (*pszLine == ' ')
                pszLine++;
            if (*pszLine != '\0')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
            pImage->pExtents[i].pszBasename = NULL;
        }
        else
        {
            /* All other extent types have basename and optional offset. */
            if (*pszLine++ != ' ')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

            /* Basename of the image. Surrounded by quotes. */
            char *pszBasename;
            rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
            if (RT_FAILURE(rc))
                return rc;
            pImage->pExtents[i].pszBasename = pszBasename;
            if (*pszLine == ' ')
            {
                pszLine++;
                if (*pszLine != '\0')
                {
                    /* Optional offset in extent specified. */
                    rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                                         &pImage->pExtents[i].uSectorOffset);
                    if (RT_FAILURE(rc))
                        return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
                }
            }

            if (*pszLine != '\0')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        }
    }

    /* Determine PCHS geometry (autogenerate if necessary). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_CYLINDERS,
                           &pImage->PCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_HEADS,
                           &pImage->PCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_SECTORS,
                           &pImage->PCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* Reject geometry values outside the ATA-style limits (16 heads, 63
     * sectors) and fall back to placeholders. */
    if (    pImage->PCHSGeometry.cCylinders == 0
        || pImage->PCHSGeometry.cHeads == 0
        || pImage->PCHSGeometry.cHeads > 16
        || pImage->PCHSGeometry.cSectors == 0
        || pImage->PCHSGeometry.cSectors > 63)
    {
        /* Mark PCHS geometry as not yet valid (can't do the calculation here
         * as the total image size isn't known yet). */
        pImage->PCHSGeometry.cCylinders = 0;
        pImage->PCHSGeometry.cHeads = 16;
        pImage->PCHSGeometry.cSectors = 63;
    }

    /* Determine LCHS geometry (set to 0 if not specified). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_CYLINDERS,
                           &pImage->LCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_HEADS,
                           &pImage->LCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_SECTORS,
                           &pImage->LCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* LCHS is all-or-nothing: a partially specified geometry is cleared. */
    if (    pImage->LCHSGeometry.cCylinders == 0
        || pImage->LCHSGeometry.cHeads == 0
        || pImage->LCHSGeometry.cSectors == 0)
    {
        pImage->LCHSGeometry.cCylinders = 0;
        pImage->LCHSGeometry.cHeads = 0;
        pImage->LCHSGeometry.cSectors = 0;
    }

    /* Get image UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
                            &pImage->ImageUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ImageUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_MODIFICATION_UUID,
                                    &pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get UUID of parent image. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
                            &pImage->ParentUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentUuid);
        else
        {
            /* A missing parent means "no parent"; store a nil UUID. The
             * status of RTUuidClear is checked defensively here. */
            rc = RTUuidClear(&pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get parent image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentModificationUuid);
        else
        {
            RTUuidClear(&pImage->ParentModificationUuid);
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_MODIFICATION_UUID,
                                    &pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    return VINF_SUCCESS;
}
2587
2588/**
2589 * Internal : Prepares the descriptor to write to the image.
2590 */
2591static int vmdkDescriptorPrepare(PVMDKIMAGE pImage, uint64_t cbLimit,
2592 void **ppvData, size_t *pcbData)
2593{
2594 int rc = VINF_SUCCESS;
2595
2596 /*
2597 * Allocate temporary descriptor buffer.
2598 * In case there is no limit allocate a default
2599 * and increase if required.
2600 */
2601 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2602 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2603 size_t offDescriptor = 0;
2604
2605 if (!pszDescriptor)
2606 return VERR_NO_MEMORY;
2607
2608 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2609 {
2610 const char *psz = pImage->Descriptor.aLines[i];
2611 size_t cb = strlen(psz);
2612
2613 /*
2614 * Increase the descriptor if there is no limit and
2615 * there is not enough room left for this line.
2616 */
2617 if (offDescriptor + cb + 1 > cbDescriptor)
2618 {
2619 if (cbLimit)
2620 {
2621 rc = vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2622 break;
2623 }
2624 else
2625 {
2626 char *pszDescriptorNew = NULL;
2627 LogFlow(("Increasing descriptor cache\n"));
2628
2629 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2630 if (!pszDescriptorNew)
2631 {
2632 rc = VERR_NO_MEMORY;
2633 break;
2634 }
2635 pszDescriptor = pszDescriptorNew;
2636 cbDescriptor += cb + 4 * _1K;
2637 }
2638 }
2639
2640 if (cb > 0)
2641 {
2642 memcpy(pszDescriptor + offDescriptor, psz, cb);
2643 offDescriptor += cb;
2644 }
2645
2646 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2647 offDescriptor++;
2648 }
2649
2650 if (RT_SUCCESS(rc))
2651 {
2652 *ppvData = pszDescriptor;
2653 *pcbData = offDescriptor;
2654 }
2655 else if (pszDescriptor)
2656 RTMemFree(pszDescriptor);
2657
2658 return rc;
2659}
2660
2661/**
2662 * Internal: write/update the descriptor part of the image.
2663 */
2664static int vmdkWriteDescriptor(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2665{
2666 int rc = VINF_SUCCESS;
2667 uint64_t cbLimit;
2668 uint64_t uOffset;
2669 PVMDKFILE pDescFile;
2670 void *pvDescriptor = NULL;
2671 size_t cbDescriptor;
2672
2673 if (pImage->pDescData)
2674 {
2675 /* Separate descriptor file. */
2676 uOffset = 0;
2677 cbLimit = 0;
2678 pDescFile = pImage->pFile;
2679 }
2680 else
2681 {
2682 /* Embedded descriptor file. */
2683 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2684 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2685 pDescFile = pImage->pExtents[0].pFile;
2686 }
2687 /* Bail out if there is no file to write to. */
2688 if (pDescFile == NULL)
2689 return VERR_INVALID_PARAMETER;
2690
2691 rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor);
2692 if (RT_SUCCESS(rc))
2693 {
2694 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pDescFile->pStorage,
2695 uOffset, pvDescriptor,
2696 cbLimit ? cbLimit : cbDescriptor,
2697 pIoCtx, NULL, NULL);
2698 if ( RT_FAILURE(rc)
2699 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
2700 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2701 }
2702
2703 if (RT_SUCCESS(rc) && !cbLimit)
2704 {
2705 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pDescFile->pStorage, cbDescriptor);
2706 if (RT_FAILURE(rc))
2707 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2708 }
2709
2710 if (RT_SUCCESS(rc))
2711 pImage->Descriptor.fDirty = false;
2712
2713 if (pvDescriptor)
2714 RTMemFree(pvDescriptor);
2715 return rc;
2716
2717}
2718
2719/**
2720 * Internal: validate the consistency check values in a binary header.
2721 */
2722static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2723{
2724 int rc = VINF_SUCCESS;
2725 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2726 {
2727 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2728 return rc;
2729 }
2730 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2731 {
2732 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2733 return rc;
2734 }
2735 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2736 && ( pHeader->singleEndLineChar != '\n'
2737 || pHeader->nonEndLineChar != ' '
2738 || pHeader->doubleEndLineChar1 != '\r'
2739 || pHeader->doubleEndLineChar2 != '\n') )
2740 {
2741 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2742 return rc;
2743 }
2744 if (RT_LE2H_U64(pHeader->descriptorSize) > VMDK_SPARSE_DESCRIPTOR_SIZE_MAX)
2745 {
2746 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor size out of bounds (%llu vs %llu) '%s'"),
2747 pExtent->pszFullname, RT_LE2H_U64(pHeader->descriptorSize), VMDK_SPARSE_DESCRIPTOR_SIZE_MAX);
2748 return rc;
2749 }
2750 return rc;
2751}
2752
2753/**
2754 * Internal: read metadata belonging to an extent with binary header, i.e.
2755 * as found in monolithic files.
2756 */
2757static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2758 bool fMagicAlreadyRead)
2759{
2760 SparseExtentHeader Header;
2761 int rc;
2762
2763 if (!fMagicAlreadyRead)
2764 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 0,
2765 &Header, sizeof(Header));
2766 else
2767 {
2768 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2769 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2770 RT_UOFFSETOF(SparseExtentHeader, version),
2771 &Header.version,
2772 sizeof(Header)
2773 - RT_UOFFSETOF(SparseExtentHeader, version));
2774 }
2775
2776 if (RT_SUCCESS(rc))
2777 {
2778 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2779 if (RT_SUCCESS(rc))
2780 {
2781 uint64_t cbFile = 0;
2782
2783 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17))
2784 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2785 pExtent->fFooter = true;
2786
2787 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2788 || ( pExtent->fFooter
2789 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2790 {
2791 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);
2792 if (RT_FAILURE(rc))
2793 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2794 }
2795
2796 if (RT_SUCCESS(rc))
2797 {
2798 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
2799 pExtent->uAppendPosition = RT_ALIGN_64(cbFile, 512);
2800
2801 if ( pExtent->fFooter
2802 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2803 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2804 {
2805 /* Read the footer, which comes before the end-of-stream marker. */
2806 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2807 cbFile - 2*512, &Header,
2808 sizeof(Header));
2809 if (RT_FAILURE(rc))
2810 {
2811 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2812 rc = VERR_VD_VMDK_INVALID_HEADER;
2813 }
2814
2815 if (RT_SUCCESS(rc))
2816 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2817 /* Prohibit any writes to this extent. */
2818 pExtent->uAppendPosition = 0;
2819 }
2820
2821 if (RT_SUCCESS(rc))
2822 {
2823 pExtent->uVersion = RT_LE2H_U32(Header.version);
2824 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2825 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2826 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2827 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2828 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2829 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2830 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2831 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2832 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2833 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2834 {
2835 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2836 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2837 }
2838 else
2839 {
2840 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2841 pExtent->uSectorRGD = 0;
2842 }
2843
2844 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2845 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2846 N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2847
2848 if ( RT_SUCCESS(rc)
2849 && ( pExtent->uSectorGD == VMDK_GD_AT_END
2850 || pExtent->uSectorRGD == VMDK_GD_AT_END)
2851 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2852 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2853 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2854 N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2855
2856 if (RT_SUCCESS(rc))
2857 {
2858 uint64_t cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2859 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2860 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2861 N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2862 else
2863 {
2864 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2865 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2866
2867 /* Fix up the number of descriptor sectors, as some flat images have
2868 * really just one, and this causes failures when inserting the UUID
2869 * values and other extra information. */
2870 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2871 {
2872 /* Do it the easy way - just fix it for flat images which have no
2873 * other complicated metadata which needs space too. */
2874 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2875 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2876 pExtent->cDescriptorSectors = 4;
2877 }
2878 }
2879 }
2880 }
2881 }
2882 }
2883 }
2884 else
2885 {
2886 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2887 rc = VERR_VD_VMDK_INVALID_HEADER;
2888 }
2889
2890 if (RT_FAILURE(rc))
2891 vmdkFreeExtentData(pImage, pExtent, false);
2892
2893 return rc;
2894}
2895
2896/**
2897 * Internal: read additional metadata belonging to an extent. For those
2898 * extents which have no additional metadata just verify the information.
2899 */
2900static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2901{
2902 int rc = VINF_SUCCESS;
2903
2904/* disabled the check as there are too many truncated vmdk images out there */
2905#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2906 uint64_t cbExtentSize;
2907 /* The image must be a multiple of a sector in size and contain the data
2908 * area (flat images only). If not, it means the image is at least
2909 * truncated, or even seriously garbled. */
2910 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbExtentSize);
2911 if (RT_FAILURE(rc))
2912 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2913 else if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2914 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2915 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2916 N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2917#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2918 if ( RT_SUCCESS(rc)
2919 && pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
2920 {
2921 /* The spec says that this must be a power of two and greater than 8,
2922 * but probably they meant not less than 8. */
2923 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2924 || pExtent->cSectorsPerGrain < 8)
2925 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2926 N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2927 else
2928 {
2929 /* This code requires that a grain table must hold a power of two multiple
2930 * of the number of entries per GT cache entry. */
2931 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2932 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2933 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2934 N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2935 else
2936 {
2937 rc = vmdkAllocStreamBuffers(pImage, pExtent);
2938 if (RT_SUCCESS(rc))
2939 {
2940 /* Prohibit any writes to this streamOptimized extent. */
2941 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2942 pExtent->uAppendPosition = 0;
2943
2944 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2945 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2946 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
2947 rc = vmdkReadGrainDirectory(pImage, pExtent);
2948 else
2949 {
2950 pExtent->uGrainSectorAbs = pExtent->cOverheadSectors;
2951 pExtent->cbGrainStreamRead = 0;
2952 }
2953 }
2954 }
2955 }
2956 }
2957
2958 if (RT_FAILURE(rc))
2959 vmdkFreeExtentData(pImage, pExtent, false);
2960
2961 return rc;
2962}
2963
2964/**
2965 * Internal: write/update the metadata for a sparse extent.
2966 */
2967static int vmdkWriteMetaSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2968 uint64_t uOffset, PVDIOCTX pIoCtx)
2969{
2970 SparseExtentHeader Header;
2971
2972 memset(&Header, '\0', sizeof(Header));
2973 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2974 Header.version = RT_H2LE_U32(pExtent->uVersion);
2975 Header.flags = RT_H2LE_U32(RT_BIT(0));
2976 if (pExtent->pRGD)
2977 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2978 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2979 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2980 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2981 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2982 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2983 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2984 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2985 if (pExtent->fFooter && uOffset == 0)
2986 {
2987 if (pExtent->pRGD)
2988 {
2989 Assert(pExtent->uSectorRGD);
2990 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2991 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2992 }
2993 else
2994 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2995 }
2996 else
2997 {
2998 if (pExtent->pRGD)
2999 {
3000 Assert(pExtent->uSectorRGD);
3001 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
3002 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
3003 }
3004 else
3005 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
3006 }
3007 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
3008 Header.uncleanShutdown = pExtent->fUncleanShutdown;
3009 Header.singleEndLineChar = '\n';
3010 Header.nonEndLineChar = ' ';
3011 Header.doubleEndLineChar1 = '\r';
3012 Header.doubleEndLineChar2 = '\n';
3013 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
3014
3015 int rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
3016 uOffset, &Header, sizeof(Header),
3017 pIoCtx, NULL, NULL);
3018 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
3019 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
3020 return rc;
3021}
3022
3023/**
3024 * Internal: free the buffers used for streamOptimized images.
3025 */
3026static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent)
3027{
3028 if (pExtent->pvCompGrain)
3029 {
3030 RTMemFree(pExtent->pvCompGrain);
3031 pExtent->pvCompGrain = NULL;
3032 }
3033 if (pExtent->pvGrain)
3034 {
3035 RTMemFree(pExtent->pvGrain);
3036 pExtent->pvGrain = NULL;
3037 }
3038}
3039
3040/**
3041 * Internal: free the memory used by the extent data structure, optionally
3042 * deleting the referenced files.
3043 *
3044 * @returns VBox status code.
3045 * @param pImage Pointer to the image instance data.
3046 * @param pExtent The extent to free.
3047 * @param fDelete Flag whether to delete the backing storage.
3048 */
3049static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
3050 bool fDelete)
3051{
3052 int rc = VINF_SUCCESS;
3053
3054 vmdkFreeGrainDirectory(pExtent);
3055 if (pExtent->pDescData)
3056 {
3057 RTMemFree(pExtent->pDescData);
3058 pExtent->pDescData = NULL;
3059 }
3060 if (pExtent->pFile != NULL)
3061 {
3062 /* Do not delete raw extents, these have full and base names equal. */
3063 rc = vmdkFileClose(pImage, &pExtent->pFile,
3064 fDelete
3065 && pExtent->pszFullname
3066 && pExtent->pszBasename
3067 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
3068 }
3069 if (pExtent->pszBasename)
3070 {
3071 RTMemTmpFree((void *)pExtent->pszBasename);
3072 pExtent->pszBasename = NULL;
3073 }
3074 if (pExtent->pszFullname)
3075 {
3076 RTStrFree((char *)(void *)pExtent->pszFullname);
3077 pExtent->pszFullname = NULL;
3078 }
3079 vmdkFreeStreamBuffers(pExtent);
3080
3081 return rc;
3082}
3083
3084/**
3085 * Internal: allocate grain table cache if necessary for this image.
3086 */
3087static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
3088{
3089 PVMDKEXTENT pExtent;
3090
3091 /* Allocate grain table cache if any sparse extent is present. */
3092 for (unsigned i = 0; i < pImage->cExtents; i++)
3093 {
3094 pExtent = &pImage->pExtents[i];
3095 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
3096 {
3097 /* Allocate grain table cache. */
3098 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
3099 if (!pImage->pGTCache)
3100 return VERR_NO_MEMORY;
3101 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
3102 {
3103 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
3104 pGCE->uExtent = UINT32_MAX;
3105 }
3106 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
3107 break;
3108 }
3109 }
3110
3111 return VINF_SUCCESS;
3112}
3113
3114/**
3115 * Internal: allocate the given number of extents.
3116 */
3117static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
3118{
3119 int rc = VINF_SUCCESS;
3120 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
3121 if (pExtents)
3122 {
3123 for (unsigned i = 0; i < cExtents; i++)
3124 {
3125 pExtents[i].pFile = NULL;
3126 pExtents[i].pszBasename = NULL;
3127 pExtents[i].pszFullname = NULL;
3128 pExtents[i].pGD = NULL;
3129 pExtents[i].pRGD = NULL;
3130 pExtents[i].pDescData = NULL;
3131 pExtents[i].uVersion = 1;
3132 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
3133 pExtents[i].uExtent = i;
3134 pExtents[i].pImage = pImage;
3135 }
3136 pImage->pExtents = pExtents;
3137 pImage->cExtents = cExtents;
3138 }
3139 else
3140 rc = VERR_NO_MEMORY;
3141
3142 return rc;
3143}
3144
3145/**
3146 * Internal: Create an additional file backed extent in split images.
3147 * Supports split sparse and flat images.
3148 *
3149 * @returns VBox status code.
3150 * @param pImage VMDK image instance.
3151 * @param cbSize Desiried size in bytes of new extent.
3152 */
3153static int vmdkAddFileBackedExtent(PVMDKIMAGE pImage, uint64_t cbSize)
3154{
3155 int rc = VINF_SUCCESS;
3156 unsigned uImageFlags = pImage->uImageFlags;
3157
3158 /* Check for unsupported image type. */
3159 if ((uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3160 || (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3161 || (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK))
3162 {
3163 return VERR_NOT_SUPPORTED;
3164 }
3165
3166 /* Allocate array of extents and copy existing extents to it. */
3167 PVMDKEXTENT pNewExtents = (PVMDKEXTENT)RTMemAllocZ((pImage->cExtents + 1) * sizeof(VMDKEXTENT));
3168 if (!pNewExtents)
3169 {
3170 return VERR_NO_MEMORY;
3171 }
3172
3173 memcpy(pNewExtents, pImage->pExtents, pImage->cExtents * sizeof(VMDKEXTENT));
3174
3175 /* Locate newly created extent and populate default metadata. */
3176 PVMDKEXTENT pExtent = &pNewExtents[pImage->cExtents];
3177
3178 pExtent->pFile = NULL;
3179 pExtent->pszBasename = NULL;
3180 pExtent->pszFullname = NULL;
3181 pExtent->pGD = NULL;
3182 pExtent->pRGD = NULL;
3183 pExtent->pDescData = NULL;
3184 pExtent->uVersion = 1;
3185 pExtent->uCompression = VMDK_COMPRESSION_NONE;
3186 pExtent->uExtent = pImage->cExtents;
3187 pExtent->pImage = pImage;
3188 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3189 pExtent->enmAccess = VMDKACCESS_READWRITE;
3190 pExtent->uSectorOffset = 0;
3191 pExtent->fMetaDirty = true;
3192
3193 /* Apply image type specific meta data. */
3194 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3195 {
3196 pExtent->enmType = VMDKETYPE_FLAT;
3197 }
3198 else
3199 {
3200 uint64_t cSectorsPerGDE, cSectorsPerGD;
3201 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
3202 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
3203 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
3204 pExtent->cGTEntries = 512;
3205 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3206 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3207 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3208 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
3209 }
3210
3211 /* Allocate and set file name for extent. */
3212 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3213 AssertPtr(pszBasenameSubstr);
3214
3215 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
3216 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
3217 RTPathStripSuffix(pszBasenameBase);
3218 char *pszTmp;
3219 size_t cbTmp;
3220
3221 if (pImage->uImageFlags & VD_IMAGE_FLAGS_FIXED)
3222 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
3223 pExtent->uExtent + 1, pszBasenameSuff);
3224 else
3225 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, pExtent->uExtent + 1,
3226 pszBasenameSuff);
3227
3228 RTStrFree(pszBasenameBase);
3229 if (!pszTmp)
3230 return VERR_NO_STR_MEMORY;
3231 cbTmp = strlen(pszTmp) + 1;
3232 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
3233 if (!pszBasename)
3234 {
3235 RTStrFree(pszTmp);
3236 return VERR_NO_MEMORY;
3237 }
3238
3239 memcpy(pszBasename, pszTmp, cbTmp);
3240 RTStrFree(pszTmp);
3241
3242 pExtent->pszBasename = pszBasename;
3243
3244 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3245 if (!pszBasedirectory)
3246 return VERR_NO_STR_MEMORY;
3247 RTPathStripFilename(pszBasedirectory);
3248 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
3249 RTStrFree(pszBasedirectory);
3250 if (!pszFullname)
3251 return VERR_NO_STR_MEMORY;
3252 pExtent->pszFullname = pszFullname;
3253
3254 /* Create file for extent. */
3255 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3256 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3257 true /* fCreate */));
3258 if (RT_FAILURE(rc))
3259 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3260
3261 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3262 {
3263 /* For flat images: Pre allocate file space. */
3264 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbSize,
3265 0 /* fFlags */, NULL, 0, 0);
3266 if (RT_FAILURE(rc))
3267 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
3268 }
3269 else
3270 {
3271 /* For sparse images: Allocate new grain directories/tables. */
3272 /* fPreAlloc should never be false because VMware can't use such images. */
3273 rc = vmdkCreateGrainDirectory(pImage, pExtent,
3274 RT_MAX( pExtent->uDescriptorSector
3275 + pExtent->cDescriptorSectors,
3276 1),
3277 true /* fPreAlloc */);
3278 if (RT_FAILURE(rc))
3279 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
3280 }
3281
3282 /* Insert new extent into descriptor file. */
3283 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
3284 pExtent->cNominalSectors, pExtent->enmType,
3285 pExtent->pszBasename, pExtent->uSectorOffset);
3286 if (RT_FAILURE(rc))
3287 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
3288
3289 pImage->pExtents = pNewExtents;
3290 pImage->cExtents++;
3291
3292 return rc;
3293}
3294
3295/**
3296 * Reads and processes the descriptor embedded in sparse images.
3297 *
3298 * @returns VBox status code.
3299 * @param pImage VMDK image instance.
3300 * @param pFile The sparse file handle.
3301 */
3302static int vmdkDescriptorReadSparse(PVMDKIMAGE pImage, PVMDKFILE pFile)
3303{
3304 /* It's a hosted single-extent image. */
3305 int rc = vmdkCreateExtents(pImage, 1);
3306 if (RT_SUCCESS(rc))
3307 {
3308 /* The opened file is passed to the extent. No separate descriptor
3309 * file, so no need to keep anything open for the image. */
3310 PVMDKEXTENT pExtent = &pImage->pExtents[0];
3311 pExtent->pFile = pFile;
3312 pImage->pFile = NULL;
3313 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
3314 if (RT_LIKELY(pExtent->pszFullname))
3315 {
3316 /* As we're dealing with a monolithic image here, there must
3317 * be a descriptor embedded in the image file. */
3318 rc = vmdkReadBinaryMetaExtent(pImage, pExtent, true /* fMagicAlreadyRead */);
3319 if ( RT_SUCCESS(rc)
3320 && pExtent->uDescriptorSector
3321 && pExtent->cDescriptorSectors)
3322 {
3323 /* HACK: extend the descriptor if it is unusually small and it fits in
3324 * the unused space after the image header. Allows opening VMDK files
3325 * with extremely small descriptor in read/write mode.
3326 *
3327 * The previous version introduced a possible regression for VMDK stream
3328 * optimized images from VMware which tend to have only a single sector sized
3329 * descriptor. Increasing the descriptor size resulted in adding the various uuid
3330 * entries required to make it work with VBox but for stream optimized images
3331 * the updated binary header wasn't written to the disk creating a mismatch
3332 * between advertised and real descriptor size.
3333 *
3334 * The descriptor size will be increased even if opened readonly now if there
3335 * enough room but the new value will not be written back to the image.
3336 */
3337 if ( pExtent->cDescriptorSectors < 3
3338 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
3339 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
3340 {
3341 uint64_t cDescriptorSectorsOld = pExtent->cDescriptorSectors;
3342
3343 pExtent->cDescriptorSectors = 4;
3344 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3345 {
3346 /*
3347 * Update the on disk number now to make sure we don't introduce inconsistencies
3348 * in case of stream optimized images from VMware where the descriptor is just
3349 * one sector big (the binary header is not written to disk for complete
3350 * stream optimized images in vmdkFlushImage()).
3351 */
3352 uint64_t u64DescSizeNew = RT_H2LE_U64(pExtent->cDescriptorSectors);
3353 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pFile->pStorage,
3354 RT_UOFFSETOF(SparseExtentHeader, descriptorSize),
3355 &u64DescSizeNew, sizeof(u64DescSizeNew));
3356 if (RT_FAILURE(rc))
3357 {
3358 LogFlowFunc(("Increasing the descriptor size failed with %Rrc\n", rc));
3359 /* Restore the old size and carry on. */
3360 pExtent->cDescriptorSectors = cDescriptorSectorsOld;
3361 }
3362 }
3363 }
3364 /* Read the descriptor from the extent. */
3365 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3366 if (RT_LIKELY(pExtent->pDescData))
3367 {
3368 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
3369 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
3370 pExtent->pDescData,
3371 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3372 if (RT_SUCCESS(rc))
3373 {
3374 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
3375 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3376 if ( RT_SUCCESS(rc)
3377 && ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3378 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)))
3379 {
3380 rc = vmdkReadMetaExtent(pImage, pExtent);
3381 if (RT_SUCCESS(rc))
3382 {
3383 /* Mark the extent as unclean if opened in read-write mode. */
3384 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3385 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3386 {
3387 pExtent->fUncleanShutdown = true;
3388 pExtent->fMetaDirty = true;
3389 }
3390 }
3391 }
3392 else if (RT_SUCCESS(rc))
3393 rc = VERR_NOT_SUPPORTED;
3394 }
3395 else
3396 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
3397 }
3398 else
3399 rc = VERR_NO_MEMORY;
3400 }
3401 else if (RT_SUCCESS(rc))
3402 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
3403 }
3404 else
3405 rc = VERR_NO_MEMORY;
3406 }
3407
3408 return rc;
3409}
3410
3411/**
3412 * Reads the descriptor from a pure text file.
3413 *
3414 * @returns VBox status code.
3415 * @param pImage VMDK image instance.
3416 * @param pFile The descriptor file handle.
3417 */
3418static int vmdkDescriptorReadAscii(PVMDKIMAGE pImage, PVMDKFILE pFile)
3419{
3420 /* Allocate at least 10K, and make sure that there is 5K free space
3421 * in case new entries need to be added to the descriptor. Never
3422 * allocate more than 128K, because that's no valid descriptor file
3423 * and will result in the correct "truncated read" error handling. */
3424 uint64_t cbFileSize;
3425 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pFile->pStorage, &cbFileSize);
3426 if ( RT_SUCCESS(rc)
3427 && cbFileSize >= 50)
3428 {
3429 uint64_t cbSize = cbFileSize;
3430 if (cbSize % VMDK_SECTOR2BYTE(10))
3431 cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
3432 else
3433 cbSize += VMDK_SECTOR2BYTE(10);
3434 cbSize = RT_MIN(cbSize, _128K);
3435 pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
3436 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
3437 if (RT_LIKELY(pImage->pDescData))
3438 {
3439 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0, pImage->pDescData,
3440 RT_MIN(pImage->cbDescAlloc, cbFileSize));
3441 if (RT_SUCCESS(rc))
3442 {
3443#if 0 /** @todo Revisit */
3444 cbRead += sizeof(u32Magic);
3445 if (cbRead == pImage->cbDescAlloc)
3446 {
3447 /* Likely the read is truncated. Better fail a bit too early
3448 * (normally the descriptor is much smaller than our buffer). */
3449 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
3450 goto out;
3451 }
3452#endif
3453 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
3454 pImage->cbDescAlloc);
3455 if (RT_SUCCESS(rc))
3456 {
3457 for (unsigned i = 0; i < pImage->cExtents && RT_SUCCESS(rc); i++)
3458 {
3459 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3460 if (pExtent->pszBasename)
3461 {
3462 /* Hack to figure out whether the specified name in the
3463 * extent descriptor is absolute. Doesn't always work, but
3464 * should be good enough for now. */
3465 char *pszFullname;
3466 /** @todo implement proper path absolute check. */
3467 if (pExtent->pszBasename[0] == RTPATH_SLASH)
3468 {
3469 pszFullname = RTStrDup(pExtent->pszBasename);
3470 if (!pszFullname)
3471 {
3472 rc = VERR_NO_MEMORY;
3473 break;
3474 }
3475 }
3476 else
3477 {
3478 char *pszDirname = RTStrDup(pImage->pszFilename);
3479 if (!pszDirname)
3480 {
3481 rc = VERR_NO_MEMORY;
3482 break;
3483 }
3484 RTPathStripFilename(pszDirname);
3485 pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
3486 RTStrFree(pszDirname);
3487 if (!pszFullname)
3488 {
3489 rc = VERR_NO_STR_MEMORY;
3490 break;
3491 }
3492 }
3493 pExtent->pszFullname = pszFullname;
3494 }
3495 else
3496 pExtent->pszFullname = NULL;
3497
3498 unsigned uOpenFlags = pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0);
3499 switch (pExtent->enmType)
3500 {
3501 case VMDKETYPE_HOSTED_SPARSE:
3502 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3503 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3504 if (RT_FAILURE(rc))
3505 {
3506 /* Do NOT signal an appropriate error here, as the VD
3507 * layer has the choice of retrying the open if it
3508 * failed. */
3509 break;
3510 }
3511 rc = vmdkReadBinaryMetaExtent(pImage, pExtent,
3512 false /* fMagicAlreadyRead */);
3513 if (RT_FAILURE(rc))
3514 break;
3515 rc = vmdkReadMetaExtent(pImage, pExtent);
3516 if (RT_FAILURE(rc))
3517 break;
3518
3519 /* Mark extent as unclean if opened in read-write mode. */
3520 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3521 {
3522 pExtent->fUncleanShutdown = true;
3523 pExtent->fMetaDirty = true;
3524 }
3525 break;
3526 case VMDKETYPE_VMFS:
3527 case VMDKETYPE_FLAT:
3528 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3529 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3530 if (RT_FAILURE(rc))
3531 {
3532 /* Do NOT signal an appropriate error here, as the VD
3533 * layer has the choice of retrying the open if it
3534 * failed. */
3535 break;
3536 }
3537 break;
3538 case VMDKETYPE_ZERO:
3539 /* Nothing to do. */
3540 break;
3541 default:
3542 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3543 }
3544 }
3545 }
3546 }
3547 else
3548 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
3549 }
3550 else
3551 rc = VERR_NO_MEMORY;
3552 }
3553 else if (RT_SUCCESS(rc))
3554 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor in '%s' is too short"), pImage->pszFilename);
3555
3556 return rc;
3557}
3558
3559/**
3560 * Read and process the descriptor based on the image type.
3561 *
3562 * @returns VBox status code.
3563 * @param pImage VMDK image instance.
3564 * @param pFile VMDK file handle.
3565 */
3566static int vmdkDescriptorRead(PVMDKIMAGE pImage, PVMDKFILE pFile)
3567{
3568 uint32_t u32Magic;
3569
3570 /* Read magic (if present). */
3571 int rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0,
3572 &u32Magic, sizeof(u32Magic));
3573 if (RT_SUCCESS(rc))
3574 {
3575 /* Handle the file according to its magic number. */
3576 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3577 rc = vmdkDescriptorReadSparse(pImage, pFile);
3578 else
3579 rc = vmdkDescriptorReadAscii(pImage, pFile);
3580 }
3581 else
3582 {
3583 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3584 rc = VERR_VD_VMDK_INVALID_HEADER;
3585 }
3586
3587 return rc;
3588}
3589
3590/**
3591 * Internal: Open an image, constructing all necessary data structures.
3592 */
3593static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
3594{
3595 pImage->uOpenFlags = uOpenFlags;
3596 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
3597 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
3598 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
3599
3600 /*
3601 * Open the image.
3602 * We don't have to check for asynchronous access because
3603 * we only support raw access and the opened file is a description
3604 * file were no data is stored.
3605 */
3606 PVMDKFILE pFile;
3607 int rc = vmdkFileOpen(pImage, &pFile, NULL, pImage->pszFilename,
3608 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3609 if (RT_SUCCESS(rc))
3610 {
3611 pImage->pFile = pFile;
3612
3613 rc = vmdkDescriptorRead(pImage, pFile);
3614 if (RT_SUCCESS(rc))
3615 {
3616 /* Determine PCHS geometry if not set. */
3617 if (pImage->PCHSGeometry.cCylinders == 0)
3618 {
3619 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3620 / pImage->PCHSGeometry.cHeads
3621 / pImage->PCHSGeometry.cSectors;
3622 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3623 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3624 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3625 {
3626 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3627 AssertRC(rc);
3628 }
3629 }
3630
3631 /* Update the image metadata now in case has changed. */
3632 rc = vmdkFlushImage(pImage, NULL);
3633 if (RT_SUCCESS(rc))
3634 {
3635 /* Figure out a few per-image constants from the extents. */
3636 pImage->cbSize = 0;
3637 for (unsigned i = 0; i < pImage->cExtents; i++)
3638 {
3639 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3640 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
3641 {
3642 /* Here used to be a check whether the nominal size of an extent
3643 * is a multiple of the grain size. The spec says that this is
3644 * always the case, but unfortunately some files out there in the
3645 * wild violate the spec (e.g. ReactOS 0.3.1). */
3646 }
3647 else if ( pExtent->enmType == VMDKETYPE_FLAT
3648 || pExtent->enmType == VMDKETYPE_ZERO)
3649 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3650
3651 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3652 }
3653
3654 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3655 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3656 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
3657 rc = vmdkAllocateGrainTableCache(pImage);
3658 }
3659 }
3660 }
3661 /* else: Do NOT signal an appropriate error here, as the VD layer has the
3662 * choice of retrying the open if it failed. */
3663
3664 if (RT_SUCCESS(rc))
3665 {
3666 PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
3667 pImage->RegionList.fFlags = 0;
3668 pImage->RegionList.cRegions = 1;
3669
3670 pRegion->offRegion = 0; /* Disk start. */
3671 pRegion->cbBlock = 512;
3672 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
3673 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
3674 pRegion->cbData = 512;
3675 pRegion->cbMetadata = 0;
3676 pRegion->cRegionBlocksOrBytes = pImage->cbSize;
3677 }
3678 else
3679 vmdkFreeImage(pImage, false, false /*fFlush*/); /* Don't try to flush anything if opening failed. */
3680 return rc;
3681}
3682
3683/**
3684 * Frees a raw descriptor.
3685 * @internal
3686 */
3687static int vmdkRawDescFree(PVDISKRAW pRawDesc)
3688{
3689 if (!pRawDesc)
3690 return VINF_SUCCESS;
3691
3692 RTStrFree(pRawDesc->pszRawDisk);
3693 pRawDesc->pszRawDisk = NULL;
3694
3695 /* Partitions: */
3696 for (unsigned i = 0; i < pRawDesc->cPartDescs; i++)
3697 {
3698 RTStrFree(pRawDesc->pPartDescs[i].pszRawDevice);
3699 pRawDesc->pPartDescs[i].pszRawDevice = NULL;
3700
3701 RTMemFree(pRawDesc->pPartDescs[i].pvPartitionData);
3702 pRawDesc->pPartDescs[i].pvPartitionData = NULL;
3703 }
3704
3705 RTMemFree(pRawDesc->pPartDescs);
3706 pRawDesc->pPartDescs = NULL;
3707
3708 RTMemFree(pRawDesc);
3709 return VINF_SUCCESS;
3710}
3711
3712/**
3713 * Helper that grows the raw partition descriptor table by @a cToAdd entries,
3714 * returning the pointer to the first new entry.
3715 * @internal
3716 */
3717static int vmdkRawDescAppendPartDesc(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint32_t cToAdd, PVDISKRAWPARTDESC *ppRet)
3718{
3719 uint32_t const cOld = pRawDesc->cPartDescs;
3720 uint32_t const cNew = cOld + cToAdd;
3721 PVDISKRAWPARTDESC paNew = (PVDISKRAWPARTDESC)RTMemReallocZ(pRawDesc->pPartDescs,
3722 cOld * sizeof(pRawDesc->pPartDescs[0]),
3723 cNew * sizeof(pRawDesc->pPartDescs[0]));
3724 if (paNew)
3725 {
3726 pRawDesc->cPartDescs = cNew;
3727 pRawDesc->pPartDescs = paNew;
3728
3729 *ppRet = &paNew[cOld];
3730 return VINF_SUCCESS;
3731 }
3732 *ppRet = NULL;
3733 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
3734 N_("VMDK: Image path: '%s'. Out of memory growing the partition descriptors (%u -> %u)."),
3735 pImage->pszFilename, cOld, cNew);
3736}
3737
3738/**
3739 * @callback_method_impl{FNRTSORTCMP}
3740 */
3741static DECLCALLBACK(int) vmdkRawDescPartComp(void const *pvElement1, void const *pvElement2, void *pvUser)
3742{
3743 RT_NOREF(pvUser);
3744 int64_t const iDelta = ((PVDISKRAWPARTDESC)pvElement1)->offStartInVDisk - ((PVDISKRAWPARTDESC)pvElement2)->offStartInVDisk;
3745 return iDelta < 0 ? -1 : iDelta > 0 ? 1 : 0;
3746}
3747
3748/**
3749 * Post processes the partition descriptors.
3750 *
3751 * Sorts them and check that they don't overlap.
3752 */
3753static int vmdkRawDescPostProcessPartitions(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint64_t cbSize)
3754{
3755 /*
3756 * Sort data areas in ascending order of start.
3757 */
3758 RTSortShell(pRawDesc->pPartDescs, pRawDesc->cPartDescs, sizeof(pRawDesc->pPartDescs[0]), vmdkRawDescPartComp, NULL);
3759
3760 /*
3761 * Check that we don't have overlapping descriptors. If we do, that's an
3762 * indication that the drive is corrupt or that the RTDvm code is buggy.
3763 */
3764 VDISKRAWPARTDESC const *paPartDescs = pRawDesc->pPartDescs;
3765 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
3766 {
3767 uint64_t offLast = paPartDescs[i].offStartInVDisk + paPartDescs[i].cbData;
3768 if (offLast <= paPartDescs[i].offStartInVDisk)
3769 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3770 N_("VMDK: Image path: '%s'. Bogus partition descriptor #%u (%#RX64 LB %#RX64%s): Wrap around or zero"),
3771 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3772 paPartDescs[i].pvPartitionData ? " (data)" : "");
3773 offLast -= 1;
3774
3775 if (i + 1 < pRawDesc->cPartDescs && offLast >= paPartDescs[i + 1].offStartInVDisk)
3776 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3777 N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) overlaps with the next (%#RX64 LB %#RX64%s)"),
3778 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3779 paPartDescs[i].pvPartitionData ? " (data)" : "", paPartDescs[i + 1].offStartInVDisk,
3780 paPartDescs[i + 1].cbData, paPartDescs[i + 1].pvPartitionData ? " (data)" : "");
3781 if (offLast >= cbSize)
3782 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3783 N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) goes beyond the end of the drive (%#RX64)"),
3784 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3785 paPartDescs[i].pvPartitionData ? " (data)" : "", cbSize);
3786 }
3787
3788 return VINF_SUCCESS;
3789}
3790
3791
3792#ifdef RT_OS_LINUX
/**
 * Searches the dir specified in @a pszBlockDevDir for subdirectories with a
 * 'dev' file matching @a uDevToLocate.
 *
 * Used when verifying raw partition paths on Linux (see
 * vmdkRawDescVerifyPartitionPath): first to locate the drive's directory
 * under /sys/block/, then to locate the partition's directory beneath it.
 *
 * @returns IPRT status code, errors have been reported properly.
 * @param   pImage          For error reporting.
 * @param   pszBlockDevDir  Input: Path to the directory search under.
 *                          Output: Path to the directory containing information
 *                          for @a uDevToLocate.
 * @param   cbBlockDevDir   The size of the buffer @a pszBlockDevDir points to.
 * @param   uDevToLocate    The device number of the block device info dir to
 *                          locate.
 * @param   pszDevToLocate  For error reporting.
 */
static int vmdkFindSysBlockDevPath(PVMDKIMAGE pImage, char *pszBlockDevDir, size_t cbBlockDevDir,
                                   dev_t uDevToLocate, const char *pszDevToLocate)
{
    /* Ensure a trailing separator so directory entry names can simply be
       appended at offset cchDir below. */
    size_t const cchDir = RTPathEnsureTrailingSeparator(pszBlockDevDir, cbBlockDevDir);
    AssertReturn(cchDir > 0, VERR_BUFFER_OVERFLOW);

    RTDIR hDir = NIL_RTDIR;
    int rc = RTDirOpen(&hDir, pszBlockDevDir);
    if (RT_SUCCESS(rc))
    {
        for (;;)
        {
            RTDIRENTRY Entry;
            rc = RTDirRead(hDir, &Entry, NULL);
            if (RT_SUCCESS(rc))
            {
                /* We're interested in directories and symlinks. */
                if (   Entry.enmType == RTDIRENTRYTYPE_DIRECTORY
                    || Entry.enmType == RTDIRENTRYTYPE_SYMLINK
                    || Entry.enmType == RTDIRENTRYTYPE_UNKNOWN)
                {
                    /* Append the entry name, then probe its 'dev' file.  On a
                       match we break out with pszBlockDevDir pointing at the
                       matching directory and rc == VINF_SUCCESS. */
                    rc = RTStrCopy(&pszBlockDevDir[cchDir], cbBlockDevDir - cchDir, Entry.szName);
                    AssertContinue(RT_SUCCESS(rc)); /* should not happen! */

                    dev_t uThisDevNo = ~uDevToLocate; /* init to a value that cannot match */
                    rc = RTLinuxSysFsReadDevNumFile(&uThisDevNo, "%s/dev", pszBlockDevDir);
                    if (RT_SUCCESS(rc) && uThisDevNo == uDevToLocate)
                        break;
                }
            }
            else
            {
                /* Enumeration finished or failed: strip the appended entry name
                   back off before reporting the error. */
                pszBlockDevDir[cchDir] = '\0';
                if (rc == VERR_NO_MORE_FILES)
                    rc = vdIfError(pImage->pIfError, VERR_NOT_FOUND, RT_SRC_POS,
                                   N_("VMDK: Image path: '%s'. Failed to locate device corresponding to '%s' under '%s'"),
                                   pImage->pszFilename, pszDevToLocate, pszBlockDevDir);
                else
                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                                   N_("VMDK: Image path: '%s'. RTDirRead failed enumerating '%s': %Rrc"),
                                   pImage->pszFilename, pszBlockDevDir, rc);
                break;
            }
        }
        RTDirClose(hDir);
    }
    else
        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                       N_("VMDK: Image path: '%s'. Failed to open dir '%s' for listing: %Rrc"),
                       pImage->pszFilename, pszBlockDevDir, rc);
    return rc;
}
3861#endif /* RT_OS_LINUX */
3862
3863#ifdef RT_OS_FREEBSD
3864
3865
3866/**
3867 * Reads the config data from the provider and returns offset and size
3868 *
3869 * @return IPRT status code
3870 * @param pProvider GEOM provider representing partition
3871 * @param pcbOffset Placeholder for the offset of the partition
3872 * @param pcbSize Placeholder for the size of the partition
3873 */
3874static int vmdkReadPartitionsParamsFromProvider(gprovider *pProvider, uint64_t *pcbOffset, uint64_t *pcbSize)
3875{
3876 gconfig *pConfEntry;
3877 int rc = VERR_NOT_FOUND;
3878
3879 /*
3880 * Required parameters are located in the list containing key/value pairs.
3881 * Both key and value are in text form. Manuals tells nothing about the fact
3882 * that the both parameters should be present in the list. Thus, there are
3883 * cases when only one parameter is presented. To handle such cases we treat
3884 * absent params as zero allowing the caller decide the case is either correct
3885 * or an error.
3886 */
3887 uint64_t cbOffset = 0;
3888 uint64_t cbSize = 0;
3889 LIST_FOREACH(pConfEntry, &pProvider->lg_config, lg_config)
3890 {
3891 if (RTStrCmp(pConfEntry->lg_name, "offset") == 0)
3892 {
3893 cbOffset = RTStrToUInt64(pConfEntry->lg_val);
3894 rc = VINF_SUCCESS;
3895 }
3896 else if (RTStrCmp(pConfEntry->lg_name, "length") == 0)
3897 {
3898 cbSize = RTStrToUInt64(pConfEntry->lg_val);
3899 rc = VINF_SUCCESS;
3900 }
3901 }
3902 if (RT_SUCCESS(rc))
3903 {
3904 *pcbOffset = cbOffset;
3905 *pcbSize = cbSize;
3906 }
3907 return rc;
3908}
3909
3910
/**
 * Searches the partition specified by name and calculates its size and absolute offset.
 *
 * @return  IPRT status code.
 * @param   pParentClass       Class containing pParentGeom
 * @param   pszParentGeomName  Name of the parent geom where we are looking for provider
 * @param   pszProviderName    Name of the provider we are looking for
 * @param   pcbAbsoluteOffset  Placeholder for the absolute offset of the partition, i.e. offset from the beginning of the disk
 * @param   pcbSize            Placeholder for the size of the partition.
 */
static int vmdkFindPartitionParamsByName(gclass *pParentClass, const char *pszParentGeomName, const char *pszProviderName,
                                         uint64_t *pcbAbsoluteOffset, uint64_t *pcbSize)
{
    AssertReturn(pParentClass, VERR_INVALID_PARAMETER);
    AssertReturn(pszParentGeomName, VERR_INVALID_PARAMETER);
    AssertReturn(pszProviderName, VERR_INVALID_PARAMETER);
    AssertReturn(pcbAbsoluteOffset, VERR_INVALID_PARAMETER);
    AssertReturn(pcbSize, VERR_INVALID_PARAMETER);

    /* Locate the parent geom by name within the given class. */
    ggeom *pParentGeom;
    int rc = VERR_NOT_FOUND;
    LIST_FOREACH(pParentGeom, &pParentClass->lg_geom, lg_geom)
    {
        if (RTStrCmp(pParentGeom->lg_name, pszParentGeomName) == 0)
        {
            rc = VINF_SUCCESS;
            break;
        }
    }
    if (RT_FAILURE(rc))
        return rc;

    gprovider *pProvider;
    /*
     * First, go over the providers without handling EBR or BSDLabel
     * partitions, for the case where the provider we are looking for is a
     * direct child of the given geom, to reduce searching time.
     */
    LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider)
    {
        if (RTStrCmp(pProvider->lg_name, pszProviderName) == 0)
            return vmdkReadPartitionsParamsFromProvider(pProvider, pcbAbsoluteOffset, pcbSize);
    }

    /*
     * No provider found.  Go over the parent geom again and recurse in case a
     * provider represents an EBR or BSDLabel container: the given parent geom
     * then contains only the EBR/BSDLabel partition itself and its
     * sub-partitions live in separate geoms.  Partition offsets are relative
     * to their geom, so the child provider's offset has to be added to the
     * parent provider's offset to get the absolute one.
     */

    LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider)
    {
        uint64_t cbOffset = 0;
        uint64_t cbSize = 0;
        rc = vmdkReadPartitionsParamsFromProvider(pProvider, &cbOffset, &cbSize);
        if (RT_FAILURE(rc))
            return rc;

        uint64_t cbProviderOffset = 0;
        uint64_t cbProviderSize = 0;
        /* Recurse with this provider as the parent geom name. */
        rc = vmdkFindPartitionParamsByName(pParentClass, pProvider->lg_name, pszProviderName, &cbProviderOffset, &cbProviderSize);
        if (RT_SUCCESS(rc))
        {
            *pcbAbsoluteOffset = cbOffset + cbProviderOffset;
            *pcbSize = cbProviderSize;
            return rc;
        }
    }

    return VERR_NOT_FOUND;
}
3986#endif
3987
3988
3989/**
3990 * Attempts to verify the raw partition path.
3991 *
3992 * We don't want to trust RTDvm and the partition device node morphing blindly.
3993 */
3994static int vmdkRawDescVerifyPartitionPath(PVMDKIMAGE pImage, PVDISKRAWPARTDESC pPartDesc, uint32_t idxPartition,
3995 const char *pszRawDrive, RTFILE hRawDrive, uint32_t cbSector, RTDVMVOLUME hVol)
3996{
3997 RT_NOREF(pImage, pPartDesc, idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
3998
3999 /*
4000 * Try open the raw partition device.
4001 */
4002 RTFILE hRawPart = NIL_RTFILE;
4003 int rc = RTFileOpen(&hRawPart, pPartDesc->pszRawDevice, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
4004 if (RT_FAILURE(rc))
4005 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4006 N_("VMDK: Image path: '%s'. Failed to open partition #%u on '%s' via '%s' (%Rrc)"),
4007 pImage->pszFilename, idxPartition, pszRawDrive, pPartDesc->pszRawDevice, rc);
4008
4009 /*
4010 * Compare the partition UUID if we can get it.
4011 */
4012#ifdef RT_OS_WINDOWS
4013 DWORD cbReturned;
4014
4015 /* 1. Get the device numbers for both handles, they should have the same disk. */
4016 STORAGE_DEVICE_NUMBER DevNum1;
4017 RT_ZERO(DevNum1);
4018 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
4019 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum1, sizeof(DevNum1), &cbReturned, NULL /*pOverlapped*/))
4020 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4021 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
4022 pImage->pszFilename, pszRawDrive, GetLastError());
4023
4024 STORAGE_DEVICE_NUMBER DevNum2;
4025 RT_ZERO(DevNum2);
4026 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawPart), IOCTL_STORAGE_GET_DEVICE_NUMBER,
4027 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum2, sizeof(DevNum2), &cbReturned, NULL /*pOverlapped*/))
4028 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4029 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
4030 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError());
4031 if ( RT_SUCCESS(rc)
4032 && ( DevNum1.DeviceNumber != DevNum2.DeviceNumber
4033 || DevNum1.DeviceType != DevNum2.DeviceType))
4034 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4035 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x)"),
4036 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4037 DevNum1.DeviceNumber, DevNum2.DeviceNumber, DevNum1.DeviceType, DevNum2.DeviceType);
4038 if (RT_SUCCESS(rc))
4039 {
4040 /* Get the partitions from the raw drive and match up with the volume info
4041 from RTDvm. The partition number is found in DevNum2. */
4042 DWORD cbNeeded = 0;
4043 if ( DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
4044 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, NULL, 0, &cbNeeded, NULL /*pOverlapped*/)
4045 || cbNeeded < RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[1]))
4046 cbNeeded = RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[64]);
4047 cbNeeded += sizeof(PARTITION_INFORMATION_EX) * 2; /* just in case */
4048 DRIVE_LAYOUT_INFORMATION_EX *pLayout = (DRIVE_LAYOUT_INFORMATION_EX *)RTMemTmpAllocZ(cbNeeded);
4049 if (pLayout)
4050 {
4051 cbReturned = 0;
4052 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
4053 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, pLayout, cbNeeded, &cbReturned, NULL /*pOverlapped*/))
4054 {
4055 /* Find the entry with the given partition number (it's not an index, array contains empty MBR entries ++). */
4056 unsigned iEntry = 0;
4057 while ( iEntry < pLayout->PartitionCount
4058 && pLayout->PartitionEntry[iEntry].PartitionNumber != DevNum2.PartitionNumber)
4059 iEntry++;
4060 if (iEntry < pLayout->PartitionCount)
4061 {
4062 /* Compare the basics */
4063 PARTITION_INFORMATION_EX const * const pLayoutEntry = &pLayout->PartitionEntry[iEntry];
4064 if (pLayoutEntry->StartingOffset.QuadPart != (int64_t)pPartDesc->offStartInVDisk)
4065 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4066 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': StartingOffset %RU64, expected %RU64"),
4067 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4068 pLayoutEntry->StartingOffset.QuadPart, pPartDesc->offStartInVDisk);
4069 else if (pLayoutEntry->PartitionLength.QuadPart != (int64_t)pPartDesc->cbData)
4070 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4071 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionLength %RU64, expected %RU64"),
4072 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4073 pLayoutEntry->PartitionLength.QuadPart, pPartDesc->cbData);
4074 /** @todo We could compare the MBR type, GPT type and ID. */
4075 RT_NOREF(hVol);
4076 }
4077 else
4078 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4079 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionCount (%#x vs %#x)"),
4080 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4081 DevNum2.PartitionNumber, pLayout->PartitionCount);
4082# ifndef LOG_ENABLED
4083 if (RT_FAILURE(rc))
4084# endif
4085 {
4086 LogRel(("VMDK: Windows reports %u partitions for '%s':\n", pLayout->PartitionCount, pszRawDrive));
4087 PARTITION_INFORMATION_EX const *pEntry = &pLayout->PartitionEntry[0];
4088 for (DWORD i = 0; i < pLayout->PartitionCount; i++, pEntry++)
4089 {
4090 LogRel(("VMDK: #%u/%u: %016RU64 LB %016RU64 style=%d rewrite=%d",
4091 i, pEntry->PartitionNumber, pEntry->StartingOffset.QuadPart, pEntry->PartitionLength.QuadPart,
4092 pEntry->PartitionStyle, pEntry->RewritePartition));
4093 if (pEntry->PartitionStyle == PARTITION_STYLE_MBR)
4094 LogRel((" type=%#x boot=%d rec=%d hidden=%u\n", pEntry->Mbr.PartitionType, pEntry->Mbr.BootIndicator,
4095 pEntry->Mbr.RecognizedPartition, pEntry->Mbr.HiddenSectors));
4096 else if (pEntry->PartitionStyle == PARTITION_STYLE_GPT)
4097 LogRel((" type=%RTuuid id=%RTuuid aatrib=%RX64 name=%.36ls\n", &pEntry->Gpt.PartitionType,
4098 &pEntry->Gpt.PartitionId, pEntry->Gpt.Attributes, &pEntry->Gpt.Name[0]));
4099 else
4100 LogRel(("\n"));
4101 }
4102 LogRel(("VMDK: Looked for partition #%u (%u, '%s') at %RU64 LB %RU64\n", DevNum2.PartitionNumber,
4103 idxPartition, pPartDesc->pszRawDevice, pPartDesc->offStartInVDisk, pPartDesc->cbData));
4104 }
4105 }
4106 else
4107 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4108 N_("VMDK: Image path: '%s'. IOCTL_DISK_GET_DRIVE_LAYOUT_EX failed on '%s': %u (cb %u, cbRet %u)"),
4109 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError(), cbNeeded, cbReturned);
4110 RTMemTmpFree(pLayout);
4111 }
4112 else
4113 rc = VERR_NO_TMP_MEMORY;
4114 }
4115
4116#elif defined(RT_OS_LINUX)
4117 RT_NOREF(hVol);
4118
4119 /* Stat the two devices first to get their device numbers. (We probably
4120 could make some assumptions here about the major & minor number assignments
4121 for legacy nodes, but it doesn't hold up for nvme, so we'll skip that.) */
4122 struct stat StDrive, StPart;
4123 if (fstat((int)RTFileToNative(hRawDrive), &StDrive) != 0)
4124 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4125 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
4126 else if (fstat((int)RTFileToNative(hRawPart), &StPart) != 0)
4127 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4128 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pPartDesc->pszRawDevice, errno);
4129 else
4130 {
4131 /* Scan the directories immediately under /sys/block/ for one with a
4132 'dev' file matching the drive's device number: */
4133 char szSysPath[RTPATH_MAX];
4134 rc = RTLinuxConstructPath(szSysPath, sizeof(szSysPath), "block/");
4135 AssertRCReturn(rc, rc); /* this shall not fail */
4136 if (RTDirExists(szSysPath))
4137 {
4138 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StDrive.st_rdev, pszRawDrive);
4139
4140 /* Now, scan the directories under that again for a partition device
4141 matching the hRawPart device's number: */
4142 if (RT_SUCCESS(rc))
4143 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StPart.st_rdev, pPartDesc->pszRawDevice);
4144
4145 /* Having found the /sys/block/device/partition/ path, we can finally
4146 read the partition attributes and compare with hVol. */
4147 if (RT_SUCCESS(rc))
4148 {
4149 /* partition number: */
4150 int64_t iLnxPartition = 0;
4151 rc = RTLinuxSysFsReadIntFile(10, &iLnxPartition, "%s/partition", szSysPath);
4152 if (RT_SUCCESS(rc) && iLnxPartition != idxPartition)
4153 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4154 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Partition number %RI64, expected %RU32"),
4155 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, iLnxPartition, idxPartition);
4156 /* else: ignore failure? */
4157
4158 /* start offset: */
4159 uint32_t const cbLnxSector = 512; /* It's hardcoded in the Linux kernel */
4160 if (RT_SUCCESS(rc))
4161 {
4162 int64_t offLnxStart = -1;
4163 rc = RTLinuxSysFsReadIntFile(10, &offLnxStart, "%s/start", szSysPath);
4164 offLnxStart *= cbLnxSector;
4165 if (RT_SUCCESS(rc) && offLnxStart != (int64_t)pPartDesc->offStartInVDisk)
4166 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4167 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"),
4168 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, offLnxStart, pPartDesc->offStartInVDisk);
4169 /* else: ignore failure? */
4170 }
4171
4172 /* the size: */
4173 if (RT_SUCCESS(rc))
4174 {
4175 int64_t cbLnxData = -1;
4176 rc = RTLinuxSysFsReadIntFile(10, &cbLnxData, "%s/size", szSysPath);
4177 cbLnxData *= cbLnxSector;
4178 if (RT_SUCCESS(rc) && cbLnxData != (int64_t)pPartDesc->cbData)
4179 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4180 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RI64, expected %RU64"),
4181 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbLnxData, pPartDesc->cbData);
4182 /* else: ignore failure? */
4183 }
4184 }
4185 }
4186 /* else: We've got nothing to work on, so only do content comparison. */
4187 }
4188
4189#elif defined(RT_OS_FREEBSD)
4190 char szDriveDevName[256];
4191 char* pszDevName = fdevname_r(RTFileToNative(hRawDrive), szDriveDevName, 256);
4192 if (pszDevName == NULL)
4193 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4194 N_("VMDK: Image path: '%s'. '%s' is not a drive path"), pImage->pszFilename, pszRawDrive);
4195 char szPartDevName[256];
4196 if (RT_SUCCESS(rc))
4197 {
4198 pszDevName = fdevname_r(RTFileToNative(hRawPart), szPartDevName, 256);
4199 if (pszDevName == NULL)
4200 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4201 N_("VMDK: Image path: '%s'. '%s' is not a partition path"), pImage->pszFilename, pPartDesc->pszRawDevice);
4202 }
4203 if (RT_SUCCESS(rc))
4204 {
4205 gmesh geomMesh;
4206 int err = geom_gettree(&geomMesh);
4207 if (err == 0)
4208 {
4209 /* Find root class containg partitions info */
4210 gclass* pPartClass;
4211 LIST_FOREACH(pPartClass, &geomMesh.lg_class, lg_class)
4212 {
4213 if (RTStrCmp(pPartClass->lg_name, "PART") == 0)
4214 break;
4215 }
4216 if (pPartClass == NULL || RTStrCmp(pPartClass->lg_name, "PART") != 0)
4217 rc = vdIfError(pImage->pIfError, VERR_GENERAL_FAILURE, RT_SRC_POS,
4218 N_("VMDK: Image path: '%s'. 'PART' class not found in the GEOM tree"), pImage->pszFilename);
4219
4220
4221 if (RT_SUCCESS(rc))
4222 {
4223 /* Find provider representing partition device */
4224 uint64_t cbOffset;
4225 uint64_t cbSize;
4226 rc = vmdkFindPartitionParamsByName(pPartClass, szDriveDevName, szPartDevName, &cbOffset, &cbSize);
4227 if (RT_SUCCESS(rc))
4228 {
4229 if (cbOffset != pPartDesc->offStartInVDisk)
4230 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4231 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RU64, expected %RU64"),
4232 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
4233 if (cbSize != pPartDesc->cbData)
4234 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4235 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RU64, expected %RU64"),
4236 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
4237 }
4238 else
4239 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4240 N_("VMDK: Image path: '%s'. Error getting geom provider for the partition '%s' of the drive '%s' in the GEOM tree: %Rrc"),
4241 pImage->pszFilename, pPartDesc->pszRawDevice, pszRawDrive, rc);
4242 }
4243
4244 geom_deletetree(&geomMesh);
4245 }
4246 else
4247 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(err), RT_SRC_POS,
4248 N_("VMDK: Image path: '%s'. geom_gettree failed: %d"), pImage->pszFilename, err);
4249 }
4250
4251#elif defined(RT_OS_SOLARIS)
4252 RT_NOREF(hVol);
4253
4254 dk_cinfo dkiDriveInfo;
4255 dk_cinfo dkiPartInfo;
4256 if (ioctl(RTFileToNative(hRawDrive), DKIOCINFO, (caddr_t)&dkiDriveInfo) == -1)
4257 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4258 N_("VMDK: Image path: '%s'. DKIOCINFO failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
4259 else if (ioctl(RTFileToNative(hRawPart), DKIOCINFO, (caddr_t)&dkiPartInfo) == -1)
4260 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4261 N_("VMDK: Image path: '%s'. DKIOCINFO failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
4262 else if ( dkiDriveInfo.dki_ctype != dkiPartInfo.dki_ctype
4263 || dkiDriveInfo.dki_cnum != dkiPartInfo.dki_cnum
4264 || dkiDriveInfo.dki_addr != dkiPartInfo.dki_addr
4265 || dkiDriveInfo.dki_unit != dkiPartInfo.dki_unit
4266 || dkiDriveInfo.dki_slave != dkiPartInfo.dki_slave)
4267 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4268 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x || %#x != %#x || %#x != %#x || %#x != %#x)"),
4269 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4270 dkiDriveInfo.dki_ctype, dkiPartInfo.dki_ctype, dkiDriveInfo.dki_cnum, dkiPartInfo.dki_cnum,
4271 dkiDriveInfo.dki_addr, dkiPartInfo.dki_addr, dkiDriveInfo.dki_unit, dkiPartInfo.dki_unit,
4272 dkiDriveInfo.dki_slave, dkiPartInfo.dki_slave);
4273 else
4274 {
4275 uint64_t cbOffset = 0;
4276 uint64_t cbSize = 0;
4277 dk_gpt *pEfi = NULL;
4278 int idxEfiPart = efi_alloc_and_read(RTFileToNative(hRawPart), &pEfi);
4279 if (idxEfiPart >= 0)
4280 {
4281 if ((uint32_t)dkiPartInfo.dki_partition + 1 == idxPartition)
4282 {
4283 cbOffset = pEfi->efi_parts[idxEfiPart].p_start * pEfi->efi_lbasize;
4284 cbSize = pEfi->efi_parts[idxEfiPart].p_size * pEfi->efi_lbasize;
4285 }
4286 else
4287 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4288 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s' (%#x != %#x)"),
4289 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4290 idxPartition, (uint32_t)dkiPartInfo.dki_partition + 1);
4291 efi_free(pEfi);
4292 }
4293 else
4294 {
4295 /*
4296 * Manual says the efi_alloc_and_read returns VT_EINVAL if no EFI partition table found.
4297 * Actually, the function returns any error, e.g. VT_ERROR. Thus, we are not sure, is it
4298 * real error or just no EFI table found. Therefore, let's try to obtain partition info
4299 * using another way. If there is an error, it returns errno which will be handled below.
4300 */
4301
4302 uint32_t numPartition = (uint32_t)dkiPartInfo.dki_partition;
4303 if (numPartition > NDKMAP)
4304 numPartition -= NDKMAP;
4305 if (numPartition != idxPartition)
4306 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4307 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s' (%#x != %#x)"),
4308 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4309 idxPartition, numPartition);
4310 else
4311 {
4312 dk_minfo_ext mediaInfo;
4313 if (ioctl(RTFileToNative(hRawPart), DKIOCGMEDIAINFOEXT, (caddr_t)&mediaInfo) == -1)
4314 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4315 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s'. Can not obtain partition info: %d"),
4316 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4317 else
4318 {
4319 extpart_info extPartInfo;
4320 if (ioctl(RTFileToNative(hRawPart), DKIOCEXTPARTINFO, (caddr_t)&extPartInfo) != -1)
4321 {
4322 cbOffset = (uint64_t)extPartInfo.p_start * mediaInfo.dki_lbsize;
4323 cbSize = (uint64_t)extPartInfo.p_length * mediaInfo.dki_lbsize;
4324 }
4325 else
4326 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4327 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s'. Can not obtain partition info: %d"),
4328 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4329 }
4330 }
4331 }
4332 if (RT_SUCCESS(rc) && cbOffset != pPartDesc->offStartInVDisk)
4333 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4334 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"),
4335 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
4336
4337 if (RT_SUCCESS(rc) && cbSize != pPartDesc->cbData)
4338 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4339 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RI64, expected %RU64"),
4340 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
4341 }
4342
4343#elif defined(RT_OS_DARWIN)
4344 /* Stat the drive get its device number. */
4345 struct stat StDrive;
4346 if (fstat((int)RTFileToNative(hRawDrive), &StDrive) != 0)
4347 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4348 N_("VMDK: Image path: '%s'. fstat failed on '%s' (errno=%d)"), pImage->pszFilename, pszRawDrive, errno);
4349 else
4350 {
4351 if (ioctl(RTFileToNative(hRawPart), DKIOCLOCKPHYSICALEXTENTS, NULL) == -1)
4352 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4353 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to lock the partition (errno=%d)"),
4354 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4355 else
4356 {
4357 uint32_t cbBlockSize = 0;
4358 uint64_t cbOffset = 0;
4359 uint64_t cbSize = 0;
4360 if (ioctl(RTFileToNative(hRawPart), DKIOCGETBLOCKSIZE, (caddr_t)&cbBlockSize) == -1)
4361 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4362 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the sector size of the partition (errno=%d)"),
4363 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4364 else if (ioctl(RTFileToNative(hRawPart), DKIOCGETBASE, (caddr_t)&cbOffset) == -1)
4365 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4366 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the start offset of the partition (errno=%d)"),
4367 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4368 else if (ioctl(RTFileToNative(hRawPart), DKIOCGETBLOCKCOUNT, (caddr_t)&cbSize) == -1)
4369 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4370 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the size of the partition (errno=%d)"),
4371 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4372 else
4373 {
4374 cbSize *= (uint64_t)cbBlockSize;
4375 dk_physical_extent_t dkPartExtent = {0};
4376 dkPartExtent.offset = 0;
4377 dkPartExtent.length = cbSize;
4378 if (ioctl(RTFileToNative(hRawPart), DKIOCGETPHYSICALEXTENT, (caddr_t)&dkPartExtent) == -1)
4379 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4380 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain partition info (errno=%d)"),
4381 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4382 else
4383 {
4384 if (dkPartExtent.dev != StDrive.st_rdev)
4385 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4386 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Drive does not contain the partition"),
4387 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive);
4388 else if (cbOffset != pPartDesc->offStartInVDisk)
4389 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4390 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RU64, expected %RU64"),
4391 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
4392 else if (cbSize != pPartDesc->cbData)
4393 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4394 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RU64, expected %RU64"),
4395 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
4396 }
4397 }
4398
4399 if (ioctl(RTFileToNative(hRawPart), DKIOCUNLOCKPHYSICALEXTENTS, NULL) == -1)
4400 {
4401 int rc2 = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4402 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to unlock the partition (errno=%d)"),
4403 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4404 if (RT_SUCCESS(rc))
4405 rc = rc2;
4406 }
4407 }
4408 }
4409
4410#else
4411 RT_NOREF(hVol); /* PORTME */
4412 rc = VERR_NOT_SUPPORTED;
4413#endif
4414 if (RT_SUCCESS(rc))
4415 {
4416 /*
4417 * Compare the first 32 sectors of the partition.
4418 *
4419 * This might not be conclusive, but for partitions formatted with the more
4420 * common file systems it should be as they have a superblock copy at or near
4421 * the start of the partition (fat, fat32, ntfs, and ext4 does at least).
4422 */
4423 size_t const cbToCompare = (size_t)RT_MIN(pPartDesc->cbData / cbSector, 32) * cbSector;
4424 uint8_t *pbSector1 = (uint8_t *)RTMemTmpAlloc(cbToCompare * 2);
4425 if (pbSector1 != NULL)
4426 {
4427 uint8_t *pbSector2 = pbSector1 + cbToCompare;
4428
4429 /* Do the comparing, we repeat if it fails and the data might be volatile. */
4430 uint64_t uPrevCrc1 = 0;
4431 uint64_t uPrevCrc2 = 0;
4432 uint32_t cStable = 0;
4433 for (unsigned iTry = 0; iTry < 256; iTry++)
4434 {
4435 rc = RTFileReadAt(hRawDrive, pPartDesc->offStartInVDisk, pbSector1, cbToCompare, NULL);
4436 if (RT_SUCCESS(rc))
4437 {
4438 rc = RTFileReadAt(hRawPart, pPartDesc->offStartInDevice, pbSector2, cbToCompare, NULL);
4439 if (RT_SUCCESS(rc))
4440 {
4441 if (memcmp(pbSector1, pbSector2, cbToCompare) != 0)
4442 {
4443 rc = VERR_MISMATCH;
4444
4445 /* Do data stability checks before repeating: */
4446 uint64_t const uCrc1 = RTCrc64(pbSector1, cbToCompare);
4447 uint64_t const uCrc2 = RTCrc64(pbSector2, cbToCompare);
4448 if ( uPrevCrc1 != uCrc1
4449 || uPrevCrc2 != uCrc2)
4450 cStable = 0;
4451 else if (++cStable > 4)
4452 break;
4453 uPrevCrc1 = uCrc1;
4454 uPrevCrc2 = uCrc2;
4455 continue;
4456 }
4457 rc = VINF_SUCCESS;
4458 }
4459 else
4460 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4461 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
4462 pImage->pszFilename, cbToCompare, pPartDesc->pszRawDevice, pPartDesc->offStartInDevice, rc);
4463 }
4464 else
4465 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4466 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
4467 pImage->pszFilename, cbToCompare, pszRawDrive, pPartDesc->offStartInVDisk, rc);
4468 break;
4469 }
4470 if (rc == VERR_MISMATCH)
4471 {
4472 /* Find the first mismatching bytes: */
4473 size_t offMissmatch = 0;
4474 while (offMissmatch < cbToCompare && pbSector1[offMissmatch] == pbSector2[offMissmatch])
4475 offMissmatch++;
4476 int cbSample = (int)RT_MIN(cbToCompare - offMissmatch, 16);
4477
4478 if (cStable > 0)
4479 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4480 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (cStable=%d @%#zx: %.*Rhxs vs %.*Rhxs)"),
4481 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cStable,
4482 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]);
4483 else
4484 {
4485 LogRel(("VMDK: Image path: '%s'. Partition #%u path ('%s') verification undecided on '%s' because of unstable data! (@%#zx: %.*Rhxs vs %.*Rhxs)\n",
4486 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4487 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]));
4488 rc = -rc;
4489 }
4490 }
4491
4492 RTMemTmpFree(pbSector1);
4493 }
4494 else
4495 rc = vdIfError(pImage->pIfError, VERR_NO_TMP_MEMORY, RT_SRC_POS,
4496 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for a temporary read buffer\n"),
4497 pImage->pszFilename, cbToCompare * 2);
4498 }
4499 RTFileClose(hRawPart);
4500 return rc;
4501}
4502
4503#ifdef RT_OS_WINDOWS
4504/**
4505 * Construct the device name for the given partition number.
4506 */
4507static int vmdkRawDescWinMakePartitionName(PVMDKIMAGE pImage, const char *pszRawDrive, RTFILE hRawDrive, uint32_t idxPartition,
4508 char **ppszRawPartition)
4509{
4510 int rc = VINF_SUCCESS;
4511 DWORD cbReturned = 0;
4512 STORAGE_DEVICE_NUMBER DevNum;
4513 RT_ZERO(DevNum);
4514 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
4515 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum, sizeof(DevNum), &cbReturned, NULL /*pOverlapped*/))
4516 RTStrAPrintf(ppszRawPartition, "\\\\.\\Harddisk%uPartition%u", DevNum.DeviceNumber, idxPartition);
4517 else
4518 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4519 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
4520 pImage->pszFilename, pszRawDrive, GetLastError());
4521 return rc;
4522}
4523#endif /* RT_OS_WINDOWS */
4524
4525/**
4526 * Worker for vmdkMakeRawDescriptor that adds partition descriptors when the
4527 * 'Partitions' configuration value is present.
4528 *
4529 * @returns VBox status code, error message has been set on failure.
4530 *
4531 * @note Caller is assumed to clean up @a pRawDesc and release
4532 * @a *phVolToRelease.
4533 * @internal
4534 */
4535static int vmdkRawDescDoPartitions(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
4536 RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector,
4537 uint32_t fPartitions, uint32_t fPartitionsReadOnly, bool fRelative,
4538 PRTDVMVOLUME phVolToRelease)
4539{
4540 *phVolToRelease = NIL_RTDVMVOLUME;
4541
4542 /* Check sanity/understanding. */
4543 Assert(fPartitions);
4544 Assert((fPartitions & fPartitionsReadOnly) == fPartitionsReadOnly); /* RO should be a sub-set */
4545
4546 /*
4547 * Allocate on descriptor for each volume up front.
4548 */
4549 uint32_t const cVolumes = RTDvmMapGetValidVolumes(hVolMgr);
4550
4551 PVDISKRAWPARTDESC paPartDescs = NULL;
4552 int rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, cVolumes, &paPartDescs);
4553 AssertRCReturn(rc, rc);
4554
4555 /*
4556 * Enumerate the partitions (volumes) on the disk and create descriptors for each of them.
4557 */
4558 uint32_t fPartitionsLeft = fPartitions;
4559 RTDVMVOLUME hVol = NIL_RTDVMVOLUME; /* the current volume, needed for getting the next. */
4560 for (uint32_t i = 0; i < cVolumes; i++)
4561 {
4562 /*
4563 * Get the next/first volume and release the current.
4564 */
4565 RTDVMVOLUME hVolNext = NIL_RTDVMVOLUME;
4566 if (i == 0)
4567 rc = RTDvmMapQueryFirstVolume(hVolMgr, &hVolNext);
4568 else
4569 rc = RTDvmMapQueryNextVolume(hVolMgr, hVol, &hVolNext);
4570 if (RT_FAILURE(rc))
4571 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4572 N_("VMDK: Image path: '%s'. Volume enumeration failed at volume #%u on '%s' (%Rrc)"),
4573 pImage->pszFilename, i, pszRawDrive, rc);
4574 uint32_t cRefs = RTDvmVolumeRelease(hVol);
4575 Assert(cRefs != UINT32_MAX); RT_NOREF(cRefs);
4576 *phVolToRelease = hVol = hVolNext;
4577
4578 /*
4579 * Depending on the fPartitions selector and associated read-only mask,
4580 * the guest either gets read-write or read-only access (bits set)
4581 * or no access (selector bit clear, access directed to the VMDK).
4582 */
4583 paPartDescs[i].cbData = RTDvmVolumeGetSize(hVol);
4584
4585 uint64_t offVolumeEndIgnored = 0;
4586 rc = RTDvmVolumeQueryRange(hVol, &paPartDescs[i].offStartInVDisk, &offVolumeEndIgnored);
4587 if (RT_FAILURE(rc))
4588 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4589 N_("VMDK: Image path: '%s'. Failed to get location of volume #%u on '%s' (%Rrc)"),
4590 pImage->pszFilename, i, pszRawDrive, rc);
4591 Assert(paPartDescs[i].cbData == offVolumeEndIgnored + 1 - paPartDescs[i].offStartInVDisk);
4592
4593 /* Note! The index must match IHostDrivePartition::number. */
4594 uint32_t idxPartition = RTDvmVolumeGetIndex(hVol, RTDVMVOLIDX_HOST);
4595 if ( idxPartition < 32
4596 && (fPartitions & RT_BIT_32(idxPartition)))
4597 {
4598 fPartitionsLeft &= ~RT_BIT_32(idxPartition);
4599 if (fPartitionsReadOnly & RT_BIT_32(idxPartition))
4600 paPartDescs[i].uFlags |= VDISKRAW_READONLY;
4601
4602 if (!fRelative)
4603 {
4604 /*
4605 * Accessing the drive thru the main device node (pRawDesc->pszRawDisk).
4606 */
4607 paPartDescs[i].offStartInDevice = paPartDescs[i].offStartInVDisk;
4608 paPartDescs[i].pszRawDevice = RTStrDup(pszRawDrive);
4609 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4610 }
4611 else
4612 {
4613 /*
4614 * Relative means access the partition data via the device node for that
4615 * partition, allowing the sysadmin/OS to allow a user access to individual
4616 * partitions without necessarily being able to compromise the host OS.
4617 * Obviously, the creation of the VMDK requires read access to the main
4618 * device node for the drive, but that's a one-time thing and can be done
4619 * by the sysadmin. Here data starts at offset zero in the device node.
4620 */
4621 paPartDescs[i].offStartInDevice = 0;
4622
4623#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
4624 /* /dev/rdisk1 -> /dev/rdisk1s2 (s=slice) */
4625 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition);
4626#elif defined(RT_OS_LINUX)
4627 /* Two naming schemes here: /dev/nvme0n1 -> /dev/nvme0n1p1; /dev/sda -> /dev/sda1 */
4628 RTStrAPrintf(&paPartDescs[i].pszRawDevice,
4629 RT_C_IS_DIGIT(pszRawDrive[strlen(pszRawDrive) - 1]) ? "%sp%u" : "%s%u", pszRawDrive, idxPartition);
4630#elif defined(RT_OS_WINDOWS)
4631 rc = vmdkRawDescWinMakePartitionName(pImage, pszRawDrive, hRawDrive, idxPartition, &paPartDescs[i].pszRawDevice);
4632 AssertRCReturn(rc, rc);
4633#elif defined(RT_OS_SOLARIS)
4634 if (pRawDesc->enmPartitioningType == VDISKPARTTYPE_MBR)
4635 {
4636 /*
4637 * MBR partitions have device nodes in form /dev/(r)dsk/cXtYdZpK
4638 * where X is the controller,
4639 * Y is target (SCSI device number),
4640 * Z is disk number,
4641 * K is partition number,
4642 * where p0 is the whole disk
4643 * p1-pN are the partitions of the disk
4644 */
4645 const char *pszRawDrivePath = pszRawDrive;
4646 char szDrivePath[RTPATH_MAX];
4647 size_t cbRawDrive = strlen(pszRawDrive);
4648 if ( cbRawDrive > 1 && strcmp(&pszRawDrive[cbRawDrive - 2], "p0") == 0)
4649 {
4650 memcpy(szDrivePath, pszRawDrive, cbRawDrive - 2);
4651 szDrivePath[cbRawDrive - 2] = '\0';
4652 pszRawDrivePath = szDrivePath;
4653 }
4654 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%sp%u", pszRawDrivePath, idxPartition);
4655 }
4656 else /* GPT */
4657 {
4658 /*
4659 * GPT partitions have device nodes in form /dev/(r)dsk/cXtYdZsK
4660 * where X is the controller,
4661 * Y is target (SCSI device number),
4662 * Z is disk number,
4663 * K is partition number, zero based. Can be only from 0 to 6.
4664 * Thus, only partitions numbered 0 through 6 have device nodes.
4665 */
4666 if (idxPartition > 7)
4667 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4668 N_("VMDK: Image path: '%s'. the partition #%u on '%s' has no device node and can not be specified with 'Relative' property"),
4669 pImage->pszFilename, idxPartition, pszRawDrive);
4670 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition - 1);
4671 }
4672#else
4673 AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* The option parsing code should have prevented this - PORTME */
4674#endif
4675 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4676
4677 rc = vmdkRawDescVerifyPartitionPath(pImage, &paPartDescs[i], idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
4678 AssertRCReturn(rc, rc);
4679 }
4680 }
4681 else
4682 {
4683 /* Not accessible to the guest. */
4684 paPartDescs[i].offStartInDevice = 0;
4685 paPartDescs[i].pszRawDevice = NULL;
4686 }
4687 } /* for each volume */
4688
4689 RTDvmVolumeRelease(hVol);
4690 *phVolToRelease = NIL_RTDVMVOLUME;
4691
4692 /*
4693 * Check that we found all the partitions the user selected.
4694 */
4695 if (fPartitionsLeft)
4696 {
4697 char szLeft[3 * sizeof(fPartitions) * 8];
4698 size_t cchLeft = 0;
4699 for (unsigned i = 0; i < sizeof(fPartitions) * 8; i++)
4700 if (fPartitionsLeft & RT_BIT_32(i))
4701 cchLeft += RTStrPrintf(&szLeft[cchLeft], sizeof(szLeft) - cchLeft, cchLeft ? "%u" : ",%u", i);
4702 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4703 N_("VMDK: Image path: '%s'. Not all the specified partitions for drive '%s' was found: %s"),
4704 pImage->pszFilename, pszRawDrive, szLeft);
4705 }
4706
4707 return VINF_SUCCESS;
4708}
4709
4710/**
4711 * Worker for vmdkMakeRawDescriptor that adds partition descriptors with copies
4712 * of the partition tables and associated padding areas when the 'Partitions'
4713 * configuration value is present.
4714 *
4715 * The guest is not allowed access to the partition tables, however it needs
4716 * them to be able to access the drive. So, create descriptors for each of the
4717 * tables and attach the current disk content. vmdkCreateRawImage() will later
4718 * write the content to the VMDK. Any changes the guest later makes to the
4719 * partition tables will then go to the VMDK copy, rather than the host drive.
4720 *
4721 * @returns VBox status code, error message has been set on failure.
4722 *
4723 * @note Caller is assumed to clean up @a pRawDesc
4724 * @internal
4725 */
4726static int vmdkRawDescDoCopyPartitionTables(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
4727 const char *pszRawDrive, RTFILE hRawDrive, void *pvBootSector, size_t cbBootSector)
4728{
4729 /*
4730 * Query the locations.
4731 */
4732 /* Determin how many locations there are: */
4733 size_t cLocations = 0;
4734 int rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, NULL, 0, &cLocations);
4735 if (rc != VERR_BUFFER_OVERFLOW)
4736 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4737 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4738 pImage->pszFilename, pszRawDrive, rc);
4739 AssertReturn(cLocations > 0 && cLocations < _16M, VERR_INTERNAL_ERROR_5);
4740
4741 /* We can allocate the partition descriptors here to save an intentation level. */
4742 PVDISKRAWPARTDESC paPartDescs = NULL;
4743 rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, (uint32_t)cLocations, &paPartDescs);
4744 AssertRCReturn(rc, rc);
4745
4746 /* Allocate the result table and repeat the location table query: */
4747 PRTDVMTABLELOCATION paLocations = (PRTDVMTABLELOCATION)RTMemAllocZ(sizeof(paLocations[0]) * cLocations);
4748 if (!paLocations)
4749 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS, N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes"),
4750 pImage->pszFilename, sizeof(paLocations[0]) * cLocations);
4751 rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, paLocations, cLocations, NULL);
4752 if (RT_SUCCESS(rc))
4753 {
4754 /*
4755 * Translate them into descriptors.
4756 *
4757 * We restrict the amount of partition alignment padding to 4MiB as more
4758 * will just be a waste of space. The use case for including the padding
4759 * are older boot loaders and boot manager (including one by a team member)
4760 * that put data and code in the 62 sectors between the MBR and the first
4761 * partition (total of 63). Later CHS was abandond and partition started
4762 * being aligned on power of two sector boundraries (typically 64KiB or
4763 * 1MiB depending on the media size).
4764 */
4765 for (size_t i = 0; i < cLocations && RT_SUCCESS(rc); i++)
4766 {
4767 Assert(paLocations[i].cb > 0);
4768 if (paLocations[i].cb <= _64M)
4769 {
4770 /* Create the partition descriptor entry: */
4771 //paPartDescs[i].pszRawDevice = NULL;
4772 //paPartDescs[i].offStartInDevice = 0;
4773 //paPartDescs[i].uFlags = 0;
4774 paPartDescs[i].offStartInVDisk = paLocations[i].off;
4775 paPartDescs[i].cbData = paLocations[i].cb;
4776 if (paPartDescs[i].cbData < _4M)
4777 paPartDescs[i].cbData = RT_MIN(paPartDescs[i].cbData + paLocations[i].cbPadding, _4M);
4778 paPartDescs[i].pvPartitionData = RTMemAllocZ((size_t)paPartDescs[i].cbData);
4779 if (paPartDescs[i].pvPartitionData)
4780 {
4781 /* Read the content from the drive: */
4782 rc = RTFileReadAt(hRawDrive, paPartDescs[i].offStartInVDisk, paPartDescs[i].pvPartitionData,
4783 (size_t)paPartDescs[i].cbData, NULL);
4784 if (RT_SUCCESS(rc))
4785 {
4786 /* Do we have custom boot sector code? */
4787 if (pvBootSector && cbBootSector && paPartDescs[i].offStartInVDisk == 0)
4788 {
4789 /* Note! Old code used to quietly drop the bootsector if it was considered too big.
4790 Instead we fail as we weren't able to do what the user requested us to do.
4791 Better if the user knows than starts questioning why the guest isn't
4792 booting as expected. */
4793 if (cbBootSector <= paPartDescs[i].cbData)
4794 memcpy(paPartDescs[i].pvPartitionData, pvBootSector, cbBootSector);
4795 else
4796 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4797 N_("VMDK: Image path: '%s'. The custom boot sector is too big: %zu bytes, %RU64 bytes available"),
4798 pImage->pszFilename, cbBootSector, paPartDescs[i].cbData);
4799 }
4800 }
4801 else
4802 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4803 N_("VMDK: Image path: '%s'. Failed to read partition at off %RU64 length %zu from '%s' (%Rrc)"),
4804 pImage->pszFilename, paPartDescs[i].offStartInVDisk,
4805 (size_t)paPartDescs[i].cbData, pszRawDrive, rc);
4806 }
4807 else
4808 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4809 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for copying the partition table at off %RU64"),
4810 pImage->pszFilename, (size_t)paPartDescs[i].cbData, paPartDescs[i].offStartInVDisk);
4811 }
4812 else
4813 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4814 N_("VMDK: Image path: '%s'. Partition table #%u at offset %RU64 in '%s' is to big: %RU64 bytes"),
4815 pImage->pszFilename, i, paLocations[i].off, pszRawDrive, paLocations[i].cb);
4816 }
4817 }
4818 else
4819 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4820 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4821 pImage->pszFilename, pszRawDrive, rc);
4822 RTMemFree(paLocations);
4823 return rc;
4824}
4825
4826/**
4827 * Opens the volume manager for the raw drive when in selected-partition mode.
4828 *
4829 * @param pImage The VMDK image (for errors).
4830 * @param hRawDrive The raw drive handle.
4831 * @param pszRawDrive The raw drive device path (for errors).
4832 * @param cbSector The sector size.
4833 * @param phVolMgr Where to return the handle to the volume manager on
4834 * success.
4835 * @returns VBox status code, errors have been reported.
4836 * @internal
4837 */
4838static int vmdkRawDescOpenVolMgr(PVMDKIMAGE pImage, RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector, PRTDVM phVolMgr)
4839{
4840 *phVolMgr = NIL_RTDVM;
4841
4842 RTVFSFILE hVfsFile = NIL_RTVFSFILE;
4843 int rc = RTVfsFileFromRTFile(hRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE, true /*fLeaveOpen*/, &hVfsFile);
4844 if (RT_FAILURE(rc))
4845 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4846 N_("VMDK: Image path: '%s'. RTVfsFileFromRTFile failed for '%s' handle (%Rrc)"),
4847 pImage->pszFilename, pszRawDrive, rc);
4848
4849 RTDVM hVolMgr = NIL_RTDVM;
4850 rc = RTDvmCreate(&hVolMgr, hVfsFile, cbSector, 0 /*fFlags*/);
4851
4852 RTVfsFileRelease(hVfsFile);
4853
4854 if (RT_FAILURE(rc))
4855 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4856 N_("VMDK: Image path: '%s'. Failed to create volume manager instance for '%s' (%Rrc)"),
4857 pImage->pszFilename, pszRawDrive, rc);
4858
4859 rc = RTDvmMapOpen(hVolMgr);
4860 if (RT_SUCCESS(rc))
4861 {
4862 *phVolMgr = hVolMgr;
4863 return VINF_SUCCESS;
4864 }
4865 RTDvmRelease(hVolMgr);
4866 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Image path: '%s'. RTDvmMapOpen failed for '%s' (%Rrc)"),
4867 pImage->pszFilename, pszRawDrive, rc);
4868}
4869
4870/**
4871 * Opens the raw drive device and get the sizes for it.
4872 *
4873 * @param pImage The image (for error reporting).
4874 * @param pszRawDrive The device/whatever to open.
4875 * @param phRawDrive Where to return the file handle.
4876 * @param pcbRawDrive Where to return the size.
4877 * @param pcbSector Where to return the sector size.
4878 * @returns IPRT status code, errors have been reported.
4879 * @internal
4880 */
4881static int vmkdRawDescOpenDevice(PVMDKIMAGE pImage, const char *pszRawDrive,
4882 PRTFILE phRawDrive, uint64_t *pcbRawDrive, uint32_t *pcbSector)
4883{
4884 /*
4885 * Open the device for the raw drive.
4886 */
4887 RTFILE hRawDrive = NIL_RTFILE;
4888 int rc = RTFileOpen(&hRawDrive, pszRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
4889 if (RT_FAILURE(rc))
4890 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4891 N_("VMDK: Image path: '%s'. Failed to open the raw drive '%s' for reading (%Rrc)"),
4892 pImage->pszFilename, pszRawDrive, rc);
4893
4894 /*
4895 * Get the sector size.
4896 */
4897 uint32_t cbSector = 0;
4898 rc = RTFileQuerySectorSize(hRawDrive, &cbSector);
4899 if (RT_SUCCESS(rc))
4900 {
4901 /* sanity checks */
4902 if ( cbSector >= 512
4903 && cbSector <= _64K
4904 && RT_IS_POWER_OF_TWO(cbSector))
4905 {
4906 /*
4907 * Get the size.
4908 */
4909 uint64_t cbRawDrive = 0;
4910 rc = RTFileQuerySize(hRawDrive, &cbRawDrive);
4911 if (RT_SUCCESS(rc))
4912 {
4913 /* Check whether cbSize is actually sensible. */
4914 if (cbRawDrive > cbSector && (cbRawDrive % cbSector) == 0)
4915 {
4916 *phRawDrive = hRawDrive;
4917 *pcbRawDrive = cbRawDrive;
4918 *pcbSector = cbSector;
4919 return VINF_SUCCESS;
4920 }
4921 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4922 N_("VMDK: Image path: '%s'. Got a bogus size for the raw drive '%s': %RU64 (sector size %u)"),
4923 pImage->pszFilename, pszRawDrive, cbRawDrive, cbSector);
4924 }
4925 else
4926 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4927 N_("VMDK: Image path: '%s'. Failed to query size of the drive '%s' (%Rrc)"),
4928 pImage->pszFilename, pszRawDrive, rc);
4929 }
4930 else
4931 rc = vdIfError(pImage->pIfError, VERR_OUT_OF_RANGE, RT_SRC_POS,
4932 N_("VMDK: Image path: '%s'. Unsupported sector size for '%s': %u (%#x)"),
4933 pImage->pszFilename, pszRawDrive, cbSector, cbSector);
4934 }
4935 else
4936 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4937 N_("VMDK: Image path: '%s'. Failed to get the sector size for '%s' (%Rrc)"),
4938 pImage->pszFilename, pszRawDrive, rc);
4939 RTFileClose(hRawDrive);
4940 return rc;
4941}
4942
4943/**
4944 * Reads the raw disk configuration, leaving initalization and cleanup to the
4945 * caller (regardless of return status).
4946 *
4947 * @returns VBox status code, errors properly reported.
4948 * @internal
4949 */
4950static int vmdkRawDescParseConfig(PVMDKIMAGE pImage, char **ppszRawDrive,
4951 uint32_t *pfPartitions, uint32_t *pfPartitionsReadOnly,
4952 void **ppvBootSector, size_t *pcbBootSector, bool *pfRelative,
4953 char **ppszFreeMe)
4954{
4955 PVDINTERFACECONFIG pImgCfg = VDIfConfigGet(pImage->pVDIfsImage);
4956 if (!pImgCfg)
4957 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4958 N_("VMDK: Image path: '%s'. Getting config interface failed"), pImage->pszFilename);
4959
4960 /*
4961 * RawDrive = path
4962 */
4963 int rc = VDCFGQueryStringAlloc(pImgCfg, "RawDrive", ppszRawDrive);
4964 if (RT_FAILURE(rc))
4965 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4966 N_("VMDK: Image path: '%s'. Getting 'RawDrive' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4967 AssertPtrReturn(*ppszRawDrive, VERR_INTERNAL_ERROR_3);
4968
4969 /*
4970 * Partitions=n[r][,...]
4971 */
4972 uint32_t const cMaxPartitionBits = sizeof(*pfPartitions) * 8 /* ASSUMES 8 bits per char */;
4973 *pfPartitions = *pfPartitionsReadOnly = 0;
4974
4975 rc = VDCFGQueryStringAlloc(pImgCfg, "Partitions", ppszFreeMe);
4976 if (RT_SUCCESS(rc))
4977 {
4978 char *psz = *ppszFreeMe;
4979 while (*psz != '\0')
4980 {
4981 char *pszNext;
4982 uint32_t u32;
4983 rc = RTStrToUInt32Ex(psz, &pszNext, 0, &u32);
4984 if (rc == VWRN_NUMBER_TOO_BIG || rc == VWRN_NEGATIVE_UNSIGNED)
4985 rc = -rc;
4986 if (RT_FAILURE(rc))
4987 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4988 N_("VMDK: Image path: '%s'. Parsing 'Partitions' config value failed. Incorrect value (%Rrc): %s"),
4989 pImage->pszFilename, rc, psz);
4990 if (u32 >= cMaxPartitionBits)
4991 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4992 N_("VMDK: Image path: '%s'. 'Partitions' config sub-value out of range: %RU32, max %RU32"),
4993 pImage->pszFilename, u32, cMaxPartitionBits);
4994 *pfPartitions |= RT_BIT_32(u32);
4995 psz = pszNext;
4996 if (*psz == 'r')
4997 {
4998 *pfPartitionsReadOnly |= RT_BIT_32(u32);
4999 psz++;
5000 }
5001 if (*psz == ',')
5002 psz++;
5003 else if (*psz != '\0')
5004 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5005 N_("VMDK: Image path: '%s'. Malformed 'Partitions' config value, expected separator: %s"),
5006 pImage->pszFilename, psz);
5007 }
5008
5009 RTStrFree(*ppszFreeMe);
5010 *ppszFreeMe = NULL;
5011 }
5012 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
5013 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5014 N_("VMDK: Image path: '%s'. Getting 'Partitions' configuration failed (%Rrc)"), pImage->pszFilename, rc);
5015
5016 /*
5017 * BootSector=base64
5018 */
5019 rc = VDCFGQueryStringAlloc(pImgCfg, "BootSector", ppszFreeMe);
5020 if (RT_SUCCESS(rc))
5021 {
5022 ssize_t cbBootSector = RTBase64DecodedSize(*ppszFreeMe, NULL);
5023 if (cbBootSector < 0)
5024 return vdIfError(pImage->pIfError, VERR_INVALID_BASE64_ENCODING, RT_SRC_POS,
5025 N_("VMDK: Image path: '%s'. BASE64 decoding failed on the custom bootsector for '%s'"),
5026 pImage->pszFilename, *ppszRawDrive);
5027 if (cbBootSector == 0)
5028 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5029 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is zero bytes big"),
5030 pImage->pszFilename, *ppszRawDrive);
5031 if (cbBootSector > _4M) /* this is just a preliminary max */
5032 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5033 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is way too big: %zu bytes, max 4MB"),
5034 pImage->pszFilename, *ppszRawDrive, cbBootSector);
5035
5036 /* Refuse the boot sector if whole-drive. This used to be done quietly,
5037 however, bird disagrees and thinks the user should be told that what
5038 he/she/it tries to do isn't possible. There should be less head
5039 scratching this way when the guest doesn't do the expected thing. */
5040 if (!*pfPartitions)
5041 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5042 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is not supported for whole-drive configurations, only when selecting partitions"),
5043 pImage->pszFilename, *ppszRawDrive);
5044
5045 *pcbBootSector = (size_t)cbBootSector;
5046 *ppvBootSector = RTMemAlloc((size_t)cbBootSector);
5047 if (!*ppvBootSector)
5048 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
5049 N_("VMDK: Image path: '%s'. Failed to allocate %zd bytes for the custom bootsector for '%s'"),
5050 pImage->pszFilename, cbBootSector, *ppszRawDrive);
5051
5052 rc = RTBase64Decode(*ppszFreeMe, *ppvBootSector, cbBootSector, NULL /*pcbActual*/, NULL /*ppszEnd*/);
5053 if (RT_FAILURE(rc))
5054 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
5055 N_("VMDK: Image path: '%s'. Base64 decoding of the custom boot sector for '%s' failed (%Rrc)"),
5056 pImage->pszFilename, *ppszRawDrive, rc);
5057
5058 RTStrFree(*ppszFreeMe);
5059 *ppszFreeMe = NULL;
5060 }
5061 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
5062 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5063 N_("VMDK: Image path: '%s'. Getting 'BootSector' configuration failed (%Rrc)"), pImage->pszFilename, rc);
5064
5065 /*
5066 * Relative=0/1
5067 */
5068 *pfRelative = false;
5069 rc = VDCFGQueryBool(pImgCfg, "Relative", pfRelative);
5070 if (RT_SUCCESS(rc))
5071 {
5072 if (!*pfPartitions && *pfRelative != false)
5073 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5074 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported for whole-drive configurations, only when selecting partitions"),
5075 pImage->pszFilename);
5076#if !defined(RT_OS_DARWIN) && !defined(RT_OS_LINUX) && !defined(RT_OS_FREEBSD) && !defined(RT_OS_WINDOWS) && !defined(RT_OS_SOLARIS) /* PORTME */
5077 if (*pfRelative == true)
5078 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5079 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported on this host OS"),
5080 pImage->pszFilename);
5081#endif
5082 }
5083 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
5084 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5085 N_("VMDK: Image path: '%s'. Getting 'Relative' configuration failed (%Rrc)"), pImage->pszFilename, rc);
5086 else
5087#ifdef RT_OS_DARWIN /* different default on macOS, see ticketref:1461 (comment 20). */
5088 *pfRelative = true;
5089#else
5090 *pfRelative = false;
5091#endif
5092
5093 return VINF_SUCCESS;
5094}
5095
5096/**
5097 * Creates a raw drive (nee disk) descriptor.
5098 *
5099 * This was originally done in VBoxInternalManage.cpp, but was copied (not move)
5100 * here much later. That's one of the reasons why we produce a descriptor just
5101 * like it does, rather than mixing directly into the vmdkCreateRawImage code.
5102 *
5103 * @returns VBox status code.
5104 * @param pImage The image.
5105 * @param ppRaw Where to return the raw drive descriptor. Caller must
5106 * free it using vmdkRawDescFree regardless of the status
5107 * code.
5108 * @internal
5109 */
5110static int vmdkMakeRawDescriptor(PVMDKIMAGE pImage, PVDISKRAW *ppRaw)
5111{
5112 /* Make sure it's NULL. */
5113 *ppRaw = NULL;
5114
5115 /*
5116 * Read the configuration.
5117 */
5118 char *pszRawDrive = NULL;
5119 uint32_t fPartitions = 0; /* zero if whole-drive */
5120 uint32_t fPartitionsReadOnly = 0; /* (subset of fPartitions) */
5121 void *pvBootSector = NULL;
5122 size_t cbBootSector = 0;
5123 bool fRelative = false;
5124 char *pszFreeMe = NULL; /* lazy bird cleanup. */
5125 int rc = vmdkRawDescParseConfig(pImage, &pszRawDrive, &fPartitions, &fPartitionsReadOnly,
5126 &pvBootSector, &cbBootSector, &fRelative, &pszFreeMe);
5127 RTStrFree(pszFreeMe);
5128 if (RT_SUCCESS(rc))
5129 {
5130 /*
5131 * Open the device, getting the sector size and drive size.
5132 */
5133 uint64_t cbSize = 0;
5134 uint32_t cbSector = 0;
5135 RTFILE hRawDrive = NIL_RTFILE;
5136 rc = vmkdRawDescOpenDevice(pImage, pszRawDrive, &hRawDrive, &cbSize, &cbSector);
5137 if (RT_SUCCESS(rc))
5138 {
5139 pImage->cbSize = cbSize;
5140 /*
5141 * Create the raw-drive descriptor
5142 */
5143 PVDISKRAW pRawDesc = (PVDISKRAW)RTMemAllocZ(sizeof(*pRawDesc));
5144 if (pRawDesc)
5145 {
5146 pRawDesc->szSignature[0] = 'R';
5147 pRawDesc->szSignature[1] = 'A';
5148 pRawDesc->szSignature[2] = 'W';
5149 //pRawDesc->szSignature[3] = '\0';
5150 if (!fPartitions)
5151 {
5152 /*
5153 * It's simple for when doing the whole drive.
5154 */
5155 pRawDesc->uFlags = VDISKRAW_DISK;
5156 rc = RTStrDupEx(&pRawDesc->pszRawDisk, pszRawDrive);
5157 }
5158 else
5159 {
5160 /*
5161 * In selected partitions mode we've got a lot more work ahead of us.
5162 */
5163 pRawDesc->uFlags = VDISKRAW_NORMAL;
5164 //pRawDesc->pszRawDisk = NULL;
5165 //pRawDesc->cPartDescs = 0;
5166 //pRawDesc->pPartDescs = NULL;
5167
5168 /* We need to parse the partition map to complete the descriptor: */
5169 RTDVM hVolMgr = NIL_RTDVM;
5170 rc = vmdkRawDescOpenVolMgr(pImage, hRawDrive, pszRawDrive, cbSector, &hVolMgr);
5171 if (RT_SUCCESS(rc))
5172 {
5173 RTDVMFORMATTYPE enmFormatType = RTDvmMapGetFormatType(hVolMgr);
5174 if ( enmFormatType == RTDVMFORMATTYPE_MBR
5175 || enmFormatType == RTDVMFORMATTYPE_GPT)
5176 {
5177 pRawDesc->enmPartitioningType = enmFormatType == RTDVMFORMATTYPE_MBR
5178 ? VDISKPARTTYPE_MBR : VDISKPARTTYPE_GPT;
5179
5180 /* Add copies of the partition tables: */
5181 rc = vmdkRawDescDoCopyPartitionTables(pImage, hVolMgr, pRawDesc, pszRawDrive, hRawDrive,
5182 pvBootSector, cbBootSector);
5183 if (RT_SUCCESS(rc))
5184 {
5185 /* Add descriptors for the partitions/volumes, indicating which
5186 should be accessible and how to access them: */
5187 RTDVMVOLUME hVolRelease = NIL_RTDVMVOLUME;
5188 rc = vmdkRawDescDoPartitions(pImage, hVolMgr, pRawDesc, hRawDrive, pszRawDrive, cbSector,
5189 fPartitions, fPartitionsReadOnly, fRelative, &hVolRelease);
5190 RTDvmVolumeRelease(hVolRelease);
5191
5192 /* Finally, sort the partition and check consistency (overlaps, etc): */
5193 if (RT_SUCCESS(rc))
5194 rc = vmdkRawDescPostProcessPartitions(pImage, pRawDesc, cbSize);
5195 }
5196 }
5197 else
5198 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
5199 N_("VMDK: Image path: '%s'. Unsupported partitioning for the disk '%s': %s"),
5200 pImage->pszFilename, pszRawDrive, RTDvmMapGetFormatType(hVolMgr));
5201 RTDvmRelease(hVolMgr);
5202 }
5203 }
5204 if (RT_SUCCESS(rc))
5205 {
5206 /*
5207 * We succeeded.
5208 */
5209 *ppRaw = pRawDesc;
5210 Log(("vmdkMakeRawDescriptor: fFlags=%#x enmPartitioningType=%d cPartDescs=%u pszRawDisk=%s\n",
5211 pRawDesc->uFlags, pRawDesc->enmPartitioningType, pRawDesc->cPartDescs, pRawDesc->pszRawDisk));
5212 if (pRawDesc->cPartDescs)
5213 {
5214 Log(("# VMDK offset Length Device offset PartDataPtr Device\n"));
5215 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
5216 Log(("%2u %14RU64 %14RU64 %14RU64 %#18p %s\n", i, pRawDesc->pPartDescs[i].offStartInVDisk,
5217 pRawDesc->pPartDescs[i].cbData, pRawDesc->pPartDescs[i].offStartInDevice,
5218 pRawDesc->pPartDescs[i].pvPartitionData, pRawDesc->pPartDescs[i].pszRawDevice));
5219 }
5220 }
5221 else
5222 vmdkRawDescFree(pRawDesc);
5223 }
5224 else
5225 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
5226 N_("VMDK: Image path: '%s'. Failed to allocate %u bytes for the raw drive descriptor"),
5227 pImage->pszFilename, sizeof(*pRawDesc));
5228 RTFileClose(hRawDrive);
5229 }
5230 }
5231 RTStrFree(pszRawDrive);
5232 RTMemFree(pvBootSector);
5233 return rc;
5234}
5235
5236/**
5237 * Internal: create VMDK images for raw disk/partition access.
5238 */
5239static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVDISKRAW pRaw,
5240 uint64_t cbSize)
5241{
5242 int rc = VINF_SUCCESS;
5243 PVMDKEXTENT pExtent;
5244
5245 if (pRaw->uFlags & VDISKRAW_DISK)
5246 {
5247 /* Full raw disk access. This requires setting up a descriptor
5248 * file and open the (flat) raw disk. */
5249 rc = vmdkCreateExtents(pImage, 1);
5250 if (RT_FAILURE(rc))
5251 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5252 pExtent = &pImage->pExtents[0];
5253 /* Create raw disk descriptor file. */
5254 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
5255 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5256 true /* fCreate */));
5257 if (RT_FAILURE(rc))
5258 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
5259
5260 /* Set up basename for extent description. Cannot use StrDup. */
5261 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
5262 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
5263 if (!pszBasename)
5264 return VERR_NO_MEMORY;
5265 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
5266 pExtent->pszBasename = pszBasename;
5267 /* For raw disks the full name is identical to the base name. */
5268 pExtent->pszFullname = RTStrDup(pszBasename);
5269 if (!pExtent->pszFullname)
5270 return VERR_NO_MEMORY;
5271 pExtent->enmType = VMDKETYPE_FLAT;
5272 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
5273 pExtent->uSectorOffset = 0;
5274 pExtent->enmAccess = (pRaw->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
5275 pExtent->fMetaDirty = false;
5276
5277 /* Open flat image, the raw disk. */
5278 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5279 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
5280 false /* fCreate */));
5281 if (RT_FAILURE(rc))
5282 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
5283 }
5284 else
5285 {
5286 /* Raw partition access. This requires setting up a descriptor
5287 * file, write the partition information to a flat extent and
5288 * open all the (flat) raw disk partitions. */
5289
5290 /* First pass over the partition data areas to determine how many
5291 * extents we need. One data area can require up to 2 extents, as
5292 * it might be necessary to skip over unpartitioned space. */
5293 unsigned cExtents = 0;
5294 uint64_t uStart = 0;
5295 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
5296 {
5297 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
5298 if (uStart > pPart->offStartInVDisk)
5299 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5300 N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
5301
5302 if (uStart < pPart->offStartInVDisk)
5303 cExtents++;
5304 uStart = pPart->offStartInVDisk + pPart->cbData;
5305 cExtents++;
5306 }
5307 /* Another extent for filling up the rest of the image. */
5308 if (uStart != cbSize)
5309 cExtents++;
5310
5311 rc = vmdkCreateExtents(pImage, cExtents);
5312 if (RT_FAILURE(rc))
5313 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5314
5315 /* Create raw partition descriptor file. */
5316 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
5317 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5318 true /* fCreate */));
5319 if (RT_FAILURE(rc))
5320 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
5321
5322 /* Create base filename for the partition table extent. */
5323 /** @todo remove fixed buffer without creating memory leaks. */
5324 char pszPartition[1024];
5325 const char *pszBase = RTPathFilename(pImage->pszFilename);
5326 const char *pszSuff = RTPathSuffix(pszBase);
5327 if (pszSuff == NULL)
5328 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
5329 char *pszBaseBase = RTStrDup(pszBase);
5330 if (!pszBaseBase)
5331 return VERR_NO_MEMORY;
5332 RTPathStripSuffix(pszBaseBase);
5333 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
5334 pszBaseBase, pszSuff);
5335 RTStrFree(pszBaseBase);
5336
5337 /* Second pass over the partitions, now define all extents. */
5338 uint64_t uPartOffset = 0;
5339 cExtents = 0;
5340 uStart = 0;
5341 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
5342 {
5343 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
5344 pExtent = &pImage->pExtents[cExtents++];
5345
5346 if (uStart < pPart->offStartInVDisk)
5347 {
5348 pExtent->pszBasename = NULL;
5349 pExtent->pszFullname = NULL;
5350 pExtent->enmType = VMDKETYPE_ZERO;
5351 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->offStartInVDisk - uStart);
5352 pExtent->uSectorOffset = 0;
5353 pExtent->enmAccess = VMDKACCESS_READWRITE;
5354 pExtent->fMetaDirty = false;
5355 /* go to next extent */
5356 pExtent = &pImage->pExtents[cExtents++];
5357 }
5358 uStart = pPart->offStartInVDisk + pPart->cbData;
5359
5360 if (pPart->pvPartitionData)
5361 {
5362 /* Set up basename for extent description. Can't use StrDup. */
5363 size_t cbBasename = strlen(pszPartition) + 1;
5364 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
5365 if (!pszBasename)
5366 return VERR_NO_MEMORY;
5367 memcpy(pszBasename, pszPartition, cbBasename);
5368 pExtent->pszBasename = pszBasename;
5369
5370 /* Set up full name for partition extent. */
5371 char *pszDirname = RTStrDup(pImage->pszFilename);
5372 if (!pszDirname)
5373 return VERR_NO_STR_MEMORY;
5374 RTPathStripFilename(pszDirname);
5375 char *pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
5376 RTStrFree(pszDirname);
5377 if (!pszFullname)
5378 return VERR_NO_STR_MEMORY;
5379 pExtent->pszFullname = pszFullname;
5380 pExtent->enmType = VMDKETYPE_FLAT;
5381 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
5382 pExtent->uSectorOffset = uPartOffset;
5383 pExtent->enmAccess = VMDKACCESS_READWRITE;
5384 pExtent->fMetaDirty = false;
5385
5386 /* Create partition table flat image. */
5387 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5388 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
5389 true /* fCreate */));
5390 if (RT_FAILURE(rc))
5391 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
5392 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5393 VMDK_SECTOR2BYTE(uPartOffset),
5394 pPart->pvPartitionData,
5395 pPart->cbData);
5396 if (RT_FAILURE(rc))
5397 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
5398 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
5399 }
5400 else
5401 {
5402 if (pPart->pszRawDevice)
5403 {
5404 /* Set up basename for extent descr. Can't use StrDup. */
5405 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
5406 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
5407 if (!pszBasename)
5408 return VERR_NO_MEMORY;
5409 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
5410 pExtent->pszBasename = pszBasename;
5411 /* For raw disks full name is identical to base name. */
5412 pExtent->pszFullname = RTStrDup(pszBasename);
5413 if (!pExtent->pszFullname)
5414 return VERR_NO_MEMORY;
5415 pExtent->enmType = VMDKETYPE_FLAT;
5416 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
5417 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->offStartInDevice);
5418 pExtent->enmAccess = (pPart->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
5419 pExtent->fMetaDirty = false;
5420
5421 /* Open flat image, the raw partition. */
5422 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5423 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
5424 false /* fCreate */));
5425 if (RT_FAILURE(rc))
5426 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
5427 }
5428 else
5429 {
5430 pExtent->pszBasename = NULL;
5431 pExtent->pszFullname = NULL;
5432 pExtent->enmType = VMDKETYPE_ZERO;
5433 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
5434 pExtent->uSectorOffset = 0;
5435 pExtent->enmAccess = VMDKACCESS_READWRITE;
5436 pExtent->fMetaDirty = false;
5437 }
5438 }
5439 }
5440 /* Another extent for filling up the rest of the image. */
5441 if (uStart != cbSize)
5442 {
5443 pExtent = &pImage->pExtents[cExtents++];
5444 pExtent->pszBasename = NULL;
5445 pExtent->pszFullname = NULL;
5446 pExtent->enmType = VMDKETYPE_ZERO;
5447 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
5448 pExtent->uSectorOffset = 0;
5449 pExtent->enmAccess = VMDKACCESS_READWRITE;
5450 pExtent->fMetaDirty = false;
5451 }
5452 }
5453
5454 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5455 (pRaw->uFlags & VDISKRAW_DISK) ?
5456 "fullDevice" : "partitionedDevice");
5457 if (RT_FAILURE(rc))
5458 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5459 return rc;
5460}
5461
5462/**
5463 * Internal: create a regular (i.e. file-backed) VMDK image.
5464 */
5465static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
5466 unsigned uImageFlags, PVDINTERFACEPROGRESS pIfProgress,
5467 unsigned uPercentStart, unsigned uPercentSpan)
5468{
5469 int rc = VINF_SUCCESS;
5470 unsigned cExtents = 1;
5471 uint64_t cbOffset = 0;
5472 uint64_t cbRemaining = cbSize;
5473
5474 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
5475 {
5476 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
5477 /* Do proper extent computation: need one smaller extent if the total
5478 * size isn't evenly divisible by the split size. */
5479 if (cbSize % VMDK_2G_SPLIT_SIZE)
5480 cExtents++;
5481 }
5482 rc = vmdkCreateExtents(pImage, cExtents);
5483 if (RT_FAILURE(rc))
5484 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5485
5486 /* Basename strings needed for constructing the extent names. */
5487 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
5488 AssertPtr(pszBasenameSubstr);
5489 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
5490
5491 /* Create separate descriptor file if necessary. */
5492 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
5493 {
5494 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
5495 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5496 true /* fCreate */));
5497 if (RT_FAILURE(rc))
5498 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
5499 }
5500 else
5501 pImage->pFile = NULL;
5502
5503 /* Set up all extents. */
5504 for (unsigned i = 0; i < cExtents; i++)
5505 {
5506 PVMDKEXTENT pExtent = &pImage->pExtents[i];
5507 uint64_t cbExtent = cbRemaining;
5508
5509 /* Set up fullname/basename for extent description. Cannot use StrDup
5510 * for basename, as it is not guaranteed that the memory can be freed
5511 * with RTMemTmpFree, which must be used as in other code paths
5512 * StrDup is not usable. */
5513 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5514 {
5515 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
5516 if (!pszBasename)
5517 return VERR_NO_MEMORY;
5518 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
5519 pExtent->pszBasename = pszBasename;
5520 }
5521 else
5522 {
5523 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
5524 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
5525 RTPathStripSuffix(pszBasenameBase);
5526 char *pszTmp;
5527 size_t cbTmp;
5528 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5529 {
5530 if (cExtents == 1)
5531 RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
5532 pszBasenameSuff);
5533 else
5534 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
5535 i+1, pszBasenameSuff);
5536 }
5537 else
5538 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
5539 pszBasenameSuff);
5540 RTStrFree(pszBasenameBase);
5541 if (!pszTmp)
5542 return VERR_NO_STR_MEMORY;
5543 cbTmp = strlen(pszTmp) + 1;
5544 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
5545 if (!pszBasename)
5546 {
5547 RTStrFree(pszTmp);
5548 return VERR_NO_MEMORY;
5549 }
5550 memcpy(pszBasename, pszTmp, cbTmp);
5551 RTStrFree(pszTmp);
5552 pExtent->pszBasename = pszBasename;
5553 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
5554 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
5555 }
5556 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
5557 if (!pszBasedirectory)
5558 return VERR_NO_STR_MEMORY;
5559 RTPathStripFilename(pszBasedirectory);
5560 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
5561 RTStrFree(pszBasedirectory);
5562 if (!pszFullname)
5563 return VERR_NO_STR_MEMORY;
5564 pExtent->pszFullname = pszFullname;
5565
5566 /* Create file for extent. */
5567 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5568 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5569 true /* fCreate */));
5570 if (RT_FAILURE(rc))
5571 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
5572 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5573 {
5574 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbExtent,
5575 0 /* fFlags */, pIfProgress,
5576 uPercentStart + cbOffset * uPercentSpan / cbSize,
5577 cbExtent * uPercentSpan / cbSize);
5578 if (RT_FAILURE(rc))
5579 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
5580 }
5581
5582 /* Place descriptor file information (where integrated). */
5583 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5584 {
5585 pExtent->uDescriptorSector = 1;
5586 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
5587 /* The descriptor is part of the (only) extent. */
5588 pExtent->pDescData = pImage->pDescData;
5589 pImage->pDescData = NULL;
5590 }
5591
5592 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5593 {
5594 uint64_t cSectorsPerGDE, cSectorsPerGD;
5595 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
5596 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, _64K));
5597 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
5598 pExtent->cGTEntries = 512;
5599 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
5600 pExtent->cSectorsPerGDE = cSectorsPerGDE;
5601 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
5602 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
5603 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5604 {
5605 /* The spec says version is 1 for all VMDKs, but the vast
5606 * majority of streamOptimized VMDKs actually contain
5607 * version 3 - so go with the majority. Both are accepted. */
5608 pExtent->uVersion = 3;
5609 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
5610 }
5611 }
5612 else
5613 {
5614 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5615 pExtent->enmType = VMDKETYPE_VMFS;
5616 else
5617 pExtent->enmType = VMDKETYPE_FLAT;
5618 }
5619
5620 pExtent->enmAccess = VMDKACCESS_READWRITE;
5621 pExtent->fUncleanShutdown = true;
5622 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
5623 pExtent->uSectorOffset = 0;
5624 pExtent->fMetaDirty = true;
5625
5626 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5627 {
5628 /* fPreAlloc should never be false because VMware can't use such images. */
5629 rc = vmdkCreateGrainDirectory(pImage, pExtent,
5630 RT_MAX( pExtent->uDescriptorSector
5631 + pExtent->cDescriptorSectors,
5632 1),
5633 true /* fPreAlloc */);
5634 if (RT_FAILURE(rc))
5635 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5636 }
5637
5638 cbOffset += cbExtent;
5639
5640 if (RT_SUCCESS(rc))
5641 vdIfProgress(pIfProgress, uPercentStart + cbOffset * uPercentSpan / cbSize);
5642
5643 cbRemaining -= cbExtent;
5644 }
5645
5646 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5647 {
5648 /* VirtualBox doesn't care, but VMWare ESX freaks out if the wrong
5649 * controller type is set in an image. */
5650 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
5651 if (RT_FAILURE(rc))
5652 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
5653 }
5654
5655 const char *pszDescType = NULL;
5656 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5657 {
5658 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5659 pszDescType = "vmfs";
5660 else
5661 pszDescType = (cExtents == 1)
5662 ? "monolithicFlat" : "twoGbMaxExtentFlat";
5663 }
5664 else
5665 {
5666 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5667 pszDescType = "streamOptimized";
5668 else
5669 {
5670 pszDescType = (cExtents == 1)
5671 ? "monolithicSparse" : "twoGbMaxExtentSparse";
5672 }
5673 }
5674 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5675 pszDescType);
5676 if (RT_FAILURE(rc))
5677 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5678 return rc;
5679}
5680
5681/**
5682 * Internal: Create a real stream optimized VMDK using only linear writes.
5683 */
5684static int vmdkCreateStreamImage(PVMDKIMAGE pImage, uint64_t cbSize)
5685{
5686 int rc = vmdkCreateExtents(pImage, 1);
5687 if (RT_FAILURE(rc))
5688 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5689
5690 /* Basename strings needed for constructing the extent names. */
5691 const char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
5692 AssertPtr(pszBasenameSubstr);
5693 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
5694
5695 /* No separate descriptor file. */
5696 pImage->pFile = NULL;
5697
5698 /* Set up all extents. */
5699 PVMDKEXTENT pExtent = &pImage->pExtents[0];
5700
5701 /* Set up fullname/basename for extent description. Cannot use StrDup
5702 * for basename, as it is not guaranteed that the memory can be freed
5703 * with RTMemTmpFree, which must be used as in other code paths
5704 * StrDup is not usable. */
5705 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
5706 if (!pszBasename)
5707 return VERR_NO_MEMORY;
5708 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
5709 pExtent->pszBasename = pszBasename;
5710
5711 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
5712 RTPathStripFilename(pszBasedirectory);
5713 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
5714 RTStrFree(pszBasedirectory);
5715 if (!pszFullname)
5716 return VERR_NO_STR_MEMORY;
5717 pExtent->pszFullname = pszFullname;
5718
5719 /* Create file for extent. Make it write only, no reading allowed. */
5720 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5721 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5722 true /* fCreate */)
5723 & ~RTFILE_O_READ);
5724 if (RT_FAILURE(rc))
5725 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
5726
5727 /* Place descriptor file information. */
5728 pExtent->uDescriptorSector = 1;
5729 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
5730 /* The descriptor is part of the (only) extent. */
5731 pExtent->pDescData = pImage->pDescData;
5732 pImage->pDescData = NULL;
5733
5734 uint64_t cSectorsPerGDE, cSectorsPerGD;
5735 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
5736 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
5737 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
5738 pExtent->cGTEntries = 512;
5739 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
5740 pExtent->cSectorsPerGDE = cSectorsPerGDE;
5741 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
5742 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
5743
5744 /* The spec says version is 1 for all VMDKs, but the vast
5745 * majority of streamOptimized VMDKs actually contain
5746 * version 3 - so go with the majority. Both are accepted. */
5747 pExtent->uVersion = 3;
5748 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
5749 pExtent->fFooter = true;
5750
5751 pExtent->enmAccess = VMDKACCESS_READONLY;
5752 pExtent->fUncleanShutdown = false;
5753 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
5754 pExtent->uSectorOffset = 0;
5755 pExtent->fMetaDirty = true;
5756
5757 /* Create grain directory, without preallocating it straight away. It will
5758 * be constructed on the fly when writing out the data and written when
5759 * closing the image. The end effect is that the full grain directory is
5760 * allocated, which is a requirement of the VMDK specs. */
5761 rc = vmdkCreateGrainDirectory(pImage, pExtent, VMDK_GD_AT_END,
5762 false /* fPreAlloc */);
5763 if (RT_FAILURE(rc))
5764 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5765
5766 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5767 "streamOptimized");
5768 if (RT_FAILURE(rc))
5769 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5770
5771 return rc;
5772}
5773
5774/**
5775 * Initializes the UUID fields in the DDB.
5776 *
5777 * @returns VBox status code.
5778 * @param pImage The VMDK image instance.
5779 */
5780static int vmdkCreateImageDdbUuidsInit(PVMDKIMAGE pImage)
5781{
5782 int rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
5783 if (RT_SUCCESS(rc))
5784 {
5785 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
5786 if (RT_SUCCESS(rc))
5787 {
5788 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_MODIFICATION_UUID,
5789 &pImage->ModificationUuid);
5790 if (RT_SUCCESS(rc))
5791 {
5792 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_MODIFICATION_UUID,
5793 &pImage->ParentModificationUuid);
5794 if (RT_FAILURE(rc))
5795 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5796 N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5797 }
5798 else
5799 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5800 N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5801 }
5802 else
5803 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5804 N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
5805 }
5806 else
5807 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5808 N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
5809
5810 return rc;
5811}
5812
/**
 * Internal: The actual code for creating any VMDK variant currently in
 * existence on hosted environments.
 *
 * Builds the descriptor, dispatches to the raw / stream optimized / regular
 * creation helper, then fills in geometry, UUIDs, comment and region list,
 * and finally flushes the image (or writes header+descriptor explicitly for
 * streamOptimized).  On failure the partially created image is freed.
 *
 * @returns VBox status code.
 * @param   pImage          The VMDK image instance to create.
 * @param   cbSize          Size of the virtual disk in bytes (0 is replaced by
 *                          pImage->cbSize for raw disk images).
 * @param   uImageFlags     VD_IMAGE_FLAGS_* / VD_VMDK_IMAGE_FLAGS_* variant selection.
 * @param   pszComment      Optional image comment.
 * @param   pPCHSGeometry   Physical CHS geometry to record (may be all zero).
 * @param   pLCHSGeometry   Logical CHS geometry to record (may be all zero).
 * @param   pUuid           The image UUID to set.
 * @param   pIfProgress     Optional progress interface.
 * @param   uPercentStart   Progress range start.
 * @param   uPercentSpan    Progress range span.
 */
static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
                           unsigned uImageFlags, const char *pszComment,
                           PCVDGEOMETRY pPCHSGeometry,
                           PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
                           PVDINTERFACEPROGRESS pIfProgress,
                           unsigned uPercentStart, unsigned uPercentSpan)
{
    pImage->uImageFlags = uImageFlags;

    pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
    pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
    AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);

    int rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
                                  &pImage->Descriptor);
    if (RT_SUCCESS(rc))
    {
        if (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
        {
            /* Raw disk image (includes raw partition). */
            PVDISKRAW pRaw = NULL;
            rc = vmdkMakeRawDescriptor(pImage, &pRaw);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create raw descriptor for '%s'"),
                                 pImage->pszFilename);
            /* For raw disks cbSize may be 0; fall back to the size probed
             * while building the raw descriptor. */
            if (!cbSize)
                cbSize = pImage->cbSize;

            rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
            vmdkRawDescFree(pRaw);
        }
        else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
        {
            /* Stream optimized sparse image (monolithic). */
            rc = vmdkCreateStreamImage(pImage, cbSize);
        }
        else
        {
            /* Regular fixed or sparse image (monolithic or split). */
            rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
                                        pIfProgress, uPercentStart,
                                        uPercentSpan * 95 / 100);
        }

        if (RT_SUCCESS(rc))
        {
            vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 98 / 100);

            pImage->cbSize = cbSize;

            /* Record every extent created above in the descriptor. */
            for (unsigned i = 0; i < pImage->cExtents; i++)
            {
                PVMDKEXTENT pExtent = &pImage->pExtents[i];

                rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
                                       pExtent->cNominalSectors, pExtent->enmType,
                                       pExtent->pszBasename, pExtent->uSectorOffset);
                if (RT_FAILURE(rc))
                {
                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
                    break;
                }
            }

            if (RT_SUCCESS(rc))
                vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);

            pImage->LCHSGeometry = *pLCHSGeometry;
            pImage->PCHSGeometry = *pPCHSGeometry;

            if (RT_SUCCESS(rc))
            {
                if (   pPCHSGeometry->cCylinders != 0
                    && pPCHSGeometry->cHeads != 0
                    && pPCHSGeometry->cSectors != 0)
                    rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
                else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
                {
                    /* No geometry supplied for a raw disk: synthesize a
                     * standard 16 heads / 63 sectors geometry, capping the
                     * cylinder count at the conventional CHS limit of 16383. */
                    VDGEOMETRY RawDiskPCHSGeometry;
                    RawDiskPCHSGeometry.cCylinders = (uint32_t)RT_MIN(pImage->cbSize / 512 / 16 / 63, 16383);
                    RawDiskPCHSGeometry.cHeads = 16;
                    RawDiskPCHSGeometry.cSectors = 63;
                    rc = vmdkDescSetPCHSGeometry(pImage, &RawDiskPCHSGeometry);
                }
            }

            /* Logical geometry is only written when fully specified. */
            if (   RT_SUCCESS(rc)
                && pLCHSGeometry->cCylinders != 0
                && pLCHSGeometry->cHeads != 0
                && pLCHSGeometry->cSectors != 0)
                rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);

            pImage->ImageUuid = *pUuid;
            RTUuidClear(&pImage->ParentUuid);
            RTUuidClear(&pImage->ModificationUuid);
            RTUuidClear(&pImage->ParentModificationUuid);

            if (RT_SUCCESS(rc))
                rc = vmdkCreateImageDdbUuidsInit(pImage);

            if (RT_SUCCESS(rc))
                rc = vmdkAllocateGrainTableCache(pImage);

            if (RT_SUCCESS(rc))
            {
                rc = vmdkSetImageComment(pImage, pszComment);
                if (RT_FAILURE(rc))
                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
            }

            if (RT_SUCCESS(rc))
            {
                vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 99 / 100);

                if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                {
                    /* streamOptimized is a bit special, we cannot trigger the flush
                     * until all data has been written. So we write the necessary
                     * information explicitly. */
                    pImage->pExtents[0].cDescriptorSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(  pImage->Descriptor.aLines[pImage->Descriptor.cLines]
                                                                                          - pImage->Descriptor.aLines[0], 512));
                    rc = vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0, NULL);
                    if (RT_SUCCESS(rc))
                    {
                        rc = vmdkWriteDescriptor(pImage, NULL);
                        if (RT_FAILURE(rc))
                            rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK descriptor in '%s'"), pImage->pszFilename);
                    }
                    else
                        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK header in '%s'"), pImage->pszFilename);
                }
                else
                    rc = vmdkFlushImage(pImage, NULL);
            }
        }
    }
    else
        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);


    if (RT_SUCCESS(rc))
    {
        /* Publish the single raw region covering the whole disk. */
        PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
        pImage->RegionList.fFlags = 0;
        pImage->RegionList.cRegions = 1;

        pRegion->offRegion = 0; /* Disk start. */
        pRegion->cbBlock = 512;
        pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
        pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
        pRegion->cbData = 512;
        pRegion->cbMetadata = 0;
        pRegion->cRegionBlocksOrBytes = pImage->cbSize;

        vdIfProgress(pIfProgress, uPercentStart + uPercentSpan);
    }
    else
        /* Keep existing files when the failure was "already exists". */
        vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS, false /*fFlush*/);
    return rc;
}
5977
5978/**
5979 * Internal: Update image comment.
5980 */
5981static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
5982{
5983 char *pszCommentEncoded = NULL;
5984 if (pszComment)
5985 {
5986 pszCommentEncoded = vmdkEncodeString(pszComment);
5987 if (!pszCommentEncoded)
5988 return VERR_NO_MEMORY;
5989 }
5990
5991 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
5992 "ddb.comment", pszCommentEncoded);
5993 if (pszCommentEncoded)
5994 RTStrFree(pszCommentEncoded);
5995 if (RT_FAILURE(rc))
5996 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
5997 return VINF_SUCCESS;
5998}
5999
6000/**
6001 * Internal. Clear the grain table buffer for real stream optimized writing.
6002 */
6003static void vmdkStreamClearGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
6004{
6005 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
6006 for (uint32_t i = 0; i < cCacheLines; i++)
6007 memset(&pImage->pGTCache->aGTCache[i].aGTData[0], '\0',
6008 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
6009}
6010
/**
 * Internal. Flush the grain table buffer for real stream optimized writing.
 *
 * Writes a grain table marker followed by the little-endian converted grain
 * table at the current (sector-aligned) append position and records the
 * table's sector in the given grain directory entry.  Completely empty grain
 * tables are skipped, matching VMware's behaviour.
 *
 * @returns VBox status code.
 * @param   pImage    The VMDK image instance.
 * @param   pExtent   The stream optimized extent owning the grain table.
 * @param   uGDEntry  Index of the grain directory entry to fill in; it must
 *                    still be zero (not yet written).
 */
static int vmdkStreamFlushGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                             uint32_t uGDEntry)
{
    int rc = VINF_SUCCESS;
    uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;

    /* VMware does not write out completely empty grain tables in the case
     * of streamOptimized images, which according to my interpretation of
     * the VMDK 1.1 spec is bending the rules. Since they do it and we can
     * handle it without problems do it the same way and save some bytes. */
    bool fAllZero = true;
    for (uint32_t i = 0; i < cCacheLines; i++)
    {
        /* Scan this cache line for any non-zero grain table entry; the
         * little-endian conversion happens later in the write loop below. */
        uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
        for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
            if (*pGTTmp)
            {
                fAllZero = false;
                break;
            }
        if (!fAllZero)
            break;
    }
    if (fAllZero)
        return VINF_SUCCESS;

    uint64_t uFileOffset = pExtent->uAppendPosition;
    if (!uFileOffset)
        return VERR_INTERNAL_ERROR;
    /* Align to sector, as the previous write could have been any size. */
    uFileOffset = RT_ALIGN_64(uFileOffset, 512);

    /* Grain table marker. */
    uint8_t aMarker[512];
    PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
    memset(pMarker, '\0', sizeof(aMarker));
    pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR((uint64_t)pExtent->cGTEntries * sizeof(uint32_t)));
    pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GT);
    rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
                                aMarker, sizeof(aMarker));
    /* NOTE(review): a marker write failure is only asserted, not propagated --
     * confirm this best-effort behaviour is intentional. */
    AssertRC(rc);
    uFileOffset += 512;

    /* The grain directory entry must not have been written before. */
    if (!pExtent->pGD || pExtent->pGD[uGDEntry])
        return VERR_INTERNAL_ERROR;

    pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset);

    for (uint32_t i = 0; i < cCacheLines; i++)
    {
        /* Convert the grain table to little endian in place, as it will not
         * be used at all after this function has been called. */
        uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
        for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
            *pGTTmp = RT_H2LE_U32(*pGTTmp);

        rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
                                    &pImage->pGTCache->aGTCache[i].aGTData[0],
                                    VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
        uFileOffset += VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
        if (RT_FAILURE(rc))
            break;
    }
    Assert(!(uFileOffset % 512));
    /* Remember the next (sector aligned) append position for the caller. */
    pExtent->uAppendPosition = RT_ALIGN_64(uFileOffset, 512);
    return rc;
}
6083
/**
 * Internal. Free all allocated space for representing an image, and optionally
 * delete the image from disk.
 *
 * For writable images the extents are marked clean (non-streamOptimized) or
 * the pending streamOptimized tail (remaining grain tables, grain directory,
 * footer and end-of-stream marker) is written out first, unless the image is
 * being deleted.  Afterwards all extents, open files, the grain table cache
 * and the descriptor buffer are released.
 *
 * @returns VBox status code; errors from closing the files are propagated.
 * @param   pImage   The image to destroy.  NULL is tolerated (no-op success).
 * @param   fDelete  Whether to delete the backing files from disk.
 * @param   fFlush   Whether to flush dirty data/metadata before closing
 *                   (only relevant when not deleting).
 */
static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush)
{
    int rc = VINF_SUCCESS;

    /* Freeing a never allocated image (e.g. because the open failed) is
     * not signalled as an error. After all nothing bad happens. */
    if (pImage)
    {
        if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
        {
            if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
            {
                /* Check if all extents are clean. */
                for (unsigned i = 0; i < pImage->cExtents; i++)
                {
                    Assert(!pImage->pExtents[i].fUncleanShutdown);
                }
            }
            else
            {
                /* Mark all extents as clean. */
                for (unsigned i = 0; i < pImage->cExtents; i++)
                {
                    if (   pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
                        && pImage->pExtents[i].fUncleanShutdown)
                    {
                        pImage->pExtents[i].fUncleanShutdown = false;
                        pImage->pExtents[i].fMetaDirty = true;
                    }

                    /* From now on it's not safe to append any more data. */
                    pImage->pExtents[i].uAppendPosition = 0;
                }
            }
        }

        if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
        {
            /* No need to write any pending data if the file will be deleted
             * or if the new file wasn't successfully created. */
            if (   !fDelete && pImage->pExtents
                && pImage->pExtents[0].cGTEntries
                && pImage->pExtents[0].uAppendPosition)
            {
                PVMDKEXTENT pExtent = &pImage->pExtents[0];
                /* Flush the grain table that was last written to, then empty
                 * tables for all grain directory entries after it. */
                uint32_t uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
                rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
                AssertRC(rc);
                vmdkStreamClearGT(pImage, pExtent);
                for (uint32_t i = uLastGDEntry + 1; i < pExtent->cGDEntries; i++)
                {
                    rc = vmdkStreamFlushGT(pImage, pExtent, i);
                    AssertRC(rc);
                }

                uint64_t uFileOffset = pExtent->uAppendPosition;
                if (!uFileOffset)
                    /* NOTE(review): this early return skips all the cleanup
                     * below (extents, files, GT cache, descriptor) — looks
                     * like a resource leak on this error path; confirm. */
                    return VERR_INTERNAL_ERROR;
                uFileOffset = RT_ALIGN_64(uFileOffset, 512);

                /* From now on it's not safe to append any more data. */
                pExtent->uAppendPosition = 0;

                /* Grain directory marker. */
                uint8_t aMarker[512];
                PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
                memset(pMarker, '\0', sizeof(aMarker));
                /* NOTE(review): RT_H2LE_U64 is applied before RT_ALIGN_64 /
                 * VMDK_BYTE2SECTOR here; on a big-endian host the alignment
                 * would operate on the byte-swapped value.  A no-op on
                 * little-endian hosts — confirm intent. */
                pMarker->uSector = VMDK_BYTE2SECTOR(RT_ALIGN_64(RT_H2LE_U64((uint64_t)pExtent->cGDEntries * sizeof(uint32_t)), 512));
                pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GD);
                rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
                                            aMarker, sizeof(aMarker));
                AssertRC(rc);
                uFileOffset += 512;

                /* Write grain directory in little endian style. The array will
                 * not be used after this, so convert in place. */
                uint32_t *pGDTmp = pExtent->pGD;
                for (uint32_t i = 0; i < pExtent->cGDEntries; i++, pGDTmp++)
                    *pGDTmp = RT_H2LE_U32(*pGDTmp);
                rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                            uFileOffset, pExtent->pGD,
                                            pExtent->cGDEntries * sizeof(uint32_t));
                AssertRC(rc);

                /* Both GD fields point at the same copy — the stream has no
                 * separate redundant grain directory. */
                pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset);
                pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset);
                uFileOffset = RT_ALIGN_64(   uFileOffset
                                          + pExtent->cGDEntries * sizeof(uint32_t),
                                          512);

                /* Footer marker. */
                memset(pMarker, '\0', sizeof(aMarker));
                pMarker->uSector = VMDK_BYTE2SECTOR(512);
                pMarker->uType = RT_H2LE_U32(VMDK_MARKER_FOOTER);
                rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                            uFileOffset, aMarker, sizeof(aMarker));
                AssertRC(rc);

                uFileOffset += 512;
                /* Footer (a copy of the sparse header) follows the marker. */
                rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset, NULL);
                AssertRC(rc);

                uFileOffset += 512;
                /* End-of-stream marker. */
                memset(pMarker, '\0', sizeof(aMarker));
                rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                            uFileOffset, aMarker, sizeof(aMarker));
                AssertRC(rc);
            }
        }
        else if (!fDelete && fFlush)
            vmdkFlushImage(pImage, NULL);

        /* Release the extents, closing (and possibly deleting) their files.
         * The first error encountered is kept and returned. */
        if (pImage->pExtents != NULL)
        {
            for (unsigned i = 0 ; i < pImage->cExtents; i++)
            {
                int rc2 = vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
                if (RT_SUCCESS(rc))
                    rc = rc2; /* Propagate any error when closing the file. */
            }
            RTMemFree(pImage->pExtents);
            pImage->pExtents = NULL;
        }
        pImage->cExtents = 0;
        if (pImage->pFile != NULL)
        {
            int rc2 = vmdkFileClose(pImage, &pImage->pFile, fDelete);
            if (RT_SUCCESS(rc))
                rc = rc2; /* Propagate any error when closing the file. */
        }
        int rc2 = vmdkFileCheckAllClose(pImage);
        if (RT_SUCCESS(rc))
            rc = rc2; /* Propagate any error when closing the file. */

        if (pImage->pGTCache)
        {
            RTMemFree(pImage->pGTCache);
            pImage->pGTCache = NULL;
        }
        if (pImage->pDescData)
        {
            RTMemFree(pImage->pDescData);
            pImage->pDescData = NULL;
        }
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6238
/**
 * Internal. Flush image data (and metadata) to disk.
 *
 * Writes the descriptor if dirty, then per extent any dirty sparse metadata
 * (header at offset 0, or footer at the append position for footer-style
 * streamOptimized extents), and finally flushes the underlying files of the
 * writable non-ZERO extents.
 *
 * @returns VBox status code.
 * @param   pImage  The image instance.
 * @param   pIoCtx  The I/O context forwarded to the write/flush helpers
 *                  (vmdkFreeImage passes NULL here for the sync path).
 */
static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
{
    PVMDKEXTENT pExtent;
    int rc = VINF_SUCCESS;

    /* Update descriptor if changed. */
    if (pImage->Descriptor.fDirty)
        rc = vmdkWriteDescriptor(pImage, pIoCtx);

    if (RT_SUCCESS(rc))
    {
        for (unsigned i = 0; i < pImage->cExtents; i++)
        {
            pExtent = &pImage->pExtents[i];
            if (pExtent->pFile != NULL && pExtent->fMetaDirty)
            {
                switch (pExtent->enmType)
                {
                    case VMDKETYPE_HOSTED_SPARSE:
                        if (!pExtent->fFooter)
                            /* Header-style extent: metadata lives at offset 0. */
                            rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0, pIoCtx);
                        else
                        {
                            uint64_t uFileOffset = pExtent->uAppendPosition;
                            /* Simply skip writing anything if the streamOptimized
                             * image hasn't been just created. */
                            if (!uFileOffset)
                                break;
                            uFileOffset = RT_ALIGN_64(uFileOffset, 512);
                            rc = vmdkWriteMetaSparseExtent(pImage, pExtent,
                                                           uFileOffset, pIoCtx);
                        }
                        break;
                    case VMDKETYPE_VMFS:
                    case VMDKETYPE_FLAT:
                        /* Nothing to do. */
                        break;
                    case VMDKETYPE_ZERO:
                    default:
                        AssertMsgFailed(("extent with type %d marked as dirty\n",
                                         pExtent->enmType));
                        break;
                }
            }

            /* Stop at the first metadata write failure. */
            if (RT_FAILURE(rc))
                break;

            switch (pExtent->enmType)
            {
                case VMDKETYPE_HOSTED_SPARSE:
                case VMDKETYPE_VMFS:
                case VMDKETYPE_FLAT:
                    /** @todo implement proper path absolute check. */
                    /* Flush only writable extents whose basename is not an
                     * absolute path (leading slash check stands in for a real
                     * absolute-path test, see todo above). */
                    if (   pExtent->pFile != NULL
                        && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                        && !(pExtent->pszBasename[0] == RTPATH_SLASH))
                        rc = vdIfIoIntFileFlush(pImage->pIfIo, pExtent->pFile->pStorage, pIoCtx,
                                                NULL, NULL);
                    break;
                case VMDKETYPE_ZERO:
                    /* No need to do anything for this extent. */
                    break;
                default:
                    AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
                    break;
            }
        }
    }

    return rc;
}
6314
6315/**
6316 * Internal. Find extent corresponding to the sector number in the disk.
6317 */
6318static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
6319 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
6320{
6321 PVMDKEXTENT pExtent = NULL;
6322 int rc = VINF_SUCCESS;
6323
6324 for (unsigned i = 0; i < pImage->cExtents; i++)
6325 {
6326 if (offSector < pImage->pExtents[i].cNominalSectors)
6327 {
6328 pExtent = &pImage->pExtents[i];
6329 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
6330 break;
6331 }
6332 offSector -= pImage->pExtents[i].cNominalSectors;
6333 }
6334
6335 if (pExtent)
6336 *ppExtent = pExtent;
6337 else
6338 rc = VERR_IO_SECTOR_NOT_FOUND;
6339
6340 return rc;
6341}
6342
6343/**
6344 * Internal. Hash function for placing the grain table hash entries.
6345 */
6346static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
6347 unsigned uExtent)
6348{
6349 /** @todo this hash function is quite simple, maybe use a better one which
6350 * scrambles the bits better. */
6351 return (uSector + uExtent) % pCache->cEntries;
6352}
6353
/**
 * Internal. Get sector number in the extent file from the relative sector
 * number in the extent.
 *
 * Looks up the grain table entry for the grain containing @a uSector through
 * the shared grain table cache and translates it into an absolute sector
 * number in the extent file.
 *
 * @returns VBox status code.
 * @param   pImage          The image instance.
 * @param   pIoCtx          The I/O context used for the metadata read on a
 *                          cache miss.
 * @param   pExtent         The extent to resolve the sector in.
 * @param   uSector         Sector number relative to the extent start.
 * @param   puExtentSector  Where to store the sector number in the extent
 *                          file; 0 if the grain is not allocated.
 */
static int vmdkGetSector(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
                         PVMDKEXTENT pExtent, uint64_t uSector,
                         uint64_t *puExtentSector)
{
    PVMDKGTCACHE pCache = pImage->pGTCache;
    uint64_t uGDIndex, uGTSector, uGTBlock;
    uint32_t uGTHash, uGTBlockIndex;
    PVMDKGTCACHEENTRY pGTCacheEntry;
    uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    int rc;

    /* For newly created and readonly/sequentially opened streamOptimized
     * images this must be a no-op, as the grain directory is not there. */
    if (   (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
            && pExtent->uAppendPosition)
        || (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
            && pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
            && pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
    {
        *puExtentSector = 0;
        return VINF_SUCCESS;
    }

    uGDIndex = uSector / pExtent->cSectorsPerGDE;
    if (uGDIndex >= pExtent->cGDEntries)
        return VERR_OUT_OF_RANGE;
    uGTSector = pExtent->pGD[uGDIndex];
    if (!uGTSector)
    {
        /* There is no grain table referenced by this grain directory
         * entry. So there is absolutely no data in this area. */
        *puExtentSector = 0;
        return VINF_SUCCESS;
    }

    /* One cache line covers VMDK_GT_CACHELINE_SIZE consecutive grains. */
    uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    pGTCacheEntry = &pCache->aGTCache[uGTHash];
    if (   pGTCacheEntry->uExtent != pExtent->uExtent
        || pGTCacheEntry->uGTBlock != uGTBlock)
    {
        /* Cache miss, fetch data from disk.  The second addend is the byte
         * offset of this cache line within the grain table on disk. */
        PVDMETAXFER pMetaXfer;
        rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                   VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                   aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
        if (RT_FAILURE(rc))
            return rc;
        /* We can release the metadata transfer immediately. */
        vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
        /* Fill the cache entry, converting the on-disk little endian entries
         * to host byte order. */
        pGTCacheEntry->uExtent = pExtent->uExtent;
        pGTCacheEntry->uGTBlock = uGTBlock;
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    }
    /* Index of the grain within the cache line; a zero entry means the
     * grain is unallocated. */
    uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
    if (uGrainSector)
        *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
    else
        *puExtentSector = 0;
    return VINF_SUCCESS;
}
6421
/**
 * Internal. Writes the grain and also if necessary the grain tables.
 * Uses the grain table cache as a true grain table.
 *
 * streamOptimized writing: grains must be written in ascending order (never
 * going back), each write covering a full grain except possibly the last one
 * of the image.  The data is compressed and appended at the sector aligned
 * append position.  Crossing into a new grain directory entry flushes the
 * buffered grain table(s) for the previous entries first.
 *
 * @returns VBox status code.
 * @param   pImage   The image instance.
 * @param   pExtent  The (hosted sparse) extent to write to.
 * @param   uSector  First sector (relative to the extent) to write.
 * @param   pIoCtx   The I/O context holding the data to write.
 * @param   cbWrite  Number of bytes to write.
 */
static int vmdkStreamAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                uint64_t uSector, PVDIOCTX pIoCtx,
                                uint64_t cbWrite)
{
    uint32_t uGrain;
    uint32_t uGDEntry, uLastGDEntry;
    uint32_t cbGrain = 0;
    uint32_t uCacheLine, uCacheEntry;
    const void *pData;
    int rc;

    /* Very strict requirements: always write at least one full grain, with
     * proper alignment. Everything else would require reading of already
     * written data, which we don't support for obvious reasons. The only
     * exception is the last grain, and only if the image size specifies
     * that only some portion holds data. In any case the write must be
     * within the image limits, no "overshoot" allowed. */
    if (   cbWrite == 0
        || (   cbWrite < VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
            && pExtent->cNominalSectors - uSector >= pExtent->cSectorsPerGrain)
        || uSector % pExtent->cSectorsPerGrain
        || uSector + VMDK_BYTE2SECTOR(cbWrite) > pExtent->cNominalSectors)
        return VERR_INVALID_PARAMETER;

    /* Clip write range to at most the rest of the grain. */
    cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain));

    /* Do not allow to go back. */
    uGrain = uSector / pExtent->cSectorsPerGrain;
    uCacheLine = uGrain % pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
    uCacheEntry = uGrain % VMDK_GT_CACHELINE_SIZE;
    uGDEntry = uGrain / pExtent->cGTEntries;
    uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
    if (uGrain < pExtent->uLastGrainAccess)
        return VERR_VD_VMDK_INVALID_WRITE;

    /* Zero byte write optimization. Since we don't tell VBoxHDD that we need
     * to allocate something, we also need to detect the situation ourself. */
    if (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_ZEROES)
        && vdIfIoIntIoCtxIsZero(pImage->pIfIo, pIoCtx, cbWrite, true /* fAdvance */))
        return VINF_SUCCESS;

    /* Entered a new grain directory entry: flush the buffered grain table
     * and write out (possibly empty) tables for any skipped entries. */
    if (uGDEntry != uLastGDEntry)
    {
        rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
        if (RT_FAILURE(rc))
            return rc;
        vmdkStreamClearGT(pImage, pExtent);
        for (uint32_t i = uLastGDEntry + 1; i < uGDEntry; i++)
        {
            rc = vmdkStreamFlushGT(pImage, pExtent, i);
            if (RT_FAILURE(rc))
                return rc;
        }
    }

    uint64_t uFileOffset;
    uFileOffset = pExtent->uAppendPosition;
    if (!uFileOffset)
        return VERR_INTERNAL_ERROR;
    /* Align to sector, as the previous write could have been any size. */
    uFileOffset = RT_ALIGN_64(uFileOffset, 512);

    /* Paranoia check: extent type, grain table buffer presence and
     * grain table buffer space. Also grain table entry must be clear. */
    if (   pExtent->enmType != VMDKETYPE_HOSTED_SPARSE
        || !pImage->pGTCache
        || pExtent->cGTEntries > VMDK_GT_CACHE_SIZE * VMDK_GT_CACHELINE_SIZE
        || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry])
        return VERR_INTERNAL_ERROR;

    /* Update grain table entry. */
    pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset);

    if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
    {
        /* Partial (last) grain: copy the data and zero-pad to a full grain. */
        vdIfIoIntIoCtxCopyFrom(pImage->pIfIo, pIoCtx, pExtent->pvGrain, cbWrite);
        memset((char *)pExtent->pvGrain + cbWrite, '\0',
               VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite);
        pData = pExtent->pvGrain;
    }
    else
    {
        RTSGSEG Segment;
        unsigned cSegments = 1;

        /* Full grain: compress straight out of the I/O context buffer. */
        size_t cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
                                                    &cSegments, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
        Assert(cbSeg == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)); RT_NOREF(cbSeg);
        pData = Segment.pvSeg;
    }
    rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, pData,
                             VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                             uSector, &cbGrain);
    if (RT_FAILURE(rc))
    {
        /* Invalidate the sequential-read position on failure. */
        pExtent->uGrainSectorAbs = 0;
        AssertRC(rc);
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
    }
    pExtent->uLastGrainAccess = uGrain;
    pExtent->uAppendPosition += cbGrain;

    return rc;
}
6531
/**
 * Internal: Updates the grain table during grain allocation.
 *
 * Loads the affected grain table block into the grain table cache (unless
 * already cached), stores the newly allocated grain offset in it and writes
 * the block back to the grain table on disk — and to the redundant grain
 * table as well if the extent has one.  When the metadata read is performed
 * asynchronously, the function returns VERR_VD_ASYNC_IO_IN_PROGRESS and sets
 * fGTUpdateNeeded so that vmdkAllocGrainComplete re-invokes it after the
 * read completed.
 *
 * @returns VBox status code, VERR_VD_ASYNC_IO_IN_PROGRESS if a metadata
 *          transfer is still outstanding.
 * @param   pImage       The image instance.
 * @param   pExtent      The extent the grain was allocated in.
 * @param   pIoCtx       The I/O context for the metadata transfers.
 * @param   pGrainAlloc  The grain allocation tracking structure (holds the
 *                       sector, GT/RGT sectors and the new grain offset).
 */
static int vmdkAllocGrainGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
                                  PVMDKGRAINALLOCASYNC pGrainAlloc)
{
    int rc = VINF_SUCCESS;
    PVMDKGTCACHE pCache = pImage->pGTCache;
    uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    uint32_t uGTHash, uGTBlockIndex;
    uint64_t uGTSector, uRGTSector, uGTBlock;
    uint64_t uSector = pGrainAlloc->uSector;
    PVMDKGTCACHEENTRY pGTCacheEntry;

    LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
                 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));

    uGTSector = pGrainAlloc->uGTSector;
    uRGTSector = pGrainAlloc->uRGTSector;
    LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));

    /* Update the grain table (and the cache). */
    uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    pGTCacheEntry = &pCache->aGTCache[uGTHash];
    if (   pGTCacheEntry->uExtent != pExtent->uExtent
        || pGTCacheEntry->uGTBlock != uGTBlock)
    {
        /* Cache miss, fetch data from disk. */
        LogFlow(("Cache miss, fetch data from disk\n"));
        PVDMETAXFER pMetaXfer = NULL;
        rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                   VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                   aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
                                   &pMetaXfer, vmdkAllocGrainComplete, pGrainAlloc);
        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        {
            pGrainAlloc->cIoXfersPending++;
            pGrainAlloc->fGTUpdateNeeded = true;
            /* Leave early, we will be called again after the read completed. */
            LogFlowFunc(("Metadata read in progress, leaving\n"));
            return rc;
        }
        else if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
        vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
        /* Populate the cache entry in host byte order. */
        pGTCacheEntry->uExtent = pExtent->uExtent;
        pGTCacheEntry->uGTBlock = uGTBlock;
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    }
    else
    {
        /* Cache hit. Convert grain table block back to disk format, otherwise
         * the code below will write garbage for all but the updated entry. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
    }
    pGrainAlloc->fGTUpdateNeeded = false;
    /* Store the new grain offset: little endian in the on-disk buffer,
     * host order in the cache. */
    uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset));
    pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset);
    /* Update grain table on disk. */
    rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
                                vmdkAllocGrainComplete, pGrainAlloc);
    if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        pGrainAlloc->cIoXfersPending++;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
    if (pExtent->pRGD)
    {
        /* Update backup grain table on disk. */
        rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                    VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                    aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
                                    vmdkAllocGrainComplete, pGrainAlloc);
        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            pGrainAlloc->cIoXfersPending++;
        else if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
    }

    LogFlowFunc(("leaving rc=%Rrc\n", rc));
    return rc;
}
6619
6620/**
6621 * Internal - complete the grain allocation by updating disk grain table if required.
6622 */
6623static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
6624{
6625 RT_NOREF1(rcReq);
6626 int rc = VINF_SUCCESS;
6627 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6628 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
6629
6630 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
6631 pBackendData, pIoCtx, pvUser, rcReq));
6632
6633 pGrainAlloc->cIoXfersPending--;
6634 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
6635 rc = vmdkAllocGrainGTUpdate(pImage, pGrainAlloc->pExtent, pIoCtx, pGrainAlloc);
6636
6637 if (!pGrainAlloc->cIoXfersPending)
6638 {
6639 /* Grain allocation completed. */
6640 RTMemFree(pGrainAlloc);
6641 }
6642
6643 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
6644 return rc;
6645}
6646
6647/**
6648 * Internal. Allocates a new grain table (if necessary).
6649 */
6650static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
6651 uint64_t uSector, uint64_t cbWrite)
6652{
6653 PVMDKGTCACHE pCache = pImage->pGTCache; NOREF(pCache);
6654 uint64_t uGDIndex, uGTSector, uRGTSector;
6655 uint64_t uFileOffset;
6656 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
6657 int rc;
6658
6659 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
6660 pCache, pExtent, pIoCtx, uSector, cbWrite));
6661
6662 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
6663 if (!pGrainAlloc)
6664 return VERR_NO_MEMORY;
6665
6666 pGrainAlloc->pExtent = pExtent;
6667 pGrainAlloc->uSector = uSector;
6668
6669 uGDIndex = uSector / pExtent->cSectorsPerGDE;
6670 if (uGDIndex >= pExtent->cGDEntries)
6671 {
6672 RTMemFree(pGrainAlloc);
6673 return VERR_OUT_OF_RANGE;
6674 }
6675 uGTSector = pExtent->pGD[uGDIndex];
6676 if (pExtent->pRGD)
6677 uRGTSector = pExtent->pRGD[uGDIndex];
6678 else
6679 uRGTSector = 0; /**< avoid compiler warning */
6680 if (!uGTSector)
6681 {
6682 LogFlow(("Allocating new grain table\n"));
6683
6684 /* There is no grain table referenced by this grain directory
6685 * entry. So there is absolutely no data in this area. Allocate
6686 * a new grain table and put the reference to it in the GDs. */
6687 uFileOffset = pExtent->uAppendPosition;
6688 if (!uFileOffset)
6689 {
6690 RTMemFree(pGrainAlloc);
6691 return VERR_INTERNAL_ERROR;
6692 }
6693 Assert(!(uFileOffset % 512));
6694
6695 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
6696 uGTSector = VMDK_BYTE2SECTOR(uFileOffset);
6697
6698 /* Normally the grain table is preallocated for hosted sparse extents
6699 * that support more than 32 bit sector numbers. So this shouldn't
6700 * ever happen on a valid extent. */
6701 if (uGTSector > UINT32_MAX)
6702 {
6703 RTMemFree(pGrainAlloc);
6704 return VERR_VD_VMDK_INVALID_HEADER;
6705 }
6706
6707 /* Write grain table by writing the required number of grain table
6708 * cache chunks. Allocate memory dynamically here or we flood the
6709 * metadata cache with very small entries. */
6710 size_t cbGTDataTmp = pExtent->cGTEntries * sizeof(uint32_t);
6711 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);
6712
6713 if (!paGTDataTmp)
6714 {
6715 RTMemFree(pGrainAlloc);
6716 return VERR_NO_MEMORY;
6717 }
6718
6719 memset(paGTDataTmp, '\0', cbGTDataTmp);
6720 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6721 VMDK_SECTOR2BYTE(uGTSector),
6722 paGTDataTmp, cbGTDataTmp, pIoCtx,
6723 vmdkAllocGrainComplete, pGrainAlloc);
6724 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6725 pGrainAlloc->cIoXfersPending++;
6726 else if (RT_FAILURE(rc))
6727 {
6728 RTMemTmpFree(paGTDataTmp);
6729 RTMemFree(pGrainAlloc);
6730 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
6731 }
6732 pExtent->uAppendPosition = RT_ALIGN_64( pExtent->uAppendPosition
6733 + cbGTDataTmp, 512);
6734
6735 if (pExtent->pRGD)
6736 {
6737 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
6738 uFileOffset = pExtent->uAppendPosition;
6739 if (!uFileOffset)
6740 return VERR_INTERNAL_ERROR;
6741 Assert(!(uFileOffset % 512));
6742 uRGTSector = VMDK_BYTE2SECTOR(uFileOffset);
6743
6744 /* Normally the redundant grain table is preallocated for hosted
6745 * sparse extents that support more than 32 bit sector numbers. So
6746 * this shouldn't ever happen on a valid extent. */
6747 if (uRGTSector > UINT32_MAX)
6748 {
6749 RTMemTmpFree(paGTDataTmp);
6750 return VERR_VD_VMDK_INVALID_HEADER;
6751 }
6752
6753 /* Write grain table by writing the required number of grain table
6754 * cache chunks. Allocate memory dynamically here or we flood the
6755 * metadata cache with very small entries. */
6756 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6757 VMDK_SECTOR2BYTE(uRGTSector),
6758 paGTDataTmp, cbGTDataTmp, pIoCtx,
6759 vmdkAllocGrainComplete, pGrainAlloc);
6760 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6761 pGrainAlloc->cIoXfersPending++;
6762 else if (RT_FAILURE(rc))
6763 {
6764 RTMemTmpFree(paGTDataTmp);
6765 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
6766 }
6767
6768 pExtent->uAppendPosition = pExtent->uAppendPosition + cbGTDataTmp;
6769 }
6770
6771 RTMemTmpFree(paGTDataTmp);
6772
6773 /* Update the grain directory on disk (doing it before writing the
6774 * grain table will result in a garbled extent if the operation is
6775 * aborted for some reason. Otherwise the worst that can happen is
6776 * some unused sectors in the extent. */
6777 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
6778 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6779 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
6780 &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
6781 vmdkAllocGrainComplete, pGrainAlloc);
6782 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6783 pGrainAlloc->cIoXfersPending++;
6784 else if (RT_FAILURE(rc))
6785 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
6786 if (pExtent->pRGD)
6787 {
6788 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
6789 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6790 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
6791 &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
6792 vmdkAllocGrainComplete, pGrainAlloc);
6793 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6794 pGrainAlloc->cIoXfersPending++;
6795 else if (RT_FAILURE(rc))
6796 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
6797 }
6798
6799 /* As the final step update the in-memory copy of the GDs. */
6800 pExtent->pGD[uGDIndex] = uGTSector;
6801 if (pExtent->pRGD)
6802 pExtent->pRGD[uGDIndex] = uRGTSector;
6803 }
6804
6805 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
6806 pGrainAlloc->uGTSector = uGTSector;
6807 pGrainAlloc->uRGTSector = uRGTSector;
6808
6809 uFileOffset = pExtent->uAppendPosition;
6810 if (!uFileOffset)
6811 return VERR_INTERNAL_ERROR;
6812 Assert(!(uFileOffset % 512));
6813
6814 pGrainAlloc->uGrainOffset = uFileOffset;
6815
6816 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6817 {
6818 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6819 ("Accesses to stream optimized images must be synchronous\n"),
6820 VERR_INVALID_STATE);
6821
6822 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
6823 return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname);
6824
6825 /* Invalidate cache, just in case some code incorrectly allows mixing
6826 * of reads and writes. Normally shouldn't be needed. */
6827 pExtent->uGrainSectorAbs = 0;
6828
6829 /* Write compressed data block and the markers. */
6830 uint32_t cbGrain = 0;
6831 RTSGSEG Segment;
6832 unsigned cSegments = 1;
6833
6834 size_t cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
6835 &cSegments, cbWrite);
6836 Assert(cbSeg == cbWrite); RT_NOREF(cbSeg);
6837
6838 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset,
6839 Segment.pvSeg, cbWrite, uSector, &cbGrain);
6840 if (RT_FAILURE(rc))
6841 {
6842 AssertRC(rc);
6843 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
6844 }
6845 pExtent->uLastGrainAccess = uSector / pExtent->cSectorsPerGrain;
6846 pExtent->uAppendPosition += cbGrain;
6847 }
6848 else
6849 {
6850 /* Write the data. Always a full grain, or we're in big trouble. */
6851 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
6852 uFileOffset, pIoCtx, cbWrite,
6853 vmdkAllocGrainComplete, pGrainAlloc);
6854 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6855 pGrainAlloc->cIoXfersPending++;
6856 else if (RT_FAILURE(rc))
6857 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
6858
6859 pExtent->uAppendPosition += cbWrite;
6860 }
6861
6862 rc = vmdkAllocGrainGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);
6863
6864 if (!pGrainAlloc->cIoXfersPending)
6865 {
6866 /* Grain allocation completed. */
6867 RTMemFree(pGrainAlloc);
6868 }
6869
6870 LogFlowFunc(("leaving rc=%Rrc\n", rc));
6871
6872 return rc;
6873}
6874
/**
 * Internal. Reads the contents by sequentially going over the compressed
 * grains (hoping that they are in sequence).
 *
 * Only usable for stream optimized images accessed synchronously and
 * strictly forward: markers in the extent file are parsed one after the
 * other and each compressed grain is inflated into the per-extent buffer
 * pExtent->pvGrain, from which the request is then served.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_BLOCK_FREE if the next stored grain starts after the
 *          requested area (no data stored for it).
 * @retval  VERR_VD_VMDK_INVALID_STATE on a backward seek, after a previous
 *          failure (flagged by pExtent->uGrainSectorAbs == 0) or when the
 *          marker stream is corrupt.
 * @param   pImage  VMDK image instance.
 * @param   pExtent The (stream optimized) extent to read from.
 * @param   uSector Extent-relative sector to start reading at.
 * @param   pIoCtx  I/O context to store the data into (must be synchronous).
 * @param   cbRead  Number of bytes to read.
 */
static int vmdkStreamReadSequential(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                    uint64_t uSector, PVDIOCTX pIoCtx,
                                    uint64_t cbRead)
{
    int rc;

    LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pIoCtx=%#p cbRead=%llu\n",
                 pImage, pExtent, uSector, pIoCtx, cbRead));

    AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
                    ("Async I/O not supported for sequential stream optimized images\n"),
                    VERR_INVALID_STATE);

    /* Do not allow to go back. */
    uint32_t uGrain = uSector / pExtent->cSectorsPerGrain;
    if (uGrain < pExtent->uLastGrainAccess)
        return VERR_VD_VMDK_INVALID_STATE;
    pExtent->uLastGrainAccess = uGrain;

    /* After a previous error do not attempt to recover, as it would need
     * seeking (in the general case backwards which is forbidden). */
    if (!pExtent->uGrainSectorAbs)
        return VERR_VD_VMDK_INVALID_STATE;

    /* Check if we need to read something from the image or if what we have
     * in the buffer is good to fulfill the request. */
    if (!pExtent->cbGrainStreamRead || uGrain > pExtent->uGrain)
    {
        /* Absolute sector of the first not yet parsed marker in the file. */
        uint32_t uGrainSectorAbs = pExtent->uGrainSectorAbs
                                 + VMDK_BYTE2SECTOR(pExtent->cbGrainStreamRead);

        /* Get the marker from the next data block - and skip everything which
         * is not a compressed grain. If it's a compressed grain which is for
         * the requested sector (or after), read it. */
        VMDKMARKER Marker;
        do
        {
            RT_ZERO(Marker);
            /* Read only the leading uSector/cbSize part of the marker here. */
            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                       VMDK_SECTOR2BYTE(uGrainSectorAbs),
                                       &Marker, RT_UOFFSETOF(VMDKMARKER, uType));
            if (RT_FAILURE(rc))
                return rc;
            /* Marker fields are stored little endian on disk. */
            Marker.uSector = RT_LE2H_U64(Marker.uSector);
            Marker.cbSize = RT_LE2H_U32(Marker.cbSize);

            if (Marker.cbSize == 0)
            {
                /* A marker for something else than a compressed grain. */
                rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                           VMDK_SECTOR2BYTE(uGrainSectorAbs)
                                           + RT_UOFFSETOF(VMDKMARKER, uType),
                                           &Marker.uType, sizeof(Marker.uType));
                if (RT_FAILURE(rc))
                    return rc;
                Marker.uType = RT_LE2H_U32(Marker.uType);
                /* Advance past the marker; the skip distance depends on the
                 * marker type. */
                switch (Marker.uType)
                {
                    case VMDK_MARKER_EOS:
                        uGrainSectorAbs++;
                        /* Read (or mostly skip) to the end of file. Uses the
                         * Marker (LBA sector) as it is unused anyway. This
                         * makes sure that really everything is read in the
                         * success case. If this read fails it means the image
                         * is truncated, but this is harmless so ignore. */
                        vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                              VMDK_SECTOR2BYTE(uGrainSectorAbs)
                                              + 511,
                                              &Marker.uSector, 1);
                        break;
                    case VMDK_MARKER_GT:
                        uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
                        break;
                    case VMDK_MARKER_GD:
                        uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(RT_ALIGN(pExtent->cGDEntries * sizeof(uint32_t), 512));
                        break;
                    case VMDK_MARKER_FOOTER:
                        uGrainSectorAbs += 2;
                        break;
                    case VMDK_MARKER_UNSPECIFIED:
                        /* Skip over the contents of the unspecified marker
                         * type 4 which exists in some vSphere created files. */
                        /** @todo figure out what the payload means. */
                        uGrainSectorAbs += 1;
                        break;
                    default:
                        AssertMsgFailed(("VMDK: corrupted marker, type=%#x\n", Marker.uType));
                        /* Zero flags the error state; no further reads. */
                        pExtent->uGrainSectorAbs = 0;
                        return VERR_VD_VMDK_INVALID_STATE;
                }
                pExtent->cbGrainStreamRead = 0;
            }
            else
            {
                /* A compressed grain marker. If it is at/after what we're
                 * interested in read and decompress data. */
                /* NOTE(review): '>' also keeps a grain whose range ends
                 * exactly at uSector (which cannot contain uSector); looks
                 * like it might be intended as '>=' - preserved as-is,
                 * verify against upstream. */
                if (uSector > Marker.uSector + pExtent->cSectorsPerGrain)
                {
                    uGrainSectorAbs += VMDK_BYTE2SECTOR(RT_ALIGN(Marker.cbSize + RT_UOFFSETOF(VMDKMARKER, uType), 512));
                    continue;
                }
                uint64_t uLBA = 0;
                uint32_t cbGrainStreamRead = 0;
                /* Inflate the whole grain into the per-extent buffer. */
                rc = vmdkFileInflateSync(pImage, pExtent,
                                         VMDK_SECTOR2BYTE(uGrainSectorAbs),
                                         pExtent->pvGrain,
                                         VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                         &Marker, &uLBA, &cbGrainStreamRead);
                if (RT_FAILURE(rc))
                {
                    pExtent->uGrainSectorAbs = 0;
                    return rc;
                }
                /* Grains must appear with strictly increasing LBAs. */
                if (   pExtent->uGrain
                    && uLBA / pExtent->cSectorsPerGrain <= pExtent->uGrain)
                {
                    pExtent->uGrainSectorAbs = 0;
                    return VERR_VD_VMDK_INVALID_STATE;
                }
                pExtent->uGrain = uLBA / pExtent->cSectorsPerGrain;
                pExtent->cbGrainStreamRead = cbGrainStreamRead;
                break;
            }
        } while (Marker.uType != VMDK_MARKER_EOS);

        pExtent->uGrainSectorAbs = uGrainSectorAbs;

        if (!pExtent->cbGrainStreamRead && Marker.uType == VMDK_MARKER_EOS)
        {
            pExtent->uGrain = UINT32_MAX;
            /* Must set a non-zero value for pExtent->cbGrainStreamRead or
             * the next read would try to get more data, and we're at EOF. */
            pExtent->cbGrainStreamRead = 1;
        }
    }

    if (pExtent->uGrain > uSector / pExtent->cSectorsPerGrain)
    {
        /* The next data block we have is not for this area, so just return
         * that there is no data. */
        LogFlowFunc(("returns VERR_VD_BLOCK_FREE\n"));
        return VERR_VD_BLOCK_FREE;
    }

    /* Serve the request from the buffered (decompressed) grain. */
    uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain;
    vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
                         (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain),
                         cbRead);
    LogFlowFunc(("returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
7030
7031/**
7032 * Replaces a fragment of a string with the specified string.
7033 *
7034 * @returns Pointer to the allocated UTF-8 string.
7035 * @param pszWhere UTF-8 string to search in.
7036 * @param pszWhat UTF-8 string to search for.
7037 * @param pszByWhat UTF-8 string to replace the found string with.
7038 *
7039 * @note r=bird: This is only used by vmdkRenameWorker(). The first use is
7040 * for updating the base name in the descriptor, the second is for
7041 * generating new filenames for extents. This code borked when
 *             RTPathAbs started correcting the drive letter case on windows,
 *             when strstr failed because the replacement string was not
 *             subjected to RTPathAbs while pExtent->pszFullname was.  I fixed
 *             this by applying RTPathAbs to the places it wasn't applied.
7046 *
7047 * However, this highlights some undocumented ASSUMPTIONS as well as
7048 * terrible short commings of the approach.
7049 *
7050 * Given the right filename, it may also screw up the descriptor. Take
7051 * the descriptor text 'RW 2048 SPARSE "Test0.vmdk"' for instance,
7052 * we'll be asked to replace "Test0" with something, no problem. No,
7053 * imagine 'RW 2048 SPARSE "SPARSE.vmdk"', 'RW 2048 SPARSE "RW.vmdk"'
7054 * or 'RW 2048 SPARSE "2048.vmdk"', and the strstr approach falls on
7055 * its bum. The descriptor string must be parsed and reconstructed,
7056 * the lazy strstr approach doesn't cut it.
7057 *
7058 * I'm also curious as to what would be the correct escaping of '"' in
7059 * the file name and how that is supposed to be handled, because it
7060 * needs to be or such names must be rejected in several places (maybe
7061 * they are, I didn't check).
7062 *
7063 * When this function is used to replace the start of a path, I think
7064 * the assumption from the prep/setup code is that we kind of knows
7065 * what we're working on (I could be wrong). However, using strstr
7066 * instead of strncmp/RTStrNICmp makes no sense and isn't future proof.
7067 * Especially on unix systems, weird stuff could happen if someone
7068 * unwittingly tinkers with the prep/setup code. What should really be
7069 * done here is using a new RTPathStartEx function that (via flags)
7070 * allows matching partial final component and returns the length of
7071 * what it matched up (in case it skipped slashes and '.' components).
7072 *
7073 */
7074static char *vmdkStrReplace(const char *pszWhere, const char *pszWhat,
7075 const char *pszByWhat)
7076{
7077 AssertPtr(pszWhere);
7078 AssertPtr(pszWhat);
7079 AssertPtr(pszByWhat);
7080 const char *pszFoundStr = strstr(pszWhere, pszWhat);
7081 if (!pszFoundStr)
7082 {
7083 LogFlowFunc(("Failed to find '%s' in '%s'!\n", pszWhat, pszWhere));
7084 return NULL;
7085 }
7086 size_t cbFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
7087 char *pszNewStr = RTStrAlloc(cbFinal);
7088 if (pszNewStr)
7089 {
7090 char *pszTmp = pszNewStr;
7091 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
7092 pszTmp += pszFoundStr - pszWhere;
7093 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
7094 pszTmp += strlen(pszByWhat);
7095 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
7096 }
7097 return pszNewStr;
7098}
7099
7100
7101/** @copydoc VDIMAGEBACKEND::pfnProbe */
7102static DECLCALLBACK(int) vmdkProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
7103 PVDINTERFACE pVDIfsImage, VDTYPE enmDesiredType, VDTYPE *penmType)
7104{
7105 RT_NOREF(enmDesiredType);
7106 LogFlowFunc(("pszFilename=\"%s\" pVDIfsDisk=%#p pVDIfsImage=%#p penmType=%#p\n",
7107 pszFilename, pVDIfsDisk, pVDIfsImage, penmType));
7108 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7109 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7110
7111 int rc = VINF_SUCCESS;
7112 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
7113 if (RT_LIKELY(pImage))
7114 {
7115 pImage->pszFilename = pszFilename;
7116 pImage->pFile = NULL;
7117 pImage->pExtents = NULL;
7118 pImage->pFiles = NULL;
7119 pImage->pGTCache = NULL;
7120 pImage->pDescData = NULL;
7121 pImage->pVDIfsDisk = pVDIfsDisk;
7122 pImage->pVDIfsImage = pVDIfsImage;
7123 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
7124 * much as possible in vmdkOpenImage. */
7125 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
7126 vmdkFreeImage(pImage, false, false /*fFlush*/);
7127 RTMemFree(pImage);
7128
7129 if (RT_SUCCESS(rc))
7130 *penmType = VDTYPE_HDD;
7131 }
7132 else
7133 rc = VERR_NO_MEMORY;
7134
7135 LogFlowFunc(("returns %Rrc\n", rc));
7136 return rc;
7137}
7138
7139/** @copydoc VDIMAGEBACKEND::pfnOpen */
7140static DECLCALLBACK(int) vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
7141 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
7142 VDTYPE enmType, void **ppBackendData)
7143{
7144 RT_NOREF1(enmType); /**< @todo r=klaus make use of the type info. */
7145
7146 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p enmType=%u ppBackendData=%#p\n",
7147 pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, enmType, ppBackendData));
7148 int rc;
7149
7150 /* Check open flags. All valid flags are supported. */
7151 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
7152 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7153 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7154
7155
7156 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
7157 if (RT_LIKELY(pImage))
7158 {
7159 pImage->pszFilename = pszFilename;
7160 pImage->pFile = NULL;
7161 pImage->pExtents = NULL;
7162 pImage->pFiles = NULL;
7163 pImage->pGTCache = NULL;
7164 pImage->pDescData = NULL;
7165 pImage->pVDIfsDisk = pVDIfsDisk;
7166 pImage->pVDIfsImage = pVDIfsImage;
7167
7168 rc = vmdkOpenImage(pImage, uOpenFlags);
7169 if (RT_SUCCESS(rc))
7170 *ppBackendData = pImage;
7171 else
7172 RTMemFree(pImage);
7173 }
7174 else
7175 rc = VERR_NO_MEMORY;
7176
7177 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
7178 return rc;
7179}
7180
7181/** @copydoc VDIMAGEBACKEND::pfnCreate */
7182static DECLCALLBACK(int) vmdkCreate(const char *pszFilename, uint64_t cbSize,
7183 unsigned uImageFlags, const char *pszComment,
7184 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
7185 PCRTUUID pUuid, unsigned uOpenFlags,
7186 unsigned uPercentStart, unsigned uPercentSpan,
7187 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
7188 PVDINTERFACE pVDIfsOperation, VDTYPE enmType,
7189 void **ppBackendData)
7190{
7191 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p enmType=%u ppBackendData=%#p\n",
7192 pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData));
7193 int rc;
7194
7195 /* Check the VD container type and image flags. */
7196 if ( enmType != VDTYPE_HDD
7197 || (uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0)
7198 return VERR_VD_INVALID_TYPE;
7199
7200 /* Check size. Maximum 256TB-64K for sparse images, otherwise unlimited. */
7201 if ( !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
7202 && ( !cbSize
7203 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K)))
7204 return VERR_VD_INVALID_SIZE;
7205
7206 /* Check image flags for invalid combinations. */
7207 if ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7208 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF)))
7209 return VERR_INVALID_PARAMETER;
7210
7211 /* Check open flags. All valid flags are supported. */
7212 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
7213 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7214 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7215 AssertPtrReturn(pPCHSGeometry, VERR_INVALID_POINTER);
7216 AssertPtrReturn(pLCHSGeometry, VERR_INVALID_POINTER);
7217 AssertReturn(!( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
7218 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)),
7219 VERR_INVALID_PARAMETER);
7220
7221 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
7222 if (RT_LIKELY(pImage))
7223 {
7224 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
7225
7226 pImage->pszFilename = pszFilename;
7227 pImage->pFile = NULL;
7228 pImage->pExtents = NULL;
7229 pImage->pFiles = NULL;
7230 pImage->pGTCache = NULL;
7231 pImage->pDescData = NULL;
7232 pImage->pVDIfsDisk = pVDIfsDisk;
7233 pImage->pVDIfsImage = pVDIfsImage;
7234 /* Descriptors for split images can be pretty large, especially if the
7235 * filename is long. So prepare for the worst, and allocate quite some
7236 * memory for the descriptor in this case. */
7237 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
7238 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
7239 else
7240 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
7241 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
7242 if (RT_LIKELY(pImage->pDescData))
7243 {
7244 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
7245 pPCHSGeometry, pLCHSGeometry, pUuid,
7246 pIfProgress, uPercentStart, uPercentSpan);
7247 if (RT_SUCCESS(rc))
7248 {
7249 /* So far the image is opened in read/write mode. Make sure the
7250 * image is opened in read-only mode if the caller requested that. */
7251 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
7252 {
7253 vmdkFreeImage(pImage, false, true /*fFlush*/);
7254 rc = vmdkOpenImage(pImage, uOpenFlags);
7255 }
7256
7257 if (RT_SUCCESS(rc))
7258 *ppBackendData = pImage;
7259 }
7260
7261 if (RT_FAILURE(rc))
7262 RTMemFree(pImage->pDescData);
7263 }
7264 else
7265 rc = VERR_NO_MEMORY;
7266
7267 if (RT_FAILURE(rc))
7268 RTMemFree(pImage);
7269 }
7270 else
7271 rc = VERR_NO_MEMORY;
7272
7273 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
7274 return rc;
7275}
7276
/**
 * Prepares the state for renaming a VMDK image, setting up the state and allocating
 * memory.
 *
 * Snapshots everything needed to undo a partially completed rename: the
 * descriptor lines, old/new base and full names, and per-extent name arrays.
 * On any failure (including the AssertReturn early-outs below) the partially
 * initialized state is released by the caller via vmdkRenameStateDestroy(),
 * which vmdkRename() invokes unconditionally.
 *
 * @returns VBox status code.
 * @param   pImage          VMDK image instance.
 * @param   pRenameState    The state to initialize.
 * @param   pszFilename     The new filename.
 */
static int vmdkRenameStatePrepare(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
{
    /* The new name must contain an actual filename component. */
    AssertReturn(RTPathFilename(pszFilename) != NULL, VERR_INVALID_PARAMETER);

    int rc = VINF_SUCCESS;

    memset(&pRenameState->DescriptorCopy, 0, sizeof(pRenameState->DescriptorCopy));

    /*
     * Allocate an array to store both old and new names of renamed files
     * in case we have to roll back the changes. Arrays are initialized
     * with zeros. We actually save stuff when and if we change it.
     */
    /* Note: the old/new name arrays have one extra slot for the descriptor
     * file; the new-lines array covers only the extents. */
    pRenameState->cExtents = pImage->cExtents;
    pRenameState->apszOldName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
    pRenameState->apszNewName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
    pRenameState->apszNewLines = (char **)RTMemTmpAllocZ(pRenameState->cExtents * sizeof(char *));
    if (   pRenameState->apszOldName
        && pRenameState->apszNewName
        && pRenameState->apszNewLines)
    {
        /* Save the descriptor size and position. */
        if (pImage->pDescData)
        {
            /* Separate descriptor file. */
            pRenameState->fEmbeddedDesc = false;
        }
        else
        {
            /* Embedded descriptor file. */
            pRenameState->ExtentCopy = pImage->pExtents[0];
            pRenameState->fEmbeddedDesc = true;
        }

        /* Save the descriptor content. */
        pRenameState->DescriptorCopy.cLines = pImage->Descriptor.cLines;
        for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
        {
            pRenameState->DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
            if (!pRenameState->DescriptorCopy.aLines[i])
            {
                rc = VERR_NO_MEMORY;
                break;
            }
        }

        if (RT_SUCCESS(rc))
        {
            /* Prepare both old and new base names used for string replacement. */
            pRenameState->pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
            AssertReturn(pRenameState->pszNewBaseName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszNewBaseName);

            pRenameState->pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
            AssertReturn(pRenameState->pszOldBaseName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszOldBaseName);

            /* Prepare both old and new full names used for string replacement.
               Note! Must abspath the stuff here, so the strstr weirdness later in
                     the renaming process get a match against abspath'ed extent paths.
                     See RTPathAbsDup call in vmdkDescriptorReadSparse(). */
            pRenameState->pszNewFullName = RTPathAbsDup(pszFilename);
            AssertReturn(pRenameState->pszNewFullName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszNewFullName);

            pRenameState->pszOldFullName = RTPathAbsDup(pImage->pszFilename);
            AssertReturn(pRenameState->pszOldFullName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszOldFullName);

            /* Save the old name for easy access to the old descriptor file. */
            pRenameState->pszOldDescName = RTStrDup(pImage->pszFilename);
            AssertReturn(pRenameState->pszOldDescName, VERR_NO_STR_MEMORY);

            /* Save old image name. */
            pRenameState->pszOldImageName = pImage->pszFilename;
        }
    }
    else
        rc = VERR_NO_TMP_MEMORY;

    return rc;
}
7368
7369/**
7370 * Destroys the given rename state, freeing all allocated memory.
7371 *
7372 * @param pRenameState The rename state to destroy.
7373 */
7374static void vmdkRenameStateDestroy(PVMDKRENAMESTATE pRenameState)
7375{
7376 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
7377 if (pRenameState->DescriptorCopy.aLines[i])
7378 RTStrFree(pRenameState->DescriptorCopy.aLines[i]);
7379 if (pRenameState->apszOldName)
7380 {
7381 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
7382 if (pRenameState->apszOldName[i])
7383 RTStrFree(pRenameState->apszOldName[i]);
7384 RTMemTmpFree(pRenameState->apszOldName);
7385 }
7386 if (pRenameState->apszNewName)
7387 {
7388 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
7389 if (pRenameState->apszNewName[i])
7390 RTStrFree(pRenameState->apszNewName[i]);
7391 RTMemTmpFree(pRenameState->apszNewName);
7392 }
7393 if (pRenameState->apszNewLines)
7394 {
7395 for (unsigned i = 0; i < pRenameState->cExtents; i++)
7396 if (pRenameState->apszNewLines[i])
7397 RTStrFree(pRenameState->apszNewLines[i]);
7398 RTMemTmpFree(pRenameState->apszNewLines);
7399 }
7400 if (pRenameState->pszOldDescName)
7401 RTStrFree(pRenameState->pszOldDescName);
7402 if (pRenameState->pszOldBaseName)
7403 RTStrFree(pRenameState->pszOldBaseName);
7404 if (pRenameState->pszNewBaseName)
7405 RTStrFree(pRenameState->pszNewBaseName);
7406 if (pRenameState->pszOldFullName)
7407 RTStrFree(pRenameState->pszOldFullName);
7408 if (pRenameState->pszNewFullName)
7409 RTStrFree(pRenameState->pszNewFullName);
7410}
7411
/**
 * Rolls back the rename operation to the original state.
 *
 * Renames any already moved files back to their saved old names, restores
 * the saved descriptor (embedded or separate) and re-opens the image under
 * its original name.  Individual failures are asserted but do not stop the
 * rollback (best effort).
 *
 * @returns VBox status code.
 * @param   pImage          VMDK image instance.
 * @param   pRenameState    The rename state.
 */
static int vmdkRenameRollback(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState)
{
    int rc = VINF_SUCCESS;

    if (!pRenameState->fImageFreed)
    {
        /*
         * Some extents may have been closed, close the rest. We will
         * re-open the whole thing later.
         */
        vmdkFreeImage(pImage, false, true /*fFlush*/);
    }

    /* Rename files back. Only entries with a saved old name were actually
     * moved by the worker. */
    for (unsigned i = 0; i <= pRenameState->cExtents; i++)
    {
        if (pRenameState->apszOldName[i])
        {
            rc = vdIfIoIntFileMove(pImage->pIfIo, pRenameState->apszNewName[i], pRenameState->apszOldName[i], 0);
            AssertRC(rc);
        }
    }
    /* Restore the old descriptor. */
    PVMDKFILE pFile;
    rc = vmdkFileOpen(pImage, &pFile, NULL, pRenameState->pszOldDescName,
                      VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_NORMAL,
                                                 false /* fCreate */));
    AssertRC(rc);
    if (pRenameState->fEmbeddedDesc)
    {
        /* Temporarily implant the saved extent so the descriptor can be
         * written into the (embedded) extent file. */
        pRenameState->ExtentCopy.pFile = pFile;
        pImage->pExtents = &pRenameState->ExtentCopy;
    }
    else
    {
        /* Shouldn't be null for separate descriptor.
         * There will be no access to the actual content.
         */
        pImage->pDescData = pRenameState->pszOldDescName;
        pImage->pFile = pFile;
    }
    pImage->Descriptor = pRenameState->DescriptorCopy;
    vmdkWriteDescriptor(pImage, NULL);
    vmdkFileClose(pImage, &pFile, false);
    /* Get rid of the stuff we implanted. */
    pImage->pExtents = NULL;
    pImage->pFile = NULL;
    pImage->pDescData = NULL;
    /* Re-open the image back. */
    pImage->pszFilename = pRenameState->pszOldImageName;
    rc = vmdkOpenImage(pImage, pImage->uOpenFlags);

    return rc;
}
7473
7474/**
7475 * Rename worker doing the real work.
7476 *
7477 * @returns VBox status code.
7478 * @param pImage VMDK image instance.
7479 * @param pRenameState The rename state.
7480 * @param pszFilename The new filename.
7481 */
7482static int vmdkRenameWorker(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
7483{
7484 int rc = VINF_SUCCESS;
7485 unsigned i, line;
7486
7487 /* Update the descriptor with modified extent names. */
7488 for (i = 0, line = pImage->Descriptor.uFirstExtent;
7489 i < pRenameState->cExtents;
7490 i++, line = pImage->Descriptor.aNextLines[line])
7491 {
7492 /* Update the descriptor. */
7493 pRenameState->apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
7494 pRenameState->pszOldBaseName,
7495 pRenameState->pszNewBaseName);
7496 if (!pRenameState->apszNewLines[i])
7497 {
7498 rc = VERR_NO_MEMORY;
7499 break;
7500 }
7501 pImage->Descriptor.aLines[line] = pRenameState->apszNewLines[i];
7502 }
7503
7504 if (RT_SUCCESS(rc))
7505 {
7506 /* Make sure the descriptor gets written back. */
7507 pImage->Descriptor.fDirty = true;
7508 /* Flush the descriptor now, in case it is embedded. */
7509 vmdkFlushImage(pImage, NULL);
7510
7511 /* Close and rename/move extents. */
7512 for (i = 0; i < pRenameState->cExtents; i++)
7513 {
7514 PVMDKEXTENT pExtent = &pImage->pExtents[i];
7515 /* Compose new name for the extent. */
7516 pRenameState->apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
7517 pRenameState->pszOldFullName,
7518 pRenameState->pszNewFullName);
7519 if (!pRenameState->apszNewName[i])
7520 {
7521 rc = VERR_NO_MEMORY;
7522 break;
7523 }
7524 /* Close the extent file. */
7525 rc = vmdkFileClose(pImage, &pExtent->pFile, false);
7526 if (RT_FAILURE(rc))
7527 break;;
7528
7529 /* Rename the extent file. */
7530 rc = vdIfIoIntFileMove(pImage->pIfIo, pExtent->pszFullname, pRenameState->apszNewName[i], 0);
7531 if (RT_FAILURE(rc))
7532 break;
7533 /* Remember the old name. */
7534 pRenameState->apszOldName[i] = RTStrDup(pExtent->pszFullname);
7535 }
7536
7537 if (RT_SUCCESS(rc))
7538 {
7539 /* Release all old stuff. */
7540 rc = vmdkFreeImage(pImage, false, true /*fFlush*/);
7541 if (RT_SUCCESS(rc))
7542 {
7543 pRenameState->fImageFreed = true;
7544
7545 /* Last elements of new/old name arrays are intended for
7546 * storing descriptor's names.
7547 */
7548 pRenameState->apszNewName[pRenameState->cExtents] = RTStrDup(pszFilename);
7549 /* Rename the descriptor file if it's separate. */
7550 if (!pRenameState->fEmbeddedDesc)
7551 {
7552 rc = vdIfIoIntFileMove(pImage->pIfIo, pImage->pszFilename, pRenameState->apszNewName[pRenameState->cExtents], 0);
7553 if (RT_SUCCESS(rc))
7554 {
7555 /* Save old name only if we may need to change it back. */
7556 pRenameState->apszOldName[pRenameState->cExtents] = RTStrDup(pszFilename);
7557 }
7558 }
7559
7560 /* Update pImage with the new information. */
7561 pImage->pszFilename = pszFilename;
7562
7563 /* Open the new image. */
7564 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
7565 }
7566 }
7567 }
7568
7569 return rc;
7570}
7571
7572/** @copydoc VDIMAGEBACKEND::pfnRename */
7573static DECLCALLBACK(int) vmdkRename(void *pBackendData, const char *pszFilename)
7574{
7575 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
7576
7577 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7578 VMDKRENAMESTATE RenameState;
7579
7580 memset(&RenameState, 0, sizeof(RenameState));
7581
7582 /* Check arguments. */
7583 AssertPtrReturn(pImage, VERR_INVALID_POINTER);
7584 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7585 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7586 AssertReturn(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK), VERR_INVALID_PARAMETER);
7587
7588 int rc = vmdkRenameStatePrepare(pImage, &RenameState, pszFilename);
7589 if (RT_SUCCESS(rc))
7590 {
7591 /* --- Up to this point we have not done any damage yet. --- */
7592
7593 rc = vmdkRenameWorker(pImage, &RenameState, pszFilename);
7594 /* Roll back all changes in case of failure. */
7595 if (RT_FAILURE(rc))
7596 {
7597 int rrc = vmdkRenameRollback(pImage, &RenameState);
7598 AssertRC(rrc);
7599 }
7600 }
7601
7602 vmdkRenameStateDestroy(&RenameState);
7603 LogFlowFunc(("returns %Rrc\n", rc));
7604 return rc;
7605}
7606
7607/** @copydoc VDIMAGEBACKEND::pfnClose */
7608static DECLCALLBACK(int) vmdkClose(void *pBackendData, bool fDelete)
7609{
7610 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
7611 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7612
7613 int rc = vmdkFreeImage(pImage, fDelete, true /*fFlush*/);
7614 RTMemFree(pImage);
7615
7616 LogFlowFunc(("returns %Rrc\n", rc));
7617 return rc;
7618}
7619
/** @copydoc VDIMAGEBACKEND::pfnRead */
static DECLCALLBACK(int) vmdkRead(void *pBackendData, uint64_t uOffset, size_t cbToRead,
                                  PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToRead % 512 == 0);
    AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER);
    AssertReturn(cbToRead, VERR_INVALID_PARAMETER);
    AssertReturn(uOffset + cbToRead <= pImage->cbSize, VERR_INVALID_PARAMETER);

    /* Find the extent and check access permissions as defined in the extent descriptor. */
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;
    int rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                            &pExtent, &uSectorExtentRel);
    if (   RT_SUCCESS(rc)
        && pExtent->enmAccess != VMDKACCESS_NOACCESS)
    {
        /* Clip read range to remain in this extent. */
        cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

        /* Handle the read according to the current extent type. */
        switch (pExtent->enmType)
        {
            case VMDKETYPE_HOSTED_SPARSE:
            {
                uint64_t uSectorExtentAbs;

                /* Translate the extent-relative sector to the absolute file
                 * position of the grain (0 = grain not allocated). */
                rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
                if (RT_FAILURE(rc))
                    break;
                /* Clip read range to at most the rest of the grain. */
                cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
                Assert(!(cbToRead % 512));
                if (uSectorExtentAbs == 0)
                {
                    /* Unallocated grain: normally reported as a free block;
                     * only sequential read-only stream optimized access scans
                     * the compressed grain stream instead. */
                    if (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                        || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                        || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
                        rc = VERR_VD_BLOCK_FREE;
                    else
                        rc = vmdkStreamReadSequential(pImage, pExtent,
                                                      uSectorExtentRel,
                                                      pIoCtx, cbToRead);
                }
                else
                {
                    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    {
                        AssertMsg(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
                                  ("Async I/O is not supported for stream optimized VMDK's\n"));

                        /* Align down to the grain start and decompress the
                         * whole grain into the cache buffer, unless it is the
                         * one already cached (uGrainSectorAbs matches). */
                        uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                        uSectorExtentAbs -= uSectorInGrain;
                        if (pExtent->uGrainSectorAbs != uSectorExtentAbs)
                        {
                            uint64_t uLBA = 0; /* gcc maybe uninitialized */
                            rc = vmdkFileInflateSync(pImage, pExtent,
                                                     VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                     pExtent->pvGrain,
                                                     VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                                     NULL, &uLBA, NULL);
                            if (RT_FAILURE(rc))
                            {
                                /* Flag the error state (see the sequential reader). */
                                pExtent->uGrainSectorAbs = 0;
                                break;
                            }
                            pExtent->uGrainSectorAbs = uSectorExtentAbs;
                            pExtent->uGrain = uSectorExtentRel / pExtent->cSectorsPerGrain;
                            Assert(uLBA == uSectorExtentRel);
                        }
                        vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
                                             (uint8_t *)pExtent->pvGrain
                                             + VMDK_SECTOR2BYTE(uSectorInGrain),
                                             cbToRead);
                    }
                    else
                        /* Plain sparse extent: read straight from the grain. */
                        rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                   VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                   pIoCtx, cbToRead);
                }
                break;
            }
            case VMDKETYPE_VMFS:
            case VMDKETYPE_FLAT:
                /* Flat extents: data lives at the extent-relative offset. */
                rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                           VMDK_SECTOR2BYTE(uSectorExtentRel),
                                           pIoCtx, cbToRead);
                break;
            case VMDKETYPE_ZERO:
            {
                /* Zero extents have no backing storage; synthesize zeros. */
                size_t cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbToRead);
                Assert(cbSet == cbToRead); RT_NOREF(cbSet);
                break;
            }
        }
        if (pcbActuallyRead)
            *pcbActuallyRead = cbToRead;
    }
    else if (RT_SUCCESS(rc))
        /* Extent found, but it is marked NOACCESS. */
        rc = VERR_VD_VMDK_INVALID_STATE;

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7730
/** @copydoc VDIMAGEBACKEND::pfnWrite
 *
 * Writes cbToWrite bytes at byte offset uOffset. For sparse extents a write
 * may be clipped (to the grain and/or extent boundary); the number of bytes
 * actually handled is returned in *pcbWriteProcess. When a partial write hits
 * an unallocated grain of a non-streamOptimized sparse extent, the function
 * returns VERR_VD_BLOCK_FREE and reports via *pcbPreRead / *pcbPostRead how
 * much surrounding data the caller must read to complete the grain. */
static DECLCALLBACK(int) vmdkWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite,
                                   PVDIOCTX pIoCtx, size_t *pcbWriteProcess, size_t *pcbPreRead,
                                   size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc;

    /* Offsets and sizes must be sector (512 byte) granular. */
    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToWrite % 512 == 0);
    AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER);
    AssertReturn(cbToWrite, VERR_INVALID_PARAMETER);

    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
    {
        PVMDKEXTENT pExtent;
        uint64_t uSectorExtentRel;
        uint64_t uSectorExtentAbs;

        /* No size check here, will do that later when the extent is located.
         * There are sparse images out there which according to the spec are
         * invalid, because the total size is not a multiple of the grain size.
         * Also for sparse images which are stitched together in odd ways (not at
         * grain boundaries, and with the nominal size not being a multiple of the
         * grain size), this would prevent writing to the last grain. */

        /* Map the image-relative sector to an extent + extent-relative sector. */
        rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                            &pExtent, &uSectorExtentRel);
        if (RT_SUCCESS(rc))
        {
            /* Reject non-read/write extents, except for the streamOptimized
             * "write once" creation case where the extent is nominally READONLY
             * but still has an append position.
             * NOTE(review): the inner '&&' chain accepts a plain READONLY
             * extent of a non-streamOptimized image here — verify the operator
             * choice against the intended streamOptimized special case. */
            if (   pExtent->enmAccess != VMDKACCESS_READWRITE
                && (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    && !pImage->pExtents[0].uAppendPosition
                    && pExtent->enmAccess != VMDKACCESS_READONLY))
                rc = VERR_VD_VMDK_INVALID_STATE;
            else
            {
                /* Handle the write according to the current extent type. */
                switch (pExtent->enmType)
                {
                    case VMDKETYPE_HOSTED_SPARSE:
                        /* Look up the absolute sector of the grain covering the write. */
                        rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
                        if (RT_SUCCESS(rc))
                        {
                            /* streamOptimized images are append-only; writing before the
                             * last accessed grain would mean rewriting compressed data. */
                            if (    pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
                                &&  uSectorExtentRel < (uint64_t)pExtent->uLastGrainAccess * pExtent->cSectorsPerGrain)
                                rc = VERR_VD_VMDK_INVALID_WRITE;
                            else
                            {
                                /* Clip write range to at most the rest of the grain. */
                                cbToWrite = RT_MIN(cbToWrite,
                                                   VMDK_SECTOR2BYTE(  pExtent->cSectorsPerGrain
                                                                    - uSectorExtentRel % pExtent->cSectorsPerGrain));
                                if (uSectorExtentAbs == 0)
                                {
                                    /* Grain is not allocated yet. */
                                    if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
                                    {
                                        if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
                                        {
                                            /* Full block write to a previously unallocated block.
                                             * Check if the caller wants to avoid the automatic alloc. */
                                            if (!(fWrite & VD_WRITE_NO_ALLOC))
                                            {
                                                /* Allocate GT and find out where to store the grain. */
                                                rc = vmdkAllocGrain(pImage, pExtent, pIoCtx,
                                                                    uSectorExtentRel, cbToWrite);
                                            }
                                            else
                                                rc = VERR_VD_BLOCK_FREE;
                                            *pcbPreRead = 0;
                                            *pcbPostRead = 0;
                                        }
                                        else
                                        {
                                            /* Partial write to a free grain: tell the caller how much
                                             * to read before/after so the grain can be written whole. */
                                            /* Clip write range to remain in this extent. */
                                            cbToWrite = RT_MIN(cbToWrite,
                                                               VMDK_SECTOR2BYTE(  pExtent->uSectorOffset
                                                                                + pExtent->cNominalSectors - uSectorExtentRel));
                                            *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
                                            *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
                                            rc = VERR_VD_BLOCK_FREE;
                                        }
                                    }
                                    else
                                        /* streamOptimized: append a new compressed grain. */
                                        rc = vmdkStreamAllocGrain(pImage, pExtent, uSectorExtentRel,
                                                                  pIoCtx, cbToWrite);
                                }
                                else
                                {
                                    /* Grain already allocated. */
                                    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                                    {
                                        /* A partial write to a streamOptimized image is simply
                                         * invalid. It requires rewriting already compressed data
                                         * which is somewhere between expensive and impossible. */
                                        rc = VERR_VD_VMDK_INVALID_STATE;
                                        pExtent->uGrainSectorAbs = 0;
                                        AssertRC(rc);
                                    }
                                    else
                                    {
                                        /* Plain sparse extent: write straight into the grain. */
                                        Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
                                        rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                                    VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                                    pIoCtx, cbToWrite, NULL, NULL);
                                    }
                                }
                            }
                        }
                        break;
                    case VMDKETYPE_VMFS:
                    case VMDKETYPE_FLAT:
                        /* Flat extents map 1:1 to the file; just clip and write. */
                        /* Clip write range to remain in this extent. */
                        cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                        rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                    VMDK_SECTOR2BYTE(uSectorExtentRel),
                                                    pIoCtx, cbToWrite, NULL, NULL);
                        break;
                    case VMDKETYPE_ZERO:
                        /* Zero extents have no backing storage; the write is discarded. */
                        /* Clip write range to remain in this extent. */
                        cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                        break;
                }
            }

            /* Report the (possibly clipped) number of bytes handled. */
            if (pcbWriteProcess)
                *pcbWriteProcess = cbToWrite;
        }
    }
    else
        rc = VERR_VD_IMAGE_READ_ONLY;

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7868
7869/** @copydoc VDIMAGEBACKEND::pfnFlush */
7870static DECLCALLBACK(int) vmdkFlush(void *pBackendData, PVDIOCTX pIoCtx)
7871{
7872 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7873
7874 return vmdkFlushImage(pImage, pIoCtx);
7875}
7876
7877/** @copydoc VDIMAGEBACKEND::pfnGetVersion */
7878static DECLCALLBACK(unsigned) vmdkGetVersion(void *pBackendData)
7879{
7880 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7881 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7882
7883 AssertPtrReturn(pImage, 0);
7884
7885 return VMDK_IMAGE_VERSION;
7886}
7887
7888/** @copydoc VDIMAGEBACKEND::pfnGetFileSize */
7889static DECLCALLBACK(uint64_t) vmdkGetFileSize(void *pBackendData)
7890{
7891 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7892 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7893 uint64_t cb = 0;
7894
7895 AssertPtrReturn(pImage, 0);
7896
7897 if (pImage->pFile != NULL)
7898 {
7899 uint64_t cbFile;
7900 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pFile->pStorage, &cbFile);
7901 if (RT_SUCCESS(rc))
7902 cb += cbFile;
7903 }
7904 for (unsigned i = 0; i < pImage->cExtents; i++)
7905 {
7906 if (pImage->pExtents[i].pFile != NULL)
7907 {
7908 uint64_t cbFile;
7909 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pExtents[i].pFile->pStorage, &cbFile);
7910 if (RT_SUCCESS(rc))
7911 cb += cbFile;
7912 }
7913 }
7914
7915 LogFlowFunc(("returns %lld\n", cb));
7916 return cb;
7917}
7918
7919/** @copydoc VDIMAGEBACKEND::pfnGetPCHSGeometry */
7920static DECLCALLBACK(int) vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry)
7921{
7922 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
7923 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7924 int rc = VINF_SUCCESS;
7925
7926 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7927
7928 if (pImage->PCHSGeometry.cCylinders)
7929 *pPCHSGeometry = pImage->PCHSGeometry;
7930 else
7931 rc = VERR_VD_GEOMETRY_NOT_SET;
7932
7933 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7934 return rc;
7935}
7936
7937/** @copydoc VDIMAGEBACKEND::pfnSetPCHSGeometry */
7938static DECLCALLBACK(int) vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
7939{
7940 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n",
7941 pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7942 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7943 int rc = VINF_SUCCESS;
7944
7945 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7946
7947 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7948 {
7949 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7950 {
7951 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
7952 if (RT_SUCCESS(rc))
7953 pImage->PCHSGeometry = *pPCHSGeometry;
7954 }
7955 else
7956 rc = VERR_NOT_SUPPORTED;
7957 }
7958 else
7959 rc = VERR_VD_IMAGE_READ_ONLY;
7960
7961 LogFlowFunc(("returns %Rrc\n", rc));
7962 return rc;
7963}
7964
7965/** @copydoc VDIMAGEBACKEND::pfnGetLCHSGeometry */
7966static DECLCALLBACK(int) vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry)
7967{
7968 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
7969 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7970 int rc = VINF_SUCCESS;
7971
7972 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7973
7974 if (pImage->LCHSGeometry.cCylinders)
7975 *pLCHSGeometry = pImage->LCHSGeometry;
7976 else
7977 rc = VERR_VD_GEOMETRY_NOT_SET;
7978
7979 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7980 return rc;
7981}
7982
7983/** @copydoc VDIMAGEBACKEND::pfnSetLCHSGeometry */
7984static DECLCALLBACK(int) vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
7985{
7986 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n",
7987 pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7988 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7989 int rc = VINF_SUCCESS;
7990
7991 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7992
7993 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7994 {
7995 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7996 {
7997 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
7998 if (RT_SUCCESS(rc))
7999 pImage->LCHSGeometry = *pLCHSGeometry;
8000 }
8001 else
8002 rc = VERR_NOT_SUPPORTED;
8003 }
8004 else
8005 rc = VERR_VD_IMAGE_READ_ONLY;
8006
8007 LogFlowFunc(("returns %Rrc\n", rc));
8008 return rc;
8009}
8010
8011/** @copydoc VDIMAGEBACKEND::pfnQueryRegions */
8012static DECLCALLBACK(int) vmdkQueryRegions(void *pBackendData, PCVDREGIONLIST *ppRegionList)
8013{
8014 LogFlowFunc(("pBackendData=%#p ppRegionList=%#p\n", pBackendData, ppRegionList));
8015 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
8016
8017 AssertPtrReturn(pThis, VERR_VD_NOT_OPENED);
8018
8019 *ppRegionList = &pThis->RegionList;
8020 LogFlowFunc(("returns %Rrc\n", VINF_SUCCESS));
8021 return VINF_SUCCESS;
8022}
8023
8024/** @copydoc VDIMAGEBACKEND::pfnRegionListRelease */
8025static DECLCALLBACK(void) vmdkRegionListRelease(void *pBackendData, PCVDREGIONLIST pRegionList)
8026{
8027 RT_NOREF1(pRegionList);
8028 LogFlowFunc(("pBackendData=%#p pRegionList=%#p\n", pBackendData, pRegionList));
8029 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
8030 AssertPtr(pThis); RT_NOREF(pThis);
8031
8032 /* Nothing to do here. */
8033}
8034
8035/** @copydoc VDIMAGEBACKEND::pfnGetImageFlags */
8036static DECLCALLBACK(unsigned) vmdkGetImageFlags(void *pBackendData)
8037{
8038 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
8039 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8040
8041 AssertPtrReturn(pImage, 0);
8042
8043 LogFlowFunc(("returns %#x\n", pImage->uImageFlags));
8044 return pImage->uImageFlags;
8045}
8046
8047/** @copydoc VDIMAGEBACKEND::pfnGetOpenFlags */
8048static DECLCALLBACK(unsigned) vmdkGetOpenFlags(void *pBackendData)
8049{
8050 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
8051 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8052
8053 AssertPtrReturn(pImage, 0);
8054
8055 LogFlowFunc(("returns %#x\n", pImage->uOpenFlags));
8056 return pImage->uOpenFlags;
8057}
8058
8059/** @copydoc VDIMAGEBACKEND::pfnSetOpenFlags */
8060static DECLCALLBACK(int) vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
8061{
8062 LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
8063 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8064 int rc;
8065
8066 /* Image must be opened and the new flags must be valid. */
8067 if (!pImage || (uOpenFlags & ~( VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO
8068 | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE
8069 | VD_OPEN_FLAGS_SEQUENTIAL | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)))
8070 rc = VERR_INVALID_PARAMETER;
8071 else
8072 {
8073 /* StreamOptimized images need special treatment: reopen is prohibited. */
8074 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
8075 {
8076 if (pImage->uOpenFlags == uOpenFlags)
8077 rc = VINF_SUCCESS;
8078 else
8079 rc = VERR_INVALID_PARAMETER;
8080 }
8081 else
8082 {
8083 /* Implement this operation via reopening the image. */
8084 vmdkFreeImage(pImage, false, true /*fFlush*/);
8085 rc = vmdkOpenImage(pImage, uOpenFlags);
8086 }
8087 }
8088
8089 LogFlowFunc(("returns %Rrc\n", rc));
8090 return rc;
8091}
8092
/** @copydoc VDIMAGEBACKEND::pfnGetComment
 *
 * Fetches the image comment stored under the "ddb.comment" key of the
 * descriptor, decodes it and copies it into the caller's buffer. A missing
 * key is not an error — it simply means there is no comment and an empty
 * string is returned. */
static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, size_t cbComment)
{
    LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;

    AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);

    char *pszCommentEncoded = NULL;
    int rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
                               "ddb.comment", &pszCommentEncoded);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* No comment stored — treat as success with an empty comment. */
        pszCommentEncoded = NULL;
        rc = VINF_SUCCESS;
    }

    if (RT_SUCCESS(rc))
    {
        if (pszComment && pszCommentEncoded)
            rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
        else if (pszComment)
            /* No stored comment: hand back the empty string. */
            *pszComment = '\0';

        /* The encoded string was allocated by vmdkDescDDBGetStr. */
        if (pszCommentEncoded)
            RTMemTmpFree(pszCommentEncoded);
    }

    LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
    return rc;
}
8124
8125/** @copydoc VDIMAGEBACKEND::pfnSetComment */
8126static DECLCALLBACK(int) vmdkSetComment(void *pBackendData, const char *pszComment)
8127{
8128 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
8129 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8130 int rc;
8131
8132 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8133
8134 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8135 {
8136 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8137 rc = vmdkSetImageComment(pImage, pszComment);
8138 else
8139 rc = VERR_NOT_SUPPORTED;
8140 }
8141 else
8142 rc = VERR_VD_IMAGE_READ_ONLY;
8143
8144 LogFlowFunc(("returns %Rrc\n", rc));
8145 return rc;
8146}
8147
8148/** @copydoc VDIMAGEBACKEND::pfnGetUuid */
8149static DECLCALLBACK(int) vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
8150{
8151 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8152 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8153
8154 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8155
8156 *pUuid = pImage->ImageUuid;
8157
8158 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8159 return VINF_SUCCESS;
8160}
8161
8162/** @copydoc VDIMAGEBACKEND::pfnSetUuid */
8163static DECLCALLBACK(int) vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
8164{
8165 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8166 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8167 int rc = VINF_SUCCESS;
8168
8169 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8170
8171 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8172 {
8173 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8174 {
8175 pImage->ImageUuid = *pUuid;
8176 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8177 VMDK_DDB_IMAGE_UUID, pUuid);
8178 if (RT_FAILURE(rc))
8179 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
8180 N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
8181 }
8182 else
8183 rc = VERR_NOT_SUPPORTED;
8184 }
8185 else
8186 rc = VERR_VD_IMAGE_READ_ONLY;
8187
8188 LogFlowFunc(("returns %Rrc\n", rc));
8189 return rc;
8190}
8191
8192/** @copydoc VDIMAGEBACKEND::pfnGetModificationUuid */
8193static DECLCALLBACK(int) vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
8194{
8195 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8196 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8197
8198 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8199
8200 *pUuid = pImage->ModificationUuid;
8201
8202 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8203 return VINF_SUCCESS;
8204}
8205
8206/** @copydoc VDIMAGEBACKEND::pfnSetModificationUuid */
8207static DECLCALLBACK(int) vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
8208{
8209 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8210 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8211 int rc = VINF_SUCCESS;
8212
8213 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8214
8215 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8216 {
8217 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8218 {
8219 /* Only touch the modification uuid if it changed. */
8220 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
8221 {
8222 pImage->ModificationUuid = *pUuid;
8223 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8224 VMDK_DDB_MODIFICATION_UUID, pUuid);
8225 if (RT_FAILURE(rc))
8226 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
8227 }
8228 }
8229 else
8230 rc = VERR_NOT_SUPPORTED;
8231 }
8232 else
8233 rc = VERR_VD_IMAGE_READ_ONLY;
8234
8235 LogFlowFunc(("returns %Rrc\n", rc));
8236 return rc;
8237}
8238
8239/** @copydoc VDIMAGEBACKEND::pfnGetParentUuid */
8240static DECLCALLBACK(int) vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
8241{
8242 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8243 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8244
8245 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8246
8247 *pUuid = pImage->ParentUuid;
8248
8249 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8250 return VINF_SUCCESS;
8251}
8252
8253/** @copydoc VDIMAGEBACKEND::pfnSetParentUuid */
8254static DECLCALLBACK(int) vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
8255{
8256 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8257 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8258 int rc = VINF_SUCCESS;
8259
8260 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8261
8262 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8263 {
8264 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8265 {
8266 pImage->ParentUuid = *pUuid;
8267 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8268 VMDK_DDB_PARENT_UUID, pUuid);
8269 if (RT_FAILURE(rc))
8270 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
8271 N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
8272 }
8273 else
8274 rc = VERR_NOT_SUPPORTED;
8275 }
8276 else
8277 rc = VERR_VD_IMAGE_READ_ONLY;
8278
8279 LogFlowFunc(("returns %Rrc\n", rc));
8280 return rc;
8281}
8282
8283/** @copydoc VDIMAGEBACKEND::pfnGetParentModificationUuid */
8284static DECLCALLBACK(int) vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
8285{
8286 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8287 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8288
8289 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8290
8291 *pUuid = pImage->ParentModificationUuid;
8292
8293 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8294 return VINF_SUCCESS;
8295}
8296
8297/** @copydoc VDIMAGEBACKEND::pfnSetParentModificationUuid */
8298static DECLCALLBACK(int) vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
8299{
8300 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8301 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8302 int rc = VINF_SUCCESS;
8303
8304 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8305
8306 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8307 {
8308 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8309 {
8310 pImage->ParentModificationUuid = *pUuid;
8311 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8312 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
8313 if (RT_FAILURE(rc))
8314 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
8315 }
8316 else
8317 rc = VERR_NOT_SUPPORTED;
8318 }
8319 else
8320 rc = VERR_VD_IMAGE_READ_ONLY;
8321
8322 LogFlowFunc(("returns %Rrc\n", rc));
8323 return rc;
8324}
8325
8326/** @copydoc VDIMAGEBACKEND::pfnDump */
8327static DECLCALLBACK(void) vmdkDump(void *pBackendData)
8328{
8329 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8330
8331 AssertPtrReturnVoid(pImage);
8332 vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
8333 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
8334 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
8335 VMDK_BYTE2SECTOR(pImage->cbSize));
8336 vdIfErrorMessage(pImage->pIfError, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
8337 vdIfErrorMessage(pImage->pIfError, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
8338 vdIfErrorMessage(pImage->pIfError, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
8339 vdIfErrorMessage(pImage->pIfError, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
8340}
8341
8342
8343/**
8344 * Returns the size, in bytes, of the sparse extent overhead for
8345 * the number of desired total sectors and based on the current
8346 * sectors of the extent.
8347 *
8348 * @returns uint64_t size of new overhead in bytes.
8349 * @param pExtent VMDK extent instance.
8350 * @param cSectorsNew Number of desired total sectors.
8351 */
8352static uint64_t vmdkGetNewOverhead(PVMDKEXTENT pExtent, uint64_t cSectorsNew)
8353{
8354 uint64_t cNewDirEntries = cSectorsNew / pExtent->cSectorsPerGDE;
8355 if (cSectorsNew % pExtent->cSectorsPerGDE)
8356 cNewDirEntries++;
8357
8358 size_t cbNewGD = cNewDirEntries * sizeof(uint32_t);
8359 uint64_t cbNewDirSize = RT_ALIGN_64(cbNewGD, 512);
8360 uint64_t cbNewAllTablesSize = RT_ALIGN_64(cNewDirEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
8361 uint64_t cbNewOverhead = RT_ALIGN_Z(RT_MAX(pExtent->uDescriptorSector
8362 + pExtent->cDescriptorSectors, 1)
8363 + cbNewDirSize + cbNewAllTablesSize, 512);
8364 cbNewOverhead += cbNewDirSize + cbNewAllTablesSize;
8365 cbNewOverhead = RT_ALIGN_64(cbNewOverhead,
8366 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
8367
8368 return cbNewOverhead;
8369}
8370
8371/**
8372 * Internal: Replaces the size (in sectors) of an extent in the descriptor file.
8373 *
8374 * @returns VBox status code.
8375 * @param pImage VMDK image instance.
8376 * @param pExtent VMDK extent instance.
8377 * @param uLine Line number of descriptor to change.
8378 * @param cSectorsOld Existing number of sectors.
8379 * @param cSectorsNew New number of sectors.
8380 */
8381static int vmdkReplaceExtentSize(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, unsigned uLine, uint64_t cSectorsOld,
8382 uint64_t cSectorsNew)
8383{
8384 char szOldExtentSectors[UINT64_MAX_BUFF_SIZE];
8385 char szNewExtentSectors[UINT64_MAX_BUFF_SIZE];
8386
8387 ssize_t cbWritten = RTStrPrintf2(szOldExtentSectors, sizeof(szOldExtentSectors), "%llu", cSectorsOld);
8388 if (cbWritten <= 0 || cbWritten > (ssize_t)sizeof(szOldExtentSectors))
8389 return VERR_BUFFER_OVERFLOW;
8390
8391 cbWritten = RTStrPrintf2(szNewExtentSectors, sizeof(szNewExtentSectors), "%llu", cSectorsNew);
8392 if (cbWritten <= 0 || cbWritten > (ssize_t)sizeof(szNewExtentSectors))
8393 return VERR_BUFFER_OVERFLOW;
8394
8395 char *pszNewExtentLine = vmdkStrReplace(pImage->Descriptor.aLines[uLine],
8396 szOldExtentSectors,
8397 szNewExtentSectors);
8398
8399 if (RT_UNLIKELY(!pszNewExtentLine))
8400 return VERR_INVALID_PARAMETER;
8401
8402 vmdkDescExtRemoveByLine(pImage, &pImage->Descriptor, uLine);
8403 vmdkDescExtInsert(pImage, &pImage->Descriptor,
8404 pExtent->enmAccess, cSectorsNew,
8405 pExtent->enmType, pExtent->pszBasename, pExtent->uSectorOffset);
8406
8407 RTStrFree(pszNewExtentLine);
8408 pszNewExtentLine = NULL;
8409
8410 pImage->Descriptor.fDirty = true;
8411
8412 return VINF_SUCCESS;
8413}
8414
8415/**
8416 * Moves sectors down to make room for new overhead.
8417 * Used for sparse extent resize.
8418 *
8419 * @returns VBox status code.
8420 * @param pImage VMDK image instance.
8421 * @param pExtent VMDK extent instance.
8422 * @param cSectorsNew Number of sectors after resize.
8423 */
8424static int vmdkRelocateSectorsForSparseResize(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
8425 uint64_t cSectorsNew)
8426{
8427 int rc = VINF_SUCCESS;
8428
8429 uint64_t cbNewOverhead = vmdkGetNewOverhead(pExtent, cSectorsNew);
8430
8431 uint64_t cNewOverheadSectors = VMDK_BYTE2SECTOR(cbNewOverhead);
8432 uint64_t cOverheadSectorDiff = cNewOverheadSectors - pExtent->cOverheadSectors;
8433
8434 uint64_t cbFile = 0;
8435 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);
8436
8437 uint64_t uNewAppendPosition;
8438
8439 /* Calculate how many sectors need to be relocated. */
8440 unsigned cSectorsReloc = cOverheadSectorDiff;
8441 if (cbNewOverhead % VMDK_SECTOR_SIZE)
8442 cSectorsReloc++;
8443
8444 if (cSectorsReloc < pExtent->cSectors)
8445 uNewAppendPosition = RT_ALIGN_Z(cbFile + VMDK_SECTOR2BYTE(cOverheadSectorDiff), 512);
8446 else
8447 uNewAppendPosition = cbFile;
8448
8449 /*
8450 * Get the blocks we need to relocate first, they are appended to the end
8451 * of the image.
8452 */
8453 void *pvBuf = NULL, *pvZero = NULL;
8454 do
8455 {
8456 /* Allocate data buffer. */
8457 pvBuf = RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
8458 if (!pvBuf)
8459 {
8460 rc = VERR_NO_MEMORY;
8461 break;
8462 }
8463
8464 /* Allocate buffer for overwriting with zeroes. */
8465 pvZero = RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
8466 if (!pvZero)
8467 {
8468 RTMemFree(pvBuf);
8469 pvBuf = NULL;
8470
8471 rc = VERR_NO_MEMORY;
8472 break;
8473 }
8474
8475 uint32_t *aGTDataTmp = (uint32_t *)RTMemAllocZ(sizeof(uint32_t) * pExtent->cGTEntries);
8476 if(!aGTDataTmp)
8477 {
8478 RTMemFree(pvBuf);
8479 pvBuf = NULL;
8480
8481 RTMemFree(pvZero);
8482 pvZero = NULL;
8483
8484 rc = VERR_NO_MEMORY;
8485 break;
8486 }
8487
8488 uint32_t *aRGTDataTmp = (uint32_t *)RTMemAllocZ(sizeof(uint32_t) * pExtent->cGTEntries);
8489 if(!aRGTDataTmp)
8490 {
8491 RTMemFree(pvBuf);
8492 pvBuf = NULL;
8493
8494 RTMemFree(pvZero);
8495 pvZero = NULL;
8496
8497 RTMemFree(aGTDataTmp);
8498 aGTDataTmp = NULL;
8499
8500 rc = VERR_NO_MEMORY;
8501 break;
8502 }
8503
8504 /* Search for overlap sector in the grain table. */
8505 for (uint32_t idxGD = 0; idxGD < pExtent->cGDEntries; idxGD++)
8506 {
8507 uint64_t uGTSector = pExtent->pGD[idxGD];
8508 uint64_t uRGTSector = pExtent->pRGD[idxGD];
8509
8510 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8511 VMDK_SECTOR2BYTE(uGTSector),
8512 aGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);
8513
8514 if (RT_FAILURE(rc))
8515 break;
8516
8517 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8518 VMDK_SECTOR2BYTE(uRGTSector),
8519 aRGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);
8520
8521 if (RT_FAILURE(rc))
8522 break;
8523
8524 for (uint32_t idxGT = 0; idxGT < pExtent->cGTEntries; idxGT++)
8525 {
8526 uint64_t aGTEntryLE = RT_LE2H_U64(aGTDataTmp[idxGT]);
8527 uint64_t aRGTEntryLE = RT_LE2H_U64(aRGTDataTmp[idxGT]);
8528
8529 /**
8530 * Check if grain table is valid. If not dump out with an error.
8531 * Shoudln't ever get here (given other checks) but good sanity check.
8532 */
8533 if (aGTEntryLE != aRGTEntryLE)
8534 {
8535 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
8536 N_("VMDK: inconsistent references within grain table in '%s'"), pExtent->pszFullname);
8537 break;
8538 }
8539
8540 if (aGTEntryLE < cNewOverheadSectors
8541 && aGTEntryLE != 0)
8542 {
8543 /* Read data and append grain to the end of the image. */
8544 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8545 VMDK_SECTOR2BYTE(aGTEntryLE), pvBuf,
8546 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
8547 if (RT_FAILURE(rc))
8548 break;
8549
8550 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8551 uNewAppendPosition, pvBuf,
8552 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
8553 if (RT_FAILURE(rc))
8554 break;
8555
8556 /* Zero out the old block area. */
8557 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8558 VMDK_SECTOR2BYTE(aGTEntryLE), pvZero,
8559 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
8560 if (RT_FAILURE(rc))
8561 break;
8562
8563 /* Write updated grain tables to file */
8564 aGTDataTmp[idxGT] = VMDK_BYTE2SECTOR(uNewAppendPosition);
8565 aRGTDataTmp[idxGT] = VMDK_BYTE2SECTOR(uNewAppendPosition);
8566
8567 if (memcmp(aGTDataTmp, aRGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries))
8568 {
8569 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
8570 N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
8571 break;
8572 }
8573
8574 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8575 VMDK_SECTOR2BYTE(uGTSector),
8576 aGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);
8577
8578 if (RT_FAILURE(rc))
8579 break;
8580
8581 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8582 VMDK_SECTOR2BYTE(uRGTSector),
8583 aRGTDataTmp, sizeof(uint32_t) * pExtent->cGTEntries);
8584
8585 break;
8586 }
8587 }
8588 }
8589
8590 RTMemFree(aGTDataTmp);
8591 aGTDataTmp = NULL;
8592
8593 RTMemFree(aRGTDataTmp);
8594 aRGTDataTmp = NULL;
8595
8596 if (RT_FAILURE(rc))
8597 break;
8598
8599 uNewAppendPosition += VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain);
8600 } while (0);
8601
8602 if (pvBuf)
8603 {
8604 RTMemFree(pvBuf);
8605 pvBuf = NULL;
8606 }
8607
8608 if (pvZero)
8609 {
8610 RTMemFree(pvZero);
8611 pvZero = NULL;
8612 }
8613
8614 // Update append position for extent
8615 pExtent->uAppendPosition = uNewAppendPosition;
8616
8617 return rc;
8618}
8619
8620/**
8621 * Resizes meta/overhead for sparse extent resize.
8622 *
8623 * @returns VBox status code.
8624 * @param pImage VMDK image instance.
8625 * @param pExtent VMDK extent instance.
8626 * @param cSectorsNew Number of sectors after resize.
8627 */
8628static int vmdkResizeSparseMeta(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
8629 uint64_t cSectorsNew)
8630{
8631 int rc = VINF_SUCCESS;
8632 uint32_t cOldGDEntries = pExtent->cGDEntries;
8633
8634 uint64_t cNewDirEntries = cSectorsNew / pExtent->cSectorsPerGDE;
8635 if (cSectorsNew % pExtent->cSectorsPerGDE)
8636 cNewDirEntries++;
8637
8638 size_t cbNewGD = cNewDirEntries * sizeof(uint32_t);
8639
8640 uint64_t cbNewDirSize = RT_ALIGN_64(cbNewGD, 512);
8641 uint64_t cbCurrDirSize = RT_ALIGN_64(pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE, 512);
8642 uint64_t cDirSectorDiff = VMDK_BYTE2SECTOR(cbNewDirSize - cbCurrDirSize);
8643
8644 uint64_t cbNewAllTablesSize = RT_ALIGN_64(cNewDirEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
8645 uint64_t cbCurrAllTablesSize = RT_ALIGN_64(pExtent->cGDEntries * VMDK_GRAIN_TABLE_SIZE, 512);
8646 uint64_t cTableSectorDiff = VMDK_BYTE2SECTOR(cbNewAllTablesSize - cbCurrAllTablesSize);
8647
8648 uint64_t cbNewOverhead = vmdkGetNewOverhead(pExtent, cSectorsNew);
8649 uint64_t cNewOverheadSectors = VMDK_BYTE2SECTOR(cbNewOverhead);
8650 uint64_t cOverheadSectorDiff = cNewOverheadSectors - pExtent->cOverheadSectors;
8651
8652 /*
8653 * Get the blocks we need to relocate first, they are appended to the end
8654 * of the image.
8655 */
8656 void *pvBuf = NULL;
8657 AssertCompile(sizeof(g_abRTZero4K) >= VMDK_GRAIN_TABLE_SIZE);
8658
8659 do
8660 {
8661 /* Allocate data buffer. */
8662 pvBuf = RTMemAllocZ(VMDK_GRAIN_TABLE_SIZE);
8663 if (!pvBuf)
8664 {
8665 rc = VERR_NO_MEMORY;
8666 break;
8667 }
8668
8669 uint32_t uGTStart = VMDK_SECTOR2BYTE(pExtent->uSectorGD) + (cOldGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8670
8671 // points to last element in the grain table
8672 uint32_t uGTTail = uGTStart + (pExtent->cGDEntries * VMDK_GRAIN_TABLE_SIZE) - VMDK_GRAIN_TABLE_SIZE;
8673 uint32_t cbGTOff = RT_ALIGN_Z(VMDK_SECTOR2BYTE(cDirSectorDiff + cTableSectorDiff + cDirSectorDiff), 512);
8674
8675 for (int i = pExtent->cGDEntries - 1; i >= 0; i--)
8676 {
8677 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8678 uGTTail, pvBuf,
8679 VMDK_GRAIN_TABLE_SIZE);
8680 if (RT_FAILURE(rc))
8681 break;
8682
8683 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8684 RT_ALIGN_Z(uGTTail + cbGTOff, 512), pvBuf,
8685 VMDK_GRAIN_TABLE_SIZE);
8686 if (RT_FAILURE(rc))
8687 break;
8688
8689 // This overshoots when i == 0, but we don't need it anymore.
8690 uGTTail -= VMDK_GRAIN_TABLE_SIZE;
8691 }
8692
8693
8694 /* Find the end of the grain directory and start bumping everything down. Update locations of GT entries. */
8695 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8696 VMDK_SECTOR2BYTE(pExtent->uSectorGD), pvBuf,
8697 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8698 if (RT_FAILURE(rc))
8699 break;
8700
8701 int * tmpBuf = (int *)pvBuf;
8702
8703 for (uint32_t i = 0; i < pExtent->cGDEntries; i++)
8704 {
8705 tmpBuf[i] = tmpBuf[i] + VMDK_BYTE2SECTOR(cbGTOff);
8706 pExtent->pGD[i] = pExtent->pGD[i] + VMDK_BYTE2SECTOR(cbGTOff);
8707 }
8708
8709 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8710 RT_ALIGN_Z(VMDK_SECTOR2BYTE(pExtent->uSectorGD + cTableSectorDiff + cDirSectorDiff), 512), pvBuf,
8711 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8712 if (RT_FAILURE(rc))
8713 break;
8714
8715 pExtent->uSectorGD = pExtent->uSectorGD + cDirSectorDiff + cTableSectorDiff;
8716
8717 /* Repeat both steps with the redundant grain table/directory. */
8718
8719 uint32_t uRGTStart = VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + (cOldGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8720
8721 // points to last element in the grain table
8722 uint32_t uRGTTail = uRGTStart + (pExtent->cGDEntries * VMDK_GRAIN_TABLE_SIZE) - VMDK_GRAIN_TABLE_SIZE;
8723 uint32_t cbRGTOff = RT_ALIGN_Z(VMDK_SECTOR2BYTE(cDirSectorDiff), 512);
8724
8725 for (int i = pExtent->cGDEntries - 1; i >= 0; i--)
8726 {
8727 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8728 uRGTTail, pvBuf,
8729 VMDK_GRAIN_TABLE_SIZE);
8730 if (RT_FAILURE(rc))
8731 break;
8732
8733 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8734 RT_ALIGN_Z(uRGTTail + cbRGTOff, 512), pvBuf,
8735 VMDK_GRAIN_TABLE_SIZE);
8736 if (RT_FAILURE(rc))
8737 break;
8738
8739 // This overshoots when i == 0, but we don't need it anymore.
8740 uRGTTail -= VMDK_GRAIN_TABLE_SIZE;
8741 }
8742
8743 /* Update locations of GT entries. */
8744 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
8745 VMDK_SECTOR2BYTE(pExtent->uSectorRGD), pvBuf,
8746 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8747 if (RT_FAILURE(rc))
8748 break;
8749
8750 tmpBuf = (int *)pvBuf;
8751
8752 for (uint32_t i = 0; i < pExtent->cGDEntries; i++)
8753 {
8754 tmpBuf[i] = tmpBuf[i] + cDirSectorDiff;
8755 pExtent->pRGD[i] = pExtent->pRGD[i] + cDirSectorDiff;
8756 }
8757
8758 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8759 VMDK_SECTOR2BYTE(pExtent->uSectorRGD), pvBuf,
8760 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8761 if (RT_FAILURE(rc))
8762 break;
8763
8764 pExtent->uSectorRGD = pExtent->uSectorRGD;
8765 pExtent->cOverheadSectors += cOverheadSectorDiff;
8766
8767 } while (0);
8768
8769 if (pvBuf)
8770 {
8771 RTMemFree(pvBuf);
8772 pvBuf = NULL;
8773 }
8774
8775 pExtent->cGDEntries = cNewDirEntries;
8776
8777 // Allocate additional grain dir
8778 pExtent->pGD = (uint32_t *) RTMemReallocZ(pExtent->pGD, pExtent->cGDEntries * sizeof(uint32_t), cbNewGD);
8779 if (RT_LIKELY(pExtent->pGD))
8780 {
8781 if (pExtent->uSectorRGD)
8782 {
8783 pExtent->pRGD = (uint32_t *)RTMemReallocZ(pExtent->pRGD, pExtent->cGDEntries * sizeof(uint32_t), cbNewGD);
8784 if (RT_UNLIKELY(!pExtent->pRGD))
8785 rc = VERR_NO_MEMORY;
8786 }
8787 }
8788 else
8789 return VERR_NO_MEMORY;
8790
8791
8792 uint32_t uTmpDirVal = pExtent->pGD[cOldGDEntries - 1] + VMDK_GRAIN_DIR_ENTRY_SIZE;
8793 for (uint32_t i = cOldGDEntries; i < pExtent->cGDEntries; i++)
8794 {
8795 pExtent->pGD[i] = uTmpDirVal;
8796
8797 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8798 VMDK_SECTOR2BYTE(uTmpDirVal), &g_abRTZero4K[0],
8799 VMDK_GRAIN_TABLE_SIZE);
8800
8801 if (RT_FAILURE(rc))
8802 return rc;
8803
8804 uTmpDirVal += VMDK_GRAIN_DIR_ENTRY_SIZE;
8805 }
8806
8807 uint32_t uRTmpDirVal = pExtent->pRGD[cOldGDEntries - 1] + VMDK_GRAIN_DIR_ENTRY_SIZE;
8808 for (uint32_t i = cOldGDEntries; i < pExtent->cGDEntries; i++)
8809 {
8810 pExtent->pRGD[i] = uRTmpDirVal;
8811
8812 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8813 VMDK_SECTOR2BYTE(uRTmpDirVal), &g_abRTZero4K[0],
8814 VMDK_GRAIN_TABLE_SIZE);
8815
8816 if (RT_FAILURE(rc))
8817 return rc;
8818
8819 uRTmpDirVal += VMDK_GRAIN_DIR_ENTRY_SIZE;
8820 }
8821
8822 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8823 VMDK_SECTOR2BYTE(pExtent->uSectorGD), pExtent->pGD,
8824 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8825 if (RT_FAILURE(rc))
8826 return rc;
8827
8828 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
8829 VMDK_SECTOR2BYTE(pExtent->uSectorRGD), pExtent->pRGD,
8830 pExtent->cGDEntries * VMDK_GRAIN_DIR_ENTRY_SIZE);
8831 if (RT_FAILURE(rc))
8832 return rc;
8833
8834 rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent + pExtent->uExtent,
8835 pExtent->cNominalSectors, cSectorsNew);
8836 if (RT_FAILURE(rc))
8837 return rc;
8838
8839 return rc;
8840}
8841
/** @copydoc VDIMAGEBACKEND::pfnResize
 *
 * Resizes a VMDK image.  Only growing is implemented; shrinking returns
 * VERR_VD_SHRINK_NOT_SUPPORTED, and an unchanged size falls through as a
 * no-op.  Four layouts are handled by the branches below: monolithicFlat,
 * twoGbMaxExtentFlat, monolithicSparse and twoGbSparseExtent.
 *
 * @returns VBox status code.
 * @param   pBackendData    Opaque PVMDKIMAGE handle.
 * @param   cbSize          Requested new image size in bytes (rounded up to
 *                          whole 512-byte sectors below).
 * @param   pPCHSGeometry   New physical CHS geometry, copied into the image
 *                          on success.
 * @param   pLCHSGeometry   New logical CHS geometry, copied into the image
 *                          on success.
 * @param   uPercentStart   Progress range start (forwarded to the
 *                          set-allocation-size I/O calls).
 * @param   uPercentSpan    Progress range span (likewise forwarded).
 * @param   pVDIfsDisk      Unused.
 * @param   pVDIfsImage     Unused.
 * @param   pVDIfsOperation Unused.
 */
static DECLCALLBACK(int) vmdkResize(void *pBackendData, uint64_t cbSize,
                                    PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
                                    unsigned uPercentStart, unsigned uPercentSpan,
                                    PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
                                    PVDINTERFACE pVDIfsOperation)
{
    /* NOTE(review): uPercentStart/uPercentSpan are listed here but ARE used
     * further down in the vdIfIoIntFileSetAllocationSize() calls; RT_NOREF
     * merely silences unused-parameter warnings, so this is harmless. */
    RT_NOREF5(uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation);

    // Establish variables and objects needed
    int rc = VINF_SUCCESS;
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    unsigned uImageFlags = pImage->uImageFlags;
    /* NOTE(review): the first extent is marked meta-dirty before any of the
     * size validation below; if validation fails the flag stays set — confirm
     * this is intended. */
    PVMDKEXTENT pExtent = &pImage->pExtents[0];
    pExtent->fMetaDirty = true;

    /* Round both the requested and the current size up to whole sectors. */
    uint64_t cSectorsNew = cbSize / VMDK_SECTOR_SIZE; /** < New number of sectors in the image after the resize */
    if (cbSize % VMDK_SECTOR_SIZE)
        cSectorsNew++;

    uint64_t cSectorsOld = pImage->cbSize / VMDK_SECTOR_SIZE; /** < Number of sectors before the resize. Only for FLAT images. */
    if (pImage->cbSize % VMDK_SECTOR_SIZE)
        cSectorsOld++;
    unsigned cExtents = pImage->cExtents;

    /* Check size is within min/max bounds.  Non-raw images must be non-zero
     * and non-fixed (sparse) images must stay below 256 TiB - 64 KiB. */
    if ( !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
        && (   !cbSize
            || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K)) )
        return VERR_VD_INVALID_SIZE;

    /*
     * Making the image smaller is not supported at the moment.
     */
    /** @todo implement making the image smaller, it is the responsibility of
     * the user to know what they're doing. */
    if (cbSize < pImage->cbSize)
        rc = VERR_VD_SHRINK_NOT_SUPPORTED;
    else if (cbSize > pImage->cbSize)
    {
        /**
         * monolithicFlat. FIXED flag and not split up into 2 GB parts.
         */
        if ((uImageFlags & VD_IMAGE_FLAGS_FIXED) && !(uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
        {
            /** Required space in bytes for the extent after the resize. */
            uint64_t cbSectorSpaceNew = cSectorsNew * VMDK_SECTOR_SIZE;
            pExtent = &pImage->pExtents[0];

            /* Grow the single backing file, then rewrite its extent line in
             * the descriptor. */
            rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbSectorSpaceNew,
                                                0 /* fFlags */, NULL,
                                                uPercentStart, uPercentSpan);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);

            rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent, cSectorsOld, cSectorsNew);
            if (RT_FAILURE(rc))
                return rc;
        }

        /**
         * twoGbMaxExtentFlat. FIXED flag and SPLIT into 2 GB parts.
         */
        if ((uImageFlags & VD_IMAGE_FLAGS_FIXED) && (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
        {
            /* Check to see how much space remains in last extent */
            bool fSpaceAvailible = false;
            uint64_t cLastExtentRemSectors = cSectorsOld % VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
            if (cLastExtentRemSectors)
                fSpaceAvailible = true;

            uint64_t cSectorsNeeded = cSectorsNew - cSectorsOld;

            /** Space remaining in current last extent file that we don't need to create another one. */
            if (fSpaceAvailible && cSectorsNeeded + cLastExtentRemSectors <= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE))
            {
                /* The growth fits into the partially-filled last extent:
                 * just enlarge that file and update its descriptor line. */
                pExtent = &pImage->pExtents[cExtents - 1];
                rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage,
                                                    VMDK_SECTOR2BYTE(cSectorsNeeded + cLastExtentRemSectors),
                                                    0 /* fFlags */, NULL, uPercentStart, uPercentSpan);
                if (RT_FAILURE(rc))
                    return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);

                rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent + cExtents - 1,
                                           pExtent->cNominalSectors, cSectorsNeeded + cLastExtentRemSectors);
                if (RT_FAILURE(rc))
                    return rc;
            }
            //** Need more extent files to handle all the requested space. */
            else
            {
                /* First top up the partially-filled last extent to the full
                 * 2 GB split size, if there is one. */
                if (fSpaceAvailible)
                {
                    pExtent = &pImage->pExtents[cExtents - 1];
                    rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, VMDK_2G_SPLIT_SIZE,
                                                        0 /* fFlags */, NULL,
                                                        uPercentStart, uPercentSpan);
                    if (RT_FAILURE(rc))
                        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);

                    cSectorsNeeded = cSectorsNeeded - VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE) + cLastExtentRemSectors;

                    rc = vmdkReplaceExtentSize(pImage, pExtent, pImage->Descriptor.uFirstExtent + cExtents - 1,
                                               pExtent->cNominalSectors, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE));
                    if (RT_FAILURE(rc))
                        return rc;
                }

                unsigned cNewExtents = VMDK_SECTOR2BYTE(cSectorsNeeded) / VMDK_2G_SPLIT_SIZE;
                /* NOTE(review): this round-up test mixes units — cNewExtents
                 * is an extent COUNT while VMDK_2G_SPLIT_SIZE is a BYTE size,
                 * so 'cNewExtents < VMDK_2G_SPLIT_SIZE' is effectively always
                 * true and cNewExtents is over-counted by one.  Harmless here
                 * because the loop below is additionally bounded by
                 * cSectorsNeeded, but the intended test was presumably
                 * 'VMDK_SECTOR2BYTE(cSectorsNeeded) % VMDK_2G_SPLIT_SIZE'. */
                if (cNewExtents % VMDK_2G_SPLIT_SIZE || cNewExtents < VMDK_2G_SPLIT_SIZE)
                    cNewExtents++;

                /* Append full-size 2 GB extents while a whole split still fits. */
                for (unsigned i = cExtents;
                     i < cExtents + cNewExtents && cSectorsNeeded >= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
                     i++)
                {
                    rc = vmdkAddFileBackedExtent(pImage, VMDK_2G_SPLIT_SIZE);
                    if (RT_FAILURE(rc))
                        return rc;

                    pExtent = &pImage->pExtents[i];

                    pExtent->cSectors = VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
                    cSectorsNeeded -= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
                }

                /* Remainder smaller than one split gets a final short extent. */
                if (cSectorsNeeded)
                {
                    rc = vmdkAddFileBackedExtent(pImage, VMDK_SECTOR2BYTE(cSectorsNeeded));
                    if (RT_FAILURE(rc))
                        return rc;
                }
            }
        }

        /**
         * monolithicSparse.
         */
        if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE && !(uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
        {
            // 1. Calculate sectors needed for new overhead.

            uint64_t cbNewOverhead = vmdkGetNewOverhead(pExtent, cSectorsNew);
            uint64_t cNewOverheadSectors = VMDK_BYTE2SECTOR(cbNewOverhead);
            /* NOTE(review): unsigned subtraction — if the new overhead were
             * ever smaller than the current one this would wrap to a huge
             * value and still satisfy the '> 0' test below; assumes overhead
             * can only grow on resize — confirm. */
            uint64_t cOverheadSectorDiff = cNewOverheadSectors - pExtent->cOverheadSectors;

            // 2. Relocate sectors to make room for new GD/GT, update entries in GD/GT
            if (cOverheadSectorDiff > 0)
            {
                if (pExtent->cSectors > 0)
                {
                    /* Do the relocation. */
                    LogFlow(("Relocating VMDK sectors\n"));
                    rc = vmdkRelocateSectorsForSparseResize(pImage, pExtent, cSectorsNew);
                    if (RT_FAILURE(rc))
                        return rc;

                    /* Flush so the relocated grains hit disk before the
                     * grain directory/table metadata is rewritten. */
                    rc = vmdkFlushImage(pImage, NULL);
                    if (RT_FAILURE(rc))
                        return rc;
                }

                rc = vmdkResizeSparseMeta(pImage, pExtent, cSectorsNew);
                if (RT_FAILURE(rc))
                    return rc;
            }
        }

        /**
         * twoGbSparseExtent
         */
        if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE && (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
        {
            /* Check to see how much space remains in last extent */
            bool fSpaceAvailible = false;
            uint64_t cLastExtentRemSectors = cSectorsOld % VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
            if (cLastExtentRemSectors)
                fSpaceAvailible = true;

            uint64_t cSectorsNeeded = cSectorsNew - cSectorsOld;

            /* Growth fits into the partially-filled last sparse extent. */
            if (fSpaceAvailible && cSectorsNeeded + cLastExtentRemSectors <= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE))
            {
                pExtent = &pImage->pExtents[cExtents - 1];
                rc = vmdkRelocateSectorsForSparseResize(pImage, pExtent, cSectorsNeeded + cLastExtentRemSectors);
                if (RT_FAILURE(rc))
                    return rc;

                rc = vmdkFlushImage(pImage, NULL);
                if (RT_FAILURE(rc))
                    return rc;

                rc = vmdkResizeSparseMeta(pImage, pExtent, cSectorsNeeded + cLastExtentRemSectors);
                if (RT_FAILURE(rc))
                    return rc;
            }
            else
            {
                /* First grow the partially-filled last extent to a full split. */
                if (fSpaceAvailible)
                {
                    pExtent = &pImage->pExtents[cExtents - 1];
                    rc = vmdkRelocateSectorsForSparseResize(pImage, pExtent, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE));
                    if (RT_FAILURE(rc))
                        return rc;

                    rc = vmdkFlushImage(pImage, NULL);
                    if (RT_FAILURE(rc))
                        return rc;

                    rc = vmdkResizeSparseMeta(pImage, pExtent, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE));
                    if (RT_FAILURE(rc))
                        return rc;

                    cSectorsNeeded = cSectorsNeeded - VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE) + cLastExtentRemSectors;
                }

                unsigned cNewExtents = VMDK_SECTOR2BYTE(cSectorsNeeded) / VMDK_2G_SPLIT_SIZE;
                /* NOTE(review): same unit-mixing round-up as in the flat
                 * split branch above — over-counts by one but is bounded by
                 * cSectorsNeeded in the loop condition. */
                if (cNewExtents % VMDK_2G_SPLIT_SIZE || cNewExtents < VMDK_2G_SPLIT_SIZE)
                    cNewExtents++;

                /* Append full-size sparse extents while a whole split fits. */
                for (unsigned i = cExtents;
                     i < cExtents + cNewExtents && cSectorsNeeded >= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
                     i++)
                {
                    rc = vmdkAddFileBackedExtent(pImage, VMDK_2G_SPLIT_SIZE);
                    if (RT_FAILURE(rc))
                        return rc;

                    pExtent = &pImage->pExtents[i];

                    rc = vmdkFlushImage(pImage, NULL);
                    if (RT_FAILURE(rc))
                        return rc;

                    pExtent->cSectors = VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
                    cSectorsNeeded -= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
                }

                if (cSectorsNeeded)
                {
                    rc = vmdkAddFileBackedExtent(pImage, VMDK_SECTOR2BYTE(cSectorsNeeded));
                    if (RT_FAILURE(rc))
                        return rc;

                    /* NOTE(review): if vmdkAddFileBackedExtent() increments
                     * pImage->cExtents, this indexes one slot PAST the newly
                     * added extent (pExtents[pImage->cExtents - 1] looks
                     * intended) — verify against that helper. */
                    pExtent = &pImage->pExtents[pImage->cExtents];

                    rc = vmdkFlushImage(pImage, NULL);
                    if (RT_FAILURE(rc))
                        return rc;
                }
            }
        }

        /* Successful resize. Update metadata */
        if (RT_SUCCESS(rc))
        {
            /* Update size and new block count. */
            pImage->cbSize = cbSize;
            /* NOTE(review): pExtent points at whichever extent was touched
             * last; for split images, assigning the WHOLE image's sector
             * count to that single extent looks questionable — confirm. */
            pExtent->cNominalSectors = cSectorsNew;
            pExtent->cSectors = cSectorsNew;

            /* Update geometry. */
            pImage->PCHSGeometry = *pPCHSGeometry;
            pImage->LCHSGeometry = *pLCHSGeometry;
        }

        /* Update header information in base image file. */
        pImage->Descriptor.fDirty = true;
        rc = vmdkWriteDescriptor(pImage, NULL);

        if (RT_SUCCESS(rc))
            rc = vmdkFlushImage(pImage, NULL);
    }
    /* Same size doesn't change the image at all. */

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
9120
/**
 * The VMDK image backend descriptor.
 *
 * Positional function table handed to the generic VD layer; the order of the
 * entries is fixed by the VDIMAGEBACKEND struct, which is why each slot is
 * labelled with its field name.  NULL entries (pfnDiscard, the timestamp and
 * parent-filename callbacks, pfnCompact, pfnRepair, pfnTraverseMetadata)
 * advertise operations this backend does not implement.
 */
const VDIMAGEBACKEND g_VmdkBackend =
{
    /* u32Version */
    VD_IMGBACKEND_VERSION,
    /* pszBackendName */
    "VMDK",
    /* uBackendCaps */
    VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
    | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC
    | VD_CAP_VFS | VD_CAP_PREFERRED,
    /* paFileExtensions */
    s_aVmdkFileExtensions,
    /* paConfigInfo */
    s_aVmdkConfigInfo,
    /* pfnProbe */
    vmdkProbe,
    /* pfnOpen */
    vmdkOpen,
    /* pfnCreate */
    vmdkCreate,
    /* pfnRename */
    vmdkRename,
    /* pfnClose */
    vmdkClose,
    /* pfnRead */
    vmdkRead,
    /* pfnWrite */
    vmdkWrite,
    /* pfnFlush */
    vmdkFlush,
    /* pfnDiscard */
    NULL,
    /* pfnGetVersion */
    vmdkGetVersion,
    /* pfnGetFileSize */
    vmdkGetFileSize,
    /* pfnGetPCHSGeometry */
    vmdkGetPCHSGeometry,
    /* pfnSetPCHSGeometry */
    vmdkSetPCHSGeometry,
    /* pfnGetLCHSGeometry */
    vmdkGetLCHSGeometry,
    /* pfnSetLCHSGeometry */
    vmdkSetLCHSGeometry,
    /* pfnQueryRegions */
    vmdkQueryRegions,
    /* pfnRegionListRelease */
    vmdkRegionListRelease,
    /* pfnGetImageFlags */
    vmdkGetImageFlags,
    /* pfnGetOpenFlags */
    vmdkGetOpenFlags,
    /* pfnSetOpenFlags */
    vmdkSetOpenFlags,
    /* pfnGetComment */
    vmdkGetComment,
    /* pfnSetComment */
    vmdkSetComment,
    /* pfnGetUuid */
    vmdkGetUuid,
    /* pfnSetUuid */
    vmdkSetUuid,
    /* pfnGetModificationUuid */
    vmdkGetModificationUuid,
    /* pfnSetModificationUuid */
    vmdkSetModificationUuid,
    /* pfnGetParentUuid */
    vmdkGetParentUuid,
    /* pfnSetParentUuid */
    vmdkSetParentUuid,
    /* pfnGetParentModificationUuid */
    vmdkGetParentModificationUuid,
    /* pfnSetParentModificationUuid */
    vmdkSetParentModificationUuid,
    /* pfnDump */
    vmdkDump,
    /* pfnGetTimestamp */
    NULL,
    /* pfnGetParentTimestamp */
    NULL,
    /* pfnSetParentTimestamp */
    NULL,
    /* pfnGetParentFilename */
    NULL,
    /* pfnSetParentFilename */
    NULL,
    /* pfnComposeLocation */
    genericFileComposeLocation,
    /* pfnComposeName */
    genericFileComposeName,
    /* pfnCompact */
    NULL,
    /* pfnResize */
    vmdkResize,
    /* pfnRepair */
    NULL,
    /* pfnTraverseMetadata */
    NULL,
    /* u32VersionEnd */
    VD_IMGBACKEND_VERSION
};
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette