2 * The PCI Library -- Physical memory mapping for DJGPP
4 * Copyright (c) 2023 Pali Rohár <pali@kernel.org>
6 * Can be freely distributed and used under the terms of the GNU GPL v2+
8 * SPDX-License-Identifier: GPL-2.0-or-later
16 #include <stdio.h> /* for __DJGPP__ and __DJGPP_MINOR__, available since DJGPP v2.02 and defined indirectly via sys/version.h */
17 #include <string.h> /* for ffs() */
18 #include <malloc.h> /* for memalign() */
21 #include <crt0.h> /* for _crt0_startup_flags, __djgpp_memory_handle_list, __djgpp_memory_handle_size and __djgpp_memory_handle() */
22 #include <sys/nearptr.h> /* for __djgpp_conventional_base, __djgpp_nearptr_enable() and __djgpp_nearptr_disable() */
25 #define EOVERFLOW 40 /* defined since DJGPP v2.04 */
29 * To use __djgpp_conventional_base it must be ensured that the Unix-like
30 * sbrk algorithm is not active (by setting the _CRT0_FLAG_NONMOVE_SBRK startup
31 * flag) and that functions like system, spawn*, or exec* are never called.
33 int _crt0_startup_flags = _CRT0_FLAG_NONMOVE_SBRK;
/*
 * Allocate `size' bytes of memory aligned to `alignment' bytes.
 * NOTE(review): this listing appears to be missing lines (the return type,
 * braces and the malloc() NULL check are not visible); comments below refer
 * to the visible statements only.
 */
36 aligned_alloc(size_t alignment, size_t size)
39 * Unfortunately DJGPP prior to 2.6 has a broken memalign() function,
40 * so for older DJGPP versions use malloc() with manual aligning.
42 #if !defined(__DJGPP__) || __DJGPP__ < 2 || (__DJGPP__ == 2 && __DJGPP_MINOR__ < 6)
43 void *ptr_alloc, *ptr_aligned;
/* Over-allocate by `alignment' bytes so that an aligned pointer plus the
 * stored back-pointer always fit inside the allocation.  Assumes alignment
 * is a power of two — TODO confirm with callers (physmem_map passes the
 * DPMI page size, which is checked to be a power of two). */
48 ptr_alloc = malloc(size + alignment);
52 ptr_aligned = (void *)(((unsigned long)ptr_alloc & ~(alignment-1)) + alignment);
55 * Store the original pointer from malloc() just before our aligned pointer.
56 * DJGPP malloc()'ed ptr_alloc is aligned to 8 bytes, our ptr_aligned is
57 * aligned at least to 8 bytes, so we always have at least 4 bytes of free
58 * space directly before the memory where ptr_aligned points.
60 *((unsigned long *)ptr_aligned-1) = (unsigned long)ptr_alloc;
/* DJGPP 2.6+ has a working memalign(), so use it directly. */
64 return memalign(alignment, size);
/*
 * Release memory previously allocated by aligned_alloc().
 * For pre-2.6 DJGPP the aligned pointer is translated back to the original
 * malloc() pointer stored in the word just before it; the free() call itself
 * is not visible in this listing.
 */
69 aligned_free(void *ptr)
71 #if !defined(__DJGPP__) || __DJGPP__ < 2 || (__DJGPP__ == 2 && __DJGPP_MINOR__ < 6)
72 /* Take original pointer returned by malloc() for releasing memory. */
73 ptr = (void *)*((unsigned long *)ptr-1);
/*
 * Locate the DJGPP sbrk memory handle that backs the linear address `ptr'
 * and report it via *sh together with its total size via *sh_size.
 * NOTE(review): the return statements, loop braces and the declaration of
 * the index variable `i' are not visible in this listing.
 */
79 find_sbrk_memory_handle(void *ptr, unsigned long max_length UNUSED /*pre-v2.04*/, unsigned long pagesize UNUSED /*pre-v2.04*/, const __djgpp_sbrk_handle **sh, unsigned long *sh_size)
82 * Find a DJGPP's sbrk memory handle which belongs to the ptr address pointer
83 * and detects size of this memory handle. DJGPP since v2.04 has arrays
84 * __djgpp_memory_handle_list[] and __djgpp_memory_handle_size[] with sbrk
85 * ranges which can be simply traversed. Older DJGPP versions have only
86 * __djgpp_memory_handle() function which returns information to which handle
87 * passed pointer belongs. So finding the size of the memory handle for DJGPP
88 * pre-v2.04 version is slower, its time complexity is O(N^2).
90 #if !defined(__DJGPP__) || __DJGPP__ < 2 || (__DJGPP__ == 2 && __DJGPP_MINOR__ < 4)
92 const __djgpp_sbrk_handle *sh2;
93 unsigned long end_offset;
95 *sh = __djgpp_memory_handle((unsigned long)ptr);
/* Walk backwards from ptr+max_length-1 in pagesize steps until an offset is
 * found whose address still belongs to the same handle as ptr itself. */
97 for (end_offset = max_length-1; end_offset != 0; end_offset = end_offset > pagesize ? end_offset - pagesize : 0)
99 sh2 = __djgpp_memory_handle((unsigned long)ptr + end_offset);
103 * If sh or sh2 is NULL then it is probably a memory corruption in
104 * DJGPP's __djgpp_memory_handle_list[] structure.
108 if ((*sh)->handle == sh2->handle)
115 * If end page of the sh handle was not found then it is probably a memory
116 * corruption in DJGPP's __djgpp_memory_handle_list[] structure.
/* Size of the handle: from its base address up to the last matching page. */
121 *sh_size = (unsigned long)ptr + end_offset+1 - (*sh)->address;
/* DJGPP v2.04+: scan the public handle arrays directly; entry 0 is valid
 * even with address 0, later entries terminate the list at address 0. */
128 for (i = 0; i < sizeof(__djgpp_memory_handle_list)/sizeof(__djgpp_memory_handle_list[0]) && (i == 0 || __djgpp_memory_handle_list[i].address != 0); i++)
130 if ((unsigned long)ptr >= __djgpp_memory_handle_list[i].address &&
131 (unsigned long)ptr < __djgpp_memory_handle_list[i].address + __djgpp_memory_handle_size[i])
135 if ((i != 0 && __djgpp_memory_handle_list[i].address == 0) || __djgpp_memory_handle_size[i] == 0)
138 * If address range was not found in __djgpp_memory_handle_list[]
139 * then it is probably memory corruption in this list.
144 *sh = &__djgpp_memory_handle_list[i];
145 *sh_size = __djgpp_memory_handle_size[i];
/*
 * Set DPMI page attributes for the range described by *mi and then read the
 * resulting attributes back into the same buffer, so the caller can verify
 * whether the change really took effect (some DPMI hosts silently ignore
 * unsupported changes).  On failure __dpmi_error is saved into `error'.
 * NOTE(review): the switch statements, errno assignments and return paths
 * for each case group are not visible in this listing — presumably each
 * group maps the DPMI error code to an appropriate errno; verify upstream.
 */
152 set_and_get_page_attributes(__dpmi_meminfo *mi, short *attributes)
158 /* __dpmi_set_page_attributes modifies mi.size */
160 if (__dpmi_set_page_attributes(mi, attributes) != 0)
162 error = __dpmi_error;
166 case 0x0507: /* Unsupported function (returned by DPMI 0.9 host, error number is same as DPMI function number) */
167 case 0x8001: /* Unsupported function (returned by DPMI 1.0 host) */
170 case 0x8010: /* Resource unavailable (DPMI host cannot allocate internal resources to complete an operation) */
171 case 0x8013: /* Physical memory unavailable */
172 case 0x8014: /* Backing store unavailable */
175 case 0x8002: /* Invalid state (page in wrong state for request) */
176 case 0x8021: /* Invalid value (illegal request in bits 0-2 of one or more page attribute words) */
177 case 0x8023: /* Invalid handle (in ESI) */
178 case 0x8025: /* Invalid linear address (specified range is not within specified block) */
181 default: /* Other unspecified error */
189 /* Cleanup output buffer. */
190 for (i = 0; i < mi->size; i++)
/* Read the attributes back so the caller sees the real, current state. */
193 if (__dpmi_get_page_attributes(mi, attributes) != 0)
195 error = __dpmi_error;
199 case 0x0506: /* Unsupported function (returned by DPMI 0.9 host, error number is same as DPMI function number) */
200 case 0x8001: /* Unsupported function (returned by DPMI 1.0 host) */
203 case 0x8010: /* Resource unavailable (DPMI host cannot allocate internal resources to complete an operation) */
206 case 0x8023: /* Invalid handle (in ESI) */
207 case 0x8025: /* Invalid linear address (specified range is not within specified block) */
210 default: /* Other unspecified error */
/*
 * Register the "devmem.path" parameter which selects the physical memory
 * access method used by physmem_open(): auto, devmap or physmap.
 */
221 physmem_init_config(struct pci_access *a)
223 pci_define_param(a, "devmem.path", "auto", "DJGPP physical memory access method: auto, devmap, physmap");
227 physmem_access(struct pci_access *a UNUSED, int w UNUSED)
/* Sentinel "handles" returned by physmem_open() that encode which mapping
 * method is in use; they are compared by value in physmem_map()/unmap()
 * and never dereferenced. */
232 #define PHYSMEM_DEVICE_MAPPING ((struct physmem *)1)
233 #define PHYSMEM_PHYSADDR_MAPPING ((struct physmem *)2)
/* Number of currently open users of the Fat DS descriptor (4 GB DS limit);
 * checked in physmem_open() and physmem_close() before toggling nearptr. */
235 static int fat_ds_count;
/*
 * Open physical memory access and choose the access method according to the
 * "devmem.path" parameter and the capabilities of the running DPMI host.
 * Returns PHYSMEM_DEVICE_MAPPING or PHYSMEM_PHYSADDR_MAPPING on success.
 * NOTE(review): this listing appears to be missing lines (declarations of
 * ret/capabilities/vendor, branch bodies of the devmem string checks, the
 * fat_ds_count increment and the failure return) — verify upstream.
 */
238 physmem_open(struct pci_access *a, int w UNUSED)
240 const char *devmem = pci_get_param(a, "devmem.path");
241 __dpmi_version_ret version;
248 if (strcmp(devmem, "auto") == 0)
253 else if (strcmp(devmem, "devmap") == 0)
258 else if (strcmp(devmem, "physmap") == 0)
269 ret = __dpmi_get_version(&version);
271 a->debug("detected unknown DPMI host...");
275 * Call DPMI 1.0 function __dpmi_get_capabilities() for detecting if the DPMI
276 * host supports Device mapping. Some DPMI 0.9 hosts like Windows's NTVDM
277 * do not support this function, so they do not fill the capabilities and
278 * vendor buffers, but still return success. Detect this kind of failure by
279 * checking if AX register (low 16-bits of capabilities variable) was not
280 * modified and contains the number of called DPMI function (0x0401).
282 vendor[0] = vendor[1] = vendor[2] = 0;
283 ret = __dpmi_get_capabilities(&capabilities, vendor);
284 if (ret == 0 && (capabilities & 0xffff) == 0x0401)
288 a->debug("detected DPMI %u.%02u host %.126s %u.%u with flags 0x%x and capabilities 0x%x...",
289 (unsigned)version.major, (unsigned)version.minor, vendor+2,
290 (unsigned)(unsigned char)vendor[0], (unsigned)(unsigned char)vendor[1],
291 (unsigned)version.flags, capabilities);
293 a->debug("detected DPMI %u.%02u host with flags 0x%x...",
294 (unsigned)version.major, (unsigned)version.minor, (unsigned)version.flags);
298 * If device mapping was selected then use __dpmi_map_device_in_memory_block()
299 * for physical memory mapping. Does not have to be supported by DPMI 0.9 host.
300 * Device mapping is supported when capability bit 2 is set.
304 if (ret == 0 && (capabilities & (1<<2)))
306 a->debug("using physical memory access via Device Mapping...");
307 return PHYSMEM_DEVICE_MAPPING;
309 a->debug("DPMI Device Mapping not supported...");
313 * If device mapping was not tried or not supported by DPMI host then fallback
314 * to __dpmi_physical_address_mapping(). But this requires Fat DS descriptor,
315 * meaning to increase DS descriptor limit to 4 GB, which does not have to be
316 * supported by some DPMI hosts.
320 if (fat_ds_count != 0 || __djgpp_nearptr_enable())
323 a->debug("using physical memory access via Physical Address Mapping...");
324 return PHYSMEM_PHYSADDR_MAPPING;
328 * DJGPP prior to 2.6 has semi-broken __djgpp_nearptr_enable() function.
329 * On failure it may let DS descriptor limit in semi-broken state. So for
330 * older DJGPP versions call __djgpp_nearptr_disable() which fixes it.
332 #if !defined(__DJGPP__) || __DJGPP__ < 2 || (__DJGPP__ == 2 && __DJGPP_MINOR__ < 6)
333 __djgpp_nearptr_disable();
335 a->debug("DPMI Physical Address Mapping not usable because Fat DS descriptor not supported...");
339 * Otherwise we do not have access to physical memory mapping. Theoretically
340 * it could be possible to use __dpmi_physical_address_mapping() and then
341 * create new segment where mapped linear address would be available, but this
342 * would require to access memory in newly created segment via far pointers,
343 * which is not only a mess in the native 32-bit application but also these far
344 * pointers are not supported by gcc. If DPMI host does not allow us to change
345 * DS descriptor limit to 4 GB then it is mostly due to security reasons and
346 * probably does not allow access to physical memory mapping. This applies
347 * for non-DOS OS systems with integrated DPMI hosts like in Windows NT NTVDM
348 * or older version of Linux dosemu.
350 a->debug("physical memory access not allowed...");
/*
 * Close a physical memory access handle returned by physmem_open().
 * NOTE(review): the expected fat_ds_count decrement before the zero check
 * below is not visible in this listing — verify against upstream.
 */
356 physmem_close(struct physmem *physmem)
358 /* Disable 4 GB limit on DS descriptor if it was the last user. */
359 if (physmem == PHYSMEM_PHYSADDR_MAPPING)
362 if (fat_ds_count == 0)
363 __djgpp_nearptr_disable();
/*
 * Return the DPMI host page size, cached in a static variable.
 * Falls back to 4096 when the DPMI call fails or reports a size that is
 * not a power of two.  NOTE(review): the cache guard and the return
 * statement are not visible in this listing.
 */
368 physmem_get_pagesize(struct physmem *physmem UNUSED)
370 static unsigned long pagesize;
373 if (__dpmi_get_page_size(&pagesize) != 0)
375 if (pagesize & (pagesize-1))
378 pagesize = 4096; /* Fallback value, the most commonly used on x86. */
/*
 * Map the physical memory range [addr, addr+length) and return a pointer
 * through which it can be accessed directly.  The strategy depends on the
 * handle returned by physmem_open(): DPMI Device Mapping into an allocated
 * committed buffer, or DPMI Physical Address Mapping via the Fat DS
 * descriptor.  The `w' flag requests write access; see the page-attribute
 * note near the end of the Device Mapping branch.
 * NOTE(review): this listing appears to be missing lines (declarations of
 * ptr/offset/mi/error/attributes, braces, errno assignments and returns);
 * comments describe the visible code only.
 */
384 physmem_map(struct physmem *physmem, u64 addr, size_t length, int w)
386 long pagesize = physmem_get_pagesize(physmem);
387 unsigned pagesize_shift = ffs(pagesize)-1;
388 const __djgpp_sbrk_handle *sh;
389 unsigned long sh_size;
399 /* Align length to page size. */
400 if (length & (pagesize-1))
401 length = (length & ~(pagesize-1)) + pagesize;
403 /* Mapping of physical memory above 4 GB is not possible. */
404 if (addr >= 0xffffffffUL || addr + length > 0xffffffffUL)
410 if (physmem == PHYSMEM_DEVICE_MAPPING)
413 * __dpmi_map_device_in_memory_block() maps physical memory to any
414 * page-aligned linear address for which we have DPMI memory handle. But
415 * DPMI host does not have to support mapping of memory below 1 MB which
416 * lies in RAM, and is not device memory.
418 * __djgpp_map_physical_memory() function is DJGPP wrapper around
419 * __dpmi_map_device_in_memory_block() which properly handles memory
420 * range that span multiple DPMI memory handles. It is common that
421 * DJGPP sbrk() or malloc() allocator returns continuous memory range
422 * which is backed by two or more DPMI memory handles which represents
423 * consecutive memory ranges without any gap.
425 * __dpmi_map_conventional_memory_in_memory_block() aliases memory range
426 * specified by page-aligned linear address to another page-aligned linear
427 * address. This can be used for mapping memory below 1 MB which lies in
428 * RAM and for which cannot be used __dpmi_map_device_in_memory_block().
429 * This function call takes (virtual) linear address as opposite of the
430 * __dpmi_map_device_in_memory_block() which takes physical address.
432 * Unfortunately __djgpp_map_physical_memory() internally calls only
433 * __dpmi_map_device_in_memory_block() function and does not return information
434 * for which memory range the call failed. So it cannot be used for
435 * generic memory mapping requests.
437 * Also it does not return useful errno. And even in the latest released
438 * DJGPP version v2.5 this function has suboptimal implementation. Its
439 * time complexity is O(N^2) (where N is number of pages).
441 * So do not use __djgpp_map_physical_memory() function and instead write
442 * own logic handling virtual memory ranges which spans multiple DPMI
443 * memory handles and manually calls __dpmi_map_device_in_memory_block()
444 * or __dpmi_map_conventional_memory_in_memory_block() per every handle.
446 * We can easily access only linear addresses in our DS segment which
447 * is managed by DJGPP sbrk allocator. So allocate page-aligned range
448 * by aligned_alloc() (our wrapper around malloc()/memalign()) and then
449 * for every subrange which is backed by different DPMI memory handle
450 * call appropriate mapping function which correctly calculates offset
451 * and length to have continuous representation of physical memory range.
453 * This approach has disadvantage that for each mapping it is required
454 * to reserve and allocate committed memory in RAM with the size of the
455 * mapping itself. This has negative impact for mappings of large sizes.
456 * Unfortunately this is the only way because DJGPP sbrk allocator does
457 * not have any (public) function for directly allocating uncommitted
458 * memory which is not backed by the RAM. Even if DJGPP sbrk code is
459 * extended for this functionality, the corresponding DPMI function
460 * __dpmi_allocate_linear_memory() is DPMI 1.0 function and not widely
461 * supported by DPMI hosts, even the default DJGPP's CWSDPMI does not
465 ptr = aligned_alloc(pagesize, length);
/* Map the range one DPMI-memory-handle subrange at a time; mi.size is the
 * number of pages actually mapped in each iteration. */
472 for (offset = 0; offset < length; offset += (mi.size << pagesize_shift))
475 * Find a memory handle with its size which belongs to the pointer
476 * address ptr+offset. Base address and size of the memory handle
477 * must be page aligned for memory mapping support.
479 if (!find_sbrk_memory_handle(ptr + offset, length - offset, pagesize, &sh, &sh_size) ||
480 (sh->address & (pagesize-1)) || (sh_size & (pagesize-1)))
483 * Failure detected. If we have some partial mapping, try to undo
484 * it via physmem_unmap() which also release ptr. If we do not
485 * have partial mapping, just release ptr.
488 physmem_unmap(physmem, ptr, offset);
/* mi.address is the offset of ptr+offset inside the handle; clamp mi.size
 * so the request never crosses the end of the current handle. */
495 mi.handle = sh->handle;
496 mi.address = (unsigned long)ptr + offset - sh->address;
497 mi.size = (length - offset) >> pagesize_shift;
498 if (mi.size > ((sh_size - mi.address) >> pagesize_shift))
499 mi.size = (sh_size - mi.address) >> pagesize_shift;
500 if (__dpmi_map_device_in_memory_block(&mi, addr + offset) != 0)
503 * __dpmi_map_device_in_memory_block() may fail for memory range
504 * which belongs to non-device memory below 1 MB. DPMI host in
505 * this case returns DPMI error code 0x8003 (System integrity -
506 * invalid device address). For example this is behavior of DPMI
507 * host HX HDPMI32, which strictly differs between non-device and
508 * device memory. If the physical memory range belongs to the
509 * non-device conventional memory and DPMI host uses 1:1 mappings
510 * for memory below 1 MB then we can try to alias range of linear
511 * address below 1 MB to DJGPP's accessible linear address range.
512 * For this aliasing of linear (not the physical) memory address
513 * ranges below 1 MB boundary is there an additional DPMI 1.0
514 * function __dpmi_map_conventional_memory_in_memory_block().
515 * But DPMI host does not have to support it. HDPMI32 supports it.
516 * If the memory range crosses 1 MB boundary then call it only for
517 * the subrange of memory which is below the 1 MB boundary and let the
518 * remaining subrange for the next iteration of the outer loop.
519 * Because the remaining memory range is above 1 MB limit, only
520 * the __dpmi_map_device_in_memory_block() would be used. This
521 * approach makes continuous linear range of the mapped memory.
523 if (__dpmi_error == 0x8003 && addr + offset < 1*1024*1024UL)
526 if (addr + offset + (mi.size << pagesize_shift) > 1*1024*1024UL)
527 mi.size = (1*1024*1024UL - addr - offset) >> pagesize_shift;
528 if (__dpmi_map_conventional_memory_in_memory_block(&mi, addr + offset) != 0)
531 * Save __dpmi_error because any DJGPP function may change
532 * it. If we have some partial mapping, try to undo it via
533 * physmem_unmap() which also release ptr. If we do not
534 * have partial mapping, just release ptr.
536 error = __dpmi_error;
538 physmem_unmap(physmem, ptr, offset);
543 case 0x0509: /* Unsupported function (returned by DPMI 0.9 host, error number is same as DPMI function number) */
544 case 0x8001: /* Unsupported function (returned by DPMI 1.0 host) */
546 * Conventional Memory Mapping is not supported.
547 * Device Mapping is supported, but DPMI host rejected
548 * Device Mapping request. So reports same errno value
549 * as from the failed Device Mapping switch case,
550 * which is ENXIO (because __dpmi_error == 0x8003).
554 case 0x8003: /* System integrity (invalid conventional memory address) */
557 case 0x8010: /* Resource unavailable (DPMI host cannot allocate internal resources to complete an operation) */
560 case 0x8023: /* Invalid handle (in ESI) */
561 case 0x8025: /* Invalid linear address (specified range is not within specified block, or EBX/EDX is not page aligned) */
564 default: /* Other unspecified error */
574 * Save __dpmi_error because any DJGPP function may change
575 * it. If we have some partial mapping, try to undo it via
576 * physmem_unmap() which also release ptr. If we do not
577 * have partial mapping, just release ptr.
579 error = __dpmi_error;
581 physmem_unmap(physmem, ptr, offset);
586 case 0x0508: /* Unsupported function (returned by DPMI 0.9 host, error number is same as DPMI function number) */
587 case 0x8001: /* Unsupported function (returned by DPMI 1.0 host) */
590 case 0x8003: /* System integrity (invalid device address) */
593 case 0x8010: /* Resource unavailable (DPMI host cannot allocate internal resources to complete an operation) */
596 case 0x8023: /* Invalid handle (in ESI) */
597 case 0x8025: /* Invalid linear address (specified range is not within specified block or EBX/EDX is not page-aligned) */
600 default: /* Other unspecified error */
609 * For read-only mapping try to change page attributes with not changing
610 * page type (3) and setting read-only access (bit 3 unset). Ignore any
611 * failure as this function requires DPMI 1.0 host and so it does not have
612 * to be supported by other DPMI 0.9 hosts. Note that by default newly
613 * created mapping has read/write access and so we can use it also for
614 * mappings which were requested as read-only too.
618 attributes = malloc(mi.size * sizeof(*attributes));
622 for (i = 0; i < mi.size; i++)
623 attributes[i] = (0<<3) | 3;
625 /* __dpmi_set_page_attributes modifies mi.size */
627 __dpmi_set_page_attributes(&mi, attributes);
637 else if (physmem == PHYSMEM_PHYSADDR_MAPPING)
640 * __dpmi_physical_address_mapping() is DPMI 0.9 function and so does not
641 * require device mapping support. But DPMI hosts often allow using it
642 * only for memory above 1 MB and also we do not have control where DPMI
643 * host maps physical memory. Because this is DPMI 0.9 function, error
644 * code on failure does not have to be provided. If DPMI host does not
645 * provide error code then in __dpmi_error variable is stored the called
646 * DPMI function number (0x0800 is for Physical Address Mapping).
647 * Error codes are provided only by DPMI 1.0 hosts.
652 if (__dpmi_physical_address_mapping(&mi) != 0)
655 * __dpmi_physical_address_mapping() may fail for memory range which
656 * starts below 1 MB. DPMI 1.0 host in this case returns DPMI error
657 * code 0x8021 (Invalid value - address is below 1 MB boundary).
658 * DPMI 0.9 host does not provide error code, so __dpmi_error contains
659 * value 0x0800. For example this is behavior of the default DJGPP's
660 * DPMI host CWSDPMI and also of Windows 3.x DPMI host. On the other
661 * hand DPMI host HX HDPMI32 or Windows 9x DPMI host allow requests
662 * for memory ranges below 1 MB and do not fail.
664 if ((__dpmi_error == 0x0800 || __dpmi_error == 0x8021) && addr < 1*1024*1024UL)
667 * Expects that conventional memory below 1 MB is always 1:1
668 * mapped. On non-paging DPMI hosts it is always truth and paging
669 * DPMI hosts should do it too or at least provide mapping with
670 * compatible or emulated content for compatibility with existing
671 * DOS applications. So check that requested range is below 1 MB.
673 if (addr + length > 1*1024*1024UL)
680 * Simulate successful __dpmi_physical_address_mapping() call by
681 * setting the 1:1 mapped address.
687 switch (__dpmi_error)
689 case 0x0800: /* Error code was not provided (returned by DPMI 0.9 host, error number is same as DPMI function number) */
692 case 0x8003: /* System integrity (DPMI host memory region) */
693 case 0x8021: /* Invalid value (address is below 1 MB boundary) */
696 case 0x8010: /* Resource unavailable (DPMI host cannot allocate internal resources to complete an operation) */
699 default: /* Other unspecified error */
708 * Function returns linear address of the mapping. On non-paging DPMI
709 * hosts it does nothing and just returns same passed physical address.
710 * With DS descriptor limit set to 4 GB (set by __djgpp_nearptr_enable())
711 * we have direct access to any linear address. Direct access to specified
712 * linear address is from the __djgpp_conventional_base offset. Note that
713 * this is always read/write access, and there is no way to make access
716 ptr = (void *)(mi.address + __djgpp_conventional_base);
719 * DJGPP CRT code on paging DPMI hosts enables NULL pointer protection by
720 * disabling access to the zero page. If we are running on DPMI host which
721 * does 1:1 mapping and we were asked for physical address range mapping
722 * which includes also our zero page, then we have to disable NULL pointer
723 * protection to allow access to that mapped page. Detect this by checking
724 * that our zero page [0, pagesize-1] does not conflict with the returned
725 * address range [ptr, ptr+length] (note that length is already multiply
726 * of pagesize) and change page attributes to committed page type (1) and
727 * set read/write access (bit 3 set). Ignore any failure as this function
728 * requires DPMI 1.0 host and so it does not have to be supported by other
729 * DPMI 0.9 hosts. In this case DJGPP CRT code did not enable NULL pointer
730 * protection and so zero page can be normally accessed.
732 if ((unsigned long)ptr - 1 > (unsigned long)ptr - 1 + length)
734 mi.handle = __djgpp_memory_handle_list[0].handle;
736 mi.size = 1; /* number of pages */
737 one_pg_attr = (1<<3) | 1;
738 /* __dpmi_set_page_attributes modifies mi.size */
739 __dpmi_set_page_attributes(&mi, &one_pg_attr);
745 /* invalid physmem parameter */
/*
 * Unmap a range previously mapped by physmem_map() and, for the Device
 * Mapping method, return the backing buffer to the DJGPP sbrk pool.
 * NOTE(review): this listing appears to be missing lines (declarations of
 * offset/mi/i/attributes, braces, errno assignments, free()/aligned_free()
 * calls and returns); comments describe the visible code only.
 */
751 physmem_unmap(struct physmem *physmem, void *ptr, size_t length)
753 long pagesize = physmem_get_pagesize(physmem);
754 unsigned pagesize_shift = ffs(pagesize)-1;
755 const __djgpp_sbrk_handle *sh;
756 unsigned long sh_size;
762 /* Align length to page size. */
763 if (length & (pagesize-1))
764 length = (length & ~(pagesize-1)) + pagesize;
766 if (physmem == PHYSMEM_DEVICE_MAPPING)
769 * Memory mapped by __dpmi_map_conventional_memory_in_memory_block() or by
770 * __dpmi_map_device_in_memory_block() can be unmapped by changing page
771 * attributes back to what the allocator uses: page type to committed (1),
772 * access to read/write (bit 3 set) and not setting initial page access
773 * and dirty bits (bit 4 unset).
775 * There is a DJGPP function __djgpp_set_page_attributes() which sets page
776 * attributes for the memory range specified by ptr pointer, but it has
777 * same disadvantages as __djgpp_map_physical_memory() function (see
778 * comment in map functionality). So use __dpmi_set_page_attributes()
781 * If changing page attributes fails then do not return memory back to the
782 * malloc pool because it is still mapped to physical memory and cannot be
783 * used by allocator for general purpose anymore.
785 * Some DPMI hosts like HDPMI pre-v3.22 (part of HX pre-v2.22) or DPMIONE
786 * do not support changing page type directly from mapped to committed.
787 * But they support changing it indirectly: first from mapped to uncommitted
788 * and then from uncommitted to committed. So if direct change from mapped
789 * to committed fails then try workaround via indirect change.
/* Sticky per-process flag: once the indirect workaround is needed it is
 * used for all subsequent subranges too. */
792 static int do_indirect_change = 0;
794 for (offset = 0; offset < length; offset += (mi.size << pagesize_shift))
797 * Find a memory handle with its size which belongs to the pointer
798 * address ptr+offset. Base address and size of the memory handle
799 * must be page aligned for changing page attributes.
801 if (!find_sbrk_memory_handle(ptr + offset, length - offset, pagesize, &sh, &sh_size) ||
802 (sh->address & (pagesize-1)) || (sh_size & (pagesize-1)))
/* Describe the current subrange relative to its handle and clamp it so it
 * does not cross the end of the handle. */
808 mi.handle = sh->handle;
809 mi.address = (unsigned long)ptr + offset - sh->address;
810 mi.size = (length - offset) >> pagesize_shift;
811 if (mi.size > ((sh_size - mi.address) >> pagesize_shift))
812 mi.size = (sh_size - mi.address) >> pagesize_shift;
814 attributes = malloc(mi.size * sizeof(*attributes));
821 retry_via_indirect_change:
822 if (do_indirect_change)
/* Step 1 of the workaround: change page type from mapped to uncommitted. */
824 for (i = 0; i < mi.size; i++)
825 attributes[i] = (0<<4) | (0<<3) | 0; /* 0 = page type uncommitted */
827 if (set_and_get_page_attributes(&mi, attributes) != 0)
830 for (i = 0; i < mi.size; i++)
832 /* Check that every page type is uncommitted (0). */
833 if ((attributes[i] & 0x7) != 0)
/* Direct path (or step 2 of the workaround): change to committed r/w. */
842 for (i = 0; i < mi.size; i++)
843 attributes[i] = (0<<4) | (1<<3) | 1; /* 1 = page type committed */
845 if (set_and_get_page_attributes(&mi, attributes) != 0)
848 for (i = 0; i < mi.size; i++)
850 /* Check that every page type is committed (1) and has read/write access (bit 3 set). */
851 if (((attributes[i] & 0x7) != 1) || !(attributes[i] & (1<<3)))
853 if (!do_indirect_change)
856 * Some DPMI hosts do not support changing page type
857 * from mapped to committed but for such change request
858 * do not report any error. Try following workaround:
859 * Change page type indirectly. First change page type
860 * from mapped to uncommitted and then to committed.
862 do_indirect_change = 1;
863 goto retry_via_indirect_change;
875 * Now we are sure that ptr is backed by committed memory which can be
876 * returned back to the DJGPP sbrk pool.
881 else if (physmem == PHYSMEM_PHYSADDR_MAPPING)
884 * Physical address mapping done by __dpmi_physical_address_mapping() can
885 * be unmapped only by __dpmi_free_physical_address_mapping() function.
886 * This function takes linear address of the mapped region. Direct access
887 * pointer refers to linear address from the __djgpp_conventional_base
888 * offset. On non-paging DPMI hosts, physical memory cannot be unmapped at
889 * all because whole physical memory is always available and so this
890 * function either fails or does nothing. Moreover this unmapping function
891 * requires DPMI 1.0 host as opposite of the mapping function which is
892 * available also in DPMI 0.9. It means that DPMI 0.9 hosts do not provide
893 * ability to unmap already mapped physical addresses. This DPMI unmapping
894 * function is not commonly supported by DPMI hosts, even the default
895 * DJGPP's CWSDPMI does not support it. But few alternative DPMI host like
896 * PMODE/DJ, WDOSX, HDPMI32 or DPMIONE support it. So expect failure from
897 * this function call, in most cases it is not possible to unmap physical
898 * memory which was previously mapped by __dpmi_physical_address_mapping().
900 mi.address = (unsigned long)ptr - __djgpp_conventional_base;
901 if (__dpmi_free_physical_address_mapping(&mi) != 0)
904 * Do not report error when DPMI function failed with error code
905 * 0x8025 (invalid linear address) and linear address is below 1 MB.
906 * First 1 MB of memory space should stay always mapped.
908 if (__dpmi_error != 0x8025 || mi.address >= 1*1024*1024UL)
910 switch (__dpmi_error)
912 case 0x0801: /* Unsupported function (returned by DPMI 0.9 host, error number is same as DPMI function number) */
913 case 0x8001: /* Unsupported function (returned by DPMI 1.0 host) */
916 case 0x8010: /* Resource unavailable (DPMI host cannot allocate internal resources to complete an operation) */
919 case 0x8025: /* Invalid linear address */
922 default: /* Other unspecified error */
933 /* invalid physmem parameter */