/*
 * The PCI Library -- Direct Configuration access via memory mapped ports
 *
 * Copyright (c) 2022 Pali Rohár <pali@kernel.org>
 *
 * Can be freely distributed and used under the terms of the GNU GPL v2+.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
/*
 * Tell 32-bit platforms that we are interested in the 64-bit variant of the
 * off_t type, as the 32-bit variant is signed and therefore cannot represent
 * all possible 32-bit offsets. This is required because off_t is the offset
 * type taken by mmap().
 */
#define _FILE_OFFSET_BITS 64

#include "internal.h"

#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <sys/types.h>
#include <sys/mman.h>

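/*
 * Largest value representable in off_t: for a 64-bit off_t this evaluates to
 * 2^63 - 1. Addresses above this limit cannot be passed to mmap().
 */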
#define OFF_MAX (off_t)((1ULL << (sizeof(off_t) * CHAR_BIT - 1)) - 1)

struct mmio_cache
{
  off_t addr_page;
  off_t data_page;
  void *addr_map;
  void *data_map;
};

static long pagesize;

static void
munmap_regs(struct pci_access *a)
{
  struct mmio_cache *cache = a->backend_data;

  if (!cache)
    return;

  munmap(cache->addr_map, pagesize);
  if (cache->addr_page != cache->data_page)
    munmap(cache->data_map, pagesize);

  pci_mfree(a->backend_data);
  a->backend_data = NULL;
}

static int
mmap_regs(struct pci_access *a, off_t addr_reg, off_t data_reg, int data_off, volatile void **addr, volatile void **data)
{
  struct mmio_cache *cache = a->backend_data;
  off_t addr_page = addr_reg & ~(pagesize-1);
  off_t data_page = data_reg & ~(pagesize-1);
  void *addr_map = MAP_FAILED;
  void *data_map = MAP_FAILED;

  if (cache && cache->addr_page == addr_page)
    addr_map = cache->addr_map;

  if (cache && cache->data_page == data_page)
    data_map = cache->data_map;

  if (addr_map == MAP_FAILED)
    addr_map = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, a->fd, addr_page);

  if (addr_map == MAP_FAILED)
    return 0;

  if (data_map == MAP_FAILED)
    {
      if (data_page == addr_page)
        data_map = addr_map;
      else
        data_map = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, a->fd, data_page);
    }

  if (data_map == MAP_FAILED)
    {
      if (!cache || cache->addr_map != addr_map)
        munmap(addr_map, pagesize);
      return 0;
    }

  if (cache && cache->addr_page != addr_page)
    munmap(cache->addr_map, pagesize);

  if (cache && cache->data_page != data_page && cache->data_page != cache->addr_page)
    munmap(cache->data_map, pagesize);

  if (!cache)
    cache = a->backend_data = pci_malloc(a, sizeof(*cache));

  cache->addr_page = addr_page;
  cache->data_page = data_page;
  cache->addr_map = addr_map;
  cache->data_map = data_map;

  *addr = (unsigned char *)addr_map + (addr_reg & (pagesize-1));
  *data = (unsigned char *)data_map + (data_reg & (pagesize-1)) + data_off;
  return 1;
}

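/*
 * Worked example (illustrative addresses, not part of the library): with a
 * 4 KiB page size and addr_reg = 0xfe000cf8, the mapped page starts at
 * 0xfe000000 and *addr points at offset 0xcf8 inside it. A data_reg of
 * 0xfe000cfc falls into the same page, so data_page == addr_page and the
 * single mapping is shared by both returned pointers.
 */
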
static void
writeb(unsigned char value, volatile void *addr)
{
  *(volatile unsigned char *)addr = value;
}

static void
writew(unsigned short value, volatile void *addr)
{
  *(volatile unsigned short *)addr = value;
}

static void
writel(u32 value, volatile void *addr)
{
  *(volatile u32 *)addr = value;
}

static unsigned char
readb(volatile void *addr)
{
  return *(volatile unsigned char *)addr;
}

static unsigned short
readw(volatile void *addr)
{
  return *(volatile unsigned short *)addr;
}

static u32
readl(volatile void *addr)
{
  return *(volatile u32 *)addr;
}

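/*
 * The *.addrs parameters parsed below are a comma-separated list of
 * address-register/data-register pairs, one pair per PCI domain, e.g.
 * "0xfe000cf8/0xfe000cfc,0xfe100cf8/0xfe100cfc" (illustrative values).
 * validate_addrs() accepts only hexadecimal values which are 4-byte aligned
 * (the registers are 32-bit wide) and do not exceed OFF_MAX.
 */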
static int
validate_addrs(const char *addrs)
{
  const char *sep, *next;
  unsigned long long num;
  char *endptr;

  if (!*addrs)
    return 0;

  while (*addrs)
    {
      next = strchr(addrs, ',');
      if (!next)
        next = addrs + strlen(addrs);

      sep = strchr(addrs, '/');
      if (!sep)
        return 0;

      if (!isxdigit(*addrs) || !isxdigit(*(sep+1)))
        return 0;

      errno = 0;
      num = strtoull(addrs, &endptr, 16);
      if (errno || endptr != sep || (num & 3) || num > OFF_MAX)
        return 0;

      errno = 0;
      num = strtoull(sep+1, &endptr, 16);
      if (errno || endptr != next || (num & 3) || num > OFF_MAX)
        return 0;

      if (*next == ',')
        next++;
      addrs = next;
    }

  return 1;
}

static int
get_domain_count(const char *addrs)
{
  int count = 1;
  while ((addrs = strchr(addrs, ',')))
    {
      addrs++;
      count++;
    }
  return count;
}

static int
get_domain_addr(const char *addrs, int domain, off_t *addr_reg, off_t *data_reg)
{
  char *endptr;

  while (domain-- > 0)
    {
      addrs = strchr(addrs, ',');
      if (!addrs)
        return 0;
      addrs++;
    }

  *addr_reg = strtoull(addrs, &endptr, 16);
  *data_reg = strtoull(endptr+1, NULL, 16);

  return 1;
}

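/*
 * Example (same illustrative list as above): for
 * "0xfe000cf8/0xfe000cfc,0xfe100cf8/0xfe100cfc", get_domain_count() returns 2
 * and get_domain_addr(addrs, 1, ...) yields 0xfe100cf8/0xfe100cfc, i.e. PCI
 * domain N is served by the (N+1)-th pair in the list.
 */
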
224 conf1_config(struct pci_access *a)
226 pci_define_param(a, "devmem.path", PCI_PATH_DEVMEM_DEVICE, "Path to the /dev/mem device");
227 pci_define_param(a, "mmio-conf1.addrs", "", "Physical addresses of memory mapped Intel conf1 interface"); /* format: 0xaddr1/0xdata1,0xaddr2/0xdata2,... */
static void
conf1_ext_config(struct pci_access *a)
{
  pci_define_param(a, "devmem.path", PCI_PATH_DEVMEM_DEVICE, "Path to the /dev/mem device");
  pci_define_param(a, "mmio-conf1-ext.addrs", "", "Physical addresses of memory mapped Intel conf1 extended interface"); /* format: 0xaddr1/0xdata1,0xaddr2/0xdata2,... */
}

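/*
 * Example usage (addresses are illustrative; the real values are
 * platform-specific and usually come from firmware or SoC documentation):
 *
 *   lspci -A mmio-conf1 -O mmio-conf1.addrs=0xfe000cf8/0xfe000cfc
 *   lspci -A mmio-conf1-ext -O mmio-conf1-ext.addrs=0xfe000cf8/0xfe000cfc
 *
 * or, through the library API, pci_set_param(a, "mmio-conf1.addrs", "...").
 */
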
static int
detect(struct pci_access *a, char *addrs_param_name)
{
  char *addrs = pci_get_param(a, addrs_param_name);
  char *devmem = pci_get_param(a, "devmem.path");

  if (!*addrs)
    {
      a->debug("%s was not specified", addrs_param_name);
      return 0;
    }

  if (!validate_addrs(addrs))
    {
      a->debug("%s has invalid address format %s", addrs_param_name, addrs);
      return 0;
    }

  if (access(devmem, R_OK | W_OK))
    {
      a->debug("cannot access %s: %s", devmem, strerror(errno));
      return 0;
    }

  a->debug("using %s with %s", devmem, addrs);
  return 1;
}

static int
conf1_detect(struct pci_access *a)
{
  return detect(a, "mmio-conf1.addrs");
}

static int
conf1_ext_detect(struct pci_access *a)
{
  return detect(a, "mmio-conf1-ext.addrs");
}

static char *
get_addrs_param_name(struct pci_access *a)
{
  if (a->methods->config == conf1_ext_config)
    return "mmio-conf1-ext.addrs";
  else
    return "mmio-conf1.addrs";
}

static void
conf1_init(struct pci_access *a)
{
  char *addrs_param_name = get_addrs_param_name(a);
  char *addrs = pci_get_param(a, addrs_param_name);
  char *devmem = pci_get_param(a, "devmem.path");

  pagesize = sysconf(_SC_PAGESIZE);
  if (pagesize < 0)
    a->error("Cannot get page size: %s", strerror(errno));

  if (!*addrs)
    a->error("Option %s was not specified.", addrs_param_name);

  if (!validate_addrs(addrs))
    a->error("Option %s has invalid address format \"%s\".", addrs_param_name, addrs);

  a->fd = open(devmem, O_RDWR | O_DSYNC); /* O_DSYNC bypasses the CPU cache for mmap() on Linux */
  if (a->fd < 0)
    a->error("Cannot open %s: %s.", devmem, strerror(errno));
}

static void
conf1_cleanup(struct pci_access *a)
{
  if (a->fd < 0)
    return;

  munmap_regs(a);
  close(a->fd);
  a->fd = -1;
}

static void
conf1_scan(struct pci_access *a)
{
  char *addrs_param_name = get_addrs_param_name(a);
  char *addrs = pci_get_param(a, addrs_param_name);
  int domain_count = get_domain_count(addrs);
  int domain;

  for (domain = 0; domain < domain_count; domain++)
    pci_generic_scan_domain(a, domain);
}

static int
conf1_ext_read(struct pci_dev *d, int pos, byte *buf, int len)
{
  char *addrs_param_name = get_addrs_param_name(d->access);
  char *addrs = pci_get_param(d->access, addrs_param_name);
  volatile void *addr, *data;
  off_t addr_reg, data_reg;

  if (pos >= 4096)
    return 0;

  if (len != 1 && len != 2 && len != 4)
    return pci_generic_block_read(d, pos, buf, len);

  if (!get_domain_addr(addrs, d->domain, &addr_reg, &data_reg))
    return 0;

  if (!mmap_regs(d->access, addr_reg, data_reg, pos&3, &addr, &data))
    return 0;

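  /*
   * Layout of the 32-bit value written to the address register (conf1
   * "CF8-style" encoding): bit 31 = enable, bits 27:24 = extended register
   * bits 11:8 (taken from (pos & 0xf00) << 16), bits 23:16 = bus,
   * bits 15:8 = device/function, bits 7:2 = dword-aligned register offset.
   */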
  writel(0x80000000 | ((pos & 0xf00) << 16) | ((d->bus & 0xff) << 16) | (PCI_DEVFN(d->dev, d->func) << 8) | (pos & 0xfc), addr);
  readl(addr); /* write barrier for address */

  switch (len)
    {
    case 1:
      buf[0] = readb(data);
      break;
    case 2:
      ((u16 *) buf)[0] = readw(data);
      break;
    case 4:
      ((u32 *) buf)[0] = readl(data);
      break;
    }

  return 1;
}

static int
conf1_read(struct pci_dev *d, int pos, byte *buf, int len)
{
  if (pos >= 256)
    return 0;

  return conf1_ext_read(d, pos, buf, len);
}

static int
conf1_ext_write(struct pci_dev *d, int pos, byte *buf, int len)
{
  char *addrs_param_name = get_addrs_param_name(d->access);
  char *addrs = pci_get_param(d->access, addrs_param_name);
  volatile void *addr, *data;
  off_t addr_reg, data_reg;

  if (pos >= 4096)
    return 0;

  if (len != 1 && len != 2 && len != 4)
    return pci_generic_block_write(d, pos, buf, len);

  if (!get_domain_addr(addrs, d->domain, &addr_reg, &data_reg))
    return 0;

  if (!mmap_regs(d->access, addr_reg, data_reg, pos&3, &addr, &data))
    return 0;

  writel(0x80000000 | ((pos & 0xf00) << 16) | ((d->bus & 0xff) << 16) | (PCI_DEVFN(d->dev, d->func) << 8) | (pos & 0xfc), addr);
  readl(addr); /* write barrier for address */

  switch (len)
    {
    case 1:
      writeb(buf[0], data);
      break;
    case 2:
      writew(((u16 *) buf)[0], data);
      break;
    case 4:
      writel(((u32 *) buf)[0], data);
      break;
    }

  /*
   * Write barrier for the data port.
   * Note that we cannot read back from the data port because such a read may
   * have side effects. Instead we read from the address port (which should
   * have no side effects) to create a barrier between two conf1_write()
   * calls. This is not guaranteed to be 100% correct, as it does not enforce
   * a barrier on the data port itself; the correct way would be a CPU
   * instruction performing a full hw sync barrier, but gcc does not provide
   * any (builtin) function for that yet.
   */
  readl(addr);

  return 1;
}

static int
conf1_write(struct pci_dev *d, int pos, byte *buf, int len)
{
  if (pos >= 256)
    return 0;

  return conf1_ext_write(d, pos, buf, len);
}

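/*
 * conf1_read()/conf1_write() restrict accesses to the classic 256-byte
 * configuration space, while the conf1-ext variants reach the full 4096-byte
 * extended space by encoding register bits 11:8 in bits 27:24 of the address
 * register.
 */
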
struct pci_methods pm_mmio_conf1 = {
  "mmio-conf1",
  "Raw memory mapped I/O port access using Intel conf1 interface",
  conf1_config,
  conf1_detect,
  conf1_init,
  conf1_cleanup,
  conf1_scan,
  pci_generic_fill_info,
  conf1_read,
  conf1_write,
  NULL,		/* read_vpd */
  NULL,		/* init_dev */
  NULL		/* cleanup_dev */
};

struct pci_methods pm_mmio_conf1_ext = {
  "mmio-conf1-ext",
  "Raw memory mapped I/O port access using Intel conf1 extended interface",
  conf1_ext_config,
  conf1_ext_detect,
  conf1_init,
  conf1_cleanup,
  conf1_scan,
  pci_generic_fill_info,
  conf1_ext_read,
  conf1_ext_write,
  NULL,		/* read_vpd */
  NULL,		/* init_dev */
  NULL		/* cleanup_dev */
};
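
/*
 * Illustrative sketch of selecting this backend through the library API.
 * The access-type constant name (assumed here to be PCI_ACCESS_MMIO_TYPE1)
 * and the register addresses are examples only:
 *
 *   struct pci_access *a = pci_alloc();
 *   a->method = PCI_ACCESS_MMIO_TYPE1;
 *   pci_set_param(a, "mmio-conf1.addrs", "0xfe000cf8/0xfe000cfc");
 *   pci_init(a);
 *   pci_scan_bus(a);
 *   ...
 *   pci_cleanup(a);
 */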