/*
 *  The PCI Utilities -- Verify and prepare devices before margining
 *
 *  Copyright (c) 2023-2024 KNS Group LLC (YADRO)
 *
 *  Can be freely distributed and used under the terms of the GNU GPL v2+.
 *
 *  SPDX-License-Identifier: GPL-2.0-or-later
 */

#include <stdbool.h>
#include <string.h> /* memset */

#include "lmr.h" /* margining types (struct margin_dev, struct margin_link) and LMR register definitions */

static u16 special_hw[][4] =
  // Vendor ID, Device ID, Revision ID, margin_hw
  { { 0x8086, 0x347A, 0x4, MARGIN_ICE_LAKE_RC },
    { 0xFFFF, 0, 0, MARGIN_HW_DEFAULT }
  };

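/* Match the device against the quirk table above; devices that are not listed
   use MARGIN_HW_DEFAULT handling. */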
static enum margin_hw
detect_unique_hw(struct pci_dev *dev)
{
  u16 vendor = pci_read_word(dev, PCI_VENDOR_ID);
  u16 device = pci_read_word(dev, PCI_DEVICE_ID);
  u8 revision = pci_read_byte(dev, PCI_REVISION_ID);

  for (int i = 0; special_hw[i][0] != 0xFFFF; i++)
    {
      if (vendor == special_hw[i][0] && device == special_hw[i][1] && revision == special_hw[i][2])
        return special_hw[i][3];
    }
  return MARGIN_HW_DEFAULT;
}

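/* True if the device is the downstream side of a link: a Root Port or a
   Switch Downstream Port with a Type 1 (bridge) header. */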
bool
margin_port_is_down(struct pci_dev *dev)
{
  struct pci_cap *cap = pci_find_cap(dev, PCI_CAP_ID_EXP, PCI_CAP_NORMAL);
  if (!cap)
    return false;

  u8 type = pci_read_byte(dev, PCI_HEADER_TYPE) & 0x7F;
  u8 dir = GET_REG_MASK(pci_read_word(dev, cap->addr + PCI_EXP_FLAGS), PCI_EXP_FLAGS_TYPE);

  return type == PCI_HEADER_TYPE_BRIDGE
         && (dir == PCI_EXP_TYPE_ROOT_PORT || dir == PCI_EXP_TYPE_DOWNSTREAM);
}

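/* Given one side of a link, find both: if dev is the Downstream/Root Port,
   look for the device behind its secondary bus; otherwise look for the bridge
   whose secondary bus leads to dev. */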
bool
margin_find_pair(struct pci_access *pacc, struct pci_dev *dev, struct pci_dev **down_port,
                 struct pci_dev **up_port)
{
  struct pci_cap *cap = pci_find_cap(dev, PCI_CAP_ID_EXP, PCI_CAP_NORMAL);
  if (!cap)
    return false;
  bool given_down = margin_port_is_down(dev);

  for (struct pci_dev *p = pacc->devices; p; p = p->next)
    {
      if (given_down && pci_read_byte(dev, PCI_SECONDARY_BUS) == p->bus && dev->domain == p->domain
          && p->func == 0)
        {
          *down_port = dev;
          *up_port = p;
          return true;
        }
      else if (!given_down && pci_read_byte(p, PCI_SECONDARY_BUS) == dev->bus
               && dev->domain == p->domain)
        {
          *down_port = p;
          *up_port = dev;
          return true;
        }
    }
  return false;
}

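/* A link can be margined only if it runs at 16 or 32 GT/s, the two devices
   really form one link (Downstream Port above Function 0 of the Upstream
   device), and the Upstream device is in D0. */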
bool
margin_verify_link(struct pci_dev *down_port, struct pci_dev *up_port)
{
  struct pci_cap *cap = pci_find_cap(down_port, PCI_CAP_ID_EXP, PCI_CAP_NORMAL);
  if (!cap)
    return false;

  // Lane Margining is defined only for 16 GT/s and 32 GT/s links (speed codes 4 and 5)
  u16 speed = pci_read_word(down_port, cap->addr + PCI_EXP_LNKSTA) & PCI_EXP_LNKSTA_SPEED;
  if (speed < 4 || speed > 5)
    return false;

  u8 down_sec = pci_read_byte(down_port, PCI_SECONDARY_BUS);

  // Verify that the devices are linked, down_port is a Root Port or a Downstream Port
  // of a Switch, and up_port is Function 0 of a Device
  if (!(down_sec == up_port->bus && margin_port_is_down(down_port) && up_port->func == 0))
    return false;

  struct pci_cap *pm = pci_find_cap(up_port, PCI_CAP_ID_PM, PCI_CAP_NORMAL);
  return pm && !(pci_read_word(up_port, pm->addr + PCI_PM_CTRL) & PCI_PM_CTRL_STATE_MASK); // D0
}

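/* Margining Ready bit of the Margining Port Status register: the Port is
   ready to accept margining commands. */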
bool
margin_check_ready_bit(struct pci_dev *dev)
{
  struct pci_cap *lmr = pci_find_cap(dev, PCI_EXT_CAP_ID_LMR, PCI_CAP_EXTENDED);
  return lmr && (pci_read_word(dev, lmr->addr + PCI_LMR_PORT_STS) & PCI_LMR_PORT_STS_READY);
}

/* Expects a device that is already linked at 16 GT/s or higher and has the
   Lane Margining extended capability */
static struct margin_dev
fill_dev_wrapper(struct pci_dev *dev)
{
  struct pci_cap *cap = pci_find_cap(dev, PCI_CAP_ID_EXP, PCI_CAP_NORMAL);
  struct margin_dev res
    = { .dev = dev,
        .lmr_cap_addr = pci_find_cap(dev, PCI_EXT_CAP_ID_LMR, PCI_CAP_EXTENDED)->addr,
        .width = GET_REG_MASK(pci_read_word(dev, cap->addr + PCI_EXP_LNKSTA), PCI_EXP_LNKSTA_WIDTH),
        .retimers_n
        = (!!(pci_read_word(dev, cap->addr + PCI_EXP_LNKSTA2) & PCI_EXP_LINKSTA2_RETIMER))
          + (!!(pci_read_word(dev, cap->addr + PCI_EXP_LNKSTA2) & PCI_EXP_LINKSTA2_2RETIMERS)),
        .link_speed = (pci_read_word(dev, cap->addr + PCI_EXP_LNKSTA) & PCI_EXP_LNKSTA_SPEED),
        .hw = detect_unique_hw(dev) };
  return res;
}

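/* Build the margin_link wrappers for an already verified Downstream/Upstream pair. */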
bool
margin_fill_link(struct pci_dev *down_port, struct pci_dev *up_port, struct margin_link *wrappers)
{
  memset(wrappers, 0, sizeof(*wrappers));
  if (!margin_verify_link(down_port, up_port))
    return false;
  wrappers->down_port = fill_dev_wrapper(down_port);
  wrappers->up_port = fill_dev_wrapper(up_port);
  return true;
}

/* Disable ASPM, set Hardware Autonomous Speed/Width Disable bits */
static bool
margin_prep_dev(struct margin_dev *dev)
{
  struct pci_cap *pcie = pci_find_cap(dev->dev, PCI_CAP_ID_EXP, PCI_CAP_NORMAL);
  if (!pcie)
    return false;

  // Save and clear ASPM Control; fail if ASPM cannot be disabled
  u16 lnk_ctl = pci_read_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL);
  dev->aspm = lnk_ctl & PCI_EXP_LNKCTL_ASPM;
  dev->hawd = !!(lnk_ctl & PCI_EXP_LNKCTL_HWAUTWD);
  lnk_ctl &= ~PCI_EXP_LNKCTL_ASPM;
  pci_write_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL, lnk_ctl);
  if (pci_read_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM)
    return false;

  // Set Hardware Autonomous Width Disable
  lnk_ctl |= PCI_EXP_LNKCTL_HWAUTWD;
  pci_write_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL, lnk_ctl);

  // Save the old value and set Hardware Autonomous Speed Disable
  u16 lnk_ctl2 = pci_read_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL2);
  dev->hasd = !!(lnk_ctl2 & PCI_EXP_LNKCTL2_SPEED_DIS);
  lnk_ctl2 |= PCI_EXP_LNKCTL2_SPEED_DIS;
  pci_write_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL2, lnk_ctl2);

  return true;
}

/* Restore Device ASPM, Hardware Autonomous Speed/Width settings */
static void
margin_restore_dev(struct margin_dev *dev)
{
  struct pci_cap *pcie = pci_find_cap(dev->dev, PCI_CAP_ID_EXP, PCI_CAP_NORMAL);
  if (!pcie)
    return;

  u16 lnk_ctl = pci_read_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL);
  lnk_ctl = SET_REG_MASK(lnk_ctl, PCI_EXP_LNKCTL_ASPM, dev->aspm);
  lnk_ctl = SET_REG_MASK(lnk_ctl, PCI_EXP_LNKCTL_HWAUTWD, dev->hawd);
  pci_write_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL, lnk_ctl);

  u16 lnk_ctl2 = pci_read_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL2);
  lnk_ctl2 = SET_REG_MASK(lnk_ctl2, PCI_EXP_LNKCTL2_SPEED_DIS, dev->hasd);
  pci_write_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL2, lnk_ctl2);
}

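/* Prepare both sides of the link; if the Upstream side fails, roll back the
   Downstream side that was already prepared. */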
bool
margin_prep_link(struct margin_link *link)
{
  if (!margin_prep_dev(&link->down_port))
    return false;
  if (!margin_prep_dev(&link->up_port))
    {
      margin_restore_dev(&link->down_port);
      return false;
    }
  return true;
}

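/* Undo margin_prep_link on both sides of the link. */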
void
margin_restore_link(struct margin_link *link)
{
  margin_restore_dev(&link->down_port);
  margin_restore_dev(&link->up_port);
}
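
/*
 * Usage sketch (illustrative only, not part of the library build). Assuming
 * "pacc" is an initialized struct pci_access whose devices have been scanned
 * and "dev" is one side of the link of interest, a margining session is
 * typically set up and torn down like this:
 *
 *   struct pci_dev *down, *up;
 *   struct margin_link link;
 *
 *   if (margin_find_pair(pacc, dev, &down, &up)
 *       && margin_check_ready_bit(down) && margin_check_ready_bit(up)
 *       && margin_fill_link(down, up, &link)
 *       && margin_prep_link(&link))
 *     {
 *       // ... run margining commands against &link ...
 *       margin_restore_link(&link);
 *     }
 */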