2 * The PCI Utilities -- Obtain the margin information of the Link
4 * Copyright (c) 2023 KNS Group LLC (YADRO)
6 * Can be freely distributed and used under the terms of the GNU GPL v2+.
8 * SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NOTE(review): this chunk is a line-sampled extract -- the leading integer on
 * each code line is the original file's line number baked in by the extraction
 * tool, and intermediate lines (braces, blank lines, status checks) are
 * missing.  Code is preserved byte-for-byte; only comments are added.
 */
17 /* Macro helpers for Margining command parsing */
19 typedef u16 margin_cmd;
/* A margining command/response is one 16-bit Lane Margining register value:
   bits 2:0 = receiver number, bits 5:3 = margin type, bits 15:8 = payload. */
21 /* Margining command parsing */
23 #define LMR_CMD_RECVN MASK(2, 0)
24 #define LMR_CMD_TYPE MASK(5, 3)
25 #define LMR_CMD_PAYLOAD MASK(15, 8)
/* Payload layouts of the Report responses, as bitfields within the full
   16-bit status register value. */
29 // Report Capabilities
30 #define LMR_PLD_VOLT_SUPPORT BIT(8)
31 #define LMR_PLD_IND_U_D_VOLT BIT(9)
32 #define LMR_PLD_IND_L_R_TIM BIT(10)
33 #define LMR_PLD_SAMPLE_REPORT_METHOD BIT(11)
34 #define LMR_PLD_IND_ERR_SAMPLER BIT(12)
/* Payloads of the remaining report commands (step counts, offsets,
   max simultaneous lanes, sample rate). */
36 #define LMR_PLD_MAX_T_STEPS MASK(13, 8)
37 #define LMR_PLD_MAX_V_STEPS MASK(14, 8)
38 #define LMR_PLD_MAX_OFFSET MASK(14, 8)
39 #define LMR_PLD_MAX_LANES MASK(12, 8)
40 #define LMR_PLD_SAMPLE_RATE MASK(13, 8)
/* Timing-margin step command payload: step count plus go-left direction bit. */
43 #define LMR_PLD_MARGIN_T_STEPS MASK(13, 8)
44 #define LMR_PLD_T_GO_LEFT BIT(14)
/* Voltage-margin step command payload: step count plus go-down direction bit. */
47 #define LMR_PLD_MARGIN_V_STEPS MASK(14, 8)
48 #define LMR_PLD_V_GO_DOWN BIT(15)
/* Step response payload: error count plus 2-bit margining execution status. */
51 #define LMR_PLD_ERR_CNT MASK(13, 8)
52 #define LMR_PLD_MARGIN_STS MASK(15, 14)
54 /* Address calc macro for Lanes Margining registers */
/* Per-lane registers start 8 bytes into the LMR capability and occupy
   4 bytes per lane: 2 bytes control, then 2 bytes status. */
56 #define LMR_LANE_CTRL(lmr_cap_addr, lane) ((lmr_cap_addr) + 8 + 4 * (lane))
57 #define LMR_LANE_STATUS(lmr_cap_addr, lane) ((lmr_cap_addr) + 10 + 4 * (lane))
59 /* Margining Commands */
/* Step commands: margin type 3 = timing, margin type 4 = voltage. */
61 #define MARG_TIM(go_left, step, recvn) margin_make_cmd(((go_left) << 6) | (step), 3, recvn)
62 #define MARG_VOLT(go_down, step, recvn) margin_make_cmd(((go_down) << 7) | (step), 4, recvn)
/* Report commands (margin type 1); the payload byte selects what to report. */
65 #define REPORT_CAPS(recvn) margin_make_cmd(0x88, 1, recvn)
66 #define REPORT_VOL_STEPS(recvn) margin_make_cmd(0x89, 1, recvn)
67 #define REPORT_TIM_STEPS(recvn) margin_make_cmd(0x8A, 1, recvn)
68 #define REPORT_TIM_OFFSET(recvn) margin_make_cmd(0x8B, 1, recvn)
69 #define REPORT_VOL_OFFSET(recvn) margin_make_cmd(0x8C, 1, recvn)
70 #define REPORT_SAMPL_RATE_V(recvn) margin_make_cmd(0x8D, 1, recvn)
71 #define REPORT_SAMPL_RATE_T(recvn) margin_make_cmd(0x8E, 1, recvn)
72 #define REPORT_SAMPLE_CNT(recvn) margin_make_cmd(0x8F, 1, recvn)
73 #define REPORT_MAX_LANES(recvn) margin_make_cmd(0x90, 1, recvn)
/* "Set" commands (margin type 2) and the idle encoding (type 7). */
76 #define NO_COMMAND margin_make_cmd(0x9C, 7, 0)
77 #define CLEAR_ERROR_LOG(recvn) margin_make_cmd(0x55, 2, recvn)
78 #define GO_TO_NORMAL_SETTINGS(recvn) margin_make_cmd(0xF, 2, recvn)
79 #define SET_ERROR_LIMIT(error_limit, recvn) margin_make_cmd(0xC0 | (error_limit), 2, recvn)
/* Millisecond sleep helper: converts msec into a timespec and nanosleep()s,
   restarting the sleep if interrupted by a signal.
   NOTE(review): the function header is absent from this extract (sampled
   lines); code below (L248 of the extract) calls it as msleep(MARGIN_STEP_MS). */
93 ts.tv_sec = msec / 1000;
94 ts.tv_nsec = (msec % 1000) * 1000000;
98 res = nanosleep(&ts, &ts);
/* nanosleep(&ts, &ts) writes the remaining time back into ts, so an
   EINTR-interrupted sleep resumes with only the unslept remainder. */
99 } while (res && errno == EINTR);
/* Pack payload/type/recvn into the 16-bit Lane Margining command layout
   (LMR_CMD_PAYLOAD, LMR_CMD_TYPE, LMR_CMD_RECVN fields). */
105 margin_make_cmd(u8 payload, u8 type, u8 recvn)
107 return SET_REG_MASK(0, LMR_CMD_PAYLOAD, payload) | SET_REG_MASK(0, LMR_CMD_TYPE, type)
108 | SET_REG_MASK(0, LMR_CMD_RECVN, recvn);
/* Issue a command on the given lane and verify the hardware latched it:
   success means the Lane Status register echoes the written command.
   NOTE(review): line 115 of the original (elided here) presumably inserts a
   delay between write and read-back -- confirm against the full source. */
112 margin_set_cmd(struct margin_dev *dev, u8 lane, margin_cmd cmd)
114 pci_write_word(dev->dev, LMR_LANE_CTRL(dev->lmr_cap_addr, lane), cmd);
116 return pci_read_word(dev->dev, LMR_LANE_STATUS(dev->lmr_cap_addr, lane)) == cmd;
/* Issue a "report" command and read the response from the Lane Status
   register into *result.  Succeeds only when the response echoes the
   command's type and receiver-number fields AND the lane afterwards accepts
   a return to NO_COMMAND (margin_set_cmd). */
120 margin_report_cmd(struct margin_dev *dev, u8 lane, margin_cmd cmd, margin_cmd *result)
122 pci_write_word(dev->dev, LMR_LANE_CTRL(dev->lmr_cap_addr, lane), cmd);
124 *result = pci_read_word(dev->dev, LMR_LANE_STATUS(dev->lmr_cap_addr, lane));
125 return GET_REG_MASK(*result, LMR_CMD_TYPE) == GET_REG_MASK(cmd, LMR_CMD_TYPE)
126 && GET_REG_MASK(*result, LMR_CMD_RECVN) == GET_REG_MASK(cmd, LMR_CMD_RECVN)
127 && margin_set_cmd(dev, lane, NO_COMMAND);
/* Patch margining parameters for hardware with known deviations from what it
   reports: on an Ice Lake Root Complex, receiver 1's voltage offset is forced
   to 12 regardless of the reported value. */
131 margin_apply_hw_quirks(struct margin_recv *recv)
133 switch (recv->dev->hw)
135 case MARGIN_ICE_LAKE_RC:
136 if (recv->recvn == 1)
137 recv->params->volt_offset = 12;
/* Query all margining capabilities/limits of receiver recvn through a chain
   of REPORT_* commands and fill *params.  Queries go through lane 0, or the
   last lane when the link is lane-reversed.  The lines elided between the
   report calls (per the original line numbers) are presumably the
   intermediate "if (status)" checks -- confirm against the full source. */
145 read_params_internal(struct margin_dev *dev, u8 recvn, bool lane_reversal,
146 struct margin_params *params)
149 u8 lane = lane_reversal ? dev->width - 1 : 0;
/* Make sure the lane is idle before the first report. */
150 margin_set_cmd(dev, lane, NO_COMMAND);
151 bool status = margin_report_cmd(dev, lane, REPORT_CAPS(recvn), &resp);
/* Capability bits: voltage margining support, independent up/down voltage,
   independent left/right timing, sample-report method, error sampler. */
154 params->volt_support = GET_REG_MASK(resp, LMR_PLD_VOLT_SUPPORT);
155 params->ind_up_down_volt = GET_REG_MASK(resp, LMR_PLD_IND_U_D_VOLT);
156 params->ind_left_right_tim = GET_REG_MASK(resp, LMR_PLD_IND_L_R_TIM);
157 params->sample_report_method = GET_REG_MASK(resp, LMR_PLD_SAMPLE_REPORT_METHOD);
158 params->ind_error_sampler = GET_REG_MASK(resp, LMR_PLD_IND_ERR_SAMPLER);
159 status = margin_report_cmd(dev, lane, REPORT_VOL_STEPS(recvn), &resp);
163 params->volt_steps = GET_REG_MASK(resp, LMR_PLD_MAX_V_STEPS);
164 status = margin_report_cmd(dev, lane, REPORT_TIM_STEPS(recvn), &resp);
168 params->timing_steps = GET_REG_MASK(resp, LMR_PLD_MAX_T_STEPS);
169 status = margin_report_cmd(dev, lane, REPORT_TIM_OFFSET(recvn), &resp);
173 params->timing_offset = GET_REG_MASK(resp, LMR_PLD_MAX_OFFSET);
174 status = margin_report_cmd(dev, lane, REPORT_VOL_OFFSET(recvn), &resp);
178 params->volt_offset = GET_REG_MASK(resp, LMR_PLD_MAX_OFFSET);
179 status = margin_report_cmd(dev, lane, REPORT_SAMPL_RATE_V(recvn), &resp);
183 params->sample_rate_v = GET_REG_MASK(resp, LMR_PLD_SAMPLE_RATE);
184 status = margin_report_cmd(dev, lane, REPORT_SAMPL_RATE_T(recvn), &resp);
188 params->sample_rate_t = GET_REG_MASK(resp, LMR_PLD_SAMPLE_RATE);
189 status = margin_report_cmd(dev, lane, REPORT_MAX_LANES(recvn), &resp);
192 params->max_lanes = GET_REG_MASK(resp, LMR_PLD_MAX_LANES);
196 /* Margin all lanes_n lanes simultaneously */
/* Step one margining direction (arg.dir) across all lanes in arg.results:
   program an increasing step on every still-alive lane, wait one step
   interval, read back each lane's status, and retire lanes that fail.
   Per-lane results (steps reached + termination status) are written into
   arg.results[i].steps/statuses for this direction. */
198 margin_test_lanes(struct margin_lanes_data arg)
201 margin_cmd lane_status;
204 bool timing = (arg.dir == TIM_LEFT || arg.dir == TIM_RIGHT);
/* Build the initial step command; MARG_TIM encodes margin type 3 (timing),
   MARG_VOLT encodes type 4 (voltage), with the direction bit per arg.dir. */
209 step_cmd = MARG_TIM(arg.dir == TIM_LEFT, steps_done, arg.recv->recvn);
214 step_cmd = MARG_VOLT(arg.dir == VOLT_DOWN, steps_done, arg.recv->recvn);
217 bool failed_lanes[32] = { 0 };
218 u8 alive_lanes = arg.lanes_n;
/* Prepare every lane: set the error limit, then record the optimistic
   outcome (all steps reached, MARGIN_THR) to be overwritten on failure. */
220 for (int i = 0; i < arg.lanes_n; i++)
222 margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
223 margin_set_cmd(arg.recv->dev, arg.results[i].lane,
224 SET_ERROR_LIMIT(arg.recv->error_limit, arg.recv->recvn));
225 margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
226 arg.results[i].steps[arg.dir] = arg.steps_lane_total;
227 arg.results[i].statuses[arg.dir] = MARGIN_THR;
/* Main stepping loop: advance one step at a time while any lane survives. */
230 while (alive_lanes > 0 && steps_done < arg.steps_lane_total)
/* Re-encode the new step count into the payload of the prepared command. */
235 step_cmd = SET_REG_MASK(step_cmd, LMR_PLD_MARGIN_T_STEPS, steps_done);
237 step_cmd = SET_REG_MASK(step_cmd, LMR_PLD_MARGIN_V_STEPS, steps_done);
/* Kick off the step on every still-alive lane... */
239 for (int i = 0; i < arg.lanes_n; i++)
241 if (!failed_lanes[i])
244 int ctrl_addr = LMR_LANE_CTRL(arg.recv->dev->lmr_cap_addr, arg.results[i].lane);
245 pci_write_word(arg.recv->dev->dev, ctrl_addr, step_cmd);
/* ...let the receivers accumulate errors for one step interval... */
248 msleep(MARGIN_STEP_MS);
/* ...then collect the per-lane verdicts. */
250 for (int i = 0; i < arg.lanes_n; i++)
252 if (!failed_lanes[i])
254 int status_addr = LMR_LANE_STATUS(arg.recv->dev->lmr_cap_addr, arg.results[i].lane);
255 lane_status = pci_read_word(arg.recv->dev->dev, status_addr);
256 u8 step_status = GET_REG_MASK(lane_status, LMR_PLD_MARGIN_STS);
/* A lane stays alive only if the response echoes our command's type and
   receiver number, the error count is within the configured limit, and
   the lane accepts returning to NO_COMMAND.  (An extra condition on line
   259 of the original is elided from this extract.) */
257 if (!(GET_REG_MASK(lane_status, LMR_CMD_TYPE) == marg_type
258 && GET_REG_MASK(lane_status, LMR_CMD_RECVN) == arg.recv->recvn
260 && GET_REG_MASK(lane_status, LMR_PLD_ERR_CNT) <= arg.recv->error_limit
261 && margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND)))
/* Retire the lane: its last good step was the previous one.  Execution
   statuses 3 and 1 are mapped to MARGIN_NAK; anything else to MARGIN_LIM
   (error limit exceeded). */
264 failed_lanes[i] = true;
265 arg.results[i].steps[arg.dir] = steps_done - 1;
266 arg.results[i].statuses[arg.dir]
267 = (step_status == 3 || step_status == 1 ? MARGIN_NAK : MARGIN_LIM);
/* Progress reporting after each step. */
272 arg.steps_lane_done = steps_done;
273 margin_log_margining(arg);
/* Cleanup: clear every lane's error log and return it to normal settings. */
276 for (int i = 0; i < arg.lanes_n; i++)
278 margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
279 margin_set_cmd(arg.recv->dev, arg.results[i].lane, CLEAR_ERROR_LOG(arg.recv->recvn));
280 margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
281 margin_set_cmd(arg.recv->dev, arg.results[i].lane, GO_TO_NORMAL_SETTINGS(arg.recv->recvn));
282 margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
286 /* Awaits that Receiver is prepared through prep_dev function */
/* Full margining flow for one receiver: check readiness, read capabilities
   (retrying under a lane-reversal assumption), derive step-to-offset
   coefficients, then run every applicable direction over the requested
   lanes in groups of recv.parallel_lanes.  Output goes into *results. */
288 margin_test_receiver(struct margin_dev *dev, u8 recvn, struct margin_args *args,
289 struct margin_results *results)
291 u8 *lanes_to_margin = args->lanes;
292 u8 lanes_n = args->lanes_n;
294 struct margin_params params;
295 struct margin_recv recv = { .dev = dev,
297 .lane_reversal = false,
/* parallel_lanes == 0 means "not specified": margin one lane at a time. */
299 .parallel_lanes = args->parallel_lanes ? args->parallel_lanes : 1,
300 .error_limit = args->error_limit };
302 results->recvn = recvn;
303 results->lanes_n = lanes_n;
304 margin_log_recvn(&recv);
/* Margining Ready must be set before any command can be issued. */
306 if (!margin_check_ready_bit(dev->dev))
308 margin_log("\nMargining Ready bit is Clear.\n");
309 results->test_status = MARGIN_TEST_READY_BIT;
/* Read the caps; on failure retry assuming the link is lane-reversed.
   NOTE(review): "&para;ms" on the two lines below is a mojibake of "&params"
   (the '&' was HTML-escaped to "&para;" somewhere in the pipeline) --
   restore the argument to "&params" when fixing the file's encoding. */
313 if (!read_params_internal(dev, recvn, recv.lane_reversal, ¶ms))
315 recv.lane_reversal = true;
316 if (!read_params_internal(dev, recvn, recv.lane_reversal, ¶ms))
318 margin_log("\nError during caps reading.\n");
319 results->test_status = MARGIN_TEST_CAPS;
324 results->params = params;
/* Never margin more lanes at once than the receiver advertises
   (max_lanes is zero-based, hence the +1). */
326 if (recv.parallel_lanes > params.max_lanes + 1)
327 recv.parallel_lanes = params.max_lanes + 1;
328 margin_apply_hw_quirks(&recv);
329 margin_log_hw_quirks(&recv);
/* A reported offset of 0 means "not reported"; fall back to 50.0 as the
   coefficient numerator and flag the fallback via *_off_reported. */
331 results->tim_off_reported = params.timing_offset != 0;
332 results->volt_off_reported = params.volt_offset != 0;
333 double tim_offset = results->tim_off_reported ? (double)params.timing_offset : 50.0;
334 double volt_offset = results->volt_off_reported ? (double)params.volt_offset : 50.0;
/* Per-step coefficients; the voltage one is additionally scaled by 10
   (presumably a unit conversion -- confirm against the PCIe spec). */
336 results->tim_coef = tim_offset / (double)params.timing_steps;
337 results->volt_coef = volt_offset / (double)params.volt_steps * 10.0;
339 results->lane_reversal = recv.lane_reversal;
340 results->link_speed = dev->link_speed;
341 results->test_status = MARGIN_TEST_OK;
343 margin_log_receiver(&recv);
/* Record physical lane numbers, remapped when the link is lane-reversed. */
345 results->lanes = xmalloc(sizeof(struct margin_res_lane) * lanes_n);
346 for (int i = 0; i < lanes_n; i++)
348 results->lanes[i].lane
349 = recv.lane_reversal ? dev->width - lanes_to_margin[i] - 1 : lanes_to_margin[i];
352 if (args->run_margin)
354 if (args->verbosity > 0)
356 struct margin_lanes_data lanes_data
357 = { .recv = &recv, .verbosity = args->verbosity, .steps_utility = args->steps_utility };
359 enum margin_dir dir[] = { TIM_LEFT, TIM_RIGHT, VOLT_UP, VOLT_DOWN };
/* A step count of 0 means "use the maximum the receiver reports". */
363 u8 steps_t = args->steps_t ? args->steps_t : params.timing_steps;
364 u8 steps_v = args->steps_v ? args->steps_v : params.volt_steps;
/* Margin the requested lanes in chunks of at most parallel_lanes. */
366 while (lanes_done != lanes_n)
368 use_lanes = (lanes_done + recv.parallel_lanes > lanes_n) ? lanes_n - lanes_done :
370 lanes_data.lanes_numbers = lanes_to_margin + lanes_done;
371 lanes_data.lanes_n = use_lanes;
372 lanes_data.results = results->lanes + lanes_done;
/* Skip directions the receiver can't perform: no voltage support, no
   independent left/right timing, no independent up/down voltage. */
374 for (int i = 0; i < 4; i++)
376 bool timing = dir[i] == TIM_LEFT || dir[i] == TIM_RIGHT;
377 if (!timing && !params.volt_support)
379 if (dir[i] == TIM_RIGHT && !params.ind_left_right_tim)
381 if (dir[i] == VOLT_DOWN && !params.ind_up_down_volt)
384 lanes_data.ind = timing ? params.ind_left_right_tim : params.ind_up_down_volt;
385 lanes_data.dir = dir[i];
386 lanes_data.steps_lane_total = timing ? steps_t : steps_v;
/* Keep the shared progress counter consistent (never goes negative). */
387 if (*args->steps_utility >= lanes_data.steps_lane_total)
388 *args->steps_utility -= lanes_data.steps_lane_total;
390 *args->steps_utility = 0;
391 margin_test_lanes(lanes_data);
393 lanes_done += use_lanes;
395 if (args->verbosity > 0)
/* Undo the lane-reversal remapping so reported lane numbers match what
   the user asked for. */
397 if (recv.lane_reversal)
399 for (int i = 0; i < lanes_n; i++)
400 results->lanes[i].lane = lanes_to_margin[i];
/* Standalone entry point: given one end of a link, locate its partner
   device, build and prepare the margin_link, and read the margining
   parameters of receiver recvn into *params.
   NOTE(review): many intermediate lines (validation branches, returns,
   partner-matching details) are elided from this extract -- the comments
   below only describe what the visible lines establish. */
408 margin_read_params(struct pci_access *pacc, struct pci_dev *dev, u8 recvn,
409 struct margin_params *params)
411 struct pci_cap *cap = pci_find_cap(dev, PCI_CAP_ID_EXP, PCI_CAP_NORMAL);
/* Determine which side of the link `dev` is from its PCIe device type. */
414 u8 dev_dir = GET_REG_MASK(pci_read_word(dev, cap->addr + PCI_EXP_FLAGS), PCI_EXP_FLAGS_TYPE);
417 if (dev_dir == PCI_EXP_TYPE_ROOT_PORT || dev_dir == PCI_EXP_TYPE_DOWNSTREAM)
/* Receiver 6 belongs to the Upstream Port side; any other receiver number
   belongs to the downstream-facing port -- mismatches are rejected here
   (the reject bodies are elided). */
432 if (dev_down && recvn == 6)
434 if (!dev_down && recvn != 6)
/* Find the link partner: either the device behind dev's secondary bus, or
   the bridge whose secondary bus dev sits on (same PCI domain). */
437 struct pci_dev *down = NULL;
438 struct pci_dev *up = NULL;
439 struct margin_link link;
441 for (struct pci_dev *p = pacc->devices; p; p = p->next)
443 if (dev_down && pci_read_byte(dev, PCI_SECONDARY_BUS) == p->bus && dev->domain == p->domain
450 else if (!dev_down && pci_read_byte(p, PCI_SECONDARY_BUS) == dev->bus
451 && dev->domain == p->domain)
462 if (!margin_fill_link(down, up, &link))
/* The device under test is the port that owns the requested receiver. */
465 struct margin_dev *dut = (dev_down ? &link.down_port : &link.up_port);
466 if (!margin_check_ready_bit(dut->dev))
469 if (!margin_prep_link(&link))
/* Try normal lane order first, then retry assuming lane reversal. */
473 bool lane_reversal = false;
474 status = read_params_internal(dut, recvn, lane_reversal, params);
477 lane_reversal = true;
478 status = read_params_internal(dut, recvn, lane_reversal, params);
/* Always restore the link state before returning. */
481 margin_restore_link(&link);
/* Validate and normalize user arguments against the device under test.
   Fills in defaults: when no receivers were specified, test all of them
   (1..receivers_n-1 plus receiver 6); when no lanes were specified, test
   every lane of the link.  Returns MARGIN_TEST_OK or an args error code. */
486 enum margin_test_status
487 margin_process_args(struct margin_dev *dev, struct margin_args *args)
/* Two port receivers plus two additional receivers per retimer. */
489 u8 receivers_n = 2 + 2 * dev->retimers_n;
493 for (int i = 1; i < receivers_n; i++)
494 args->recvs[i - 1] = i;
/* Receiver 6 (the Upstream Port receiver) always takes the last slot. */
495 args->recvs[receivers_n - 1] = 6;
496 args->recvs_n = receivers_n;
/* Reject receiver numbers outside 1..6 or not present on this link
   (receiver 6 is always valid; others must be < receivers_n). */
500 for (int i = 0; i < args->recvs_n; i++)
502 u8 recvn = args->recvs[i];
503 if (recvn < 1 || recvn > 6 || (recvn != 6 && recvn > receivers_n - 1))
505 return MARGIN_TEST_ARGS_RECVS;
/* Default lane list: every lane of the link... */
512 args->lanes_n = dev->width;
513 for (int i = 0; i < args->lanes_n; i++)
/* ...otherwise check the user's lanes fit within the link width. */
518 for (int i = 0; i < args->lanes_n; i++)
520 if (args->lanes[i] >= dev->width)
522 return MARGIN_TEST_ARGS_LANES;
527 return MARGIN_TEST_OK;
/* Test all requested receivers of one link.  Allocates and returns an array
   of result structs and reports its length through *recvs_n; the caller is
   expected to release it via margin_free_results().  If the link can't be
   prepared (ASPM couldn't be disabled), a single MARGIN_TEST_ASPM result is
   returned instead of running any tests. */
530 struct margin_results *
531 margin_test_link(struct margin_link *link, struct margin_args *args, u8 *recvs_n)
533 bool status = margin_prep_link(link);
/* On prep failure only one result slot is needed -- the error itself. */
535 u8 receivers_n = status ? args->recvs_n : 1;
536 u8 *receivers = args->recvs;
538 margin_log_link(link);
540 struct margin_results *results = xmalloc(sizeof(*results) * receivers_n);
544 results[0].test_status = MARGIN_TEST_ASPM;
545 margin_log("\nCouldn't disable ASPM on the given Link.\n");
/* Receiver 6 lives in the Upstream Port; all others in the Downstream Port. */
550 struct margin_dev *dut;
551 for (int i = 0; i < receivers_n; i++)
553 dut = receivers[i] == 6 ? &link->up_port : &link->down_port;
554 margin_test_receiver(dut, receivers[i], args, &results[i]);
557 margin_restore_link(link);
560 *recvs_n = receivers_n;
/* Release the per-lane arrays inside a results array; only entries from
   successful tests allocated one (see margin_test_receiver's xmalloc).
   NOTE(review): the free() of the results array itself is on a line not
   shown in this extract -- presumably it follows the loop; confirm. */
565 margin_free_results(struct margin_results *results, u8 results_n)
567 for (int i = 0; i < results_n; i++)
569 if (results[i].test_status == MARGIN_TEST_OK)
570 free(results[i].lanes);