2 * The PCI Utilities -- Obtain the margin information of the Link
4 * Copyright (c) 2023 KNS Group LLC (YADRO)
6 * Can be freely distributed and used under the terms of the GNU GPL v2+.
8 * SPDX-License-Identifier: GPL-2.0-or-later
/* Macro helpers for Margining command parsing */

/* A margining command is a 16-bit value written to a lane's Margining
   Control register and echoed back via the Margining Status register */
typedef u16 margin_cmd;

/* Margining command parsing */

/* Fields common to every margining command/response word */
#define LMR_CMD_RECVN MASK(2, 0)
#define LMR_CMD_TYPE MASK(5, 3)
#define LMR_CMD_PAYLOAD MASK(15, 8)

// Report Capabilities
#define LMR_PLD_VOLT_SUPPORT BIT(8)
#define LMR_PLD_IND_U_D_VOLT BIT(9)
#define LMR_PLD_IND_L_R_TIM BIT(10)
#define LMR_PLD_SAMPLE_REPORT_METHOD BIT(11)
#define LMR_PLD_IND_ERR_SAMPLER BIT(12)

/* Payload fields of the numeric "report" responses */
#define LMR_PLD_MAX_T_STEPS MASK(13, 8)
#define LMR_PLD_MAX_V_STEPS MASK(14, 8)
#define LMR_PLD_MAX_OFFSET MASK(14, 8)
#define LMR_PLD_MAX_LANES MASK(12, 8)
#define LMR_PLD_SAMPLE_RATE MASK(13, 8)

/* Step Margin (timing) payload: step count plus direction bit */
#define LMR_PLD_MARGIN_T_STEPS MASK(13, 8)
#define LMR_PLD_T_GO_LEFT BIT(14)

/* Step Margin (voltage) payload: step count plus direction bit */
#define LMR_PLD_MARGIN_V_STEPS MASK(14, 8)
#define LMR_PLD_V_GO_DOWN BIT(15)

/* Step Margin response payload: error count and margining status code */
#define LMR_PLD_ERR_CNT MASK(13, 8)
#define LMR_PLD_MARGIN_STS MASK(15, 14)

/* Address calc macro for Lanes Margining registers */
/* Per-lane Control/Status word pairs follow the capability header,
   4 bytes per lane (Control at +8, Status at +10 for lane 0) */
#define LMR_LANE_CTRL(lmr_cap_addr, lane) ((lmr_cap_addr) + 8 + 4 * (lane))
#define LMR_LANE_STATUS(lmr_cap_addr, lane) ((lmr_cap_addr) + 10 + 4 * (lane))

/* Margining Commands */

/* Step Margin commands — the direction bit sits above the step count
   within the 8-bit payload (bit 6 for timing, bit 7 for voltage) */
#define MARG_TIM(go_left, step, recvn) margin_make_cmd(((go_left) << 6) | (step), 3, recvn)
#define MARG_VOLT(go_down, step, recvn) margin_make_cmd(((go_down) << 7) | (step), 4, recvn)

/* "Report" commands (type 1) — query receiver capabilities */
#define REPORT_CAPS(recvn) margin_make_cmd(0x88, 1, recvn)
#define REPORT_VOL_STEPS(recvn) margin_make_cmd(0x89, 1, recvn)
#define REPORT_TIM_STEPS(recvn) margin_make_cmd(0x8A, 1, recvn)
#define REPORT_TIM_OFFSET(recvn) margin_make_cmd(0x8B, 1, recvn)
#define REPORT_VOL_OFFSET(recvn) margin_make_cmd(0x8C, 1, recvn)
#define REPORT_SAMPL_RATE_V(recvn) margin_make_cmd(0x8D, 1, recvn)
#define REPORT_SAMPL_RATE_T(recvn) margin_make_cmd(0x8E, 1, recvn)
#define REPORT_SAMPLE_CNT(recvn) margin_make_cmd(0x8F, 1, recvn)
#define REPORT_MAX_LANES(recvn) margin_make_cmd(0x90, 1, recvn)

/* Control commands (type 2 / type 7 for No Command) */
#define NO_COMMAND margin_make_cmd(0x9C, 7, 0)
#define CLEAR_ERROR_LOG(recvn) margin_make_cmd(0x55, 2, recvn)
#define GO_TO_NORMAL_SETTINGS(recvn) margin_make_cmd(0xF, 2, recvn)
#define SET_ERROR_LIMIT(error_limit, recvn) margin_make_cmd(0xC0 | (error_limit), 2, recvn)
  /* Millisecond sleep: split msec into whole seconds and nanoseconds */
  ts.tv_sec = msec / 1000;
  ts.tv_nsec = (msec % 1000) * 1000000;
  /* nanosleep writes the remaining time back into ts, so a retry after a
     signal interruption (EINTR) sleeps only the remainder */
  res = nanosleep(&ts, &ts);
} while (res && errno == EINTR);
/* Pack payload, margin type and receiver number into one 16-bit
   margining command word */
margin_make_cmd(u8 payload, u8 type, u8 recvn)
  return SET_REG_MASK(0, LMR_CMD_PAYLOAD, payload) | SET_REG_MASK(0, LMR_CMD_TYPE, type)
         | SET_REG_MASK(0, LMR_CMD_RECVN, recvn);
/* Write cmd to the lane's Margining Control register and verify that the
   Margining Status register echoes the exact same word back */
margin_set_cmd(struct margin_dev *dev, u8 lane, margin_cmd cmd)
  pci_write_word(dev->dev, LMR_LANE_CTRL(dev->lmr_cap_addr, lane), cmd);
  return pci_read_word(dev->dev, LMR_LANE_STATUS(dev->lmr_cap_addr, lane)) == cmd;
/* Issue a margining command and read the raw response into *result.
   Succeeds when the response's type and receiver number match the request
   (the payload carries the reported data, so it is not compared) and the
   lane can then be returned to the No Command state. */
margin_report_cmd(struct margin_dev *dev, u8 lane, margin_cmd cmd, margin_cmd *result)
  pci_write_word(dev->dev, LMR_LANE_CTRL(dev->lmr_cap_addr, lane), cmd);
  *result = pci_read_word(dev->dev, LMR_LANE_STATUS(dev->lmr_cap_addr, lane));
  return GET_REG_MASK(*result, LMR_CMD_TYPE) == GET_REG_MASK(cmd, LMR_CMD_TYPE)
         && GET_REG_MASK(*result, LMR_CMD_RECVN) == GET_REG_MASK(cmd, LMR_CMD_RECVN)
         && margin_set_cmd(dev, lane, NO_COMMAND);
/* Query all margining capabilities of receiver recvn into *params.
   lane_reversal selects which physical lane carries logical lane 0. */
read_params_internal(struct margin_dev *dev, u8 recvn, bool lane_reversal,
                     struct margin_params *params)
  /* Under lane reversal, logical lane 0 is physical lane width-1 */
  u8 lane = lane_reversal ? dev->width - 1 : 0;
  margin_set_cmd(dev, lane, NO_COMMAND);
  bool status = margin_report_cmd(dev, lane, REPORT_CAPS(recvn), &resp);

  /* Capability bits from the Report Capabilities payload */
  params->volt_support = GET_REG_MASK(resp, LMR_PLD_VOLT_SUPPORT);
  params->ind_up_down_volt = GET_REG_MASK(resp, LMR_PLD_IND_U_D_VOLT);
  params->ind_left_right_tim = GET_REG_MASK(resp, LMR_PLD_IND_L_R_TIM);
  params->sample_report_method = GET_REG_MASK(resp, LMR_PLD_SAMPLE_REPORT_METHOD);
  params->ind_error_sampler = GET_REG_MASK(resp, LMR_PLD_IND_ERR_SAMPLER);

  /* One report command per numeric capability */
  status = margin_report_cmd(dev, lane, REPORT_VOL_STEPS(recvn), &resp);
  params->volt_steps = GET_REG_MASK(resp, LMR_PLD_MAX_V_STEPS);
  status = margin_report_cmd(dev, lane, REPORT_TIM_STEPS(recvn), &resp);
  params->timing_steps = GET_REG_MASK(resp, LMR_PLD_MAX_T_STEPS);
  status = margin_report_cmd(dev, lane, REPORT_TIM_OFFSET(recvn), &resp);
  params->timing_offset = GET_REG_MASK(resp, LMR_PLD_MAX_OFFSET);
  status = margin_report_cmd(dev, lane, REPORT_VOL_OFFSET(recvn), &resp);
  params->volt_offset = GET_REG_MASK(resp, LMR_PLD_MAX_OFFSET);
  status = margin_report_cmd(dev, lane, REPORT_SAMPL_RATE_V(recvn), &resp);
  params->sample_rate_v = GET_REG_MASK(resp, LMR_PLD_SAMPLE_RATE);
  status = margin_report_cmd(dev, lane, REPORT_SAMPL_RATE_T(recvn), &resp);
  params->sample_rate_t = GET_REG_MASK(resp, LMR_PLD_SAMPLE_RATE);
  status = margin_report_cmd(dev, lane, REPORT_MAX_LANES(recvn), &resp);
  params->max_lanes = GET_REG_MASK(resp, LMR_PLD_MAX_LANES);
/* Margin all lanes_n lanes simultaneously */
margin_test_lanes(struct margin_lanes_data arg)
  margin_cmd lane_status;
  /* Timing directions use the Step Margin Timing command (type 3),
     voltage directions the Step Margin Voltage command (type 4) */
  bool timing = (arg.dir == TIM_LEFT || arg.dir == TIM_RIGHT);
  step_cmd = MARG_TIM(arg.dir == TIM_LEFT, steps_done, arg.recv->recvn);
  step_cmd = MARG_VOLT(arg.dir == VOLT_DOWN, steps_done, arg.recv->recvn);

  bool failed_lanes[32] = { 0 }; /* lanes that already stopped stepping */
  u8 alive_lanes = arg.lanes_n;  /* number of lanes still being stepped */

  /* Prepare every lane: reset it and program the error-count limit.
     Results start out assuming the full sweep completes (MARGIN_THR). */
  for (int i = 0; i < arg.lanes_n; i++)
    margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
    margin_set_cmd(arg.recv->dev, arg.results[i].lane,
                   SET_ERROR_LIMIT(arg.recv->error_limit, arg.recv->recvn));
    margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
    arg.results[i].steps[arg.dir] = arg.steps_lane_total;
    arg.results[i].statuses[arg.dir] = MARGIN_THR;

  /* Step outward until every lane has failed or the requested depth is
     reached */
  while (alive_lanes > 0 && steps_done < arg.steps_lane_total)
    /* Only the step-count field changes between iterations */
    step_cmd = SET_REG_MASK(step_cmd, LMR_PLD_MARGIN_T_STEPS, steps_done);
    step_cmd = SET_REG_MASK(step_cmd, LMR_PLD_MARGIN_V_STEPS, steps_done);

    /* Kick off this step on every still-alive lane */
    for (int i = 0; i < arg.lanes_n; i++)
      if (!failed_lanes[i])
        int ctrl_addr = LMR_LANE_CTRL(arg.recv->dev->lmr_cap_addr, arg.results[i].lane);
        pci_write_word(arg.recv->dev->dev, ctrl_addr, step_cmd);

    /* Let the hardware sample before reading results */
    msleep(MARGIN_STEP_MS);

    /* Collect per-lane status; a lane fails when the response doesn't
       match the request, the error count exceeds the limit, or the lane
       can't be returned to No Command */
    for (int i = 0; i < arg.lanes_n; i++)
      if (!failed_lanes[i])
        int status_addr = LMR_LANE_STATUS(arg.recv->dev->lmr_cap_addr, arg.results[i].lane);
        lane_status = pci_read_word(arg.recv->dev->dev, status_addr);
        u8 step_status = GET_REG_MASK(lane_status, LMR_PLD_MARGIN_STS);
        if (!(GET_REG_MASK(lane_status, LMR_CMD_TYPE) == marg_type
              && GET_REG_MASK(lane_status, LMR_CMD_RECVN) == arg.recv->recvn
              && GET_REG_MASK(lane_status, LMR_PLD_ERR_CNT) <= arg.recv->error_limit
              && margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND)))
          failed_lanes[i] = true;
          /* Record the last step that passed */
          arg.results[i].steps[arg.dir] = steps_done - 1;
          arg.results[i].statuses[arg.dir]
            = (step_status == 3 || step_status == 1 ? MARGIN_NAK : MARGIN_LIM);

    arg.steps_lane_done = steps_done;
    margin_log_margining(arg);

  /* Return every lane to normal operation and clear its error log */
  for (int i = 0; i < arg.lanes_n; i++)
    margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
    margin_set_cmd(arg.recv->dev, arg.results[i].lane, CLEAR_ERROR_LOG(arg.recv->recvn));
    margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
    margin_set_cmd(arg.recv->dev, arg.results[i].lane, GO_TO_NORMAL_SETTINGS(arg.recv->recvn));
    margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
272 /* Awaits that Receiver is prepared through prep_dev function */
274 margin_test_receiver(struct margin_dev *dev, u8 recvn, struct margin_args *args,
275 struct margin_results *results)
277 u8 *lanes_to_margin = args->lanes;
278 u8 lanes_n = args->lanes_n;
280 struct margin_params params;
281 struct margin_recv recv = { .dev = dev,
283 .lane_reversal = false,
285 .parallel_lanes = args->parallel_lanes ? args->parallel_lanes : 1,
286 .error_limit = args->error_limit };
288 results->recvn = recvn;
289 results->lanes_n = lanes_n;
290 margin_log_recvn(&recv);
292 if (!margin_check_ready_bit(dev->dev))
294 margin_log("\nMargining Ready bit is Clear.\n");
295 results->test_status = MARGIN_TEST_READY_BIT;
299 if (!read_params_internal(dev, recvn, recv.lane_reversal, ¶ms))
301 recv.lane_reversal = true;
302 if (!read_params_internal(dev, recvn, recv.lane_reversal, ¶ms))
304 margin_log("\nError during caps reading.\n");
305 results->test_status = MARGIN_TEST_CAPS;
310 results->params = params;
312 if (recv.parallel_lanes > params.max_lanes + 1)
313 recv.parallel_lanes = params.max_lanes + 1;
315 results->tim_coef = (double)params.timing_offset / (double)params.timing_steps;
316 results->volt_coef = (double)params.volt_offset / (double)params.volt_steps * 10.0;
318 results->lane_reversal = recv.lane_reversal;
319 results->link_speed = dev->link_speed;
320 results->test_status = MARGIN_TEST_OK;
322 margin_log_receiver(&recv);
324 results->lanes = xmalloc(sizeof(struct margin_res_lane) * lanes_n);
325 for (int i = 0; i < lanes_n; i++)
327 results->lanes[i].lane
328 = recv.lane_reversal ? dev->width - lanes_to_margin[i] - 1 : lanes_to_margin[i];
331 if (args->run_margin)
333 if (args->verbosity > 0)
335 struct margin_lanes_data lanes_data
336 = { .recv = &recv, .verbosity = args->verbosity, .steps_utility = args->steps_utility };
338 enum margin_dir dir[] = { TIM_LEFT, TIM_RIGHT, VOLT_UP, VOLT_DOWN };
342 u8 steps_t = args->steps_t ? args->steps_t : params.timing_steps;
343 u8 steps_v = args->steps_v ? args->steps_v : params.volt_steps;
345 while (lanes_done != lanes_n)
347 use_lanes = (lanes_done + recv.parallel_lanes > lanes_n) ? lanes_n - lanes_done :
349 lanes_data.lanes_numbers = lanes_to_margin + lanes_done;
350 lanes_data.lanes_n = use_lanes;
351 lanes_data.results = results->lanes + lanes_done;
353 for (int i = 0; i < 4; i++)
355 bool timing = dir[i] == TIM_LEFT || dir[i] == TIM_RIGHT;
356 if (!timing && !params.volt_support)
358 if (dir[i] == TIM_RIGHT && !params.ind_left_right_tim)
360 if (dir[i] == VOLT_DOWN && !params.ind_up_down_volt)
363 lanes_data.ind = timing ? params.ind_left_right_tim : params.ind_up_down_volt;
364 lanes_data.dir = dir[i];
365 lanes_data.steps_lane_total = timing ? steps_t : steps_v;
366 if (*args->steps_utility >= lanes_data.steps_lane_total)
367 *args->steps_utility -= lanes_data.steps_lane_total;
369 *args->steps_utility = 0;
370 margin_test_lanes(lanes_data);
372 lanes_done += use_lanes;
374 if (args->verbosity > 0)
376 if (recv.lane_reversal)
378 for (int i = 0; i < lanes_n; i++)
379 results->lanes[i].lane = lanes_to_margin[i];
/* Read the margining capabilities of receiver recvn of dev without a
   user-prepared link: locate the link partner, prepare the link, read the
   params, then restore the original link state. */
margin_read_params(struct pci_access *pacc, struct pci_dev *dev, u8 recvn,
                   struct margin_params *params)
  struct pci_cap *cap = pci_find_cap(dev, PCI_CAP_ID_EXP, PCI_CAP_NORMAL);
  /* The PCIe port type determines which side of the link dev is on */
  u8 dev_dir = GET_REG_MASK(pci_read_word(dev, cap->addr + PCI_EXP_FLAGS), PCI_EXP_FLAGS_TYPE);
  if (dev_dir == PCI_EXP_TYPE_ROOT_PORT || dev_dir == PCI_EXP_TYPE_DOWNSTREAM)

  /* Receiver number 6 presumably belongs to the upstream component
     (TODO confirm against the LMR spec) — reject mismatched pairings */
  if (dev_down && recvn == 6)
  if (!dev_down && recvn != 6)

  struct pci_dev *down = NULL;
  struct pci_dev *up = NULL;
  struct margin_link link;

  /* Find the link partner by matching bus numbers within the same domain */
  for (struct pci_dev *p = pacc->devices; p; p = p->next)
    if (dev_down && pci_read_byte(dev, PCI_SECONDARY_BUS) == p->bus && dev->domain == p->domain
    else if (!dev_down && pci_read_byte(p, PCI_SECONDARY_BUS) == dev->bus
             && dev->domain == p->domain)

  if (!margin_fill_link(down, up, &link))

  /* The device under test is the port whose receiver we are querying */
  struct margin_dev *dut = (dev_down ? &link.down_port : &link.up_port);
  if (!margin_check_ready_bit(dut->dev))

  if (!margin_prep_link(&link))

  /* Try normal lane orientation first, then retry assuming lane reversal */
  bool lane_reversal = false;
  status = read_params_internal(dut, recvn, lane_reversal, params);
    lane_reversal = true;
    status = read_params_internal(dut, recvn, lane_reversal, params);

  margin_restore_link(&link);
/* Fill defaults into args and validate them against the device.
   Returns MARGIN_TEST_OK or the first validation error found. */
enum margin_test_status
margin_process_args(struct margin_dev *dev, struct margin_args *args)
  /* Two link receivers plus two per retimer */
  u8 receivers_n = 2 + 2 * dev->retimers_n;

  /* Default receiver list: 1 .. receivers_n-1, then receiver 6 */
  for (int i = 1; i < receivers_n; i++)
    args->recvs[i - 1] = i;
  args->recvs[receivers_n - 1] = 6;
  args->recvs_n = receivers_n;

  /* Validate user-specified receiver numbers: valid range is 1-6, and
     anything other than 6 must exist on this link */
  for (int i = 0; i < args->recvs_n; i++)
    u8 recvn = args->recvs[i];
    if (recvn < 1 || recvn > 6 || (recvn != 6 && recvn > receivers_n - 1))
      return MARGIN_TEST_ARGS_RECVS;

  /* Default lane list: every lane of the link */
  args->lanes_n = dev->width;
  for (int i = 0; i < args->lanes_n; i++)

  /* Validate user-specified lane numbers against the link width */
  for (int i = 0; i < args->lanes_n; i++)
    if (args->lanes[i] >= dev->width)
      return MARGIN_TEST_ARGS_LANES;

  return MARGIN_TEST_OK;
/* Test every requested receiver of the link; returns a heap-allocated
   array of per-receiver results (caller releases it with
   margin_free_results) and stores its length in *recvs_n. */
struct margin_results *
margin_test_link(struct margin_link *link, struct margin_args *args, u8 *recvs_n)
  bool status = margin_prep_link(link);

  /* If link preparation failed, a single ASPM-failure result is returned */
  u8 receivers_n = status ? args->recvs_n : 1;
  u8 *receivers = args->recvs;

  margin_log_link(link);

  struct margin_results *results = xmalloc(sizeof(*results) * receivers_n);

  results[0].test_status = MARGIN_TEST_ASPM;
  margin_log("\nCouldn't disable ASPM on the given Link.\n");

  /* Receiver 6 is tested through the upstream port, all others through
     the downstream port */
  struct margin_dev *dut;
  for (int i = 0; i < receivers_n; i++)
    dut = receivers[i] == 6 ? &link->up_port : &link->down_port;
    margin_test_receiver(dut, receivers[i], args, &results[i]);

  margin_restore_link(link);

  *recvs_n = receivers_n;
/* Release the per-receiver results array produced by margin_test_link().
   The lanes array is allocated only when a receiver's test succeeded. */
margin_free_results(struct margin_results *results, u8 results_n)
  for (int i = 0; i < results_n; i++)
    if (results[i].test_status == MARGIN_TEST_OK)
      free(results[i].lanes);