 * The PCI Utilities -- Obtain the margin information of the Link
 *
 * Copyright (c) 2023-2024 KNS Group LLC (YADRO)
 *
 * Can be freely distributed and used under the terms of the GNU GPL v2+.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
/* Macro helpers for Margining command parsing */

/* A margining command/response is a 16-bit value matching the layout of the
   Margining Lane Control / Lane Status registers (PCIe Lane Margining at the
   Receiver feature).  */
typedef u16 margin_cmd;

/* Margining command parsing */

#define LMR_CMD_RECVN MASK(2, 0)    /* Receiver Number field */
#define LMR_CMD_TYPE MASK(5, 3)     /* Margin Type field */
#define LMR_CMD_PAYLOAD MASK(15, 8) /* Margin Payload field */

// Report Capabilities
#define LMR_PLD_VOLT_SUPPORT BIT(8)          /* Voltage margining supported */
#define LMR_PLD_IND_U_D_VOLT BIT(9)          /* Independent up/down voltage margining */
#define LMR_PLD_IND_L_R_TIM BIT(10)          /* Independent left/right timing margining */
#define LMR_PLD_SAMPLE_REPORT_METHOD BIT(11) /* Sample reporting method */
#define LMR_PLD_IND_ERR_SAMPLER BIT(12)      /* Independent error sampler */

/* Payload fields of the various "report" command responses */
#define LMR_PLD_MAX_T_STEPS MASK(13, 8) /* Max timing offset steps */
#define LMR_PLD_MAX_V_STEPS MASK(14, 8) /* Max voltage offset steps */
#define LMR_PLD_MAX_OFFSET MASK(14, 8)  /* Max timing/voltage offset */
#define LMR_PLD_MAX_LANES MASK(12, 8)   /* Max simultaneously margined lanes */
#define LMR_PLD_SAMPLE_RATE MASK(13, 8)

/* Step Margin (timing) command payload */
#define LMR_PLD_MARGIN_T_STEPS MASK(13, 8)
#define LMR_PLD_T_GO_LEFT BIT(14) /* 1 = margin to the left of center */

/* Step Margin (voltage) command payload */
#define LMR_PLD_MARGIN_V_STEPS MASK(14, 8)
#define LMR_PLD_V_GO_DOWN BIT(15) /* 1 = margin below center */

/* Step Margin response payload */
#define LMR_PLD_ERR_CNT MASK(13, 8)     /* Error count observed at this step */
#define LMR_PLD_MARGIN_STS MASK(15, 14) /* Step margin execution status */

/* Address calc macro for Lanes Margining registers */

/* Per-lane Control/Status register pairs start 8 bytes into the capability,
   4 bytes per lane (Control at +0, Status at +2 of each pair) */
#define LMR_LANE_CTRL(lmr_cap_addr, lane) ((lmr_cap_addr) + 8 + 4 * (lane))
#define LMR_LANE_STATUS(lmr_cap_addr, lane) ((lmr_cap_addr) + 10 + 4 * (lane))

/* Margining Commands */

/* Step Margin commands: margin types 3 (timing) and 4 (voltage) */
#define MARG_TIM(go_left, step, recvn) margin_make_cmd(((go_left) << 6) | (step), 3, recvn)
#define MARG_VOLT(go_down, step, recvn) margin_make_cmd(((go_down) << 7) | (step), 4, recvn)

/* Report commands (margin type 1) */
#define REPORT_CAPS(recvn) margin_make_cmd(0x88, 1, recvn)
#define REPORT_VOL_STEPS(recvn) margin_make_cmd(0x89, 1, recvn)
#define REPORT_TIM_STEPS(recvn) margin_make_cmd(0x8A, 1, recvn)
#define REPORT_TIM_OFFSET(recvn) margin_make_cmd(0x8B, 1, recvn)
#define REPORT_VOL_OFFSET(recvn) margin_make_cmd(0x8C, 1, recvn)
#define REPORT_SAMPL_RATE_V(recvn) margin_make_cmd(0x8D, 1, recvn)
#define REPORT_SAMPL_RATE_T(recvn) margin_make_cmd(0x8E, 1, recvn)
#define REPORT_SAMPLE_CNT(recvn) margin_make_cmd(0x8F, 1, recvn)
#define REPORT_MAX_LANES(recvn) margin_make_cmd(0x90, 1, recvn)

/* Set commands (margin type 2) and the idle "No Command" (type 7) */
#define NO_COMMAND margin_make_cmd(0x9C, 7, 0)
#define CLEAR_ERROR_LOG(recvn) margin_make_cmd(0x55, 2, recvn)
#define GO_TO_NORMAL_SETTINGS(recvn) margin_make_cmd(0xF, 2, recvn)
#define SET_ERROR_LIMIT(error_limit, recvn) margin_make_cmd(0xC0 | (error_limit), 2, recvn)
/* Millisecond sleep helper used between margining register accesses.
   Platform-specific; the function signature and several branches are not
   visible in this chunk.  */
#if defined(PCI_OS_WINDOWS)
#elif defined(PCI_OS_DJGPP)
  /* NOTE(review): delays shorter than 11264 us are special-cased on DJGPP —
     the handling itself is not visible here; confirm against full source */
  if (msec * 1000 < 11264)
  /* POSIX path: nanosleep, restarted while interrupted by signals */
  ts.tv_sec = msec / 1000;
  ts.tv_nsec = (msec % 1000) * 1000000;
  /* nanosleep updates ts with the remaining time on EINTR, so the retry
     only sleeps for what is left of the interval */
  res = nanosleep(&ts, &ts);
  } while (res && errno == EINTR);
/* Pack the payload, margin type and receiver number fields into a 16-bit
   margining command word (layout defined by the LMR_CMD_* masks).  */
margin_make_cmd(u8 payload, u8 type, u8 recvn)
  return SET_REG_MASK(0, LMR_CMD_PAYLOAD, payload) | SET_REG_MASK(0, LMR_CMD_TYPE, type)
         | SET_REG_MASK(0, LMR_CMD_RECVN, recvn);
/* Write cmd to the lane's Margining Lane Control register and verify that
   the Lane Status register echoes the exact same word back.  Returns the
   result of that comparison.  (Any delay between the write and the readback
   is not visible in this chunk.)  */
margin_set_cmd(struct margin_dev *dev, u8 lane, margin_cmd cmd)
  pci_write_word(dev->dev, LMR_LANE_CTRL(dev->lmr_cap_addr, lane), cmd);
  return pci_read_word(dev->dev, LMR_LANE_STATUS(dev->lmr_cap_addr, lane)) == cmd;
/* Issue a "report" command on the given lane and read the response from the
   Lane Status register into *result.  The response payload legitimately
   differs from the command's, so only the margin-type and receiver-number
   fields are checked for a match; on success the lane is also returned to
   the No Command idle state.  */
margin_report_cmd(struct margin_dev *dev, u8 lane, margin_cmd cmd, margin_cmd *result)
  pci_write_word(dev->dev, LMR_LANE_CTRL(dev->lmr_cap_addr, lane), cmd);
  *result = pci_read_word(dev->dev, LMR_LANE_STATUS(dev->lmr_cap_addr, lane));
  return GET_REG_MASK(*result, LMR_CMD_TYPE) == GET_REG_MASK(cmd, LMR_CMD_TYPE)
         && GET_REG_MASK(*result, LMR_CMD_RECVN) == GET_REG_MASK(cmd, LMR_CMD_RECVN)
         && margin_set_cmd(dev, lane, NO_COMMAND);
/* Patch receiver parameters and per-receiver arguments for hardware with
   known margining quirks (keyed on dev->hw).  */
margin_apply_hw_quirks(struct margin_recv *recv, struct margin_link_args *args)
  switch (recv->dev->hw)
    case MARGIN_ICE_LAKE_RC:
      /* Quirk applies only to receiver 1 on Ice Lake Root Complex */
      if (recv->recvn == 1)
          /* Force the voltage offset to 12 and mark timing margining as
             "one side covers the whole range" for this receiver */
          recv->params->volt_offset = 12;
          args->recv_args[recv->recvn - 1].t.one_side_is_whole = true;
          args->recv_args[recv->recvn - 1].t.valid = true;
/* Read the margining capabilities of receiver recvn into *params by issuing
   the full sequence of report commands on a single lane: lane 0 normally, or
   the highest physical lane when lane_reversal is set.  The error-handling
   lines between the report calls are not visible in this chunk.  */
read_params_internal(struct margin_dev *dev, u8 recvn, bool lane_reversal,
                     struct margin_params *params)
  u8 lane = lane_reversal ? dev->max_width - 1 : 0;
  /* Make sure the lane starts from the idle state */
  margin_set_cmd(dev, lane, NO_COMMAND);
  bool status = margin_report_cmd(dev, lane, REPORT_CAPS(recvn), &resp);
  /* Capability flags from the Report Capabilities response payload */
  params->volt_support = GET_REG_MASK(resp, LMR_PLD_VOLT_SUPPORT);
  params->ind_up_down_volt = GET_REG_MASK(resp, LMR_PLD_IND_U_D_VOLT);
  params->ind_left_right_tim = GET_REG_MASK(resp, LMR_PLD_IND_L_R_TIM);
  params->sample_report_method = GET_REG_MASK(resp, LMR_PLD_SAMPLE_REPORT_METHOD);
  params->ind_error_sampler = GET_REG_MASK(resp, LMR_PLD_IND_ERR_SAMPLER);
  status = margin_report_cmd(dev, lane, REPORT_VOL_STEPS(recvn), &resp);
  params->volt_steps = GET_REG_MASK(resp, LMR_PLD_MAX_V_STEPS);
  status = margin_report_cmd(dev, lane, REPORT_TIM_STEPS(recvn), &resp);
  params->timing_steps = GET_REG_MASK(resp, LMR_PLD_MAX_T_STEPS);
  status = margin_report_cmd(dev, lane, REPORT_TIM_OFFSET(recvn), &resp);
  params->timing_offset = GET_REG_MASK(resp, LMR_PLD_MAX_OFFSET);
  status = margin_report_cmd(dev, lane, REPORT_VOL_OFFSET(recvn), &resp);
  params->volt_offset = GET_REG_MASK(resp, LMR_PLD_MAX_OFFSET);
  status = margin_report_cmd(dev, lane, REPORT_SAMPL_RATE_V(recvn), &resp);
  params->sample_rate_v = GET_REG_MASK(resp, LMR_PLD_SAMPLE_RATE);
  status = margin_report_cmd(dev, lane, REPORT_SAMPL_RATE_T(recvn), &resp);
  params->sample_rate_t = GET_REG_MASK(resp, LMR_PLD_SAMPLE_RATE);
  status = margin_report_cmd(dev, lane, REPORT_MAX_LANES(recvn), &resp);
  params->max_lanes = GET_REG_MASK(resp, LMR_PLD_MAX_LANES);
/* Margin all lanes_n lanes simultaneously */
/* Steps the margin point in one direction (arg.dir), one step per dwell
   period, on every lane in arg.results.  A lane drops out of the run once
   its status readback fails the checks below (mismatched echo, error count
   over the limit, or failure to return to No Command).  Afterwards all lanes
   are restored to normal settings.  Several lines of the original function
   (loop-variable declarations, brace lines, alive_lanes bookkeeping) are not
   visible in this chunk.  */
margin_test_lanes(struct margin_lanes_data arg)
  margin_cmd lane_status;
  bool timing = (arg.dir == TIM_LEFT || arg.dir == TIM_RIGHT);
  /* Build the initial step command for the requested direction */
  step_cmd = MARG_TIM(arg.dir == TIM_LEFT, steps_done, arg.recv->recvn);
  step_cmd = MARG_VOLT(arg.dir == VOLT_DOWN, steps_done, arg.recv->recvn);
  bool failed_lanes[32] = { 0 };
  u8 alive_lanes = arg.lanes_n;
  /* Prepare each lane: set the error limit and pre-record the optimistic
     outcome (all steps passed, MARGIN_THR), overwritten on failure below */
  for (int i = 0; i < arg.lanes_n; i++)
      margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
      margin_set_cmd(arg.recv->dev, arg.results[i].lane,
                     SET_ERROR_LIMIT(arg.recv->error_limit, arg.recv->recvn));
      margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
      arg.results[i].steps[arg.dir] = arg.steps_lane_total;
      arg.results[i].statuses[arg.dir] = MARGIN_THR;
  while (alive_lanes > 0 && steps_done < arg.steps_lane_total)
      /* Advance the step count in the command payload (timing or voltage
         field depending on direction) */
      step_cmd = SET_REG_MASK(step_cmd, LMR_PLD_MARGIN_T_STEPS, steps_done);
      step_cmd = SET_REG_MASK(step_cmd, LMR_PLD_MARGIN_V_STEPS, steps_done);
      /* Kick off this step on every still-alive lane */
      for (int i = 0; i < arg.lanes_n; i++)
          if (!failed_lanes[i])
              int ctrl_addr = LMR_LANE_CTRL(arg.recv->dev->lmr_cap_addr, arg.results[i].lane);
              pci_write_word(arg.recv->dev->dev, ctrl_addr, step_cmd);
      /* Let errors accumulate for the dwell period (dwell_time is scaled by
         1000, so presumably seconds — msleep takes milliseconds) */
      msleep(arg.recv->dwell_time * 1000);
      for (int i = 0; i < arg.lanes_n; i++)
          if (!failed_lanes[i])
              int status_addr = LMR_LANE_STATUS(arg.recv->dev->lmr_cap_addr, arg.results[i].lane);
              lane_status = pci_read_word(arg.recv->dev->dev, status_addr);
              u8 step_status = GET_REG_MASK(lane_status, LMR_PLD_MARGIN_STS);
              /* Lane fails the step if the echoed type/receiver don't match,
                 the error count exceeded the limit, or the lane can't be
                 returned to No Command */
              if (!(GET_REG_MASK(lane_status, LMR_CMD_TYPE) == marg_type
                    && GET_REG_MASK(lane_status, LMR_CMD_RECVN) == arg.recv->recvn
                    && GET_REG_MASK(lane_status, LMR_PLD_ERR_CNT) <= arg.recv->error_limit
                    && margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND)))
                  failed_lanes[i] = true;
                  /* The last passing step is the previous one */
                  arg.results[i].steps[arg.dir] = steps_done - 1;
                  /* Margin status 1 or 3 -> command not accepted (NAK);
                     otherwise the error limit was hit (LIM) */
                  arg.results[i].statuses[arg.dir]
                      = (step_status == 3 || step_status == 1 ? MARGIN_NAK : MARGIN_LIM);
      arg.steps_lane_done = steps_done;
      margin_log_margining(arg);
  /* Restore every lane to normal operation and clear its error log */
  for (int i = 0; i < arg.lanes_n; i++)
      margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
      margin_set_cmd(arg.recv->dev, arg.results[i].lane, CLEAR_ERROR_LOG(arg.recv->recvn));
      margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
      margin_set_cmd(arg.recv->dev, arg.results[i].lane, GO_TO_NORMAL_SETTINGS(arg.recv->recvn));
      margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
/* Awaits that Receiver is prepared through prep_dev function */
/* Run the full margining flow for one receiver: read its capabilities
   (retrying with reversed lane numbering to detect lane reversal), apply
   hardware quirks, compute step-to-offset coefficients, then — if margining
   was requested — margin the selected lanes in groups of parallel_lanes in
   every supported direction.  Results are written into *results.  Some lines
   of the original function (returns, braces, loop bodies) are not visible in
   this chunk.  */
margin_test_receiver(struct margin_dev *dev, u8 recvn, struct margin_link_args *args,
                     struct margin_results *results)
  u8 *lanes_to_margin = args->lanes;
  u8 lanes_n = args->lanes_n;
  struct margin_params params;
  struct margin_recv recv = { .dev = dev,
                              .lane_reversal = false,
                              /* 0 means "one lane at a time" */
                              .parallel_lanes = args->parallel_lanes ? args->parallel_lanes : 1,
                              .error_limit = args->common->error_limit,
                              .dwell_time = args->common->dwell_time };
  results->recvn = recvn;
  results->lanes_n = lanes_n;
  margin_log_recvn(&recv);
  if (!margin_check_ready_bit(dev->dev))
      margin_log("\nMargining Ready bit is Clear.\n");
      results->test_status = MARGIN_TEST_READY_BIT;
  /* NOTE(review): "¶ms" on the next two calls is mojibake for "&params" —
     repair in the actual source */
  if (!read_params_internal(dev, recvn, recv.lane_reversal, ¶ms))
      /* First attempt failed — retry assuming the lanes are reversed */
      recv.lane_reversal = true;
      if (!read_params_internal(dev, recvn, recv.lane_reversal, ¶ms))
          margin_log("\nError during caps reading.\n");
          results->test_status = MARGIN_TEST_CAPS;
  results->params = params;
  /* Clamp parallel_lanes to the receiver-reported maximum (max_lanes is a
     zero-based count, hence the +1) */
  if (recv.parallel_lanes > params.max_lanes + 1)
    recv.parallel_lanes = params.max_lanes + 1;
  margin_apply_hw_quirks(&recv, args);
  margin_log_hw_quirks(&recv);
  /* When the receiver reports offset 0, fall back to 50 as the assumed
     full-range offset — presumably 50% UI / 50 mV-scale; confirm */
  results->tim_off_reported = params.timing_offset != 0;
  results->volt_off_reported = params.volt_offset != 0;
  double tim_offset = results->tim_off_reported ? (double)params.timing_offset : 50.0;
  double volt_offset = results->volt_off_reported ? (double)params.volt_offset : 50.0;
  /* Per-step coefficients used to convert step counts into offsets */
  results->tim_coef = tim_offset / (double)params.timing_steps;
  results->volt_coef = volt_offset / (double)params.volt_steps * 10.0;
  results->lane_reversal = recv.lane_reversal;
  results->link_speed = dev->link_speed;
  results->test_status = MARGIN_TEST_OK;
  margin_log_receiver(&recv);
  results->lanes = xmalloc(sizeof(struct margin_res_lane) * lanes_n);
  /* Translate logical lane numbers into physical ones when reversed */
  for (int i = 0; i < lanes_n; i++)
      results->lanes[i].lane
          = recv.lane_reversal ? dev->max_width - lanes_to_margin[i] - 1 : lanes_to_margin[i];
  if (args->common->run_margin)
      if (args->common->verbosity > 0)
      struct margin_lanes_data lanes_data = { .recv = &recv,
                                              .verbosity = args->common->verbosity,
                                              .steps_utility = &args->common->steps_utility };
      /* All four margining directions; unsupported ones are skipped below */
      enum margin_dir dir[] = { TIM_LEFT, TIM_RIGHT, VOLT_UP, VOLT_DOWN };
      /* User-requested step counts override the receiver maximums */
      u8 steps_t = args->steps_t ? args->steps_t : params.timing_steps;
      u8 steps_v = args->steps_v ? args->steps_v : params.volt_steps;
      while (lanes_done != lanes_n)
          /* Margin at most parallel_lanes lanes per pass */
          use_lanes = (lanes_done + recv.parallel_lanes > lanes_n) ? lanes_n - lanes_done :
          lanes_data.lanes_numbers = lanes_to_margin + lanes_done;
          lanes_data.lanes_n = use_lanes;
          lanes_data.results = results->lanes + lanes_done;
          for (int i = 0; i < 4; i++)
              bool timing = dir[i] == TIM_LEFT || dir[i] == TIM_RIGHT;
              /* Skip directions the receiver cannot margin independently */
              if (!timing && !params.volt_support)
              if (dir[i] == TIM_RIGHT && !params.ind_left_right_tim)
              if (dir[i] == VOLT_DOWN && !params.ind_up_down_volt)
              lanes_data.ind = timing ? params.ind_left_right_tim : params.ind_up_down_volt;
              lanes_data.dir = dir[i];
              lanes_data.steps_lane_total = timing ? steps_t : steps_v;
              /* Budget bookkeeping for the overall progress counter */
              if (args->common->steps_utility >= lanes_data.steps_lane_total)
                args->common->steps_utility -= lanes_data.steps_lane_total;
                args->common->steps_utility = 0;
              margin_test_lanes(lanes_data);
          lanes_done += use_lanes;
      if (args->common->verbosity > 0)
      /* Report results under the user-facing (logical) lane numbers */
      if (recv.lane_reversal)
          for (int i = 0; i < lanes_n; i++)
            results->lanes[i].lane = lanes_to_margin[i];
/* Public helper: read the margining capabilities of receiver recvn on the
   Link that dev belongs to, without performing any margining.  Finds the
   Downstream/Upstream port pair, checks readiness, prepares the link, reads
   the parameters (retrying with lane reversal), then restores the link.
   The early-return statements after each check are not visible in this
   chunk.  */
margin_read_params(struct pci_access *pacc, struct pci_dev *dev, u8 recvn,
                   struct margin_params *params)
  struct pci_cap *cap = pci_find_cap(dev, PCI_CAP_ID_EXP, PCI_CAP_NORMAL);
  bool dev_down = margin_port_is_down(dev);
  /* Receiver number must match the side of the Link being queried —
     recvn 6 is tested on the Upstream Port, 1..5 on the Downstream Port
     (presumably these two checks reject mismatched combinations) */
  if (dev_down && recvn == 6)
  if (!dev_down && recvn != 6)
  struct pci_dev *down = NULL;
  struct pci_dev *up = NULL;
  struct margin_link link;
  if (!margin_find_pair(pacc, dev, &down, &up))
  if (!margin_fill_link(down, up, &link))
  /* The device under test is whichever side of the pair dev is on */
  struct margin_dev *dut = (dev_down ? &link.down_port : &link.up_port);
  if (!margin_check_ready_bit(dut->dev))
  if (!margin_prep_link(&link))
  bool lane_reversal = false;
  status = read_params_internal(dut, recvn, lane_reversal, params);
      /* Retry assuming reversed lane ordering */
      lane_reversal = true;
      status = read_params_internal(dut, recvn, lane_reversal, params);
  margin_restore_link(&link);
/* Validate and default-fill the receiver and lane lists in link->args.
   Returns MARGIN_TEST_OK, or the first validation error encountered.
   Some brace/condition lines are not visible in this chunk.  */
enum margin_test_status
margin_process_args(struct margin_link *link)
  struct margin_dev *dev = &link->down_port;
  struct margin_link_args *args = &link->args;
  /* Total testable receivers: two port receivers plus two per retimer */
  u8 receivers_n = 2 + 2 * dev->retimers_n;
  /* Default receiver list: 1..(receivers_n-1) plus receiver 6 at the end */
  for (int i = 1; i < receivers_n; i++)
    args->recvs[i - 1] = i;
  args->recvs[receivers_n - 1] = 6;
  args->recvs_n = receivers_n;
  /* Validate explicitly requested receiver numbers: must be 1..6, and
     anything other than 6 must exist on this Link */
  for (int i = 0; i < args->recvs_n; i++)
      u8 recvn = args->recvs[i];
      if (recvn < 1 || recvn > 6 || (recvn != 6 && recvn > receivers_n - 1))
          return MARGIN_TEST_ARGS_RECVS;
  /* Default lane list: every lane of the negotiated width */
  args->lanes_n = dev->neg_width;
  for (int i = 0; i < args->lanes_n; i++)
  /* Validate explicitly requested lane numbers against the link width */
  for (int i = 0; i < args->lanes_n; i++)
      if (args->lanes[i] >= dev->neg_width)
          return MARGIN_TEST_ARGS_LANES;
  return MARGIN_TEST_OK;
/* Test all requested receivers of the Link.  Returns an xmalloc'ed array of
   per-receiver results (release with margin_free_results) and stores its
   length in *recvs_n.  If the link cannot be prepared (ASPM not disabled),
   a single result carrying MARGIN_TEST_ASPM is returned instead.  */
struct margin_results *
margin_test_link(struct margin_link *link, u8 *recvs_n)
  struct margin_link_args *args = &link->args;
  bool status = margin_prep_link(link);
  /* On prep failure only one slot is needed — for the error result */
  u8 receivers_n = status ? args->recvs_n : 1;
  u8 *receivers = args->recvs;
  margin_log_link(link);
  struct margin_results *results = xmalloc(sizeof(*results) * receivers_n);
  results[0].test_status = MARGIN_TEST_ASPM;
  margin_log("\nCouldn't disable ASPM on the given Link.\n");
  struct margin_dev *dut;
  for (int i = 0; i < receivers_n; i++)
      /* Receiver 6 belongs to the Upstream Port; all others to the
         Downstream Port */
      dut = receivers[i] == 6 ? &link->up_port : &link->down_port;
      margin_test_receiver(dut, receivers[i], args, &results[i]);
  margin_restore_link(link);
  *recvs_n = receivers_n;
/* Release per-lane result arrays inside a results array returned by
   margin_test_link.  (The free of the results array itself is not visible
   in this chunk.)  */
margin_free_results(struct margin_results *results, u8 results_n)
  for (int i = 0; i < results_n; i++)
      /* lanes is only allocated when the receiver test succeeded */
      if (results[i].test_status == MARGIN_TEST_OK)
        free(results[i].lanes);