@@ -2279,17 +2279,23 @@ static void a0_portstatus(struct hfi1_pportdata *ppd,
 {
 	if (!is_bx(ppd->dd)) {
 		unsigned long vl;
-		u64 max_vl_xmit_wait = 0, tmp;
+		u64 sum_vl_xmit_wait = 0;
 		u32 vl_all_mask = VL_MASK_ALL;
 
 		for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
 				 8 * sizeof(vl_all_mask)) {
-			tmp = read_port_cntr(ppd, C_TX_WAIT_VL,
-					     idx_from_vl(vl));
-			if (tmp > max_vl_xmit_wait)
-				max_vl_xmit_wait = tmp;
+			u64 tmp = sum_vl_xmit_wait +
+				  read_port_cntr(ppd, C_TX_WAIT_VL,
+						 idx_from_vl(vl));
+			if (tmp < sum_vl_xmit_wait) {
+				/* we wrapped */
+				sum_vl_xmit_wait = (u64)~0;
+				break;
+			}
+			sum_vl_xmit_wait = tmp;
 		}
-		rsp->port_xmit_wait = cpu_to_be64(max_vl_xmit_wait);
+		if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
+			rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
 	}
 }
 
@@ -2491,18 +2497,19 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
 	return error_counter_summary;
 }
 
-static void a0_datacounters(struct hfi1_devdata *dd, struct _port_dctrs *rsp,
+static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp,
 			    u32 vl_select_mask)
 {
-	if (!is_bx(dd)) {
+	if (!is_bx(ppd->dd)) {
 		unsigned long vl;
-		int vfi = 0;
 		u64 sum_vl_xmit_wait = 0;
+		u32 vl_all_mask = VL_MASK_ALL;
 
-		for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
-				 8 * sizeof(vl_select_mask)) {
+		for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
+				 8 * sizeof(vl_all_mask)) {
 			u64 tmp = sum_vl_xmit_wait +
-				  be64_to_cpu(rsp->vls[vfi++].port_vl_xmit_wait);
+				  read_port_cntr(ppd, C_TX_WAIT_VL,
+						 idx_from_vl(vl));
 			if (tmp < sum_vl_xmit_wait) {
 				/* we wrapped */
 				sum_vl_xmit_wait = (u64) ~0;
@@ -2665,7 +2672,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
 		vfi++;
 	}
 
-	a0_datacounters(dd, rsp, vl_select_mask);
+	a0_datacounters(ppd, rsp, vl_select_mask);
 
 	if (resp_len)
 		*resp_len += response_data_size;
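
For reference, a minimal standalone sketch (illustrative only, not part of the patch; the array and function names are hypothetical stand-ins for the per-VL read_port_cntr() calls) of the wrap-detecting summation both a0_portstatus() and a0_datacounters() use above: accumulate the per-VL transmit-wait counters and saturate to all-ones if the 64-bit sum overflows.

#include <stdint.h>
#include <stdio.h>

/* Sum counter values, saturating at ~0 on unsigned 64-bit wraparound. */
static uint64_t sum_with_saturation(const uint64_t *vals, int n)
{
	uint64_t sum = 0;
	int i;

	for (i = 0; i < n; i++) {
		uint64_t tmp = sum + vals[i];

		if (tmp < sum)			/* we wrapped */
			return ~(uint64_t)0;	/* saturate, as the patch does */
		sum = tmp;
	}
	return sum;
}

int main(void)
{
	uint64_t vl_waits[] = { 100, 250, 7 };

	printf("%llu\n",
	       (unsigned long long)sum_with_saturation(vl_waits, 3));
	return 0;
}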