Merge branch 'qed-misc-cleanups-and-fixes'

Yuval Mintz says:

====================
qed: Misc cleanups and fixes

Patches #1 and #2 revolve around register access performed by the driver;
the first merely adds some debug, while the second fixes incorrect PTT
usage and prevents issues similar to those addressed by 6f437d4319
("qed: Don't use attention PTT for configuring BW").

Patch #3 better configures the HW for architectures where the cacheline isn't 64B.

Patches #4-#8 all affect iSCSI-related functionality -
adding statistics information [both to the driver & the management firmware],
passing information on the number of resources to qedi, and simplifying
the out-of-order implementation in SW.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller <davem@davemloft.net> on 2017-04-06 14:26:32 -07:00
commit 54280168f3
17 changed files with 275 additions and 153 deletions
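For context on patches #1 and #2: throughout the series, register accesses move from borrowing p_hwfn->p_main_ptt to acquiring a free PTT window for just the duration of the access. The fragment below is a minimal sketch of that acquire/use/release pattern, modeled on the qed_ptt_acquire()/qed_rd()/qed_ptt_release() calls visible in the hunks that follow; the helper name qed_example_read_reg() and its arguments are hypothetical and not part of the patch.

/* Hypothetical helper illustrating the PTT acquire/use/release pattern
 * promoted by this series; not part of the patch itself.
 */
static int qed_example_read_reg(struct qed_hwfn *p_hwfn, u32 reg, u32 *p_val)
{
	struct qed_ptt *p_ptt;

	/* Take a free PTT window rather than reusing p_main_ptt, which may
	 * already be in use in another context (e.g. attention handling).
	 */
	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	*p_val = qed_rd(p_hwfn, p_ptt, reg);

	qed_ptt_release(p_hwfn, p_ptt);
	return 0;
}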

View File

@ -225,8 +225,9 @@ enum QED_FEATURE {
QED_PF_L2_QUE,
QED_VF,
QED_RDMA_CNQ,
QED_VF_L2_QUE,
QED_ISCSI_CQ,
QED_FCOE_CQ,
QED_VF_L2_QUE,
QED_MAX_FEATURES,
};

View File

@ -1438,7 +1438,7 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
}
}
void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_qm_pf_rt_init_params params;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
@ -1464,7 +1464,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
params.pq_params = qm_info->qm_pq_params;
params.vport_params = qm_info->qm_vport_params;
qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
qed_qm_pf_rt_init(p_hwfn, p_ptt, &params);
}
/* CM PF */
@ -1822,9 +1822,9 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
qed_prs_init_common(p_hwfn);
}
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
qed_qm_init_pf(p_hwfn);
qed_qm_init_pf(p_hwfn, p_ptt);
qed_cm_init_pf(p_hwfn);
qed_dq_init_pf(p_hwfn);
qed_cdu_init_pf(p_hwfn);

View File

@ -172,19 +172,18 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
/**
* @brief qed_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
*
*
*
* @param p_hwfn
* @param p_ptt
*/
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief qed_qm_init_pf - Initialize the QM PF phase, per path
*
* @param p_hwfn
* @param p_ptt
*/
void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief Reconfigures QM pf on the fly

View File

@ -75,7 +75,8 @@ enum BAR_ID {
BAR_ID_1 /* Used for doorbells */
};
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, enum BAR_ID bar_id)
{
u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
@ -84,7 +85,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
if (IS_VF(p_hwfn->cdev))
return 1 << 17;
val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
val = qed_rd(p_hwfn, p_ptt, bar_reg);
if (val)
return 1 << (val + 15);
@ -780,7 +781,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_init_clear_rt_data(p_hwfn);
/* prepare QM portion of runtime array */
qed_qm_init_pf(p_hwfn);
qed_qm_init_pf(p_hwfn, p_ptt);
/* activate init tool on runtime array */
rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@ -1191,6 +1192,57 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev)
}
}
static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
u32 val, wr_mbs, cache_line_size;
val = qed_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
switch (val) {
case 0:
wr_mbs = 128;
break;
case 1:
wr_mbs = 256;
break;
case 2:
wr_mbs = 512;
break;
default:
DP_INFO(p_hwfn,
"Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
val);
return;
}
cache_line_size = min_t(u32, L1_CACHE_BYTES, wr_mbs);
switch (cache_line_size) {
case 32:
val = 0;
break;
case 64:
val = 1;
break;
case 128:
val = 2;
break;
case 256:
val = 3;
break;
default:
DP_INFO(p_hwfn,
"Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
cache_line_size);
}
if (L1_CACHE_BYTES > wr_mbs)
DP_INFO(p_hwfn,
"The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
L1_CACHE_BYTES, wr_mbs);
STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
}
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int hw_mode)
{
@ -1227,17 +1279,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_cxt_hw_init_common(p_hwfn);
/* Close gate from NIG to BRB/Storm; By default they are open, but
* we close them to prevent NIG from passing data to reset blocks.
* Should have been done in the ENGINE phase, but init-tool lacks
* proper port-pretend capabilities.
*/
qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
qed_port_unpretend(p_hwfn, p_ptt);
qed_init_cache_line_size(p_hwfn, p_ptt);
rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
if (rc)
@ -1320,7 +1362,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
int rc = 0;
u8 cond;
db_bar_size = qed_hw_bar_size(p_hwfn, BAR_ID_1);
db_bar_size = qed_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
if (p_hwfn->cdev->num_hwfns > 1)
db_bar_size /= 2;
@ -1431,7 +1473,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
p_hwfn->qm_info.pf_rl = 100000;
}
qed_cxt_hw_init_pf(p_hwfn);
qed_cxt_hw_init_pf(p_hwfn, p_ptt);
qed_int_igu_init_rt(p_hwfn);
@ -1852,18 +1894,21 @@ int qed_hw_stop(struct qed_dev *cdev)
return rc2;
}
void qed_hw_stop_fastpath(struct qed_dev *cdev)
int qed_hw_stop_fastpath(struct qed_dev *cdev)
{
int j;
for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
struct qed_ptt *p_ptt;
if (IS_VF(cdev)) {
qed_vf_pf_int_cleanup(p_hwfn);
continue;
}
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EAGAIN;
DP_VERBOSE(p_hwfn,
NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");
@ -1881,17 +1926,28 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
/* Need to wait 1ms to guarantee SBs are cleared */
usleep_range(1000, 2000);
qed_ptt_release(p_hwfn, p_ptt);
}
return 0;
}
void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
struct qed_ptt *p_ptt;
if (IS_VF(p_hwfn->cdev))
return;
return 0;
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EAGAIN;
/* Re-open incoming traffic */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
qed_ptt_release(p_hwfn, p_ptt);
return 0;
}
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
@ -1989,12 +2045,17 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
QED_VF_L2_QUE));
}
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
feat_num[QED_ISCSI_CQ] = min_t(u32, RESC_NUM(p_hwfn, QED_SB),
RESC_NUM(p_hwfn,
QED_CMDQS_CQS));
DP_VERBOSE(p_hwfn,
NETIF_MSG_PROBE,
"#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d\n",
"#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d ISCSI_CQ=%d #SBS=%d\n",
(int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
(int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
RESC_NUM(p_hwfn, QED_SB));
}
@ -2697,9 +2758,9 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
return qed_hw_get_resc(p_hwfn, p_ptt);
}
static int qed_get_dev_info(struct qed_dev *cdev)
static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_dev *cdev = p_hwfn->cdev;
u16 device_id_mask;
u32 tmp;
@ -2721,15 +2782,13 @@ static int qed_get_dev_info(struct qed_dev *cdev)
return -EBUSY;
}
cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_NUM);
cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_REV);
cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);
cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
MASK_FIELD(CHIP_REV, cdev->chip_rev);
/* Learn number of HW-functions */
tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CMT_ENABLED_FOR_PAIR);
tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);
if (tmp & (1 << p_hwfn->rel_pf_id)) {
DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
@ -2738,11 +2797,10 @@ static int qed_get_dev_info(struct qed_dev *cdev)
cdev->num_hwfns = 1;
}
cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt,
MISCS_REG_CHIP_TEST_REG) >> 4;
MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_METAL);
cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
MASK_FIELD(CHIP_METAL, cdev->chip_metal);
DP_INFO(cdev->hwfns,
@ -2795,7 +2853,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* First hwfn learns basic information, e.g., number of hwfns */
if (!p_hwfn->my_id) {
rc = qed_get_dev_info(p_hwfn->cdev);
rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
if (rc)
goto err1;
}
@ -2866,11 +2924,14 @@ int qed_hw_prepare(struct qed_dev *cdev,
u8 __iomem *addr;
/* adjust bar offset for second engine */
addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
addr = cdev->regview +
qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
BAR_ID_0) / 2;
p_regview = addr;
/* adjust doorbell bar offset for second engine */
addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
addr = cdev->doorbells +
qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
BAR_ID_1) / 2;
p_doorbell = addr;
/* prepare second hw function */

View File

@ -165,17 +165,19 @@ int qed_hw_stop(struct qed_dev *cdev);
*
* @param cdev
*
* @return int
*/
void qed_hw_stop_fastpath(struct qed_dev *cdev);
int qed_hw_stop_fastpath(struct qed_dev *cdev);
/**
* @brief qed_hw_start_fastpath - restart fastpath traffic,
* only if hw_stop_fastpath was called
*
* @param cdev
* @param p_hwfn
*
* @return int
*/
void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
/**

View File

@ -340,10 +340,10 @@ qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
static int
qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u32 active_segs = 0;
@ -765,6 +765,7 @@ static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
static int qed_fcoe_stop(struct qed_dev *cdev)
{
struct qed_ptt *p_ptt;
int rc;
if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
@ -778,10 +779,15 @@ static int qed_fcoe_stop(struct qed_dev *cdev)
return -EINVAL;
}
p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
if (!p_ptt)
return -EAGAIN;
/* Stop the fcoe */
rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev),
rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), p_ptt,
QED_SPQ_MODE_EBLOCK, NULL);
cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
return rc;
}

View File

@ -58,6 +58,7 @@ struct qed_ptt {
struct list_head list_entry;
unsigned int idx;
struct pxp_ptt_entry pxp;
u8 hwfn_id;
};
struct qed_ptt_pool {
@ -79,6 +80,7 @@ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
p_pool->ptts[i].idx = i;
p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
p_pool->ptts[i].pxp.pretend.control = 0;
p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
if (i >= RESERVED_PTT_MAX)
list_add(&p_pool->ptts[i].list_entry,
&p_pool->free_list);
@ -193,6 +195,11 @@ static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
offset = hw_addr - win_hw_addr;
if (p_ptt->hwfn_id != p_hwfn->my_id)
DP_NOTICE(p_hwfn,
"ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);
/* Verify the address is within the window */
if (hw_addr < win_hw_addr ||
offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {

View File

@ -181,6 +181,15 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_params = &p_hwfn->pf_params.iscsi_pf_params;
p_queue = &p_init->q_params;
/* Sanity */
if (p_params->num_queues > p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]) {
DP_ERR(p_hwfn,
"Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
p_params->num_queues,
p_hwfn->hw_info.resc_num[QED_ISCSI_CQ]);
return -EINVAL;
}
SET_FIELD(p_init->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, ISCSI_SLOW_PATH_LAYER_CODE);
p_init->hdr.op_code = ISCSI_RAMROD_CMD_ID_INIT_FUNC;
@ -864,6 +873,8 @@ static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn,
HILO_64_REGPAIR(tstats.iscsi_rx_bytes_cnt);
p_stats->iscsi_rx_packet_cnt =
HILO_64_REGPAIR(tstats.iscsi_rx_packet_cnt);
p_stats->iscsi_rx_new_ooo_isle_events_cnt =
HILO_64_REGPAIR(tstats.iscsi_rx_new_ooo_isle_events_cnt);
p_stats->iscsi_cmdq_threshold_cnt =
le32_to_cpu(tstats.iscsi_cmdq_threshold_cnt);
p_stats->iscsi_rq_threshold_cnt =
@ -1010,6 +1021,8 @@ static int qed_fill_iscsi_dev_info(struct qed_dev *cdev,
info->secondary_bdq_rq_addr =
qed_iscsi_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);
info->num_cqs = FEAT_NUM(hwfn, QED_ISCSI_CQ);
return rc;
}
@ -1311,6 +1324,26 @@ static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats)
return qed_iscsi_get_stats(QED_LEADING_HWFN(cdev), stats);
}
void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
struct qed_mcp_iscsi_stats *stats)
{
struct qed_iscsi_stats proto_stats;
/* Retrieve FW statistics */
memset(&proto_stats, 0, sizeof(proto_stats));
if (qed_iscsi_stats(cdev, &proto_stats)) {
DP_VERBOSE(cdev, QED_MSG_STORAGE,
"Failed to collect ISCSI statistics\n");
return;
}
/* Translate FW statistics into struct */
stats->rx_pdus = proto_stats.iscsi_rx_total_pdu_cnt;
stats->tx_pdus = proto_stats.iscsi_tx_total_pdu_cnt;
stats->rx_bytes = proto_stats.iscsi_rx_bytes_cnt;
stats->tx_bytes = proto_stats.iscsi_tx_bytes_cnt;
}
static const struct qed_iscsi_ops qed_iscsi_ops_pass = {
.common = &qed_common_ops_pass,
.ll2 = &qed_ll2_ops_pass,

View File

@ -64,13 +64,25 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
void qed_iscsi_free(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info);
/**
* @brief - Fills provided statistics struct with statistics.
*
* @param cdev
* @param stats - points to struct that will be filled with statistics.
*/
void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
struct qed_mcp_iscsi_stats *stats);
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static inline struct qed_iscsi_info *qed_iscsi_alloc(
struct qed_hwfn *p_hwfn) { return NULL; }
static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info) {}
static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info) {}
struct qed_iscsi_info *p_iscsi_info) {}
static inline void
qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
struct qed_mcp_iscsi_stats *stats) {}
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
#endif

View File

@ -1929,7 +1929,11 @@ static int qed_start_vport(struct qed_dev *cdev,
return rc;
}
qed_hw_start_fastpath(p_hwfn);
rc = qed_hw_start_fastpath(p_hwfn);
if (rc) {
DP_ERR(cdev, "Failed to start VPORT fastpath\n");
return rc;
}
DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
"Started V-PORT %d with MTU %d\n",
@ -2172,7 +2176,13 @@ static int qed_start_txq(struct qed_dev *cdev,
#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
qed_hw_stop_fastpath(cdev);
int rc;
rc = qed_hw_stop_fastpath(cdev);
if (rc) {
DP_ERR(cdev, "Failed to stop Fastpath\n");
return rc;
}
return 0;
}

View File

@ -1408,13 +1408,21 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
struct qed_ll2_info *p_ll2_conn;
struct qed_ll2_rx_queue *p_rx;
struct qed_ll2_tx_queue *p_tx;
struct qed_ptt *p_ptt;
int rc = -EINVAL;
u32 i, capacity;
u8 qid;
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EAGAIN;
p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
if (!p_ll2_conn)
return -EINVAL;
if (!p_ll2_conn) {
rc = -EINVAL;
goto out;
}
p_rx = &p_ll2_conn->rx_queue;
p_tx = &p_ll2_conn->tx_queue;
@ -1447,7 +1455,9 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
p_tx->cur_completing_frag_num = 0;
*p_tx->p_fw_cons = 0;
qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
if (rc)
goto out;
qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
p_ll2_conn->queue_id = qid;
@ -1461,26 +1471,28 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
if (rc)
return rc;
goto out;
rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
if (rc)
return rc;
goto out;
if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
qed_llh_add_protocol_filter(p_hwfn, p_ptt,
0x8906, 0,
QED_LLH_FILTER_ETHERTYPE);
qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
qed_llh_add_protocol_filter(p_hwfn, p_ptt,
0x8914, 0,
QED_LLH_FILTER_ETHERTYPE);
}
out:
qed_ptt_release(p_hwfn, p_ptt);
return rc;
}
@ -1831,23 +1843,30 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
struct qed_ll2_info *p_ll2_conn = NULL;
int rc = -EINVAL;
struct qed_ptt *p_ptt;
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EAGAIN;
p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
if (!p_ll2_conn)
return -EINVAL;
if (!p_ll2_conn) {
rc = -EINVAL;
goto out;
}
/* Stop Tx & Rx of connection, if needed */
if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
return rc;
goto out;
qed_ll2_txq_flush(p_hwfn, connection_handle);
}
if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
return rc;
goto out;
qed_ll2_rxq_flush(p_hwfn, connection_handle);
}
@ -1855,14 +1874,16 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
0x8906, 0,
QED_LLH_FILTER_ETHERTYPE);
qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
0x8914, 0,
QED_LLH_FILTER_ETHERTYPE);
}
out:
qed_ptt_release(p_hwfn, p_ptt);
return rc;
}

View File

@ -55,6 +55,8 @@
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
@ -1679,6 +1681,9 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
case QED_MCP_FCOE_STATS:
qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
break;
case QED_MCP_ISCSI_STATS:
qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
break;
default:
DP_ERR(cdev, "Invalid protocol type = %d\n", type);
return;

View File

@ -41,6 +41,7 @@
#include "qed_iscsi.h"
#include "qed_ll2.h"
#include "qed_ooo.h"
#include "qed_cxt.h"
static struct qed_ooo_archipelago
*qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn,
@ -48,15 +49,18 @@ static struct qed_ooo_archipelago
*p_ooo_info,
u32 cid)
{
struct qed_ooo_archipelago *p_archipelago = NULL;
u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
struct qed_ooo_archipelago *p_archipelago;
list_for_each_entry(p_archipelago,
&p_ooo_info->archipelagos_list, list_entry) {
if (p_archipelago->cid == cid)
return p_archipelago;
}
if (idx >= p_ooo_info->max_num_archipelagos)
return NULL;
return NULL;
p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
if (list_empty(&p_archipelago->isles_list))
return NULL;
return p_archipelago;
}
static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn,
@ -97,8 +101,8 @@ void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
{
u16 max_num_archipelagos = 0, cid_base;
struct qed_ooo_info *p_ooo_info;
u16 max_num_archipelagos = 0;
u16 max_num_isles = 0;
u32 i;
@ -110,6 +114,7 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons;
max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos;
cid_base = (u16)qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ISCSI);
if (!max_num_archipelagos) {
DP_NOTICE(p_hwfn,
@ -121,11 +126,12 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
if (!p_ooo_info)
return NULL;
p_ooo_info->cid_base = cid_base;
p_ooo_info->max_num_archipelagos = max_num_archipelagos;
INIT_LIST_HEAD(&p_ooo_info->free_buffers_list);
INIT_LIST_HEAD(&p_ooo_info->ready_buffers_list);
INIT_LIST_HEAD(&p_ooo_info->free_isles_list);
INIT_LIST_HEAD(&p_ooo_info->free_archipelagos_list);
INIT_LIST_HEAD(&p_ooo_info->archipelagos_list);
p_ooo_info->p_isles_mem = kcalloc(max_num_isles,
sizeof(struct qed_ooo_isle),
@ -146,11 +152,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
if (!p_ooo_info->p_archipelagos_mem)
goto no_archipelagos_mem;
for (i = 0; i < max_num_archipelagos; i++) {
for (i = 0; i < max_num_archipelagos; i++)
INIT_LIST_HEAD(&p_ooo_info->p_archipelagos_mem[i].isles_list);
list_add_tail(&p_ooo_info->p_archipelagos_mem[i].list_entry,
&p_ooo_info->free_archipelagos_list);
}
p_ooo_info->ooo_history.p_cqes =
kcalloc(QED_MAX_NUM_OOO_HISTORY_ENTRIES,
@ -178,21 +181,9 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_archipelago *p_archipelago;
struct qed_ooo_buffer *p_buffer;
struct qed_ooo_isle *p_isle;
bool b_found = false;
if (list_empty(&p_ooo_info->archipelagos_list))
return;
list_for_each_entry(p_archipelago,
&p_ooo_info->archipelagos_list, list_entry) {
if (p_archipelago->cid == cid) {
list_del(&p_archipelago->list_entry);
b_found = true;
break;
}
}
if (!b_found)
p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
if (!p_archipelago)
return;
while (!list_empty(&p_archipelago->isles_list)) {
@ -216,27 +207,21 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
list_add_tail(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
}
list_add_tail(&p_archipelago->list_entry,
&p_ooo_info->free_archipelagos_list);
}
void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info)
{
struct qed_ooo_archipelago *p_arch;
struct qed_ooo_archipelago *p_archipelago;
struct qed_ooo_buffer *p_buffer;
struct qed_ooo_isle *p_isle;
u32 i;
while (!list_empty(&p_ooo_info->archipelagos_list)) {
p_arch = list_first_entry(&p_ooo_info->archipelagos_list,
struct qed_ooo_archipelago,
list_entry);
for (i = 0; i < p_ooo_info->max_num_archipelagos; i++) {
p_archipelago = &(p_ooo_info->p_archipelagos_mem[i]);
list_del(&p_arch->list_entry);
while (!list_empty(&p_arch->isles_list)) {
p_isle = list_first_entry(&p_arch->isles_list,
while (!list_empty(&p_archipelago->isles_list)) {
p_isle = list_first_entry(&p_archipelago->isles_list,
struct qed_ooo_isle,
list_entry);
@ -258,8 +243,6 @@ void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
list_add_tail(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
}
list_add_tail(&p_arch->list_entry,
&p_ooo_info->free_archipelagos_list);
}
if (!list_empty(&p_ooo_info->ready_buffers_list))
list_splice_tail_init(&p_ooo_info->ready_buffers_list,
@ -378,12 +361,6 @@ void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
p_ooo_info->cur_isles_number--;
list_add(&p_isle->list_entry, &p_ooo_info->free_isles_list);
}
if (list_empty(&p_archipelago->isles_list)) {
list_del(&p_archipelago->list_entry);
list_add(&p_archipelago->list_entry,
&p_ooo_info->free_archipelagos_list);
}
}
void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
@ -426,28 +403,10 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
return;
}
if (!p_archipelago &&
!list_empty(&p_ooo_info->free_archipelagos_list)) {
p_archipelago =
list_first_entry(&p_ooo_info->free_archipelagos_list,
struct qed_ooo_archipelago, list_entry);
if (!p_archipelago) {
u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
list_del(&p_archipelago->list_entry);
if (!list_empty(&p_archipelago->isles_list)) {
DP_NOTICE(p_hwfn,
"Free OOO connection is not empty\n");
INIT_LIST_HEAD(&p_archipelago->isles_list);
}
p_archipelago->cid = cid;
list_add(&p_archipelago->list_entry,
&p_ooo_info->archipelagos_list);
} else if (!p_archipelago) {
DP_NOTICE(p_hwfn, "No more free OOO connections\n");
list_add(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
list_add(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
return;
p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
}
list_add(&p_buffer->list_entry, &p_isle->buffers_list);
@ -517,11 +476,6 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
} else {
list_splice_tail_init(&p_right_isle->buffers_list,
&p_ooo_info->ready_buffers_list);
if (list_empty(&p_archipelago->isles_list)) {
list_del(&p_archipelago->list_entry);
list_add(&p_archipelago->list_entry,
&p_ooo_info->free_archipelagos_list);
}
}
list_add_tail(&p_right_isle->list_entry, &p_ooo_info->free_isles_list);
}

View File

@ -60,9 +60,7 @@ struct qed_ooo_isle {
};
struct qed_ooo_archipelago {
struct list_head list_entry;
struct list_head isles_list;
u32 cid;
};
struct qed_ooo_history {
@ -75,14 +73,14 @@ struct qed_ooo_info {
struct list_head free_buffers_list;
struct list_head ready_buffers_list;
struct list_head free_isles_list;
struct list_head free_archipelagos_list;
struct list_head archipelagos_list;
struct qed_ooo_archipelago *p_archipelagos_mem;
struct qed_ooo_isle *p_isles_mem;
struct qed_ooo_history ooo_history;
u32 cur_isles_number;
u32 max_isles_number;
u32 gen_isles_number;
u16 max_num_archipelagos;
u16 cid_base;
};
#if IS_ENABLED(CONFIG_QED_ISCSI)

View File

@ -1551,6 +1551,7 @@
#define NIG_REG_TSGEN_FREE_CNT_VALUE_LSB 0x5088a8UL
#define NIG_REG_TSGEN_FREE_CNT_VALUE_MSB 0x5088acUL
#define NIG_REG_PTP_LATCH_OSTS_PKT_TIME 0x509040UL
#define PSWRQ2_REG_WR_MBS0 0x240400UL
#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2 0x2aaf98UL
#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2 0x2aaf9cUL

View File

@ -119,6 +119,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
u8 *p_fw_ret, bool skip_quick_poll)
{
struct qed_spq_comp_done *comp_done;
struct qed_ptt *p_ptt;
int rc;
/* A relatively short polling period w/o sleeping, to allow the FW to
@ -135,8 +136,14 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
if (!rc)
return 0;
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt) {
DP_NOTICE(p_hwfn, "ptt, failed to acquire\n");
return -EAGAIN;
}
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
rc = qed_mcp_drain(p_hwfn, p_ptt);
if (rc) {
DP_NOTICE(p_hwfn, "MCP drain failed\n");
goto err;
@ -145,15 +152,18 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
/* Retry after drain */
rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
if (!rc)
return 0;
goto out;
comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
if (comp_done->done == 1) {
if (comp_done->done == 1)
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return 0;
}
out:
qed_ptt_release(p_hwfn, p_ptt);
return 0;
err:
qed_ptt_release(p_hwfn, p_ptt);
DP_NOTICE(p_hwfn,
"Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
le32_to_cpu(p_ent->elem.hdr.cid),

View File

@ -67,6 +67,8 @@ struct qed_dev_iscsi_info {
void __iomem *primary_dbq_rq_addr;
void __iomem *secondary_bdq_rq_addr;
u8 num_cqs;
};
struct qed_iscsi_id_params {