@@ -68,7 +68,7 @@ srpc_serv_portal(int svc_id)
 }
 
 /* forward ref's */
-int srpc_handle_rpc(struct swi_workitem *wi);
+void srpc_handle_rpc(struct swi_workitem *wi);
 
 void srpc_get_counters(struct srpc_counters *cnt)
 {
@@ -178,7 +178,7 @@ srpc_init_server_rpc(struct srpc_server_rpc *rpc,
 	memset(rpc, 0, sizeof(*rpc));
 	swi_init_workitem(&rpc->srpc_wi, srpc_handle_rpc,
 			  srpc_serv_is_framework(scd->scd_svc) ?
-			  lst_sched_serial : lst_sched_test[scd->scd_cpt]);
+			  lst_serial_wq : lst_test_wq[scd->scd_cpt]);
 
 	rpc->srpc_ev.ev_fired = 1; /* no event expected now */
 
@@ -242,7 +242,7 @@ srpc_service_nrpcs(struct srpc_service *svc)
 		max(nrpcs, SFW_FRWK_WI_MIN) : max(nrpcs, SFW_TEST_WI_MIN);
 }
 
-int srpc_add_buffer(struct swi_workitem *wi);
+void srpc_add_buffer(struct swi_workitem *wi);
 
 static int
 srpc_service_init(struct srpc_service *svc)
@@ -277,11 +277,11 @@ srpc_service_init(struct srpc_service *svc)
 		scd->scd_ev.ev_type = SRPC_REQUEST_RCVD;
 
 		/*
-		 * NB: don't use lst_sched_serial for adding buffer,
+		 * NB: don't use lst_serial_wq for adding buffer,
 		 * see details in srpc_service_add_buffers()
 		 */
 		swi_init_workitem(&scd->scd_buf_wi,
-				  srpc_add_buffer, lst_sched_test[i]);
+				  srpc_add_buffer, lst_test_wq[i]);
 
 		if (i && srpc_serv_is_framework(svc)) {
 			/*
@@ -513,7 +513,7 @@ __must_hold(&scd->scd_lock)
 	return rc;
 }
 
-int
+void
 srpc_add_buffer(struct swi_workitem *wi)
 {
 	struct srpc_service_cd *scd = container_of(wi, struct srpc_service_cd, scd_buf_wi);
@@ -572,7 +572,6 @@ srpc_add_buffer(struct swi_workitem *wi)
 	}
 
 	spin_unlock(&scd->scd_lock);
-	return 0;
 }
 
 int
@@ -604,15 +603,15 @@ srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
 		spin_lock(&scd->scd_lock);
 		/*
 		 * NB: srpc_service_add_buffers() can be called inside
-		 * thread context of lst_sched_serial, and we don't normally
+		 * thread context of lst_serial_wq, and we don't normally
 		 * allow to sleep inside thread context of WI scheduler
 		 * because it will block current scheduler thread from doing
 		 * anything else, even worse, it could deadlock if it's
 		 * waiting on result from another WI of the same scheduler.
 		 * However, it's safe at here because scd_buf_wi is scheduled
-		 * by thread in a different WI scheduler (lst_sched_test),
+		 * by thread in a different WI scheduler (lst_test_wq),
 		 * so we don't have any risk of deadlock, though this could
-		 * block all WIs pending on lst_sched_serial for a moment
+		 * block all WIs pending on lst_serial_wq for a moment
 		 * which is not good but not fatal.
 		 */
 		lst_wait_until(scd->scd_buf_err ||
@@ -659,11 +658,9 @@ srpc_finish_service(struct srpc_service *sv)
 	LASSERT(sv->sv_shuttingdown); /* srpc_shutdown_service called */
 
 	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
+		swi_cancel_workitem(&scd->scd_buf_wi);
+
 		spin_lock(&scd->scd_lock);
-		if (!swi_deschedule_workitem(&scd->scd_buf_wi)) {
-			spin_unlock(&scd->scd_lock);
-			return 0;
-		}
 
 		if (scd->scd_buf_nposted > 0) {
 			CDEBUG(D_NET, "waiting for %d posted buffers to unlink\n",
@@ -679,11 +676,9 @@ srpc_finish_service(struct srpc_service *sv)
 
 			rpc = list_entry(scd->scd_rpc_active.next,
 					 struct srpc_server_rpc, srpc_list);
-			CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s scheduled %d running %d, ev fired %d type %d status %d lnet %d\n",
+			CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s, ev fired %d type %d status %d lnet %d\n",
 				rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
 				swi_state2str(rpc->srpc_wi.swi_state),
-				rpc->srpc_wi.swi_workitem.wi_scheduled,
-				rpc->srpc_wi.swi_workitem.wi_running,
 				rpc->srpc_ev.ev_fired, rpc->srpc_ev.ev_type,
 				rpc->srpc_ev.ev_status, rpc->srpc_ev.ev_lnet);
 			spin_unlock(&scd->scd_lock);
@@ -946,7 +941,6 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
 	 * Cancel pending schedules and prevent future schedule attempts:
 	 */
 	LASSERT(rpc->srpc_ev.ev_fired);
-	swi_exit_workitem(&rpc->srpc_wi);
 
 	if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
 		buffer = list_entry(scd->scd_buf_blocked.next,
@@ -964,7 +958,7 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
 }
 
 /* handles an incoming RPC */
-int
+void
 srpc_handle_rpc(struct swi_workitem *wi)
 {
 	struct srpc_server_rpc *rpc = container_of(wi, struct srpc_server_rpc, srpc_wi);
@@ -986,9 +980,8 @@ srpc_handle_rpc(struct swi_workitem *wi)
 
 		if (ev->ev_fired) { /* no more event, OK to finish */
 			srpc_server_rpc_done(rpc, -ESHUTDOWN);
-			return 1;
 		}
-		return 0;
+		return;
 	}
 
 	spin_unlock(&scd->scd_lock);
@@ -1006,7 +999,7 @@ srpc_handle_rpc(struct swi_workitem *wi)
 		if (!msg->msg_magic) {
 			/* moaned already in srpc_lnet_ev_handler */
 			srpc_server_rpc_done(rpc, EBADMSG);
-			return 1;
+			return;
 		}
 
 		srpc_unpack_msg_hdr(msg);
@@ -1022,7 +1015,7 @@ srpc_handle_rpc(struct swi_workitem *wi)
 			LASSERT(!reply->status || !rpc->srpc_bulk);
 			if (rc) {
 				srpc_server_rpc_done(rpc, rc);
-				return 1;
+				return;
 			}
 		}
 
@@ -1031,7 +1024,7 @@ srpc_handle_rpc(struct swi_workitem *wi)
 		if (rpc->srpc_bulk) {
 			rc = srpc_do_bulk(rpc);
 			if (!rc)
-				return 0; /* wait for bulk */
+				return; /* wait for bulk */
 
 			LASSERT(ev->ev_fired);
 			ev->ev_status = rc;
@@ -1049,16 +1042,16 @@ srpc_handle_rpc(struct swi_workitem *wi)
 
 			if (rc) {
 				srpc_server_rpc_done(rpc, rc);
-				return 1;
+				return;
 			}
 		}
 
 		wi->swi_state = SWI_STATE_REPLY_SUBMITTED;
 		rc = srpc_send_reply(rpc);
 		if (!rc)
-			return 0; /* wait for reply */
+			return; /* wait for reply */
 		srpc_server_rpc_done(rpc, rc);
-		return 1;
+		return;
 
 	case SWI_STATE_REPLY_SUBMITTED:
 		if (!ev->ev_fired) {
@@ -1071,10 +1064,8 @@ srpc_handle_rpc(struct swi_workitem *wi)
 
 		wi->swi_state = SWI_STATE_DONE;
 		srpc_server_rpc_done(rpc, ev->ev_status);
-		return 1;
+		return;
 	}
-
-	return 0;
 }
 
 static void
@@ -1169,7 +1160,6 @@ srpc_client_rpc_done(struct srpc_client_rpc *rpc, int status)
 	 * Cancel pending schedules and prevent future schedule attempts:
 	 */
 	LASSERT(!srpc_event_pending(rpc));
-	swi_exit_workitem(wi);
 
	spin_unlock(&rpc->crpc_lock);
 
@@ -1177,7 +1167,7 @@ srpc_client_rpc_done(struct srpc_client_rpc *rpc, int status)
 }
 
 /* sends an outgoing RPC */
-int
+void
 srpc_send_rpc(struct swi_workitem *wi)
 {
 	int rc = 0;
@@ -1213,7 +1203,7 @@ srpc_send_rpc(struct swi_workitem *wi)
 		rc = srpc_prepare_reply(rpc);
 		if (rc) {
 			srpc_client_rpc_done(rpc, rc);
-			return 1;
+			return;
 		}
 
 		rc = srpc_prepare_bulk(rpc);
@@ -1290,7 +1280,7 @@ srpc_send_rpc(struct swi_workitem *wi)
 
 		wi->swi_state = SWI_STATE_DONE;
 		srpc_client_rpc_done(rpc, rc);
-		return 1;
+		return;
 	}
 
 	if (rc) {
@@ -1307,10 +1297,9 @@ abort:
 
 		if (!srpc_event_pending(rpc)) {
 			srpc_client_rpc_done(rpc, -EINTR);
-			return 1;
+			return;
 		}
 	}
-	return 0;
 }
 
 struct srpc_client_rpc *
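
The conversion above is mechanical: every workitem handler loses its int return value (1 meant "finished, deschedule me", 0 meant "still waiting, keep me queued"), reports completion by calling the *_done() helper itself, and the shutdown path replaces the check on swi_deschedule_workitem()'s return value with an unconditional swi_cancel_workitem(). The sketch below is illustration only, not the LNet selftest API: the toy_* names are invented, and toy_cancel() is assumed to mirror cancel_work_sync()-style semantics (the item is neither queued nor running once it returns).

	/* Toy model of the void-returning handler pattern; toy_* names
	 * are hypothetical, standing in for the swi_* helpers above. */
	#include <stdbool.h>
	#include <stdio.h>

	struct toy_workitem {
		void (*twi_fn)(struct toy_workitem *wi); /* void handler */
		bool twi_queued;
	};

	static void toy_queue(struct toy_workitem *wi)
	{
		wi->twi_queued = true;
	}

	/* shutdown path: returns true if the item was still pending */
	static bool toy_cancel(struct toy_workitem *wi)
	{
		bool pending = wi->twi_queued;

		wi->twi_queued = false;
		return pending;
	}

	static void toy_rpc_done(struct toy_workitem *wi, int status)
	{
		(void)wi;
		printf("rpc finished, status %d\n", status);
	}

	/* new-style handler: calls the done path and simply returns,
	 * instead of telling the scheduler 1 (retire) or 0 (requeue) */
	static void toy_handle_rpc(struct toy_workitem *wi)
	{
		toy_rpc_done(wi, 0);
	}

	int main(void)
	{
		struct toy_workitem wi = { .twi_fn = toy_handle_rpc };

		toy_queue(&wi);
		if (wi.twi_queued) {		/* what a worker thread does */
			wi.twi_queued = false;
			wi.twi_fn(&wi);
		}
		toy_cancel(&wi);		/* no-op here; the shutdown case */
		return 0;
	}

The practical effect, visible in the srpc_finish_service() hunk, is that teardown no longer depends on the handler's cooperation via its return value: cancelling the work item up front guarantees it is not running before the per-CPT lock is taken, which is why the wi_scheduled/wi_running fields could be dropped from the CNETERR() diagnostics.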