@@ -195,6 +195,7 @@ union ip_vs_sync_conn {
 #define IPVS_OPT_F_PARAM	(1 << (IPVS_OPT_PARAM-1))
 
 struct ip_vs_sync_thread_data {
+	struct task_struct *task;
 	struct netns_ipvs *ipvs;
 	struct socket *sock;
 	char *buf;
@@ -374,8 +375,11 @@ static inline void sb_queue_tail(struct netns_ipvs *ipvs,
 					       max(IPVS_SYNC_SEND_DELAY, 1));
 		ms->sync_queue_len++;
 		list_add_tail(&sb->list, &ms->sync_queue);
-		if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE)
-			wake_up_process(ms->master_thread);
+		if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) {
+			int id = (int)(ms - ipvs->ms);
+
+			wake_up_process(ipvs->master_tinfo[id].task);
+		}
 	} else
 		ip_vs_sync_buff_release(sb);
 	spin_unlock(&ipvs->sync_lock);
@@ -1636,8 +1640,10 @@ static void master_wakeup_work_handler(struct work_struct *work)
 	spin_lock_bh(&ipvs->sync_lock);
 	if (ms->sync_queue_len &&
 	    ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) {
+		int id = (int)(ms - ipvs->ms);
+
 		ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE;
-		wake_up_process(ms->master_thread);
+		wake_up_process(ipvs->master_tinfo[id].task);
 	}
 	spin_unlock_bh(&ipvs->sync_lock);
 }
@@ -1703,10 +1709,6 @@ done:
 	if (sb)
 		ip_vs_sync_buff_release(sb);
 
-	/* release the sending multicast socket */
-	sock_release(tinfo->sock);
-	kfree(tinfo);
-
 	return 0;
 }
 
@@ -1740,11 +1742,6 @@ static int sync_thread_backup(void *data)
 		}
 	}
 
-	/* release the sending multicast socket */
-	sock_release(tinfo->sock);
-	kfree(tinfo->buf);
-	kfree(tinfo);
-
 	return 0;
 }
 
@@ -1752,8 +1749,8 @@ static int sync_thread_backup(void *data)
 int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 		      int state)
 {
-	struct ip_vs_sync_thread_data *tinfo = NULL;
-	struct task_struct **array = NULL, *task;
+	struct ip_vs_sync_thread_data *ti = NULL, *tinfo;
+	struct task_struct *task;
 	struct net_device *dev;
 	char *name;
 	int (*threadfn)(void *data);
@@ -1822,7 +1819,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 		threadfn = sync_thread_master;
 	} else if (state == IP_VS_STATE_BACKUP) {
 		result = -EEXIST;
-		if (ipvs->backup_threads)
+		if (ipvs->backup_tinfo)
 			goto out_early;
 
 		ipvs->bcfg = *c;
@@ -1849,28 +1846,22 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 					  master_wakeup_work_handler);
 			ms->ipvs = ipvs;
 		}
-	} else {
-		array = kcalloc(count, sizeof(struct task_struct *),
-				GFP_KERNEL);
-		result = -ENOMEM;
-		if (!array)
-			goto out;
 	}
+	result = -ENOMEM;
+	ti = kcalloc(count, sizeof(struct ip_vs_sync_thread_data),
+		     GFP_KERNEL);
+	if (!ti)
+		goto out;
 
 	for (id = 0; id < count; id++) {
-		result = -ENOMEM;
-		tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
-		if (!tinfo)
-			goto out;
+		tinfo = &ti[id];
 		tinfo->ipvs = ipvs;
-		tinfo->sock = NULL;
 		if (state == IP_VS_STATE_BACKUP) {
+			result = -ENOMEM;
 			tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
 					     GFP_KERNEL);
 			if (!tinfo->buf)
 				goto out;
-		} else {
-			tinfo->buf = NULL;
 		}
 		tinfo->id = id;
 		if (state == IP_VS_STATE_MASTER)
@@ -1885,17 +1876,15 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 			result = PTR_ERR(task);
 			goto out;
 		}
-		tinfo = NULL;
-		if (state == IP_VS_STATE_MASTER)
-			ipvs->ms[id].master_thread = task;
-		else
-			array[id] = task;
+		tinfo->task = task;
 	}
 
 	/* mark as active */
 
-	if (state == IP_VS_STATE_BACKUP)
-		ipvs->backup_threads = array;
+	if (state == IP_VS_STATE_MASTER)
+		ipvs->master_tinfo = ti;
+	else
+		ipvs->backup_tinfo = ti;
 	spin_lock_bh(&ipvs->sync_buff_lock);
 	ipvs->sync_state |= state;
 	spin_unlock_bh(&ipvs->sync_buff_lock);
@@ -1910,29 +1899,31 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 
 out:
 	/* We do not need RTNL lock anymore, release it here so that
-	 * sock_release below and in the kthreads can use rtnl_lock
-	 * to leave the mcast group.
+	 * sock_release below can use rtnl_lock to leave the mcast group.
 	 */
 	rtnl_unlock();
-	count = id;
-	while (count-- > 0) {
-		if (state == IP_VS_STATE_MASTER)
-			kthread_stop(ipvs->ms[count].master_thread);
-		else
-			kthread_stop(array[count]);
+	id = min(id, count - 1);
+	if (ti) {
+		for (tinfo = ti + id; tinfo >= ti; tinfo--) {
+			if (tinfo->task)
+				kthread_stop(tinfo->task);
+		}
 	}
 	if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
 		kfree(ipvs->ms);
 		ipvs->ms = NULL;
 	}
 	mutex_unlock(&ipvs->sync_mutex);
-	if (tinfo) {
-		if (tinfo->sock)
-			sock_release(tinfo->sock);
-		kfree(tinfo->buf);
-		kfree(tinfo);
+
+	/* No more mutexes, release socks */
+	if (ti) {
+		for (tinfo = ti + id; tinfo >= ti; tinfo--) {
+			if (tinfo->sock)
+				sock_release(tinfo->sock);
+			kfree(tinfo->buf);
+		}
+		kfree(ti);
 	}
-	kfree(array);
 	return result;
 
 out_early:
@@ -1944,15 +1935,18 @@ out_early:
 
 int stop_sync_thread(struct netns_ipvs *ipvs, int state)
 {
-	struct task_struct **array;
+	struct ip_vs_sync_thread_data *ti, *tinfo;
 	int id;
 	int retc = -EINVAL;
 
 	IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
 
+	mutex_lock(&ipvs->sync_mutex);
 	if (state == IP_VS_STATE_MASTER) {
+		retc = -ESRCH;
 		if (!ipvs->ms)
-			return -ESRCH;
+			goto err;
+		ti = ipvs->master_tinfo;
 
 		/*
 		 * The lock synchronizes with sb_queue_tail(), so that we don't
@@ -1971,38 +1965,56 @@ int stop_sync_thread(struct netns_ipvs *ipvs, int state)
 			struct ipvs_master_sync_state *ms = &ipvs->ms[id];
 			int ret;
 
+			tinfo = &ti[id];
 			pr_info("stopping master sync thread %d ...\n",
-				task_pid_nr(ms->master_thread));
+				task_pid_nr(tinfo->task));
 			cancel_delayed_work_sync(&ms->master_wakeup_work);
-			ret = kthread_stop(ms->master_thread);
+			ret = kthread_stop(tinfo->task);
 			if (retc >= 0)
 				retc = ret;
 		}
 		kfree(ipvs->ms);
 		ipvs->ms = NULL;
+		ipvs->master_tinfo = NULL;
 	} else if (state == IP_VS_STATE_BACKUP) {
-		if (!ipvs->backup_threads)
-			return -ESRCH;
+		retc = -ESRCH;
+		if (!ipvs->backup_tinfo)
+			goto err;
+		ti = ipvs->backup_tinfo;
 
 		ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
-		array = ipvs->backup_threads;
 		retc = 0;
 		for (id = ipvs->threads_mask; id >= 0; id--) {
 			int ret;
 
+			tinfo = &ti[id];
 			pr_info("stopping backup sync thread %d ...\n",
-				task_pid_nr(array[id]));
-			ret = kthread_stop(array[id]);
+				task_pid_nr(tinfo->task));
+			ret = kthread_stop(tinfo->task);
 			if (retc >= 0)
 				retc = ret;
 		}
-		kfree(array);
-		ipvs->backup_threads = NULL;
+		ipvs->backup_tinfo = NULL;
+	} else {
+		goto err;
 	}
+	id = ipvs->threads_mask;
+	mutex_unlock(&ipvs->sync_mutex);
+
+	/* No more mutexes, release socks */
+	for (tinfo = ti + id; tinfo >= ti; tinfo--) {
+		if (tinfo->sock)
+			sock_release(tinfo->sock);
+		kfree(tinfo->buf);
+	}
+	kfree(ti);
 
 	/* decrease the module use count */
 	ip_vs_use_count_dec();
+	return retc;
 
+err:
+	mutex_unlock(&ipvs->sync_mutex);
 	return retc;
 }
 
@@ -2021,7 +2033,6 @@ void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
 {
 	int retc;
 
-	mutex_lock(&ipvs->sync_mutex);
 	retc = stop_sync_thread(ipvs, IP_VS_STATE_MASTER);
 	if (retc && retc != -ESRCH)
 		pr_err("Failed to stop Master Daemon\n");
@@ -2029,5 +2040,4 @@ void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
 	retc = stop_sync_thread(ipvs, IP_VS_STATE_BACKUP);
 	if (retc && retc != -ESRCH)
		pr_err("Failed to stop Backup Daemon\n");
-	mutex_unlock(&ipvs->sync_mutex);
 }
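
The change above consolidates all per-thread state (task pointer, mcast socket, receive buffer) into a single kcalloc'd ip_vs_sync_thread_data array (ipvs->master_tinfo / ipvs->backup_tinfo) owned by start_sync_thread() and stop_sync_thread(); the sync kthreads no longer release their own socket or free their own tinfo. Teardown stops every thread while holding sync_mutex, then drops the mutex before calling sock_release(), because sock_release() may take rtnl_lock to leave the mcast group and so must not run under sync_mutex (see the "No more mutexes, release socks" comments).

Below is a minimal userspace sketch of that teardown ordering, for illustration only: pthreads stand in for kthreads, close() stands in for sock_release(), and thread_data, worker() and stop_threads() are hypothetical names, not part of the patch.

/*
 * Minimal userspace sketch of the teardown ordering above.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

struct thread_data {		/* analogue of ip_vs_sync_thread_data */
	pthread_t task;		/* owned by the stopper, not the thread */
	int sock;		/* released only after the mutex is dropped */
	char *buf;
};

static pthread_mutex_t sync_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *data)
{
	/* Like the sync kthreads after this patch: the thread never
	 * releases its own sock/buf and never takes sync_mutex. */
	(void)data;
	return NULL;
}

static int stop_threads(struct thread_data *ti, int count)
{
	int id;

	/* Stop every thread while holding the mutex, as
	 * stop_sync_thread() does with kthread_stop(). */
	pthread_mutex_lock(&sync_mutex);
	for (id = count - 1; id >= 0; id--)
		pthread_join(ti[id].task, NULL);
	pthread_mutex_unlock(&sync_mutex);

	/* No more mutexes, release socks */
	for (id = count - 1; id >= 0; id--) {
		if (ti[id].sock >= 0)
			close(ti[id].sock);
		free(ti[id].buf);
	}
	free(ti);
	return 0;
}

int main(void)
{
	int count = 2, id;
	struct thread_data *ti = calloc(count, sizeof(*ti));

	if (!ti)
		return 1;
	for (id = 0; id < count; id++) {
		ti[id].sock = -1;	/* no real socket in this sketch */
		if (pthread_create(&ti[id].task, NULL, worker, &ti[id]))
			return 1;
	}
	return stop_threads(ti, count);
}

As in the patch, the stop loop runs under the mutex so no concurrent starter or stopper can race with teardown, while the resource release that may take other locks happens only after the mutex is dropped.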