author     NeilBrown <neilb@suse.com>                              2018-03-29 07:26:48 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>        2018-04-23 15:52:52 +0300
commit     e990f1c6eccd6389c3ce321d8bf05cdb0747b761 (patch)
tree       cfa5d2490dc2d507e2a5bfa069985d494472c8f5 /drivers/staging/lustre/lnet/klnds
parent     b0fdb5702533eda27357102ff221297fe5cf009b (diff)
staging: lustre: libcfs: discard cfs_time_shift().
This function simply multiplies by HZ and adds jiffies. This is simple
enough to be opencoded, and doing so makes the code easier to read.
Same for cfs_time_shift_64().

Signed-off-by: NeilBrown <neilb@suse.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
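For reference, a minimal sketch of the helper being removed, going only by the description above; the libcfs definition itself is not part of this diff, so the exact signature shown here is an assumption:

	#include <linux/jiffies.h>	/* jiffies, HZ */

	/* Assumed shape of the removed helper: "now" plus a timeout in seconds. */
	static inline unsigned long cfs_time_shift(int seconds)
	{
		return jiffies + (unsigned long)seconds * HZ;
	}

Each call site in the hunks below simply open-codes that expression, e.g.
fpo->fpo_deadline = jiffies + IBLND_POOL_DEADLINE * HZ;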
Diffstat (limited to 'drivers/staging/lustre/lnet/klnds')
-rw-r--r--  drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c     | 12
-rw-r--r--  drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c  |  4
-rw-r--r--  drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c     |  4
-rw-r--r--  drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c  | 12
4 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 7df07f39b849..276bf486f64b 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -1446,7 +1446,7 @@ static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps,
if (rc)
goto out_fpo;
- fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ fpo->fpo_deadline = jiffies + IBLND_POOL_DEADLINE * HZ;
fpo->fpo_owner = fps;
*pp_fpo = fpo;
@@ -1619,7 +1619,7 @@ int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
spin_lock(&fps->fps_lock);
version = fps->fps_version;
list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
- fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ fpo->fpo_deadline = jiffies + IBLND_POOL_DEADLINE * HZ;
fpo->fpo_map_count++;
if (fpo->fpo_is_fmr) {
@@ -1743,7 +1743,7 @@ int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
fps->fps_version++;
list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
} else {
- fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
+ fps->fps_next_retry = jiffies + IBLND_POOL_RETRY * HZ;
}
spin_unlock(&fps->fps_lock);
@@ -1764,7 +1764,7 @@ static void kiblnd_init_pool(struct kib_poolset *ps, struct kib_pool *pool, int
memset(pool, 0, sizeof(*pool));
INIT_LIST_HEAD(&pool->po_free_list);
- pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ pool->po_deadline = jiffies + IBLND_POOL_DEADLINE * HZ;
pool->po_owner = ps;
pool->po_size = size;
}
@@ -1899,7 +1899,7 @@ struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps)
continue;
pool->po_allocated++;
- pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ pool->po_deadline = jiffies + IBLND_POOL_DEADLINE * HZ;
node = pool->po_free_list.next;
list_del(node);
@@ -1947,7 +1947,7 @@ struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps)
if (!rc) {
list_add_tail(&pool->po_list, &ps->ps_pool_list);
} else {
- ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
+ ps->ps_next_retry = jiffies + IBLND_POOL_RETRY * HZ;
CERROR("Can't allocate new %s pool because out of memory\n",
ps->ps_name);
}
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index c1c3277f4c1f..f9761d8f2e3e 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -3700,13 +3700,13 @@ kiblnd_failover_thread(void *arg)
LASSERT(dev->ibd_failover);
dev->ibd_failover = 0;
if (rc >= 0) { /* Device is OK or failover succeed */
- dev->ibd_next_failover = cfs_time_shift(3);
+ dev->ibd_next_failover = jiffies + 3 * HZ;
continue;
}
/* failed to failover, retry later */
dev->ibd_next_failover =
- cfs_time_shift(min(dev->ibd_failed_failover, 10));
+ jiffies + min(dev->ibd_failed_failover, 10) * HZ;
if (kiblnd_dev_can_failover(dev)) {
list_add_tail(&dev->ibd_fail_list,
&kiblnd_data.kib_failed_devs);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 4546618c1c12..16c1ab0b0bd9 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -1287,7 +1287,7 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
conn->ksnc_tx_last_post = jiffies;
/* Set the deadline for the outgoing HELLO to drain */
conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
- conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ conn->ksnc_tx_deadline = jiffies + *ksocknal_tunables.ksnd_timeout * HZ;
mb(); /* order with adding to peer's conn list */
list_add(&conn->ksnc_list, &peer->ksnp_conns);
@@ -1852,7 +1852,7 @@ ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when)
if (bufnob < conn->ksnc_tx_bufnob) {
/* something got ACKed */
conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ jiffies + *ksocknal_tunables.ksnd_timeout * HZ;
peer->ksnp_last_alive = now;
conn->ksnc_tx_bufnob = bufnob;
}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 5b34c7c030ad..1ace54c9b133 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -221,7 +221,7 @@ ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
* something got ACKed
*/
conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ jiffies + *ksocknal_tunables.ksnd_timeout * HZ;
conn->ksnc_peer->ksnp_last_alive = jiffies;
conn->ksnc_tx_bufnob = bufnob;
mb();
@@ -269,7 +269,7 @@ ksocknal_recv_iter(struct ksock_conn *conn)
conn->ksnc_peer->ksnp_last_alive = jiffies;
conn->ksnc_rx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ jiffies + *ksocknal_tunables.ksnd_timeout * HZ;
mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
@@ -405,7 +405,7 @@ ksocknal_check_zc_req(struct ksock_tx *tx)
/* ZC_REQ is going to be pinned to the peer */
tx->tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ jiffies + *ksocknal_tunables.ksnd_timeout * HZ;
LASSERT(!tx->tx_msg.ksm_zc_cookies[0]);
@@ -677,7 +677,7 @@ ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
if (list_empty(&conn->ksnc_tx_queue) && !bufnob) {
/* First packet starts the timeout */
conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ jiffies + *ksocknal_tunables.ksnd_timeout * HZ;
if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
conn->ksnc_peer->ksnp_last_alive = jiffies;
conn->ksnc_tx_bufnob = 0;
@@ -858,7 +858,7 @@ ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
ksocknal_find_connecting_route_locked(peer)) {
/* the message is going to be pinned to the peer */
tx->tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ jiffies + *ksocknal_tunables.ksnd_timeout * HZ;
/* Queue the message until a connection is established */
list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue);
@@ -2308,7 +2308,7 @@ ksocknal_send_keepalive_locked(struct ksock_peer *peer)
* retry 10 secs later, so we wouldn't put pressure
* on this peer if we failed to send keepalive this time
*/
- peer->ksnp_send_keepalive = cfs_time_shift(10);
+ peer->ksnp_send_keepalive = jiffies + 10 * HZ;
conn = ksocknal_find_conn_locked(peer, NULL, 1);
if (conn) {