diff --git a/lnet/klnds/o2iblnd/o2iblnd.c b/lnet/klnds/o2iblnd/o2iblnd.c
index 3adff1f..37e7f0c 100644
--- a/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/lnet/klnds/o2iblnd/o2iblnd.c
@@ -684,6 +684,7 @@ kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
 	int		vectors;
 	int		off;
 	int		i;
+	lnet_nid_t	ibp_nid;
 
 	vectors = conn->ibc_cmid->device->num_comp_vectors;
 	if (vectors <= 1)
@@ -692,7 +693,8 @@ kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
 	mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
 
 	/* hash NID to CPU id in this partition... */
-	off = conn->ibc_peer->ibp_nid % cpumask_weight(mask);
+	ibp_nid = conn->ibc_peer->ibp_nid;
+	off = do_div(ibp_nid, cpumask_weight(mask));
 	for_each_cpu(i, mask) {
 		if (off-- == 0)
 			return i % vectors;
diff --git a/lnet/lnet/net_fault.c b/lnet/lnet/net_fault.c
index 661cc5d..e3b9f7f 100644
--- a/lnet/lnet/net_fault.c
+++ b/lnet/lnet/net_fault.c
@@ -174,7 +174,8 @@ lnet_drop_rule_add(struct lnet_fault_attr *attr)
 		rule->dr_drop_time = cfs_time_shift(cfs_rand() %
						    attr->u.drop.da_interval);
 	} else {
-		rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
+		__u64 tmp = cfs_rand();
+		rule->dr_drop_at = do_div(tmp, attr->u.drop.da_rate);
 	}
 
 	lnet_net_lock(LNET_LOCK_EX);
@@ -281,10 +282,12 @@ lnet_drop_rule_reset(void)
 
 		memset(&rule->dr_stat, 0, sizeof(rule->dr_stat));
 		if (attr->u.drop.da_rate != 0) {
-			rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
+			__u64 tmp = cfs_rand();
+			rule->dr_drop_at = do_div(tmp, attr->u.drop.da_rate);
 		} else {
-			rule->dr_drop_time = cfs_time_shift(cfs_rand() %
-					     attr->u.drop.da_interval);
+			__u64 tmp = cfs_rand();
+			rule->dr_drop_time = cfs_time_shift(do_div(tmp,
+					     attr->u.drop.da_interval));
 			rule->dr_time_base = cfs_time_shift(attr->u.drop.
 							    da_interval);
 		}
@@ -334,11 +337,14 @@ drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
 		}
 
 	} else { /* rate based drop */
+		__u64 count;
 		drop = rule->dr_stat.fs_count++ == rule->dr_drop_at;
 
-		if (rule->dr_stat.fs_count % attr->u.drop.da_rate == 0) {
+		count = rule->dr_stat.fs_count;
+		if (do_div(count, attr->u.drop.da_rate) == 0) {
+			__u64 tmp = cfs_rand();
 			rule->dr_drop_at = rule->dr_stat.fs_count +
-					   cfs_rand() % attr->u.drop.da_rate;
+					   do_div(tmp, attr->u.drop.da_rate);
 			CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
			       libcfs_nid2str(attr->fa_src),
			       libcfs_nid2str(attr->fa_dst), rule->dr_drop_at);
@@ -502,11 +508,14 @@ delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src,
 		}
 
 	} else { /* rate based delay */
+		__u64 count;
 		delay = rule->dl_stat.fs_count++ == rule->dl_delay_at;
 		/* generate the next random rate sequence */
-		if (rule->dl_stat.fs_count % attr->u.delay.la_rate == 0) {
+		count = rule->dl_stat.fs_count;
+		if (do_div(count, attr->u.delay.la_rate) == 0) {
+			__u64 tmp = cfs_rand();
 			rule->dl_delay_at = rule->dl_stat.fs_count +
-					    cfs_rand() % attr->u.delay.la_rate;
+					    do_div(tmp, attr->u.delay.la_rate);
 			CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n",
			       libcfs_nid2str(attr->fa_src),
			       libcfs_nid2str(attr->fa_dst), rule->dl_delay_at);
@@ -782,7 +791,8 @@ lnet_delay_rule_add(struct lnet_fault_attr *attr)
 		rule->dl_delay_time = cfs_time_shift(cfs_rand() %
						     attr->u.delay.la_interval);
 	} else {
-		rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
+		__u64 tmp = cfs_rand();
+		rule->dl_delay_at = do_div(tmp, attr->u.delay.la_rate);
 	}
 
 	rule->dl_msg_send = -1;
@@ -929,10 +939,12 @@ lnet_delay_rule_reset(void)
 
 		memset(&rule->dl_stat, 0, sizeof(rule->dl_stat));
 		if (attr->u.delay.la_rate != 0) {
-			rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
+			__u64 tmp = cfs_rand();
+			rule->dl_delay_at = do_div(tmp, attr->u.delay.la_rate);
 		} else {
-			rule->dl_delay_time = cfs_time_shift(cfs_rand() %
-					      attr->u.delay.la_interval);
+			__u64 tmp = cfs_rand();
+			rule->dl_delay_time = cfs_time_shift(do_div(tmp,
+					      attr->u.delay.la_interval));
 			rule->dl_time_base = cfs_time_shift(attr->u.delay.
 							    la_interval);
 		}
diff --git a/lustre/include/lustre_lmv.h b/lustre/include/lustre_lmv.h
index 6c13fe7..075b12d 100644
--- a/lustre/include/lustre_lmv.h
+++ b/lustre/include/lustre_lmv.h
@@ -117,12 +117,14 @@ static inline unsigned int
 lmv_hash_all_chars(unsigned int count, const char *name, int namelen)
 {
 	unsigned int c = 0;
+	__u64 ctmp;
 	const unsigned char *p = (const unsigned char *)name;
 
 	while (--namelen >= 0)
 		c += p[namelen];
 
-	c = c % count;
+	ctmp = c;
+	c = do_div(ctmp, count);
 
 	return c;
 }
@@ -130,11 +132,11 @@ lmv_hash_all_chars(unsigned int count, const char *name, int namelen)
 static inline unsigned int
 lmv_hash_fnv1a(unsigned int count, const char *name, int namelen)
 {
-	__u64 hash;
+	__u64 hash, htmp;
 
-	hash = lustre_hash_fnv_1a_64(name, namelen);
+	htmp = lustre_hash_fnv_1a_64(name, namelen);
 
-	hash = hash % count;
+	hash = do_div(htmp, count);
 
 	return hash;
 }
diff --git a/lustre/obdclass/llog.c b/lustre/obdclass/llog.c
index 7bdd90f..4ad3875 100644
--- a/lustre/obdclass/llog.c
+++ b/lustre/obdclass/llog.c
@@ -403,7 +403,7 @@ static int llog_process_thread(void *arg)
 	struct llog_process_cat_data	*cd = lpi->lpi_catdata;
 	char				*buf;
 	size_t				chunk_size;
-	__u64				cur_offset;
+	__u64				cur_offset, tmp_offset;
 	int				rc = 0, index = 1, last_index;
 	int				saved_index = 0;
 	int				last_called_index = 0;
@@ -463,7 +463,8 @@ repeat:
		 * The absolute offset of the current chunk is calculated
		 * from cur_offset value and stored in chunk_offset variable.
		 */
-		if (cur_offset % chunk_size != 0) {
+		tmp_offset = cur_offset;
+		if (do_div(tmp_offset, chunk_size) != 0) {
 			partial_chunk = true;
 			chunk_offset = cur_offset & ~(chunk_size - 1);
 		} else {
diff --git a/lustre/obdclass/llog_test.c b/lustre/obdclass/llog_test.c
index 259c83b..c39929c 100644
--- a/lustre/obdclass/llog_test.c
+++ b/lustre/obdclass/llog_test.c
@@ -255,7 +255,8 @@ static int test3_check_n_add_cb(const struct lu_env *env,
 
 	if (lgh->lgh_cur_offset != rec_offset) {
 		/* there can be padding record */
-		if ((lgh->lgh_cur_offset % chunk_size == 0) &&
+		__u64 tmp = lgh->lgh_cur_offset;
+		if ((do_div(tmp, chunk_size) == 0) &&
 		    (lgh->lgh_cur_offset - rec_offset <
 		     rec->lrh_len + LLOG_MIN_REC_SIZE)) {
 			rec_offset = lgh->lgh_cur_offset;
diff --git a/lustre/ptlrpc/nrs_tbf.c b/lustre/ptlrpc/nrs_tbf.c
index 7fb0f4e..8647b03 100644
--- a/lustre/ptlrpc/nrs_tbf.c
+++ b/lustre/ptlrpc/nrs_tbf.c
@@ -289,7 +289,8 @@ nrs_tbf_rule_start(struct ptlrpc_nrs_policy *policy,
 	memcpy(rule->tr_name, start->tc_name, strlen(start->tc_name));
 
 	rule->tr_rpc_rate = start->tc_rpc_rate;
-	rule->tr_nsecs = NSEC_PER_SEC / rule->tr_rpc_rate;
+	rule->tr_nsecs = NSEC_PER_SEC;
+	do_div(rule->tr_nsecs, rule->tr_rpc_rate);
 	rule->tr_depth = tbf_depth;
 	atomic_set(&rule->tr_ref, 1);
 	INIT_LIST_HEAD(&rule->tr_cli_list);
@@ -337,7 +338,8 @@ nrs_tbf_rule_change(struct ptlrpc_nrs_policy *policy,
 		return -ENOENT;
 
 	rule->tr_rpc_rate = change->tc_rpc_rate;
-	rule->tr_nsecs = NSEC_PER_SEC / rule->tr_rpc_rate;
+	rule->tr_nsecs = NSEC_PER_SEC;
+	do_div(rule->tr_nsecs, rule->tr_rpc_rate);
 	rule->tr_generation++;
 
 	nrs_tbf_rule_put(rule);
@@ -1388,7 +1390,7 @@ struct ptlrpc_nrs_request *nrs_tbf_req_get(struct ptlrpc_nrs_policy *policy,
					     nr_u.tbf.tr_list);
 	} else {
 		__u64 now = ktime_to_ns(ktime_get());
-		__u64 passed;
+		__u64 passed, tmp;
 		long ntoken;
 		__u64 deadline;
 
@@ -1396,7 +1398,9 @@ struct ptlrpc_nrs_request *nrs_tbf_req_get(struct ptlrpc_nrs_policy *policy,
			   cli->tc_nsecs;
 		LASSERT(now >= cli->tc_check_time);
 		passed = now - cli->tc_check_time;
-		ntoken = (passed * cli->tc_rpc_rate) / NSEC_PER_SEC;
+		tmp = passed * cli->tc_rpc_rate;
+		do_div(tmp, NSEC_PER_SEC);
+		ntoken = tmp;
 		ntoken += cli->tc_ntoken;
 		if (ntoken > cli->tc_depth)
 			ntoken = cli->tc_depth;
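
Every hunk in this patch applies the same conversion: a '/' or '%' with a 64-bit
dividend is rewritten to use the kernel's do_div() macro, because an open-coded
64-bit division makes 32-bit builds emit calls to the libgcc helpers
__udivdi3/__umoddi3, which the kernel does not provide. A minimal sketch of the
pattern follows, assuming only a kernel build environment (linux/types.h,
asm/div64.h); the helper name div_rem_u64 is illustrative and is not an existing
Lustre/LNet symbol.

#include <linux/types.h>
#include <asm/div64.h>

/*
 * Sketch only, not part of the patch above: do_div() divides its 64-bit
 * first argument in place by a 32-bit divisor and returns the 32-bit
 * remainder, so one call replaces both "n / base" and "n % base".
 */
static inline u32 div_rem_u64(u64 *n, u32 base)
{
	u64 tmp = *n;	/* do_div() modifies its first argument */
	u32 rem;

	rem = do_div(tmp, base);	/* tmp = *n / base, rem = *n % base */
	*n = tmp;
	return rem;
}

The hunks above inline this shape rather than wrap it in a helper: each call
site copies the 64-bit value into a scratch variable and calls do_div() on the
copy, keeping the original value available where it is still needed.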