diff --git a/lnet/lnet/config.c b/lnet/lnet/config.c
index d8e332e..7c9896f 100644
--- a/lnet/lnet/config.c
+++ b/lnet/lnet/config.c
@@ -112,9 +112,8 @@ lnet_ni_free(struct lnet_ni *ni)
         pthread_mutex_destroy(&ni->ni_lock);
 # endif
 #endif
-        for (i = 0; i < LNET_MAX_INTERFACES; i++) {
-                if (ni->ni_interfaces[i] == NULL)
-                        continue;
+        for (i = 0; i < LNET_MAX_INTERFACES &&
+                    ni->ni_interfaces[i] != NULL; i++) {
                 LIBCFS_FREE(ni->ni_interfaces[i],
                             strlen(ni->ni_interfaces[i]) + 1);
         }
@@ -194,7 +193,7 @@ lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, cfs_list_t *nilist)
 }
 
 int
-lnet_parse_networks(cfs_list_t *nilist, char *networks, int *ni_count)
+lnet_parse_networks(struct list_head *nilist, char *networks, int *ni_count)
 {
         struct cfs_expr_list *el = NULL;
         int tokensize = strlen(networks) + 1;
diff --git a/lnet/lnet/peer.c b/lnet/lnet/peer.c
index 66110a0..3d3bc4e 100644
--- a/lnet/lnet/peer.c
+++ b/lnet/lnet/peer.c
@@ -104,27 +104,32 @@ lnet_peer_tables_destroy(void)
 }
 
 static void
-lnet_peer_hash_cleanup_locked(lnet_ni_t *ni, struct list_head *peers,
-                              struct lnet_peer_table *ptable)
+lnet_peer_table_cleanup_locked(lnet_ni_t *ni,
+                               struct lnet_peer_table *ptable)
 {
         lnet_peer_t     *lp;
         lnet_peer_t     *tmp;
-
-        list_for_each_entry_safe(lp, tmp, peers, lp_hashlist) {
-                lp = list_entry(peers->next, lnet_peer_t, lp_hashlist);
-                if (ni != NULL && ni != lp->lp_ni)
-                        continue;
-                list_del_init(&lp->lp_hashlist);
-                /* Lose hash table's ref */
-                ptable->pt_zombies++;
-                lnet_peer_decref_locked(lp);
+        int             i;
+
+        for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
+                list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
+                                         lp_hashlist) {
+                        if (ni != NULL && ni != lp->lp_ni)
+                                continue;
+
+                        list_del_init(&lp->lp_hashlist);
+                        /* Lose hash table's ref */
+                        ptable->pt_zombies++;
+                        lnet_peer_decref_locked(lp);
+                }
         }
 }
 
 static void
-lnet_peer_deathrow_wait_locked(struct lnet_peer_table *ptable, int cpt_locked)
+lnet_peer_table_deathrow_wait_locked(struct lnet_peer_table *ptable,
+                                     int cpt_locked)
 {
-        int     i;
+        int             i;
 
         for (i = 3; ptable->pt_zombies != 0; i++) {
                 lnet_net_unlock(cpt_locked);
@@ -140,17 +145,24 @@ lnet_peer_deathrow_wait_locked(struct lnet_peer_table *ptable, int cpt_locked)
 }
 
 static void
-lnet_peer_del_routes_locked(lnet_ni_t *ni, struct list_head *peers,
-                            int cpt_locked)
+lnet_peer_table_del_rtrs_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable,
+                                int cpt_locked)
 {
         lnet_peer_t     *lp;
         lnet_peer_t     *tmp;
+        lnet_nid_t      lp_nid;
+        int             i;
+
+        for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
+                list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
+                                         lp_hashlist) {
+                        if (ni != lp->lp_ni)
+                                continue;
 
-        list_for_each_entry_safe(lp, tmp, peers, lp_hashlist) {
-                if (ni != lp->lp_ni)
-                        continue;
-                if (lp->lp_rtr_refcount > 0) {
-                        lnet_nid_t lp_nid = lp->lp_nid;
+                        if (lp->lp_rtr_refcount == 0)
+                                continue;
+
+                        lp_nid = lp->lp_nid;
 
                         lnet_net_unlock(cpt_locked);
                         lnet_del_route(LNET_NIDNET(LNET_NID_ANY), lp_nid);
@@ -163,58 +175,40 @@ void
 lnet_peer_tables_cleanup(lnet_ni_t *ni)
 {
         struct lnet_peer_table  *ptable;
+        lnet_peer_t             *lp;
+        LIST_HEAD               (deathrow);
         int                     i;
         int                     j;
 
-        if (ni == NULL) {
-                LASSERT(the_lnet.ln_shutdown);
-        } else {
-                /* If just deleting the peers for a NI, get rid
-                 * of any routes these peers are gateways for. */
-                cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
-                        lnet_net_lock(i);
-
-                        for (j = 0; j < LNET_PEER_HASH_SIZE; j++) {
-                                struct list_head *peers = &ptable->pt_hash[j];
-
-                                lnet_peer_del_routes_locked(ni, peers, i);
-                        }
-
-                        lnet_net_unlock(i);
-                }
+        LASSERT(the_lnet.ln_shutdown || ni != NULL);
+        /* If just deleting the peers for a NI, get rid of any routes these
+         * peers are gateways for. */
+        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
+                lnet_net_lock(i);
+                lnet_peer_table_del_rtrs_locked(ni, ptable, i);
+                lnet_net_unlock(i);
         }
 
         /* Start the process of moving the applicable peers to
          * deathrow. */
         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                 lnet_net_lock(i);
-
-                for (j = 0; j < LNET_PEER_HASH_SIZE; j++) {
-                        struct list_head *peers = &ptable->pt_hash[j];
-
-                        lnet_peer_hash_cleanup_locked(ni, peers, ptable);
-                }
-
+                lnet_peer_table_cleanup_locked(ni, ptable);
                 lnet_net_unlock(i);
         }
 
         /* Cleanup all entries on deathrow. */
         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
-                CFS_LIST_HEAD   (deathrow);
-                lnet_peer_t     *lp;
-
                 lnet_net_lock(i);
-                lnet_peer_deathrow_wait_locked(ptable, i);
-                cfs_list_splice_init(&ptable->pt_deathrow, &deathrow);
-
+                lnet_peer_table_deathrow_wait_locked(ptable, i);
+                list_splice_init(&ptable->pt_deathrow, &deathrow);
                 lnet_net_unlock(i);
+        }
 
-                while (!cfs_list_empty(&deathrow)) {
-                        lp = cfs_list_entry(deathrow.next,
-                                            lnet_peer_t, lp_hashlist);
-                        cfs_list_del(&lp->lp_hashlist);
-                        LIBCFS_FREE(lp, sizeof(*lp));
-                }
+        while (!list_empty(&deathrow)) {
+                lp = list_entry(deathrow.next, lnet_peer_t, lp_hashlist);
+                list_del(&lp->lp_hashlist);
+                LIBCFS_FREE(lp, sizeof(*lp));
         }
 }