global: replace rwlock with mtx if never rlocked

There were multiple places where a rwlock was used despite never
rlocking, so just change these into mtxs. This was done with the aid of
Coccinelle's spatch, using this input:

    #spatch -j 4 --recursive-includes --include-headers-for-types --include-headers --in-place --macro-file <macros.h>  (macro file contents listed at the end of this message)

    virtual after_start

    @initialize:ocaml@
    @@

    let has_write_table = Hashtbl.create 101
    let has_read_table = Hashtbl.create 101

    let ok i m =
      let entry = (i,m) in
      Hashtbl.mem has_write_table entry && not(Hashtbl.mem has_read_table entry)

    @hasw depends on !after_start@
    identifier i,m;
    struct i x;
    @@

    (
    rw_wlock(&x.m)
    |
    rw_wunlock(&x.m)
    )

    @script:ocaml@
    i << hasw.i;
    m << hasw.m;
    @@
    Hashtbl.replace has_write_table (i,m) ()

    @hasr depends on !after_start@
    identifier i,m;
    struct i x;
    @@

    (
    rw_rlock(&x.m)
    |
    rw_runlock(&x.m)
    )

    @script:ocaml@
    i << hasr.i;
    m << hasr.m;
    @@
    Hashtbl.replace has_read_table (i,m) ()

    @finalize:ocaml depends on !after_start@
    wt << merge.has_write_table;
    rt << merge.has_read_table;
    @@

    let redo ts dst =
      List.iter (Hashtbl.iter (fun k _ -> Hashtbl.add dst k ())) ts in
    redo wt has_write_table;
    redo rt has_read_table;

    let it = new iteration() in
    it#add_virtual_rule After_start;
    it#register()

    (* ----------------------------------------------------------- *)

    @depends on after_start@
    identifier i;
    identifier m : script:ocaml(i) { ok i m };
    @@

    struct i {
      ...
    - struct rwlock m;
    + struct mtx m;
      ...
    }

    @depends on after_start disable fld_to_ptr@
    identifier m;
    identifier i : script:ocaml(m) { ok i m };
    struct i x;
    @@

    - rw_wlock
    + mtx_lock
       (&x.m)

    @depends on after_start disable fld_to_ptr@
    identifier m;
    identifier i : script:ocaml(m) { ok i m };
    struct i x;
    @@

    - rw_wunlock
    + mtx_unlock
       (&x.m)

    @depends on after_start disable fld_to_ptr@
    identifier m;
    expression e;
    identifier i : script:ocaml(m) { ok i m };
    struct i x;
    @@

    - rw_init(&x.m, e);
    + mtx_init(&x.m, e, NULL, MTX_DEF);

    @depends on after_start disable fld_to_ptr@
    identifier m;
    identifier i : script:ocaml(m) { ok i m };
    struct i x;
    @@

    - rw_destroy
    + mtx_destroy
       (&x.m)

    @depends on after_start disable fld_to_ptr, ptr_to_array@
    identifier m;
    identifier i : script:ocaml(m) { ok i m };
    struct i *x;
    @@

    - rw_wlock
    + mtx_lock
       (&x->m)

    @depends on after_start disable fld_to_ptr, ptr_to_array@
    identifier m;
    identifier i : script:ocaml(m) { ok i m };
    struct i *x;
    @@

    - rw_wunlock
    + mtx_unlock
       (&x->m)

    @depends on after_start disable fld_to_ptr, ptr_to_array@
    identifier m;
    expression e;
    identifier i : script:ocaml(m) { ok i m };
    struct i *x;
    @@

    - rw_init(&x->m, e);
    + mtx_init(&x->m, e, NULL, MTX_DEF);

    @depends on after_start disable fld_to_ptr, ptr_to_array@
    identifier m;
    identifier i : script:ocaml(m) { ok i m };
    struct i *x;
    @@

    - rw_destroy
    + mtx_destroy
       (&x->m)

A few macros needed to be provided manually for the parser to work:

    #define LIST_HEAD(x,y) int
    #define TAILQ_HEAD(x,y) int
    #define STAILQ_HEAD(x,y) int
    #define CK_LIST_HEAD(x,y) int
    #define CK_LIST_ENTRY(x) int
    #define LIST_ENTRY(x) int
    #define TAILQ_ENTRY(x) int
    #define STAILQ_ENTRY(x) int

Co-authored-by: Julia Lawall <julia.lawall@inria.fr>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Author: Jason A. Donenfeld <Jason@zx2c4.com>
Date:   2021-06-05 23:15:14 +02:00
Parent: 5158e2c0f4
Commit: 0955fa72f5
Diffstat: 4 changed files with 44 additions and 45 deletions

View File

@ -10,7 +10,6 @@
- Review all included headers, and minimize a bit.
- Figure out clear locking rules for network stack stuff -- when different
functions run under what locks and what they race with.
- There are a few rwlocks that only ever wlock. Make these mtx instead.
### Crypto TODO

View File

@ -47,7 +47,7 @@ struct ratelimit_entry {
struct ratelimit {
uint8_t rl_secret[SIPHASH_KEY_LENGTH];
struct rwlock rl_lock;
struct mtx rl_mtx;
struct callout rl_gc;
LIST_HEAD(, ratelimit_entry) rl_table[RATELIMIT_SIZE];
size_t rl_table_num;
@ -107,14 +107,14 @@ cookie_checker_init(struct cookie_checker *cc)
bzero(cc, sizeof(*cc));
rw_init(&cc->cc_key_lock, "cookie_checker_key");
rw_init(&cc->cc_secret_lock, "cookie_checker_secret");
mtx_init(&cc->cc_secret_mtx, "cookie_checker_secret", NULL, MTX_DEF);
}
void
cookie_checker_free(struct cookie_checker *cc)
{
rw_destroy(&cc->cc_key_lock);
rw_destroy(&cc->cc_secret_lock);
mtx_destroy(&cc->cc_secret_mtx);
explicit_bzero(cc, sizeof(*cc));
}
@ -307,7 +307,7 @@ make_cookie(struct cookie_checker *cc, uint8_t cookie[COOKIE_COOKIE_SIZE],
{
struct blake2s_state state;
rw_wlock(&cc->cc_secret_lock);
mtx_lock(&cc->cc_secret_mtx);
if (timer_expired(cc->cc_secret_birthdate,
COOKIE_SECRET_MAX_AGE, 0)) {
arc4random_buf(cc->cc_secret, COOKIE_SECRET_SIZE);
@ -315,7 +315,7 @@ make_cookie(struct cookie_checker *cc, uint8_t cookie[COOKIE_COOKIE_SIZE],
}
blake2s_init_key(&state, COOKIE_COOKIE_SIZE, cc->cc_secret,
COOKIE_SECRET_SIZE);
rw_wunlock(&cc->cc_secret_lock);
mtx_unlock(&cc->cc_secret_mtx);
if (sa->sa_family == AF_INET) {
blake2s_update(&state, (uint8_t *)&satosin(sa)->sin_addr,
@ -340,8 +340,8 @@ static void
ratelimit_init(struct ratelimit *rl)
{
size_t i;
rw_init(&rl->rl_lock, "ratelimit_lock");
callout_init_rw(&rl->rl_gc, &rl->rl_lock, 0);
mtx_init(&rl->rl_mtx, "ratelimit_lock", NULL, MTX_DEF);
callout_init_rw(&rl->rl_gc, &rl->rl_mtx, 0);
arc4random_buf(rl->rl_secret, sizeof(rl->rl_secret));
for (i = 0; i < RATELIMIT_SIZE; i++)
LIST_INIT(&rl->rl_table[i]);
@ -351,17 +351,17 @@ ratelimit_init(struct ratelimit *rl)
static void
ratelimit_deinit(struct ratelimit *rl)
{
rw_wlock(&rl->rl_lock);
mtx_lock(&rl->rl_mtx);
callout_stop(&rl->rl_gc);
ratelimit_gc(rl, true);
rw_wunlock(&rl->rl_lock);
rw_destroy(&rl->rl_lock);
mtx_unlock(&rl->rl_mtx);
mtx_destroy(&rl->rl_mtx);
}
static void
ratelimit_gc_callout(void *_rl)
{
/* callout will wlock rl_lock for us */
/* callout will lock rl_mtx for us */
ratelimit_gc(_rl, false);
}
@ -386,7 +386,7 @@ ratelimit_gc(struct ratelimit *rl, bool force)
struct ratelimit_entry *r, *tr;
sbintime_t expiry;
rw_assert(&rl->rl_lock, RA_WLOCKED);
mtx_assert(&rl->rl_mtx, MA_OWNED);
if (rl->rl_table_num == 0)
return;
@ -428,7 +428,7 @@ ratelimit_allow(struct ratelimit *rl, struct sockaddr *sa, struct vnet *vnet)
return ret;
bucket = siphash13(rl->rl_secret, &key, len) & RATELIMIT_MASK;
rw_wlock(&rl->rl_lock);
mtx_lock(&rl->rl_mtx);
LIST_FOREACH(r, &rl->rl_table[bucket], r_entry) {
if (bcmp(&r->r_key, &key, len) != 0)
@ -481,7 +481,7 @@ ratelimit_allow(struct ratelimit *rl, struct sockaddr *sa, struct vnet *vnet)
ok:
ret = 0;
error:
rw_wunlock(&rl->rl_lock);
mtx_unlock(&rl->rl_mtx);
return ret;
}

View File

@ -45,7 +45,7 @@ struct cookie_checker {
uint8_t cc_mac1_key[COOKIE_KEY_SIZE];
uint8_t cc_cookie_key[COOKIE_KEY_SIZE];
struct rwlock cc_secret_lock;
struct mtx cc_secret_mtx;
sbintime_t cc_secret_birthdate; /* sbinuptime */
uint8_t cc_secret[COOKIE_SECRET_SIZE];
};

View File

@ -111,7 +111,7 @@ struct noise_remote {
struct noise_local *r_local;
void *r_arg;
struct rwlock r_keypair_lock;
struct mtx r_keypair_mtx;
struct noise_keypair *r_next, *r_current, *r_previous;
struct epoch_context r_smr;
@ -129,11 +129,11 @@ struct noise_local {
void *l_arg;
void (*l_cleanup)(struct noise_local *);
struct rwlock l_remote_lock;
struct mtx l_remote_mtx;
size_t l_remote_num;
CK_LIST_HEAD(,noise_remote) l_remote_hash[HT_REMOTE_SIZE];
struct rwlock l_index_lock;
struct mtx l_index_mtx;
CK_LIST_HEAD(,noise_index) l_index_hash[HT_INDEX_SIZE];
};
@ -195,12 +195,12 @@ noise_local_alloc(void *arg)
l->l_arg = arg;
l->l_cleanup = NULL;
rw_init(&l->l_remote_lock, "noise_remote");
mtx_init(&l->l_remote_mtx, "noise_remote", NULL, MTX_DEF);
l->l_remote_num = 0;
for (i = 0; i < HT_REMOTE_SIZE; i++)
CK_LIST_INIT(&l->l_remote_hash[i]);
rw_init(&l->l_index_lock, "noise_index");
mtx_init(&l->l_index_mtx, "noise_index", NULL, MTX_DEF);
for (i = 0; i < HT_INDEX_SIZE; i++)
CK_LIST_INIT(&l->l_index_hash[i]);
@ -221,8 +221,8 @@ noise_local_put(struct noise_local *l)
if (l->l_cleanup != NULL)
l->l_cleanup(l);
rw_destroy(&l->l_identity_lock);
rw_destroy(&l->l_remote_lock);
rw_destroy(&l->l_index_lock);
mtx_destroy(&l->l_remote_mtx);
mtx_destroy(&l->l_index_mtx);
explicit_bzero(l, sizeof(*l));
free(l, M_NOISE);
}
@ -311,7 +311,7 @@ noise_remote_alloc(struct noise_local *l, void *arg,
r->r_local = noise_local_ref(l);
r->r_arg = arg;
rw_init(&r->r_keypair_lock, "noise_keypair");
mtx_init(&r->r_keypair_mtx, "noise_keypair", NULL, MTX_DEF);
return (r);
}
@ -326,7 +326,7 @@ noise_remote_enable(struct noise_remote *r)
/* Insert to hashtable */
idx = siphash24(l->l_hash_key, r->r_public, NOISE_PUBLIC_KEY_LEN) & HT_REMOTE_MASK;
rw_wlock(&l->l_remote_lock);
mtx_lock(&l->l_remote_mtx);
if (!r->r_entry_inserted) {
if (l->l_remote_num < MAX_REMOTE_PER_LOCAL) {
r->r_entry_inserted = true;
@ -336,7 +336,7 @@ noise_remote_enable(struct noise_remote *r)
ret = ENOSPC;
}
}
rw_wunlock(&l->l_remote_lock);
mtx_unlock(&l->l_remote_mtx);
return ret;
}
@ -346,13 +346,13 @@ noise_remote_disable(struct noise_remote *r)
{
struct noise_local *l = r->r_local;
/* remove from hashtable */
rw_wlock(&l->l_remote_lock);
mtx_lock(&l->l_remote_mtx);
if (r->r_entry_inserted) {
r->r_entry_inserted = false;
CK_LIST_REMOVE(r, r_entry);
l->l_remote_num--;
};
rw_wunlock(&l->l_remote_lock);
mtx_unlock(&l->l_remote_mtx);
}
struct noise_remote *
@ -394,15 +394,15 @@ assign_id:
goto assign_id;
}
rw_wlock(&l->l_index_lock);
mtx_lock(&l->l_index_mtx);
CK_LIST_FOREACH(i, &l->l_index_hash[idx], i_entry) {
if (i->i_local_index == r_i->i_local_index) {
rw_wunlock(&l->l_index_lock);
mtx_unlock(&l->l_index_mtx);
goto assign_id;
}
}
CK_LIST_INSERT_HEAD(&l->l_index_hash[idx], r_i, i_entry);
rw_wunlock(&l->l_index_lock);
mtx_unlock(&l->l_index_mtx);
NET_EPOCH_EXIT(et);
}
@ -447,10 +447,10 @@ noise_remote_index_remove(struct noise_local *l, struct noise_remote *r)
{
rw_assert(&r->r_handshake_lock, RA_WLOCKED);
if (r->r_handshake_state != HANDSHAKE_DEAD) {
rw_wlock(&l->l_index_lock);
mtx_lock(&l->l_index_mtx);
r->r_handshake_state = HANDSHAKE_DEAD;
CK_LIST_REMOVE(&r->r_index, i_entry);
rw_wunlock(&l->l_index_lock);
mtx_unlock(&l->l_index_mtx);
return (1);
}
return (0);
@ -472,7 +472,7 @@ noise_remote_smr_free(struct epoch_context *smr)
r->r_cleanup(r);
noise_local_put(r->r_local);
rw_destroy(&r->r_handshake_lock);
rw_destroy(&r->r_keypair_lock);
mtx_destroy(&r->r_keypair_mtx);
explicit_bzero(r, sizeof(*r));
free(r, M_NOISE);
}
@ -564,7 +564,7 @@ noise_remote_keypairs_clear(struct noise_remote *r)
{
struct noise_keypair *kp;
rw_wlock(&r->r_keypair_lock);
mtx_lock(&r->r_keypair_mtx);
kp = ck_pr_load_ptr(&r->r_next);
ck_pr_store_ptr(&r->r_next, NULL);
noise_keypair_drop(kp);
@ -576,7 +576,7 @@ noise_remote_keypairs_clear(struct noise_remote *r)
kp = ck_pr_load_ptr(&r->r_previous);
ck_pr_store_ptr(&r->r_previous, NULL);
noise_keypair_drop(kp);
rw_wunlock(&r->r_keypair_lock);
mtx_unlock(&r->r_keypair_mtx);
}
static void
@ -606,7 +606,7 @@ noise_add_new_keypair(struct noise_local *l, struct noise_remote *r,
struct noise_index *r_i = &r->r_index;
/* Insert into the keypair table */
rw_wlock(&r->r_keypair_lock);
mtx_lock(&r->r_keypair_mtx);
next = ck_pr_load_ptr(&r->r_next);
current = ck_pr_load_ptr(&r->r_current);
previous = ck_pr_load_ptr(&r->r_previous);
@ -628,7 +628,7 @@ noise_add_new_keypair(struct noise_local *l, struct noise_remote *r,
noise_keypair_drop(previous);
}
rw_wunlock(&r->r_keypair_lock);
mtx_unlock(&r->r_keypair_mtx);
/* Insert into index table */
rw_assert(&r->r_handshake_lock, RA_WLOCKED);
@ -637,11 +637,11 @@ noise_add_new_keypair(struct noise_local *l, struct noise_remote *r,
kp->kp_index.i_local_index = r_i->i_local_index;
kp->kp_index.i_remote_index = r_i->i_remote_index;
rw_wlock(&l->l_index_lock);
mtx_lock(&l->l_index_mtx);
CK_LIST_INSERT_BEFORE(r_i, &kp->kp_index, i_entry);
r->r_handshake_state = HANDSHAKE_DEAD;
CK_LIST_REMOVE(r_i, i_entry);
rw_wunlock(&l->l_index_lock);
mtx_unlock(&l->l_index_mtx);
explicit_bzero(&r->r_handshake, sizeof(r->r_handshake));
}
@ -732,9 +732,9 @@ noise_keypair_received_with(struct noise_keypair *kp)
if (kp != ck_pr_load_ptr(&r->r_next))
return (0);
rw_wlock(&r->r_keypair_lock);
mtx_lock(&r->r_keypair_mtx);
if (kp != ck_pr_load_ptr(&r->r_next)) {
rw_wunlock(&r->r_keypair_lock);
mtx_unlock(&r->r_keypair_mtx);
return (0);
}
@ -743,7 +743,7 @@ noise_keypair_received_with(struct noise_keypair *kp)
noise_keypair_drop(old);
ck_pr_store_ptr(&r->r_current, kp);
ck_pr_store_ptr(&r->r_next, NULL);
rw_wunlock(&r->r_keypair_lock);
mtx_unlock(&r->r_keypair_mtx);
return (ECONNRESET);
}
@ -778,9 +778,9 @@ noise_keypair_drop(struct noise_keypair *kp)
r = kp->kp_remote;
l = r->r_local;
rw_wlock(&l->l_index_lock);
mtx_lock(&l->l_index_mtx);
CK_LIST_REMOVE(&kp->kp_index, i_entry);
rw_wunlock(&l->l_index_lock);
mtx_unlock(&l->l_index_mtx);
noise_keypair_put(kp);
}