-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c |  37
-rw-r--r--  crypto/ctr.c                       | 173
-rw-r--r--  crypto/tcrypt.c                    |   4
-rw-r--r--  crypto/tcrypt.h                    |   1
-rw-r--r--  include/net/xfrm.h                 |   4
-rw-r--r--  include/uapi/linux/snmp.h          |   1
-rw-r--r--  net/xfrm/xfrm_algo.c               |   3
-rw-r--r--  net/xfrm/xfrm_output.c             |   6
-rw-r--r--  net/xfrm/xfrm_proc.c               |   1
-rw-r--r--  net/xfrm/xfrm_state.c              | 131
10 files changed, 185 insertions(+), 176 deletions(-)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 1b9c22b..a0795da 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -40,10 +40,6 @@
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
 
-#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
-#define HAS_CTR
-#endif
-
 #if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
 #define HAS_PCBC
 #endif
@@ -395,12 +391,6 @@ static int ablk_ctr_init(struct crypto_tfm *tfm)
     return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
 }
 
-#ifdef HAS_CTR
-static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
-{
-    return ablk_init_common(tfm, "rfc3686(__driver-ctr-aes-aesni)");
-}
-#endif
 #endif
 
 #ifdef HAS_PCBC
@@ -1158,33 +1148,6 @@ static struct crypto_alg aesni_algs[] = { {
         .maxauthsize    = 16,
     },
 },
-#ifdef HAS_CTR
-}, {
-    .cra_name        = "rfc3686(ctr(aes))",
-    .cra_driver_name    = "rfc3686-ctr-aes-aesni",
-    .cra_priority        = 400,
-    .cra_flags        = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-    .cra_blocksize        = 1,
-    .cra_ctxsize        = sizeof(struct async_helper_ctx),
-    .cra_alignmask        = 0,
-    .cra_type        = &crypto_ablkcipher_type,
-    .cra_module        = THIS_MODULE,
-    .cra_init        = ablk_rfc3686_ctr_init,
-    .cra_exit        = ablk_exit,
-    .cra_u = {
-        .ablkcipher = {
-            .min_keysize = AES_MIN_KEY_SIZE +
-                       CTR_RFC3686_NONCE_SIZE,
-            .max_keysize = AES_MAX_KEY_SIZE +
-                       CTR_RFC3686_NONCE_SIZE,
-            .ivsize         = CTR_RFC3686_IV_SIZE,
-            .setkey         = ablk_set_key,
-            .encrypt     = ablk_encrypt,
-            .decrypt     = ablk_decrypt,
-            .geniv         = "seqiv",
-        },
-    },
-#endif
 #endif
 #ifdef HAS_PCBC
 }, {
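With the generic rfc3686 template converted to an asynchronous block cipher (see crypto/ctr.c below), the AES-NI driver no longer needs its own "rfc3686-ctr-aes-aesni" entry: a request for "rfc3686(ctr(aes))" now resolves to the template stacked on top of whichever ctr(aes) implementation has the highest priority. The following is a minimal kernel-side sketch of using that stacked cipher through the era's ablkcipher API; the module function and key/IV values are hypothetical, not part of this patch.

/*
 * Hypothetical sketch: allocate and drive rfc3686(ctr(aes)), which the
 * generic template now provides on top of the accelerated ctr(aes).
 */
#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/ctr.h>

struct demo_result {
	struct completion completion;
	int err;
};

static void demo_complete(struct crypto_async_request *areq, int err)
{
	struct demo_result *res = areq->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

static int rfc3686_demo(void)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	struct demo_result res;
	/* 16-byte AES key followed by the 4-byte RFC 3686 nonce */
	u8 key[16 + CTR_RFC3686_NONCE_SIZE] = { 0 };
	u8 iv[CTR_RFC3686_IV_SIZE] = { 0 };	/* 8-byte per-request IV */
	u8 buf[64] = { 0 };
	int err;

	tfm = crypto_alloc_ablkcipher("rfc3686(ctr(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&res.completion);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					demo_complete, &res);
	sg_init_one(&sg, buf, sizeof(buf));
	ablkcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

	err = crypto_ablkcipher_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		/* Asynchronous backend: wait for the completion callback. */
		wait_for_completion(&res.completion);
		err = res.err;
	}

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return err;
}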
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 4ca7222..1f2997c 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -12,6 +12,7 @@
 
 #include <crypto/algapi.h>
 #include <crypto/ctr.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -25,10 +26,15 @@ struct crypto_ctr_ctx {
 };
 
 struct crypto_rfc3686_ctx {
-    struct crypto_blkcipher *child;
+    struct crypto_ablkcipher *child;
     u8 nonce[CTR_RFC3686_NONCE_SIZE];
 };
 
+struct crypto_rfc3686_req_ctx {
+    u8 iv[CTR_RFC3686_BLOCK_SIZE];
+    struct ablkcipher_request subreq CRYPTO_MINALIGN_ATTR;
+};
+
 static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key,
                  unsigned int keylen)
 {
@@ -243,11 +249,11 @@ static struct crypto_template crypto_ctr_tmpl = {
     .module = THIS_MODULE,
 };
 
-static int crypto_rfc3686_setkey(struct crypto_tfm *parent, const u8 *key,
-                 unsigned int keylen)
+static int crypto_rfc3686_setkey(struct crypto_ablkcipher *parent,
+                 const u8 *key, unsigned int keylen)
 {
-    struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(parent);
-    struct crypto_blkcipher *child = ctx->child;
+    struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(parent);
+    struct crypto_ablkcipher *child = ctx->child;
     int err;
 
     /* the nonce is stored in bytes at end of key */
@@ -259,59 +265,64 @@ static int crypto_rfc3686_setkey(struct crypto_tfm *parent, const u8 *key,
 
     keylen -= CTR_RFC3686_NONCE_SIZE;
 
-    crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-    crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) &
-                   CRYPTO_TFM_REQ_MASK);
-    err = crypto_blkcipher_setkey(child, key, keylen);
-    crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) &
-                 CRYPTO_TFM_RES_MASK);
+    crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+    crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
+                    CRYPTO_TFM_REQ_MASK);
+    err = crypto_ablkcipher_setkey(child, key, keylen);
+    crypto_ablkcipher_set_flags(parent, crypto_ablkcipher_get_flags(child) &
+                    CRYPTO_TFM_RES_MASK);
 
     return err;
 }
 
-static int crypto_rfc3686_crypt(struct blkcipher_desc *desc,
-                struct scatterlist *dst,
-                struct scatterlist *src, unsigned int nbytes)
+static int crypto_rfc3686_crypt(struct ablkcipher_request *req)
 {
-    struct crypto_blkcipher *tfm = desc->tfm;
-    struct crypto_rfc3686_ctx *ctx = crypto_blkcipher_ctx(tfm);
-    struct crypto_blkcipher *child = ctx->child;
-    unsigned long alignmask = crypto_blkcipher_alignmask(tfm);
-    u8 ivblk[CTR_RFC3686_BLOCK_SIZE + alignmask];
-    u8 *iv = PTR_ALIGN(ivblk + 0, alignmask + 1);
-    u8 *info = desc->info;
-    int err;
+    struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+    struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+    struct crypto_ablkcipher *child = ctx->child;
+    unsigned long align = crypto_ablkcipher_alignmask(tfm);
+    struct crypto_rfc3686_req_ctx *rctx =
+        (void *)PTR_ALIGN((u8 *)ablkcipher_request_ctx(req), align + 1);
+    struct ablkcipher_request *subreq = &rctx->subreq;
+    u8 *iv = rctx->iv;
 
     /* set up counter block */
     memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
-    memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
+    memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->info, CTR_RFC3686_IV_SIZE);
 
     /* initialize counter portion of counter block */
     *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
         cpu_to_be32(1);
 
-    desc->tfm = child;
-    desc->info = iv;
-    err = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
-    desc->tfm = tfm;
-    desc->info = info;
+    ablkcipher_request_set_tfm(subreq, child);
+    ablkcipher_request_set_callback(subreq, req->base.flags,
+                    req->base.complete, req->base.data);
+    ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->nbytes,
+                     iv);
 
-    return err;
+    return crypto_ablkcipher_encrypt(subreq);
 }
 
 static int crypto_rfc3686_init_tfm(struct crypto_tfm *tfm)
 {
     struct crypto_instance *inst = (void *)tfm->__crt_alg;
-    struct crypto_spawn *spawn = crypto_instance_ctx(inst);
+    struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst);
     struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm);
-    struct crypto_blkcipher *cipher;
+    struct crypto_ablkcipher *cipher;
+    unsigned long align;
 
-    cipher = crypto_spawn_blkcipher(spawn);
+    cipher = crypto_spawn_skcipher(spawn);
     if (IS_ERR(cipher))
         return PTR_ERR(cipher);
 
     ctx->child = cipher;
 
+    align = crypto_tfm_alg_alignmask(tfm);
+    align &= ~(crypto_tfm_ctx_alignment() - 1);
+    tfm->crt_ablkcipher.reqsize = align +
+        sizeof(struct crypto_rfc3686_req_ctx) +
+        crypto_ablkcipher_reqsize(cipher);
+
     return 0;
 }
 
@@ -319,74 +330,110 @@ static void crypto_rfc3686_exit_tfm(struct crypto_tfm *tfm)
 {
     struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm);
 
-    crypto_free_blkcipher(ctx->child);
+    crypto_free_ablkcipher(ctx->child);
 }
 
 static struct crypto_instance *crypto_rfc3686_alloc(struct rtattr **tb)
 {
+    struct crypto_attr_type *algt;
     struct crypto_instance *inst;
     struct crypto_alg *alg;
+    struct crypto_skcipher_spawn *spawn;
+    const char *cipher_name;
     int err;
 
-    err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
-    if (err)
+    algt = crypto_get_attr_type(tb);
+    err = PTR_ERR(algt);
+    if (IS_ERR(algt))
         return ERR_PTR(err);
 
-    alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_BLKCIPHER,
-                  CRYPTO_ALG_TYPE_MASK);
-    err = PTR_ERR(alg);
-    if (IS_ERR(alg))
+    if ((algt->type ^ CRYPTO_ALG_TYPE_BLKCIPHER) & algt->mask)
+        return ERR_PTR(-EINVAL);
+
+    cipher_name = crypto_attr_alg_name(tb[1]);
+    err = PTR_ERR(cipher_name);
+    if (IS_ERR(cipher_name))
         return ERR_PTR(err);
 
+    inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+    if (!inst)
+        return ERR_PTR(-ENOMEM);
+
+    spawn = crypto_instance_ctx(inst);
+
+    crypto_set_skcipher_spawn(spawn, inst);
+    err = crypto_grab_skcipher(spawn, cipher_name, 0,
+                   crypto_requires_sync(algt->type,
+                            algt->mask));
+    if (err)
+        goto err_free_inst;
+
+    alg = crypto_skcipher_spawn_alg(spawn);
+
     /* We only support 16-byte blocks. */
     err = -EINVAL;
-    if (alg->cra_blkcipher.ivsize != CTR_RFC3686_BLOCK_SIZE)
-        goto out_put_alg;
+    if (alg->cra_ablkcipher.ivsize != CTR_RFC3686_BLOCK_SIZE)
+        goto err_drop_spawn;
 
     /* Not a stream cipher? */
     if (alg->cra_blocksize != 1)
-        goto out_put_alg;
+        goto err_drop_spawn;
 
-    inst = crypto_alloc_instance("rfc3686", alg);
-    if (IS_ERR(inst))
-        goto out;
+    err = -ENAMETOOLONG;
+    if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "rfc3686(%s)",
+             alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
+        goto err_drop_spawn;
+    if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+             "rfc3686(%s)", alg->cra_driver_name) >=
+            CRYPTO_MAX_ALG_NAME)
+        goto err_drop_spawn;
 
-    inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
     inst->alg.cra_priority = alg->cra_priority;
     inst->alg.cra_blocksize = 1;
     inst->alg.cra_alignmask = alg->cra_alignmask;
-    inst->alg.cra_type = &crypto_blkcipher_type;
 
-    inst->alg.cra_blkcipher.ivsize = CTR_RFC3686_IV_SIZE;
-    inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize
-                          + CTR_RFC3686_NONCE_SIZE;
-    inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize
-                          + CTR_RFC3686_NONCE_SIZE;
+    inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+                  (alg->cra_flags & CRYPTO_ALG_ASYNC);
+    inst->alg.cra_type = &crypto_ablkcipher_type;
+
+    inst->alg.cra_ablkcipher.ivsize = CTR_RFC3686_IV_SIZE;
+    inst->alg.cra_ablkcipher.min_keysize =
+        alg->cra_ablkcipher.min_keysize + CTR_RFC3686_NONCE_SIZE;
+    inst->alg.cra_ablkcipher.max_keysize =
+        alg->cra_ablkcipher.max_keysize + CTR_RFC3686_NONCE_SIZE;
 
-    inst->alg.cra_blkcipher.geniv = "seqiv";
+    inst->alg.cra_ablkcipher.geniv = "seqiv";
+
+    inst->alg.cra_ablkcipher.setkey = crypto_rfc3686_setkey;
+    inst->alg.cra_ablkcipher.encrypt = crypto_rfc3686_crypt;
+    inst->alg.cra_ablkcipher.decrypt = crypto_rfc3686_crypt;
 
     inst->alg.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx);
 
     inst->alg.cra_init = crypto_rfc3686_init_tfm;
     inst->alg.cra_exit = crypto_rfc3686_exit_tfm;
 
-    inst->alg.cra_blkcipher.setkey = crypto_rfc3686_setkey;
-    inst->alg.cra_blkcipher.encrypt = crypto_rfc3686_crypt;
-    inst->alg.cra_blkcipher.decrypt = crypto_rfc3686_crypt;
-
-out:
-    crypto_mod_put(alg);
     return inst;
 
-out_put_alg:
-    inst = ERR_PTR(err);
-    goto out;
+err_drop_spawn:
+    crypto_drop_skcipher(spawn);
+err_free_inst:
+    kfree(inst);
+    return ERR_PTR(err);
+}
+
+static void crypto_rfc3686_free(struct crypto_instance *inst)
+{
+    struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst);
+
+    crypto_drop_skcipher(spawn);
+    kfree(inst);
 }
 
 static struct crypto_template crypto_rfc3686_tmpl = {
     .name = "rfc3686",
     .alloc = crypto_rfc3686_alloc,
-    .free = crypto_ctr_free,
+    .free = crypto_rfc3686_free,
     .module = THIS_MODULE,
 };
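Although the request handling changes, the rewritten crypto_rfc3686_crypt() still assembles the same 16-byte counter block defined by RFC 3686: the 4-byte nonce taken from the tail of the key material, the 8-byte per-request IV, and a 32-bit big-endian block counter that starts at 1. The following standalone sketch illustrates just that layout; the constants mirror the CTR_RFC3686_* values, and the key/IV bytes are made up for the example.

/*
 * Standalone illustration of the RFC 3686 counter-block layout built by
 * crypto_rfc3686_crypt(): nonce (4) || IV (8) || counter (4, big endian,
 * starting at 1).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* htonl() */

#define RFC3686_NONCE_SIZE	4
#define RFC3686_IV_SIZE		8
#define RFC3686_BLOCK_SIZE	16

static void rfc3686_init_ctrblk(uint8_t ctrblk[RFC3686_BLOCK_SIZE],
				const uint8_t *key, size_t keylen,
				const uint8_t iv[RFC3686_IV_SIZE])
{
	/* The nonce is stored in the last 4 bytes of the key material. */
	const uint8_t *nonce = key + keylen - RFC3686_NONCE_SIZE;
	uint32_t ctr = htonl(1);	/* block counter starts at 1 */

	memcpy(ctrblk, nonce, RFC3686_NONCE_SIZE);
	memcpy(ctrblk + RFC3686_NONCE_SIZE, iv, RFC3686_IV_SIZE);
	memcpy(ctrblk + RFC3686_NONCE_SIZE + RFC3686_IV_SIZE, &ctr, sizeof(ctr));
}

int main(void)
{
	/* 16-byte AES key followed by a 4-byte nonce (example values). */
	uint8_t key[16 + RFC3686_NONCE_SIZE] = { [16] = 0xde, 0xad, 0xbe, 0xef };
	uint8_t iv[RFC3686_IV_SIZE] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint8_t ctrblk[RFC3686_BLOCK_SIZE];
	int i;

	rfc3686_init_ctrblk(ctrblk, key, sizeof(key), iv);
	for (i = 0; i < RFC3686_BLOCK_SIZE; i++)
		printf("%02x", ctrblk[i]);
	printf("\n");	/* prints deadbeef 0102030405060708 00000001 */
	return 0;
}

This is also why the new tcrypt speed_template_20_28_36 uses key sizes of 20, 28 and 36 bytes: each is the corresponding AES key size (16, 24, 32) plus the 4-byte nonce appended to the key.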
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 7ae2130..87ef7d6 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1591,6 +1591,10 @@ static int do_test(int m)
                    speed_template_16_24_32);
         test_acipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0,
                    speed_template_16_24_32);
+        test_acipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL, 0,
+                   speed_template_20_28_36);
+        test_acipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL, 0,
+                   speed_template_20_28_36);
         break;
 
     case 501:
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index cd20685..ecdeeb1 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -51,6 +51,7 @@ static u8 speed_template_8_16[] = {8, 16, 0};
 static u8 speed_template_8_32[] = {8, 32, 0};
 static u8 speed_template_16_32[] = {16, 32, 0};
 static u8 speed_template_16_24_32[] = {16, 24, 32, 0};
+static u8 speed_template_20_28_36[] = {20, 28, 36, 0};
 static u8 speed_template_32_40_48[] = {32, 40, 48, 0};
 static u8 speed_template_32_48[] = {32, 48, 0};
 static u8 speed_template_32_48_64[] = {32, 48, 64, 0};
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 63445ed..421f764 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -557,10 +557,6 @@ struct xfrm_migrate {
 };
 
 #define XFRM_KM_TIMEOUT                30
-/* which seqno */
-#define XFRM_REPLAY_SEQ                1
-#define XFRM_REPLAY_OSEQ       2
-#define XFRM_REPLAY_SEQ_MASK   3
 /* what happened */
 #define XFRM_REPLAY_UPDATE     XFRM_AE_CR
 #define XFRM_REPLAY_TIMEOUT    XFRM_AE_CE
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index fdfba23..b49eab8 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -278,6 +278,7 @@ enum
     LINUX_MIB_XFRMOUTPOLDEAD,        /* XfrmOutPolDead */
     LINUX_MIB_XFRMOUTPOLERROR,        /* XfrmOutPolError */
     LINUX_MIB_XFRMFWDHDRERROR,        /* XfrmFwdHdrError*/
+    LINUX_MIB_XFRMOUTSTATEINVALID,        /* XfrmOutStateInvalid */
     __LINUX_MIB_XFRMMAX
 };
 
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 4ce2d93..f9a5495 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -700,8 +700,7 @@ void xfrm_probe_algs(void)
     }
 
     for (i = 0; i < ealg_entries(); i++) {
-        status = crypto_has_blkcipher(ealg_list[i].name, 0,
-                          CRYPTO_ALG_ASYNC);
+        status = crypto_has_ablkcipher(ealg_list[i].name, 0, 0);
         if (ealg_list[i].available != status)
             ealg_list[i].available = status;
     }
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 95a338c..3670526 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -61,6 +61,12 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
         }
 
         spin_lock_bh(&x->lock);
+
+        if (unlikely(x->km.state != XFRM_STATE_VALID)) {
+            XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
+            goto error_nolock;
+        }
+
         err = xfrm_state_check_expire(x);
         if (err) {
             XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index d0a1af8..6039038 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -43,6 +43,7 @@ static const struct snmp_mib xfrm_mib_list[] = {
     SNMP_MIB_ITEM("XfrmOutPolDead", LINUX_MIB_XFRMOUTPOLDEAD),
     SNMP_MIB_ITEM("XfrmOutPolError", LINUX_MIB_XFRMOUTPOLERROR),
     SNMP_MIB_ITEM("XfrmFwdHdrError", LINUX_MIB_XFRMFWDHDRERROR),
+    SNMP_MIB_ITEM("XfrmOutStateInvalid", LINUX_MIB_XFRMOUTSTATEINVALID),
     SNMP_MIB_SENTINEL
 };
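The output path now checks the state validity itself (since the same check is being removed from xfrm_state_check_expire() in xfrm_state.c below) and counts failures in the new XfrmOutStateInvalid MIB entry, which xfrm_proc.c exports alongside the other XFRM counters. A small user-space sketch of reading that counter follows; it assumes a kernel built with CONFIG_XFRM_STATISTICS, where the counters appear in /proc/net/xfrm_stat.

/*
 * Illustrative reader for the new counter; assumes CONFIG_XFRM_STATISTICS
 * is enabled so that /proc/net/xfrm_stat exists.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/net/xfrm_stat", "r");
	char name[64];
	unsigned long long val;

	if (!f) {
		perror("/proc/net/xfrm_stat");
		return 1;
	}
	/* Each line is "<CounterName> <value>". */
	while (fscanf(f, "%63s %llu", name, &val) == 2) {
		if (!strcmp(name, "XfrmOutStateInvalid"))
			printf("XfrmOutStateInvalid: %llu\n", val);
	}
	fclose(f);
	return 0;
}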
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 3459692..0adae91 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -158,8 +158,8 @@ out_unlock:
     mutex_unlock(&hash_resize_mutex);
 }
 
-static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
-static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
+static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
+static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];
 
 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
 
@@ -168,58 +168,45 @@ int __xfrm_state_delete(struct xfrm_state *x);
 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
 
-static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
-{
-    struct xfrm_state_afinfo *afinfo;
-    if (unlikely(family >= NPROTO))
-        return NULL;
-    write_lock_bh(&xfrm_state_afinfo_lock);
-    afinfo = xfrm_state_afinfo[family];
-    if (unlikely(!afinfo))
-        write_unlock_bh(&xfrm_state_afinfo_lock);
-    return afinfo;
-}
-
-static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
-    __releases(xfrm_state_afinfo_lock)
-{
-    write_unlock_bh(&xfrm_state_afinfo_lock);
-}
-
+static DEFINE_SPINLOCK(xfrm_type_lock);
 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
 {
-    struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
+    struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
     const struct xfrm_type **typemap;
     int err = 0;
 
     if (unlikely(afinfo == NULL))
         return -EAFNOSUPPORT;
     typemap = afinfo->type_map;
+    spin_lock_bh(&xfrm_type_lock);
 
     if (likely(typemap[type->proto] == NULL))
         typemap[type->proto] = type;
     else
         err = -EEXIST;
-    xfrm_state_unlock_afinfo(afinfo);
+    spin_unlock_bh(&xfrm_type_lock);
+    xfrm_state_put_afinfo(afinfo);
     return err;
 }
 EXPORT_SYMBOL(xfrm_register_type);
 
 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
 {
-    struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
+    struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
     const struct xfrm_type **typemap;
     int err = 0;
 
     if (unlikely(afinfo == NULL))
         return -EAFNOSUPPORT;
     typemap = afinfo->type_map;
+    spin_lock_bh(&xfrm_type_lock);
 
     if (unlikely(typemap[type->proto] != type))
         err = -ENOENT;
     else
         typemap[type->proto] = NULL;
-    xfrm_state_unlock_afinfo(afinfo);
+    spin_unlock_bh(&xfrm_type_lock);
+    xfrm_state_put_afinfo(afinfo);
     return err;
 }
 EXPORT_SYMBOL(xfrm_unregister_type);
@@ -256,6 +243,7 @@ static void xfrm_put_type(const struct xfrm_type *type)
     module_put(type->owner);
 }
 
+static DEFINE_SPINLOCK(xfrm_mode_lock);
 int xfrm_register_mode(struct xfrm_mode *mode, int family)
 {
     struct xfrm_state_afinfo *afinfo;
@@ -265,12 +253,13 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
     if (unlikely(mode->encap >= XFRM_MODE_MAX))
         return -EINVAL;
 
-    afinfo = xfrm_state_lock_afinfo(family);
+    afinfo = xfrm_state_get_afinfo(family);
     if (unlikely(afinfo == NULL))
         return -EAFNOSUPPORT;
 
     err = -EEXIST;
     modemap = afinfo->mode_map;
+    spin_lock_bh(&xfrm_mode_lock);
     if (modemap[mode->encap])
         goto out;
 
@@ -283,7 +272,8 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
     err = 0;
 
 out:
-    xfrm_state_unlock_afinfo(afinfo);
+    spin_unlock_bh(&xfrm_mode_lock);
+    xfrm_state_put_afinfo(afinfo);
     return err;
 }
 EXPORT_SYMBOL(xfrm_register_mode);
@@ -297,19 +287,21 @@ int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
     if (unlikely(mode->encap >= XFRM_MODE_MAX))
         return -EINVAL;
 
-    afinfo = xfrm_state_lock_afinfo(family);
+    afinfo = xfrm_state_get_afinfo(family);
     if (unlikely(afinfo == NULL))
         return -EAFNOSUPPORT;
 
     err = -ENOENT;
     modemap = afinfo->mode_map;
+    spin_lock_bh(&xfrm_mode_lock);
     if (likely(modemap[mode->encap] == mode)) {
         modemap[mode->encap] = NULL;
         module_put(mode->afinfo->owner);
         err = 0;
     }
 
-    xfrm_state_unlock_afinfo(afinfo);
+    spin_unlock_bh(&xfrm_mode_lock);
+    xfrm_state_put_afinfo(afinfo);
     return err;
 }
 EXPORT_SYMBOL(xfrm_unregister_mode);
@@ -1370,9 +1362,6 @@ int xfrm_state_check_expire(struct xfrm_state *x)
     if (!x->curlft.use_time)
         x->curlft.use_time = get_seconds();
 
-    if (x->km.state != XFRM_STATE_VALID)
-        return -EINVAL;
-
     if (x->curlft.bytes >= x->lft.hard_byte_limit ||
         x->curlft.packets >= x->lft.hard_packet_limit) {
         x->km.state = XFRM_STATE_EXPIRED;
@@ -1648,27 +1637,26 @@ static void xfrm_replay_timer_handler(unsigned long data)
 }
 
 static LIST_HEAD(xfrm_km_list);
-static DEFINE_RWLOCK(xfrm_km_lock);
 
 void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
 {
     struct xfrm_mgr *km;
 
-    read_lock(&xfrm_km_lock);
-    list_for_each_entry(km, &xfrm_km_list, list)
+    rcu_read_lock();
+    list_for_each_entry_rcu(km, &xfrm_km_list, list)
         if (km->notify_policy)
             km->notify_policy(xp, dir, c);
-    read_unlock(&xfrm_km_lock);
+    rcu_read_unlock();
 }
 
 void km_state_notify(struct xfrm_state *x, const struct km_event *c)
 {
     struct xfrm_mgr *km;
-    read_lock(&xfrm_km_lock);
-    list_for_each_entry(km, &xfrm_km_list, list)
+    rcu_read_lock();
+    list_for_each_entry_rcu(km, &xfrm_km_list, list)
         if (km->notify)
             km->notify(x, c);
-    read_unlock(&xfrm_km_lock);
+    rcu_read_unlock();
 }
 
 EXPORT_SYMBOL(km_policy_notify);
@@ -1698,13 +1686,13 @@ int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
     int err = -EINVAL, acqret;
     struct xfrm_mgr *km;
 
-    read_lock(&xfrm_km_lock);
-    list_for_each_entry(km, &xfrm_km_list, list) {
+    rcu_read_lock();
+    list_for_each_entry_rcu(km, &xfrm_km_list, list) {
         acqret = km->acquire(x, t, pol);
         if (!acqret)
             err = acqret;
     }
-    read_unlock(&xfrm_km_lock);
+    rcu_read_unlock();
     return err;
 }
 EXPORT_SYMBOL(km_query);
@@ -1714,14 +1702,14 @@ int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
     int err = -EINVAL;
     struct xfrm_mgr *km;
 
-    read_lock(&xfrm_km_lock);
-    list_for_each_entry(km, &xfrm_km_list, list) {
+    rcu_read_lock();
+    list_for_each_entry_rcu(km, &xfrm_km_list, list) {
         if (km->new_mapping)
             err = km->new_mapping(x, ipaddr, sport);
         if (!err)
             break;
     }
-    read_unlock(&xfrm_km_lock);
+    rcu_read_unlock();
     return err;
 }
 EXPORT_SYMBOL(km_new_mapping);
@@ -1750,15 +1738,15 @@ int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
     int ret;
     struct xfrm_mgr *km;
 
-    read_lock(&xfrm_km_lock);
-    list_for_each_entry(km, &xfrm_km_list, list) {
+    rcu_read_lock();
+    list_for_each_entry_rcu(km, &xfrm_km_list, list) {
         if (km->migrate) {
             ret = km->migrate(sel, dir, type, m, num_migrate, k);
             if (!ret)
                 err = ret;
         }
     }
-    read_unlock(&xfrm_km_lock);
+    rcu_read_unlock();
     return err;
 }
 EXPORT_SYMBOL(km_migrate);
@@ -1770,15 +1758,15 @@ int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address
     int ret;
     struct xfrm_mgr *km;
 
-    read_lock(&xfrm_km_lock);
-    list_for_each_entry(km, &xfrm_km_list, list) {
+    rcu_read_lock();
+    list_for_each_entry_rcu(km, &xfrm_km_list, list) {
         if (km->report) {
             ret = km->report(net, proto, sel, addr);
             if (!ret)
                 err = ret;
         }
     }
-    read_unlock(&xfrm_km_lock);
+    rcu_read_unlock();
     return err;
 }
 EXPORT_SYMBOL(km_report);
@@ -1802,14 +1790,14 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
         goto out;
 
     err = -EINVAL;
-    read_lock(&xfrm_km_lock);
-    list_for_each_entry(km, &xfrm_km_list, list) {
+    rcu_read_lock();
+    list_for_each_entry_rcu(km, &xfrm_km_list, list) {
         pol = km->compile_policy(sk, optname, data,
                      optlen, &err);
         if (err >= 0)
             break;
     }
-    read_unlock(&xfrm_km_lock);
+    rcu_read_unlock();
 
     if (err >= 0) {
         xfrm_sk_policy_insert(sk, err, pol);
@@ -1823,20 +1811,23 @@ out:
 }
 EXPORT_SYMBOL(xfrm_user_policy);
 
+static DEFINE_SPINLOCK(xfrm_km_lock);
+
 int xfrm_register_km(struct xfrm_mgr *km)
 {
-    write_lock_bh(&xfrm_km_lock);
-    list_add_tail(&km->list, &xfrm_km_list);
-    write_unlock_bh(&xfrm_km_lock);
+    spin_lock_bh(&xfrm_km_lock);
+    list_add_tail_rcu(&km->list, &xfrm_km_list);
+    spin_unlock_bh(&xfrm_km_lock);
     return 0;
 }
 EXPORT_SYMBOL(xfrm_register_km);
 
 int xfrm_unregister_km(struct xfrm_mgr *km)
 {
-    write_lock_bh(&xfrm_km_lock);
-    list_del(&km->list);
-    write_unlock_bh(&xfrm_km_lock);
+    spin_lock_bh(&xfrm_km_lock);
+    list_del_rcu(&km->list);
+    spin_unlock_bh(&xfrm_km_lock);
+    synchronize_rcu();
     return 0;
 }
 EXPORT_SYMBOL(xfrm_unregister_km);
@@ -1848,12 +1839,12 @@ int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
         return -EINVAL;
     if (unlikely(afinfo->family >= NPROTO))
         return -EAFNOSUPPORT;
-    write_lock_bh(&xfrm_state_afinfo_lock);
+    spin_lock_bh(&xfrm_state_afinfo_lock);
     if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
         err = -ENOBUFS;
     else
-        xfrm_state_afinfo[afinfo->family] = afinfo;
-    write_unlock_bh(&xfrm_state_afinfo_lock);
+        rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
+    spin_unlock_bh(&xfrm_state_afinfo_lock);
     return err;
 }
 EXPORT_SYMBOL(xfrm_state_register_afinfo);
@@ -1865,14 +1856,15 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
         return -EINVAL;
     if (unlikely(afinfo->family >= NPROTO))
         return -EAFNOSUPPORT;
-    write_lock_bh(&xfrm_state_afinfo_lock);
+    spin_lock_bh(&xfrm_state_afinfo_lock);
     if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
         if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
             err = -EINVAL;
         else
-            xfrm_state_afinfo[afinfo->family] = NULL;
+            RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
     }
-    write_unlock_bh(&xfrm_state_afinfo_lock);
+    spin_unlock_bh(&xfrm_state_afinfo_lock);
+    synchronize_rcu();
     return err;
 }
 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
@@ -1882,17 +1874,16 @@ static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
     struct xfrm_state_afinfo *afinfo;
     if (unlikely(family >= NPROTO))
         return NULL;
-    read_lock(&xfrm_state_afinfo_lock);
-    afinfo = xfrm_state_afinfo[family];
+    rcu_read_lock();
+    afinfo = rcu_dereference(xfrm_state_afinfo[family]);
     if (unlikely(!afinfo))
-        read_unlock(&xfrm_state_afinfo_lock);
+        rcu_read_unlock();
     return afinfo;
 }
 
 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
-    __releases(xfrm_state_afinfo_lock)
 {
-    read_unlock(&xfrm_state_afinfo_lock);
+    rcu_read_unlock();
 }
 
 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
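The xfrm_state.c changes follow the standard rwlock-to-RCU conversion pattern: readers swap read_lock()/read_unlock() for rcu_read_lock()/rcu_read_unlock() and the _rcu list iterators, while writers serialize among themselves with a plain spinlock and call synchronize_rcu() before a removed object may be freed or reused. A generic kernel-style sketch of that pattern is shown below; the names are illustrative only and are not taken from this diff.

/*
 * Generic RCU-protected registration list, mirroring the km/afinfo
 * conversion above.  Names here are illustrative only.
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct demo_mgr {
	struct list_head list;
	void (*notify)(int event);
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);	/* serializes writers only */

/* Reader: lockless, may run concurrently with register/unregister. */
static void demo_notify_all(int event)
{
	struct demo_mgr *m;

	rcu_read_lock();
	list_for_each_entry_rcu(m, &demo_list, list)
		m->notify(event);
	rcu_read_unlock();
}

/* Writers: spinlock against each other, RCU list ops against readers. */
static void demo_register(struct demo_mgr *m)
{
	spin_lock_bh(&demo_lock);
	list_add_tail_rcu(&m->list, &demo_list);
	spin_unlock_bh(&demo_lock);
}

static void demo_unregister(struct demo_mgr *m)
{
	spin_lock_bh(&demo_lock);
	list_del_rcu(&m->list);
	spin_unlock_bh(&demo_lock);
	/* Wait for readers that might still see 'm' before it is reused. */
	synchronize_rcu();
}

This is why xfrm_unregister_km() and xfrm_state_unregister_afinfo() now call synchronize_rcu() before returning, and why the per-type and per-mode maps gain their own xfrm_type_lock and xfrm_mode_lock once the old write-side afinfo lock no longer covers them.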