Mirror of https://github.com/strongswan/strongswan.git
Merge branch 'ref-overflows'
Different users in the strongSwan code base use the refcount helpers to allocate incrementing unique values. So far, the risk of overflowing these unsigned 32-bit values has been considered mostly theoretical, as it takes a long uptime and a lot of activity to hit such an overflow.

At least for the Netlink sequence numbers this is not only theoretical, though, and an overflow has been hit on a production setup. Unfortunately, the consequences are rather unpleasant: the response with a zero sequence number can't be matched to its request, so the offending thread blocks indefinitely while holding the Netlink mutex.

So add a helper that allocates incrementing unique identifiers, checks for overflows and never returns 0. Use it for Netlink sequence numbers and for the other potentially affected users, namely those allocating IKE_SA/CHILD_SA unique identifiers, marks and interface identifiers.

Closes strongswan/strongswan#2062
commit b940ce25e9
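To make the failure mode concrete, here is a minimal, self-contained sketch (not strongSwan code; the counter and function names are made up for illustration) of how an unsigned 32-bit ID counter wraps around to 0, and how retrying once on overflow keeps the reserved zero value from being handed out, which is what the new ref_get_nonzero() helper in the diff below does atomically:

#include <stdint.h>
#include <stdio.h>

/* naive allocator: wraps to 0 after 2^32 - 1 allocations */
static uint32_t next_id(uint32_t *counter)
{
	return ++*counter;
}

/* overflow-aware variant: if the increment wrapped to 0, increment once
 * more, so the reserved value 0 is never handed out */
static uint32_t next_id_nonzero(uint32_t *counter)
{
	uint32_t id = ++*counter;

	if (id == 0)
	{
		id = ++*counter;
	}
	return id;
}

int main(void)
{
	uint32_t seq = 0xffffffff;

	printf("naive:   %u\n", next_id(&seq));         /* prints 0: request can't be matched */
	seq = 0xffffffff;
	printf("nonzero: %u\n", next_id_nonzero(&seq)); /* prints 1 */
	return 0;
}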
@@ -514,7 +514,7 @@ METHOD(netlink_socket_t, netlink_send, status_t,
 	uintptr_t seq;
 	u_int try;

-	seq = ref_get(&this->seq);
+	seq = ref_get_nonzero(&this->seq);

 	for (try = 0; try <= this->retries; ++try)
 	{
@@ -694,7 +694,6 @@ netlink_socket_t *netlink_socket_create(int protocol, enum_name_t *names,
 			.send_ack = _netlink_send_ack,
 			.destroy = _destroy,
 		},
-		.seq = 200,
 		.mutex = mutex_create(MUTEX_TYPE_RECURSIVE),
 		.socket = socket(AF_NETLINK, SOCK_RAW, protocol),
 		.entries = hashtable_create(hashtable_hash_ptr, hashtable_equals_ptr, 4),
@@ -2038,7 +2038,7 @@ child_sa_t *child_sa_create(host_t *me, host_t *other, child_cfg_t *config,
 						   child_sa_create_t *data)
 {
 	private_child_sa_t *this;
-	static refcount_t unique_id = 0, unique_mark = 0;
+	static refcount_t unique_id = 0;

 	INIT(this,
 		.public = {
@@ -2096,7 +2096,7 @@ child_sa_t *child_sa_create(host_t *me, host_t *other, child_cfg_t *config,
 		.close_action = config->get_close_action(config),
 		.dpd_action = config->get_dpd_action(config),
 		.reqid = config->get_reqid(config),
-		.unique_id = ref_get(&unique_id),
+		.unique_id = ref_get_nonzero(&unique_id),
 		.mark_in = config->get_mark(config, TRUE),
 		.mark_out = config->get_mark(config, FALSE),
 		.if_id_in = config->get_if_id(config, TRUE) ?: data->if_id_in_def,
@@ -2127,27 +2127,7 @@ child_sa_t *child_sa_create(host_t *me, host_t *other, child_cfg_t *config,
 	}

 	allocate_unique_if_ids(&this->if_id_in, &this->if_id_out);
-
-	if (MARK_IS_UNIQUE(this->mark_in.value) ||
-		MARK_IS_UNIQUE(this->mark_out.value))
-	{
-		refcount_t mark = 0;
-		bool unique_dir = this->mark_in.value == MARK_UNIQUE_DIR ||
-						  this->mark_out.value == MARK_UNIQUE_DIR;
-
-		if (!unique_dir)
-		{
-			mark = ref_get(&unique_mark);
-		}
-		if (MARK_IS_UNIQUE(this->mark_in.value))
-		{
-			this->mark_in.value = unique_dir ? ref_get(&unique_mark) : mark;
-		}
-		if (MARK_IS_UNIQUE(this->mark_out.value))
-		{
-			this->mark_out.value = unique_dir ? ref_get(&unique_mark) : mark;
-		}
-	}
+	allocate_unique_marks(&this->mark_in.value, &this->mark_out.value);

 	if (!this->reqid)
 	{
@@ -3229,7 +3229,7 @@ ike_sa_t * ike_sa_create(ike_sa_id_t *ike_sa_id, bool initiator,
 		.my_auths = array_create(0, 0),
 		.other_auths = array_create(0, 0),
 		.attributes = array_create(sizeof(attribute_entry_t), 0),
-		.unique_id = ref_get(&unique_id),
+		.unique_id = ref_get_nonzero(&unique_id),
 		.keepalive_interval = lib->settings->get_time(lib->settings,
 								"%s.keep_alive", KEEPALIVE_INTERVAL, lib->ns),
 		.keepalive_dpd_margin = lib->settings->get_time(lib->settings,
@@ -150,6 +150,48 @@ bool mark_from_string(const char *value, mark_op_t ops, mark_t *mark)
 	return TRUE;
 }

+/**
+ * Allocate a unique mark that is non-zero and not one of the special values.
+ */
+static uint32_t allocate_unique_mark()
+{
+	static refcount_t unique_mark = 0;
+	uint32_t m;
+
+	m = ref_get_nonzero(&unique_mark);
+	while (MARK_IS_UNIQUE(m))
+	{
+		m = ref_get_nonzero(&unique_mark);
+	}
+	return m;
+}
+
+/*
+ * Described in header
+ */
+void allocate_unique_marks(uint32_t *in, uint32_t *out)
+{
+	if (MARK_IS_UNIQUE(*in) || MARK_IS_UNIQUE(*out))
+	{
+		uint32_t mark = 0;
+		bool unique_dir = *in == MARK_UNIQUE_DIR ||
+						  *out == MARK_UNIQUE_DIR;
+
+		if (!unique_dir)
+		{
+			mark = allocate_unique_mark();
+		}
+		if (MARK_IS_UNIQUE(*in))
+		{
+			*in = unique_dir ? allocate_unique_mark() : mark;
+		}
+		if (MARK_IS_UNIQUE(*out))
+		{
+			*out = unique_dir ? allocate_unique_mark() : mark;
+		}
+	}
+}
+
 /*
  * Described in header
  */
@@ -191,30 +233,46 @@ bool if_id_from_string(const char *value, uint32_t *if_id)
 	return TRUE;
 }

+/**
+ * Allocate a unique interface ID that is non-zero and not one of the special
+ * values.
+ */
+static uint32_t allocate_unique_if_id()
+{
+	static refcount_t unique_if_id = 0;
+	uint32_t if_id;
+
+	if_id = ref_get_nonzero(&unique_if_id);
+	while (IF_ID_IS_UNIQUE(if_id))
+	{
+		if_id = ref_get_nonzero(&unique_if_id);
+	}
+	return if_id;
+}
+
 /*
  * Described in header
  */
 void allocate_unique_if_ids(uint32_t *in, uint32_t *out)
 {
-	static refcount_t unique_if_id = 0;
-
 	if (IF_ID_IS_UNIQUE(*in) || IF_ID_IS_UNIQUE(*out))
 	{
-		refcount_t if_id = 0;
+		uint32_t if_id = 0;
 		bool unique_dir = *in == IF_ID_UNIQUE_DIR ||
 						  *out == IF_ID_UNIQUE_DIR;

 		if (!unique_dir)
 		{
-			if_id = ref_get(&unique_if_id);
+			if_id = allocate_unique_if_id();
 		}
 		if (IF_ID_IS_UNIQUE(*in))
 		{
-			*in = unique_dir ? ref_get(&unique_if_id) : if_id;
+			*in = unique_dir ? allocate_unique_if_id() : if_id;
 		}
 		if (IF_ID_IS_UNIQUE(*out))
 		{
-			*out = unique_dir ? ref_get(&unique_if_id) : if_id;
+			*out = unique_dir ? allocate_unique_if_id() : if_id;
 		}
 	}
 }
@@ -242,6 +242,18 @@ enum mark_op_t {
  */
 bool mark_from_string(const char *value, mark_op_t ops, mark_t *mark);

+/**
+ * Allocate up to two unique marks depending on the given values.
+ *
+ * If the given values are MARK_UNIQUE, they get replaced by a single shared
+ * unique mark.  If the given values are MARK_UNIQUE_DIR, they get replaced
+ * by a unique mark for each direction.
+ *
+ * @param[out] in		inbound mark value
+ * @param[out] out		outbound mark value
+ */
+void allocate_unique_marks(uint32_t *in, uint32_t *out);
+
 /**
  * Special interface ID values to allocate a unique ID for each CHILD_SA/dir
  */
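As a usage sketch of the header documented above (based on those semantics and on the test vectors further down; the concrete numbers depend on the state of the internal counter):

uint32_t in, out;

/* both sides MARK_UNIQUE: a single mark is allocated and shared */
in = MARK_UNIQUE;
out = MARK_UNIQUE;
allocate_unique_marks(&in, &out);	/* e.g. in == 5, out == 5 */

/* MARK_UNIQUE_DIR: a separate mark is allocated per direction */
in = MARK_UNIQUE_DIR;
out = MARK_UNIQUE_DIR;
allocate_unique_marks(&in, &out);	/* e.g. in == 6, out == 7 */

/* explicit values are left untouched */
in = 42;
out = 1337;
allocate_unique_marks(&in, &out);	/* in == 42, out == 1337 */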
@@ -256,6 +256,26 @@ START_TEST(test_round)
 }
 END_TEST

+/*******************************************************************************
+ * ref_get/put
+ */
+
+START_TEST(test_refs)
+{
+	refcount_t r = 0xfffffffe;
+
+	ck_assert_int_eq(ref_cur(&r), 0xfffffffe);
+	ck_assert_int_eq(ref_get(&r), 0xffffffff);
+	ck_assert_int_eq(ref_get_nonzero(&r), 1);
+	ck_assert_int_eq(ref_get_nonzero(&r), 2);
+	ck_assert_int_eq(ref_cur(&r), 2);
+	ck_assert(!ref_put(&r));
+	ck_assert_int_eq(ref_cur(&r), 1);
+	ck_assert(ref_put(&r));
+	ck_assert_int_eq(ref_cur(&r), 0);
+}
+END_TEST
+
 /*******************************************************************************
  * streq
  */
@@ -1104,6 +1124,41 @@ START_TEST(test_mark_from_string)
 }
 END_TEST

+/*******************************************************************************
+ * allocate_unique_marks
+ */
+
+static struct {
+	uint32_t in;
+	uint32_t out;
+	uint32_t exp_in;
+	uint32_t exp_out;
+} unique_mark_data[] = {
+	{0,					0,					0,	0	},
+	{42,				42,					42,	42	},
+	{42,				1337,				42,	1337},
+	/* each call increases the internal counter by 1 or 2 */
+	{MARK_UNIQUE,		42,					1,	42	},
+	{42,				MARK_UNIQUE,		42,	2	},
+	{MARK_UNIQUE_DIR,	42,					3,	42	},
+	{42,				MARK_UNIQUE_DIR,	42,	4	},
+	{MARK_UNIQUE,		MARK_UNIQUE,		5,	5	},
+	{MARK_UNIQUE_DIR,	MARK_UNIQUE,		6,	7	},
+	{MARK_UNIQUE,		MARK_UNIQUE_DIR,	8,	9	},
+	{MARK_UNIQUE_DIR,	MARK_UNIQUE_DIR,	10,	11	},
+};
+
+START_TEST(test_allocate_unique_marks)
+{
+	uint32_t mark_in = unique_mark_data[_i].in,
+			 mark_out = unique_mark_data[_i].out;
+
+	allocate_unique_marks(&mark_in, &mark_out);
+	ck_assert_int_eq(mark_in, unique_mark_data[_i].exp_in);
+	ck_assert_int_eq(mark_out, unique_mark_data[_i].exp_out);
+}
+END_TEST
+
 /*******************************************************************************
  * if_id_from_string
  */
@@ -1272,6 +1327,10 @@ Suite *utils_suite_create()
 	tcase_add_test(tc, test_round);
 	suite_add_tcase(s, tc);

+	tc = tcase_create("refcount");
+	tcase_add_test(tc, test_refs);
+	suite_add_tcase(s, tc);
+
 	tc = tcase_create("string helper");
 	tcase_add_loop_test(tc, test_streq, 0, countof(streq_data));
 	tcase_add_loop_test(tc, test_strneq, 0, countof(strneq_data));
@@ -1340,6 +1399,10 @@ Suite *utils_suite_create()
 	tcase_add_loop_test(tc, test_mark_from_string, 0, countof(mark_data));
 	suite_add_tcase(s, tc);

+	tc = tcase_create("allocate_unique_marks");
+	tcase_add_loop_test(tc, test_allocate_unique_marks, 0, countof(unique_mark_data));
+	suite_add_tcase(s, tc);
+
 	tc = tcase_create("if_id_from_string");
 	tcase_add_loop_test(tc, test_if_id_from_string, 0, countof(if_id_data));
 	suite_add_tcase(s, tc);
@@ -124,6 +124,33 @@ bool cas_ptr(void **ptr, void *oldval, void *newval);

 #endif /* HAVE_GCC_ATOMIC_OPERATIONS */

+/**
+ * Get a new reference, but skip zero on overflow.
+ *
+ * If a reference counter is used to allocate unique identifiers, the
+ * refcount value may overflow if it is never decremented.  The 0 identifier
+ * may have special semantics, hence returning it can be problematic for
+ * some users.
+ *
+ * This call does an additional ref_get() if ref_get() overflows and returns
+ * zero.  This ensures that zero is never returned, on the assumption that
+ * it has special meaning.
+ *
+ * @param ref		pointer to ref counter
+ * @return			new value of ref
+ */
+static inline refcount_t ref_get_nonzero(refcount_t *ref)
+{
+	refcount_t v;
+
+	v = ref_get(ref);
+	if (v == 0)
+	{
+		v = ref_get(ref);
+	}
+	return v;
+}
+
 /**
  * Initialize atomics utility functions
  */
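The wrap-around that ref_get_nonzero() compensates for comes from ref_get() itself, which is just an atomic increment of the unsigned 32-bit refcount_t. With GCC atomics it presumably looks something like the following (an assumption about the existing macro earlier in this header, shown only to make the wrap to zero explicit):

/* assumed shape of the existing helper when HAVE_GCC_ATOMIC_OPERATIONS is
 * defined; unsigned addition wraps, so incrementing 0xffffffff yields 0 */
#define ref_get(ref)	__atomic_add_fetch(ref, 1, __ATOMIC_RELAXED)

Note that the extra ref_get() in ref_get_nonzero() only affects the one caller that happens to receive the wrapped value; every other concurrent caller still obtains a distinct value from its own atomic increment, so the only identifier ever skipped is 0.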