mirror of https://github.com/cuberite/libevent.git
Merge remote branch 'github/signed_compare'
commit a8b7674cd5
@@ -179,7 +179,8 @@ arc4_seed_sysctl_linux(void)
 	int mib[] = { CTL_KERN, KERN_RANDOM, RANDOM_UUID };
 	unsigned char buf[ADD_ENTROPY];
 	size_t len, n;
-	int i, any_set;
+	unsigned i;
+	int any_set;
 
 	memset(buf, 0, sizeof(buf));
 
@@ -190,7 +191,7 @@ arc4_seed_sysctl_linux(void)
 			return -1;
 	}
 	/* make sure that the buffer actually got set. */
-	for (i=any_set=0; i<sizeof(buf); ++i) {
+	for (i=0,any_set=0; i<sizeof(buf); ++i) {
 		any_set |= buf[i];
 	}
 	if (!any_set)
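As a side note (not part of the diff), here is a minimal standalone C sketch of the warning class this branch targets: sizeof() yields a size_t, so comparing a plain int index against it trips gcc's -Wsign-compare (enabled by -Wextra). Splitting the declaration into an unsigned index and a separate int flag, as the hunk above does, keeps the comparison unsigned-vs-unsigned.

/* sketch only; compile with: gcc -Wall -Wextra sketch.c */
#include <stdio.h>
#include <string.h>

int
main(void)
{
	unsigned char buf[16];
	unsigned i;      /* unsigned index: i < sizeof(buf) compares cleanly */
	int any_set;     /* stays int: it is never compared against a size_t */

	memset(buf, 0, sizeof(buf));
	for (i = 0, any_set = 0; i < sizeof(buf); ++i)
		any_set |= buf[i];
	printf("any_set = %d\n", any_set);
	return 0;
}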
buffer.c (10 lines changed)
@@ -570,7 +570,7 @@ evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
 
 		vec[0].iov_base = CHAIN_SPACE_PTR(chain);
 		vec[0].iov_len = CHAIN_SPACE_LEN(chain);
-		EVUTIL_ASSERT(vec[0].iov_len >= size);
+		EVUTIL_ASSERT(size<0 || vec[0].iov_len >= (size_t)size);
 		n = 1;
 	} else {
 		if (_evbuffer_expand_fast(buf, size, n_vecs)<0)
@@ -1934,7 +1934,7 @@ _evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
 	}
 
 	chain = *firstchainp;
-	for (i = 0; i < n_vecs_avail && so_far < howmuch; ++i) {
+	for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) {
 		size_t avail = CHAIN_SPACE_LEN(chain);
 		if (avail > (howmuch - so_far) && exact)
 			avail = howmuch - so_far;
@@ -2252,7 +2252,7 @@ evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
 		goto done;
 	}
 
-	if (howmuch < 0 || howmuch > buffer->total_len)
+	if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
 		howmuch = buffer->total_len;
 
 	if (howmuch > 0) {
@@ -2420,7 +2420,7 @@ evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, con
 	if (end)
 		last_chain = end->_internal.chain;
 
-	if (!len)
+	if (!len || len > EV_SSIZE_MAX)
 		goto done;
 
 	first = what[0];
@@ -2435,7 +2435,7 @@ evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, con
 			pos.pos += p - start_at;
 			pos._internal.pos_in_chain += p - start_at;
 			if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
-				if (end && pos.pos + len > end->pos)
+				if (end && pos.pos + (ev_ssize_t)len > end->pos)
 					goto not_found;
 				else
 					goto done;
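The buffer.c hunks all follow one pattern: an ev_ssize_t where a negative value means "no limit" is first tested for < 0 and only then cast to size_t for the unsigned comparison. A hypothetical helper (names mine, not libevent's) showing the shape of that pattern:

#include <stdio.h>
#include <stddef.h>

typedef long ssz_t;   /* stand-in for ev_ssize_t in this sketch */

/* A negative "howmuch" means "take everything"; checking < 0 first means
 * the cast to size_t below can never turn a negative into a huge value. */
static size_t
clamp_howmuch(ssz_t howmuch, size_t total_len)
{
	if (howmuch < 0 || (size_t)howmuch > total_len)
		return total_len;
	return (size_t)howmuch;
}

int
main(void)
{
	printf("%zu\n", clamp_howmuch(-1, 100)); /* 100: "all of it" */
	printf("%zu\n", clamp_howmuch(42, 100)); /* 42 */
	return 0;
}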
@@ -91,7 +91,7 @@ struct bufferevent_rate_limit_group {
 
 	/** The smallest number of bytes that any member of the group should
	 * be limited to read or write at a time. */
-	ev_uint32_t min_share;
+	ev_int32_t min_share;
	/** Timeout event that goes off once a tick, when the bucket is ready
	 * to refill. */
	struct event master_refill_event;
@@ -56,9 +56,9 @@ ev_token_bucket_init(struct ev_token_bucket *bucket,
 		   leave "last_updated" as it is; the next update will add the
 		   appropriate amount of bandwidth to the bucket.
 		*/
-		if (bucket->read_limit > cfg->read_maximum)
+		if (bucket->read_limit > (ev_int64_t) cfg->read_maximum)
 			bucket->read_limit = cfg->read_maximum;
-		if (bucket->write_limit > cfg->write_maximum)
+		if (bucket->write_limit > (ev_int64_t) cfg->write_maximum)
 			bucket->write_limit = cfg->write_maximum;
 	} else {
 		bucket->read_limit = cfg->read_rate;
@@ -221,7 +221,7 @@ _bufferevent_get_rlim_max(struct bufferevent_private *bev, int is_write)
 	if (bev->rate_limiting->group) {
 		struct bufferevent_rate_limit_group *g =
 		    bev->rate_limiting->group;
-		ev_uint32_t share;
+		ev_int32_t share;
 		LOCK_GROUP(g);
 		if (GROUP_SUSPENDED(g)) {
 			/* We can get here if we failed to lock this
@@ -632,9 +632,9 @@ bufferevent_rate_limit_group_set_cfg(
 	    &g->rate_limit_cfg.tick_timeout, &cfg->tick_timeout, ==);
 	memcpy(&g->rate_limit_cfg, cfg, sizeof(g->rate_limit_cfg));
 
-	if (g->rate_limit.read_limit > cfg->read_maximum)
+	if (g->rate_limit.read_limit > (ev_int32_t)cfg->read_maximum)
 		g->rate_limit.read_limit = cfg->read_maximum;
-	if (g->rate_limit.write_limit > cfg->write_maximum)
+	if (g->rate_limit.write_limit > (ev_int32_t)cfg->write_maximum)
 		g->rate_limit.write_limit = cfg->write_maximum;
 
 	if (!same_tick) {
@@ -651,6 +651,9 @@ bufferevent_rate_limit_group_set_min_share(
     struct bufferevent_rate_limit_group *g,
     size_t share)
 {
+	if (share > EV_INT32_MAX)
+		return -1;
+
 	g->min_share = share;
 	return 0;
 }
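min_share becomes signed so it can be compared against the signed token-bucket limits without casts, and the setter now rejects values that would not fit. A standalone sketch of that setter (struct and names are stand-ins, not libevent's API):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct group_sketch {
	int32_t min_share;   /* was unsigned; signed to match the bucket limits */
};

static int
set_min_share(struct group_sketch *g, size_t share)
{
	if (share > INT32_MAX)    /* would not fit in the signed 32-bit field */
		return -1;
	g->min_share = (int32_t)share;
	return 0;
}

int
main(void)
{
	struct group_sketch g;
	printf("%d\n", set_min_share(&g, 4096));     /* 0 */
	printf("%d\n", set_min_share(&g, SIZE_MAX)); /* -1: too large */
	return 0;
}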
evdns.c (4 lines changed)
@@ -3901,7 +3901,7 @@ evdns_base_parse_hosts_line(struct evdns_base *base, char *line)
 	memset(&ss, 0, sizeof(ss));
 	if (evutil_parse_sockaddr_port(addr, (struct sockaddr*)&ss, &socklen)<0)
 		return -1;
-	if (socklen > sizeof(struct sockaddr_in6))
+	if (socklen > (int)sizeof(struct sockaddr_in6))
 		return -1;
 
 	if (sockaddr_getport((struct sockaddr*)&ss))
@@ -3921,7 +3921,7 @@ evdns_base_parse_hosts_line(struct evdns_base *base, char *line)
 		he = mm_calloc(1, sizeof(struct hosts_entry)+namelen);
 		if (!he)
 			return -1;
-		EVUTIL_ASSERT(socklen <= sizeof(he->addr));
+		EVUTIL_ASSERT(socklen <= (int)sizeof(he->addr));
 		memcpy(&he->addr, &ss, socklen);
 		memcpy(he->hostname, hostname, namelen+1);
 		he->addrlen = socklen;
@@ -194,9 +194,10 @@ static int
 decode_tag_internal(ev_uint32_t *ptag, struct evbuffer *evbuf, int dodrain)
 {
 	ev_uint32_t number = 0;
-	int len = evbuffer_get_length(evbuf);
+	size_t len = evbuffer_get_length(evbuf);
 	ev_uint8_t *data;
-	int count = 0, shift = 0, done = 0;
+	size_t count = 0;
+	int shift = 0, done = 0;
 
 	/*
 	 * the encoding of a number is at most one byte more than its
@@ -225,7 +226,7 @@ decode_tag_internal(ev_uint32_t *ptag, struct evbuffer *evbuf, int dodrain)
 	if (ptag != NULL)
 		*ptag = number;
 
-	return (count);
+	return count > INT_MAX ? INT_MAX : (int)(count);
 }
 
 int
@@ -524,11 +525,11 @@ evtag_unmarshal_fixed(struct evbuffer *src, ev_uint32_t need_tag, void *data,
 	int tag_len;
 
 	/* Now unmarshal a tag and check that it matches the tag we want */
-	if ((tag_len = evtag_unmarshal_header(src, &tag)) == -1 ||
+	if ((tag_len = evtag_unmarshal_header(src, &tag)) < 0 ||
 	    tag != need_tag)
 		return (-1);
 
-	if (tag_len != len)
+	if ((size_t)tag_len != len)
 		return (-1);
 
 	evbuffer_remove(src, data, len);
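decode_tag_internal() now tracks the byte count as a size_t and saturates it when converting back to the function's int return type. The conversion in isolation, as a minimal sketch:

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

/* Saturate a size_t count into an int, as the changed return statement does. */
static int
count_to_int(size_t count)
{
	return count > INT_MAX ? INT_MAX : (int)count;
}

int
main(void)
{
	printf("%d\n", count_to_int(5));          /* 5 */
	printf("%d\n", count_to_int((size_t)-1)); /* INT_MAX */
	return 0;
}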
evutil.c (11 lines changed)
@@ -120,7 +120,8 @@ evutil_read_file(const char *filename, char **content_out, size_t *len_out,
 	fd = open(filename, mode);
 	if (fd < 0)
 		return -1;
-	if (fstat(fd, &st)) {
+	if (fstat(fd, &st) || st.st_size < 0 ||
+	    st.st_size > EV_SSIZE_MAX-1 ) {
 		close(fd);
 		return -2;
 	}
@@ -132,9 +133,9 @@ evutil_read_file(const char *filename, char **content_out, size_t *len_out,
 	read_so_far = 0;
 	while ((r = read(fd, mem+read_so_far, st.st_size - read_so_far)) > 0) {
 		read_so_far += r;
-		if (read_so_far >= st.st_size)
+		if (read_so_far >= (size_t)st.st_size)
 			break;
-		EVUTIL_ASSERT(read_so_far < st.st_size);
+		EVUTIL_ASSERT(read_so_far < (size_t)st.st_size);
 	}
 	close(fd);
 	if (r < 0) {
@@ -1767,7 +1768,7 @@ evutil_parse_sockaddr_port(const char *ip_as_string, struct sockaddr *out, int *
 		sin6.sin6_port = htons(port);
 		if (1 != evutil_inet_pton(AF_INET6, addr_part, &sin6.sin6_addr))
 			return -1;
-		if (sizeof(sin6) > *outlen)
+		if ((int)sizeof(sin6) > *outlen)
 			return -1;
 		memset(out, 0, *outlen);
 		memcpy(out, &sin6, sizeof(sin6));
@@ -1786,7 +1787,7 @@ evutil_parse_sockaddr_port(const char *ip_as_string, struct sockaddr *out, int *
 		sin.sin_port = htons(port);
 		if (1 != evutil_inet_pton(AF_INET, addr_part, &sin.sin_addr))
 			return -1;
-		if (sizeof(sin) > *outlen)
+		if ((int)sizeof(sin) > *outlen)
 			return -1;
 		memset(out, 0, *outlen);
 		memcpy(out, &sin, sizeof(sin));
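The evutil_read_file() change both hardens the fstat() result (st_size must be non-negative and small enough to add 1 without overflow) and keeps the read-loop comparisons size_t-vs-size_t. A POSIX-only sketch of the same shape; the error codes and names here are assumptions for illustration, not libevent's API:

#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdlib.h>
#include <limits.h>
#include <stdio.h>

static int
read_whole_file(const char *path, char **out, size_t *len_out)
{
	struct stat st;
	char *mem;
	size_t read_so_far = 0;
	ssize_t r = 0;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	/* reject negative or absurdly large sizes before using st_size */
	if (fstat(fd, &st) || st.st_size < 0 || st.st_size > SSIZE_MAX - 1) {
		close(fd);
		return -2;
	}
	if (!(mem = malloc((size_t)st.st_size + 1))) {
		close(fd);
		return -3;
	}
	/* cast st_size once; every comparison below is size_t vs size_t */
	while ((r = read(fd, mem + read_so_far,
		    (size_t)st.st_size - read_so_far)) > 0) {
		read_so_far += (size_t)r;
		if (read_so_far >= (size_t)st.st_size)
			break;
	}
	close(fd);
	if (r < 0) {
		free(mem);
		return -4;
	}
	mem[read_so_far] = '\0';
	*out = mem;
	*len_out = read_so_far;
	return 0;
}

int
main(int argc, char **argv)
{
	char *content;
	size_t len;
	if (argc > 1 && read_whole_file(argv[1], &content, &len) == 0) {
		printf("read %zu bytes\n", len);
		free(content);
	}
	return 0;
}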
http.c (6 lines changed)
@@ -844,7 +844,7 @@ evhttp_read_body(struct evhttp_connection *evcon, struct evhttp_request *req)
 		evbuffer_add_buffer(req->input_buffer, buf);
 		req->body_size += evbuffer_get_length(buf);
 	} else if (req->chunk_cb != NULL ||
-	    evbuffer_get_length(buf) >= req->ntoread) {
+	    evbuffer_get_length(buf) >= (size_t)req->ntoread) {
 		/* We've postponed moving the data until now, but we're
 		 * about to use it. */
 		req->ntoread -= evbuffer_get_length(buf);
@@ -2251,11 +2251,11 @@ evhttp_response_phrase_internal(int code)
 	int subcode = code % 100;
 
 	/* Unknown class - can't do any better here */
-	if (klass < 0 || klass >= MEMBERSOF(response_classes))
+	if (klass < 0 || klass >= (int) MEMBERSOF(response_classes))
 		return "Unknown Status Class";
 
 	/* Unknown sub-code, return class name at least */
-	if (subcode >= response_classes[klass].num_responses)
+	if (subcode >= (int) response_classes[klass].num_responses)
 		return response_classes[klass].name;
 
 	return response_classes[klass].responses[subcode];
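The evhttp_response_phrase_internal() fix is the classic array-count case: a sizeof-based element-count macro is a size_t, so a possibly-negative int must not be compared against it directly; casting the small, known count to int is the safe direction. A self-contained sketch with a hypothetical N_ELEMENTS macro (not libevent's MEMBERSOF):

#include <stdio.h>

#define N_ELEMENTS(arr) (sizeof(arr) / sizeof((arr)[0]))

static const char *class_names[] = {
	"Informational", "Success", "Redirect", "Client Error", "Server Error"
};

static const char *
class_name(int klass)
{
	/* cast the count, not klass: a negative klass cast to size_t would
	 * become huge and slip past an unsigned bounds check */
	if (klass < 0 || klass >= (int)N_ELEMENTS(class_names))
		return "Unknown Status Class";
	return class_names[klass];
}

int
main(void)
{
	printf("%s\n", class_name(1));   /* Success */
	printf("%s\n", class_name(-3));  /* Unknown Status Class */
	return 0;
}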
@@ -37,7 +37,7 @@
 #define EV_NORETURN
 #endif
 
-#define _EVENT_ERR_ABORT 0xdeaddead
+#define _EVENT_ERR_ABORT ((int)0xdeaddead)
 
 void event_err(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3) EV_NORETURN;
 void event_warn(const char *fmt, ...) EV_CHECK_FMT(1,2);
@@ -95,7 +95,7 @@ int min_heap_elt_is_top(const struct event *e)
 
 int min_heap_erase(min_heap_t* s, struct event* e)
 {
-	if (((unsigned int)-1) != e->ev_timeout_pos.min_heap_idx)
+	if (-1 != e->ev_timeout_pos.min_heap_idx)
 	{
 		struct event *last = s->p[--s->n];
 		unsigned parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
select.c (9 lines changed)
@@ -234,11 +234,14 @@ select_add(struct event_base *base, int fd, short old, short events, void *p)
 	if (sop->event_fds < fd) {
 		int fdsz = sop->event_fdsz;
 
-		if (fdsz < sizeof(fd_mask))
-			fdsz = sizeof(fd_mask);
+		if (fdsz < (int)sizeof(fd_mask))
+			fdsz = (int)sizeof(fd_mask);
 
+		/* In theory we should worry about overflow here. In
+		 * reality, though, the highest fd on a unixy system will
+		 * not overflow here. XXXX */
 		while (fdsz <
-		    (howmany(fd + 1, NFDBITS) * sizeof(fd_mask)))
+		    (int) (howmany(fd + 1, NFDBITS) * sizeof(fd_mask)))
 			fdsz *= 2;
 
 		if (fdsz != sop->event_fdsz) {
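The select.c hunk casts the howmany()*sizeof() byte count (a size_t) to int before comparing it with the int fdsz, and adds a comment on why overflow is not a practical concern. A sketch of just the sizing loop, assuming the BSD-style fd_mask/NFDBITS/howmany macros from <sys/select.h> and <sys/param.h> are visible (the same assumption select.c itself makes):

#include <sys/select.h>
#include <sys/param.h>   /* howmany() */
#include <stdio.h>

/* Grow fdsz (in bytes) until it covers enough fd_mask words for fds 0..fd. */
static int
grow_fdsz(int fdsz, int fd)
{
	if (fdsz < (int)sizeof(fd_mask))
		fdsz = (int)sizeof(fd_mask);
	while (fdsz < (int)(howmany(fd + 1, NFDBITS) * sizeof(fd_mask)))
		fdsz *= 2;
	return fdsz;
}

int
main(void)
{
	printf("%d bytes for fd 1000\n", grow_fdsz((int)sizeof(fd_mask), 1000));
	return 0;
}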