func (string, length 0-484k) | target (int64, 0 or 1) | cwe (list, 0-4 entries) | project (string, 799 classes) | commit_id (string, length 40) | hash (float64) | size (int64, 1-24k) | message (string, length 0-13.3k)
---|---|---|---|---|---|---|---
static int network_config_set_string (const oconfig_item_t *ci, /* {{{ */
char **ret_string)
{
char *tmp;
if ((ci->values_num != 1)
|| (ci->values[0].type != OCONFIG_TYPE_STRING))
{
WARNING ("network plugin: The `%s' config option needs exactly "
"one string argument.", ci->key);
return (-1);
}
tmp = strdup (ci->values[0].value.string);
if (tmp == NULL)
return (-1);
sfree (*ret_string);
*ret_string = tmp;
return (0);
} /* }}} int network_config_set_string */
| 0 |
[
"CWE-119",
"CWE-787"
] |
collectd
|
b589096f907052b3a4da2b9ccc9b0e2e888dfc18
| 137,939,901,516,682,890,000,000,000,000,000,000,000 | 21 |
network plugin: Fix heap overflow in parse_packet().
Emilien Gaspar has identified a heap overflow in parse_packet(), the
function used by the network plugin to parse incoming network packets.
This is a vulnerability in collectd, though the scope is not clear at
this point. At the very least specially crafted network packets can be
used to crash the daemon. We can't rule out a potential remote code
execution though.
Fixes: CVE-2016-6254
|
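The commit message above describes a heap overflow in collectd's parse_packet(). As a minimal sketch of the bug class (hypothetical part format and names, not collectd's real code), the key is validating an attacker-controlled length field against the bytes actually remaining before it drives a copy:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hedged sketch of the parse_packet() bug class, not collectd's actual
 * code: a part length read from the wire must be validated against the
 * remaining buffer before it is used for copying. */
static int parse_part(const uint8_t *buf, size_t buflen,
                      char *out, size_t outlen)
{
    if (buflen < 4)                          /* need type + length header */
        return -1;

    uint16_t part_len = (uint16_t)((buf[2] << 8) | buf[3]); /* big-endian */

    if (part_len < 4 || part_len > buflen)   /* the kind of check that was missing */
        return -1;
    if ((size_t)(part_len - 4) >= outlen)
        return -1;

    memcpy(out, buf + 4, part_len - 4);      /* safe only after the checks */
    out[part_len - 4] = '\0';
    return 0;
}

int main(void)
{
    const uint8_t pkt[] = { 0x00, 0x00, 0x00, 0x09, 'h', 'o', 's', 't', '\0' };
    char name[16];
    if (parse_part(pkt, sizeof pkt, name, sizeof name) == 0)
        printf("part: %s\n", name);
    return 0;
}
```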
static NTSTATUS dcesrv_lsa_GetQuotasForAccount(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx,
struct lsa_GetQuotasForAccount *r)
{
DCESRV_FAULT(DCERPC_FAULT_OP_RNG_ERROR);
}
| 0 |
[
"CWE-200"
] |
samba
|
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
| 235,102,304,670,512,500,000,000,000,000,000,000,000 | 5 |
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]>
|
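The fix pattern the message describes (use appending helpers instead of searching for a just-added element) can be sketched generically; this is a hypothetical illustration, not ldb's real API:

```c
#include <stdio.h>
#include <stdlib.h>

/* Generic sketch of the pattern the commit message describes; this is
 * not ldb's real API. An append helper that returns the element it just
 * added removes the error-prone "append, then search by name" round
 * trip, which can pick the wrong element when names repeat. */
struct element { char name[32]; int value; };
struct list    { struct element *els; size_t n; };

static struct element *list_append(struct list *l, const char *name)
{
    struct element *tmp = realloc(l->els, (l->n + 1) * sizeof *tmp);
    if (tmp == NULL)
        return NULL;
    l->els = tmp;

    struct element *el = &l->els[l->n++];
    snprintf(el->name, sizeof el->name, "%s", name);
    el->value = 0;
    return el;                 /* caller modifies exactly what it added */
}

int main(void)
{
    struct list l = { NULL, 0 };
    struct element *el = list_append(&l, "objectClass");
    if (el != NULL)
        el->value = 42;        /* no second lookup needed */
    free(l.els);
    return 0;
}
```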
int wav_fmt(wav_reader_t *reader, uint32_t size)
{
uint16_t wFormatTag, nChannels, nBlockAlign, wBitsPerSample, cbSize;
uint32_t nSamplesPerSec, nAvgBytesPerSec, dwChannelMask = 0;
uint16_t wValidBitsPerSample;
uint8_t guid[16];
int is_float = 0;
ENSURE(size >= 16);
TRY_IO(pcm_scanl(&reader->io, "SSLLSS", &wFormatTag, &nChannels,
&nSamplesPerSec, &nAvgBytesPerSec, &nBlockAlign,
&wBitsPerSample) != 6);
wValidBitsPerSample = wBitsPerSample;
ENSURE(wFormatTag == 1 || wFormatTag == 3 || wFormatTag == 0xfffe);
ENSURE(nChannels && nSamplesPerSec && nAvgBytesPerSec &&
nBlockAlign && wBitsPerSample && !(wBitsPerSample & 7) &&
nBlockAlign == nChannels * wBitsPerSample / 8);
if (wFormatTag == 3)
is_float = 1;
if (wFormatTag != 0xfffe)
TRY_IO(pcm_skip(&reader->io, (size - 15) & ~1));
else {
ENSURE(size >= 40);
TRY_IO(pcm_scanl(&reader->io, "SSL",
&cbSize, &wValidBitsPerSample, &dwChannelMask) != 3);
TRY_IO(pcm_read(&reader->io, guid, 16) != 16);
if (memcmp(guid, WAV_GUID_FLOAT, 16) == 0)
is_float = 1;
else if (memcmp(guid, WAV_GUID_PCM, 16) != 0)
goto FAIL;
ENSURE(wValidBitsPerSample && wValidBitsPerSample <= wBitsPerSample);
TRY_IO(pcm_skip(&reader->io, (size - 39) & ~1));
}
reader->sample_format.sample_rate = nSamplesPerSec;
reader->sample_format.bits_per_channel = wValidBitsPerSample;
reader->sample_format.bytes_per_frame = nBlockAlign;
reader->sample_format.channels_per_frame = nChannels;
reader->sample_format.channel_mask = dwChannelMask;
if (is_float)
reader->sample_format.sample_type = PCM_TYPE_FLOAT;
else if (wBitsPerSample == 8)
reader->sample_format.sample_type = PCM_TYPE_UINT;
else
reader->sample_format.sample_type = PCM_TYPE_SINT;
return 0;
FAIL:
return -1;
}
| 0 |
[
"CWE-703"
] |
fdkaac
|
4ec1422bd951a137225ffa4052da120e2ab0a0f4
| 46,680,791,534,991,070,000,000,000,000,000,000,000 | 52 |
wav/caf parser: ensure fmt/desc chunk
fixes https://github.com/nu774/fdkaac/issues/52
|
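wav_fmt() above relies on ENSURE and TRY_IO guard macros that this row does not show. A plausible minimal reconstruction, assuming both simply bail out through the FAIL label at the bottom of the function (fdkaac's real definitions may differ):

```c
/* Plausible reconstruction (an assumption, not fdkaac's verbatim
 * macros): both guards bail out through the FAIL label the function
 * defines at its end. */
#define ENSURE(cond)  do { if (!(cond)) goto FAIL; } while (0)
#define TRY_IO(expr)  do { if (expr)    goto FAIL; } while (0)
```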
static struct sc_card_driver * sc_get_driver(void)
{
struct sc_card_driver *iso_drv = sc_get_iso7816_driver();
cac_ops = *iso_drv->ops;
cac_ops.match_card = cac_match_card;
cac_ops.init = cac_init;
cac_ops.finish = cac_finish;
cac_ops.select_file = cac_select_file; /* need to record object type */
cac_ops.get_challenge = cac_get_challenge;
cac_ops.read_binary = cac_read_binary;
cac_ops.write_binary = cac_write_binary;
cac_ops.set_security_env = cac_set_security_env;
cac_ops.restore_security_env = cac_restore_security_env;
cac_ops.compute_signature = cac_compute_signature;
cac_ops.decipher = cac_decipher;
cac_ops.card_ctl = cac_card_ctl;
cac_ops.pin_cmd = cac_pin_cmd;
return &cac_drv;
}
| 0 |
[
"CWE-415",
"CWE-119"
] |
OpenSC
|
360e95d45ac4123255a4c796db96337f332160ad
| 53,075,189,605,868,960,000,000,000,000,000,000,000 | 22 |
fixed out of bounds writes
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting the problems.
|
bsearch_double(double e, double *ent, short *indexarray, int nent)
{
int n = nent;
int k = 0;
while (n > 0) {
int nn = n / 2;
int idx = indexarray[k + nn];
double ne = ent[idx];
if (ne == e) {
k += nn;
break;
}
else if (ne > e) {
n -= nn + 1;
k += nn + 1;
}
else {
n = nn;
}
}
return k;
}
| 0 |
[
"CWE-119"
] |
w3m
|
67a3db378f5ee3047c158eae4342f7e3245a2ab1
| 806,444,831,532,968,600,000,000,000,000,000,000 | 23 |
Fix table rowspan and colspan
Origin: https://github.com/tats/w3m/pull/19
Bug-Debian: https://github.com/tats/w3m/issues/8
|
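From the comparison logic above (the search moves right when the probed value exceeds the target), indexarray must view ent in descending value order, and the function returns a position within indexarray. A small hedged driver, assuming it is compiled alongside the function:

```c
#include <stdio.h>

int bsearch_double(double e, double *ent, short *indexarray, int nent);

int main(void)
{
    /* ent holds the values; indexarray views them in descending
     * order (9.0, 7.0, 4.0, 2.0), as the ne > e branch requires. */
    double ent[]        = { 2.0, 9.0, 4.0, 7.0 };
    short  indexarray[] = { 1, 3, 2, 0 };

    int pos = bsearch_double(4.0, ent, indexarray, 4);
    printf("position %d holds %g\n", pos, ent[indexarray[pos]]);
    return 0;
}
```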
JSValue js_print(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv)
{
return js_print_ex(ctx, this_val, argc, argv, GF_LOG_CONSOLE, GF_FALSE);
}
| 0 |
[
"CWE-787"
] |
gpac
|
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
| 320,726,910,128,042,440,000,000,000,000,000,000,000 | 4 |
fixed #2138
|
static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
size_t len)
{
struct sock *sk = sock->sk;
DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
struct sk_buff *skb = NULL;
struct net_device *dev;
struct sockcm_cookie sockc;
__be16 proto = 0;
int err;
int extra_len = 0;
/*
* Get and verify the address.
*/
if (saddr) {
if (msg->msg_namelen < sizeof(struct sockaddr))
return -EINVAL;
if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
proto = saddr->spkt_protocol;
} else
return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
/*
* Find the device first to size check it
*/
saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
rcu_read_lock();
dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
err = -ENODEV;
if (dev == NULL)
goto out_unlock;
err = -ENETDOWN;
if (!(dev->flags & IFF_UP))
goto out_unlock;
/*
* You may not queue a frame bigger than the mtu. This is the lowest level
* raw protocol and you must do your own fragmentation at this level.
*/
if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
if (!netif_supports_nofcs(dev)) {
err = -EPROTONOSUPPORT;
goto out_unlock;
}
extra_len = 4; /* We're doing our own CRC */
}
err = -EMSGSIZE;
if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
goto out_unlock;
if (!skb) {
size_t reserved = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
rcu_read_unlock();
skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
if (skb == NULL)
return -ENOBUFS;
/* FIXME: Save some space for broken drivers that write a hard
* header at transmission time by themselves. PPP is the notable
* one here. This should really be fixed at the driver level.
*/
skb_reserve(skb, reserved);
skb_reset_network_header(skb);
/* Try to align data part correctly */
if (hhlen) {
skb->data -= hhlen;
skb->tail -= hhlen;
if (len < hhlen)
skb_reset_network_header(skb);
}
err = memcpy_from_msg(skb_put(skb, len), msg, len);
if (err)
goto out_free;
goto retry;
}
if (!dev_validate_header(dev, skb->data, len)) {
err = -EINVAL;
goto out_unlock;
}
if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
!packet_extra_vlan_len_allowed(dev, skb)) {
err = -EMSGSIZE;
goto out_unlock;
}
sockcm_init(&sockc, sk);
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
if (unlikely(err))
goto out_unlock;
}
skb->protocol = proto;
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
skb->tstamp = sockc.transmit_time;
skb_setup_tx_timestamp(skb, sockc.tsflags);
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
packet_parse_headers(skb, sock);
dev_queue_xmit(skb);
rcu_read_unlock();
return len;
out_unlock:
rcu_read_unlock();
out_free:
kfree_skb(skb);
return err;
}
| 0 |
[
"CWE-787"
] |
linux
|
acf69c946233259ab4d64f8869d4037a198c7f06
| 84,788,405,253,634,045,000,000,000,000,000,000,000 | 126 |
net/packet: fix overflow in tpacket_rcv
Using tp_reserve to calculate netoff can overflow as
tp_reserve is unsigned int and netoff is unsigned short.
This may lead to macoff receiving a smaller value than
sizeof(struct virtio_net_hdr), and if po->has_vnet_hdr
is set, an out-of-bounds write will occur when
calling virtio_net_hdr_from_skb.
The bug is fixed by converting netoff to unsigned int
and checking if it exceeds USHRT_MAX.
This addresses CVE-2020-14386
Fixes: 8913336a7e8d ("packet: add PACKET_RESERVE sockopt")
Signed-off-by: Or Cohen <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
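The overflow the message describes is plain C integer truncation; a minimal userspace demonstration of both the buggy narrowing and the USHRT_MAX guard the fix adds (values are illustrative):

```c
#include <limits.h>
#include <stdio.h>

int main(void)
{
    unsigned int tp_reserve = 70000;   /* attacker-chosen via PACKET_RESERVE */
    unsigned int hdrlen = 32, maclen = 14;

    /* Buggy pattern: storing the sum in an unsigned short truncates it
     * mod 65536, so a later bounds check sees a small, bogus offset. */
    unsigned short netoff_bad = (unsigned short)(hdrlen + tp_reserve + maclen);
    printf("truncated netoff: %u\n", (unsigned)netoff_bad);

    /* Fixed pattern per the commit: compute wide, then range-check. */
    unsigned int netoff = hdrlen + tp_reserve + maclen;
    if (netoff > USHRT_MAX) {
        printf("reject: netoff %u exceeds USHRT_MAX\n", netoff);
        return 1;
    }
    return 0;
}
```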
X509 *ssl_get_server_send_cert(const SSL *s)
{
CERT_PKEY *cpk;
cpk = ssl_get_server_send_pkey(s);
if (!cpk)
return NULL;
return cpk->x509;
}
| 0 |
[
"CWE-310"
] |
openssl
|
56f1acf5ef8a432992497a04792ff4b3b2c6f286
| 67,099,688,994,444,520,000,000,000,000,000,000,000 | 8 |
Disable SSLv2 default build, default negotiation and weak ciphers.
SSLv2 is by default disabled at build-time. Builds that are not
configured with "enable-ssl2" will not support SSLv2. Even if
"enable-ssl2" is used, users who want to negotiate SSLv2 via the
version-flexible SSLv23_method() will need to explicitly call either
of:
SSL_CTX_clear_options(ctx, SSL_OP_NO_SSLv2);
or
SSL_clear_options(ssl, SSL_OP_NO_SSLv2);
as appropriate. Even if either of those is used, or the application
explicitly uses the version-specific SSLv2_method() or its client
or server variants, SSLv2 ciphers vulnerable to exhaustive search
key recovery have been removed. Specifically, the SSLv2 40-bit
EXPORT ciphers, and SSLv2 56-bit DES are no longer available.
Mitigation for CVE-2016-0800
Reviewed-by: Emilia Käsper <[email protected]>
|
static int mmap_is_legacy(void)
{
if (current->personality & ADDR_COMPAT_LAYOUT)
return 1;
if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
return 1;
return sysctl_legacy_va_layout;
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
4e7c22d447bb6d7e37bfe39ff658486ae78e8d77
| 131,786,153,487,413,770,000,000,000,000,000,000,000 | 10 |
x86, mm/ASLR: Fix stack randomization on 64-bit systems
The issue is that the stack for processes is not properly randomized on
64 bit architectures due to an integer overflow.
The affected function is randomize_stack_top() in file
"fs/binfmt_elf.c":
static unsigned long randomize_stack_top(unsigned long stack_top)
{
unsigned int random_variable = 0;
if ((current->flags & PF_RANDOMIZE) &&
!(current->personality & ADDR_NO_RANDOMIZE)) {
random_variable = get_random_int() & STACK_RND_MASK;
random_variable <<= PAGE_SHIFT;
}
return PAGE_ALIGN(stack_top) + random_variable;
return PAGE_ALIGN(stack_top) - random_variable;
}
Note that, it declares the "random_variable" variable as "unsigned int".
Since the result of the shifting operation between STACK_RND_MASK (which
is 0x3fffff on x86_64, 22 bits) and PAGE_SHIFT (which is 12 on x86_64):
random_variable <<= PAGE_SHIFT;
then the two leftmost bits are dropped when storing the result in the
"random_variable". This variable shall be at least 34 bits long to hold
the (22+12) result.
These two dropped bits have an impact on the entropy of process stack.
Concretely, the total stack entropy is reduced by a factor of four: from
2^30 to 2^28 (one fourth of the expected entropy).
This patch restores the entropy by correcting the types involved
in the operations in the functions randomize_stack_top() and
stack_maxrandom_size().
The successful fix can be tested with:
$ for i in `seq 1 10`; do cat /proc/self/maps | grep stack; done
7ffeda566000-7ffeda587000 rw-p 00000000 00:00 0 [stack]
7fff5a332000-7fff5a353000 rw-p 00000000 00:00 0 [stack]
7ffcdb7a1000-7ffcdb7c2000 rw-p 00000000 00:00 0 [stack]
7ffd5e2c4000-7ffd5e2e5000 rw-p 00000000 00:00 0 [stack]
...
Once corrected, the leading bytes should be between 7ffc and 7fff,
rather than always being 7fff.
Signed-off-by: Hector Marco-Gisbert <[email protected]>
Signed-off-by: Ismael Ripoll <[email protected]>
[ Rebased, fixed 80 char bugs, cleaned up commit message, added test example and CVE ]
Signed-off-by: Kees Cook <[email protected]>
Cc: <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Al Viro <[email protected]>
Fixes: CVE-2015-1593
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Borislav Petkov <[email protected]>
|
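The entropy loss is reproducible as plain C arithmetic: a 22-bit value shifted left by 12 needs 34 bits, so the top two bits vanish when the result lands in a 32-bit variable. A minimal sketch (LP64 assumed for unsigned long):

```c
#include <stdio.h>

#define STACK_RND_MASK 0x3fffffUL  /* 22 bits on x86_64, per the message */
#define PAGE_SHIFT     12

int main(void)
{
    unsigned int r32 = STACK_RND_MASK;   /* worst case: all 22 bits set */

    unsigned int  truncated = r32 << PAGE_SHIFT;                /* top 2 bits lost */
    unsigned long widened   = (unsigned long)r32 << PAGE_SHIFT; /* all 34 bits kept */

    printf("32-bit result: %#x\n", truncated);   /* 0xfffff000 */
    printf("64-bit result: %#lx\n", widened);    /* 0x3fffff000 */
    return 0;
}
```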
fill_input_buffer(j_decompress_ptr cinfo)
{
static uchar jpeg_buffer[4096];
size_t nbytes;
nbytes = fread(jpeg_buffer, 1, 4096, ifp);
swab(jpeg_buffer, jpeg_buffer, nbytes);
cinfo->src->next_input_byte = jpeg_buffer;
cinfo->src->bytes_in_buffer = nbytes;
return TRUE;
}
| 0 |
[
"CWE-476",
"CWE-119"
] |
LibRaw
|
d7c3d2cb460be10a3ea7b32e9443a83c243b2251
| 274,426,084,855,765,380,000,000,000,000,000,000,000 | 11 |
Secunia SA75000 advisory: several buffer overruns
|
static void polarssl_zeroize( void *v, size_t n ) {
volatile unsigned char *p = v; while( n-- ) *p++ = 0;
}
| 0 |
[
"CWE-310"
] |
polarssl
|
43c3b28ca6d22f51951e2bd563df039a9f4289ab
| 65,610,607,616,316,590,000,000,000,000,000,000,000 | 3 |
Fix memory leak with crafted ClientHello
|
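The one-liner above writes through a volatile-qualified pointer so the compiler cannot prove the stores dead and elide them, which it may legally do to a plain memset() of a buffer that is never read again. A usage sketch, assuming it sits in the same file as the definition above:

```c
#include <stddef.h>

/* Assumes the polarssl_zeroize() definition above is in the same file. */
void handle_secret(void)
{
    unsigned char key[32];
    /* ... derive and use the key ... */

    /* A plain memset(key, 0, sizeof key) here is a dead store that the
     * optimizer may remove; the volatile byte loop must be performed. */
    polarssl_zeroize(key, sizeof key);
}
```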
__smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
{
struct mid_q_entry *mid;
struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
__u64 wire_mid = le64_to_cpu(shdr->MessageId);
if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
return NULL;
}
spin_lock(&GlobalMid_Lock);
list_for_each_entry(mid, &server->pending_mid_q, qhead) {
if ((mid->mid == wire_mid) &&
(mid->mid_state == MID_REQUEST_SUBMITTED) &&
(mid->command == shdr->Command)) {
kref_get(&mid->refcount);
if (dequeue) {
list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED;
}
spin_unlock(&GlobalMid_Lock);
return mid;
}
}
spin_unlock(&GlobalMid_Lock);
return NULL;
}
| 0 |
[
"CWE-476"
] |
linux
|
d6f5e358452479fa8a773b5c6ccc9e4ec5a20880
| 103,838,951,410,567,230,000,000,000,000,000,000,000 | 28 |
cifs: fix NULL ptr dereference in smb2_ioctl_query_info()
When calling smb2_ioctl_query_info() with invalid
smb_query_info::flags, a NULL ptr dereference is triggered when trying
to kfree() uninitialised rqst[n].rq_iov array.
This also fixes leaked paths that are created in SMB2_open_init()
which required SMB2_open_free() to properly free them.
Here is a small C reproducer that triggers it
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#define die(s) perror(s), exit(1)
#define QUERY_INFO 0xc018cf07
int main(int argc, char *argv[])
{
int fd;
if (argc < 2)
exit(1);
fd = open(argv[1], O_RDONLY);
if (fd == -1)
die("open");
if (ioctl(fd, QUERY_INFO, (uint32_t[]) { 0, 0, 0, 4, 0, 0}) == -1)
die("ioctl");
close(fd);
return 0;
}
mount.cifs //srv/share /mnt -o ...
gcc repro.c && ./a.out /mnt/f0
[ 1832.124468] CIFS: VFS: \\w22-dc.zelda.test\test Invalid passthru query flags: 0x4
[ 1832.125043] general protection fault, probably for non-canonical address 0xdffffc0000000000: 0000 [#1] PREEMPT SMP KASAN NOPTI
[ 1832.125764] KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007]
[ 1832.126241] CPU: 3 PID: 1133 Comm: a.out Not tainted 5.17.0-rc8 #2
[ 1832.126630] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.15.0-0-g2dd4b9b-rebuilt.opensuse.org 04/01/2014
[ 1832.127322] RIP: 0010:smb2_ioctl_query_info+0x7a3/0xe30 [cifs]
[ 1832.127749] Code: 00 00 00 fc ff df 48 c1 ea 03 80 3c 02 00 0f 85 6c 05 00 00 48 b8 00 00 00 00 00 fc ff df 4d 8b 74 24 28 4c 89 f2 48 c1 ea 03 <80> 3c 02 00 0f 85 cb 04 00 00 49 8b 3e e8 bb fc fa ff 48 89 da 48
[ 1832.128911] RSP: 0018:ffffc90000957b08 EFLAGS: 00010256
[ 1832.129243] RAX: dffffc0000000000 RBX: ffff888117e9b850 RCX: ffffffffa020580d
[ 1832.129691] RDX: 0000000000000000 RSI: 0000000000000004 RDI: ffffffffa043a2c0
[ 1832.130137] RBP: ffff888117e9b878 R08: 0000000000000001 R09: 0000000000000003
[ 1832.130585] R10: fffffbfff4087458 R11: 0000000000000001 R12: ffff888117e9b800
[ 1832.131037] R13: 00000000ffffffea R14: 0000000000000000 R15: ffff888117e9b8a8
[ 1832.131485] FS: 00007fcee9900740(0000) GS:ffff888151a00000(0000) knlGS:0000000000000000
[ 1832.131993] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 1832.132354] CR2: 00007fcee9a1ef5e CR3: 0000000114cd2000 CR4: 0000000000350ee0
[ 1832.132801] Call Trace:
[ 1832.132962] <TASK>
[ 1832.133104] ? smb2_query_reparse_tag+0x890/0x890 [cifs]
[ 1832.133489] ? cifs_mapchar+0x460/0x460 [cifs]
[ 1832.133822] ? rcu_read_lock_sched_held+0x3f/0x70
[ 1832.134125] ? cifs_strndup_to_utf16+0x15b/0x250 [cifs]
[ 1832.134502] ? lock_downgrade+0x6f0/0x6f0
[ 1832.134760] ? cifs_convert_path_to_utf16+0x198/0x220 [cifs]
[ 1832.135170] ? smb2_check_message+0x1080/0x1080 [cifs]
[ 1832.135545] cifs_ioctl+0x1577/0x3320 [cifs]
[ 1832.135864] ? lock_downgrade+0x6f0/0x6f0
[ 1832.136125] ? cifs_readdir+0x2e60/0x2e60 [cifs]
[ 1832.136468] ? rcu_read_lock_sched_held+0x3f/0x70
[ 1832.136769] ? __rseq_handle_notify_resume+0x80b/0xbe0
[ 1832.137096] ? __up_read+0x192/0x710
[ 1832.137327] ? __ia32_sys_rseq+0xf0/0xf0
[ 1832.137578] ? __x64_sys_openat+0x11f/0x1d0
[ 1832.137850] __x64_sys_ioctl+0x127/0x190
[ 1832.138103] do_syscall_64+0x3b/0x90
[ 1832.138378] entry_SYSCALL_64_after_hwframe+0x44/0xae
[ 1832.138702] RIP: 0033:0x7fcee9a253df
[ 1832.138937] Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <41> 89 c0 3d 00 f0 ff ff 77 1f 48 8b 44 24 18 64 48 2b 04 25 28 00
[ 1832.140107] RSP: 002b:00007ffeba94a8a0 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
[ 1832.140606] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fcee9a253df
[ 1832.141058] RDX: 00007ffeba94a910 RSI: 00000000c018cf07 RDI: 0000000000000003
[ 1832.141503] RBP: 00007ffeba94a930 R08: 00007fcee9b24db0 R09: 00007fcee9b45c4e
[ 1832.141948] R10: 00007fcee9918d40 R11: 0000000000000246 R12: 00007ffeba94aa48
[ 1832.142396] R13: 0000000000401176 R14: 0000000000403df8 R15: 00007fcee9b78000
[ 1832.142851] </TASK>
[ 1832.142994] Modules linked in: cifs cifs_arc4 cifs_md4 bpf_preload [last unloaded: cifs]
Cc: [email protected]
Signed-off-by: Paulo Alcantara (SUSE) <[email protected]>
Signed-off-by: Steve French <[email protected]>
|
static size_t segment_crypt_serialize(json_object *jobj_segment, uint8_t *buffer)
{
struct jtype j[] = {
{ JSTR, jobj_segment, "type" },
{ JU64, jobj_segment, "offset" },
{ JX64, jobj_segment, "size" },
{ JU64, jobj_segment, "iv_tweak" },
{ JSTR, jobj_segment, "encryption" },
{ JU32, jobj_segment, "sector_size" },
{}
};
return srs(j, buffer);
}
| 0 |
[
"CWE-345"
] |
cryptsetup
|
0113ac2d889c5322659ad0596d4cfc6da53e356c
| 133,795,536,961,601,970,000,000,000,000,000,000,000 | 13 |
Fix CVE-2021-4122 - LUKS2 reencryption crash recovery attack
Fix possible attacks against data confidentiality through LUKS2 online
reencryption extension crash recovery.
An attacker can modify on-disk metadata to simulate decryption in
progress with crashed (unfinished) reencryption step and persistently
decrypt part of the LUKS device.
This attack requires repeated physical access to the LUKS device but
no knowledge of user passphrases.
The decryption step is performed after a valid user activates
the device with a correct passphrase and modified metadata.
There are no visible warnings for the user that such recovery happened
(except using the luksDump command). The attack can also be reversed
afterward (simulating crashed encryption from a plaintext) with
possible modification of revealed plaintext.
The problem was caused by reusing a mechanism designed for actual
reencryption operation without reassessing the security impact for new
encryption and decryption operations. While the reencryption requires
calculating and verifying both key digests, no digest was needed to
initiate decryption recovery if the destination is plaintext (no
encryption key). Also, some metadata (like encryption cipher) is not
protected, and an attacker could change it. Note that LUKS2 protects
visible metadata only when a random change occurs. It does not protect
against intentional modification but such modification must not cause
a violation of data confidentiality.
The fix introduces additional digest protection of reencryption
metadata. The digest is calculated from known keys and critical
reencryption metadata. Now an attacker cannot create correct metadata
digest without knowledge of a passphrase for used keyslots.
For more details, see LUKS2 On-Disk Format Specification version 1.1.0.
|
static int fts3CommitMethod(sqlite3_vtab *pVtab){
TESTONLY( Fts3Table *p = (Fts3Table*)pVtab );
UNUSED_PARAMETER(pVtab);
assert( p->nPendingData==0 );
assert( p->inTransaction!=0 );
assert( p->pSegments==0 );
TESTONLY( p->inTransaction = 0 );
TESTONLY( p->mxSavepoint = -1; );
return SQLITE_OK;
}
| 0 |
[
"CWE-787"
] |
sqlite
|
c72f2fb7feff582444b8ffdc6c900c69847ce8a9
| 241,549,801,248,082,720,000,000,000,000,000,000,000 | 10 |
More improvements to shadow table corruption detection in FTS3.
FossilOrigin-Name: 51525f9c3235967bc00a090e84c70a6400698c897aa4742e817121c725b8c99d
|
void CWebServer::RType_UpdateScene(WebEmSession & session, const request& req, Json::Value &root)
{
if (session.rights != 2)
{
session.reply_status = reply::forbidden;
return; //Only admin user allowed
}
std::string idx = request::findValue(&req, "idx");
std::string name = request::findValue(&req, "name");
std::string description = request::findValue(&req, "description");
if ((idx.empty()) || (name.empty()))
return;
std::string stype = request::findValue(&req, "scenetype");
if (stype.empty())
{
root["status"] = "ERR";
root["message"] = "No Scene Type specified!";
return;
}
std::string tmpstr = request::findValue(&req, "protected");
int iProtected = (tmpstr == "true") ? 1 : 0;
std::string onaction = base64_decode(request::findValue(&req, "onaction"));
std::string offaction = base64_decode(request::findValue(&req, "offaction"));
root["status"] = "OK";
root["title"] = "UpdateScene";
m_sql.safe_query("UPDATE Scenes SET Name='%q', Description='%q', SceneType=%d, Protected=%d, OnAction='%q', OffAction='%q' WHERE (ID == '%q')",
name.c_str(),
description.c_str(),
atoi(stype.c_str()),
iProtected,
onaction.c_str(),
offaction.c_str(),
idx.c_str()
);
uint64_t ullidx = std::strtoull(idx.c_str(), nullptr, 10);
m_mainworker.m_eventsystem.WWWUpdateSingleState(ullidx, name, m_mainworker.m_eventsystem.REASON_SCENEGROUP);
}
| 0 |
[
"CWE-89"
] |
domoticz
|
ee70db46f81afa582c96b887b73bcd2a86feda00
| 120,501,236,522,196,000,000,000,000,000,000,000,000 | 41 |
Fixed possible SQL Injection Vulnerability (Thanks to Fabio Carretto!)
|
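safe_query()'s '%q' placeholders mirror SQLite's sqlite3_mprintf() formatting, where %q doubles embedded single quotes so input cannot break out of the string literal. A hedged contrast using SQLite's C API directly (domoticz's wrapper itself is not shown in this row):

```c
#include <stdio.h>
#include <sqlite3.h>

int main(void)
{
    const char *name = "x'; DROP TABLE Scenes; --";

    /* %q escapes embedded single quotes, keeping the input inside the
     * SQL string literal; %s would splice it into the statement. */
    char *safe   = sqlite3_mprintf("UPDATE Scenes SET Name='%q'", name);
    char *unsafe = sqlite3_mprintf("UPDATE Scenes SET Name='%s'", name);

    printf("safe:   %s\nunsafe: %s\n", safe, unsafe);
    sqlite3_free(safe);
    sqlite3_free(unsafe);
    return 0;
}
```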
Value ExpressionSetUnion::evaluate(const Document& root) const {
ValueSet unionedSet = getExpressionContext()->getValueComparator().makeOrderedValueSet();
const size_t n = vpOperand.size();
for (size_t i = 0; i < n; i++) {
const Value newEntries = vpOperand[i]->evaluate(root);
if (newEntries.nullish()) {
return Value(BSONNULL);
}
uassert(17043,
str::stream() << "All operands of $setUnion must be arrays. One argument"
<< " is of type: "
<< typeName(newEntries.getType()),
newEntries.isArray());
unionedSet.insert(newEntries.getArray().begin(), newEntries.getArray().end());
}
return Value(vector<Value>(unionedSet.begin(), unionedSet.end()));
}
| 0 |
[
"CWE-835"
] |
mongo
|
0a076417d1d7fba3632b73349a1fd29a83e68816
| 274,786,962,063,774,570,000,000,000,000,000,000,000 | 18 |
SERVER-38070 fix infinite loop in agg expression
|
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
void (*setup)(struct net_device *),
unsigned int txqs, unsigned int rxqs)
{
struct net_device *dev;
size_t alloc_size;
struct net_device *p;
BUG_ON(strlen(name) >= sizeof(dev->name));
if (txqs < 1) {
pr_err("alloc_netdev: Unable to allocate device "
"with zero queues.\n");
return NULL;
}
#ifdef CONFIG_RPS
if (rxqs < 1) {
pr_err("alloc_netdev: Unable to allocate device "
"with zero RX queues.\n");
return NULL;
}
#endif
alloc_size = sizeof(struct net_device);
if (sizeof_priv) {
/* ensure 32-byte alignment of private area */
alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
alloc_size += sizeof_priv;
}
/* ensure 32-byte alignment of whole construct */
alloc_size += NETDEV_ALIGN - 1;
p = kzalloc(alloc_size, GFP_KERNEL);
if (!p) {
printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
return NULL;
}
dev = PTR_ALIGN(p, NETDEV_ALIGN);
dev->padded = (char *)dev - (char *)p;
dev->pcpu_refcnt = alloc_percpu(int);
if (!dev->pcpu_refcnt)
goto free_p;
if (dev_addr_init(dev))
goto free_pcpu;
dev_mc_init(dev);
dev_uc_init(dev);
dev_net_set(dev, &init_net);
dev->gso_max_size = GSO_MAX_SIZE;
INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
dev->ethtool_ntuple_list.count = 0;
INIT_LIST_HEAD(&dev->napi_list);
INIT_LIST_HEAD(&dev->unreg_list);
INIT_LIST_HEAD(&dev->link_watch_list);
dev->priv_flags = IFF_XMIT_DST_RELEASE;
setup(dev);
dev->num_tx_queues = txqs;
dev->real_num_tx_queues = txqs;
if (netif_alloc_netdev_queues(dev))
goto free_all;
#ifdef CONFIG_RPS
dev->num_rx_queues = rxqs;
dev->real_num_rx_queues = rxqs;
if (netif_alloc_rx_queues(dev))
goto free_all;
#endif
strcpy(dev->name, name);
return dev;
free_all:
free_netdev(dev);
return NULL;
free_pcpu:
free_percpu(dev->pcpu_refcnt);
kfree(dev->_tx);
#ifdef CONFIG_RPS
kfree(dev->_rx);
#endif
free_p:
kfree(p);
return NULL;
}
| 0 |
[
"CWE-264"
] |
linux
|
8909c9ad8ff03611c9c96c9a92656213e4bb495b
| 268,709,819,665,000,840,000,000,000,000,000,000,000 | 94 |
net: don't allow CAP_NET_ADMIN to load non-netdev kernel modules
Since a8f80e8ff94ecba629542d9b4b5f5a8ee3eb565c any process with
CAP_NET_ADMIN may load any module from /lib/modules/. This doesn't mean
that CAP_NET_ADMIN is a superset of CAP_SYS_MODULE as modules are
limited to /lib/modules/**. However, CAP_NET_ADMIN capability shouldn't
allow anybody load any module not related to networking.
This patch restricts an ability of autoloading modules to netdev modules
with explicit aliases. This fixes CVE-2011-1019.
Arnd Bergmann suggested leaving untouched the old pre-v2.6.32 behavior
of loading netdev modules by name (without any prefix) for processes
with CAP_SYS_MODULE, to maintain compatibility with network scripts
that autoload netdev modules by aliases like "eth0", "wlan0".
Currently there are only three users of the feature in the upstream
kernel: ipip, ip_gre and sit.
root@albatros:~# capsh --drop=$(seq -s, 0 11),$(seq -s, 13 34) --
root@albatros:~# grep Cap /proc/$$/status
CapInh: 0000000000000000
CapPrm: fffffff800001000
CapEff: fffffff800001000
CapBnd: fffffff800001000
root@albatros:~# modprobe xfs
FATAL: Error inserting xfs
(/lib/modules/2.6.38-rc6-00001-g2bf4ca3/kernel/fs/xfs/xfs.ko): Operation not permitted
root@albatros:~# lsmod | grep xfs
root@albatros:~# ifconfig xfs
xfs: error fetching interface information: Device not found
root@albatros:~# lsmod | grep xfs
root@albatros:~# lsmod | grep sit
root@albatros:~# ifconfig sit
sit: error fetching interface information: Device not found
root@albatros:~# lsmod | grep sit
root@albatros:~# ifconfig sit0
sit0 Link encap:IPv6-in-IPv4
NOARP MTU:1480 Metric:1
root@albatros:~# lsmod | grep sit
sit 10457 0
tunnel4 2957 1 sit
For CAP_SYS_MODULE module loading is still relaxed:
root@albatros:~# grep Cap /proc/$$/status
CapInh: 0000000000000000
CapPrm: ffffffffffffffff
CapEff: ffffffffffffffff
CapBnd: ffffffffffffffff
root@albatros:~# ifconfig xfs
xfs: error fetching interface information: Device not found
root@albatros:~# lsmod | grep xfs
xfs 745319 0
Reference: https://lkml.org/lkml/2011/2/24/203
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: Michael Tokarev <[email protected]>
Acked-by: David S. Miller <[email protected]>
Acked-by: Kees Cook <[email protected]>
Signed-off-by: James Morris <[email protected]>
|
static void sctp_association_destroy(struct sctp_association *asoc)
{
SCTP_ASSERT(asoc->base.dead, "Assoc is not dead", return);
sctp_endpoint_put(asoc->ep);
sock_put(asoc->base.sk);
if (asoc->assoc_id != 0) {
spin_lock_bh(&sctp_assocs_id_lock);
idr_remove(&sctp_assocs_id, asoc->assoc_id);
spin_unlock_bh(&sctp_assocs_id_lock);
}
WARN_ON(atomic_read(&asoc->rmem_alloc));
if (asoc->base.malloced) {
kfree(asoc);
SCTP_DBG_OBJCNT_DEC(assoc);
}
}
| 0 |
[
"CWE-287"
] |
linux-2.6
|
add52379dde2e5300e2d574b172e62c6cf43b3d3
| 94,897,833,374,069,320,000,000,000,000,000,000,000 | 20 |
sctp: Fix oops when INIT-ACK indicates that peer doesn't support AUTH
If INIT-ACK is received with SupportedExtensions parameter which
indicates that the peer does not support AUTH, the packet will be
silently ignored, and sctp_process_init() will clean up all of the
transports in the association.
When the T1-Init timer expires, an OOPS happens while we try to choose
a different init transport.
The solution is to only clean up the non-active transports, i.e.
the ones that the peer added. However, that introduces a problem
with sctp_connectx(), because we don't mark the proper state for
the transports provided by the user. So, we'll simply mark
user-provided transports as ACTIVE. That will allow INIT
retransmissions to work properly in the sctp_connectx() context
and prevent the crash.
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
protocol_handshake_oldstyle (struct connection *conn)
{
struct old_handshake handshake;
uint64_t exportsize;
uint16_t gflags, eflags;
/* In --tls=require / FORCEDTLS mode, old style handshakes are
* rejected because they cannot support TLS.
*/
if (tls == 2) {
nbdkit_error ("non-TLS client tried to connect in --tls=require mode");
return -1;
}
if (protocol_common_open (conn, &exportsize, &eflags) == -1)
return -1;
gflags = 0;
debug ("oldstyle negotiation: flags: global 0x%x export 0x%x",
gflags, eflags);
memset (&handshake, 0, sizeof handshake);
memcpy (handshake.nbdmagic, "NBDMAGIC", 8);
handshake.version = htobe64 (OLD_VERSION);
handshake.exportsize = htobe64 (exportsize);
handshake.gflags = htobe16 (gflags);
handshake.eflags = htobe16 (eflags);
if (conn->send (conn, &handshake, sizeof handshake, 0) == -1) {
nbdkit_error ("write: %m");
return -1;
}
return 0;
}
| 0 |
[
"CWE-406"
] |
nbdkit
|
c05686f9577fa91b6a3a4d8c065954ca6fc3fd62
| 126,734,019,457,950,470,000,000,000,000,000,000,000 | 35 |
server: Wait until handshake complete before calling .open callback
Currently we call the plugin .open callback as soon as we receive a
TCP connection:
$ nbdkit -fv --tls=require --tls-certificates=tests/pki null \
--run "telnet localhost 10809"
[...]
Trying ::1...
Connected to localhost.
Escape character is '^]'.
nbdkit: debug: accepted connection
nbdkit: debug: null: open readonly=0 ◀ NOTE
nbdkit: null[1]: debug: newstyle negotiation: flags: global 0x3
NBDMAGICIHAVEOPT
In plugins such as curl, guestfs, ssh, vddk and others we do a
considerable amount of work in the .open callback (such as making a
remote connection or launching an appliance). Therefore we are
providing an easy Denial of Service / Amplification Attack for
unauthorized clients to cause a lot of work to be done for only the
cost of a simple TCP 3 way handshake.
This commit moves the call to the .open callback after the NBD
handshake has mostly completed. In particular TLS authentication must
be complete before we will call into the plugin.
It is unlikely that there are plugins which really depend on the
current behaviour of .open (which I found surprising even though I
guess I must have written it). If there are then we could add a new
.connect callback or similar to allow plugins to get control at the
earlier point in the connection.
After this change you can see that the .open callback is not called
from just a simple TCP connection:
$ ./nbdkit -fv --tls=require --tls-certificates=tests/pki null \
--run "telnet localhost 10809"
[...]
Trying ::1...
Connected to localhost.
Escape character is '^]'.
nbdkit: debug: accepted connection
nbdkit: null[1]: debug: newstyle negotiation: flags: global 0x3
NBDMAGICIHAVEOPT
xx
nbdkit: null[1]: debug: newstyle negotiation: client flags: 0xd0a7878
nbdkit: null[1]: error: client requested unknown flags 0xd0a7878
Connection closed by foreign host.
nbdkit: debug: null: unload plugin
Signed-off-by: Richard W.M. Jones <[email protected]>
|
initialize_job_control (force)
int force;
{
shell_tty = fileno (stderr);
if (interactive)
get_tty_state ();
return 0;
}
| 0 |
[] |
bash
|
955543877583837c85470f7fb8a97b7aa8d45e6c
| 104,827,355,997,505,570,000,000,000,000,000,000,000 | 9 |
bash-4.4-rc2 release
|
static int nft_block_offload_cmd(struct nft_base_chain *chain,
struct net_device *dev,
enum flow_block_command cmd)
{
struct netlink_ext_ack extack = {};
struct flow_block_offload bo;
int err;
nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
if (err < 0)
return err;
return nft_block_setup(chain, &bo, cmd);
}
| 0 |
[
"CWE-269"
] |
nf
|
b1a5983f56e371046dcf164f90bfaf704d2b89f6
| 36,477,569,603,539,086,000,000,000,000,000,000,000 | 16 |
netfilter: nf_tables_offload: incorrect flow offload action array size
The immediate verdict expression needs to allocate one slot in the flow
offload action array; however, the immediate data expression does not.
The fwd and dup expressions also need to allocate one slot, which is missing.
Add a new offload_action interface to report if this expression needs to
allocate one slot in the flow offload action array.
Fixes: be2861dc36d7 ("netfilter: nft_{fwd,dup}_netdev: add offload support")
Reported-and-tested-by: Nick Gregory <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
int kvm_iommu_map_guest(struct kvm *kvm)
{
int r;
if (!iommu_present(&pci_bus_type)) {
printk(KERN_ERR "%s: iommu not found\n", __func__);
return -ENODEV;
}
mutex_lock(&kvm->slots_lock);
kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
if (!kvm->arch.iommu_domain) {
r = -ENOMEM;
goto out_unlock;
}
if (!allow_unsafe_assigned_interrupts &&
!iommu_domain_has_cap(kvm->arch.iommu_domain,
IOMMU_CAP_INTR_REMAP)) {
printk(KERN_WARNING "%s: No interrupt remapping support,"
" disallowing device assignment."
" Re-enble with \"allow_unsafe_assigned_interrupts=1\""
" module option.\n", __func__);
iommu_domain_free(kvm->arch.iommu_domain);
kvm->arch.iommu_domain = NULL;
r = -EPERM;
goto out_unlock;
}
r = kvm_iommu_map_memslots(kvm);
if (r)
kvm_iommu_unmap_memslots(kvm);
out_unlock:
mutex_unlock(&kvm->slots_lock);
return r;
}
| 0 |
[
"CWE-264"
] |
kvm
|
21a1416a1c945c5aeaeaf791b63c64926018eb77
| 85,453,402,610,150,120,000,000,000,000,000,000,000 | 38 |
KVM: lock slots_lock around device assignment
As pointed out by Jason Baron, when assigning a device to a guest
we first set the iommu domain pointer, which enables mapping
and unmapping of memory slots to the iommu. This leaves a window
where this path is enabled, but we haven't synchronized the iommu
mappings to the existing memory slots. Thus a slot being removed
at that point could send us down unexpected code paths removing
non-existent pinnings and iommu mappings. Take the slots_lock
around creating the iommu domain and initial mappings as well as
around iommu teardown to avoid this race.
Signed-off-by: Alex Williamson <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
|
static void __exit pegasus_exit(void)
{
usb_deregister(&pegasus_driver);
}
| 0 |
[
"CWE-119",
"CWE-284"
] |
linux
|
5593523f968bc86d42a035c6df47d5e0979b5ace
| 172,733,280,395,370,330,000,000,000,000,000,000,000 | 4 |
pegasus: Use heap buffers for all register access
Allocating USB buffers on the stack is not portable, and no longer
works on x86_64 (with VMAP_STACK enabled as per default).
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
References: https://bugs.debian.org/852556
Reported-by: Lisandro Damián Nicanor Pérez Meyer <[email protected]>
Tested-by: Lisandro Damián Nicanor Pérez Meyer <[email protected]>
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
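The message's point is that buffers handed to the USB core may be DMA-mapped and therefore cannot live on a vmap'd stack. A hedged kernel-style sketch of the pattern, with a hypothetical helper and request constant (not the driver's verbatim code):

```c
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

#define PEGASUS_REQ_GET_REGS 0xf0   /* illustrative request value */

/* Hypothetical helper: bounce register reads through a kmalloc'd
 * buffer, since stack memory must not be handed to usb_control_msg(). */
static int get_registers(struct usb_device *udev, u16 indx, u16 size, void *data)
{
    int ret;
    void *buf = kmalloc(size, GFP_KERNEL);   /* DMA-able heap memory */

    if (!buf)
        return -ENOMEM;

    ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                          PEGASUS_REQ_GET_REGS,
                          USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                          0, indx, buf, size, 1000);
    if (ret >= 0)
        memcpy(data, buf, size);             /* copy out to the caller */
    kfree(buf);
    return ret;
}
```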
Init_ossl_cipher(void)
{
#if 0
mOSSL = rb_define_module("OpenSSL"); /* let rdoc know about mOSSL */
#endif
/* Document-class: OpenSSL::Cipher
*
* Provides symmetric algorithms for encryption and decryption. The
* algorithms that are available depend on the particular version
* of OpenSSL that is installed.
*
* === Listing all supported algorithms
*
* A list of supported algorithms can be obtained by
*
* puts OpenSSL::Cipher.ciphers
*
* === Instantiating a Cipher
*
* There are several ways to create a Cipher instance. Generally, a
* Cipher algorithm is categorized by its name, the key length in bits
* and the cipher mode to be used. The most generic way to create a
* Cipher is the following
*
* cipher = OpenSSL::Cipher.new('<name>-<key length>-<mode>')
*
* That is, a string consisting of the hyphenated concatenation of the
* individual components name, key length and mode. Either all uppercase
* or all lowercase strings may be used, for example:
*
* cipher = OpenSSL::Cipher.new('AES-128-CBC')
*
* For each algorithm supported, there is a class defined under the
* Cipher class that goes by the name of the cipher, e.g. to obtain an
* instance of AES, you could also use
*
* # these are equivalent
* cipher = OpenSSL::Cipher::AES.new(128, :CBC)
* cipher = OpenSSL::Cipher::AES.new(128, 'CBC')
* cipher = OpenSSL::Cipher::AES.new('128-CBC')
*
* Finally, due to its wide-spread use, there are also extra classes
* defined for the different key sizes of AES
*
* cipher = OpenSSL::Cipher::AES128.new(:CBC)
* cipher = OpenSSL::Cipher::AES192.new(:CBC)
* cipher = OpenSSL::Cipher::AES256.new(:CBC)
*
* === Choosing either encryption or decryption mode
*
* Encryption and decryption are often very similar operations for
* symmetric algorithms, this is reflected by not having to choose
* different classes for either operation, both can be done using the
* same class. Still, after obtaining a Cipher instance, we need to
* tell the instance what it is that we intend to do with it, so we
* need to call either
*
* cipher.encrypt
*
* or
*
* cipher.decrypt
*
* on the Cipher instance. This should be the first call after creating
* the instance, otherwise configuration that has already been set could
* get lost in the process.
*
* === Choosing a key
*
* Symmetric encryption requires a key that is the same for the encrypting
* and for the decrypting party and after initial key establishment should
* be kept as private information. There are a lot of ways to create
* insecure keys, the most notable is to simply take a password as the key
* without processing the password further. A simple and secure way to
* create a key for a particular Cipher is
*
* cipher = OpenSSL::AES256.new(:CFB)
* cipher.encrypt
* key = cipher.random_key # also sets the generated key on the Cipher
*
* If you absolutely need to use passwords as encryption keys, you
* should use Password-Based Key Derivation Function 2 (PBKDF2) by
* generating the key with the help of the functionality provided by
* OpenSSL::PKCS5.pbkdf2_hmac_sha1 or OpenSSL::PKCS5.pbkdf2_hmac.
*
* Although there is Cipher#pkcs5_keyivgen, its use is deprecated and
* it should only be used in legacy applications because it does not use
* the newer PKCS#5 v2 algorithms.
*
* === Choosing an IV
*
* The cipher modes CBC, CFB, OFB and CTR all need an "initialization
* vector", or short, IV. ECB mode is the only mode that does not require
* an IV, but there is almost no legitimate use case for this mode
* because of the fact that it does not sufficiently hide plaintext
* patterns. Therefore
*
* <b>You should never use ECB mode unless you are absolutely sure that
* you absolutely need it</b>
*
* Because of this, you will end up with a mode that explicitly requires
* an IV in any case. Note that for backwards compatibility reasons,
* setting an IV is not explicitly mandated by the Cipher API. If not
* set, OpenSSL itself defaults to an all-zeroes IV ("\\0", not the
* character). Although the IV can be seen as public information, i.e.
* it may be transmitted in public once generated, it should still stay
* unpredictable to prevent certain kinds of attacks. Therefore, ideally
*
* <b>Always create a secure random IV for every encryption of your
* Cipher</b>
*
* A new, random IV should be created for every encryption of data. Think
* of the IV as a nonce (number used once) - it's public but random and
* unpredictable. A secure random IV can be created as follows
*
* cipher = ...
* cipher.encrypt
* key = cipher.random_key
* iv = cipher.random_iv # also sets the generated IV on the Cipher
*
* Although the key is generally a random value, too, it is a bad choice
* as an IV. There are elaborate ways how an attacker can take advantage
* of such an IV. As a general rule of thumb, exposing the key directly
* or indirectly should be avoided at all cost and exceptions only be
* made with good reason.
*
* === Calling Cipher#final
*
* ECB (which should not be used) and CBC are both block-based modes.
* This means that unlike for the other streaming-based modes, they
* operate on fixed-size blocks of data, and therefore they require a
* "finalization" step to produce or correctly decrypt the last block of
* data by appropriately handling some form of padding. Therefore it is
* essential to add the output of OpenSSL::Cipher#final to your
* encryption/decryption buffer or you will end up with decryption errors
* or truncated data.
*
* Although this is not really necessary for streaming-mode ciphers, it is
* still recommended to apply the same pattern of adding the output of
* Cipher#final there as well - it also enables you to switch between
* modes more easily in the future.
*
* === Encrypting and decrypting some data
*
* data = "Very, very confidential data"
*
* cipher = OpenSSL::Cipher::AES.new(128, :CBC)
* cipher.encrypt
* key = cipher.random_key
* iv = cipher.random_iv
*
* encrypted = cipher.update(data) + cipher.final
* ...
* decipher = OpenSSL::Cipher::AES.new(128, :CBC)
* decipher.decrypt
* decipher.key = key
* decipher.iv = iv
*
* plain = decipher.update(encrypted) + decipher.final
*
* puts data == plain #=> true
*
* === Authenticated Encryption and Associated Data (AEAD)
*
* If the OpenSSL version used supports it, an Authenticated Encryption
* mode (such as GCM or CCM) should always be preferred over any
* unauthenticated mode. Currently, OpenSSL supports AE only in combination
* with Associated Data (AEAD) where additional associated data is included
* in the encryption process to compute a tag at the end of the encryption.
* This tag will also be used in the decryption process and by verifying
* its validity, the authenticity of a given ciphertext is established.
*
* This is superior to unauthenticated modes in that it allows to detect
* if somebody effectively changed the ciphertext after it had been
* encrypted. This prevents malicious modifications of the ciphertext that
* could otherwise be exploited to modify ciphertexts in ways beneficial to
* potential attackers.
*
* If no associated data is needed for encryption and later decryption,
* the OpenSSL library still requires a value to be set - "" may be used in
* case none is available. An example using the GCM (Galois Counter Mode):
*
* cipher = OpenSSL::Cipher::AES.new(128, :GCM)
* cipher.encrypt
* key = cipher.random_key
* iv = cipher.random_iv
* cipher.auth_data = ""
*
* encrypted = cipher.update(data) + cipher.final
* tag = cipher.auth_tag
*
* decipher = OpenSSL::Cipher::AES.new(128, :GCM)
* decipher.decrypt
* decipher.key = key
* decipher.iv = iv
* decipher.auth_tag = tag
* decipher.auth_data = ""
*
* plain = decipher.update(encrypted) + decipher.final
*
* puts data == plain #=> true
*/
cCipher = rb_define_class_under(mOSSL, "Cipher", rb_cObject);
eCipherError = rb_define_class_under(cCipher, "CipherError", eOSSLError);
rb_define_alloc_func(cCipher, ossl_cipher_alloc);
rb_define_copy_func(cCipher, ossl_cipher_copy);
rb_define_module_function(cCipher, "ciphers", ossl_s_ciphers, 0);
rb_define_method(cCipher, "initialize", ossl_cipher_initialize, 1);
rb_define_method(cCipher, "reset", ossl_cipher_reset, 0);
rb_define_method(cCipher, "encrypt", ossl_cipher_encrypt, -1);
rb_define_method(cCipher, "decrypt", ossl_cipher_decrypt, -1);
rb_define_method(cCipher, "pkcs5_keyivgen", ossl_cipher_pkcs5_keyivgen, -1);
rb_define_method(cCipher, "update", ossl_cipher_update, -1);
rb_define_method(cCipher, "final", ossl_cipher_final, 0);
rb_define_method(cCipher, "name", ossl_cipher_name, 0);
rb_define_method(cCipher, "key=", ossl_cipher_set_key, 1);
rb_define_method(cCipher, "auth_data=", ossl_cipher_set_auth_data, 1);
rb_define_method(cCipher, "auth_tag=", ossl_cipher_set_auth_tag, 1);
rb_define_method(cCipher, "auth_tag", ossl_cipher_get_auth_tag, -1);
rb_define_method(cCipher, "authenticated?", ossl_cipher_is_authenticated, 0);
rb_define_method(cCipher, "key_len=", ossl_cipher_set_key_length, 1);
rb_define_method(cCipher, "key_len", ossl_cipher_key_length, 0);
rb_define_method(cCipher, "iv=", ossl_cipher_set_iv, 1);
rb_define_method(cCipher, "iv_len", ossl_cipher_iv_length, 0);
rb_define_method(cCipher, "block_size", ossl_cipher_block_size, 0);
rb_define_method(cCipher, "padding=", ossl_cipher_set_padding, 1);
id_key_set = rb_intern_const("key_set");
}
| 0 |
[
"CWE-326"
] |
ruby
|
739782e37a6662fea379e7ef3ec89e851b04b46c
| 145,505,184,337,765,840,000,000,000,000,000,000,000 | 231 |
* ext/openssl/ossl_cipher.c: remove the encryption key initialization
from Cipher#initialize. This is effectively a revert of r32723
("Avoid possible SEGV from AES encryption/decryption", 2011-07-28).
the patch is derived from https://github.com/ruby/openssl/commit/8108e0a6db133f3375608303fdd2083eb5115062,
written by Kazuki Yamaguchi.
[Backport #8221]
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_2_3@59267 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
|
static void __net_exit nf_tables_pre_exit_net(struct net *net)
{
__nft_release_hooks(net);
}
| 0 |
[
"CWE-665"
] |
linux
|
ad9f151e560b016b6ad3280b48e42fa11e1a5440
| 25,426,490,198,765,545,000,000,000,000,000,000,000 | 4 |
netfilter: nf_tables: initialize set before expression setup
nft_set_elem_expr_alloc() needs an initialized set if expression sets on
the NFT_EXPR_GC flag. Move set fields initialization before expression
setup.
[4512935.019450] ==================================================================
[4512935.019456] BUG: KASAN: null-ptr-deref in nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019487] Read of size 8 at addr 0000000000000070 by task nft/23532
[4512935.019494] CPU: 1 PID: 23532 Comm: nft Not tainted 5.12.0-rc4+ #48
[...]
[4512935.019502] Call Trace:
[4512935.019505] dump_stack+0x89/0xb4
[4512935.019512] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019536] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019560] kasan_report.cold.12+0x5f/0xd8
[4512935.019566] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019590] nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019615] nf_tables_newset+0xc7f/0x1460 [nf_tables]
Reported-by: [email protected]
Fixes: 65038428b2c6 ("netfilter: nf_tables: allow to specify stateful expression in set definition")
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
static void qemu_input_transform_abs_rotate(InputEvent *evt)
{
InputMoveEvent *move = evt->u.abs.data;
switch (graphic_rotate) {
case 90:
if (move->axis == INPUT_AXIS_X) {
move->axis = INPUT_AXIS_Y;
} else if (move->axis == INPUT_AXIS_Y) {
move->axis = INPUT_AXIS_X;
move->value = qemu_input_transform_invert_abs_value(move->value);
}
break;
case 180:
move->value = qemu_input_transform_invert_abs_value(move->value);
break;
case 270:
if (move->axis == INPUT_AXIS_X) {
move->axis = INPUT_AXIS_Y;
move->value = qemu_input_transform_invert_abs_value(move->value);
} else if (move->axis == INPUT_AXIS_Y) {
move->axis = INPUT_AXIS_X;
}
break;
}
}
| 0 |
[
"CWE-772"
] |
qemu
|
77b0359bf414ad666d1714dc9888f1017c08e283
| 104,808,401,768,053,340,000,000,000,000,000,000,000 | 25 |
input: Decrement queue count on kbd delay
Delays in the input layer are special-cased input events. Every input
event is accounted for in a global input queue count. The special-cased
delays however did not get removed from the queue, leading to queue overruns
and thus silent key drops after typing quite a few characters.
Signed-off-by: Alexander Graf <[email protected]>
Message-id: [email protected]
Fixes: be1a7176 ("input: add support for kbd delays")
Cc: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]>
|
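The accounting bug the message describes is easy to reproduce in miniature: if one item type never decrements the shared count on release, the count creeps up until new events are dropped. An illustrative sketch, not QEMU's actual code:

```c
#include <stdio.h>

/* Illustrative sketch of the bug class: a global queue count that
 * every enqueued item, including "special" delay items, must
 * decrement when released. */
enum { QUEUE_EVENT, QUEUE_DELAY };

#define QUEUE_MAX 4
static int queue_count;

static int enqueue(int type)
{
    if (queue_count >= QUEUE_MAX)
        return -1;                   /* further events are silently dropped */
    queue_count++;
    return type;
}

static void release_buggy(int type)
{
    if (type == QUEUE_DELAY)
        return;                      /* bug: delays never leave the count */
    queue_count--;
}

int main(void)
{
    for (int i = 0; i < 10; i++) {
        int item = enqueue(i % 2 ? QUEUE_DELAY : QUEUE_EVENT);
        if (item < 0) {
            printf("event %d dropped (count stuck at %d)\n", i, queue_count);
            continue;
        }
        release_buggy(item);
    }
    return 0;
}
```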
xmlBufferEmpty(xmlBufferPtr buf) {
if (buf == NULL) return;
if (buf->content == NULL) return;
buf->use = 0;
if (buf->alloc == XML_BUFFER_ALLOC_IMMUTABLE) {
buf->content = BAD_CAST "";
} else if ((buf->alloc == XML_BUFFER_ALLOC_IO) &&
(buf->contentIO != NULL)) {
size_t start_buf = buf->content - buf->contentIO;
buf->size += start_buf;
buf->content = buf->contentIO;
buf->content[0] = 0;
} else {
buf->content[0] = 0;
}
}
| 0 |
[
"CWE-20"
] |
libxml2
|
bdd66182ef53fe1f7209ab6535fda56366bd7ac9
| 295,507,512,595,511,280,000,000,000,000,000,000,000 | 17 |
Avoid building recursive entities
For https://bugzilla.gnome.org/show_bug.cgi?id=762100
When we detect a recursive entity we should really not
build the associated data, moreover if someone bypasses
libxml2 fatal errors and still tries to serialize a broken
entity, make sure we don't risk getting into a recursion
* parser.c: xmlParserEntityCheck() don't build if entity loop
were found and remove the associated text content
* tree.c: xmlStringGetNodeList() avoid a potential recursion
|
TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponse) {
setup(false, "envoy-custom-server", false);
// Store the basic request encoder during filter chain setup.
std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());
EXPECT_CALL(*filter, decodeHeaders(_, true))
.Times(2)
.WillRepeatedly(Invoke([&](RequestHeaderMap& headers, bool) -> FilterHeadersStatus {
EXPECT_NE(nullptr, headers.ForwardedFor());
EXPECT_EQ("http", headers.getForwardedProtoValue());
if (headers.Path()->value() == "/healthcheck") {
filter->callbacks_->streamInfo().healthCheck(true);
}
return FilterHeadersStatus::StopIteration;
}));
EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)).Times(2);
EXPECT_CALL(filter_factory_, createFilterChain(_))
.Times(2)
.WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {
callbacks.addStreamDecoderFilter(filter);
}));
EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(2);
// When dispatch is called on the codec, we pretend to get a new stream and then fire a headers
// only request into it. Then we respond into the filter.
NiceMock<MockResponseEncoder> encoder;
EXPECT_CALL(*codec_, dispatch(_))
.Times(2)
.WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {
RequestDecoder* decoder = &conn_manager_->newStream(encoder);
// Test not charging stats on the second call.
if (data.length() == 4) {
RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{
{":authority", "host"}, {":path", "/"}, {":method", "GET"}}};
decoder->decodeHeaders(std::move(headers), true);
} else {
RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{
{":authority", "host"}, {":path", "/healthcheck"}, {":method", "GET"}}};
decoder->decodeHeaders(std::move(headers), true);
}
ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}};
filter->callbacks_->encodeHeaders(std::move(response_headers), true);
// Drain 2 so that on the 2nd iteration we will hit zero.
data.drain(2);
return Http::okStatus();
}));
// Kick off the incoming data. Use extra data which should cause a redispatch.
Buffer::OwnedImpl fake_input("1234");
conn_manager_->onData(fake_input, false);
EXPECT_EQ(1U, stats_.named_.downstream_rq_2xx_.value());
EXPECT_EQ(1U, listener_stats_.downstream_rq_2xx_.value());
EXPECT_EQ(1U, stats_.named_.downstream_rq_completed_.value());
EXPECT_EQ(1U, listener_stats_.downstream_rq_completed_.value());
}
| 0 |
[
"CWE-400"
] |
envoy
|
0e49a495826ea9e29134c1bd54fdeb31a034f40c
| 294,983,977,043,730,100,000,000,000,000,000,000,000 | 64 |
http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]>
|
unsigned int get_num_deleted() const {
return num_deleted;
}
| 0 |
[
"CWE-770"
] |
ceph
|
ab29bed2fc9f961fe895de1086a8208e21ddaddc
| 86,589,332,993,444,280,000,000,000,000,000,000,000 | 3 |
rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure, also make rgw_op use the value from req_info ceph context as
we do for all the requests
|
static NTSTATUS smb2cli_inbuf_parse_compound(struct smbXcli_conn *conn,
uint8_t *buf,
size_t buflen,
TALLOC_CTX *mem_ctx,
struct iovec **piov, int *pnum_iov)
{
struct iovec *iov;
int num_iov = 0;
size_t taken = 0;
uint8_t *first_hdr = buf;
size_t verified_buflen = 0;
uint8_t *tf = NULL;
size_t tf_len = 0;
iov = talloc_array(mem_ctx, struct iovec, num_iov);
if (iov == NULL) {
return NT_STATUS_NO_MEMORY;
}
while (taken < buflen) {
size_t len = buflen - taken;
uint8_t *hdr = first_hdr + taken;
struct iovec *cur;
size_t full_size;
size_t next_command_ofs;
uint16_t body_size;
struct iovec *iov_tmp;
if (verified_buflen > taken) {
len = verified_buflen - taken;
} else {
tf = NULL;
tf_len = 0;
}
if (len < 4) {
DEBUG(10, ("%d bytes left, expected at least %d\n",
(int)len, 4));
goto inval;
}
if (IVAL(hdr, 0) == SMB2_TF_MAGIC) {
struct smbXcli_session *s;
uint64_t uid;
struct iovec tf_iov[2];
size_t enc_len;
NTSTATUS status;
if (len < SMB2_TF_HDR_SIZE) {
DEBUG(10, ("%d bytes left, expected at least %d\n",
(int)len, SMB2_TF_HDR_SIZE));
goto inval;
}
tf = hdr;
tf_len = SMB2_TF_HDR_SIZE;
taken += tf_len;
hdr = first_hdr + taken;
enc_len = IVAL(tf, SMB2_TF_MSG_SIZE);
uid = BVAL(tf, SMB2_TF_SESSION_ID);
if (len < SMB2_TF_HDR_SIZE + enc_len) {
DEBUG(10, ("%d bytes left, expected at least %d\n",
(int)len,
(int)(SMB2_TF_HDR_SIZE + enc_len)));
goto inval;
}
s = smbXcli_session_by_uid(conn, uid);
if (s == NULL) {
DEBUG(10, ("unknown session_id %llu\n",
(unsigned long long)uid));
goto inval;
}
tf_iov[0].iov_base = (void *)tf;
tf_iov[0].iov_len = tf_len;
tf_iov[1].iov_base = (void *)hdr;
tf_iov[1].iov_len = enc_len;
status = smb2_signing_decrypt_pdu(s->smb2->decryption_key,
conn->smb2.server.cipher,
tf_iov, 2);
if (!NT_STATUS_IS_OK(status)) {
TALLOC_FREE(iov);
return status;
}
verified_buflen = taken + enc_len;
len = enc_len;
}
/*
* We need the header plus the body length field
*/
if (len < SMB2_HDR_BODY + 2) {
DEBUG(10, ("%d bytes left, expected at least %d\n",
(int)len, SMB2_HDR_BODY));
goto inval;
}
if (IVAL(hdr, 0) != SMB2_MAGIC) {
DEBUG(10, ("Got non-SMB2 PDU: %x\n",
IVAL(hdr, 0)));
goto inval;
}
if (SVAL(hdr, 4) != SMB2_HDR_BODY) {
DEBUG(10, ("Got HDR len %d, expected %d\n",
SVAL(hdr, 4), SMB2_HDR_BODY));
goto inval;
}
full_size = len;
next_command_ofs = IVAL(hdr, SMB2_HDR_NEXT_COMMAND);
body_size = SVAL(hdr, SMB2_HDR_BODY);
if (next_command_ofs != 0) {
if (next_command_ofs < (SMB2_HDR_BODY + 2)) {
goto inval;
}
if (next_command_ofs > full_size) {
goto inval;
}
full_size = next_command_ofs;
}
if (body_size < 2) {
goto inval;
}
body_size &= 0xfffe;
if (body_size > (full_size - SMB2_HDR_BODY)) {
goto inval;
}
iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
num_iov + 4);
if (iov_tmp == NULL) {
TALLOC_FREE(iov);
return NT_STATUS_NO_MEMORY;
}
iov = iov_tmp;
cur = &iov[num_iov];
num_iov += 4;
cur[0].iov_base = tf;
cur[0].iov_len = tf_len;
cur[1].iov_base = hdr;
cur[1].iov_len = SMB2_HDR_BODY;
cur[2].iov_base = hdr + SMB2_HDR_BODY;
cur[2].iov_len = body_size;
cur[3].iov_base = hdr + SMB2_HDR_BODY + body_size;
cur[3].iov_len = full_size - (SMB2_HDR_BODY + body_size);
taken += full_size;
}
*piov = iov;
*pnum_iov = num_iov;
return NT_STATUS_OK;
inval:
TALLOC_FREE(iov);
return NT_STATUS_INVALID_NETWORK_RESPONSE;
}
| 0 |
[
"CWE-94"
] |
samba
|
46b5e4aca6adb12a27efaad3bfe66c2d8a82ec95
| 160,109,791,851,965,040,000,000,000,000,000,000,000 | 163 |
CVE-2016-2019: libcli/smb: don't allow guest sessions if we require signing
Note real anonymous sessions (with "" as username) don't hit this
as we don't even call smb2cli_session_set_session_key() in that case.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=11860
Signed-off-by: Stefan Metzmacher <[email protected]>
|
int rtnl_talk_iov(struct rtnl_handle *rtnl, struct iovec *iovec, size_t iovlen,
struct nlmsghdr **answer)
{
return __rtnl_talk_iov(rtnl, iovec, iovlen, answer, true, NULL);
}
| 0 |
[] |
iproute2
|
8c50b728b226f6254251282697ce38a72639a6fc
| 75,126,113,650,359,110,000,000,000,000,000,000,000 | 5 |
libnetlink: fix use-after-free of message buf
In __rtnl_talk_iov() main loop, err is a pointer to memory in dynamically
allocated 'buf' that is used to store netlink messages. If netlink message
is an error message, buf is deallocated before returning with error code.
However, on return err->error code is checked one more time to generate
return value, after memory which err points to has already been
freed. Save error code in temporary variable and use the variable to
generate return value.
Fixes: c60389e4f9ea ("libnetlink: fix leak and using unused memory on error")
Signed-off-by: Vlad Buslov <[email protected]>
Signed-off-by: Stephen Hemminger <[email protected]>
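The fix pattern is small enough to sketch in a few lines of plain C. The struct and function names below are illustrative stand-ins, not the real libnetlink API:

#include <stdlib.h>

struct fake_nlmsgerr { int error; };

/* Copy the error code out of the reply buffer *before* freeing it,
 * so the return value is never computed from freed memory. */
static int handle_error_reply(struct fake_nlmsgerr *err, char *buf)
{
    int error = err->error;   /* err points into buf: save it now */

    free(buf);                /* buf (and therefore err) is gone */
    return error ? -1 : 0;    /* safe: uses only the saved copy */
}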
|
static int cert_self_signed(X509 *x)
{
if (X509_check_purpose(x, -1, 0) != 1)
return 0;
if (x->ex_flags & EXFLAG_SS)
return 1;
else
return 0;
}
| 0 |
[
"CWE-295"
] |
openssl
|
2a40b7bc7b94dd7de897a74571e7024f0cf0d63b
| 272,548,707,143,336,700,000,000,000,000,000,000,000 | 9 |
check_chain_extensions: Do not override error return value by check_curve
The X509_V_FLAG_X509_STRICT flag enables additional security checks of the
certificates present in a certificate chain. It is not set by default.
Starting from OpenSSL version 1.1.1h a check to disallow certificates with
explicitly encoded elliptic curve parameters in the chain was added to the
strict checks.
An error in the implementation of this check meant that the result of a
previous check to confirm that certificates in the chain are valid CA
certificates was overwritten. This effectively bypasses the check
that non-CA certificates must not be able to issue other certificates.
If a "purpose" has been configured then a subsequent check that the
certificate is consistent with that purpose also checks that it is a
valid CA. Therefore where a purpose is set the certificate chain will
still be rejected even when the strict flag has been used. A purpose is
set by default in libssl client and server certificate verification
routines, but it can be overriden by an application.
Affected applications explicitly set the X509_V_FLAG_X509_STRICT
verification flag and either do not set a purpose for the certificate
verification or, in the case of TLS client or server applications,
override the default purpose to make it not set.
CVE-2021-3450
Reviewed-by: Matt Caswell <[email protected]>
Reviewed-by: Paul Dale <[email protected]>
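A minimal sketch of the bug class described above, with hypothetical helpers (is_valid_ca, check_curve) standing in for the real checks: a later check must only record a failure when nothing has failed yet, instead of assigning over an error already stored in rv.

static int is_valid_ca(int i) { return i != 1; }  /* stand-in */
static int check_curve(int i) { return 1; }       /* stand-in */

static int check_chain(int n_certs)
{
    int rv = 0;                      /* 0 = OK, as in X509_V_OK */

    for (int i = 0; i < n_certs; i++) {
        if (!is_valid_ca(i) && rv == 0)
            rv = 1;                  /* first error wins */
        /* buggy form: rv = check_curve(i); clobbered the CA error */
        if (!check_curve(i) && rv == 0)
            rv = 2;
    }
    return rv;
}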
|
static size_t rtnl_link_get_size(const struct net_device *dev)
{
const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
size_t size;
if (!ops)
return 0;
size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
if (ops->get_size)
/* IFLA_INFO_DATA + nested data */
size += nla_total_size(sizeof(struct nlattr)) +
ops->get_size(dev);
if (ops->get_xstats_size)
/* IFLA_INFO_XSTATS */
size += nla_total_size(ops->get_xstats_size(dev));
return size;
}
| 0 |
[
"CWE-399"
] |
linux-2.6
|
84d73cd3fb142bf1298a8c13fd4ca50fd2432372
| 280,371,897,299,366,560,000,000,000,000,000,000,000 | 22 |
rtnl: fix info leak on RTM_GETLINK request for VF devices
Initialize the mac address buffer with 0 as the driver specific function
will probably not fill the whole buffer. In fact, all in-kernel drivers
fill only ETH_ALEN of the MAX_ADDR_LEN bytes, i.e. 6 of the 32 possible
bytes. Therefore we currently leak 26 bytes of stack memory to userland
via the netlink interface.
Signed-off-by: Mathias Krause <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
uint32_t init_tag_versions(ctx_t &ctx)
{
dfa_t &dfa = ctx.dfa;
const size_t ntags = dfa.tags.size();
// all-zero tag configuration must have static number zero
ctx.dc_tagvertbl.insert_const(TAGVER_ZERO);
DASSERT(ZERO_TAGS == ctx.dc_tagvertbl.insert_const(TAGVER_ZERO));
// initial tag versions: [1 .. N]
const tcid_t INITIAL_TAGS = ctx.dc_tagvertbl.insert_succ(1);
// other versions: [ .. -(N + 1)] and [N + 1 .. ]
dfa.maxtagver = static_cast<tagver_t>(ntags);
// final/fallback versions will be assigned on the go
dfa.finvers = new tagver_t[ntags];
for (size_t i = 0; i < ntags; ++i) {
dfa.finvers[i] = fixed(dfa.tags[i]) ? TAGVER_ZERO : ++dfa.maxtagver;
}
// mark tags with history (initial and final)
for (size_t i = 0; i < ntags; ++i) {
if (history(dfa.tags[i])) {
tagver_t v = static_cast<tagver_t>(i) + 1, f = dfa.finvers[i];
if (f != TAGVER_ZERO) {
dfa.mtagvers.insert(f);
}
dfa.mtagvers.insert(v);
}
}
return INITIAL_TAGS;
}
| 0 |
[
"CWE-787"
] |
re2c
|
a3473fd7be829cb33907cb08612f955133c70a96
| 157,140,707,150,432,210,000,000,000,000,000,000,000 | 34 |
Limit maximum allowed NFA and DFA size.
Instead of failing with an out of memory exception or crashing with a
stack overflow, emit an error message and exit. This is a partial fix
for bug #394 "Stack overflow due to recursion in src/dfa/dead_rules.cc",
where re2c hit stack overflow on a counted repetition regexp with high
upper bound.
The patch adds the following limits:
1. the number of NFA states
2. NFA depth (maximum length of a non-looping path from start to end)
3. the number of DFA states
4. total DFA size (sum total of all NFA substates in all DFA states)

There are tests for the first three limits, but not for the DFA size as
all examples that trigger this behavior take a long time to finish (a
few seconds), which increases test run time almost twice.
|
rdpsnd_check_fds(fd_set * rfds, fd_set * wfds)
{
rdpsnd_queue_complete_pending();
if (device_open || rec_device_open)
current_driver->check_fds(rfds, wfds);
}
| 0 |
[
"CWE-787"
] |
rdesktop
|
766ebcf6f23ccfe8323ac10242ae6e127d4505d2
| 30,282,270,956,982,267,000,000,000,000,000,000,000 | 7 |
Malicious RDP server security fixes
This commit includes fixes for a set of 21 vulnerabilities in
rdesktop when a malicious RDP server is used.
All vulnerabilities was identified and reported by Eyal Itkin.
* Add rdp_protocol_error function that is used in several fixes
* Refactor of process_bitmap_updates
* Fix possible integer overflow in s_check_rem() on 32bit arch
* Fix memory corruption in process_bitmap_data - CVE-2018-8794
* Fix remote code execution in process_bitmap_data - CVE-2018-8795
* Fix remote code execution in process_plane - CVE-2018-8797
* Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
* Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
* Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
* Fix Denial of Service in sec_recv - CVE-2018-20176
* Fix minor information leak in rdpdr_process - CVE-2018-8791
* Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
* Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
* Fix Denial of Service in process_bitmap_data - CVE-2018-8796
* Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
* Fix Denial of Service in process_secondary_order - CVE-2018-8799
* Fix remote code execution in in ui_clip_handle_data - CVE-2018-8800
* Fix major information leak in ui_clip_handle_data - CVE-2018-20174
* Fix memory corruption in rdp_in_unistr - CVE-2018-20177
* Fix Denial of Service in process_demand_active - CVE-2018-20178
* Fix remote code execution in lspci_process - CVE-2018-20179
* Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
* Fix remote code execution in seamless_process - CVE-2018-20181
* Fix remote code execution in seamless_process_line - CVE-2018-20182
|
static void tpacket_destruct_skb(struct sk_buff *skb)
{
struct packet_sock *po = pkt_sk(skb->sk);
if (likely(po->tx_ring.pg_vec)) {
void *ph;
__u32 ts;
ph = skb_shinfo(skb)->destructor_arg;
packet_dec_pending(&po->tx_ring);
ts = __packet_set_timestamp(po, ph, skb);
__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
}
sock_wfree(skb);
}
| 0 |
[
"CWE-416",
"CWE-362"
] |
linux
|
84ac7260236a49c79eede91617700174c2c19b0c
| 298,033,049,082,337,050,000,000,000,000,000,000,000 | 17 |
packet: fix race condition in packet_set_ring
When packet_set_ring creates a ring buffer it will initialize a
struct timer_list if the packet version is TPACKET_V3. This value
can then be raced by a different thread calling setsockopt to
set the version to TPACKET_V1 before packet_set_ring has finished.
This leads to a use-after-free on a function pointer in the
struct timer_list when the socket is closed as the previously
initialized timer will not be deleted.
The bug is fixed by taking lock_sock(sk) in packet_setsockopt when
changing the packet version while also taking the lock at the start
of packet_set_ring.
Fixes: f6fb8f100b80 ("af-packet: TPACKET_V3 flexible buffer implementation.")
Signed-off-by: Philip Pettersson <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
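A user-space analogue of the fix, using a pthread mutex in place of lock_sock(sk); purely illustrative, not the kernel code. Both the version change and the ring setup take the same lock, so the version cannot change while the ring (and its timer) is being initialized:

#include <pthread.h>

static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
static int tp_version;
static int ring_active;

void set_version(int v)
{
    pthread_mutex_lock(&sk_lock);      /* was missing in the buggy path */
    if (!ring_active)
        tp_version = v;
    pthread_mutex_unlock(&sk_lock);
}

void set_ring(void)
{
    pthread_mutex_lock(&sk_lock);      /* taken at the start, as in the fix */
    /* ... allocate ring, init timer for tp_version ... */
    ring_active = 1;
    pthread_mutex_unlock(&sk_lock);
}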
|
static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_input *p = arg;
/*
* We set the flags for CAP_DV_TIMINGS &
* CAP_STD here based on ioctl handler provided by the
* driver. If the driver doesn't support these
* for a specific input, it must override these flags.
*/
if (is_valid_ioctl(vfd, VIDIOC_S_STD))
p->capabilities |= V4L2_IN_CAP_STD;
if (vfd->device_caps & V4L2_CAP_IO_MC) {
if (p->index)
return -EINVAL;
strscpy(p->name, vfd->name, sizeof(p->name));
p->type = V4L2_INPUT_TYPE_CAMERA;
return 0;
}
return ops->vidioc_enum_input(file, fh, p);
}
| 0 |
[
"CWE-401"
] |
linux
|
fb18802a338b36f675a388fc03d2aa504a0d0899
| 108,360,507,787,809,400,000,000,000,000,000,000,000 | 25 |
media: v4l: ioctl: Fix memory leak in video_usercopy
When an IOCTL with argument size larger than 128 that also used array
arguments were handled, two memory allocations were made but alas, only
the latter one of them was released. This happened because there was only
a single local variable to hold such a temporary allocation.
Fix this by adding separate variables to hold the pointers to the
temporary allocations.
Reported-by: Arnd Bergmann <[email protected]>
Reported-by: [email protected]
Fixes: d14e6d76ebf7 ("[media] v4l: Add multi-planar ioctl handling code")
Cc: [email protected]
Signed-off-by: Sakari Ailus <[email protected]>
Acked-by: Arnd Bergmann <[email protected]>
Acked-by: Hans Verkuil <[email protected]>
Reviewed-by: Laurent Pinchart <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
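A sketch of the fix pattern in portable C, with hypothetical names: one temporary allocation per purpose, each with its own pointer, so both can be freed on every path instead of one variable being overwritten and its old allocation leaked.

#include <stdlib.h>

int copy_ioctl_arg(size_t arg_size, size_t array_size)
{
    void *mbuf = NULL, *array_buf = NULL;
    int err = -1;

    if (arg_size > 128 && !(mbuf = malloc(arg_size)))
        goto out;
    if (array_size && !(array_buf = malloc(array_size)))
        goto out;
    /* ... copy from user space, call the handler ... */
    err = 0;
out:
    free(array_buf);   /* the buggy code reused one variable and leaked */
    free(mbuf);        /* whichever allocation it no longer pointed at  */
    return err;
}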
|
static int ZEND_FASTCALL ZEND_ECHO_SPEC_CONST_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
zend_op *opline = EX(opline);
zval z_copy;
zval *z = &opline->op1.u.constant;
if (IS_CONST != IS_CONST &&
Z_TYPE_P(z) == IS_OBJECT && Z_OBJ_HT_P(z)->get_method != NULL &&
zend_std_cast_object_tostring(z, &z_copy, IS_STRING TSRMLS_CC) == SUCCESS) {
zend_print_variable(&z_copy);
zval_dtor(&z_copy);
} else {
zend_print_variable(z);
}
ZEND_VM_NEXT_OPCODE();
}
| 0 |
[] |
php-src
|
ce96fd6b0761d98353761bf78d5bfb55291179fd
| 276,644,987,248,557,300,000,000,000,000,000,000,000 | 18 |
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus
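The check the fix adds can be sketched in standard C; the helper name is hypothetical. A path of declared length len that contains an embedded NUL will strlen() shorter than len, so rejecting the mismatch blocks "foo.php\0.jpg"-style tricks:

#include <string.h>

static int path_is_clean(const char *path, size_t len)
{
    /* strlen stops at the first NUL; any mismatch means an embedded NUL */
    return strlen(path) == len;
}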
|
R_API char *r_anal_function_format_sig(R_NONNULL RAnal *anal, R_NONNULL RAnalFunction *fcn, R_NULLABLE char *fcn_name,
R_NULLABLE RAnalFcnVarsCache *reuse_cache, R_NULLABLE const char *fcn_name_pre, R_NULLABLE const char *fcn_name_post) {
RAnalFcnVarsCache *cache = NULL;
if (!fcn_name) {
fcn_name = fcn->name;
if (!fcn_name) {
return NULL;
}
}
RStrBuf *buf = r_strbuf_new (NULL);
if (!buf) {
return NULL;
}
Sdb *TDB = anal->sdb_types;
char *type_fcn_name = r_type_func_guess (TDB, fcn_name);
if (type_fcn_name && r_type_func_exist (TDB, type_fcn_name)) {
const char *fcn_type = r_type_func_ret (anal->sdb_types, type_fcn_name);
if (R_STR_ISNOTEMPTY (fcn_type)) {
const char *sp = " ";
if (*fcn_type && (fcn_type[strlen (fcn_type) - 1] == '*')) {
sp = "";
}
r_strbuf_appendf (buf, "%s%s", fcn_type, sp);
}
}
if (fcn_name_pre) {
r_strbuf_append (buf, fcn_name_pre);
}
r_strbuf_append (buf, fcn_name);
if (fcn_name_post) {
r_strbuf_append (buf, fcn_name_post);
}
r_strbuf_append (buf, " (");
if (type_fcn_name && r_type_func_exist (TDB, type_fcn_name)) {
int i, argc = r_type_func_args_count (TDB, type_fcn_name);
bool comma = true;
// This avoids false positives present in argument recovery
// and straight away print arguments fetched from types db
for (i = 0; i < argc; i++) {
char *type = r_type_func_args_type (TDB, type_fcn_name, i);
const char *name = r_type_func_args_name (TDB, type_fcn_name, i);
if (!type || !*type || !name) {
eprintf ("Missing type for %s\n", type_fcn_name);
goto beach;
}
if (i == argc - 1) {
comma = false;
}
size_t len = strlen (type);
const char *tc = len > 0 && type[len - 1] == '*'? "": " ";
r_strbuf_appendf (buf, "%s%s%s%s", type, tc, name, comma? ", ": "");
free (type);
}
goto beach;
}
R_FREE (type_fcn_name);
cache = reuse_cache;
if (!cache) {
cache = R_NEW0 (RAnalFcnVarsCache);
if (!cache) {
type_fcn_name = NULL;
goto beach;
}
r_anal_function_vars_cache_init (anal, cache, fcn);
}
bool comma = true;
bool arg_bp = false;
size_t tmp_len;
RAnalVar *var;
RListIter *iter;
r_list_foreach (cache->rvars, iter, var) {
// assume self, error are always the last
if (!strcmp (var->name, "self") || !strcmp (var->name, "error")) {
r_strbuf_slice (buf, 0, r_strbuf_length (buf) - 2);
break;
}
tmp_len = strlen (var->type);
if (tmp_len > 0) {
r_strbuf_appendf (buf, "%s%s%s%s", var->type,
tmp_len && var->type[tmp_len - 1] == '*' ? "" : " ",
var->name, iter->n ? ", " : "");
}
}
r_list_foreach (cache->bvars, iter, var) {
if (var->isarg) {
if (!r_list_empty (cache->rvars) && comma) {
r_strbuf_append (buf, ", ");
comma = false;
}
arg_bp = true;
tmp_len = strlen (var->type);
if (tmp_len > 0) {
r_strbuf_appendf (buf, "%s%s%s%s", var->type,
tmp_len && var->type[tmp_len - 1] =='*' ? "" : " ",
var->name, iter->n ? ", " : "");
}
}
}
comma = true;
const char *maybe_comma = ", ";
r_list_foreach (cache->svars, iter, var) {
if (var->isarg) {
if (!*maybe_comma || ((arg_bp || !r_list_empty (cache->rvars)) && comma)) {
comma = false;
r_strbuf_append (buf, ", ");
}
tmp_len = strlen (var->type);
if (iter->n && ((RAnalVar *)iter->n->data)->isarg) {
maybe_comma = ", ";
} else {
maybe_comma = "";
}
if (tmp_len > 0) {
r_strbuf_appendf (buf, "%s%s%s%s", var->type,
tmp_len && var->type[tmp_len - 1] == '*'? "": " ",
var->name, maybe_comma);
}
}
}
beach:
r_strbuf_append (buf, ");");
R_FREE (type_fcn_name);
if (!reuse_cache) {
// !reuse_cache => we created our own cache
r_anal_function_vars_cache_fini (cache);
free (cache);
}
return r_strbuf_drain (buf);
}
| 0 |
[
"CWE-416"
] |
radare2
|
a7ce29647fcb38386d7439696375e16e093d6acb
| 258,052,245,706,640,800,000,000,000,000,000,000,000 | 141 |
Fix UAF in aaaa on arm/thumb switching ##crash
* Reported by @peacock-doris via huntr.dev
* Reproducer tests_65185
* This is a logic fix, but not the fully safe as changes in the code
can result on UAF again, to properly protect r2 from crashing we
need to break the ABI and add refcounting to RRegItem, which can't
happen in 5.6.x because of abi-compat rules
|
add_vavailability(struct vavailability_array *vavail, icalcomponent *ical)
{
icaltimezone *utc = icaltimezone_get_utc_timezone();
struct vavailability *newav;
icalcomponent *vav;
icalproperty *prop;
/* Grow the array, if necessary */
if (vavail->len == vavail->alloc) {
vavail->alloc += 10; /* XXX arbitrary */
vavail->vav = xrealloc(vavail->vav,
vavail->alloc * sizeof(struct vavailability));
}
/* Add new vavailability */
newav = &vavail->vav[vavail->len++];
newav->ical = ical;
vav = icalcomponent_get_first_real_component(ical);
/* Set period */
newav->per.start = icalcomponent_get_dtstart(vav);
if (icaltime_is_null_time(newav->per.start))
newav->per.start = icaltime_from_timet_with_zone(caldav_epoch, 0, utc);
else
newav->per.start = icaltime_convert_to_zone(newav->per.start, utc),
newav->per.end = icalcomponent_get_dtend(vav);
if (icaltime_is_null_time(newav->per.end))
newav->per.end = icaltime_from_timet_with_zone(caldav_eternity, 0, utc);
else
newav->per.end = icaltime_convert_to_zone(newav->per.end, utc),
newav->per.duration = icaldurationtype_null_duration();
/* Set PRIORITY - 0 (or none) has lower priority than 9 */
prop = icalcomponent_get_first_property(vav, ICAL_PRIORITY_PROPERTY);
if (prop) newav->priority = icalproperty_get_priority(prop);
if (!prop || !newav->priority) newav->priority = 10;
}
| 0 |
[
"CWE-787"
] |
cyrus-imapd
|
a5779db8163b99463e25e7c476f9cbba438b65f3
| 89,097,959,887,342,980,000,000,000,000,000,000,000 | 38 |
HTTP: don't overrun buffer when parsing strings with sscanf()
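A minimal sketch of the safe pattern (names illustrative): give every %s in sscanf() an explicit field width one less than the destination size, so parsing can never write past the buffer.

#include <stdio.h>

int parse_token(const char *line)
{
    char name[32];

    /* "%31s" leaves room for the terminating NUL in name[32] */
    return sscanf(line, "%31s", name) == 1;
}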
|
void ida_remove(struct ida *ida, int id)
{
struct idr_layer *p = ida->idr.top;
int shift = (ida->idr.layers - 1) * IDR_BITS;
int idr_id = id / IDA_BITMAP_BITS;
int offset = id % IDA_BITMAP_BITS;
int n;
struct ida_bitmap *bitmap;
/* clear full bits while looking up the leaf idr_layer */
while ((shift > 0) && p) {
n = (idr_id >> shift) & IDR_MASK;
__clear_bit(n, &p->bitmap);
p = p->ary[n];
shift -= IDR_BITS;
}
if (p == NULL)
goto err;
n = idr_id & IDR_MASK;
__clear_bit(n, &p->bitmap);
bitmap = (void *)p->ary[n];
if (!test_bit(offset, bitmap->bitmap))
goto err;
/* update bitmap and remove it if empty */
__clear_bit(offset, bitmap->bitmap);
if (--bitmap->nr_busy == 0) {
__set_bit(n, &p->bitmap); /* to please idr_remove() */
idr_remove(&ida->idr, idr_id);
free_bitmap(ida, bitmap);
}
return;
err:
printk(KERN_WARNING
"ida_remove called for id=%d which is not allocated.\n", id);
}
| 0 |
[] |
linux
|
2dcb22b346be7b7b7e630a8970d69cf3f1111ec1
| 101,828,800,540,093,500,000,000,000,000,000,000,000 | 41 |
idr: fix backtrack logic in idr_remove_all
Currently idr_remove_all will fail with a use after free error if
idr::layers is bigger than 2, which on 32 bit systems corresponds to items
more than 1024. This is due to stepping back too many levels during
backtracking. For simplicity let's assume that IDR_BITS=1 -> we have 2
nodes at each level below the root node and each leaf node stores two IDs.
(In reality for 32 bit systems IDR_BITS=5, with 32 nodes at each sub-root
level and 32 IDs in each leaf node). The sequence of freeing the nodes at
the moment is as follows:
layer
1 -> a(7)
2 -> b(3) c(5)
3 -> d(1) e(2) f(4) g(6)
Until step 4 things go fine, but then node c is freed, whereas node g
should be freed first. Since node c contains the pointer to node g we'll
have a use after free error at step 6.
How many levels we step back after visiting the leaf nodes is currently
determined by the msb of the id we are currently visiting:
Step
1. node d with IDs 0,1 is freed, current ID is advanced to 2.
msb of the current ID bit 1. This means we need to step back
1 level to node b and take the next sibling, node e.
2-3. node e with IDs 2,3 is freed, current ID is 4, msb is bit 2.
This means we need to step back 2 levels to node a, freeing
node b on the way.
4-5. node f with IDs 4,5 is freed, current ID is 6, msb is still
bit 2. This means we again need to step back 2 levels to node
a and free c on the way.
6. We should visit node g, but its pointer is not available as
node c was freed.
The fix changes how we determine the number of levels to step back.
Instead of deducting this merely from the msb of the current ID, we should
really check if advancing the ID causes an overflow to a bit position
corresponding to a given layer. In the above example overflow from bit 0
to bit 1 should mean stepping back 1 level. Overflow from bit 1 to bit 2
should mean stepping back 2 levels and so on.
The fix was tested with IDs up to 1 << 20, which corresponds to 4 layers
on 32 bit systems.
Signed-off-by: Imre Deak <[email protected]>
Reviewed-by: Tejun Heo <[email protected]>
Cc: Eric Paris <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: <[email protected]> [2.6.34.1]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
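An illustrative user-space sketch of the corrected rule, not the kernel code: the number of levels to step back is given by the highest bit position the id increment actually flipped (how far the carry propagated), not by the msb of the new id. With IDR_BITS=1 this yields 1 level for 5->6 (xor 0b011) and 2 levels for 3->4 (xor 0b111), matching the walkthrough above.

static int levels_to_step_back(unsigned old_id, unsigned new_id,
                               int idr_bits)
{
    unsigned changed = old_id ^ new_id;  /* caller always advances the id */
    int highest = -1;

    while (changed) {           /* find highest flipped bit */
        changed >>= 1;
        highest++;
    }
    return highest / idr_bits;  /* one level per layer boundary crossed */
}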
|
static void compat_release_buffer(void *opaque, uint8_t *data)
{
AVBufferRef *buf = opaque;
av_buffer_unref(&buf);
}
| 0 |
[
"CWE-703"
] |
FFmpeg
|
e5c7229999182ad1cef13b9eca050dba7a5a08da
| 302,439,232,975,321,340,000,000,000,000,000,000,000 | 5 |
avcodec/utils: set AVFrame format unconditionally
Fixes inconsistency and out of array accesses
Fixes: 10cdd7e63e7f66e3e66273939e0863dd-asan_heap-oob_1a4ff32_7078_cov_4056274555_mov_h264_aac__mp4box_frag.mp4
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <[email protected]>
|
void *redisCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen) {
if (redisAppendCommandArgv(c,argc,argv,argvlen) != REDIS_OK)
return NULL;
return __redisBlockForReply(c);
}
| 0 |
[
"CWE-190",
"CWE-680"
] |
redis
|
0215324a66af949be39b34be2d55143232c1cb71
| 277,627,733,630,822,280,000,000,000,000,000,000,000 | 5 |
Fix redis-cli / redis-sential overflow on some platforms (CVE-2021-32762) (#9587)
The redis-cli command line tool and redis-sentinel service may be vulnerable
to integer overflow when parsing specially crafted large multi-bulk network
replies. This is a result of a vulnerability in the underlying hiredis
library which does not perform an overflow check before calling the calloc()
heap allocation function.
This issue only impacts systems with heap allocators that do not perform their
own overflow checks. Most modern systems do and are therefore not likely to
be affected. Furthermore, by default redis-sentinel uses the jemalloc allocator
which is also not vulnerable.
Co-authored-by: Yossi Gottlieb <[email protected]>
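The underlying idea of the hiredis-side fix can be sketched as an overflow-checked allocation wrapper (the name safe_calloc is illustrative): check the multiplication before calling the allocator instead of trusting calloc() on every platform.

#include <stdlib.h>
#include <stdint.h>

static void *safe_calloc(size_t nmemb, size_t size)
{
    if (size && nmemb > SIZE_MAX / size)
        return NULL;            /* nmemb * size would overflow */
    return calloc(nmemb, size);
}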
|
void disablesignals(void)
{
sigset_t sigs;
sigfillset(&sigs);
if (sigprocmask(SIG_BLOCK, &sigs, &old_sigmask) < 0) {
_EXIT(EXIT_FAILURE);
}
}
| 0 |
[
"CWE-434"
] |
pure-ftpd
|
37ad222868e52271905b94afea4fc780d83294b4
| 171,716,991,401,972,300,000,000,000,000,000,000,000 | 9 |
Initialize the max upload file size when quotas are enabled
Due to an unwanted check, files causing the quota to be exceeded
were deleted after the upload, but not during the upload.
The bug was introduced in 2009 in version 1.0.23
Spotted by @DroidTest, thanks!
|
static int vmw_gb_surface_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf)
{
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdBindGBSurface body;
} *cmd1;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdUpdateGBSurface body;
} *cmd2;
uint32_t submit_size;
struct ttm_buffer_object *bo = val_buf->bo;
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
if (unlikely(!cmd1)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"binding.\n");
return -ENOMEM;
}
cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
cmd1->header.size = sizeof(cmd1->body);
cmd1->body.sid = res->id;
cmd1->body.mobid = bo->mem.start;
if (res->backup_dirty) {
cmd2 = (void *) &cmd1[1];
cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
cmd2->header.size = sizeof(cmd2->body);
cmd2->body.sid = res->id;
res->backup_dirty = false;
}
vmw_fifo_commit(dev_priv, submit_size);
return 0;
}
| 0 |
[
"CWE-20"
] |
linux
|
ee9c4e681ec4f58e42a83cb0c22a0289ade1aacf
| 252,781,625,568,046,300,000,000,000,000,000,000,000 | 41 |
drm/vmwgfx: limit the number of mip levels in vmw_gb_surface_define_ioctl()
The 'req->mip_levels' parameter in vmw_gb_surface_define_ioctl() is
a user-controlled 'uint32_t' value which is used as a loop count limit.
This can lead to a kernel lockup and DoS. Add check for 'req->mip_levels'.
References:
https://bugzilla.redhat.com/show_bug.cgi?id=1437431
Cc: <[email protected]>
Signed-off-by: Vladis Dronov <[email protected]>
Reviewed-by: Sinclair Yeh <[email protected]>
|
onig_is_code_in_cc(OnigEncoding enc, OnigCodePoint code, CClassNode* cc)
{
int len;
if (ONIGENC_MBC_MINLEN(enc) > 1) {
len = 2;
}
else {
len = ONIGENC_CODE_TO_MBCLEN(enc, code);
}
return onig_is_code_in_cc_len(len, code, cc);
}
| 0 |
[
"CWE-125"
] |
php-src
|
c6e34d91b88638966662caac62c4d0e90538e317
| 307,351,829,698,896,100,000,000,000,000,000,000,000 | 12 |
Fix bug #77371 (heap buffer overflow in mb regex functions - compile_string_node)
|
authorize_logger (const char *data)
{
g_message ("%s", data);
}
| 0 |
[
"CWE-1021"
] |
cockpit
|
46f6839d1af4e662648a85f3e54bba2d57f39f0e
| 69,024,439,640,430,290,000,000,000,000,000,000,000 | 4 |
ws: Restrict our cookie to the login host only
Mark our cookie as `SameSite: Strict` [1]. The current `None` default
will soon be moved to `Lax` by Firefox and Chromium, and recent versions
started to throw a warning about it.
[1] https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite
https://bugzilla.redhat.com/show_bug.cgi?id=1891944
|
bool JSObject::isSealed(PseudoHandle<JSObject> self, Runtime *runtime) {
if (self->flags_.sealed)
return true;
if (!self->flags_.noExtend)
return false;
auto selfHandle = runtime->makeHandle(std::move(self));
if (!HiddenClass::areAllNonConfigurable(
runtime->makeHandle(selfHandle->clazz_), runtime)) {
return false;
}
if (!checkAllOwnIndexed(
*selfHandle,
runtime,
ObjectVTable::CheckAllOwnIndexedMode::NonConfigurable)) {
return false;
}
// Now that we know we are sealed, set the flag.
selfHandle->flags_.sealed = true;
return true;
}
| 0 |
[
"CWE-843",
"CWE-125"
] |
hermes
|
fe52854cdf6725c2eaa9e125995da76e6ceb27da
| 177,137,460,488,927,600,000,000,000,000,000,000,000 | 24 |
[CVE-2020-1911] Look up HostObject computed properties on the right object in the prototype chain.
Summary:
The change in the hermes repository fixes the security vulnerability
CVE-2020-1911. This vulnerability only affects applications which
allow evaluation of uncontrolled, untrusted JavaScript code not
shipped with the app, so React Native apps will generally not be affected.
This revision includes a test for the bug. The test is generic JSI
code, so it is included in the hermes and react-native repositories.
Changelog: [Internal]
Reviewed By: tmikov
Differential Revision: D23322992
fbshipit-source-id: 4e88c974afe1ad33a263f9cac03e9dc98d33649a
|
u8 rtl8xxxu_read8(struct rtl8xxxu_priv *priv, u16 addr)
{
struct usb_device *udev = priv->udev;
int len;
u8 data;
mutex_lock(&priv->usb_buf_mutex);
len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
REALTEK_USB_CMD_REQ, REALTEK_USB_READ,
addr, 0, &priv->usb_buf.val8, sizeof(u8),
RTW_USB_CONTROL_MSG_TIMEOUT);
data = priv->usb_buf.val8;
mutex_unlock(&priv->usb_buf_mutex);
if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_READ)
dev_info(&udev->dev, "%s(%04x) = 0x%02x, len %i\n",
__func__, addr, data, len);
return data;
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
a2cdd07488e666aa93a49a3fc9c9b1299e27ef3c
| 68,548,322,149,841,890,000,000,000,000,000,000,000 | 19 |
rtl8xxxu: prevent leaking urb
In rtl8xxxu_submit_int_urb if usb_submit_urb fails the allocated urb
should be released.
Signed-off-by: Navid Emamdoost <[email protected]>
Reviewed-by: Chris Chiu <[email protected]>
Signed-off-by: Kalle Valo <[email protected]>
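A user-space analogue of the fix, with stand-in names: if the submit step fails, the object allocated for it must be released on the error path; the missing free() was the leak.

#include <stdlib.h>

static int submit(void *req) { (void)req; return 0; }  /* stand-in */

static int submit_int_request(void)
{
    void *urb = malloc(64);
    int ret;

    if (!urb)
        return -1;
    ret = submit(urb);
    if (ret < 0)
        free(urb);   /* the release the fix effectively adds */
    return ret;
}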
|
void AsyncConnection::_connect()
{
ldout(async_msgr->cct, 10) << __func__ << " csq=" << connect_seq << dendl;
state = STATE_CONNECTING;
// rescheduler connection in order to avoid lock dep
// may called by external thread(send_message)
center->dispatch_event_external(read_handler);
}
| 0 |
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
| 227,590,783,000,852,900,000,000,000,000,000,000,000 | 9 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
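A heavily simplified sketch of the challenge idea described above, not the real CephX code: the server remembers the random challenge it issued for *this* connection and accepts only an authorizer derived from it, so a captured authorizer cannot be replayed on another connection.

#include <stdint.h>

static int verify_authorizer(uint64_t sent_challenge,
                             uint64_t proof_from_client)
{
    /* reject unless the reply was produced for this connection's
     * challenge, preventing replay of an old authorizer */
    return proof_from_client == sent_challenge ? 0 : -1;
}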
|
obj2ast_keyword(PyObject* obj, keyword_ty* out, PyArena* arena)
{
PyObject* tmp = NULL;
identifier arg;
expr_ty value;
if (_PyObject_LookupAttrId(obj, &PyId_arg, &tmp) < 0) {
return 1;
}
if (tmp == NULL || tmp == Py_None) {
Py_CLEAR(tmp);
arg = NULL;
}
else {
int res;
res = obj2ast_identifier(tmp, &arg, arena);
if (res != 0) goto failed;
Py_CLEAR(tmp);
}
if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) {
return 1;
}
if (tmp == NULL) {
PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from keyword");
return 1;
}
else {
int res;
res = obj2ast_expr(tmp, &value, arena);
if (res != 0) goto failed;
Py_CLEAR(tmp);
}
*out = keyword(arg, value, arena);
return 0;
failed:
Py_XDECREF(tmp);
return 1;
}
| 0 |
[
"CWE-125"
] |
cpython
|
dcfcd146f8e6fc5c2fc16a4c192a0c5f5ca8c53c
| 226,733,280,963,781,200,000,000,000,000,000,000,000 | 38 |
bpo-35766: Merge typed_ast back into CPython (GH-11645)
|
void fmtutil_handle_photoshop_rsrc2(deark *c, dbuf *f, i64 pos, i64 len,
unsigned int flags, struct de_module_out_params *oparams)
{
int should_decode;
int should_extract;
int extract_fmt = 1; // 0=raw, 1=TIFF-wrapped
if(flags&0x1) {
should_decode = 0;
should_extract = 1;
}
else if(de_get_ext_option_bool(c, "extract8bim", 0)) {
should_extract = 1;
should_decode = 0;
if(flags&0x2) {
// Avoid "extracting" in a way that would just recreate the exact same file.
extract_fmt = 0;
}
}
else {
should_decode = 1;
should_extract = 0;
}
if(should_decode) {
de_module_params *mparams = NULL;
mparams = de_malloc(c, sizeof(de_module_params));
mparams->in_params.codes = "R";
if(oparams) {
// Since mparams->out_params is an embedded struct, not a pointer,
// we have to copy oparam's fields to and from it.
mparams->out_params = *oparams; // struct copy
}
de_run_module_by_id_on_slice(c, "psd", mparams, f, pos, len);
if(oparams) {
*oparams = mparams->out_params; // struct copy
}
de_free(c, mparams);
}
if(should_extract && extract_fmt==0) {
dbuf_create_file_from_slice(f, pos, len, "8bim", NULL, DE_CREATEFLAG_IS_AUX);
}
else if(should_extract && extract_fmt==1) {
wrap_in_tiff(c, f, pos, len, "Deark extracted 8BIM", 34377, "8bimtiff",
DE_CREATEFLAG_IS_AUX);
}
}
| 0 |
[
"CWE-369"
] |
deark
|
62acb7753b0e3c0d3ab3c15057b0a65222313334
| 158,286,223,869,772,980,000,000,000,000,000,000,000 | 49 |
pict,macrsrc: Fixed a bug that could cause division by 0
Found by F. Çelik.
|
xfs_bmse_merge(
struct xfs_inode *ip,
int whichfork,
xfs_fileoff_t shift, /* shift fsb */
struct xfs_iext_cursor *icur,
struct xfs_bmbt_irec *got, /* extent to shift */
struct xfs_bmbt_irec *left, /* preceding extent */
struct xfs_btree_cur *cur,
int *logflags, /* output */
struct xfs_defer_ops *dfops)
{
struct xfs_bmbt_irec new;
xfs_filblks_t blockcount;
int error, i;
struct xfs_mount *mp = ip->i_mount;
blockcount = left->br_blockcount + got->br_blockcount;
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(xfs_bmse_can_merge(left, got, shift));
new = *left;
new.br_blockcount = blockcount;
/*
* Update the on-disk extent count, the btree if necessary and log the
* inode.
*/
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
*logflags |= XFS_ILOG_CORE;
if (!cur) {
*logflags |= XFS_ILOG_DEXT;
goto done;
}
/* lookup and remove the extent to merge */
error = xfs_bmbt_lookup_eq(cur, got, &i);
if (error)
return error;
XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
error = xfs_btree_delete(cur, &i);
if (error)
return error;
XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
/* lookup and update size of the previous extent */
error = xfs_bmbt_lookup_eq(cur, left, &i);
if (error)
return error;
XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
error = xfs_bmbt_update(cur, &new);
if (error)
return error;
done:
xfs_iext_remove(ip, icur, 0);
xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur);
xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
&new);
/* update reverse mapping. rmap functions merge the rmaps for us */
error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, got);
if (error)
return error;
memcpy(&new, got, sizeof(new));
new.br_startoff = left->br_startoff + left->br_blockcount;
return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &new);
}
| 0 |
[] |
linux
|
2c4306f719b083d17df2963bc761777576b8ad1b
| 140,273,186,542,841,610,000,000,000,000,000,000,000 | 72 |
xfs: set format back to extents if xfs_bmap_extents_to_btree
If xfs_bmap_extents_to_btree fails in a mode where we call
xfs_iroot_realloc(-1) to de-allocate the root, set the
format back to extents.
Otherwise we can assume we can dereference ifp->if_broot
based on the XFS_DINODE_FMT_BTREE format, and crash.
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=199423
Signed-off-by: Eric Sandeen <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
Signed-off-by: Darrick J. Wong <[email protected]>
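A sketch of the error-path rule from this commit, with illustrative names: if the conversion has to undo its root allocation, the in-core format must be put back, or later code will dereference a btree root that was never kept.

enum fmt { FMT_EXTENTS, FMT_BTREE };

static int allocate_root(void) { return -1; }  /* stand-in failure */

static int convert_to_btree(enum fmt *format)
{
    *format = FMT_BTREE;           /* optimistic switch */
    if (allocate_root() < 0) {
        *format = FMT_EXTENTS;     /* the fix: restore on failure */
        return -1;
    }
    return 0;
}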
|
format_defaults_window(struct format_tree *ft, struct window *w)
{
ft->w = w;
format_add_tv(ft, "window_activity", &w->activity_time);
format_add(ft, "window_id", "@%u", w->id);
format_add(ft, "window_name", "%s", w->name);
format_add(ft, "window_width", "%u", w->sx);
format_add(ft, "window_height", "%u", w->sy);
format_add_cb(ft, "window_layout", format_cb_window_layout);
format_add_cb(ft, "window_visible_layout",
format_cb_window_visible_layout);
format_add(ft, "window_panes", "%u", window_count_panes(w));
format_add(ft, "window_zoomed_flag", "%d",
!!(w->flags & WINDOW_ZOOMED));
}
| 0 |
[] |
src
|
b32e1d34e10a0da806823f57f02a4ae6e93d756e
| 92,449,433,244,564,730,000,000,000,000,000,000,000 | 16 |
evbuffer_new and bufferevent_new can both fail (when malloc fails) and
return NULL. GitHub issue 1547.
|
static int handle_invvpid(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 vmx_instruction_info;
unsigned long type, types;
gva_t gva;
struct x86_exception e;
int vpid;
if (!(vmx->nested.nested_vmx_secondary_ctls_high &
SECONDARY_EXEC_ENABLE_VPID) ||
!(vmx->nested.nested_vmx_vpid_caps & VMX_VPID_INVVPID_BIT)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
if (!nested_vmx_check_permission(vcpu))
return 1;
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7;
if (!(types & (1UL << type))) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
return 1;
}
/* according to the intel vmx instruction reference, the memory
* operand is read even if it isn't needed (e.g., for type==global)
*/
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
vmx_instruction_info, false, &gva))
return 1;
if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid,
sizeof(u32), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
switch (type) {
case VMX_VPID_EXTENT_ALL_CONTEXT:
if (get_vmcs12(vcpu)->virtual_processor_id == 0) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
return 1;
}
__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
nested_vmx_succeed(vcpu);
break;
default:
/* Trap single context invalidation invvpid calls */
BUG_ON(1);
break;
}
skip_emulated_instruction(vcpu);
return 1;
}
| 0 |
[
"CWE-399"
] |
linux
|
54a20552e1eae07aa240fa370a0293e006b5faed
| 215,935,869,120,840,450,000,000,000,000,000,000,000 | 61 |
KVM: x86: work around infinite loop in microcode when #AC is delivered
It was found that a guest can DoS a host by triggering an infinite
stream of "alignment check" (#AC) exceptions. This causes the
microcode to enter an infinite loop where the core never receives
another interrupt. The host kernel panics pretty quickly due to the
effects (CVE-2015-5307).
Signed-off-by: Eric Northup <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]>
|
evdev_transform_relative(struct evdev_device *device,
struct device_coords *point)
{
struct matrix rel_matrix;
if (!device->abs.apply_calibration)
return;
matrix_to_relative(&rel_matrix, &device->abs.calibration);
matrix_mult_vec(&rel_matrix, &point->x, &point->y);
}
| 0 |
[
"CWE-134"
] |
libinput
|
a423d7d3269dc32a87384f79e29bb5ac021c83d1
| 94,556,493,048,325,970,000,000,000,000,000,000,000 | 11 |
evdev: strip the device name of format directives
This fixes a format string vulnerabilty.
evdev_log_message() composes a format string consisting of a fixed
prefix (including the rendered device name) and the passed-in format
buffer. This format string is then passed with the arguments to the
actual log handler, which usually and eventually ends up being printf.
If the device name contains a printf-style format directive, these ended
up in the format string and thus get interpreted correctly, e.g. for a
device "Foo%sBar" the log message vs printf invocation ends up being:
evdev_log_message(device, "some message %s", "some argument");
printf("event9 - Foo%sBar: some message %s", "some argument");
This can enable an attacker to execute malicious code with the
privileges of the process using libinput.
To exploit this, an attacker needs to be able to create a kernel device
with a malicious name, e.g. through /dev/uinput or a Bluetooth device.
To fix this, convert any potential format directives in the device name
by duplicating percentages.
Pre-rendering the device to avoid the issue altogether would be nicer
but the current log level hooks do not easily allow for this. The device
name is the only user-controlled part of the format string.
A second potential issue is the sysname of the device which is also
sanitized.
This issue was found by Albin Eldstål-Ahrens and Benjamin Svensson from
Assured AB, and independently by Lukas Lamster.
Fixes #752
Signed-off-by: Peter Hutterer <[email protected]>
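A minimal sketch of the mitigation (function name illustrative): duplicate every '%' in the attacker-controllable name before it is ever embedded in a format string, so "Foo%sBar" becomes the harmless "Foo%%sBar".

#include <stdlib.h>
#include <string.h>

char *strip_format_directives(const char *name)
{
    size_t len = strlen(name);
    char *out = malloc(2 * len + 1);   /* worst case: all '%' */
    char *p = out;

    if (!out)
        return NULL;
    for (size_t i = 0; i < len; i++) {
        *p++ = name[i];
        if (name[i] == '%')
            *p++ = '%';                /* '%' -> "%%" */
    }
    *p = '\0';
    return out;
}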
|
static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
struct sk_buff *skb)
{
struct nfc_llcp_sock *llcp_sock;
struct sock *sk;
u8 dsap, ssap, ptype, ns, nr;
ptype = nfc_llcp_ptype(skb);
dsap = nfc_llcp_dsap(skb);
ssap = nfc_llcp_ssap(skb);
ns = nfc_llcp_ns(skb);
nr = nfc_llcp_nr(skb);
pr_debug("%d %d R %d S %d\n", dsap, ssap, nr, ns);
llcp_sock = nfc_llcp_sock_get(local, dsap, ssap);
if (llcp_sock == NULL) {
nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN);
return;
}
sk = &llcp_sock->sk;
lock_sock(sk);
if (sk->sk_state == LLCP_CLOSED) {
release_sock(sk);
nfc_llcp_sock_put(llcp_sock);
}
/* Pass the payload upstream */
if (ptype == LLCP_PDU_I) {
pr_debug("I frame, queueing on %p\n", &llcp_sock->sk);
if (ns == llcp_sock->recv_n)
llcp_sock->recv_n = (llcp_sock->recv_n + 1) % 16;
else
pr_err("Received out of sequence I PDU\n");
skb_pull(skb, LLCP_HEADER_SIZE + LLCP_SEQUENCE_SIZE);
if (!sock_queue_rcv_skb(&llcp_sock->sk, skb)) {
/*
* I frames will be freed from the socket layer, so we
* need to keep them alive until someone receives them.
*/
skb_get(skb);
} else {
pr_err("Receive queue is full\n");
}
}
/* Remove skbs from the pending queue */
if (llcp_sock->send_ack_n != nr) {
struct sk_buff *s, *tmp;
u8 n;
llcp_sock->send_ack_n = nr;
/* Remove and free all skbs until ns == nr */
skb_queue_walk_safe(&llcp_sock->tx_pending_queue, s, tmp) {
n = nfc_llcp_ns(s);
skb_unlink(s, &llcp_sock->tx_pending_queue);
kfree_skb(s);
if (n == nr)
break;
}
/* Re-queue the remaining skbs for transmission */
skb_queue_reverse_walk_safe(&llcp_sock->tx_pending_queue,
s, tmp) {
skb_unlink(s, &llcp_sock->tx_pending_queue);
skb_queue_head(&local->tx_queue, s);
}
}
if (ptype == LLCP_PDU_RR)
llcp_sock->remote_ready = true;
else if (ptype == LLCP_PDU_RNR)
llcp_sock->remote_ready = false;
if (nfc_llcp_queue_i_frames(llcp_sock) == 0 && ptype == LLCP_PDU_I)
nfc_llcp_send_rr(llcp_sock);
release_sock(sk);
nfc_llcp_sock_put(llcp_sock);
}
| 0 |
[
"CWE-476"
] |
linux
|
58bdd544e2933a21a51eecf17c3f5f94038261b5
| 225,026,324,807,386,950,000,000,000,000,000,000,000 | 86 |
net: nfc: Fix NULL dereference on nfc_llcp_build_tlv fails
KASAN report this:
BUG: KASAN: null-ptr-deref in nfc_llcp_build_gb+0x37f/0x540 [nfc]
Read of size 3 at addr 0000000000000000 by task syz-executor.0/5401
CPU: 0 PID: 5401 Comm: syz-executor.0 Not tainted 5.0.0-rc7+ #45
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0xfa/0x1ce lib/dump_stack.c:113
kasan_report+0x171/0x18d mm/kasan/report.c:321
memcpy+0x1f/0x50 mm/kasan/common.c:130
nfc_llcp_build_gb+0x37f/0x540 [nfc]
nfc_llcp_register_device+0x6eb/0xb50 [nfc]
nfc_register_device+0x50/0x1d0 [nfc]
nfcsim_device_new+0x394/0x67d [nfcsim]
? 0xffffffffc1080000
nfcsim_init+0x6b/0x1000 [nfcsim]
do_one_initcall+0xfa/0x5ca init/main.c:887
do_init_module+0x204/0x5f6 kernel/module.c:3460
load_module+0x66b2/0x8570 kernel/module.c:3808
__do_sys_finit_module+0x238/0x2a0 kernel/module.c:3902
do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x462e99
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f9cb79dcc58 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99
RDX: 0000000000000000 RSI: 0000000020000280 RDI: 0000000000000003
RBP: 00007f9cb79dcc70 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f9cb79dd6bc
R13: 00000000004bcefb R14: 00000000006f7030 R15: 0000000000000004
nfc_llcp_build_tlv will return NULL on fails, caller should check it,
otherwise will trigger a NULL dereference.
Reported-by: Hulk Robot <[email protected]>
Fixes: eda21f16a5ed ("NFC: Set MIU and RW values from CONNECT and CC LLCP frames")
Fixes: d646960f7986 ("NFC: Initial LLCP support")
Signed-off-by: YueHaibing <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
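The call-site rule the fix enforces can be sketched in a few lines; names are illustrative stand-ins for the real LLCP helpers. Every nfc_llcp_build_tlv()-style builder can return NULL on allocation failure, so each caller must check before copying:

#include <stdlib.h>
#include <string.h>

static int append_tlv(char *gb, size_t *off, const char *tlv, size_t len)
{
    if (!tlv)               /* builder failed: propagate, don't memcpy */
        return -1;
    memcpy(gb + *off, tlv, len);
    *off += len;
    return 0;
}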
|
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
skb_queue_head_init(&req->cmd_q);
req->hdev = hdev;
req->err = 0;
}
| 0 |
[
"CWE-362"
] |
linux
|
e2cb6b891ad2b8caa9131e3be70f45243df82a80
| 301,447,510,789,294,220,000,000,000,000,000,000,000 | 6 |
bluetooth: eliminate the potential race condition when removing the HCI controller
There is a possible race condition vulnerability between issuing a HCI
command and removing the cont. Specifically, functions hci_req_sync()
and hci_dev_do_close() can race each other like below:
thread-A in hci_req_sync() | thread-B in hci_dev_do_close()
| hci_req_sync_lock(hdev);
test_bit(HCI_UP, &hdev->flags); |
... | test_and_clear_bit(HCI_UP, &hdev->flags)
hci_req_sync_lock(hdev); |
|
In this commit we alter the sequence in function hci_req_sync(). Hence,
the thread-A cannot issue the HCI command once the controller has been closed.
Signed-off-by: Lin Ma <[email protected]>
Cc: Marcel Holtmann <[email protected]>
Fixes: 7c6a329e4447 ("[Bluetooth] Fix regression from using default link policy")
Signed-off-by: Greg Kroah-Hartman <[email protected]>
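A user-space analogue of the reordering, illustrative only: take the request lock first, then test the "up" flag under it, so a concurrent close cannot slip in between the test and the lock.

#include <pthread.h>

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
static int hci_up;

int req_sync(void)
{
    int ret = -1;

    pthread_mutex_lock(&req_lock);   /* lock first (the fix) ... */
    if (hci_up) {                    /* ... then test the flag    */
        /* ... issue the command ... */
        ret = 0;
    }
    pthread_mutex_unlock(&req_lock);
    return ret;
}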
|
bool RGWBulkDelete::Deleter::delete_chunk(const std::list<acct_path_t>& paths)
{
ldout(store->ctx(), 20) << "in delete_chunk" << dendl;
for (auto path : paths) {
ldout(store->ctx(), 20) << "bulk deleting path: " << path << dendl;
delete_single(path);
}
return true;
}
| 0 |
[
"CWE-770"
] |
ceph
|
ab29bed2fc9f961fe895de1086a8208e21ddaddc
| 177,476,449,017,614,400,000,000,000,000,000,000,000 | 10 |
rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure, also make rgw_op use the value from req_info ceph context as
we do for all the requests
|
R_API int r_flag_unset_name(RFlag *f, const char *name) {
RFlagItem *item = ht_find (f->ht_name, name, NULL);
return item && r_flag_unset (f, item);
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
radare2
|
52b1526443c1f433087928291d1c3d37a5600515
| 254,336,710,242,625,900,000,000,000,000,000,000,000 | 4 |
Fix crash in wasm disassembler
|
static USHORT DetermineQueueNumber(PARANDIS_ADAPTER *)
{
return 1;
}
| 0 |
[
"CWE-20"
] |
kvm-guest-drivers-windows
|
723416fa4210b7464b28eab89cc76252e6193ac1
| 100,311,529,371,312,060,000,000,000,000,000,000,000 | 4 |
NetKVM: BZ#1169718: Checking the length only on read
Signed-off-by: Joseph Hindin <[email protected]>
|
void sctp_transport_burst_reset(struct sctp_transport *t)
{
if (t->burst_limited) {
t->cwnd = t->burst_limited;
t->burst_limited = 0;
}
}
| 0 |
[] |
linux
|
196d67593439b03088913227093e374235596e33
| 314,999,080,444,024,500,000,000,000,000,000,000,000 | 7 |
sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call
The current SCTP stack is lacking a mechanism to have per association
statistics. This is an implementation modeled after OpenSolaris'
SCTP_GET_ASSOC_STATS.
Userspace part will follow on lksctp if/when there is a general ACK on
this.
V4:
- Move ipackets++ before q->immediate.func() for consistency reasons
- Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid
returning bogus RTO values
- return asoc->rto_min when max_obs_rto value has not changed
V3:
- Increase ictrlchunks in sctp_assoc_bh_rcv() as well
- Move ipackets++ to sctp_inq_push()
- return 0 when no rto updates took place since the last call
V2:
- Implement partial retrieval of stat struct to cope for future expansion
- Kill the rtxpackets counter as it cannot be precise anyway
- Rename outseqtsns to outofseqtsns to make it clearer that these are out
of sequence unexpected TSNs
- Move asoc->ipackets++ under a lock to avoid potential miscounts
- Fold asoc->opackets++ into the already existing asoc check
- Kill unneeded (q->asoc) test when increasing rtxchunks
- Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0)
- Don't count SHUTDOWNs as SACKs
- Move SCTP_GET_ASSOC_STATS to the private space API
- Adjust the len check in sctp_getsockopt_assoc_stats() to allow for
future struct growth
- Move association statistics in their own struct
- Update idupchunks when we send a SACK with dup TSNs
- return min_rto in max_rto when RTO has not changed. Also return the
transport when max_rto last changed.
Signed-off-by: Michele Baldessari <[email protected]>
Acked-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
ecma_save_literals_for_snapshot (ecma_collection_t *lit_pool_p, /**< list of known values */
uint32_t *buffer_p, /**< [out] output snapshot buffer */
size_t buffer_size, /**< size of the buffer */
size_t *in_out_buffer_offset_p, /**< [in,out] write position in the buffer */
lit_mem_to_snapshot_id_map_entry_t **out_map_p, /**< [out] map from literal identifiers
* to the literal offsets
* in snapshot */
uint32_t *out_map_len_p) /**< [out] number of literals */
{
if (lit_pool_p->item_count == 0)
{
*out_map_p = NULL;
*out_map_len_p = 0;
}
uint32_t lit_table_size = 0;
size_t max_lit_table_size = buffer_size - *in_out_buffer_offset_p;
if (max_lit_table_size > (UINT32_MAX >> JERRY_SNAPSHOT_LITERAL_SHIFT))
{
max_lit_table_size = (UINT32_MAX >> JERRY_SNAPSHOT_LITERAL_SHIFT);
}
ecma_value_t *lit_buffer_p = lit_pool_p->buffer_p;
/* Compute the size of the literal pool. */
for (uint32_t i = 0; i < lit_pool_p->item_count; i++)
{
if (ecma_is_value_float_number (lit_buffer_p[i]))
{
lit_table_size += (uint32_t) sizeof (ecma_number_t);
}
#if JERRY_BUILTIN_BIGINT
else if (ecma_is_value_bigint (lit_buffer_p[i]))
{
ecma_extended_primitive_t *bigint_p = ecma_get_extended_primitive_from_value (lit_buffer_p[i]);
lit_table_size += (uint32_t) JERRY_ALIGNUP (sizeof (uint32_t) + ECMA_BIGINT_GET_SIZE (bigint_p),
JERRY_SNAPSHOT_LITERAL_ALIGNMENT);
}
#endif /* JERRY_BUILTIN_BIGINT */
else
{
ecma_string_t *string_p = ecma_get_string_from_value (lit_buffer_p[i]);
lit_table_size += (uint32_t) JERRY_ALIGNUP (sizeof (uint16_t) + ecma_string_get_size (string_p),
JERRY_SNAPSHOT_LITERAL_ALIGNMENT);
}
/* Check whether enough space is available and the maximum size is not reached. */
if (lit_table_size > max_lit_table_size)
{
ecma_collection_destroy (lit_pool_p);
return false;
}
}
lit_mem_to_snapshot_id_map_entry_t *map_p;
uint32_t total_count = lit_pool_p->item_count;
map_p = jmem_heap_alloc_block (total_count * sizeof (lit_mem_to_snapshot_id_map_entry_t));
/* Set return values (no error is possible from here). */
JERRY_ASSERT ((*in_out_buffer_offset_p % sizeof (uint32_t)) == 0);
uint8_t *destination_p = (uint8_t *) (buffer_p + (*in_out_buffer_offset_p / sizeof (uint32_t)));
uint32_t literal_offset = 0;
*in_out_buffer_offset_p += lit_table_size;
*out_map_p = map_p;
*out_map_len_p = total_count;
lit_buffer_p = lit_pool_p->buffer_p;
/* Generate literal pool data. */
for (uint32_t i = 0; i < lit_pool_p->item_count; i++)
{
map_p->literal_id = lit_buffer_p[i];
map_p->literal_offset = (literal_offset << JERRY_SNAPSHOT_LITERAL_SHIFT) | ECMA_TYPE_SNAPSHOT_OFFSET;
lit_utf8_size_t length;
if (ecma_is_value_float_number (lit_buffer_p[i]))
{
map_p->literal_offset |= JERRY_SNAPSHOT_LITERAL_IS_NUMBER;
ecma_number_t num = ecma_get_float_from_value (lit_buffer_p[i]);
memcpy (destination_p, &num, sizeof (ecma_number_t));
length = JERRY_ALIGNUP (sizeof (ecma_number_t), JERRY_SNAPSHOT_LITERAL_ALIGNMENT);
}
#if JERRY_BUILTIN_BIGINT
else if (ecma_is_value_bigint (lit_buffer_p[i]))
{
map_p->literal_offset |= JERRY_SNAPSHOT_LITERAL_IS_BIGINT;
ecma_extended_primitive_t *bigint_p = ecma_get_extended_primitive_from_value (lit_buffer_p[i]);
uint32_t size = ECMA_BIGINT_GET_SIZE (bigint_p);
memcpy (destination_p, &bigint_p->u.bigint_sign_and_size, sizeof (uint32_t));
memcpy (destination_p + sizeof (uint32_t), ECMA_BIGINT_GET_DIGITS (bigint_p, 0), size);
length = JERRY_ALIGNUP (sizeof (uint32_t) + size, JERRY_SNAPSHOT_LITERAL_ALIGNMENT);
}
#endif /* JERRY_BUILTIN_BIGINT */
else
{
ecma_string_t *string_p = ecma_get_string_from_value (lit_buffer_p[i]);
length = ecma_string_get_size (string_p);
*(uint16_t *) destination_p = (uint16_t) length;
ecma_string_to_utf8_bytes (string_p, destination_p + sizeof (uint16_t), length);
length = JERRY_ALIGNUP (sizeof (uint16_t) + length, JERRY_SNAPSHOT_LITERAL_ALIGNMENT);
}
JERRY_ASSERT ((length % sizeof (uint16_t)) == 0);
destination_p += length;
literal_offset += length;
map_p++;
}
ecma_collection_destroy (lit_pool_p);
return true;
} /* ecma_save_literals_for_snapshot */
| 0 |
[
"CWE-416"
] |
jerryscript
|
3bcd48f72d4af01d1304b754ef19fe1a02c96049
| 303,020,771,226,656,700,000,000,000,000,000,000,000 | 127 |
Improve parse_identifier (#4691)
Ascii string length is no longer computed during string allocation.
JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz [email protected]
|
filter_can_trim (struct backend *b, struct connection *conn)
{
struct backend_filter *f = container_of (b, struct backend_filter, backend);
void *handle = connection_get_handle (conn, f->backend.i);
struct b_conn nxdata = { .b = f->backend.next, .conn = conn };
debug ("%s: can_trim", f->name);
if (f->filter.can_trim)
return f->filter.can_trim (&next_ops, &nxdata, handle);
else
return f->backend.next->can_trim (f->backend.next, conn);
}
| 0 |
[
"CWE-406"
] |
nbdkit
|
bf0d61883a2f02f4388ec10dc92d4c61c093679e
| 13,158,718,053,963,320,000,000,000,000,000,000,000 | 13 |
server: Fix regression for NBD_OPT_INFO before NBD_OPT_GO
Most known NBD clients do not bother with NBD_OPT_INFO (except for
clients like 'qemu-nbd --list' that don't ever intend to connect), but
go straight to NBD_OPT_GO. However, it's not too hard to hack up qemu
to add in an extra client step (whether info on the same name, or more
interestingly, info on a different name), as a patch against qemu
commit 6f214b30445:
| diff --git i/nbd/client.c w/nbd/client.c
| index f6733962b49b..425292ac5ea9 100644
| --- i/nbd/client.c
| +++ w/nbd/client.c
| @@ -1038,6 +1038,14 @@ int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
| * TLS). If it is not available, fall back to
| * NBD_OPT_LIST for nicer error messages about a missing
| * export, then use NBD_OPT_EXPORT_NAME. */
| + if (getenv ("HACK"))
| + info->name[0]++;
| + result = nbd_opt_info_or_go(ioc, NBD_OPT_INFO, info, errp);
| + if (getenv ("HACK"))
| + info->name[0]--;
| + if (result < 0) {
| + return -EINVAL;
| + }
| result = nbd_opt_info_or_go(ioc, NBD_OPT_GO, info, errp);
| if (result < 0) {
| return -EINVAL;
This works just fine in 1.14.0, where we call .open only once (so the
INFO and GO repeat calls into the same plugin handle), but in 1.14.1
it regressed into causing an assertion failure: we are now calling
.open a second time on a connection that is already opened:
$ nbdkit -rfv null &
$ hacked-qemu-io -f raw -r nbd://localhost -c quit
...
nbdkit: null[1]: debug: null: open readonly=1
nbdkit: backend.c:179: backend_open: Assertion `h->handle == NULL' failed.
Worse, on the mainline development, we have recently made it possible
for plugins to actively report different information for different
export names; for example, a plugin may choose to report different
answers for .can_write on export A than for export B; but if we share
cached handles, then an NBD_OPT_INFO on one export prevents correct
answers for NBD_OPT_GO on the second export name. (The HACK envvar in
my qemu modifications can be used to demonstrate cross-name requests,
which are even less likely in a real client).
The solution is to call .close after NBD_OPT_INFO, coupled with enough
glue logic to reset cached connection handles back to the state
expected by .open. This in turn means factoring out another backend_*
function, but also gives us an opportunity to change
backend_set_handle to no longer accept NULL.
The assertion failure is, to some extent, a possible denial of service
attack (one client can force nbdkit to exit by merely sending OPT_INFO
before OPT_GO, preventing the next client from connecting), although
this is mitigated by using TLS to weed out untrusted clients. Still,
the fact that we introduced a potential DoS attack while trying to fix
a traffic amplification security bug is not very nice.
Sadly, as there are no known clients that easily trigger this mode of
operation (OPT_INFO before OPT_GO), there is no easy way to cover this
via a testsuite addition. I may end up hacking something into libnbd.
Fixes: c05686f957
Signed-off-by: Eric Blake <[email protected]>
(cherry picked from commit a6b88b195a959b17524d1c8353fd425d4891dc5f)
Conflicts:
server/backend.c
server/connections.c
server/filters.c
server/internal.h
server/plugins.c
No backend.c in the stable branch, and less things to reset, so instead
code that logic into filter_close.
Signed-off-by: Eric Blake <[email protected]>
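A rough sketch of the control flow the fix introduces, with stand-in names for the backend calls: after answering OPT_INFO the per-connection handle is closed and reset, so a later OPT_GO re-opens cleanly instead of hitting the "handle already open" assertion.

enum { OPT_INFO = 6, OPT_GO = 7 };          /* NBD option codes */

static int  do_open(void)           { return 0; }  /* stand-ins for */
static void send_info_replies(void) { }            /* the backend   */
static void do_close(void)          { }            /* calls         */

static int handle_option(int option)
{
    if (do_open() < 0)
        return -1;
    send_info_replies();
    if (option == OPT_INFO)
        do_close();      /* the new step introduced by the fix */
    /* for OPT_GO, keep the handle and enter transmission phase */
    return 0;
}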
|
void server_forward_syslog(Server *s, int priority, const char *identifier, const char *message, const struct ucred *ucred, const struct timeval *tv) {
struct iovec iovec[5];
char header_priority[DECIMAL_STR_MAX(priority) + 3], header_time[64],
header_pid[STRLEN("[]: ") + DECIMAL_STR_MAX(pid_t) + 1];
int n = 0;
time_t t;
struct tm tm;
_cleanup_free_ char *ident_buf = NULL;
assert(s);
assert(priority >= 0);
assert(priority <= 999);
assert(message);
if (LOG_PRI(priority) > s->max_level_syslog)
return;
/* First: priority field */
xsprintf(header_priority, "<%i>", priority);
iovec[n++] = IOVEC_MAKE_STRING(header_priority);
/* Second: timestamp */
t = tv ? tv->tv_sec : ((time_t) (now(CLOCK_REALTIME) / USEC_PER_SEC));
if (!localtime_r(&t, &tm))
return;
if (strftime(header_time, sizeof(header_time), "%h %e %T ", &tm) <= 0)
return;
iovec[n++] = IOVEC_MAKE_STRING(header_time);
/* Third: identifier and PID */
if (ucred) {
if (!identifier) {
get_process_comm(ucred->pid, &ident_buf);
identifier = ident_buf;
}
xsprintf(header_pid, "["PID_FMT"]: ", ucred->pid);
if (identifier)
iovec[n++] = IOVEC_MAKE_STRING(identifier);
iovec[n++] = IOVEC_MAKE_STRING(header_pid);
} else if (identifier) {
iovec[n++] = IOVEC_MAKE_STRING(identifier);
iovec[n++] = IOVEC_MAKE_STRING(": ");
}
/* Fourth: message */
iovec[n++] = IOVEC_MAKE_STRING(message);
forward_syslog_iovec(s, iovec, n, ucred, tv);
}
| 0 |
[
"CWE-125"
] |
systemd
|
a6aadf4ae0bae185dc4c414d492a4a781c80ffe5
| 77,782,284,951,717,790,000,000,000,000,000,000,000 | 52 |
journal: fix syslog_parse_identifier()
Fixes #9829.
|
lua_State *CLua::state()
{
if (!_state)
init_lua();
return _state;
}
| 0 |
[
"CWE-434"
] |
crawl
|
fc522ff6eb1bbb85e3de60c60a45762571e48c28
| 180,180,827,169,546,400,000,000,000,000,000,000,000 | 6 |
Disable lua load(), loadstring() bytecode loading
|
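A hedged sketch of one standard way to enforce this at the C API level (not necessarily how crawl implemented it): since Lua 5.2, luaL_loadbufferx takes a mode string, and mode "t" rejects precompiled bytecode, which can otherwise smuggle unverified opcodes past the interpreter.

#include <lua.h>
#include <lauxlib.h>

/* Load a chunk as text only; binary (bytecode) chunks are refused. */
static int load_text_only(lua_State *L, const char *buf, size_t len,
                          const char *name)
{
    return luaL_loadbufferx(L, buf, len, name, "t");
}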
PHP_METHOD(SoapServer, getFunctions)
{
soapServicePtr service;
HashTable *ft = NULL;
SOAP_SERVER_BEGIN_CODE();
if (zend_parse_parameters_none() == FAILURE) {
return;
}
FETCH_THIS_SERVICE(service);
array_init(return_value);
if (service->type == SOAP_OBJECT) {
ft = &(Z_OBJCE_P(service->soap_object)->function_table);
} else if (service->type == SOAP_CLASS) {
ft = &service->soap_class.ce->function_table;
} else if (service->soap_functions.functions_all == TRUE) {
ft = EG(function_table);
} else if (service->soap_functions.ft != NULL) {
zval **name;
HashPosition pos;
zend_hash_internal_pointer_reset_ex(service->soap_functions.ft, &pos);
while (zend_hash_get_current_data_ex(service->soap_functions.ft, (void **)&name, &pos) != FAILURE) {
add_next_index_string(return_value, Z_STRVAL_PP(name), 1);
zend_hash_move_forward_ex(service->soap_functions.ft, &pos);
}
}
if (ft != NULL) {
zend_function *f;
HashPosition pos;
zend_hash_internal_pointer_reset_ex(ft, &pos);
while (zend_hash_get_current_data_ex(ft, (void **)&f, &pos) != FAILURE) {
if ((service->type != SOAP_OBJECT && service->type != SOAP_CLASS) || (f->common.fn_flags & ZEND_ACC_PUBLIC)) {
add_next_index_string(return_value, f->common.function_name, 1);
}
zend_hash_move_forward_ex(ft, &pos);
}
}
SOAP_SERVER_END_CODE();
}
| 1 |
[] |
php-src
|
e201f01ac17243a1e5fb6a3911ed8e21b1619ac1
| 26,227,861,801,448,115,000,000,000,000,000,000,000 | 44 |
Fix bug #70388 - SOAP serialize_function_call() type confusion
|
RGWOpType get_type() override { return RGW_OP_BULK_DELETE; }
| 0 |
[
"CWE-770"
] |
ceph
|
ab29bed2fc9f961fe895de1086a8208e21ddaddc
| 13,837,347,439,158,963,000,000,000,000,000,000,000 | 1 |
rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure, also make rgw_op use the value from req_info ceph context as
we do for all the requests
|
void vrend_set_single_ssbo(struct vrend_context *ctx,
uint32_t shader_type,
uint32_t index,
uint32_t offset, uint32_t length,
uint32_t handle)
{
struct vrend_ssbo *ssbo = &ctx->sub->ssbo[shader_type][index];
struct vrend_resource *res;
if (!has_feature(feat_ssbo))
return;
if (handle) {
res = vrend_renderer_ctx_res_lookup(ctx, handle);
if (!res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, handle);
return;
}
ssbo->res = res;
ssbo->buffer_offset = offset;
ssbo->buffer_size = length;
ctx->sub->ssbo_used_mask[shader_type] |= (1u << index);
} else {
ssbo->res = 0;
ssbo->buffer_offset = 0;
ssbo->buffer_size = 0;
ctx->sub->ssbo_used_mask[shader_type] &= ~(1u << index);
}
}
| 0 |
[
"CWE-787"
] |
virglrenderer
|
cbc8d8b75be360236cada63784046688aeb6d921
| 5,719,785,181,155,637,000,000,000,000,000,000,000 | 29 |
vrend: check transfer bounds for negative values too and report error
Closes #138
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Emil Velikov <[email protected]>
|
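A self-contained sketch of the kind of check this commit describes (illustrative only, not the upstream virglrenderer code): transfer parameters arriving from the guest must be rejected when negative once interpreted as signed, and when their end would run past the resource.

#include <stdbool.h>
#include <stdint.h>

static bool transfer_bounds_ok(int32_t offset, int32_t length,
                               uint32_t res_size)
{
    if (offset < 0 || length < 0)
        return false;                     /* negative values wrap if cast */
    if ((uint64_t)offset + (uint64_t)length > res_size)
        return false;                     /* end runs past the resource */
    return true;
}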
void CoreAuthHandler::handle(const SetupData &msg)
{
if (!checkClientRegistered())
return;
QString result = Core::setup(msg.adminUser, msg.adminPassword, msg.backend, msg.setupData);
if (!result.isEmpty())
_peer->dispatch(SetupFailed(result));
else
_peer->dispatch(SetupDone());
}
| 0 |
[] |
quassel
|
e67887343c433cc35bc26ad6a9392588f427e746
| 311,397,638,509,480,400,000,000,000,000,000,000,000 | 11 |
Handle invalid handshake data properly in the core
Clients sending invalid handshake data could make the core crash
due to an unchecked pointer. This commit fixes this issue by having
the core close the socket if a peer could not be created.
Thanks to Bas Pape (Tucos) for finding this one!
|
static void XEditText(Display *display,XWidgetInfo *text_info,
const KeySym key_symbol,char *text,const size_t state)
{
switch ((int) key_symbol)
{
case XK_BackSpace:
case XK_Delete:
{
if (text_info->highlight)
{
/*
Erase the entire line of text.
*/
*text_info->text='\0';
text_info->cursor=text_info->text;
text_info->marker=text_info->text;
text_info->highlight=MagickFalse;
}
/*
Erase one character.
*/
if (text_info->cursor != text_info->text)
{
text_info->cursor--;
(void) memmove(text_info->cursor,text_info->cursor+1,
strlen(text_info->cursor+1)+1);
text_info->highlight=MagickFalse;
break;
}
}
case XK_Left:
case XK_KP_Left:
{
/*
Move cursor one position left.
*/
if (text_info->cursor == text_info->text)
break;
text_info->cursor--;
break;
}
case XK_Right:
case XK_KP_Right:
{
/*
Move cursor one position right.
*/
if (text_info->cursor == (text_info->text+Extent(text_info->text)))
break;
text_info->cursor++;
break;
}
default:
{
char
*p,
*q;
int
i;
if (state & ControlState)
break;
if (*text == '\0')
break;
if ((Extent(text_info->text)+1) >= (int) MaxTextExtent)
(void) XBell(display,0);
else
{
if (text_info->highlight)
{
/*
Erase the entire line of text.
*/
*text_info->text='\0';
text_info->cursor=text_info->text;
text_info->marker=text_info->text;
text_info->highlight=MagickFalse;
}
/*
Insert a string into the text.
*/
q=text_info->text+Extent(text_info->text)+strlen(text);
for (i=0; i <= Extent(text_info->cursor); i++)
{
*q=(*(q-Extent(text)));
q--;
}
p=text;
for (i=0; i < Extent(text); i++)
*text_info->cursor++=(*p++);
}
break;
}
}
}
| 0 |
[] |
ImageMagick6
|
366c9708a7ca1256ee03d0d4addb2690ed42273f
| 48,774,816,360,316,010,000,000,000,000,000,000,000 | 96 |
https://github.com/ImageMagick/ImageMagick/issues/3333
|
IncomingZRTPPkt::IncomingZRTPPkt(const unsigned char* const block, size_t len) :
IncomingRTPPkt(block,len) {
}
| 0 |
[
"CWE-119"
] |
ZRTPCPP
|
c8617100f359b217a974938c5539a1dd8a120b0e
| 300,383,830,031,446,700,000,000,000,000,000,000,000 | 3 |
Fix vulnerabilities found and reported by Mark Dowd
- limit length of memcpy
- limit number of offered algorithms in Hello packet
- length check in PING packet
- fix a small coding error
|
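The "limit length of memcpy" hardening in one self-contained sketch (names invented): never trust a length field taken from the wire; clamp it to the destination size before copying.

#include <stdint.h>
#include <string.h>

static void copy_bounded(uint8_t *dst, size_t dst_size,
                         const uint8_t *src, size_t wire_len)
{
    size_t n = wire_len < dst_size ? wire_len : dst_size;  /* clamp */
    memcpy(dst, src, n);
}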
static int sctp_setsockopt_deactivate_key(struct sock *sk,
struct sctp_authkeyid *val,
unsigned int optlen)
{
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_association *asoc;
int ret = 0;
if (optlen != sizeof(struct sctp_authkeyid))
return -EINVAL;
asoc = sctp_id2assoc(sk, val->scact_assoc_id);
if (!asoc && val->scact_assoc_id > SCTP_ALL_ASSOC &&
sctp_style(sk, UDP))
return -EINVAL;
if (asoc)
return sctp_auth_deact_key_id(ep, asoc, val->scact_keynumber);
if (sctp_style(sk, TCP))
val->scact_assoc_id = SCTP_FUTURE_ASSOC;
if (val->scact_assoc_id == SCTP_FUTURE_ASSOC ||
val->scact_assoc_id == SCTP_ALL_ASSOC) {
ret = sctp_auth_deact_key_id(ep, asoc, val->scact_keynumber);
if (ret)
return ret;
}
if (val->scact_assoc_id == SCTP_CURRENT_ASSOC ||
val->scact_assoc_id == SCTP_ALL_ASSOC) {
list_for_each_entry(asoc, &ep->asocs, asocs) {
int res = sctp_auth_deact_key_id(ep, asoc,
val->scact_keynumber);
if (res && !ret)
ret = res;
}
}
return ret;
}
| 0 |
[
"CWE-362"
] |
linux
|
b166a20b07382b8bc1dcee2a448715c9c2c81b5b
| 228,757,686,152,149,600,000,000,000,000,000,000,000 | 42 |
net/sctp: fix race condition in sctp_destroy_sock
If sctp_destroy_sock is called without sock_net(sk)->sctp.addr_wq_lock
held and sp->do_auto_asconf is true, then an element is removed
from the auto_asconf_splist without any proper locking.
This can happen in the following functions:
1. In sctp_accept, if sctp_sock_migrate fails.
2. In inet_create or inet6_create, if there is a bpf program
attached to BPF_CGROUP_INET_SOCK_CREATE which denies
creation of the sctp socket.
The bug is fixed by acquiring addr_wq_lock in sctp_destroy_sock
instead of sctp_close.
This addresses CVE-2021-23133.
Reported-by: Or Cohen <[email protected]>
Reviewed-by: Xin Long <[email protected]>
Fixes: 610236587600 ("bpf: Add new cgroup attach type to enable sock modifications")
Signed-off-by: Or Cohen <[email protected]>
Acked-by: Marcelo Ricardo Leitner <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
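A minimal userspace analogue of the locking rule this fix enforces (pthread stand-in, not kernel code): an element must be unlinked under the same lock every writer of the list holds, otherwise two threads can race on ->next/->prev, which is exactly the CVE-2021-23133 pattern.

#include <pthread.h>

struct node { struct node *prev, *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void unlink_node(struct node *n)
{
    pthread_mutex_lock(&list_lock);   /* same lock every writer holds */
    n->prev->next = n->next;
    n->next->prev = n->prev;
    pthread_mutex_unlock(&list_lock);
}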
static void virtio_serial_device_unrealize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtIOSerial *vser = VIRTIO_SERIAL(dev);
QLIST_REMOVE(vser, next);
unregister_savevm(dev, "virtio-console", vser);
g_free(vser->ivqs);
g_free(vser->ovqs);
g_free(vser->ports_map);
if (vser->post_load) {
g_free(vser->post_load->connected);
timer_del(vser->post_load->timer);
timer_free(vser->post_load->timer);
g_free(vser->post_load);
}
virtio_cleanup(vdev);
}
| 0 |
[
"CWE-120",
"CWE-787"
] |
qemu
|
7882080388be5088e72c425b02223c02e6cb4295
| 47,106,361,540,339,630,000,000,000,000,000,000,000 | 20 |
virtio-serial: fix ANY_LAYOUT
Don't assume a specific layout for control messages.
Required by virtio 1.
Signed-off-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Amit Shah <[email protected]>
Reviewed-by: Jason Wang <[email protected]>
|
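A simplified, self-contained analogue of the ANY_LAYOUT idea (QEMU has its own iov_to_buf helper; this is only a sketch of the concept): gather the control message from however many iovec segments it spans instead of assuming it sits whole in the first buffer.

#include <string.h>
#include <sys/uio.h>

static size_t gather_iov(const struct iovec *iov, int iovcnt,
                         void *buf, size_t len)
{
    size_t done = 0;
    for (int i = 0; i < iovcnt && done < len; i++) {
        size_t n = iov[i].iov_len < len - done ? iov[i].iov_len
                                               : len - done;
        memcpy((char *)buf + done, iov[i].iov_base, n);
        done += n;
    }
    return done;  /* bytes actually gathered */
}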
setDefaultRuleset(void __attribute__((unused)) *pVal, uchar *pszName)
{
DEFiRet;
CHKiRet(ruleset.SetDefaultRuleset(pszName));
finalize_it:
free(pszName); /* no longer needed */
RETiRet;
}
| 0 |
[
"CWE-119"
] |
rsyslog
|
1ca6cc236d1dabf1633238b873fb1c057e52f95e
| 22,426,102,180,510,670,000,000,000,000,000,000,000 | 10 |
bugfix: off-by-one(two) bug in legacy syslog parser
|
dbcs_ptr2len(
char_u *p)
{
int len;
// Check if second byte is not missing.
len = MB_BYTE2LEN(*p);
if (len == 2 && p[1] == NUL)
len = 1;
return len;
}
| 1 |
[
"CWE-122",
"CWE-787"
] |
vim
|
f6d39c31d2177549a986d170e192d8351bd571e2
| 62,468,950,386,092,640,000,000,000,000,000,000,000 | 11 |
patch 9.0.0220: invalid memory access with for loop over NULL string
Problem: Invalid memory access with for loop over NULL string.
Solution: Make sure mb_ptr2len() consistently returns zero for NUL.
|
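A hedged sketch of the patched behaviour (MB_BYTE2LEN here is a stub standing in for Vim's lookup table, not the real macro): a pointer sitting on the terminating NUL must report length 0, so loops that advance by the returned length can never step past the end of the string.

/* Stub standing in for Vim's MB_BYTE2LEN() table lookup. */
#define MB_BYTE2LEN(b) (((b) & 0x80) ? 2 : 1)

static int dbcs_ptr2len_fixed(const unsigned char *p)
{
    int len;

    if (*p == '\0')
        return 0;                  /* at the NUL: report zero, never 1 */
    len = MB_BYTE2LEN(*p);
    if (len == 2 && p[1] == '\0')  /* second byte of the pair missing */
        len = 1;
    return len;
}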
static int php_get_display_errors_mode(char *value, int value_length)
{
int mode;
if (!value) {
return PHP_DISPLAY_ERRORS_STDOUT;
}
if (value_length == 2 && !strcasecmp("on", value)) {
mode = PHP_DISPLAY_ERRORS_STDOUT;
} else if (value_length == 3 && !strcasecmp("yes", value)) {
mode = PHP_DISPLAY_ERRORS_STDOUT;
} else if (value_length == 4 && !strcasecmp("true", value)) {
mode = PHP_DISPLAY_ERRORS_STDOUT;
} else if (value_length == 6 && !strcasecmp(value, "stderr")) {
mode = PHP_DISPLAY_ERRORS_STDERR;
} else if (value_length == 6 && !strcasecmp(value, "stdout")) {
mode = PHP_DISPLAY_ERRORS_STDOUT;
} else {
ZEND_ATOL(mode, value);
if (mode && mode != PHP_DISPLAY_ERRORS_STDOUT && mode != PHP_DISPLAY_ERRORS_STDERR) {
mode = PHP_DISPLAY_ERRORS_STDOUT;
}
}
return mode;
}
| 0 |
[] |
php-src
|
9a07245b728714de09361ea16b9c6fcf70cb5685
| 54,891,963,700,913,010,000,000,000,000,000,000,000 | 27 |
Fixed bug #71273 A wrong ext directory setup in php.ini leads to crash
|
static BOOL rdp_read_desktop_composition_capability_set(wStream* s, UINT16 length,
rdpSettings* settings)
{
if (length < 6)
return FALSE;
Stream_Seek_UINT16(s); /* compDeskSupportLevel (2 bytes) */
return TRUE;
}
| 0 |
[
"CWE-119",
"CWE-125"
] |
FreeRDP
|
3627aaf7d289315b614a584afb388f04abfb5bbf
| 257,896,305,155,583,970,000,000,000,000,000,000,000 | 9 |
Fixed #6011: Bounds check in rdp_read_font_capability_set
|
static void test_bug10729()
{
MYSQL_STMT *stmt;
MYSQL_BIND my_bind[1];
char a[21];
int rc;
const char *stmt_text;
int i= 0;
const char *name_array[3]= { "aaa", "bbb", "ccc" };
ulong type;
myheader("test_bug10729");
mysql_query(mysql, "drop table if exists t1");
mysql_query(mysql, "create table t1 (id integer not null primary key,"
"name VARCHAR(20) NOT NULL)");
rc= mysql_query(mysql, "insert into t1 (id, name) values "
"(1, 'aaa'), (2, 'bbb'), (3, 'ccc')");
myquery(rc);
stmt= mysql_stmt_init(mysql);
type= (ulong) CURSOR_TYPE_READ_ONLY;
rc= mysql_stmt_attr_set(stmt, STMT_ATTR_CURSOR_TYPE, (void*) &type);
check_execute(stmt, rc);
stmt_text= "select name from t1";
rc= mysql_stmt_prepare(stmt, stmt_text, strlen(stmt_text));
check_execute(stmt, rc);
bzero((char*) my_bind, sizeof(my_bind));
my_bind[0].buffer_type= MYSQL_TYPE_STRING;
my_bind[0].buffer= (void*) a;
my_bind[0].buffer_length= sizeof(a);
mysql_stmt_bind_result(stmt, my_bind);
for (i= 0; i < 3; i++)
{
int row_no= 0;
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
while ((rc= mysql_stmt_fetch(stmt)) == 0)
{
DIE_UNLESS(strcmp(a, name_array[row_no]) == 0);
if (!opt_silent)
printf("%d: %s\n", row_no, a);
++row_no;
}
DIE_UNLESS(rc == MYSQL_NO_DATA);
}
rc= mysql_stmt_close(stmt);
DIE_UNLESS(rc == 0);
rc= mysql_query(mysql, "drop table t1");
myquery(rc);
}
| 0 |
[
"CWE-416"
] |
server
|
eef21014898d61e77890359d6546d4985d829ef6
| 283,112,211,353,528,650,000,000,000,000,000,000,000 | 55 |
MDEV-11933 Wrong usage of linked list in mysql_prune_stmt_list
mysql_prune_stmt_list() was walking the list following
element->next pointers, but inside the loop it was invoking
list_add(element) that modified element->next. So, mysql_prune_stmt_list()
failed to visit and reset all elements, and some of them were left
with pointers to invalid MYSQL.
|
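The bug class in a self-contained sketch (invented names, not the MariaDB code): if the loop body re-links the current element into another list, element->next is overwritten, so the cursor has to be saved before the body runs.

#include <stddef.h>

struct list_el { struct list_el *next; };

static void prune(struct list_el **head, struct list_el **pruned)
{
    struct list_el *el = *head, *next;

    *head = NULL;
    for (; el != NULL; el = next) {
        next = el->next;     /* save the cursor before the body clobbers it */
        el->next = *pruned;  /* re-link el onto the pruned list */
        *pruned = el;
    }
}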
bool delete_directory(const std::string& dirname, const bool keep_pbl)
{
bool ret = true;
std::vector<std::string> files;
std::vector<std::string> dirs;
error_code ec;
get_files_in_dir(dirname, &files, &dirs, ENTIRE_FILE_PATH, keep_pbl ? SKIP_PBL_FILES : NO_FILTER);
if(!files.empty()) {
for(std::vector<std::string>::const_iterator i = files.begin(); i != files.end(); ++i) {
bfs::remove(path(*i), ec);
if (ec) {
LOG_FS << "remove(" << (*i) << "): " << ec.message() << '\n';
ret = false;
}
}
}
if(!dirs.empty()) {
for(std::vector<std::string>::const_iterator j = dirs.begin(); j != dirs.end(); ++j) {
//TODO: this does not preserve any other PBL files
// filesystem.cpp does this too, so this might be intentional
if(!delete_directory(*j))
ret = false;
}
}
if (ret) {
bfs::remove(path(dirname), ec);
if (ec) {
LOG_FS << "remove(" << dirname << "): " << ec.message() << '\n';
ret = false;
}
}
return ret;
}
| 0 |
[
"CWE-200"
] |
wesnoth
|
f8914468182e8d0a1551b430c0879ba236fe4d6d
| 128,242,231,259,410,490,000,000,000,000,000,000,000 | 37 |
Disallow inclusion of .pbl files from WML (bug #23504)
Note that this will also cause Lua wesnoth.have_file() to return false
on .pbl files.
|
static bool encode_server_sort_request(void *mem_ctx, void *in, DATA_BLOB *out)
{
struct ldb_server_sort_control **lssc = talloc_get_type(in, struct ldb_server_sort_control *);
struct asn1_data *data = asn1_init(mem_ctx);
int num;
if (!data) return false;
if (!asn1_push_tag(data, ASN1_SEQUENCE(0))) {
return false;
}
/*
RFC2891 section 1.1:
SortKeyList ::= SEQUENCE OF SEQUENCE {
attributeType AttributeDescription,
orderingRule [0] MatchingRuleId OPTIONAL,
reverseOrder [1] BOOLEAN DEFAULT FALSE }
*/
for (num = 0; lssc[num]; num++) {
if (!asn1_push_tag(data, ASN1_SEQUENCE(0))) {
return false;
}
if (!asn1_write_OctetString(data, lssc[num]->attributeName, strlen(lssc[num]->attributeName))) {
return false;
}
if (lssc[num]->orderingRule) {
if (!asn1_write_OctetString(data, lssc[num]->orderingRule, strlen(lssc[num]->orderingRule))) {
return false;
}
}
if (lssc[num]->reverse) {
if (!asn1_write_BOOLEAN_context(data, lssc[num]->reverse, 1)) {
return false;
}
}
if (!asn1_pop_tag(data)) {
return false;
}
}
if (!asn1_pop_tag(data)) {
return false;
}
*out = data_blob_talloc(mem_ctx, data->data, data->length);
if (out->data == NULL) {
return false;
}
talloc_free(data);
return true;
}
| 0 |
[
"CWE-399"
] |
samba
|
530d50a1abdcdf4d1775652d4c456c1274d83d8d
| 268,759,839,350,214,500,000,000,000,000,000,000,000 | 57 |
CVE-2015-7540: s4: libcli: ldap message - Ensure all asn1_XX returns are checked.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=9187
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Ronnie Sahlberg <[email protected]>
Autobuild-User(master): Jeremy Allison <[email protected]>
Autobuild-Date(master): Fri Sep 26 03:15:00 CEST 2014 on sn-devel-104
(cherry picked from commit 69a7e3cfdc8dbba9c8dcfdfae82d2894c7247e15)
|
static int ati_remote2_pre_reset(struct usb_interface *interface)
{
struct ati_remote2 *ar2;
struct usb_host_interface *alt = interface->cur_altsetting;
if (alt->desc.bInterfaceNumber)
return 0;
ar2 = usb_get_intfdata(interface);
dev_dbg(&ar2->intf[0]->dev, "%s()\n", __func__);
mutex_lock(&ati_remote2_mutex);
if (ar2->flags == ATI_REMOTE2_OPENED)
ati_remote2_kill_urbs(ar2);
return 0;
}
| 0 |
[
"CWE-703"
] |
linux
|
950336ba3e4a1ffd2ca60d29f6ef386dd2c7351d
| 72,027,656,991,040,910,000,000,000,000,000,000,000 | 19 |
Input: ati_remote2 - fix crashes on detecting device with invalid descriptor
The ati_remote2 driver expects at least two interfaces with one
endpoint each. If given a malicious descriptor that specifies one
interface or no endpoints, it will crash in the probe function.
Ensure there are at least two interfaces and one endpoint for each
interface before using them.
The full disclosure: http://seclists.org/bugtraq/2016/Mar/90
Reported-by: Ralf Spenneberg <[email protected]>
Signed-off-by: Vladis Dronov <[email protected]>
Cc: [email protected]
Signed-off-by: Dmitry Torokhov <[email protected]>
|
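A generic, self-contained sketch of the probe-time validation described above (the struct is a hypothetical stand-in, not the real USB descriptor types): validate externally supplied counts before indexing the layout the driver assumes.

struct hypot_desc {          /* invented stand-in for the descriptors */
    int num_interfaces;
    int num_endpoints[8];
};

static int layout_ok(const struct hypot_desc *d)
{
    if (d->num_interfaces < 2)
        return 0;            /* driver assumes two interfaces */
    if (d->num_endpoints[0] < 1 || d->num_endpoints[1] < 1)
        return 0;            /* ...with at least one endpoint each */
    return 1;
}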
get_uid_for_session_id (const char *session_id,
uid_t *uid,
GError **error)
{
int ret;
ret = sd_session_get_uid (session_id, uid);
if (ret < 0) {
g_set_error (error,
GDM_DISPLAY_ERROR,
GDM_DISPLAY_ERROR_GETTING_SESSION_INFO,
"Error getting uid for session id %s from systemd: %s",
session_id,
g_strerror (-ret));
return FALSE;
}
return TRUE;
}
| 0 |
[] |
gdm
|
ff98b2817014684ae1acec78ff06f0f461a56a9f
| 68,569,059,943,107,330,000,000,000,000,000,000,000 | 19 |
manager: if falling back to X11 retry autologin
Right now, we get one shot to autologin. If it fails, we fall back to
the greeter. We should give it another go if the reason for the failure
was wayland fallback to X.
https://bugzilla.gnome.org/show_bug.cgi?id=780520
|
static void __net_exit ip6mr_rules_exit(struct net *net)
{
rtnl_lock();
ip6mr_free_table(net->ipv6.mrt6);
net->ipv6.mrt6 = NULL;
rtnl_unlock();
}
| 0 |
[
"CWE-20"
] |
linux
|
99253eb750fda6a644d5188fb26c43bad8d5a745
| 117,731,151,935,589,240,000,000,000,000,000,000,000 | 7 |
ipv6: check sk sk_type and protocol early in ip_mroute_set/getsockopt
Commit 5e1859fbcc3c ("ipv4: ipmr: various fixes and cleanups") fixed
the issue for ipv4 ipmr:
ip_mroute_setsockopt() & ip_mroute_getsockopt() should not
access/set raw_sk(sk)->ipmr_table before making sure the socket
is a raw socket, and protocol is IGMP
The same fix should be done for ipv6 ipmr as well.
This patch can fix the panic caused by overwriting the same offset
as ipmr_table as in raw_sk(sk) when accessing other type's socket
by ip_mroute_setsockopt().
Signed-off-by: Xin Long <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
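A self-contained sketch of the early check this commit describes (simplified; in the kernel this guards the cast to raw_sk(sk)): verify the socket is a raw ICMPv6 socket before touching protocol-private state.

#include <sys/socket.h>
#include <netinet/in.h>

static int ip6_mroute_opt_allowed(int sk_type, int sk_protocol)
{
    return sk_type == SOCK_RAW && sk_protocol == IPPROTO_ICMPV6;
}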
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
unsigned int flags)
{
bool tgid = flags & TRACE_ITER_RECORD_TGID;
const char tgid_space[] = " ";
const char space[] = " ";
seq_printf(m, "# %s _-----=> irqs-off\n",
tgid ? tgid_space : space);
seq_printf(m, "# %s / _----=> need-resched\n",
tgid ? tgid_space : space);
seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
tgid ? tgid_space : space);
seq_printf(m, "# %s|| / _--=> preempt-depth\n",
tgid ? tgid_space : space);
seq_printf(m, "# %s||| / delay\n",
tgid ? tgid_space : space);
seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n",
tgid ? " TGID " : space);
seq_printf(m, "# | | | %s|||| | |\n",
tgid ? " | " : space);
}
| 0 |
[
"CWE-415"
] |
linux
|
4397f04575c44e1440ec2e49b6302785c95fd2f8
| 313,171,145,729,581,040,000,000,000,000,000,000,000 | 22 |
tracing: Fix possible double free on failure of allocating trace buffer
Jing Xia and Chunyan Zhang reported that on failing to allocate part of the
tracing buffer, memory is freed, but the pointers that point to them are not
initialized back to NULL, and later paths may try to free the freed memory
again. Jing and Chunyan fixed one of the locations that does this, but
missed a spot.
Link: http://lkml.kernel.org/r/[email protected]
Cc: [email protected]
Fixes: 737223fbca3b1 ("tracing: Consolidate buffer allocation code")
Reported-by: Jing Xia <[email protected]>
Reported-by: Chunyan Zhang <[email protected]>
Signed-off-by: Steven Rostedt (VMware) <[email protected]>
|
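The bug class in one self-contained picture: after freeing on an error path, the pointer must be reset to NULL so a later cleanup pass cannot free it again.

#include <stdlib.h>

static void free_buffers(void **a, void **b)
{
    free(*a);
    *a = NULL;   /* without this, a second cleanup pass double-frees */
    free(*b);
    *b = NULL;
}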
rpc_C_GetInfo (CK_X_FUNCTION_LIST *self,
p11_rpc_message *msg)
{
CK_INFO info;
BEGIN_CALL (GetInfo);
PROCESS_CALL ((self, &info));
OUT_INFO (info);
END_CALL;
}
| 0 |
[
"CWE-190"
] |
p11-kit
|
5307a1d21a50cacd06f471a873a018d23ba4b963
| 58,984,370,443,451,650,000,000,000,000,000,000,000 | 10 |
Check for arithmetic overflows before allocating
|
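A minimal sketch of "check for arithmetic overflows before allocating" (generic pattern, not the p11-kit code itself): verify that count * size cannot wrap before handing it to the allocator.

#include <stdint.h>
#include <stdlib.h>

static void *alloc_array(size_t count, size_t size)
{
    if (size != 0 && count > SIZE_MAX / size)
        return NULL;             /* count * size would wrap: refuse */
    return malloc(count * size);
}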
vhost_dequeue_single_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool,
struct rte_mbuf **pkts,
uint16_t *buf_id,
uint16_t *desc_count)
{
struct buf_vector buf_vec[BUF_VECTOR_MAX];
uint32_t buf_len;
uint16_t nr_vec = 0;
int err;
if (unlikely(fill_vec_buf_packed(dev, vq,
vq->last_avail_idx, desc_count,
buf_vec, &nr_vec,
buf_id, &buf_len,
VHOST_ACCESS_RO) < 0))
return -1;
*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
if (unlikely(*pkts == NULL)) {
VHOST_LOG_DATA(ERR,
"Failed to allocate memory for mbuf.\n");
return -1;
}
err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
mbuf_pool);
if (unlikely(err)) {
rte_pktmbuf_free(*pkts);
return -1;
}
return 0;
}
| 0 |
[
"CWE-665"
] |
dpdk
|
97ecc1c85c95c13bc66a87435758e93406c35c48
| 246,152,458,398,828,900,000,000,000,000,000,000,000 | 35 |
vhost: fix translated address not checked
A malicious guest can construct a descriptor with an invalid address and
zero buffer length. That requires vhost to check both the translated
address and the translated data length. This patch adds the missing
address check.
CVE-2020-10725
Fixes: 75ed51697820 ("vhost: add packed ring batch dequeue")
Fixes: ef861692c398 ("vhost: add packed ring batch enqueue")
Cc: [email protected]
Signed-off-by: Marvin Liu <[email protected]>
Reviewed-by: Maxime Coquelin <[email protected]>
|
OFCondition DcmSCP::sendSTOREResponse(const T_ASC_PresentationContextID presID,
const Uint16 messageID,
const OFString &sopClassUID,
const OFString &sopInstanceUID,
const Uint16 rspStatusCode,
DcmDataset *statusDetail)
{
OFCondition cond;
OFString tempStr;
// Send back response
T_DIMSE_Message response;
// Make sure everything is zeroed (especially options)
bzero((char*)&response, sizeof(response));
T_DIMSE_C_StoreRSP &storeRsp = response.msg.CStoreRSP;
response.CommandField = DIMSE_C_STORE_RSP;
storeRsp.MessageIDBeingRespondedTo = messageID;
storeRsp.DimseStatus = rspStatusCode;
storeRsp.DataSetType = DIMSE_DATASET_NULL;
// Always send the optional fields "Affected SOP Class UID" and "Affected SOP Instance UID"
storeRsp.opts = O_STORE_AFFECTEDSOPCLASSUID | O_STORE_AFFECTEDSOPINSTANCEUID;
OFStandard::strlcpy(storeRsp.AffectedSOPClassUID, sopClassUID.c_str(), sizeof(storeRsp.AffectedSOPClassUID));
OFStandard::strlcpy(storeRsp.AffectedSOPInstanceUID, sopInstanceUID.c_str(), sizeof(storeRsp.AffectedSOPInstanceUID));
if (DCM_dcmnetLogger.isEnabledFor(OFLogger::DEBUG_LOG_LEVEL))
{
DCMNET_INFO("Sending C-STORE Response");
DCMNET_DEBUG(DIMSE_dumpMessage(tempStr, response, DIMSE_OUTGOING, NULL, presID));
} else {
DCMNET_INFO("Sending C-STORE Response (" << DU_cstoreStatusString(rspStatusCode) << ")");
}
// Send response message
cond = sendDIMSEMessage(presID, &response, NULL /* dataObject */, statusDetail);
if (cond.bad())
{
DCMNET_ERROR("Failed sending C-STORE response: " << DimseCondition::dump(tempStr, cond));
}
return cond;
}
| 0 |
[
"CWE-264"
] |
dcmtk
|
beaf5a5c24101daeeafa48c375120b16197c9e95
| 241,455,553,989,248,950,000,000,000,000,000,000,000 | 41 |
Make sure to handle setuid() return code properly.
In some tools the return value of setuid() is not checked. In the worst
case this could lead to privilege escalation since the process does not
give up its root privileges and continues running as root.
|
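The shape of the fix in a self-contained sketch: never ignore the return value of setuid(), since continuing after a failure keeps the process running as root.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void drop_privileges(uid_t uid)
{
    if (setuid(uid) != 0) {      /* never ignore this return value */
        perror("setuid");
        exit(EXIT_FAILURE);      /* refuse to keep running as root */
    }
}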
static void check_unsafe_exec(struct linux_binprm *bprm)
{
struct task_struct *p = current, *t;
unsigned n_fs;
if (p->ptrace) {
if (p->ptrace & PT_PTRACE_CAP)
bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
else
bprm->unsafe |= LSM_UNSAFE_PTRACE;
}
/*
* This isn't strictly necessary, but it makes it harder for LSMs to
* mess up.
*/
if (task_no_new_privs(current))
bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
t = p;
n_fs = 1;
spin_lock(&p->fs->lock);
rcu_read_lock();
while_each_thread(p, t) {
if (t->fs == p->fs)
n_fs++;
}
rcu_read_unlock();
if (p->fs->users > n_fs)
bprm->unsafe |= LSM_UNSAFE_SHARE;
else
p->fs->in_exec = 1;
spin_unlock(&p->fs->lock);
}
| 0 |
[
"CWE-362"
] |
linux
|
8b01fc86b9f425899f8a3a8fc1c47d73c2c20543
| 61,674,279,533,476,180,000,000,000,000,000,000,000 | 35 |
fs: take i_mutex during prepare_binprm for set[ug]id executables
This prevents a race between chown() and execve(), where chowning a
setuid-user binary to root would momentarily make the binary setuid
root.
This patch was mostly written by Linus Torvalds.
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
Return the current SAPI module name */
PHP_FUNCTION(php_sapi_name)
{
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if (sapi_module.name) {
RETURN_STRING(sapi_module.name, 1);
} else {
RETURN_FALSE;
}
| 0 |
[
"CWE-200"
] |
php-src
|
3804c0d00fa6e629173fb1c8c61f8f88d5fe39b9
| 317,438,210,926,530,330,000,000,000,000,000,000,000 | 12 |
Fix bug #67498 - phpinfo() Type Confusion Information Leak Vulnerability
|
void ath6kl_wmi_set_control_ep(struct wmi *wmi, enum htc_endpoint_id ep_id)
{
if (WARN_ON(ep_id == ENDPOINT_UNUSED || ep_id >= ENDPOINT_MAX))
return;
wmi->ep_id = ep_id;
}
| 0 |
[
"CWE-125"
] |
linux
|
5d6751eaff672ea77642e74e92e6c0ac7f9709ab
| 332,602,870,535,285,800,000,000,000,000,000,000,000 | 7 |
ath6kl: add some bounds checking
The "ev->traffic_class" and "reply->ac" variables come from the network
and they're used as an offset into the wmi->stream_exist_for_ac[] array.
Those variables are u8 so they can be 0-255 but the stream_exist_for_ac[]
array only has WMM_NUM_AC (4) elements. We need to add a couple bounds
checks to prevent array overflows.
I also modified one existing check from "if (traffic_class > 3) {" to
"if (traffic_class >= WMM_NUM_AC) {" just to make them all consistent.
Fixes: bdcd81707973 ("Add ath6kl cleaned up driver")
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Kalle Valo <[email protected]>
|
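The added bounds check in a self-contained sketch (simplified from the description above): traffic_class arrives off the wire as a u8, so it can be 0-255 and must be validated against the array bound before indexing.

#define WMM_NUM_AC 4   /* size of stream_exist_for_ac[] */

static int traffic_class_ok(unsigned char traffic_class)
{
    return traffic_class < WMM_NUM_AC;
}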
qemuProcessMonitorReportLogError(qemuMonitorPtr mon G_GNUC_UNUSED,
const char *msg,
void *opaque)
{
qemuDomainLogContextPtr logCtxt = opaque;
qemuProcessReportLogError(logCtxt, msg);
}
| 0 |
[
"CWE-416"
] |
libvirt
|
1ac703a7d0789e46833f4013a3876c2e3af18ec7
| 181,379,948,962,814,560,000,000,000,000,000,000,000 | 7 |
qemu: Add missing lock in qemuProcessHandleMonitorEOF
qemuMonitorUnregister will be called in multiple threads (e.g. threads
in rpc worker pool and the vm event thread). In some cases, it isn't
protected by the monitor lock, which may lead to g_source_unref being
called more than once and, eventually, a use-after-free.
Add the missing lock in qemuProcessHandleMonitorEOF (which is the only
position missing lock of monitor I found).
Suggested-by: Michal Privoznik <[email protected]>
Signed-off-by: Peng Liang <[email protected]>
Signed-off-by: Michal Privoznik <[email protected]>
Reviewed-by: Michal Privoznik <[email protected]>
|
static int vop_virtio_add_device(struct vop_vdev *vdev,
struct mic_device_desc *argp)
{
struct vop_info *vi = vdev->vi;
struct vop_device *vpdev = vi->vpdev;
struct mic_device_desc *dd = NULL;
struct mic_vqconfig *vqconfig;
int vr_size, i, j, ret;
u8 type = 0;
s8 db = -1;
char irqname[16];
struct mic_bootparam *bootparam;
u16 num;
dma_addr_t vr_addr;
bootparam = vpdev->hw_ops->get_dp(vpdev);
init_waitqueue_head(&vdev->waitq);
INIT_LIST_HEAD(&vdev->list);
vdev->vpdev = vpdev;
ret = vop_copy_dp_entry(vdev, argp, &type, &dd);
if (ret) {
dev_err(vop_dev(vdev), "%s %d err %d\n",
__func__, __LINE__, ret);
kfree(vdev);
return ret;
}
vop_init_device_ctrl(vdev, dd);
vdev->dd = dd;
vdev->virtio_id = type;
vqconfig = mic_vq_config(dd);
INIT_WORK(&vdev->virtio_bh_work, vop_bh_handler);
for (i = 0; i < dd->num_vq; i++) {
struct vop_vringh *vvr = &vdev->vvr[i];
struct mic_vring *vr = &vdev->vvr[i].vring;
num = le16_to_cpu(vqconfig[i].num);
mutex_init(&vvr->vr_mutex);
vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
sizeof(struct _mic_vring_info));
vr->va = (void *)
__get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(vr_size));
if (!vr->va) {
ret = -ENOMEM;
dev_err(vop_dev(vdev), "%s %d err %d\n",
__func__, __LINE__, ret);
goto err;
}
vr->len = vr_size;
vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i);
vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(&vpdev->dev, vr_addr)) {
free_pages((unsigned long)vr->va, get_order(vr_size));
ret = -ENOMEM;
dev_err(vop_dev(vdev), "%s %d err %d\n",
__func__, __LINE__, ret);
goto err;
}
vqconfig[i].address = cpu_to_le64(vr_addr);
vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
ret = vringh_init_kern(&vvr->vrh,
*(u32 *)mic_vq_features(vdev->dd),
num, false, vr->vr.desc, vr->vr.avail,
vr->vr.used);
if (ret) {
dev_err(vop_dev(vdev), "%s %d err %d\n",
__func__, __LINE__, ret);
goto err;
}
vringh_kiov_init(&vvr->riov, NULL, 0);
vringh_kiov_init(&vvr->wiov, NULL, 0);
vvr->head = USHRT_MAX;
vvr->vdev = vdev;
vvr->vrh.notify = _vop_notify;
dev_dbg(&vpdev->dev,
"%s %d index %d va %p info %p vr_size 0x%x\n",
__func__, __LINE__, i, vr->va, vr->info, vr_size);
vvr->buf = (void *)__get_free_pages(GFP_KERNEL,
get_order(VOP_INT_DMA_BUF_SIZE));
vvr->buf_da = dma_map_single(&vpdev->dev,
vvr->buf, VOP_INT_DMA_BUF_SIZE,
DMA_BIDIRECTIONAL);
}
snprintf(irqname, sizeof(irqname), "vop%dvirtio%d", vpdev->index,
vdev->virtio_id);
vdev->virtio_db = vpdev->hw_ops->next_db(vpdev);
vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
_vop_virtio_intr_handler, irqname, vdev,
vdev->virtio_db);
if (IS_ERR(vdev->virtio_cookie)) {
ret = PTR_ERR(vdev->virtio_cookie);
dev_dbg(&vpdev->dev, "request irq failed\n");
goto err;
}
vdev->dc->c2h_vdev_db = vdev->virtio_db;
/*
* Order the type update with previous stores. This write barrier
* is paired with the corresponding read barrier before the uncached
* system memory read of the type, on the card while scanning the
* device page.
*/
smp_wmb();
dd->type = type;
argp->type = type;
if (bootparam) {
db = bootparam->h2c_config_db;
if (db != -1)
vpdev->hw_ops->send_intr(vpdev, db);
}
dev_dbg(&vpdev->dev, "Added virtio id %d db %d\n", dd->type, db);
return 0;
err:
vqconfig = mic_vq_config(dd);
for (j = 0; j < i; j++) {
struct vop_vringh *vvr = &vdev->vvr[j];
dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[j].address),
vvr->vring.len, DMA_BIDIRECTIONAL);
free_pages((unsigned long)vvr->vring.va,
get_order(vvr->vring.len));
}
return ret;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
9bf292bfca94694a721449e3fd752493856710f6
| 157,627,553,014,670,970,000,000,000,000,000,000,000 | 134 |
misc: mic: Fix for double fetch security bug in VOP driver
The MIC VOP driver does two successive reads from user space to read a
variable length data structure. Kernel memory corruption can result if
the data structure changes between the two reads. This patch disallows
the chance of this happening.
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=116651
Reported by: Pengfei Wang <[email protected]>
Reviewed-by: Sudeep Dutt <[email protected]>
Signed-off-by: Ashutosh Dixit <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
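A userspace analogue of the double-fetch fix (invented struct, not the VOP descriptor): copy the untrusted header exactly once into local memory, then derive every length from that single snapshot instead of re-reading the shared buffer, which the other side can change between reads.

#include <string.h>

struct hdr { unsigned int len; };   /* stand-in for the device descriptor */

static int read_once(const struct hdr *shared, const char *payload,
                     char *out, unsigned int out_sz)
{
    struct hdr snap;

    memcpy(&snap, shared, sizeof(snap));  /* fetch the header exactly once */
    if (snap.len > out_sz)
        return -1;                        /* validate the snapshot... */
    memcpy(out, payload, snap.len);       /* ...and only ever use it */
    return 0;
}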
static unsigned addChunk_zTXt(ucvector* out, const char* keyword, const char* textstring,
LodePNGCompressSettings* zlibsettings)
{
unsigned error = 0;
ucvector data, compressed;
size_t i, textsize = strlen(textstring);
ucvector_init(&data);
ucvector_init(&compressed);
for(i = 0; keyword[i] != 0; i++) ucvector_push_back(&data, (unsigned char)keyword[i]);
if(i < 1 || i > 79) return 89; /*error: invalid keyword size*/
ucvector_push_back(&data, 0); /*0 termination char*/
ucvector_push_back(&data, 0); /*compression method: 0*/
error = zlib_compress(&compressed.data, &compressed.size,
(unsigned char*)textstring, textsize, zlibsettings);
if(!error)
{
for(i = 0; i < compressed.size; i++) ucvector_push_back(&data, compressed.data[i]);
error = addChunk(out, "zTXt", data.data, data.size);
}
ucvector_cleanup(&compressed);
ucvector_cleanup(&data);
return error;
}
| 0 |
[
"CWE-401"
] |
FreeRDP
|
9fee4ae076b1ec97b97efb79ece08d1dab4df29a
| 318,989,254,626,248,450,000,000,000,000,000,000,000 | 26 |
Fixed #5645: realloc return handling
|
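The realloc pattern this fix enforces, as a self-contained sketch: assign the result to a temporary first, so the original block is neither leaked nor used when realloc fails.

#include <stdlib.h>

static int grow(void **p, size_t new_size)
{
    void *tmp = realloc(*p, new_size);

    if (tmp == NULL)
        return -1;   /* *p is untouched and still owned by the caller */
    *p = tmp;
    return 0;
}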
static void init_sched_groups_power(int cpu, struct sched_domain *sd)
{
struct sched_domain *child;
struct sched_group *group;
WARN_ON(!sd || !sd->groups);
if (cpu != first_cpu(sd->groups->cpumask))
return;
child = sd->child;
sd->groups->__cpu_power = 0;
/*
* For perf policy, if the groups in child domain share resources
* (for example cores sharing some portions of the cache hierarchy
* or SMT), then set this domain groups cpu_power such that each group
* can handle only one task, when there are other idle groups in the
* same sched domain.
*/
if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
(child->flags &
(SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE);
return;
}
/*
* add cpu_power of each child group to this groups cpu_power
*/
group = child->groups;
do {
sg_inc_cpu_power(sd->groups, group->__cpu_power);
group = group->next;
} while (group != child->groups);
}
| 0 |
[] |
linux-2.6
|
8f1bc385cfbab474db6c27b5af1e439614f3025c
| 73,208,741,651,417,650,000,000,000,000,000,000,000 | 37 |
sched: fair: weight calculations
In order to level the hierarchy, we need to calculate load based on the
root view. That is, each task's load is in the same unit.
A
/ \
B 1
/ \
2 3
To compute 1's load we do:
weight(1)
--------------
rq_weight(A)
To compute 2's load we do:
weight(2) weight(B)
------------ * -----------
rq_weight(B) rw_weight(A)
This yields load fractions in comparable units.
The consequence is that it changes virtual time. We used to have:
time_{i}
vtime_{i} = ------------
weight_{i}
vtime = \Sum vtime_{i} = time / rq_weight.
But with the new way of load calculation we get that vtime equals time.
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
generate_ns_check(struct module_qstate* qstate, struct iter_qstate* iq, int id)
{
struct iter_env* ie = (struct iter_env*)qstate->env->modinfo[id];
struct module_qstate* subq;
log_assert(iq->dp);
if(iq->depth == ie->max_dependency_depth)
return;
if(!can_have_last_resort(qstate->env, iq->dp->name, iq->dp->namelen,
iq->qchase.qclass, NULL))
return;
/* is this query the same as the nscheck? */
if(qstate->qinfo.qtype == LDNS_RR_TYPE_NS &&
query_dname_compare(iq->dp->name, qstate->qinfo.qname)==0 &&
(qstate->query_flags&BIT_RD) && !(qstate->query_flags&BIT_CD)){
/* spawn off A, AAAA queries for in-zone glue to check */
generate_a_aaaa_check(qstate, iq, id);
return;
}
/* no need to get the NS record for DS, it is above the zonecut */
if(qstate->qinfo.qtype == LDNS_RR_TYPE_DS)
return;
log_nametypeclass(VERB_ALGO, "schedule ns fetch",
iq->dp->name, LDNS_RR_TYPE_NS, iq->qchase.qclass);
if(!generate_sub_request(iq->dp->name, iq->dp->namelen,
LDNS_RR_TYPE_NS, iq->qchase.qclass, qstate, id, iq,
INIT_REQUEST_STATE, FINISHED_STATE, &subq, 1, 0)) {
verbose(VERB_ALGO, "could not generate ns check");
return;
}
if(subq) {
struct iter_qstate* subiq =
(struct iter_qstate*)subq->minfo[id];
/* make copy to avoid use of stub dp by different qs/threads */
/* refetch glue to start higher up the tree */
subiq->refetch_glue = 1;
subiq->dp = delegpt_copy(iq->dp, subq->region);
if(!subiq->dp) {
log_err("out of memory generating ns check, copydp");
fptr_ok(fptr_whitelist_modenv_kill_sub(
qstate->env->kill_sub));
(*qstate->env->kill_sub)(subq);
return;
}
}
}
| 0 |
[
"CWE-400"
] |
unbound
|
ba0f382eee814e56900a535778d13206b86b6d49
| 269,806,568,652,086,440,000,000,000,000,000,000,000 | 48 |
- CVE-2020-12662 Unbound can be tricked into amplifying an incoming
query into a large number of queries directed to a target.
- CVE-2020-12663 Malformed answers from upstream name servers can be
used to make Unbound unresponsive.
|
void __init tcp_init(void)
{
int max_rshare, max_wshare, cnt;
unsigned long limit;
unsigned int i;
BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
FIELD_SIZEOF(struct sk_buff, cb));
percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
inet_hashinfo_init(&tcp_hashinfo);
inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
thash_entries, 21, /* one slot per 2 MB*/
0, 64 * 1024);
tcp_hashinfo.bind_bucket_cachep =
kmem_cache_create("tcp_bind_bucket",
sizeof(struct inet_bind_bucket), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
/* Size and allocate the main established and bind bucket
* hash tables.
*
* The methodology is similar to that of the buffer cache.
*/
tcp_hashinfo.ehash =
alloc_large_system_hash("TCP established",
sizeof(struct inet_ehash_bucket),
thash_entries,
17, /* one slot per 128 KB of memory */
0,
NULL,
&tcp_hashinfo.ehash_mask,
0,
thash_entries ? 0 : 512 * 1024);
for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
if (inet_ehash_locks_alloc(&tcp_hashinfo))
panic("TCP: failed to alloc ehash_locks");
tcp_hashinfo.bhash =
alloc_large_system_hash("TCP bind",
sizeof(struct inet_bind_hashbucket),
tcp_hashinfo.ehash_mask + 1,
17, /* one slot per 128 KB of memory */
0,
&tcp_hashinfo.bhash_size,
NULL,
0,
64 * 1024);
tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
spin_lock_init(&tcp_hashinfo.bhash[i].lock);
INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
}
cnt = tcp_hashinfo.ehash_mask + 1;
sysctl_tcp_max_orphans = cnt / 2;
tcp_init_mem();
/* Set per-socket limits to no more than 1/128 the pressure threshold */
limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
max_wshare = min(4UL*1024*1024, limit);
max_rshare = min(6UL*1024*1024, limit);
init_net.ipv4.sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024;
init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
init_net.ipv4.sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
init_net.ipv4.sysctl_tcp_rmem[1] = 131072;
init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare);
pr_info("Hash tables configured (established %u bind %u)\n",
tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
tcp_v4_init();
tcp_metrics_init();
BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
tcp_tasklet_init();
}
| 1 |
[
"CWE-190"
] |
net
|
3b4929f65b0d8249f19a50245cd88ed1a2f78cff
| 26,489,189,970,917,730,000,000,000,000,000,000,000 | 82 |
tcp: limit payload size of sacked skbs
Jonathan Looney reported that TCP can trigger the following crash
in tcp_shifted_skb() :
BUG_ON(tcp_skb_pcount(skb) < pcount);
This can happen if the remote peer has advertised the smallest
MSS that Linux TCP accepts: 48
An skb can hold 17 fragments, and each fragment can hold 32KB
on x86, or 64KB on PowerPC.
This means that the 16-bit width of TCP_SKB_CB(skb)->tcp_gso_segs
can overflow.
Note that tcp_sendmsg() builds skbs with less than 64KB
of payload, so this problem needs SACK to be enabled.
SACK blocks allow TCP to coalesce multiple skbs in the retransmit
queue, thus filling the 17 fragments to maximal capacity.
CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs
Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Jonathan Looney <[email protected]>
Acked-by: Neal Cardwell <[email protected]>
Reviewed-by: Tyler Hicks <[email protected]>
Cc: Yuchung Cheng <[email protected]>
Cc: Bruce Curtis <[email protected]>
Cc: Jonathan Lemon <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
void bcf_header_debug(bcf_hdr_t *hdr)
{
int i, j;
for (i=0; i<hdr->nhrec; i++)
{
if ( !hdr->hrec[i]->value )
{
fprintf(stderr, "##%s=<", hdr->hrec[i]->key);
fprintf(stderr,"%s=%s", hdr->hrec[i]->keys[0], hdr->hrec[i]->vals[0]);
for (j=1; j<hdr->hrec[i]->nkeys; j++)
fprintf(stderr,",%s=%s", hdr->hrec[i]->keys[j], hdr->hrec[i]->vals[j]);
fprintf(stderr,">\n");
}
else
fprintf(stderr,"##%s=%s\n", hdr->hrec[i]->key,hdr->hrec[i]->value);
}
}
| 0 |
[
"CWE-787"
] |
htslib
|
dcd4b7304941a8832fba2d0fc4c1e716e7a4e72c
| 64,578,913,715,464,140,000,000,000,000,000,000,000 | 17 |
Fix check for VCF record size
The check for excessive record size in vcf_parse_format() only
looked at individual fields. It was therefore possible to
exceed the limit and overflow fmt_aux_t::offset by having
multiple fields with a combined size that went over INT_MAX.
Fix by including the amount of memory used so far in the check.
Credit to OSS-Fuzz
Fixes oss-fuzz 24097
|
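A hedged sketch of the corrected check (generic form, not the htslib code verbatim): the limit must apply to the running total across all fields, not to each field in isolation, or the offset can still overflow.

#include <limits.h>
#include <stddef.h>

static int record_fits(size_t used_so_far, size_t field_len)
{
    if (used_so_far > INT_MAX ||
        field_len > (size_t)INT_MAX - used_so_far)
        return 0;   /* the running total, not the field, breaches the cap */
    return 1;
}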
png_inflate_read(png_structrp png_ptr, png_bytep read_buffer, uInt read_size,
png_uint_32p chunk_bytes, png_bytep next_out, png_alloc_size_t *out_size,
int finish)
{
if (png_ptr->zowner == png_ptr->chunk_name)
{
int ret;
/* next_in and avail_in must have been initialized by the caller. */
png_ptr->zstream.next_out = next_out;
png_ptr->zstream.avail_out = 0; /* set in the loop */
do
{
if (png_ptr->zstream.avail_in == 0)
{
if (read_size > *chunk_bytes)
read_size = (uInt)*chunk_bytes;
*chunk_bytes -= read_size;
if (read_size > 0)
png_crc_read(png_ptr, read_buffer, read_size);
png_ptr->zstream.next_in = read_buffer;
png_ptr->zstream.avail_in = read_size;
}
if (png_ptr->zstream.avail_out == 0)
{
uInt avail = ZLIB_IO_MAX;
if (avail > *out_size)
avail = (uInt)*out_size;
*out_size -= avail;
png_ptr->zstream.avail_out = avail;
}
/* Use Z_SYNC_FLUSH when there is no more chunk data to ensure that all
* the available output is produced; this allows reading of truncated
* streams.
*/
ret = inflate(&png_ptr->zstream,
*chunk_bytes > 0 ? Z_NO_FLUSH : (finish ? Z_FINISH : Z_SYNC_FLUSH));
}
while (ret == Z_OK && (*out_size > 0 || png_ptr->zstream.avail_out > 0));
*out_size += png_ptr->zstream.avail_out;
png_ptr->zstream.avail_out = 0; /* Should not be required, but is safe */
/* Ensure the error message pointer is always set: */
png_zstream_error(png_ptr, ret);
return ret;
}
else
{
png_ptr->zstream.msg = PNGZ_MSG_CAST("zstream unclaimed");
return Z_STREAM_ERROR;
}
}
| 0 |
[
"CWE-120"
] |
libpng
|
a901eb3ce6087e0afeef988247f1a1aa208cb54d
| 267,840,865,266,898,800,000,000,000,000,000,000,000 | 60 |
[libpng16] Prevent reading over-length PLTE chunk (Cosmin Truta).
|
gst_rmdemux_parse_indx (GstRMDemux * rmdemux, const guint8 * data, int length)
{
int n;
int id;
n = RMDEMUX_GUINT32_GET (data);
id = RMDEMUX_GUINT16_GET (data + 4);
rmdemux->index_offset = RMDEMUX_GUINT32_GET (data + 6);
GST_DEBUG_OBJECT (rmdemux, "Number of indices=%d Stream ID=%d length=%d", n,
id, length);
/* Point to the next index_stream */
rmdemux->index_stream = gst_rmdemux_get_stream_by_id (rmdemux, id);
/* Return the length of the index */
return 14 * n;
}
| 0 |
[] |
gst-plugins-ugly
|
9726aaf78e6643a5955864f444852423de58de29
| 332,070,619,800,393,500,000,000,000,000,000,000,000 | 18 |
rmdemux: Make sure we have enough data available when parsing audio/video packets
Otherwise there will be out-of-bounds reads and potential crashes.
Thanks to Natalie Silvanovich for reporting.
Fixes https://gitlab.freedesktop.org/gstreamer/gst-plugins-ugly/-/issues/37
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-ugly/-/merge_requests/75>
|
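A self-contained sketch of the kind of guard this commit adds (illustrative; the upstream fix covers several packet parsers): verify the packet actually contains the bytes about to be read before dereferencing them.

static int parse_indx_checked(const unsigned char *data, int length)
{
    if (length < 10)   /* 4-byte count + 2-byte id + 4-byte offset */
        return -1;
    /* safe to read the fixed-size header fields now */
    return 0;
}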