Dataset schema. Each record below lists these eight fields, one after another, in this order:

  func        string     lengths 0 to 484k (the function's source code)
  target      int64      values 0 to 1
  cwe         list       lengths 0 to 4
  project     string     799 distinct values
  commit_id   string     length 40
  hash        float64    1,215,700,430,453,689,100,000,000B to 340,281,914,521,452,260,000,000,000,000B
  size        int64      1 to 24k
  message     string     lengths 0 to 13.3k
static int ll_temac_ethtools_set_ringparam(struct net_device *ndev,
					   struct ethtool_ringparam *ering)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->rx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}
0
[ "CWE-120", "CWE-787" ]
linux
c364df2489b8ef2f5e3159b1dff1ff1fdb16040d
6,708,159,548,838,287,000,000,000,000,000,000,000
18
net: ll_temac: Fix TX BD buffer overwrite Just as the initial check, we need to ensure num_frag+1 buffers available, as that is the number of buffers we are going to use. This fixes a buffer overflow, which might be seen during heavy network load. Complete lockup of TEMAC was reproducible within about 10 minutes of a particular load. Fixes: 84823ff80f74 ("net: ll_temac: Fix race condition causing TX hang") Cc: [email protected] # v5.4+ Signed-off-by: Esben Haabendal <[email protected]> Signed-off-by: David S. Miller <[email protected]>
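The check described in this message can be sketched against the driver's transmit path. A minimal sketch, assuming the ll_temac driver's temac_check_tx_bd_space() helper and the local names from temac_start_xmit(); treat it as a sketch rather than the exact patch:

	/* Require room for num_frag + 1 descriptors: one BD for the linear
	 * head of the frame plus one per fragment. */
	if (temac_check_tx_bd_space(lp, num_frag + 1)) {
		netif_stop_queue(ndev);	/* back off; retry when BDs free up */
		return NETDEV_TX_BUSY;
	}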
output_buffer& operator<<(output_buffer& output, const RecordLayerHeader& hdr)
{
    output[AUTO] = hdr.type_;
    output[AUTO] = hdr.version_.major_;
    output[AUTO] = hdr.version_.minor_;

    // length
    byte tmp[2];
    c16toa(hdr.length_, tmp);
    output[AUTO] = tmp[0];
    output[AUTO] = tmp[1];

    return output;
}
0
[]
mysql-server
b9768521bdeb1a8069c7b871f4536792b65fd79b
133,951,193,178,051,900,000,000,000,000,000,000,000
14
Updated yassl to yassl-2.3.8 (cherry picked from commit 7f9941eab55ed672bfcccd382dafbdbcfdc75aaa)
void read( Protocol_* iprot, const TypeInfo& typeInfo, ProtocolReaderStructReadState<Protocol_>& readState, void* object) { using WireTypeInfo = ProtocolReaderWireTypeInfo<Protocol_>; using WireType = typename WireTypeInfo::WireType; switch (typeInfo.type) { case protocol::TType::T_STRUCT: readState.beforeSubobject(iprot); read<Protocol_>( iprot, *static_cast<const StructInfo*>(typeInfo.typeExt), typeInfo.set ? invokeSet(typeInfo.set, object) : object); readState.afterSubobject(iprot); break; case protocol::TType::T_I64: { std::int64_t temp; iprot->readI64(temp); reinterpret_cast<void (*)(void*, std::int64_t)>(typeInfo.set)( object, temp); break; } case protocol::TType::T_I32: { std::int32_t temp; iprot->readI32(temp); reinterpret_cast<void (*)(void*, std::int32_t)>(typeInfo.set)( object, temp); break; } case protocol::TType::T_I16: { std::int16_t temp; iprot->readI16(temp); reinterpret_cast<void (*)(void*, std::int16_t)>(typeInfo.set)( object, temp); break; } case protocol::TType::T_BYTE: { std::int8_t temp; iprot->readByte(temp); reinterpret_cast<void (*)(void*, std::int8_t)>(typeInfo.set)( object, temp); break; } case protocol::TType::T_BOOL: { bool temp; iprot->readBool(temp); reinterpret_cast<void (*)(void*, bool)>(typeInfo.set)(object, temp); break; } case protocol::TType::T_DOUBLE: { double temp; iprot->readDouble(temp); reinterpret_cast<void (*)(void*, double)>(typeInfo.set)(object, temp); break; } case protocol::TType::T_FLOAT: { float temp; iprot->readFloat(temp); reinterpret_cast<void (*)(void*, float)>(typeInfo.set)(object, temp); break; } case protocol::TType::T_STRING: { switch (*static_cast<const StringFieldType*>(typeInfo.typeExt)) { case StringFieldType::String: iprot->readString(*static_cast<std::string*>(object)); break; case StringFieldType::IOBuf: iprot->readBinary(*static_cast<folly::IOBuf*>(object)); break; case StringFieldType::IOBufPtr: iprot->readBinary( *static_cast<std::unique_ptr<folly::IOBuf>*>(object)); break; } break; } case protocol::TType::T_MAP: { readState.beforeSubobject(iprot); // Initialize the container to clear out current values. 
auto* actualObject = invokeSet(typeInfo.set, object); const MapFieldExt& ext = *static_cast<const MapFieldExt*>(typeInfo.typeExt); std::uint32_t size = ~0; WireType reportedKeyType = WireTypeInfo::defaultValue(); WireType reportedMappedType = WireTypeInfo::defaultValue(); iprot->readMapBegin(reportedKeyType, reportedMappedType, size); struct Context { const TypeInfo* keyInfo; const TypeInfo* valInfo; Protocol_* iprot; ProtocolReaderStructReadState<Protocol_>& readState; }; const Context context = { ext.keyInfo, ext.valInfo, iprot, readState, }; auto const keyReader = [](const void* context, void* key) { const auto& typedContext = *static_cast<const Context*>(context); read( typedContext.iprot, *typedContext.keyInfo, typedContext.readState, key); }; auto const valueReader = [](const void* context, void* val) { const auto& typedContext = *static_cast<const Context*>(context); read( typedContext.iprot, *typedContext.valInfo, typedContext.readState, val); }; if (iprot->kOmitsContainerSizes()) { while (iprot->peekMap()) { ext.consumeElem(&context, actualObject, keyReader, valueReader); } } else { if (size > 0 && (ext.keyInfo->type != reportedKeyType || ext.valInfo->type != reportedMappedType)) { skip_n(*iprot, size, {reportedKeyType, reportedMappedType}); } else { if (!canReadNElements( *iprot, size, {reportedKeyType, reportedMappedType})) { protocol::TProtocolException::throwTruncatedData(); } ext.readMap(&context, actualObject, size, keyReader, valueReader); } } iprot->readMapEnd(); readState.afterSubobject(iprot); break; } case protocol::TType::T_SET: { readState.beforeSubobject(iprot); // Initialize the container to clear out current values. auto* actualObject = invokeSet(typeInfo.set, object); const SetFieldExt& ext = *static_cast<const SetFieldExt*>(typeInfo.typeExt); std::uint32_t size = ~0; WireType reportedType = WireTypeInfo::defaultValue(); iprot->readSetBegin(reportedType, size); struct Context { const TypeInfo* valInfo; Protocol_* iprot; ProtocolReaderStructReadState<Protocol_>& readState; }; const Context context = { ext.valInfo, iprot, readState, }; auto const reader = [](const void* context, void* value) { const auto& typedContext = *static_cast<const Context*>(context); read( typedContext.iprot, *typedContext.valInfo, typedContext.readState, value); }; if (iprot->kOmitsContainerSizes()) { while (iprot->peekSet()) { ext.consumeElem(&context, actualObject, reader); } } else { if (reportedType != ext.valInfo->type) { skip_n(*iprot, size, {reportedType}); } else { if (!canReadNElements(*iprot, size, {reportedType})) { protocol::TProtocolException::throwTruncatedData(); } ext.readSet(&context, actualObject, size, reader); } } iprot->readSetEnd(); readState.afterSubobject(iprot); break; } case protocol::TType::T_LIST: { readState.beforeSubobject(iprot); // Initialize the container to clear out current values. 
auto* actualObject = invokeSet(typeInfo.set, object); const ListFieldExt& ext = *static_cast<const ListFieldExt*>(typeInfo.typeExt); std::uint32_t size = ~0; WireType reportedType = WireTypeInfo::defaultValue(); iprot->readListBegin(reportedType, size); struct Context { const TypeInfo* valInfo; Protocol_* iprot; ProtocolReaderStructReadState<Protocol_>& readState; }; const Context context = { ext.valInfo, iprot, readState, }; auto const reader = [](const void* context, void* value) { const auto& typedContext = *static_cast<const Context*>(context); read( typedContext.iprot, *typedContext.valInfo, typedContext.readState, value); }; if (iprot->kOmitsContainerSizes()) { while (iprot->peekList()) { ext.consumeElem(&context, actualObject, reader); } } else { if (reportedType != ext.valInfo->type) { skip_n(*iprot, size, {reportedType}); } else { if (!canReadNElements(*iprot, size, {reportedType})) { protocol::TProtocolException::throwTruncatedData(); } ext.readList(&context, actualObject, size, reader); } } iprot->readListEnd(); readState.afterSubobject(iprot); break; } case protocol::TType::T_STOP: case protocol::TType::T_VOID: case protocol::TType::T_UTF8: case protocol::TType::T_U64: case protocol::TType::T_UTF16: case protocol::TType::T_STREAM: skip(iprot, readState); } }
0
[ "CWE-763" ]
fbthrift
bfda1efa547dce11a38592820916db01b05b9339
66,802,580,237,767,380,000,000,000,000,000,000,000
236
Fix handling of invalid union data in table-based serializer Summary: Fix handling of invalid union data in the table-based serializer. Previously if the input contained duplicate union data, previous active member of the union was overwritten without calling the destructor of the old object, potentially causing a memory leak. In addition to that, if the second piece of data was incomplete the wrong destructor would be called during stack unwinding causing a segfault, data corruption or other undesirable effects. Fix the issue by clearing the union if there is an active member. Also fix the type of the data member that holds the active field id (it's `int`, not `FieldID`). Reviewed By: yfeldblum Differential Revision: D26440248 fbshipit-source-id: fae9ab96566cf07e14dabe9663b2beb680a01bb4
static void __sched notrace __schedule(bool preempt) { struct task_struct *prev, *next; unsigned long *switch_count; struct rq *rq; int cpu; cpu = smp_processor_id(); rq = cpu_rq(cpu); prev = rq->curr; /* * do_exit() calls schedule() with preemption disabled as an exception; * however we must fix that up, otherwise the next task will see an * inconsistent (higher) preempt count. * * It also avoids the below schedule_debug() test from complaining * about this. */ if (unlikely(prev->state == TASK_DEAD)) preempt_enable_no_resched_notrace(); schedule_debug(prev); if (sched_feat(HRTICK)) hrtick_clear(rq); local_irq_disable(); rcu_note_context_switch(); /* * Make sure that signal_pending_state()->signal_pending() below * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) * done by the caller to avoid the race with signal_wake_up(). */ smp_mb__before_spinlock(); raw_spin_lock(&rq->lock); lockdep_pin_lock(&rq->lock); rq->clock_skip_update <<= 1; /* promote REQ to ACT */ switch_count = &prev->nivcsw; if (!preempt && prev->state) { if (unlikely(signal_pending_state(prev->state, prev))) { prev->state = TASK_RUNNING; } else { deactivate_task(rq, prev, DEQUEUE_SLEEP); prev->on_rq = 0; /* * If a worker went to sleep, notify and ask workqueue * whether it wants to wake up a task to maintain * concurrency. */ if (prev->flags & PF_WQ_WORKER) { struct task_struct *to_wakeup; to_wakeup = wq_worker_sleeping(prev); if (to_wakeup) try_to_wake_up_local(to_wakeup); } } switch_count = &prev->nvcsw; } if (task_on_rq_queued(prev)) update_rq_clock(rq); next = pick_next_task(rq, prev); clear_tsk_need_resched(prev); clear_preempt_need_resched(); rq->clock_skip_update = 0; if (likely(prev != next)) { rq->nr_switches++; rq->curr = next; ++*switch_count; trace_sched_switch(preempt, prev, next); rq = context_switch(rq, prev, next); /* unlocks the rq */ } else { lockdep_unpin_lock(&rq->lock); raw_spin_unlock_irq(&rq->lock); } balance_callback(rq); }
0
[ "CWE-119" ]
linux
29d6455178a09e1dc340380c582b13356227e8df
126,699,862,850,440,160,000,000,000,000,000,000,000
87
sched: panic on corrupted stack end Until now, hitting this BUG_ON caused a recursive oops (because oops handling involves do_exit(), which calls into the scheduler, which in turn raises an oops), which caused stuff below the stack to be overwritten until a panic happened (e.g. via an oops in interrupt context, caused by the overwritten CPU index in the thread_info). Just panic directly. Signed-off-by: Jann Horn <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
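The message reduces to replacing a BUG_ON() with a direct panic in the scheduler's debug check. A minimal sketch, assuming the existing task_stack_end_corrupted() helper in schedule_debug():

	/* Panic immediately: an oops here would call do_exit(), re-enter
	 * schedule() and recurse until memory below the stack is trashed. */
	if (task_stack_end_corrupted(prev))
		panic("corrupted stack end detected inside scheduler\n");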
png_set_filter_heuristics_fixed(png_structrp png_ptr, int heuristic_method,
    int num_weights, png_const_fixed_point_p filter_weights,
    png_const_fixed_point_p filter_costs)
{
   PNG_UNUSED(png_ptr)
   PNG_UNUSED(heuristic_method)
   PNG_UNUSED(num_weights)
   PNG_UNUSED(filter_weights)
   PNG_UNUSED(filter_costs)
}
0
[ "CWE-120" ]
libpng
81f44665cce4cb1373f049a76f3904e981b7a766
19,167,300,783,713,377,000,000,000,000,000,000,000
10
[libpng16] Reject attempt to write over-length PLTE chunk
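The stub above is unrelated to the fix itself; the commit title implies a bounds check in the PLTE writer. A hedged sketch using libpng's PNG_MAX_PALETTE_LENGTH constant, with the surrounding variable names assumed:

   /* A PLTE chunk may hold at most PNG_MAX_PALETTE_LENGTH entries;
    * refuse anything longer (or empty) instead of writing it out. */
   if (num_palette == 0 || num_palette > PNG_MAX_PALETTE_LENGTH)
      png_error(png_ptr, "Invalid number of colors in palette");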
void cil_destroy_classorder(struct cil_classorder *classorder)
{
	if (classorder == NULL) {
		return;
	}

	if (classorder->class_list_str != NULL) {
		cil_list_destroy(&classorder->class_list_str, 1);
	}

	free(classorder);
}
0
[ "CWE-125" ]
selinux
340f0eb7f3673e8aacaf0a96cbfcd4d12a405521
276,049,678,873,916,060,000,000,000,000,000,000,000
12
libsepol/cil: Check for statements not allowed in optional blocks While there are some checks for invalid statements in an optional block when resolving the AST, there are no checks when building the AST. OSS-Fuzz found the following policy which caused a null dereference in cil_tree_get_next_path(). (blockinherit b3) (sid SID) (sidorder(SID)) (optional o (ibpkeycon :(1 0)s) (block b3 (filecon""block()) (filecon""block()))) The problem is that the blockinherit copies block b3 before the optional block is disabled. When the optional is disabled, block b3 is deleted along with everything else in the optional. Later, when filecon statements with the same path are found an error message is produced and in trying to find out where the block was copied from, the reference to the deleted block is used. The error handling code assumes (rightly) that if something was copied from a block then that block should still exist. It is clear that in-statements, blocks, and macros cannot be in an optional, because that allows nodes to be copied from the optional block to somewhere outside even though the optional could be disabled later. When optionals are disabled the AST is reset and the resolution is restarted at the point of resolving macro calls, so anything resolved before macro calls will never be re-resolved. This includes tunableifs, in-statements, blockinherits, blockabstracts, and macro definitions. Tunable declarations also cannot be in an optional block because they are needed to resolve tunableifs. It should be fine to allow blockinherit statements in an optional, because that is copying nodes from outside the optional to the optional and if the optional is later disabled, everything will be deleted anyway. Check and quit with an error if a tunable declaration, in-statement, block, blockabstract, or macro definition is found within an optional when either building or resolving the AST. Signed-off-by: James Carter <[email protected]>
static void __init ip4_frags_ctl_register(void)
{
	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
0
[ "CWE-20" ]
linux
5d407b071dc369c26a38398326ee2be53651cfe4
10,696,727,607,383,381,000,000,000,000,000,000,000
4
ip: frags: fix crash in ip_do_fragment() A kernel crash occurs when a defragmented packet is fragmented in ip_do_fragment(). In the defragment routine, skb_orphan() is called and skb->ip_defrag_offset is set, but skb->sk and skb->ip_defrag_offset are the same union member, so frag->sk is not NULL. Hence a crash occurs in the skb->sk check in ip_do_fragment() when a defragmented packet is fragmented.

test commands:
  %iptables -t nat -I POSTROUTING -j MASQUERADE
  %hping3 192.168.4.2 -s 1000 -p 2000 -d 60000

splat looks like:
[ 261.069429] kernel BUG at net/ipv4/ip_output.c:636!
[ 261.075753] invalid opcode: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN PTI
[ 261.083854] CPU: 1 PID: 1349 Comm: hping3 Not tainted 4.19.0-rc2+ #3
[ 261.100977] RIP: 0010:ip_do_fragment+0x1613/0x2600
[ 261.106945] Code: e8 e2 38 e3 fe 4c 8b 44 24 18 48 8b 74 24 08 e9 92 f6 ff ff 80 3c 02 00 0f 85 da 07 00 00 48 8b b5 d0 00 00 00 e9 25 f6 ff ff <0f> 0b 0f 0b 44 8b 54 24 58 4c 8b 4c 24 18 4c 8b 5c 24 60 4c 8b 6c
[ 261.127015] RSP: 0018:ffff8801031cf2c0 EFLAGS: 00010202
[ 261.134156] RAX: 1ffff1002297537b RBX: ffffed0020639e6e RCX: 0000000000000004
[ 261.142156] RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff880114ba9bd8
[ 261.150157] RBP: ffff880114ba8a40 R08: ffffed0022975395 R09: ffffed0022975395
[ 261.158157] R10: 0000000000000001 R11: ffffed0022975394 R12: ffff880114ba9ca4
[ 261.166159] R13: 0000000000000010 R14: ffff880114ba9bc0 R15: dffffc0000000000
[ 261.174169] FS: 00007fbae2199700(0000) GS:ffff88011b400000(0000) knlGS:0000000000000000
[ 261.183012] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 261.189013] CR2: 00005579244fe000 CR3: 0000000119bf4000 CR4: 00000000001006e0
[ 261.198158] Call Trace:
[ 261.199018] ? dst_output+0x180/0x180
[ 261.205011] ? save_trace+0x300/0x300
[ 261.209018] ? ip_copy_metadata+0xb00/0xb00
[ 261.213034] ? sched_clock_local+0xd4/0x140
[ 261.218158] ? kill_l4proto+0x120/0x120 [nf_conntrack]
[ 261.223014] ? rt_cpu_seq_stop+0x10/0x10
[ 261.227014] ? find_held_lock+0x39/0x1c0
[ 261.233008] ip_finish_output+0x51d/0xb50
[ 261.237006] ? ip_fragment.constprop.56+0x220/0x220
[ 261.243011] ? nf_ct_l4proto_register_one+0x5b0/0x5b0 [nf_conntrack]
[ 261.250152] ? rcu_is_watching+0x77/0x120
[ 261.255010] ? nf_nat_ipv4_out+0x1e/0x2b0 [nf_nat_ipv4]
[ 261.261033] ? nf_hook_slow+0xb1/0x160
[ 261.265007] ip_output+0x1c7/0x710
[ 261.269005] ? ip_mc_output+0x13f0/0x13f0
[ 261.273002] ? __local_bh_enable_ip+0xe9/0x1b0
[ 261.278152] ? ip_fragment.constprop.56+0x220/0x220
[ 261.282996] ? nf_hook_slow+0xb1/0x160
[ 261.287007] raw_sendmsg+0x21f9/0x4420
[ 261.291008] ? dst_output+0x180/0x180
[ 261.297003] ? sched_clock_cpu+0x126/0x170
[ 261.301003] ? find_held_lock+0x39/0x1c0
[ 261.306155] ? stop_critical_timings+0x420/0x420
[ 261.311004] ? check_flags.part.36+0x450/0x450
[ 261.315005] ? _raw_spin_unlock_irq+0x29/0x40
[ 261.320995] ? _raw_spin_unlock_irq+0x29/0x40
[ 261.326142] ? cyc2ns_read_end+0x10/0x10
[ 261.330139] ? raw_bind+0x280/0x280
[ 261.334138] ? sched_clock_cpu+0x126/0x170
[ 261.338995] ? check_flags.part.36+0x450/0x450
[ 261.342991] ? __lock_acquire+0x4500/0x4500
[ 261.348994] ? inet_sendmsg+0x11c/0x500
[ 261.352989] ? dst_output+0x180/0x180
[ 261.357012] inet_sendmsg+0x11c/0x500
[ ... ]

v2:
 - clear skb->sk at reassembly routine. (Eric Dumazet)

Fixes: fa0f527358bd ("ip: use rb trees for IP frag queue.") Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Taehee Yoo <[email protected]> Reviewed-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
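The v2 note above pins down the one-line nature of the fix. A minimal sketch, assuming it lands in the reassembly routine right before the rebuilt skb is handed back:

	/* skb->sk and skb->ip_defrag_offset share a union, so the defrag
	 * code must clear sk here, or ip_do_fragment()'s skb->sk check
	 * trips when the reassembled packet is fragmented again. */
	head->sk = NULL;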
static int pop_capabilities(struct PopAccountData *adata, int mode) { char buf[1024]; /* don't check capabilities on reconnect */ if (adata->capabilities) return 0; /* init capabilities */ if (mode == 0) { adata->cmd_capa = false; adata->cmd_stls = false; adata->cmd_user = 0; adata->cmd_uidl = 0; adata->cmd_top = 0; adata->resp_codes = false; adata->expire = true; adata->login_delay = 0; mutt_buffer_init(&adata->auth_list); } /* Execute CAPA command */ if ((mode == 0) || adata->cmd_capa) { mutt_str_strfcpy(buf, "CAPA\r\n", sizeof(buf)); switch (pop_fetch_data(adata, buf, NULL, fetch_capa, adata)) { case 0: { adata->cmd_capa = true; break; } case -1: return -1; } } /* CAPA not supported, use defaults */ if ((mode == 0) && !adata->cmd_capa) { adata->cmd_user = 2; adata->cmd_uidl = 2; adata->cmd_top = 2; mutt_str_strfcpy(buf, "AUTH\r\n", sizeof(buf)); if (pop_fetch_data(adata, buf, NULL, fetch_auth, adata) == -1) return -1; } /* Check capabilities */ if (mode == 2) { char *msg = NULL; if (!adata->expire) msg = _("Unable to leave messages on server"); if (adata->cmd_top == 0) msg = _("Command TOP is not supported by server"); if (adata->cmd_uidl == 0) msg = _("Command UIDL is not supported by server"); if (msg && adata->cmd_capa) { mutt_error(msg); return -2; } adata->capabilities = true; } return 0; }
0
[ "CWE-94", "CWE-74" ]
neomutt
fb013ec666759cb8a9e294347c7b4c1f597639cc
90,304,349,430,468,430,000,000,000,000,000,000,000
71
tls: clear data after a starttls acknowledgement After a starttls acknowledgement message, clear the buffers of any incoming data / commands. This will ensure that all future data is handled securely. Co-authored-by: Pietro Cerutti <[email protected]>
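A plausible shape for the drain the message describes, assuming NeoMutt's mutt_socket_poll()/mutt_socket_read() API; it would be called once the server acknowledges STARTTLS, so no pre-TLS plaintext is ever parsed:

/* Hedged sketch: discard all pending input on the connection. */
void mutt_socket_empty(struct Connection *conn)
{
  char buf[1024];

  while (mutt_socket_poll(conn, 0) > 0)
    mutt_socket_read(conn, buf, sizeof(buf));
}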
int caca_set_dither_brightness(caca_dither_t *d, float brightness)
{
    /* FIXME */
    d->brightness = brightness;
    return 0;
}
0
[ "CWE-369" ]
libcaca
84bd155087b93ab2d8d7cb5b1ac94ecd4cf4f93c
318,679,816,962,336,620,000,000,000,000,000,000,000
7
dither: fix integer overflows that were causing a division by zero. Fixes: #36 (CVE-2018-20544)
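The message attributes the division by zero to integer overflows in the dither math. Since the affected function is not shown in this record, here is only a hedged sketch of the guard pattern such a fix applies before any computed divisor is used; the helper name is hypothetical, not libcaca API:

/* Hypothetical helper: divide defensively instead of faulting when an
 * overflowed scale factor has collapsed to zero. */
static int safe_div(int num, int den)
{
    return den ? num / den : 0;
}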
int task_set_sequence_head(davs2_mgr_t *mgr, davs2_seq_t *seq)
{
    int ret = 0;

    davs2_thread_mutex_lock(&mgr->mutex_mgr);
    davs2_reconfigure_decoder(mgr);

    if (seq->valid_flag) {
        int newres = (mgr->seq_info.head.height != seq->head.height ||
                      mgr->seq_info.head.width != seq->head.width);

        memcpy(&mgr->seq_info, seq, sizeof(davs2_seq_t));

        if (newres) {
            /* resolution changed : new sequence */
            davs2_log(mgr, DAVS2_LOG_INFO, "Sequence Resolution: %dx%d.",
                      seq->head.width, seq->head.height);
            if ((seq->head.width & 0) != 0 || (seq->head.height & 1) != 0) {
                davs2_log(mgr, DAVS2_LOG_ERROR, "Sequence Resolution %dx%d is not even\n",
                          seq->head.width, seq->head.height);
            }

            /* COI for the new sequence should be reset */
            mgr->i_tr_wrap_cnt = 0;
            mgr->i_prev_coi = -1;

            destroy_dpb(mgr);
            if (create_dpb(mgr) < 0) {
                /* error */
                ret = -1;
                memset(&mgr->seq_info, 0, sizeof(davs2_seq_t));
                davs2_log(mgr, DAVS2_LOG_ERROR, "failed to create dpb buffers. %dx%d.",
                          seq->head.width, seq->head.height);
            }
            mgr->new_sps = TRUE;
        }
    } else {
        /* invalid header */
        memset(&mgr->seq_info, 0, sizeof(davs2_seq_t));
        davs2_log(mgr, DAVS2_LOG_ERROR, "decoded an invalid sequence header: %dx%d.",
                  seq->head.width, seq->head.height);
    }

    davs2_thread_mutex_unlock(&mgr->mutex_mgr);
    return ret;
}
0
[ "CWE-787" ]
davs2
b41cf117452e2d73d827f02d3e30aa20f1c721ac
197,200,498,808,307,170,000,000,000,000,000,000,000
44
Add checking for `frame_rate_code`. Signed-off-by: luofl <[email protected]>
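The commit title implies a range check before frame_rate_code is used, presumably as a frame-rate table index. A hedged sketch in the style of the function above; the field name and the valid range 1..8 are assumptions, not taken from the davs2 source:

    if (seq->head.frame_rate_code < 1 || seq->head.frame_rate_code > 8) {
        /* assumed valid range; reject before any table lookup */
        davs2_log(mgr, DAVS2_LOG_ERROR, "invalid frame_rate_code: %d",
                  seq->head.frame_rate_code);
        return -1;
    }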
int tcp_mmap(struct file *file, struct socket *sock,
	     struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;
	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);

	/* Instruct vm_insert_page() to not down_read(mmap_sem) */
	vma->vm_flags |= VM_MIXEDMAP;

	vma->vm_ops = &tcp_vm_ops;
	return 0;
}
0
[ "CWE-190" ]
net
3b4929f65b0d8249f19a50245cd88ed1a2f78cff
55,318,619,506,495,100,000,000,000,000,000,000,000
13
tcp: limit payload size of sacked skbs Jonathan Looney reported that TCP can trigger the following crash in tcp_shifted_skb():

    BUG_ON(tcp_skb_pcount(skb) < pcount);

This can happen if the remote peer has advertised the smallest MSS that linux TCP accepts: 48. An skb can hold 17 fragments, and each fragment can hold 32KB on x86, or 64KB on PowerPC. This means that the 16-bit width of TCP_SKB_CB(skb)->tcp_gso_segs can overflow. Note that tcp_sendmsg() builds skbs with less than 64KB of payload, so this problem needs SACK to be enabled. SACK blocks allow TCP to coalesce multiple skbs in the retransmit queue, thus filling the 17 fragments to maximal capacity.

CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs

Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing") Signed-off-by: Eric Dumazet <[email protected]> Reported-by: Jonathan Looney <[email protected]> Acked-by: Neal Cardwell <[email protected]> Reviewed-by: Tyler Hicks <[email protected]> Cc: Yuchung Cheng <[email protected]> Cc: Bruce Curtis <[email protected]> Cc: Jonathan Lemon <[email protected]> Signed-off-by: David S. Miller <[email protected]>
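The description translates naturally into a guard around skb coalescing. A sketch close to the shape of such a fix; TCP_MIN_GSO_SIZE is assumed to be the minimal gso_size the patch works with:

/* Refuse a shift that could push the 16-bit tcp_gso_segs past 65535. */
static int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from,
			 int pcount, int shiftlen)
{
	/* With a minimal gso_size, gso_segs can only overflow once the
	 * combined payload grows past 65535 * TCP_MIN_GSO_SIZE bytes. */
	if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE))
		return 0;
	if (unlikely(tcp_skb_pcount(to) + pcount > 65535))
		return 0;
	return skb_shift(to, from, shiftlen);
}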
get_scriptlocal_funcname(char_u *funcname)
{
    char	sid_buf[25];
    int		off;
    char_u	*newname;

    if (funcname == NULL)
	return NULL;

    if (STRNCMP(funcname, "s:", 2) != 0
	    && STRNCMP(funcname, "<SID>", 5) != 0)
	// The function name is not a script-local function name
	return NULL;

    if (!SCRIPT_ID_VALID(current_sctx.sc_sid))
    {
	emsg(_(e_using_sid_not_in_script_context));
	return NULL;
    }

    // Expand s: prefix into <SNR>nr_<name>
    vim_snprintf(sid_buf, sizeof(sid_buf), "<SNR>%ld_",
	    (long)current_sctx.sc_sid);
    off = *funcname == 's' ? 2 : 5;
    newname = alloc(STRLEN(sid_buf) + STRLEN(funcname + off) + 1);
    if (newname == NULL)
	return NULL;
    STRCPY(newname, sid_buf);
    STRCAT(newname, funcname + off);

    return newname;
}
0
[ "CWE-416" ]
vim
9c23f9bb5fe435b28245ba8ac65aa0ca6b902c04
311,262,373,448,833,450,000,000,000,000,000,000,000
31
patch 8.2.3902: Vim9: double free with nested :def function Problem: Vim9: double free with nested :def function. Solution: Pass "line_to_free" from compile_def_function() and make sure cmdlinep is valid.
static int pfkey_sendmsg(struct kiocb *kiocb, struct socket *sock,
			 struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb = NULL;
	struct sadb_msg *hdr = NULL;
	int err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags & MSG_OOB)
		goto out;

	err = -EMSGSIZE;
	if ((unsigned int)len > sk->sk_sndbuf - 32)
		goto out;

	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len))
		goto out;

	hdr = pfkey_get_base_msg(skb, &err);
	if (!hdr)
		goto out;

	mutex_lock(&xfrm_cfg_mutex);
	err = pfkey_process(sk, skb, hdr);
	mutex_unlock(&xfrm_cfg_mutex);

out:
	if (err && hdr && pfkey_error(hdr, err, sk) == 0)
		err = 0;
	kfree_skb(skb);

	return err ? : len;
}
0
[ "CWE-20", "CWE-269" ]
linux
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
204,933,672,892,195,260,000,000,000,000,000,000,000
40
net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
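The contract the message imposes on every recvmsg handler fits in a few lines; this is a hedged composite, and the address family below is just an example:

	/* msg_namelen now arrives as 0 and is only set once msg_name has
	 * actually been filled, so no uninitialized bytes ever leak out. */
	if (msg->msg_name) {
		struct sockaddr_ax25 *sax = msg->msg_name;	/* example family */

		memset(sax, 0, sizeof(*sax));
		/* ... fill in the peer address ... */
		msg->msg_namelen = sizeof(*sax);
	}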
njs_vm_init(njs_vm_t *vm) { njs_int_t ret; njs_frame_t *frame; frame = (njs_frame_t *) njs_function_frame_alloc(vm, NJS_FRAME_SIZE); if (njs_slow_path(frame == NULL)) { njs_memory_error(vm); return NJS_ERROR; } frame->exception.catch = NULL; frame->exception.next = NULL; frame->previous_active_frame = NULL; vm->active_frame = frame; ret = njs_regexp_init(vm); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } ret = njs_builtin_objects_clone(vm, &vm->global_value); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } njs_lvlhsh_init(&vm->values_hash); njs_lvlhsh_init(&vm->keywords_hash); njs_lvlhsh_init(&vm->modules_hash); njs_lvlhsh_init(&vm->events_hash); njs_queue_init(&vm->posted_events); njs_queue_init(&vm->promise_events); return NJS_OK; }
0
[ "CWE-416" ]
njs
6a07c2156a07ef307b6dcf3c2ca8571a5f1af7a6
91,283,841,222,998,090,000,000,000,000,000,000,000
37
Fixed recursive async function calls. Previously, PromiseCapability record was stored (function->context) directly in function object during a function invocation. This is not correct, because PromiseCapability record should be linked to current execution context. As a result, function->context is overwritten with consecutive recursive calls which results in use-after-free. This closes #451 issue on Github.
ebb_ews_mailbox_to_contact (EBookBackendEws *bbews,
			    EContact **contact,
			    GHashTable *values,
			    const EwsMailbox *mb)
{
	CamelInternetAddress *addr;
	gchar *value;

	if (!mb->name && !mb->email)
		return;

	addr = camel_internet_address_new ();
	camel_internet_address_add (addr, mb->name, mb->email ? mb->email : "");
	value = camel_address_encode (CAMEL_ADDRESS (addr));

	if (value && (!values || g_hash_table_lookup (values, value) == NULL)) {
		EVCardAttribute *attr;

		attr = e_vcard_attribute_new (NULL, EVC_EMAIL);
		e_vcard_attribute_add_value (attr, value);
		e_vcard_append_attribute (E_VCARD (*contact), attr);

		if (values)
			g_hash_table_insert (values, g_strdup (value), GINT_TO_POINTER (1));
	} else {
		g_free (value);
	}

	g_object_unref (addr);
}
0
[ "CWE-295" ]
evolution-ews
915226eca9454b8b3e5adb6f2fff9698451778de
30,435,918,964,133,596,000,000,000,000,000,000,000
31
I#27 - SSL Certificates are not validated This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too. Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
anniversary_populate (EContact *contact, gchar **values)
{
	if (values[0]) {
		EContactDate *dt = e_contact_date_from_string (values[0]);
		e_contact_set (contact, E_CONTACT_ANNIVERSARY, dt);
		e_contact_date_free (dt);
	}
}
0
[]
evolution-data-server
34bad61738e2127736947ac50e0c7969cc944972
332,146,218,768,367,340,000,000,000,000,000,000,000
9
Bug 796174 - strcat() considered unsafe for buffer overflow
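Since this codebase is GLib-based, the usual remedy for the strcat() pattern flagged here is to build the string with an allocating helper instead of appending into a fixed buffer. A hedged sketch; 'prefix', 'value' and the consumer are illustrative names:

	/* Let GLib size the result rather than strcat() into a fixed buffer. */
	gchar *joined = g_strconcat (prefix, value, NULL);
	/* ... use 'joined' ... */
	g_free (joined);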
void CLASS layer_thumb()
{
  int i, c;
  char *thumb, map[][4] = { "012","102" };

  colors = thumb_misc >> 5 & 7;
  thumb_length = thumb_width*thumb_height;
  thumb = (char *) calloc (colors, thumb_length);
  merror (thumb, "layer_thumb()");
  fprintf (ofp, "P%d\n%d %d\n255\n",
	5 + (colors >> 1), thumb_width, thumb_height);
  fread (thumb, thumb_length, colors, ifp);
  for (i=0; i < thumb_length; i++)
    FORCC putc (thumb[i+thumb_length*(map[thumb_misc >> 8][c]-'0')], ofp);
  free (thumb);
}
0
[ "CWE-703" ]
LibRaw
11909cc59e712e09b508dda729b99aeaac2b29ad
95,825,303,640,237,130,000,000,000,000,000,000,000
16
cumulated data checks patch
static int nr_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); struct sockaddr_ax25 *usax = (struct sockaddr_ax25 *)msg->msg_name; int err; struct sockaddr_ax25 sax; struct sk_buff *skb; unsigned char *asmptr; int size; if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT)) return -EINVAL; lock_sock(sk); if (sock_flag(sk, SOCK_ZAPPED)) { err = -EADDRNOTAVAIL; goto out; } if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); err = -EPIPE; goto out; } if (nr->device == NULL) { err = -ENETUNREACH; goto out; } if (usax) { if (msg->msg_namelen < sizeof(sax)) { err = -EINVAL; goto out; } sax = *usax; if (ax25cmp(&nr->dest_addr, &sax.sax25_call) != 0) { err = -EISCONN; goto out; } if (sax.sax25_family != AF_NETROM) { err = -EINVAL; goto out; } } else { if (sk->sk_state != TCP_ESTABLISHED) { err = -ENOTCONN; goto out; } sax.sax25_family = AF_NETROM; sax.sax25_call = nr->dest_addr; } SOCK_DEBUG(sk, "NET/ROM: sendto: Addresses built.\n"); /* Build a packet - the conventional user limit is 236 bytes. We can do ludicrously large NetROM frames but must not overflow */ if (len > 65536) { err = -EMSGSIZE; goto out; } SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n"); size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN; if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL) goto out; skb_reserve(skb, size - len); skb_reset_transport_header(skb); /* * Push down the NET/ROM header */ asmptr = skb_push(skb, NR_TRANSPORT_LEN); SOCK_DEBUG(sk, "Building NET/ROM Header.\n"); /* Build a NET/ROM Transport header */ *asmptr++ = nr->your_index; *asmptr++ = nr->your_id; *asmptr++ = 0; /* To be filled in later */ *asmptr++ = 0; /* Ditto */ *asmptr++ = NR_INFO; SOCK_DEBUG(sk, "Built header.\n"); /* * Put the data on the end */ skb_put(skb, len); SOCK_DEBUG(sk, "NET/ROM: Appending user data\n"); /* User data follows immediately after the NET/ROM transport header */ if (memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len)) { kfree_skb(skb); err = -EFAULT; goto out; } SOCK_DEBUG(sk, "NET/ROM: Transmitting buffer\n"); if (sk->sk_state != TCP_ESTABLISHED) { kfree_skb(skb); err = -ENOTCONN; goto out; } nr_output(sk, skb); /* Shove it onto the queue */ err = len; out: release_sock(sk); return err; }
0
[ "CWE-200" ]
linux-2.6
f6b97b29513950bfbf621a83d85b6f86b39ec8db
57,652,077,106,035,320,000,000,000,000,000,000,000
118
netrom: Fix nr_getname() leak nr_getname() can leak kernel memory to user. Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
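The message does not show nr_getname() itself, but the leak class it names has a standard fix: zero the output sockaddr before filling it. A sketch using the AX.25 types NET/ROM returns:

	/* Zero the whole sockaddr before filling it, so padding and unused
	 * fields never carry kernel stack contents back to user space. */
	struct full_sockaddr_ax25 *sax = (struct full_sockaddr_ax25 *)uaddr;

	memset(sax, 0, sizeof(*sax));
	sax->fsa_ax25.sax25_family = AF_NETROM;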
static int utf82u_index(int pos, const char *start)
{
    int uc = 0;
    const char *end = start+pos;

    while ( start<end ) {
	utf8_ildb(&start);
	++uc;
    }

    return( uc );
}
0
[ "CWE-119", "CWE-787" ]
fontforge
626f751752875a0ddd74b9e217b6f4828713573c
204,707,213,178,145,780,000,000,000,000,000,000,000
10
Warn users before discarding their unsaved scripts (#3852) * Warn users before discarding their unsaved scripts This closes #3846.
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
0
[]
linux-2.6
0bd87182d3ab18a32a8e9175d3f68754c58e3432
173,925,341,494,590,600,000,000,000,000,000,000,000
17
fuse: fix kunmap in fuse_ioctl_copy_user Looks like another victim of the confusing kmap() vs kmap_atomic() API differences. Reported-by: Todor Gyumyushev <[email protected]> Signed-off-by: Jens Axboe <[email protected]> Signed-off-by: Miklos Szeredi <[email protected]> Cc: Tejun Heo <[email protected]> Cc: [email protected]
static inline __maybe_unused int h2s_id(const struct h2s *h2s)
{
	return h2s ? h2s->id : 0;
}
0
[ "CWE-119" ]
haproxy
3f0e1ec70173593f4c2b3681b26c04a4ed5fc588
147,019,361,216,202,060,000,000,000,000,000,000,000
4
BUG/CRITICAL: h2: fix incorrect frame length check The incoming H2 frame length was checked against the max_frame_size setting instead of being checked against the bufsize. The max_frame_size only applies to outgoing traffic and not to incoming, so if a large enough frame size is advertised in the SETTINGS frame, a wrapped frame will be defragmented into a temporarily allocated buffer where the second fragment may overflow the heap by up to 16 kB. It is very unlikely that this can be exploited for code execution given that buffers are very short-lived and their addresses not realistically predictable in production, but the likeliness of an immediate crash is absolutely certain. This fix must be backported to 1.8. Many thanks to Jordan Zebor from F5 Networks for reporting this issue in a responsible way.
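A hedged sketch of the corrected check, using haproxy-style names; h2c->dfl is the announced frame length and global.tune.bufsize the demux buffer size, while the error path here is illustrative:

	/* An incoming frame must fit the demux buffer; max_frame_size only
	 * constrains what we send, never what the peer may announce. */
	if (h2c->dfl > global.tune.bufsize) {
		error = H2_ERR_FRAME_SIZE_ERROR;
		goto fail;
	}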
rb_str_comparable(VALUE str1, VALUE str2)
{
    int idx1, idx2;
    int rc1, rc2;

    if (RSTRING_LEN(str1) == 0) return TRUE;
    if (RSTRING_LEN(str2) == 0) return TRUE;
    idx1 = ENCODING_GET(str1);
    idx2 = ENCODING_GET(str2);
    if (idx1 == idx2) return TRUE;
    rc1 = rb_enc_str_coderange(str1);
    rc2 = rb_enc_str_coderange(str2);
    if (rc1 == ENC_CODERANGE_7BIT) {
	if (rc2 == ENC_CODERANGE_7BIT) return TRUE;
	if (rb_enc_asciicompat(rb_enc_from_index(idx2)))
	    return TRUE;
    }
    if (rc2 == ENC_CODERANGE_7BIT) {
	if (rb_enc_asciicompat(rb_enc_from_index(idx1)))
	    return TRUE;
    }
    return FALSE;
}
0
[ "CWE-119" ]
ruby
1c2ef610358af33f9ded3086aa2d70aac03dcac5
166,342,949,385,827,550,000,000,000,000,000,000,000
23
* string.c (rb_str_justify): CVE-2009-4124. Fixes a bug reported by Emmanouel Kellinis <Emmanouel.Kellinis AT kpmg.co.uk>, KPMG London; Patch by nobu. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@26038 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
modify_param_name(param_token *name)
{
  const char *delim1 = memchr (name->b, '*', name->e - name->b);
  const char *delim2 = memrchr (name->b, '*', name->e - name->b);

  int result;

  if(delim1 == NULL)
    {
      result = NOT_RFC2231;
    }
  else if(delim1 == delim2)
    {
      if ((name->e - 1) == delim1)
        {
          result = RFC2231_ENCODING;
        }
      else
        {
          result = RFC2231_NOENCODING;
        }
      name->e = delim1;
    }
  else
    {
      name->e = delim1;
      result = RFC2231_ENCODING;
    }
  return result;
}
0
[ "CWE-20" ]
wget
3e25a9817f47fbb8660cc6a3b2f3eea239526c6c
218,655,186,125,234,300,000,000,000,000,000,000,000
30
Introduce --trust-server-names. Close CVE-2010-2252.
static void k_fn(struct vc_data *vc, unsigned char value, char up_flag)
{
	if (up_flag)
		return;

	if ((unsigned)value < ARRAY_SIZE(func_table)) {
		if (func_table[value])
			puts_queue(vc, func_table[value]);
	} else
		pr_err("k_fn called with value=%d\n", value);
}
1
[ "CWE-416" ]
linux
82e61c3909db51d91b9d3e2071557b6435018b80
284,145,277,611,626,130,000,000,000,000,000,000,000
11
vt: keyboard, extend func_buf_lock to readers Both read-side users of func_table/func_buf need locking. Without that, one can easily confuse the code by repeatedly setting alternating strings like:

    while (1)
        for (a = 0; a < 2; a++) {
            struct kbsentry kbs = {};
            strcpy((char *)kbs.kb_string, a ? ".\n" : "88888\n");
            ioctl(fd, KDSKBSENT, &kbs);
        }

When that program runs, one can get unexpected output by holding F1 (note the unexpected period on the last line):

    .
    88888
    .8888

So protect all accesses to 'func_table' (and func_buf) by preexisting 'func_buf_lock'. It is easy in the 'k_fn' handler as 'puts_queue' is expected not to sleep. On the other hand, KDGKBSENT needs a local (atomic) copy of the string because copy_to_user can sleep. Use the already allocated, but unused 'kbs->kb_string' for that purpose. Note that the program above needs at least CAP_SYS_TTY_CONFIG. This depends on the previous patch and on the func_buf_lock lock added in commit 46ca3f735f34 (tty/vt: fix write/write race in ioctl(KDSKBSENT) handler) in 5.2. Likely fixes CVE-2020-25656. Cc: <[email protected]> Reported-by: Minh Yuan <[email protected]> Signed-off-by: Jiri Slaby <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Greg Kroah-Hartman <[email protected]>
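Applied to the k_fn() handler from this record, the fix brackets the func_table read with the preexisting func_buf_lock; this works because puts_queue() is expected not to sleep. A sketch:

	/* Read side of func_table now holds func_buf_lock, so a concurrent
	 * KDSKBSENT cannot swap the string out from under puts_queue(). */
	unsigned long flags;

	spin_lock_irqsave(&func_buf_lock, flags);
	if ((unsigned)value < ARRAY_SIZE(func_table) && func_table[value])
		puts_queue(vc, func_table[value]);
	spin_unlock_irqrestore(&func_buf_lock, flags);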
static int bin_exports(RCore *r, int mode, ut64 laddr, int va, ut64 at, const char *name, const char *args) {
	return bin_symbols_internal (r, mode, laddr, va, at, name, true, args);
}
0
[ "CWE-125" ]
radare2
1f37c04f2a762500222dda2459e6a04646feeedf
1,000,996,991,145,299,400,000,000,000,000,000,000
3
Fix #9904 - crash in r2_hoobr_r_read_le32 (over 9000 entrypoints) and read_le oobread (#9923)
columnToCell(TScreen *screen, int row, int col, CELL *cell)
{
    while (row < screen->max_row) {
	CLineData *ld = GET_LINEDATA(screen, row);
	int last = LastTextCol(screen, ld, row);

	/* TRACE(("last(%d) = %d, have %d\n", row, last, col)); */
	if (col <= last) {
	    break;
	}
	/*
	 * Stop if the current row does not wrap (does not continue the current
	 * line).
	 */
	if (!LineTstWrapped(ld)) {
	    col = last + 1;
	    break;
	}
	col -= (last + 1);
	++row;
    }
    if (col < 0)
	col = 0;

    cell->row = row;
    cell->col = col;
}
0
[ "CWE-399" ]
xterm-snapshots
82ba55b8f994ab30ff561a347b82ea340ba7075c
2,660,728,979,598,060,000,000,000,000,000,000,000
26
snapshot of project "xterm", label xterm-365d
int gdImageGrayScale(gdImagePtr src)
{
	int x, y;
	int r,g,b,a;
	int new_pxl, pxl;
	typedef int (*FuncPtr)(gdImagePtr, int, int);
	FuncPtr f;

	f = GET_PIXEL_FUNCTION(src);

	if (src==NULL) {
		return 0;
	}

	for (y=0; y<src->sy; ++y) {
		for (x=0; x<src->sx; ++x) {
			pxl = f (src, x, y);
			r = gdImageRed(src, pxl);
			g = gdImageGreen(src, pxl);
			b = gdImageBlue(src, pxl);
			a = gdImageAlpha(src, pxl);
			r = g = b = (int) (.299 * r + .587 * g + .114 * b);

			new_pxl = gdImageColorAllocateAlpha(src, r, g, b, a);
			if (new_pxl == -1) {
				new_pxl = gdImageColorClosestAlpha(src, r, g, b, a);
			}
			if ((y >= 0) && (y < src->sy)) {
				gdImageSetPixel (src, x, y, new_pxl);
			}
		}
	}
	return 1;
}
0
[ "CWE-119" ]
php-src
feba44546c27b0158f9ac20e72040a224b918c75
191,752,304,043,103,640,000,000,000,000,000,000,000
33
Fixed bug #22965 (Crash in gd lib's ImageFillToBorder()).
int jp2_encode(jas_image_t *image, jas_stream_t *out, const char *optstr) { jp2_box_t *box; jp2_ftyp_t *ftyp; jp2_ihdr_t *ihdr; jas_stream_t *tmpstream; int allcmptssame; jp2_bpcc_t *bpcc; long len; uint_fast16_t cmptno; jp2_colr_t *colr; char buf[4096]; uint_fast32_t overhead; jp2_cdefchan_t *cdefchanent; jp2_cdef_t *cdef; int i; uint_fast32_t typeasoc; jas_iccprof_t *iccprof; jas_stream_t *iccstream; int pos; int needcdef; int prec; int sgnd; box = 0; tmpstream = 0; iccstream = 0; iccprof = 0; allcmptssame = 1; sgnd = jas_image_cmptsgnd(image, 0); prec = jas_image_cmptprec(image, 0); for (i = 1; i < jas_image_numcmpts(image); ++i) { if (jas_image_cmptsgnd(image, i) != sgnd || jas_image_cmptprec(image, i) != prec) { allcmptssame = 0; break; } } /* Output the signature box. */ if (!(box = jp2_box_create(JP2_BOX_JP))) { jas_eprintf("cannot create JP box\n"); goto error; } box->data.jp.magic = JP2_JP_MAGIC; if (jp2_box_put(box, out)) { jas_eprintf("cannot write JP box\n"); goto error; } jp2_box_destroy(box); box = 0; /* Output the file type box. */ if (!(box = jp2_box_create(JP2_BOX_FTYP))) { jas_eprintf("cannot create FTYP box\n"); goto error; } ftyp = &box->data.ftyp; ftyp->majver = JP2_FTYP_MAJVER; ftyp->minver = JP2_FTYP_MINVER; ftyp->numcompatcodes = 1; ftyp->compatcodes[0] = JP2_FTYP_COMPATCODE; if (jp2_box_put(box, out)) { jas_eprintf("cannot write FTYP box\n"); goto error; } jp2_box_destroy(box); box = 0; /* * Generate the data portion of the JP2 header box. * We cannot simply output the header for this box * since we do not yet know the correct value for the length * field. */ if (!(tmpstream = jas_stream_memopen(0, 0))) { jas_eprintf("cannot create temporary stream\n"); goto error; } /* Generate image header box. */ if (!(box = jp2_box_create(JP2_BOX_IHDR))) { jas_eprintf("cannot create IHDR box\n"); goto error; } ihdr = &box->data.ihdr; ihdr->width = jas_image_width(image); ihdr->height = jas_image_height(image); ihdr->numcmpts = jas_image_numcmpts(image); ihdr->bpc = allcmptssame ? JP2_SPTOBPC(jas_image_cmptsgnd(image, 0), jas_image_cmptprec(image, 0)) : JP2_IHDR_BPCNULL; ihdr->comptype = JP2_IHDR_COMPTYPE; ihdr->csunk = 0; ihdr->ipr = 0; if (jp2_box_put(box, tmpstream)) { jas_eprintf("cannot write IHDR box\n"); goto error; } jp2_box_destroy(box); box = 0; /* Generate bits per component box. */ if (!allcmptssame) { if (!(box = jp2_box_create(JP2_BOX_BPCC))) { jas_eprintf("cannot create BPCC box\n"); goto error; } bpcc = &box->data.bpcc; bpcc->numcmpts = jas_image_numcmpts(image); if (!(bpcc->bpcs = jas_alloc2(bpcc->numcmpts, sizeof(uint_fast8_t)))) { jas_eprintf("memory allocation failed\n"); goto error; } for (cmptno = 0; cmptno < bpcc->numcmpts; ++cmptno) { bpcc->bpcs[cmptno] = JP2_SPTOBPC(jas_image_cmptsgnd(image, cmptno), jas_image_cmptprec(image, cmptno)); } if (jp2_box_put(box, tmpstream)) { jas_eprintf("cannot write BPCC box\n"); goto error; } jp2_box_destroy(box); box = 0; } /* Generate color specification box. */ if (!(box = jp2_box_create(JP2_BOX_COLR))) { jas_eprintf("cannot create COLR box\n"); goto error; } colr = &box->data.colr; switch (jas_image_clrspc(image)) { case JAS_CLRSPC_SRGB: case JAS_CLRSPC_SYCBCR: case JAS_CLRSPC_SGRAY: colr->method = JP2_COLR_ENUM; colr->csid = clrspctojp2(jas_image_clrspc(image)); colr->pri = JP2_COLR_PRI; colr->approx = 0; break; default: colr->method = JP2_COLR_ICC; colr->pri = JP2_COLR_PRI; colr->approx = 0; /* Ensure that cmprof_ is not null. 
*/ if (!jas_image_cmprof(image)) { jas_eprintf("CM profile is null\n"); goto error; } if (!(iccprof = jas_iccprof_createfromcmprof( jas_image_cmprof(image)))) { jas_eprintf("cannot create ICC profile\n"); goto error; } if (!(iccstream = jas_stream_memopen(0, 0))) { jas_eprintf("cannot create temporary stream\n"); goto error; } if (jas_iccprof_save(iccprof, iccstream)) { jas_eprintf("cannot write ICC profile\n"); goto error; } if ((pos = jas_stream_tell(iccstream)) < 0) { jas_eprintf("cannot get stream position\n"); goto error; } colr->iccplen = pos; if (!(colr->iccp = jas_malloc(pos))) { jas_eprintf("memory allocation failed\n"); goto error; } jas_stream_rewind(iccstream); if (jas_stream_read(iccstream, colr->iccp, colr->iccplen) != colr->iccplen) { jas_eprintf("cannot read temporary stream\n"); goto error; } jas_stream_close(iccstream); iccstream = 0; jas_iccprof_destroy(iccprof); iccprof = 0; break; } if (jp2_box_put(box, tmpstream)) { jas_eprintf("cannot write box\n"); goto error; } jp2_box_destroy(box); box = 0; needcdef = 1; switch (jas_clrspc_fam(jas_image_clrspc(image))) { case JAS_CLRSPC_FAM_RGB: if (jas_image_cmpttype(image, 0) == JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_R) && jas_image_cmpttype(image, 1) == JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_G) && jas_image_cmpttype(image, 2) == JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_B)) needcdef = 0; break; case JAS_CLRSPC_FAM_YCBCR: if (jas_image_cmpttype(image, 0) == JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_YCBCR_Y) && jas_image_cmpttype(image, 1) == JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_YCBCR_CB) && jas_image_cmpttype(image, 2) == JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_YCBCR_CR)) needcdef = 0; break; case JAS_CLRSPC_FAM_GRAY: if (jas_image_cmpttype(image, 0) == JAS_IMAGE_CT_COLOR(JAS_IMAGE_CT_GRAY_Y)) needcdef = 0; break; default: abort(); break; } if (needcdef) { if (!(box = jp2_box_create(JP2_BOX_CDEF))) { jas_eprintf("cannot create CDEF box\n"); goto error; } cdef = &box->data.cdef; cdef->numchans = jas_image_numcmpts(image); cdef->ents = jas_alloc2(cdef->numchans, sizeof(jp2_cdefchan_t)); for (i = 0; i < jas_image_numcmpts(image); ++i) { cdefchanent = &cdef->ents[i]; cdefchanent->channo = i; typeasoc = jp2_gettypeasoc(jas_image_clrspc(image), jas_image_cmpttype(image, i)); cdefchanent->type = typeasoc >> 16; cdefchanent->assoc = typeasoc & 0x7fff; } if (jp2_box_put(box, tmpstream)) { jas_eprintf("cannot write CDEF box\n"); goto error; } jp2_box_destroy(box); box = 0; } /* Determine the total length of the JP2 header box. */ len = jas_stream_tell(tmpstream); jas_stream_rewind(tmpstream); /* * Output the JP2 header box and all of the boxes which it contains. */ if (!(box = jp2_box_create(JP2_BOX_JP2H))) { jas_eprintf("cannot create JP2H box\n"); goto error; } box->len = len + JP2_BOX_HDRLEN(false); if (jp2_box_put(box, out)) { jas_eprintf("cannot write JP2H box\n"); goto error; } jp2_box_destroy(box); box = 0; if (jas_stream_copy(out, tmpstream, len)) { jas_eprintf("cannot copy stream\n"); goto error; } jas_stream_close(tmpstream); tmpstream = 0; /* * Output the contiguous code stream box. */ if (!(box = jp2_box_create(JP2_BOX_JP2C))) { jas_eprintf("cannot create JP2C box\n"); goto error; } box->len = 0; if (jp2_box_put(box, out)) { jas_eprintf("cannot write JP2C box\n"); goto error; } jp2_box_destroy(box); box = 0; /* Output the JPEG-2000 code stream. */ overhead = jas_stream_getrwcount(out); sprintf(buf, "%s\n_jp2overhead=%lu\n", (optstr ? 
optstr : ""), (unsigned long) overhead); if (jpc_encode(image, out, buf)) { jas_eprintf("jpc_encode failed\n"); goto error; } return 0; error: if (iccprof) { jas_iccprof_destroy(iccprof); } if (iccstream) { jas_stream_close(iccstream); } if (box) { jp2_box_destroy(box); } if (tmpstream) { jas_stream_close(tmpstream); } return -1; }
1
[ "CWE-476" ]
jasper
58ba0365d911b9f9dd68e9abf826682c0b4f2293
137,179,489,860,489,530,000,000,000,000,000,000,000
325
Added a check in the JP2 encoder to ensure that the image to be coded has at least one component. Also, made some small changes to a private build script.
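The added check sits naturally at the top of jp2_encode() above, before the loop that inspects component 0. A minimal sketch using the accessors the function already uses:

	/* An image with zero components would make the allcmptssame scan
	 * read component 0 below; refuse it up front. */
	if (jas_image_numcmpts(image) < 1) {
		jas_eprintf("image must have at least one component\n");
		goto error;
	}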
void StreamReq::Dispose() {
  BaseObjectPtr<AsyncWrap> destroy_me{GetAsyncWrap()};
  object()->SetAlignedPointerInInternalField(
      StreamReq::kStreamReqField, nullptr);
  destroy_me->Detach();
}
0
[ "CWE-416" ]
node
4f8772f9b731118628256189b73cd202149bbd97
37,761,514,958,495,906,000,000,000,000,000,000,000
6
src: retain pointers to WriteWrap/ShutdownWrap Avoids potential use-after-free when wrap req's are synchronously destroyed. CVE-ID: CVE-2020-8265 Fixes: https://github.com/nodejs-private/node-private/issues/227 Refs: https://hackerone.com/bugs?subject=nodejs&report_id=988103 PR-URL: https://github.com/nodejs-private/node-private/pull/23 Reviewed-By: Anna Henningsen <[email protected]> Reviewed-By: Matteo Collina <[email protected]> Reviewed-By: Rich Trott <[email protected]>
static void mt_fix_const_field(struct hid_field *field, unsigned int usage)
{
	if (field->usage[0].hid != usage ||
	    !(field->flags & HID_MAIN_ITEM_CONSTANT))
		return;

	field->flags &= ~HID_MAIN_ITEM_CONSTANT;
	field->flags |= HID_MAIN_ITEM_VARIABLE;
}
0
[ "CWE-787" ]
linux
35556bed836f8dc07ac55f69c8d17dce3e7f0e25
293,767,444,653,032,700,000,000,000,000,000,000,000
9
HID: core: Sanitize event code and type when mapping input When calling into hid_map_usage(), the passed event code is blindly stored as is, even if it doesn't fit in the associated bitmap. This event code can come from a variety of sources, including devices masquerading as input devices, only a bit more "programmable". Instead of taking the event code at face value, check that it actually fits the corresponding bitmap, and if it doesn't: - spit out a warning so that we know which device is acting up - NULLify the bitmap pointer so that we catch unexpected uses Code paths that can make use of untrusted inputs can now check that the mapping was indeed correct and bail out if not. Cc: [email protected] Signed-off-by: Marc Zyngier <[email protected]> Signed-off-by: Benjamin Tissoires <[email protected]>
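A condensed, hedged sketch of the sanitization described for hid_map_usage(); 'c', 'type' and 'bit' follow that function's parameters, and only three event types are shown:

	/* Validate the event code against the bitmap it will be set in. */
	struct input_dev *input = hidinput->input;
	unsigned long *bmap = NULL;
	unsigned int limit = 0;

	switch (type) {
	case EV_ABS: bmap = input->absbit; limit = ABS_MAX; break;
	case EV_REL: bmap = input->relbit; limit = REL_MAX; break;
	case EV_KEY: bmap = input->keybit; limit = KEY_MAX; break;
	/* ... other event types elided ... */
	}

	if (unlikely(c > limit || !bmap)) {
		pr_warn_ratelimited("%s: Invalid code %d type %d\n",
				    input->name, c, type);
		*bit = NULL;	/* lets callers detect the failed mapping */
		return;
	}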
nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc) { struct nlm_host *host = req->a_host; struct rpc_clnt *clnt; struct nlm_args *argp = &req->a_args; struct nlm_res *resp = &req->a_res; struct rpc_message msg = { .rpc_argp = argp, .rpc_resp = resp, .rpc_cred = cred, }; int status; dprintk("lockd: call procedure %d on %s\n", (int)proc, host->h_name); do { if (host->h_reclaiming && !argp->reclaim) goto in_grace_period; /* If we have no RPC client yet, create one. */ if ((clnt = nlm_bind_host(host)) == NULL) return -ENOLCK; msg.rpc_proc = &clnt->cl_procinfo[proc]; /* Perform the RPC call. If an error occurs, try again */ if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) { dprintk("lockd: rpc_call returned error %d\n", -status); switch (status) { case -EPROTONOSUPPORT: status = -EINVAL; break; case -ECONNREFUSED: case -ETIMEDOUT: case -ENOTCONN: nlm_rebind_host(host); status = -EAGAIN; break; case -ERESTARTSYS: return signalled () ? -EINTR : status; default: break; } break; } else if (resp->status == nlm_lck_denied_grace_period) { dprintk("lockd: server in grace period\n"); if (argp->reclaim) { printk(KERN_WARNING "lockd: spurious grace period reject?!\n"); return -ENOLCK; } } else { if (!argp->reclaim) { /* We appear to be out of the grace period */ wake_up_all(&host->h_gracewait); } dprintk("lockd: server returns status %d\n", resp->status); return 0; /* Okay, call complete */ } in_grace_period: /* * The server has rebooted and appears to be in the grace * period during which locks are only allowed to be * reclaimed. * We can only back off and try again later. */ status = nlm_wait_on_grace(&host->h_gracewait); } while (status == 0); return status; }
0
[ "CWE-400", "CWE-399", "CWE-703" ]
linux
0b760113a3a155269a3fba93a409c640031dd68f
60,096,913,559,185,100,000,000,000,000,000,000,000
73
NLM: Don't hang forever on NLM unlock requests If the NLM daemon is killed on the NFS server, we can currently end up hanging forever on an 'unlock' request, instead of aborting. Basically, if the rpcbind request fails, or the server keeps returning garbage, we really want to quit instead of retrying. Tested-by: Vasily Averin <[email protected]> Signed-off-by: Trond Myklebust <[email protected]> Cc: [email protected]
int main(int argc, char **argv) { StrList **depend_ptr; timestamp(); iflag_set_default_cpu(&cpu); iflag_set_default_cpu(&cmd_cpu); pass0 = 0; want_usage = terminate_after_phase = false; nasm_set_verror(nasm_verror_gnu); error_file = stderr; tolower_init(); src_init(); /* * We must call init_labels() before the command line parsing, * because we may be setting prefixes/suffixes from the command * line. */ init_labels(); offsets = raa_init(); forwrefs = saa_init((int32_t)sizeof(struct forwrefinfo)); preproc = &nasmpp; operating_mode = OP_NORMAL; parse_cmdline(argc, argv, 1); if (terminate_after_phase) { if (want_usage) usage(); return 1; } /* * Define some macros dependent on the runtime, but not * on the command line (as those are scanned in cmdline pass 2.) */ preproc->init(); define_macros_early(); parse_cmdline(argc, argv, 2); if (terminate_after_phase) { if (want_usage) usage(); return 1; } /* Save away the default state of warnings */ memcpy(warning_state_init, warning_state, sizeof warning_state); if (!using_debug_info) { /* No debug info, redirect to the null backend (empty stubs) */ dfmt = &null_debug_form; } else if (!debug_format) { /* Default debug format for this backend */ dfmt = ofmt->default_dfmt; } else { dfmt = dfmt_find(ofmt, debug_format); if (!dfmt) { nasm_fatal(ERR_NOFILE | ERR_USAGE, "unrecognized debug format `%s' for" " output format `%s'", debug_format, ofmt->shortname); } } if (ofmt->stdmac) preproc->extra_stdmac(ofmt->stdmac); /* * If no output file name provided and this * is a preprocess mode, we're perfectly * fine to output into stdout. */ if (!outname) { if (!(operating_mode & OP_PREPROCESS)) outname = filename_set_extension(inname, ofmt->extension); } /* define some macros dependent of command-line */ define_macros_late(); depend_ptr = (depend_file || (operating_mode & OP_DEPEND)) ? &depend_list : NULL; if (!depend_target) depend_target = quote_for_make(outname); if (operating_mode & OP_DEPEND) { char *line; if (depend_missing_ok) preproc->include_path(NULL); /* "assume generated" */ preproc->reset(inname, 0, depend_ptr); ofile = NULL; while ((line = preproc->getline())) nasm_free(line); preproc->cleanup(0); } else if (operating_mode & OP_PREPROCESS) { char *line; const char *file_name = NULL; int32_t prior_linnum = 0; int lineinc = 0; if (outname) { ofile = nasm_open_write(outname, NF_TEXT); if (!ofile) nasm_fatal(ERR_NOFILE, "unable to open output file `%s'", outname); } else ofile = NULL; location.known = false; /* pass = 1; */ preproc->reset(inname, 3, depend_ptr); /* Revert all warnings to the default state */ memcpy(warning_state, warning_state_init, sizeof warning_state); while ((line = preproc->getline())) { /* * We generate %line directives if needed for later programs */ int32_t linnum = prior_linnum += lineinc; int altline = src_get(&linnum, &file_name); if (altline) { if (altline == 1 && lineinc == 1) nasm_fputs("", ofile); else { lineinc = (altline != -1 || lineinc != 1); fprintf(ofile ? ofile : stdout, "%%line %"PRId32"+%d %s\n", linnum, lineinc, file_name); } prior_linnum = linnum; } nasm_fputs(line, ofile); nasm_free(line); } preproc->cleanup(0); if (ofile) fclose(ofile); if (ofile && terminate_after_phase && !keep_all) remove(outname); ofile = NULL; } if (operating_mode & OP_NORMAL) { ofile = nasm_open_write(outname, (ofmt->flags & OFMT_TEXT) ? 
NF_TEXT : NF_BINARY); if (!ofile) nasm_fatal(ERR_NOFILE, "unable to open output file `%s'", outname); ofmt->init(); dfmt->init(); assemble_file(inname, depend_ptr); if (!terminate_after_phase) { ofmt->cleanup(); cleanup_labels(); fflush(ofile); if (ferror(ofile)) { nasm_error(ERR_NONFATAL|ERR_NOFILE, "write error on output file `%s'", outname); terminate_after_phase = true; } } if (ofile) { fclose(ofile); if (terminate_after_phase && !keep_all) remove(outname); ofile = NULL; } } if (depend_list && !terminate_after_phase) emit_dependencies(depend_list); if (want_usage) usage(); raa_free(offsets); saa_free(forwrefs); eval_cleanup(); stdscan_cleanup(); src_free(); return terminate_after_phase; }
0
[ "CWE-476" ]
nasm
e996d28c70d45008085322b442b44a9224308548
252,512,517,825,300,640,000,000,000,000,000,000,000
198
labels: Don't nil dereference if no label provided An equ without a label may cause a nil dereference:

    | equ 0x100

Fixes 98578071b9d71ecaa2344dd9c185237c1765041e Signed-off-by: Cyrill Gorcunov <[email protected]>
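Given the reproducer, the guard is a NULL check where the label is consumed. A hedged sketch in NASM's error style; the exact location and message are assumptions:

	/* EQU is meaningless without a label to define; refuse instead of
	 * dereferencing a NULL label pointer. */
	if (!label) {
		nasm_error(ERR_NONFATAL, "EQU not preceded by a label");
		return;
	}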
bool check_func_default_processor(void *arg) { return true; }
0
[ "CWE-617" ]
server
2e7891080667c59ac80f788eef4d59d447595772
57,076,057,852,016,710,000,000,000,000,000,000,000
1
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view This bug could manifest itself after pushing a where condition over a mergeable derived table / view / CTE DT into a grouping view / derived table / CTE V whose item list contained set functions with constant arguments such as MIN(2), SUM(1) etc. In such cases the field references used in the condition pushed into the view V that correspond set functions are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation of the virtual method const_item() for the class Item_direct_view_ref the wrapped set functions with constant arguments could be erroneously taken for constant items. This could lead to a wrong result set returned by the main select query in 10.2. In 10.4 where a possibility of pushing condition from HAVING into WHERE had been added this could cause a crash. Approved by Sergey Petrunya <[email protected]>
static int sctp_setsockopt_delayed_ack(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_sack_info params; struct sctp_transport *trans = NULL; struct sctp_association *asoc = NULL; struct sctp_sock *sp = sctp_sk(sk); if (optlen == sizeof(struct sctp_sack_info)) { if (copy_from_user(&params, optval, optlen)) return -EFAULT; if (params.sack_delay == 0 && params.sack_freq == 0) return 0; } else if (optlen == sizeof(struct sctp_assoc_value)) { printk(KERN_WARNING "SCTP: Use of struct sctp_assoc_value " "in delayed_ack socket option deprecated\n"); printk(KERN_WARNING "SCTP: Use struct sctp_sack_info instead\n"); if (copy_from_user(&params, optval, optlen)) return -EFAULT; if (params.sack_delay == 0) params.sack_freq = 1; else params.sack_freq = 0; } else return - EINVAL; /* Validate value parameter. */ if (params.sack_delay > 500) return -EINVAL; /* Get association, if sack_assoc_id != 0 and the socket is a one * to many style socket, and an association was not found, then * the id was invalid. */ asoc = sctp_id2assoc(sk, params.sack_assoc_id); if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (params.sack_delay) { if (asoc) { asoc->sackdelay = msecs_to_jiffies(params.sack_delay); asoc->param_flags = (asoc->param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE; } else { sp->sackdelay = params.sack_delay; sp->param_flags = (sp->param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE; } } if (params.sack_freq == 1) { if (asoc) { asoc->param_flags = (asoc->param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE; } else { sp->param_flags = (sp->param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE; } } else if (params.sack_freq > 1) { if (asoc) { asoc->sackfreq = params.sack_freq; asoc->param_flags = (asoc->param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE; } else { sp->sackfreq = params.sack_freq; sp->param_flags = (sp->param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE; } } /* If change is for association, also apply to each transport. */ if (asoc) { list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { if (params.sack_delay) { trans->sackdelay = msecs_to_jiffies(params.sack_delay); trans->param_flags = (trans->param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE; } if (params.sack_freq == 1) { trans->param_flags = (trans->param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE; } else if (params.sack_freq > 1) { trans->sackfreq = params.sack_freq; trans->param_flags = (trans->param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE; } } } return 0; }
0
[ "CWE-400" ]
linux-2.6
c377411f2494a931ff7facdbb3a6839b1266bcf6
176,134,383,588,591,730,000,000,000,000,000,000,000
105
net: sk_add_backlog() take rmem_alloc into account Current socket backlog limit is not enough to really stop DDOS attacks, because the user thread spends a lot of time processing a full backlog each round, and may spin madly on the socket lock. We should add the backlog size and the receive_queue size (aka rmem_alloc) to pace writers, and let the user thread run without being slowed down too much. Introduce a sk_rcvqueues_full() helper, to avoid taking the socket lock in stress situations. Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp receiver can now process ~200.000 pps (instead of ~100 pps before the patch) on an 8 core machine. Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
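A userspace model of the sk_rcvqueues_full() idea the message introduces; the struct and field names are stand-ins for the kernel's struct sock, and the exact limit arithmetic has varied across kernel releases.

#include <stdbool.h>

struct toy_sock {
    unsigned int backlog_len; /* bytes parked in the backlog */
    unsigned int rmem_alloc;  /* bytes already in the receive queue */
    unsigned int rcvbuf;      /* configured receive buffer limit */
};

/* Full when backlog plus receive-queue memory would exceed the limit,
 * which paces writers without ever taking the socket lock. */
static bool toy_rcvqueues_full(const struct toy_sock *sk, unsigned int skb_truesize)
{
    return sk->backlog_len + sk->rmem_alloc + skb_truesize > sk->rcvbuf;
}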
explicit BoostedTreesCalculateBestFeatureSplitOp( OpKernelConstruction* const context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("logits_dimension", &logits_dim_)); OP_REQUIRES_OK(context, context->GetAttr("split_type", &split_type_)); }
0
[ "CWE-125", "CWE-369" ]
tensorflow
e84c975313e8e8e38bb2ea118196369c45c51378
268,404,994,668,751,920,000,000,000,000,000,000,000
6
In tf.raw_ops.BoostedTreesSparseCalculateBestFeatureSplit, limit stat_dim in stats_summary_indices to under stats_dims in stats_summary_shape PiperOrigin-RevId: 387171191 Change-Id: I83ca8a75b22aa78c037e8b98779da6cced16bfaa
static struct sock *nr_find_peer(unsigned char index, unsigned char id, ax25_address *dest) { struct sock *s; spin_lock_bh(&nr_list_lock); sk_for_each(s, &nr_list) { struct nr_sock *nr = nr_sk(s); if (nr->your_index == index && nr->your_id == id && !ax25cmp(&nr->dest_addr, dest)) { bh_lock_sock(s); goto found; } } s = NULL; found: spin_unlock_bh(&nr_list_lock); return s; }
0
[ "CWE-20", "CWE-269" ]
linux
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
252,885,545,700,426,960,000,000,000,000,000,000,000
20
net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
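The convention the message establishes, reduced to a sketch; sockaddr_in is chosen only for concreteness and the function name is hypothetical. A handler either reports msg_namelen = 0 or fills msg_name and reports exactly the bytes it wrote, never trusting a caller-supplied length.

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static void report_peer(struct msghdr *msg, const struct sockaddr_in *peer)
{
    if (msg->msg_name == NULL) {      /* plain read(): no address wanted */
        msg->msg_namelen = 0;
        return;
    }
    memcpy(msg->msg_name, peer, sizeof(*peer));
    msg->msg_namelen = sizeof(*peer); /* exactly what was written, no more */
}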
dump_isodirrec(FILE *out, const unsigned char *isodirrec) { fprintf(out, " l %d,", toi(isodirrec + DR_length_offset, DR_length_size)); fprintf(out, " a %d,", toi(isodirrec + DR_ext_attr_length_offset, DR_ext_attr_length_size)); fprintf(out, " ext 0x%x,", toi(isodirrec + DR_extent_offset, DR_extent_size)); fprintf(out, " s %d,", toi(isodirrec + DR_size_offset, DR_extent_size)); fprintf(out, " f 0x%x,", toi(isodirrec + DR_flags_offset, DR_flags_size)); fprintf(out, " u %d,", toi(isodirrec + DR_file_unit_size_offset, DR_file_unit_size_size)); fprintf(out, " ilv %d,", toi(isodirrec + DR_interleave_offset, DR_interleave_size)); fprintf(out, " seq %d,", toi(isodirrec + DR_volume_sequence_number_offset, DR_volume_sequence_number_size)); fprintf(out, " nl %d:", toi(isodirrec + DR_name_len_offset, DR_name_len_size)); fprintf(out, " `%.*s'", toi(isodirrec + DR_name_len_offset, DR_name_len_size), isodirrec + DR_name_offset); }
0
[ "CWE-125" ]
libarchive
f9569c086ff29259c73790db9cbf39fe8fb9d862
172,948,114,282,639,000,000,000,000,000,000,000,000
25
iso9660: validate directory record length
pickle_clear(PyObject *m) { _Pickle_ClearState(_Pickle_GetState(m)); return 0; }
0
[ "CWE-190", "CWE-369" ]
cpython
a4ae828ee416a66d8c7bf5ee71d653c2cc6a26dd
184,489,105,888,461,160,000,000,000,000,000,000,000
5
closes bpo-34656: Avoid relying on signed overflow in _pickle memos. (GH-9261)
static gboolean tcp_chr_telnet_init_io(QIOChannel *ioc, GIOCondition cond G_GNUC_UNUSED, gpointer user_data) { TCPCharDriverTelnetInit *init = user_data; ssize_t ret; ret = qio_channel_write(ioc, init->buf, init->buflen, NULL); if (ret < 0) { if (ret == QIO_CHANNEL_ERR_BLOCK) { ret = 0; } else { tcp_chr_disconnect(init->chr); return FALSE; } } init->buflen -= ret; if (init->buflen == 0) { tcp_chr_connect(init->chr); return FALSE; } memmove(init->buf, init->buf + ret, init->buflen); return TRUE; }
0
[ "CWE-416" ]
qemu
a4afa548fc6dd9842ed86639b4d37d4d1c4ad480
116,224,786,354,374,530,000,000,000,000,000,000,000
27
char: move front end handlers in CharBackend Since the handlers are associated with a CharBackend, rather than the CharDriverState, it is more appropriate to store them in CharBackend. This avoids the handler copy dance in qemu_chr_fe_set_handlers() then mux_chr_update_read_handler(), by storing the CharBackend pointer directly. Also a mux CharDriver should go through mux->backends[focused], since chr->be will stay NULL. Before that, it was possible to call chr->handler by mistake with surprising results, for example through qemu_chr_be_can_write(), which would result in calling the last set handler front end, not the one with focus. Signed-off-by: Marc-André Lureau <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static bool tc_qdisc_dump_ignore(struct Qdisc *q) { return (q->flags & TCQ_F_BUILTIN) ? true : false; }
0
[ "CWE-909" ]
linux-2.6
16ebb5e0b36ceadc8186f71d68b0c4fa4b6e781b
112,829,297,122,376,760,000,000,000,000,000,000,000
4
tc: Fix uninitialized kernel memory leak Three bytes of uninitialized kernel memory are currently leaked to user space Signed-off-by: Eric Dumazet <[email protected]> Reviewed-by: Jiri Pirko <[email protected]> Signed-off-by: David S. Miller <[email protected]>
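The generic fix shape for this class of leak, sketched with a hypothetical structure: zero the whole object, padding bytes included, before filling only the fields that are meant to reach user space.

#include <string.h>

struct toy_dump {
    unsigned char refcnt; /* the compiler may insert padding after this */
    unsigned int  handle;
    unsigned char flags;  /* and trailing padding before the end */
};

static void prepare_dump(struct toy_dump *d, unsigned int handle)
{
    memset(d, 0, sizeof(*d)); /* no stale stack bytes can leak out */
    d->handle = handle;
}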
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section) { subpage_t *subpage; hwaddr base = section->offset_within_address_space & TARGET_PAGE_MASK; MemoryRegionSection *existing = phys_page_find(d->phys_map, base, d->map.nodes, d->map.sections); MemoryRegionSection subsection = { .offset_within_address_space = base, .size = int128_make64(TARGET_PAGE_SIZE), }; hwaddr start, end; assert(existing->mr->subpage || existing->mr == &io_mem_unassigned); if (!(existing->mr->subpage)) { subpage = subpage_init(d->as, base); subsection.address_space = d->as; subsection.mr = &subpage->iomem; phys_page_set(d, base >> TARGET_PAGE_BITS, 1, phys_section_add(&d->map, &subsection)); } else { subpage = container_of(existing->mr, subpage_t, iomem); } start = section->offset_within_address_space & ~TARGET_PAGE_MASK; end = start + int128_get64(section->size) - 1; subpage_register(subpage, start, end, phys_section_add(&d->map, section)); }
0
[]
qemu
c3c1bb99d1c11978d9ce94d1bdcf0705378c1459
299,455,600,567,545,570,000,000,000,000,000,000,000
29
exec: Respect as_translate_internal length clamp address_space_translate_internal will clamp the *plen length argument based on the size of the memory region being queried. The iommu walker logic in address_space_translate was ignoring this by discarding the post-call value of *plen. Fix by just always using *plen as the length argument throughout the function, removing the len local variable. This fixes a bootloader bug when a single elf section spans multiple QEMU memory regions. Signed-off-by: Peter Crosthwaite <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
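The reasoning in miniature, with made-up names: once the inner translation clamps the length in place, the caller has to keep using the clamped value instead of a stale local copy.

#include <stdint.h>

/* Clamp *plen to what the current region can still serve
 * (assumes offset <= region_size). */
static void translate_clamped(uint64_t offset, uint64_t region_size, uint64_t *plen)
{
    uint64_t remaining = region_size - offset;
    if (*plen > remaining)
        *plen = remaining;
}

static uint64_t walk_one_region(uint64_t offset, uint64_t region_size, uint64_t len)
{
    translate_clamped(offset, region_size, &len); /* reuse len; no shadow variable */
    return len; /* the respected, possibly shortened, length */
}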
output_buffer& HandShakeHeader::get(output_buffer& out) const { return out << *this; }
0
[]
mysql-server
b9768521bdeb1a8069c7b871f4536792b65fd79b
151,126,870,064,576,340,000,000,000,000,000,000,000
4
Updated yassl to yassl-2.3.8 (cherry picked from commit 7f9941eab55ed672bfcccd382dafbdbcfdc75aaa)
template<typename T> inline T rol(const T& a, const unsigned int n=1) {
      return n?(T)((a<<n)|(a>>((sizeof(T)<<3) - n))):a;
    }
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
329,307,917,655,768,000,000,000,000,000,000,000,000
3
Fix other issues in 'CImg<T>::load_bmp()'.
CImg<T> *data() { return _data; }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
333,904,136,803,253,200,000,000,000,000,000,000,000
3
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that the dimensions encoded in the file do not exceed the file size.
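A hedged sketch of the kind of check the message calls for; the 3-bytes-per-pixel figure is an assumption for illustration, not CImg's actual layout. The point is to do the size arithmetic in 64 bits so a hostile header cannot make it wrap.

#include <stdbool.h>
#include <stdint.h>

static bool dims_fit_in_file(uint32_t width, uint32_t height, uint64_t file_size)
{
    if (width == 0 || height == 0)
        return false;
    /* widen before multiplying so width * height cannot overflow */
    uint64_t needed = (uint64_t)width * height * 3u; /* assumed 3 bytes/pixel */
    return needed <= file_size;
}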
unsigned long usecs_to_jiffies(const unsigned int u) { if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) return MAX_JIFFY_OFFSET; #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ); #elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC) return u * (HZ / USEC_PER_SEC); #else return ((u64)USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32) >> USEC_TO_HZ_SHR32; #endif }
0
[ "CWE-189" ]
linux
f8bd2258e2d520dff28c855658bd24bdafb5102d
29,720,805,157,929,910,000,000,000,000,000,000,000
13
remove div_long_long_rem x86 is the only arch right now which provides an optimized version of div_long_long_rem, and it has the downside that one has to be very careful that the divide doesn't overflow. The API is a little awkward, as the arguments for the unsigned divide are signed. The signed version also doesn't handle a negative divisor and produces worse code on 64bit archs. There is little incentive to keep this API alive, so this converts the few users to the new API. Signed-off-by: Roman Zippel <[email protected]> Cc: Ralf Baechle <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: john stultz <[email protected]> Cc: Christoph Lameter <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq, int wake_flags, bool success) { trace_sched_wakeup(p, success); check_preempt_curr(rq, p, wake_flags); p->state = TASK_RUNNING; #ifdef CONFIG_SMP if (p->sched_class->task_woken) p->sched_class->task_woken(rq, p); if (unlikely(rq->idle_stamp)) { u64 delta = rq->clock - rq->idle_stamp; u64 max = 2*sysctl_sched_migration_cost; if (delta > max) rq->avg_idle = max; else update_avg(&rq->avg_idle, delta); rq->idle_stamp = 0; } #endif /* if a worker is waking up, notify workqueue */ if ((p->flags & PF_WQ_WORKER) && success) wq_worker_waking_up(p, cpu_of(rq)); }
0
[ "CWE-703", "CWE-835" ]
linux
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
113,279,943,101,641,950,000,000,000,000,000,000,000
26
Sched: fix skip_clock_update optimization idle_balance() drops/retakes rq->lock, leaving the previous task vulnerable to set_tsk_need_resched(). Clear it after we return from balancing instead, and in setup_thread_stack() as well, so no successfully descheduled or never scheduled task has it set. Need resched confused the skip_clock_update logic, which assumes that the next call to update_rq_clock() will come nearly immediately after being set. Make the optimization robust against the case of waking a sleeper before it successfully deschedules, by checking that the current task has not been dequeued before setting the flag, since it is that useless clock update we're trying to save, and clear unconditionally in schedule() proper instead of conditionally in put_prev_task(). Signed-off-by: Mike Galbraith <[email protected]> Reported-by: Bjoern B. Brandenburg <[email protected]> Tested-by: Yong Zhang <[email protected]> Signed-off-by: Peter Zijlstra <[email protected]> Cc: [email protected] LKML-Reference: <[email protected]> Signed-off-by: Ingo Molnar <[email protected]>
static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) { /* * Don't forget to update Documentation/ on changes. */ static const char mnemonics[BITS_PER_LONG][2] = { /* * In case if we meet a flag we don't know about. */ [0 ... (BITS_PER_LONG-1)] = "??", [ilog2(VM_READ)] = "rd", [ilog2(VM_WRITE)] = "wr", [ilog2(VM_EXEC)] = "ex", [ilog2(VM_SHARED)] = "sh", [ilog2(VM_MAYREAD)] = "mr", [ilog2(VM_MAYWRITE)] = "mw", [ilog2(VM_MAYEXEC)] = "me", [ilog2(VM_MAYSHARE)] = "ms", [ilog2(VM_GROWSDOWN)] = "gd", [ilog2(VM_PFNMAP)] = "pf", [ilog2(VM_DENYWRITE)] = "dw", #ifdef CONFIG_X86_INTEL_MPX [ilog2(VM_MPX)] = "mp", #endif [ilog2(VM_LOCKED)] = "lo", [ilog2(VM_IO)] = "io", [ilog2(VM_SEQ_READ)] = "sr", [ilog2(VM_RAND_READ)] = "rr", [ilog2(VM_DONTCOPY)] = "dc", [ilog2(VM_DONTEXPAND)] = "de", [ilog2(VM_ACCOUNT)] = "ac", [ilog2(VM_NORESERVE)] = "nr", [ilog2(VM_HUGETLB)] = "ht", [ilog2(VM_ARCH_1)] = "ar", [ilog2(VM_DONTDUMP)] = "dd", #ifdef CONFIG_MEM_SOFT_DIRTY [ilog2(VM_SOFTDIRTY)] = "sd", #endif [ilog2(VM_MIXEDMAP)] = "mm", [ilog2(VM_HUGEPAGE)] = "hg", [ilog2(VM_NOHUGEPAGE)] = "nh", [ilog2(VM_MERGEABLE)] = "mg", [ilog2(VM_UFFD_MISSING)]= "um", [ilog2(VM_UFFD_WP)] = "uw", #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS /* These come out via ProtectionKey: */ [ilog2(VM_PKEY_BIT0)] = "", [ilog2(VM_PKEY_BIT1)] = "", [ilog2(VM_PKEY_BIT2)] = "", [ilog2(VM_PKEY_BIT3)] = "", #endif }; size_t i; seq_puts(m, "VmFlags: "); for (i = 0; i < BITS_PER_LONG; i++) { if (!mnemonics[i][0]) continue; if (vma->vm_flags & (1UL << i)) { seq_printf(m, "%c%c ", mnemonics[i][0], mnemonics[i][1]); } } seq_putc(m, '\n'); }
0
[ "CWE-119" ]
linux
1be7107fbe18eed3e319a6c3e83c78254b693acb
136,817,303,914,484,620,000,000,000,000,000,000,000
66
mm: larger stack guard gap, between vmas Stack guard page is a useful feature to reduce the risk of stack smashing into a different mapping. We have been using a single page gap which is sufficient to prevent having stack adjacent to a different mapping. But this seems to be insufficient in the light of the stack usage in userspace. E.g. glibc uses as large as 64kB alloca() in many commonly used functions. Others use constructs like gid_t buffer[NGROUPS_MAX] which is 256kB or stack strings with MAX_ARG_STRLEN. This will become especially dangerous for suid binaries and the default no limit for the stack size limit because those applications can be tricked to consume a large portion of the stack and a single glibc call could jump over the guard page. These attacks are not theoretical, unfortunately. Make those attacks less probable by increasing the stack guard gap to 1MB (on systems with 4k pages; but make it depend on the page size because systems with larger base pages might cap stack allocations in the PAGE_SIZE units) which should cover larger alloca() and VLA stack allocations. It is obviously not a full fix because the problem is somehow inherent, but it should reduce attack space a lot. One could argue that the gap size should be configurable from userspace, but that can be done later when somebody finds that the new 1MB is wrong for some special case applications. For now, add a kernel command line option (stack_guard_gap) to specify the stack gap size (in page units). Implementation wise, first delete all the old code for stack guard page: because although we could get away with accounting one extra page in a stack vma, accounting a larger gap can break userspace - case in point, a program run with "ulimit -S -v 20000" failed when the 1MB gap was counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK and strict non-overcommit mode. Instead of keeping gap inside the stack vma, maintain the stack guard gap as a gap between vmas: using vm_start_gap() in place of vm_start (or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few places which need to respect the gap - mainly arch_get_unmapped_area(), and the vma tree's subtree_gap support for that. Original-patch-by: Oleg Nesterov <[email protected]> Original-patch-by: Michal Hocko <[email protected]> Signed-off-by: Hugh Dickins <[email protected]> Acked-by: Michal Hocko <[email protected]> Tested-by: Helge Deller <[email protected]> # parisc Signed-off-by: Linus Torvalds <[email protected]>
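A structural sketch of the vm_start_gap() idea named in the message; the struct is a stand-in for the kernel's vm_area_struct, and the 1 MB default (256 pages of 4 KB) is taken from the text.

#include <stdint.h>

#define TOY_GUARD_GAP (256UL << 12) /* 256 * 4 KB pages = 1 MB */
#define TOY_VM_GROWSDOWN 0x1u

struct toy_vma {
    uintptr_t start, end;
    unsigned int flags;
};

/* Callers that must respect the gap ask for the effective start of a
 * downward-growing stack VMA: its real start minus the guard gap. */
static uintptr_t toy_vm_start_gap(const struct toy_vma *vma)
{
    uintptr_t s = vma->start;
    if (vma->flags & TOY_VM_GROWSDOWN)
        s = (s > TOY_GUARD_GAP) ? s - TOY_GUARD_GAP : 0;
    return s;
}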
ofputil_encode_ofp15_group_mod(enum ofp_version ofp_version, const struct ofputil_group_mod *gm) { struct ofpbuf *b; struct ofp15_group_mod *ogm; size_t start_ogm; struct ofputil_bucket *bucket; struct id_pool *bucket_ids = NULL; b = ofpraw_alloc(OFPRAW_OFPT15_GROUP_MOD, ofp_version, 0); start_ogm = b->size; ofpbuf_put_zeros(b, sizeof *ogm); LIST_FOR_EACH (bucket, list_node, &gm->buckets) { uint32_t bucket_id; /* Generate a bucket id if none was supplied */ if (bucket->bucket_id > OFPG15_BUCKET_MAX) { if (!bucket_ids) { const struct ofputil_bucket *bkt; bucket_ids = id_pool_create(0, OFPG15_BUCKET_MAX + 1); /* Mark all bucket_ids that are present in gm * as used in the pool. */ LIST_FOR_EACH_REVERSE (bkt, list_node, &gm->buckets) { if (bkt == bucket) { break; } if (bkt->bucket_id <= OFPG15_BUCKET_MAX) { id_pool_add(bucket_ids, bkt->bucket_id); } } } if (!id_pool_alloc_id(bucket_ids, &bucket_id)) { OVS_NOT_REACHED(); } } else { bucket_id = bucket->bucket_id; } ofputil_put_ofp15_bucket(bucket, bucket_id, gm->type, b, ofp_version); } ogm = ofpbuf_at_assert(b, start_ogm, sizeof *ogm); ogm->command = htons(gm->command); ogm->type = gm->type; ogm->group_id = htonl(gm->group_id); ogm->command_bucket_id = htonl(gm->command_bucket_id); ogm->bucket_array_len = htons(b->size - start_ogm - sizeof *ogm); /* Add group properties */ if (gm->props.selection_method[0]) { ofputil_put_group_prop_ntr_selection_method(ofp_version, &gm->props, b); } id_pool_destroy(bucket_ids); return b; }
0
[ "CWE-772" ]
ovs
77ad4225d125030420d897c873e4734ac708c66b
15,639,003,244,732,496,000,000,000,000,000,000,000
59
ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod(). Found by libFuzzer. Reported-by: Bhargava Shastry <[email protected]> Signed-off-by: Ben Pfaff <[email protected]> Acked-by: Justin Pettit <[email protected]>
void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) { /* Check for valid transport. */ SCTP_ASSERT(tp, "NULL transport", return); /* We should not be doing any RTO updates unless rto_pending is set. */ SCTP_ASSERT(tp->rto_pending, "rto_pending not set", return); if (tp->rttvar || tp->srtt) { struct net *net = sock_net(tp->asoc->base.sk); /* 6.3.1 C3) When a new RTT measurement R' is made, set * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'| * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R' */ /* Note: The above algorithm has been rewritten to * express rto_beta and rto_alpha as inverse powers * of two. * For example, assuming the default value of RTO.Alpha of * 1/8, rto_alpha would be expressed as 3. */ tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta) + (((__u32)abs64((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta); tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha) + (rtt >> net->sctp.rto_alpha); } else { /* 6.3.1 C2) When the first RTT measurement R is made, set * SRTT <- R, RTTVAR <- R/2. */ tp->srtt = rtt; tp->rttvar = rtt >> 1; } /* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY. */ if (tp->rttvar == 0) tp->rttvar = SCTP_CLOCK_GRANULARITY; /* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */ tp->rto = tp->srtt + (tp->rttvar << 2); /* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min * seconds then it is rounded up to RTO.Min seconds. */ if (tp->rto < tp->asoc->rto_min) tp->rto = tp->asoc->rto_min; /* 6.3.1 C7) A maximum value may be placed on RTO provided it is * at least RTO.max seconds. */ if (tp->rto > tp->asoc->rto_max) tp->rto = tp->asoc->rto_max; tp->rtt = rtt; /* Reset rto_pending so that a new RTT measurement is started when a * new data chunk is sent. */ tp->rto_pending = 0; SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d " "rttvar: %d, rto: %ld\n", __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto); }
1
[]
linux
196d67593439b03088913227093e374235596e33
41,868,277,155,663,800,000,000,000,000,000,000,000
65
sctp: Add support for per-association statistics via a new SCTP_GET_ASSOC_STATS call The current SCTP stack is lacking a mechanism to have per-association statistics. This is an implementation modeled after OpenSolaris' SCTP_GET_ASSOC_STATS. Userspace part will follow on lksctp if/when there is a general ACK on this. V4: - Move ipackets++ before q->immediate.func() for consistency reasons - Move sctp_max_rto() to the end of sctp_transport_update_rto() to avoid returning bogus RTO values - return asoc->rto_min when max_obs_rto value has not changed V3: - Increase ictrlchunks in sctp_assoc_bh_rcv() as well - Move ipackets++ to sctp_inq_push() - return 0 when no rto updates took place since the last call V2: - Implement partial retrieval of stat struct to cope with future expansion - Kill the rtxpackets counter as it cannot be precise anyway - Rename outseqtsns to outofseqtsns to make it clearer that these are out of sequence unexpected TSNs - Move asoc->ipackets++ under a lock to avoid potential miscounts - Fold asoc->opackets++ into the already existing asoc check - Kill unneeded (q->asoc) test when increasing rtxchunks - Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0) - Don't count SHUTDOWNs as SACKs - Move SCTP_GET_ASSOC_STATS to the private space API - Adjust the len check in sctp_getsockopt_assoc_stats() to allow for future struct growth - Move association statistics into their own struct - Update idupchunks when we send a SACK with dup TSNs - return min_rto in max_rto when RTO has not changed. Also return the transport when max_rto last changed. Signed-off-by: Michele Baldessari <[email protected]> Acked-by: Vlad Yasevich <[email protected]> Signed-off-by: David S. Miller <[email protected]>
MODRET auth_pre_pass(cmd_rec *cmd) { const char *user; char *displaylogin; pr_auth_endpwent(cmd->tmp_pool); pr_auth_endgrent(cmd->tmp_pool); /* Handle cases where PASS might be sent before USER. */ user = pr_table_get(session.notes, "mod_auth.orig-user", NULL); if (user != NULL) { config_rec *c; c = find_config(main_server->conf, CONF_PARAM, "AllowEmptyPasswords", FALSE); if (c == NULL) { const char *anon_user; config_rec *anon_config; /* Since we have not authenticated yet, we cannot use the TOPLEVEL_CONF * macro to handle <Anonymous> sections. So we do it manually. */ anon_user = pstrdup(cmd->tmp_pool, user); anon_config = pr_auth_get_anon_config(cmd->tmp_pool, &anon_user, NULL, NULL); if (anon_config != NULL) { c = find_config(anon_config->subset, CONF_PARAM, "AllowEmptyPasswords", FALSE); } } if (c != NULL) { int allow_empty_passwords; allow_empty_passwords = *((int *) c->argv[0]); if (allow_empty_passwords == FALSE) { size_t passwd_len = 0; if (cmd->argc > 1) { if (cmd->arg != NULL) { passwd_len = strlen(cmd->arg); } } /* Make sure to NOT enforce 'AllowEmptyPasswords off' if e.g. * the AllowDotLogin TLSOption is in effect. */ if (cmd->argc == 1 || passwd_len == 0) { if (session.auth_mech == NULL || strcmp(session.auth_mech, "mod_tls.c") != 0) { pr_log_debug(DEBUG5, "Refusing empty password from user '%s' (AllowEmptyPasswords " "false)", user); pr_log_auth(PR_LOG_NOTICE, "Refusing empty password from user '%s'", user); pr_event_generate("mod_auth.empty-password", user); pr_response_add_err(R_501, _("Login incorrect.")); return PR_ERROR(cmd); } pr_log_debug(DEBUG9, "%s", "'AllowEmptyPasswords off' in effect, " "BUT client authenticated via the AllowDotLogin TLSOption"); } } } } /* Look for a DisplayLogin file which has an absolute path. If we find one, * open a filehandle, such that that file can be displayed even if the * session is chrooted. DisplayLogin files with relative paths will be * handled after chroot, preserving the old behavior. */ displaylogin = get_param_ptr(TOPLEVEL_CONF, "DisplayLogin", FALSE); if (displaylogin && *displaylogin == '/') { struct stat st; displaylogin_fh = pr_fsio_open(displaylogin, O_RDONLY); if (displaylogin_fh == NULL) { pr_log_debug(DEBUG6, "unable to open DisplayLogin file '%s': %s", displaylogin, strerror(errno)); } else { if (pr_fsio_fstat(displaylogin_fh, &st) < 0) { pr_log_debug(DEBUG6, "unable to stat DisplayLogin file '%s': %s", displaylogin, strerror(errno)); pr_fsio_close(displaylogin_fh); displaylogin_fh = NULL; } else { if (S_ISDIR(st.st_mode)) { errno = EISDIR; pr_log_debug(DEBUG6, "unable to use DisplayLogin file '%s': %s", displaylogin, strerror(errno)); pr_fsio_close(displaylogin_fh); displaylogin_fh = NULL; } } } } return PR_DECLINED(cmd); }
0
[ "CWE-59", "CWE-295" ]
proftpd
349addc3be4fcdad9bd4ec01ad1ccd916c898ed8
225,287,114,285,020,780,000,000,000,000,000,000,000
106
Walk the entire DefaultRoot path, checking for symlinks of any component, when AllowChrootSymlinks is disabled.
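A reduced sketch of the per-component walk the message describes (error handling is collapsed into a boolean; ProFTPD's own code is more involved): lstat() every prefix of the path and refuse if any component is a symlink.

#include <limits.h>
#include <stdbool.h>
#include <string.h>
#include <sys/stat.h>

static bool path_has_symlink(const char *path)
{
    char buf[PATH_MAX];
    struct stat st;
    size_t len = strlen(path);

    if (len == 0 || len >= sizeof(buf))
        return true;               /* treat unusable paths as unsafe */
    memcpy(buf, path, len + 1);

    for (char *p = buf + 1; *p; p++) {
        if (*p != '/')
            continue;
        *p = '\0';                 /* test the prefix ending here */
        if (lstat(buf, &st) != 0 || S_ISLNK(st.st_mode))
            return true;
        *p = '/';
    }
    return lstat(buf, &st) != 0 || S_ISLNK(st.st_mode); /* and the full path */
}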
void CLua::vfnreturns(const char *format, va_list args) { lua_State *ls = _state; int nrets = return_count(ls, format); int sp = -nrets - 1; const char *gs = strchr(format, '>'); if (gs) format = gs + 1; else if ((gs = strchr(format, ':'))) format = gs + 1; for (const char *run = format; *run; ++run) { char argtype = *run; ++sp; switch (argtype) { case 'u': if (lua_islightuserdata(ls, sp)) *(va_arg(args, void**)) = lua_touserdata(ls, sp); break; case 'd': if (lua_isnumber(ls, sp)) *(va_arg(args, int*)) = luaL_safe_checkint(ls, sp); break; case 'b': *(va_arg(args, bool *)) = lua_toboolean(ls, sp); break; case 's': { const char *s = lua_tostring(ls, sp); if (s) *(va_arg(args, string *)) = s; break; } default: break; } } // Pop args off the stack lua_pop(ls, nrets); }
0
[ "CWE-434" ]
crawl
fc522ff6eb1bbb85e3de60c60a45762571e48c28
25,416,394,409,691,470,000,000,000,000,000,000,000
44
Disable lua load(), loadstring() bytecode loading
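One standard way to get the effect the message describes, sketched against the stock Lua C API: Lua 5.2 and later take a mode argument ("t" = text only), while older versions need a manual check for the bytecode signature byte. The wrapper name is hypothetical.

#include <lua.h>
#include <lauxlib.h>

static int load_text_only(lua_State *L, const char *buf, size_t len, const char *name)
{
#if LUA_VERSION_NUM >= 502
    return luaL_loadbufferx(L, buf, len, name, "t"); /* rejects bytecode */
#else
    if (len > 0 && buf[0] == LUA_SIGNATURE[0])
        return LUA_ERRSYNTAX;      /* looks like precompiled bytecode: refuse */
    return luaL_loadbuffer(L, buf, len, name);
#endif
}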
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct inode *inode = vma->vm_file->f_path.dentry->d_inode; int error; int ret = VM_FAULT_LOCKED; error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); if (error) return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS); if (ret & VM_FAULT_MAJOR) { count_vm_event(PGMAJFAULT); mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); } return ret; }
0
[ "CWE-399" ]
linux
5f00110f7273f9ff04ac69a5f85bb535a4fd0987
230,010,770,088,162,840,000,000,000,000,000,000,000
16
tmpfs: fix use-after-free of mempolicy object The tmpfs remount logic preserves filesystem mempolicy if the mpol=M option is not specified in the remount request. A new policy can be specified if mpol=M is given. Before this patch remounting an mpol bound tmpfs without specifying mpol= mount option in the remount request would set the filesystem's mempolicy object to a freed mempolicy object. To reproduce the problem boot a DEBUG_PAGEALLOC kernel and run: # mkdir /tmp/x # mount -t tmpfs -o size=100M,mpol=interleave nodev /tmp/x # grep /tmp/x /proc/mounts nodev /tmp/x tmpfs rw,relatime,size=102400k,mpol=interleave:0-3 0 0 # mount -o remount,size=200M nodev /tmp/x # grep /tmp/x /proc/mounts nodev /tmp/x tmpfs rw,relatime,size=204800k,mpol=??? 0 0 # note ? garbage in mpol=... output above # dd if=/dev/zero of=/tmp/x/f count=1 # panic here Panic: BUG: unable to handle kernel NULL pointer dereference at (null) IP: [< (null)>] (null) [...] Oops: 0010 [#1] SMP DEBUG_PAGEALLOC Call Trace: mpol_shared_policy_init+0xa5/0x160 shmem_get_inode+0x209/0x270 shmem_mknod+0x3e/0xf0 shmem_create+0x18/0x20 vfs_create+0xb5/0x130 do_last+0x9a1/0xea0 path_openat+0xb3/0x4d0 do_filp_open+0x42/0xa0 do_sys_open+0xfe/0x1e0 compat_sys_open+0x1b/0x20 cstar_dispatch+0x7/0x1f Non-debug kernels will not crash immediately because referencing the dangling mpol will not cause a fault. Instead the filesystem will reference a freed mempolicy object, which will cause unpredictable behavior. The problem boils down to a dropped mpol reference below if shmem_parse_options() does not allocate a new mpol: config = *sbinfo shmem_parse_options(data, &config, true) mpol_put(sbinfo->mpol) sbinfo->mpol = config.mpol /* BUG: saves unreferenced mpol */ This patch avoids the crash by not releasing the mempolicy if shmem_parse_options() doesn't create a new mpol. How far back does this issue go? I see it in both 2.6.36 and 3.3. I did not look back further. Signed-off-by: Greg Thelen <[email protected]> Acked-by: Hugh Dickins <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
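The bug and its fix reduced to a toy reference-handling model (nothing here is the actual tmpfs code): drop the old policy only when the parser actually produced a replacement, so the live pointer is never left dangling.

struct toy_mpol { int refs; };

static void toy_mpol_put(struct toy_mpol *p)
{
    if (p)
        p->refs--; /* stand-in for the real refcount drop/free */
}

/* parsed may be NULL when the remount request carried no mpol= option */
static void remount_fixed(struct toy_mpol **current_mpol, struct toy_mpol *parsed)
{
    if (parsed) {                /* mpol= given: replace the old policy */
        toy_mpol_put(*current_mpol);
        *current_mpol = parsed;
    }
    /* parsed == NULL: deliberately keep *current_mpol referenced */
}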
mono_method_get_flags (MonoMethod *method, guint32 *iflags) { if (iflags) *iflags = method->iflags; return method->flags; }
0
[]
mono
8e890a3bf80a4620e417814dc14886b1bbd17625
82,637,891,546,577,800,000,000,000,000,000,000,000
6
Search for dllimported shared libs in the base directory, not cwd. * loader.c: we don't search the current directory anymore for shared libraries referenced in DllImport attributes, as it has a slight security risk. We search in the same directory where the referencing image was loaded from, instead. Fixes bug# 641915.
static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate) { struct f2fs_fault_info *ffi = &sbi->fault_info; if (rate) { atomic_set(&ffi->inject_ops, 0); ffi->inject_rate = rate; ffi->inject_type = (1 << FAULT_MAX) - 1; } else { memset(ffi, 0, sizeof(struct f2fs_fault_info)); } }
0
[ "CWE-284" ]
linux
b9dd46188edc2f0d1f37328637860bb65a771124
164,178,669,314,177,160,000,000,000,000,000,000,000
13
f2fs: sanity check segment count F2FS uses 4 bytes to represent a block address. As a result, the supported disk size is 16 TB, which equals 16 * 1024 * 1024 / 2 segments. Signed-off-by: Jin Qian <[email protected]> Signed-off-by: Jaegeuk Kim <[email protected]>
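The arithmetic from the message turned into a sketch of the sanity check; the constant is exactly the figure quoted (16 * 1024 * 1024 / 2 segments of 2 MB each, i.e. 16 TB).

#include <stdbool.h>
#include <stdint.h>

#define TOY_MAX_SEGMENTS (16ULL * 1024 * 1024 / 2) /* from the message */

static bool segment_count_valid(uint64_t segment_count)
{
    return segment_count != 0 && segment_count <= TOY_MAX_SEGMENTS;
}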
void Type_Signature_Free(struct _cms_typehandler_struct* self, void* Ptr) { _cmsFree(self ->ContextID, Ptr); }
0
[]
Little-CMS
41d222df1bc6188131a8f46c32eab0a4d4cdf1b6
187,504,789,128,200,740,000,000,000,000,000,000,000
4
Memory squeezing fix: lcms2 cmsPipeline construction When creating a new pipeline, lcms would often try to allocate a stage and pass it to cmsPipelineInsertStage without checking whether the allocation succeeded. cmsPipelineInsertStage would then assert (or crash) if it had not. The fix here is to change cmsPipelineInsertStage to check and return an error value. All calling code is then checked to test this return value and cope.
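The pattern the message describes, with stand-in types rather than the lcms2 API: the insert operation reports allocation failure instead of asserting, and every caller checks the result.

#include <stdbool.h>
#include <stdlib.h>

struct toy_stage { struct toy_stage *next; };
struct toy_pipeline { struct toy_stage *head; };

static bool toy_insert_stage(struct toy_pipeline *p, struct toy_stage *s)
{
    if (s == NULL)
        return false;          /* the caller's allocation failed */
    s->next = p->head;
    p->head = s;
    return true;
}

/* Caller side: propagate the failure instead of crashing later. */
static bool toy_build(struct toy_pipeline *p)
{
    struct toy_stage *s = calloc(1, sizeof(*s));
    if (!toy_insert_stage(p, s)) {
        free(s);
        return false;
    }
    return true;
}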
void clearRemainingGauge() { if (useRetryBudget()) { remaining_.set(0); } }
0
[ "CWE-400" ]
envoy
dfddb529e914d794ac552e906b13d71233609bf7
179,379,306,241,607,420,000,000,000,000,000,000,000
5
listener: Add configurable accepted connection limits (#153) Add support for per-listener limits on accepted connections. Signed-off-by: Tony Allen <[email protected]>
static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) { struct kvm_lapic_irq lapic_irq; lapic_irq.shorthand = APIC_DEST_NOSHORT; lapic_irq.dest_mode = APIC_DEST_PHYSICAL; lapic_irq.level = 0; lapic_irq.dest_id = apicid; lapic_irq.msi_redir_hint = false; lapic_irq.delivery_mode = APIC_DM_REMRD; kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL); }
0
[ "CWE-476" ]
linux
55749769fe608fa3f4a075e42e89d237c8e37637
37,595,694,238,721,890,000,000,000,000,000,000,000
13
KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty When dirty ring logging is enabled, any dirty logging without an active vCPU context will cause a kernel oops. But we've already declared that the shared_info page doesn't get dirty tracking anyway, since it would be kind of insane to mark it dirty every time we deliver an event channel interrupt. Userspace is supposed to just assume it's always dirty any time a vCPU can run or event channels are routed. So stop using the generic kvm_write_wall_clock() and just write directly through the gfn_to_pfn_cache that we already have set up. We can make kvm_write_wall_clock() static in x86.c again now, but let's not remove the 'sec_hi_ofs' argument even though it's not used yet. At some point we *will* want to use that for KVM guests too. Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region") Reported-by: butt3rflyh4ck <[email protected]> Signed-off-by: David Woodhouse <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static ssize_t ad_read_rsrc_adouble(vfs_handle_struct *handle, struct adouble *ad, const struct smb_filename *smb_fname) { size_t to_read; ssize_t len; int ret; bool ok; ret = SMB_VFS_NEXT_FSTAT(handle, ad->ad_fsp, &ad->ad_fsp->fsp_name->st); if (ret != 0) { DBG_ERR("fstat [%s] failed: %s\n", fsp_str_dbg(ad->ad_fsp), strerror(errno)); return -1; } to_read = ad->ad_fsp->fsp_name->st.st_ex_size; if (to_read > AD_XATTR_MAX_HDR_SIZE) { to_read = AD_XATTR_MAX_HDR_SIZE; } len = SMB_VFS_NEXT_PREAD(handle, ad->ad_fsp, ad->ad_data, to_read, 0); if (len != to_read) { DBG_NOTICE("%s %s: bad size: %zd\n", smb_fname->base_name, strerror(errno), len); return -1; } /* Now parse entries */ ok = ad_unpack(ad, ADEID_NUM_DOT_UND, ad->ad_fsp->fsp_name->st.st_ex_size); if (!ok) { DBG_ERR("invalid AppleDouble resource %s\n", smb_fname->base_name); errno = EINVAL; return -1; } if ((ad_getentryoff(ad, ADEID_FINDERI) != ADEDOFF_FINDERI_DOT_UND) || (ad_getentrylen(ad, ADEID_FINDERI) < ADEDLEN_FINDERI) || (ad_getentryoff(ad, ADEID_RFORK) < ADEDOFF_RFORK_DOT_UND)) { DBG_ERR("invalid AppleDouble resource %s\n", smb_fname->base_name); errno = EINVAL; return -1; } return len; }
0
[ "CWE-787" ]
samba
0e2b3fb982d1f53d111e10d9197ed2ec2e13712c
315,298,529,720,321,040,000,000,000,000,000,000,000
55
CVE-2021-44142: libadouble: harden parsing code BUG: https://bugzilla.samba.org/show_bug.cgi?id=14914 Signed-off-by: Ralph Boehme <[email protected]> Reviewed-by: Jeremy Allison <[email protected]>
static int sas_configure_parent(struct domain_device *parent, struct domain_device *child, u8 *sas_addr, int include) { struct expander_device *ex_parent = &parent->ex_dev; int res = 0; int i; if (parent->parent) { res = sas_configure_parent(parent->parent, parent, sas_addr, include); if (res) return res; } if (ex_parent->conf_route_table == 0) { pr_debug("ex %016llx has self-configuring routing table\n", SAS_ADDR(parent->sas_addr)); return 0; } for (i = 0; i < ex_parent->num_phys; i++) { struct ex_phy *phy = &ex_parent->ex_phy[i]; if ((phy->routing_attr == TABLE_ROUTING) && (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(child->sas_addr))) { res = sas_configure_phy(parent, i, sas_addr, include); if (res) return res; } } return res; }
0
[ "CWE-401" ]
linux
3b0541791453fbe7f42867e310e0c9eb6295364d
17,380,916,258,340,153,000,000,000,000,000,000,000
35
scsi: libsas: delete sas port if expander discover failed The sas_port(phy->port) allocated in sas_ex_discover_expander() will not be deleted when the expander failed to discover. This will cause resource leak and a further issue of kernel BUG like below: [159785.843156] port-2:17:29: trying to add phy phy-2:17:29 fails: it's already part of another port [159785.852144] ------------[ cut here ]------------ [159785.856833] kernel BUG at drivers/scsi/scsi_transport_sas.c:1086! [159785.863000] Internal error: Oops - BUG: 0 [#1] SMP [159785.867866] CPU: 39 PID: 16993 Comm: kworker/u96:2 Tainted: G W OE 4.19.25-vhulk1901.1.0.h111.aarch64 #1 [159785.878458] Hardware name: Huawei Technologies Co., Ltd. Hi1620EVBCS/Hi1620EVBCS, BIOS Hi1620 CS B070 1P TA 03/21/2019 [159785.889231] Workqueue: 0000:74:02.0_disco_q sas_discover_domain [159785.895224] pstate: 40c00009 (nZcv daif +PAN +UAO) [159785.900094] pc : sas_port_add_phy+0x188/0x1b8 [159785.904524] lr : sas_port_add_phy+0x188/0x1b8 [159785.908952] sp : ffff0001120e3b80 [159785.912341] x29: ffff0001120e3b80 x28: 0000000000000000 [159785.917727] x27: ffff802ade8f5400 x26: ffff0000681b7560 [159785.923111] x25: ffff802adf11a800 x24: ffff0000680e8000 [159785.928496] x23: ffff802ade8f5728 x22: ffff802ade8f5708 [159785.933880] x21: ffff802adea2db40 x20: ffff802ade8f5400 [159785.939264] x19: ffff802adea2d800 x18: 0000000000000010 [159785.944649] x17: 00000000821bf734 x16: ffff00006714faa0 [159785.950033] x15: ffff0000e8ab4ecf x14: 7261702079646165 [159785.955417] x13: 726c612073277469 x12: ffff00006887b830 [159785.960802] x11: ffff00006773eaa0 x10: 7968702079687020 [159785.966186] x9 : 0000000000002453 x8 : 726f702072656874 [159785.971570] x7 : 6f6e6120666f2074 x6 : ffff802bcfb21290 [159785.976955] x5 : ffff802bcfb21290 x4 : 0000000000000000 [159785.982339] x3 : ffff802bcfb298c8 x2 : 337752b234c2ab00 [159785.987723] x1 : 337752b234c2ab00 x0 : 0000000000000000 [159785.993108] Process kworker/u96:2 (pid: 16993, stack limit = 0x0000000072dae094) [159786.000576] Call trace: [159786.003097] sas_port_add_phy+0x188/0x1b8 [159786.007179] sas_ex_get_linkrate.isra.5+0x134/0x140 [159786.012130] sas_ex_discover_expander+0x128/0x408 [159786.016906] sas_ex_discover_dev+0x218/0x4c8 [159786.021249] sas_ex_discover_devices+0x9c/0x1a8 [159786.025852] sas_discover_root_expander+0x134/0x160 [159786.030802] sas_discover_domain+0x1b8/0x1e8 [159786.035148] process_one_work+0x1b4/0x3f8 [159786.039230] worker_thread+0x54/0x470 [159786.042967] kthread+0x134/0x138 [159786.046269] ret_from_fork+0x10/0x18 [159786.049918] Code: 91322300 f0004402 91178042 97fe4c9b (d4210000) [159786.056083] Modules linked in: hns3_enet_ut(OE) hclge(OE) hnae3(OE) hisi_sas_test_hw(OE) hisi_sas_test_main(OE) serdes(OE) [159786.067202] ---[ end trace 03622b9e2d99e196 ]--- [159786.071893] Kernel panic - not syncing: Fatal exception [159786.077190] SMP: stopping secondary CPUs [159786.081192] Kernel Offset: disabled [159786.084753] CPU features: 0x2,a2a00a38 Fixes: 2908d778ab3e ("[SCSI] aic94xx: new driver") Reported-by: Jian Luo <[email protected]> Signed-off-by: Jason Yan <[email protected]> CC: John Garry <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
TEST(ExpressionStrLenCP, ComputesLengthOfStringWithSpecialCharacters) { assertExpectedResults("$strLenCP", {{{Value("ºabøåß"_sd)}, Value(6)}}); }
0
[ "CWE-835" ]
mongo
0a076417d1d7fba3632b73349a1fd29a83e68816
337,765,666,037,329,120,000,000,000,000,000,000,000
3
SERVER-38070 fix infinite loop in agg expression
static void read_args_from_file(char const* filename, std::vector<PointerHolder<char> >& new_argv) { std::list<std::string> lines; if (strcmp(filename, "-") == 0) { QTC::TC("qpdf", "qpdf read args from stdin"); lines = QUtil::read_lines_from_file(std::cin); } else { QTC::TC("qpdf", "qpdf read args from file"); lines = QUtil::read_lines_from_file(filename); } for (std::list<std::string>::iterator iter = lines.begin(); iter != lines.end(); ++iter) { new_argv.push_back( PointerHolder<char>(true, QUtil::copy_string((*iter).c_str()))); } }
0
[ "CWE-125" ]
qpdf
1868a10f8b06631362618bfc85ca8646da4b4b71
267,209,368,062,678,500,000,000,000,000,000,000,000
21
Replace all atoi calls with QUtil::string_to_int The latter catches underflow/overflow.
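The checked-conversion pattern the message prefers over atoi(), sketched in C with strtol (QUtil::string_to_int itself is C++ and not reproduced here): range errors and trailing garbage both turn into an explicit failure instead of silent wraparound.

#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>

static bool string_to_int_checked(const char *s, int *out)
{
    char *end;
    long v;

    errno = 0;
    v = strtol(s, &end, 10);
    if (errno == ERANGE || v < INT_MIN || v > INT_MAX)
        return false;          /* overflow or underflow */
    if (end == s || *end != '\0')
        return false;          /* empty input or trailing junk */
    *out = (int)v;
    return true;
}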
static ssize_t bad_file_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos) { return -EIO; }
0
[]
linux-2.6
be6aab0e9fa6d3c6d75aa1e38ac972d8b4ee82b8
247,587,591,015,439,440,000,000,000,000,000,000
5
[PATCH] fix memory corruption from misinterpreted bad_inode_ops return values CVE-2006-5753 is for a case where an inode can be marked bad, switching the ops to bad_inode_ops, which are all connected as: static int return_EIO(void) { return -EIO; } #define EIO_ERROR ((void *) (return_EIO)) static struct inode_operations bad_inode_ops = { .create = bad_inode_create ...etc... The problem here is that the void cast causes return types to not be promoted, and for ops such as listxattr which expect more than 32 bits of return value, the 32-bit -EIO is interpreted as a large positive 64-bit number, i.e. 0x00000000fffffffa instead of 0xfffffffa. This goes particularly badly when the return value is taken as a number of bytes to copy into, say, a user's buffer for example... I originally had coded up the fix by creating a return_EIO_<TYPE> macro for each return type, like this: static int return_EIO_int(void) { return -EIO; } #define EIO_ERROR_INT ((void *) (return_EIO_int)) static struct inode_operations bad_inode_ops = { .create = EIO_ERROR_INT, ...etc... but Al felt that it was probably better to create an EIO-returner for each actual op signature. Since so few ops share a signature, I just went ahead & created an EIO function for each individual file & inode op that returns a value. Signed-off-by: Eric Sandeen <[email protected]> Cc: Al Viro <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
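The fix shape the message settles on, sketched with hypothetical names: one EIO stub per return signature, so every error value is produced at its full width and a 32-bit -EIO can never be reinterpreted as a large positive 64-bit byte count.

#include <sys/types.h>

static int return_EIO_int(void) { return -5; /* -EIO */ }
static ssize_t return_EIO_ssize_t(void) { return -5; }

struct toy_file_ops {
    int (*open_op)(void);
    ssize_t (*read_op)(void);
};

static const struct toy_file_ops toy_bad_file_ops = {
    .open_op = return_EIO_int,
    .read_op = return_EIO_ssize_t, /* a 64-bit -5, never 0x00000000fffffffb */
};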
static void ldapsrv_call_wait_done(struct tevent_req *subreq) { struct ldapsrv_call *call = tevent_req_callback_data(subreq, struct ldapsrv_call); struct ldapsrv_connection *conn = call->conn; NTSTATUS status; conn->active_call = NULL; status = call->wait_recv(subreq); TALLOC_FREE(subreq); if (!NT_STATUS_IS_OK(status)) { const char *reason; reason = talloc_asprintf(call, "ldapsrv_call_wait_done: " "call->wait_recv() - %s", nt_errstr(status)); if (reason == NULL) { reason = nt_errstr(status); } ldapsrv_terminate_connection(conn, reason); return; } ldapsrv_call_writev_start(call); }
0
[ "CWE-703" ]
samba
f9b2267c6eb8138fc94df7a138ad5d87526f1d79
245,179,838,042,033,900,000,000,000,000,000,000,000
28
CVE-2021-3670 ldap_server: Ensure value of MaxQueryDuration is greater than zero BUG: https://bugzilla.samba.org/show_bug.cgi?id=14694 Signed-off-by: Joseph Sutton <[email protected]> Reviewed-by: Douglas Bagnall <[email protected]> (cherry picked from commit e1ab0c43629686d1d2c0b0b2bcdc90057a792049)
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	if (netdev_is_rx_handler_busy(dev))
		return -EBUSY;

	if (dev->priv_flags & IFF_NO_RX_HANDLER)
		return -EINVAL;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
0
[ "CWE-416" ]
linux
a4270d6795b0580287453ea55974d948393e66ef
303,677,623,376,969,500,000,000,000,000,000,000,000
16
net-gro: fix use-after-free read in napi_gro_frags() If a network driver provides to napi_gro_frags() an skb with a page fragment of exactly 14 bytes, the call to gro_pull_from_frag0() will 'consume' the fragment by calling skb_frag_unref(skb, 0), and the page might be freed and reused. Reading eth->h_proto at the end of napi_frags_skb() might read mangled data, or crash under specific debugging features. BUG: KASAN: use-after-free in napi_frags_skb net/core/dev.c:5833 [inline] BUG: KASAN: use-after-free in napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841 Read of size 2 at addr ffff88809366840c by task syz-executor599/8957 CPU: 1 PID: 8957 Comm: syz-executor599 Not tainted 5.2.0-rc1+ #32 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x172/0x1f0 lib/dump_stack.c:113 print_address_description.cold+0x7c/0x20d mm/kasan/report.c:188 __kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317 kasan_report+0x12/0x20 mm/kasan/common.c:614 __asan_report_load_n_noabort+0xf/0x20 mm/kasan/generic_report.c:142 napi_frags_skb net/core/dev.c:5833 [inline] napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841 tun_get_user+0x2f3c/0x3ff0 drivers/net/tun.c:1991 tun_chr_write_iter+0xbd/0x156 drivers/net/tun.c:2037 call_write_iter include/linux/fs.h:1872 [inline] do_iter_readv_writev+0x5f8/0x8f0 fs/read_write.c:693 do_iter_write fs/read_write.c:970 [inline] do_iter_write+0x184/0x610 fs/read_write.c:951 vfs_writev+0x1b3/0x2f0 fs/read_write.c:1015 do_writev+0x15b/0x330 fs/read_write.c:1058 Fixes: a50e233c50db ("net-gro: restore frag0 optimization") Signed-off-by: Eric Dumazet <[email protected]> Reported-by: syzbot <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static void *established_get_first(struct seq_file *seq) { struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); void *rc = NULL; st->offset = 0; for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) { struct sock *sk; struct hlist_nulls_node *node; struct inet_timewait_sock *tw; spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket); /* Lockless fast path for the common case of empty buckets */ if (empty_bucket(st)) continue; spin_lock_bh(lock); sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { if (sk->sk_family != st->family || !net_eq(sock_net(sk), net)) { continue; } rc = sk; goto out; } st->state = TCP_SEQ_STATE_TIME_WAIT; inet_twsk_for_each(tw, node, &tcp_hashinfo.ehash[st->bucket].twchain) { if (tw->tw_family != st->family || !net_eq(twsk_net(tw), net)) { continue; } rc = tw; goto out; } spin_unlock_bh(lock); st->state = TCP_SEQ_STATE_ESTABLISHED; } out: return rc; }
0
[ "CWE-362" ]
linux-2.6
f6d8bd051c391c1c0458a30b2a7abcd939329259
243,770,962,506,090,900,000,000,000,000,000,000,000
42
inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options. The problem is that ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change the inet->opt pointer and free the old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We can't insert an rcu_head in struct ip_options since it's included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <[email protected]> Cc: Herbert Xu <[email protected]> Signed-off-by: David S. Miller <[email protected]>
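The shape of the ip_options_rcu wrapper the message introduces, as a structural sketch (field sizes and names are illustrative, not the kernel's): since the bare options struct must keep fitting in skb->cb[], the rcu_head lives in an outer wrapper used only for the socket's RCU-managed copy.

struct toy_rcu_head {
    struct toy_rcu_head *next;
    void (*func)(struct toy_rcu_head *);
};

struct toy_ip_options {
    unsigned char optlen;
    unsigned char data[40]; /* illustrative: max IPv4 options size */
};

struct toy_ip_options_rcu {
    struct toy_rcu_head rcu;   /* lets the old copy be freed after a grace period */
    struct toy_ip_options opt; /* kept last: skb->cb[] users embed the bare struct */
};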
static int chunk_stripes_range_filter(struct extent_buffer *leaf, struct btrfs_chunk *chunk, struct btrfs_balance_args *bargs) { int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); if (bargs->stripes_min <= num_stripes && num_stripes <= bargs->stripes_max) return 0; return 1; }
0
[ "CWE-476", "CWE-284" ]
linux
09ba3bc9dd150457c506e4661380a6183af651c1
191,227,493,530,170,600,000,000,000,000,000,000,000
12
btrfs: merge btrfs_find_device and find_device Both btrfs_find_device() and find_device() do the same thing, except that the latter does not take the seed device into account in the device scanning context. We can merge them. Signed-off-by: Anand Jain <[email protected]> Reviewed-by: David Sterba <[email protected]> Signed-off-by: David Sterba <[email protected]>
static PHP_FUNCTION(xmlwriter_start_document) { zval *pind; xmlwriter_object *intern; xmlTextWriterPtr ptr; char *version = NULL, *enc = NULL, *alone = NULL; int version_len, enc_len, alone_len, retval; #ifdef ZEND_ENGINE_2 zval *this = getThis(); if (this) { if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s!s!s!", &version, &version_len, &enc, &enc_len, &alone, &alone_len) == FAILURE) { return; } XMLWRITER_FROM_OBJECT(intern, this); } else #endif { if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r|s!s!s!", &pind, &version, &version_len, &enc, &enc_len, &alone, &alone_len) == FAILURE) { return; } ZEND_FETCH_RESOURCE(intern,xmlwriter_object *, &pind, -1, "XMLWriter", le_xmlwriter); } ptr = intern->ptr; if (ptr) { retval = xmlTextWriterStartDocument(ptr, version, enc, alone); if (retval != -1) { RETURN_TRUE; } } RETURN_FALSE; }
0
[ "CWE-20" ]
php-src
52b93f0cfd3cba7ff98cc5198df6ca4f23865f80
198,401,531,111,818,860,000,000,000,000,000,000,000
36
Fixed bug #69353 (Missing null byte checks for paths in various PHP extensions)
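The check the message refers to, in miniature: a path taken from a counted string must not contain an embedded NUL byte, or everything after it silently vanishes when the buffer is handed to C filesystem APIs. A caller would pass the counted length, so "a\0b" is rejected even though strlen() would report 1.

#include <stdbool.h>
#include <string.h>

static bool path_has_embedded_nul(const char *buf, size_t len)
{
    return memchr(buf, '\0', len) != NULL;
}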
CudnnSupport::createRnnDescriptor( int num_layers, int hidden_size, int input_size, int cell_size, int batch_size, dnn::RnnInputMode input_mode, dnn::RnnDirectionMode direction_mode, dnn::RnnMode rnn_mode, dnn::DataType data_type, const dnn::AlgorithmConfig& algorithm_config, float dropout, uint64 seed, ScratchAllocator* state_allocator, bool use_padded_io) { // Setting up a cudnnRNNDescriptor requires a cuDNN handle, but because it's // not enqueueing anything into a stream, we pass in the null stream. auto cudnn = cudnn_->GetHandle(parent_, /*stream=*/nullptr); SE_ASSIGN_OR_RETURN( CudnnRnnDescriptor rnn_desc, CudnnRnnDescriptor::Create( cudnn, num_layers, hidden_size, input_size, cell_size, batch_size, ToCudnnRnnInputMode(input_mode), ToCudnnRnnDirectionMode(direction_mode), ToCudnnRnnMode(rnn_mode), ToCudnnDataType(data_type), GetRnnComputeType(data_type), algorithm_config, dropout, seed, state_allocator, use_padded_io)); return std::unique_ptr<dnn::RnnDescriptor>( new CudnnRnnDescriptor(std::move(rnn_desc))); }
0
[ "CWE-20" ]
tensorflow
14755416e364f17fb1870882fa778c7fec7f16e3
10,774,097,555,849,140,000,000,000,000,000,000,000
21
Prevent CHECK-fail in LSTM/GRU with zero-length input. PiperOrigin-RevId: 346239181 Change-Id: I5f233dbc076aab7bb4e31ba24f5abd4eaf99ea4f
int commonio_open (struct commonio_db *db, int mode) { char *buf; char *cp; char *line; struct commonio_entry *p; void *eptr = NULL; int flags = mode; size_t buflen; int fd; int saved_errno; mode &= ~O_CREAT; if ( db->isopen || ( (O_RDONLY != mode) && (O_RDWR != mode))) { errno = EINVAL; return 0; } db->readonly = (mode == O_RDONLY); if (!db->readonly && !db->locked) { errno = EACCES; return 0; } db->head = NULL; db->tail = NULL; db->cursor = NULL; db->changed = false; fd = open (db->filename, (db->readonly ? O_RDONLY : O_RDWR) | O_NOCTTY | O_NONBLOCK | O_NOFOLLOW); saved_errno = errno; db->fp = NULL; if (fd >= 0) { #ifdef WITH_TCB if (tcb_is_suspect (fd) != 0) { (void) close (fd); errno = EINVAL; return 0; } #endif /* WITH_TCB */ db->fp = fdopen (fd, db->readonly ? "r" : "r+"); saved_errno = errno; if (NULL == db->fp) { (void) close (fd); } } errno = saved_errno; /* * If O_CREAT was specified and the file didn't exist, it will be * created by commonio_close(). We have no entries to read yet. --marekm */ if (NULL == db->fp) { if (((flags & O_CREAT) != 0) && (ENOENT == errno)) { db->isopen = true; return 1; } return 0; } /* Do not inherit fd in spawned processes (e.g. nscd) */ fcntl (fileno (db->fp), F_SETFD, FD_CLOEXEC); buflen = BUFLEN; buf = (char *) malloc (buflen); if (NULL == buf) { goto cleanup_ENOMEM; } while (db->ops->fgets (buf, (int) buflen, db->fp) == buf) { while ( ((cp = strrchr (buf, '\n')) == NULL) && (feof (db->fp) == 0)) { size_t len; buflen += BUFLEN; cp = (char *) realloc (buf, buflen); if (NULL == cp) { goto cleanup_buf; } buf = cp; len = strlen (buf); if (db->ops->fgets (buf + len, (int) (buflen - len), db->fp) == NULL) { goto cleanup_buf; } } cp = strrchr (buf, '\n'); if (NULL != cp) { *cp = '\0'; } line = strdup (buf); if (NULL == line) { goto cleanup_buf; } if (name_is_nis (line)) { eptr = NULL; } else { eptr = db->ops->parse (line); if (NULL != eptr) { eptr = db->ops->dup (eptr); if (NULL == eptr) { goto cleanup_line; } } } p = (struct commonio_entry *) malloc (sizeof *p); if (NULL == p) { goto cleanup_entry; } p->eptr = eptr; p->line = line; p->changed = false; add_one_entry (db, p); } free (buf); if (ferror (db->fp) != 0) { goto cleanup_errno; } if ((NULL != db->ops->open_hook) && (db->ops->open_hook () == 0)) { goto cleanup_errno; } db->isopen = true; return 1; cleanup_entry: if (NULL != eptr) { db->ops->free (eptr); } cleanup_line: free (line); cleanup_buf: free (buf); cleanup_ENOMEM: errno = ENOMEM; cleanup_errno: saved_errno = errno; free_linked_list (db); fclose (db->fp); db->fp = NULL; errno = saved_errno; return 0; }
0
[ "CWE-119", "CWE-787" ]
shadow
954e3d2e7113e9ac06632aee3c69b8d818cc8952
122,117,509,460,420,730,000,000,000,000,000,000,000
156
Fix buffer overflow if NULL line is present in db. If ptr->line == NULL for an entry, the first cycle will exit, but the second one will happily write past the entries buffer. We actually do not want to exit the first cycle prematurely on ptr->line == NULL. Signed-off-by: Tomas Mraz <[email protected]>
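The invariant behind the fix, sketched as two passes over a hypothetical entry list: the pass that sizes the buffer and the pass that fills it must skip NULL lines with the same predicate, instead of one stopping early and the other running past the end.

#include <stddef.h>

struct toy_entry { const char *line; struct toy_entry *next; };

static size_t count_lines(const struct toy_entry *p)
{
    size_t n = 0;
    for (; p != NULL; p = p->next)
        if (p->line != NULL) /* skip, don't stop: later entries may be valid */
            n++;
    return n;
}

static void copy_lines(const struct toy_entry *p, const char **out)
{
    for (; p != NULL; p = p->next)
        if (p->line != NULL) /* the same predicate as count_lines() */
            *out++ = p->line;
}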
void setup_actions() { g["Definition"] = [&](const SemanticValues &sv, any &dt) { Data &data = *any_cast<Data *>(dt); auto is_macro = sv.choice() == 0; auto ignore = any_cast<bool>(sv[0]); auto name = any_cast<std::string>(sv[1]); std::vector<std::string> params; std::shared_ptr<Ope> ope; if (is_macro) { params = any_cast<std::vector<std::string>>(sv[2]); ope = any_cast<std::shared_ptr<Ope>>(sv[4]); if (sv.size() == 6) { data.instructions[name] = any_cast<Instruction>(sv[5]); } } else { ope = any_cast<std::shared_ptr<Ope>>(sv[3]); if (sv.size() == 5) { data.instructions[name] = any_cast<Instruction>(sv[4]); } } auto &grammar = *data.grammar; if (!grammar.count(name)) { auto &rule = grammar[name]; rule <= ope; rule.name = name; rule.s_ = sv.c_str(); rule.ignoreSemanticValue = ignore; rule.is_macro = is_macro; rule.params = params; if (data.start.empty()) { data.start = name; data.start_pos = sv.c_str(); } } else { data.duplicates.emplace_back(name, sv.c_str()); } }; g["Expression"] = [&](const SemanticValues &sv) { if (sv.size() == 1) { return any_cast<std::shared_ptr<Ope>>(sv[0]); } else { std::vector<std::shared_ptr<Ope>> opes; for (auto i = 0u; i < sv.size(); i++) { opes.emplace_back(any_cast<std::shared_ptr<Ope>>(sv[i])); } const std::shared_ptr<Ope> ope = std::make_shared<PrioritizedChoice>(opes); return ope; } }; g["Sequence"] = [&](const SemanticValues &sv) { if (sv.size() == 1) { return any_cast<std::shared_ptr<Ope>>(sv[0]); } else { std::vector<std::shared_ptr<Ope>> opes; for (const auto &x : sv) { opes.emplace_back(any_cast<std::shared_ptr<Ope>>(x)); } const std::shared_ptr<Ope> ope = std::make_shared<Sequence>(opes); return ope; } }; g["Prefix"] = [&](const SemanticValues &sv) { std::shared_ptr<Ope> ope; if (sv.size() == 1) { ope = any_cast<std::shared_ptr<Ope>>(sv[0]); } else { assert(sv.size() == 2); auto tok = any_cast<char>(sv[0]); ope = any_cast<std::shared_ptr<Ope>>(sv[1]); if (tok == '&') { ope = apd(ope); } else { // '!' 
ope = npd(ope); } } return ope; }; struct Loop { enum class Type { opt = 0, zom, oom, rep }; Type type; std::pair<size_t, size_t> range; }; g["Suffix"] = [&](const SemanticValues &sv) { auto ope = any_cast<std::shared_ptr<Ope>>(sv[0]); if (sv.size() == 1) { return ope; } else { assert(sv.size() == 2); auto loop = any_cast<Loop>(sv[1]); switch (loop.type) { case Loop::Type::opt: return opt(ope); case Loop::Type::zom: return zom(ope); case Loop::Type::oom: return oom(ope); default: // Regex-like repetition return rep(ope, loop.range.first, loop.range.second); } } }; g["Loop"] = [&](const SemanticValues &sv) { switch (sv.choice()) { case 0: // Option return Loop{Loop::Type::opt, std::pair<size_t, size_t>()}; case 1: // Zero or More return Loop{Loop::Type::zom, std::pair<size_t, size_t>()}; case 2: // One or More return Loop{Loop::Type::oom, std::pair<size_t, size_t>()}; default: // Regex-like repetition return Loop{Loop::Type::rep, any_cast<std::pair<size_t, size_t>>(sv[0])}; } }; g["RepetitionRange"] = [&](const SemanticValues &sv) { switch (sv.choice()) { case 0: { // Number COMMA Number auto min = any_cast<size_t>(sv[0]); auto max = any_cast<size_t>(sv[1]); return std::make_pair(min, max); } case 1: // Number COMMA return std::make_pair(any_cast<size_t>(sv[0]), std::numeric_limits<size_t>::max()); case 2: { // Number auto n = any_cast<size_t>(sv[0]); return std::make_pair(n, n); } default: // COMMA Number return std::make_pair(std::numeric_limits<size_t>::min(), any_cast<size_t>(sv[0])); } }; g["Number"] = [&](const SemanticValues &sv) { std::stringstream ss(sv.str()); size_t n; ss >> n; return n; }; g["Primary"] = [&](const SemanticValues &sv, any &dt) { Data &data = *any_cast<Data *>(dt); switch (sv.choice()) { case 0: // Macro Reference case 1: { // Reference auto is_macro = sv.choice() == 0; auto ignore = any_cast<bool>(sv[0]); const auto &ident = any_cast<std::string>(sv[1]); std::vector<std::shared_ptr<Ope>> args; if (is_macro) { args = any_cast<std::vector<std::shared_ptr<Ope>>>(sv[2]); } std::shared_ptr<Ope> ope = ref(*data.grammar, ident, sv.c_str(), is_macro, args); if (ignore) { return ign(ope); } else { return ope; } } case 2: { // (Expression) return any_cast<std::shared_ptr<Ope>>(sv[0]); } case 3: { // TokenBoundary return tok(any_cast<std::shared_ptr<Ope>>(sv[0])); } case 4: { // CaptureScope return csc(any_cast<std::shared_ptr<Ope>>(sv[0])); } case 5: { // Capture const auto &name = any_cast<std::string>(sv[0]); auto ope = any_cast<std::shared_ptr<Ope>>(sv[1]); return cap(ope, [name](const char *a_s, size_t a_n, Context &c) { auto &cs = c.capture_scope_stack[c.capture_scope_stack_size - 1]; cs[name] = std::string(a_s, a_n); }); } default: { return any_cast<std::shared_ptr<Ope>>(sv[0]); } } }; g["IdentCont"] = [](const SemanticValues &sv) { return std::string(sv.c_str(), sv.length()); }; g["Dictionary"] = [](const SemanticValues &sv) { auto items = sv.transform<std::string>(); return dic(items); }; g["Literal"] = [](const SemanticValues &sv) { const auto &tok = sv.tokens.front(); return lit(resolve_escape_sequence(tok.first, tok.second)); }; g["LiteralI"] = [](const SemanticValues &sv) { const auto &tok = sv.tokens.front(); return liti(resolve_escape_sequence(tok.first, tok.second)); }; g["LiteralD"] = [](const SemanticValues &sv) { auto &tok = sv.tokens.front(); return resolve_escape_sequence(tok.first, tok.second); }; g["Class"] = [](const SemanticValues &sv) { auto ranges = sv.transform<std::pair<char32_t, char32_t>>(); return cls(ranges); }; g["NegatedClass"] = [](const 
SemanticValues &sv) { auto ranges = sv.transform<std::pair<char32_t, char32_t>>(); return ncls(ranges); }; g["Range"] = [](const SemanticValues &sv) { switch (sv.choice()) { case 0: { auto s1 = any_cast<std::string>(sv[0]); auto s2 = any_cast<std::string>(sv[1]); auto cp1 = decode_codepoint(s1.c_str(), s1.length()); auto cp2 = decode_codepoint(s2.c_str(), s2.length()); return std::make_pair(cp1, cp2); } case 1: { auto s = any_cast<std::string>(sv[0]); auto cp = decode_codepoint(s.c_str(), s.length()); return std::make_pair(cp, cp); } } return std::make_pair<char32_t, char32_t>(0, 0); }; g["Char"] = [](const SemanticValues &sv) { return resolve_escape_sequence(sv.c_str(), sv.length()); }; g["AND"] = [](const SemanticValues &sv) { return *sv.c_str(); }; g["NOT"] = [](const SemanticValues &sv) { return *sv.c_str(); }; g["QUESTION"] = [](const SemanticValues &sv) { return *sv.c_str(); }; g["STAR"] = [](const SemanticValues &sv) { return *sv.c_str(); }; g["PLUS"] = [](const SemanticValues &sv) { return *sv.c_str(); }; g["DOT"] = [](const SemanticValues & /*sv*/) { return dot(); }; g["BeginCap"] = [](const SemanticValues &sv) { return sv.token(); }; g["BackRef"] = [&](const SemanticValues &sv) { return bkr(sv.token()); }; g["Ignore"] = [](const SemanticValues &sv) { return sv.size() > 0; }; g["Parameters"] = [](const SemanticValues &sv) { return sv.transform<std::string>(); }; g["Arguments"] = [](const SemanticValues &sv) { return sv.transform<std::shared_ptr<Ope>>(); }; g["PrecedenceClimbing"] = [](const SemanticValues &sv) { PrecedenceClimbing::BinOpeInfo binOpeInfo; size_t level = 1; for (auto v : sv) { auto tokens = any_cast<std::vector<std::string>>(v); auto assoc = tokens[0][0]; for (size_t i = 1; i < tokens.size(); i++) { const auto &tok = tokens[i]; binOpeInfo[tok] = std::make_pair(level, assoc); } level++; } Instruction instruction; instruction.type = "precedence"; instruction.data = binOpeInfo; return instruction; }; g["PrecedenceInfo"] = [](const SemanticValues &sv) { return sv.transform<std::string>(); }; g["PrecedenceOpe"] = [](const SemanticValues &sv) { return sv.token(); }; g["PrecedenceAssoc"] = [](const SemanticValues &sv) { return sv.token(); }; }
0
[ "CWE-125" ]
cpp-peglib
b3b29ce8f3acf3a32733d930105a17d7b0ba347e
300,960,907,312,263,200,000,000,000,000,000,000,000
292
Fix #122
static void __usbnet_status_stop_force(struct usbnet *dev) { if (dev->interrupt) { mutex_lock(&dev->interrupt_mutex); usb_kill_urb(dev->interrupt); dev_dbg(&dev->udev->dev, "killed interrupt URB for suspend\n"); mutex_unlock(&dev->interrupt_mutex); } }
0
[ "CWE-703" ]
linux
1666984c8625b3db19a9abc298931d35ab7bc64b
189,356,585,481,670,030,000,000,000,000,000,000,000
9
usbnet: cleanup after bind() in probe() If bind() succeeds but a later error forces probe() to bail out, work and a timer may already be scheduled. They must be killed. This fixes an error case related to the double free reported in http://www.spinics.net/lists/netdev/msg367669.html and needs to go on top of Linus' fix to cdc-ncm. Signed-off-by: Oliver Neukum <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int sort_key_read(MI_SORT_PARAM *sort_param, void *key) { int error; SORT_INFO *sort_info=sort_param->sort_info; MI_INFO *info=sort_info->info; DBUG_ENTER("sort_key_read"); if ((error=sort_get_next_record(sort_param))) DBUG_RETURN(error); if (info->state->records == sort_info->max_records) { mi_check_print_error(sort_info->param, "Key %d - Found too many records; Can't continue", sort_param->key+1); DBUG_RETURN(1); } sort_param->real_key_length= (info->s->rec_reflength+ _mi_make_key(info, sort_param->key, (uchar*) key, sort_param->record, sort_param->filepos)); #ifdef HAVE_purify bzero(key+sort_param->real_key_length, (sort_param->key_length-sort_param->real_key_length)); #endif DBUG_RETURN(sort_write_record(sort_param)); } /* sort_key_read */
0
[ "CWE-362" ]
mysql-server
4e5473862e6852b0f3802b0cd0c6fa10b5253291
290,656,092,752,887,060,000,000,000,000,000,000,000
26
Bug#24388746: PRIVILEGE ESCALATION AND RACE CONDITION USING CREATE TABLE During REPAIR TABLE of a MyISAM table, a temporary data file (.TMD) is created. When repair finishes, this file is renamed to the original .MYD file. The problem was that during this rename, we copied the stats from the old file to the new file with chmod/chown. If a user managed to replace the temporary file before chmod/chown was executed, it was possible to get an arbitrary file with the privileges of the mysql user. This patch fixes the problem by not copying stats from the old file to the new file. This is not needed as the new file was created with the correct stats. This fix only changes server behavior - external utilities such as myisamchk still do chmod/chown. No test case provided since the problem involves synchronization with file system operations.
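A minimal userspace sketch of the safe-replacement pattern the fix relies on (illustrative names, not the MyISAM code): the replacement file is created with the correct stats from the start, so no post-hoc chmod/chown on a racy path is needed.

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

/* Create the temporary file with its final mode up front (O_EXCL fails if
 * an attacker has planted a file or symlink at that path), then atomically
 * rename() it over the original.  No chmod()/chown() ever runs on a path
 * that another user may have swapped underneath us. */
int replace_file(const char *tmp_path, const char *final_path,
                 const void *data, size_t len, mode_t mode)
{
    int fd = open(tmp_path, O_WRONLY | O_CREAT | O_EXCL, mode);
    if (fd < 0)
        return -1;
    if (write(fd, data, len) != (ssize_t)len) {
        close(fd);
        unlink(tmp_path);
        return -1;
    }
    if (close(fd) != 0) {
        unlink(tmp_path);
        return -1;
    }
    return rename(tmp_path, final_path);
}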
void Compute(OpKernelContext* context) override { typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> EigenMatrixMap; constexpr int tensor_in_and_out_dims = 4; const Tensor& tensor_in = context->input(0); OP_REQUIRES(context, tensor_in.dims() == tensor_in_and_out_dims, errors::InvalidArgument("tensor_in must be 4-dimensional")); std::vector<int> input_size(tensor_in_and_out_dims); std::vector<int> output_size(tensor_in_and_out_dims); for (int i = 0; i < tensor_in_and_out_dims; ++i) { input_size[i] = tensor_in.dim_size(i); } // Output size. for (int i = 0; i < tensor_in_and_out_dims; ++i) { output_size[i] = static_cast<int>(std::floor(input_size[i] / pooling_ratio_[i])); DCHECK_GT(output_size[i], 0); } // Generate pooling sequence. std::vector<int64> row_cum_seq; std::vector<int64> col_cum_seq; GuardedPhiloxRandom generator; generator.Init(seed_, seed2_); row_cum_seq = GeneratePoolingSequence(input_size[1], output_size[1], &generator, pseudo_random_); col_cum_seq = GeneratePoolingSequence(input_size[2], output_size[2], &generator, pseudo_random_); // Prepare output. Tensor* output_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 0, TensorShape({output_size[0], output_size[1], output_size[2], output_size[3]}), &output_tensor)); Tensor* output_row_seq_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 1, TensorShape({static_cast<int64>(row_cum_seq.size())}), &output_row_seq_tensor)); Tensor* output_col_seq_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 2, TensorShape({static_cast<int64>(col_cum_seq.size())}), &output_col_seq_tensor)); ConstEigenMatrixMap in_mat(tensor_in.flat<T>().data(), input_size[3], input_size[2] * input_size[1] * input_size[0]); EigenMatrixMap out_mat(output_tensor->flat<T>().data(), output_size[3], output_size[2] * output_size[1] * output_size[0]); // out_count corresponds to number of elements in each pooling cell. Eigen::Matrix<T, Eigen::Dynamic, 1> out_count(out_mat.cols()); // Initializes the output tensor and out_count with 0. out_mat.setZero(); out_count.setZero(); auto output_row_seq_flat = output_row_seq_tensor->flat<int64>(); auto output_col_seq_flat = output_col_seq_tensor->flat<int64>(); // Set output tensors. for (int i = 0; i < row_cum_seq.size(); ++i) { output_row_seq_flat(i) = row_cum_seq[i]; } for (int i = 0; i < col_cum_seq.size(); ++i) { output_col_seq_flat(i) = col_cum_seq[i]; } // For both input and output, // 0: batch // 1: row / row // 2: col / col // 3: depth / channel const int64 row_max = input_size[1] - 1; const int64 col_max = input_size[2] - 1; for (int64 b = 0; b < input_size[0]; ++b) { // row sequence. for (int64 hs = 0; hs < row_cum_seq.size() - 1; ++hs) { // row start and end. const int64 row_start = row_cum_seq[hs]; int64 row_end = overlapping_ ? row_cum_seq[hs + 1] : row_cum_seq[hs + 1] - 1; row_end = std::min(row_end, row_max); // col sequence. for (int64 ws = 0; ws < col_cum_seq.size() - 1; ++ws) { const int64 out_offset = (b * output_size[1] + hs) * output_size[2] + ws; // col start and end. const int64 col_start = col_cum_seq[ws]; int64 col_end = overlapping_ ? 
col_cum_seq[ws + 1] : col_cum_seq[ws + 1] - 1; col_end = std::min(col_end, col_max); for (int64 h = row_start; h <= row_end; ++h) { for (int64 w = col_start; w <= col_end; ++w) { const int64 in_offset = (b * input_size[1] + h) * input_size[2] + w; out_mat.col(out_offset) += in_mat.col(in_offset); out_count(out_offset)++; } } } } } DCHECK_GT(out_count.minCoeff(), 0); out_mat.array().rowwise() /= out_count.transpose().array(); }
1
[ "CWE-369" ]
tensorflow
548b5eaf23685d86f722233d8fbc21d0a4aecb96
65,175,645,890,002,240,000,000,000,000,000,000,000
115
Fix divide by zero error in `fractional_pool_common.cc`. PiperOrigin-RevId: 371126221 Change-Id: Iea4b2f363aaeb116ab460e3bc592c687484af344
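A plain-C sketch of the missing guard (not the TensorFlow code): DCHECK_GT() compiles away in release builds, so the per-dimension output size must be validated explicitly before anything is divided by a per-cell element count.

#include <math.h>

/* Reject pooling ratios that would yield an empty output dimension; an
 * empty cell means a zero element count, which is exactly the division
 * that a debug-only assertion does not prevent in release builds. */
int pooled_size(int input_size, double ratio, int *out)
{
    if (input_size <= 0 || ratio <= 0.0)
        return -1;
    int n = (int)floor((double)input_size / ratio);
    if (n <= 0)
        return -1;   /* would later divide accumulated sums by zero */
    *out = n;
    return 0;
}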
unsigned FAST_FUNC udhcp_option_idx(const char *name) { int n = index_in_strings(dhcp_option_strings, name); if (n >= 0) return n; { char buf[sizeof(dhcp_option_strings)]; char *d = buf; const char *s = dhcp_option_strings; while (s < dhcp_option_strings + sizeof(dhcp_option_strings) - 2) { *d++ = (*s == '\0' ? ' ' : *s); s++; } *d = '\0'; bb_error_msg_and_die("unknown option '%s', known options: %s", name, buf); } }
0
[ "CWE-20" ]
busybox
7280d2017d8075267a12e469983e38277dcf0374
2,087,317,214,614,450,300,000,000,000,000,000,000
18
udhcpc: sanitize hostnames in incoming packets. Closes 3979. The following options are replaced with the string "bad" if they contain a malformed hostname: HOST_NAME, DOMAIN_NAME, NIS_DOMAIN, TFTP_SERVER_NAME function old new delta xmalloc_optname_optval 850 888 +38 attach_option 440 443 +3 len_of_option_as_string 13 14 +1 dhcp_option_lengths 13 14 +1 ------------------------------------------------------------------------------ (add/remove: 0/0 grow/shrink: 4/0 up/down: 43/0) Total: 43 bytes Signed-off-by: Denys Vlasenko <[email protected]>
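A sketch of the sanitizer idea (not busybox's actual buffer handling; the option list and the "bad" replacement come from the message above): only hostname-safe characters pass, everything else is overwritten.

#include <ctype.h>
#include <string.h>

/* Accept only letters, digits, '.' and '-', and forbid a leading dash or
 * dot; anything else gets replaced with the literal string "bad" so that
 * scripts consuming the value cannot be tricked into interpreting shell
 * metacharacters. */
void sanitize_hostname(char *s, size_t cap)
{
    int ok = (*s != '\0' && *s != '-' && *s != '.');
    for (const char *p = s; ok && *p; p++) {
        if (!isalnum((unsigned char)*p) && *p != '.' && *p != '-')
            ok = 0;
    }
    if (!ok && cap >= sizeof("bad"))
        memcpy(s, "bad", sizeof("bad"));
}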
exists_in_thread_func (GSimpleAsyncResult *res, GObject *object, GCancellable *cancellable) { GError *error; error = NULL; if (!polkit_system_bus_name_exists_sync (POLKIT_SUBJECT (object), cancellable, &error)) { g_simple_async_result_set_from_error (res, error); g_error_free (error); } }
0
[ "CWE-754" ]
polkit
a04d13affe0fa53ff618e07aa8f57f4c0e3b9b81
146,725,542,987,764,580,000,000,000,000,000,000,000
14
GHSL-2021-074: authentication bypass vulnerability in polkit. Return the initial values if an error is caught.
void snd_pcm_debug_name(struct snd_pcm_substream *substream, char *name, size_t len) { snprintf(name, len, "pcmC%dD%d%c:%d", substream->pcm->card->number, substream->pcm->device, substream->stream ? 'c' : 'p', substream->number); }
0
[ "CWE-416", "CWE-362" ]
linux
3aa02cb664c5fb1042958c8d1aa8c35055a2ebc4
142,524,523,744,430,200,000,000,000,000,000,000,000
9
ALSA: pcm: Call kill_fasync() in stream lock Currently kill_fasync() is called outside the stream lock in snd_pcm_period_elapsed(). This is potentially racy, since the stream may get released even while the irq handler is running. Although snd_pcm_release_substream() calls snd_pcm_drop(), this doesn't guarantee that the irq handler finishes, thus the kill_fasync() call outside the stream spin lock may be invoked after the substream is detached, as recently reported by KASAN. As a quick workaround, move the kill_fasync() call inside the stream lock. fasync is a rarely used interface, so this shouldn't have a big impact from the performance POV. Ideally, we should implement some sync mechanism for the proper finish of stream and irq handler. But this oneliner should suffice for most cases, so far. Reported-by: Baozeng Ding <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int)); static int napi_gro_complete(struct sk_buff *skb) { struct packet_offload *ptype; __be16 type = skb->protocol; struct list_head *head = &offload_base; int err = -ENOENT; BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); if (NAPI_GRO_CB(skb)->count == 1) { skb_shinfo(skb)->gso_size = 0; goto out; } rcu_read_lock(); list_for_each_entry_rcu(ptype, head, list) { if (ptype->type != type || !ptype->callbacks.gro_complete) continue; err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, ipv6_gro_complete, inet_gro_complete, skb, 0); break; } rcu_read_unlock(); if (err) { WARN_ON(&ptype->list == head); kfree_skb(skb); return NET_RX_SUCCESS; } out: return netif_receive_skb_internal(skb);
0
[ "CWE-416" ]
linux
a4270d6795b0580287453ea55974d948393e66ef
324,760,375,292,496,300,000,000,000,000,000,000,000
35
net-gro: fix use-after-free read in napi_gro_frags() If a network driver provides to napi_gro_frags() an skb with a page fragment of exactly 14 bytes, the call to gro_pull_from_frag0() will 'consume' the fragment by calling skb_frag_unref(skb, 0), and the page might be freed and reused. Reading eth->h_proto at the end of napi_frags_skb() might read mangled data, or crash under specific debugging features. BUG: KASAN: use-after-free in napi_frags_skb net/core/dev.c:5833 [inline] BUG: KASAN: use-after-free in napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841 Read of size 2 at addr ffff88809366840c by task syz-executor599/8957 CPU: 1 PID: 8957 Comm: syz-executor599 Not tainted 5.2.0-rc1+ #32 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x172/0x1f0 lib/dump_stack.c:113 print_address_description.cold+0x7c/0x20d mm/kasan/report.c:188 __kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317 kasan_report+0x12/0x20 mm/kasan/common.c:614 __asan_report_load_n_noabort+0xf/0x20 mm/kasan/generic_report.c:142 napi_frags_skb net/core/dev.c:5833 [inline] napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841 tun_get_user+0x2f3c/0x3ff0 drivers/net/tun.c:1991 tun_chr_write_iter+0xbd/0x156 drivers/net/tun.c:2037 call_write_iter include/linux/fs.h:1872 [inline] do_iter_readv_writev+0x5f8/0x8f0 fs/read_write.c:693 do_iter_write fs/read_write.c:970 [inline] do_iter_write+0x184/0x610 fs/read_write.c:951 vfs_writev+0x1b3/0x2f0 fs/read_write.c:1015 do_writev+0x15b/0x330 fs/read_write.c:1058 Fixes: a50e233c50db ("net-gro: restore frag0 optimization") Signed-off-by: Eric Dumazet <[email protected]> Reported-by: syzbot <[email protected]> Signed-off-by: David S. Miller <[email protected]>
LogStreamerMain(logstreamer_param *param)
{
	if (!ReceiveXlogStream(param->bgconn, param->startptr, param->timeline,
						   param->sysidentifier, param->xlogdir,
						   reached_end_position, standby_message_timeout,
						   NULL))

		/*
		 * Any errors will already have been reported in the function process,
		 * but we need to tell the parent that we didn't shut down in a nice
		 * way.
		 */
		return 1;

	PQfinish(param->bgconn);
	return 0;
}
0
[ "CWE-119" ]
postgres
01824385aead50e557ca1af28640460fa9877d51
169,559,029,359,710,560,000,000,000,000,000,000,000
17
Prevent potential overruns of fixed-size buffers. Coverity identified a number of places in which it couldn't prove that a string being copied into a fixed-size buffer would fit. We believe that most, perhaps all of these are in fact safe, or are copying data that is coming from a trusted source so that any overrun is not really a security issue. Nonetheless it seems prudent to forestall any risk by using strlcpy() and similar functions. Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports. In addition, fix a potential null-pointer-dereference crash in contrib/chkpass. The crypt(3) function is defined to return NULL on failure, but chkpass.c didn't check for that before using the result. The main practical case in which this could be an issue is if libc is configured to refuse to execute unapproved hashing algorithms (e.g., "FIPS mode"). This ideally should've been a separate commit, but since it touches code adjacent to one of the buffer overrun changes, I included it in this commit to avoid last-minute merge issues. This issue was reported by Honza Horak. Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt()
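Two small sketches of the patterns this commit applies, in portable C rather than the PostgreSQL sources: a bounded, truncation-aware copy (strlcpy() is not in ISO C, so snprintf() stands in for it here), and a NULL check on crypt(3). Link with -lcrypt; on some libcs the crypt() declaration lives in <crypt.h> instead of <unistd.h>.

#define _XOPEN_SOURCE 700
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Bounded copy: snprintf() always NUL-terminates and reports truncation,
 * unlike a bare strcpy() into a fixed-size buffer. */
int copy_name(char *dst, size_t dstsz, const char *src)
{
    int n = snprintf(dst, dstsz, "%s", src);
    return (n < 0 || (size_t)n >= dstsz) ? -1 : 0;   /* -1 == truncated */
}

/* crypt(3) is defined to return NULL on failure (e.g. a hash algorithm
 * refused in FIPS mode), so the result must be checked before strcmp(). */
int password_matches(const char *clear, const char *stored)
{
    char *h = crypt(clear, stored);
    return h != NULL && strcmp(h, stored) == 0;
}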
TEST(ArrayOpsTest, BatchToSpace_ShapeFn) { ShapeInferenceTestOp op("BatchToSpace"); op.input_tensors.resize(2); TF_ASSERT_OK(NodeDefBuilder("test", "BatchToSpace") .Input("input", 0, DT_FLOAT) .Input("crops", 1, DT_INT32) .Attr("block_size", 2) .Finalize(&op.node_def)); // croppings not known, but batch size can be computed. INFER_OK(op, "[4,8,8,3];[2,2]", "[1,?,?,d0_3]"); // block_size not compatible with batch size INFER_ERROR("Dimension size must be evenly divisible by", op, "[5,8,8,3];[2,2]"); // Unknown croppings means unknown width and height. INFER_OK(op, "[4,8,8,3];?", "[1,?,?,d0_3]"); // croppings not correct shape INFER_ERROR("rank", op, "[4,8,8,3];[4]"); INFER_ERROR("3 and 2", op, "[4,8,8,3];[2,3]"); Tensor croppings = test::AsTensor<int64_t>({4, 2, 2, 4}, {{2, 2}}); op.input_tensors[1] = &croppings; INFER_OK(op, "[4,8,8,3];[2,2]", "[1,10,10,d0_3]"); // Bad croppings values croppings = test::AsTensor<int32>({100, 2, 3, 4}, {{2, 2}}); op.input_tensors[1] = &croppings; INFER_ERROR("Negative dimension size caused by subtracting", op, "[4,8,8,3];[2,2]"); croppings = test::AsTensor<int32>({1, 2, 3, 400}, {{2, 2}}); op.input_tensors[1] = &croppings; INFER_ERROR("Negative dimension size caused by subtracting", op, "[4,8,8,3];[2,2]"); // Negative paddings croppings = test::AsTensor<int32>({1, -2, 3, 4}, {{2, 2}}); op.input_tensors[1] = &croppings; INFER_ERROR("cannot be negative", op, "[4,8,8,3];[2,2]"); }
0
[ "CWE-125" ]
tensorflow
7cf73a2274732c9d82af51c2bc2cf90d13cd7e6d
318,752,729,982,030,900,000,000,000,000,000,000,000
42
Address QuantizeAndDequantizeV* heap oob. Added additional checks for the 'axis' attribute. PiperOrigin-RevId: 402446942 Change-Id: Id2f6b82e4e740d0550329be02621c46466b5a5b9
prepare_payload_and_sign(struct tang_keys_info* tki) { if (!tki) { return 0; } size_t idx; json_t* jwk; json_array_foreach(tki->m_keys, idx, jwk) { if (jwk_valid_for_signing_and_verifying(jwk)) { if (json_array_append(tki->m_sign, jwk) == -1) { continue; } if (json_array_append(tki->m_payload, jwk) == -1) { continue; } } else if (jwk_valid_for_deriving_keys(jwk)) { if (json_array_append(tki->m_payload, jwk) == -1) { continue; } } } if (json_array_size(tki->m_sign) == 0 || json_array_size(tki->m_payload) == 0) { return 0; } return 1; }
0
[ "CWE-200" ]
tang
e82459fda10f0630c3414ed2afbc6320bb9ea7c9
4,458,806,382,062,923,000,000,000,000,000,000,000
27
keys: move signing part out of find_by_thp() and to find_jws() (#81) Handle just signing keys in find_jws(), to make sure we are responding only to proper queries. Tests were also failing to detect this issue and were updated accordingly. Issue discovered by Twitter Kernel and OS team during a source code audit while evaluating Tang/Clevis for their needs. Fixes CVE-2021-4076
bool MYSQL_BIN_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event,
                          bool incident)
{
  DBUG_ENTER("MYSQL_BIN_LOG::write(THD *, IO_CACHE *, Log_event *)");
  DBUG_ASSERT(is_open());
  if (likely(is_open()))                       // Should always be true
  {
    bool check_purge;

    mysql_mutex_lock(&LOCK_log);
    /*
      We only bother to write to the binary log if there is anything
      to write.
    */
    if (my_b_tell(cache) > 0)
    {
      /*
        Log "BEGIN" at the beginning of every transaction.  Here, a
        transaction is either a BEGIN..COMMIT block or a single
        statement in autocommit mode.
      */
      Query_log_event qinfo(thd, STRING_WITH_LEN("BEGIN"), TRUE, FALSE, TRUE, 0);
      if (qinfo.write(&log_file))
        goto err;
      DBUG_EXECUTE_IF("crash_before_writing_xid",
                      {
                        if ((write_error= write_cache(cache, false, true)))
                          DBUG_PRINT("info", ("error writing binlog cache: %d",
                                              write_error));
                        DBUG_PRINT("info", ("crashing before writing xid"));
                        DBUG_SUICIDE();
                      });

      if ((write_error= write_cache(cache, false, false)))
        goto err;

      if (commit_event && commit_event->write(&log_file))
        goto err;

      if (incident && write_incident(thd, FALSE))
        goto err;

      bool synced= 0;
      if (flush_and_sync(&synced))
        goto err;
      DBUG_EXECUTE_IF("half_binlogged_transaction", DBUG_SUICIDE(););
      if (cache->error)                        // Error on read
      {
        sql_print_error(ER(ER_ERROR_ON_READ), cache->file_name, errno);
        write_error=1;                         // Don't give more errors
        goto err;
      }

      if (RUN_HOOK(binlog_storage, after_flush,
                   (thd, log_file_name, log_file.pos_in_file, synced)))
      {
        sql_print_error("Failed to run 'after_flush' hooks");
        write_error=1;
        goto err;
      }

      signal_update();
    }

    /*
      if commit_event is Xid_log_event, increase the number of
      prepared_xids (it's decreased in ::unlog()). Binlog cannot be rotated
      if there're prepared xids in it - see the comment in new_file() for
      an explanation.
      If the commit_event is not Xid_log_event (then it's a Query_log_event)
      rotate binlog, if necessary.
    */
    if (commit_event && commit_event->get_type_code() == XID_EVENT)
    {
      mysql_mutex_lock(&LOCK_prep_xids);
      prepared_xids++;
      mysql_mutex_unlock(&LOCK_prep_xids);
      mysql_mutex_unlock(&LOCK_log);
    }
    else
    {
      if (rotate(false, &check_purge))
        goto err;
      mysql_mutex_unlock(&LOCK_log);
      if (check_purge)
        purge();
    }
  }
  DBUG_RETURN(0);

err:
  if (!write_error)
  {
    write_error= 1;
    sql_print_error(ER(ER_ERROR_ON_WRITE), name, errno);
  }
  mysql_mutex_unlock(&LOCK_log);
  DBUG_RETURN(1);
}
0
[ "CWE-264" ]
mysql-server
48bd8b16fe382be302c6f0b45931be5aa6f29a0e
70,661,250,104,947,830,000,000,000,000,000,000,000
101
Bug#24388753: PRIVILEGE ESCALATION USING MYSQLD_SAFE [This is the 5.5/5.6 version of the bugfix]. The problem was that it was possible to write log files ending in .ini/.cnf that later could be parsed as an options file. This made it possible for users to specify startup options without the permissions to do so. This patch fixes the problem by disallowing general query log and slow query log to be written to files ending in .ini and .cnf.
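A sketch of the suffix check being described (illustrative helper names, not the server code; the real check also handles path components and runs at option-parsing time; strcasecmp() is POSIX).

#include <string.h>
#include <strings.h>

/* Reject log file names whose extension the server would later pick up
 * as an options file. */
static int has_suffix(const char *name, const char *suf)
{
    size_t n = strlen(name), s = strlen(suf);
    return n >= s && strcasecmp(name + n - s, suf) == 0;
}

int log_filename_allowed(const char *name)
{
    return !has_suffix(name, ".ini") && !has_suffix(name, ".cnf");
}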
TEST(GetComputedPathsTest, ExpressionObjectCorrectlyReportsComputedPathsNested) { intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest()); auto specObject = fromjson( "{a: {b: '$c'}," "d: {$map: {input: '$e', as: 'iter', in: {f: '$$iter.g'}}}}"); auto expr = Expression::parseObject(expCtx, specObject, expCtx->variablesParseState); ASSERT(dynamic_cast<ExpressionObject*>(expr.get())); auto computedPaths = expr->getComputedPaths("h"); ASSERT(computedPaths.paths.empty()); ASSERT_EQ(computedPaths.renames.size(), 2u); ASSERT_EQ(computedPaths.renames["h.a.b"], "c"); ASSERT_EQ(computedPaths.renames["h.d.f"], "e.g"); }
0
[ "CWE-835" ]
mongo
0a076417d1d7fba3632b73349a1fd29a83e68816
201,752,244,825,251,570,000,000,000,000,000,000,000
13
SERVER-38070 fix infinite loop in agg expression
gst_matroska_demux_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec) { GstMatroskaDemux *demux; g_return_if_fail (GST_IS_MATROSKA_DEMUX (object)); demux = GST_MATROSKA_DEMUX (object); switch (prop_id) { case PROP_MAX_GAP_TIME: GST_OBJECT_LOCK (demux); g_value_set_uint64 (value, demux->max_gap_time); GST_OBJECT_UNLOCK (demux); break; case PROP_MAX_BACKTRACK_DISTANCE: GST_OBJECT_LOCK (demux); g_value_set_uint (value, demux->max_backtrack_distance); GST_OBJECT_UNLOCK (demux); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; } }
0
[]
gst-plugins-good
9181191511f9c0be6a89c98b311f49d66bd46dc3
318,283,263,954,042,300,000,000,000,000,000,000,000
24
matroskademux: Fix extraction of multichannel WavPack The old code had a couple of issues that all led to potential memory safety bugs. - Use a constant for the Wavpack4Header size instead of using sizeof. It's written out into the data and not from the struct and who knows what special alignment/padding requirements some C compilers have. - gst_buffer_set_size() does not realloc the buffer when setting a bigger size than allocated, it only allows growing up to the maximum allocated size. Instead use a GstAdapter to collect all the blocks and take out everything at once in the end. - Check that enough data is actually available in the input and otherwise handle it as an error in all cases instead of silently ignoring it. Among other things this fixes out-of-bounds writes because the code assumed gst_buffer_set_size() can grow the buffer and simply wrote after the end of the buffer. Thanks to Natalie Silvanovich for reporting. Fixes https://gitlab.freedesktop.org/gstreamer/gst-plugins-good/-/issues/859 Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-good/-/merge_requests/903>
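A sketch of two of the listed fixes in plain C (byte offsets per the WavPack 4 container as commonly documented; this is not the GStreamer patch, which additionally gathers blocks with a GstAdapter): a named constant for the 32-byte on-disk header instead of sizeof on a struct, and a hard error when the input holds fewer bytes than the block claims.

#include <stdint.h>
#include <stddef.h>

/* The on-disk WavPack block header is exactly 32 bytes; a constant keeps
 * compiler padding and alignment out of the parsing arithmetic. */
#define WAVPACK4_HEADER_SIZE 32

/* ckSize (bytes 4..7, little-endian) counts everything after the first 8
 * bytes.  Reject blocks whose claimed size does not fit in the bytes we
 * actually have instead of silently clamping and writing past the end. */
int wv_block_size(const uint8_t *data, size_t avail, uint32_t *total_out)
{
    if (avail < WAVPACK4_HEADER_SIZE)
        return -1;                               /* truncated input */

    uint32_t ck_size = (uint32_t)data[4]
                     | ((uint32_t)data[5] << 8)
                     | ((uint32_t)data[6] << 16)
                     | ((uint32_t)data[7] << 24);

    if (ck_size < WAVPACK4_HEADER_SIZE - 8 ||    /* header must fit */
        ck_size > UINT32_MAX - 8 ||              /* total must not wrap */
        ck_size > avail - 8)                     /* data must be present */
        return -1;

    *total_out = ck_size + 8;
    return 0;
}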
static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) { }
0
[ "CWE-401" ]
linux
d80b64ff297e40c2b6f7d7abc1b3eba70d22a068
213,412,701,491,658,780,000,000,000,000,000,000,000
3
KVM: SVM: Fix potential memory leak in svm_cpu_init() When kmalloc of memory for sd->sev_vmcbs fails, we forget to free the page held by sd->save_area. Also get rid of the var r, as '-ENOMEM' is actually the only possible outcome here. Reviewed-by: Liran Alon <[email protected]> Reviewed-by: Vitaly Kuznetsov <[email protected]> Signed-off-by: Miaohe Lin <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
virtual void dump(const string& code, const string& message) const {}
0
[ "CWE-770" ]
ceph
ab29bed2fc9f961fe895de1086a8208e21ddaddc
237,854,415,591,832,740,000,000,000,000,000,000,000
1
rgw: fix issues with 'enforce bounds' patch The patch to enforce bounds on max-keys/max-uploads/max-parts had a few issues that would prevent us from compiling it. Instead of changing the code provided by the submitter, we're addressing them in a separate commit to maintain the DCO. Signed-off-by: Joao Eduardo Luis <[email protected]> Signed-off-by: Abhishek Lekshmanan <[email protected]> (cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a) Mimic-specific fixes: as the largeish g_conf() change from master isn't in mimic yet, use the g_conf global structure; also make rgw_op use the value from the req_info ceph context, as we do for all requests.
camel_network_service_init (CamelNetworkServiceInterface *iface) { iface->get_service_name = pop3_store_get_service_name; iface->get_default_port = pop3_store_get_default_port; }
0
[ "CWE-74" ]
evolution-data-server
ba82be72cfd427b5d72ff21f929b3a6d8529c4df
13,629,906,142,838,539,000,000,000,000,000,000,000
5
I#226 - CVE-2020-14928: Response Injection via STARTTLS in SMTP and POP3 Closes https://gitlab.gnome.org/GNOME/evolution-data-server/-/issues/226
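A generic sketch of the STARTTLS hardening this CVE calls for (illustrative, not the evolution-data-server code): any bytes the server managed to smuggle into the plaintext read buffer alongside the STARTTLS reply must be dropped before the TLS session begins, or they would later be parsed as if they were protected responses.

#include <stddef.h>
#include <string.h>

struct conn {
    unsigned char inbuf[4096];   /* bytes read but not yet consumed */
    size_t inlen;
};

/* Call between receiving the server's +OK/220 STARTTLS reply and the TLS
 * handshake: anything still sitting in the plaintext read buffer was sent
 * before encryption started and must not be interpreted afterwards. */
void drop_pretls_plaintext(struct conn *c)
{
    if (c->inlen > 0) {
        memset(c->inbuf, 0, c->inlen);
        c->inlen = 0;            /* injected "responses" vanish here */
    }
}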
static int cap_socket_setsockopt(struct socket *sock, int level, int optname) { return 0; }
0
[]
linux-2.6
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
330,988,091,888,285,430,000,000,000,000,000,000,000
4
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6] Add a keyctl to install a process's session keyring onto its parent. This replaces the parent's session keyring. Because the COW credential code does not permit one process to change another process's credentials directly, the change is deferred until userspace next starts executing again. Normally this will be after a wait*() syscall. To support this, three new security hooks have been provided: cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in the blank security creds and key_session_to_parent() - which asks the LSM if the process may replace its parent's session keyring. The replacement may only happen if the process has the same ownership details as its parent, and the process has LINK permission on the session keyring, and the session keyring is owned by the process, and the LSM permits it. Note that this requires alteration to each architecture's notify_resume path. This has been done for all arches barring blackfin, m68k* and xtensa, all of which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the replacement to be performed at the point the parent process resumes userspace execution. This allows the userspace AFS pioctl emulation to fully emulate newpag() and the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to alter the parent process's PAG membership. However, since kAFS doesn't use PAGs per se, but rather dumps the keys into the session keyring, the session keyring of the parent must be replaced if, for example, VIOCSETTOK is passed the newpag flag. This can be tested with the following program: #include <stdio.h> #include <stdlib.h> #include <keyutils.h> #define KEYCTL_SESSION_TO_PARENT 18 #define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0) int main(int argc, char **argv) { key_serial_t keyring, key; long ret; keyring = keyctl_join_session_keyring(argv[1]); OSERROR(keyring, "keyctl_join_session_keyring"); key = add_key("user", "a", "b", 1, keyring); OSERROR(key, "add_key"); ret = keyctl(KEYCTL_SESSION_TO_PARENT); OSERROR(ret, "KEYCTL_SESSION_TO_PARENT"); return 0; } Compiled and linked with -lkeyutils, you should see something like: [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 355907932 --alswrv 4043 -1 \_ keyring: _uid.4043 [dhowells@andromeda ~]$ /tmp/newpag [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 1055658746 --alswrv 4043 4043 \_ user: a [dhowells@andromeda ~]$ /tmp/newpag hello [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: hello 340417692 --alswrv 4043 4043 \_ user: a Where the test program creates a new session keyring, sticks a user key named 'a' into it and then installs it on its parent. Signed-off-by: David Howells <[email protected]> Signed-off-by: James Morris <[email protected]>
static int ZEND_FASTCALL ZEND_RECV_INIT_SPEC_CONST_HANDLER(ZEND_OPCODE_HANDLER_ARGS) { zend_op *opline = EX(opline); zval *assignment_value; zend_uint arg_num = Z_LVAL(opline->op1.u.constant); zend_free_op free_res; zval **param = zend_vm_stack_get_arg(arg_num TSRMLS_CC); zval **var_ptr; if (param == NULL) { ALLOC_ZVAL(assignment_value); *assignment_value = opline->op2.u.constant; if ((Z_TYPE(opline->op2.u.constant) & IS_CONSTANT_TYPE_MASK) == IS_CONSTANT || Z_TYPE(opline->op2.u.constant)==IS_CONSTANT_ARRAY) { Z_SET_REFCOUNT_P(assignment_value, 1); zval_update_constant(&assignment_value, 0 TSRMLS_CC); } else { zval_copy_ctor(assignment_value); } INIT_PZVAL(assignment_value); } else { assignment_value = *param; Z_ADDREF_P(assignment_value); } zend_verify_arg_type((zend_function *) EG(active_op_array), arg_num, assignment_value, opline->extended_value TSRMLS_CC); var_ptr = get_zval_ptr_ptr(&opline->result, EX(Ts), &free_res, BP_VAR_W); Z_DELREF_PP(var_ptr); *var_ptr = assignment_value; ZEND_VM_NEXT_OPCODE(); }
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
240,156,279,528,376,320,000,000,000,000,000,000,000
31
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus
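A sketch of the check being described (the message says a proper macro lands in trunk later; the helper name here is illustrative):

#include <string.h>
#include <stddef.h>

/* A length-counted path with an embedded NUL names different files for the
 * runtime and for the C library: a suffix check on (path, len) may see
 * "evil\0.png" while open() stops at "evil".  Requiring strlen(path) == len
 * rules that out. */
int path_is_clean(const char *path, size_t len)
{
    return path != NULL && strlen(path) == len;
}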
sessActivity(ptcpsess_t *const pSess, int *const continue_polling)
{
	int lenRcv;
	int lenBuf;
	uchar *peerName;
	int lenPeer;
	int remsock = 0; /* init just to keep compiler happy... :-( */
	sbool bEmitOnClose = 0;
	char rcvBuf[128*1024];
	DEFiRet;

	DBGPRINTF("imptcp: new activity on session socket %d\n", pSess->sock);

	while(1) {
		lenBuf = sizeof(rcvBuf);
		lenRcv = recv(pSess->sock, rcvBuf, lenBuf, 0);

		if(lenRcv > 0) {
			/* have data, process it */
			DBGPRINTF("imptcp: data(%d) on socket %d: %s\n", lenBuf, pSess->sock, rcvBuf);
			CHKiRet(DataRcvd(pSess, rcvBuf, lenRcv));
		} else if (lenRcv == 0) {
			/* session was closed, do clean-up */
			if(pSess->pLstn->pSrv->bEmitMsgOnClose) {
				prop.GetString(pSess->peerName, &peerName, &lenPeer);
				remsock = pSess->sock;
				bEmitOnClose = 1;
			}
			*continue_polling = 0;
			if(bEmitOnClose) {
				LogError(0, RS_RET_PEER_CLOSED_CONN, "imptcp session %d closed by "
					"remote peer %s.", remsock, peerName);
			}
			CHKiRet(closeSess(pSess)); /* close may emit more messages in strmzip mode! */
			break;
		} else {
			if(CHK_EAGAIN_EWOULDBLOCK)
				break;
			DBGPRINTF("imptcp: error on session socket %d - closed.\n", pSess->sock);
			*continue_polling = 0;
			closeSess(pSess); /* try clean-up by dropping session */
			break;
		}
	}

finalize_it:
	RETiRet;
}
0
[ "CWE-787" ]
rsyslog
89955b0bcb1ff105e1374aad7e0e993faa6a038f
255,482,900,065,186,480,000,000,000,000,000,000,000
48
net bugfix: potential buffer overrun
static void dissect_INFO_REPLY(tvbuff_t *tvb, packet_info *pinfo, gint offset, guint8 flags, const guint encoding, int octets_to_next_header, proto_tree *tree) { /* RTPS 1.0/1.1: * INFO_REPLY is *NOT* the same thing as the old INFO_REPLY. * * RTPS 1.2/2.0: * 0...2...........7...............15.............23...............31 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | INFO_REPLY |X|X|X|X|X|X|M|E| octetsToNextHeader | * +---------------+---------------+---------------+---------------+ * | | * ~ LocatorList unicastReplyLocatorList ~ * | | * +---------------+---------------+---------------+---------------+ * | | * ~ LocatorList multicastReplyLocatorList [only if M==1] ~ * | | * +---------------+---------------+---------------+---------------+ */ int min_len; proto_item *octet_item; proto_tree_add_bitmask_value(tree, tvb, offset + 1, hf_rtps_sm_flags, ett_rtps_flags, INFO_REPLY_FLAGS, flags); octet_item = proto_tree_add_item(tree, hf_rtps_sm_octets_to_next_header, tvb, offset + 2, 2, encoding); min_len = 4; if ((flags & FLAG_INFO_REPLY_M) != 0) min_len += 4; if (octets_to_next_header < min_len) { expert_add_info_format(pinfo, octet_item, &ei_rtps_sm_octets_to_next_header_error, "(Error: should be >= %u)", min_len); return; } offset += 4; /* unicastReplyLocatorList */ offset = rtps_util_add_locator_list(tree, pinfo, tvb, offset, "unicastReplyLocatorList", encoding); /* multicastReplyLocatorList */ if ((flags & FLAG_INFO_REPLY_M) != 0) { /*offset = */rtps_util_add_locator_list(tree, pinfo, tvb, offset, "multicastReplyLocatorList", encoding); } }
0
[ "CWE-401" ]
wireshark
33e63d19e5496c151bad69f65cdbc7cba2b4c211
30,036,510,175,899,830,000,000,000,000,000,000,000
50
RTPS: Fixup our coherent set map. coherent_set_tracking.coherent_set_registry_map uses a struct as a key, but the hash and comparison routines treat keys as a sequence of bytes. Make sure every key byte is initialized. Fixes #16994. Call wmem_strong_hash on our key in coherent_set_key_hash_by_key instead of creating and leaking a GBytes struct.
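A self-contained sketch of the byte-hashing pitfall being fixed (field names illustrative; the FNV-1a loop stands in for wmem_strong_hash()): a struct used as a raw-bytes key must be fully zeroed before its fields are set, or compiler padding carries stack garbage into the hash.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct coherent_key {
    uint32_t writer_id;
    uint64_t seq;           /* the compiler may insert padding before this */
};

/* When the hash/equal pair reads the key as a sequence of bytes, padding
 * bytes take part in the comparison, so they must be deterministic:
 * memset() the whole struct before filling the fields. */
uint32_t key_hash(uint32_t writer_id, uint64_t seq)
{
    struct coherent_key k;
    memset(&k, 0, sizeof k);
    k.writer_id = writer_id;
    k.seq = seq;

    /* FNV-1a over the key bytes, standing in for wmem_strong_hash() */
    const unsigned char *p = (const unsigned char *)&k;
    uint32_t h = 2166136261u;
    for (size_t i = 0; i < sizeof k; i++)
        h = (h ^ p[i]) * 16777619u;
    return h;
}

With the memset() removed, two keys built from identical field values can hash differently, which is how "identical" coherent-set entries stop matching.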
static int __net_init ipv4_frags_init_net(struct net *net)
{
	/* Fragment cache limits.
	 *
	 * The fragment memory accounting code, (tries to) account for
	 * the real memory usage, by measuring both the size of frag
	 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
	 * and the SKB's truesize.
	 *
	 * A 64K fragment consumes 129736 bytes (44*2944)+200
	 * (1500 truesize == 2944, sizeof(struct ipq) == 200)
	 *
	 * We will commit 4MB at one time. Should we cross that limit
	 * we will prune down to 3MB, making room for approx 8 big 64K
	 * fragments 8x128k.
	 */
	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
	net->ipv4.frags.low_thresh = 3 * 1024 * 1024;
	/*
	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
	 * RFC 791 is wrong in proposing to prolong the timer on each fragment
	 * arrival by TTL.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	inet_frags_init_net(&net->ipv4.frags);

	return ip4_frags_ns_ctl_register(net);
}
0
[]
linux
3ef0eb0db4bf92c6d2510fe5c4dc51852746f206
95,719,924,489,987,240,000,000,000,000,000,000,000
29
net: frag, move LRU list maintenance outside of rwlock Updating the fragmentation queues' LRU (Least-Recently-Used) list required taking the hash writer lock. However, the LRU list isn't tied to the hash at all, so we can use a separate lock for it. Original-idea-by: Florian Westphal <[email protected]> Signed-off-by: Jesper Dangaard Brouer <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int manager_dispatch_notify_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) { _cleanup_fdset_free_ FDSet *fds = NULL; Manager *m = userdata; char buf[NOTIFY_BUFFER_MAX+1]; struct iovec iovec = { .iov_base = buf, .iov_len = sizeof(buf)-1, }; union { struct cmsghdr cmsghdr; uint8_t buf[CMSG_SPACE(sizeof(struct ucred)) + CMSG_SPACE(sizeof(int) * NOTIFY_FD_MAX)]; } control = {}; struct msghdr msghdr = { .msg_iov = &iovec, .msg_iovlen = 1, .msg_control = &control, .msg_controllen = sizeof(control), }; struct cmsghdr *cmsg; struct ucred *ucred = NULL; bool found = false; Unit *u1, *u2, *u3; int r, *fd_array = NULL; unsigned n_fds = 0; ssize_t n; assert(m); assert(m->notify_fd == fd); if (revents != EPOLLIN) { log_warning("Got unexpected poll event for notify fd."); return 0; } n = recvmsg(m->notify_fd, &msghdr, MSG_DONTWAIT|MSG_CMSG_CLOEXEC); if (n < 0) { if (!IN_SET(errno, EAGAIN, EINTR)) log_error("Failed to receive notification message: %m"); /* It's not an option to return an error here since it * would disable the notification handler entirely. Services * wouldn't be able to send the WATCHDOG message for * example... */ return 0; } if (n == 0) { log_debug("Got zero-length notification message. Ignoring."); return 0; } CMSG_FOREACH(cmsg, &msghdr) { if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) { fd_array = (int*) CMSG_DATA(cmsg); n_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int); } else if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_CREDENTIALS && cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred))) { ucred = (struct ucred*) CMSG_DATA(cmsg); } } if (n_fds > 0) { assert(fd_array); r = fdset_new_array(&fds, fd_array, n_fds); if (r < 0) { close_many(fd_array, n_fds); log_oom(); return 0; } } if (!ucred || ucred->pid <= 0) { log_warning("Received notify message without valid credentials. Ignoring."); return 0; } if ((size_t) n >= sizeof(buf)) { log_warning("Received notify message exceeded maximum size. Ignoring."); return 0; } buf[n] = 0; /* Notify every unit that might be interested, but try * to avoid notifying the same one multiple times. */ u1 = manager_get_unit_by_pid_cgroup(m, ucred->pid); if (u1) { manager_invoke_notify_message(m, u1, ucred->pid, buf, n, fds); found = true; } u2 = hashmap_get(m->watch_pids1, PID_TO_PTR(ucred->pid)); if (u2 && u2 != u1) { manager_invoke_notify_message(m, u2, ucred->pid, buf, n, fds); found = true; } u3 = hashmap_get(m->watch_pids2, PID_TO_PTR(ucred->pid)); if (u3 && u3 != u2 && u3 != u1) { manager_invoke_notify_message(m, u3, ucred->pid, buf, n, fds); found = true; } if (!found) log_warning("Cannot find unit for notify message of PID "PID_FMT".", ucred->pid); if (fdset_size(fds) > 0) log_warning("Got auxiliary fds with notification message, closing all."); return 0; }
1
[ "CWE-20" ]
systemd
8523bf7dd514a3a2c6114b7b8fb8f308b4f09fc4
336,555,145,514,301,300,000,000,000,000,000,000,000
118
pid1: process zero-length notification messages again This undoes 531ac2b234. I acked that patch without looking at the code carefully enough. There are two problems: - we want to process the fds anyway - in principle empty notification messages are valid, and we should process them as usual, including logging using log_unit_debug().
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) { u32 lock_status; u32 resource_bit = (1 << resource); int func = BP_FUNC(bp); u32 hw_lock_control_reg; /* Validating that the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", resource, HW_LOCK_MAX_RESOURCE_VALUE); return -EINVAL; } if (func <= 5) { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); } else { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); } /* Validating that the resource is currently taken */ lock_status = REG_RD(bp, hw_lock_control_reg); if (!(lock_status & resource_bit)) { BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n", lock_status, resource_bit); return -EFAULT; } REG_WR(bp, hw_lock_control_reg, resource_bit); return 0; }
0
[ "CWE-20" ]
linux
8914a595110a6eca69a5e275b323f5d09e18f4f9
70,723,227,962,069,440,000,000,000,000,000,000,000
32
bnx2x: disable GSO where gso_size is too big for hardware If a bnx2x card is passed a GSO packet with a gso_size larger than ~9700 bytes, it will cause a firmware error that will bring the card down: bnx2x: [bnx2x_attn_int_deasserted3:4323(enP24p1s0f0)]MC assert! bnx2x: [bnx2x_mc_assert:720(enP24p1s0f0)]XSTORM_ASSERT_LIST_INDEX 0x2 bnx2x: [bnx2x_mc_assert:736(enP24p1s0f0)]XSTORM_ASSERT_INDEX 0x0 = 0x00000000 0x25e43e47 0x00463e01 0x00010052 bnx2x: [bnx2x_mc_assert:750(enP24p1s0f0)]Chip Revision: everest3, FW Version: 7_13_1 ... (dump of values continues) ... Detect when the mac length of a GSO packet is greater than the maximum packet size (9700 bytes) and disable GSO. Signed-off-by: Daniel Axtens <[email protected]> Reviewed-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
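A kernel-style sketch of the described approach, not the literal patch: in the driver's .ndo_features_check hook, masking out the GSO feature bits for oversized packets makes the stack fall back to software segmentation. The 9700-byte limit and the segment-length helper come straight from the message; read this as pseudocode against the contemporary net API rather than a buildable module.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define BNX2X_MAX_FW_PKT_SIZE 9700   /* firmware limit quoted in the log */

static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
                                              struct net_device *dev,
                                              netdev_features_t features)
{
    /* A GSO packet whose on-wire segments would exceed the firmware's
     * maximum must not reach the hardware; clearing the GSO bits makes
     * the core segment it in software instead. */
    if (skb_is_gso(skb) &&
        skb_gso_mac_seglen(skb) > BNX2X_MAX_FW_PKT_SIZE)
        features &= ~NETIF_F_GSO_MASK;

    return features;
}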
static UINT32 update_glyph_offset(const BYTE* data, size_t length, UINT32 index, INT32* x, INT32* y,
                                  UINT32 ulCharInc, UINT32 flAccel)
{
	if ((ulCharInc == 0) && (!(flAccel & SO_CHAR_INC_EQUAL_BM_BASE)))
	{
		UINT32 offset = data[index++];

		if (offset & 0x80)
		{
			if (index + 1 < length)
			{
				offset = data[index++];
				offset |= ((UINT32)data[index++]) << 8;
			}
			else
				WLog_WARN(TAG, "glyph index out of bounds %" PRIu32 " [max %" PRIuz "]",
				          index, length);
		}

		if (flAccel & SO_VERTICAL)
			*y += offset;

		if (flAccel & SO_HORIZONTAL)
			*x += offset;
	}

	return index;
}
0
[ "CWE-703", "CWE-125" ]
FreeRDP
c0fd449ec0870b050d350d6d844b1ea6dad4bc7d
41,150,128,179,172,720,000,000,000,000,000,000,000
29
Fixed Out-of-bound read in glyph_cache_put CVE-2020-11098 thanks to @antonio-morales for finding this.
void HTTPSession::resumeIngress(HTTPTransaction* txn) noexcept { VLOG(4) << *this << " resuming streamID=" << txn->getID() << ", liveTransactions_ was " << liveTransactions_; ++liveTransactions_; auto exTxns = txn->getExTransactions(); for (auto it = exTxns.begin(); it != exTxns.end(); ++it) { auto exTxn = findTransaction(*it); if (exTxn) { exTxn->resumeIngress(); } } if (liveTransactions_ == 1) { resumeReads(); } }
0
[ "CWE-20" ]
proxygen
0600ebe59c3e82cd012def77ca9ca1918da74a71
258,823,653,336,126,560,000,000,000,000,000,000,000
16
Check that a secondary auth manager is set before dereferencing. Summary: CVE-2018-6343 Reviewed By: mingtaoy Differential Revision: D12994423 fbshipit-source-id: 9229ec11da8085f1fa153595e8e5353e19d06fb7
bool memcpy_field_possible(const Field *from) const { return Field_str::memcpy_field_possible(from) && !compression_method() == !from->compression_method() && !table->copy_blobs; }
0
[ "CWE-416", "CWE-703" ]
server
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
296,874,308,835,509,580,000,000,000,000,000,000,000
6
MDEV-24176 Server crashes after insert in the table with virtual column generated using date_format() and if() vcol_info->expr is allocated on expr_arena at parsing stage. Since the expr item is allocated on expr_arena, all the items it contains must be allocated on expr_arena too. Otherwise fix_session_expr() will encounter a prematurely freed item. When the table is reopened from the cache, vcol_info contains a stale expression. We refresh the expression via TABLE::vcol_fix_exprs(), but first we must prepare a proper context (Vcol_expr_context) which meets some requirements: 1. As noted above, the expr update must be done on expr_arena, as new items may be created. This was a bug in fix_session_expr_for_read() and was simply not reproduced because there was no second refix. Now refix is done in more cases, so it does reproduce. Tests affected: vcol.binlog 2. Also, the name resolution context must be narrowed to the single table. Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes 3. sql_mode must be clean and must not fail the expr update. sql_mode values such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc. must not affect the vcol expression update. If the table was created successfully, any further evaluation must not fail. Tests affected: main.func_like Reviewed by: Sergei Golubchik <[email protected]>
static MagickBooleanType ReadHEICImageByID(const ImageInfo *image_info, Image *image,struct heif_context *heif_context,heif_item_id image_id, ExceptionInfo *exception) { const char *option; int stride_y, stride_cb, stride_cr; MagickBooleanType status; ssize_t y; struct heif_decoding_options *decode_options; struct heif_error error; struct heif_image *heif_image; struct heif_image_handle *image_handle; const uint8_t *p_y, *p_cb, *p_cr; error=heif_context_get_image_handle(heif_context,image_id,&image_handle); if (IsHeifSuccess(&error,image,exception) == MagickFalse) return(MagickFalse); if (ReadHEICColorProfile(image,image_handle,exception) == MagickFalse) { heif_image_handle_release(image_handle); return(MagickFalse); } if (ReadHEICExifProfile(image,image_handle,exception) == MagickFalse) { heif_image_handle_release(image_handle); return(MagickFalse); } /* Set image size. */ image->depth=8; image->columns=(size_t) heif_image_handle_get_width(image_handle); image->rows=(size_t) heif_image_handle_get_height(image_handle); if (image_info->ping != MagickFalse) { image->colorspace=YCbCrColorspace; heif_image_handle_release(image_handle); return(MagickTrue); } if (HEICSkipImage(image_info,image) != MagickFalse) { heif_image_handle_release(image_handle); return(MagickTrue); } status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) { heif_image_handle_release(image_handle); return(MagickFalse); } /* Copy HEIF image into ImageMagick data structures. */ (void) SetImageColorspace(image,YCbCrColorspace,exception); decode_options=(struct heif_decoding_options *) NULL; option=GetImageOption(image_info,"heic:preserve-orientation"); if (IsStringTrue(option) == MagickTrue) { decode_options=heif_decoding_options_alloc(); decode_options->ignore_transformations=1; } else (void) SetImageProperty(image,"exif:Orientation","1",exception); error=heif_decode_image(image_handle,&heif_image,heif_colorspace_YCbCr, heif_chroma_420,decode_options); if (decode_options != (struct heif_decoding_options *) NULL) heif_decoding_options_free(decode_options); if (IsHeifSuccess(&error,image,exception) == MagickFalse) { heif_image_handle_release(image_handle); return(MagickFalse); } /* Correct the width and height of the image. */ image->columns=(size_t) heif_image_get_width(heif_image,heif_channel_Y); image->rows=(size_t) heif_image_get_height(heif_image,heif_channel_Y); status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) { heif_image_release(heif_image); heif_image_handle_release(image_handle); return(MagickFalse); } p_y=heif_image_get_plane_readonly(heif_image,heif_channel_Y,&stride_y); p_cb=heif_image_get_plane_readonly(heif_image,heif_channel_Cb,&stride_cb); p_cr=heif_image_get_plane_readonly(heif_image,heif_channel_Cr,&stride_cr); for (y=0; y < (ssize_t) image->rows; y++) { Quantum *q; register ssize_t x; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleCharToQuantum((unsigned char) p_y[y* stride_y+x]),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) p_cb[(y/2)* stride_cb+x/2]),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) p_cr[(y/2)* stride_cr+x/2]),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } heif_image_release(heif_image); heif_image_handle_release(image_handle); return(MagickTrue); }
0
[ "CWE-125" ]
ImageMagick
868aad754ee599eb7153b84d610f2ecdf7b339f6
152,426,799,834,776,980,000,000,000,000,000,000,000
136
Always correct the width and height of the image (#1859).
static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, struct vm_area_struct *vma, struct page *check_page) { struct mm_struct *mm = vma->vm_mm; pmd_t *pmd; pte_t *pte; pte_t pteval; spinlock_t *ptl; struct page *page; unsigned long address; unsigned long mmun_start; /* For mmu_notifiers */ unsigned long mmun_end; /* For mmu_notifiers */ unsigned long end; int ret = SWAP_AGAIN; int locked_vma = 0; address = (vma->vm_start + cursor) & CLUSTER_MASK; end = address + CLUSTER_SIZE; if (address < vma->vm_start) address = vma->vm_start; if (end > vma->vm_end) end = vma->vm_end; pmd = mm_find_pmd(mm, address); if (!pmd) return ret; mmun_start = address; mmun_end = end; mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); /* * If we can acquire the mmap_sem for read, and vma is VM_LOCKED, * keep the sem while scanning the cluster for mlocking pages. */ if (down_read_trylock(&vma->vm_mm->mmap_sem)) { locked_vma = (vma->vm_flags & VM_LOCKED); if (!locked_vma) up_read(&vma->vm_mm->mmap_sem); /* don't need it */ } pte = pte_offset_map_lock(mm, pmd, address, &ptl); /* Update high watermark before we lower rss */ update_hiwater_rss(mm); for (; address < end; pte++, address += PAGE_SIZE) { if (!pte_present(*pte)) continue; page = vm_normal_page(vma, address, *pte); BUG_ON(!page || PageAnon(page)); if (locked_vma) { if (page == check_page) { /* we know we have check_page locked */ mlock_vma_page(page); ret = SWAP_MLOCK; } else if (trylock_page(page)) { /* * If we can lock the page, perform mlock. * Otherwise leave the page alone, it will be * eventually encountered again later. */ mlock_vma_page(page); unlock_page(page); } continue; /* don't unmap */ } if (ptep_clear_flush_young_notify(vma, address, pte)) continue; /* Nuke the page table entry. */ flush_cache_page(vma, address, pte_pfn(*pte)); pteval = ptep_clear_flush(vma, address, pte); /* If nonlinear, store the file page offset in the pte. */ if (page->index != linear_page_index(vma, address)) { pte_t ptfile = pgoff_to_pte(page->index); if (pte_soft_dirty(pteval)) pte_file_mksoft_dirty(ptfile); set_pte_at(mm, address, pte, ptfile); } /* Move the dirty bit to the physical page now the pte is gone. */ if (pte_dirty(pteval)) set_page_dirty(page); page_remove_rmap(page); page_cache_release(page); dec_mm_counter(mm, MM_FILEPAGES); (*mapcount)--; } pte_unmap_unlock(pte - 1, ptl); mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); if (locked_vma) up_read(&vma->vm_mm->mmap_sem); return ret; }
0
[ "CWE-400", "CWE-703", "CWE-264" ]
linux
57e68e9cd65b4b8eb4045a1e0d0746458502554c
300,924,747,398,764,670,000,000,000,000,000,000,000
99
mm: try_to_unmap_cluster() should lock_page() before mlocking A BUG_ON(!PageLocked) was triggered in mlock_vma_page() by Sasha Levin fuzzing with trinity. The call site try_to_unmap_cluster() does not lock the pages other than its check_page parameter (which is already locked). The BUG_ON in mlock_vma_page() is not documented and its purpose is somewhat unclear, but apparently it serializes against page migration, which could otherwise fail to transfer the PG_mlocked flag. This would not be fatal, as the page would be eventually encountered again, but NR_MLOCK accounting would become distorted nevertheless. This patch adds a comment to the BUG_ON in mlock_vma_page() and munlock_vma_page() to that effect. The call site try_to_unmap_cluster() is fixed so that for page != check_page, trylock_page() is attempted (to avoid possible deadlocks as we already have check_page locked) and mlock_vma_page() is performed only upon success. If the page lock cannot be obtained, the page is left without PG_mlocked, which is again not a problem in the whole unevictable memory design. Signed-off-by: Vlastimil Babka <[email protected]> Signed-off-by: Bob Liu <[email protected]> Reported-by: Sasha Levin <[email protected]> Cc: Wanpeng Li <[email protected]> Cc: Michel Lespinasse <[email protected]> Cc: KOSAKI Motohiro <[email protected]> Acked-by: Rik van Riel <[email protected]> Cc: David Rientjes <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Joonsoo Kim <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
cdata_write (cdata_t *cd, GDataOutputStream *out, int type, guint8 *data, size_t size, gsize *bytes_written, GCancellable *cancellable, GError **error) { if (!cdata_set(cd, type, data, size)) return FALSE; guint32 datacsum = compute_checksum(cd->in, cd->ncbytes, 0); guint8 sizecsum[4]; guint16 nbytes_le; nbytes_le = GUINT16_TO_LE (cd->ncbytes); memcpy (&sizecsum[0], &nbytes_le, 2); nbytes_le = GUINT16_TO_LE (cd->nubytes); memcpy (&sizecsum[2], &nbytes_le, 2); cd->checksum = compute_checksum (sizecsum, sizeof(sizecsum), datacsum); GOutputStream *stream = g_filter_output_stream_get_base_stream (G_FILTER_OUTPUT_STREAM (out)); *bytes_written = 0; if ((!W4 (cd->checksum)) || (!W2 (cd->ncbytes)) || (!W2 (cd->nubytes)) || (g_output_stream_write (stream, cd->in, cd->ncbytes, cancellable, error) == -1)) return FALSE; *bytes_written = 4 + 2 + 2 + cd->ncbytes; return TRUE; }
0
[ "CWE-787" ]
gcab
c512f6ff0c82a1139b36db2b28f93edc01c74b4b
250,471,076,177,551,670,000,000,000,000,000,000,000
30
trivial: Allocate cdata_t on the heap Using a 91kB stack allocation for one object isn't awesome; allocating it on the heap also lets us use g_autoptr() to simplify gcab_folder_extract()
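A sketch of the resulting pattern (struct contents and sizes are illustrative; the commit mentions g_autoptr(), and for a plain struct with no destructor, g_autofree plus g_free() is the equivalent GLib idiom):

#include <glib.h>

/* Stand-in for gcab's compression state; the real struct is close to 91 kB,
 * which is why it should not live on the stack. */
typedef struct {
    guint8  in[65536];
    guint8  out[65536];
    guint32 checksum;
} cdata_t;

/* g_autofree runs g_free() on every return path, so early returns in the
 * extraction loop cannot leak the buffer. */
gboolean folder_extract_sketch(gboolean fail_early)
{
    g_autofree cdata_t *cd = g_new0(cdata_t, 1);

    if (fail_early)
        return FALSE;        /* cd is freed here automatically */

    cd->checksum = 0;        /* ... do the real work with cd ... */
    return TRUE;             /* and freed here as well */
}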
PHP_FUNCTION(openssl_spki_new) { size_t challenge_len; char * challenge = NULL, * spkstr = NULL; zend_string * s = NULL; zend_resource *keyresource = NULL; const char *spkac = "SPKAC="; zend_long algo = OPENSSL_ALGO_MD5; zval *method = NULL; zval * zpkey = NULL; EVP_PKEY * pkey = NULL; NETSCAPE_SPKI *spki=NULL; const EVP_MD *mdtype; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rs|z", &zpkey, &challenge, &challenge_len, &method) == FAILURE) { return; } RETVAL_FALSE; PHP_OPENSSL_CHECK_SIZE_T_TO_INT(challenge_len, challenge); pkey = php_openssl_evp_from_zval(zpkey, 0, challenge, challenge_len, 1, &keyresource); if (pkey == NULL) { php_error_docref(NULL, E_WARNING, "Unable to use supplied private key"); goto cleanup; } if (method != NULL) { if (Z_TYPE_P(method) == IS_LONG) { algo = Z_LVAL_P(method); } else { php_error_docref(NULL, E_WARNING, "Algorithm must be of supported type"); goto cleanup; } } mdtype = php_openssl_get_evp_md_from_algo(algo); if (!mdtype) { php_error_docref(NULL, E_WARNING, "Unknown signature algorithm"); goto cleanup; } if ((spki = NETSCAPE_SPKI_new()) == NULL) { php_openssl_store_errors(); php_error_docref(NULL, E_WARNING, "Unable to create new SPKAC"); goto cleanup; } if (challenge) { if (!ASN1_STRING_set(spki->spkac->challenge, challenge, (int)challenge_len)) { php_openssl_store_errors(); php_error_docref(NULL, E_WARNING, "Unable to set challenge data"); goto cleanup; } } if (!NETSCAPE_SPKI_set_pubkey(spki, pkey)) { php_openssl_store_errors(); php_error_docref(NULL, E_WARNING, "Unable to embed public key"); goto cleanup; } if (!NETSCAPE_SPKI_sign(spki, pkey, mdtype)) { php_openssl_store_errors(); php_error_docref(NULL, E_WARNING, "Unable to sign with specified algorithm"); goto cleanup; } spkstr = NETSCAPE_SPKI_b64_encode(spki); if (!spkstr){ php_openssl_store_errors(); php_error_docref(NULL, E_WARNING, "Unable to encode SPKAC"); goto cleanup; } s = zend_string_alloc(strlen(spkac) + strlen(spkstr), 0); sprintf(ZSTR_VAL(s), "%s%s", spkac, spkstr); ZSTR_LEN(s) = strlen(ZSTR_VAL(s)); OPENSSL_free(spkstr); RETVAL_STR(s); goto cleanup; cleanup: if (spki != NULL) { NETSCAPE_SPKI_free(spki); } if (keyresource == NULL && pkey != NULL) { EVP_PKEY_free(pkey); } if (s && ZSTR_LEN(s) <= 0) { RETVAL_FALSE; } if (keyresource == NULL && s != NULL) { zend_string_release(s); } }
0
[ "CWE-326" ]
php-src
0216630ea2815a5789a24279a1211ac398d4de79
318,828,452,048,777,100,000,000,000,000,000,000,000
101
Fix bug #79601 (Wrong ciphertext/tag in AES-CCM encryption for a 12 bytes IV)
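A sketch of the EVP call order a 12-byte CCM nonce needs with OpenSSL 1.1+ (illustrative parameters; this is the standard AEAD control sequence, not the PHP internals): the IV length and tag length must be configured before the key/IV are installed, and the total plaintext length must be declared before encryption, otherwise the ciphertext/tag pair comes out wrong.

#include <openssl/evp.h>

/* AES-128-CCM encryption with a 12-byte nonce and a 16-byte tag. */
int ccm_encrypt(const unsigned char key[16],
                const unsigned char iv[12],
                const unsigned char *pt, int ptlen,
                unsigned char *ct, unsigned char tag[16])
{
    EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
    int len, ok =
        ctx != NULL &&
        EVP_EncryptInit_ex(ctx, EVP_aes_128_ccm(), NULL, NULL, NULL) &&
        /* IV length first: CCM's default nonce size is not 12 bytes */
        EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_IVLEN, 12, NULL) &&
        /* tag length next (NULL pointer == just set the length) */
        EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, 16, NULL) &&
        /* only now install key and nonce */
        EVP_EncryptInit_ex(ctx, NULL, NULL, key, iv) &&
        /* CCM requires the total plaintext length up front */
        EVP_EncryptUpdate(ctx, NULL, &len, NULL, ptlen) &&
        EVP_EncryptUpdate(ctx, ct, &len, pt, ptlen) &&
        EVP_EncryptFinal_ex(ctx, ct + len, &len) &&
        EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, 16, tag);
    EVP_CIPHER_CTX_free(ctx);
    return ok ? 0 : -1;
}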