instruction: stringclasses (1 value)
input: stringlengths (90 to 139k)
output: stringlengths (16 to 138k)
__index_level_0__: int64 (165k to 175k)
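The four columns above form one record per example: a shared prompt string (instruction), the vulnerable code with its commit message and CWE ID (input), the fixed function (output), and a numeric row index (__index_level_0__). As a minimal sketch of that record layout, the snippet below iterates over such rows; the JSON Lines storage and the filename rows.jsonl are assumptions made for the example, not something the listing above specifies.

```python
import json

# Minimal sketch: read records with the four fields listed above.
# Assumption: rows are stored as JSON Lines in "rows.jsonl" (hypothetical name).
with open("rows.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        # "instruction" is the single shared prompt string.
        # "input" holds the vulnerable code, commit message, and CWE ID.
        # "output" holds the fixed function; "__index_level_0__" is the row id.
        print(row["__index_level_0__"], row["input"][:60])
```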
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: void ff_h264_free_tables(H264Context *h, int free_rbsp) { int i; H264Context *hx; av_freep(&h->intra4x4_pred_mode); av_freep(&h->chroma_pred_mode_table); av_freep(&h->cbp_table); av_freep(&h->mvd_table[0]); av_freep(&h->mvd_table[1]); av_freep(&h->direct_table); av_freep(&h->non_zero_count); av_freep(&h->slice_table_base); h->slice_table = NULL; av_freep(&h->list_counts); av_freep(&h->mb2b_xy); av_freep(&h->mb2br_xy); av_buffer_pool_uninit(&h->qscale_table_pool); av_buffer_pool_uninit(&h->mb_type_pool); av_buffer_pool_uninit(&h->motion_val_pool); av_buffer_pool_uninit(&h->ref_index_pool); if (free_rbsp && h->DPB) { for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) ff_h264_unref_picture(h, &h->DPB[i]); av_freep(&h->DPB); } else if (h->DPB) { for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) h->DPB[i].needs_realloc = 1; } h->cur_pic_ptr = NULL; for (i = 0; i < H264_MAX_THREADS; i++) { hx = h->thread_context[i]; if (!hx) continue; av_freep(&hx->top_borders[1]); av_freep(&hx->top_borders[0]); av_freep(&hx->bipred_scratchpad); av_freep(&hx->edge_emu_buffer); av_freep(&hx->dc_val_base); av_freep(&hx->er.mb_index2xy); av_freep(&hx->er.error_status_table); av_freep(&hx->er.er_temp_buffer); av_freep(&hx->er.mbintra_table); av_freep(&hx->er.mbskip_table); if (free_rbsp) { av_freep(&hx->rbsp_buffer[1]); av_freep(&hx->rbsp_buffer[0]); hx->rbsp_buffer_size[0] = 0; hx->rbsp_buffer_size[1] = 0; } if (i) av_freep(&h->thread_context[i]); } } Commit Message: avcodec/h264: Clear delayed_pic on deallocation Fixes use of freed memory Fixes: case5_av_frame_copy_props.mp4 Found-by: Michal Zalewski <[email protected]> Signed-off-by: Michael Niedermayer <[email protected]> CWE ID:
void ff_h264_free_tables(H264Context *h, int free_rbsp) { int i; H264Context *hx; av_freep(&h->intra4x4_pred_mode); av_freep(&h->chroma_pred_mode_table); av_freep(&h->cbp_table); av_freep(&h->mvd_table[0]); av_freep(&h->mvd_table[1]); av_freep(&h->direct_table); av_freep(&h->non_zero_count); av_freep(&h->slice_table_base); h->slice_table = NULL; av_freep(&h->list_counts); av_freep(&h->mb2b_xy); av_freep(&h->mb2br_xy); av_buffer_pool_uninit(&h->qscale_table_pool); av_buffer_pool_uninit(&h->mb_type_pool); av_buffer_pool_uninit(&h->motion_val_pool); av_buffer_pool_uninit(&h->ref_index_pool); if (free_rbsp && h->DPB) { for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) ff_h264_unref_picture(h, &h->DPB[i]); memset(h->delayed_pic, 0, sizeof(h->delayed_pic)); av_freep(&h->DPB); } else if (h->DPB) { for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) h->DPB[i].needs_realloc = 1; } h->cur_pic_ptr = NULL; for (i = 0; i < H264_MAX_THREADS; i++) { hx = h->thread_context[i]; if (!hx) continue; av_freep(&hx->top_borders[1]); av_freep(&hx->top_borders[0]); av_freep(&hx->bipred_scratchpad); av_freep(&hx->edge_emu_buffer); av_freep(&hx->dc_val_base); av_freep(&hx->er.mb_index2xy); av_freep(&hx->er.error_status_table); av_freep(&hx->er.er_temp_buffer); av_freep(&hx->er.mbintra_table); av_freep(&hx->er.mbskip_table); if (free_rbsp) { av_freep(&hx->rbsp_buffer[1]); av_freep(&hx->rbsp_buffer[0]); hx->rbsp_buffer_size[0] = 0; hx->rbsp_buffer_size[1] = 0; } if (i) av_freep(&h->thread_context[i]); } }
166,624
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: static irqreturn_t i8042_interrupt(int irq, void *dev_id) { struct i8042_port *port; struct serio *serio; unsigned long flags; unsigned char str, data; unsigned int dfl; unsigned int port_no; bool filtered; int ret = 1; spin_lock_irqsave(&i8042_lock, flags); str = i8042_read_status(); if (unlikely(~str & I8042_STR_OBF)) { spin_unlock_irqrestore(&i8042_lock, flags); if (irq) dbg("Interrupt %d, without any data\n", irq); ret = 0; goto out; } data = i8042_read_data(); if (i8042_mux_present && (str & I8042_STR_AUXDATA)) { static unsigned long last_transmit; static unsigned char last_str; dfl = 0; if (str & I8042_STR_MUXERR) { dbg("MUX error, status is %02x, data is %02x\n", str, data); /* * When MUXERR condition is signalled the data register can only contain * 0xfd, 0xfe or 0xff if implementation follows the spec. Unfortunately * it is not always the case. Some KBCs also report 0xfc when there is * nothing connected to the port while others sometimes get confused which * port the data came from and signal error leaving the data intact. They * _do not_ revert to legacy mode (actually I've never seen KBC reverting * to legacy mode yet, when we see one we'll add proper handling). * Anyway, we process 0xfc, 0xfd, 0xfe and 0xff as timeouts, and for the * rest assume that the data came from the same serio last byte * was transmitted (if transmission happened not too long ago). */ switch (data) { default: if (time_before(jiffies, last_transmit + HZ/10)) { str = last_str; break; } /* fall through - report timeout */ case 0xfc: case 0xfd: case 0xfe: dfl = SERIO_TIMEOUT; data = 0xfe; break; case 0xff: dfl = SERIO_PARITY; data = 0xfe; break; } } port_no = I8042_MUX_PORT_NO + ((str >> 6) & 3); last_str = str; last_transmit = jiffies; } else { dfl = ((str & I8042_STR_PARITY) ? SERIO_PARITY : 0) | ((str & I8042_STR_TIMEOUT && !i8042_notimeout) ? SERIO_TIMEOUT : 0); port_no = (str & I8042_STR_AUXDATA) ? I8042_AUX_PORT_NO : I8042_KBD_PORT_NO; } port = &i8042_ports[port_no]; serio = port->exists ? port->serio : NULL; filter_dbg(port->driver_bound, data, "<- i8042 (interrupt, %d, %d%s%s)\n", port_no, irq, dfl & SERIO_PARITY ? ", bad parity" : "", dfl & SERIO_TIMEOUT ? ", timeout" : ""); filtered = i8042_filter(data, str, serio); spin_unlock_irqrestore(&i8042_lock, flags); if (likely(port->exists && !filtered)) serio_interrupt(serio, data, dfl); out: return IRQ_RETVAL(ret); } Commit Message: Input: i8042 - fix crash at boot time The driver checks port->exists twice in i8042_interrupt(), first when trying to assign temporary "serio" variable, and second time when deciding whether it should call serio_interrupt(). 
The value of port->exists may change between the 2 checks, and we may end up calling serio_interrupt() with a NULL pointer: BUG: unable to handle kernel NULL pointer dereference at 0000000000000050 IP: [<ffffffff8150feaf>] _spin_lock_irqsave+0x1f/0x40 PGD 0 Oops: 0002 [#1] SMP last sysfs file: CPU 0 Modules linked in: Pid: 1, comm: swapper Not tainted 2.6.32-358.el6.x86_64 #1 QEMU Standard PC (i440FX + PIIX, 1996) RIP: 0010:[<ffffffff8150feaf>] [<ffffffff8150feaf>] _spin_lock_irqsave+0x1f/0x40 RSP: 0018:ffff880028203cc0 EFLAGS: 00010082 RAX: 0000000000010000 RBX: 0000000000000000 RCX: 0000000000000000 RDX: 0000000000000282 RSI: 0000000000000098 RDI: 0000000000000050 RBP: ffff880028203cc0 R08: ffff88013e79c000 R09: ffff880028203ee0 R10: 0000000000000298 R11: 0000000000000282 R12: 0000000000000050 R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000098 FS: 0000000000000000(0000) GS:ffff880028200000(0000) knlGS:0000000000000000 CS: 0010 DS: 0018 ES: 0018 CR0: 000000008005003b CR2: 0000000000000050 CR3: 0000000001a85000 CR4: 00000000001407f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 Process swapper (pid: 1, threadinfo ffff88013e79c000, task ffff88013e79b500) Stack: ffff880028203d00 ffffffff813de186 ffffffffffffff02 0000000000000000 <d> 0000000000000000 0000000000000000 0000000000000000 0000000000000098 <d> ffff880028203d70 ffffffff813e0162 ffff880028203d20 ffffffff8103b8ac Call Trace: <IRQ> [<ffffffff813de186>] serio_interrupt+0x36/0xa0 [<ffffffff813e0162>] i8042_interrupt+0x132/0x3a0 [<ffffffff8103b8ac>] ? kvm_clock_read+0x1c/0x20 [<ffffffff8103b8b9>] ? kvm_clock_get_cycles+0x9/0x10 [<ffffffff810e1640>] handle_IRQ_event+0x60/0x170 [<ffffffff8103b154>] ? kvm_guest_apic_eoi_write+0x44/0x50 [<ffffffff810e3d8e>] handle_edge_irq+0xde/0x180 [<ffffffff8100de89>] handle_irq+0x49/0xa0 [<ffffffff81516c8c>] do_IRQ+0x6c/0xf0 [<ffffffff8100b9d3>] ret_from_intr+0x0/0x11 [<ffffffff81076f63>] ? __do_softirq+0x73/0x1e0 [<ffffffff8109b75b>] ? hrtimer_interrupt+0x14b/0x260 [<ffffffff8100c1cc>] ? call_softirq+0x1c/0x30 [<ffffffff8100de05>] ? do_softirq+0x65/0xa0 [<ffffffff81076d95>] ? irq_exit+0x85/0x90 [<ffffffff81516d80>] ? smp_apic_timer_interrupt+0x70/0x9b [<ffffffff8100bb93>] ? apic_timer_interrupt+0x13/0x20 To avoid the issue let's change the second check to test whether serio is NULL or not. Also, let's take i8042_lock in i8042_start() and i8042_stop() instead of trying to be overly smart and using memory barriers. Signed-off-by: Chen Hong <[email protected]> [dtor: take lock in i8042_start()/i8042_stop()] Cc: [email protected] Signed-off-by: Dmitry Torokhov <[email protected]> CWE ID: CWE-476
static irqreturn_t i8042_interrupt(int irq, void *dev_id) { struct i8042_port *port; struct serio *serio; unsigned long flags; unsigned char str, data; unsigned int dfl; unsigned int port_no; bool filtered; int ret = 1; spin_lock_irqsave(&i8042_lock, flags); str = i8042_read_status(); if (unlikely(~str & I8042_STR_OBF)) { spin_unlock_irqrestore(&i8042_lock, flags); if (irq) dbg("Interrupt %d, without any data\n", irq); ret = 0; goto out; } data = i8042_read_data(); if (i8042_mux_present && (str & I8042_STR_AUXDATA)) { static unsigned long last_transmit; static unsigned char last_str; dfl = 0; if (str & I8042_STR_MUXERR) { dbg("MUX error, status is %02x, data is %02x\n", str, data); /* * When MUXERR condition is signalled the data register can only contain * 0xfd, 0xfe or 0xff if implementation follows the spec. Unfortunately * it is not always the case. Some KBCs also report 0xfc when there is * nothing connected to the port while others sometimes get confused which * port the data came from and signal error leaving the data intact. They * _do not_ revert to legacy mode (actually I've never seen KBC reverting * to legacy mode yet, when we see one we'll add proper handling). * Anyway, we process 0xfc, 0xfd, 0xfe and 0xff as timeouts, and for the * rest assume that the data came from the same serio last byte * was transmitted (if transmission happened not too long ago). */ switch (data) { default: if (time_before(jiffies, last_transmit + HZ/10)) { str = last_str; break; } /* fall through - report timeout */ case 0xfc: case 0xfd: case 0xfe: dfl = SERIO_TIMEOUT; data = 0xfe; break; case 0xff: dfl = SERIO_PARITY; data = 0xfe; break; } } port_no = I8042_MUX_PORT_NO + ((str >> 6) & 3); last_str = str; last_transmit = jiffies; } else { dfl = ((str & I8042_STR_PARITY) ? SERIO_PARITY : 0) | ((str & I8042_STR_TIMEOUT && !i8042_notimeout) ? SERIO_TIMEOUT : 0); port_no = (str & I8042_STR_AUXDATA) ? I8042_AUX_PORT_NO : I8042_KBD_PORT_NO; } port = &i8042_ports[port_no]; serio = port->exists ? port->serio : NULL; filter_dbg(port->driver_bound, data, "<- i8042 (interrupt, %d, %d%s%s)\n", port_no, irq, dfl & SERIO_PARITY ? ", bad parity" : "", dfl & SERIO_TIMEOUT ? ", timeout" : ""); filtered = i8042_filter(data, str, serio); spin_unlock_irqrestore(&i8042_lock, flags); if (likely(serio && !filtered)) serio_interrupt(serio, data, dfl); out: return IRQ_RETVAL(ret); }
169,421
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: raptor_libxml_getEntity(void* user_data, const xmlChar *name) { raptor_sax2* sax2 = (raptor_sax2*)user_data; return libxml2_getEntity(sax2->xc, name); } Commit Message: CVE-2012-0037 Enforce entity loading policy in raptor_libxml_resolveEntity and raptor_libxml_getEntity by checking for file URIs and network URIs. Add RAPTOR_OPTION_LOAD_EXTERNAL_ENTITIES / loadExternalEntities for turning on loading of XML external entity loading, disabled by default. This affects all the parsers that use SAX2: rdfxml, rss-tag-soup (and aliases) and rdfa. CWE ID: CWE-200
raptor_libxml_getEntity(void* user_data, const xmlChar *name) { raptor_sax2* sax2 = (raptor_sax2*)user_data; xmlParserCtxtPtr xc = sax2->xc; xmlEntityPtr ret = NULL; if(!xc) return NULL; if(!xc->inSubset) { /* looks for hardcoded set of entity names - lt, gt etc. */ ret = xmlGetPredefinedEntity(name); if(ret) { RAPTOR_DEBUG2("Entity '%s' found in predefined set\n", name); return ret; } } /* This section uses xmlGetDocEntity which looks for entities in * memory only, never from a file or URI */ if(xc->myDoc && (xc->myDoc->standalone == 1)) { RAPTOR_DEBUG2("Entity '%s' document is standalone\n", name); /* Document is standalone: no entities are required to interpret doc */ if(xc->inSubset == 2) { xc->myDoc->standalone = 0; ret = xmlGetDocEntity(xc->myDoc, name); xc->myDoc->standalone = 1; } else { ret = xmlGetDocEntity(xc->myDoc, name); if(!ret) { xc->myDoc->standalone = 0; ret = xmlGetDocEntity(xc->myDoc, name); xc->myDoc->standalone = 1; } } } else { ret = xmlGetDocEntity(xc->myDoc, name); } if(ret && !ret->children && (ret->etype == XML_EXTERNAL_GENERAL_PARSED_ENTITY)) { /* Entity is an external general parsed entity. It may be in a * catalog file, user file or user URI */ int val = 0; xmlNodePtr children; int load_entity = 0; load_entity = RAPTOR_OPTIONS_GET_NUMERIC(sax2, RAPTOR_OPTION_LOAD_EXTERNAL_ENTITIES); if(load_entity) load_entity = raptor_sax2_check_load_uri_string(sax2, ret->URI); if(!load_entity) { RAPTOR_DEBUG2("Not getting entity URI %s by policy\n", ret->URI); children = xmlNewText((const xmlChar*)""); } else { /* Disable SAX2 handlers so that the SAX2 events do not all get * sent to callbacks during dealing with the entity parsing. */ sax2->enabled = 0; val = xmlParseCtxtExternalEntity(xc, ret->URI, ret->ExternalID, &children); sax2->enabled = 1; } if(!val) { xmlAddChildList((xmlNodePtr)ret, children); } else { xc->validate = 0; return NULL; } ret->owner = 1; /* Mark this entity as having been checked - never do this again */ if(!ret->checked) ret->checked = 1; } return ret; }
165,658
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: static OPJ_BOOL opj_pi_next_pcrl(opj_pi_iterator_t * pi) { opj_pi_comp_t *comp = NULL; opj_pi_resolution_t *res = NULL; OPJ_UINT32 index = 0; if (!pi->first) { comp = &pi->comps[pi->compno]; goto LABEL_SKIP; } else { OPJ_UINT32 compno, resno; pi->first = 0; pi->dx = 0; pi->dy = 0; for (compno = 0; compno < pi->numcomps; compno++) { comp = &pi->comps[compno]; for (resno = 0; resno < comp->numresolutions; resno++) { OPJ_UINT32 dx, dy; res = &comp->resolutions[resno]; dx = comp->dx * (1u << (res->pdx + comp->numresolutions - 1 - resno)); dy = comp->dy * (1u << (res->pdy + comp->numresolutions - 1 - resno)); pi->dx = !pi->dx ? dx : opj_uint_min(pi->dx, dx); pi->dy = !pi->dy ? dy : opj_uint_min(pi->dy, dy); } } } if (!pi->tp_on) { pi->poc.ty0 = pi->ty0; pi->poc.tx0 = pi->tx0; pi->poc.ty1 = pi->ty1; pi->poc.tx1 = pi->tx1; } for (pi->y = pi->poc.ty0; pi->y < pi->poc.ty1; pi->y += (OPJ_INT32)(pi->dy - (OPJ_UINT32)(pi->y % (OPJ_INT32)pi->dy))) { for (pi->x = pi->poc.tx0; pi->x < pi->poc.tx1; pi->x += (OPJ_INT32)(pi->dx - (OPJ_UINT32)(pi->x % (OPJ_INT32)pi->dx))) { for (pi->compno = pi->poc.compno0; pi->compno < pi->poc.compno1; pi->compno++) { comp = &pi->comps[pi->compno]; for (pi->resno = pi->poc.resno0; pi->resno < opj_uint_min(pi->poc.resno1, comp->numresolutions); pi->resno++) { OPJ_UINT32 levelno; OPJ_INT32 trx0, try0; OPJ_INT32 trx1, try1; OPJ_UINT32 rpx, rpy; OPJ_INT32 prci, prcj; res = &comp->resolutions[pi->resno]; levelno = comp->numresolutions - 1 - pi->resno; trx0 = opj_int_ceildiv(pi->tx0, (OPJ_INT32)(comp->dx << levelno)); try0 = opj_int_ceildiv(pi->ty0, (OPJ_INT32)(comp->dy << levelno)); trx1 = opj_int_ceildiv(pi->tx1, (OPJ_INT32)(comp->dx << levelno)); try1 = opj_int_ceildiv(pi->ty1, (OPJ_INT32)(comp->dy << levelno)); rpx = res->pdx + levelno; rpy = res->pdy + levelno; if (!((pi->y % (OPJ_INT32)(comp->dy << rpy) == 0) || ((pi->y == pi->ty0) && ((try0 << levelno) % (1 << rpy))))) { continue; } if (!((pi->x % (OPJ_INT32)(comp->dx << rpx) == 0) || ((pi->x == pi->tx0) && ((trx0 << levelno) % (1 << rpx))))) { continue; } if ((res->pw == 0) || (res->ph == 0)) { continue; } if ((trx0 == trx1) || (try0 == try1)) { continue; } prci = opj_int_floordivpow2(opj_int_ceildiv(pi->x, (OPJ_INT32)(comp->dx << levelno)), (OPJ_INT32)res->pdx) - opj_int_floordivpow2(trx0, (OPJ_INT32)res->pdx); prcj = opj_int_floordivpow2(opj_int_ceildiv(pi->y, (OPJ_INT32)(comp->dy << levelno)), (OPJ_INT32)res->pdy) - opj_int_floordivpow2(try0, (OPJ_INT32)res->pdy); pi->precno = (OPJ_UINT32)(prci + prcj * (OPJ_INT32)res->pw); for (pi->layno = pi->poc.layno0; pi->layno < pi->poc.layno1; pi->layno++) { index = pi->layno * pi->step_l + pi->resno * pi->step_r + pi->compno * pi->step_c + pi->precno * pi->step_p; if (!pi->include[index]) { pi->include[index] = 1; return OPJ_TRUE; } LABEL_SKIP: ; } } } } } return OPJ_FALSE; } Commit Message: Avoid division by zero in opj_pi_next_rpcl, opj_pi_next_pcrl and opj_pi_next_cprl (#938) Fixes issues with id:000026,sig:08,src:002419,op:int32,pos:60,val:+32 and id:000019,sig:08,src:001098,op:flip1,pos:49 CWE ID: CWE-369
static OPJ_BOOL opj_pi_next_pcrl(opj_pi_iterator_t * pi) { opj_pi_comp_t *comp = NULL; opj_pi_resolution_t *res = NULL; OPJ_UINT32 index = 0; if (!pi->first) { comp = &pi->comps[pi->compno]; goto LABEL_SKIP; } else { OPJ_UINT32 compno, resno; pi->first = 0; pi->dx = 0; pi->dy = 0; for (compno = 0; compno < pi->numcomps; compno++) { comp = &pi->comps[compno]; for (resno = 0; resno < comp->numresolutions; resno++) { OPJ_UINT32 dx, dy; res = &comp->resolutions[resno]; dx = comp->dx * (1u << (res->pdx + comp->numresolutions - 1 - resno)); dy = comp->dy * (1u << (res->pdy + comp->numresolutions - 1 - resno)); pi->dx = !pi->dx ? dx : opj_uint_min(pi->dx, dx); pi->dy = !pi->dy ? dy : opj_uint_min(pi->dy, dy); } } } if (!pi->tp_on) { pi->poc.ty0 = pi->ty0; pi->poc.tx0 = pi->tx0; pi->poc.ty1 = pi->ty1; pi->poc.tx1 = pi->tx1; } for (pi->y = pi->poc.ty0; pi->y < pi->poc.ty1; pi->y += (OPJ_INT32)(pi->dy - (OPJ_UINT32)(pi->y % (OPJ_INT32)pi->dy))) { for (pi->x = pi->poc.tx0; pi->x < pi->poc.tx1; pi->x += (OPJ_INT32)(pi->dx - (OPJ_UINT32)(pi->x % (OPJ_INT32)pi->dx))) { for (pi->compno = pi->poc.compno0; pi->compno < pi->poc.compno1; pi->compno++) { comp = &pi->comps[pi->compno]; for (pi->resno = pi->poc.resno0; pi->resno < opj_uint_min(pi->poc.resno1, comp->numresolutions); pi->resno++) { OPJ_UINT32 levelno; OPJ_INT32 trx0, try0; OPJ_INT32 trx1, try1; OPJ_UINT32 rpx, rpy; OPJ_INT32 prci, prcj; res = &comp->resolutions[pi->resno]; levelno = comp->numresolutions - 1 - pi->resno; trx0 = opj_int_ceildiv(pi->tx0, (OPJ_INT32)(comp->dx << levelno)); try0 = opj_int_ceildiv(pi->ty0, (OPJ_INT32)(comp->dy << levelno)); trx1 = opj_int_ceildiv(pi->tx1, (OPJ_INT32)(comp->dx << levelno)); try1 = opj_int_ceildiv(pi->ty1, (OPJ_INT32)(comp->dy << levelno)); rpx = res->pdx + levelno; rpy = res->pdy + levelno; /* To avoid divisions by zero / undefined behaviour on shift */ /* in below tests */ /* Relates to id:000019,sig:08,src:001098,op:flip1,pos:49 */ /* of https://github.com/uclouvain/openjpeg/issues/938 */ if (rpx >= 31 || ((comp->dx << rpx) >> rpx) != comp->dx || rpy >= 31 || ((comp->dy << rpy) >> rpy) != comp->dy) { continue; } /* See ISO-15441. B.12.1.4 Position-component-resolution level-layer progression */ if (!((pi->y % (OPJ_INT32)(comp->dy << rpy) == 0) || ((pi->y == pi->ty0) && ((try0 << levelno) % (1 << rpy))))) { continue; } if (!((pi->x % (OPJ_INT32)(comp->dx << rpx) == 0) || ((pi->x == pi->tx0) && ((trx0 << levelno) % (1 << rpx))))) { continue; } if ((res->pw == 0) || (res->ph == 0)) { continue; } if ((trx0 == trx1) || (try0 == try1)) { continue; } prci = opj_int_floordivpow2(opj_int_ceildiv(pi->x, (OPJ_INT32)(comp->dx << levelno)), (OPJ_INT32)res->pdx) - opj_int_floordivpow2(trx0, (OPJ_INT32)res->pdx); prcj = opj_int_floordivpow2(opj_int_ceildiv(pi->y, (OPJ_INT32)(comp->dy << levelno)), (OPJ_INT32)res->pdy) - opj_int_floordivpow2(try0, (OPJ_INT32)res->pdy); pi->precno = (OPJ_UINT32)(prci + prcj * (OPJ_INT32)res->pw); for (pi->layno = pi->poc.layno0; pi->layno < pi->poc.layno1; pi->layno++) { index = pi->layno * pi->step_l + pi->resno * pi->step_r + pi->compno * pi->step_c + pi->precno * pi->step_p; if (!pi->include[index]) { pi->include[index] = 1; return OPJ_TRUE; } LABEL_SKIP: ; } } } } } return OPJ_FALSE; }
168,456
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: int touch_file(const char *path, bool parents, usec_t stamp, uid_t uid, gid_t gid, mode_t mode) { _cleanup_close_ int fd; int r; assert(path); if (parents) mkdir_parents(path, 0755); fd = open(path, O_WRONLY|O_CREAT|O_CLOEXEC|O_NOCTTY, mode > 0 ? mode : 0644); if (fd < 0) return -errno; if (mode > 0) { r = fchmod(fd, mode); if (r < 0) return -errno; } if (uid != UID_INVALID || gid != GID_INVALID) { r = fchown(fd, uid, gid); if (r < 0) return -errno; } if (stamp != USEC_INFINITY) { struct timespec ts[2]; timespec_store(&ts[0], stamp); ts[1] = ts[0]; r = futimens(fd, ts); } else r = futimens(fd, NULL); if (r < 0) return -errno; return 0; } Commit Message: util-lib: use MODE_INVALID as invalid value for mode_t everywhere CWE ID: CWE-264
int touch_file(const char *path, bool parents, usec_t stamp, uid_t uid, gid_t gid, mode_t mode) { _cleanup_close_ int fd; int r; assert(path); if (parents) mkdir_parents(path, 0755); fd = open(path, O_WRONLY|O_CREAT|O_CLOEXEC|O_NOCTTY, mode > 0 ? mode : 0644); if (fd < 0) return -errno; if (mode != MODE_INVALID) { r = fchmod(fd, mode); if (r < 0) return -errno; } if (uid != UID_INVALID || gid != GID_INVALID) { r = fchown(fd, uid, gid); if (r < 0) return -errno; } if (stamp != USEC_INFINITY) { struct timespec ts[2]; timespec_store(&ts[0], stamp); ts[1] = ts[0]; r = futimens(fd, ts); } else r = futimens(fd, NULL); if (r < 0) return -errno; return 0; }
170,105
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: allocate(struct file *file, int allocate_idat) { struct control *control = png_voidcast(struct control*, file->alloc_ptr); if (allocate_idat) { assert(file->idat == NULL); IDAT_init(&control->idat, file); } else /* chunk */ { assert(file->chunk == NULL); chunk_init(&control->chunk, file); } } Commit Message: DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82) CWE ID:
allocate(struct file *file, int allocate_idat) { struct control *control = voidcast(struct control*, file->alloc_ptr); if (allocate_idat) { assert(file->idat == NULL); IDAT_init(&control->idat, file); } else /* chunk */ { assert(file->chunk == NULL); chunk_init(&control->chunk, file); } }
173,729
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: static int mailimf_group_parse(const char * message, size_t length, size_t * indx, struct mailimf_group ** result) { size_t cur_token; char * display_name; struct mailimf_mailbox_list * mailbox_list; struct mailimf_group * group; int r; int res; cur_token = * indx; mailbox_list = NULL; r = mailimf_display_name_parse(message, length, &cur_token, &display_name); if (r != MAILIMF_NO_ERROR) { res = r; goto err; } r = mailimf_colon_parse(message, length, &cur_token); if (r != MAILIMF_NO_ERROR) { res = r; goto free_display_name; } r = mailimf_mailbox_list_parse(message, length, &cur_token, &mailbox_list); switch (r) { case MAILIMF_NO_ERROR: break; case MAILIMF_ERROR_PARSE: r = mailimf_cfws_parse(message, length, &cur_token); if ((r != MAILIMF_NO_ERROR) && (r != MAILIMF_ERROR_PARSE)) { res = r; goto free_display_name; } break; default: res = r; goto free_display_name; } r = mailimf_semi_colon_parse(message, length, &cur_token); if (r != MAILIMF_NO_ERROR) { res = r; goto free_mailbox_list; } group = mailimf_group_new(display_name, mailbox_list); if (group == NULL) { res = MAILIMF_ERROR_MEMORY; goto free_mailbox_list; } * indx = cur_token; * result = group; return MAILIMF_NO_ERROR; free_mailbox_list: if (mailbox_list != NULL) { mailimf_mailbox_list_free(mailbox_list); } free_display_name: mailimf_display_name_free(display_name); err: return res; } Commit Message: Fixed crash #274 CWE ID: CWE-476
static int mailimf_group_parse(const char * message, size_t length, size_t * indx, struct mailimf_group ** result) { size_t cur_token; char * display_name; struct mailimf_mailbox_list * mailbox_list; struct mailimf_group * group; int r; int res; clist * list; cur_token = * indx; mailbox_list = NULL; r = mailimf_display_name_parse(message, length, &cur_token, &display_name); if (r != MAILIMF_NO_ERROR) { res = r; goto err; } r = mailimf_colon_parse(message, length, &cur_token); if (r != MAILIMF_NO_ERROR) { res = r; goto free_display_name; } r = mailimf_mailbox_list_parse(message, length, &cur_token, &mailbox_list); switch (r) { case MAILIMF_NO_ERROR: break; case MAILIMF_ERROR_PARSE: r = mailimf_cfws_parse(message, length, &cur_token); if ((r != MAILIMF_NO_ERROR) && (r != MAILIMF_ERROR_PARSE)) { res = r; goto free_display_name; } list = clist_new(); if (list == NULL) { res = MAILIMF_ERROR_MEMORY; goto free_display_name; } mailbox_list = mailimf_mailbox_list_new(list); if (mailbox_list == NULL) { res = MAILIMF_ERROR_MEMORY; clist_free(list); goto free_display_name; } break; default: res = r; goto free_display_name; } r = mailimf_semi_colon_parse(message, length, &cur_token); if (r != MAILIMF_NO_ERROR) { res = r; goto free_mailbox_list; } group = mailimf_group_new(display_name, mailbox_list); if (group == NULL) { res = MAILIMF_ERROR_MEMORY; goto free_mailbox_list; } * indx = cur_token; * result = group; return MAILIMF_NO_ERROR; free_mailbox_list: if (mailbox_list != NULL) { mailimf_mailbox_list_free(mailbox_list); } free_display_name: mailimf_display_name_free(display_name); err: return res; }
168,192
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: cmsNAMEDCOLORLIST* CMSEXPORT cmsAllocNamedColorList(cmsContext ContextID, cmsUInt32Number n, cmsUInt32Number ColorantCount, const char* Prefix, const char* Suffix) { cmsNAMEDCOLORLIST* v = (cmsNAMEDCOLORLIST*) _cmsMallocZero(ContextID, sizeof(cmsNAMEDCOLORLIST)); if (v == NULL) return NULL; v ->List = NULL; v ->nColors = 0; v ->ContextID = ContextID; while (v -> Allocated < n) GrowNamedColorList(v); strncpy(v ->Prefix, Prefix, sizeof(v ->Prefix)); strncpy(v ->Suffix, Suffix, sizeof(v ->Suffix)); v->Prefix[32] = v->Suffix[32] = 0; v -> ColorantCount = ColorantCount; return v; } Commit Message: Non happy-path fixes CWE ID:
cmsNAMEDCOLORLIST* CMSEXPORT cmsAllocNamedColorList(cmsContext ContextID, cmsUInt32Number n, cmsUInt32Number ColorantCount, const char* Prefix, const char* Suffix) { cmsNAMEDCOLORLIST* v = (cmsNAMEDCOLORLIST*) _cmsMallocZero(ContextID, sizeof(cmsNAMEDCOLORLIST)); if (v == NULL) return NULL; v ->List = NULL; v ->nColors = 0; v ->ContextID = ContextID; while (v -> Allocated < n) GrowNamedColorList(v); strncpy(v ->Prefix, Prefix, sizeof(v ->Prefix)-1); strncpy(v ->Suffix, Suffix, sizeof(v ->Suffix)-1); v->Prefix[32] = v->Suffix[32] = 0; v -> ColorantCount = ColorantCount; return v; }
166,541
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: xfs_attr_node_addname(xfs_da_args_t *args) { xfs_da_state_t *state; xfs_da_state_blk_t *blk; xfs_inode_t *dp; xfs_mount_t *mp; int committed, retval, error; trace_xfs_attr_node_addname(args); /* * Fill in bucket of arguments/results/context to carry around. */ dp = args->dp; mp = dp->i_mount; restart: state = xfs_da_state_alloc(); state->args = args; state->mp = mp; state->blocksize = state->mp->m_sb.sb_blocksize; state->node_ents = state->mp->m_attr_node_ents; /* * Search to see if name already exists, and get back a pointer * to where it should go. */ error = xfs_da3_node_lookup_int(state, &retval); if (error) goto out; blk = &state->path.blk[ state->path.active-1 ]; ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) { goto out; } else if (retval == EEXIST) { if (args->flags & ATTR_CREATE) goto out; trace_xfs_attr_node_replace(args); args->op_flags |= XFS_DA_OP_RENAME; /* atomic rename op */ args->blkno2 = args->blkno; /* set 2nd entry info*/ args->index2 = args->index; args->rmtblkno2 = args->rmtblkno; args->rmtblkcnt2 = args->rmtblkcnt; args->rmtblkno = 0; args->rmtblkcnt = 0; } retval = xfs_attr3_leaf_add(blk->bp, state->args); if (retval == ENOSPC) { if (state->path.active == 1) { /* * Its really a single leaf node, but it had * out-of-line values so it looked like it *might* * have been a b-tree. */ xfs_da_state_free(state); state = NULL; xfs_bmap_init(args->flist, args->firstblock); error = xfs_attr3_leaf_to_node(args); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); goto out; } /* * bmap_finish() may have committed the last trans * and started a new one. We need the inode to be * in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); /* * Commit the node conversion and start the next * trans in the chain. */ error = xfs_trans_roll(&args->trans, dp); if (error) goto out; goto restart; } /* * Split as many Btree elements as required. * This code tracks the new and old attr's location * in the index/blkno/rmtblkno/rmtblkcnt fields and * in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields. */ xfs_bmap_init(args->flist, args->firstblock); error = xfs_da3_split(state); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); goto out; } /* * bmap_finish() may have committed the last trans and started * a new one. We need the inode to be in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); } else { /* * Addition succeeded, update Btree hashvals. */ xfs_da3_fixhashpath(state, &state->path); } /* * Kill the state structure, we're done with it and need to * allow the buffers to come back later. */ xfs_da_state_free(state); state = NULL; /* * Commit the leaf addition or btree split and start the next * trans in the chain. */ error = xfs_trans_roll(&args->trans, dp); if (error) goto out; /* * If there was an out-of-line value, allocate the blocks we * identified for its storage and copy the value. This is done * after we create the attribute so that we don't overflow the * maximum size of a transaction and/or hit a deadlock. 
*/ if (args->rmtblkno > 0) { error = xfs_attr_rmtval_set(args); if (error) return(error); } /* * If this is an atomic rename operation, we must "flip" the * incomplete flags on the "new" and "old" attribute/value pairs * so that one disappears and one appears atomically. Then we * must remove the "old" attribute/value pair. */ if (args->op_flags & XFS_DA_OP_RENAME) { /* * In a separate transaction, set the incomplete flag on the * "old" attr and clear the incomplete flag on the "new" attr. */ error = xfs_attr3_leaf_flipflags(args); if (error) goto out; /* * Dismantle the "old" attribute/value pair by removing * a "remote" value (if it exists). */ args->index = args->index2; args->blkno = args->blkno2; args->rmtblkno = args->rmtblkno2; args->rmtblkcnt = args->rmtblkcnt2; if (args->rmtblkno) { error = xfs_attr_rmtval_remove(args); if (error) return(error); } /* * Re-find the "old" attribute entry after any split ops. * The INCOMPLETE flag means that we will find the "old" * attr, not the "new" one. */ args->flags |= XFS_ATTR_INCOMPLETE; state = xfs_da_state_alloc(); state->args = args; state->mp = mp; state->blocksize = state->mp->m_sb.sb_blocksize; state->node_ents = state->mp->m_attr_node_ents; state->inleaf = 0; error = xfs_da3_node_lookup_int(state, &retval); if (error) goto out; /* * Remove the name and update the hashvals in the tree. */ blk = &state->path.blk[ state->path.active-1 ]; ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); error = xfs_attr3_leaf_remove(blk->bp, args); xfs_da3_fixhashpath(state, &state->path); /* * Check to see if the tree needs to be collapsed. */ if (retval && (state->path.active > 1)) { xfs_bmap_init(args->flist, args->firstblock); error = xfs_da3_join(state); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); goto out; } /* * bmap_finish() may have committed the last trans * and started a new one. We need the inode to be * in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); } /* * Commit and start the next trans in the chain. */ error = xfs_trans_roll(&args->trans, dp); if (error) goto out; } else if (args->rmtblkno > 0) { /* * Added a "remote" value, just clear the incomplete flag. */ error = xfs_attr3_leaf_clearflag(args); if (error) goto out; } retval = error = 0; out: if (state) xfs_da_state_free(state); if (error) return(error); return(retval); } Commit Message: xfs: remote attribute overwrite causes transaction overrun Commit e461fcb ("xfs: remote attribute lookups require the value length") passes the remote attribute length in the xfs_da_args structure on lookup so that CRC calculations and validity checking can be performed correctly by related code. This, unfortunately has the side effect of changing the args->valuelen parameter in cases where it shouldn't. That is, when we replace a remote attribute, the incoming replacement stores the value and length in args->value and args->valuelen, but then the lookup which finds the existing remote attribute overwrites args->valuelen with the length of the remote attribute being replaced. Hence when we go to create the new attribute, we create it of the size of the existing remote attribute, not the size it is supposed to be. 
When the new attribute is much smaller than the old attribute, this results in a transaction overrun and an ASSERT() failure on a debug kernel: XFS: Assertion failed: tp->t_blk_res_used <= tp->t_blk_res, file: fs/xfs/xfs_trans.c, line: 331 Fix this by keeping the remote attribute value length separate to the attribute value length in the xfs_da_args structure. The enables us to pass the length of the remote attribute to be removed without overwriting the new attribute's length. Also, ensure that when we save remote block contexts for a later rename we zero the original state variables so that we don't confuse the state of the attribute to be removes with the state of the new attribute that we just added. [Spotted by Brain Foster.] Signed-off-by: Dave Chinner <[email protected]> Reviewed-by: Brian Foster <[email protected]> Signed-off-by: Dave Chinner <[email protected]> CWE ID: CWE-19
xfs_attr_node_addname(xfs_da_args_t *args) { xfs_da_state_t *state; xfs_da_state_blk_t *blk; xfs_inode_t *dp; xfs_mount_t *mp; int committed, retval, error; trace_xfs_attr_node_addname(args); /* * Fill in bucket of arguments/results/context to carry around. */ dp = args->dp; mp = dp->i_mount; restart: state = xfs_da_state_alloc(); state->args = args; state->mp = mp; state->blocksize = state->mp->m_sb.sb_blocksize; state->node_ents = state->mp->m_attr_node_ents; /* * Search to see if name already exists, and get back a pointer * to where it should go. */ error = xfs_da3_node_lookup_int(state, &retval); if (error) goto out; blk = &state->path.blk[ state->path.active-1 ]; ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) { goto out; } else if (retval == EEXIST) { if (args->flags & ATTR_CREATE) goto out; trace_xfs_attr_node_replace(args); /* save the attribute state for later removal*/ args->op_flags |= XFS_DA_OP_RENAME; /* atomic rename op */ args->blkno2 = args->blkno; /* set 2nd entry info*/ args->index2 = args->index; args->rmtblkno2 = args->rmtblkno; args->rmtblkcnt2 = args->rmtblkcnt; args->rmtvaluelen2 = args->rmtvaluelen; /* * clear the remote attr state now that it is saved so that the * values reflect the state of the attribute we are about to * add, not the attribute we just found and will remove later. */ args->rmtblkno = 0; args->rmtblkcnt = 0; args->rmtvaluelen = 0; } retval = xfs_attr3_leaf_add(blk->bp, state->args); if (retval == ENOSPC) { if (state->path.active == 1) { /* * Its really a single leaf node, but it had * out-of-line values so it looked like it *might* * have been a b-tree. */ xfs_da_state_free(state); state = NULL; xfs_bmap_init(args->flist, args->firstblock); error = xfs_attr3_leaf_to_node(args); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); goto out; } /* * bmap_finish() may have committed the last trans * and started a new one. We need the inode to be * in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); /* * Commit the node conversion and start the next * trans in the chain. */ error = xfs_trans_roll(&args->trans, dp); if (error) goto out; goto restart; } /* * Split as many Btree elements as required. * This code tracks the new and old attr's location * in the index/blkno/rmtblkno/rmtblkcnt fields and * in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields. */ xfs_bmap_init(args->flist, args->firstblock); error = xfs_da3_split(state); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); goto out; } /* * bmap_finish() may have committed the last trans and started * a new one. We need the inode to be in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); } else { /* * Addition succeeded, update Btree hashvals. */ xfs_da3_fixhashpath(state, &state->path); } /* * Kill the state structure, we're done with it and need to * allow the buffers to come back later. */ xfs_da_state_free(state); state = NULL; /* * Commit the leaf addition or btree split and start the next * trans in the chain. */ error = xfs_trans_roll(&args->trans, dp); if (error) goto out; /* * If there was an out-of-line value, allocate the blocks we * identified for its storage and copy the value. 
This is done * after we create the attribute so that we don't overflow the * maximum size of a transaction and/or hit a deadlock. */ if (args->rmtblkno > 0) { error = xfs_attr_rmtval_set(args); if (error) return(error); } /* * If this is an atomic rename operation, we must "flip" the * incomplete flags on the "new" and "old" attribute/value pairs * so that one disappears and one appears atomically. Then we * must remove the "old" attribute/value pair. */ if (args->op_flags & XFS_DA_OP_RENAME) { /* * In a separate transaction, set the incomplete flag on the * "old" attr and clear the incomplete flag on the "new" attr. */ error = xfs_attr3_leaf_flipflags(args); if (error) goto out; /* * Dismantle the "old" attribute/value pair by removing * a "remote" value (if it exists). */ args->index = args->index2; args->blkno = args->blkno2; args->rmtblkno = args->rmtblkno2; args->rmtblkcnt = args->rmtblkcnt2; args->rmtvaluelen = args->rmtvaluelen2; if (args->rmtblkno) { error = xfs_attr_rmtval_remove(args); if (error) return(error); } /* * Re-find the "old" attribute entry after any split ops. * The INCOMPLETE flag means that we will find the "old" * attr, not the "new" one. */ args->flags |= XFS_ATTR_INCOMPLETE; state = xfs_da_state_alloc(); state->args = args; state->mp = mp; state->blocksize = state->mp->m_sb.sb_blocksize; state->node_ents = state->mp->m_attr_node_ents; state->inleaf = 0; error = xfs_da3_node_lookup_int(state, &retval); if (error) goto out; /* * Remove the name and update the hashvals in the tree. */ blk = &state->path.blk[ state->path.active-1 ]; ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); error = xfs_attr3_leaf_remove(blk->bp, args); xfs_da3_fixhashpath(state, &state->path); /* * Check to see if the tree needs to be collapsed. */ if (retval && (state->path.active > 1)) { xfs_bmap_init(args->flist, args->firstblock); error = xfs_da3_join(state); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); goto out; } /* * bmap_finish() may have committed the last trans * and started a new one. We need the inode to be * in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); } /* * Commit and start the next trans in the chain. */ error = xfs_trans_roll(&args->trans, dp); if (error) goto out; } else if (args->rmtblkno > 0) { /* * Added a "remote" value, just clear the incomplete flag. */ error = xfs_attr3_leaf_clearflag(args); if (error) goto out; } retval = error = 0; out: if (state) xfs_da_state_free(state); if (error) return(error); return(retval); }
166,732
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: void ExtensionApiTest::SetUpOnMainThread() { ExtensionBrowserTest::SetUpOnMainThread(); DCHECK(!test_config_.get()) << "Previous test did not clear config state."; test_config_.reset(new base::DictionaryValue()); test_config_->SetString(kTestDataDirectory, net::FilePathToFileURL(test_data_dir_).spec()); test_config_->SetBoolean(kBrowserSideNavigationEnabled, content::IsBrowserSideNavigationEnabled()); extensions::TestGetConfigFunction::set_test_config_state( test_config_.get()); } Commit Message: Hide DevTools frontend from webRequest API Prevent extensions from observing requests for remote DevTools frontends and add regression tests. And update ExtensionTestApi to support initializing the embedded test server and port from SetUpCommandLine (before SetUpOnMainThread). BUG=797497,797500 TEST=browser_test --gtest_filter=DevToolsFrontendInWebRequestApiTest.HiddenRequests Cq-Include-Trybots: master.tryserver.chromium.linux:linux_mojo Change-Id: Ic8f44b5771f2d5796f8c3de128f0a7ab88a77735 Reviewed-on: https://chromium-review.googlesource.com/844316 Commit-Queue: Rob Wu <[email protected]> Reviewed-by: Devlin <[email protected]> Reviewed-by: Dmitry Gozman <[email protected]> Cr-Commit-Position: refs/heads/master@{#528187} CWE ID: CWE-200
void ExtensionApiTest::SetUpOnMainThread() { ExtensionBrowserTest::SetUpOnMainThread(); DCHECK(!test_config_.get()) << "Previous test did not clear config state."; test_config_.reset(new base::DictionaryValue()); test_config_->SetString(kTestDataDirectory, net::FilePathToFileURL(test_data_dir_).spec()); test_config_->SetBoolean(kBrowserSideNavigationEnabled, content::IsBrowserSideNavigationEnabled()); if (embedded_test_server()->Started()) { // InitializeEmbeddedTestServer was called before |test_config_| was set. // Set the missing port key. test_config_->SetInteger(kEmbeddedTestServerPort, embedded_test_server()->port()); } extensions::TestGetConfigFunction::set_test_config_state( test_config_.get()); }
172,670
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: omx_vdec::omx_vdec(): m_error_propogated(false), m_state(OMX_StateInvalid), m_app_data(NULL), m_inp_mem_ptr(NULL), m_out_mem_ptr(NULL), input_flush_progress (false), output_flush_progress (false), input_use_buffer (false), output_use_buffer (false), ouput_egl_buffers(false), m_use_output_pmem(OMX_FALSE), m_out_mem_region_smi(OMX_FALSE), m_out_pvt_entry_pmem(OMX_FALSE), pending_input_buffers(0), pending_output_buffers(0), m_out_bm_count(0), m_inp_bm_count(0), m_inp_bPopulated(OMX_FALSE), m_out_bPopulated(OMX_FALSE), m_flags(0), #ifdef _ANDROID_ m_heap_ptr(NULL), #endif m_inp_bEnabled(OMX_TRUE), m_out_bEnabled(OMX_TRUE), m_in_alloc_cnt(0), m_platform_list(NULL), m_platform_entry(NULL), m_pmem_info(NULL), h264_parser(NULL), arbitrary_bytes (true), psource_frame (NULL), pdest_frame (NULL), m_inp_heap_ptr (NULL), m_phdr_pmem_ptr(NULL), m_heap_inp_bm_count (0), codec_type_parse ((codec_type)0), first_frame_meta (true), frame_count (0), nal_count (0), nal_length(0), look_ahead_nal (false), first_frame(0), first_buffer(NULL), first_frame_size (0), m_device_file_ptr(NULL), m_vc1_profile((vc1_profile_type)0), h264_last_au_ts(LLONG_MAX), h264_last_au_flags(0), m_disp_hor_size(0), m_disp_vert_size(0), prev_ts(LLONG_MAX), rst_prev_ts(true), frm_int(0), in_reconfig(false), m_display_id(NULL), client_extradata(0), m_reject_avc_1080p_mp (0), #ifdef _ANDROID_ m_enable_android_native_buffers(OMX_FALSE), m_use_android_native_buffers(OMX_FALSE), iDivXDrmDecrypt(NULL), #endif m_desc_buffer_ptr(NULL), secure_mode(false), m_other_extradata(NULL), m_profile(0), client_set_fps(false), m_last_rendered_TS(-1), m_queued_codec_config_count(0), secure_scaling_to_non_secure_opb(false) { /* Assumption is that , to begin with , we have all the frames with decoder */ DEBUG_PRINT_HIGH("In %u bit OMX vdec Constructor", (unsigned int)sizeof(long) * 8); memset(&m_debug,0,sizeof(m_debug)); #ifdef _ANDROID_ char property_value[PROPERTY_VALUE_MAX] = {0}; property_get("vidc.debug.level", property_value, "1"); debug_level = atoi(property_value); property_value[0] = '\0'; DEBUG_PRINT_HIGH("In OMX vdec Constructor"); property_get("vidc.dec.debug.perf", property_value, "0"); perf_flag = atoi(property_value); if (perf_flag) { DEBUG_PRINT_HIGH("vidc.dec.debug.perf is %d", perf_flag); dec_time.start(); proc_frms = latency = 0; } prev_n_filled_len = 0; property_value[0] = '\0'; property_get("vidc.dec.debug.ts", property_value, "0"); m_debug_timestamp = atoi(property_value); DEBUG_PRINT_HIGH("vidc.dec.debug.ts value is %d",m_debug_timestamp); if (m_debug_timestamp) { time_stamp_dts.set_timestamp_reorder_mode(true); time_stamp_dts.enable_debug_print(true); } property_value[0] = '\0'; property_get("vidc.dec.debug.concealedmb", property_value, "0"); m_debug_concealedmb = atoi(property_value); DEBUG_PRINT_HIGH("vidc.dec.debug.concealedmb value is %d",m_debug_concealedmb); property_value[0] = '\0'; property_get("vidc.dec.profile.check", property_value, "0"); m_reject_avc_1080p_mp = atoi(property_value); DEBUG_PRINT_HIGH("vidc.dec.profile.check value is %d",m_reject_avc_1080p_mp); property_value[0] = '\0'; property_get("vidc.dec.log.in", property_value, "0"); m_debug.in_buffer_log = atoi(property_value); property_value[0] = '\0'; property_get("vidc.dec.log.out", property_value, "0"); m_debug.out_buffer_log = atoi(property_value); sprintf(m_debug.log_loc, "%s", BUFFER_LOG_LOC); property_value[0] = '\0'; property_get("vidc.log.loc", property_value, ""); if (*property_value) strlcpy(m_debug.log_loc, property_value, PROPERTY_VALUE_MAX); 
property_value[0] = '\0'; property_get("vidc.dec.120fps.enabled", property_value, "0"); if(atoi(property_value)) { DEBUG_PRINT_LOW("feature 120 FPS decode enabled"); m_last_rendered_TS = 0; } property_value[0] = '\0'; property_get("vidc.dec.debug.dyn.disabled", property_value, "0"); m_disable_dynamic_buf_mode = atoi(property_value); DEBUG_PRINT_HIGH("vidc.dec.debug.dyn.disabled value is %d",m_disable_dynamic_buf_mode); #endif memset(&m_cmp,0,sizeof(m_cmp)); memset(&m_cb,0,sizeof(m_cb)); memset (&drv_ctx,0,sizeof(drv_ctx)); memset (&h264_scratch,0,sizeof (OMX_BUFFERHEADERTYPE)); memset (m_hwdevice_name,0,sizeof(m_hwdevice_name)); memset(m_demux_offsets, 0, ( sizeof(OMX_U32) * 8192) ); memset(&m_custom_buffersize, 0, sizeof(m_custom_buffersize)); m_demux_entries = 0; msg_thread_id = 0; async_thread_id = 0; msg_thread_created = false; async_thread_created = false; #ifdef _ANDROID_ICS_ memset(&native_buffer, 0 ,(sizeof(struct nativebuffer) * MAX_NUM_INPUT_OUTPUT_BUFFERS)); #endif memset(&drv_ctx.extradata_info, 0, sizeof(drv_ctx.extradata_info)); /* invalidate m_frame_pack_arrangement */ memset(&m_frame_pack_arrangement, 0, sizeof(OMX_QCOM_FRAME_PACK_ARRANGEMENT)); m_frame_pack_arrangement.cancel_flag = 1; drv_ctx.timestamp_adjust = false; drv_ctx.video_driver_fd = -1; m_vendor_config.pData = NULL; pthread_mutex_init(&m_lock, NULL); pthread_mutex_init(&c_lock, NULL); sem_init(&m_cmd_lock,0,0); sem_init(&m_safe_flush, 0, 0); streaming[CAPTURE_PORT] = streaming[OUTPUT_PORT] = false; #ifdef _ANDROID_ char extradata_value[PROPERTY_VALUE_MAX] = {0}; property_get("vidc.dec.debug.extradata", extradata_value, "0"); m_debug_extradata = atoi(extradata_value); DEBUG_PRINT_HIGH("vidc.dec.debug.extradata value is %d",m_debug_extradata); #endif m_fill_output_msg = OMX_COMPONENT_GENERATE_FTB; client_buffers.set_vdec_client(this); dynamic_buf_mode = false; out_dynamic_list = NULL; is_down_scalar_enabled = false; m_smoothstreaming_mode = false; m_smoothstreaming_width = 0; m_smoothstreaming_height = 0; is_q6_platform = false; } Commit Message: DO NOT MERGE mm-video-v4l2: vdec: Avoid processing ETBs/FTBs in invalid states (per the spec) ETB/FTB should not be handled in states other than Executing, Paused and Idle. This avoids accessing invalid buffers. Also add a lock to protect the private-buffers from being deleted while accessing from another thread. Bug: 27890802 Security Vulnerability - Heap Use-After-Free and Possible LPE in MediaServer (libOmxVdec problem #6) CRs-Fixed: 1008882 Change-Id: Iaac2e383cd53cf9cf8042c9ed93ddc76dba3907e CWE ID:
omx_vdec::omx_vdec(): m_error_propogated(false), m_state(OMX_StateInvalid), m_app_data(NULL), m_inp_mem_ptr(NULL), m_out_mem_ptr(NULL), input_flush_progress (false), output_flush_progress (false), input_use_buffer (false), output_use_buffer (false), ouput_egl_buffers(false), m_use_output_pmem(OMX_FALSE), m_out_mem_region_smi(OMX_FALSE), m_out_pvt_entry_pmem(OMX_FALSE), pending_input_buffers(0), pending_output_buffers(0), m_out_bm_count(0), m_inp_bm_count(0), m_inp_bPopulated(OMX_FALSE), m_out_bPopulated(OMX_FALSE), m_flags(0), #ifdef _ANDROID_ m_heap_ptr(NULL), #endif m_inp_bEnabled(OMX_TRUE), m_out_bEnabled(OMX_TRUE), m_in_alloc_cnt(0), m_platform_list(NULL), m_platform_entry(NULL), m_pmem_info(NULL), h264_parser(NULL), arbitrary_bytes (true), psource_frame (NULL), pdest_frame (NULL), m_inp_heap_ptr (NULL), m_phdr_pmem_ptr(NULL), m_heap_inp_bm_count (0), codec_type_parse ((codec_type)0), first_frame_meta (true), frame_count (0), nal_count (0), nal_length(0), look_ahead_nal (false), first_frame(0), first_buffer(NULL), first_frame_size (0), m_device_file_ptr(NULL), m_vc1_profile((vc1_profile_type)0), h264_last_au_ts(LLONG_MAX), h264_last_au_flags(0), m_disp_hor_size(0), m_disp_vert_size(0), prev_ts(LLONG_MAX), rst_prev_ts(true), frm_int(0), in_reconfig(false), m_display_id(NULL), client_extradata(0), m_reject_avc_1080p_mp (0), #ifdef _ANDROID_ m_enable_android_native_buffers(OMX_FALSE), m_use_android_native_buffers(OMX_FALSE), iDivXDrmDecrypt(NULL), #endif m_desc_buffer_ptr(NULL), secure_mode(false), m_other_extradata(NULL), m_profile(0), client_set_fps(false), m_last_rendered_TS(-1), m_queued_codec_config_count(0), secure_scaling_to_non_secure_opb(false) { /* Assumption is that , to begin with , we have all the frames with decoder */ DEBUG_PRINT_HIGH("In %u bit OMX vdec Constructor", (unsigned int)sizeof(long) * 8); memset(&m_debug,0,sizeof(m_debug)); #ifdef _ANDROID_ char property_value[PROPERTY_VALUE_MAX] = {0}; property_get("vidc.debug.level", property_value, "1"); debug_level = atoi(property_value); property_value[0] = '\0'; DEBUG_PRINT_HIGH("In OMX vdec Constructor"); property_get("vidc.dec.debug.perf", property_value, "0"); perf_flag = atoi(property_value); if (perf_flag) { DEBUG_PRINT_HIGH("vidc.dec.debug.perf is %d", perf_flag); dec_time.start(); proc_frms = latency = 0; } prev_n_filled_len = 0; property_value[0] = '\0'; property_get("vidc.dec.debug.ts", property_value, "0"); m_debug_timestamp = atoi(property_value); DEBUG_PRINT_HIGH("vidc.dec.debug.ts value is %d",m_debug_timestamp); if (m_debug_timestamp) { time_stamp_dts.set_timestamp_reorder_mode(true); time_stamp_dts.enable_debug_print(true); } property_value[0] = '\0'; property_get("vidc.dec.debug.concealedmb", property_value, "0"); m_debug_concealedmb = atoi(property_value); DEBUG_PRINT_HIGH("vidc.dec.debug.concealedmb value is %d",m_debug_concealedmb); property_value[0] = '\0'; property_get("vidc.dec.profile.check", property_value, "0"); m_reject_avc_1080p_mp = atoi(property_value); DEBUG_PRINT_HIGH("vidc.dec.profile.check value is %d",m_reject_avc_1080p_mp); property_value[0] = '\0'; property_get("vidc.dec.log.in", property_value, "0"); m_debug.in_buffer_log = atoi(property_value); property_value[0] = '\0'; property_get("vidc.dec.log.out", property_value, "0"); m_debug.out_buffer_log = atoi(property_value); sprintf(m_debug.log_loc, "%s", BUFFER_LOG_LOC); property_value[0] = '\0'; property_get("vidc.log.loc", property_value, ""); if (*property_value) strlcpy(m_debug.log_loc, property_value, PROPERTY_VALUE_MAX); 
property_value[0] = '\0'; property_get("vidc.dec.120fps.enabled", property_value, "0"); if(atoi(property_value)) { DEBUG_PRINT_LOW("feature 120 FPS decode enabled"); m_last_rendered_TS = 0; } property_value[0] = '\0'; property_get("vidc.dec.debug.dyn.disabled", property_value, "0"); m_disable_dynamic_buf_mode = atoi(property_value); DEBUG_PRINT_HIGH("vidc.dec.debug.dyn.disabled value is %d",m_disable_dynamic_buf_mode); #endif memset(&m_cmp,0,sizeof(m_cmp)); memset(&m_cb,0,sizeof(m_cb)); memset (&drv_ctx,0,sizeof(drv_ctx)); memset (&h264_scratch,0,sizeof (OMX_BUFFERHEADERTYPE)); memset (m_hwdevice_name,0,sizeof(m_hwdevice_name)); memset(m_demux_offsets, 0, ( sizeof(OMX_U32) * 8192) ); memset(&m_custom_buffersize, 0, sizeof(m_custom_buffersize)); m_demux_entries = 0; msg_thread_id = 0; async_thread_id = 0; msg_thread_created = false; async_thread_created = false; #ifdef _ANDROID_ICS_ memset(&native_buffer, 0 ,(sizeof(struct nativebuffer) * MAX_NUM_INPUT_OUTPUT_BUFFERS)); #endif memset(&drv_ctx.extradata_info, 0, sizeof(drv_ctx.extradata_info)); /* invalidate m_frame_pack_arrangement */ memset(&m_frame_pack_arrangement, 0, sizeof(OMX_QCOM_FRAME_PACK_ARRANGEMENT)); m_frame_pack_arrangement.cancel_flag = 1; drv_ctx.timestamp_adjust = false; drv_ctx.video_driver_fd = -1; m_vendor_config.pData = NULL; pthread_mutex_init(&m_lock, NULL); pthread_mutex_init(&c_lock, NULL); pthread_mutex_init(&buf_lock, NULL); sem_init(&m_cmd_lock,0,0); sem_init(&m_safe_flush, 0, 0); streaming[CAPTURE_PORT] = streaming[OUTPUT_PORT] = false; #ifdef _ANDROID_ char extradata_value[PROPERTY_VALUE_MAX] = {0}; property_get("vidc.dec.debug.extradata", extradata_value, "0"); m_debug_extradata = atoi(extradata_value); DEBUG_PRINT_HIGH("vidc.dec.debug.extradata value is %d",m_debug_extradata); #endif m_fill_output_msg = OMX_COMPONENT_GENERATE_FTB; client_buffers.set_vdec_client(this); dynamic_buf_mode = false; out_dynamic_list = NULL; is_down_scalar_enabled = false; m_smoothstreaming_mode = false; m_smoothstreaming_width = 0; m_smoothstreaming_height = 0; is_q6_platform = false; }
173,753
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) { struct super_block *sb = inode->i_sb; ext4_lblk_t first_block, stop_block; struct address_space *mapping = inode->i_mapping; loff_t first_block_offset, last_block_offset; handle_t *handle; unsigned int credits; int ret = 0; if (!S_ISREG(inode->i_mode)) return -EOPNOTSUPP; trace_ext4_punch_hole(inode, offset, length, 0); /* * Write out all dirty pages to avoid race conditions * Then release them. */ if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { ret = filemap_write_and_wait_range(mapping, offset, offset + length - 1); if (ret) return ret; } mutex_lock(&inode->i_mutex); /* No need to punch hole beyond i_size */ if (offset >= inode->i_size) goto out_mutex; /* * If the hole extends beyond i_size, set the hole * to end after the page that contains i_size */ if (offset + length > inode->i_size) { length = inode->i_size + PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) - offset; } if (offset & (sb->s_blocksize - 1) || (offset + length) & (sb->s_blocksize - 1)) { /* * Attach jinode to inode for jbd2 if we do any zeroing of * partial block */ ret = ext4_inode_attach_jinode(inode); if (ret < 0) goto out_mutex; } first_block_offset = round_up(offset, sb->s_blocksize); last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; /* Now release the pages and zero block aligned part of pages*/ if (last_block_offset > first_block_offset) truncate_pagecache_range(inode, first_block_offset, last_block_offset); /* Wait all existing dio workers, newcomers will block on i_mutex */ ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) credits = ext4_writepage_trans_blocks(inode); else credits = ext4_blocks_for_truncate(inode); handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); ext4_std_error(sb, ret); goto out_dio; } ret = ext4_zero_partial_blocks(handle, inode, offset, length); if (ret) goto out_stop; first_block = (offset + sb->s_blocksize - 1) >> EXT4_BLOCK_SIZE_BITS(sb); stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); /* If there are no blocks to remove, return now */ if (first_block >= stop_block) goto out_stop; down_write(&EXT4_I(inode)->i_data_sem); ext4_discard_preallocations(inode); ret = ext4_es_remove_extent(inode, first_block, stop_block - first_block); if (ret) { up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) ret = ext4_ext_remove_space(inode, first_block, stop_block - 1); else ret = ext4_ind_remove_space(handle, inode, first_block, stop_block); up_write(&EXT4_I(inode)->i_data_sem); if (IS_SYNC(inode)) ext4_handle_sync(handle); /* Now release the pages again to reduce race window */ if (last_block_offset > first_block_offset) truncate_pagecache_range(inode, first_block_offset, last_block_offset); inode->i_mtime = inode->i_ctime = ext4_current_time(inode); ext4_mark_inode_dirty(handle, inode); out_stop: ext4_journal_stop(handle); out_dio: ext4_inode_resume_unlocked_dio(inode); out_mutex: mutex_unlock(&inode->i_mutex); return ret; } Commit Message: ext4: fix races between page faults and hole punching Currently, page faults and hole punching are completely unsynchronized. This can result in page fault faulting in a page into a range that we are punching after truncate_pagecache_range() has been called and thus we can end up with a page mapped to disk blocks that will be shortly freed. 
Filesystem corruption will shortly follow. Note that the same race is avoided for truncate by checking page fault offset against i_size but there isn't similar mechanism available for punching holes. Fix the problem by creating new rw semaphore i_mmap_sem in inode and grab it for writing over truncate, hole punching, and other functions removing blocks from extent tree and for read over page faults. We cannot easily use i_data_sem for this since that ranks below transaction start and we need something ranking above it so that it can be held over the whole truncate / hole punching operation. Also remove various workarounds we had in the code to reduce race window when page fault could have created pages with stale mapping information. Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Theodore Ts'o <[email protected]> CWE ID: CWE-362
int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) { struct super_block *sb = inode->i_sb; ext4_lblk_t first_block, stop_block; struct address_space *mapping = inode->i_mapping; loff_t first_block_offset, last_block_offset; handle_t *handle; unsigned int credits; int ret = 0; if (!S_ISREG(inode->i_mode)) return -EOPNOTSUPP; trace_ext4_punch_hole(inode, offset, length, 0); /* * Write out all dirty pages to avoid race conditions * Then release them. */ if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { ret = filemap_write_and_wait_range(mapping, offset, offset + length - 1); if (ret) return ret; } mutex_lock(&inode->i_mutex); /* No need to punch hole beyond i_size */ if (offset >= inode->i_size) goto out_mutex; /* * If the hole extends beyond i_size, set the hole * to end after the page that contains i_size */ if (offset + length > inode->i_size) { length = inode->i_size + PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) - offset; } if (offset & (sb->s_blocksize - 1) || (offset + length) & (sb->s_blocksize - 1)) { /* * Attach jinode to inode for jbd2 if we do any zeroing of * partial block */ ret = ext4_inode_attach_jinode(inode); if (ret < 0) goto out_mutex; } /* Wait all existing dio workers, newcomers will block on i_mutex */ ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); /* * Prevent page faults from reinstantiating pages we have released from * page cache. */ down_write(&EXT4_I(inode)->i_mmap_sem); first_block_offset = round_up(offset, sb->s_blocksize); last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; /* Now release the pages and zero block aligned part of pages*/ if (last_block_offset > first_block_offset) truncate_pagecache_range(inode, first_block_offset, last_block_offset); if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) credits = ext4_writepage_trans_blocks(inode); else credits = ext4_blocks_for_truncate(inode); handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); ext4_std_error(sb, ret); goto out_dio; } ret = ext4_zero_partial_blocks(handle, inode, offset, length); if (ret) goto out_stop; first_block = (offset + sb->s_blocksize - 1) >> EXT4_BLOCK_SIZE_BITS(sb); stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); /* If there are no blocks to remove, return now */ if (first_block >= stop_block) goto out_stop; down_write(&EXT4_I(inode)->i_data_sem); ext4_discard_preallocations(inode); ret = ext4_es_remove_extent(inode, first_block, stop_block - first_block); if (ret) { up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) ret = ext4_ext_remove_space(inode, first_block, stop_block - 1); else ret = ext4_ind_remove_space(handle, inode, first_block, stop_block); up_write(&EXT4_I(inode)->i_data_sem); if (IS_SYNC(inode)) ext4_handle_sync(handle); inode->i_mtime = inode->i_ctime = ext4_current_time(inode); ext4_mark_inode_dirty(handle, inode); out_stop: ext4_journal_stop(handle); out_dio: up_write(&EXT4_I(inode)->i_mmap_sem); ext4_inode_resume_unlocked_dio(inode); out_mutex: mutex_unlock(&inode->i_mutex); return ret; }
167,490
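The fix above works by introducing a new rw-semaphore (i_mmap_sem) that the hole punch holds for writing while page faults hold it for reading, so a fault can never repopulate the page cache between truncate_pagecache_range() and the block free. Below is a minimal userspace sketch of that ordering using a pthread rwlock; the names (punch_hole, page_fault, page_present) are illustrative only and are not the ext4 code.

    /* build: cc -pthread sketch.c
     * Writer side: exclusive over "drop pages + free blocks".
     * Reader side: shared, so a concurrent fault never sees a half-punched range. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
    static int page_present = 1;            /* stands in for a cached page */

    static void *punch_hole(void *arg)
    {
        (void)arg;
        pthread_rwlock_wrlock(&mmap_sem);   /* like down_write(&i_mmap_sem) */
        page_present = 0;                   /* truncate_pagecache_range(...) */
        /* ...free the underlying blocks while still holding the lock... */
        pthread_rwlock_unlock(&mmap_sem);
        return NULL;
    }

    static void *page_fault(void *arg)
    {
        (void)arg;
        pthread_rwlock_rdlock(&mmap_sem);   /* like down_read(&i_mmap_sem) */
        printf("fault sees page_present=%d\n", page_present);
        pthread_rwlock_unlock(&mmap_sem);
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;
        pthread_create(&a, NULL, punch_hole, NULL);
        pthread_create(&b, NULL, page_fault, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
    }

The design point mirrored from the patch is the lock placement: the write lock is taken only after inode_dio_wait() and released only after the journal handle is stopped, so the whole punch is atomic with respect to faults.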
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) { struct scsi_cmnd *cmd, *n; list_for_each_entry_safe(cmd, n, error_q, eh_entry) { if (cmd->device->sdev_target == my_cmd->device->sdev_target && cmd->device->lun == my_cmd->device->lun) sas_eh_defer_cmd(cmd); } } Commit Message: scsi: libsas: defer ata device eh commands to libata When ata device doing EH, some commands still attached with tasks are not passed to libata when abort failed or recover failed, so libata did not handle these commands. After these commands done, sas task is freed, but ata qc is not freed. This will cause ata qc leak and trigger a warning like below: WARNING: CPU: 0 PID: 28512 at drivers/ata/libata-eh.c:4037 ata_eh_finish+0xb4/0xcc CPU: 0 PID: 28512 Comm: kworker/u32:2 Tainted: G W OE 4.14.0#1 ...... Call trace: [<ffff0000088b7bd0>] ata_eh_finish+0xb4/0xcc [<ffff0000088b8420>] ata_do_eh+0xc4/0xd8 [<ffff0000088b8478>] ata_std_error_handler+0x44/0x8c [<ffff0000088b8068>] ata_scsi_port_error_handler+0x480/0x694 [<ffff000008875fc4>] async_sas_ata_eh+0x4c/0x80 [<ffff0000080f6be8>] async_run_entry_fn+0x4c/0x170 [<ffff0000080ebd70>] process_one_work+0x144/0x390 [<ffff0000080ec100>] worker_thread+0x144/0x418 [<ffff0000080f2c98>] kthread+0x10c/0x138 [<ffff0000080855dc>] ret_from_fork+0x10/0x18 If ata qc leaked too many, ata tag allocation will fail and io blocked for ever. As suggested by Dan Williams, defer ata device commands to libata and merge sas_eh_finish_cmd() with sas_eh_defer_cmd(). libata will handle ata qcs correctly after this. Signed-off-by: Jason Yan <[email protected]> CC: Xiaofei Tan <[email protected]> CC: John Garry <[email protected]> CC: Dan Williams <[email protected]> Reviewed-by: Dan Williams <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]> CWE ID:
static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) { struct scsi_cmnd *cmd, *n; list_for_each_entry_safe(cmd, n, error_q, eh_entry) { if (cmd->device->sdev_target == my_cmd->device->sdev_target && cmd->device->lun == my_cmd->device->lun) sas_eh_finish_cmd(cmd); } }
169,263
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: deinterlace_row(png_bytep buffer, png_const_bytep row, unsigned int pixel_size, png_uint_32 w, int pass) { /* The inverse of the above, 'row' is part of row 'y' of the output image, * in 'buffer'. The image is 'w' wide and this is pass 'pass', distribute * the pixels of row into buffer and return the number written (to allow * this to be checked). */ png_uint_32 xin, xout, xstep; xout = PNG_PASS_START_COL(pass); xstep = 1U<<PNG_PASS_COL_SHIFT(pass); for (xin=0; xout<w; xout+=xstep) { pixel_copy(buffer, xout, row, xin, pixel_size); ++xin; } } Commit Message: DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82) CWE ID:
deinterlace_row(png_bytep buffer, png_const_bytep row,
173,607
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int t2p_process_jpeg_strip( unsigned char* strip, tsize_t* striplength, unsigned char* buffer, tsize_t* bufferoffset, tstrip_t no, uint32 height){ tsize_t i=0; while (i < *striplength) { tsize_t datalen; uint16 ri; uint16 v_samp; uint16 h_samp; int j; int ncomp; /* marker header: one or more FFs */ if (strip[i] != 0xff) return(0); i++; while (i < *striplength && strip[i] == 0xff) i++; if (i >= *striplength) return(0); /* SOI is the only pre-SOS marker without a length word */ if (strip[i] == 0xd8) datalen = 0; else { if ((*striplength - i) <= 2) return(0); datalen = (strip[i+1] << 8) | strip[i+2]; if (datalen < 2 || datalen >= (*striplength - i)) return(0); } switch( strip[i] ){ case 0xd8: /* SOI - start of image */ _TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i-1]), 2); *bufferoffset+=2; break; case 0xc0: /* SOF0 */ case 0xc1: /* SOF1 */ case 0xc3: /* SOF3 */ case 0xc9: /* SOF9 */ case 0xca: /* SOF10 */ if(no==0){ _TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i-1]), datalen+2); ncomp = buffer[*bufferoffset+9]; if (ncomp < 1 || ncomp > 4) return(0); v_samp=1; h_samp=1; for(j=0;j<ncomp;j++){ uint16 samp = buffer[*bufferoffset+11+(3*j)]; if( (samp>>4) > h_samp) h_samp = (samp>>4); if( (samp & 0x0f) > v_samp) v_samp = (samp & 0x0f); } v_samp*=8; h_samp*=8; ri=((( ((uint16)(buffer[*bufferoffset+5])<<8) | (uint16)(buffer[*bufferoffset+6]) )+v_samp-1)/ v_samp); ri*=((( ((uint16)(buffer[*bufferoffset+7])<<8) | (uint16)(buffer[*bufferoffset+8]) )+h_samp-1)/ h_samp); buffer[*bufferoffset+5]= (unsigned char) ((height>>8) & 0xff); buffer[*bufferoffset+6]= (unsigned char) (height & 0xff); *bufferoffset+=datalen+2; /* insert a DRI marker */ buffer[(*bufferoffset)++]=0xff; buffer[(*bufferoffset)++]=0xdd; buffer[(*bufferoffset)++]=0x00; buffer[(*bufferoffset)++]=0x04; buffer[(*bufferoffset)++]=(ri >> 8) & 0xff; buffer[(*bufferoffset)++]= ri & 0xff; } break; case 0xc4: /* DHT */ case 0xdb: /* DQT */ _TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i-1]), datalen+2); *bufferoffset+=datalen+2; break; case 0xda: /* SOS */ if(no==0){ _TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i-1]), datalen+2); *bufferoffset+=datalen+2; } else { buffer[(*bufferoffset)++]=0xff; buffer[(*bufferoffset)++]= (unsigned char)(0xd0 | ((no-1)%8)); } i += datalen + 1; /* copy remainder of strip */ _TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i]), *striplength - i); *bufferoffset+= *striplength - i; return(1); default: /* ignore any other marker */ break; } i += datalen + 1; } /* failed to find SOS marker */ return(0); } Commit Message: * tools/tiffcrop.c: fix various out-of-bounds write vulnerabilities in heap or stack allocated buffers. Reported as MSVR 35093, MSVR 35096 and MSVR 35097. Discovered by Axel Souchet and Vishal Chauhan from the MSRC Vulnerabilities & Mitigations team. * tools/tiff2pdf.c: fix out-of-bounds write vulnerabilities in heap allocate buffer in t2p_process_jpeg_strip(). Reported as MSVR 35098. Discovered by Axel Souchet and Vishal Chauhan from the MSRC Vulnerabilities & Mitigations team. * libtiff/tif_pixarlog.c: fix out-of-bounds write vulnerabilities in heap allocated buffers. Reported as MSVR 35094. Discovered by Axel Souchet and Vishal Chauhan from the MSRC Vulnerabilities & Mitigations team. * libtiff/tif_write.c: fix issue in error code path of TIFFFlushData1() that didn't reset the tif_rawcc and tif_rawcp members. I'm not completely sure if that could happen in practice outside of the odd behaviour of t2p_seekproc() of tiff2pdf). 
The report points that a better fix could be to check the return value of TIFFFlushData1() in places where it isn't done currently, but it seems this patch is enough. Reported as MSVR 35095. Discovered by Axel Souchet & Vishal Chauhan & Suha Can from the MSRC Vulnerabilities & Mitigations team. CWE ID: CWE-787
int t2p_process_jpeg_strip( unsigned char* strip, tsize_t* striplength, unsigned char* buffer, tsize_t buffersize, tsize_t* bufferoffset, tstrip_t no, uint32 height){ tsize_t i=0; while (i < *striplength) { tsize_t datalen; uint16 ri; uint16 v_samp; uint16 h_samp; int j; int ncomp; /* marker header: one or more FFs */ if (strip[i] != 0xff) return(0); i++; while (i < *striplength && strip[i] == 0xff) i++; if (i >= *striplength) return(0); /* SOI is the only pre-SOS marker without a length word */ if (strip[i] == 0xd8) datalen = 0; else { if ((*striplength - i) <= 2) return(0); datalen = (strip[i+1] << 8) | strip[i+2]; if (datalen < 2 || datalen >= (*striplength - i)) return(0); } switch( strip[i] ){ case 0xd8: /* SOI - start of image */ if( *bufferoffset + 2 > buffersize ) return(0); _TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i-1]), 2); *bufferoffset+=2; break; case 0xc0: /* SOF0 */ case 0xc1: /* SOF1 */ case 0xc3: /* SOF3 */ case 0xc9: /* SOF9 */ case 0xca: /* SOF10 */ if(no==0){ if( *bufferoffset + datalen + 2 + 6 > buffersize ) return(0); _TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i-1]), datalen+2); if( *bufferoffset + 9 >= buffersize ) return(0); ncomp = buffer[*bufferoffset+9]; if (ncomp < 1 || ncomp > 4) return(0); v_samp=1; h_samp=1; if( *bufferoffset + 11 + 3*(ncomp-1) >= buffersize ) return(0); for(j=0;j<ncomp;j++){ uint16 samp = buffer[*bufferoffset+11+(3*j)]; if( (samp>>4) > h_samp) h_samp = (samp>>4); if( (samp & 0x0f) > v_samp) v_samp = (samp & 0x0f); } v_samp*=8; h_samp*=8; ri=((( ((uint16)(buffer[*bufferoffset+5])<<8) | (uint16)(buffer[*bufferoffset+6]) )+v_samp-1)/ v_samp); ri*=((( ((uint16)(buffer[*bufferoffset+7])<<8) | (uint16)(buffer[*bufferoffset+8]) )+h_samp-1)/ h_samp); buffer[*bufferoffset+5]= (unsigned char) ((height>>8) & 0xff); buffer[*bufferoffset+6]= (unsigned char) (height & 0xff); *bufferoffset+=datalen+2; /* insert a DRI marker */ buffer[(*bufferoffset)++]=0xff; buffer[(*bufferoffset)++]=0xdd; buffer[(*bufferoffset)++]=0x00; buffer[(*bufferoffset)++]=0x04; buffer[(*bufferoffset)++]=(ri >> 8) & 0xff; buffer[(*bufferoffset)++]= ri & 0xff; } break; case 0xc4: /* DHT */ case 0xdb: /* DQT */ if( *bufferoffset + datalen + 2 > buffersize ) return(0); _TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i-1]), datalen+2); *bufferoffset+=datalen+2; break; case 0xda: /* SOS */ if(no==0){ if( *bufferoffset + datalen + 2 > buffersize ) return(0); _TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i-1]), datalen+2); *bufferoffset+=datalen+2; } else { if( *bufferoffset + 2 > buffersize ) return(0); buffer[(*bufferoffset)++]=0xff; buffer[(*bufferoffset)++]= (unsigned char)(0xd0 | ((no-1)%8)); } i += datalen + 1; /* copy remainder of strip */ if( *bufferoffset + *striplength - i > buffersize ) return(0); _TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i]), *striplength - i); *bufferoffset+= *striplength - i; return(1); default: /* ignore any other marker */ break; } i += datalen + 1; } /* failed to find SOS marker */ return(0); }
166,872
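The pattern the fix applies is the same at every copy site: pass the destination capacity down and verify there is room before each memcpy, bailing out instead of writing past the buffer. A small illustrative helper capturing that check follows; the names are made up for this sketch and are not libtiff API.

    #include <string.h>
    #include <stddef.h>

    /* Append len bytes to dst at *off only if they fit within dstsize. */
    static int append_bounded(unsigned char *dst, size_t dstsize, size_t *off,
                              const unsigned char *src, size_t len)
    {
        if (*off > dstsize || len > dstsize - *off)
            return 0;               /* would overflow: refuse, caller errors out */
        memcpy(dst + *off, src, len);
        *off += len;
        return 1;
    }

    int main(void)
    {
        unsigned char out[8];
        size_t off = 0;
        return append_bounded(out, sizeof out, &off,
                              (const unsigned char *)"abc", 3) ? 0 : 1;
    }

Writing the test as `*off > dstsize || len > dstsize - *off` (rather than `*off + len > dstsize`) also avoids unsigned wraparound when the attacker controls the length.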
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void *atomic_thread_inc_dec(void *context) { struct atomic_test_s32_s *at = (struct atomic_test_s32_s *)context; for (int i = 0; i < at->max_val; i++) { usleep(1); atomic_inc_prefix_s32(&at->data[i]); usleep(1); atomic_dec_prefix_s32(&at->data[i]); } return NULL; } Commit Message: DO NOT MERGE Fix potential DoS caused by delivering signal to BT process Bug: 28885210 Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360 Conflicts: btif/co/bta_hh_co.c btif/src/btif_core.c Merge conflict resolution of ag/1161415 (referencing ag/1164670) - Directly into mnc-mr2-release CWE ID: CWE-284
void *atomic_thread_inc_dec(void *context) { struct atomic_test_s32_s *at = (struct atomic_test_s32_s *)context; for (int i = 0; i < at->max_val; i++) { TEMP_FAILURE_RETRY(usleep(1)); atomic_inc_prefix_s32(&at->data[i]); TEMP_FAILURE_RETRY(usleep(1)); atomic_dec_prefix_s32(&at->data[i]); } return NULL; }
173,491
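TEMP_FAILURE_RETRY simply repeats a call that failed with EINTR, so a delivered signal cannot cut the wait short. The sketch below mirrors the usual glibc-style definition (a GCC statement-expression) and is illustrative only.

    #include <errno.h>
    #include <unistd.h>

    #ifndef TEMP_FAILURE_RETRY
    #define TEMP_FAILURE_RETRY(exp)                 \
        (__extension__ ({ long _rc;                 \
           do { _rc = (long)(exp); }                \
           while (_rc == -1L && errno == EINTR);    \
           _rc; }))
    #endif

    int main(void)
    {
        TEMP_FAILURE_RETRY(usleep(1));   /* retried if a signal interrupts it */
        return 0;
    }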
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool TabsCaptureVisibleTabFunction::RunImpl() { PrefService* service = profile()->GetPrefs(); if (service->GetBoolean(prefs::kDisableScreenshots)) { error_ = keys::kScreenshotsDisabled; return false; } WebContents* web_contents = NULL; if (!GetTabToCapture(&web_contents)) return false; image_format_ = FORMAT_JPEG; // Default format is JPEG. image_quality_ = kDefaultQuality; // Default quality setting. if (HasOptionalArgument(1)) { DictionaryValue* options = NULL; EXTENSION_FUNCTION_VALIDATE(args_->GetDictionary(1, &options)); if (options->HasKey(keys::kFormatKey)) { std::string format; EXTENSION_FUNCTION_VALIDATE( options->GetString(keys::kFormatKey, &format)); if (format == keys::kFormatValueJpeg) { image_format_ = FORMAT_JPEG; } else if (format == keys::kFormatValuePng) { image_format_ = FORMAT_PNG; } else { EXTENSION_FUNCTION_VALIDATE(0); } } if (options->HasKey(keys::kQualityKey)) { EXTENSION_FUNCTION_VALIDATE( options->GetInteger(keys::kQualityKey, &image_quality_)); } } if (!GetExtension()->CanCaptureVisiblePage( web_contents->GetURL(), SessionID::IdForTab(web_contents), &error_)) { return false; } RenderViewHost* render_view_host = web_contents->GetRenderViewHost(); content::RenderWidgetHostView* view = render_view_host->GetView(); if (!view) { error_ = keys::kInternalVisibleTabCaptureError; return false; } render_view_host->CopyFromBackingStore( gfx::Rect(), view->GetViewBounds().size(), base::Bind(&TabsCaptureVisibleTabFunction::CopyFromBackingStoreComplete, this)); return true; } Commit Message: Don't allow extensions to take screenshots of interstitial pages. Branched from https://codereview.chromium.org/14885004/ which is trying to test it. BUG=229504 Review URL: https://chromiumcodereview.appspot.com/14954004 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@198297 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-264
bool TabsCaptureVisibleTabFunction::RunImpl() { PrefService* service = profile()->GetPrefs(); if (service->GetBoolean(prefs::kDisableScreenshots)) { error_ = keys::kScreenshotsDisabled; return false; } WebContents* web_contents = NULL; if (!GetTabToCapture(&web_contents)) return false; image_format_ = FORMAT_JPEG; // Default format is JPEG. image_quality_ = kDefaultQuality; // Default quality setting. if (HasOptionalArgument(1)) { DictionaryValue* options = NULL; EXTENSION_FUNCTION_VALIDATE(args_->GetDictionary(1, &options)); if (options->HasKey(keys::kFormatKey)) { std::string format; EXTENSION_FUNCTION_VALIDATE( options->GetString(keys::kFormatKey, &format)); if (format == keys::kFormatValueJpeg) { image_format_ = FORMAT_JPEG; } else if (format == keys::kFormatValuePng) { image_format_ = FORMAT_PNG; } else { EXTENSION_FUNCTION_VALIDATE(0); } } if (options->HasKey(keys::kQualityKey)) { EXTENSION_FUNCTION_VALIDATE( options->GetInteger(keys::kQualityKey, &image_quality_)); } } // Use the last committed URL rather than the active URL for permissions // checking, since the visible page won't be updated until it has been // committed. A canonical example of this is interstitials, which show the // URL of the new/loading page (active) but would capture the content of the // old page (last committed). // // TODO(creis): Use WebContents::GetLastCommittedURL instead. // http://crbug.com/237908. NavigationEntry* last_committed_entry = web_contents->GetController().GetLastCommittedEntry(); GURL last_committed_url = last_committed_entry ? last_committed_entry->GetURL() : GURL(); if (!GetExtension()->CanCaptureVisiblePage(last_committed_url, SessionID::IdForTab(web_contents), &error_)) { return false; } RenderViewHost* render_view_host = web_contents->GetRenderViewHost(); content::RenderWidgetHostView* view = render_view_host->GetView(); if (!view) { error_ = keys::kInternalVisibleTabCaptureError; return false; } render_view_host->CopyFromBackingStore( gfx::Rect(), view->GetViewBounds().size(), base::Bind(&TabsCaptureVisibleTabFunction::CopyFromBackingStoreComplete, this)); return true; }
171,268
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: modifier_total_encodings(PNG_CONST png_modifier *pm) { return 1 + /* (1) nothing */ pm->ngammas + /* (2) gamma values to test */ pm->nencodings + /* (3) total number of encodings */ /* The following test only works after the first time through the * png_modifier code because 'bit_depth' is set when the IHDR is read. * modifier_reset, below, preserves the setting until after it has called * the iterate function (also below.) * * For this reason do not rely on this function outside a call to * modifier_reset. */ ((pm->bit_depth == 16 || pm->assume_16_bit_calculations) ? pm->nencodings : 0); /* (4) encodings with gamma == 1.0 */ } Commit Message: DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82) CWE ID:
modifier_total_encodings(const png_modifier *pm) { return 1 + /* (1) nothing */ pm->ngammas + /* (2) gamma values to test */ pm->nencodings + /* (3) total number of encodings */ /* The following test only works after the first time through the * png_modifier code because 'bit_depth' is set when the IHDR is read. * modifier_reset, below, preserves the setting until after it has called * the iterate function (also below.) * * For this reason do not rely on this function outside a call to * modifier_reset. */ ((pm->bit_depth == 16 || pm->assume_16_bit_calculations) ? pm->nencodings : 0); /* (4) encodings with gamma == 1.0 */ }

173,671
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: long keyctl_update_key(key_serial_t id, const void __user *_payload, size_t plen) { key_ref_t key_ref; void *payload; long ret; ret = -EINVAL; if (plen > PAGE_SIZE) goto error; /* pull the payload in if one was supplied */ payload = NULL; if (_payload) { ret = -ENOMEM; payload = kmalloc(plen, GFP_KERNEL); if (!payload) goto error; ret = -EFAULT; if (copy_from_user(payload, _payload, plen) != 0) goto error2; } /* find the target key (which must be writable) */ key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error2; } /* update the key */ ret = key_update(key_ref, payload, plen); key_ref_put(key_ref); error2: kfree(payload); error: return ret; } Commit Message: KEYS: fix dereferencing NULL payload with nonzero length sys_add_key() and the KEYCTL_UPDATE operation of sys_keyctl() allowed a NULL payload with nonzero length to be passed to the key type's ->preparse(), ->instantiate(), and/or ->update() methods. Various key types including asymmetric, cifs.idmap, cifs.spnego, and pkcs7_test did not handle this case, allowing an unprivileged user to trivially cause a NULL pointer dereference (kernel oops) if one of these key types was present. Fix it by doing the copy_from_user() when 'plen' is nonzero rather than when '_payload' is non-NULL, causing the syscall to fail with EFAULT as expected when an invalid buffer is specified. Cc: [email protected] # 2.6.10+ Signed-off-by: Eric Biggers <[email protected]> Signed-off-by: David Howells <[email protected]> Signed-off-by: James Morris <[email protected]> CWE ID: CWE-476
long keyctl_update_key(key_serial_t id, const void __user *_payload, size_t plen) { key_ref_t key_ref; void *payload; long ret; ret = -EINVAL; if (plen > PAGE_SIZE) goto error; /* pull the payload in if one was supplied */ payload = NULL; if (plen) { ret = -ENOMEM; payload = kmalloc(plen, GFP_KERNEL); if (!payload) goto error; ret = -EFAULT; if (copy_from_user(payload, _payload, plen) != 0) goto error2; } /* find the target key (which must be writable) */ key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error2; } /* update the key */ ret = key_update(key_ref, payload, plen); key_ref_put(key_ref); error2: kfree(payload); error: return ret; }
167,727
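The small change matters because the copy is now gated on the length rather than the pointer: a caller passing plen > 0 with a NULL buffer now fails copy_from_user() with EFAULT instead of handing a NULL payload of nonzero length to the key type's handlers. A userspace analogue of the same guard, with illustrative names only:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    static int copy_payload(const void *user_buf, size_t plen, void **out)
    {
        void *payload = NULL;

        if (plen) {                      /* gate on the length, not the pointer */
            if (user_buf == NULL)
                return -EFAULT;          /* reject NULL + nonzero length early */
            payload = malloc(plen);
            if (payload == NULL)
                return -ENOMEM;
            memcpy(payload, user_buf, plen);
        }
        *out = payload;                  /* NULL only when plen == 0 */
        return 0;
    }

    int main(void)
    {
        void *p = NULL;
        return copy_payload(NULL, 16, &p) == -EFAULT ? 0 : 1;
    }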
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void AcceleratedStaticBitmapImage::EnsureMailbox(MailboxSyncMode mode, GLenum filter) { if (!texture_holder_->IsMailboxTextureHolder()) { TRACE_EVENT0("blink", "AcceleratedStaticBitmapImage::EnsureMailbox"); if (!original_skia_image_) { RetainOriginalSkImage(); } texture_holder_ = std::make_unique<MailboxTextureHolder>( std::move(texture_holder_), filter); } texture_holder_->Sync(mode); } Commit Message: Fix *StaticBitmapImage ThreadChecker and unaccelerated SkImage destroy - AcceleratedStaticBitmapImage was misusing ThreadChecker by having its own detach logic. Using proper DetachThread is simpler, cleaner and correct. - UnacceleratedStaticBitmapImage didn't destroy the SkImage in the proper thread, leading to GrContext/SkSp problems. Bug: 890576 Change-Id: Ic71e7f7322b0b851774628247aa5256664bc0723 Reviewed-on: https://chromium-review.googlesource.com/c/1307775 Reviewed-by: Gabriel Charette <[email protected]> Reviewed-by: Jeremy Roman <[email protected]> Commit-Queue: Fernando Serboncini <[email protected]> Cr-Commit-Position: refs/heads/master@{#604427} CWE ID: CWE-119
void AcceleratedStaticBitmapImage::EnsureMailbox(MailboxSyncMode mode, GLenum filter) { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); if (!texture_holder_->IsMailboxTextureHolder()) { TRACE_EVENT0("blink", "AcceleratedStaticBitmapImage::EnsureMailbox"); if (!original_skia_image_) { RetainOriginalSkImage(); } texture_holder_ = std::make_unique<MailboxTextureHolder>( std::move(texture_holder_), filter); } texture_holder_->Sync(mode); }
172,595
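The DCHECK added here is Chromium's thread checker; the underlying idea is simply "remember the owning thread, assert on entry". A plain-C sketch of that idea with pthreads (illustrative, not the Blink implementation):

    /* build: cc -pthread checker.c */
    #include <assert.h>
    #include <pthread.h>

    typedef struct { pthread_t owner; } thread_checker_t;

    static void checker_bind(thread_checker_t *c) { c->owner = pthread_self(); }

    static void checker_assert(const thread_checker_t *c)
    {
        /* Fails loudly if a method is entered from a thread other than the
         * one the object was bound to. */
        assert(pthread_equal(c->owner, pthread_self()));
    }

    int main(void)
    {
        thread_checker_t c;
        checker_bind(&c);
        checker_assert(&c);     /* same thread: passes */
        return 0;
    }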
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: IHEVCD_ERROR_T ihevcd_cabac_init(cab_ctxt_t *ps_cabac, bitstrm_t *ps_bitstrm, WORD32 qp, WORD32 cabac_init_idc, const UWORD8 *pu1_init_ctxt) { /* Sanity checks */ ASSERT(ps_cabac != NULL); ASSERT(ps_bitstrm != NULL); ASSERT((qp >= 0) && (qp < 52)); ASSERT((cabac_init_idc >= 0) && (cabac_init_idc < 3)); UNUSED(qp); UNUSED(cabac_init_idc); /* CABAC engine uses 32 bit range instead of 9 bits as specified by * the spec. This is done to reduce number of renormalizations */ /* cabac engine initialization */ #if FULLRANGE ps_cabac->u4_range = (UWORD32)510 << RANGE_SHIFT; BITS_GET(ps_cabac->u4_ofst, ps_bitstrm->pu4_buf, ps_bitstrm->u4_bit_ofst, ps_bitstrm->u4_cur_word, ps_bitstrm->u4_nxt_word, (9 + RANGE_SHIFT)); #else ps_cabac->u4_range = (UWORD32)510; BITS_GET(ps_cabac->u4_ofst, ps_bitstrm->pu4_buf, ps_bitstrm->u4_bit_ofst, ps_bitstrm->u4_cur_word, ps_bitstrm->u4_nxt_word, 9); #endif /* cabac context initialization based on init idc and slice qp */ memcpy(ps_cabac->au1_ctxt_models, pu1_init_ctxt, IHEVC_CAB_CTXT_END); DEBUG_RANGE_OFST("init", ps_cabac->u4_range, ps_cabac->u4_ofst); return ((IHEVCD_ERROR_T)IHEVCD_SUCCESS); } Commit Message: Return error from cabac init if offset is greater than range When the offset was greater than range, the bitstream was read more than the valid range in leaf-level cabac parsing modules. Error check was added to cabac init to fix this issue. Additionally end of slice and slice error were signalled to suppress further parsing of current slice. Bug: 34897036 Change-Id: I1263f1d1219684ffa6e952c76e5a08e9a933c9d2 (cherry picked from commit 3b175da88a1807d19cdd248b74bce60e57f05c6a) (cherry picked from commit b92314c860d01d754ef579eafe55d7377962b3ba) CWE ID: CWE-119
IHEVCD_ERROR_T ihevcd_cabac_init(cab_ctxt_t *ps_cabac, bitstrm_t *ps_bitstrm, WORD32 qp, WORD32 cabac_init_idc, const UWORD8 *pu1_init_ctxt) { /* Sanity checks */ ASSERT(ps_cabac != NULL); ASSERT(ps_bitstrm != NULL); ASSERT((qp >= 0) && (qp < 52)); ASSERT((cabac_init_idc >= 0) && (cabac_init_idc < 3)); UNUSED(qp); UNUSED(cabac_init_idc); /* CABAC engine uses 32 bit range instead of 9 bits as specified by * the spec. This is done to reduce number of renormalizations */ /* cabac engine initialization */ #if FULLRANGE ps_cabac->u4_range = (UWORD32)510 << RANGE_SHIFT; BITS_GET(ps_cabac->u4_ofst, ps_bitstrm->pu4_buf, ps_bitstrm->u4_bit_ofst, ps_bitstrm->u4_cur_word, ps_bitstrm->u4_nxt_word, (9 + RANGE_SHIFT)); #else ps_cabac->u4_range = (UWORD32)510; BITS_GET(ps_cabac->u4_ofst, ps_bitstrm->pu4_buf, ps_bitstrm->u4_bit_ofst, ps_bitstrm->u4_cur_word, ps_bitstrm->u4_nxt_word, 9); #endif /* cabac context initialization based on init idc and slice qp */ memcpy(ps_cabac->au1_ctxt_models, pu1_init_ctxt, IHEVC_CAB_CTXT_END); DEBUG_RANGE_OFST("init", ps_cabac->u4_range, ps_cabac->u4_ofst); /* * If the offset is greater than or equal to range, return fail. */ if(ps_cabac->u4_ofst >= ps_cabac->u4_range) { return ((IHEVCD_ERROR_T)IHEVCD_FAIL); } return ((IHEVCD_ERROR_T)IHEVCD_SUCCESS); }
174,029
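The added guard is a plain state-validity check after initialisation: once the offset bits are pulled from the bitstream, they must be strictly below the range, otherwise the stream is corrupt and decoding must not continue. A small self-contained sketch of that invariant, using illustrative types and names and the non-FULLRANGE 9-bit path:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint32_t range, ofst; } cabac_t;

    static int cabac_init_checked(cabac_t *c, uint32_t nine_bits_from_stream)
    {
        c->range = 510;
        c->ofst  = nine_bits_from_stream & 0x1ff;
        if (c->ofst >= c->range)
            return -1;          /* corrupt stream: caller flags a slice error */
        return 0;
    }

    int main(void)
    {
        cabac_t c;
        printf("511 accepted? %d\n", cabac_init_checked(&c, 511) == 0);  /* 0 */
        printf("100 accepted? %d\n", cabac_init_checked(&c, 100) == 0);  /* 1 */
        return 0;
    }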
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: xsltNumberFormatGetMultipleLevel(xsltTransformContextPtr context, xmlNodePtr node, xsltCompMatchPtr countPat, xsltCompMatchPtr fromPat, double *array, int max, xmlDocPtr doc, xmlNodePtr elem) { int amount = 0; int cnt; xmlNodePtr ancestor; xmlNodePtr preceding; xmlXPathParserContextPtr parser; context->xpathCtxt->node = node; parser = xmlXPathNewParserContext(NULL, context->xpathCtxt); if (parser) { /* ancestor-or-self::*[count] */ for (ancestor = node; (ancestor != NULL) && (ancestor->type != XML_DOCUMENT_NODE); ancestor = xmlXPathNextAncestor(parser, ancestor)) { if ((fromPat != NULL) && xsltTestCompMatchList(context, ancestor, fromPat)) break; /* for */ if ((countPat == NULL && node->type == ancestor->type && xmlStrEqual(node->name, ancestor->name)) || xsltTestCompMatchList(context, ancestor, countPat)) { /* count(preceding-sibling::*) */ cnt = 0; for (preceding = ancestor; preceding != NULL; preceding = xmlXPathNextPrecedingSibling(parser, preceding)) { if (countPat == NULL) { if ((preceding->type == ancestor->type) && xmlStrEqual(preceding->name, ancestor->name)){ if ((preceding->ns == ancestor->ns) || ((preceding->ns != NULL) && (ancestor->ns != NULL) && (xmlStrEqual(preceding->ns->href, ancestor->ns->href) ))) cnt++; } } else { if (xsltTestCompMatchList(context, preceding, countPat)) cnt++; } } array[amount++] = (double)cnt; if (amount >= max) break; /* for */ } } xmlXPathFreeParserContext(parser); } return amount; } Commit Message: Roll libxslt to 891681e3e948f31732229f53cb6db7215f740fc7 BUG=583156,583171 Review URL: https://codereview.chromium.org/1853083002 Cr-Commit-Position: refs/heads/master@{#385338} CWE ID: CWE-119
xsltNumberFormatGetMultipleLevel(xsltTransformContextPtr context, xmlNodePtr node, xsltCompMatchPtr countPat, xsltCompMatchPtr fromPat, double *array, int max) { int amount = 0; int cnt; xmlNodePtr ancestor; xmlNodePtr preceding; xmlXPathParserContextPtr parser; context->xpathCtxt->node = node; parser = xmlXPathNewParserContext(NULL, context->xpathCtxt); if (parser) { /* ancestor-or-self::*[count] */ for (ancestor = node; (ancestor != NULL) && (ancestor->type != XML_DOCUMENT_NODE); ancestor = xmlXPathNextAncestor(parser, ancestor)) { if ((fromPat != NULL) && xsltTestCompMatchList(context, ancestor, fromPat)) break; /* for */ if (xsltTestCompMatchCount(context, ancestor, countPat, node)) { /* count(preceding-sibling::*) */ cnt = 1; for (preceding = xmlXPathNextPrecedingSibling(parser, ancestor); preceding != NULL; preceding = xmlXPathNextPrecedingSibling(parser, preceding)) { if (xsltTestCompMatchCount(context, preceding, countPat, node)) cnt++; } array[amount++] = (double)cnt; if (amount >= max) break; /* for */ } } xmlXPathFreeParserContext(parser); } return amount; }
173,309
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: device_has_capability (NMDevice *self, NMDeviceCapabilities caps) { { static guint32 devcount = 0; NMDevicePrivate *priv; g_return_if_fail (NM_IS_DEVICE (self)); priv = NM_DEVICE_GET_PRIVATE (self); g_return_if_fail (priv->path == NULL); priv->path = g_strdup_printf ("/org/freedesktop/NetworkManager/Devices/%d", devcount++); _LOGI (LOGD_DEVICE, "exported as %s", priv->path); nm_dbus_manager_register_object (nm_dbus_manager_get (), priv->path, self); } const char * nm_device_get_path (NMDevice *self) { g_return_val_if_fail (self != NULL, NULL); return NM_DEVICE_GET_PRIVATE (self)->path; } const char * nm_device_get_udi (NMDevice *self) { g_return_val_if_fail (self != NULL, NULL); return NM_DEVICE_GET_PRIVATE (self)->udi; } const char * nm_device_get_iface (NMDevice *self) { g_return_val_if_fail (NM_IS_DEVICE (self), 0); return NM_DEVICE_GET_PRIVATE (self)->iface; } int nm_device_get_ifindex (NMDevice *self) { g_return_val_if_fail (self != NULL, 0); return NM_DEVICE_GET_PRIVATE (self)->ifindex; } gboolean nm_device_is_software (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); return priv->is_software; } const char * nm_device_get_ip_iface (NMDevice *self) { NMDevicePrivate *priv; g_return_val_if_fail (self != NULL, NULL); priv = NM_DEVICE_GET_PRIVATE (self); /* If it's not set, default to iface */ return priv->ip_iface ? priv->ip_iface : priv->iface; } int nm_device_get_ip_ifindex (NMDevice *self) { NMDevicePrivate *priv; g_return_val_if_fail (self != NULL, 0); priv = NM_DEVICE_GET_PRIVATE (self); /* If it's not set, default to iface */ return priv->ip_iface ? priv->ip_ifindex : priv->ifindex; } void nm_device_set_ip_iface (NMDevice *self, const char *iface) { NMDevicePrivate *priv; char *old_ip_iface; g_return_if_fail (NM_IS_DEVICE (self)); priv = NM_DEVICE_GET_PRIVATE (self); if (!g_strcmp0 (iface, priv->ip_iface)) return; old_ip_iface = priv->ip_iface; priv->ip_ifindex = 0; priv->ip_iface = g_strdup (iface); if (priv->ip_iface) { priv->ip_ifindex = nm_platform_link_get_ifindex (priv->ip_iface); if (priv->ip_ifindex > 0) { if (nm_platform_check_support_user_ipv6ll ()) nm_platform_link_set_user_ipv6ll_enabled (priv->ip_ifindex, TRUE); if (!nm_platform_link_is_up (priv->ip_ifindex)) nm_platform_link_set_up (priv->ip_ifindex); } else { /* Device IP interface must always be a kernel network interface */ _LOGW (LOGD_HW, "failed to look up interface index"); } } /* We don't care about any saved values from the old iface */ g_hash_table_remove_all (priv->ip6_saved_properties); /* Emit change notification */ if (g_strcmp0 (old_ip_iface, priv->ip_iface)) g_object_notify (G_OBJECT (self), NM_DEVICE_IP_IFACE); g_free (old_ip_iface); } static gboolean get_ip_iface_identifier (NMDevice *self, NMUtilsIPv6IfaceId *out_iid) { NMLinkType link_type; const guint8 *hwaddr = NULL; size_t hwaddr_len = 0; int ifindex; gboolean success; /* If we get here, we *must* have a kernel netdev, which implies an ifindex */ ifindex = nm_device_get_ip_ifindex (self); g_assert (ifindex); link_type = nm_platform_link_get_type (ifindex); g_return_val_if_fail (link_type > NM_LINK_TYPE_UNKNOWN, 0); hwaddr = nm_platform_link_get_address (ifindex, &hwaddr_len); if (!hwaddr_len) return FALSE; success = nm_utils_get_ipv6_interface_identifier (link_type, hwaddr, hwaddr_len, out_iid); if (!success) { _LOGW (LOGD_HW, "failed to generate interface identifier " "for link type %u hwaddr_len %zu", link_type, hwaddr_len); } return success; } static gboolean nm_device_get_ip_iface_identifier (NMDevice *self, 
NMUtilsIPv6IfaceId *iid) { return NM_DEVICE_GET_CLASS (self)->get_ip_iface_identifier (self, iid); } const char * nm_device_get_driver (NMDevice *self) { g_return_val_if_fail (self != NULL, NULL); return NM_DEVICE_GET_PRIVATE (self)->driver; } const char * nm_device_get_driver_version (NMDevice *self) { g_return_val_if_fail (self != NULL, NULL); return NM_DEVICE_GET_PRIVATE (self)->driver_version; } NMDeviceType nm_device_get_device_type (NMDevice *self) { g_return_val_if_fail (NM_IS_DEVICE (self), NM_DEVICE_TYPE_UNKNOWN); return NM_DEVICE_GET_PRIVATE (self)->type; } /** * nm_device_get_priority(): * @self: the #NMDevice * * Returns: the device's routing priority. Lower numbers means a "better" * device, eg higher priority. */ int nm_device_get_priority (NMDevice *self) { g_return_val_if_fail (NM_IS_DEVICE (self), 1000); /* Device 'priority' is used for the default route-metric and is based on * the device type. The settings ipv4.route-metric and ipv6.route-metric * can overwrite this default. * * Currently for both IPv4 and IPv6 we use the same default values. * * The route-metric is used for the metric of the routes of device. * This also applies to the default route. Therefore it affects also * which device is the "best". * * For comparison, note that iproute2 by default adds IPv4 routes with * metric 0, and IPv6 routes with metric 1024. The latter is the IPv6 * "user default" in the kernel (NM_PLATFORM_ROUTE_METRIC_DEFAULT_IP6). * In kernel, the full uint32_t range is available for route * metrics (except for IPv6, where 0 means 1024). */ switch (nm_device_get_device_type (self)) { /* 50 is reserved for VPN (NM_VPN_ROUTE_METRIC_DEFAULT) */ case NM_DEVICE_TYPE_ETHERNET: return 100; case NM_DEVICE_TYPE_INFINIBAND: return 150; case NM_DEVICE_TYPE_ADSL: return 200; case NM_DEVICE_TYPE_WIMAX: return 250; case NM_DEVICE_TYPE_BOND: return 300; case NM_DEVICE_TYPE_TEAM: return 350; case NM_DEVICE_TYPE_VLAN: return 400; case NM_DEVICE_TYPE_BRIDGE: return 425; case NM_DEVICE_TYPE_MODEM: return 450; case NM_DEVICE_TYPE_BT: return 550; case NM_DEVICE_TYPE_WIFI: return 600; case NM_DEVICE_TYPE_OLPC_MESH: return 650; case NM_DEVICE_TYPE_GENERIC: return 950; case NM_DEVICE_TYPE_UNKNOWN: return 10000; case NM_DEVICE_TYPE_UNUSED1: case NM_DEVICE_TYPE_UNUSED2: /* omit default: to get compiler warning about missing switch cases */ break; } return 11000; } guint32 nm_device_get_ip4_route_metric (NMDevice *self) { NMConnection *connection; gint64 route_metric = -1; g_return_val_if_fail (NM_IS_DEVICE (self), G_MAXUINT32); connection = nm_device_get_connection (self); if (connection) route_metric = nm_setting_ip_config_get_route_metric (nm_connection_get_setting_ip4_config (connection)); return route_metric >= 0 ? route_metric : nm_device_get_priority (self); } guint32 nm_device_get_ip6_route_metric (NMDevice *self) { NMConnection *connection; gint64 route_metric = -1; g_return_val_if_fail (NM_IS_DEVICE (self), G_MAXUINT32); connection = nm_device_get_connection (self); if (connection) route_metric = nm_setting_ip_config_get_route_metric (nm_connection_get_setting_ip6_config (connection)); return route_metric >= 0 ? route_metric : nm_device_get_priority (self); } const NMPlatformIP4Route * nm_device_get_ip4_default_route (NMDevice *self, gboolean *out_is_assumed) { NMDevicePrivate *priv; g_return_val_if_fail (NM_IS_DEVICE (self), NULL); priv = NM_DEVICE_GET_PRIVATE (self); if (out_is_assumed) *out_is_assumed = priv->default_route.v4_is_assumed; return priv->default_route.v4_has ? 
&priv->default_route.v4 : NULL; } const NMPlatformIP6Route * nm_device_get_ip6_default_route (NMDevice *self, gboolean *out_is_assumed) { NMDevicePrivate *priv; g_return_val_if_fail (NM_IS_DEVICE (self), NULL); priv = NM_DEVICE_GET_PRIVATE (self); if (out_is_assumed) *out_is_assumed = priv->default_route.v6_is_assumed; return priv->default_route.v6_has ? &priv->default_route.v6 : NULL; } const char * nm_device_get_type_desc (NMDevice *self) { g_return_val_if_fail (self != NULL, NULL); return NM_DEVICE_GET_PRIVATE (self)->type_desc; } gboolean nm_device_has_carrier (NMDevice *self) { return NM_DEVICE_GET_PRIVATE (self)->carrier; } NMActRequest * nm_device_get_act_request (NMDevice *self) { g_return_val_if_fail (self != NULL, NULL); return NM_DEVICE_GET_PRIVATE (self)->act_request; } NMConnection * nm_device_get_connection (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); return priv->act_request ? nm_act_request_get_connection (priv->act_request) : NULL; } RfKillType nm_device_get_rfkill_type (NMDevice *self) { g_return_val_if_fail (NM_IS_DEVICE (self), FALSE); return NM_DEVICE_GET_PRIVATE (self)->rfkill_type; } static const char * nm_device_get_physical_port_id (NMDevice *self) { return NM_DEVICE_GET_PRIVATE (self)->physical_port_id; } /***********************************************************/ static gboolean nm_device_uses_generated_assumed_connection (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection; if ( priv->act_request && nm_active_connection_get_assumed (NM_ACTIVE_CONNECTION (priv->act_request))) { connection = nm_act_request_get_connection (priv->act_request); if ( connection && nm_settings_connection_get_nm_generated_assumed (NM_SETTINGS_CONNECTION (connection))) return TRUE; } return FALSE; } gboolean nm_device_uses_assumed_connection (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if ( priv->act_request && nm_active_connection_get_assumed (NM_ACTIVE_CONNECTION (priv->act_request))) return TRUE; return FALSE; } static SlaveInfo * find_slave_info (NMDevice *self, NMDevice *slave) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); SlaveInfo *info; GSList *iter; for (iter = priv->slaves; iter; iter = g_slist_next (iter)) { info = iter->data; if (info->slave == slave) return info; } return NULL; } static void free_slave_info (SlaveInfo *info) { g_signal_handler_disconnect (info->slave, info->watch_id); g_clear_object (&info->slave); memset (info, 0, sizeof (*info)); g_free (info); } /** * nm_device_enslave_slave: * @self: the master device * @slave: the slave device to enslave * @connection: (allow-none): the slave device's connection * * If @self is capable of enslaving other devices (ie it's a bridge, bond, team, * etc) then this function enslaves @slave. * * Returns: %TRUE on success, %FALSE on failure or if this device cannot enslave * other devices. 
*/ static gboolean nm_device_enslave_slave (NMDevice *self, NMDevice *slave, NMConnection *connection) { SlaveInfo *info; gboolean success = FALSE; gboolean configure; g_return_val_if_fail (self != NULL, FALSE); g_return_val_if_fail (slave != NULL, FALSE); g_return_val_if_fail (NM_DEVICE_GET_CLASS (self)->enslave_slave != NULL, FALSE); info = find_slave_info (self, slave); if (!info) return FALSE; if (info->enslaved) success = TRUE; else { configure = (info->configure && connection != NULL); if (configure) g_return_val_if_fail (nm_device_get_state (slave) >= NM_DEVICE_STATE_DISCONNECTED, FALSE); success = NM_DEVICE_GET_CLASS (self)->enslave_slave (self, slave, connection, configure); info->enslaved = success; } nm_device_slave_notify_enslave (info->slave, success); /* Ensure the device's hardware address is up-to-date; it often changes * when slaves change. */ nm_device_update_hw_address (self); /* Restart IP configuration if we're waiting for slaves. Do this * after updating the hardware address as IP config may need the * new address. */ if (success) { if (NM_DEVICE_GET_PRIVATE (self)->ip4_state == IP_WAIT) nm_device_activate_stage3_ip4_start (self); if (NM_DEVICE_GET_PRIVATE (self)->ip6_state == IP_WAIT) nm_device_activate_stage3_ip6_start (self); } return success; } /** * nm_device_release_one_slave: * @self: the master device * @slave: the slave device to release * @configure: whether @self needs to actually release @slave * @reason: the state change reason for the @slave * * If @self is capable of enslaving other devices (ie it's a bridge, bond, team, * etc) then this function releases the previously enslaved @slave and/or * updates the state of @self and @slave to reflect its release. * * Returns: %TRUE on success, %FALSE on failure, if this device cannot enslave * other devices, or if @slave was never enslaved. */ static gboolean nm_device_release_one_slave (NMDevice *self, NMDevice *slave, gboolean configure, NMDeviceStateReason reason) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); SlaveInfo *info; gboolean success = FALSE; g_return_val_if_fail (slave != NULL, FALSE); g_return_val_if_fail (NM_DEVICE_GET_CLASS (self)->release_slave != NULL, FALSE); info = find_slave_info (self, slave); if (!info) return FALSE; priv->slaves = g_slist_remove (priv->slaves, info); if (info->enslaved) { success = NM_DEVICE_GET_CLASS (self)->release_slave (self, slave, configure); /* The release_slave() implementation logs success/failure (in the * correct device-specific log domain), so we don't have to do anything. */ } if (!configure) { g_warn_if_fail (reason == NM_DEVICE_STATE_REASON_NONE || reason == NM_DEVICE_STATE_REASON_REMOVED); reason = NM_DEVICE_STATE_REASON_NONE; } else if (reason == NM_DEVICE_STATE_REASON_NONE) { g_warn_if_reached (); reason = NM_DEVICE_STATE_REASON_UNKNOWN; } nm_device_slave_notify_release (info->slave, reason); free_slave_info (info); /* Ensure the device's hardware address is up-to-date; it often changes * when slaves change. */ nm_device_update_hw_address (self); return success; } static gboolean is_software_external (NMDevice *self) { return nm_device_is_software (self) && !nm_device_get_is_nm_owned (self); } /** * nm_device_finish_init: * @self: the master device * * Whatever needs to be done post-initialization, when the device has a DBus * object name. 
*/ void nm_device_finish_init (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); g_assert (priv->initialized == FALSE); /* Do not manage externally created software devices until they are IFF_UP */ if ( is_software_external (self) && !nm_platform_link_is_up (priv->ifindex) && priv->ifindex > 0) nm_device_set_initial_unmanaged_flag (self, NM_UNMANAGED_EXTERNAL_DOWN, TRUE); if (priv->master) nm_device_enslave_slave (priv->master, self, NULL); priv->initialized = TRUE; } static void carrier_changed (NMDevice *self, gboolean carrier) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (!nm_device_get_managed (self)) return; nm_device_recheck_available_connections (self); /* ignore-carrier devices ignore all carrier-down events */ if (priv->ignore_carrier && !carrier) return; if (priv->is_master) { /* Bridge/bond/team carrier does not affect its own activation, * but when carrier comes on, if there are slaves waiting, * it will restart them. */ if (!carrier) return; if (nm_device_activate_ip4_state_in_wait (self)) nm_device_activate_stage3_ip4_start (self); if (nm_device_activate_ip6_state_in_wait (self)) nm_device_activate_stage3_ip6_start (self); return; } else if (nm_device_get_enslaved (self) && !carrier) { /* Slaves don't deactivate when they lose carrier; for * bonds/teams in particular that would be actively * counterproductive. */ return; } if (carrier) { g_warn_if_fail (priv->state >= NM_DEVICE_STATE_UNAVAILABLE); if (priv->state == NM_DEVICE_STATE_UNAVAILABLE) { nm_device_queue_state (self, NM_DEVICE_STATE_DISCONNECTED, NM_DEVICE_STATE_REASON_CARRIER); } else if (priv->state == NM_DEVICE_STATE_DISCONNECTED) { /* If the device is already in DISCONNECTED state without a carrier * (probably because it is tagged for carrier ignore) ensure that * when the carrier appears, auto connections are rechecked for * the device. 
*/ nm_device_emit_recheck_auto_activate (self); } } else { g_return_if_fail (priv->state >= NM_DEVICE_STATE_UNAVAILABLE); if (priv->state == NM_DEVICE_STATE_UNAVAILABLE) { if (nm_device_queued_state_peek (self) >= NM_DEVICE_STATE_DISCONNECTED) nm_device_queued_state_clear (self); } else { nm_device_queue_state (self, NM_DEVICE_STATE_UNAVAILABLE, NM_DEVICE_STATE_REASON_CARRIER); } } } #define LINK_DISCONNECT_DELAY 4 static gboolean link_disconnect_action_cb (gpointer user_data) { NMDevice *self = NM_DEVICE (user_data); NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); _LOGD (LOGD_DEVICE, "link disconnected (calling deferred action) (id=%u)", priv->carrier_defer_id); priv->carrier_defer_id = 0; _LOGI (LOGD_DEVICE, "link disconnected (calling deferred action)"); NM_DEVICE_GET_CLASS (self)->carrier_changed (self, FALSE); return FALSE; } static void link_disconnect_action_cancel (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->carrier_defer_id) { g_source_remove (priv->carrier_defer_id); _LOGD (LOGD_DEVICE, "link disconnected (canceling deferred action) (id=%u)", priv->carrier_defer_id); priv->carrier_defer_id = 0; } } void nm_device_set_carrier (NMDevice *self, gboolean carrier) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMDeviceClass *klass = NM_DEVICE_GET_CLASS (self); NMDeviceState state = nm_device_get_state (self); if (priv->carrier == carrier) return; priv->carrier = carrier; g_object_notify (G_OBJECT (self), NM_DEVICE_CARRIER); if (priv->carrier) { _LOGI (LOGD_DEVICE, "link connected"); link_disconnect_action_cancel (self); klass->carrier_changed (self, TRUE); if (priv->carrier_wait_id) { g_source_remove (priv->carrier_wait_id); priv->carrier_wait_id = 0; nm_device_remove_pending_action (self, "carrier wait", TRUE); _carrier_wait_check_queued_act_request (self); } } else if (state <= NM_DEVICE_STATE_DISCONNECTED) { _LOGI (LOGD_DEVICE, "link disconnected"); klass->carrier_changed (self, FALSE); } else { _LOGI (LOGD_DEVICE, "link disconnected (deferring action for %d seconds)", LINK_DISCONNECT_DELAY); priv->carrier_defer_id = g_timeout_add_seconds (LINK_DISCONNECT_DELAY, link_disconnect_action_cb, self); _LOGD (LOGD_DEVICE, "link disconnected (deferring action for %d seconds) (id=%u)", LINK_DISCONNECT_DELAY, priv->carrier_defer_id); } } static void update_for_ip_ifname_change (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); g_hash_table_remove_all (priv->ip6_saved_properties); if (priv->dhcp4_client) { if (!nm_device_dhcp4_renew (self, FALSE)) { nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_DHCP_FAILED); return; } } if (priv->dhcp6_client) { if (!nm_device_dhcp6_renew (self, FALSE)) { nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_DHCP_FAILED); return; } } if (priv->rdisc) { /* FIXME: todo */ } if (priv->dnsmasq_manager) { /* FIXME: todo */ } } static void device_set_master (NMDevice *self, int ifindex) { NMDevice *master; NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); master = nm_manager_get_device_by_ifindex (nm_manager_get (), ifindex); if (master && NM_DEVICE_GET_CLASS (master)->enslave_slave) { g_clear_object (&priv->master); priv->master = g_object_ref (master); nm_device_master_add_slave (master, self, FALSE); } else if (master) { _LOGI (LOGD_DEVICE, "enslaved to non-master-type device %s; ignoring", nm_device_get_iface (master)); } else { _LOGW (LOGD_DEVICE, "enslaved to unknown device %d %s", ifindex, nm_platform_link_get_name 
(ifindex)); } } static void device_link_changed (NMDevice *self, NMPlatformLink *info) { NMDeviceClass *klass = NM_DEVICE_GET_CLASS (self); NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMUtilsIPv6IfaceId token_iid; gboolean ip_ifname_changed = FALSE; if (info->udi && g_strcmp0 (info->udi, priv->udi)) { /* Update UDI to what udev gives us */ g_free (priv->udi); priv->udi = g_strdup (info->udi); g_object_notify (G_OBJECT (self), NM_DEVICE_UDI); } /* Update MTU if it has changed. */ if (priv->mtu != info->mtu) { priv->mtu = info->mtu; g_object_notify (G_OBJECT (self), NM_DEVICE_MTU); } if (info->name[0] && strcmp (priv->iface, info->name) != 0) { _LOGI (LOGD_DEVICE, "interface index %d renamed iface from '%s' to '%s'", priv->ifindex, priv->iface, info->name); g_free (priv->iface); priv->iface = g_strdup (info->name); /* If the device has no explicit ip_iface, then changing iface changes ip_iface too. */ ip_ifname_changed = !priv->ip_iface; g_object_notify (G_OBJECT (self), NM_DEVICE_IFACE); if (ip_ifname_changed) g_object_notify (G_OBJECT (self), NM_DEVICE_IP_IFACE); /* Re-match available connections against the new interface name */ nm_device_recheck_available_connections (self); /* Let any connections that use the new interface name have a chance * to auto-activate on the device. */ nm_device_emit_recheck_auto_activate (self); } /* Update slave status for external changes */ if (priv->enslaved && info->master != nm_device_get_ifindex (priv->master)) nm_device_release_one_slave (priv->master, self, FALSE, NM_DEVICE_STATE_REASON_NONE); if (info->master && !priv->enslaved) { device_set_master (self, info->master); if (priv->master) nm_device_enslave_slave (priv->master, self, NULL); } if (priv->rdisc && nm_platform_link_get_ipv6_token (priv->ifindex, &token_iid)) { _LOGD (LOGD_DEVICE, "IPv6 tokenized identifier present on device %s", priv->iface); if (nm_rdisc_set_iid (priv->rdisc, token_iid)) nm_rdisc_start (priv->rdisc); } if (klass->link_changed) klass->link_changed (self, info); /* Update DHCP, etc, if needed */ if (ip_ifname_changed) update_for_ip_ifname_change (self); if (priv->up != info->up) { priv->up = info->up; /* Manage externally-created software interfaces only when they are IFF_UP */ g_assert (priv->ifindex > 0); if (is_software_external (self)) { gboolean external_down = nm_device_get_unmanaged_flag (self, NM_UNMANAGED_EXTERNAL_DOWN); if (external_down && info->up) { if (nm_device_get_state (self) < NM_DEVICE_STATE_DISCONNECTED) { /* Ensure the assume check is queued before any queued state changes * from the transition to UNAVAILABLE. */ nm_device_queue_recheck_assume (self); /* Resetting the EXTERNAL_DOWN flag may change the device's state * to UNAVAILABLE. To ensure that the state change doesn't touch * the device before assumption occurs, pass * NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED as the reason. */ nm_device_set_unmanaged (self, NM_UNMANAGED_EXTERNAL_DOWN, FALSE, NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED); } else { /* Don't trigger a state change; if the device is in a * state higher than UNAVAILABLE, it is already IFF_UP * or an explicit activation request was received. */ priv->unmanaged_flags &= ~NM_UNMANAGED_EXTERNAL_DOWN; } } else if (!external_down && !info->up && nm_device_get_state (self) <= NM_DEVICE_STATE_DISCONNECTED) { /* If the device is already disconnected and is set !IFF_UP, * unmanage it. 
*/ nm_device_set_unmanaged (self, NM_UNMANAGED_EXTERNAL_DOWN, TRUE, NM_DEVICE_STATE_REASON_USER_REQUESTED); } } } } static void device_ip_link_changed (NMDevice *self, NMPlatformLink *info) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (info->name[0] && g_strcmp0 (priv->ip_iface, info->name)) { _LOGI (LOGD_DEVICE, "interface index %d renamed ip_iface (%d) from '%s' to '%s'", priv->ifindex, nm_device_get_ip_ifindex (self), priv->ip_iface, info->name); g_free (priv->ip_iface); priv->ip_iface = g_strdup (info->name); g_object_notify (G_OBJECT (self), NM_DEVICE_IP_IFACE); update_for_ip_ifname_change (self); } } static void link_changed_cb (NMPlatform *platform, int ifindex, NMPlatformLink *info, NMPlatformSignalChangeType change_type, NMPlatformReason reason, NMDevice *self) { if (change_type != NM_PLATFORM_SIGNAL_CHANGED) return; /* We don't filter by 'reason' because we are interested in *all* link * changes. For example a call to nm_platform_link_set_up() may result * in an internal carrier change (i.e. we ask the kernel to set IFF_UP * and it results in also setting IFF_LOWER_UP. */ if (ifindex == nm_device_get_ifindex (self)) device_link_changed (self, info); else if (ifindex == nm_device_get_ip_ifindex (self)) device_ip_link_changed (self, info); } static void link_changed (NMDevice *self, NMPlatformLink *info) { /* Update carrier from link event if applicable. */ if ( device_has_capability (self, NM_DEVICE_CAP_CARRIER_DETECT) && !device_has_capability (self, NM_DEVICE_CAP_NONSTANDARD_CARRIER)) nm_device_set_carrier (self, info->connected); } /** * nm_device_notify_component_added(): * @self: the #NMDevice * @component: the component being added by a plugin * * Called by the manager to notify the device that a new component has * been found. The device implementation should return %TRUE if it * wishes to claim the component, or %FALSE if it cannot. * * Returns: %TRUE to claim the component, %FALSE if the component cannot be * claimed. */ gboolean nm_device_notify_component_added (NMDevice *self, GObject *component) { if (NM_DEVICE_GET_CLASS (self)->component_added) return NM_DEVICE_GET_CLASS (self)->component_added (self, component); return FALSE; } /** * nm_device_owns_iface(): * @self: the #NMDevice * @iface: an interface name * * Called by the manager to ask if the device or any of its components owns * @iface. For example, a WWAN implementation would return %TRUE for an * ethernet interface name that was owned by the WWAN device's modem component, * because that ethernet interface is controlled by the WWAN device and cannot * be used independently of the WWAN device. 
* * Returns: %TRUE if @self or it's components owns the interface name, * %FALSE if not */ gboolean nm_device_owns_iface (NMDevice *self, const char *iface) { if (NM_DEVICE_GET_CLASS (self)->owns_iface) return NM_DEVICE_GET_CLASS (self)->owns_iface (self, iface); return FALSE; } NMConnection * nm_device_new_default_connection (NMDevice *self) { if (NM_DEVICE_GET_CLASS (self)->new_default_connection) return NM_DEVICE_GET_CLASS (self)->new_default_connection (self); return NULL; } static void slave_state_changed (NMDevice *slave, NMDeviceState slave_new_state, NMDeviceState slave_old_state, NMDeviceStateReason reason, NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); gboolean release = FALSE; _LOGD (LOGD_DEVICE, "slave %s state change %d (%s) -> %d (%s)", nm_device_get_iface (slave), slave_old_state, state_to_string (slave_old_state), slave_new_state, state_to_string (slave_new_state)); /* Don't try to enslave slaves until the master is ready */ if (priv->state < NM_DEVICE_STATE_CONFIG) return; if (slave_new_state == NM_DEVICE_STATE_IP_CONFIG) nm_device_enslave_slave (self, slave, nm_device_get_connection (slave)); else if (slave_new_state > NM_DEVICE_STATE_ACTIVATED) release = TRUE; else if ( slave_new_state <= NM_DEVICE_STATE_DISCONNECTED && slave_old_state > NM_DEVICE_STATE_DISCONNECTED) { /* Catch failures due to unavailable or unmanaged */ release = TRUE; } if (release) { nm_device_release_one_slave (self, slave, TRUE, reason); /* Bridge/bond/team interfaces are left up until manually deactivated */ if (priv->slaves == NULL && priv->state == NM_DEVICE_STATE_ACTIVATED) _LOGD (LOGD_DEVICE, "last slave removed; remaining activated"); } } /** * nm_device_master_add_slave: * @self: the master device * @slave: the slave device to enslave * @configure: pass %TRUE if the slave should be configured by the master, or * %FALSE if it is already configured outside NetworkManager * * If @self is capable of enslaving other devices (ie it's a bridge, bond, team, * etc) then this function adds @slave to the slave list for later enslavement. * * Returns: %TRUE on success, %FALSE on failure */ static gboolean nm_device_master_add_slave (NMDevice *self, NMDevice *slave, gboolean configure) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); SlaveInfo *info; g_return_val_if_fail (self != NULL, FALSE); g_return_val_if_fail (slave != NULL, FALSE); g_return_val_if_fail (NM_DEVICE_GET_CLASS (self)->enslave_slave != NULL, FALSE); if (configure) g_return_val_if_fail (nm_device_get_state (slave) >= NM_DEVICE_STATE_DISCONNECTED, FALSE); if (!find_slave_info (self, slave)) { info = g_malloc0 (sizeof (SlaveInfo)); info->slave = g_object_ref (slave); info->configure = configure; info->watch_id = g_signal_connect (slave, "state-changed", G_CALLBACK (slave_state_changed), self); priv->slaves = g_slist_append (priv->slaves, info); } nm_device_queue_recheck_assume (self); return TRUE; } /** * nm_device_master_get_slaves: * @self: the master device * * Returns: any slaves of which @self is the master. Caller owns returned list. 
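 * Only the list container itself is owned by the caller; the #NMDevice
 * elements are not referenced and must not be unreffed.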
*/ GSList * nm_device_master_get_slaves (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); GSList *slaves = NULL, *iter; for (iter = priv->slaves; iter; iter = g_slist_next (iter)) slaves = g_slist_prepend (slaves, ((SlaveInfo *) iter->data)->slave); return slaves; } /** * nm_device_master_get_slave_by_ifindex: * @self: the master device * @ifindex: the slave's interface index * * Returns: the slave with the given @ifindex of which @self is the master, * or %NULL if no device with @ifindex is a slave of @self. */ NMDevice * nm_device_master_get_slave_by_ifindex (NMDevice *self, int ifindex) { GSList *iter; for (iter = NM_DEVICE_GET_PRIVATE (self)->slaves; iter; iter = g_slist_next (iter)) { SlaveInfo *info = iter->data; if (nm_device_get_ip_ifindex (info->slave) == ifindex) return info->slave; } return NULL; } /** * nm_device_master_check_slave_physical_port: * @self: the master device * @slave: a slave device * @log_domain: domain to log a warning in * * Checks if @self already has a slave with the same #NMDevice:physical-port-id * as @slave, and logs a warning if so. */ void nm_device_master_check_slave_physical_port (NMDevice *self, NMDevice *slave, guint64 log_domain) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); const char *slave_physical_port_id, *existing_physical_port_id; SlaveInfo *info; GSList *iter; slave_physical_port_id = nm_device_get_physical_port_id (slave); if (!slave_physical_port_id) return; for (iter = priv->slaves; iter; iter = iter->next) { info = iter->data; if (info->slave == slave) continue; existing_physical_port_id = nm_device_get_physical_port_id (info->slave); if (!g_strcmp0 (slave_physical_port_id, existing_physical_port_id)) { _LOGW (log_domain, "slave %s shares a physical port with existing slave %s", nm_device_get_ip_iface (slave), nm_device_get_ip_iface (info->slave)); /* Since this function will get called for every slave, we only have * to warn about the first match we find; if there are other matches * later in the list, we will have already warned about them matching * @existing earlier. */ return; } } } /* release all slaves */ static void nm_device_master_release_slaves (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMDeviceStateReason reason; /* Don't release the slaves if this connection doesn't belong to NM. */ if (nm_device_uses_generated_assumed_connection (self)) return; reason = priv->state_reason; if (priv->state == NM_DEVICE_STATE_FAILED) reason = NM_DEVICE_STATE_REASON_DEPENDENCY_FAILED; while (priv->slaves) { SlaveInfo *info = priv->slaves->data; nm_device_release_one_slave (self, info->slave, TRUE, reason); } } /** * nm_device_get_master: * @self: the device * * If @self has been enslaved by another device, this returns that * device. Otherwise it returns %NULL. (In particular, note that if * @self is in the process of activating as a slave, but has not yet * been enslaved by its master, this will return %NULL.) * * Returns: (transfer none): @self's master, or %NULL */ NMDevice * nm_device_get_master (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->enslaved) return priv->master; else return NULL; } /** * nm_device_slave_notify_enslave: * @self: the slave device * @success: whether the enslaving operation succeeded * * Notifies a slave that either it has been enslaved, or else its master tried * to enslave it and failed. 
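 * If the slave is currently activating, success advances it to the
 * SECONDARIES state and failure moves it to FAILED; outside of activation a
 * re-check of connection assumption is queued instead.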
*/ static void nm_device_slave_notify_enslave (NMDevice *self, gboolean success) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection = nm_device_get_connection (self); gboolean activating = (priv->state == NM_DEVICE_STATE_IP_CONFIG); g_assert (priv->master); if (!priv->enslaved) { if (success) { if (activating) { _LOGI (LOGD_DEVICE, "Activation: connection '%s' enslaved, continuing activation", nm_connection_get_id (connection)); } else _LOGI (LOGD_DEVICE, "enslaved to %s", nm_device_get_iface (priv->master)); priv->enslaved = TRUE; g_object_notify (G_OBJECT (self), NM_DEVICE_MASTER); } else if (activating) { _LOGW (LOGD_DEVICE, "Activation: connection '%s' could not be enslaved", nm_connection_get_id (connection)); } } if (activating) { priv->ip4_state = IP_DONE; priv->ip6_state = IP_DONE; nm_device_queue_state (self, success ? NM_DEVICE_STATE_SECONDARIES : NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_NONE); } else nm_device_queue_recheck_assume (self); } /** * nm_device_slave_notify_release: * @self: the slave device * @reason: the reason associated with the state change * * Notifies a slave that it has been released, and why. */ static void nm_device_slave_notify_release (NMDevice *self, NMDeviceStateReason reason) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection = nm_device_get_connection (self); NMDeviceState new_state; const char *master_status; if ( reason != NM_DEVICE_STATE_REASON_NONE && priv->state > NM_DEVICE_STATE_DISCONNECTED && priv->state <= NM_DEVICE_STATE_ACTIVATED) { if (reason == NM_DEVICE_STATE_REASON_DEPENDENCY_FAILED) { new_state = NM_DEVICE_STATE_FAILED; master_status = "failed"; } else if (reason == NM_DEVICE_STATE_REASON_USER_REQUESTED) { new_state = NM_DEVICE_STATE_DEACTIVATING; master_status = "deactivated by user request"; } else { new_state = NM_DEVICE_STATE_DISCONNECTED; master_status = "deactivated"; } _LOGD (LOGD_DEVICE, "Activation: connection '%s' master %s", nm_connection_get_id (connection), master_status); nm_device_queue_state (self, new_state, reason); } else if (priv->master) _LOGI (LOGD_DEVICE, "released from master %s", nm_device_get_iface (priv->master)); else _LOGD (LOGD_DEVICE, "released from master%s", priv->enslaved ? "" : " (was not enslaved)"); if (priv->enslaved) { priv->enslaved = FALSE; g_object_notify (G_OBJECT (self), NM_DEVICE_MASTER); } } /** * nm_device_get_enslaved: * @self: the #NMDevice * * Returns: %TRUE if the device is enslaved to a master device (eg bridge or * bond or team), %FALSE if not */ gboolean nm_device_get_enslaved (NMDevice *self) { return NM_DEVICE_GET_PRIVATE (self)->enslaved; } /** * nm_device_removed: * @self: the #NMDevice * * Called by the manager when the device was removed. Releases the device from * the master in case it's enslaved. */ void nm_device_removed (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->enslaved) nm_device_release_one_slave (priv->master, self, FALSE, NM_DEVICE_STATE_REASON_REMOVED); } static gboolean is_available (NMDevice *self, NMDeviceCheckDevAvailableFlags flags) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->carrier || priv->ignore_carrier) return TRUE; if (NM_FLAGS_HAS (flags, NM_DEVICE_CHECK_DEV_AVAILABLE_IGNORE_CARRIER)) return TRUE; return FALSE; } /** * nm_device_is_available: * @self: the #NMDevice * @flags: additional flags to influence the check. Flags have the * meaning to increase the availability of a device. 
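 *   In other words, a flag can only relax a requirement, for example by
 *   ignoring a missing carrier; it never makes an otherwise available
 *   device unavailable.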
* * Checks if @self would currently be capable of activating a * connection. In particular, it checks that the device is ready (eg, * is not missing firmware), that it has carrier (if necessary), and * that any necessary external software (eg, ModemManager, * wpa_supplicant) is available. * * @self can only be in a state higher than * %NM_DEVICE_STATE_UNAVAILABLE when nm_device_is_available() returns * %TRUE. (But note that it can still be %NM_DEVICE_STATE_UNMANAGED * when it is available.) * * Returns: %TRUE or %FALSE */ gboolean nm_device_is_available (NMDevice *self, NMDeviceCheckDevAvailableFlags flags) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->firmware_missing) return FALSE; return NM_DEVICE_GET_CLASS (self)->is_available (self, flags); } gboolean nm_device_get_enabled (NMDevice *self) { g_return_val_if_fail (NM_IS_DEVICE (self), FALSE); if (NM_DEVICE_GET_CLASS (self)->get_enabled) return NM_DEVICE_GET_CLASS (self)->get_enabled (self); return TRUE; } void nm_device_set_enabled (NMDevice *self, gboolean enabled) { g_return_if_fail (NM_IS_DEVICE (self)); if (NM_DEVICE_GET_CLASS (self)->set_enabled) NM_DEVICE_GET_CLASS (self)->set_enabled (self, enabled); } /** * nm_device_get_autoconnect: * @self: the #NMDevice * * Returns: %TRUE if the device allows autoconnect connections, or %FALSE if the * device is explicitly blocking all autoconnect connections. Does not take * into account transient conditions like companion devices that may wish to * block the device. */ gboolean nm_device_get_autoconnect (NMDevice *self) { g_return_val_if_fail (NM_IS_DEVICE (self), FALSE); return NM_DEVICE_GET_PRIVATE (self)->autoconnect; } static void nm_device_set_autoconnect (NMDevice *self, gboolean autoconnect) { NMDevicePrivate *priv; g_return_if_fail (NM_IS_DEVICE (self)); priv = NM_DEVICE_GET_PRIVATE (self); if (priv->autoconnect == autoconnect) return; if (autoconnect) { /* Default-unmanaged devices never autoconnect */ if (!nm_device_get_default_unmanaged (self)) { priv->autoconnect = TRUE; g_object_notify (G_OBJECT (self), NM_DEVICE_AUTOCONNECT); } } else { priv->autoconnect = FALSE; g_object_notify (G_OBJECT (self), NM_DEVICE_AUTOCONNECT); } } static gboolean autoconnect_allowed_accumulator (GSignalInvocationHint *ihint, GValue *return_accu, const GValue *handler_return, gpointer data) { if (!g_value_get_boolean (handler_return)) g_value_set_boolean (return_accu, FALSE); return TRUE; } /** * nm_device_autoconnect_allowed: * @self: the #NMDevice * * Returns: %TRUE if the device can be auto-connected immediately, taking * transient conditions into account (like companion devices that may wish to * block autoconnect for a time). */ gboolean nm_device_autoconnect_allowed (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); GValue instance = G_VALUE_INIT; GValue retval = G_VALUE_INIT; if (priv->state < NM_DEVICE_STATE_DISCONNECTED || !priv->autoconnect) return FALSE; /* The 'autoconnect-allowed' signal is emitted on a device to allow * other listeners to block autoconnect on the device if they wish. * This is mainly used by the OLPC Mesh devices to block autoconnect * on their companion WiFi device as they share radio resources and * cannot be connected at the same time. 
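 * The emission uses autoconnect_allowed_accumulator(), so a single handler
 * returning FALSE is enough to block autoconnect for this device.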
*/ g_value_init (&instance, G_TYPE_OBJECT); g_value_set_object (&instance, self); g_value_init (&retval, G_TYPE_BOOLEAN); if (priv->autoconnect) g_value_set_boolean (&retval, TRUE); else g_value_set_boolean (&retval, FALSE); /* Use g_signal_emitv() rather than g_signal_emit() to avoid the return * value being changed if no handlers are connected */ g_signal_emitv (&instance, signals[AUTOCONNECT_ALLOWED], 0, &retval); g_value_unset (&instance); return g_value_get_boolean (&retval); } static gboolean can_auto_connect (NMDevice *self, NMConnection *connection, char **specific_object) { NMSettingConnection *s_con; s_con = nm_connection_get_setting_connection (connection); if (!nm_setting_connection_get_autoconnect (s_con)) return FALSE; return nm_device_check_connection_available (self, connection, NM_DEVICE_CHECK_CON_AVAILABLE_NONE, NULL); } /** * nm_device_can_auto_connect: * @self: an #NMDevice * @connection: a #NMConnection * @specific_object: (out) (transfer full): on output, the path of an * object associated with the returned connection, to be passed to * nm_manager_activate_connection(), or %NULL. * * Checks if @connection can be auto-activated on @self right now. * This requires, at a minimum, that the connection be compatible with * @self, and that it have the #NMSettingConnection:autoconnect property * set, and that the device allow auto connections. Some devices impose * additional requirements. (Eg, a Wi-Fi connection can only be activated * if its SSID was seen in the last scan.) * * Returns: %TRUE, if the @connection can be auto-activated. **/ gboolean nm_device_can_auto_connect (NMDevice *self, NMConnection *connection, char **specific_object) { g_return_val_if_fail (NM_IS_DEVICE (self), FALSE); g_return_val_if_fail (NM_IS_CONNECTION (connection), FALSE); g_return_val_if_fail (specific_object && !*specific_object, FALSE); if (nm_device_autoconnect_allowed (self)) return NM_DEVICE_GET_CLASS (self)->can_auto_connect (self, connection, specific_object); return FALSE; } static gboolean device_has_config (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); /* Check for IP configuration. */ if (priv->ip4_config && nm_ip4_config_get_num_addresses (priv->ip4_config)) return TRUE; if (priv->ip6_config && nm_ip6_config_get_num_addresses (priv->ip6_config)) return TRUE; /* The existence of a software device is good enough. */ if (nm_device_is_software (self)) return TRUE; /* Slaves are also configured by definition */ if (nm_platform_link_get_master (priv->ifindex) > 0) return TRUE; return FALSE; } /** * nm_device_master_update_slave_connection: * @self: the master #NMDevice * @slave: the slave #NMDevice * @connection: the #NMConnection to update with the slave settings * @GError: (out): error description * * Reads the slave configuration for @slave and updates @connection with those * properties. This invokes a virtual function on the master device @self. * * Returns: %TRUE if the configuration was read and @connection updated, * %FALSE on failure. 
*/ gboolean nm_device_master_update_slave_connection (NMDevice *self, NMDevice *slave, NMConnection *connection, GError **error) { NMDeviceClass *klass; gboolean success; g_return_val_if_fail (self, FALSE); g_return_val_if_fail (NM_IS_DEVICE (self), FALSE); g_return_val_if_fail (slave, FALSE); g_return_val_if_fail (connection, FALSE); g_return_val_if_fail (!error || !*error, FALSE); g_return_val_if_fail (nm_connection_get_setting_connection (connection), FALSE); g_return_val_if_fail (nm_device_get_iface (self), FALSE); klass = NM_DEVICE_GET_CLASS (self); if (klass->master_update_slave_connection) { success = klass->master_update_slave_connection (self, slave, connection, error); g_return_val_if_fail (!error || (success && !*error) || *error, success); return success; } g_set_error (error, NM_DEVICE_ERROR, NM_DEVICE_ERROR_FAILED, "master device '%s' cannot update a slave connection for slave device '%s' (master type not supported?)", nm_device_get_iface (self), nm_device_get_iface (slave)); return FALSE; } NMConnection * nm_device_generate_connection (NMDevice *self, NMDevice *master) { NMDeviceClass *klass = NM_DEVICE_GET_CLASS (self); NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); const char *ifname = nm_device_get_iface (self); NMConnection *connection; NMSetting *s_con; NMSetting *s_ip4; NMSetting *s_ip6; gs_free char *uuid = NULL; const char *ip4_method, *ip6_method; GError *error = NULL; /* If update_connection() is not implemented, just fail. */ if (!klass->update_connection) return NULL; /* Return NULL if device is unconfigured. */ if (!device_has_config (self)) { _LOGD (LOGD_DEVICE, "device has no existing configuration"); return NULL; } connection = nm_simple_connection_new (); s_con = nm_setting_connection_new (); uuid = nm_utils_uuid_generate (); g_object_set (s_con, NM_SETTING_CONNECTION_UUID, uuid, NM_SETTING_CONNECTION_ID, ifname, NM_SETTING_CONNECTION_AUTOCONNECT, FALSE, NM_SETTING_CONNECTION_INTERFACE_NAME, ifname, NM_SETTING_CONNECTION_TIMESTAMP, (guint64) time (NULL), NULL); if (klass->connection_type) g_object_set (s_con, NM_SETTING_CONNECTION_TYPE, klass->connection_type, NULL); nm_connection_add_setting (connection, s_con); /* If the device is a slave, update various slave settings */ if (master) { if (!nm_device_master_update_slave_connection (master, self, connection, &error)) { _LOGE (LOGD_DEVICE, "master device '%s' failed to update slave connection: %s", nm_device_get_iface (master), error ? error->message : "(unknown error)"); g_error_free (error); g_object_unref (connection); return NULL; } } else { /* Only regular and master devices get IP configuration; slaves do not */ s_ip4 = nm_ip4_config_create_setting (priv->ip4_config); nm_connection_add_setting (connection, s_ip4); s_ip6 = nm_ip6_config_create_setting (priv->ip6_config); nm_connection_add_setting (connection, s_ip6); } klass->update_connection (self, connection); /* Check the connection in case of update_connection() bug. */ if (!nm_connection_verify (connection, &error)) { _LOGE (LOGD_DEVICE, "Generated connection does not verify: %s", error->message); g_clear_error (&error); g_object_unref (connection); return NULL; } /* Ignore the connection if it has no IP configuration, * no slave configuration, and is not a master interface. 
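 * A generated connection like that would carry nothing useful to assume,
 * so NULL is returned instead.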
*/ ip4_method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG); ip6_method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG); if ( g_strcmp0 (ip4_method, NM_SETTING_IP4_CONFIG_METHOD_DISABLED) == 0 && g_strcmp0 (ip6_method, NM_SETTING_IP6_CONFIG_METHOD_IGNORE) == 0 && !nm_setting_connection_get_master (NM_SETTING_CONNECTION (s_con)) && !priv->slaves) { _LOGD (LOGD_DEVICE, "ignoring generated connection (no IP and not in master-slave relationship)"); g_object_unref (connection); connection = NULL; } return connection; } gboolean nm_device_complete_connection (NMDevice *self, NMConnection *connection, const char *specific_object, const GSList *existing_connections, GError **error) { gboolean success = FALSE; g_return_val_if_fail (self != NULL, FALSE); g_return_val_if_fail (connection != NULL, FALSE); if (!NM_DEVICE_GET_CLASS (self)->complete_connection) { g_set_error (error, NM_DEVICE_ERROR, NM_DEVICE_ERROR_INVALID_CONNECTION, "Device class %s had no complete_connection method", G_OBJECT_TYPE_NAME (self)); return FALSE; } success = NM_DEVICE_GET_CLASS (self)->complete_connection (self, connection, specific_object, existing_connections, error); if (success) success = nm_connection_verify (connection, error); return success; } static gboolean check_connection_compatible (NMDevice *self, NMConnection *connection) { NMSettingConnection *s_con; const char *config_iface, *device_iface; s_con = nm_connection_get_setting_connection (connection); g_assert (s_con); config_iface = nm_setting_connection_get_interface_name (s_con); device_iface = nm_device_get_iface (self); if (config_iface && strcmp (config_iface, device_iface) != 0) return FALSE; return TRUE; } /** * nm_device_check_connection_compatible: * @self: an #NMDevice * @connection: an #NMConnection * * Checks if @connection could potentially be activated on @self. * This means only that @self has the proper capabilities, and that * @connection is not locked to some other device. It does not * necessarily mean that @connection could be activated on @self * right now. (Eg, it might refer to a Wi-Fi network that is not * currently available.) * * Returns: #TRUE if @connection could potentially be activated on * @self. */ gboolean nm_device_check_connection_compatible (NMDevice *self, NMConnection *connection) { g_return_val_if_fail (NM_IS_DEVICE (self), FALSE); g_return_val_if_fail (NM_IS_CONNECTION (connection), FALSE); return NM_DEVICE_GET_CLASS (self)->check_connection_compatible (self, connection); } /** * nm_device_can_assume_connections: * @self: #NMDevice instance * * This is a convenience function to determine whether connection assumption * is available for this device. * * Returns: %TRUE if the device is capable of assuming connections, %FALSE if not */ static gboolean nm_device_can_assume_connections (NMDevice *self) { return !!NM_DEVICE_GET_CLASS (self)->update_connection; } /** * nm_device_can_assume_active_connection: * @self: #NMDevice instance * * This is a convenience function to determine whether the device's active * connection can be assumed if NetworkManager restarts. This method returns * %TRUE if and only if the device can assume connections, and the device has * an active connection, and that active connection can be assumed. * * Returns: %TRUE if the device's active connection can be assumed, or %FALSE * if there is no active connection or the active connection cannot be * assumed. 
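 * (Connections whose IPv4 or IPv6 method is not in the assumable sets
 * checked below, for example shared IPv4, are never assumable.)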
*/ gboolean nm_device_can_assume_active_connection (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection; const char *method; const char *assumable_ip6_methods[] = { NM_SETTING_IP6_CONFIG_METHOD_IGNORE, NM_SETTING_IP6_CONFIG_METHOD_AUTO, NM_SETTING_IP6_CONFIG_METHOD_DHCP, NM_SETTING_IP6_CONFIG_METHOD_LINK_LOCAL, NM_SETTING_IP6_CONFIG_METHOD_MANUAL, NULL }; const char *assumable_ip4_methods[] = { NM_SETTING_IP4_CONFIG_METHOD_DISABLED, NM_SETTING_IP6_CONFIG_METHOD_AUTO, NM_SETTING_IP6_CONFIG_METHOD_MANUAL, NULL }; if (!nm_device_can_assume_connections (self)) return FALSE; connection = nm_device_get_connection (self); if (!connection) return FALSE; /* Can't assume connections that aren't yet configured * FIXME: what about bridges/bonds waiting for slaves? */ if (priv->state < NM_DEVICE_STATE_IP_CONFIG) return FALSE; if (priv->ip4_state != IP_DONE && priv->ip6_state != IP_DONE) return FALSE; method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG); if (!_nm_utils_string_in_list (method, assumable_ip6_methods)) return FALSE; method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG); if (!_nm_utils_string_in_list (method, assumable_ip4_methods)) return FALSE; return TRUE; } static gboolean nm_device_emit_recheck_assume (gpointer self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); priv->recheck_assume_id = 0; if (!nm_device_get_act_request (self)) { _LOGD (LOGD_DEVICE, "emit RECHECK_ASSUME signal"); g_signal_emit (self, signals[RECHECK_ASSUME], 0); } return G_SOURCE_REMOVE; } void nm_device_queue_recheck_assume (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (nm_device_can_assume_connections (self) && !priv->recheck_assume_id) priv->recheck_assume_id = g_idle_add (nm_device_emit_recheck_assume, self); } void nm_device_emit_recheck_auto_activate (NMDevice *self) { g_signal_emit (self, signals[RECHECK_AUTO_ACTIVATE], 0); } static void dnsmasq_state_changed_cb (NMDnsMasqManager *manager, guint32 status, gpointer user_data) { NMDevice *self = NM_DEVICE (user_data); switch (status) { case NM_DNSMASQ_STATUS_DEAD: nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_SHARED_START_FAILED); break; default: break; } } static void activation_source_clear (NMDevice *self, gboolean remove_source, int family) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); guint *act_source_id; gpointer *act_source_func; if (family == AF_INET6) { act_source_id = &priv->act_source6_id; act_source_func = &priv->act_source6_func; } else { act_source_id = &priv->act_source_id; act_source_func = &priv->act_source_func; } if (*act_source_id) { if (remove_source) g_source_remove (*act_source_id); *act_source_id = 0; *act_source_func = NULL; } } static void activation_source_schedule (NMDevice *self, GSourceFunc func, int family) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); guint *act_source_id; gpointer *act_source_func; if (family == AF_INET6) { act_source_id = &priv->act_source6_id; act_source_func = &priv->act_source6_func; } else { act_source_id = &priv->act_source_id; act_source_func = &priv->act_source_func; } if (*act_source_id) _LOGE (LOGD_DEVICE, "activation stage already scheduled"); /* Don't bother rescheduling the same function that's about to * run anyway. Fixes issues with crappy wireless drivers sending * streams of associate events before NM has had a chance to process * the first one. 
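 * If the very same function is already queued this call is a no-op; a
 * different function replaces the pending idle source.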
*/ if (!*act_source_id || (*act_source_func != func)) { activation_source_clear (self, TRUE, family); *act_source_id = g_idle_add (func, self); *act_source_func = func; } } static gboolean get_ip_config_may_fail (NMDevice *self, int family) { NMConnection *connection; NMSettingIPConfig *s_ip = NULL; g_return_val_if_fail (self != NULL, TRUE); connection = nm_device_get_connection (self); g_assert (connection); /* Fail the connection if the failed IP method is required to complete */ switch (family) { case AF_INET: s_ip = nm_connection_get_setting_ip4_config (connection); break; case AF_INET6: s_ip = nm_connection_get_setting_ip6_config (connection); break; default: g_assert_not_reached (); } return nm_setting_ip_config_get_may_fail (s_ip); } static void master_ready_cb (NMActiveConnection *active, GParamSpec *pspec, NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMActiveConnection *master; g_assert (priv->state == NM_DEVICE_STATE_PREPARE); /* Notify a master device that it has a new slave */ g_assert (nm_active_connection_get_master_ready (active)); master = nm_active_connection_get_master (active); priv->master = g_object_ref (nm_active_connection_get_device (master)); nm_device_master_add_slave (priv->master, self, nm_active_connection_get_assumed (active) ? FALSE : TRUE); _LOGD (LOGD_DEVICE, "master connection ready; master device %s", nm_device_get_iface (priv->master)); if (priv->master_ready_id) { g_signal_handler_disconnect (active, priv->master_ready_id); priv->master_ready_id = 0; } nm_device_activate_schedule_stage2_device_config (self); } static NMActStageReturn act_stage1_prepare (NMDevice *self, NMDeviceStateReason *reason) { return NM_ACT_STAGE_RETURN_SUCCESS; } /* * nm_device_activate_stage1_device_prepare * * Prepare for device activation * */ static gboolean nm_device_activate_stage1_device_prepare (gpointer user_data) { NMDevice *self = NM_DEVICE (user_data); NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMActStageReturn ret = NM_ACT_STAGE_RETURN_SUCCESS; NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE; NMActiveConnection *active = NM_ACTIVE_CONNECTION (priv->act_request); /* Clear the activation source ID now that this stage has run */ activation_source_clear (self, FALSE, 0); priv->ip4_state = priv->ip6_state = IP_NONE; /* Notify the new ActiveConnection along with the state change */ g_object_notify (G_OBJECT (self), NM_DEVICE_ACTIVE_CONNECTION); _LOGI (LOGD_DEVICE, "Activation: Stage 1 of 5 (Device Prepare) started..."); nm_device_state_changed (self, NM_DEVICE_STATE_PREPARE, NM_DEVICE_STATE_REASON_NONE); /* Assumed connections were already set up outside NetworkManager */ if (!nm_active_connection_get_assumed (active)) { ret = NM_DEVICE_GET_CLASS (self)->act_stage1_prepare (self, &reason); if (ret == NM_ACT_STAGE_RETURN_POSTPONE) { goto out; } else if (ret == NM_ACT_STAGE_RETURN_FAILURE) { nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason); goto out; } g_assert (ret == NM_ACT_STAGE_RETURN_SUCCESS); } if (nm_active_connection_get_master (active)) { /* If the master connection is ready for slaves, attach ourselves */ if (nm_active_connection_get_master_ready (active)) master_ready_cb (active, NULL, self); else { _LOGD (LOGD_DEVICE, "waiting for master connection to become ready"); /* Attach a signal handler and wait for the master connection to begin activating */ g_assert (priv->master_ready_id == 0); priv->master_ready_id = g_signal_connect (active, "notify::" NM_ACTIVE_CONNECTION_INT_MASTER_READY, (GCallback) 
master_ready_cb, self); /* Postpone */ } } else nm_device_activate_schedule_stage2_device_config (self); out: _LOGI (LOGD_DEVICE, "Activation: Stage 1 of 5 (Device Prepare) complete."); return FALSE; } /* * nm_device_activate_schedule_stage1_device_prepare * * Prepare a device for activation * */ void nm_device_activate_schedule_stage1_device_prepare (NMDevice *self) { NMDevicePrivate *priv; g_return_if_fail (NM_IS_DEVICE (self)); priv = NM_DEVICE_GET_PRIVATE (self); g_return_if_fail (priv->act_request); activation_source_schedule (self, nm_device_activate_stage1_device_prepare, 0); _LOGI (LOGD_DEVICE, "Activation: Stage 1 of 5 (Device Prepare) scheduled..."); } static NMActStageReturn act_stage2_config (NMDevice *self, NMDeviceStateReason *reason) { /* Nothing to do */ return NM_ACT_STAGE_RETURN_SUCCESS; } /* * nm_device_activate_stage2_device_config * * Determine device parameters and set those on the device, ie * for wireless devices, set SSID, keys, etc. * */ static gboolean nm_device_activate_stage2_device_config (gpointer user_data) { NMDevice *self = NM_DEVICE (user_data); NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMActStageReturn ret; NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE; gboolean no_firmware = FALSE; NMActiveConnection *active = NM_ACTIVE_CONNECTION (priv->act_request); GSList *iter; /* Clear the activation source ID now that this stage has run */ activation_source_clear (self, FALSE, 0); _LOGI (LOGD_DEVICE, "Activation: Stage 2 of 5 (Device Configure) starting..."); nm_device_state_changed (self, NM_DEVICE_STATE_CONFIG, NM_DEVICE_STATE_REASON_NONE); /* Assumed connections were already set up outside NetworkManager */ if (!nm_active_connection_get_assumed (active)) { if (!nm_device_bring_up (self, FALSE, &no_firmware)) { if (no_firmware) nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_FIRMWARE_MISSING); else nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_CONFIG_FAILED); goto out; } ret = NM_DEVICE_GET_CLASS (self)->act_stage2_config (self, &reason); if (ret == NM_ACT_STAGE_RETURN_POSTPONE) goto out; else if (ret == NM_ACT_STAGE_RETURN_FAILURE) { nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason); goto out; } g_assert (ret == NM_ACT_STAGE_RETURN_SUCCESS); } /* If we have slaves that aren't yet enslaved, do that now */ for (iter = priv->slaves; iter; iter = g_slist_next (iter)) { SlaveInfo *info = iter->data; NMDeviceState slave_state = nm_device_get_state (info->slave); if (slave_state == NM_DEVICE_STATE_IP_CONFIG) nm_device_enslave_slave (self, info->slave, nm_device_get_connection (info->slave)); else if ( nm_device_uses_generated_assumed_connection (self) && slave_state <= NM_DEVICE_STATE_DISCONNECTED) nm_device_queue_recheck_assume (info->slave); } _LOGI (LOGD_DEVICE, "Activation: Stage 2 of 5 (Device Configure) successful."); nm_device_activate_schedule_stage3_ip_config_start (self); out: _LOGI (LOGD_DEVICE, "Activation: Stage 2 of 5 (Device Configure) complete."); return FALSE; } /* * nm_device_activate_schedule_stage2_device_config * * Schedule setup of the hardware device * */ void nm_device_activate_schedule_stage2_device_config (NMDevice *self) { NMDevicePrivate *priv; g_return_if_fail (NM_IS_DEVICE (self)); priv = NM_DEVICE_GET_PRIVATE (self); g_return_if_fail (priv->act_request); activation_source_schedule (self, nm_device_activate_stage2_device_config, 0); _LOGI (LOGD_DEVICE, "Activation: Stage 2 of 5 (Device Configure) scheduled..."); } 
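/*
 * Activation overview (summary of the stages implemented in this file):
 * each stage runs from an idle source queued by activation_source_schedule(),
 * which skips the request when the very same stage function is already
 * pending, and every stage clears its source ID as soon as it starts.  The
 * usual hand-off, roughly sketched with the functions above and below, is:
 *
 *   nm_device_activate_schedule_stage1_device_prepare (self);
 *     -> nm_device_activate_stage1_device_prepare ()      (Device Prepare)
 *     -> nm_device_activate_schedule_stage2_device_config ()
 *     -> nm_device_activate_stage2_device_config ()       (Device Configure)
 *     -> nm_device_activate_schedule_stage3_ip_config_start ()
 *     -> IP configuration: dhcp4_start (), aipd_start (), shared4_new_config ()
 *
 * This is a sketch only; the exact flow also depends on master/slave ordering
 * and on whether the active connection is assumed.
 */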
/*********************************************/ /* avahi-autoipd stuff */ static void aipd_timeout_remove (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->aipd_timeout) { g_source_remove (priv->aipd_timeout); priv->aipd_timeout = 0; } } static void aipd_cleanup (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->aipd_watch) { g_source_remove (priv->aipd_watch); priv->aipd_watch = 0; } if (priv->aipd_pid > 0) { nm_utils_kill_child_sync (priv->aipd_pid, SIGKILL, LOGD_AUTOIP4, "avahi-autoipd", NULL, 0, 0); priv->aipd_pid = -1; } aipd_timeout_remove (self); } static NMIP4Config * aipd_get_ip4_config (NMDevice *self, guint32 lla) { NMIP4Config *config = NULL; NMPlatformIP4Address address; NMPlatformIP4Route route; config = nm_ip4_config_new (nm_device_get_ip_ifindex (self)); g_assert (config); memset (&address, 0, sizeof (address)); address.address = lla; address.plen = 16; address.source = NM_IP_CONFIG_SOURCE_IP4LL; nm_ip4_config_add_address (config, &address); /* Add a multicast route for link-local connections: destination= 224.0.0.0, netmask=240.0.0.0 */ memset (&route, 0, sizeof (route)); route.network = htonl (0xE0000000L); route.plen = 4; route.source = NM_IP_CONFIG_SOURCE_IP4LL; route.metric = nm_device_get_ip4_route_metric (self); nm_ip4_config_add_route (config, &route); return config; } #define IPV4LL_NETWORK (htonl (0xA9FE0000L)) #define IPV4LL_NETMASK (htonl (0xFFFF0000L)) void nm_device_handle_autoip4_event (NMDevice *self, const char *event, const char *address) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection = NULL; const char *method; NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE; g_return_if_fail (event != NULL); if (priv->act_request == NULL) return; connection = nm_act_request_get_connection (priv->act_request); g_assert (connection); /* Ignore if the connection isn't an AutoIP connection */ method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG); if (g_strcmp0 (method, NM_SETTING_IP4_CONFIG_METHOD_LINK_LOCAL) != 0) return; if (strcmp (event, "BIND") == 0) { guint32 lla; NMIP4Config *config; if (inet_pton (AF_INET, address, &lla) <= 0) { _LOGE (LOGD_AUTOIP4, "invalid address %s received from avahi-autoipd.", address); nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_AUTOIP_ERROR); return; } if ((lla & IPV4LL_NETMASK) != IPV4LL_NETWORK) { _LOGE (LOGD_AUTOIP4, "invalid address %s received from avahi-autoipd (not link-local).", address); nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_AUTOIP_ERROR); return; } config = aipd_get_ip4_config (self, lla); if (config == NULL) { _LOGE (LOGD_AUTOIP4, "failed to get autoip config"); nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_UNAVAILABLE); return; } if (priv->ip4_state == IP_CONF) { aipd_timeout_remove (self); nm_device_activate_schedule_ip4_config_result (self, config); } else if (priv->ip4_state == IP_DONE) { if (!ip4_config_merge_and_apply (self, config, TRUE, &reason)) { _LOGE (LOGD_AUTOIP4, "failed to update IP4 config for autoip change."); nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason); } } else g_assert_not_reached (); g_object_unref (config); } else { _LOGW (LOGD_AUTOIP4, "autoip address %s no longer valid because '%s'.", address, event); /* The address is gone; terminate the connection or fail activation */ nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, 
NM_DEVICE_STATE_REASON_IP_CONFIG_EXPIRED); } } static void aipd_watch_cb (GPid pid, gint status, gpointer user_data) { NMDevice *self = NM_DEVICE (user_data); NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMDeviceState state; if (!priv->aipd_watch) return; priv->aipd_watch = 0; if (WIFEXITED (status)) _LOGD (LOGD_AUTOIP4, "avahi-autoipd exited with error code %d", WEXITSTATUS (status)); else if (WIFSTOPPED (status)) _LOGW (LOGD_AUTOIP4, "avahi-autoipd stopped unexpectedly with signal %d", WSTOPSIG (status)); else if (WIFSIGNALED (status)) _LOGW (LOGD_AUTOIP4, "avahi-autoipd died with signal %d", WTERMSIG (status)); else _LOGW (LOGD_AUTOIP4, "avahi-autoipd died from an unknown cause"); aipd_cleanup (self); state = nm_device_get_state (self); if (nm_device_is_activating (self) || (state == NM_DEVICE_STATE_ACTIVATED)) nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_AUTOIP_FAILED); } static gboolean aipd_timeout_cb (gpointer user_data) { NMDevice *self = NM_DEVICE (user_data); NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->aipd_timeout) { _LOGI (LOGD_AUTOIP4, "avahi-autoipd timed out."); priv->aipd_timeout = 0; aipd_cleanup (self); if (priv->ip4_state == IP_CONF) nm_device_activate_schedule_ip4_config_timeout (self); } return FALSE; } /* default to installed helper, but can be modified for testing */ const char *nm_device_autoipd_helper_path = LIBEXECDIR "/nm-avahi-autoipd.action"; static NMActStageReturn aipd_start (NMDevice *self, NMDeviceStateReason *reason) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); const char *argv[6]; char *cmdline; const char *aipd_binary; int i = 0; GError *error = NULL; aipd_cleanup (self); /* Find avahi-autoipd */ aipd_binary = nm_utils_find_helper ("avahi-autoipd", NULL, NULL); if (!aipd_binary) { _LOGW (LOGD_DEVICE | LOGD_AUTOIP4, "Activation: Stage 3 of 5 (IP Configure Start) failed" " to start avahi-autoipd: not found"); *reason = NM_DEVICE_STATE_REASON_AUTOIP_START_FAILED; return NM_ACT_STAGE_RETURN_FAILURE; } argv[i++] = aipd_binary; argv[i++] = "--script"; argv[i++] = nm_device_autoipd_helper_path; if (nm_logging_enabled (LOGL_DEBUG, LOGD_AUTOIP4)) argv[i++] = "--debug"; argv[i++] = nm_device_get_ip_iface (self); argv[i++] = NULL; cmdline = g_strjoinv (" ", (char **) argv); _LOGD (LOGD_AUTOIP4, "running: %s", cmdline); g_free (cmdline); if (!g_spawn_async ("/", (char **) argv, NULL, G_SPAWN_DO_NOT_REAP_CHILD, nm_utils_setpgid, NULL, &(priv->aipd_pid), &error)) { _LOGW (LOGD_DEVICE | LOGD_AUTOIP4, "Activation: Stage 3 of 5 (IP Configure Start) failed" " to start avahi-autoipd: %s", error && error->message ? 
error->message : "(unknown)"); g_clear_error (&error); aipd_cleanup (self); return NM_ACT_STAGE_RETURN_FAILURE; } _LOGI (LOGD_DEVICE | LOGD_AUTOIP4, "Activation: Stage 3 of 5 (IP Configure Start) started" " avahi-autoipd..."); /* Monitor the child process so we know when it dies */ priv->aipd_watch = g_child_watch_add (priv->aipd_pid, aipd_watch_cb, self); /* Start a timeout to bound the address attempt */ priv->aipd_timeout = g_timeout_add_seconds (20, aipd_timeout_cb, self); return NM_ACT_STAGE_RETURN_POSTPONE; } /*********************************************/ static gboolean _device_get_default_route_from_platform (NMDevice *self, int addr_family, NMPlatformIPRoute *out_route) { gboolean success = FALSE; int ifindex = nm_device_get_ip_ifindex (self); GArray *routes; if (addr_family == AF_INET) routes = nm_platform_ip4_route_get_all (ifindex, NM_PLATFORM_GET_ROUTE_MODE_ONLY_DEFAULT); else routes = nm_platform_ip6_route_get_all (ifindex, NM_PLATFORM_GET_ROUTE_MODE_ONLY_DEFAULT); if (routes) { guint route_metric = G_MAXUINT32, m; const NMPlatformIPRoute *route = NULL, *r; guint i; /* if there are several default routes, find the one with the best metric */ for (i = 0; i < routes->len; i++) { if (addr_family == AF_INET) { r = (const NMPlatformIPRoute *) &g_array_index (routes, NMPlatformIP4Route, i); m = r->metric; } else { r = (const NMPlatformIPRoute *) &g_array_index (routes, NMPlatformIP6Route, i); m = nm_utils_ip6_route_metric_normalize (r->metric); } if (!route || m < route_metric) { route = r; route_metric = m; } } if (route) { if (addr_family == AF_INET) *((NMPlatformIP4Route *) out_route) = *((NMPlatformIP4Route *) route); else *((NMPlatformIP6Route *) out_route) = *((NMPlatformIP6Route *) route); success = TRUE; } g_array_free (routes, TRUE); } return success; } /*********************************************/ static void ensure_con_ipx_config (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); int ip_ifindex = nm_device_get_ip_ifindex (self); NMConnection *connection; g_assert (!!priv->con_ip4_config == !!priv->con_ip6_config); if (priv->con_ip4_config) return; connection = nm_device_get_connection (self); if (!connection) return; priv->con_ip4_config = nm_ip4_config_new (ip_ifindex); priv->con_ip6_config = nm_ip6_config_new (ip_ifindex); nm_ip4_config_merge_setting (priv->con_ip4_config, nm_connection_get_setting_ip4_config (connection), nm_device_get_ip4_route_metric (self)); nm_ip6_config_merge_setting (priv->con_ip6_config, nm_connection_get_setting_ip6_config (connection), nm_device_get_ip6_route_metric (self)); if (nm_device_uses_assumed_connection (self)) { /* For assumed connections ignore all addresses and routes. 
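 * Resetting them here means the static addresses and routes from the
 * settings are never merged into the composite config, so whatever was
 * configured on the interface externally is left untouched.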
*/ nm_ip4_config_reset_addresses (priv->con_ip4_config); nm_ip4_config_reset_routes (priv->con_ip4_config); nm_ip6_config_reset_addresses (priv->con_ip6_config); nm_ip6_config_reset_routes (priv->con_ip6_config); } } /*********************************************/ /* DHCPv4 stuff */ static void dhcp4_cleanup (NMDevice *self, gboolean stop, gboolean release) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->dhcp4_client) { /* Stop any ongoing DHCP transaction on this device */ if (priv->dhcp4_state_sigid) { g_signal_handler_disconnect (priv->dhcp4_client, priv->dhcp4_state_sigid); priv->dhcp4_state_sigid = 0; } nm_device_remove_pending_action (self, PENDING_ACTION_DHCP4, FALSE); if (stop) nm_dhcp_client_stop (priv->dhcp4_client, release); g_clear_object (&priv->dhcp4_client); } if (priv->dhcp4_config) { g_clear_object (&priv->dhcp4_config); g_object_notify (G_OBJECT (self), NM_DEVICE_DHCP4_CONFIG); } } static gboolean ip4_config_merge_and_apply (NMDevice *self, NMIP4Config *config, gboolean commit, NMDeviceStateReason *out_reason) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection; gboolean success; NMIP4Config *composite; gboolean has_direct_route; const guint32 default_route_metric = nm_device_get_ip4_route_metric (self); guint32 gateway; /* Merge all the configs into the composite config */ if (config) { g_clear_object (&priv->dev_ip4_config); priv->dev_ip4_config = g_object_ref (config); } composite = nm_ip4_config_new (nm_device_get_ip_ifindex (self)); ensure_con_ipx_config (self); if (priv->dev_ip4_config) nm_ip4_config_merge (composite, priv->dev_ip4_config); if (priv->vpn4_config) nm_ip4_config_merge (composite, priv->vpn4_config); if (priv->ext_ip4_config) nm_ip4_config_merge (composite, priv->ext_ip4_config); /* Merge WWAN config *last* to ensure modem-given settings overwrite * any external stuff set by pppd or other scripts. */ if (priv->wwan_ip4_config) nm_ip4_config_merge (composite, priv->wwan_ip4_config); /* Merge user overrides into the composite config. For assumed connection, * con_ip4_config is empty. */ if (priv->con_ip4_config) nm_ip4_config_merge (composite, priv->con_ip4_config); connection = nm_device_get_connection (self); /* Add the default route. * * We keep track of the default route of a device in a private field. * NMDevice needs to know the default route at this point, because the gateway * might require a direct route (see below). * * But also, we don't want to add the default route to priv->ip4_config, * because the default route from the setting might not be the same that * NMDefaultRouteManager eventually configures (because the it might * tweak the effective metric). */ /* unless we come to a different conclusion below, we have no default route and * the route is assumed. */ priv->default_route.v4_has = FALSE; priv->default_route.v4_is_assumed = TRUE; if (!commit) { /* during a non-commit event, we always pickup whatever is configured. */ goto END_ADD_DEFAULT_ROUTE; } if (nm_device_uses_assumed_connection (self)) goto END_ADD_DEFAULT_ROUTE; /* we are about to commit (for a non-assumed connection). Enforce whatever we have * configured. */ priv->default_route.v4_is_assumed = FALSE; if ( !connection || !nm_default_route_manager_ip4_connection_has_default_route (nm_default_route_manager_get (), connection)) goto END_ADD_DEFAULT_ROUTE; if (!nm_ip4_config_get_num_addresses (composite)) { /* without addresses we can have no default route. 
*/ goto END_ADD_DEFAULT_ROUTE; } gateway = nm_ip4_config_get_gateway (composite); if ( !gateway && nm_device_get_device_type (self) != NM_DEVICE_TYPE_MODEM) goto END_ADD_DEFAULT_ROUTE; has_direct_route = ( gateway == 0 || nm_ip4_config_get_subnet_for_host (composite, gateway) || nm_ip4_config_get_direct_route_for_host (composite, gateway)); priv->default_route.v4_has = TRUE; memset (&priv->default_route.v4, 0, sizeof (priv->default_route.v4)); priv->default_route.v4.source = NM_IP_CONFIG_SOURCE_USER; priv->default_route.v4.gateway = gateway; priv->default_route.v4.metric = default_route_metric; priv->default_route.v4.mss = nm_ip4_config_get_mss (composite); if (!has_direct_route) { NMPlatformIP4Route r = priv->default_route.v4; /* add a direct route to the gateway */ r.network = gateway; r.plen = 32; r.gateway = 0; nm_ip4_config_add_route (composite, &r); } END_ADD_DEFAULT_ROUTE: if (priv->default_route.v4_is_assumed) { /* If above does not explicitly assign a default route, we always pick up the * default route based on what is currently configured. * That means that even managed connections with never-default, can * get a default route (if configured externally). */ priv->default_route.v4_has = _device_get_default_route_from_platform (self, AF_INET, (NMPlatformIPRoute *) &priv->default_route.v4); } /* Allow setting MTU etc */ if (commit) { if (NM_DEVICE_GET_CLASS (self)->ip4_config_pre_commit) NM_DEVICE_GET_CLASS (self)->ip4_config_pre_commit (self, composite); } success = nm_device_set_ip4_config (self, composite, default_route_metric, commit, out_reason); g_object_unref (composite); return success; } static void dhcp4_lease_change (NMDevice *self, NMIP4Config *config) { NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE; g_return_if_fail (config != NULL); if (!ip4_config_merge_and_apply (self, config, TRUE, &reason)) { _LOGW (LOGD_DHCP4, "failed to update IPv4 config for DHCP change."); nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason); } else { /* Notify dispatcher scripts of new DHCP4 config */ nm_dispatcher_call (DISPATCHER_ACTION_DHCP4_CHANGE, nm_device_get_connection (self), self, NULL, NULL, NULL); } } static void dhcp4_fail (NMDevice *self, gboolean timeout) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); dhcp4_cleanup (self, TRUE, FALSE); if (timeout || (priv->ip4_state == IP_CONF)) nm_device_activate_schedule_ip4_config_timeout (self); else if (priv->ip4_state == IP_DONE) nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_EXPIRED); else g_warn_if_reached (); } static void dhcp4_update_config (NMDevice *self, NMDhcp4Config *config, GHashTable *options) { GHashTableIter iter; const char *key, *value; /* Update the DHCP4 config object with new DHCP options */ nm_dhcp4_config_reset (config); g_hash_table_iter_init (&iter, options); while (g_hash_table_iter_next (&iter, (gpointer) &key, (gpointer) &value)) nm_dhcp4_config_add_option (config, key, value); g_object_notify (G_OBJECT (self), NM_DEVICE_DHCP4_CONFIG); } static void dhcp4_state_changed (NMDhcpClient *client, NMDhcpState state, NMIP4Config *ip4_config, GHashTable *options, gpointer user_data) { NMDevice *self = NM_DEVICE (user_data); NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); g_return_if_fail (nm_dhcp_client_get_ipv6 (client) == FALSE); g_return_if_fail (!ip4_config || NM_IS_IP4_CONFIG (ip4_config)); _LOGD (LOGD_DHCP4, "new DHCPv4 client state %d", state); switch (state) { case NM_DHCP_STATE_BOUND: if (!ip4_config) { _LOGW (LOGD_DHCP4, "failed to 
get IPv4 config in response to DHCP event."); nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_UNAVAILABLE); break; } dhcp4_update_config (self, priv->dhcp4_config, options); if (priv->ip4_state == IP_CONF) nm_device_activate_schedule_ip4_config_result (self, ip4_config); else if (priv->ip4_state == IP_DONE) dhcp4_lease_change (self, ip4_config); break; case NM_DHCP_STATE_TIMEOUT: dhcp4_fail (self, TRUE); break; case NM_DHCP_STATE_EXPIRE: /* Ignore expiry before we even have a lease (NAK, old lease, etc) */ if (priv->ip4_state == IP_CONF) break; /* Fall through */ case NM_DHCP_STATE_DONE: case NM_DHCP_STATE_FAIL: dhcp4_fail (self, FALSE); break; default: break; } } static NMActStageReturn dhcp4_start (NMDevice *self, NMConnection *connection, NMDeviceStateReason *reason) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMSettingIPConfig *s_ip4; const guint8 *hw_addr; size_t hw_addr_len = 0; GByteArray *tmp = NULL; s_ip4 = nm_connection_get_setting_ip4_config (connection); /* Clear old exported DHCP options */ if (priv->dhcp4_config) g_object_unref (priv->dhcp4_config); priv->dhcp4_config = nm_dhcp4_config_new (); hw_addr = nm_platform_link_get_address (nm_device_get_ip_ifindex (self), &hw_addr_len); if (hw_addr_len) { tmp = g_byte_array_sized_new (hw_addr_len); g_byte_array_append (tmp, hw_addr, hw_addr_len); } /* Begin DHCP on the interface */ g_warn_if_fail (priv->dhcp4_client == NULL); priv->dhcp4_client = nm_dhcp_manager_start_ip4 (nm_dhcp_manager_get (), nm_device_get_ip_iface (self), nm_device_get_ip_ifindex (self), tmp, nm_connection_get_uuid (connection), nm_device_get_ip4_route_metric (self), nm_setting_ip_config_get_dhcp_send_hostname (s_ip4), nm_setting_ip_config_get_dhcp_hostname (s_ip4), nm_setting_ip4_config_get_dhcp_client_id (NM_SETTING_IP4_CONFIG (s_ip4)), priv->dhcp_timeout, priv->dhcp_anycast_address, NULL); if (tmp) g_byte_array_free (tmp, TRUE); if (!priv->dhcp4_client) { *reason = NM_DEVICE_STATE_REASON_DHCP_START_FAILED; return NM_ACT_STAGE_RETURN_FAILURE; } priv->dhcp4_state_sigid = g_signal_connect (priv->dhcp4_client, NM_DHCP_CLIENT_SIGNAL_STATE_CHANGED, G_CALLBACK (dhcp4_state_changed), self); nm_device_add_pending_action (self, PENDING_ACTION_DHCP4, TRUE); /* DHCP devices will be notified by the DHCP manager when stuff happens */ return NM_ACT_STAGE_RETURN_POSTPONE; } gboolean nm_device_dhcp4_renew (NMDevice *self, gboolean release) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMActStageReturn ret; NMDeviceStateReason reason; NMConnection *connection; g_return_val_if_fail (priv->dhcp4_client != NULL, FALSE); _LOGI (LOGD_DHCP4, "DHCPv4 lease renewal requested"); /* Terminate old DHCP instance and release the old lease */ dhcp4_cleanup (self, TRUE, release); connection = nm_device_get_connection (self); g_assert (connection); /* Start DHCP again on the interface */ ret = dhcp4_start (self, connection, &reason); return (ret != NM_ACT_STAGE_RETURN_FAILURE); } /*********************************************/ static GHashTable *shared_ips = NULL; static void release_shared_ip (gpointer data) { g_hash_table_remove (shared_ips, data); } static gboolean reserve_shared_ip (NMDevice *self, NMSettingIPConfig *s_ip4, NMPlatformIP4Address *address) { if (G_UNLIKELY (shared_ips == NULL)) shared_ips = g_hash_table_new (g_direct_hash, g_direct_equal); memset (address, 0, sizeof (*address)); if (s_ip4 && nm_setting_ip_config_get_num_addresses (s_ip4)) { /* Use the first user-supplied address */ NMIPAddress *user = 
nm_setting_ip_config_get_address (s_ip4, 0); g_assert (user); nm_ip_address_get_address_binary (user, &address->address); address->plen = nm_ip_address_get_prefix (user); } else { /* Find an unused address in the 10.42.x.x range */ guint32 start = (guint32) ntohl (0x0a2a0001); /* 10.42.0.1 */ guint32 count = 0; while (g_hash_table_lookup (shared_ips, GUINT_TO_POINTER (start + count))) { count += ntohl (0x100); if (count > ntohl (0xFE00)) { _LOGE (LOGD_SHARING, "ran out of shared IP addresses!"); return FALSE; } } address->address = start + count; address->plen = 24; g_hash_table_insert (shared_ips, GUINT_TO_POINTER (address->address), GUINT_TO_POINTER (TRUE)); } return TRUE; } static NMIP4Config * shared4_new_config (NMDevice *self, NMConnection *connection, NMDeviceStateReason *reason) { NMIP4Config *config = NULL; NMPlatformIP4Address address; g_return_val_if_fail (self != NULL, NULL); if (!reserve_shared_ip (self, nm_connection_get_setting_ip4_config (connection), &address)) { *reason = NM_DEVICE_STATE_REASON_IP_CONFIG_UNAVAILABLE; return NULL; } config = nm_ip4_config_new (nm_device_get_ip_ifindex (self)); address.source = NM_IP_CONFIG_SOURCE_SHARED; nm_ip4_config_add_address (config, &address); /* Remove the address lock when the object gets disposed */ g_object_set_data_full (G_OBJECT (config), "shared-ip", GUINT_TO_POINTER (address.address), release_shared_ip); return config; } /*********************************************/ static gboolean connection_ip4_method_requires_carrier (NMConnection *connection, gboolean *out_ip4_enabled) { const char *method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG); static const char *ip4_carrier_methods[] = { NM_SETTING_IP4_CONFIG_METHOD_AUTO, NM_SETTING_IP4_CONFIG_METHOD_LINK_LOCAL, NULL }; if (out_ip4_enabled) *out_ip4_enabled = !!strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_DISABLED); return _nm_utils_string_in_list (method, ip4_carrier_methods); } static gboolean connection_ip6_method_requires_carrier (NMConnection *connection, gboolean *out_ip6_enabled) { const char *method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG); static const char *ip6_carrier_methods[] = { NM_SETTING_IP6_CONFIG_METHOD_AUTO, NM_SETTING_IP6_CONFIG_METHOD_DHCP, NM_SETTING_IP6_CONFIG_METHOD_LINK_LOCAL, NULL }; if (out_ip6_enabled) *out_ip6_enabled = !!strcmp (method, NM_SETTING_IP6_CONFIG_METHOD_IGNORE); return _nm_utils_string_in_list (method, ip6_carrier_methods); } static gboolean connection_requires_carrier (NMConnection *connection) { NMSettingIPConfig *s_ip4, *s_ip6; gboolean ip4_carrier_wanted, ip6_carrier_wanted; gboolean ip4_used = FALSE, ip6_used = FALSE; ip4_carrier_wanted = connection_ip4_method_requires_carrier (connection, &ip4_used); if (ip4_carrier_wanted) { /* If IPv4 wants a carrier and cannot fail, the whole connection * requires a carrier regardless of the IPv6 method. */ s_ip4 = nm_connection_get_setting_ip4_config (connection); if (s_ip4 && !nm_setting_ip_config_get_may_fail (s_ip4)) return TRUE; } ip6_carrier_wanted = connection_ip6_method_requires_carrier (connection, &ip6_used); if (ip6_carrier_wanted) { /* If IPv6 wants a carrier and cannot fail, the whole connection * requires a carrier regardless of the IPv4 method. */ s_ip6 = nm_connection_get_setting_ip6_config (connection); if (s_ip6 && !nm_setting_ip_config_get_may_fail (s_ip6)) return TRUE; } /* If an IP version wants a carrier and and the other IP version isn't * used, the connection requires carrier since it will just fail without one. 
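 * For the remaining combinations below: a carrier-requiring method on one
 * address family forces a carrier for the whole connection when the other
 * family is unused, or when both families want a carrier.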
*/ if (ip4_carrier_wanted && !ip6_used) return TRUE; if (ip6_carrier_wanted && !ip4_used) return TRUE; /* If both want a carrier, the whole connection wants a carrier */ return ip4_carrier_wanted && ip6_carrier_wanted; } static gboolean have_any_ready_slaves (NMDevice *self, const GSList *slaves) { const GSList *iter; /* Any enslaved slave is "ready" in the generic case as it's * at least >= NM_DEVCIE_STATE_IP_CONFIG and has had Layer 2 * properties set up. */ for (iter = slaves; iter; iter = g_slist_next (iter)) { if (nm_device_get_enslaved (iter->data)) return TRUE; } return FALSE; } static gboolean ip4_requires_slaves (NMConnection *connection) { const char *method; method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG); return strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_AUTO) == 0; } static NMActStageReturn act_stage3_ip4_config_start (NMDevice *self, NMIP4Config **out_config, NMDeviceStateReason *reason) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection; NMActStageReturn ret = NM_ACT_STAGE_RETURN_FAILURE; const char *method; GSList *slaves; gboolean ready_slaves; g_return_val_if_fail (reason != NULL, NM_ACT_STAGE_RETURN_FAILURE); connection = nm_device_get_connection (self); g_assert (connection); if ( connection_ip4_method_requires_carrier (connection, NULL) && priv->is_master && !priv->carrier) { _LOGI (LOGD_IP4 | LOGD_DEVICE, "IPv4 config waiting until carrier is on"); return NM_ACT_STAGE_RETURN_WAIT; } if (priv->is_master && ip4_requires_slaves (connection)) { /* If the master has no ready slaves, and depends on slaves for * a successful IPv4 attempt, then postpone IPv4 addressing. */ slaves = nm_device_master_get_slaves (self); ready_slaves = NM_DEVICE_GET_CLASS (self)->have_any_ready_slaves (self, slaves); g_slist_free (slaves); if (ready_slaves == FALSE) { _LOGI (LOGD_DEVICE | LOGD_IP4, "IPv4 config waiting until slaves are ready"); return NM_ACT_STAGE_RETURN_WAIT; } } method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG); /* Start IPv4 addressing based on the method requested */ if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_AUTO) == 0) ret = dhcp4_start (self, connection, reason); else if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_LINK_LOCAL) == 0) ret = aipd_start (self, reason); else if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_MANUAL) == 0) { /* Use only IPv4 config from the connection data */ *out_config = nm_ip4_config_new (nm_device_get_ip_ifindex (self)); g_assert (*out_config); ret = NM_ACT_STAGE_RETURN_SUCCESS; } else if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_SHARED) == 0) { *out_config = shared4_new_config (self, connection, reason); if (*out_config) { priv->dnsmasq_manager = nm_dnsmasq_manager_new (nm_device_get_ip_iface (self)); ret = NM_ACT_STAGE_RETURN_SUCCESS; } else ret = NM_ACT_STAGE_RETURN_FAILURE; } else if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_DISABLED) == 0) { /* Nothing to do... 
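 * IPv4 is disabled for this connection; returning NM_ACT_STAGE_RETURN_STOP
 * signals that no IPv4 configuration will be attempted.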
*/ ret = NM_ACT_STAGE_RETURN_STOP; } else _LOGW (LOGD_IP4, "unhandled IPv4 config method '%s'; will fail", method); return ret; } /*********************************************/ /* DHCPv6 stuff */ static void dhcp6_cleanup (NMDevice *self, gboolean stop, gboolean release) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); priv->dhcp6_mode = NM_RDISC_DHCP_LEVEL_NONE; g_clear_object (&priv->dhcp6_ip6_config); if (priv->dhcp6_client) { if (priv->dhcp6_state_sigid) { g_signal_handler_disconnect (priv->dhcp6_client, priv->dhcp6_state_sigid); priv->dhcp6_state_sigid = 0; } if (stop) nm_dhcp_client_stop (priv->dhcp6_client, release); g_clear_object (&priv->dhcp6_client); } nm_device_remove_pending_action (self, PENDING_ACTION_DHCP6, FALSE); if (priv->dhcp6_config) { g_clear_object (&priv->dhcp6_config); g_object_notify (G_OBJECT (self), NM_DEVICE_DHCP6_CONFIG); } } static gboolean ip6_config_merge_and_apply (NMDevice *self, gboolean commit, NMDeviceStateReason *out_reason) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection; gboolean success; NMIP6Config *composite; gboolean has_direct_route; const struct in6_addr *gateway; /* If no config was passed in, create a new one */ composite = nm_ip6_config_new (nm_device_get_ip_ifindex (self)); ensure_con_ipx_config (self); g_assert (composite); /* Merge all the IP configs into the composite config */ if (priv->ac_ip6_config) nm_ip6_config_merge (composite, priv->ac_ip6_config); if (priv->dhcp6_ip6_config) nm_ip6_config_merge (composite, priv->dhcp6_ip6_config); if (priv->vpn6_config) nm_ip6_config_merge (composite, priv->vpn6_config); if (priv->ext_ip6_config) nm_ip6_config_merge (composite, priv->ext_ip6_config); /* Merge WWAN config *last* to ensure modem-given settings overwrite * any external stuff set by pppd or other scripts. */ if (priv->wwan_ip6_config) nm_ip6_config_merge (composite, priv->wwan_ip6_config); /* Merge user overrides into the composite config. For assumed connections, * con_ip6_config is empty. */ if (priv->con_ip6_config) nm_ip6_config_merge (composite, priv->con_ip6_config); connection = nm_device_get_connection (self); /* Add the default route. * * We keep track of the default route of a device in a private field. * NMDevice needs to know the default route at this point, because the gateway * might require a direct route (see below). * * But also, we don't want to add the default route to priv->ip6_config, * because the default route from the setting might not be the same that * NMDefaultRouteManager eventually configures (because the it might * tweak the effective metric). */ /* unless we come to a different conclusion below, we have no default route and * the route is assumed. */ priv->default_route.v6_has = FALSE; priv->default_route.v6_is_assumed = TRUE; if (!commit) { /* during a non-commit event, we always pickup whatever is configured. */ goto END_ADD_DEFAULT_ROUTE; } if (nm_device_uses_assumed_connection (self)) goto END_ADD_DEFAULT_ROUTE; /* we are about to commit (for a non-assumed connection). Enforce whatever we have * configured. */ priv->default_route.v6_is_assumed = FALSE; if ( !connection || !nm_default_route_manager_ip6_connection_has_default_route (nm_default_route_manager_get (), connection)) goto END_ADD_DEFAULT_ROUTE; if (!nm_ip6_config_get_num_addresses (composite)) { /* without addresses we can have no default route. 
*/ goto END_ADD_DEFAULT_ROUTE; } gateway = nm_ip6_config_get_gateway (composite); if (!gateway) goto END_ADD_DEFAULT_ROUTE; has_direct_route = nm_ip6_config_get_direct_route_for_host (composite, gateway) != NULL; priv->default_route.v6_has = TRUE; memset (&priv->default_route.v6, 0, sizeof (priv->default_route.v6)); priv->default_route.v6.source = NM_IP_CONFIG_SOURCE_USER; priv->default_route.v6.gateway = *gateway; priv->default_route.v6.metric = nm_device_get_ip6_route_metric (self); priv->default_route.v6.mss = nm_ip6_config_get_mss (composite); if (!has_direct_route) { NMPlatformIP6Route r = priv->default_route.v6; /* add a direct route to the gateway */ r.network = *gateway; r.plen = 128; r.gateway = in6addr_any; nm_ip6_config_add_route (composite, &r); } END_ADD_DEFAULT_ROUTE: if (priv->default_route.v6_is_assumed) { /* If above does not explicitly assign a default route, we always pick up the * default route based on what is currently configured. * That means that even managed connections with never-default, can * get a default route (if configured externally). */ priv->default_route.v6_has = _device_get_default_route_from_platform (self, AF_INET6, (NMPlatformIPRoute *) &priv->default_route.v6); } nm_ip6_config_addresses_sort (composite, priv->rdisc ? priv->rdisc_use_tempaddr : NM_SETTING_IP6_CONFIG_PRIVACY_UNKNOWN); /* Allow setting MTU etc */ if (commit) { if (NM_DEVICE_GET_CLASS (self)->ip6_config_pre_commit) NM_DEVICE_GET_CLASS (self)->ip6_config_pre_commit (self, composite); } success = nm_device_set_ip6_config (self, composite, commit, out_reason); g_object_unref (composite); return success; } static void dhcp6_lease_change (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection; NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE; if (priv->dhcp6_ip6_config == NULL) { _LOGW (LOGD_DHCP6, "failed to get DHCPv6 config for rebind"); nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_EXPIRED); return; } g_assert (priv->dhcp6_client); /* sanity check */ connection = nm_device_get_connection (self); g_assert (connection); /* Apply the updated config */ if (ip6_config_merge_and_apply (self, TRUE, &reason) == FALSE) { _LOGW (LOGD_DHCP6, "failed to update IPv6 config in response to DHCP event."); nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason); } else { /* Notify dispatcher scripts of new DHCPv6 config */ nm_dispatcher_call (DISPATCHER_ACTION_DHCP6_CHANGE, connection, self, NULL, NULL, NULL); } } static void dhcp6_fail (NMDevice *self, gboolean timeout) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); dhcp6_cleanup (self, TRUE, FALSE); if (priv->dhcp6_mode == NM_RDISC_DHCP_LEVEL_MANAGED) { if (timeout || (priv->ip6_state == IP_CONF)) nm_device_activate_schedule_ip6_config_timeout (self); else if (priv->ip6_state == IP_DONE) nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_EXPIRED); else g_warn_if_reached (); } else { /* not a hard failure; just live with the RA info */ if (priv->ip6_state == IP_CONF) nm_device_activate_schedule_ip6_config_result (self); } } static void dhcp6_timeout (NMDevice *self, NMDhcpClient *client) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->dhcp6_mode == NM_RDISC_DHCP_LEVEL_MANAGED) dhcp6_fail (self, TRUE); else { /* not a hard failure; just live with the RA info */ dhcp6_cleanup (self, TRUE, FALSE); if (priv->ip6_state == IP_CONF) nm_device_activate_schedule_ip6_config_result (self); } } static 
void dhcp6_update_config (NMDevice *self, NMDhcp6Config *config, GHashTable *options) { GHashTableIter iter; const char *key, *value; /* Update the DHCP6 config object with new DHCP options */ nm_dhcp6_config_reset (config); g_hash_table_iter_init (&iter, options); while (g_hash_table_iter_next (&iter, (gpointer) &key, (gpointer) &value)) nm_dhcp6_config_add_option (config, key, value); g_object_notify (G_OBJECT (self), NM_DEVICE_DHCP6_CONFIG); } static void dhcp6_state_changed (NMDhcpClient *client, NMDhcpState state, NMIP6Config *ip6_config, GHashTable *options, gpointer user_data) { NMDevice *self = NM_DEVICE (user_data); NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); g_return_if_fail (nm_dhcp_client_get_ipv6 (client) == TRUE); g_return_if_fail (!ip6_config || NM_IS_IP6_CONFIG (ip6_config)); _LOGD (LOGD_DHCP6, "new DHCPv6 client state %d", state); switch (state) { case NM_DHCP_STATE_BOUND: g_clear_object (&priv->dhcp6_ip6_config); if (ip6_config) { priv->dhcp6_ip6_config = g_object_ref (ip6_config); dhcp6_update_config (self, priv->dhcp6_config, options); } if (priv->ip6_state == IP_CONF) { if (priv->dhcp6_ip6_config == NULL) { /* FIXME: Initial DHCP failed; should we fail IPv6 entirely then? */ nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_DHCP_FAILED); break; } nm_device_activate_schedule_ip6_config_result (self); } else if (priv->ip6_state == IP_DONE) dhcp6_lease_change (self); break; case NM_DHCP_STATE_TIMEOUT: dhcp6_timeout (self, client); break; case NM_DHCP_STATE_EXPIRE: /* Ignore expiry before we even have a lease (NAK, old lease, etc) */ if (priv->ip6_state != IP_CONF) dhcp6_fail (self, FALSE); break; case NM_DHCP_STATE_DONE: /* In IPv6 info-only mode, the client doesn't handle leases so it * may exit right after getting a response from the server. That's * normal. In that case we just ignore the exit. */ if (priv->dhcp6_mode == NM_RDISC_DHCP_LEVEL_OTHERCONF) break; /* Otherwise, fall through */ case NM_DHCP_STATE_FAIL: dhcp6_fail (self, FALSE); break; default: break; } } static gboolean dhcp6_start_with_link_ready (NMDevice *self, NMConnection *connection) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMSettingIPConfig *s_ip6; GByteArray *tmp = NULL; const guint8 *hw_addr; size_t hw_addr_len = 0; g_assert (connection); s_ip6 = nm_connection_get_setting_ip6_config (connection); g_assert (s_ip6); hw_addr = nm_platform_link_get_address (nm_device_get_ip_ifindex (self), &hw_addr_len); if (hw_addr_len) { tmp = g_byte_array_sized_new (hw_addr_len); g_byte_array_append (tmp, hw_addr, hw_addr_len); } priv->dhcp6_client = nm_dhcp_manager_start_ip6 (nm_dhcp_manager_get (), nm_device_get_ip_iface (self), nm_device_get_ip_ifindex (self), tmp, nm_connection_get_uuid (connection), nm_device_get_ip6_route_metric (self), nm_setting_ip_config_get_dhcp_send_hostname (s_ip6), nm_setting_ip_config_get_dhcp_hostname (s_ip6), priv->dhcp_timeout, priv->dhcp_anycast_address, (priv->dhcp6_mode == NM_RDISC_DHCP_LEVEL_OTHERCONF) ? 
TRUE : FALSE, nm_setting_ip6_config_get_ip6_privacy (NM_SETTING_IP6_CONFIG (s_ip6))); if (tmp) g_byte_array_free (tmp, TRUE); if (priv->dhcp6_client) { priv->dhcp6_state_sigid = g_signal_connect (priv->dhcp6_client, NM_DHCP_CLIENT_SIGNAL_STATE_CHANGED, G_CALLBACK (dhcp6_state_changed), self); } return !!priv->dhcp6_client; } static gboolean dhcp6_start (NMDevice *self, gboolean wait_for_ll, NMDeviceStateReason *reason) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection; NMSettingIPConfig *s_ip6; g_clear_object (&priv->dhcp6_config); priv->dhcp6_config = nm_dhcp6_config_new (); g_warn_if_fail (priv->dhcp6_ip6_config == NULL); g_clear_object (&priv->dhcp6_ip6_config); connection = nm_device_get_connection (self); g_assert (connection); s_ip6 = nm_connection_get_setting_ip6_config (connection); if (!nm_setting_ip_config_get_may_fail (s_ip6) || !strcmp (nm_setting_ip_config_get_method (s_ip6), NM_SETTING_IP6_CONFIG_METHOD_DHCP)) nm_device_add_pending_action (self, PENDING_ACTION_DHCP6, TRUE); if (wait_for_ll) { NMActStageReturn ret; /* ensure link local is ready... */ ret = linklocal6_start (self); if (ret == NM_ACT_STAGE_RETURN_POSTPONE) { /* success; wait for the LL address to show up */ return TRUE; } /* success; already have the LL address; kick off DHCP */ g_assert (ret == NM_ACT_STAGE_RETURN_SUCCESS); } if (!dhcp6_start_with_link_ready (self, connection)) { *reason = NM_DEVICE_STATE_REASON_DHCP_START_FAILED; return FALSE; } return TRUE; } gboolean nm_device_dhcp6_renew (NMDevice *self, gboolean release) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); g_return_val_if_fail (priv->dhcp6_client != NULL, FALSE); _LOGI (LOGD_DHCP6, "DHCPv6 lease renewal requested"); /* Terminate old DHCP instance and release the old lease */ dhcp6_cleanup (self, TRUE, release); /* Start DHCP again on the interface */ return dhcp6_start (self, FALSE, NULL); } /******************************************/ static gboolean have_ip6_address (const NMIP6Config *ip6_config, gboolean linklocal) { guint i; if (!ip6_config) return FALSE; linklocal = !!linklocal; for (i = 0; i < nm_ip6_config_get_num_addresses (ip6_config); i++) { const NMPlatformIP6Address *addr = nm_ip6_config_get_address (ip6_config, i); if ((IN6_IS_ADDR_LINKLOCAL (&addr->address) == linklocal) && !(addr->flags & IFA_F_TENTATIVE)) return TRUE; } return FALSE; } static void linklocal6_cleanup (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->linklocal6_timeout_id) { g_source_remove (priv->linklocal6_timeout_id); priv->linklocal6_timeout_id = 0; } } static gboolean linklocal6_timeout_cb (gpointer user_data) { NMDevice *self = user_data; linklocal6_cleanup (self); _LOGD (LOGD_DEVICE, "linklocal6: waiting for link-local addresses failed due to timeout"); nm_device_activate_schedule_ip6_config_timeout (self); return G_SOURCE_REMOVE; } static void linklocal6_complete (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection; const char *method; g_assert (priv->linklocal6_timeout_id); g_assert (have_ip6_address (priv->ip6_config, TRUE)); linklocal6_cleanup (self); connection = nm_device_get_connection (self); g_assert (connection); method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG); _LOGD (LOGD_DEVICE, "linklocal6: waiting for link-local addresses successful, continue with method %s", method); if (strcmp (method, NM_SETTING_IP6_CONFIG_METHOD_AUTO) == 0) { if (!addrconf6_start_with_link_ready (self)) { /* Time out IPv6 
instead of failing the entire activation */ nm_device_activate_schedule_ip6_config_timeout (self); } } else if (strcmp (method, NM_SETTING_IP6_CONFIG_METHOD_DHCP) == 0) { if (!dhcp6_start_with_link_ready (self, connection)) { /* Time out IPv6 instead of failing the entire activation */ nm_device_activate_schedule_ip6_config_timeout (self); } } else if (strcmp (method, NM_SETTING_IP6_CONFIG_METHOD_LINK_LOCAL) == 0) nm_device_activate_schedule_ip6_config_result (self); else g_return_if_fail (FALSE); } static void check_and_add_ipv6ll_addr (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); int ip_ifindex = nm_device_get_ip_ifindex (self); NMUtilsIPv6IfaceId iid; struct in6_addr lladdr; guint i, n; if (priv->nm_ipv6ll == FALSE) return; if (priv->ip6_config) { n = nm_ip6_config_get_num_addresses (priv->ip6_config); for (i = 0; i < n; i++) { const NMPlatformIP6Address *addr; addr = nm_ip6_config_get_address (priv->ip6_config, i); if (IN6_IS_ADDR_LINKLOCAL (&addr->address)) { /* Already have an LL address, nothing to do */ return; } } } if (!nm_device_get_ip_iface_identifier (self, &iid)) { _LOGW (LOGD_IP6, "failed to get interface identifier; IPv6 may be broken"); return; } memset (&lladdr, 0, sizeof (lladdr)); lladdr.s6_addr16[0] = htons (0xfe80); nm_utils_ipv6_addr_set_interface_identfier (&lladdr, iid); _LOGD (LOGD_IP6, "adding IPv6LL address %s", nm_utils_inet6_ntop (&lladdr, NULL)); if (!nm_platform_ip6_address_add (ip_ifindex, lladdr, in6addr_any, 64, NM_PLATFORM_LIFETIME_PERMANENT, NM_PLATFORM_LIFETIME_PERMANENT, 0)) { _LOGW (LOGD_IP6, "failed to add IPv6 link-local address %s", nm_utils_inet6_ntop (&lladdr, NULL)); } } static NMActStageReturn linklocal6_start (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection; const char *method; linklocal6_cleanup (self); if (have_ip6_address (priv->ip6_config, TRUE)) return NM_ACT_STAGE_RETURN_SUCCESS; connection = nm_device_get_connection (self); g_assert (connection); method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG); _LOGD (LOGD_DEVICE, "linklocal6: starting IPv6 with method '%s', but the device has no link-local addresses configured. Wait.", method); check_and_add_ipv6ll_addr (self); priv->linklocal6_timeout_id = g_timeout_add_seconds (5, linklocal6_timeout_cb, self); return NM_ACT_STAGE_RETURN_POSTPONE; } /******************************************/ static void print_support_extended_ifa_flags (NMSettingIP6ConfigPrivacy use_tempaddr) { static gint8 warn = 0; static gint8 s_libnl = -1, s_kernel; if (warn >= 2) return; if (s_libnl == -1) { s_libnl = !!nm_platform_check_support_libnl_extended_ifa_flags (); s_kernel = !!nm_platform_check_support_kernel_extended_ifa_flags (); if (s_libnl && s_kernel) { nm_log_dbg (LOGD_IP6, "kernel and libnl support extended IFA_FLAGS (needed by NM for IPv6 private addresses)"); warn = 2; return; } } if ( use_tempaddr != NM_SETTING_IP6_CONFIG_PRIVACY_PREFER_TEMP_ADDR && use_tempaddr != NM_SETTING_IP6_CONFIG_PRIVACY_PREFER_PUBLIC_ADDR) { if (warn == 0) { nm_log_dbg (LOGD_IP6, "%s%s%s %s not support extended IFA_FLAGS (needed by NM for IPv6 private addresses)", !s_kernel ? "kernel" : "", !s_kernel && !s_libnl ? " and " : "", !s_libnl ? "libnl" : "", !s_kernel && !s_libnl ? "do" : "does"); warn = 1; } return; } if (!s_libnl && !s_kernel) { nm_log_warn (LOGD_IP6, "libnl and the kernel do not support extended IFA_FLAGS needed by NM for " "IPv6 private addresses. 
This feature is not available"); } else if (!s_libnl) { nm_log_warn (LOGD_IP6, "libnl does not support extended IFA_FLAGS needed by NM for " "IPv6 private addresses. This feature is not available"); } else if (!s_kernel) { nm_log_warn (LOGD_IP6, "The kernel does not support extended IFA_FLAGS needed by NM for " "IPv6 private addresses. This feature is not available"); } warn = 2; } static void rdisc_config_changed (NMRDisc *rdisc, NMRDiscConfigMap changed, NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); address.preferred = discovered_address->preferred; if (address.preferred > address.lifetime) address.preferred = address.lifetime; address.source = NM_IP_CONFIG_SOURCE_RDISC; address.flags = ifa_flags; nm_ip6_config_add_address (priv->ac_ip6_config, &address); } } Commit Message: CWE ID: CWE-20
device_has_capability (NMDevice *self, NMDeviceCapabilities caps) { { static guint32 devcount = 0; NMDevicePrivate *priv; g_return_if_fail (NM_IS_DEVICE (self)); priv = NM_DEVICE_GET_PRIVATE (self); g_return_if_fail (priv->path == NULL); priv->path = g_strdup_printf ("/org/freedesktop/NetworkManager/Devices/%d", devcount++); _LOGI (LOGD_DEVICE, "exported as %s", priv->path); nm_dbus_manager_register_object (nm_dbus_manager_get (), priv->path, self); } const char * nm_device_get_path (NMDevice *self) { g_return_val_if_fail (self != NULL, NULL); return NM_DEVICE_GET_PRIVATE (self)->path; } const char * nm_device_get_udi (NMDevice *self) { g_return_val_if_fail (self != NULL, NULL); return NM_DEVICE_GET_PRIVATE (self)->udi; } const char * nm_device_get_iface (NMDevice *self) { g_return_val_if_fail (NM_IS_DEVICE (self), 0); return NM_DEVICE_GET_PRIVATE (self)->iface; } int nm_device_get_ifindex (NMDevice *self) { g_return_val_if_fail (self != NULL, 0); return NM_DEVICE_GET_PRIVATE (self)->ifindex; } gboolean nm_device_is_software (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); return priv->is_software; } const char * nm_device_get_ip_iface (NMDevice *self) { NMDevicePrivate *priv; g_return_val_if_fail (self != NULL, NULL); priv = NM_DEVICE_GET_PRIVATE (self); /* If it's not set, default to iface */ return priv->ip_iface ? priv->ip_iface : priv->iface; } int nm_device_get_ip_ifindex (NMDevice *self) { NMDevicePrivate *priv; g_return_val_if_fail (self != NULL, 0); priv = NM_DEVICE_GET_PRIVATE (self); /* If it's not set, default to iface */ return priv->ip_iface ? priv->ip_ifindex : priv->ifindex; } void nm_device_set_ip_iface (NMDevice *self, const char *iface) { NMDevicePrivate *priv; char *old_ip_iface; g_return_if_fail (NM_IS_DEVICE (self)); priv = NM_DEVICE_GET_PRIVATE (self); if (!g_strcmp0 (iface, priv->ip_iface)) return; old_ip_iface = priv->ip_iface; priv->ip_ifindex = 0; priv->ip_iface = g_strdup (iface); if (priv->ip_iface) { priv->ip_ifindex = nm_platform_link_get_ifindex (priv->ip_iface); if (priv->ip_ifindex > 0) { if (nm_platform_check_support_user_ipv6ll ()) nm_platform_link_set_user_ipv6ll_enabled (priv->ip_ifindex, TRUE); if (!nm_platform_link_is_up (priv->ip_ifindex)) nm_platform_link_set_up (priv->ip_ifindex); } else { /* Device IP interface must always be a kernel network interface */ _LOGW (LOGD_HW, "failed to look up interface index"); } } /* We don't care about any saved values from the old iface */ g_hash_table_remove_all (priv->ip6_saved_properties); /* Emit change notification */ if (g_strcmp0 (old_ip_iface, priv->ip_iface)) g_object_notify (G_OBJECT (self), NM_DEVICE_IP_IFACE); g_free (old_ip_iface); } static gboolean get_ip_iface_identifier (NMDevice *self, NMUtilsIPv6IfaceId *out_iid) { NMLinkType link_type; const guint8 *hwaddr = NULL; size_t hwaddr_len = 0; int ifindex; gboolean success; /* If we get here, we *must* have a kernel netdev, which implies an ifindex */ ifindex = nm_device_get_ip_ifindex (self); g_assert (ifindex); link_type = nm_platform_link_get_type (ifindex); g_return_val_if_fail (link_type > NM_LINK_TYPE_UNKNOWN, 0); hwaddr = nm_platform_link_get_address (ifindex, &hwaddr_len); if (!hwaddr_len) return FALSE; success = nm_utils_get_ipv6_interface_identifier (link_type, hwaddr, hwaddr_len, out_iid); if (!success) { _LOGW (LOGD_HW, "failed to generate interface identifier " "for link type %u hwaddr_len %zu", link_type, hwaddr_len); } return success; } static gboolean nm_device_get_ip_iface_identifier (NMDevice *self, 
NMUtilsIPv6IfaceId *iid) { return NM_DEVICE_GET_CLASS (self)->get_ip_iface_identifier (self, iid); } const char * nm_device_get_driver (NMDevice *self) { g_return_val_if_fail (self != NULL, NULL); return NM_DEVICE_GET_PRIVATE (self)->driver; } const char * nm_device_get_driver_version (NMDevice *self) { g_return_val_if_fail (self != NULL, NULL); return NM_DEVICE_GET_PRIVATE (self)->driver_version; } NMDeviceType nm_device_get_device_type (NMDevice *self) { g_return_val_if_fail (NM_IS_DEVICE (self), NM_DEVICE_TYPE_UNKNOWN); return NM_DEVICE_GET_PRIVATE (self)->type; } /** * nm_device_get_priority(): * @self: the #NMDevice * * Returns: the device's routing priority. Lower numbers means a "better" * device, eg higher priority. */ int nm_device_get_priority (NMDevice *self) { g_return_val_if_fail (NM_IS_DEVICE (self), 1000); /* Device 'priority' is used for the default route-metric and is based on * the device type. The settings ipv4.route-metric and ipv6.route-metric * can overwrite this default. * * Currently for both IPv4 and IPv6 we use the same default values. * * The route-metric is used for the metric of the routes of device. * This also applies to the default route. Therefore it affects also * which device is the "best". * * For comparison, note that iproute2 by default adds IPv4 routes with * metric 0, and IPv6 routes with metric 1024. The latter is the IPv6 * "user default" in the kernel (NM_PLATFORM_ROUTE_METRIC_DEFAULT_IP6). * In kernel, the full uint32_t range is available for route * metrics (except for IPv6, where 0 means 1024). */ switch (nm_device_get_device_type (self)) { /* 50 is reserved for VPN (NM_VPN_ROUTE_METRIC_DEFAULT) */ case NM_DEVICE_TYPE_ETHERNET: return 100; case NM_DEVICE_TYPE_INFINIBAND: return 150; case NM_DEVICE_TYPE_ADSL: return 200; case NM_DEVICE_TYPE_WIMAX: return 250; case NM_DEVICE_TYPE_BOND: return 300; case NM_DEVICE_TYPE_TEAM: return 350; case NM_DEVICE_TYPE_VLAN: return 400; case NM_DEVICE_TYPE_BRIDGE: return 425; case NM_DEVICE_TYPE_MODEM: return 450; case NM_DEVICE_TYPE_BT: return 550; case NM_DEVICE_TYPE_WIFI: return 600; case NM_DEVICE_TYPE_OLPC_MESH: return 650; case NM_DEVICE_TYPE_GENERIC: return 950; case NM_DEVICE_TYPE_UNKNOWN: return 10000; case NM_DEVICE_TYPE_UNUSED1: case NM_DEVICE_TYPE_UNUSED2: /* omit default: to get compiler warning about missing switch cases */ break; } return 11000; } guint32 nm_device_get_ip4_route_metric (NMDevice *self) { NMConnection *connection; gint64 route_metric = -1; g_return_val_if_fail (NM_IS_DEVICE (self), G_MAXUINT32); connection = nm_device_get_connection (self); if (connection) route_metric = nm_setting_ip_config_get_route_metric (nm_connection_get_setting_ip4_config (connection)); return route_metric >= 0 ? route_metric : nm_device_get_priority (self); } guint32 nm_device_get_ip6_route_metric (NMDevice *self) { NMConnection *connection; gint64 route_metric = -1; g_return_val_if_fail (NM_IS_DEVICE (self), G_MAXUINT32); connection = nm_device_get_connection (self); if (connection) route_metric = nm_setting_ip_config_get_route_metric (nm_connection_get_setting_ip6_config (connection)); return route_metric >= 0 ? route_metric : nm_device_get_priority (self); } const NMPlatformIP4Route * nm_device_get_ip4_default_route (NMDevice *self, gboolean *out_is_assumed) { NMDevicePrivate *priv; g_return_val_if_fail (NM_IS_DEVICE (self), NULL); priv = NM_DEVICE_GET_PRIVATE (self); if (out_is_assumed) *out_is_assumed = priv->default_route.v4_is_assumed; return priv->default_route.v4_has ? 
&priv->default_route.v4 : NULL; } const NMPlatformIP6Route * nm_device_get_ip6_default_route (NMDevice *self, gboolean *out_is_assumed) { NMDevicePrivate *priv; g_return_val_if_fail (NM_IS_DEVICE (self), NULL); priv = NM_DEVICE_GET_PRIVATE (self); if (out_is_assumed) *out_is_assumed = priv->default_route.v6_is_assumed; return priv->default_route.v6_has ? &priv->default_route.v6 : NULL; } const char * nm_device_get_type_desc (NMDevice *self) { g_return_val_if_fail (self != NULL, NULL); return NM_DEVICE_GET_PRIVATE (self)->type_desc; } gboolean nm_device_has_carrier (NMDevice *self) { return NM_DEVICE_GET_PRIVATE (self)->carrier; } NMActRequest * nm_device_get_act_request (NMDevice *self) { g_return_val_if_fail (self != NULL, NULL); return NM_DEVICE_GET_PRIVATE (self)->act_request; } NMConnection * nm_device_get_connection (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); return priv->act_request ? nm_act_request_get_connection (priv->act_request) : NULL; } RfKillType nm_device_get_rfkill_type (NMDevice *self) { g_return_val_if_fail (NM_IS_DEVICE (self), FALSE); return NM_DEVICE_GET_PRIVATE (self)->rfkill_type; } static const char * nm_device_get_physical_port_id (NMDevice *self) { return NM_DEVICE_GET_PRIVATE (self)->physical_port_id; } /***********************************************************/ static gboolean nm_device_uses_generated_assumed_connection (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection; if ( priv->act_request && nm_active_connection_get_assumed (NM_ACTIVE_CONNECTION (priv->act_request))) { connection = nm_act_request_get_connection (priv->act_request); if ( connection && nm_settings_connection_get_nm_generated_assumed (NM_SETTINGS_CONNECTION (connection))) return TRUE; } return FALSE; } gboolean nm_device_uses_assumed_connection (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if ( priv->act_request && nm_active_connection_get_assumed (NM_ACTIVE_CONNECTION (priv->act_request))) return TRUE; return FALSE; } static SlaveInfo * find_slave_info (NMDevice *self, NMDevice *slave) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); SlaveInfo *info; GSList *iter; for (iter = priv->slaves; iter; iter = g_slist_next (iter)) { info = iter->data; if (info->slave == slave) return info; } return NULL; } static void free_slave_info (SlaveInfo *info) { g_signal_handler_disconnect (info->slave, info->watch_id); g_clear_object (&info->slave); memset (info, 0, sizeof (*info)); g_free (info); } /** * nm_device_enslave_slave: * @self: the master device * @slave: the slave device to enslave * @connection: (allow-none): the slave device's connection * * If @self is capable of enslaving other devices (ie it's a bridge, bond, team, * etc) then this function enslaves @slave. * * Returns: %TRUE on success, %FALSE on failure or if this device cannot enslave * other devices. 
*/ static gboolean nm_device_enslave_slave (NMDevice *self, NMDevice *slave, NMConnection *connection) { SlaveInfo *info; gboolean success = FALSE; gboolean configure; g_return_val_if_fail (self != NULL, FALSE); g_return_val_if_fail (slave != NULL, FALSE); g_return_val_if_fail (NM_DEVICE_GET_CLASS (self)->enslave_slave != NULL, FALSE); info = find_slave_info (self, slave); if (!info) return FALSE; if (info->enslaved) success = TRUE; else { configure = (info->configure && connection != NULL); if (configure) g_return_val_if_fail (nm_device_get_state (slave) >= NM_DEVICE_STATE_DISCONNECTED, FALSE); success = NM_DEVICE_GET_CLASS (self)->enslave_slave (self, slave, connection, configure); info->enslaved = success; } nm_device_slave_notify_enslave (info->slave, success); /* Ensure the device's hardware address is up-to-date; it often changes * when slaves change. */ nm_device_update_hw_address (self); /* Restart IP configuration if we're waiting for slaves. Do this * after updating the hardware address as IP config may need the * new address. */ if (success) { if (NM_DEVICE_GET_PRIVATE (self)->ip4_state == IP_WAIT) nm_device_activate_stage3_ip4_start (self); if (NM_DEVICE_GET_PRIVATE (self)->ip6_state == IP_WAIT) nm_device_activate_stage3_ip6_start (self); } return success; } /** * nm_device_release_one_slave: * @self: the master device * @slave: the slave device to release * @configure: whether @self needs to actually release @slave * @reason: the state change reason for the @slave * * If @self is capable of enslaving other devices (ie it's a bridge, bond, team, * etc) then this function releases the previously enslaved @slave and/or * updates the state of @self and @slave to reflect its release. * * Returns: %TRUE on success, %FALSE on failure, if this device cannot enslave * other devices, or if @slave was never enslaved. */ static gboolean nm_device_release_one_slave (NMDevice *self, NMDevice *slave, gboolean configure, NMDeviceStateReason reason) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); SlaveInfo *info; gboolean success = FALSE; g_return_val_if_fail (slave != NULL, FALSE); g_return_val_if_fail (NM_DEVICE_GET_CLASS (self)->release_slave != NULL, FALSE); info = find_slave_info (self, slave); if (!info) return FALSE; priv->slaves = g_slist_remove (priv->slaves, info); if (info->enslaved) { success = NM_DEVICE_GET_CLASS (self)->release_slave (self, slave, configure); /* The release_slave() implementation logs success/failure (in the * correct device-specific log domain), so we don't have to do anything. */ } if (!configure) { g_warn_if_fail (reason == NM_DEVICE_STATE_REASON_NONE || reason == NM_DEVICE_STATE_REASON_REMOVED); reason = NM_DEVICE_STATE_REASON_NONE; } else if (reason == NM_DEVICE_STATE_REASON_NONE) { g_warn_if_reached (); reason = NM_DEVICE_STATE_REASON_UNKNOWN; } nm_device_slave_notify_release (info->slave, reason); free_slave_info (info); /* Ensure the device's hardware address is up-to-date; it often changes * when slaves change. */ nm_device_update_hw_address (self); return success; } static gboolean is_software_external (NMDevice *self) { return nm_device_is_software (self) && !nm_device_get_is_nm_owned (self); } /** * nm_device_finish_init: * @self: the master device * * Whatever needs to be done post-initialization, when the device has a DBus * object name. 
*/ void nm_device_finish_init (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); g_assert (priv->initialized == FALSE); /* Do not manage externally created software devices until they are IFF_UP */ if ( is_software_external (self) && !nm_platform_link_is_up (priv->ifindex) && priv->ifindex > 0) nm_device_set_initial_unmanaged_flag (self, NM_UNMANAGED_EXTERNAL_DOWN, TRUE); if (priv->master) nm_device_enslave_slave (priv->master, self, NULL); priv->initialized = TRUE; } static void carrier_changed (NMDevice *self, gboolean carrier) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (!nm_device_get_managed (self)) return; nm_device_recheck_available_connections (self); /* ignore-carrier devices ignore all carrier-down events */ if (priv->ignore_carrier && !carrier) return; if (priv->is_master) { /* Bridge/bond/team carrier does not affect its own activation, * but when carrier comes on, if there are slaves waiting, * it will restart them. */ if (!carrier) return; if (nm_device_activate_ip4_state_in_wait (self)) nm_device_activate_stage3_ip4_start (self); if (nm_device_activate_ip6_state_in_wait (self)) nm_device_activate_stage3_ip6_start (self); return; } else if (nm_device_get_enslaved (self) && !carrier) { /* Slaves don't deactivate when they lose carrier; for * bonds/teams in particular that would be actively * counterproductive. */ return; } if (carrier) { g_warn_if_fail (priv->state >= NM_DEVICE_STATE_UNAVAILABLE); if (priv->state == NM_DEVICE_STATE_UNAVAILABLE) { nm_device_queue_state (self, NM_DEVICE_STATE_DISCONNECTED, NM_DEVICE_STATE_REASON_CARRIER); } else if (priv->state == NM_DEVICE_STATE_DISCONNECTED) { /* If the device is already in DISCONNECTED state without a carrier * (probably because it is tagged for carrier ignore) ensure that * when the carrier appears, auto connections are rechecked for * the device. 
*/ nm_device_emit_recheck_auto_activate (self); } } else { g_return_if_fail (priv->state >= NM_DEVICE_STATE_UNAVAILABLE); if (priv->state == NM_DEVICE_STATE_UNAVAILABLE) { if (nm_device_queued_state_peek (self) >= NM_DEVICE_STATE_DISCONNECTED) nm_device_queued_state_clear (self); } else { nm_device_queue_state (self, NM_DEVICE_STATE_UNAVAILABLE, NM_DEVICE_STATE_REASON_CARRIER); } } } #define LINK_DISCONNECT_DELAY 4 static gboolean link_disconnect_action_cb (gpointer user_data) { NMDevice *self = NM_DEVICE (user_data); NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); _LOGD (LOGD_DEVICE, "link disconnected (calling deferred action) (id=%u)", priv->carrier_defer_id); priv->carrier_defer_id = 0; _LOGI (LOGD_DEVICE, "link disconnected (calling deferred action)"); NM_DEVICE_GET_CLASS (self)->carrier_changed (self, FALSE); return FALSE; } static void link_disconnect_action_cancel (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->carrier_defer_id) { g_source_remove (priv->carrier_defer_id); _LOGD (LOGD_DEVICE, "link disconnected (canceling deferred action) (id=%u)", priv->carrier_defer_id); priv->carrier_defer_id = 0; } } void nm_device_set_carrier (NMDevice *self, gboolean carrier) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMDeviceClass *klass = NM_DEVICE_GET_CLASS (self); NMDeviceState state = nm_device_get_state (self); if (priv->carrier == carrier) return; priv->carrier = carrier; g_object_notify (G_OBJECT (self), NM_DEVICE_CARRIER); if (priv->carrier) { _LOGI (LOGD_DEVICE, "link connected"); link_disconnect_action_cancel (self); klass->carrier_changed (self, TRUE); if (priv->carrier_wait_id) { g_source_remove (priv->carrier_wait_id); priv->carrier_wait_id = 0; nm_device_remove_pending_action (self, "carrier wait", TRUE); _carrier_wait_check_queued_act_request (self); } } else if (state <= NM_DEVICE_STATE_DISCONNECTED) { _LOGI (LOGD_DEVICE, "link disconnected"); klass->carrier_changed (self, FALSE); } else { _LOGI (LOGD_DEVICE, "link disconnected (deferring action for %d seconds)", LINK_DISCONNECT_DELAY); priv->carrier_defer_id = g_timeout_add_seconds (LINK_DISCONNECT_DELAY, link_disconnect_action_cb, self); _LOGD (LOGD_DEVICE, "link disconnected (deferring action for %d seconds) (id=%u)", LINK_DISCONNECT_DELAY, priv->carrier_defer_id); } } static void update_for_ip_ifname_change (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); g_hash_table_remove_all (priv->ip6_saved_properties); if (priv->dhcp4_client) { if (!nm_device_dhcp4_renew (self, FALSE)) { nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_DHCP_FAILED); return; } } if (priv->dhcp6_client) { if (!nm_device_dhcp6_renew (self, FALSE)) { nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_DHCP_FAILED); return; } } if (priv->rdisc) { /* FIXME: todo */ } if (priv->dnsmasq_manager) { /* FIXME: todo */ } } static void device_set_master (NMDevice *self, int ifindex) { NMDevice *master; NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); master = nm_manager_get_device_by_ifindex (nm_manager_get (), ifindex); if (master && NM_DEVICE_GET_CLASS (master)->enslave_slave) { g_clear_object (&priv->master); priv->master = g_object_ref (master); nm_device_master_add_slave (master, self, FALSE); } else if (master) { _LOGI (LOGD_DEVICE, "enslaved to non-master-type device %s; ignoring", nm_device_get_iface (master)); } else { _LOGW (LOGD_DEVICE, "enslaved to unknown device %d %s", ifindex, nm_platform_link_get_name 
(ifindex)); } } static void device_link_changed (NMDevice *self, NMPlatformLink *info) { NMDeviceClass *klass = NM_DEVICE_GET_CLASS (self); NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMUtilsIPv6IfaceId token_iid; gboolean ip_ifname_changed = FALSE; if (info->udi && g_strcmp0 (info->udi, priv->udi)) { /* Update UDI to what udev gives us */ g_free (priv->udi); priv->udi = g_strdup (info->udi); g_object_notify (G_OBJECT (self), NM_DEVICE_UDI); } /* Update MTU if it has changed. */ if (priv->mtu != info->mtu) { priv->mtu = info->mtu; g_object_notify (G_OBJECT (self), NM_DEVICE_MTU); } if (info->name[0] && strcmp (priv->iface, info->name) != 0) { _LOGI (LOGD_DEVICE, "interface index %d renamed iface from '%s' to '%s'", priv->ifindex, priv->iface, info->name); g_free (priv->iface); priv->iface = g_strdup (info->name); /* If the device has no explicit ip_iface, then changing iface changes ip_iface too. */ ip_ifname_changed = !priv->ip_iface; g_object_notify (G_OBJECT (self), NM_DEVICE_IFACE); if (ip_ifname_changed) g_object_notify (G_OBJECT (self), NM_DEVICE_IP_IFACE); /* Re-match available connections against the new interface name */ nm_device_recheck_available_connections (self); /* Let any connections that use the new interface name have a chance * to auto-activate on the device. */ nm_device_emit_recheck_auto_activate (self); } /* Update slave status for external changes */ if (priv->enslaved && info->master != nm_device_get_ifindex (priv->master)) nm_device_release_one_slave (priv->master, self, FALSE, NM_DEVICE_STATE_REASON_NONE); if (info->master && !priv->enslaved) { device_set_master (self, info->master); if (priv->master) nm_device_enslave_slave (priv->master, self, NULL); } if (priv->rdisc && nm_platform_link_get_ipv6_token (priv->ifindex, &token_iid)) { _LOGD (LOGD_DEVICE, "IPv6 tokenized identifier present on device %s", priv->iface); if (nm_rdisc_set_iid (priv->rdisc, token_iid)) nm_rdisc_start (priv->rdisc); } if (klass->link_changed) klass->link_changed (self, info); /* Update DHCP, etc, if needed */ if (ip_ifname_changed) update_for_ip_ifname_change (self); if (priv->up != info->up) { priv->up = info->up; /* Manage externally-created software interfaces only when they are IFF_UP */ g_assert (priv->ifindex > 0); if (is_software_external (self)) { gboolean external_down = nm_device_get_unmanaged_flag (self, NM_UNMANAGED_EXTERNAL_DOWN); if (external_down && info->up) { if (nm_device_get_state (self) < NM_DEVICE_STATE_DISCONNECTED) { /* Ensure the assume check is queued before any queued state changes * from the transition to UNAVAILABLE. */ nm_device_queue_recheck_assume (self); /* Resetting the EXTERNAL_DOWN flag may change the device's state * to UNAVAILABLE. To ensure that the state change doesn't touch * the device before assumption occurs, pass * NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED as the reason. */ nm_device_set_unmanaged (self, NM_UNMANAGED_EXTERNAL_DOWN, FALSE, NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED); } else { /* Don't trigger a state change; if the device is in a * state higher than UNAVAILABLE, it is already IFF_UP * or an explicit activation request was received. */ priv->unmanaged_flags &= ~NM_UNMANAGED_EXTERNAL_DOWN; } } else if (!external_down && !info->up && nm_device_get_state (self) <= NM_DEVICE_STATE_DISCONNECTED) { /* If the device is already disconnected and is set !IFF_UP, * unmanage it. 
*/ nm_device_set_unmanaged (self, NM_UNMANAGED_EXTERNAL_DOWN, TRUE, NM_DEVICE_STATE_REASON_USER_REQUESTED); } } } } static void device_ip_link_changed (NMDevice *self, NMPlatformLink *info) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (info->name[0] && g_strcmp0 (priv->ip_iface, info->name)) { _LOGI (LOGD_DEVICE, "interface index %d renamed ip_iface (%d) from '%s' to '%s'", priv->ifindex, nm_device_get_ip_ifindex (self), priv->ip_iface, info->name); g_free (priv->ip_iface); priv->ip_iface = g_strdup (info->name); g_object_notify (G_OBJECT (self), NM_DEVICE_IP_IFACE); update_for_ip_ifname_change (self); } } static void link_changed_cb (NMPlatform *platform, int ifindex, NMPlatformLink *info, NMPlatformSignalChangeType change_type, NMPlatformReason reason, NMDevice *self) { if (change_type != NM_PLATFORM_SIGNAL_CHANGED) return; /* We don't filter by 'reason' because we are interested in *all* link * changes. For example a call to nm_platform_link_set_up() may result * in an internal carrier change (i.e. we ask the kernel to set IFF_UP * and it results in also setting IFF_LOWER_UP. */ if (ifindex == nm_device_get_ifindex (self)) device_link_changed (self, info); else if (ifindex == nm_device_get_ip_ifindex (self)) device_ip_link_changed (self, info); } static void link_changed (NMDevice *self, NMPlatformLink *info) { /* Update carrier from link event if applicable. */ if ( device_has_capability (self, NM_DEVICE_CAP_CARRIER_DETECT) && !device_has_capability (self, NM_DEVICE_CAP_NONSTANDARD_CARRIER)) nm_device_set_carrier (self, info->connected); } /** * nm_device_notify_component_added(): * @self: the #NMDevice * @component: the component being added by a plugin * * Called by the manager to notify the device that a new component has * been found. The device implementation should return %TRUE if it * wishes to claim the component, or %FALSE if it cannot. * * Returns: %TRUE to claim the component, %FALSE if the component cannot be * claimed. */ gboolean nm_device_notify_component_added (NMDevice *self, GObject *component) { if (NM_DEVICE_GET_CLASS (self)->component_added) return NM_DEVICE_GET_CLASS (self)->component_added (self, component); return FALSE; } /** * nm_device_owns_iface(): * @self: the #NMDevice * @iface: an interface name * * Called by the manager to ask if the device or any of its components owns * @iface. For example, a WWAN implementation would return %TRUE for an * ethernet interface name that was owned by the WWAN device's modem component, * because that ethernet interface is controlled by the WWAN device and cannot * be used independently of the WWAN device. 
* * Returns: %TRUE if @self or it's components owns the interface name, * %FALSE if not */ gboolean nm_device_owns_iface (NMDevice *self, const char *iface) { if (NM_DEVICE_GET_CLASS (self)->owns_iface) return NM_DEVICE_GET_CLASS (self)->owns_iface (self, iface); return FALSE; } NMConnection * nm_device_new_default_connection (NMDevice *self) { if (NM_DEVICE_GET_CLASS (self)->new_default_connection) return NM_DEVICE_GET_CLASS (self)->new_default_connection (self); return NULL; } static void slave_state_changed (NMDevice *slave, NMDeviceState slave_new_state, NMDeviceState slave_old_state, NMDeviceStateReason reason, NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); gboolean release = FALSE; _LOGD (LOGD_DEVICE, "slave %s state change %d (%s) -> %d (%s)", nm_device_get_iface (slave), slave_old_state, state_to_string (slave_old_state), slave_new_state, state_to_string (slave_new_state)); /* Don't try to enslave slaves until the master is ready */ if (priv->state < NM_DEVICE_STATE_CONFIG) return; if (slave_new_state == NM_DEVICE_STATE_IP_CONFIG) nm_device_enslave_slave (self, slave, nm_device_get_connection (slave)); else if (slave_new_state > NM_DEVICE_STATE_ACTIVATED) release = TRUE; else if ( slave_new_state <= NM_DEVICE_STATE_DISCONNECTED && slave_old_state > NM_DEVICE_STATE_DISCONNECTED) { /* Catch failures due to unavailable or unmanaged */ release = TRUE; } if (release) { nm_device_release_one_slave (self, slave, TRUE, reason); /* Bridge/bond/team interfaces are left up until manually deactivated */ if (priv->slaves == NULL && priv->state == NM_DEVICE_STATE_ACTIVATED) _LOGD (LOGD_DEVICE, "last slave removed; remaining activated"); } } /** * nm_device_master_add_slave: * @self: the master device * @slave: the slave device to enslave * @configure: pass %TRUE if the slave should be configured by the master, or * %FALSE if it is already configured outside NetworkManager * * If @self is capable of enslaving other devices (ie it's a bridge, bond, team, * etc) then this function adds @slave to the slave list for later enslavement. * * Returns: %TRUE on success, %FALSE on failure */ static gboolean nm_device_master_add_slave (NMDevice *self, NMDevice *slave, gboolean configure) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); SlaveInfo *info; g_return_val_if_fail (self != NULL, FALSE); g_return_val_if_fail (slave != NULL, FALSE); g_return_val_if_fail (NM_DEVICE_GET_CLASS (self)->enslave_slave != NULL, FALSE); if (configure) g_return_val_if_fail (nm_device_get_state (slave) >= NM_DEVICE_STATE_DISCONNECTED, FALSE); if (!find_slave_info (self, slave)) { info = g_malloc0 (sizeof (SlaveInfo)); info->slave = g_object_ref (slave); info->configure = configure; info->watch_id = g_signal_connect (slave, "state-changed", G_CALLBACK (slave_state_changed), self); priv->slaves = g_slist_append (priv->slaves, info); } nm_device_queue_recheck_assume (self); return TRUE; } /** * nm_device_master_get_slaves: * @self: the master device * * Returns: any slaves of which @self is the master. Caller owns returned list. 
*/ GSList * nm_device_master_get_slaves (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); GSList *slaves = NULL, *iter; for (iter = priv->slaves; iter; iter = g_slist_next (iter)) slaves = g_slist_prepend (slaves, ((SlaveInfo *) iter->data)->slave); return slaves; } /** * nm_device_master_get_slave_by_ifindex: * @self: the master device * @ifindex: the slave's interface index * * Returns: the slave with the given @ifindex of which @self is the master, * or %NULL if no device with @ifindex is a slave of @self. */ NMDevice * nm_device_master_get_slave_by_ifindex (NMDevice *self, int ifindex) { GSList *iter; for (iter = NM_DEVICE_GET_PRIVATE (self)->slaves; iter; iter = g_slist_next (iter)) { SlaveInfo *info = iter->data; if (nm_device_get_ip_ifindex (info->slave) == ifindex) return info->slave; } return NULL; } /** * nm_device_master_check_slave_physical_port: * @self: the master device * @slave: a slave device * @log_domain: domain to log a warning in * * Checks if @self already has a slave with the same #NMDevice:physical-port-id * as @slave, and logs a warning if so. */ void nm_device_master_check_slave_physical_port (NMDevice *self, NMDevice *slave, guint64 log_domain) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); const char *slave_physical_port_id, *existing_physical_port_id; SlaveInfo *info; GSList *iter; slave_physical_port_id = nm_device_get_physical_port_id (slave); if (!slave_physical_port_id) return; for (iter = priv->slaves; iter; iter = iter->next) { info = iter->data; if (info->slave == slave) continue; existing_physical_port_id = nm_device_get_physical_port_id (info->slave); if (!g_strcmp0 (slave_physical_port_id, existing_physical_port_id)) { _LOGW (log_domain, "slave %s shares a physical port with existing slave %s", nm_device_get_ip_iface (slave), nm_device_get_ip_iface (info->slave)); /* Since this function will get called for every slave, we only have * to warn about the first match we find; if there are other matches * later in the list, we will have already warned about them matching * @existing earlier. */ return; } } } /* release all slaves */ static void nm_device_master_release_slaves (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMDeviceStateReason reason; /* Don't release the slaves if this connection doesn't belong to NM. */ if (nm_device_uses_generated_assumed_connection (self)) return; reason = priv->state_reason; if (priv->state == NM_DEVICE_STATE_FAILED) reason = NM_DEVICE_STATE_REASON_DEPENDENCY_FAILED; while (priv->slaves) { SlaveInfo *info = priv->slaves->data; nm_device_release_one_slave (self, info->slave, TRUE, reason); } } /** * nm_device_get_master: * @self: the device * * If @self has been enslaved by another device, this returns that * device. Otherwise it returns %NULL. (In particular, note that if * @self is in the process of activating as a slave, but has not yet * been enslaved by its master, this will return %NULL.) * * Returns: (transfer none): @self's master, or %NULL */ NMDevice * nm_device_get_master (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->enslaved) return priv->master; else return NULL; } /** * nm_device_slave_notify_enslave: * @self: the slave device * @success: whether the enslaving operation succeeded * * Notifies a slave that either it has been enslaved, or else its master tried * to enslave it and failed. 
*/ static void nm_device_slave_notify_enslave (NMDevice *self, gboolean success) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection = nm_device_get_connection (self); gboolean activating = (priv->state == NM_DEVICE_STATE_IP_CONFIG); g_assert (priv->master); if (!priv->enslaved) { if (success) { if (activating) { _LOGI (LOGD_DEVICE, "Activation: connection '%s' enslaved, continuing activation", nm_connection_get_id (connection)); } else _LOGI (LOGD_DEVICE, "enslaved to %s", nm_device_get_iface (priv->master)); priv->enslaved = TRUE; g_object_notify (G_OBJECT (self), NM_DEVICE_MASTER); } else if (activating) { _LOGW (LOGD_DEVICE, "Activation: connection '%s' could not be enslaved", nm_connection_get_id (connection)); } } if (activating) { priv->ip4_state = IP_DONE; priv->ip6_state = IP_DONE; nm_device_queue_state (self, success ? NM_DEVICE_STATE_SECONDARIES : NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_NONE); } else nm_device_queue_recheck_assume (self); } /** * nm_device_slave_notify_release: * @self: the slave device * @reason: the reason associated with the state change * * Notifies a slave that it has been released, and why. */ static void nm_device_slave_notify_release (NMDevice *self, NMDeviceStateReason reason) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection = nm_device_get_connection (self); NMDeviceState new_state; const char *master_status; if ( reason != NM_DEVICE_STATE_REASON_NONE && priv->state > NM_DEVICE_STATE_DISCONNECTED && priv->state <= NM_DEVICE_STATE_ACTIVATED) { if (reason == NM_DEVICE_STATE_REASON_DEPENDENCY_FAILED) { new_state = NM_DEVICE_STATE_FAILED; master_status = "failed"; } else if (reason == NM_DEVICE_STATE_REASON_USER_REQUESTED) { new_state = NM_DEVICE_STATE_DEACTIVATING; master_status = "deactivated by user request"; } else { new_state = NM_DEVICE_STATE_DISCONNECTED; master_status = "deactivated"; } _LOGD (LOGD_DEVICE, "Activation: connection '%s' master %s", nm_connection_get_id (connection), master_status); nm_device_queue_state (self, new_state, reason); } else if (priv->master) _LOGI (LOGD_DEVICE, "released from master %s", nm_device_get_iface (priv->master)); else _LOGD (LOGD_DEVICE, "released from master%s", priv->enslaved ? "" : " (was not enslaved)"); if (priv->enslaved) { priv->enslaved = FALSE; g_object_notify (G_OBJECT (self), NM_DEVICE_MASTER); } } /** * nm_device_get_enslaved: * @self: the #NMDevice * * Returns: %TRUE if the device is enslaved to a master device (eg bridge or * bond or team), %FALSE if not */ gboolean nm_device_get_enslaved (NMDevice *self) { return NM_DEVICE_GET_PRIVATE (self)->enslaved; } /** * nm_device_removed: * @self: the #NMDevice * * Called by the manager when the device was removed. Releases the device from * the master in case it's enslaved. */ void nm_device_removed (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->enslaved) nm_device_release_one_slave (priv->master, self, FALSE, NM_DEVICE_STATE_REASON_REMOVED); } static gboolean is_available (NMDevice *self, NMDeviceCheckDevAvailableFlags flags) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->carrier || priv->ignore_carrier) return TRUE; if (NM_FLAGS_HAS (flags, NM_DEVICE_CHECK_DEV_AVAILABLE_IGNORE_CARRIER)) return TRUE; return FALSE; } /** * nm_device_is_available: * @self: the #NMDevice * @flags: additional flags to influence the check. Flags have the * meaning to increase the availability of a device. 
* * Checks if @self would currently be capable of activating a * connection. In particular, it checks that the device is ready (eg, * is not missing firmware), that it has carrier (if necessary), and * that any necessary external software (eg, ModemManager, * wpa_supplicant) is available. * * @self can only be in a state higher than * %NM_DEVICE_STATE_UNAVAILABLE when nm_device_is_available() returns * %TRUE. (But note that it can still be %NM_DEVICE_STATE_UNMANAGED * when it is available.) * * Returns: %TRUE or %FALSE */ gboolean nm_device_is_available (NMDevice *self, NMDeviceCheckDevAvailableFlags flags) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->firmware_missing) return FALSE; return NM_DEVICE_GET_CLASS (self)->is_available (self, flags); } gboolean nm_device_get_enabled (NMDevice *self) { g_return_val_if_fail (NM_IS_DEVICE (self), FALSE); if (NM_DEVICE_GET_CLASS (self)->get_enabled) return NM_DEVICE_GET_CLASS (self)->get_enabled (self); return TRUE; } void nm_device_set_enabled (NMDevice *self, gboolean enabled) { g_return_if_fail (NM_IS_DEVICE (self)); if (NM_DEVICE_GET_CLASS (self)->set_enabled) NM_DEVICE_GET_CLASS (self)->set_enabled (self, enabled); } /** * nm_device_get_autoconnect: * @self: the #NMDevice * * Returns: %TRUE if the device allows autoconnect connections, or %FALSE if the * device is explicitly blocking all autoconnect connections. Does not take * into account transient conditions like companion devices that may wish to * block the device. */ gboolean nm_device_get_autoconnect (NMDevice *self) { g_return_val_if_fail (NM_IS_DEVICE (self), FALSE); return NM_DEVICE_GET_PRIVATE (self)->autoconnect; } static void nm_device_set_autoconnect (NMDevice *self, gboolean autoconnect) { NMDevicePrivate *priv; g_return_if_fail (NM_IS_DEVICE (self)); priv = NM_DEVICE_GET_PRIVATE (self); if (priv->autoconnect == autoconnect) return; if (autoconnect) { /* Default-unmanaged devices never autoconnect */ if (!nm_device_get_default_unmanaged (self)) { priv->autoconnect = TRUE; g_object_notify (G_OBJECT (self), NM_DEVICE_AUTOCONNECT); } } else { priv->autoconnect = FALSE; g_object_notify (G_OBJECT (self), NM_DEVICE_AUTOCONNECT); } } static gboolean autoconnect_allowed_accumulator (GSignalInvocationHint *ihint, GValue *return_accu, const GValue *handler_return, gpointer data) { if (!g_value_get_boolean (handler_return)) g_value_set_boolean (return_accu, FALSE); return TRUE; } /** * nm_device_autoconnect_allowed: * @self: the #NMDevice * * Returns: %TRUE if the device can be auto-connected immediately, taking * transient conditions into account (like companion devices that may wish to * block autoconnect for a time). */ gboolean nm_device_autoconnect_allowed (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); GValue instance = G_VALUE_INIT; GValue retval = G_VALUE_INIT; if (priv->state < NM_DEVICE_STATE_DISCONNECTED || !priv->autoconnect) return FALSE; /* The 'autoconnect-allowed' signal is emitted on a device to allow * other listeners to block autoconnect on the device if they wish. * This is mainly used by the OLPC Mesh devices to block autoconnect * on their companion WiFi device as they share radio resources and * cannot be connected at the same time. 
*/ g_value_init (&instance, G_TYPE_OBJECT); g_value_set_object (&instance, self); g_value_init (&retval, G_TYPE_BOOLEAN); if (priv->autoconnect) g_value_set_boolean (&retval, TRUE); else g_value_set_boolean (&retval, FALSE); /* Use g_signal_emitv() rather than g_signal_emit() to avoid the return * value being changed if no handlers are connected */ g_signal_emitv (&instance, signals[AUTOCONNECT_ALLOWED], 0, &retval); g_value_unset (&instance); return g_value_get_boolean (&retval); } static gboolean can_auto_connect (NMDevice *self, NMConnection *connection, char **specific_object) { NMSettingConnection *s_con; s_con = nm_connection_get_setting_connection (connection); if (!nm_setting_connection_get_autoconnect (s_con)) return FALSE; return nm_device_check_connection_available (self, connection, NM_DEVICE_CHECK_CON_AVAILABLE_NONE, NULL); } /** * nm_device_can_auto_connect: * @self: an #NMDevice * @connection: a #NMConnection * @specific_object: (out) (transfer full): on output, the path of an * object associated with the returned connection, to be passed to * nm_manager_activate_connection(), or %NULL. * * Checks if @connection can be auto-activated on @self right now. * This requires, at a minimum, that the connection be compatible with * @self, and that it have the #NMSettingConnection:autoconnect property * set, and that the device allow auto connections. Some devices impose * additional requirements. (Eg, a Wi-Fi connection can only be activated * if its SSID was seen in the last scan.) * * Returns: %TRUE, if the @connection can be auto-activated. **/ gboolean nm_device_can_auto_connect (NMDevice *self, NMConnection *connection, char **specific_object) { g_return_val_if_fail (NM_IS_DEVICE (self), FALSE); g_return_val_if_fail (NM_IS_CONNECTION (connection), FALSE); g_return_val_if_fail (specific_object && !*specific_object, FALSE); if (nm_device_autoconnect_allowed (self)) return NM_DEVICE_GET_CLASS (self)->can_auto_connect (self, connection, specific_object); return FALSE; } static gboolean device_has_config (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); /* Check for IP configuration. */ if (priv->ip4_config && nm_ip4_config_get_num_addresses (priv->ip4_config)) return TRUE; if (priv->ip6_config && nm_ip6_config_get_num_addresses (priv->ip6_config)) return TRUE; /* The existence of a software device is good enough. */ if (nm_device_is_software (self)) return TRUE; /* Slaves are also configured by definition */ if (nm_platform_link_get_master (priv->ifindex) > 0) return TRUE; return FALSE; } /** * nm_device_master_update_slave_connection: * @self: the master #NMDevice * @slave: the slave #NMDevice * @connection: the #NMConnection to update with the slave settings * @GError: (out): error description * * Reads the slave configuration for @slave and updates @connection with those * properties. This invokes a virtual function on the master device @self. * * Returns: %TRUE if the configuration was read and @connection updated, * %FALSE on failure. 
*/ gboolean nm_device_master_update_slave_connection (NMDevice *self, NMDevice *slave, NMConnection *connection, GError **error) { NMDeviceClass *klass; gboolean success; g_return_val_if_fail (self, FALSE); g_return_val_if_fail (NM_IS_DEVICE (self), FALSE); g_return_val_if_fail (slave, FALSE); g_return_val_if_fail (connection, FALSE); g_return_val_if_fail (!error || !*error, FALSE); g_return_val_if_fail (nm_connection_get_setting_connection (connection), FALSE); g_return_val_if_fail (nm_device_get_iface (self), FALSE); klass = NM_DEVICE_GET_CLASS (self); if (klass->master_update_slave_connection) { success = klass->master_update_slave_connection (self, slave, connection, error); g_return_val_if_fail (!error || (success && !*error) || *error, success); return success; } g_set_error (error, NM_DEVICE_ERROR, NM_DEVICE_ERROR_FAILED, "master device '%s' cannot update a slave connection for slave device '%s' (master type not supported?)", nm_device_get_iface (self), nm_device_get_iface (slave)); return FALSE; } NMConnection * nm_device_generate_connection (NMDevice *self, NMDevice *master) { NMDeviceClass *klass = NM_DEVICE_GET_CLASS (self); NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); const char *ifname = nm_device_get_iface (self); NMConnection *connection; NMSetting *s_con; NMSetting *s_ip4; NMSetting *s_ip6; gs_free char *uuid = NULL; const char *ip4_method, *ip6_method; GError *error = NULL; /* If update_connection() is not implemented, just fail. */ if (!klass->update_connection) return NULL; /* Return NULL if device is unconfigured. */ if (!device_has_config (self)) { _LOGD (LOGD_DEVICE, "device has no existing configuration"); return NULL; } connection = nm_simple_connection_new (); s_con = nm_setting_connection_new (); uuid = nm_utils_uuid_generate (); g_object_set (s_con, NM_SETTING_CONNECTION_UUID, uuid, NM_SETTING_CONNECTION_ID, ifname, NM_SETTING_CONNECTION_AUTOCONNECT, FALSE, NM_SETTING_CONNECTION_INTERFACE_NAME, ifname, NM_SETTING_CONNECTION_TIMESTAMP, (guint64) time (NULL), NULL); if (klass->connection_type) g_object_set (s_con, NM_SETTING_CONNECTION_TYPE, klass->connection_type, NULL); nm_connection_add_setting (connection, s_con); /* If the device is a slave, update various slave settings */ if (master) { if (!nm_device_master_update_slave_connection (master, self, connection, &error)) { _LOGE (LOGD_DEVICE, "master device '%s' failed to update slave connection: %s", nm_device_get_iface (master), error ? error->message : "(unknown error)"); g_error_free (error); g_object_unref (connection); return NULL; } } else { /* Only regular and master devices get IP configuration; slaves do not */ s_ip4 = nm_ip4_config_create_setting (priv->ip4_config); nm_connection_add_setting (connection, s_ip4); s_ip6 = nm_ip6_config_create_setting (priv->ip6_config); nm_connection_add_setting (connection, s_ip6); } klass->update_connection (self, connection); /* Check the connection in case of update_connection() bug. */ if (!nm_connection_verify (connection, &error)) { _LOGE (LOGD_DEVICE, "Generated connection does not verify: %s", error->message); g_clear_error (&error); g_object_unref (connection); return NULL; } /* Ignore the connection if it has no IP configuration, * no slave configuration, and is not a master interface. 
*/ ip4_method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG); ip6_method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG); if ( g_strcmp0 (ip4_method, NM_SETTING_IP4_CONFIG_METHOD_DISABLED) == 0 && g_strcmp0 (ip6_method, NM_SETTING_IP6_CONFIG_METHOD_IGNORE) == 0 && !nm_setting_connection_get_master (NM_SETTING_CONNECTION (s_con)) && !priv->slaves) { _LOGD (LOGD_DEVICE, "ignoring generated connection (no IP and not in master-slave relationship)"); g_object_unref (connection); connection = NULL; } return connection; } gboolean nm_device_complete_connection (NMDevice *self, NMConnection *connection, const char *specific_object, const GSList *existing_connections, GError **error) { gboolean success = FALSE; g_return_val_if_fail (self != NULL, FALSE); g_return_val_if_fail (connection != NULL, FALSE); if (!NM_DEVICE_GET_CLASS (self)->complete_connection) { g_set_error (error, NM_DEVICE_ERROR, NM_DEVICE_ERROR_INVALID_CONNECTION, "Device class %s had no complete_connection method", G_OBJECT_TYPE_NAME (self)); return FALSE; } success = NM_DEVICE_GET_CLASS (self)->complete_connection (self, connection, specific_object, existing_connections, error); if (success) success = nm_connection_verify (connection, error); return success; } static gboolean check_connection_compatible (NMDevice *self, NMConnection *connection) { NMSettingConnection *s_con; const char *config_iface, *device_iface; s_con = nm_connection_get_setting_connection (connection); g_assert (s_con); config_iface = nm_setting_connection_get_interface_name (s_con); device_iface = nm_device_get_iface (self); if (config_iface && strcmp (config_iface, device_iface) != 0) return FALSE; return TRUE; } /** * nm_device_check_connection_compatible: * @self: an #NMDevice * @connection: an #NMConnection * * Checks if @connection could potentially be activated on @self. * This means only that @self has the proper capabilities, and that * @connection is not locked to some other device. It does not * necessarily mean that @connection could be activated on @self * right now. (Eg, it might refer to a Wi-Fi network that is not * currently available.) * * Returns: #TRUE if @connection could potentially be activated on * @self. */ gboolean nm_device_check_connection_compatible (NMDevice *self, NMConnection *connection) { g_return_val_if_fail (NM_IS_DEVICE (self), FALSE); g_return_val_if_fail (NM_IS_CONNECTION (connection), FALSE); return NM_DEVICE_GET_CLASS (self)->check_connection_compatible (self, connection); } /** * nm_device_can_assume_connections: * @self: #NMDevice instance * * This is a convenience function to determine whether connection assumption * is available for this device. * * Returns: %TRUE if the device is capable of assuming connections, %FALSE if not */ static gboolean nm_device_can_assume_connections (NMDevice *self) { return !!NM_DEVICE_GET_CLASS (self)->update_connection; } /** * nm_device_can_assume_active_connection: * @self: #NMDevice instance * * This is a convenience function to determine whether the device's active * connection can be assumed if NetworkManager restarts. This method returns * %TRUE if and only if the device can assume connections, and the device has * an active connection, and that active connection can be assumed. * * Returns: %TRUE if the device's active connection can be assumed, or %FALSE * if there is no active connection or the active connection cannot be * assumed. 
*/ gboolean nm_device_can_assume_active_connection (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection; const char *method; const char *assumable_ip6_methods[] = { NM_SETTING_IP6_CONFIG_METHOD_IGNORE, NM_SETTING_IP6_CONFIG_METHOD_AUTO, NM_SETTING_IP6_CONFIG_METHOD_DHCP, NM_SETTING_IP6_CONFIG_METHOD_LINK_LOCAL, NM_SETTING_IP6_CONFIG_METHOD_MANUAL, NULL }; const char *assumable_ip4_methods[] = { NM_SETTING_IP4_CONFIG_METHOD_DISABLED, NM_SETTING_IP6_CONFIG_METHOD_AUTO, NM_SETTING_IP6_CONFIG_METHOD_MANUAL, NULL }; if (!nm_device_can_assume_connections (self)) return FALSE; connection = nm_device_get_connection (self); if (!connection) return FALSE; /* Can't assume connections that aren't yet configured * FIXME: what about bridges/bonds waiting for slaves? */ if (priv->state < NM_DEVICE_STATE_IP_CONFIG) return FALSE; if (priv->ip4_state != IP_DONE && priv->ip6_state != IP_DONE) return FALSE; method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG); if (!_nm_utils_string_in_list (method, assumable_ip6_methods)) return FALSE; method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG); if (!_nm_utils_string_in_list (method, assumable_ip4_methods)) return FALSE; return TRUE; } static gboolean nm_device_emit_recheck_assume (gpointer self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); priv->recheck_assume_id = 0; if (!nm_device_get_act_request (self)) { _LOGD (LOGD_DEVICE, "emit RECHECK_ASSUME signal"); g_signal_emit (self, signals[RECHECK_ASSUME], 0); } return G_SOURCE_REMOVE; } void nm_device_queue_recheck_assume (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (nm_device_can_assume_connections (self) && !priv->recheck_assume_id) priv->recheck_assume_id = g_idle_add (nm_device_emit_recheck_assume, self); } void nm_device_emit_recheck_auto_activate (NMDevice *self) { g_signal_emit (self, signals[RECHECK_AUTO_ACTIVATE], 0); } static void dnsmasq_state_changed_cb (NMDnsMasqManager *manager, guint32 status, gpointer user_data) { NMDevice *self = NM_DEVICE (user_data); switch (status) { case NM_DNSMASQ_STATUS_DEAD: nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_SHARED_START_FAILED); break; default: break; } } static void activation_source_clear (NMDevice *self, gboolean remove_source, int family) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); guint *act_source_id; gpointer *act_source_func; if (family == AF_INET6) { act_source_id = &priv->act_source6_id; act_source_func = &priv->act_source6_func; } else { act_source_id = &priv->act_source_id; act_source_func = &priv->act_source_func; } if (*act_source_id) { if (remove_source) g_source_remove (*act_source_id); *act_source_id = 0; *act_source_func = NULL; } } static void activation_source_schedule (NMDevice *self, GSourceFunc func, int family) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); guint *act_source_id; gpointer *act_source_func; if (family == AF_INET6) { act_source_id = &priv->act_source6_id; act_source_func = &priv->act_source6_func; } else { act_source_id = &priv->act_source_id; act_source_func = &priv->act_source_func; } if (*act_source_id) _LOGE (LOGD_DEVICE, "activation stage already scheduled"); /* Don't bother rescheduling the same function that's about to * run anyway. Fixes issues with crappy wireless drivers sending * streams of associate events before NM has had a chance to process * the first one. 
*/ if (!*act_source_id || (*act_source_func != func)) { activation_source_clear (self, TRUE, family); *act_source_id = g_idle_add (func, self); *act_source_func = func; } } static gboolean get_ip_config_may_fail (NMDevice *self, int family) { NMConnection *connection; NMSettingIPConfig *s_ip = NULL; g_return_val_if_fail (self != NULL, TRUE); connection = nm_device_get_connection (self); g_assert (connection); /* Fail the connection if the failed IP method is required to complete */ switch (family) { case AF_INET: s_ip = nm_connection_get_setting_ip4_config (connection); break; case AF_INET6: s_ip = nm_connection_get_setting_ip6_config (connection); break; default: g_assert_not_reached (); } return nm_setting_ip_config_get_may_fail (s_ip); } static void master_ready_cb (NMActiveConnection *active, GParamSpec *pspec, NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMActiveConnection *master; g_assert (priv->state == NM_DEVICE_STATE_PREPARE); /* Notify a master device that it has a new slave */ g_assert (nm_active_connection_get_master_ready (active)); master = nm_active_connection_get_master (active); priv->master = g_object_ref (nm_active_connection_get_device (master)); nm_device_master_add_slave (priv->master, self, nm_active_connection_get_assumed (active) ? FALSE : TRUE); _LOGD (LOGD_DEVICE, "master connection ready; master device %s", nm_device_get_iface (priv->master)); if (priv->master_ready_id) { g_signal_handler_disconnect (active, priv->master_ready_id); priv->master_ready_id = 0; } nm_device_activate_schedule_stage2_device_config (self); } static NMActStageReturn act_stage1_prepare (NMDevice *self, NMDeviceStateReason *reason) { return NM_ACT_STAGE_RETURN_SUCCESS; } /* * nm_device_activate_stage1_device_prepare * * Prepare for device activation * */ static gboolean nm_device_activate_stage1_device_prepare (gpointer user_data) { NMDevice *self = NM_DEVICE (user_data); NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMActStageReturn ret = NM_ACT_STAGE_RETURN_SUCCESS; NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE; NMActiveConnection *active = NM_ACTIVE_CONNECTION (priv->act_request); /* Clear the activation source ID now that this stage has run */ activation_source_clear (self, FALSE, 0); priv->ip4_state = priv->ip6_state = IP_NONE; /* Notify the new ActiveConnection along with the state change */ g_object_notify (G_OBJECT (self), NM_DEVICE_ACTIVE_CONNECTION); _LOGI (LOGD_DEVICE, "Activation: Stage 1 of 5 (Device Prepare) started..."); nm_device_state_changed (self, NM_DEVICE_STATE_PREPARE, NM_DEVICE_STATE_REASON_NONE); /* Assumed connections were already set up outside NetworkManager */ if (!nm_active_connection_get_assumed (active)) { ret = NM_DEVICE_GET_CLASS (self)->act_stage1_prepare (self, &reason); if (ret == NM_ACT_STAGE_RETURN_POSTPONE) { goto out; } else if (ret == NM_ACT_STAGE_RETURN_FAILURE) { nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason); goto out; } g_assert (ret == NM_ACT_STAGE_RETURN_SUCCESS); } if (nm_active_connection_get_master (active)) { /* If the master connection is ready for slaves, attach ourselves */ if (nm_active_connection_get_master_ready (active)) master_ready_cb (active, NULL, self); else { _LOGD (LOGD_DEVICE, "waiting for master connection to become ready"); /* Attach a signal handler and wait for the master connection to begin activating */ g_assert (priv->master_ready_id == 0); priv->master_ready_id = g_signal_connect (active, "notify::" NM_ACTIVE_CONNECTION_INT_MASTER_READY, (GCallback) 
master_ready_cb, self); /* Postpone */ } } else nm_device_activate_schedule_stage2_device_config (self); out: _LOGI (LOGD_DEVICE, "Activation: Stage 1 of 5 (Device Prepare) complete."); return FALSE; } /* * nm_device_activate_schedule_stage1_device_prepare * * Prepare a device for activation * */ void nm_device_activate_schedule_stage1_device_prepare (NMDevice *self) { NMDevicePrivate *priv; g_return_if_fail (NM_IS_DEVICE (self)); priv = NM_DEVICE_GET_PRIVATE (self); g_return_if_fail (priv->act_request); activation_source_schedule (self, nm_device_activate_stage1_device_prepare, 0); _LOGI (LOGD_DEVICE, "Activation: Stage 1 of 5 (Device Prepare) scheduled..."); } static NMActStageReturn act_stage2_config (NMDevice *self, NMDeviceStateReason *reason) { /* Nothing to do */ return NM_ACT_STAGE_RETURN_SUCCESS; } /* * nm_device_activate_stage2_device_config * * Determine device parameters and set those on the device, ie * for wireless devices, set SSID, keys, etc. * */ static gboolean nm_device_activate_stage2_device_config (gpointer user_data) { NMDevice *self = NM_DEVICE (user_data); NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMActStageReturn ret; NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE; gboolean no_firmware = FALSE; NMActiveConnection *active = NM_ACTIVE_CONNECTION (priv->act_request); GSList *iter; /* Clear the activation source ID now that this stage has run */ activation_source_clear (self, FALSE, 0); _LOGI (LOGD_DEVICE, "Activation: Stage 2 of 5 (Device Configure) starting..."); nm_device_state_changed (self, NM_DEVICE_STATE_CONFIG, NM_DEVICE_STATE_REASON_NONE); /* Assumed connections were already set up outside NetworkManager */ if (!nm_active_connection_get_assumed (active)) { if (!nm_device_bring_up (self, FALSE, &no_firmware)) { if (no_firmware) nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_FIRMWARE_MISSING); else nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_CONFIG_FAILED); goto out; } ret = NM_DEVICE_GET_CLASS (self)->act_stage2_config (self, &reason); if (ret == NM_ACT_STAGE_RETURN_POSTPONE) goto out; else if (ret == NM_ACT_STAGE_RETURN_FAILURE) { nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason); goto out; } g_assert (ret == NM_ACT_STAGE_RETURN_SUCCESS); } /* If we have slaves that aren't yet enslaved, do that now */ for (iter = priv->slaves; iter; iter = g_slist_next (iter)) { SlaveInfo *info = iter->data; NMDeviceState slave_state = nm_device_get_state (info->slave); if (slave_state == NM_DEVICE_STATE_IP_CONFIG) nm_device_enslave_slave (self, info->slave, nm_device_get_connection (info->slave)); else if ( nm_device_uses_generated_assumed_connection (self) && slave_state <= NM_DEVICE_STATE_DISCONNECTED) nm_device_queue_recheck_assume (info->slave); } _LOGI (LOGD_DEVICE, "Activation: Stage 2 of 5 (Device Configure) successful."); nm_device_activate_schedule_stage3_ip_config_start (self); out: _LOGI (LOGD_DEVICE, "Activation: Stage 2 of 5 (Device Configure) complete."); return FALSE; } /* * nm_device_activate_schedule_stage2_device_config * * Schedule setup of the hardware device * */ void nm_device_activate_schedule_stage2_device_config (NMDevice *self) { NMDevicePrivate *priv; g_return_if_fail (NM_IS_DEVICE (self)); priv = NM_DEVICE_GET_PRIVATE (self); g_return_if_fail (priv->act_request); activation_source_schedule (self, nm_device_activate_stage2_device_config, 0); _LOGI (LOGD_DEVICE, "Activation: Stage 2 of 5 (Device Configure) scheduled..."); } 
/*********************************************/ /* avahi-autoipd stuff */ static void aipd_timeout_remove (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->aipd_timeout) { g_source_remove (priv->aipd_timeout); priv->aipd_timeout = 0; } } static void aipd_cleanup (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->aipd_watch) { g_source_remove (priv->aipd_watch); priv->aipd_watch = 0; } if (priv->aipd_pid > 0) { nm_utils_kill_child_sync (priv->aipd_pid, SIGKILL, LOGD_AUTOIP4, "avahi-autoipd", NULL, 0, 0); priv->aipd_pid = -1; } aipd_timeout_remove (self); } static NMIP4Config * aipd_get_ip4_config (NMDevice *self, guint32 lla) { NMIP4Config *config = NULL; NMPlatformIP4Address address; NMPlatformIP4Route route; config = nm_ip4_config_new (nm_device_get_ip_ifindex (self)); g_assert (config); memset (&address, 0, sizeof (address)); address.address = lla; address.plen = 16; address.source = NM_IP_CONFIG_SOURCE_IP4LL; nm_ip4_config_add_address (config, &address); /* Add a multicast route for link-local connections: destination= 224.0.0.0, netmask=240.0.0.0 */ memset (&route, 0, sizeof (route)); route.network = htonl (0xE0000000L); route.plen = 4; route.source = NM_IP_CONFIG_SOURCE_IP4LL; route.metric = nm_device_get_ip4_route_metric (self); nm_ip4_config_add_route (config, &route); return config; } #define IPV4LL_NETWORK (htonl (0xA9FE0000L)) #define IPV4LL_NETMASK (htonl (0xFFFF0000L)) void nm_device_handle_autoip4_event (NMDevice *self, const char *event, const char *address) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection = NULL; const char *method; NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE; g_return_if_fail (event != NULL); if (priv->act_request == NULL) return; connection = nm_act_request_get_connection (priv->act_request); g_assert (connection); /* Ignore if the connection isn't an AutoIP connection */ method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG); if (g_strcmp0 (method, NM_SETTING_IP4_CONFIG_METHOD_LINK_LOCAL) != 0) return; if (strcmp (event, "BIND") == 0) { guint32 lla; NMIP4Config *config; if (inet_pton (AF_INET, address, &lla) <= 0) { _LOGE (LOGD_AUTOIP4, "invalid address %s received from avahi-autoipd.", address); nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_AUTOIP_ERROR); return; } if ((lla & IPV4LL_NETMASK) != IPV4LL_NETWORK) { _LOGE (LOGD_AUTOIP4, "invalid address %s received from avahi-autoipd (not link-local).", address); nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_AUTOIP_ERROR); return; } config = aipd_get_ip4_config (self, lla); if (config == NULL) { _LOGE (LOGD_AUTOIP4, "failed to get autoip config"); nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_UNAVAILABLE); return; } if (priv->ip4_state == IP_CONF) { aipd_timeout_remove (self); nm_device_activate_schedule_ip4_config_result (self, config); } else if (priv->ip4_state == IP_DONE) { if (!ip4_config_merge_and_apply (self, config, TRUE, &reason)) { _LOGE (LOGD_AUTOIP4, "failed to update IP4 config for autoip change."); nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason); } } else g_assert_not_reached (); g_object_unref (config); } else { _LOGW (LOGD_AUTOIP4, "autoip address %s no longer valid because '%s'.", address, event); /* The address is gone; terminate the connection or fail activation */ nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, 
NM_DEVICE_STATE_REASON_IP_CONFIG_EXPIRED); } } static void aipd_watch_cb (GPid pid, gint status, gpointer user_data) { NMDevice *self = NM_DEVICE (user_data); NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMDeviceState state; if (!priv->aipd_watch) return; priv->aipd_watch = 0; if (WIFEXITED (status)) _LOGD (LOGD_AUTOIP4, "avahi-autoipd exited with error code %d", WEXITSTATUS (status)); else if (WIFSTOPPED (status)) _LOGW (LOGD_AUTOIP4, "avahi-autoipd stopped unexpectedly with signal %d", WSTOPSIG (status)); else if (WIFSIGNALED (status)) _LOGW (LOGD_AUTOIP4, "avahi-autoipd died with signal %d", WTERMSIG (status)); else _LOGW (LOGD_AUTOIP4, "avahi-autoipd died from an unknown cause"); aipd_cleanup (self); state = nm_device_get_state (self); if (nm_device_is_activating (self) || (state == NM_DEVICE_STATE_ACTIVATED)) nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_AUTOIP_FAILED); } static gboolean aipd_timeout_cb (gpointer user_data) { NMDevice *self = NM_DEVICE (user_data); NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->aipd_timeout) { _LOGI (LOGD_AUTOIP4, "avahi-autoipd timed out."); priv->aipd_timeout = 0; aipd_cleanup (self); if (priv->ip4_state == IP_CONF) nm_device_activate_schedule_ip4_config_timeout (self); } return FALSE; } /* default to installed helper, but can be modified for testing */ const char *nm_device_autoipd_helper_path = LIBEXECDIR "/nm-avahi-autoipd.action"; static NMActStageReturn aipd_start (NMDevice *self, NMDeviceStateReason *reason) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); const char *argv[6]; char *cmdline; const char *aipd_binary; int i = 0; GError *error = NULL; aipd_cleanup (self); /* Find avahi-autoipd */ aipd_binary = nm_utils_find_helper ("avahi-autoipd", NULL, NULL); if (!aipd_binary) { _LOGW (LOGD_DEVICE | LOGD_AUTOIP4, "Activation: Stage 3 of 5 (IP Configure Start) failed" " to start avahi-autoipd: not found"); *reason = NM_DEVICE_STATE_REASON_AUTOIP_START_FAILED; return NM_ACT_STAGE_RETURN_FAILURE; } argv[i++] = aipd_binary; argv[i++] = "--script"; argv[i++] = nm_device_autoipd_helper_path; if (nm_logging_enabled (LOGL_DEBUG, LOGD_AUTOIP4)) argv[i++] = "--debug"; argv[i++] = nm_device_get_ip_iface (self); argv[i++] = NULL; cmdline = g_strjoinv (" ", (char **) argv); _LOGD (LOGD_AUTOIP4, "running: %s", cmdline); g_free (cmdline); if (!g_spawn_async ("/", (char **) argv, NULL, G_SPAWN_DO_NOT_REAP_CHILD, nm_utils_setpgid, NULL, &(priv->aipd_pid), &error)) { _LOGW (LOGD_DEVICE | LOGD_AUTOIP4, "Activation: Stage 3 of 5 (IP Configure Start) failed" " to start avahi-autoipd: %s", error && error->message ? 
error->message : "(unknown)"); g_clear_error (&error); aipd_cleanup (self); return NM_ACT_STAGE_RETURN_FAILURE; } _LOGI (LOGD_DEVICE | LOGD_AUTOIP4, "Activation: Stage 3 of 5 (IP Configure Start) started" " avahi-autoipd..."); /* Monitor the child process so we know when it dies */ priv->aipd_watch = g_child_watch_add (priv->aipd_pid, aipd_watch_cb, self); /* Start a timeout to bound the address attempt */ priv->aipd_timeout = g_timeout_add_seconds (20, aipd_timeout_cb, self); return NM_ACT_STAGE_RETURN_POSTPONE; } /*********************************************/ static gboolean _device_get_default_route_from_platform (NMDevice *self, int addr_family, NMPlatformIPRoute *out_route) { gboolean success = FALSE; int ifindex = nm_device_get_ip_ifindex (self); GArray *routes; if (addr_family == AF_INET) routes = nm_platform_ip4_route_get_all (ifindex, NM_PLATFORM_GET_ROUTE_MODE_ONLY_DEFAULT); else routes = nm_platform_ip6_route_get_all (ifindex, NM_PLATFORM_GET_ROUTE_MODE_ONLY_DEFAULT); if (routes) { guint route_metric = G_MAXUINT32, m; const NMPlatformIPRoute *route = NULL, *r; guint i; /* if there are several default routes, find the one with the best metric */ for (i = 0; i < routes->len; i++) { if (addr_family == AF_INET) { r = (const NMPlatformIPRoute *) &g_array_index (routes, NMPlatformIP4Route, i); m = r->metric; } else { r = (const NMPlatformIPRoute *) &g_array_index (routes, NMPlatformIP6Route, i); m = nm_utils_ip6_route_metric_normalize (r->metric); } if (!route || m < route_metric) { route = r; route_metric = m; } } if (route) { if (addr_family == AF_INET) *((NMPlatformIP4Route *) out_route) = *((NMPlatformIP4Route *) route); else *((NMPlatformIP6Route *) out_route) = *((NMPlatformIP6Route *) route); success = TRUE; } g_array_free (routes, TRUE); } return success; } /*********************************************/ static void ensure_con_ipx_config (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); int ip_ifindex = nm_device_get_ip_ifindex (self); NMConnection *connection; g_assert (!!priv->con_ip4_config == !!priv->con_ip6_config); if (priv->con_ip4_config) return; connection = nm_device_get_connection (self); if (!connection) return; priv->con_ip4_config = nm_ip4_config_new (ip_ifindex); priv->con_ip6_config = nm_ip6_config_new (ip_ifindex); nm_ip4_config_merge_setting (priv->con_ip4_config, nm_connection_get_setting_ip4_config (connection), nm_device_get_ip4_route_metric (self)); nm_ip6_config_merge_setting (priv->con_ip6_config, nm_connection_get_setting_ip6_config (connection), nm_device_get_ip6_route_metric (self)); if (nm_device_uses_assumed_connection (self)) { /* For assumed connections ignore all addresses and routes. 
*/ nm_ip4_config_reset_addresses (priv->con_ip4_config); nm_ip4_config_reset_routes (priv->con_ip4_config); nm_ip6_config_reset_addresses (priv->con_ip6_config); nm_ip6_config_reset_routes (priv->con_ip6_config); } } /*********************************************/ /* DHCPv4 stuff */ static void dhcp4_cleanup (NMDevice *self, gboolean stop, gboolean release) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->dhcp4_client) { /* Stop any ongoing DHCP transaction on this device */ if (priv->dhcp4_state_sigid) { g_signal_handler_disconnect (priv->dhcp4_client, priv->dhcp4_state_sigid); priv->dhcp4_state_sigid = 0; } nm_device_remove_pending_action (self, PENDING_ACTION_DHCP4, FALSE); if (stop) nm_dhcp_client_stop (priv->dhcp4_client, release); g_clear_object (&priv->dhcp4_client); } if (priv->dhcp4_config) { g_clear_object (&priv->dhcp4_config); g_object_notify (G_OBJECT (self), NM_DEVICE_DHCP4_CONFIG); } } static gboolean ip4_config_merge_and_apply (NMDevice *self, NMIP4Config *config, gboolean commit, NMDeviceStateReason *out_reason) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection; gboolean success; NMIP4Config *composite; gboolean has_direct_route; const guint32 default_route_metric = nm_device_get_ip4_route_metric (self); guint32 gateway; /* Merge all the configs into the composite config */ if (config) { g_clear_object (&priv->dev_ip4_config); priv->dev_ip4_config = g_object_ref (config); } composite = nm_ip4_config_new (nm_device_get_ip_ifindex (self)); ensure_con_ipx_config (self); if (priv->dev_ip4_config) nm_ip4_config_merge (composite, priv->dev_ip4_config); if (priv->vpn4_config) nm_ip4_config_merge (composite, priv->vpn4_config); if (priv->ext_ip4_config) nm_ip4_config_merge (composite, priv->ext_ip4_config); /* Merge WWAN config *last* to ensure modem-given settings overwrite * any external stuff set by pppd or other scripts. */ if (priv->wwan_ip4_config) nm_ip4_config_merge (composite, priv->wwan_ip4_config); /* Merge user overrides into the composite config. For assumed connection, * con_ip4_config is empty. */ if (priv->con_ip4_config) nm_ip4_config_merge (composite, priv->con_ip4_config); connection = nm_device_get_connection (self); /* Add the default route. * * We keep track of the default route of a device in a private field. * NMDevice needs to know the default route at this point, because the gateway * might require a direct route (see below). * * But also, we don't want to add the default route to priv->ip4_config, * because the default route from the setting might not be the same that * NMDefaultRouteManager eventually configures (because the it might * tweak the effective metric). */ /* unless we come to a different conclusion below, we have no default route and * the route is assumed. */ priv->default_route.v4_has = FALSE; priv->default_route.v4_is_assumed = TRUE; if (!commit) { /* during a non-commit event, we always pickup whatever is configured. */ goto END_ADD_DEFAULT_ROUTE; } if (nm_device_uses_assumed_connection (self)) goto END_ADD_DEFAULT_ROUTE; /* we are about to commit (for a non-assumed connection). Enforce whatever we have * configured. */ priv->default_route.v4_is_assumed = FALSE; if ( !connection || !nm_default_route_manager_ip4_connection_has_default_route (nm_default_route_manager_get (), connection)) goto END_ADD_DEFAULT_ROUTE; if (!nm_ip4_config_get_num_addresses (composite)) { /* without addresses we can have no default route. 
*/ goto END_ADD_DEFAULT_ROUTE; } gateway = nm_ip4_config_get_gateway (composite); if ( !gateway && nm_device_get_device_type (self) != NM_DEVICE_TYPE_MODEM) goto END_ADD_DEFAULT_ROUTE; has_direct_route = ( gateway == 0 || nm_ip4_config_get_subnet_for_host (composite, gateway) || nm_ip4_config_get_direct_route_for_host (composite, gateway)); priv->default_route.v4_has = TRUE; memset (&priv->default_route.v4, 0, sizeof (priv->default_route.v4)); priv->default_route.v4.source = NM_IP_CONFIG_SOURCE_USER; priv->default_route.v4.gateway = gateway; priv->default_route.v4.metric = default_route_metric; priv->default_route.v4.mss = nm_ip4_config_get_mss (composite); if (!has_direct_route) { NMPlatformIP4Route r = priv->default_route.v4; /* add a direct route to the gateway */ r.network = gateway; r.plen = 32; r.gateway = 0; nm_ip4_config_add_route (composite, &r); } END_ADD_DEFAULT_ROUTE: if (priv->default_route.v4_is_assumed) { /* If above does not explicitly assign a default route, we always pick up the * default route based on what is currently configured. * That means that even managed connections with never-default, can * get a default route (if configured externally). */ priv->default_route.v4_has = _device_get_default_route_from_platform (self, AF_INET, (NMPlatformIPRoute *) &priv->default_route.v4); } /* Allow setting MTU etc */ if (commit) { if (NM_DEVICE_GET_CLASS (self)->ip4_config_pre_commit) NM_DEVICE_GET_CLASS (self)->ip4_config_pre_commit (self, composite); } success = nm_device_set_ip4_config (self, composite, default_route_metric, commit, out_reason); g_object_unref (composite); return success; } static void dhcp4_lease_change (NMDevice *self, NMIP4Config *config) { NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE; g_return_if_fail (config != NULL); if (!ip4_config_merge_and_apply (self, config, TRUE, &reason)) { _LOGW (LOGD_DHCP4, "failed to update IPv4 config for DHCP change."); nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason); } else { /* Notify dispatcher scripts of new DHCP4 config */ nm_dispatcher_call (DISPATCHER_ACTION_DHCP4_CHANGE, nm_device_get_connection (self), self, NULL, NULL, NULL); } } static void dhcp4_fail (NMDevice *self, gboolean timeout) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); dhcp4_cleanup (self, TRUE, FALSE); if (timeout || (priv->ip4_state == IP_CONF)) nm_device_activate_schedule_ip4_config_timeout (self); else if (priv->ip4_state == IP_DONE) nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_EXPIRED); else g_warn_if_reached (); } static void dhcp4_update_config (NMDevice *self, NMDhcp4Config *config, GHashTable *options) { GHashTableIter iter; const char *key, *value; /* Update the DHCP4 config object with new DHCP options */ nm_dhcp4_config_reset (config); g_hash_table_iter_init (&iter, options); while (g_hash_table_iter_next (&iter, (gpointer) &key, (gpointer) &value)) nm_dhcp4_config_add_option (config, key, value); g_object_notify (G_OBJECT (self), NM_DEVICE_DHCP4_CONFIG); } static void dhcp4_state_changed (NMDhcpClient *client, NMDhcpState state, NMIP4Config *ip4_config, GHashTable *options, gpointer user_data) { NMDevice *self = NM_DEVICE (user_data); NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); g_return_if_fail (nm_dhcp_client_get_ipv6 (client) == FALSE); g_return_if_fail (!ip4_config || NM_IS_IP4_CONFIG (ip4_config)); _LOGD (LOGD_DHCP4, "new DHCPv4 client state %d", state); switch (state) { case NM_DHCP_STATE_BOUND: if (!ip4_config) { _LOGW (LOGD_DHCP4, "failed to 
get IPv4 config in response to DHCP event."); nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_UNAVAILABLE); break; } dhcp4_update_config (self, priv->dhcp4_config, options); if (priv->ip4_state == IP_CONF) nm_device_activate_schedule_ip4_config_result (self, ip4_config); else if (priv->ip4_state == IP_DONE) dhcp4_lease_change (self, ip4_config); break; case NM_DHCP_STATE_TIMEOUT: dhcp4_fail (self, TRUE); break; case NM_DHCP_STATE_EXPIRE: /* Ignore expiry before we even have a lease (NAK, old lease, etc) */ if (priv->ip4_state == IP_CONF) break; /* Fall through */ case NM_DHCP_STATE_DONE: case NM_DHCP_STATE_FAIL: dhcp4_fail (self, FALSE); break; default: break; } } static NMActStageReturn dhcp4_start (NMDevice *self, NMConnection *connection, NMDeviceStateReason *reason) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMSettingIPConfig *s_ip4; const guint8 *hw_addr; size_t hw_addr_len = 0; GByteArray *tmp = NULL; s_ip4 = nm_connection_get_setting_ip4_config (connection); /* Clear old exported DHCP options */ if (priv->dhcp4_config) g_object_unref (priv->dhcp4_config); priv->dhcp4_config = nm_dhcp4_config_new (); hw_addr = nm_platform_link_get_address (nm_device_get_ip_ifindex (self), &hw_addr_len); if (hw_addr_len) { tmp = g_byte_array_sized_new (hw_addr_len); g_byte_array_append (tmp, hw_addr, hw_addr_len); } /* Begin DHCP on the interface */ g_warn_if_fail (priv->dhcp4_client == NULL); priv->dhcp4_client = nm_dhcp_manager_start_ip4 (nm_dhcp_manager_get (), nm_device_get_ip_iface (self), nm_device_get_ip_ifindex (self), tmp, nm_connection_get_uuid (connection), nm_device_get_ip4_route_metric (self), nm_setting_ip_config_get_dhcp_send_hostname (s_ip4), nm_setting_ip_config_get_dhcp_hostname (s_ip4), nm_setting_ip4_config_get_dhcp_client_id (NM_SETTING_IP4_CONFIG (s_ip4)), priv->dhcp_timeout, priv->dhcp_anycast_address, NULL); if (tmp) g_byte_array_free (tmp, TRUE); if (!priv->dhcp4_client) { *reason = NM_DEVICE_STATE_REASON_DHCP_START_FAILED; return NM_ACT_STAGE_RETURN_FAILURE; } priv->dhcp4_state_sigid = g_signal_connect (priv->dhcp4_client, NM_DHCP_CLIENT_SIGNAL_STATE_CHANGED, G_CALLBACK (dhcp4_state_changed), self); nm_device_add_pending_action (self, PENDING_ACTION_DHCP4, TRUE); /* DHCP devices will be notified by the DHCP manager when stuff happens */ return NM_ACT_STAGE_RETURN_POSTPONE; } gboolean nm_device_dhcp4_renew (NMDevice *self, gboolean release) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMActStageReturn ret; NMDeviceStateReason reason; NMConnection *connection; g_return_val_if_fail (priv->dhcp4_client != NULL, FALSE); _LOGI (LOGD_DHCP4, "DHCPv4 lease renewal requested"); /* Terminate old DHCP instance and release the old lease */ dhcp4_cleanup (self, TRUE, release); connection = nm_device_get_connection (self); g_assert (connection); /* Start DHCP again on the interface */ ret = dhcp4_start (self, connection, &reason); return (ret != NM_ACT_STAGE_RETURN_FAILURE); } /*********************************************/ static GHashTable *shared_ips = NULL; static void release_shared_ip (gpointer data) { g_hash_table_remove (shared_ips, data); } static gboolean reserve_shared_ip (NMDevice *self, NMSettingIPConfig *s_ip4, NMPlatformIP4Address *address) { if (G_UNLIKELY (shared_ips == NULL)) shared_ips = g_hash_table_new (g_direct_hash, g_direct_equal); memset (address, 0, sizeof (*address)); if (s_ip4 && nm_setting_ip_config_get_num_addresses (s_ip4)) { /* Use the first user-supplied address */ NMIPAddress *user = 
nm_setting_ip_config_get_address (s_ip4, 0); g_assert (user); nm_ip_address_get_address_binary (user, &address->address); address->plen = nm_ip_address_get_prefix (user); } else { /* Find an unused address in the 10.42.x.x range */ guint32 start = (guint32) ntohl (0x0a2a0001); /* 10.42.0.1 */ guint32 count = 0; while (g_hash_table_lookup (shared_ips, GUINT_TO_POINTER (start + count))) { count += ntohl (0x100); if (count > ntohl (0xFE00)) { _LOGE (LOGD_SHARING, "ran out of shared IP addresses!"); return FALSE; } } address->address = start + count; address->plen = 24; g_hash_table_insert (shared_ips, GUINT_TO_POINTER (address->address), GUINT_TO_POINTER (TRUE)); } return TRUE; } static NMIP4Config * shared4_new_config (NMDevice *self, NMConnection *connection, NMDeviceStateReason *reason) { NMIP4Config *config = NULL; NMPlatformIP4Address address; g_return_val_if_fail (self != NULL, NULL); if (!reserve_shared_ip (self, nm_connection_get_setting_ip4_config (connection), &address)) { *reason = NM_DEVICE_STATE_REASON_IP_CONFIG_UNAVAILABLE; return NULL; } config = nm_ip4_config_new (nm_device_get_ip_ifindex (self)); address.source = NM_IP_CONFIG_SOURCE_SHARED; nm_ip4_config_add_address (config, &address); /* Remove the address lock when the object gets disposed */ g_object_set_data_full (G_OBJECT (config), "shared-ip", GUINT_TO_POINTER (address.address), release_shared_ip); return config; } /*********************************************/ static gboolean connection_ip4_method_requires_carrier (NMConnection *connection, gboolean *out_ip4_enabled) { const char *method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG); static const char *ip4_carrier_methods[] = { NM_SETTING_IP4_CONFIG_METHOD_AUTO, NM_SETTING_IP4_CONFIG_METHOD_LINK_LOCAL, NULL }; if (out_ip4_enabled) *out_ip4_enabled = !!strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_DISABLED); return _nm_utils_string_in_list (method, ip4_carrier_methods); } static gboolean connection_ip6_method_requires_carrier (NMConnection *connection, gboolean *out_ip6_enabled) { const char *method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG); static const char *ip6_carrier_methods[] = { NM_SETTING_IP6_CONFIG_METHOD_AUTO, NM_SETTING_IP6_CONFIG_METHOD_DHCP, NM_SETTING_IP6_CONFIG_METHOD_LINK_LOCAL, NULL }; if (out_ip6_enabled) *out_ip6_enabled = !!strcmp (method, NM_SETTING_IP6_CONFIG_METHOD_IGNORE); return _nm_utils_string_in_list (method, ip6_carrier_methods); } static gboolean connection_requires_carrier (NMConnection *connection) { NMSettingIPConfig *s_ip4, *s_ip6; gboolean ip4_carrier_wanted, ip6_carrier_wanted; gboolean ip4_used = FALSE, ip6_used = FALSE; ip4_carrier_wanted = connection_ip4_method_requires_carrier (connection, &ip4_used); if (ip4_carrier_wanted) { /* If IPv4 wants a carrier and cannot fail, the whole connection * requires a carrier regardless of the IPv6 method. */ s_ip4 = nm_connection_get_setting_ip4_config (connection); if (s_ip4 && !nm_setting_ip_config_get_may_fail (s_ip4)) return TRUE; } ip6_carrier_wanted = connection_ip6_method_requires_carrier (connection, &ip6_used); if (ip6_carrier_wanted) { /* If IPv6 wants a carrier and cannot fail, the whole connection * requires a carrier regardless of the IPv4 method. */ s_ip6 = nm_connection_get_setting_ip6_config (connection); if (s_ip6 && !nm_setting_ip_config_get_may_fail (s_ip6)) return TRUE; } /* If an IP version wants a carrier and and the other IP version isn't * used, the connection requires carrier since it will just fail without one. 
*/ if (ip4_carrier_wanted && !ip6_used) return TRUE; if (ip6_carrier_wanted && !ip4_used) return TRUE; /* If both want a carrier, the whole connection wants a carrier */ return ip4_carrier_wanted && ip6_carrier_wanted; } static gboolean have_any_ready_slaves (NMDevice *self, const GSList *slaves) { const GSList *iter; /* Any enslaved slave is "ready" in the generic case as it's * at least >= NM_DEVCIE_STATE_IP_CONFIG and has had Layer 2 * properties set up. */ for (iter = slaves; iter; iter = g_slist_next (iter)) { if (nm_device_get_enslaved (iter->data)) return TRUE; } return FALSE; } static gboolean ip4_requires_slaves (NMConnection *connection) { const char *method; method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG); return strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_AUTO) == 0; } static NMActStageReturn act_stage3_ip4_config_start (NMDevice *self, NMIP4Config **out_config, NMDeviceStateReason *reason) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection; NMActStageReturn ret = NM_ACT_STAGE_RETURN_FAILURE; const char *method; GSList *slaves; gboolean ready_slaves; g_return_val_if_fail (reason != NULL, NM_ACT_STAGE_RETURN_FAILURE); connection = nm_device_get_connection (self); g_assert (connection); if ( connection_ip4_method_requires_carrier (connection, NULL) && priv->is_master && !priv->carrier) { _LOGI (LOGD_IP4 | LOGD_DEVICE, "IPv4 config waiting until carrier is on"); return NM_ACT_STAGE_RETURN_WAIT; } if (priv->is_master && ip4_requires_slaves (connection)) { /* If the master has no ready slaves, and depends on slaves for * a successful IPv4 attempt, then postpone IPv4 addressing. */ slaves = nm_device_master_get_slaves (self); ready_slaves = NM_DEVICE_GET_CLASS (self)->have_any_ready_slaves (self, slaves); g_slist_free (slaves); if (ready_slaves == FALSE) { _LOGI (LOGD_DEVICE | LOGD_IP4, "IPv4 config waiting until slaves are ready"); return NM_ACT_STAGE_RETURN_WAIT; } } method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG); /* Start IPv4 addressing based on the method requested */ if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_AUTO) == 0) ret = dhcp4_start (self, connection, reason); else if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_LINK_LOCAL) == 0) ret = aipd_start (self, reason); else if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_MANUAL) == 0) { /* Use only IPv4 config from the connection data */ *out_config = nm_ip4_config_new (nm_device_get_ip_ifindex (self)); g_assert (*out_config); ret = NM_ACT_STAGE_RETURN_SUCCESS; } else if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_SHARED) == 0) { *out_config = shared4_new_config (self, connection, reason); if (*out_config) { priv->dnsmasq_manager = nm_dnsmasq_manager_new (nm_device_get_ip_iface (self)); ret = NM_ACT_STAGE_RETURN_SUCCESS; } else ret = NM_ACT_STAGE_RETURN_FAILURE; } else if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_DISABLED) == 0) { /* Nothing to do... 
*/ ret = NM_ACT_STAGE_RETURN_STOP; } else _LOGW (LOGD_IP4, "unhandled IPv4 config method '%s'; will fail", method); return ret; } /*********************************************/ /* DHCPv6 stuff */ static void dhcp6_cleanup (NMDevice *self, gboolean stop, gboolean release) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); priv->dhcp6_mode = NM_RDISC_DHCP_LEVEL_NONE; g_clear_object (&priv->dhcp6_ip6_config); if (priv->dhcp6_client) { if (priv->dhcp6_state_sigid) { g_signal_handler_disconnect (priv->dhcp6_client, priv->dhcp6_state_sigid); priv->dhcp6_state_sigid = 0; } if (stop) nm_dhcp_client_stop (priv->dhcp6_client, release); g_clear_object (&priv->dhcp6_client); } nm_device_remove_pending_action (self, PENDING_ACTION_DHCP6, FALSE); if (priv->dhcp6_config) { g_clear_object (&priv->dhcp6_config); g_object_notify (G_OBJECT (self), NM_DEVICE_DHCP6_CONFIG); } } static gboolean ip6_config_merge_and_apply (NMDevice *self, gboolean commit, NMDeviceStateReason *out_reason) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection; gboolean success; NMIP6Config *composite; gboolean has_direct_route; const struct in6_addr *gateway; /* If no config was passed in, create a new one */ composite = nm_ip6_config_new (nm_device_get_ip_ifindex (self)); ensure_con_ipx_config (self); g_assert (composite); /* Merge all the IP configs into the composite config */ if (priv->ac_ip6_config) nm_ip6_config_merge (composite, priv->ac_ip6_config); if (priv->dhcp6_ip6_config) nm_ip6_config_merge (composite, priv->dhcp6_ip6_config); if (priv->vpn6_config) nm_ip6_config_merge (composite, priv->vpn6_config); if (priv->ext_ip6_config) nm_ip6_config_merge (composite, priv->ext_ip6_config); /* Merge WWAN config *last* to ensure modem-given settings overwrite * any external stuff set by pppd or other scripts. */ if (priv->wwan_ip6_config) nm_ip6_config_merge (composite, priv->wwan_ip6_config); /* Merge user overrides into the composite config. For assumed connections, * con_ip6_config is empty. */ if (priv->con_ip6_config) nm_ip6_config_merge (composite, priv->con_ip6_config); connection = nm_device_get_connection (self); /* Add the default route. * * We keep track of the default route of a device in a private field. * NMDevice needs to know the default route at this point, because the gateway * might require a direct route (see below). * * But also, we don't want to add the default route to priv->ip6_config, * because the default route from the setting might not be the same that * NMDefaultRouteManager eventually configures (because the it might * tweak the effective metric). */ /* unless we come to a different conclusion below, we have no default route and * the route is assumed. */ priv->default_route.v6_has = FALSE; priv->default_route.v6_is_assumed = TRUE; if (!commit) { /* during a non-commit event, we always pickup whatever is configured. */ goto END_ADD_DEFAULT_ROUTE; } if (nm_device_uses_assumed_connection (self)) goto END_ADD_DEFAULT_ROUTE; /* we are about to commit (for a non-assumed connection). Enforce whatever we have * configured. */ priv->default_route.v6_is_assumed = FALSE; if ( !connection || !nm_default_route_manager_ip6_connection_has_default_route (nm_default_route_manager_get (), connection)) goto END_ADD_DEFAULT_ROUTE; if (!nm_ip6_config_get_num_addresses (composite)) { /* without addresses we can have no default route. 
*/ goto END_ADD_DEFAULT_ROUTE; } gateway = nm_ip6_config_get_gateway (composite); if (!gateway) goto END_ADD_DEFAULT_ROUTE; has_direct_route = nm_ip6_config_get_direct_route_for_host (composite, gateway) != NULL; priv->default_route.v6_has = TRUE; memset (&priv->default_route.v6, 0, sizeof (priv->default_route.v6)); priv->default_route.v6.source = NM_IP_CONFIG_SOURCE_USER; priv->default_route.v6.gateway = *gateway; priv->default_route.v6.metric = nm_device_get_ip6_route_metric (self); priv->default_route.v6.mss = nm_ip6_config_get_mss (composite); if (!has_direct_route) { NMPlatformIP6Route r = priv->default_route.v6; /* add a direct route to the gateway */ r.network = *gateway; r.plen = 128; r.gateway = in6addr_any; nm_ip6_config_add_route (composite, &r); } END_ADD_DEFAULT_ROUTE: if (priv->default_route.v6_is_assumed) { /* If above does not explicitly assign a default route, we always pick up the * default route based on what is currently configured. * That means that even managed connections with never-default, can * get a default route (if configured externally). */ priv->default_route.v6_has = _device_get_default_route_from_platform (self, AF_INET6, (NMPlatformIPRoute *) &priv->default_route.v6); } nm_ip6_config_addresses_sort (composite, priv->rdisc ? priv->rdisc_use_tempaddr : NM_SETTING_IP6_CONFIG_PRIVACY_UNKNOWN); /* Allow setting MTU etc */ if (commit) { if (NM_DEVICE_GET_CLASS (self)->ip6_config_pre_commit) NM_DEVICE_GET_CLASS (self)->ip6_config_pre_commit (self, composite); } success = nm_device_set_ip6_config (self, composite, commit, out_reason); g_object_unref (composite); return success; } static void dhcp6_lease_change (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection; NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE; if (priv->dhcp6_ip6_config == NULL) { _LOGW (LOGD_DHCP6, "failed to get DHCPv6 config for rebind"); nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_EXPIRED); return; } g_assert (priv->dhcp6_client); /* sanity check */ connection = nm_device_get_connection (self); g_assert (connection); /* Apply the updated config */ if (ip6_config_merge_and_apply (self, TRUE, &reason) == FALSE) { _LOGW (LOGD_DHCP6, "failed to update IPv6 config in response to DHCP event."); nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason); } else { /* Notify dispatcher scripts of new DHCPv6 config */ nm_dispatcher_call (DISPATCHER_ACTION_DHCP6_CHANGE, connection, self, NULL, NULL, NULL); } } static void dhcp6_fail (NMDevice *self, gboolean timeout) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); dhcp6_cleanup (self, TRUE, FALSE); if (priv->dhcp6_mode == NM_RDISC_DHCP_LEVEL_MANAGED) { if (timeout || (priv->ip6_state == IP_CONF)) nm_device_activate_schedule_ip6_config_timeout (self); else if (priv->ip6_state == IP_DONE) nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_EXPIRED); else g_warn_if_reached (); } else { /* not a hard failure; just live with the RA info */ if (priv->ip6_state == IP_CONF) nm_device_activate_schedule_ip6_config_result (self); } } static void dhcp6_timeout (NMDevice *self, NMDhcpClient *client) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->dhcp6_mode == NM_RDISC_DHCP_LEVEL_MANAGED) dhcp6_fail (self, TRUE); else { /* not a hard failure; just live with the RA info */ dhcp6_cleanup (self, TRUE, FALSE); if (priv->ip6_state == IP_CONF) nm_device_activate_schedule_ip6_config_result (self); } } static 
void dhcp6_update_config (NMDevice *self, NMDhcp6Config *config, GHashTable *options) { GHashTableIter iter; const char *key, *value; /* Update the DHCP6 config object with new DHCP options */ nm_dhcp6_config_reset (config); g_hash_table_iter_init (&iter, options); while (g_hash_table_iter_next (&iter, (gpointer) &key, (gpointer) &value)) nm_dhcp6_config_add_option (config, key, value); g_object_notify (G_OBJECT (self), NM_DEVICE_DHCP6_CONFIG); } static void dhcp6_state_changed (NMDhcpClient *client, NMDhcpState state, NMIP6Config *ip6_config, GHashTable *options, gpointer user_data) { NMDevice *self = NM_DEVICE (user_data); NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); g_return_if_fail (nm_dhcp_client_get_ipv6 (client) == TRUE); g_return_if_fail (!ip6_config || NM_IS_IP6_CONFIG (ip6_config)); _LOGD (LOGD_DHCP6, "new DHCPv6 client state %d", state); switch (state) { case NM_DHCP_STATE_BOUND: g_clear_object (&priv->dhcp6_ip6_config); if (ip6_config) { priv->dhcp6_ip6_config = g_object_ref (ip6_config); dhcp6_update_config (self, priv->dhcp6_config, options); } if (priv->ip6_state == IP_CONF) { if (priv->dhcp6_ip6_config == NULL) { /* FIXME: Initial DHCP failed; should we fail IPv6 entirely then? */ nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_DHCP_FAILED); break; } nm_device_activate_schedule_ip6_config_result (self); } else if (priv->ip6_state == IP_DONE) dhcp6_lease_change (self); break; case NM_DHCP_STATE_TIMEOUT: dhcp6_timeout (self, client); break; case NM_DHCP_STATE_EXPIRE: /* Ignore expiry before we even have a lease (NAK, old lease, etc) */ if (priv->ip6_state != IP_CONF) dhcp6_fail (self, FALSE); break; case NM_DHCP_STATE_DONE: /* In IPv6 info-only mode, the client doesn't handle leases so it * may exit right after getting a response from the server. That's * normal. In that case we just ignore the exit. */ if (priv->dhcp6_mode == NM_RDISC_DHCP_LEVEL_OTHERCONF) break; /* Otherwise, fall through */ case NM_DHCP_STATE_FAIL: dhcp6_fail (self, FALSE); break; default: break; } } static gboolean dhcp6_start_with_link_ready (NMDevice *self, NMConnection *connection) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMSettingIPConfig *s_ip6; GByteArray *tmp = NULL; const guint8 *hw_addr; size_t hw_addr_len = 0; g_assert (connection); s_ip6 = nm_connection_get_setting_ip6_config (connection); g_assert (s_ip6); hw_addr = nm_platform_link_get_address (nm_device_get_ip_ifindex (self), &hw_addr_len); if (hw_addr_len) { tmp = g_byte_array_sized_new (hw_addr_len); g_byte_array_append (tmp, hw_addr, hw_addr_len); } priv->dhcp6_client = nm_dhcp_manager_start_ip6 (nm_dhcp_manager_get (), nm_device_get_ip_iface (self), nm_device_get_ip_ifindex (self), tmp, nm_connection_get_uuid (connection), nm_device_get_ip6_route_metric (self), nm_setting_ip_config_get_dhcp_send_hostname (s_ip6), nm_setting_ip_config_get_dhcp_hostname (s_ip6), priv->dhcp_timeout, priv->dhcp_anycast_address, (priv->dhcp6_mode == NM_RDISC_DHCP_LEVEL_OTHERCONF) ? 
TRUE : FALSE, nm_setting_ip6_config_get_ip6_privacy (NM_SETTING_IP6_CONFIG (s_ip6))); if (tmp) g_byte_array_free (tmp, TRUE); if (priv->dhcp6_client) { priv->dhcp6_state_sigid = g_signal_connect (priv->dhcp6_client, NM_DHCP_CLIENT_SIGNAL_STATE_CHANGED, G_CALLBACK (dhcp6_state_changed), self); } return !!priv->dhcp6_client; } static gboolean dhcp6_start (NMDevice *self, gboolean wait_for_ll, NMDeviceStateReason *reason) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection; NMSettingIPConfig *s_ip6; g_clear_object (&priv->dhcp6_config); priv->dhcp6_config = nm_dhcp6_config_new (); g_warn_if_fail (priv->dhcp6_ip6_config == NULL); g_clear_object (&priv->dhcp6_ip6_config); connection = nm_device_get_connection (self); g_assert (connection); s_ip6 = nm_connection_get_setting_ip6_config (connection); if (!nm_setting_ip_config_get_may_fail (s_ip6) || !strcmp (nm_setting_ip_config_get_method (s_ip6), NM_SETTING_IP6_CONFIG_METHOD_DHCP)) nm_device_add_pending_action (self, PENDING_ACTION_DHCP6, TRUE); if (wait_for_ll) { NMActStageReturn ret; /* ensure link local is ready... */ ret = linklocal6_start (self); if (ret == NM_ACT_STAGE_RETURN_POSTPONE) { /* success; wait for the LL address to show up */ return TRUE; } /* success; already have the LL address; kick off DHCP */ g_assert (ret == NM_ACT_STAGE_RETURN_SUCCESS); } if (!dhcp6_start_with_link_ready (self, connection)) { *reason = NM_DEVICE_STATE_REASON_DHCP_START_FAILED; return FALSE; } return TRUE; } gboolean nm_device_dhcp6_renew (NMDevice *self, gboolean release) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); g_return_val_if_fail (priv->dhcp6_client != NULL, FALSE); _LOGI (LOGD_DHCP6, "DHCPv6 lease renewal requested"); /* Terminate old DHCP instance and release the old lease */ dhcp6_cleanup (self, TRUE, release); /* Start DHCP again on the interface */ return dhcp6_start (self, FALSE, NULL); } /******************************************/ static gboolean have_ip6_address (const NMIP6Config *ip6_config, gboolean linklocal) { guint i; if (!ip6_config) return FALSE; linklocal = !!linklocal; for (i = 0; i < nm_ip6_config_get_num_addresses (ip6_config); i++) { const NMPlatformIP6Address *addr = nm_ip6_config_get_address (ip6_config, i); if ((IN6_IS_ADDR_LINKLOCAL (&addr->address) == linklocal) && !(addr->flags & IFA_F_TENTATIVE)) return TRUE; } return FALSE; } static void linklocal6_cleanup (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); if (priv->linklocal6_timeout_id) { g_source_remove (priv->linklocal6_timeout_id); priv->linklocal6_timeout_id = 0; } } static gboolean linklocal6_timeout_cb (gpointer user_data) { NMDevice *self = user_data; linklocal6_cleanup (self); _LOGD (LOGD_DEVICE, "linklocal6: waiting for link-local addresses failed due to timeout"); nm_device_activate_schedule_ip6_config_timeout (self); return G_SOURCE_REMOVE; } static void linklocal6_complete (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection; const char *method; g_assert (priv->linklocal6_timeout_id); g_assert (have_ip6_address (priv->ip6_config, TRUE)); linklocal6_cleanup (self); connection = nm_device_get_connection (self); g_assert (connection); method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG); _LOGD (LOGD_DEVICE, "linklocal6: waiting for link-local addresses successful, continue with method %s", method); if (strcmp (method, NM_SETTING_IP6_CONFIG_METHOD_AUTO) == 0) { if (!addrconf6_start_with_link_ready (self)) { /* Time out IPv6 
instead of failing the entire activation */ nm_device_activate_schedule_ip6_config_timeout (self); } } else if (strcmp (method, NM_SETTING_IP6_CONFIG_METHOD_DHCP) == 0) { if (!dhcp6_start_with_link_ready (self, connection)) { /* Time out IPv6 instead of failing the entire activation */ nm_device_activate_schedule_ip6_config_timeout (self); } } else if (strcmp (method, NM_SETTING_IP6_CONFIG_METHOD_LINK_LOCAL) == 0) nm_device_activate_schedule_ip6_config_result (self); else g_return_if_fail (FALSE); } static void check_and_add_ipv6ll_addr (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); int ip_ifindex = nm_device_get_ip_ifindex (self); NMUtilsIPv6IfaceId iid; struct in6_addr lladdr; guint i, n; if (priv->nm_ipv6ll == FALSE) return; if (priv->ip6_config) { n = nm_ip6_config_get_num_addresses (priv->ip6_config); for (i = 0; i < n; i++) { const NMPlatformIP6Address *addr; addr = nm_ip6_config_get_address (priv->ip6_config, i); if (IN6_IS_ADDR_LINKLOCAL (&addr->address)) { /* Already have an LL address, nothing to do */ return; } } } if (!nm_device_get_ip_iface_identifier (self, &iid)) { _LOGW (LOGD_IP6, "failed to get interface identifier; IPv6 may be broken"); return; } memset (&lladdr, 0, sizeof (lladdr)); lladdr.s6_addr16[0] = htons (0xfe80); nm_utils_ipv6_addr_set_interface_identfier (&lladdr, iid); _LOGD (LOGD_IP6, "adding IPv6LL address %s", nm_utils_inet6_ntop (&lladdr, NULL)); if (!nm_platform_ip6_address_add (ip_ifindex, lladdr, in6addr_any, 64, NM_PLATFORM_LIFETIME_PERMANENT, NM_PLATFORM_LIFETIME_PERMANENT, 0)) { _LOGW (LOGD_IP6, "failed to add IPv6 link-local address %s", nm_utils_inet6_ntop (&lladdr, NULL)); } } static NMActStageReturn linklocal6_start (NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); NMConnection *connection; const char *method; linklocal6_cleanup (self); if (have_ip6_address (priv->ip6_config, TRUE)) return NM_ACT_STAGE_RETURN_SUCCESS; connection = nm_device_get_connection (self); g_assert (connection); method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG); _LOGD (LOGD_DEVICE, "linklocal6: starting IPv6 with method '%s', but the device has no link-local addresses configured. Wait.", method); check_and_add_ipv6ll_addr (self); priv->linklocal6_timeout_id = g_timeout_add_seconds (5, linklocal6_timeout_cb, self); return NM_ACT_STAGE_RETURN_POSTPONE; } /******************************************/ static void print_support_extended_ifa_flags (NMSettingIP6ConfigPrivacy use_tempaddr) { static gint8 warn = 0; static gint8 s_libnl = -1, s_kernel; if (warn >= 2) return; if (s_libnl == -1) { s_libnl = !!nm_platform_check_support_libnl_extended_ifa_flags (); s_kernel = !!nm_platform_check_support_kernel_extended_ifa_flags (); if (s_libnl && s_kernel) { nm_log_dbg (LOGD_IP6, "kernel and libnl support extended IFA_FLAGS (needed by NM for IPv6 private addresses)"); warn = 2; return; } } if ( use_tempaddr != NM_SETTING_IP6_CONFIG_PRIVACY_PREFER_TEMP_ADDR && use_tempaddr != NM_SETTING_IP6_CONFIG_PRIVACY_PREFER_PUBLIC_ADDR) { if (warn == 0) { nm_log_dbg (LOGD_IP6, "%s%s%s %s not support extended IFA_FLAGS (needed by NM for IPv6 private addresses)", !s_kernel ? "kernel" : "", !s_kernel && !s_libnl ? " and " : "", !s_libnl ? "libnl" : "", !s_kernel && !s_libnl ? "do" : "does"); warn = 1; } return; } if (!s_libnl && !s_kernel) { nm_log_warn (LOGD_IP6, "libnl and the kernel do not support extended IFA_FLAGS needed by NM for " "IPv6 private addresses. 
This feature is not available"); } else if (!s_libnl) { nm_log_warn (LOGD_IP6, "libnl does not support extended IFA_FLAGS needed by NM for " "IPv6 private addresses. This feature is not available"); } else if (!s_kernel) { nm_log_warn (LOGD_IP6, "The kernel does not support extended IFA_FLAGS needed by NM for " "IPv6 private addresses. This feature is not available"); } warn = 2; } static void rdisc_config_changed (NMRDisc *rdisc, NMRDiscConfigMap changed, NMDevice *self) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); warn = 2; } static void nm_device_ipv6_set_mtu (NMDevice *self, guint32 mtu); static void nm_device_set_mtu (NMDevice *self, guint32 mtu) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); int ifindex = nm_device_get_ifindex (self); if (mtu) priv->mtu = mtu; /* Ensure the IPv6 MTU is still alright. */ if (priv->ip6_mtu) nm_device_ipv6_set_mtu (self, priv->ip6_mtu); if (priv->mtu != nm_platform_link_get_mtu (ifindex)) nm_platform_link_set_mtu (ifindex, priv->mtu); } static void nm_device_ipv6_set_mtu (NMDevice *self, guint32 mtu) { NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self); guint32 plat_mtu = nm_device_ipv6_sysctl_get_int32 (self, "mtu", priv->mtu); char val[16]; priv->ip6_mtu = mtu ?: plat_mtu; if (priv->ip6_mtu && priv->mtu < priv->ip6_mtu) { _LOGW (LOGD_DEVICE | LOGD_IP6, "Lowering IPv6 MTU (%d) to match device MTU (%d)", priv->ip6_mtu, priv->mtu); priv->ip6_mtu = priv->mtu; } if (priv->ip6_mtu < 1280) { _LOGW (LOGD_DEVICE | LOGD_IP6, "IPv6 MTU (%d) smaller than 1280, adjusting", priv->ip6_mtu); priv->ip6_mtu = 1280; } if (priv->mtu < priv->ip6_mtu) { _LOGW (LOGD_DEVICE | LOGD_IP6, "Raising device MTU (%d) to match IPv6 MTU (%d)", priv->mtu, priv->ip6_mtu); nm_device_set_mtu (self, priv->ip6_mtu); } if (priv->ip6_mtu != plat_mtu) { g_snprintf (val, sizeof (val), "%d", mtu); nm_device_ipv6_sysctl_set (self, "mtu", val); } } static void rdisc_config_changed (NMRDisc *rdisc, NMRDiscConfigMap changed, NMDevice *self) { address.preferred = discovered_address->preferred; if (address.preferred > address.lifetime) address.preferred = address.lifetime; address.source = NM_IP_CONFIG_SOURCE_RDISC; address.flags = ifa_flags; nm_ip6_config_add_address (priv->ac_ip6_config, &address); } }
164,814
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: int sanity_check_ckpt(struct f2fs_sb_info *sbi) { unsigned int total, fsmeta; struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); unsigned int ovp_segments, reserved_segments; total = le32_to_cpu(raw_super->segment_count); fsmeta = le32_to_cpu(raw_super->segment_count_ckpt); fsmeta += le32_to_cpu(raw_super->segment_count_sit); fsmeta += le32_to_cpu(raw_super->segment_count_nat); fsmeta += le32_to_cpu(ckpt->rsvd_segment_count); fsmeta += le32_to_cpu(raw_super->segment_count_ssa); if (unlikely(fsmeta >= total)) return 1; ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); if (unlikely(fsmeta < F2FS_MIN_SEGMENTS || ovp_segments == 0 || reserved_segments == 0)) { f2fs_msg(sbi->sb, KERN_ERR, "Wrong layout: check mkfs.f2fs version"); return 1; } if (unlikely(f2fs_cp_error(sbi))) { f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck"); return 1; } return 0; } Commit Message: f2fs: sanity check checkpoint segno and blkoff Make sure segno and blkoff read from raw image are valid. Cc: [email protected] Signed-off-by: Jin Qian <[email protected]> [Jaegeuk Kim: adjust minor coding style] Signed-off-by: Jaegeuk Kim <[email protected]> CWE ID: CWE-129
int sanity_check_ckpt(struct f2fs_sb_info *sbi) { unsigned int total, fsmeta; struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); unsigned int ovp_segments, reserved_segments; unsigned int main_segs, blocks_per_seg; int i; total = le32_to_cpu(raw_super->segment_count); fsmeta = le32_to_cpu(raw_super->segment_count_ckpt); fsmeta += le32_to_cpu(raw_super->segment_count_sit); fsmeta += le32_to_cpu(raw_super->segment_count_nat); fsmeta += le32_to_cpu(ckpt->rsvd_segment_count); fsmeta += le32_to_cpu(raw_super->segment_count_ssa); if (unlikely(fsmeta >= total)) return 1; ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); if (unlikely(fsmeta < F2FS_MIN_SEGMENTS || ovp_segments == 0 || reserved_segments == 0)) { f2fs_msg(sbi->sb, KERN_ERR, "Wrong layout: check mkfs.f2fs version"); return 1; } main_segs = le32_to_cpu(raw_super->segment_count_main); blocks_per_seg = sbi->blocks_per_seg; for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) { if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs || le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg) return 1; } for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) { if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs || le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg) return 1; } if (unlikely(f2fs_cp_error(sbi))) { f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck"); return 1; } return 0; }
168,064
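A brief aside on the pattern behind this fix: the checkpoint's segment numbers and block offsets come straight from an untrusted on-disk image, so they have to be range-checked before any later code uses them as array or block indices. The sketch below shows only that idea; the names disk_index, MAX_SEGMENTS, and BLOCKS_PER_SEG are invented for illustration and are not taken from the f2fs sources above.

#include <stdint.h>
#include <stdbool.h>

#define MAX_SEGMENTS   4096u   /* hypothetical layout limit */
#define BLOCKS_PER_SEG 512u    /* hypothetical blocks per segment */

struct disk_index {            /* values copied verbatim from the image */
    uint32_t segno;
    uint16_t blkoff;
};

/* Reject any on-disk index that could later address past the in-memory
 * segment tables; this mirrors the segno/blkoff checks added above. */
static bool disk_index_valid(const struct disk_index *idx)
{
    if (idx->segno >= MAX_SEGMENTS)
        return false;
    if (idx->blkoff >= BLOCKS_PER_SEG)
        return false;
    return true;
}

The design point is simply that validation happens once, at the trust boundary, so every consumer of the index can assume it is in range.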
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, unsigned long arg) { struct vbg_session *session = filp->private_data; size_t returned_size, size; struct vbg_ioctl_hdr hdr; bool is_vmmdev_req; int ret = 0; void *buf; if (copy_from_user(&hdr, (void *)arg, sizeof(hdr))) return -EFAULT; if (hdr.version != VBG_IOCTL_HDR_VERSION) return -EINVAL; if (hdr.size_in < sizeof(hdr) || (hdr.size_out && hdr.size_out < sizeof(hdr))) return -EINVAL; size = max(hdr.size_in, hdr.size_out); if (_IOC_SIZE(req) && _IOC_SIZE(req) != size) return -EINVAL; if (size > SZ_16M) return -E2BIG; /* * IOCTL_VMMDEV_REQUEST needs the buffer to be below 4G to avoid * the need for a bounce-buffer and another copy later on. */ is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) || req == VBG_IOCTL_VMMDEV_REQUEST_BIG; if (is_vmmdev_req) buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT); else buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; if (copy_from_user(buf, (void *)arg, hdr.size_in)) { ret = -EFAULT; goto out; } if (hdr.size_in < size) memset(buf + hdr.size_in, 0, size - hdr.size_in); ret = vbg_core_ioctl(session, req, buf); if (ret) goto out; returned_size = ((struct vbg_ioctl_hdr *)buf)->size_out; if (returned_size > size) { vbg_debug("%s: too much output data %zu > %zu\n", __func__, returned_size, size); returned_size = size; } if (copy_to_user((void *)arg, buf, returned_size) != 0) ret = -EFAULT; out: if (is_vmmdev_req) vbg_req_free(buf, size); else kfree(buf); return ret; } Commit Message: virt: vbox: Only copy_from_user the request-header once In vbg_misc_device_ioctl(), the header of the ioctl argument is copied from the userspace pointer 'arg' and saved to the kernel object 'hdr'. Then the 'version', 'size_in', and 'size_out' fields of 'hdr' are verified. Before this commit, after the checks a buffer for the entire request would be allocated and then all data including the verified header would be copied from the userspace 'arg' pointer again. Given that the 'arg' pointer resides in userspace, a malicious userspace process can race to change the data pointed to by 'arg' between the two copies. By doing so, the user can bypass the verifications on the ioctl argument. This commit fixes this by using the already checked copy of the header to fill the header part of the allocated buffer and only copying the remainder of the data from userspace. Signed-off-by: Wenwen Wang <[email protected]> Reviewed-by: Hans de Goede <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]> CWE ID: CWE-362
static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, unsigned long arg) { struct vbg_session *session = filp->private_data; size_t returned_size, size; struct vbg_ioctl_hdr hdr; bool is_vmmdev_req; int ret = 0; void *buf; if (copy_from_user(&hdr, (void *)arg, sizeof(hdr))) return -EFAULT; if (hdr.version != VBG_IOCTL_HDR_VERSION) return -EINVAL; if (hdr.size_in < sizeof(hdr) || (hdr.size_out && hdr.size_out < sizeof(hdr))) return -EINVAL; size = max(hdr.size_in, hdr.size_out); if (_IOC_SIZE(req) && _IOC_SIZE(req) != size) return -EINVAL; if (size > SZ_16M) return -E2BIG; /* * IOCTL_VMMDEV_REQUEST needs the buffer to be below 4G to avoid * the need for a bounce-buffer and another copy later on. */ is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) || req == VBG_IOCTL_VMMDEV_REQUEST_BIG; if (is_vmmdev_req) buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT); else buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; *((struct vbg_ioctl_hdr *)buf) = hdr; if (copy_from_user(buf + sizeof(hdr), (void *)arg + sizeof(hdr), hdr.size_in - sizeof(hdr))) { ret = -EFAULT; goto out; } if (hdr.size_in < size) memset(buf + hdr.size_in, 0, size - hdr.size_in); ret = vbg_core_ioctl(session, req, buf); if (ret) goto out; returned_size = ((struct vbg_ioctl_hdr *)buf)->size_out; if (returned_size > size) { vbg_debug("%s: too much output data %zu > %zu\n", __func__, returned_size, size); returned_size = size; } if (copy_to_user((void *)arg, buf, returned_size) != 0) ret = -EFAULT; out: if (is_vmmdev_req) vbg_req_free(buf, size); else kfree(buf); return ret; }
169,188
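The commit message above describes a classic double-fetch race: the header is copied and validated, then the whole buffer including the header is copied from userspace a second time, so a concurrent writer can change the header between the two copies. A minimal user-space sketch of the safe shape (fetch and validate the header once, reuse the validated copy, copy only the remainder) follows; request_hdr and fetch_request are illustrative names, not the vboxguest driver's API.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct request_hdr {
    uint32_t version;
    uint32_t size_in;   /* total request size, header included */
};

/* Copy an untrusted request whose contents may change while we look at it:
 * fetch and validate the header exactly once, then reuse that validated
 * copy and fetch only the payload that follows it. */
static int fetch_request(const uint8_t *untrusted, size_t untrusted_len,
                         uint8_t **out, size_t *out_len)
{
    struct request_hdr hdr;
    uint8_t *buf;

    if (untrusted_len < sizeof(hdr))
        return -1;
    memcpy(&hdr, untrusted, sizeof(hdr));            /* single header fetch */
    if (hdr.size_in < sizeof(hdr) || hdr.size_in > untrusted_len)
        return -1;                                    /* validated once */

    buf = malloc(hdr.size_in);
    if (!buf)
        return -1;
    memcpy(buf, &hdr, sizeof(hdr));                   /* reuse checked header */
    memcpy(buf + sizeof(hdr), untrusted + sizeof(hdr),
           hdr.size_in - sizeof(hdr));                /* payload only */

    *out = buf;
    *out_len = hdr.size_in;
    return 0;
}

Because the validated header is never re-read from the shared buffer, later checks cannot be bypassed by racing writes.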
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: long Cluster::ParseSimpleBlock( long long block_size, long long& pos, long& len) { const long long block_start = pos; const long long block_stop = pos + block_size; IMkvReader* const pReader = m_pSegment->m_pReader; long long total, avail; long status = pReader->Length(&total, &avail); if (status < 0) //error return status; assert((total < 0) || (avail <= total)); if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(pReader, pos, len); if (result < 0) //error return static_cast<long>(result); if (result > 0) //weird return E_BUFFER_NOT_FULL; if ((pos + len) > block_stop) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long track = ReadUInt(pReader, pos, len); if (track < 0) //error return static_cast<long>(track); if (track == 0) return E_FILE_FORMAT_INVALID; #if 0 const Tracks* const pTracks = m_pSegment->GetTracks(); assert(pTracks); const long tn = static_cast<long>(track); const Track* const pTrack = pTracks->GetTrackByNumber(tn); if (pTrack == NULL) return E_FILE_FORMAT_INVALID; #endif pos += len; //consume track number if ((pos + 2) > block_stop) return E_FILE_FORMAT_INVALID; if ((pos + 2) > avail) { len = 2; return E_BUFFER_NOT_FULL; } pos += 2; //consume timecode if ((pos + 1) > block_stop) return E_FILE_FORMAT_INVALID; if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } unsigned char flags; status = pReader->Read(pos, 1, &flags); if (status < 0) //error or underflow { len = 1; return status; } ++pos; //consume flags byte assert(pos <= avail); if (pos >= block_stop) return E_FILE_FORMAT_INVALID; const int lacing = int(flags & 0x06) >> 1; if ((lacing != 0) && (block_stop > avail)) { len = static_cast<long>(block_stop - pos); return E_BUFFER_NOT_FULL; } status = CreateBlock(0x23, //simple block id block_start, block_size, 0); //DiscardPadding if (status != 0) return status; m_pos = block_stop; return 0; //success } Commit Message: libwebm: Pull from upstream Rolling mkvparser from upstream. Primarily for fixing a bug on parsing failures with certain Opus WebM files. Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae The diff is so huge because there were some style clean ups upstream. But it was ensured that there were no breaking changes when the style clean ups was done upstream. Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039 CWE ID: CWE-119
long Cluster::ParseSimpleBlock( if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((cluster_stop >= 0) && ((pos + len) > cluster_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long id = ReadUInt(pReader, pos, len); if (id < 0) // error return static_cast<long>(id); if (id == 0) return E_FILE_FORMAT_INVALID; // This is the distinguished set of ID's we use to determine // that we have exhausted the sub-element's inside the cluster // whose ID we parsed earlier. if (id == 0x0F43B675) // Cluster ID break; if (id == 0x0C53BB6B) // Cues ID break; pos += len; // consume ID field // Parse Size if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((cluster_stop >= 0) && ((pos + len) > cluster_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(pReader, pos, len); if (size < 0) // error return static_cast<long>(size); const long long unknown_size = (1LL << (7 * len)) - 1; if (size == unknown_size) return E_FILE_FORMAT_INVALID; pos += len; // consume size field if ((cluster_stop >= 0) && (pos > cluster_stop)) return E_FILE_FORMAT_INVALID; // pos now points to start of payload if (size == 0) // weird continue; if ((cluster_stop >= 0) && ((pos + size) > cluster_stop)) return E_FILE_FORMAT_INVALID; if (id == 0x67) { // TimeCode ID len = static_cast<long>(size); if ((pos + size) > avail) return E_BUFFER_NOT_FULL; timecode = UnserializeUInt(pReader, pos, size); if (timecode < 0) // error (or underflow) return static_cast<long>(timecode); new_pos = pos + size; if (bBlock) break; } else if (id == 0x20) { // BlockGroup ID bBlock = true; break; } else if (id == 0x23) { // SimpleBlock ID bBlock = true; break; } pos += size; // consume payload assert((cluster_stop < 0) || (pos <= cluster_stop)); } assert((cluster_stop < 0) || (pos <= cluster_stop)); if (timecode < 0) // no timecode found return E_FILE_FORMAT_INVALID; if (!bBlock) return E_FILE_FORMAT_INVALID; m_pos = new_pos; // designates position just beyond timecode payload m_timecode = timecode; // m_timecode >= 0 means we're partially loaded if (cluster_size >= 0) m_element_size = cluster_stop - m_element_start; return 0; } long Cluster::Parse(long long& pos, long& len) const { long status = Load(pos, len); if (status < 0) return status; assert(m_pos >= m_element_start); assert(m_timecode >= 0); // assert(m_size > 0); // assert(m_element_size > m_size); const long long cluster_stop = (m_element_size < 0) ? 
-1 : m_element_start + m_element_size; if ((cluster_stop >= 0) && (m_pos >= cluster_stop)) return 1; // nothing else to do IMkvReader* const pReader = m_pSegment->m_pReader; long long total, avail; status = pReader->Length(&total, &avail); if (status < 0) // error return status; assert((total < 0) || (avail <= total)); pos = m_pos; for (;;) { if ((cluster_stop >= 0) && (pos >= cluster_stop)) break; if ((total >= 0) && (pos >= total)) { if (m_element_size < 0) m_element_size = pos - m_element_start; break; } // Parse ID if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((cluster_stop >= 0) && ((pos + len) > cluster_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long id = ReadUInt(pReader, pos, len); if (id < 0) // error return static_cast<long>(id); if (id == 0) // weird return E_FILE_FORMAT_INVALID; // This is the distinguished set of ID's we use to determine // that we have exhausted the sub-element's inside the cluster // whose ID we parsed earlier. if ((id == 0x0F43B675) || (id == 0x0C53BB6B)) { // Cluster or Cues ID if (m_element_size < 0) m_element_size = pos - m_element_start; break; } pos += len; // consume ID field // Parse Size if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((cluster_stop >= 0) && ((pos + len) > cluster_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(pReader, pos, len); if (size < 0) // error return static_cast<long>(size); const long long unknown_size = (1LL << (7 * len)) - 1; if (size == unknown_size) return E_FILE_FORMAT_INVALID; pos += len; // consume size field if ((cluster_stop >= 0) && (pos > cluster_stop)) return E_FILE_FORMAT_INVALID; // pos now points to start of payload if (size == 0) // weird continue; // const long long block_start = pos; const long long block_stop = pos + size; if (cluster_stop >= 0) { if (block_stop > cluster_stop) { if ((id == 0x20) || (id == 0x23)) return E_FILE_FORMAT_INVALID; pos = cluster_stop; break; } } else if ((total >= 0) && (block_stop > total)) { m_element_size = total - m_element_start; pos = total; break; } else if (block_stop > avail) { len = static_cast<long>(size); return E_BUFFER_NOT_FULL; } Cluster* const this_ = const_cast<Cluster*>(this); if (id == 0x20) // BlockGroup return this_->ParseBlockGroup(size, pos, len); if (id == 0x23) // SimpleBlock return this_->ParseSimpleBlock(size, pos, len); pos += size; // consume payload assert((cluster_stop < 0) || (pos <= cluster_stop)); } assert(m_element_size > 0); m_pos = pos; assert((cluster_stop < 0) || (m_pos <= cluster_stop)); if (m_entries_count > 0) { const long idx = m_entries_count - 1; const BlockEntry* const pLast = m_entries[idx]; assert(pLast); const Block* const pBlock = pLast->GetBlock(); assert(pBlock); const long long start = pBlock->m_start; if ((total >= 0) && (start > total)) return -1; // defend against trucated stream const long long size = pBlock->m_size; const long long stop = start + size; assert((cluster_stop < 0) || (stop <= cluster_stop)); if ((total >= 0) && (stop > total)) return -1; // defend against trucated stream } return 1; // no more entries } long 
Cluster::ParseSimpleBlock(long long block_size, long long& pos, long& len) { const long long block_start = pos; const long long block_stop = pos + block_size; IMkvReader* const pReader = m_pSegment->m_pReader; long long total, avail; long status = pReader->Length(&total, &avail); if (status < 0) // error return status; assert((total < 0) || (avail <= total)); // parse track number if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((pos + len) > block_stop) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long track = ReadUInt(pReader, pos, len); if (track < 0) // error return static_cast<long>(track); if (track == 0) return E_FILE_FORMAT_INVALID; #if 0 const Tracks* const pTracks = m_pSegment->GetTracks(); assert(pTracks); const long tn = static_cast<long>(track); const Track* const pTrack = pTracks->GetTrackByNumber(tn); if (pTrack == NULL) return E_FILE_FORMAT_INVALID; #endif pos += len; // consume track number if ((pos + 2) > block_stop) return E_FILE_FORMAT_INVALID; if ((pos + 2) > avail) { len = 2; return E_BUFFER_NOT_FULL; } pos += 2; // consume timecode if ((pos + 1) > block_stop) return E_FILE_FORMAT_INVALID; if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } unsigned char flags; status = pReader->Read(pos, 1, &flags); if (status < 0) { // error or underflow len = 1; return status; } ++pos; // consume flags byte assert(pos <= avail); if (pos >= block_stop) return E_FILE_FORMAT_INVALID; const int lacing = int(flags & 0x06) >> 1; if ((lacing != 0) && (block_stop > avail)) { len = static_cast<long>(block_stop - pos); return E_BUFFER_NOT_FULL; } status = CreateBlock(0x23, // simple block id block_start, block_size, 0); // DiscardPadding if (status != 0) return status; m_pos = block_stop; return 0; // success }
174,429
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: status_t Camera3Device::createDefaultRequest(int templateId, CameraMetadata *request) { ATRACE_CALL(); ALOGV("%s: for template %d", __FUNCTION__, templateId); Mutex::Autolock il(mInterfaceLock); Mutex::Autolock l(mLock); switch (mStatus) { case STATUS_ERROR: CLOGE("Device has encountered a serious error"); return INVALID_OPERATION; case STATUS_UNINITIALIZED: CLOGE("Device is not initialized!"); return INVALID_OPERATION; case STATUS_UNCONFIGURED: case STATUS_CONFIGURED: case STATUS_ACTIVE: break; default: SET_ERR_L("Unexpected status: %d", mStatus); return INVALID_OPERATION; } if (!mRequestTemplateCache[templateId].isEmpty()) { *request = mRequestTemplateCache[templateId]; return OK; } const camera_metadata_t *rawRequest; ATRACE_BEGIN("camera3->construct_default_request_settings"); rawRequest = mHal3Device->ops->construct_default_request_settings( mHal3Device, templateId); ATRACE_END(); if (rawRequest == NULL) { ALOGI("%s: template %d is not supported on this camera device", __FUNCTION__, templateId); return BAD_VALUE; } *request = rawRequest; mRequestTemplateCache[templateId] = rawRequest; return OK; } Commit Message: Camera3Device: Validate template ID Validate template ID before creating a default request. Bug: 26866110 Bug: 27568958 Change-Id: Ifda457024f1d5c2b1382f189c1a8d5fda852d30d CWE ID: CWE-264
status_t Camera3Device::createDefaultRequest(int templateId, CameraMetadata *request) { ATRACE_CALL(); ALOGV("%s: for template %d", __FUNCTION__, templateId); if (templateId <= 0 || templateId >= CAMERA3_TEMPLATE_COUNT) { android_errorWriteWithInfoLog(CameraService::SN_EVENT_LOG_ID, "26866110", IPCThreadState::self()->getCallingUid(), NULL, 0); return BAD_VALUE; } Mutex::Autolock il(mInterfaceLock); Mutex::Autolock l(mLock); switch (mStatus) { case STATUS_ERROR: CLOGE("Device has encountered a serious error"); return INVALID_OPERATION; case STATUS_UNINITIALIZED: CLOGE("Device is not initialized!"); return INVALID_OPERATION; case STATUS_UNCONFIGURED: case STATUS_CONFIGURED: case STATUS_ACTIVE: break; default: SET_ERR_L("Unexpected status: %d", mStatus); return INVALID_OPERATION; } if (!mRequestTemplateCache[templateId].isEmpty()) { *request = mRequestTemplateCache[templateId]; return OK; } const camera_metadata_t *rawRequest; ATRACE_BEGIN("camera3->construct_default_request_settings"); rawRequest = mHal3Device->ops->construct_default_request_settings( mHal3Device, templateId); ATRACE_END(); if (rawRequest == NULL) { ALOGI("%s: template %d is not supported on this camera device", __FUNCTION__, templateId); return BAD_VALUE; } *request = rawRequest; mRequestTemplateCache[templateId] = rawRequest; return OK; }
173,883
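The fix in this record amounts to range-checking an externally supplied template ID before it is used to index the request-template cache. A tiny sketch of that guard is shown below; TEMPLATE_COUNT, template_cache, and lookup_template are made-up stand-ins for the camera HAL constants, not the actual Camera3Device code.

#include <stddef.h>

#define TEMPLATE_COUNT 7   /* hypothetical: valid IDs are 1..TEMPLATE_COUNT-1 */

static const char *template_cache[TEMPLATE_COUNT];

/* Range-check the caller-controlled ID before it indexes the cache. */
static const char *lookup_template(int template_id)
{
    if (template_id <= 0 || template_id >= TEMPLATE_COUNT)
        return NULL;   /* reject, as the BAD_VALUE early return does above */
    return template_cache[template_id];
}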
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: sctp_disposition_t sctp_sf_do_asconf(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; struct sctp_chunk *asconf_ack = NULL; struct sctp_paramhdr *err_param = NULL; sctp_addiphdr_t *hdr; union sctp_addr_param *addr_param; __u32 serial; int length; if (!sctp_vtag_verify(chunk, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, SCTP_NULL()); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } /* ADD-IP: Section 4.1.1 * This chunk MUST be sent in an authenticated way by using * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk * is received unauthenticated it MUST be silently discarded as * described in [I-D.ietf-tsvwg-sctp-auth]. */ if (!net->sctp.addip_noauth && !chunk->auth) return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); /* Make sure that the ASCONF ADDIP chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_addip_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); hdr = (sctp_addiphdr_t *)chunk->skb->data; serial = ntohl(hdr->serial); addr_param = (union sctp_addr_param *)hdr->params; length = ntohs(addr_param->p.length); if (length < sizeof(sctp_paramhdr_t)) return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, (void *)addr_param, commands); /* Verify the ASCONF chunk before processing it. */ if (!sctp_verify_asconf(asoc, (sctp_paramhdr_t *)((void *)addr_param + length), (void *)chunk->chunk_end, &err_param)) return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, (void *)err_param, commands); /* ADDIP 5.2 E1) Compare the value of the serial number to the value * the endpoint stored in a new association variable * 'Peer-Serial-Number'. */ if (serial == asoc->peer.addip_serial + 1) { /* If this is the first instance of ASCONF in the packet, * we can clean our old ASCONF-ACKs. */ if (!chunk->has_asconf) sctp_assoc_clean_asconf_ack_cache(asoc); /* ADDIP 5.2 E4) When the Sequence Number matches the next one * expected, process the ASCONF as described below and after * processing the ASCONF Chunk, append an ASCONF-ACK Chunk to * the response packet and cache a copy of it (in the event it * later needs to be retransmitted). * * Essentially, do V1-V5. */ asconf_ack = sctp_process_asconf((struct sctp_association *) asoc, chunk); if (!asconf_ack) return SCTP_DISPOSITION_NOMEM; } else if (serial < asoc->peer.addip_serial + 1) { /* ADDIP 5.2 E2) * If the value found in the Sequence Number is less than the * ('Peer- Sequence-Number' + 1), simply skip to the next * ASCONF, and include in the outbound response packet * any previously cached ASCONF-ACK response that was * sent and saved that matches the Sequence Number of the * ASCONF. Note: It is possible that no cached ASCONF-ACK * Chunk exists. This will occur when an older ASCONF * arrives out of order. In such a case, the receiver * should skip the ASCONF Chunk and not include ASCONF-ACK * Chunk for that chunk. */ asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial); if (!asconf_ack) return SCTP_DISPOSITION_DISCARD; /* Reset the transport so that we select the correct one * this time around. This is to make sure that we don't * accidentally use a stale transport that's been removed. */ asconf_ack->transport = NULL; } else { /* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since * it must be either a stale packet or from an attacker. 
*/ return SCTP_DISPOSITION_DISCARD; } /* ADDIP 5.2 E6) The destination address of the SCTP packet * containing the ASCONF-ACK Chunks MUST be the source address of * the SCTP packet that held the ASCONF Chunks. * * To do this properly, we'll set the destination address of the chunk * and at the transmit time, will try look up the transport to use. * Since ASCONFs may be bundled, the correct transport may not be * created until we process the entire packet, thus this workaround. */ asconf_ack->dest = chunk->source; sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack)); if (asoc->new_transport) { sctp_sf_heartbeat(ep, asoc, type, asoc->new_transport, commands); ((struct sctp_association *)asoc)->new_transport = NULL; } return SCTP_DISPOSITION_CONSUME; } Commit Message: net: sctp: fix skb_over_panic when receiving malformed ASCONF chunks Commit 6f4c618ddb0 ("SCTP : Add paramters validity check for ASCONF chunk") added basic verification of ASCONF chunks, however, it is still possible to remotely crash a server by sending a special crafted ASCONF chunk, even up to pre 2.6.12 kernels: skb_over_panic: text:ffffffffa01ea1c3 len:31056 put:30768 head:ffff88011bd81800 data:ffff88011bd81800 tail:0x7950 end:0x440 dev:<NULL> ------------[ cut here ]------------ kernel BUG at net/core/skbuff.c:129! [...] Call Trace: <IRQ> [<ffffffff8144fb1c>] skb_put+0x5c/0x70 [<ffffffffa01ea1c3>] sctp_addto_chunk+0x63/0xd0 [sctp] [<ffffffffa01eadaf>] sctp_process_asconf+0x1af/0x540 [sctp] [<ffffffff8152d025>] ? _read_unlock_bh+0x15/0x20 [<ffffffffa01e0038>] sctp_sf_do_asconf+0x168/0x240 [sctp] [<ffffffffa01e3751>] sctp_do_sm+0x71/0x1210 [sctp] [<ffffffff8147645d>] ? fib_rules_lookup+0xad/0xf0 [<ffffffffa01e6b22>] ? sctp_cmp_addr_exact+0x32/0x40 [sctp] [<ffffffffa01e8393>] sctp_assoc_bh_rcv+0xd3/0x180 [sctp] [<ffffffffa01ee986>] sctp_inq_push+0x56/0x80 [sctp] [<ffffffffa01fcc42>] sctp_rcv+0x982/0xa10 [sctp] [<ffffffffa01d5123>] ? ipt_local_in_hook+0x23/0x28 [iptable_filter] [<ffffffff8148bdc9>] ? nf_iterate+0x69/0xb0 [<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0 [<ffffffff8148bf86>] ? nf_hook_slow+0x76/0x120 [<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0 [<ffffffff81496ded>] ip_local_deliver_finish+0xdd/0x2d0 [<ffffffff81497078>] ip_local_deliver+0x98/0xa0 [<ffffffff8149653d>] ip_rcv_finish+0x12d/0x440 [<ffffffff81496ac5>] ip_rcv+0x275/0x350 [<ffffffff8145c88b>] __netif_receive_skb+0x4ab/0x750 [<ffffffff81460588>] netif_receive_skb+0x58/0x60 This can be triggered e.g., through a simple scripted nmap connection scan injecting the chunk after the handshake, for example, ... -------------- INIT[ASCONF; ASCONF_ACK] -------------> <----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------ -------------------- COOKIE-ECHO --------------------> <-------------------- COOKIE-ACK --------------------- ------------------ ASCONF; UNKNOWN ------------------> ... where ASCONF chunk of length 280 contains 2 parameters ... 1) Add IP address parameter (param length: 16) 2) Add/del IP address parameter (param length: 255) ... followed by an UNKNOWN chunk of e.g. 4 bytes. Here, the Address Parameter in the ASCONF chunk is even missing, too. This is just an example and similarly-crafted ASCONF chunks could be used just as well. The ASCONF chunk passes through sctp_verify_asconf() as all parameters passed sanity checks, and after walking, we ended up successfully at the chunk end boundary, and thus may invoke sctp_process_asconf(). Parameter walking is done with WORD_ROUND() to take padding into account. 
In sctp_process_asconf()'s TLV processing, we may fail in sctp_process_asconf_param() e.g., due to removal of the IP address that is also the source address of the packet containing the ASCONF chunk, and thus we need to add all TLVs after the failure to our ASCONF response to remote via helper function sctp_add_asconf_response(), which basically invokes a sctp_addto_chunk() adding the error parameters to the given skb. When walking to the next parameter this time, we proceed with ... length = ntohs(asconf_param->param_hdr.length); asconf_param = (void *)asconf_param + length; ... instead of the WORD_ROUND()'ed length, thus resulting here in an off-by-one that leads to reading the follow-up garbage parameter length of 12336, and thus throwing an skb_over_panic for the reply when trying to sctp_addto_chunk() next time, which implicitly calls the skb_put() with that length. Fix it by using sctp_walk_params() [ which is also used in INIT parameter processing ] macro in the verification *and* in ASCONF processing: it will make sure we don't spill over, that we walk parameters WORD_ROUND()'ed. Moreover, we're being more defensive and guard against unknown parameter types and missized addresses. Joint work with Vlad Yasevich. Fixes: b896b82be4ae ("[SCTP] ADDIP: Support for processing incoming ASCONF_ACK chunks.") Signed-off-by: Daniel Borkmann <[email protected]> Signed-off-by: Vlad Yasevich <[email protected]> Acked-by: Neil Horman <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-399
sctp_disposition_t sctp_sf_do_asconf(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; struct sctp_chunk *asconf_ack = NULL; struct sctp_paramhdr *err_param = NULL; sctp_addiphdr_t *hdr; __u32 serial; if (!sctp_vtag_verify(chunk, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, SCTP_NULL()); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } /* ADD-IP: Section 4.1.1 * This chunk MUST be sent in an authenticated way by using * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk * is received unauthenticated it MUST be silently discarded as * described in [I-D.ietf-tsvwg-sctp-auth]. */ if (!net->sctp.addip_noauth && !chunk->auth) return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); /* Make sure that the ASCONF ADDIP chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_addip_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); hdr = (sctp_addiphdr_t *)chunk->skb->data; serial = ntohl(hdr->serial); /* Verify the ASCONF chunk before processing it. */ if (!sctp_verify_asconf(asoc, chunk, true, &err_param)) return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, (void *)err_param, commands); /* ADDIP 5.2 E1) Compare the value of the serial number to the value * the endpoint stored in a new association variable * 'Peer-Serial-Number'. */ if (serial == asoc->peer.addip_serial + 1) { /* If this is the first instance of ASCONF in the packet, * we can clean our old ASCONF-ACKs. */ if (!chunk->has_asconf) sctp_assoc_clean_asconf_ack_cache(asoc); /* ADDIP 5.2 E4) When the Sequence Number matches the next one * expected, process the ASCONF as described below and after * processing the ASCONF Chunk, append an ASCONF-ACK Chunk to * the response packet and cache a copy of it (in the event it * later needs to be retransmitted). * * Essentially, do V1-V5. */ asconf_ack = sctp_process_asconf((struct sctp_association *) asoc, chunk); if (!asconf_ack) return SCTP_DISPOSITION_NOMEM; } else if (serial < asoc->peer.addip_serial + 1) { /* ADDIP 5.2 E2) * If the value found in the Sequence Number is less than the * ('Peer- Sequence-Number' + 1), simply skip to the next * ASCONF, and include in the outbound response packet * any previously cached ASCONF-ACK response that was * sent and saved that matches the Sequence Number of the * ASCONF. Note: It is possible that no cached ASCONF-ACK * Chunk exists. This will occur when an older ASCONF * arrives out of order. In such a case, the receiver * should skip the ASCONF Chunk and not include ASCONF-ACK * Chunk for that chunk. */ asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial); if (!asconf_ack) return SCTP_DISPOSITION_DISCARD; /* Reset the transport so that we select the correct one * this time around. This is to make sure that we don't * accidentally use a stale transport that's been removed. */ asconf_ack->transport = NULL; } else { /* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since * it must be either a stale packet or from an attacker. */ return SCTP_DISPOSITION_DISCARD; } /* ADDIP 5.2 E6) The destination address of the SCTP packet * containing the ASCONF-ACK Chunks MUST be the source address of * the SCTP packet that held the ASCONF Chunks. * * To do this properly, we'll set the destination address of the chunk * and at the transmit time, will try look up the transport to use. 
* Since ASCONFs may be bundled, the correct transport may not be * created until we process the entire packet, thus this workaround. */ asconf_ack->dest = chunk->source; sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack)); if (asoc->new_transport) { sctp_sf_heartbeat(ep, asoc, type, asoc->new_transport, commands); ((struct sctp_association *)asoc)->new_transport = NULL; } return SCTP_DISPOSITION_CONSUME; }
166,335
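The commit message above pinpoints the off-by-one: when walking TLV parameters, the processing path advanced by the raw length instead of the padded, 4-byte-rounded length, so the next read landed inside the padding and picked up a garbage length. The sketch below illustrates padded, bounds-checked TLV walking in isolation; the tlv_hdr struct and PAD4 macro are invented for this example and are not the kernel's sctp_paramhdr or WORD_ROUND.

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define PAD4(len) (((size_t)(len) + 3u) & ~(size_t)3u)  /* 4-byte rounding */

struct tlv_hdr {
    uint16_t type;
    uint16_t length;   /* covers header + value, excludes padding */
};

/* Walk TLVs in buf[0..buf_len), advancing by the *padded* length each time
 * and bounds-checking every entry; reject anything truncated or missized. */
static bool walk_tlvs(const uint8_t *buf, size_t buf_len)
{
    size_t off = 0;

    while (off + sizeof(struct tlv_hdr) <= buf_len) {
        struct tlv_hdr hdr;
        size_t len, padded;

        memcpy(&hdr, buf + off, sizeof(hdr));
        len = ntohs(hdr.length);
        if (len < sizeof(hdr))            /* must at least cover the header */
            return false;
        padded = PAD4(len);
        if (padded > buf_len - off)       /* entry must fit in the buffer */
            return false;
        /* ... process the value at buf + off + sizeof(hdr) here ... */
        off += padded;                    /* never land inside the padding */
    }
    return off == buf_len;                /* trailing bytes are malformed */
}

Using the same padded stride in both the verification pass and the processing pass is exactly what keeps the two from disagreeing about where a parameter ends.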
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: AppControllerImpl::AppControllerImpl(Profile* profile) //// static : profile_(profile), app_service_proxy_(apps::AppServiceProxy::Get(profile)), url_prefix_(base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII( chromeos::switches::kKioskNextHomeUrlPrefix)) { app_service_proxy_->AppRegistryCache().AddObserver(this); if (profile) { content::URLDataSource::Add(profile, std::make_unique<apps::AppIconSource>(profile)); } } Commit Message: Refactor the AppController implementation into a KeyedService. This is necessary to guarantee that the AppController will not outlive the AppServiceProxy, which could happen before during Profile destruction. Bug: 945427 Change-Id: I9e2089799e38d5a70a4a9aa66df5319113e7809e Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1542336 Reviewed-by: Michael Giuffrida <[email protected]> Commit-Queue: Lucas Tenório <[email protected]> Cr-Commit-Position: refs/heads/master@{#645122} CWE ID: CWE-416
AppControllerImpl::AppControllerImpl(Profile* profile) //// static AppControllerService* AppControllerService::Get( content::BrowserContext* context) { return AppControllerServiceFactory::GetForBrowserContext(context); } AppControllerService::AppControllerService(Profile* profile) : profile_(profile), app_service_proxy_(apps::AppServiceProxy::Get(profile)), url_prefix_(base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII( chromeos::switches::kKioskNextHomeUrlPrefix)) { DCHECK(profile); app_service_proxy_->AppRegistryCache().AddObserver(this); content::URLDataSource::Add(profile, std::make_unique<apps::AppIconSource>(profile)); }
172,079
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: static int jp2_cmap_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_cmap_t *cmap = &box->data.cmap; jp2_cmapent_t *ent; unsigned int i; cmap->numchans = (box->datalen) / 4; if (!(cmap->ents = jas_alloc2(cmap->numchans, sizeof(jp2_cmapent_t)))) { return -1; } for (i = 0; i < cmap->numchans; ++i) { ent = &cmap->ents[i]; if (jp2_getuint16(in, &ent->cmptno) || jp2_getuint8(in, &ent->map) || jp2_getuint8(in, &ent->pcol)) { return -1; } } return 0; } Commit Message: Fixed bugs due to uninitialized data in the JP2 decoder. Also, added some comments marking I/O stream interfaces that probably need to be changed (in the long term) to fix integer overflow problems. CWE ID: CWE-476
static int jp2_cmap_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_cmap_t *cmap = &box->data.cmap; jp2_cmapent_t *ent; unsigned int i; cmap->ents = 0; cmap->numchans = (box->datalen) / 4; if (!(cmap->ents = jas_alloc2(cmap->numchans, sizeof(jp2_cmapent_t)))) { return -1; } for (i = 0; i < cmap->numchans; ++i) { ent = &cmap->ents[i]; if (jp2_getuint16(in, &ent->cmptno) || jp2_getuint8(in, &ent->map) || jp2_getuint8(in, &ent->pcol)) { return -1; } } return 0; }
168,322
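The one-line change in this record initializes cmap->ents before anything can fail, so a cleanup path that runs after a partial failure never frees or dereferences an indeterminate pointer. A generic sketch of the same discipline, using invented names (box_data, box_load, box_destroy) rather than JasPer's API:

#include <stdlib.h>

struct box_data {
    size_t nents;
    int *ents;
};

/* Destructor that may run after a partially failed load. */
static void box_destroy(struct box_data *d)
{
    free(d->ents);   /* safe only because ents is always NULL or valid */
    d->ents = NULL;
}

static int box_load(struct box_data *d, size_t nents)
{
    d->ents = NULL;                          /* initialize before any failure */
    d->nents = nents;
    d->ents = calloc(nents, sizeof(*d->ents));
    if (!d->ents)
        return -1;   /* the caller may still call box_destroy() safely */
    return 0;
}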
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: static unsigned int help(struct sk_buff *skb, enum ip_conntrack_info ctinfo, unsigned int protoff, unsigned int matchoff, unsigned int matchlen, struct nf_conntrack_expect *exp) { char buffer[sizeof("4294967296 65635")]; u_int16_t port; unsigned int ret; /* Reply comes from server. */ exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; exp->dir = IP_CT_DIR_REPLY; exp->expectfn = nf_nat_follow_master; /* Try to get same port: if not, try to change it. */ for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { int ret; exp->tuple.dst.u.tcp.port = htons(port); ret = nf_ct_expect_related(exp); if (ret == 0) break; else if (ret != -EBUSY) { port = 0; break; } } if (port == 0) { nf_ct_helper_log(skb, exp->master, "all ports in use"); return NF_DROP; } ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo, protoff, matchoff, matchlen, buffer, strlen(buffer)); if (ret != NF_ACCEPT) { nf_ct_helper_log(skb, exp->master, "cannot mangle packet"); nf_ct_unexpect_related(exp); } return ret; } Commit Message: netfilter: nf_nat: fix access to uninitialized buffer in IRC NAT helper Commit 5901b6be885e attempted to introduce IPv6 support into IRC NAT helper. By doing so, the following code seemed to be removed by accident: ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip); sprintf(buffer, "%u %u", ip, port); pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n", buffer, &ip, port); This leads to the fact that buffer[] was left uninitialized and contained some stack value. When we call nf_nat_mangle_tcp_packet(), we call strlen(buffer) on excatly this uninitialized buffer. If we are unlucky and the skb has enough tailroom, we overwrite resp. leak contents with values that sit on our stack into the packet and send that out to the receiver. Since the rather informal DCC spec [1] does not seem to specify IPv6 support right now, we log such occurences so that admins can act accordingly, and drop the packet. I've looked into XChat source, and IPv6 is not supported there: addresses are in u32 and print via %u format string. Therefore, restore old behaviour as in IPv4, use snprintf(). The IRC helper does not support IPv6 by now. By this, we can safely use strlen(buffer) in nf_nat_mangle_tcp_packet() and prevent a buffer overflow. Also simplify some code as we now have ct variable anyway. [1] http://www.irchelp.org/irchelp/rfc/ctcpspec.html Fixes: 5901b6be885e ("netfilter: nf_nat: support IPv6 in IRC NAT helper") Signed-off-by: Daniel Borkmann <[email protected]> Cc: Harald Welte <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]> CWE ID: CWE-119
static unsigned int help(struct sk_buff *skb, enum ip_conntrack_info ctinfo, unsigned int protoff, unsigned int matchoff, unsigned int matchlen, struct nf_conntrack_expect *exp) { char buffer[sizeof("4294967296 65635")]; struct nf_conn *ct = exp->master; union nf_inet_addr newaddr; u_int16_t port; unsigned int ret; /* Reply comes from server. */ newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3; exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; exp->dir = IP_CT_DIR_REPLY; exp->expectfn = nf_nat_follow_master; /* Try to get same port: if not, try to change it. */ for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { int ret; exp->tuple.dst.u.tcp.port = htons(port); ret = nf_ct_expect_related(exp); if (ret == 0) break; else if (ret != -EBUSY) { port = 0; break; } } if (port == 0) { nf_ct_helper_log(skb, ct, "all ports in use"); return NF_DROP; } /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27 * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28 * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26 * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26 * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27 * * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits, * 255.255.255.255==4294967296, 10 digits) * P: bound port (min 1 d, max 5d (65635)) * F: filename (min 1 d ) * S: size (min 1 d ) * 0x01, \n: terminators */ /* AAA = "us", ie. where server normally talks to. */ snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port); pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n", buffer, &newaddr.ip, port); ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff, matchlen, buffer, strlen(buffer)); if (ret != NF_ACCEPT) { nf_ct_helper_log(skb, ct, "cannot mangle packet"); nf_ct_unexpect_related(exp); } return ret; }
166,436
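Here the stack buffer was handed to strlen() and the packet mangler without ever being written, so whatever happened to be on the stack could leak into the outgoing packet; the restored fix formats the buffer with snprintf() first. A small sketch of the pattern (write the string, then measure it) is below, with placeholder values instead of the conntrack fields; build_dcc_payload is an invented helper.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Build the "address port" payload before anything measures or copies it. */
static size_t build_dcc_payload(char *buf, size_t buf_size,
                                uint32_t addr_host_order, uint16_t port)
{
    /* snprintf always NUL-terminates and never overruns buf_size. */
    int n = snprintf(buf, buf_size, "%u %u",
                     (unsigned)addr_host_order, (unsigned)port);
    if (n < 0 || (size_t)n >= buf_size)
        return 0;        /* formatting failed or would have truncated */
    return strlen(buf);  /* well-defined now: the buffer was written above */
}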
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: WebTransformationMatrix WebTransformOperations::blend(const WebTransformOperations& from, double progress) const { WebTransformationMatrix toReturn; bool fromIdentity = from.isIdentity(); bool toIdentity = isIdentity(); if (fromIdentity && toIdentity) return toReturn; if (matchesTypes(from)) { size_t numOperations = max(fromIdentity ? 0 : from.m_private->operations.size(), toIdentity ? 0 : m_private->operations.size()); for (size_t i = 0; i < numOperations; ++i) { WebTransformationMatrix blended = blendTransformOperations( fromIdentity ? 0 : &from.m_private->operations[i], toIdentity ? 0 : &m_private->operations[i], progress); toReturn.multiply(blended); } } else { toReturn = apply(); WebTransformationMatrix fromTransform = from.apply(); toReturn.blend(fromTransform, progress); } return toReturn; } Commit Message: [chromium] We should accelerate all transformations, except when we must blend matrices that cannot be decomposed. https://bugs.webkit.org/show_bug.cgi?id=95855 Reviewed by James Robinson. Source/Platform: WebTransformOperations are now able to report if they can successfully blend. WebTransformationMatrix::blend now returns a bool if blending would fail. * chromium/public/WebTransformOperations.h: (WebTransformOperations): * chromium/public/WebTransformationMatrix.h: (WebTransformationMatrix): Source/WebCore: WebTransformOperations are now able to report if they can successfully blend. WebTransformationMatrix::blend now returns a bool if blending would fail. Unit tests: AnimationTranslationUtilTest.createTransformAnimationWithNonDecomposableMatrix AnimationTranslationUtilTest.createTransformAnimationWithNonInvertibleTransform * platform/chromium/support/WebTransformOperations.cpp: (WebKit::blendTransformOperations): (WebKit::WebTransformOperations::blend): (WebKit::WebTransformOperations::canBlendWith): (WebKit): (WebKit::WebTransformOperations::blendInternal): * platform/chromium/support/WebTransformationMatrix.cpp: (WebKit::WebTransformationMatrix::blend): * platform/graphics/chromium/AnimationTranslationUtil.cpp: (WebCore::WebTransformAnimationCurve): Source/WebKit/chromium: Added the following unit tests: AnimationTranslationUtilTest.createTransformAnimationWithNonDecomposableMatrix AnimationTranslationUtilTest.createTransformAnimationWithNonInvertibleTransform * tests/AnimationTranslationUtilTest.cpp: (WebKit::TEST): (WebKit): git-svn-id: svn://svn.chromium.org/blink/trunk@127868 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-119
WebTransformationMatrix WebTransformOperations::blend(const WebTransformOperations& from, double progress) const { WebTransformationMatrix toReturn; blendInternal(from, progress, toReturn); return toReturn; }
171,002
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: void fe_netjoin_deinit(void) { while (joinservers != NULL) netjoin_server_remove(joinservers->data); if (join_tag != -1) { g_source_remove(join_tag); signal_remove("print starting", (SIGNAL_FUNC) sig_print_starting); } signal_remove("setup changed", (SIGNAL_FUNC) read_settings); signal_remove("message quit", (SIGNAL_FUNC) msg_quit); signal_remove("message join", (SIGNAL_FUNC) msg_join); signal_remove("message irc mode", (SIGNAL_FUNC) msg_mode); } Commit Message: Merge branch 'netjoin-timeout' into 'master' fe-netjoin: remove irc servers on "server disconnected" signal Closes #7 See merge request !10 CWE ID: CWE-416
void fe_netjoin_deinit(void) { while (joinservers != NULL) netjoin_server_remove(joinservers->data); if (join_tag != -1) { g_source_remove(join_tag); signal_remove("print starting", (SIGNAL_FUNC) sig_print_starting); } signal_remove("setup changed", (SIGNAL_FUNC) read_settings); signal_remove("server disconnected", (SIGNAL_FUNC) sig_server_disconnected); signal_remove("message quit", (SIGNAL_FUNC) msg_quit); signal_remove("message join", (SIGNAL_FUNC) msg_join); signal_remove("message irc mode", (SIGNAL_FUNC) msg_mode); }
168,290
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: static SECStatus SelectClientCert(void *arg, PRFileDesc *sock, struct CERTDistNamesStr *caNames, struct CERTCertificateStr **pRetCert, struct SECKEYPrivateKeyStr **pRetKey) { struct ssl_connect_data *connssl = (struct ssl_connect_data *)arg; struct Curl_easy *data = connssl->data; const char *nickname = connssl->client_nickname; if(connssl->obj_clicert) { /* use the cert/key provided by PEM reader */ static const char pem_slotname[] = "PEM Token #1"; SECItem cert_der = { 0, NULL, 0 }; void *proto_win = SSL_RevealPinArg(sock); struct CERTCertificateStr *cert; struct SECKEYPrivateKeyStr *key; PK11SlotInfo *slot = PK11_FindSlotByName(pem_slotname); if(NULL == slot) { failf(data, "NSS: PK11 slot not found: %s", pem_slotname); return SECFailure; } if(PK11_ReadRawAttribute(PK11_TypeGeneric, connssl->obj_clicert, CKA_VALUE, &cert_der) != SECSuccess) { failf(data, "NSS: CKA_VALUE not found in PK11 generic object"); PK11_FreeSlot(slot); return SECFailure; } cert = PK11_FindCertFromDERCertItem(slot, &cert_der, proto_win); SECITEM_FreeItem(&cert_der, PR_FALSE); if(NULL == cert) { failf(data, "NSS: client certificate from file not found"); PK11_FreeSlot(slot); return SECFailure; } key = PK11_FindPrivateKeyFromCert(slot, cert, NULL); PK11_FreeSlot(slot); if(NULL == key) { failf(data, "NSS: private key from file not found"); CERT_DestroyCertificate(cert); return SECFailure; } infof(data, "NSS: client certificate from file\n"); display_cert_info(data, cert); *pRetCert = cert; *pRetKey = key; return SECSuccess; } /* use the default NSS hook */ if(SECSuccess != NSS_GetClientAuthData((void *)nickname, sock, caNames, pRetCert, pRetKey) || NULL == *pRetCert) { if(NULL == nickname) failf(data, "NSS: client certificate not found (nickname not " "specified)"); else failf(data, "NSS: client certificate not found: %s", nickname); return SECFailure; } /* get certificate nickname if any */ nickname = (*pRetCert)->nickname; if(NULL == nickname) nickname = "[unknown]"; if(NULL == *pRetKey) { failf(data, "NSS: private key not found for certificate: %s", nickname); return SECFailure; } infof(data, "NSS: using client certificate: %s\n", nickname); display_cert_info(data, *pRetCert); return SECSuccess; } Commit Message: nss: refuse previously loaded certificate from file ... when we are not asked to use a certificate from file CWE ID: CWE-287
static SECStatus SelectClientCert(void *arg, PRFileDesc *sock, struct CERTDistNamesStr *caNames, struct CERTCertificateStr **pRetCert, struct SECKEYPrivateKeyStr **pRetKey) { struct ssl_connect_data *connssl = (struct ssl_connect_data *)arg; struct Curl_easy *data = connssl->data; const char *nickname = connssl->client_nickname; static const char pem_slotname[] = "PEM Token #1"; if(connssl->obj_clicert) { /* use the cert/key provided by PEM reader */ SECItem cert_der = { 0, NULL, 0 }; void *proto_win = SSL_RevealPinArg(sock); struct CERTCertificateStr *cert; struct SECKEYPrivateKeyStr *key; PK11SlotInfo *slot = PK11_FindSlotByName(pem_slotname); if(NULL == slot) { failf(data, "NSS: PK11 slot not found: %s", pem_slotname); return SECFailure; } if(PK11_ReadRawAttribute(PK11_TypeGeneric, connssl->obj_clicert, CKA_VALUE, &cert_der) != SECSuccess) { failf(data, "NSS: CKA_VALUE not found in PK11 generic object"); PK11_FreeSlot(slot); return SECFailure; } cert = PK11_FindCertFromDERCertItem(slot, &cert_der, proto_win); SECITEM_FreeItem(&cert_der, PR_FALSE); if(NULL == cert) { failf(data, "NSS: client certificate from file not found"); PK11_FreeSlot(slot); return SECFailure; } key = PK11_FindPrivateKeyFromCert(slot, cert, NULL); PK11_FreeSlot(slot); if(NULL == key) { failf(data, "NSS: private key from file not found"); CERT_DestroyCertificate(cert); return SECFailure; } infof(data, "NSS: client certificate from file\n"); display_cert_info(data, cert); *pRetCert = cert; *pRetKey = key; return SECSuccess; } /* use the default NSS hook */ if(SECSuccess != NSS_GetClientAuthData((void *)nickname, sock, caNames, pRetCert, pRetKey) || NULL == *pRetCert) { if(NULL == nickname) failf(data, "NSS: client certificate not found (nickname not " "specified)"); else failf(data, "NSS: client certificate not found: %s", nickname); return SECFailure; } /* get certificate nickname if any */ nickname = (*pRetCert)->nickname; if(NULL == nickname) nickname = "[unknown]"; if(!strncmp(nickname, pem_slotname, sizeof(pem_slotname) - 1U)) { failf(data, "NSS: refusing previously loaded certificate from file: %s", nickname); return SECFailure; } if(NULL == *pRetKey) { failf(data, "NSS: private key not found for certificate: %s", nickname); return SECFailure; } infof(data, "NSS: using client certificate: %s\n", nickname); display_cert_info(data, *pRetCert); return SECSuccess; }
166,945
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: virtual void ResetModel() { last_pts_ = 0; bits_in_buffer_model_ = cfg_.rc_target_bitrate * cfg_.rc_buf_initial_sz; frame_number_ = 0; tot_frame_number_ = 0; first_drop_ = 0; num_drops_ = 0; for (int i = 0; i < 3; ++i) { bits_total_[i] = 0; } } Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478 DO NOT MERGE - libvpx: Pull from upstream Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06 BUG=23452792 Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec CWE ID: CWE-119
virtual void ResetModel() { last_pts_ = 0; bits_in_buffer_model_ = cfg_.rc_target_bitrate * cfg_.rc_buf_initial_sz; frame_number_ = 0; tot_frame_number_ = 0; first_drop_ = 0; num_drops_ = 0; // Denoiser is off by default. denoiser_on_ = 0; for (int i = 0; i < 3; ++i) { bits_total_[i] = 0; } denoiser_offon_test_ = 0; denoiser_offon_period_ = -1; }
174,518
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: void ExtensionsGuestViewMessageFilter::FrameNavigationHelper::FrameDeleted( RenderFrameHost* render_frame_host) { if (render_frame_host->GetFrameTreeNodeId() != frame_tree_node_id_) return; filter_->ResumeAttachOrDestroy(element_instance_id_, MSG_ROUTING_NONE /* no plugin frame */); } Commit Message: [GuestView] - Introduce MimeHandlerViewAttachHelper This CL is for the most part a mechanical change which extracts almost all the frame-based MimeHandlerView code out of ExtensionsGuestViewMessageFilter. This change both removes the current clutter form EGVMF as well as fixesa race introduced when the frame-based logic was added to EGVMF. The reason for the race was that EGVMF is destroyed on IO thread but all the access to it (for frame-based MHV) are from UI. [email protected],[email protected] Bug: 659750, 896679, 911161, 918861 Change-Id: I6474b870e4d56daa68be03637bb633665d9f9dda Reviewed-on: https://chromium-review.googlesource.com/c/1401451 Commit-Queue: Ehsan Karamad <[email protected]> Reviewed-by: James MacLean <[email protected]> Reviewed-by: Ehsan Karamad <[email protected]> Cr-Commit-Position: refs/heads/master@{#621155} CWE ID: CWE-362
void ExtensionsGuestViewMessageFilter::FrameNavigationHelper::FrameDeleted(
173,040
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: int imap_subscribe(char *path, bool subscribe) { struct ImapData *idata = NULL; char buf[LONG_STRING]; char mbox[LONG_STRING]; char errstr[STRING]; struct Buffer err, token; struct ImapMbox mx; if (!mx_is_imap(path) || imap_parse_path(path, &mx) || !mx.mbox) { mutt_error(_("Bad mailbox name")); return -1; } idata = imap_conn_find(&(mx.account), 0); if (!idata) goto fail; imap_fix_path(idata, mx.mbox, buf, sizeof(buf)); if (!*buf) mutt_str_strfcpy(buf, "INBOX", sizeof(buf)); if (ImapCheckSubscribed) { mutt_buffer_init(&token); mutt_buffer_init(&err); err.data = errstr; err.dsize = sizeof(errstr); snprintf(mbox, sizeof(mbox), "%smailboxes \"%s\"", subscribe ? "" : "un", path); if (mutt_parse_rc_line(mbox, &token, &err)) mutt_debug(1, "Error adding subscribed mailbox: %s\n", errstr); FREE(&token.data); } if (subscribe) mutt_message(_("Subscribing to %s..."), buf); else mutt_message(_("Unsubscribing from %s..."), buf); imap_munge_mbox_name(idata, mbox, sizeof(mbox), buf); snprintf(buf, sizeof(buf), "%sSUBSCRIBE %s", subscribe ? "" : "UN", mbox); if (imap_exec(idata, buf, 0) < 0) goto fail; imap_unmunge_mbox_name(idata, mx.mbox); if (subscribe) mutt_message(_("Subscribed to %s"), mx.mbox); else mutt_message(_("Unsubscribed from %s"), mx.mbox); FREE(&mx.mbox); return 0; fail: FREE(&mx.mbox); return -1; } Commit Message: Quote path in imap_subscribe CWE ID: CWE-77
int imap_subscribe(char *path, bool subscribe) { struct ImapData *idata = NULL; char buf[LONG_STRING]; char mbox[LONG_STRING]; char errstr[STRING]; struct Buffer err, token; struct ImapMbox mx; size_t len = 0; if (!mx_is_imap(path) || imap_parse_path(path, &mx) || !mx.mbox) { mutt_error(_("Bad mailbox name")); return -1; } idata = imap_conn_find(&(mx.account), 0); if (!idata) goto fail; imap_fix_path(idata, mx.mbox, buf, sizeof(buf)); if (!*buf) mutt_str_strfcpy(buf, "INBOX", sizeof(buf)); if (ImapCheckSubscribed) { mutt_buffer_init(&token); mutt_buffer_init(&err); err.data = errstr; err.dsize = sizeof(errstr); len = snprintf(mbox, sizeof(mbox), "%smailboxes ", subscribe ? "" : "un"); imap_quote_string(mbox + len, sizeof(mbox) - len, path, true); if (mutt_parse_rc_line(mbox, &token, &err)) mutt_debug(1, "Error adding subscribed mailbox: %s\n", errstr); FREE(&token.data); } if (subscribe) mutt_message(_("Subscribing to %s..."), buf); else mutt_message(_("Unsubscribing from %s..."), buf); imap_munge_mbox_name(idata, mbox, sizeof(mbox), buf); snprintf(buf, sizeof(buf), "%sSUBSCRIBE %s", subscribe ? "" : "UN", mbox); if (imap_exec(idata, buf, 0) < 0) goto fail; imap_unmunge_mbox_name(idata, mx.mbox); if (subscribe) mutt_message(_("Subscribed to %s"), mx.mbox); else mutt_message(_("Unsubscribed from %s"), mx.mbox); FREE(&mx.mbox); return 0; fail: FREE(&mx.mbox); return -1; }
169,137
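Editor's note on the entry above: the fix works by quoting the mailbox path before splicing it into the generated "mailboxes"/"unmailboxes" rc line, so shell- or parser-significant characters in a folder name stay inside one literal argument. Below is a minimal, self-contained sketch of that quoting idea; the helper name and escaping rules are illustrative, not NeoMutt's actual imap_quote_string().

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical helper: copy `src` into `dst` wrapped in double quotes,
     * backslash-escaping embedded quotes and backslashes so the receiving
     * command parser sees a single literal argument. Returns 0 on success,
     * -1 if the destination buffer is too small. */
    static int quote_arg(char *dst, size_t dstlen, const char *src)
    {
        size_t j = 0;
        if (dstlen < 3)
            return -1;
        dst[j++] = '"';
        for (size_t i = 0; src[i] != '\0'; i++) {
            if (src[i] == '"' || src[i] == '\\') {
                if (j + 2 >= dstlen)
                    return -1;
                dst[j++] = '\\';
            }
            if (j + 2 >= dstlen)
                return -1;
            dst[j++] = src[i];
        }
        dst[j++] = '"';
        dst[j] = '\0';
        return 0;
    }

    int main(void)
    {
        /* A hostile folder name that would otherwise smuggle in a second command. */
        const char *path = "INBOX\" ; set editor=\"evil";
        char quoted[256], line[512];

        if (quote_arg(quoted, sizeof(quoted), path) != 0)
            return 1;
        snprintf(line, sizeof(line), "mailboxes %s", quoted);
        puts(line);   /* the whole path stays inside one quoted argument */
        return 0;
    }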
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int rock_continue(struct rock_state *rs) { int ret = 1; int blocksize = 1 << rs->inode->i_blkbits; const int min_de_size = offsetof(struct rock_ridge, u); kfree(rs->buffer); rs->buffer = NULL; if ((unsigned)rs->cont_offset > blocksize - min_de_size || (unsigned)rs->cont_size > blocksize || (unsigned)(rs->cont_offset + rs->cont_size) > blocksize) { printk(KERN_NOTICE "rock: corrupted directory entry. " "extent=%d, offset=%d, size=%d\n", rs->cont_extent, rs->cont_offset, rs->cont_size); ret = -EIO; goto out; } if (rs->cont_extent) { struct buffer_head *bh; rs->buffer = kmalloc(rs->cont_size, GFP_KERNEL); if (!rs->buffer) { ret = -ENOMEM; goto out; } ret = -EIO; bh = sb_bread(rs->inode->i_sb, rs->cont_extent); if (bh) { memcpy(rs->buffer, bh->b_data + rs->cont_offset, rs->cont_size); put_bh(bh); rs->chr = rs->buffer; rs->len = rs->cont_size; rs->cont_extent = 0; rs->cont_size = 0; rs->cont_offset = 0; return 0; } printk("Unable to read rock-ridge attributes\n"); } out: kfree(rs->buffer); rs->buffer = NULL; return ret; } Commit Message: isofs: Fix infinite looping over CE entries Rock Ridge extensions define so called Continuation Entries (CE) which define where is further space with Rock Ridge data. Corrupted isofs image can contain arbitrarily long chain of these, including a one containing loop and thus causing kernel to end in an infinite loop when traversing these entries. Limit the traversal to 32 entries which should be more than enough space to store all the Rock Ridge data. Reported-by: P J P <[email protected]> CC: [email protected] Signed-off-by: Jan Kara <[email protected]> CWE ID: CWE-399
static int rock_continue(struct rock_state *rs) { int ret = 1; int blocksize = 1 << rs->inode->i_blkbits; const int min_de_size = offsetof(struct rock_ridge, u); kfree(rs->buffer); rs->buffer = NULL; if ((unsigned)rs->cont_offset > blocksize - min_de_size || (unsigned)rs->cont_size > blocksize || (unsigned)(rs->cont_offset + rs->cont_size) > blocksize) { printk(KERN_NOTICE "rock: corrupted directory entry. " "extent=%d, offset=%d, size=%d\n", rs->cont_extent, rs->cont_offset, rs->cont_size); ret = -EIO; goto out; } if (rs->cont_extent) { struct buffer_head *bh; rs->buffer = kmalloc(rs->cont_size, GFP_KERNEL); if (!rs->buffer) { ret = -ENOMEM; goto out; } ret = -EIO; if (++rs->cont_loops >= RR_MAX_CE_ENTRIES) goto out; bh = sb_bread(rs->inode->i_sb, rs->cont_extent); if (bh) { memcpy(rs->buffer, bh->b_data + rs->cont_offset, rs->cont_size); put_bh(bh); rs->chr = rs->buffer; rs->len = rs->cont_size; rs->cont_extent = 0; rs->cont_size = 0; rs->cont_offset = 0; return 0; } printk("Unable to read rock-ridge attributes\n"); } out: kfree(rs->buffer); rs->buffer = NULL; return ret; }
166,236
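Editor's note on the entry above: the patch bounds how many Continuation Entries will be followed (RR_MAX_CE_ENTRIES), so a crafted image whose CE chain loops back on itself fails cleanly instead of spinning the kernel forever. A user-space sketch of the same guard on a generic linked structure; the constant and types are illustrative only.

    #include <stdio.h>

    #define MAX_CE_ENTRIES 32   /* mirrors the idea of the kernel's RR_MAX_CE_ENTRIES */

    struct ce_node {
        int payload;
        struct ce_node *next;   /* on disk this would be extent/offset/size */
    };

    /* Walk the continuation chain, refusing to follow more than
     * MAX_CE_ENTRIES links; a corrupted (looping) chain then returns
     * an error instead of hanging. */
    static int walk_chain(struct ce_node *head)
    {
        int hops = 0;
        for (struct ce_node *n = head; n != NULL; n = n->next) {
            if (++hops > MAX_CE_ENTRIES) {
                fprintf(stderr, "corrupted chain: too many continuation entries\n");
                return -1;
            }
            printf("entry %d: payload=%d\n", hops, n->payload);
        }
        return 0;
    }

    int main(void)
    {
        /* Build a deliberately looping two-node chain. */
        struct ce_node a = { 1, NULL }, b = { 2, NULL };
        a.next = &b;
        b.next = &a;            /* loop: without the cap this never terminates */
        return walk_chain(&a) == 0 ? 0 : 1;
    }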
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void umount_tree(struct mount *mnt, enum umount_tree_flags how) { LIST_HEAD(tmp_list); struct mount *p; if (how & UMOUNT_PROPAGATE) propagate_mount_unlock(mnt); /* Gather the mounts to umount */ for (p = mnt; p; p = next_mnt(p, mnt)) { p->mnt.mnt_flags |= MNT_UMOUNT; list_move(&p->mnt_list, &tmp_list); } /* Hide the mounts from mnt_mounts */ list_for_each_entry(p, &tmp_list, mnt_list) { list_del_init(&p->mnt_child); } /* Add propogated mounts to the tmp_list */ if (how & UMOUNT_PROPAGATE) propagate_umount(&tmp_list); while (!list_empty(&tmp_list)) { bool disconnect; p = list_first_entry(&tmp_list, struct mount, mnt_list); list_del_init(&p->mnt_expire); list_del_init(&p->mnt_list); __touch_mnt_namespace(p->mnt_ns); p->mnt_ns = NULL; if (how & UMOUNT_SYNC) p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; disconnect = disconnect_mount(p, how); pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, disconnect ? &unmounted : NULL); if (mnt_has_parent(p)) { mnt_add_count(p->mnt_parent, -1); if (!disconnect) { /* Don't forget about p */ list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts); } else { umount_mnt(p); } } change_mnt_propagation(p, MS_PRIVATE); } } Commit Message: mnt: Add a per mount namespace limit on the number of mounts CAI Qian <[email protected]> pointed out that the semantics of shared subtrees make it possible to create an exponentially increasing number of mounts in a mount namespace. mkdir /tmp/1 /tmp/2 mount --make-rshared / for i in $(seq 1 20) ; do mount --bind /tmp/1 /tmp/2 ; done Will create create 2^20 or 1048576 mounts, which is a practical problem as some people have managed to hit this by accident. As such CVE-2016-6213 was assigned. Ian Kent <[email protected]> described the situation for autofs users as follows: > The number of mounts for direct mount maps is usually not very large because of > the way they are implemented, large direct mount maps can have performance > problems. There can be anywhere from a few (likely case a few hundred) to less > than 10000, plus mounts that have been triggered and not yet expired. > > Indirect mounts have one autofs mount at the root plus the number of mounts that > have been triggered and not yet expired. > > The number of autofs indirect map entries can range from a few to the common > case of several thousand and in rare cases up to between 30000 and 50000. I've > not heard of people with maps larger than 50000 entries. > > The larger the number of map entries the greater the possibility for a large > number of active mounts so it's not hard to expect cases of a 1000 or somewhat > more active mounts. So I am setting the default number of mounts allowed per mount namespace at 100,000. This is more than enough for any use case I know of, but small enough to quickly stop an exponential increase in mounts. Which should be perfect to catch misconfigurations and malfunctioning programs. For anyone who needs a higher limit this can be changed by writing to the new /proc/sys/fs/mount-max sysctl. Tested-by: CAI Qian <[email protected]> Signed-off-by: "Eric W. Biederman" <[email protected]> CWE ID: CWE-400
static void umount_tree(struct mount *mnt, enum umount_tree_flags how) { LIST_HEAD(tmp_list); struct mount *p; if (how & UMOUNT_PROPAGATE) propagate_mount_unlock(mnt); /* Gather the mounts to umount */ for (p = mnt; p; p = next_mnt(p, mnt)) { p->mnt.mnt_flags |= MNT_UMOUNT; list_move(&p->mnt_list, &tmp_list); } /* Hide the mounts from mnt_mounts */ list_for_each_entry(p, &tmp_list, mnt_list) { list_del_init(&p->mnt_child); } /* Add propogated mounts to the tmp_list */ if (how & UMOUNT_PROPAGATE) propagate_umount(&tmp_list); while (!list_empty(&tmp_list)) { struct mnt_namespace *ns; bool disconnect; p = list_first_entry(&tmp_list, struct mount, mnt_list); list_del_init(&p->mnt_expire); list_del_init(&p->mnt_list); ns = p->mnt_ns; if (ns) { ns->mounts--; __touch_mnt_namespace(ns); } p->mnt_ns = NULL; if (how & UMOUNT_SYNC) p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; disconnect = disconnect_mount(p, how); pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, disconnect ? &unmounted : NULL); if (mnt_has_parent(p)) { mnt_add_count(p->mnt_parent, -1); if (!disconnect) { /* Don't forget about p */ list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts); } else { umount_mnt(p); } } change_mnt_propagation(p, MS_PRIVATE); } }
167,011
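Editor's note on the entry above: the commit message describes how bind mounts under a shared subtree roughly double the mount count on every iteration, and the fix keeps a per-namespace counter (with a default ceiling of 100,000, tunable via /proc/sys/fs/mount-max). The toy program below only illustrates that accounting idea outside the kernel; the struct, field, and limit names are made up for the sketch.

    #include <stdio.h>

    #define NS_MOUNT_MAX 100000   /* stand-in for the sysctl default */

    struct fake_ns {
        unsigned int mounts;      /* counterpart of the namespace's mount count */
    };

    /* Refuse to attach `count` new mounts if that would push the namespace
     * past its ceiling, the same shape of check the kernel now performs
     * before copying or propagating a mount tree. */
    static int ns_add_mounts(struct fake_ns *ns, unsigned int count)
    {
        if (count > NS_MOUNT_MAX - ns->mounts)
            return -1;            /* would exceed the limit */
        ns->mounts += count;
        return 0;
    }

    int main(void)
    {
        struct fake_ns ns = { 1 };
        unsigned int tree = 1;

        /* Model the exponential growth from the commit message:
         * each bind of the whole tree roughly doubles the mount count. */
        for (int i = 1; i <= 20; i++) {
            if (ns_add_mounts(&ns, tree) != 0) {
                printf("iteration %d rejected: %u mounts already present\n",
                       i, ns.mounts);
                return 0;
            }
            tree = ns.mounts;     /* the next bind copies everything mounted so far */
            printf("iteration %d: %u mounts\n", i, ns.mounts);
        }
        return 0;
    }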
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: PHP_METHOD(Phar, getSupportedCompression) { if (zend_parse_parameters_none() == FAILURE) { return; } array_init(return_value); phar_request_initialize(TSRMLS_C); if (PHAR_G(has_zlib)) { add_next_index_stringl(return_value, "GZ", 2, 1); } if (PHAR_G(has_bz2)) { add_next_index_stringl(return_value, "BZIP2", 5, 1); } } Commit Message: CWE ID: CWE-20
PHP_METHOD(Phar, getSupportedCompression) { if (zend_parse_parameters_none() == FAILURE) { return; } array_init(return_value); phar_request_initialize(TSRMLS_C); if (PHAR_G(has_zlib)) { add_next_index_stringl(return_value, "GZ", 2, 1); } if (PHAR_G(has_bz2)) { add_next_index_stringl(return_value, "BZIP2", 5, 1); } }
165,293
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: dtls1_process_buffered_records(SSL *s) { pitem *item; item = pqueue_peek(s->d1->unprocessed_rcds.q); if (item) { /* Check if epoch is current. */ if (s->d1->unprocessed_rcds.epoch != s->d1->r_epoch) return(1); /* Nothing to do. */ /* Process all the records. */ while (pqueue_peek(s->d1->unprocessed_rcds.q)) { dtls1_get_unprocessed_record(s); if ( ! dtls1_process_record(s)) return(0); dtls1_buffer_record(s, &(s->d1->processed_rcds), s->s3->rrec.seq_num); } } /* sync epoch numbers once all the unprocessed records * have been processed */ s->d1->processed_rcds.epoch = s->d1->r_epoch; s->d1->unprocessed_rcds.epoch = s->d1->r_epoch + 1; return(1); } Commit Message: A memory leak can occur in dtls1_buffer_record if either of the calls to ssl3_setup_buffers or pqueue_insert fail. The former will fail if there is a malloc failure, whilst the latter will fail if attempting to add a duplicate record to the queue. This should never happen because duplicate records should be detected and dropped before any attempt to add them to the queue. Unfortunately records that arrive that are for the next epoch are not being recorded correctly, and therefore replays are not being detected. Additionally, these "should not happen" failures that can occur in dtls1_buffer_record are not being treated as fatal and therefore an attacker could exploit this by sending repeated replay records for the next epoch, eventually causing a DoS through memory exhaustion. Thanks to Chris Mueller for reporting this issue and providing initial analysis and a patch. Further analysis and the final patch was performed by Matt Caswell from the OpenSSL development team. CVE-2015-0206 Reviewed-by: Dr Stephen Henson <[email protected]> CWE ID: CWE-119
dtls1_process_buffered_records(SSL *s) { pitem *item; item = pqueue_peek(s->d1->unprocessed_rcds.q); if (item) { /* Check if epoch is current. */ if (s->d1->unprocessed_rcds.epoch != s->d1->r_epoch) return(1); /* Nothing to do. */ /* Process all the records. */ while (pqueue_peek(s->d1->unprocessed_rcds.q)) { dtls1_get_unprocessed_record(s); if ( ! dtls1_process_record(s)) return(0); if(dtls1_buffer_record(s, &(s->d1->processed_rcds), s->s3->rrec.seq_num)<0) return -1; } } /* sync epoch numbers once all the unprocessed records * have been processed */ s->d1->processed_rcds.epoch = s->d1->r_epoch; s->d1->unprocessed_rcds.epoch = s->d1->r_epoch + 1; return(1); }
166,747
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: CURLcode Curl_auth_create_plain_message(struct Curl_easy *data, const char *userp, const char *passwdp, char **outptr, size_t *outlen) { CURLcode result; char *plainauth; size_t ulen; size_t plen; size_t plainlen; *outlen = 0; *outptr = NULL; ulen = strlen(userp); plen = strlen(passwdp); /* Compute binary message length. Check for overflows. */ if((ulen > SIZE_T_MAX/2) || (plen > (SIZE_T_MAX/2 - 2))) return CURLE_OUT_OF_MEMORY; plainlen = 2 * ulen + plen + 2; plainauth = malloc(plainlen); if(!plainauth) return CURLE_OUT_OF_MEMORY; /* Calculate the reply */ memcpy(plainauth, userp, ulen); plainauth[ulen] = '\0'; memcpy(plainauth + ulen + 1, userp, ulen); plainauth[2 * ulen + 1] = '\0'; memcpy(plainauth + 2 * ulen + 2, passwdp, plen); /* Base64 encode the reply */ result = Curl_base64_encode(data, plainauth, plainlen, outptr, outlen); free(plainauth); return result; } Commit Message: Curl_auth_create_plain_message: fix too-large-input-check CVE-2018-16839 Reported-by: Harry Sintonen Bug: https://curl.haxx.se/docs/CVE-2018-16839.html CWE ID: CWE-119
CURLcode Curl_auth_create_plain_message(struct Curl_easy *data, const char *userp, const char *passwdp, char **outptr, size_t *outlen) { CURLcode result; char *plainauth; size_t ulen; size_t plen; size_t plainlen; *outlen = 0; *outptr = NULL; ulen = strlen(userp); plen = strlen(passwdp); /* Compute binary message length. Check for overflows. */ if((ulen > SIZE_T_MAX/4) || (plen > (SIZE_T_MAX/2 - 2))) return CURLE_OUT_OF_MEMORY; plainlen = 2 * ulen + plen + 2; plainauth = malloc(plainlen); if(!plainauth) return CURLE_OUT_OF_MEMORY; /* Calculate the reply */ memcpy(plainauth, userp, ulen); plainauth[ulen] = '\0'; memcpy(plainauth + ulen + 1, userp, ulen); plainauth[2 * ulen + 1] = '\0'; memcpy(plainauth + 2 * ulen + 2, passwdp, plen); /* Base64 encode the reply */ result = Curl_base64_encode(data, plainauth, plainlen, outptr, outlen); free(plainauth); return result; }
169,031
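Editor's note on the entry above: the message length is 2*ulen + plen + 2, so the old guard of ulen > SIZE_T_MAX/2 was not strict enough; a username length just under SIZE_T_MAX/2, once doubled and padded, can still wrap around. Tightening the bound to SIZE_T_MAX/4 makes the sum provably safe. A small standalone check of that arithmetic (not libcurl code):

    #include <stdint.h>
    #include <stdio.h>

    /* Compute 2*ulen + plen + 2 only when it provably cannot wrap.
     * With ulen <= SIZE_MAX/4 the doubled term is at most about SIZE_MAX/2,
     * and with plen <= SIZE_MAX/2 - 2 the padded term is at most SIZE_MAX/2,
     * so the total stays below SIZE_MAX. */
    static int safe_plain_len(size_t ulen, size_t plen, size_t *out)
    {
        if (ulen > SIZE_MAX / 4 || plen > SIZE_MAX / 2 - 2)
            return -1;
        *out = 2 * ulen + plen + 2;
        return 0;
    }

    int main(void)
    {
        size_t len;

        if (safe_plain_len(5, 8, &len) == 0)
            printf("ok: %zu bytes needed\n", len);    /* 2*5 + 8 + 2 = 20 */

        /* A username length that passes the old SIZE_MAX/2 test but would
         * overflow once doubled and padded. */
        if (safe_plain_len(SIZE_MAX / 2 - 1, 8, &len) != 0)
            printf("rejected oversized username length\n");
        return 0;
    }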
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static Image *ReadOneDJVUImage(LoadContext* lc,const int pagenum, const ImageInfo *image_info,ExceptionInfo *exception) { ddjvu_page_type_t type; ddjvu_pageinfo_t info; ddjvu_message_t *message; Image *image; int logging; int tag; /* so, we know that the page is there! Get its dimension, and */ /* Read one DJVU image */ image = lc->image; /* register PixelPacket *q; */ logging=LogMagickEvent(CoderEvent,GetMagickModule(), " enter ReadOneDJVUImage()"); (void) logging; #if DEBUG printf("==== Loading the page %d\n", pagenum); #endif lc->page = ddjvu_page_create_by_pageno(lc->document, pagenum); /* 0? */ /* pump data untill the page is ready for rendering. */ tag=(-1); do { while ((message = ddjvu_message_peek(lc->context))) { tag=process_message(message); if (tag == 0) break; ddjvu_message_pop(lc->context); } /* fixme: maybe exit? */ /* if (lc->error) break; */ message = pump_data_until_message(lc,image); if (message) do { tag=process_message(message); if (tag == 0) break; ddjvu_message_pop(lc->context); } while ((message = ddjvu_message_peek(lc->context))); } while (!ddjvu_page_decoding_done(lc->page)); ddjvu_document_get_pageinfo(lc->document, pagenum, &info); image->x_resolution = (float) info.dpi; image->y_resolution =(float) info.dpi; if (image_info->density != (char *) NULL) { int flags; GeometryInfo geometry_info; /* Set rendering resolution. */ flags=ParseGeometry(image_info->density,&geometry_info); image->x_resolution=geometry_info.rho; image->y_resolution=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->y_resolution=image->x_resolution; info.width=(int) (info.width*image->x_resolution/info.dpi); info.height=(int) (info.height*image->y_resolution/info.dpi); info.dpi=(int) MagickMax(image->x_resolution,image->y_resolution); } type = ddjvu_page_get_type(lc->page); /* double -> float! */ /* image->gamma = (float)ddjvu_page_get_gamma(lc->page); */ /* mmc: set image->depth */ /* mmc: This from the type */ image->columns=(size_t) info.width; image->rows=(size_t) info.height; /* mmc: bitonal should be palettized, and compressed! */ if (type == DDJVU_PAGETYPE_BITONAL){ image->colorspace = GRAYColorspace; image->storage_class = PseudoClass; image->depth = 8UL; /* i only support that? */ image->colors= 2; if (AcquireImageColormap(image,image->colors) == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } else { image->colorspace = RGBColorspace; image->storage_class = DirectClass; /* fixme: MAGICKCORE_QUANTUM_DEPTH ?*/ image->depth = 8UL; /* i only support that? */ image->matte = MagickTrue; /* is this useful? */ } #if DEBUG printf("now filling %.20g x %.20g\n",(double) image->columns,(double) image->rows); #endif #if 1 /* per_line */ /* q = QueueAuthenticPixels(image,0,0,image->columns,image->rows); */ get_page_image(lc, lc->page, 0, 0, info.width, info.height, image_info); #else int i; for (i = 0;i< image->rows; i++) { printf("%d\n",i); q = QueueAuthenticPixels(image,0,i,image->columns,1); get_page_line(lc, i, quantum_info); SyncAuthenticPixels(image); } #endif /* per_line */ #if DEBUG printf("END: finished filling %.20g x %.20g\n",(double) image->columns, (double) image->rows); #endif if (!image->ping) SyncImage(image); /* indexes=GetAuthenticIndexQueue(image); */ /* mmc: ??? Convert PNM pixels to runlength-encoded MIFF packets. */ /* image->colors = */ /* how is the line padding / stride? 
*/ if (lc->page) { ddjvu_page_release(lc->page); lc->page = NULL; } /* image->page.y=mng_info->y_off[mng_info->object_id]; */ if (tag == 0) image=DestroyImage(image); return image; /* end of reading one DJVU page/image */ } Commit Message: CWE ID: CWE-119
static Image *ReadOneDJVUImage(LoadContext* lc,const int pagenum, const ImageInfo *image_info,ExceptionInfo *exception) { ddjvu_page_type_t type; ddjvu_pageinfo_t info; ddjvu_message_t *message; Image *image; int logging; int tag; MagickBooleanType status; /* so, we know that the page is there! Get its dimension, and */ /* Read one DJVU image */ image = lc->image; /* register PixelPacket *q; */ logging=LogMagickEvent(CoderEvent,GetMagickModule(), " enter ReadOneDJVUImage()"); (void) logging; #if DEBUG printf("==== Loading the page %d\n", pagenum); #endif lc->page = ddjvu_page_create_by_pageno(lc->document, pagenum); /* 0? */ /* pump data untill the page is ready for rendering. */ tag=(-1); do { while ((message = ddjvu_message_peek(lc->context))) { tag=process_message(message); if (tag == 0) break; ddjvu_message_pop(lc->context); } /* fixme: maybe exit? */ /* if (lc->error) break; */ message = pump_data_until_message(lc,image); if (message) do { tag=process_message(message); if (tag == 0) break; ddjvu_message_pop(lc->context); } while ((message = ddjvu_message_peek(lc->context))); } while (!ddjvu_page_decoding_done(lc->page)); ddjvu_document_get_pageinfo(lc->document, pagenum, &info); image->x_resolution = (float) info.dpi; image->y_resolution =(float) info.dpi; if (image_info->density != (char *) NULL) { int flags; GeometryInfo geometry_info; /* Set rendering resolution. */ flags=ParseGeometry(image_info->density,&geometry_info); image->x_resolution=geometry_info.rho; image->y_resolution=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->y_resolution=image->x_resolution; info.width=(int) (info.width*image->x_resolution/info.dpi); info.height=(int) (info.height*image->y_resolution/info.dpi); info.dpi=(int) MagickMax(image->x_resolution,image->y_resolution); } type = ddjvu_page_get_type(lc->page); /* double -> float! */ /* image->gamma = (float)ddjvu_page_get_gamma(lc->page); */ /* mmc: set image->depth */ /* mmc: This from the type */ image->columns=(size_t) info.width; image->rows=(size_t) info.height; /* mmc: bitonal should be palettized, and compressed! */ if (type == DDJVU_PAGETYPE_BITONAL){ image->colorspace = GRAYColorspace; image->storage_class = PseudoClass; image->depth = 8UL; /* i only support that? */ image->colors= 2; if (AcquireImageColormap(image,image->colors) == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } else { image->colorspace = RGBColorspace; image->storage_class = DirectClass; /* fixme: MAGICKCORE_QUANTUM_DEPTH ?*/ image->depth = 8UL; /* i only support that? */ image->matte = MagickTrue; /* is this useful? */ } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } #if DEBUG printf("now filling %.20g x %.20g\n",(double) image->columns,(double) image->rows); #endif #if 1 /* per_line */ /* q = QueueAuthenticPixels(image,0,0,image->columns,image->rows); */ get_page_image(lc, lc->page, 0, 0, info.width, info.height, image_info); #else int i; for (i = 0;i< image->rows; i++) { printf("%d\n",i); q = QueueAuthenticPixels(image,0,i,image->columns,1); get_page_line(lc, i, quantum_info); SyncAuthenticPixels(image); } #endif /* per_line */ #if DEBUG printf("END: finished filling %.20g x %.20g\n",(double) image->columns, (double) image->rows); #endif if (!image->ping) SyncImage(image); /* indexes=GetAuthenticIndexQueue(image); */ /* mmc: ??? Convert PNM pixels to runlength-encoded MIFF packets. 
*/ /* image->colors = */ /* how is the line padding / stride? */ if (lc->page) { ddjvu_page_release(lc->page); lc->page = NULL; } /* image->page.y=mng_info->y_off[mng_info->object_id]; */ if (tag == 0) image=DestroyImage(image); return image; /* end of reading one DJVU page/image */ }
168,558
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void GCInfoTable::Init() { CHECK(!g_gc_info_table); Resize(); } Commit Message: [oilpan] Fix GCInfoTable for multiple threads Previously, grow and access from different threads could lead to a race on the table backing; see bug. - Rework the table to work on an existing reservation. - Commit upon growing, avoiding any copies. Drive-by: Fix over-allocation of table. Bug: chromium:841280 Change-Id: I329cb6f40091e14e8c05334ba1104a9440c31d43 Reviewed-on: https://chromium-review.googlesource.com/1061525 Commit-Queue: Michael Lippautz <[email protected]> Reviewed-by: Kentaro Hara <[email protected]> Cr-Commit-Position: refs/heads/master@{#560434} CWE ID: CWE-362
GCInfoTable::GCInfoTable() { CHECK(!table_); table_ = reinterpret_cast<GCInfo const**>(base::AllocPages( nullptr, MaxTableSize(), base::kPageAllocationGranularity, base::PageInaccessible)); CHECK(table_); Resize(); }
173,135
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: socket_t *socket_accept(const socket_t *socket) { assert(socket != NULL); int fd = accept(socket->fd, NULL, NULL); if (fd == INVALID_FD) { LOG_ERROR("%s unable to accept socket: %s", __func__, strerror(errno)); return NULL; } socket_t *ret = (socket_t *)osi_calloc(sizeof(socket_t)); if (!ret) { close(fd); LOG_ERROR("%s unable to allocate memory for socket.", __func__); return NULL; } ret->fd = fd; return ret; } Commit Message: DO NOT MERGE Fix potential DoS caused by delivering signal to BT process Bug: 28885210 Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360 Conflicts: btif/co/bta_hh_co.c btif/src/btif_core.c Merge conflict resolution of ag/1161415 (referencing ag/1164670) - Directly into mnc-mr2-release CWE ID: CWE-284
socket_t *socket_accept(const socket_t *socket) { assert(socket != NULL); int fd = TEMP_FAILURE_RETRY(accept(socket->fd, NULL, NULL)); if (fd == INVALID_FD) { LOG_ERROR("%s unable to accept socket: %s", __func__, strerror(errno)); return NULL; } socket_t *ret = (socket_t *)osi_calloc(sizeof(socket_t)); if (!ret) { close(fd); LOG_ERROR("%s unable to allocate memory for socket.", __func__); return NULL; } ret->fd = fd; return ret; }
173,484
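Editor's note on the entry above: the fix wraps accept() in TEMP_FAILURE_RETRY so a signal delivered to the Bluetooth process cannot surface as a spurious EINTR failure; that glibc macro expands to a do/while loop that retries while the call returns -1 with errno == EINTR. The sketch below shows the same retry pattern written out by hand, demonstrated on read() over a pipe so it can run standalone; the identical loop applies to accept().

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Retry an interrupted call, in the spirit of TEMP_FAILURE_RETRY():
     * repeat while the result is -1 and errno is EINTR, so a signal aimed
     * at the process does not turn into a bogus I/O failure. */
    static ssize_t read_no_eintr(int fd, void *buf, size_t len)
    {
        ssize_t n;
        do {
            n = read(fd, buf, len);
        } while (n == -1 && errno == EINTR);
        return n;   /* -1 with a "real" errno, or the byte count */
    }

    int main(void)
    {
        int pipefd[2];
        char buf[16];

        if (pipe(pipefd) != 0)
            return 1;
        if (write(pipefd[1], "ping", 4) != 4)
            return 1;
        ssize_t n = read_no_eintr(pipefd[0], buf, sizeof(buf));
        printf("read %zd bytes: %.*s\n", n, (int)n, buf);
        close(pipefd[0]);
        close(pipefd[1]);
        return 0;
    }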
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int nntp_hcache_namer(const char *path, char *dest, size_t destlen) { return snprintf(dest, destlen, "%s.hcache", path); } Commit Message: sanitise cache paths Co-authored-by: JerikoOne <[email protected]> CWE ID: CWE-22
static int nntp_hcache_namer(const char *path, char *dest, size_t destlen) { int count = snprintf(dest, destlen, "%s.hcache", path); /* Strip out any directories in the path */ char *first = strchr(dest, '/'); char *last = strrchr(dest, '/'); if (first && last && (last > first)) { memmove(first, last, strlen(last) + 1); count -= (last - first); } return count; }
169,119
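Editor's note on the entry above: the fix strips the intermediate directory components out of the generated ".hcache" name, so a server- or group-controlled string containing slashes largely collapses to a single cache file name. A standalone sketch of that transformation; it mirrors the shape of the fix, not its exact code.

    #include <stdio.h>
    #include <string.h>

    /* Build "<name>.hcache", then drop everything from the first '/' up to,
     * but not including, the last '/', so only the leading component and the
     * final file name survive. */
    static int cache_namer(const char *name, char *dst, size_t dstlen)
    {
        int count = snprintf(dst, dstlen, "%s.hcache", name);

        char *first = strchr(dst, '/');
        char *last = strrchr(dst, '/');
        if (first && last && last > first) {
            memmove(first, last, strlen(last) + 1);   /* keep the final component */
            count -= (int)(last - first);
        }
        return count;
    }

    int main(void)
    {
        char buf[128];

        cache_namer("alt.binaries.test", buf, sizeof(buf));
        printf("%s\n", buf);   /* alt.binaries.test.hcache (no slashes, unchanged) */

        cache_namer("news.example.com/alt/binaries/test", buf, sizeof(buf));
        printf("%s\n", buf);   /* news.example.com/test.hcache (middle components dropped) */
        return 0;
    }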
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: GpuChannelHost* BrowserGpuChannelHostFactory::EstablishGpuChannelSync( CauseForGpuLaunch cause_for_gpu_launch) { if (gpu_channel_.get()) { if (gpu_channel_->state() == GpuChannelHost::kLost) gpu_channel_ = NULL; else return gpu_channel_.get(); } GpuDataManagerImpl::GetInstance(); EstablishRequest request; GetIOLoopProxy()->PostTask( FROM_HERE, base::Bind( &BrowserGpuChannelHostFactory::EstablishGpuChannelOnIO, base::Unretained(this), &request, cause_for_gpu_launch)); request.event.Wait(); if (request.channel_handle.name.empty() || request.gpu_process_handle == base::kNullProcessHandle) return NULL; base::ProcessHandle browser_process_for_gpu; #if defined(OS_WIN) DuplicateHandle(base::GetCurrentProcessHandle(), base::GetCurrentProcessHandle(), request.gpu_process_handle, &browser_process_for_gpu, PROCESS_DUP_HANDLE, FALSE, 0); #else browser_process_for_gpu = base::GetCurrentProcessHandle(); #endif gpu_channel_ = new GpuChannelHost(this, gpu_host_id_, gpu_client_id_); gpu_channel_->set_gpu_info(request.gpu_info); content::GetContentClient()->SetGpuInfo(request.gpu_info); gpu_channel_->Connect(request.channel_handle, browser_process_for_gpu); return gpu_channel_.get(); } Commit Message: Convert plugin and GPU process to brokered handle duplication. BUG=119250 Review URL: https://chromiumcodereview.appspot.com/9958034 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@132303 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID:
GpuChannelHost* BrowserGpuChannelHostFactory::EstablishGpuChannelSync( CauseForGpuLaunch cause_for_gpu_launch) { if (gpu_channel_.get()) { if (gpu_channel_->state() == GpuChannelHost::kLost) gpu_channel_ = NULL; else return gpu_channel_.get(); } GpuDataManagerImpl::GetInstance(); EstablishRequest request; GetIOLoopProxy()->PostTask( FROM_HERE, base::Bind( &BrowserGpuChannelHostFactory::EstablishGpuChannelOnIO, base::Unretained(this), &request, cause_for_gpu_launch)); request.event.Wait(); if (request.channel_handle.name.empty()) return NULL; gpu_channel_ = new GpuChannelHost(this, gpu_host_id_, gpu_client_id_); gpu_channel_->set_gpu_info(request.gpu_info); content::GetContentClient()->SetGpuInfo(request.gpu_info); gpu_channel_->Connect(request.channel_handle); return gpu_channel_.get(); }
170,917
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: pgp_enumerate_blob(sc_card_t *card, pgp_blob_t *blob) { const u8 *in; int r; if (blob->files != NULL) return SC_SUCCESS; if ((r = pgp_read_blob(card, blob)) < 0) return r; in = blob->data; while ((int) blob->len > (in - blob->data)) { unsigned int cla, tag, tmptag; size_t len; const u8 *data = in; pgp_blob_t *new; r = sc_asn1_read_tag(&data, blob->len - (in - blob->data), &cla, &tag, &len); if (r < 0 || data == NULL) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Unexpected end of contents\n"); return SC_ERROR_OBJECT_NOT_VALID; } /* undo ASN1's split of tag & class */ for (tmptag = tag; tmptag > 0x0FF; tmptag >>= 8) { cla <<= 8; } tag |= cla; /* Awful hack for composite DOs that have * a TLV with the DO's id encompassing the * entire blob. Example: Yubikey Neo */ if (tag == blob->id) { in = data; continue; } /* create fake file system hierarchy by * using constructed DOs as DF */ if ((new = pgp_new_blob(card, blob, tag, sc_file_new())) == NULL) return SC_ERROR_OUT_OF_MEMORY; pgp_set_blob(new, data, len); in = data + len; } return SC_SUCCESS; } Commit Message: fixed out of bounds reads Thanks to Eric Sesterhenn from X41 D-SEC GmbH for reporting and suggesting security fixes. CWE ID: CWE-125
pgp_enumerate_blob(sc_card_t *card, pgp_blob_t *blob) { const u8 *in; int r; if (blob->files != NULL) return SC_SUCCESS; if ((r = pgp_read_blob(card, blob)) < 0) return r; in = blob->data; while ((int) blob->len > (in - blob->data)) { unsigned int cla, tag, tmptag; size_t len; const u8 *data = in; pgp_blob_t *new; if (!in) return SC_ERROR_OBJECT_NOT_VALID; r = sc_asn1_read_tag(&data, blob->len - (in - blob->data), &cla, &tag, &len); if (r < 0 || data == NULL) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Unexpected end of contents\n"); return SC_ERROR_OBJECT_NOT_VALID; } if (data + len > blob->data + blob->len) return SC_ERROR_OBJECT_NOT_VALID; /* undo ASN1's split of tag & class */ for (tmptag = tag; tmptag > 0x0FF; tmptag >>= 8) { cla <<= 8; } tag |= cla; /* Awful hack for composite DOs that have * a TLV with the DO's id encompassing the * entire blob. Example: Yubikey Neo */ if (tag == blob->id) { in = data; continue; } /* create fake file system hierarchy by * using constructed DOs as DF */ if ((new = pgp_new_blob(card, blob, tag, sc_file_new())) == NULL) return SC_ERROR_OUT_OF_MEMORY; pgp_set_blob(new, data, len); in = data + len; } return SC_SUCCESS; }
169,060
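Editor's note on the entry above: the fix adds two guards while walking the TLV blob, a NULL check on the cursor and a check that the claimed length does not run past the end of the buffer (data + len must stay within blob->data + blob->len). The sketch below applies the same bounds discipline to a simplified 1-byte-tag/1-byte-length TLV stream; it is a generic illustration, not OpenSC's parser.

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    /* Walk a simple TLV stream: 1-byte tag, 1-byte length, `len` value bytes.
     * Every access is checked against the end of the buffer before it happens. */
    static int walk_tlv(const uint8_t *buf, size_t buflen)
    {
        const uint8_t *p = buf;
        const uint8_t *end = buf + buflen;

        while (p < end) {
            if (end - p < 2)
                return -1;                 /* header would run off the end */
            uint8_t tag = p[0];
            size_t len = p[1];
            p += 2;
            if ((size_t)(end - p) < len)
                return -1;                 /* value would run off the end */
            printf("tag 0x%02x, %zu byte(s) of value\n", tag, len);
            p += len;
        }
        return 0;
    }

    int main(void)
    {
        /* Second record of `bad` claims 0x40 bytes but only 1 remains: rejected. */
        const uint8_t good[] = { 0x5e, 0x02, 0xaa, 0xbb, 0x65, 0x01, 0xcc };
        const uint8_t bad[]  = { 0x5e, 0x02, 0xaa, 0xbb, 0x65, 0x40, 0xcc };

        printf("good stream: %d\n", walk_tlv(good, sizeof(good)));
        printf("bad stream:  %d\n", walk_tlv(bad, sizeof(bad)));
        return 0;
    }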
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int SoundPool::load(int fd, int64_t offset, int64_t length, int priority __unused) { ALOGV("load: fd=%d, offset=%" PRId64 ", length=%" PRId64 ", priority=%d", fd, offset, length, priority); Mutex::Autolock lock(&mLock); sp<Sample> sample = new Sample(++mNextSampleID, fd, offset, length); mSamples.add(sample->sampleID(), sample); doLoad(sample); return sample->sampleID(); } Commit Message: DO NOT MERGE SoundPool: add lock for findSample access from SoundPoolThread Sample decoding still occurs in SoundPoolThread without holding the SoundPool lock. Bug: 25781119 Change-Id: I11fde005aa9cf5438e0390a0d2dfe0ec1dd282e8 CWE ID: CWE-264
int SoundPool::load(int fd, int64_t offset, int64_t length, int priority __unused) { ALOGV("load: fd=%d, offset=%" PRId64 ", length=%" PRId64 ", priority=%d", fd, offset, length, priority);
173,962
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int empty_write_end(struct page *page, unsigned from, unsigned to, int mode) { struct inode *inode = page->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); struct buffer_head *bh; unsigned offset, blksize = 1 << inode->i_blkbits; pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT; zero_user(page, from, to-from); mark_page_accessed(page); if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) { if (!gfs2_is_writeback(ip)) gfs2_page_add_databufs(ip, page, from, to); block_commit_write(page, from, to); return 0; } offset = 0; bh = page_buffers(page); while (offset < to) { if (offset >= from) { set_buffer_uptodate(bh); mark_buffer_dirty(bh); clear_buffer_new(bh); write_dirty_buffer(bh, WRITE); } offset += blksize; bh = bh->b_this_page; } offset = 0; bh = page_buffers(page); while (offset < to) { if (offset >= from) { wait_on_buffer(bh); if (!buffer_uptodate(bh)) return -EIO; } offset += blksize; bh = bh->b_this_page; } return 0; } Commit Message: GFS2: rewrite fallocate code to write blocks directly GFS2's fallocate code currently goes through the page cache. Since it's only writing to the end of the file or to holes in it, it doesn't need to, and it was causing issues on low memory environments. This patch pulls in some of Steve's block allocation work, and uses it to simply allocate the blocks for the file, and zero them out at allocation time. It provides a slight performance increase, and it dramatically simplifies the code. Signed-off-by: Benjamin Marzinski <[email protected]> Signed-off-by: Steven Whitehouse <[email protected]> CWE ID: CWE-119
static int empty_write_end(struct page *page, unsigned from,
166,211
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int main(int argc __unused, char** argv) { signal(SIGPIPE, SIG_IGN); char value[PROPERTY_VALUE_MAX]; bool doLog = (property_get("ro.test_harness", value, "0") > 0) && (atoi(value) == 1); pid_t childPid; if (doLog && (childPid = fork()) != 0) { strcpy(argv[0], "media.log"); sp<ProcessState> proc(ProcessState::self()); MediaLogService::instantiate(); ProcessState::self()->startThreadPool(); for (;;) { siginfo_t info; int ret = waitid(P_PID, childPid, &info, WEXITED | WSTOPPED | WCONTINUED); if (ret == EINTR) { continue; } if (ret < 0) { break; } char buffer[32]; const char *code; switch (info.si_code) { case CLD_EXITED: code = "CLD_EXITED"; break; case CLD_KILLED: code = "CLD_KILLED"; break; case CLD_DUMPED: code = "CLD_DUMPED"; break; case CLD_STOPPED: code = "CLD_STOPPED"; break; case CLD_TRAPPED: code = "CLD_TRAPPED"; break; case CLD_CONTINUED: code = "CLD_CONTINUED"; break; default: snprintf(buffer, sizeof(buffer), "unknown (%d)", info.si_code); code = buffer; break; } struct rusage usage; getrusage(RUSAGE_CHILDREN, &usage); ALOG(LOG_ERROR, "media.log", "pid %d status %d code %s user %ld.%03lds sys %ld.%03lds", info.si_pid, info.si_status, code, usage.ru_utime.tv_sec, usage.ru_utime.tv_usec / 1000, usage.ru_stime.tv_sec, usage.ru_stime.tv_usec / 1000); sp<IServiceManager> sm = defaultServiceManager(); sp<IBinder> binder = sm->getService(String16("media.log")); if (binder != 0) { Vector<String16> args; binder->dump(-1, args); } switch (info.si_code) { case CLD_EXITED: case CLD_KILLED: case CLD_DUMPED: { ALOG(LOG_INFO, "media.log", "exiting"); _exit(0); } default: break; } } } else { if (doLog) { prctl(PR_SET_PDEATHSIG, SIGKILL); // if parent media.log dies before me, kill me also setpgid(0, 0); // but if I die first, don't kill my parent } InitializeIcuOrDie(); sp<ProcessState> proc(ProcessState::self()); sp<IServiceManager> sm = defaultServiceManager(); ALOGI("ServiceManager: %p", sm.get()); AudioFlinger::instantiate(); MediaPlayerService::instantiate(); ResourceManagerService::instantiate(); CameraService::instantiate(); AudioPolicyService::instantiate(); SoundTriggerHwService::instantiate(); RadioService::instantiate(); registerExtensions(); ProcessState::self()->startThreadPool(); IPCThreadState::self()->joinThreadPool(); } } Commit Message: limit mediaserver memory Limit mediaserver using rlimit, to prevent it from bringing down the system via the low memory killer. Default max is 65% of total RAM, but can be customized via system property. Bug: 28471206 Bug: 28615448 Change-Id: Ic84137435d1ef0a6883e9789a4b4f399e4283f05 CWE ID: CWE-399
int main(int argc __unused, char** argv) { limitProcessMemory( "ro.media.maxmem", /* property that defines limit */ SIZE_MAX, /* upper limit in bytes */ 65 /* upper limit as percentage of physical RAM */); signal(SIGPIPE, SIG_IGN); char value[PROPERTY_VALUE_MAX]; bool doLog = (property_get("ro.test_harness", value, "0") > 0) && (atoi(value) == 1); pid_t childPid; if (doLog && (childPid = fork()) != 0) { strcpy(argv[0], "media.log"); sp<ProcessState> proc(ProcessState::self()); MediaLogService::instantiate(); ProcessState::self()->startThreadPool(); for (;;) { siginfo_t info; int ret = waitid(P_PID, childPid, &info, WEXITED | WSTOPPED | WCONTINUED); if (ret == EINTR) { continue; } if (ret < 0) { break; } char buffer[32]; const char *code; switch (info.si_code) { case CLD_EXITED: code = "CLD_EXITED"; break; case CLD_KILLED: code = "CLD_KILLED"; break; case CLD_DUMPED: code = "CLD_DUMPED"; break; case CLD_STOPPED: code = "CLD_STOPPED"; break; case CLD_TRAPPED: code = "CLD_TRAPPED"; break; case CLD_CONTINUED: code = "CLD_CONTINUED"; break; default: snprintf(buffer, sizeof(buffer), "unknown (%d)", info.si_code); code = buffer; break; } struct rusage usage; getrusage(RUSAGE_CHILDREN, &usage); ALOG(LOG_ERROR, "media.log", "pid %d status %d code %s user %ld.%03lds sys %ld.%03lds", info.si_pid, info.si_status, code, usage.ru_utime.tv_sec, usage.ru_utime.tv_usec / 1000, usage.ru_stime.tv_sec, usage.ru_stime.tv_usec / 1000); sp<IServiceManager> sm = defaultServiceManager(); sp<IBinder> binder = sm->getService(String16("media.log")); if (binder != 0) { Vector<String16> args; binder->dump(-1, args); } switch (info.si_code) { case CLD_EXITED: case CLD_KILLED: case CLD_DUMPED: { ALOG(LOG_INFO, "media.log", "exiting"); _exit(0); } default: break; } } } else { if (doLog) { prctl(PR_SET_PDEATHSIG, SIGKILL); // if parent media.log dies before me, kill me also setpgid(0, 0); // but if I die first, don't kill my parent } InitializeIcuOrDie(); sp<ProcessState> proc(ProcessState::self()); sp<IServiceManager> sm = defaultServiceManager(); ALOGI("ServiceManager: %p", sm.get()); AudioFlinger::instantiate(); MediaPlayerService::instantiate(); ResourceManagerService::instantiate(); CameraService::instantiate(); AudioPolicyService::instantiate(); SoundTriggerHwService::instantiate(); RadioService::instantiate(); registerExtensions(); ProcessState::self()->startThreadPool(); IPCThreadState::self()->joinThreadPool(); } }
173,564
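Editor's note on the entry above: limitProcessMemory() caps mediaserver's memory so runaway allocations fail inside the process instead of triggering the low-memory killer. The sketch below shows one way such a cap can be applied with setrlimit(RLIMIT_AS); the helper name and percentage are illustrative, and _SC_PHYS_PAGES is a glibc/Linux extension assumed for the example.

    #include <stdio.h>
    #include <sys/resource.h>
    #include <unistd.h>

    /* Cap this process's address space at `percent` of physical RAM,
     * roughly the shape of what a property-configurable limit would do.
     * Returns 0 on success. */
    static int limit_process_memory(unsigned int percent)
    {
        long pages = sysconf(_SC_PHYS_PAGES);
        long page_size = sysconf(_SC_PAGE_SIZE);
        if (pages <= 0 || page_size <= 0)
            return -1;

        unsigned long long phys =
            (unsigned long long)pages * (unsigned long long)page_size;
        struct rlimit lim;
        lim.rlim_cur = (rlim_t)(phys / 100 * percent);
        lim.rlim_max = lim.rlim_cur;
        return setrlimit(RLIMIT_AS, &lim);
    }

    int main(void)
    {
        if (limit_process_memory(65) != 0) {
            perror("setrlimit");
            return 1;
        }
        /* Allocations past the cap now fail instead of starving the system. */
        printf("address-space limit installed\n");
        return 0;
    }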
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool WorkerProcessLauncherTest::LaunchProcess( IPC::Listener* delegate, ScopedHandle* process_exit_event_out) { process_exit_event_.Set(CreateEvent(NULL, TRUE, FALSE, NULL)); if (!process_exit_event_.IsValid()) return false; channel_name_ = GenerateIpcChannelName(this); if (!CreateIpcChannel(channel_name_, kIpcSecurityDescriptor, task_runner_, delegate, &channel_server_)) { return false; } exit_code_ = STILL_ACTIVE; return DuplicateHandle(GetCurrentProcess(), process_exit_event_, GetCurrentProcess(), process_exit_event_out->Receive(), 0, FALSE, DUPLICATE_SAME_ACCESS) != FALSE; } Commit Message: Validate and report peer's PID to WorkerProcessIpcDelegate so it will be able to duplicate handles to and from the worker process. As a side effect WorkerProcessLauncher::Delegate is now responsible for retrieving the client's PID and deciding whether a launch failed due to a permanent error condition. BUG=134694 Review URL: https://chromiumcodereview.appspot.com/11143025 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@162778 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
bool WorkerProcessLauncherTest::LaunchProcess( IPC::Listener* delegate, ScopedHandle* process_exit_event_out) { process_exit_event_.Set(CreateEvent(NULL, TRUE, FALSE, NULL)); if (!process_exit_event_.IsValid()) return false; channel_name_ = GenerateIpcChannelName(this); ScopedHandle pipe; if (!CreateIpcChannel(channel_name_, kIpcSecurityDescriptor, &pipe)) { return false; } // Wrap the pipe into an IPC channel. channel_server_.reset(new IPC::ChannelProxy( IPC::ChannelHandle(pipe), IPC::Channel::MODE_SERVER, delegate, task_runner_)); return DuplicateHandle(GetCurrentProcess(), process_exit_event_, GetCurrentProcess(), process_exit_event_out->Receive(), 0, FALSE, DUPLICATE_SAME_ACCESS) != FALSE; }
171,551
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: ip6_print(netdissect_options *ndo, const u_char *bp, u_int length) { register const struct ip6_hdr *ip6; register int advance; u_int len; const u_char *ipend; register const u_char *cp; register u_int payload_len; int nh; int fragmented = 0; u_int flow; ip6 = (const struct ip6_hdr *)bp; ND_TCHECK(*ip6); if (length < sizeof (struct ip6_hdr)) { ND_PRINT((ndo, "truncated-ip6 %u", length)); return; } if (!ndo->ndo_eflag) ND_PRINT((ndo, "IP6 ")); if (IP6_VERSION(ip6) != 6) { ND_PRINT((ndo,"version error: %u != 6", IP6_VERSION(ip6))); return; } payload_len = EXTRACT_16BITS(&ip6->ip6_plen); len = payload_len + sizeof(struct ip6_hdr); if (length < len) ND_PRINT((ndo, "truncated-ip6 - %u bytes missing!", len - length)); if (ndo->ndo_vflag) { flow = EXTRACT_32BITS(&ip6->ip6_flow); ND_PRINT((ndo, "(")); #if 0 /* rfc1883 */ if (flow & 0x0f000000) ND_PRINT((ndo, "pri 0x%02x, ", (flow & 0x0f000000) >> 24)); if (flow & 0x00ffffff) ND_PRINT((ndo, "flowlabel 0x%06x, ", flow & 0x00ffffff)); #else /* RFC 2460 */ if (flow & 0x0ff00000) ND_PRINT((ndo, "class 0x%02x, ", (flow & 0x0ff00000) >> 20)); if (flow & 0x000fffff) ND_PRINT((ndo, "flowlabel 0x%05x, ", flow & 0x000fffff)); #endif ND_PRINT((ndo, "hlim %u, next-header %s (%u) payload length: %u) ", ip6->ip6_hlim, tok2str(ipproto_values,"unknown",ip6->ip6_nxt), ip6->ip6_nxt, payload_len)); } /* * Cut off the snapshot length to the end of the IP payload. */ ipend = bp + len; if (ipend < ndo->ndo_snapend) ndo->ndo_snapend = ipend; cp = (const u_char *)ip6; advance = sizeof(struct ip6_hdr); nh = ip6->ip6_nxt; while (cp < ndo->ndo_snapend && advance > 0) { cp += advance; len -= advance; if (cp == (const u_char *)(ip6 + 1) && nh != IPPROTO_TCP && nh != IPPROTO_UDP && nh != IPPROTO_DCCP && nh != IPPROTO_SCTP) { ND_PRINT((ndo, "%s > %s: ", ip6addr_string(ndo, &ip6->ip6_src), ip6addr_string(ndo, &ip6->ip6_dst))); } switch (nh) { case IPPROTO_HOPOPTS: advance = hbhopt_print(ndo, cp); if (advance < 0) return; nh = *cp; break; case IPPROTO_DSTOPTS: advance = dstopt_print(ndo, cp); if (advance < 0) return; nh = *cp; break; case IPPROTO_FRAGMENT: advance = frag6_print(ndo, cp, (const u_char *)ip6); if (advance < 0 || ndo->ndo_snapend <= cp + advance) return; nh = *cp; fragmented = 1; break; case IPPROTO_MOBILITY_OLD: case IPPROTO_MOBILITY: /* * XXX - we don't use "advance"; RFC 3775 says that * the next header field in a mobility header * should be IPPROTO_NONE, but speaks of * the possiblity of a future extension in * which payload can be piggybacked atop a * mobility header. */ advance = mobility_print(ndo, cp, (const u_char *)ip6); nh = *cp; return; case IPPROTO_ROUTING: advance = rt6_print(ndo, cp, (const u_char *)ip6); nh = *cp; break; case IPPROTO_SCTP: sctp_print(ndo, cp, (const u_char *)ip6, len); return; case IPPROTO_DCCP: dccp_print(ndo, cp, (const u_char *)ip6, len); return; case IPPROTO_TCP: tcp_print(ndo, cp, len, (const u_char *)ip6, fragmented); return; case IPPROTO_UDP: udp_print(ndo, cp, len, (const u_char *)ip6, fragmented); return; case IPPROTO_ICMPV6: icmp6_print(ndo, cp, len, (const u_char *)ip6, fragmented); return; case IPPROTO_AH: advance = ah_print(ndo, cp); nh = *cp; break; case IPPROTO_ESP: { int enh, padlen; advance = esp_print(ndo, cp, len, (const u_char *)ip6, &enh, &padlen); nh = enh & 0xff; len -= padlen; break; } case IPPROTO_IPCOMP: { ipcomp_print(ndo, cp); /* * Either this has decompressed the payload and * printed it, in which case there's nothing more * to do, or it hasn't, in which case there's * nothing more to do. 
*/ advance = -1; break; } case IPPROTO_PIM: pim_print(ndo, cp, len, (const u_char *)ip6); return; case IPPROTO_OSPF: ospf6_print(ndo, cp, len); return; case IPPROTO_IPV6: ip6_print(ndo, cp, len); return; case IPPROTO_IPV4: ip_print(ndo, cp, len); return; case IPPROTO_PGM: pgm_print(ndo, cp, len, (const u_char *)ip6); return; case IPPROTO_GRE: gre_print(ndo, cp, len); return; case IPPROTO_RSVP: rsvp_print(ndo, cp, len); return; case IPPROTO_NONE: ND_PRINT((ndo, "no next header")); return; default: ND_PRINT((ndo, "ip-proto-%d %d", nh, len)); return; } } return; trunc: ND_PRINT((ndo, "[|ip6]")); } Commit Message: CVE-2017-12985/IPv6: Check for print routines returning -1 when running past the end. rt6_print(), ah_print(), and esp_print() return -1 if they run up against the end of the packet while dissecting; if that happens, stop dissecting, don't try to fetch the next header value, because 1) *it* might be past the end of the packet and 2) we won't be using it in any case, as we'll be exiting the loop. Also, change mobility_print() to return -1 if it runs up against the end of the packet, and stop dissecting if it does so. This fixes a buffer over-read discovered by Brian 'geeknik' Carpenter. Add tests using the capture files supplied by the reporter(s). CWE ID: CWE-125
ip6_print(netdissect_options *ndo, const u_char *bp, u_int length) { register const struct ip6_hdr *ip6; register int advance; u_int len; const u_char *ipend; register const u_char *cp; register u_int payload_len; int nh; int fragmented = 0; u_int flow; ip6 = (const struct ip6_hdr *)bp; ND_TCHECK(*ip6); if (length < sizeof (struct ip6_hdr)) { ND_PRINT((ndo, "truncated-ip6 %u", length)); return; } if (!ndo->ndo_eflag) ND_PRINT((ndo, "IP6 ")); if (IP6_VERSION(ip6) != 6) { ND_PRINT((ndo,"version error: %u != 6", IP6_VERSION(ip6))); return; } payload_len = EXTRACT_16BITS(&ip6->ip6_plen); len = payload_len + sizeof(struct ip6_hdr); if (length < len) ND_PRINT((ndo, "truncated-ip6 - %u bytes missing!", len - length)); if (ndo->ndo_vflag) { flow = EXTRACT_32BITS(&ip6->ip6_flow); ND_PRINT((ndo, "(")); #if 0 /* rfc1883 */ if (flow & 0x0f000000) ND_PRINT((ndo, "pri 0x%02x, ", (flow & 0x0f000000) >> 24)); if (flow & 0x00ffffff) ND_PRINT((ndo, "flowlabel 0x%06x, ", flow & 0x00ffffff)); #else /* RFC 2460 */ if (flow & 0x0ff00000) ND_PRINT((ndo, "class 0x%02x, ", (flow & 0x0ff00000) >> 20)); if (flow & 0x000fffff) ND_PRINT((ndo, "flowlabel 0x%05x, ", flow & 0x000fffff)); #endif ND_PRINT((ndo, "hlim %u, next-header %s (%u) payload length: %u) ", ip6->ip6_hlim, tok2str(ipproto_values,"unknown",ip6->ip6_nxt), ip6->ip6_nxt, payload_len)); } /* * Cut off the snapshot length to the end of the IP payload. */ ipend = bp + len; if (ipend < ndo->ndo_snapend) ndo->ndo_snapend = ipend; cp = (const u_char *)ip6; advance = sizeof(struct ip6_hdr); nh = ip6->ip6_nxt; while (cp < ndo->ndo_snapend && advance > 0) { if (len < (u_int)advance) goto trunc; cp += advance; len -= advance; if (cp == (const u_char *)(ip6 + 1) && nh != IPPROTO_TCP && nh != IPPROTO_UDP && nh != IPPROTO_DCCP && nh != IPPROTO_SCTP) { ND_PRINT((ndo, "%s > %s: ", ip6addr_string(ndo, &ip6->ip6_src), ip6addr_string(ndo, &ip6->ip6_dst))); } switch (nh) { case IPPROTO_HOPOPTS: advance = hbhopt_print(ndo, cp); if (advance < 0) return; nh = *cp; break; case IPPROTO_DSTOPTS: advance = dstopt_print(ndo, cp); if (advance < 0) return; nh = *cp; break; case IPPROTO_FRAGMENT: advance = frag6_print(ndo, cp, (const u_char *)ip6); if (advance < 0 || ndo->ndo_snapend <= cp + advance) return; nh = *cp; fragmented = 1; break; case IPPROTO_MOBILITY_OLD: case IPPROTO_MOBILITY: /* * XXX - we don't use "advance"; RFC 3775 says that * the next header field in a mobility header * should be IPPROTO_NONE, but speaks of * the possiblity of a future extension in * which payload can be piggybacked atop a * mobility header. 
*/ advance = mobility_print(ndo, cp, (const u_char *)ip6); if (advance < 0) return; nh = *cp; return; case IPPROTO_ROUTING: ND_TCHECK(*cp); advance = rt6_print(ndo, cp, (const u_char *)ip6); if (advance < 0) return; nh = *cp; break; case IPPROTO_SCTP: sctp_print(ndo, cp, (const u_char *)ip6, len); return; case IPPROTO_DCCP: dccp_print(ndo, cp, (const u_char *)ip6, len); return; case IPPROTO_TCP: tcp_print(ndo, cp, len, (const u_char *)ip6, fragmented); return; case IPPROTO_UDP: udp_print(ndo, cp, len, (const u_char *)ip6, fragmented); return; case IPPROTO_ICMPV6: icmp6_print(ndo, cp, len, (const u_char *)ip6, fragmented); return; case IPPROTO_AH: advance = ah_print(ndo, cp); if (advance < 0) return; nh = *cp; break; case IPPROTO_ESP: { int enh, padlen; advance = esp_print(ndo, cp, len, (const u_char *)ip6, &enh, &padlen); if (advance < 0) return; nh = enh & 0xff; len -= padlen; break; } case IPPROTO_IPCOMP: { ipcomp_print(ndo, cp); /* * Either this has decompressed the payload and * printed it, in which case there's nothing more * to do, or it hasn't, in which case there's * nothing more to do. */ advance = -1; break; } case IPPROTO_PIM: pim_print(ndo, cp, len, (const u_char *)ip6); return; case IPPROTO_OSPF: ospf6_print(ndo, cp, len); return; case IPPROTO_IPV6: ip6_print(ndo, cp, len); return; case IPPROTO_IPV4: ip_print(ndo, cp, len); return; case IPPROTO_PGM: pgm_print(ndo, cp, len, (const u_char *)ip6); return; case IPPROTO_GRE: gre_print(ndo, cp, len); return; case IPPROTO_RSVP: rsvp_print(ndo, cp, len); return; case IPPROTO_NONE: ND_PRINT((ndo, "no next header")); return; default: ND_PRINT((ndo, "ip-proto-%d %d", nh, len)); return; } } return; trunc: ND_PRINT((ndo, "[|ip6]")); }
167,930
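Editor's note on the entry above: two changes recur in this fix, the return value of each per-header print routine is checked for -1 before it is used, and `len` is compared against `advance` before being decremented so it cannot wrap below zero. The sketch below shows that header-walking discipline on a made-up header chain; the parser is generic and not tcpdump's real code.

    #include <stdio.h>

    /* Hypothetical per-header parser: returns the number of bytes consumed,
     * or -1 if the header runs past the captured data. */
    static int parse_one_header(const unsigned char *p, unsigned int remaining)
    {
        if (remaining < 2)
            return -1;
        unsigned int hdr_len = p[1];
        if (hdr_len < 2 || hdr_len > remaining)
            return -1;
        return (int)hdr_len;
    }

    /* Walk a chain of headers, stopping cleanly on truncation instead of
     * letting `len` underflow or reading past the buffer. */
    static void walk_headers(const unsigned char *p, unsigned int len)
    {
        while (len > 0) {
            int advance = parse_one_header(p, len);
            if (advance < 0) {                 /* parser hit the end: stop */
                printf("[truncated]\n");
                return;
            }
            if (len < (unsigned int)advance) { /* belt and braces, as in the fix */
                printf("[truncated]\n");
                return;
            }
            printf("header of %d bytes, next-type 0x%02x\n", advance, p[0]);
            p += advance;
            len -= advance;
        }
    }

    int main(void)
    {
        /* type/length pairs; the last header claims more bytes than remain. */
        const unsigned char pkt[] = { 0x3a, 0x04, 0x00, 0x00, 0x11, 0x08, 0x00 };
        walk_headers(pkt, sizeof(pkt));
        return 0;
    }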
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: long FS_FOpenFileRead(const char *filename, fileHandle_t *file, qboolean uniqueFILE) { searchpath_t *search; long len; if(!fs_searchpaths) Com_Error(ERR_FATAL, "Filesystem call made without initialization"); for(search = fs_searchpaths; search; search = search->next) { len = FS_FOpenFileReadDir(filename, search, file, uniqueFILE, qfalse); if(file == NULL) { if(len > 0) return len; } else { if(len >= 0 && *file) return len; } } #ifdef FS_MISSING if(missingFiles) fprintf(missingFiles, "%s\n", filename); #endif if(file) { *file = 0; return -1; } else { return 0; } } Commit Message: All: Don't load .pk3s as .dlls, and don't load user config files from .pk3s CWE ID: CWE-269
long FS_FOpenFileRead(const char *filename, fileHandle_t *file, qboolean uniqueFILE) { searchpath_t *search; long len; qboolean isLocalConfig; if(!fs_searchpaths) Com_Error(ERR_FATAL, "Filesystem call made without initialization"); isLocalConfig = !strcmp(filename, "autoexec.cfg") || !strcmp(filename, Q3CONFIG_CFG); for(search = fs_searchpaths; search; search = search->next) { // autoexec.cfg and wolfconfig_mp.cfg can only be loaded outside of pk3 files. if (isLocalConfig && search->pack) continue; len = FS_FOpenFileReadDir(filename, search, file, uniqueFILE, qfalse); if(file == NULL) { if(len > 0) return len; } else { if(len >= 0 && *file) return len; } } #ifdef FS_MISSING if(missingFiles) fprintf(missingFiles, "%s\n", filename); #endif if(file) { *file = 0; return -1; } else { return 0; } }
170,083
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: const extensions::Extension* GetExtension(Profile* profile, const std::string& extension_id) { const ExtensionService* service = extensions::ExtensionSystem::Get(profile)->extension_service(); const extensions::Extension* extension = service->GetInstalledExtension(extension_id); return extension; } Commit Message: [Extensions] Add GetInstalledExtension() method to ExtensionRegistry This CL adds GetInstalledExtension() method to ExtensionRegistry and uses it instead of deprecated ExtensionService::GetInstalledExtension() in chrome/browser/ui/app_list/. Part of removing the deprecated GetInstalledExtension() call from the ExtensionService. BUG=489687 Review URL: https://codereview.chromium.org/1130353010 Cr-Commit-Position: refs/heads/master@{#333036} CWE ID:
const extensions::Extension* GetExtension(Profile* profile, const std::string& extension_id) { const ExtensionRegistry* registry = ExtensionRegistry::Get(profile); const extensions::Extension* extension = registry->GetInstalledExtension(extension_id); return extension; }
171,720
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void GLES2DecoderImpl::GetTexParameterImpl( GLenum target, GLenum pname, GLfloat* fparams, GLint* iparams, const char* function_name) { TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget( &state_, target); if (!texture_ref) { LOCAL_SET_GL_ERROR( GL_INVALID_OPERATION, function_name, "unknown texture for target"); return; } Texture* texture = texture_ref->texture(); switch (pname) { case GL_TEXTURE_MAX_ANISOTROPY_EXT: if (workarounds().init_texture_max_anisotropy) { texture->InitTextureMaxAnisotropyIfNeeded(target); } break; case GL_TEXTURE_IMMUTABLE_LEVELS: if (gl_version_info().IsLowerThanGL(4, 2)) { GLint levels = texture->GetImmutableLevels(); if (fparams) { fparams[0] = static_cast<GLfloat>(levels); } else { iparams[0] = levels; } return; } break; if (workarounds().use_shadowed_tex_level_params) { if (fparams) { fparams[0] = static_cast<GLfloat>(texture->base_level()); } else { iparams[0] = texture->base_level(); } return; } break; case GL_TEXTURE_MAX_LEVEL: if (workarounds().use_shadowed_tex_level_params) { if (fparams) { fparams[0] = static_cast<GLfloat>(texture->max_level()); } else { iparams[0] = texture->max_level(); } return; } break; case GL_TEXTURE_SWIZZLE_R: if (fparams) { fparams[0] = static_cast<GLfloat>(texture->swizzle_r()); } else { iparams[0] = texture->swizzle_r(); } return; case GL_TEXTURE_SWIZZLE_G: if (fparams) { fparams[0] = static_cast<GLfloat>(texture->swizzle_g()); } else { iparams[0] = texture->swizzle_g(); } return; case GL_TEXTURE_SWIZZLE_B: if (fparams) { fparams[0] = static_cast<GLfloat>(texture->swizzle_b()); } else { iparams[0] = texture->swizzle_b(); } return; case GL_TEXTURE_SWIZZLE_A: if (fparams) { fparams[0] = static_cast<GLfloat>(texture->swizzle_a()); } else { iparams[0] = texture->swizzle_a(); } return; default: break; } if (fparams) { api()->glGetTexParameterfvFn(target, pname, fparams); } else { api()->glGetTexParameterivFn(target, pname, iparams); } } Commit Message: Implement immutable texture base/max level clamping It seems some drivers fail to handle that gracefully, so let's always clamp to be on the safe side. BUG=877874 TEST=test case in the bug, gpu_unittests [email protected] Cq-Include-Trybots: luci.chromium.try:android_optional_gpu_tests_rel;luci.chromium.try:linux_optional_gpu_tests_rel;luci.chromium.try:mac_optional_gpu_tests_rel;luci.chromium.try:win_optional_gpu_tests_rel Change-Id: I6d93cb9389ea70525df4604112223604577582a2 Reviewed-on: https://chromium-review.googlesource.com/1194994 Reviewed-by: Kenneth Russell <[email protected]> Commit-Queue: Zhenyao Mo <[email protected]> Cr-Commit-Position: refs/heads/master@{#587264} CWE ID: CWE-119
void GLES2DecoderImpl::GetTexParameterImpl( GLenum target, GLenum pname, GLfloat* fparams, GLint* iparams, const char* function_name) { TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget( &state_, target); if (!texture_ref) { LOCAL_SET_GL_ERROR( GL_INVALID_OPERATION, function_name, "unknown texture for target"); return; } Texture* texture = texture_ref->texture(); switch (pname) { case GL_TEXTURE_MAX_ANISOTROPY_EXT: if (workarounds().init_texture_max_anisotropy) { texture->InitTextureMaxAnisotropyIfNeeded(target); } break; case GL_TEXTURE_IMMUTABLE_LEVELS: if (gl_version_info().IsLowerThanGL(4, 2)) { GLint levels = texture->GetImmutableLevels(); if (fparams) { fparams[0] = static_cast<GLfloat>(levels); } else { iparams[0] = levels; } return; } break; // Use shadowed value in case it's clamped; also because older MacOSX // stores the value on int16_t (see https://crbug.com/610153). if (fparams) { fparams[0] = static_cast<GLfloat>(texture->unclamped_base_level()); } else { iparams[0] = texture->unclamped_base_level(); } return; case GL_TEXTURE_MAX_LEVEL: // Use shadowed value in case it's clamped; also because older MacOSX // stores the value on int16_t (see https://crbug.com/610153). if (fparams) { fparams[0] = static_cast<GLfloat>(texture->unclamped_max_level()); } else { iparams[0] = texture->unclamped_max_level(); } return; case GL_TEXTURE_SWIZZLE_R: if (fparams) { fparams[0] = static_cast<GLfloat>(texture->swizzle_r()); } else { iparams[0] = texture->swizzle_r(); } return; case GL_TEXTURE_SWIZZLE_G: if (fparams) { fparams[0] = static_cast<GLfloat>(texture->swizzle_g()); } else { iparams[0] = texture->swizzle_g(); } return; case GL_TEXTURE_SWIZZLE_B: if (fparams) { fparams[0] = static_cast<GLfloat>(texture->swizzle_b()); } else { iparams[0] = texture->swizzle_b(); } return; case GL_TEXTURE_SWIZZLE_A: if (fparams) { fparams[0] = static_cast<GLfloat>(texture->swizzle_a()); } else { iparams[0] = texture->swizzle_a(); } return; default: break; } if (fparams) { api()->glGetTexParameterfvFn(target, pname, fparams); } else { api()->glGetTexParameterivFn(target, pname, iparams); } }
172,658
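The commit message in the entry above describes the workaround: some drivers mishandle out-of-range base/max levels on textures with immutable storage, so the decoder clamps what it passes to the driver while shadowing the values the client actually set, and queries return the shadowed (unclamped) numbers. The following is a minimal standalone sketch of that clamp-and-shadow idea; ShadowedLevels and its members are invented for illustration and are not the Chromium Texture class.

#include <algorithm>
#include <cstdio>

// Hypothetical stand-in for a texture with immutable storage: the driver-visible
// base/max levels are always clamped to [0, immutable_levels - 1], while the
// values the client asked for are kept separately so queries can return them.
struct ShadowedLevels {
  int immutable_levels;      // level count fixed at TexStorage time
  int unclamped_base_level;  // what the client requested
  int unclamped_max_level;

  int clamped_base_level() const {
    return std::min(std::max(unclamped_base_level, 0), immutable_levels - 1);
  }
  int clamped_max_level() const {
    return std::min(std::max(unclamped_max_level, clamped_base_level()),
                    immutable_levels - 1);
  }
};

int main() {
  ShadowedLevels t{4 /*levels*/, -1 /*base*/, 100 /*max*/};
  std::printf("driver sees base=%d max=%d, client query returns base=%d max=%d\n",
              t.clamped_base_level(), t.clamped_max_level(),
              t.unclamped_base_level, t.unclamped_max_level);
}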
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static Image *ReadPICTImage(const ImageInfo *image_info, ExceptionInfo *exception) { char geometry[MaxTextExtent], header_ole[4]; Image *image; IndexPacket index; int c, code; MagickBooleanType jpeg, status; PICTRectangle frame; PICTPixmap pixmap; register IndexPacket *indexes; register ssize_t x; register PixelPacket *q; register ssize_t i; size_t extent, length; ssize_t count, flags, j, version, y; StringInfo *profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read PICT header. */ pixmap.bits_per_pixel=0; pixmap.component_count=0; /* Skip header : 512 for standard PICT and 4, ie "PICT" for OLE2 */ header_ole[0]=ReadBlobByte(image); header_ole[1]=ReadBlobByte(image); header_ole[2]=ReadBlobByte(image); header_ole[3]=ReadBlobByte(image); if (!((header_ole[0] == 0x50) && (header_ole[1] == 0x49) && (header_ole[2] == 0x43) && (header_ole[3] == 0x54))) for (i=0; i < 508; i++) (void) ReadBlobByte(image); (void) ReadBlobMSBShort(image); /* skip picture size */ if (ReadRectangle(image,&frame) == MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); while ((c=ReadBlobByte(image)) == 0) ; if (c != 0x11) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); version=ReadBlobByte(image); if (version == 2) { c=ReadBlobByte(image); if (c != 0xff) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } else if (version != 1) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if ((frame.left < 0) || (frame.right < 0) || (frame.top < 0) || (frame.bottom < 0) || (frame.left >= frame.right) || (frame.top >= frame.bottom)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Create black canvas. */ flags=0; image->depth=8; image->columns=1UL*(frame.right-frame.left); image->rows=1UL*(frame.bottom-frame.top); image->x_resolution=DefaultResolution; image->y_resolution=DefaultResolution; image->units=UndefinedResolution; /* Interpret PICT opcodes. */ jpeg=MagickFalse; for (code=0; EOFBlob(image) == MagickFalse; ) { if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; if ((version == 1) || ((TellBlob(image) % 2) != 0)) code=ReadBlobByte(image); if (version == 2) code=(int) ReadBlobMSBShort(image); if (code < 0) break; if (code > 0xa1) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"%04X:",code); } else { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " %04X %s: %s",code,codes[code].name,codes[code].description); switch (code) { case 0x01: { /* Clipping rectangle. 
*/ length=ReadBlobMSBShort(image); if (length != 0x000a) { for (i=0; i < (ssize_t) (length-2); i++) (void) ReadBlobByte(image); break; } if (ReadRectangle(image,&frame) == MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (((frame.left & 0x8000) != 0) || ((frame.top & 0x8000) != 0)) break; image->columns=1UL*(frame.right-frame.left); image->rows=1UL*(frame.bottom-frame.top); (void) SetImageBackgroundColor(image); break; } case 0x12: case 0x13: case 0x14: { ssize_t pattern; size_t height, width; /* Skip pattern definition. */ pattern=1L*ReadBlobMSBShort(image); for (i=0; i < 8; i++) (void) ReadBlobByte(image); if (pattern == 2) { for (i=0; i < 5; i++) (void) ReadBlobByte(image); break; } if (pattern != 1) ThrowReaderException(CorruptImageError,"UnknownPatternType"); length=ReadBlobMSBShort(image); if (ReadRectangle(image,&frame) == MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); ReadPixmap(pixmap); image->depth=1UL*pixmap.component_size; image->x_resolution=1.0*pixmap.horizontal_resolution; image->y_resolution=1.0*pixmap.vertical_resolution; image->units=PixelsPerInchResolution; (void) ReadBlobMSBLong(image); flags=1L*ReadBlobMSBShort(image); length=ReadBlobMSBShort(image); for (i=0; i <= (ssize_t) length; i++) (void) ReadBlobMSBLong(image); width=1UL*(frame.bottom-frame.top); height=1UL*(frame.right-frame.left); if (pixmap.bits_per_pixel <= 8) length&=0x7fff; if (pixmap.bits_per_pixel == 16) width<<=1; if (length == 0) length=width; if (length < 8) { for (i=0; i < (ssize_t) (length*height); i++) (void) ReadBlobByte(image); } else for (j=0; j < (int) height; j++) if (length > 200) for (j=0; j < (ssize_t) ReadBlobMSBShort(image); j++) (void) ReadBlobByte(image); else for (j=0; j < (ssize_t) ReadBlobByte(image); j++) (void) ReadBlobByte(image); break; } case 0x1b: { /* Initialize image background color. */ image->background_color.red=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); image->background_color.green=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); image->background_color.blue=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); break; } case 0x70: case 0x71: case 0x72: case 0x73: case 0x74: case 0x75: case 0x76: case 0x77: { /* Skip polygon or region. */ length=ReadBlobMSBShort(image); for (i=0; i < (ssize_t) (length-2); i++) (void) ReadBlobByte(image); break; } case 0x90: case 0x91: case 0x98: case 0x99: case 0x9a: case 0x9b: { ssize_t bytes_per_line; PICTRectangle source, destination; register unsigned char *p; size_t j; unsigned char *pixels; Image *tile_image; /* Pixmap clipped by a rectangle. */ bytes_per_line=0; if ((code != 0x9a) && (code != 0x9b)) bytes_per_line=1L*ReadBlobMSBShort(image); else { (void) ReadBlobMSBShort(image); (void) ReadBlobMSBShort(image); (void) ReadBlobMSBShort(image); } if (ReadRectangle(image,&frame) == MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize tile image. */ tile_image=CloneImage(image,1UL*(frame.right-frame.left), 1UL*(frame.bottom-frame.top),MagickTrue,exception); if (tile_image == (Image *) NULL) return((Image *) NULL); if ((code == 0x9a) || (code == 0x9b) || ((bytes_per_line & 0x8000) != 0)) { ReadPixmap(pixmap); tile_image->depth=1UL*pixmap.component_size; tile_image->matte=pixmap.component_count == 4 ? 
MagickTrue : MagickFalse; tile_image->x_resolution=(double) pixmap.horizontal_resolution; tile_image->y_resolution=(double) pixmap.vertical_resolution; tile_image->units=PixelsPerInchResolution; if (tile_image->matte != MagickFalse) image->matte=tile_image->matte; } if ((code != 0x9a) && (code != 0x9b)) { /* Initialize colormap. */ tile_image->colors=2; if ((bytes_per_line & 0x8000) != 0) { (void) ReadBlobMSBLong(image); flags=1L*ReadBlobMSBShort(image); tile_image->colors=1UL*ReadBlobMSBShort(image)+1; } status=AcquireImageColormap(tile_image,tile_image->colors); if (status == MagickFalse) { tile_image=DestroyImage(tile_image); ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } if ((bytes_per_line & 0x8000) != 0) { for (i=0; i < (ssize_t) tile_image->colors; i++) { j=ReadBlobMSBShort(image) % tile_image->colors; if ((flags & 0x8000) != 0) j=(size_t) i; tile_image->colormap[j].red=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); tile_image->colormap[j].green=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); tile_image->colormap[j].blue=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); } } else { for (i=0; i < (ssize_t) tile_image->colors; i++) { tile_image->colormap[i].red=(Quantum) (QuantumRange- tile_image->colormap[i].red); tile_image->colormap[i].green=(Quantum) (QuantumRange- tile_image->colormap[i].green); tile_image->colormap[i].blue=(Quantum) (QuantumRange- tile_image->colormap[i].blue); } } } if (ReadRectangle(image,&source) == MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (ReadRectangle(image,&destination) == MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlobMSBShort(image); if ((code == 0x91) || (code == 0x99) || (code == 0x9b)) { /* Skip region. */ length=ReadBlobMSBShort(image); for (i=0; i < (ssize_t) (length-2); i++) (void) ReadBlobByte(image); } if ((code != 0x9a) && (code != 0x9b) && (bytes_per_line & 0x8000) == 0) pixels=DecodeImage(image,tile_image,1UL*bytes_per_line,1,&extent); else pixels=DecodeImage(image,tile_image,1UL*bytes_per_line,1U* pixmap.bits_per_pixel,&extent); if (pixels == (unsigned char *) NULL) { tile_image=DestroyImage(tile_image); ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } /* Convert PICT tile image to pixel packets. 
*/ p=pixels; for (y=0; y < (ssize_t) tile_image->rows; y++) { if (p > (pixels+extent+image->columns)) ThrowReaderException(CorruptImageError,"NotEnoughPixelData"); q=QueueAuthenticPixels(tile_image,0,y,tile_image->columns,1, exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(tile_image); for (x=0; x < (ssize_t) tile_image->columns; x++) { if (tile_image->storage_class == PseudoClass) { index=ConstrainColormapIndex(tile_image,*p); SetPixelIndex(indexes+x,index); SetPixelRed(q, tile_image->colormap[(ssize_t) index].red); SetPixelGreen(q, tile_image->colormap[(ssize_t) index].green); SetPixelBlue(q, tile_image->colormap[(ssize_t) index].blue); } else { if (pixmap.bits_per_pixel == 16) { i=(*p++); j=(*p); SetPixelRed(q,ScaleCharToQuantum( (unsigned char) ((i & 0x7c) << 1))); SetPixelGreen(q,ScaleCharToQuantum( (unsigned char) (((i & 0x03) << 6) | ((j & 0xe0) >> 2)))); SetPixelBlue(q,ScaleCharToQuantum( (unsigned char) ((j & 0x1f) << 3))); } else if (tile_image->matte == MagickFalse) { if (p > (pixels+extent+2*image->columns)) ThrowReaderException(CorruptImageError, "NotEnoughPixelData"); SetPixelRed(q,ScaleCharToQuantum(*p)); SetPixelGreen(q,ScaleCharToQuantum( *(p+tile_image->columns))); SetPixelBlue(q,ScaleCharToQuantum( *(p+2*tile_image->columns))); } else { if (p > (pixels+extent+3*image->columns)) ThrowReaderException(CorruptImageError, "NotEnoughPixelData"); SetPixelAlpha(q,ScaleCharToQuantum(*p)); SetPixelRed(q,ScaleCharToQuantum( *(p+tile_image->columns))); SetPixelGreen(q,ScaleCharToQuantum( *(p+2*tile_image->columns))); SetPixelBlue(q,ScaleCharToQuantum( *(p+3*tile_image->columns))); } } p++; q++; } if (SyncAuthenticPixels(tile_image,exception) == MagickFalse) break; if ((tile_image->storage_class == DirectClass) && (pixmap.bits_per_pixel != 16)) { p+=(pixmap.component_count-1)*tile_image->columns; if (p < pixels) break; } status=SetImageProgress(image,LoadImageTag,y,tile_image->rows); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); if (jpeg == MagickFalse) if ((code == 0x9a) || (code == 0x9b) || ((bytes_per_line & 0x8000) != 0)) (void) CompositeImage(image,CopyCompositeOp,tile_image, destination.left,destination.top); tile_image=DestroyImage(tile_image); break; } case 0xa1: { unsigned char *info; size_t type; /* Comment. */ type=ReadBlobMSBShort(image); length=ReadBlobMSBShort(image); if (length == 0) break; (void) ReadBlobMSBLong(image); length-=4; if (length == 0) break; info=(unsigned char *) AcquireQuantumMemory(length,sizeof(*info)); if (info == (unsigned char *) NULL) break; count=ReadBlob(image,length,info); (void) count; switch (type) { case 0xe0: { if (length == 0) break; profile=BlobToStringInfo((const void *) NULL,length); SetStringInfoDatum(profile,info); status=SetImageProfile(image,"icc",profile); profile=DestroyStringInfo(profile); if (status == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); break; } case 0x1f2: { if (length == 0) break; profile=BlobToStringInfo((const void *) NULL,length); SetStringInfoDatum(profile,info); status=SetImageProfile(image,"iptc",profile); if (status == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); profile=DestroyStringInfo(profile); break; } default: break; } info=(unsigned char *) RelinquishMagickMemory(info); break; } default: { /* Skip to next op code. 
*/ if (code < 0) break; if (codes[code].length == -1) (void) ReadBlobMSBShort(image); else for (i=0; i < (ssize_t) codes[code].length; i++) (void) ReadBlobByte(image); } } } if (code == 0xc00) { /* Skip header. */ for (i=0; i < 24; i++) (void) ReadBlobByte(image); continue; } if (((code >= 0xb0) && (code <= 0xcf)) || ((code >= 0x8000) && (code <= 0x80ff))) continue; if (code == 0x8200) { FILE *file; Image *tile_image; ImageInfo *read_info; int unique_file; /* Embedded JPEG. */ jpeg=MagickTrue; read_info=CloneImageInfo(image_info); SetImageInfoBlob(read_info,(void *) NULL,0); file=(FILE *) NULL; unique_file=AcquireUniqueFileResource(read_info->filename); if (unique_file != -1) file=fdopen(unique_file,"wb"); if ((unique_file == -1) || (file == (FILE *) NULL)) { if (file != (FILE *) NULL) (void) fclose(file); (void) RelinquishUniqueFileResource(read_info->filename); (void) CopyMagickString(image->filename,read_info->filename, MaxTextExtent); ThrowFileException(exception,FileOpenError, "UnableToCreateTemporaryFile",image->filename); image=DestroyImageList(image); return((Image *) NULL); } length=ReadBlobMSBLong(image); for (i=0; i < 6; i++) (void) ReadBlobMSBLong(image); if (ReadRectangle(image,&frame) == MagickFalse) { (void) fclose(file); (void) RelinquishUniqueFileResource(read_info->filename); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } for (i=0; i < 122; i++) (void) ReadBlobByte(image); for (i=0; i < (ssize_t) (length-154); i++) { c=ReadBlobByte(image); (void) fputc(c,file); } (void) fclose(file); (void) close(unique_file); tile_image=ReadImage(read_info,exception); (void) RelinquishUniqueFileResource(read_info->filename); read_info=DestroyImageInfo(read_info); if (tile_image == (Image *) NULL) continue; (void) FormatLocaleString(geometry,MaxTextExtent,"%.20gx%.20g", (double) MagickMax(image->columns,tile_image->columns), (double) MagickMax(image->rows,tile_image->rows)); (void) SetImageExtent(image, MagickMax(image->columns,tile_image->columns), MagickMax(image->rows,tile_image->rows)); (void) TransformImageColorspace(image,tile_image->colorspace); (void) CompositeImage(image,CopyCompositeOp,tile_image,frame.left, frame.right); image->compression=tile_image->compression; tile_image=DestroyImage(tile_image); continue; } if ((code == 0xff) || (code == 0xffff)) break; if (((code >= 0xd0) && (code <= 0xfe)) || ((code >= 0x8100) && (code <= 0xffff))) { /* Skip reserved. */ length=ReadBlobMSBShort(image); for (i=0; i < (ssize_t) length; i++) (void) ReadBlobByte(image); continue; } if ((code >= 0x100) && (code <= 0x7fff)) { /* Skip reserved. */ length=(size_t) ((code >> 7) & 0xff); for (i=0; i < (ssize_t) length; i++) (void) ReadBlobByte(image); continue; } } (void) CloseBlob(image); return(GetFirstImageInList(image)); } Commit Message: CWE ID: CWE-119
static Image *ReadPICTImage(const ImageInfo *image_info, ExceptionInfo *exception) { char geometry[MaxTextExtent], header_ole[4]; Image *image; IndexPacket index; int c, code; MagickBooleanType jpeg, status; PICTRectangle frame; PICTPixmap pixmap; register IndexPacket *indexes; register ssize_t x; register PixelPacket *q; register ssize_t i; size_t extent, length; ssize_t count, flags, j, version, y; StringInfo *profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read PICT header. */ pixmap.bits_per_pixel=0; pixmap.component_count=0; /* Skip header : 512 for standard PICT and 4, ie "PICT" for OLE2 */ header_ole[0]=ReadBlobByte(image); header_ole[1]=ReadBlobByte(image); header_ole[2]=ReadBlobByte(image); header_ole[3]=ReadBlobByte(image); if (!((header_ole[0] == 0x50) && (header_ole[1] == 0x49) && (header_ole[2] == 0x43) && (header_ole[3] == 0x54))) for (i=0; i < 508; i++) (void) ReadBlobByte(image); (void) ReadBlobMSBShort(image); /* skip picture size */ if (ReadRectangle(image,&frame) == MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); while ((c=ReadBlobByte(image)) == 0) ; if (c != 0x11) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); version=ReadBlobByte(image); if (version == 2) { c=ReadBlobByte(image); if (c != 0xff) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } else if (version != 1) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if ((frame.left < 0) || (frame.right < 0) || (frame.top < 0) || (frame.bottom < 0) || (frame.left >= frame.right) || (frame.top >= frame.bottom)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Create black canvas. */ flags=0; image->depth=8; image->columns=1UL*(frame.right-frame.left); image->rows=1UL*(frame.bottom-frame.top); image->x_resolution=DefaultResolution; image->y_resolution=DefaultResolution; image->units=UndefinedResolution; /* Interpret PICT opcodes. */ jpeg=MagickFalse; for (code=0; EOFBlob(image) == MagickFalse; ) { if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } if ((version == 1) || ((TellBlob(image) % 2) != 0)) code=ReadBlobByte(image); if (version == 2) code=(int) ReadBlobMSBShort(image); if (code < 0) break; if (code > 0xa1) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"%04X:",code); } else { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " %04X %s: %s",code,codes[code].name,codes[code].description); switch (code) { case 0x01: { /* Clipping rectangle. 
*/ length=ReadBlobMSBShort(image); if (length != 0x000a) { for (i=0; i < (ssize_t) (length-2); i++) (void) ReadBlobByte(image); break; } if (ReadRectangle(image,&frame) == MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (((frame.left & 0x8000) != 0) || ((frame.top & 0x8000) != 0)) break; image->columns=1UL*(frame.right-frame.left); image->rows=1UL*(frame.bottom-frame.top); (void) SetImageBackgroundColor(image); break; } case 0x12: case 0x13: case 0x14: { ssize_t pattern; size_t height, width; /* Skip pattern definition. */ pattern=1L*ReadBlobMSBShort(image); for (i=0; i < 8; i++) (void) ReadBlobByte(image); if (pattern == 2) { for (i=0; i < 5; i++) (void) ReadBlobByte(image); break; } if (pattern != 1) ThrowReaderException(CorruptImageError,"UnknownPatternType"); length=ReadBlobMSBShort(image); if (ReadRectangle(image,&frame) == MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); ReadPixmap(pixmap); image->depth=1UL*pixmap.component_size; image->x_resolution=1.0*pixmap.horizontal_resolution; image->y_resolution=1.0*pixmap.vertical_resolution; image->units=PixelsPerInchResolution; (void) ReadBlobMSBLong(image); flags=1L*ReadBlobMSBShort(image); length=ReadBlobMSBShort(image); for (i=0; i <= (ssize_t) length; i++) (void) ReadBlobMSBLong(image); width=1UL*(frame.bottom-frame.top); height=1UL*(frame.right-frame.left); if (pixmap.bits_per_pixel <= 8) length&=0x7fff; if (pixmap.bits_per_pixel == 16) width<<=1; if (length == 0) length=width; if (length < 8) { for (i=0; i < (ssize_t) (length*height); i++) (void) ReadBlobByte(image); } else for (j=0; j < (int) height; j++) if (length > 200) for (j=0; j < (ssize_t) ReadBlobMSBShort(image); j++) (void) ReadBlobByte(image); else for (j=0; j < (ssize_t) ReadBlobByte(image); j++) (void) ReadBlobByte(image); break; } case 0x1b: { /* Initialize image background color. */ image->background_color.red=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); image->background_color.green=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); image->background_color.blue=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); break; } case 0x70: case 0x71: case 0x72: case 0x73: case 0x74: case 0x75: case 0x76: case 0x77: { /* Skip polygon or region. */ length=ReadBlobMSBShort(image); for (i=0; i < (ssize_t) (length-2); i++) (void) ReadBlobByte(image); break; } case 0x90: case 0x91: case 0x98: case 0x99: case 0x9a: case 0x9b: { ssize_t bytes_per_line; PICTRectangle source, destination; register unsigned char *p; size_t j; unsigned char *pixels; Image *tile_image; /* Pixmap clipped by a rectangle. */ bytes_per_line=0; if ((code != 0x9a) && (code != 0x9b)) bytes_per_line=1L*ReadBlobMSBShort(image); else { (void) ReadBlobMSBShort(image); (void) ReadBlobMSBShort(image); (void) ReadBlobMSBShort(image); } if (ReadRectangle(image,&frame) == MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize tile image. */ tile_image=CloneImage(image,1UL*(frame.right-frame.left), 1UL*(frame.bottom-frame.top),MagickTrue,exception); if (tile_image == (Image *) NULL) return((Image *) NULL); if ((code == 0x9a) || (code == 0x9b) || ((bytes_per_line & 0x8000) != 0)) { ReadPixmap(pixmap); tile_image->depth=1UL*pixmap.component_size; tile_image->matte=pixmap.component_count == 4 ? 
MagickTrue : MagickFalse; tile_image->x_resolution=(double) pixmap.horizontal_resolution; tile_image->y_resolution=(double) pixmap.vertical_resolution; tile_image->units=PixelsPerInchResolution; if (tile_image->matte != MagickFalse) image->matte=tile_image->matte; } if ((code != 0x9a) && (code != 0x9b)) { /* Initialize colormap. */ tile_image->colors=2; if ((bytes_per_line & 0x8000) != 0) { (void) ReadBlobMSBLong(image); flags=1L*ReadBlobMSBShort(image); tile_image->colors=1UL*ReadBlobMSBShort(image)+1; } status=AcquireImageColormap(tile_image,tile_image->colors); if (status == MagickFalse) { tile_image=DestroyImage(tile_image); ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } if ((bytes_per_line & 0x8000) != 0) { for (i=0; i < (ssize_t) tile_image->colors; i++) { j=ReadBlobMSBShort(image) % tile_image->colors; if ((flags & 0x8000) != 0) j=(size_t) i; tile_image->colormap[j].red=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); tile_image->colormap[j].green=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); tile_image->colormap[j].blue=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); } } else { for (i=0; i < (ssize_t) tile_image->colors; i++) { tile_image->colormap[i].red=(Quantum) (QuantumRange- tile_image->colormap[i].red); tile_image->colormap[i].green=(Quantum) (QuantumRange- tile_image->colormap[i].green); tile_image->colormap[i].blue=(Quantum) (QuantumRange- tile_image->colormap[i].blue); } } } if (ReadRectangle(image,&source) == MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (ReadRectangle(image,&destination) == MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlobMSBShort(image); if ((code == 0x91) || (code == 0x99) || (code == 0x9b)) { /* Skip region. */ length=ReadBlobMSBShort(image); for (i=0; i < (ssize_t) (length-2); i++) (void) ReadBlobByte(image); } if ((code != 0x9a) && (code != 0x9b) && (bytes_per_line & 0x8000) == 0) pixels=DecodeImage(image,tile_image,1UL*bytes_per_line,1,&extent); else pixels=DecodeImage(image,tile_image,1UL*bytes_per_line,1U* pixmap.bits_per_pixel,&extent); if (pixels == (unsigned char *) NULL) { tile_image=DestroyImage(tile_image); ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } /* Convert PICT tile image to pixel packets. 
*/ p=pixels; for (y=0; y < (ssize_t) tile_image->rows; y++) { if (p > (pixels+extent+image->columns)) ThrowReaderException(CorruptImageError,"NotEnoughPixelData"); q=QueueAuthenticPixels(tile_image,0,y,tile_image->columns,1, exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(tile_image); for (x=0; x < (ssize_t) tile_image->columns; x++) { if (tile_image->storage_class == PseudoClass) { index=ConstrainColormapIndex(tile_image,*p); SetPixelIndex(indexes+x,index); SetPixelRed(q, tile_image->colormap[(ssize_t) index].red); SetPixelGreen(q, tile_image->colormap[(ssize_t) index].green); SetPixelBlue(q, tile_image->colormap[(ssize_t) index].blue); } else { if (pixmap.bits_per_pixel == 16) { i=(*p++); j=(*p); SetPixelRed(q,ScaleCharToQuantum( (unsigned char) ((i & 0x7c) << 1))); SetPixelGreen(q,ScaleCharToQuantum( (unsigned char) (((i & 0x03) << 6) | ((j & 0xe0) >> 2)))); SetPixelBlue(q,ScaleCharToQuantum( (unsigned char) ((j & 0x1f) << 3))); } else if (tile_image->matte == MagickFalse) { if (p > (pixels+extent+2*image->columns)) ThrowReaderException(CorruptImageError, "NotEnoughPixelData"); SetPixelRed(q,ScaleCharToQuantum(*p)); SetPixelGreen(q,ScaleCharToQuantum( *(p+tile_image->columns))); SetPixelBlue(q,ScaleCharToQuantum( *(p+2*tile_image->columns))); } else { if (p > (pixels+extent+3*image->columns)) ThrowReaderException(CorruptImageError, "NotEnoughPixelData"); SetPixelAlpha(q,ScaleCharToQuantum(*p)); SetPixelRed(q,ScaleCharToQuantum( *(p+tile_image->columns))); SetPixelGreen(q,ScaleCharToQuantum( *(p+2*tile_image->columns))); SetPixelBlue(q,ScaleCharToQuantum( *(p+3*tile_image->columns))); } } p++; q++; } if (SyncAuthenticPixels(tile_image,exception) == MagickFalse) break; if ((tile_image->storage_class == DirectClass) && (pixmap.bits_per_pixel != 16)) { p+=(pixmap.component_count-1)*tile_image->columns; if (p < pixels) break; } status=SetImageProgress(image,LoadImageTag,y,tile_image->rows); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); if (jpeg == MagickFalse) if ((code == 0x9a) || (code == 0x9b) || ((bytes_per_line & 0x8000) != 0)) (void) CompositeImage(image,CopyCompositeOp,tile_image, destination.left,destination.top); tile_image=DestroyImage(tile_image); break; } case 0xa1: { unsigned char *info; size_t type; /* Comment. */ type=ReadBlobMSBShort(image); length=ReadBlobMSBShort(image); if (length == 0) break; (void) ReadBlobMSBLong(image); length-=4; if (length == 0) break; info=(unsigned char *) AcquireQuantumMemory(length,sizeof(*info)); if (info == (unsigned char *) NULL) break; count=ReadBlob(image,length,info); (void) count; switch (type) { case 0xe0: { if (length == 0) break; profile=BlobToStringInfo((const void *) NULL,length); SetStringInfoDatum(profile,info); status=SetImageProfile(image,"icc",profile); profile=DestroyStringInfo(profile); if (status == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); break; } case 0x1f2: { if (length == 0) break; profile=BlobToStringInfo((const void *) NULL,length); SetStringInfoDatum(profile,info); status=SetImageProfile(image,"iptc",profile); if (status == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); profile=DestroyStringInfo(profile); break; } default: break; } info=(unsigned char *) RelinquishMagickMemory(info); break; } default: { /* Skip to next op code. 
*/ if (code < 0) break; if (codes[code].length == -1) (void) ReadBlobMSBShort(image); else for (i=0; i < (ssize_t) codes[code].length; i++) (void) ReadBlobByte(image); } } } if (code == 0xc00) { /* Skip header. */ for (i=0; i < 24; i++) (void) ReadBlobByte(image); continue; } if (((code >= 0xb0) && (code <= 0xcf)) || ((code >= 0x8000) && (code <= 0x80ff))) continue; if (code == 0x8200) { FILE *file; Image *tile_image; ImageInfo *read_info; int unique_file; /* Embedded JPEG. */ jpeg=MagickTrue; read_info=CloneImageInfo(image_info); SetImageInfoBlob(read_info,(void *) NULL,0); file=(FILE *) NULL; unique_file=AcquireUniqueFileResource(read_info->filename); if (unique_file != -1) file=fdopen(unique_file,"wb"); if ((unique_file == -1) || (file == (FILE *) NULL)) { if (file != (FILE *) NULL) (void) fclose(file); (void) RelinquishUniqueFileResource(read_info->filename); (void) CopyMagickString(image->filename,read_info->filename, MaxTextExtent); ThrowFileException(exception,FileOpenError, "UnableToCreateTemporaryFile",image->filename); image=DestroyImageList(image); return((Image *) NULL); } length=ReadBlobMSBLong(image); for (i=0; i < 6; i++) (void) ReadBlobMSBLong(image); if (ReadRectangle(image,&frame) == MagickFalse) { (void) fclose(file); (void) RelinquishUniqueFileResource(read_info->filename); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } for (i=0; i < 122; i++) (void) ReadBlobByte(image); for (i=0; i < (ssize_t) (length-154); i++) { c=ReadBlobByte(image); (void) fputc(c,file); } (void) fclose(file); (void) close(unique_file); tile_image=ReadImage(read_info,exception); (void) RelinquishUniqueFileResource(read_info->filename); read_info=DestroyImageInfo(read_info); if (tile_image == (Image *) NULL) continue; (void) FormatLocaleString(geometry,MaxTextExtent,"%.20gx%.20g", (double) MagickMax(image->columns,tile_image->columns), (double) MagickMax(image->rows,tile_image->rows)); (void) SetImageExtent(image, MagickMax(image->columns,tile_image->columns), MagickMax(image->rows,tile_image->rows)); (void) TransformImageColorspace(image,tile_image->colorspace); (void) CompositeImage(image,CopyCompositeOp,tile_image,frame.left, frame.right); image->compression=tile_image->compression; tile_image=DestroyImage(tile_image); continue; } if ((code == 0xff) || (code == 0xffff)) break; if (((code >= 0xd0) && (code <= 0xfe)) || ((code >= 0x8100) && (code <= 0xffff))) { /* Skip reserved. */ length=ReadBlobMSBShort(image); for (i=0; i < (ssize_t) length; i++) (void) ReadBlobByte(image); continue; } if ((code >= 0x100) && (code <= 0x7fff)) { /* Skip reserved. */ length=(size_t) ((code >> 7) & 0xff); for (i=0; i < (ssize_t) length; i++) (void) ReadBlobByte(image); continue; } } (void) CloseBlob(image); return(GetFirstImageInList(image)); }
168,593
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void WebViewTestClient::DidFocus() { test_runner()->SetFocus(web_view_test_proxy_base_->web_view(), true); } Commit Message: If a page calls |window.focus()|, kick it out of fullscreen. BUG=776418, 800056 Change-Id: I1880fe600e4814c073f247c43b1c1ac80c8fc017 Reviewed-on: https://chromium-review.googlesource.com/852378 Reviewed-by: Nasko Oskov <[email protected]> Reviewed-by: Philip Jägenstedt <[email protected]> Commit-Queue: Avi Drissman <[email protected]> Cr-Commit-Position: refs/heads/master@{#533790} CWE ID:
void WebViewTestClient::DidFocus(blink::WebLocalFrame* calling_frame) { test_runner()->SetFocus(web_view_test_proxy_base_->web_view(), true); }
172,721
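The commit message above states the policy being enforced: a page that calls window.focus() is kicked out of fullscreen. A toy sketch of that rule is below, with Page and RequestFocusFromScript as purely hypothetical stand-ins rather than the Blink classes the patch actually touches.

#include <iostream>

struct Page {
  bool fullscreen = false;
  bool focused = false;

  // A scripted window.focus() must not let a page keep its fullscreen session
  // while pulling focus around: drop fullscreen before honouring the request.
  void RequestFocusFromScript() {
    if (fullscreen)
      fullscreen = false;
    focused = true;
  }
};

int main() {
  Page page;
  page.fullscreen = true;
  page.RequestFocusFromScript();
  std::cout << "fullscreen=" << page.fullscreen << " focused=" << page.focused << "\n";
}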
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static sk_sp<SkImage> unPremulSkImageToPremul(SkImage* input) { SkImageInfo info = SkImageInfo::Make(input->width(), input->height(), kN32_SkColorType, kPremul_SkAlphaType); RefPtr<Uint8Array> dstPixels = copySkImageData(input, info); if (!dstPixels) return nullptr; return newSkImageFromRaster( info, std::move(dstPixels), static_cast<size_t>(input->width()) * info.bytesPerPixel()); } Commit Message: Prevent bad casting in ImageBitmap when calling ArrayBuffer::createOrNull Currently when ImageBitmap's constructor is invoked, we check whether dstSize will overflow size_t or not. The problem comes when we call ArrayBuffer::createOrNull some times in the code. Both parameters of ArrayBuffer::createOrNull are unsigned. In ImageBitmap when we call this method, the first parameter is usually width * height. This could overflow unsigned even if it has been checked safe with size_t, the reason is that unsigned is a 32-bit value on 64-bit systems, while size_t is a 64-bit value. This CL makes a change such that we check whether the dstSize will overflow unsigned or not. In this case, we can guarantee that createOrNull will not have any crash. BUG=664139 Review-Url: https://codereview.chromium.org/2500493002 Cr-Commit-Position: refs/heads/master@{#431936} CWE ID: CWE-787
static sk_sp<SkImage> unPremulSkImageToPremul(SkImage* input) { SkImageInfo info = SkImageInfo::Make(input->width(), input->height(), kN32_SkColorType, kPremul_SkAlphaType); RefPtr<Uint8Array> dstPixels = copySkImageData(input, info); if (!dstPixels) return nullptr; return newSkImageFromRaster( info, std::move(dstPixels), static_cast<unsigned>(input->width()) * info.bytesPerPixel()); }
172,507
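The commit message above explains the root cause precisely: ArrayBuffer::createOrNull takes unsigned parameters, and width * height can overflow a 32-bit unsigned on 64-bit systems even when the same product fits in size_t. A minimal sketch of the safer pattern, widening to 64 bits and rejecting anything that will not fit the narrower type, is shown below; checked_byte_size is an illustrative helper, not a Blink API.

#include <stdint.h>
#include <cstdio>
#include <limits>

// Returns true and writes the product only if w * h * bpp fits in a 32-bit
// unsigned, which is what a createOrNull-style allocator parameter would take.
// Checking against size_t alone is not enough on 64-bit targets, where size_t
// is wider than unsigned.
bool checked_byte_size(uint32_t w, uint32_t h, uint32_t bpp, uint32_t* out) {
  uint64_t wide = static_cast<uint64_t>(w) * h * bpp;
  if (wide > std::numeric_limits<uint32_t>::max())
    return false;
  *out = static_cast<uint32_t>(wide);
  return true;
}

int main() {
  uint32_t bytes = 0;
  // 40000 x 40000 x 4 = 6.4e9: fits in size_t on 64-bit, overflows unsigned.
  if (!checked_byte_size(40000, 40000, 4, &bytes))
    std::puts("rejected: byte size would overflow 32-bit unsigned");
}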
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: cib_remote_connection_destroy(gpointer user_data) { cib_client_t *client = user_data; if (client == NULL) { return; } crm_trace("Cleaning up after client disconnect: %s/%s", crm_str(client->name), client->id); if (client->id != NULL) { if (!g_hash_table_remove(client_list, client->id)) { crm_err("Client %s not found in the hashtable", client->name); } } crm_trace("Destroying %s (%p)", client->name, user_data); num_clients--; crm_trace("Num unfree'd clients: %d", num_clients); free(client->name); free(client->callback_id); free(client->id); free(client->user); free(client); crm_trace("Freed the cib client"); if (cib_shutdown_flag) { cib_shutdown(0); } return; } Commit Message: High: core: Internal tls api improvements for reuse with future LRMD tls backend. CWE ID: CWE-399
cib_remote_connection_destroy(gpointer user_data) { cib_client_t *client = user_data; int csock = 0; if (client == NULL) { return; } crm_trace("Cleaning up after client disconnect: %s/%s", crm_str(client->name), client->id); if (client->id != NULL) { if (!g_hash_table_remove(client_list, client->id)) { crm_err("Client %s not found in the hashtable", client->name); } } crm_trace("Destroying %s (%p)", client->name, user_data); num_clients--; crm_trace("Num unfree'd clients: %d", num_clients); if (client->remote_auth_timeout) { g_source_remove(client->remote_auth_timeout); } if (client->encrypted) { #ifdef HAVE_GNUTLS_GNUTLS_H if (client->session) { void *sock_ptr = gnutls_transport_get_ptr(*client->session); csock = GPOINTER_TO_INT(sock_ptr); if (client->handshake_complete) { gnutls_bye(*client->session, GNUTLS_SHUT_WR); } gnutls_deinit(*client->session); gnutls_free(client->session); } #endif } else { csock = GPOINTER_TO_INT(client->session); } client->session = NULL; if (csock > 0) { close(csock); } free(client->name); free(client->callback_id); free(client->id); free(client->user); free(client->recv_buf); free(client); crm_trace("Freed the cib client"); if (cib_shutdown_flag) { cib_shutdown(0); } return; }
166,147
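The fixed function above adds explicit teardown of the remote client's TLS state before the socket is closed. The sketch below isolates that ordering using the same GnuTLS calls the fix relies on (gnutls_transport_get_ptr, gnutls_bye, gnutls_deinit, gnutls_free); destroy_remote_client is an illustrative wrapper, not the pacemaker function, and it assumes the session was allocated with gnutls_malloc as in the cib code. Build with -lgnutls.

#include <gnutls/gnutls.h>
#include <unistd.h>
#include <stdint.h>

// Teardown ordering mirrored from the fixed function: recover the socket fd
// stashed in the session, send a TLS close notify only if the handshake ever
// completed, release the session, then close the fd.
static void destroy_remote_client(gnutls_session_t* session, bool handshake_complete) {
  int sock = -1;
  if (session != nullptr) {
    sock = static_cast<int>(reinterpret_cast<intptr_t>(gnutls_transport_get_ptr(*session)));
    if (handshake_complete)
      gnutls_bye(*session, GNUTLS_SHUT_WR);  // polite TLS close
    gnutls_deinit(*session);                 // free session state
    gnutls_free(session);
  }
  if (sock > 0)
    close(sock);                             // finally release the fd
}

int main() {
  destroy_remote_client(nullptr, false);  // plain-text client: nothing to release
  return 0;
}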
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int fscrypt_process_policy(struct inode *inode, const struct fscrypt_policy *policy) { if (policy->version != 0) return -EINVAL; if (!inode_has_encryption_context(inode)) { if (!inode->i_sb->s_cop->empty_dir) return -EOPNOTSUPP; if (!inode->i_sb->s_cop->empty_dir(inode)) return -ENOTEMPTY; return create_encryption_context_from_policy(inode, policy); } if (is_encryption_context_consistent_with_policy(inode, policy)) return 0; printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n", __func__); return -EINVAL; } Commit Message: fscrypto: add authorization check for setting encryption policy On an ext4 or f2fs filesystem with file encryption supported, a user could set an encryption policy on any empty directory(*) to which they had readonly access. This is obviously problematic, since such a directory might be owned by another user and the new encryption policy would prevent that other user from creating files in their own directory (for example). Fix this by requiring inode_owner_or_capable() permission to set an encryption policy. This means that either the caller must own the file, or the caller must have the capability CAP_FOWNER. (*) Or also on any regular file, for f2fs v4.6 and later and ext4 v4.8-rc1 and later; a separate bug fix is coming for that. Signed-off-by: Eric Biggers <[email protected]> Cc: [email protected] # 4.1+; check fs/{ext4,f2fs} Signed-off-by: Theodore Ts'o <[email protected]> CWE ID: CWE-264
int fscrypt_process_policy(struct inode *inode, const struct fscrypt_policy *policy) { if (!inode_owner_or_capable(inode)) return -EACCES; if (policy->version != 0) return -EINVAL; if (!inode_has_encryption_context(inode)) { if (!inode->i_sb->s_cop->empty_dir) return -EOPNOTSUPP; if (!inode->i_sb->s_cop->empty_dir(inode)) return -ENOTEMPTY; return create_encryption_context_from_policy(inode, policy); } if (is_encryption_context_consistent_with_policy(inode, policy)) return 0; printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n", __func__); return -EINVAL; }
168,460
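The commit message above gives the fix in one sentence: honour the request only if the caller owns the inode or holds CAP_FOWNER, i.e. inode_owner_or_capable(). The snippet below restates that owner-or-capability gate as a tiny self-contained C++ example; Caller, Object and set_policy are illustrative types, not kernel code.

#include <cerrno>
#include <cstdio>

// Generic stand-in for the kernel's inode_owner_or_capable() rule: the caller
// may change an object's policy only if it owns the object or holds an
// override capability (CAP_FOWNER in the kernel case).
struct Caller { int uid; bool cap_fowner; };
struct Object { int owner_uid; int policy; };

int set_policy(const Caller& c, Object& o, int new_policy) {
  if (c.uid != o.owner_uid && !c.cap_fowner)
    return -EACCES;            // authorization first, before any other validation
  o.policy = new_policy;
  return 0;
}

int main() {
  Object file{1000, 0};
  Caller other{1001, false};
  std::printf("other user: %d\n", set_policy(other, file, 1));     // -EACCES
  Caller admin{0, true};
  std::printf("capable caller: %d\n", set_policy(admin, file, 1)); // 0
}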
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void BaseMultipleFieldsDateAndTimeInputType::createShadowSubtree() { ASSERT(element()->shadow()); Document* document = element()->document(); ContainerNode* container = element()->userAgentShadowRoot(); RefPtr<DateTimeEditElement> dateTimeEditElement(DateTimeEditElement::create(document, *this)); m_dateTimeEditElement = dateTimeEditElement.get(); container->appendChild(m_dateTimeEditElement); updateInnerTextValue(); RefPtr<ClearButtonElement> clearButton = ClearButtonElement::create(document, *this); m_clearButton = clearButton.get(); container->appendChild(clearButton); RefPtr<SpinButtonElement> spinButton = SpinButtonElement::create(document, *this); m_spinButtonElement = spinButton.get(); container->appendChild(spinButton); bool shouldAddPickerIndicator = false; if (InputType::themeSupportsDataListUI(this)) shouldAddPickerIndicator = true; RefPtr<RenderTheme> theme = document->page() ? document->page()->theme() : RenderTheme::defaultTheme(); if (theme->supportsCalendarPicker(formControlType())) { shouldAddPickerIndicator = true; m_pickerIndicatorIsAlwaysVisible = true; } if (shouldAddPickerIndicator) { RefPtr<PickerIndicatorElement> pickerElement = PickerIndicatorElement::create(document, *this); m_pickerIndicatorElement = pickerElement.get(); container->appendChild(m_pickerIndicatorElement); m_pickerIndicatorIsVisible = true; updatePickerIndicatorVisibility(); } } Commit Message: Setting input.x-webkit-speech should not cause focus change In r150866, we introduced element()->focus() in destroyShadowSubtree() to retain focus on <input> when its type attribute gets changed. But when x-webkit-speech attribute is changed, the element is detached before calling destroyShadowSubtree() and element()->focus() failed This patch moves detach() after destroyShadowSubtree() to fix the problem. BUG=243818 TEST=fast/forms/input-type-change-focusout.html NOTRY=true Review URL: https://chromiumcodereview.appspot.com/16084005 git-svn-id: svn://svn.chromium.org/blink/trunk@151444 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-20
void BaseMultipleFieldsDateAndTimeInputType::createShadowSubtree() { ASSERT(element()->shadow()); // Element must not be attached here, because if it was attached // DateTimeEditElement::customStyleForRenderer() is called in appendChild() // before the field wrapper element is created. ASSERT(!element()->attached()); Document* document = element()->document(); ContainerNode* container = element()->userAgentShadowRoot(); RefPtr<DateTimeEditElement> dateTimeEditElement(DateTimeEditElement::create(document, *this)); m_dateTimeEditElement = dateTimeEditElement.get(); container->appendChild(m_dateTimeEditElement); updateInnerTextValue(); RefPtr<ClearButtonElement> clearButton = ClearButtonElement::create(document, *this); m_clearButton = clearButton.get(); container->appendChild(clearButton); RefPtr<SpinButtonElement> spinButton = SpinButtonElement::create(document, *this); m_spinButtonElement = spinButton.get(); container->appendChild(spinButton); bool shouldAddPickerIndicator = false; if (InputType::themeSupportsDataListUI(this)) shouldAddPickerIndicator = true; RefPtr<RenderTheme> theme = document->page() ? document->page()->theme() : RenderTheme::defaultTheme(); if (theme->supportsCalendarPicker(formControlType())) { shouldAddPickerIndicator = true; m_pickerIndicatorIsAlwaysVisible = true; } if (shouldAddPickerIndicator) { RefPtr<PickerIndicatorElement> pickerElement = PickerIndicatorElement::create(document, *this); m_pickerIndicatorElement = pickerElement.get(); container->appendChild(m_pickerIndicatorElement); m_pickerIndicatorIsVisible = true; updatePickerIndicatorVisibility(); } }
171,264
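The commit message above explains the bug as an ordering problem: destroyShadowSubtree() restores focus via element()->focus(), which silently fails once the element has already been detached, so the patch moves detach() after destroyShadowSubtree(). The toy example below reproduces that ordering dependency; Element and its methods are stand-ins, not the Blink classes.

#include <cassert>

struct Element {
  bool attached = true;
  bool focused = false;
  void focus() { if (attached) focused = true; }   // silently fails when detached
  void destroyShadowSubtree() { focus(); }         // tries to retain focus
  void detach() { attached = false; }
};

int main() {
  Element broken;                                  // order before the patch
  broken.detach();
  broken.destroyShadowSubtree();
  assert(!broken.focused);                         // focus restoration was lost

  Element fixed;                                   // order after the patch
  fixed.destroyShadowSubtree();
  fixed.detach();
  assert(fixed.focused);                           // focus retained
}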
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: OMX_ERRORTYPE omx_vdec::free_input_buffer(OMX_BUFFERHEADERTYPE *bufferHdr) { unsigned int index = 0; if (bufferHdr == NULL || m_inp_mem_ptr == NULL) { return OMX_ErrorBadParameter; } index = bufferHdr - m_inp_mem_ptr; DEBUG_PRINT_LOW("Free Input Buffer index = %d",index); if (index < drv_ctx.ip_buf.actualcount && drv_ctx.ptr_inputbuffer) { DEBUG_PRINT_LOW("Free Input Buffer index = %d",index); if (drv_ctx.ptr_inputbuffer[index].pmem_fd > 0) { struct vdec_setbuffer_cmd setbuffers; setbuffers.buffer_type = VDEC_BUFFER_TYPE_INPUT; memcpy (&setbuffers.buffer,&drv_ctx.ptr_inputbuffer[index], sizeof (vdec_bufferpayload)); if (!secure_mode) { DEBUG_PRINT_LOW("unmap the input buffer fd=%d", drv_ctx.ptr_inputbuffer[index].pmem_fd); DEBUG_PRINT_LOW("unmap the input buffer size=%u address = %p", (unsigned int)drv_ctx.ptr_inputbuffer[index].mmaped_size, drv_ctx.ptr_inputbuffer[index].bufferaddr); munmap (drv_ctx.ptr_inputbuffer[index].bufferaddr, drv_ctx.ptr_inputbuffer[index].mmaped_size); } close (drv_ctx.ptr_inputbuffer[index].pmem_fd); drv_ctx.ptr_inputbuffer[index].pmem_fd = -1; if (m_desc_buffer_ptr && m_desc_buffer_ptr[index].buf_addr) { free(m_desc_buffer_ptr[index].buf_addr); m_desc_buffer_ptr[index].buf_addr = NULL; m_desc_buffer_ptr[index].desc_data_size = 0; } #ifdef USE_ION free_ion_memory(&drv_ctx.ip_buf_ion_info[index]); #endif } } return OMX_ErrorNone; } Commit Message: DO NOT MERGE mm-video-v4l2: vdec: Avoid processing ETBs/FTBs in invalid states (per the spec) ETB/FTB should not be handled in states other than Executing, Paused and Idle. This avoids accessing invalid buffers. Also add a lock to protect the private-buffers from being deleted while accessing from another thread. Bug: 27890802 Security Vulnerability - Heap Use-After-Free and Possible LPE in MediaServer (libOmxVdec problem #6) CRs-Fixed: 1008882 Change-Id: Iaac2e383cd53cf9cf8042c9ed93ddc76dba3907e CWE ID:
OMX_ERRORTYPE omx_vdec::free_input_buffer(OMX_BUFFERHEADERTYPE *bufferHdr) { unsigned int index = 0; if (bufferHdr == NULL || m_inp_mem_ptr == NULL) { return OMX_ErrorBadParameter; } index = bufferHdr - m_inp_mem_ptr; DEBUG_PRINT_LOW("Free Input Buffer index = %d",index); auto_lock l(buf_lock); bufferHdr->pInputPortPrivate = NULL; if (index < drv_ctx.ip_buf.actualcount && drv_ctx.ptr_inputbuffer) { DEBUG_PRINT_LOW("Free Input Buffer index = %d",index); if (drv_ctx.ptr_inputbuffer[index].pmem_fd > 0) { struct vdec_setbuffer_cmd setbuffers; setbuffers.buffer_type = VDEC_BUFFER_TYPE_INPUT; memcpy (&setbuffers.buffer,&drv_ctx.ptr_inputbuffer[index], sizeof (vdec_bufferpayload)); if (!secure_mode) { DEBUG_PRINT_LOW("unmap the input buffer fd=%d", drv_ctx.ptr_inputbuffer[index].pmem_fd); DEBUG_PRINT_LOW("unmap the input buffer size=%u address = %p", (unsigned int)drv_ctx.ptr_inputbuffer[index].mmaped_size, drv_ctx.ptr_inputbuffer[index].bufferaddr); munmap (drv_ctx.ptr_inputbuffer[index].bufferaddr, drv_ctx.ptr_inputbuffer[index].mmaped_size); } close (drv_ctx.ptr_inputbuffer[index].pmem_fd); drv_ctx.ptr_inputbuffer[index].pmem_fd = -1; if (m_desc_buffer_ptr && m_desc_buffer_ptr[index].buf_addr) { free(m_desc_buffer_ptr[index].buf_addr); m_desc_buffer_ptr[index].buf_addr = NULL; m_desc_buffer_ptr[index].desc_data_size = 0; } #ifdef USE_ION free_ion_memory(&drv_ctx.ip_buf_ion_info[index]); #endif } } return OMX_ErrorNone; }
173,752
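Besides the state checks, the commit message above notes that a lock was added so the per-buffer bookkeeping cannot be freed on one thread while another thread is still reading it (the fixed function takes auto_lock l(buf_lock)). Below is a compact sketch of that pattern with std::lock_guard; BufferTable is an invented stand-in for the omx_vdec buffer arrays.

#include <cstddef>
#include <mutex>
#include <stdint.h>
#include <vector>

// Every path that frees or reads the shared buffer table takes the same mutex,
// so a buffer cannot be released by one thread while another still uses it.
class BufferTable {
 public:
  void add_buffer(size_t bytes) {
    std::lock_guard<std::mutex> lock(buf_lock_);
    buffers_.push_back(new uint8_t[bytes]());
  }
  void free_buffer(size_t index) {
    std::lock_guard<std::mutex> lock(buf_lock_);      // same role as auto_lock
    if (index < buffers_.size()) {
      delete[] buffers_[index];
      buffers_[index] = nullptr;
    }
  }
  uint8_t read_first_byte(size_t index) {
    std::lock_guard<std::mutex> lock(buf_lock_);
    if (index < buffers_.size() && buffers_[index] != nullptr)
      return buffers_[index][0];
    return 0;
  }

 private:
  std::mutex buf_lock_;
  std::vector<uint8_t*> buffers_;
};

int main() {
  BufferTable t;
  t.add_buffer(64);
  t.read_first_byte(0);
  t.free_buffer(0);
  t.read_first_byte(0);  // safe: sees nullptr instead of freed memory
}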
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: status_t OMXNodeInstance::sendCommand( OMX_COMMANDTYPE cmd, OMX_S32 param) { const sp<GraphicBufferSource>& bufferSource(getGraphicBufferSource()); if (bufferSource != NULL && cmd == OMX_CommandStateSet) { if (param == OMX_StateIdle) { bufferSource->omxIdle(); } else if (param == OMX_StateLoaded) { bufferSource->omxLoaded(); setGraphicBufferSource(NULL); } } Mutex::Autolock autoLock(mLock); { Mutex::Autolock _l(mDebugLock); bumpDebugLevel_l(2 /* numInputBuffers */, 2 /* numOutputBuffers */); } const char *paramString = cmd == OMX_CommandStateSet ? asString((OMX_STATETYPE)param) : portString(param); CLOG_STATE(sendCommand, "%s(%d), %s(%d)", asString(cmd), cmd, paramString, param); OMX_ERRORTYPE err = OMX_SendCommand(mHandle, cmd, param, NULL); CLOG_IF_ERROR(sendCommand, err, "%s(%d), %s(%d)", asString(cmd), cmd, paramString, param); return StatusFromOMXError(err); } Commit Message: DO NOT MERGE: IOMX: work against metadata buffer spoofing - Prohibit direct set/getParam/Settings for extensions meant for OMXNodeInstance alone. This disallows enabling metadata mode without the knowledge of OMXNodeInstance. - Use a backup buffer for metadata mode buffers and do not directly share with clients. - Disallow setting up metadata mode/tunneling/input surface after first sendCommand. - Disallow store-meta for input cross process. - Disallow emptyBuffer for surface input (via IOMX). - Fix checking for input surface. Bug: 29422020 Change-Id: I801c77b80e703903f62e42d76fd2e76a34e4bc8e (cherry picked from commit 7c3c2fa3e233c656fc8c2fc2a6634b3ecf8a23e8) CWE ID: CWE-200
status_t OMXNodeInstance::sendCommand( OMX_COMMANDTYPE cmd, OMX_S32 param) { if (cmd == OMX_CommandStateSet) { // We do not support returning from unloaded state, so there are no configurations past // first StateSet command. mSailed = true; } const sp<GraphicBufferSource> bufferSource(getGraphicBufferSource()); if (bufferSource != NULL && cmd == OMX_CommandStateSet) { if (param == OMX_StateIdle) { bufferSource->omxIdle(); } else if (param == OMX_StateLoaded) { bufferSource->omxLoaded(); setGraphicBufferSource(NULL); } } Mutex::Autolock autoLock(mLock); { Mutex::Autolock _l(mDebugLock); bumpDebugLevel_l(2 /* numInputBuffers */, 2 /* numOutputBuffers */); } const char *paramString = cmd == OMX_CommandStateSet ? asString((OMX_STATETYPE)param) : portString(param); CLOG_STATE(sendCommand, "%s(%d), %s(%d)", asString(cmd), cmd, paramString, param); OMX_ERRORTYPE err = OMX_SendCommand(mHandle, cmd, param, NULL); CLOG_IF_ERROR(sendCommand, err, "%s(%d), %s(%d)", asString(cmd), cmd, paramString, param); return StatusFromOMXError(err); }
174,137
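The fixed function above sets mSailed on the first OMX_CommandStateSet, and the commit message explains why: configurations such as metadata mode or an input surface must be refused once the node has started, so a compromised client cannot change how buffers are interpreted mid-stream. A minimal one-way-latch sketch follows; Node and its methods are illustrative only.

#include <cstdio>

// One-way latch in the spirit of mSailed: once the first state transition has
// been issued, later attempts to reconfigure buffer modes are refused instead
// of silently changing how buffers will be interpreted.
class Node {
 public:
  bool enableMetadataMode() {
    if (sailed_) {                       // configuration window is closed
      std::puts("rejected: node already started");
      return false;
    }
    metadata_mode_ = true;
    return true;
  }
  void sendStateCommand() { sailed_ = true; }  // set on the first command, never cleared

 private:
  bool sailed_ = false;
  bool metadata_mode_ = false;
};

int main() {
  Node n;
  n.enableMetadataMode();   // allowed before start
  n.sendStateCommand();
  n.enableMetadataMode();   // refused after start
}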
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void PaymentRequest::CanMakePayment() { if (observer_for_testing_) observer_for_testing_->OnCanMakePaymentCalled(); if (!delegate_->GetPrefService()->GetBoolean(kCanMakePaymentEnabled) || !state_) { CanMakePaymentCallback(/*can_make_payment=*/false); } else { state_->CanMakePayment( base::BindOnce(&PaymentRequest::CanMakePaymentCallback, weak_ptr_factory_.GetWeakPtr())); } } Commit Message: [Payment Request][Desktop] Prevent use after free. Before this patch, a compromised renderer on desktop could make IPC methods into Payment Request in an unexpected ordering and cause use after free in the browser. This patch will disconnect the IPC pipes if: - Init() is called more than once. - Any other method is called before Init(). - Show() is called more than once. - Retry(), UpdateWith(), NoupdatedPaymentDetails(), Abort(), or Complete() are called before Show(). This patch re-orders the IPC methods in payment_request.cc to match the order in payment_request.h, which eases verifying correctness of their error handling. This patch prints more errors to the developer console, if available, to improve debuggability by web developers, who rarely check where LOG prints. After this patch, unexpected ordering of calls into the Payment Request IPC from the renderer to the browser on desktop will print an error in the developer console and disconnect the IPC pipes. The binary might increase slightly in size because more logs are included in the release version instead of being stripped at compile time. Bug: 912947 Change-Id: Iac2131181c64cd49b4e5ec99f4b4a8ae5d8df57a Reviewed-on: https://chromium-review.googlesource.com/c/1370198 Reviewed-by: anthonyvd <[email protected]> Commit-Queue: Rouslan Solomakhin <[email protected]> Cr-Commit-Position: refs/heads/master@{#616822} CWE ID: CWE-189
void PaymentRequest::CanMakePayment() { if (!IsInitialized()) { log_.Error("Attempted canMakePayment without initialization"); OnConnectionTerminated(); return; } // It's valid to call canMakePayment() without calling show() first. if (observer_for_testing_) observer_for_testing_->OnCanMakePaymentCalled(); if (!delegate_->GetPrefService()->GetBoolean(kCanMakePaymentEnabled) || !state_) { CanMakePaymentCallback(/*can_make_payment=*/false); } else { state_->CanMakePayment( base::BindOnce(&PaymentRequest::CanMakePaymentCallback, weak_ptr_factory_.GetWeakPtr())); } }
173,080
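The commit message above lists the ordering rules the patch enforces (no call before Init(), no second Init() or Show(), and so on), and the fixed function shows the shape of each guard: check, log, terminate the connection, return. The sketch below applies the same idea to a made-up PaymentEndpoint; it is not the real PaymentRequest class, just the call-ordering guard in isolation.

#include <cstdio>

// A call that arrives out of order is treated as a compromised renderer and
// the connection is dropped rather than trusting the call.
struct PaymentEndpoint {
  bool initialized = false;
  bool shown = false;
  bool connected = true;

  void Terminate(const char* why) {
    std::printf("terminated: %s\n", why);
    connected = false;
  }
  void Init() {
    if (initialized) { Terminate("Init called twice"); return; }
    initialized = true;
  }
  void Show() {
    if (!initialized) { Terminate("Show before Init"); return; }
    if (shown) { Terminate("Show called twice"); return; }
    shown = true;
  }
  void CanMakePayment() {
    if (!initialized) { Terminate("CanMakePayment before Init"); return; }
    std::puts("canMakePayment handled");   // valid even without a prior Show()
  }
};

int main() {
  PaymentEndpoint bad;
  bad.CanMakePayment();      // out-of-order: connection dropped

  PaymentEndpoint good;
  good.Init();
  good.CanMakePayment();     // fine: canMakePayment is legal before Show()
}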
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: long Segment::DoLoadCluster( long long& pos, long& len) { if (m_pos < 0) return DoLoadClusterUnknownSize(pos, len); long long total, avail; long status = m_pReader->Length(&total, &avail); if (status < 0) //error return status; assert((total < 0) || (avail <= total)); const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size; long long cluster_off = -1; //offset relative to start of segment long long cluster_size = -1; //size of cluster payload for (;;) { if ((total >= 0) && (m_pos >= total)) return 1; //no more clusters if ((segment_stop >= 0) && (m_pos >= segment_stop)) return 1; //no more clusters pos = m_pos; if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(m_pReader, pos, len); if (result < 0) //error return static_cast<long>(result); if (result > 0) //weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long idpos = pos; const long long id = ReadUInt(m_pReader, idpos, len); if (id < 0) //error (or underflow) return static_cast<long>(id); pos += len; //consume ID if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(m_pReader, pos, len); if (result < 0) //error return static_cast<long>(result); if (result > 0) //weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(m_pReader, pos, len); if (size < 0) //error return static_cast<long>(size); pos += len; //consume length of size of element if (size == 0) //weird { m_pos = pos; continue; } const long long unknown_size = (1LL << (7 * len)) - 1; #if 0 //we must handle this to support live webm if (size == unknown_size) return E_FILE_FORMAT_INVALID; //TODO: allow this #endif if ((segment_stop >= 0) && (size != unknown_size) && ((pos + size) > segment_stop)) { return E_FILE_FORMAT_INVALID; } #if 0 //commented-out, to support incremental cluster parsing len = static_cast<long>(size); if ((pos + size) > avail) return E_BUFFER_NOT_FULL; #endif if (id == 0x0C53BB6B) //Cues ID { if (size == unknown_size) return E_FILE_FORMAT_INVALID; //TODO: liberalize if (m_pCues == NULL) { const long long element_size = (pos - idpos) + size; m_pCues = new Cues(this, pos, size, idpos, element_size); assert(m_pCues); //TODO } m_pos = pos + size; //consume payload continue; } if (id != 0x0F43B675) //Cluster ID { if (size == unknown_size) return E_FILE_FORMAT_INVALID; //TODO: liberalize m_pos = pos + size; //consume payload continue; } cluster_off = idpos - m_start; //relative pos if (size != unknown_size) cluster_size = size; break; } assert(cluster_off >= 0); //have cluster long long pos_; long len_; status = Cluster::HasBlockEntries(this, cluster_off, pos_, len_); if (status < 0) //error, or underflow { pos = pos_; len = len_; return status; } const long idx = m_clusterCount; if (m_clusterPreloadCount > 0) { assert(idx < m_clusterSize); Cluster* const pCluster = m_clusters[idx]; assert(pCluster); assert(pCluster->m_index < 0); const long long off = pCluster->GetPosition(); assert(off >= 0); if (off == cluster_off) //preloaded already { if (status == 0) //no entries found return E_FILE_FORMAT_INVALID; if (cluster_size >= 0) pos += cluster_size; else { const long long element_size = pCluster->GetElementSize(); if (element_size <= 0) return E_FILE_FORMAT_INVALID; //TODO: handle this case pos = 
pCluster->m_element_start + element_size; } pCluster->m_index = idx; //move from preloaded to loaded ++m_clusterCount; --m_clusterPreloadCount; m_pos = pos; //consume payload assert((segment_stop < 0) || (m_pos <= segment_stop)); return 0; //success } } if (status == 0) //no entries found { if (cluster_size < 0) return E_FILE_FORMAT_INVALID; //TODO: handle this pos += cluster_size; if ((total >= 0) && (pos >= total)) { m_pos = total; return 1; //no more clusters } if ((segment_stop >= 0) && (pos >= segment_stop)) { m_pos = segment_stop; return 1; //no more clusters } m_pos = pos; return 2; //try again } Cluster* const pCluster = Cluster::Create(this, idx, cluster_off); assert(pCluster); AppendCluster(pCluster); assert(m_clusters); assert(idx < m_clusterSize); assert(m_clusters[idx] == pCluster); if (cluster_size >= 0) { pos += cluster_size; m_pos = pos; assert((segment_stop < 0) || (m_pos <= segment_stop)); return 0; } m_pUnknownSize = pCluster; m_pos = -pos; return 0; //partial success, since we have a new cluster //// status == 0 means "no block entries found" //// pos designates start of payload //// m_pos has NOT been adjusted yet (in case we need to come back here) #if 0 if (cluster_size < 0) //unknown size { const long long payload_pos = pos; //absolute pos of cluster payload for (;;) //determine cluster size { if ((total >= 0) && (pos >= total)) break; if ((segment_stop >= 0) && (pos >= segment_stop)) break; //no more clusters if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(m_pReader, pos, len); if (result < 0) //error return static_cast<long>(result); if (result > 0) //weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long idpos = pos; const long long id = ReadUInt(m_pReader, idpos, len); if (id < 0) //error (or underflow) return static_cast<long>(id); if (id == 0x0F43B675) //Cluster ID break; if (id == 0x0C53BB6B) //Cues ID break; switch (id) { case 0x20: //BlockGroup case 0x23: //Simple Block case 0x67: //TimeCode case 0x2B: //PrevSize break; default: assert(false); break; } pos += len; //consume ID (of sub-element) if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(m_pReader, pos, len); if (result < 0) //error return static_cast<long>(result); if (result > 0) //weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(m_pReader, pos, len); if (size < 0) //error return static_cast<long>(size); pos += len; //consume size field of element if (size == 0) //weird continue; const long long unknown_size = (1LL << (7 * len)) - 1; if (size == unknown_size) return E_FILE_FORMAT_INVALID; //not allowed for sub-elements if ((segment_stop >= 0) && ((pos + size) > segment_stop)) //weird return E_FILE_FORMAT_INVALID; pos += size; //consume payload of sub-element assert((segment_stop < 0) || (pos <= segment_stop)); } //determine cluster size cluster_size = pos - payload_pos; assert(cluster_size >= 0); pos = payload_pos; //reset and re-parse original cluster } if (m_clusterPreloadCount > 0) { assert(idx < m_clusterSize); Cluster* const pCluster = m_clusters[idx]; assert(pCluster); assert(pCluster->m_index < 0); const long long off = pCluster->GetPosition(); assert(off >= 0); if (off == cluster_off) //preloaded already return E_FILE_FORMAT_INVALID; 
//subtle } m_pos = pos + cluster_size; //consume payload assert((segment_stop < 0) || (m_pos <= segment_stop)); return 2; //try to find another cluster #endif } Commit Message: libwebm: Pull from upstream Rolling mkvparser from upstream. Primarily for fixing a bug on parsing failures with certain Opus WebM files. Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae The diff is so huge because there were some style clean ups upstream. But it was ensured that there were no breaking changes when the style clean ups was done upstream. Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039 CWE ID: CWE-119
long Segment::DoLoadCluster( if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long idpos = pos; const long long id = ReadUInt(m_pReader, idpos, len); if (id < 0) // error (or underflow) return static_cast<long>(id); pos += len; // consume ID // Read Size if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(m_pReader, pos, len); if (size < 0) // error return static_cast<long>(size); pos += len; // consume length of size of element // pos now points to start of payload if (size == 0) { // weird m_pos = pos; continue; } const long long unknown_size = (1LL << (7 * len)) - 1; #if 0 // we must handle this to support live webm if (size == unknown_size) return E_FILE_FORMAT_INVALID; //TODO: allow this #endif if ((segment_stop >= 0) && (size != unknown_size) && ((pos + size) > segment_stop)) { return E_FILE_FORMAT_INVALID; } #if 0 // commented-out, to support incremental cluster parsing len = static_cast<long>(size); if ((pos + size) > avail) return E_BUFFER_NOT_FULL; #endif if (id == 0x0C53BB6B) { // Cues ID if (size == unknown_size) return E_FILE_FORMAT_INVALID; // TODO: liberalize if (m_pCues == NULL) { const long long element_size = (pos - idpos) + size; m_pCues = new Cues(this, pos, size, idpos, element_size); assert(m_pCues); // TODO } m_pos = pos + size; // consume payload continue; } if (id != 0x0F43B675) { // Cluster ID if (size == unknown_size) return E_FILE_FORMAT_INVALID; // TODO: liberalize m_pos = pos + size; // consume payload continue; } // We have a cluster. cluster_off = idpos - m_start; // relative pos if (size != unknown_size) cluster_size = size; break; } assert(cluster_off >= 0); // have cluster long long pos_; long len_; status = Cluster::HasBlockEntries(this, cluster_off, pos_, len_); if (status < 0) { // error, or underflow pos = pos_; len = len_; return status; } // status == 0 means "no block entries found" // status > 0 means "found at least one block entry" // TODO: // The issue here is that the segment increments its own // pos ptr past the most recent cluster parsed, and then // starts from there to parse the next cluster. If we // don't know the size of the current cluster, then we // must either parse its payload (as we do below), looking // for the cluster (or cues) ID to terminate the parse. // This isn't really what we want: rather, we really need // a way to create the curr cluster object immediately. // The pity is that cluster::parse can determine its own // boundary, and we largely duplicate that same logic here. // // Maybe we need to get rid of our look-ahead preloading // in source::parse??? // // As we're parsing the blocks in the curr cluster //(in cluster::parse), we should have some way to signal // to the segment that we have determined the boundary, // so it can adjust its own segment::m_pos member. // // The problem is that we're asserting in asyncreadinit, // because we adjust the pos down to the curr seek pos, // and the resulting adjusted len is > 2GB. 
I'm suspicious // that this is even correct, but even if it is, we can't // be loading that much data in the cache anyway. const long idx = m_clusterCount; if (m_clusterPreloadCount > 0) { assert(idx < m_clusterSize); Cluster* const pCluster = m_clusters[idx]; assert(pCluster); assert(pCluster->m_index < 0); const long long off = pCluster->GetPosition(); assert(off >= 0); if (off == cluster_off) { // preloaded already if (status == 0) // no entries found return E_FILE_FORMAT_INVALID; if (cluster_size >= 0) pos += cluster_size; else { const long long element_size = pCluster->GetElementSize(); if (element_size <= 0) return E_FILE_FORMAT_INVALID; // TODO: handle this case pos = pCluster->m_element_start + element_size; } pCluster->m_index = idx; // move from preloaded to loaded ++m_clusterCount; --m_clusterPreloadCount; m_pos = pos; // consume payload assert((segment_stop < 0) || (m_pos <= segment_stop)); return 0; // success } } if (status == 0) { // no entries found if (cluster_size < 0) return E_FILE_FORMAT_INVALID; // TODO: handle this pos += cluster_size; if ((total >= 0) && (pos >= total)) { m_pos = total; return 1; // no more clusters } if ((segment_stop >= 0) && (pos >= segment_stop)) { m_pos = segment_stop; return 1; // no more clusters } m_pos = pos; return 2; // try again } // status > 0 means we have an entry Cluster* const pCluster = Cluster::Create(this, idx, cluster_off); // element_size); assert(pCluster); AppendCluster(pCluster); assert(m_clusters); assert(idx < m_clusterSize); assert(m_clusters[idx] == pCluster); if (cluster_size >= 0) { pos += cluster_size; m_pos = pos; assert((segment_stop < 0) || (m_pos <= segment_stop)); return 0; } m_pUnknownSize = pCluster; m_pos = -pos; return 0; // partial success, since we have a new cluster //// status == 0 means "no block entries found" //// pos designates start of payload //// m_pos has NOT been adjusted yet (in case we need to come back here) #if 0 if (cluster_size < 0) { //unknown size const long long payload_pos = pos; //absolute pos of cluster payload for (;;) { //determine cluster size if ((total >= 0) && (pos >= total)) break; if ((segment_stop >= 0) && (pos >= segment_stop)) break; //no more clusters if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(m_pReader, pos, len); if (result < 0) //error return static_cast<long>(result); if (result > 0) //weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long idpos = pos; const long long id = ReadUInt(m_pReader, idpos, len); if (id < 0) //error (or underflow) return static_cast<long>(id); if (id == 0x0F43B675) //Cluster ID break; if (id == 0x0C53BB6B) //Cues ID break; switch (id) { case 0x20: //BlockGroup case 0x23: //Simple Block case 0x67: //TimeCode case 0x2B: //PrevSize break; default: assert(false); break; } pos += len; //consume ID (of sub-element) if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(m_pReader, pos, len); if (result < 0) //error return static_cast<long>(result); if (result > 0) //weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(m_pReader, pos, len); if (size < 0) //error return static_cast<long>(size); pos += len; //consume size field of element if (size == 0) //weird continue; const long long unknown_size 
= (1LL << (7 * len)) - 1; if (size == unknown_size) return E_FILE_FORMAT_INVALID; //not allowed for sub-elements if ((segment_stop >= 0) && ((pos + size) > segment_stop)) //weird return E_FILE_FORMAT_INVALID; pos += size; //consume payload of sub-element assert((segment_stop < 0) || (pos <= segment_stop)); } //determine cluster size cluster_size = pos - payload_pos; assert(cluster_size >= 0); pos = payload_pos; //reset and re-parse original cluster } if (m_clusterPreloadCount > 0) { assert(idx < m_clusterSize); Cluster* const pCluster = m_clusters[idx]; assert(pCluster); assert(pCluster->m_index < 0); const long long off = pCluster->GetPosition(); assert(off >= 0); if (off == cluster_off) //preloaded already return E_FILE_FORMAT_INVALID; //subtle } m_pos = pos + cluster_size; //consume payload assert((segment_stop < 0) || (m_pos <= segment_stop)); return 2; //try to find another cluster #endif }
174,264
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: PHPAPI int php_var_unserialize(UNSERIALIZE_PARAMETER) { const unsigned char *cursor, *limit, *marker, *start; zval **rval_ref; limit = max; cursor = *p; if (YYCURSOR >= YYLIMIT) { return 0; } if (var_hash && cursor[0] != 'R') { var_push(var_hash, rval); } start = cursor; #line 484 "ext/standard/var_unserializer.c" { YYCTYPE yych; static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; if ((YYLIMIT - YYCURSOR) < 7) YYFILL(7); yych = *YYCURSOR; switch (yych) { case 'C': case 'O': goto yy13; case 'N': goto yy5; case 'R': goto yy2; case 'S': goto yy10; case 'a': goto yy11; case 'b': goto yy6; case 'd': goto yy8; case 'i': goto yy7; case 'o': goto yy12; case 'r': goto yy4; case 's': goto yy9; case '}': goto yy14; default: goto yy16; } yy2: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy95; yy3: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy95; yy3: #line 835 "ext/standard/var_unserializer.re" { return 0; } #line 546 "ext/standard/var_unserializer.c" yy4: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy89; if (yych == ';') goto yy87; goto yy3; yy6: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy83; goto yy3; yy7: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy77; goto yy3; yy8: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy53; goto yy3; yy9: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy46; goto yy3; yy10: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy39; goto yy3; yy11: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy32; goto yy3; yy12: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy25; goto yy3; yy13: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy17; goto yy3; yy14: ++YYCURSOR; goto yy3; yy14: ++YYCURSOR; #line 829 "ext/standard/var_unserializer.re" { /* this is the case where we have less data than planned */ php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Unexpected end of serialized data"); return 0; /* not sure if it should be 0 or 1 here? */ } #line 595 "ext/standard/var_unserializer.c" yy16: yych = *++YYCURSOR; goto yy3; goto yy20; } Commit Message: CWE ID:
PHPAPI int php_var_unserialize(UNSERIALIZE_PARAMETER) { const unsigned char *cursor, *limit, *marker, *start; zval **rval_ref; limit = max; cursor = *p; if (YYCURSOR >= YYLIMIT) { return 0; } if (var_hash && cursor[0] != 'R') { var_push(var_hash, rval); } start = cursor; #line 487 "ext/standard/var_unserializer.c" { YYCTYPE yych; static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; if ((YYLIMIT - YYCURSOR) < 7) YYFILL(7); yych = *YYCURSOR; switch (yych) { case 'C': case 'O': goto yy13; case 'N': goto yy5; case 'R': goto yy2; case 'S': goto yy10; case 'a': goto yy11; case 'b': goto yy6; case 'd': goto yy8; case 'i': goto yy7; case 'o': goto yy12; case 'r': goto yy4; case 's': goto yy9; case '}': goto yy14; default: goto yy16; } yy2: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy95; yy3: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy95; yy3: #line 838 "ext/standard/var_unserializer.re" { return 0; } #line 549 "ext/standard/var_unserializer.c" yy4: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy89; if (yych == ';') goto yy87; goto yy3; yy6: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy83; goto yy3; yy7: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy77; goto yy3; yy8: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy53; goto yy3; yy9: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy46; goto yy3; yy10: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy39; goto yy3; yy11: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy32; goto yy3; yy12: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy25; goto yy3; yy13: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy17; goto yy3; yy14: ++YYCURSOR; goto yy3; yy14: ++YYCURSOR; #line 832 "ext/standard/var_unserializer.re" { /* this is the case where we have less data than planned */ php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Unexpected end of serialized data"); return 0; /* not sure if it should be 0 or 1 here? */ } #line 598 "ext/standard/var_unserializer.c" yy16: yych = *++YYCURSOR; goto yy3; goto yy20; }
164,892
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int encrypted_update(struct key *key, struct key_preparsed_payload *prep) { struct encrypted_key_payload *epayload = key->payload.data[0]; struct encrypted_key_payload *new_epayload; char *buf; char *new_master_desc = NULL; const char *format = NULL; size_t datalen = prep->datalen; int ret = 0; if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) return -ENOKEY; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; buf = kmalloc(datalen + 1, GFP_KERNEL); if (!buf) return -ENOMEM; buf[datalen] = 0; memcpy(buf, prep->data, datalen); ret = datablob_parse(buf, &format, &new_master_desc, NULL, NULL); if (ret < 0) goto out; ret = valid_master_desc(new_master_desc, epayload->master_desc); if (ret < 0) goto out; new_epayload = encrypted_key_alloc(key, epayload->format, new_master_desc, epayload->datalen); if (IS_ERR(new_epayload)) { ret = PTR_ERR(new_epayload); goto out; } __ekey_init(new_epayload, epayload->format, new_master_desc, epayload->datalen); memcpy(new_epayload->iv, epayload->iv, ivsize); memcpy(new_epayload->payload_data, epayload->payload_data, epayload->payload_datalen); rcu_assign_keypointer(key, new_epayload); call_rcu(&epayload->rcu, encrypted_rcu_free); out: kzfree(buf); return ret; } Commit Message: KEYS: Fix race between updating and finding a negative key Consolidate KEY_FLAG_INSTANTIATED, KEY_FLAG_NEGATIVE and the rejection error into one field such that: (1) The instantiation state can be modified/read atomically. (2) The error can be accessed atomically with the state. (3) The error isn't stored unioned with the payload pointers. This deals with the problem that the state is spread over three different objects (two bits and a separate variable) and reading or updating them atomically isn't practical, given that not only can uninstantiated keys change into instantiated or rejected keys, but rejected keys can also turn into instantiated keys - and someone accessing the key might not be using any locking. The main side effect of this problem is that what was held in the payload may change, depending on the state. For instance, you might observe the key to be in the rejected state. You then read the cached error, but if the key semaphore wasn't locked, the key might've become instantiated between the two reads - and you might now have something in hand that isn't actually an error code. The state is now KEY_IS_UNINSTANTIATED, KEY_IS_POSITIVE or a negative error code if the key is negatively instantiated. The key_is_instantiated() function is replaced with key_is_positive() to avoid confusion as negative keys are also 'instantiated'. Additionally, barriering is included: (1) Order payload-set before state-set during instantiation. (2) Order state-read before payload-read when using the key. Further separate barriering is necessary if RCU is being used to access the payload content after reading the payload pointers. Fixes: 146aa8b1453b ("KEYS: Merge the type-specific data with the payload data") Cc: [email protected] # v4.4+ Reported-by: Eric Biggers <[email protected]> Signed-off-by: David Howells <[email protected]> Reviewed-by: Eric Biggers <[email protected]> CWE ID: CWE-20
static int encrypted_update(struct key *key, struct key_preparsed_payload *prep) { struct encrypted_key_payload *epayload = key->payload.data[0]; struct encrypted_key_payload *new_epayload; char *buf; char *new_master_desc = NULL; const char *format = NULL; size_t datalen = prep->datalen; int ret = 0; if (key_is_negative(key)) return -ENOKEY; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; buf = kmalloc(datalen + 1, GFP_KERNEL); if (!buf) return -ENOMEM; buf[datalen] = 0; memcpy(buf, prep->data, datalen); ret = datablob_parse(buf, &format, &new_master_desc, NULL, NULL); if (ret < 0) goto out; ret = valid_master_desc(new_master_desc, epayload->master_desc); if (ret < 0) goto out; new_epayload = encrypted_key_alloc(key, epayload->format, new_master_desc, epayload->datalen); if (IS_ERR(new_epayload)) { ret = PTR_ERR(new_epayload); goto out; } __ekey_init(new_epayload, epayload->format, new_master_desc, epayload->datalen); memcpy(new_epayload->iv, epayload->iv, ivsize); memcpy(new_epayload->payload_data, epayload->payload_data, epayload->payload_datalen); rcu_assign_keypointer(key, new_epayload); call_rcu(&epayload->rcu, encrypted_rcu_free); out: kzfree(buf); return ret; }
167,694
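The KEYS commit message above turns on two ordering rules: the payload must be published before the instantiation state is set, and the state must be read before the payload is dereferenced. As a side note, here is a minimal stand-alone C11 sketch of that publish/consume pattern; every name (demo_key, demo_instantiate, demo_lookup) is invented for illustration and this is not the kernel implementation.

#include <stdatomic.h>
#include <stddef.h>

enum { DEMO_UNINSTANTIATED = 0, DEMO_POSITIVE = 1 };   /* negative values = rejection error */

struct demo_key {
    void *payload;
    _Atomic int state;        /* one word carrying state or a negative error */
};

static void demo_instantiate(struct demo_key *k, void *payload)
{
    k->payload = payload;
    /* Release ordering: the payload store is visible before the state store. */
    atomic_store_explicit(&k->state, DEMO_POSITIVE, memory_order_release);
}

static void *demo_lookup(struct demo_key *k)
{
    /* Acquire ordering: the state read happens before any payload read. */
    int s = atomic_load_explicit(&k->state, memory_order_acquire);
    if (s != DEMO_POSITIVE)
        return NULL;          /* uninstantiated or negatively instantiated */
    return k->payload;
}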
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int complete_emulated_mmio(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; struct kvm_mmio_fragment *frag; unsigned len; BUG_ON(!vcpu->mmio_needed); /* Complete previous fragment */ frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; len = min(8u, frag->len); if (!vcpu->mmio_is_write) memcpy(frag->data, run->mmio.data, len); if (frag->len <= 8) { /* Switch to the next fragment. */ frag++; vcpu->mmio_cur_fragment++; } else { /* Go forward to the next mmio piece. */ frag->data += len; frag->gpa += len; frag->len -= len; } if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) { vcpu->mmio_needed = 0; /* FIXME: return into emulator if single-stepping. */ if (vcpu->mmio_is_write) return 1; vcpu->mmio_read_completed = 1; return complete_emulated_io(vcpu); } run->exit_reason = KVM_EXIT_MMIO; run->mmio.phys_addr = frag->gpa; if (vcpu->mmio_is_write) memcpy(run->mmio.data, frag->data, min(8u, frag->len)); run->mmio.len = min(8u, frag->len); run->mmio.is_write = vcpu->mmio_is_write; vcpu->arch.complete_userspace_io = complete_emulated_mmio; return 0; } Commit Message: kvm: x86: fix emulator buffer overflow (CVE-2014-0049) The problem occurs when the guest performs a pusha with the stack address pointing to an mmio address (or an invalid guest physical address) to start with, but then extending into an ordinary guest physical address. When doing repeated emulated pushes emulator_read_write sets mmio_needed to 1 on the first one. On a later push when the stack points to regular memory, mmio_nr_fragments is set to 0, but mmio_is_needed is not set to 0. As a result, KVM exits to userspace, and then returns to complete_emulated_mmio. In complete_emulated_mmio vcpu->mmio_cur_fragment is incremented. The termination condition of vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments is never achieved. The code bounces back and fourth to userspace incrementing mmio_cur_fragment past it's buffer. If the guest does nothing else it eventually leads to a a crash on a memcpy from invalid memory address. However if a guest code can cause the vm to be destroyed in another vcpu with excellent timing, then kvm_clear_async_pf_completion_queue can be used by the guest to control the data that's pointed to by the call to cancel_work_item, which can be used to gain execution. Fixes: f78146b0f9230765c6315b2e14f56112513389ad Signed-off-by: Andrew Honig <[email protected]> Cc: [email protected] (3.5+) Signed-off-by: Paolo Bonzini <[email protected]> CWE ID: CWE-119
static int complete_emulated_mmio(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; struct kvm_mmio_fragment *frag; unsigned len; BUG_ON(!vcpu->mmio_needed); /* Complete previous fragment */ frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; len = min(8u, frag->len); if (!vcpu->mmio_is_write) memcpy(frag->data, run->mmio.data, len); if (frag->len <= 8) { /* Switch to the next fragment. */ frag++; vcpu->mmio_cur_fragment++; } else { /* Go forward to the next mmio piece. */ frag->data += len; frag->gpa += len; frag->len -= len; } if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { vcpu->mmio_needed = 0; /* FIXME: return into emulator if single-stepping. */ if (vcpu->mmio_is_write) return 1; vcpu->mmio_read_completed = 1; return complete_emulated_io(vcpu); } run->exit_reason = KVM_EXIT_MMIO; run->mmio.phys_addr = frag->gpa; if (vcpu->mmio_is_write) memcpy(run->mmio.data, frag->data, min(8u, frag->len)); run->mmio.len = min(8u, frag->len); run->mmio.is_write = vcpu->mmio_is_write; vcpu->arch.complete_userspace_io = complete_emulated_mmio; return 0; }
166,466
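The KVM commit message above describes a cursor that could step past its fragment array because the exit test compared with "==" after the cursor had already overshot. A small generic sketch of the defensive form the fix adopts, with invented names (demo_state, demo_complete_fragment) rather than the KVM code itself:

#include <stdbool.h>
#include <string.h>

#define DEMO_MAX_FRAGMENTS 2

struct demo_fragment { unsigned len; unsigned char data[8]; };

struct demo_state {
    struct demo_fragment frags[DEMO_MAX_FRAGMENTS];
    unsigned nr_fragments;    /* how many fragments are valid */
    unsigned cur_fragment;    /* walk cursor */
};

/* Copies one fragment's worth of data and reports whether the walk is done.
 * The ">=" test terminates even if cur_fragment has already overshot. */
static bool demo_complete_fragment(struct demo_state *s, const unsigned char *buf)
{
    if (s->cur_fragment >= s->nr_fragments)
        return true;
    unsigned len = s->frags[s->cur_fragment].len;
    if (len > sizeof(s->frags[s->cur_fragment].data))
        len = sizeof(s->frags[s->cur_fragment].data);
    memcpy(s->frags[s->cur_fragment].data, buf, len);
    s->cur_fragment++;
    return s->cur_fragment >= s->nr_fragments;
}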
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock_iocb *siocb = kiocb_to_siocb(iocb); struct scm_cookie tmp_scm; struct sock *sk = sock->sk; struct unix_sock *u = unix_sk(sk); int noblock = flags & MSG_DONTWAIT; struct sk_buff *skb; int err; int peeked, skip; err = -EOPNOTSUPP; if (flags&MSG_OOB) goto out; msg->msg_namelen = 0; err = mutex_lock_interruptible(&u->readlock); if (err) { err = sock_intr_errno(sock_rcvtimeo(sk, noblock)); goto out; } skip = sk_peek_offset(sk, flags); skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err); if (!skb) { unix_state_lock(sk); /* Signal EOF on disconnected non-blocking SEQPACKET socket. */ if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN && (sk->sk_shutdown & RCV_SHUTDOWN)) err = 0; unix_state_unlock(sk); goto out_unlock; } wake_up_interruptible_sync_poll(&u->peer_wait, POLLOUT | POLLWRNORM | POLLWRBAND); if (msg->msg_name) unix_copy_addr(msg, skb->sk); if (size > skb->len - skip) size = skb->len - skip; else if (size < skb->len - skip) msg->msg_flags |= MSG_TRUNC; err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size); if (err) goto out_free; if (sock_flag(sk, SOCK_RCVTSTAMP)) __sock_recv_timestamp(msg, sk, skb); if (!siocb->scm) { siocb->scm = &tmp_scm; memset(&tmp_scm, 0, sizeof(tmp_scm)); } scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid); unix_set_secdata(siocb->scm, skb); if (!(flags & MSG_PEEK)) { if (UNIXCB(skb).fp) unix_detach_fds(siocb->scm, skb); sk_peek_offset_bwd(sk, skb->len); } else { /* It is questionable: on PEEK we could: - do not return fds - good, but too simple 8) - return fds, and do not return them on read (old strategy, apparently wrong) - clone fds (I chose it for now, it is the most universal solution) POSIX 1003.1g does not actually define this clearly at all. POSIX 1003.1g doesn't define a lot of things clearly however! */ sk_peek_offset_fwd(sk, size); if (UNIXCB(skb).fp) siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp); } err = (flags & MSG_TRUNC) ? skb->len - skip : size; scm_recv(sock, msg, siocb->scm, flags); out_free: skb_free_datagram(sk, skb); out_unlock: mutex_unlock(&u->readlock); out: return err; } Commit Message: net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. 
Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-20
static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock_iocb *siocb = kiocb_to_siocb(iocb); struct scm_cookie tmp_scm; struct sock *sk = sock->sk; struct unix_sock *u = unix_sk(sk); int noblock = flags & MSG_DONTWAIT; struct sk_buff *skb; int err; int peeked, skip; err = -EOPNOTSUPP; if (flags&MSG_OOB) goto out; err = mutex_lock_interruptible(&u->readlock); if (err) { err = sock_intr_errno(sock_rcvtimeo(sk, noblock)); goto out; } skip = sk_peek_offset(sk, flags); skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err); if (!skb) { unix_state_lock(sk); /* Signal EOF on disconnected non-blocking SEQPACKET socket. */ if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN && (sk->sk_shutdown & RCV_SHUTDOWN)) err = 0; unix_state_unlock(sk); goto out_unlock; } wake_up_interruptible_sync_poll(&u->peer_wait, POLLOUT | POLLWRNORM | POLLWRBAND); if (msg->msg_name) unix_copy_addr(msg, skb->sk); if (size > skb->len - skip) size = skb->len - skip; else if (size < skb->len - skip) msg->msg_flags |= MSG_TRUNC; err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size); if (err) goto out_free; if (sock_flag(sk, SOCK_RCVTSTAMP)) __sock_recv_timestamp(msg, sk, skb); if (!siocb->scm) { siocb->scm = &tmp_scm; memset(&tmp_scm, 0, sizeof(tmp_scm)); } scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid); unix_set_secdata(siocb->scm, skb); if (!(flags & MSG_PEEK)) { if (UNIXCB(skb).fp) unix_detach_fds(siocb->scm, skb); sk_peek_offset_bwd(sk, skb->len); } else { /* It is questionable: on PEEK we could: - do not return fds - good, but too simple 8) - return fds, and do not return them on read (old strategy, apparently wrong) - clone fds (I chose it for now, it is the most universal solution) POSIX 1003.1g does not actually define this clearly at all. POSIX 1003.1g doesn't define a lot of things clearly however! */ sk_peek_offset_fwd(sk, size); if (UNIXCB(skb).fp) siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp); } err = (flags & MSG_TRUNC) ? skb->len - skip : size; scm_recv(sock, msg, siocb->scm, flags); out_free: skb_free_datagram(sk, skb); out_unlock: mutex_unlock(&u->readlock); out: return err; }
166,520
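The recvmsg commit message above establishes a convention: the caller hands the handler msg_namelen == 0, and the handler sets it only when it actually fills msg_name, so uninitialized kernel stack bytes can never be copied out as an address. A hypothetical handler skeleton following that convention (demo_msghdr and demo_recvmsg are stand-ins, not kernel types):

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

struct demo_msghdr {
    void *msg_name;      /* NULL when the caller did not ask for an address */
    int   msg_namelen;   /* arrives as 0 */
};

/* peer/peer_len describe the sender; msg_name, when non-NULL, is assumed to
 * point at a buffer of at least sizeof(struct sockaddr_storage). */
static void demo_recvmsg(struct demo_msghdr *msg,
                         const struct sockaddr_in *peer, socklen_t peer_len)
{
    if (msg->msg_name) {
        memcpy(msg->msg_name, peer, peer_len);
        msg->msg_namelen = (int)peer_len;   /* set only when msg_name is filled */
    }
    /* otherwise msg_namelen stays 0 and no address bytes are returned */
}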
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: explicit ImageDecodedHandlerWithTimeout( const base::Callback<void(const SkBitmap&)>& image_decoded_callback) : image_decoded_callback_(image_decoded_callback), weak_ptr_factory_(this) {} Commit Message: Local NTP: add smoke tests for doodles Split LogoService into LogoService interface and LogoServiceImpl to make it easier to provide fake data to the test. Bug: 768419 Cq-Include-Trybots: master.tryserver.chromium.linux:closure_compilation Change-Id: I84639189d2db1b24a2e139936c99369352bab587 Reviewed-on: https://chromium-review.googlesource.com/690198 Reviewed-by: Sylvain Defresne <[email protected]> Reviewed-by: Marc Treib <[email protected]> Commit-Queue: Chris Pickel <[email protected]> Cr-Commit-Position: refs/heads/master@{#505374} CWE ID: CWE-119
explicit ImageDecodedHandlerWithTimeout(
171,953
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int read_escaped_char( yyscan_t yyscanner, uint8_t* escaped_char) { char text[4] = {0, 0, 0, 0}; text[0] = '\\'; text[1] = RE_YY_INPUT(yyscanner); if (text[1] == EOF) return 0; if (text[1] == 'x') { text[2] = RE_YY_INPUT(yyscanner); if (text[2] == EOF) return 0; text[3] = RE_YY_INPUT(yyscanner); if (text[3] == EOF) return 0; } *escaped_char = escaped_char_value(text); return 1; } Commit Message: re_lexer: Make reading escape sequences more robust (#586) * Add test for issue #503 * re_lexer: Make reading escape sequences more robust This commit fixes parsing incomplete escape sequences at the end of a regular expression and parsing things like \xxy (invalid hex digits) which before were silently turned into (char)255. Close #503 * Update re_lexer.c CWE ID: CWE-476
int read_escaped_char( yyscan_t yyscanner, uint8_t* escaped_char) { char text[4] = {0, 0, 0, 0}; text[0] = '\\'; text[1] = RE_YY_INPUT(yyscanner); if (text[1] == EOF || text[1] == 0) return 0; if (text[1] == 'x') { text[2] = RE_YY_INPUT(yyscanner); if (!isxdigit(text[2])) return 0; text[3] = RE_YY_INPUT(yyscanner); if (!isxdigit(text[3])) return 0; } *escaped_char = escaped_char_value(text); return 1; }
168,486
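The yara commit message above rejects escape sequences whose hex digits are missing or invalid instead of silently mapping them to a byte value. A small stand-alone sketch of that validation idea, using invented names (demo_parse_hex_escape); the parser in the row above is the authoritative fix.

#include <ctype.h>
#include <stdbool.h>
#include <stdint.h>

static int demo_hex_val(int c)
{
    if (c >= '0' && c <= '9')
        return c - '0';
    return tolower(c) - 'a' + 10;   /* caller guarantees isxdigit(c) */
}

/* Accepts "\xHH" only when both digits are real hex digits. */
static bool demo_parse_hex_escape(int hi, int lo, uint8_t *out)
{
    if (!isxdigit(hi) || !isxdigit(lo))
        return false;
    *out = (uint8_t)((demo_hex_val(hi) << 4) | demo_hex_val(lo));
    return true;
}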
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int sctp_verify_asconf(const struct sctp_association *asoc, struct sctp_paramhdr *param_hdr, void *chunk_end, struct sctp_paramhdr **errp) { sctp_addip_param_t *asconf_param; union sctp_params param; int length, plen; param.v = (sctp_paramhdr_t *) param_hdr; while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) { length = ntohs(param.p->length); *errp = param.p; if (param.v > chunk_end - length || length < sizeof(sctp_paramhdr_t)) return 0; switch (param.p->type) { case SCTP_PARAM_ADD_IP: case SCTP_PARAM_DEL_IP: case SCTP_PARAM_SET_PRIMARY: asconf_param = (sctp_addip_param_t *)param.v; plen = ntohs(asconf_param->param_hdr.length); if (plen < sizeof(sctp_addip_param_t) + sizeof(sctp_paramhdr_t)) return 0; break; case SCTP_PARAM_SUCCESS_REPORT: case SCTP_PARAM_ADAPTATION_LAYER_IND: if (length != sizeof(sctp_addip_param_t)) return 0; break; default: break; } param.v += WORD_ROUND(length); } if (param.v != chunk_end) return 0; return 1; } Commit Message: net: sctp: fix skb_over_panic when receiving malformed ASCONF chunks Commit 6f4c618ddb0 ("SCTP : Add paramters validity check for ASCONF chunk") added basic verification of ASCONF chunks, however, it is still possible to remotely crash a server by sending a special crafted ASCONF chunk, even up to pre 2.6.12 kernels: skb_over_panic: text:ffffffffa01ea1c3 len:31056 put:30768 head:ffff88011bd81800 data:ffff88011bd81800 tail:0x7950 end:0x440 dev:<NULL> ------------[ cut here ]------------ kernel BUG at net/core/skbuff.c:129! [...] Call Trace: <IRQ> [<ffffffff8144fb1c>] skb_put+0x5c/0x70 [<ffffffffa01ea1c3>] sctp_addto_chunk+0x63/0xd0 [sctp] [<ffffffffa01eadaf>] sctp_process_asconf+0x1af/0x540 [sctp] [<ffffffff8152d025>] ? _read_unlock_bh+0x15/0x20 [<ffffffffa01e0038>] sctp_sf_do_asconf+0x168/0x240 [sctp] [<ffffffffa01e3751>] sctp_do_sm+0x71/0x1210 [sctp] [<ffffffff8147645d>] ? fib_rules_lookup+0xad/0xf0 [<ffffffffa01e6b22>] ? sctp_cmp_addr_exact+0x32/0x40 [sctp] [<ffffffffa01e8393>] sctp_assoc_bh_rcv+0xd3/0x180 [sctp] [<ffffffffa01ee986>] sctp_inq_push+0x56/0x80 [sctp] [<ffffffffa01fcc42>] sctp_rcv+0x982/0xa10 [sctp] [<ffffffffa01d5123>] ? ipt_local_in_hook+0x23/0x28 [iptable_filter] [<ffffffff8148bdc9>] ? nf_iterate+0x69/0xb0 [<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0 [<ffffffff8148bf86>] ? nf_hook_slow+0x76/0x120 [<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0 [<ffffffff81496ded>] ip_local_deliver_finish+0xdd/0x2d0 [<ffffffff81497078>] ip_local_deliver+0x98/0xa0 [<ffffffff8149653d>] ip_rcv_finish+0x12d/0x440 [<ffffffff81496ac5>] ip_rcv+0x275/0x350 [<ffffffff8145c88b>] __netif_receive_skb+0x4ab/0x750 [<ffffffff81460588>] netif_receive_skb+0x58/0x60 This can be triggered e.g., through a simple scripted nmap connection scan injecting the chunk after the handshake, for example, ... -------------- INIT[ASCONF; ASCONF_ACK] -------------> <----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------ -------------------- COOKIE-ECHO --------------------> <-------------------- COOKIE-ACK --------------------- ------------------ ASCONF; UNKNOWN ------------------> ... where ASCONF chunk of length 280 contains 2 parameters ... 1) Add IP address parameter (param length: 16) 2) Add/del IP address parameter (param length: 255) ... followed by an UNKNOWN chunk of e.g. 4 bytes. Here, the Address Parameter in the ASCONF chunk is even missing, too. This is just an example and similarly-crafted ASCONF chunks could be used just as well. 
The ASCONF chunk passes through sctp_verify_asconf() as all parameters passed sanity checks, and after walking, we ended up successfully at the chunk end boundary, and thus may invoke sctp_process_asconf(). Parameter walking is done with WORD_ROUND() to take padding into account. In sctp_process_asconf()'s TLV processing, we may fail in sctp_process_asconf_param() e.g., due to removal of the IP address that is also the source address of the packet containing the ASCONF chunk, and thus we need to add all TLVs after the failure to our ASCONF response to remote via helper function sctp_add_asconf_response(), which basically invokes a sctp_addto_chunk() adding the error parameters to the given skb. When walking to the next parameter this time, we proceed with ... length = ntohs(asconf_param->param_hdr.length); asconf_param = (void *)asconf_param + length; ... instead of the WORD_ROUND()'ed length, thus resulting here in an off-by-one that leads to reading the follow-up garbage parameter length of 12336, and thus throwing an skb_over_panic for the reply when trying to sctp_addto_chunk() next time, which implicitly calls the skb_put() with that length. Fix it by using sctp_walk_params() [ which is also used in INIT parameter processing ] macro in the verification *and* in ASCONF processing: it will make sure we don't spill over, that we walk parameters WORD_ROUND()'ed. Moreover, we're being more defensive and guard against unknown parameter types and missized addresses. Joint work with Vlad Yasevich. Fixes: b896b82be4ae ("[SCTP] ADDIP: Support for processing incoming ASCONF_ACK chunks.") Signed-off-by: Daniel Borkmann <[email protected]> Signed-off-by: Vlad Yasevich <[email protected]> Acked-by: Neil Horman <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-399
int sctp_verify_asconf(const struct sctp_association *asoc, /* Verify the ASCONF packet before we process it. */ bool sctp_verify_asconf(const struct sctp_association *asoc, struct sctp_chunk *chunk, bool addr_param_needed, struct sctp_paramhdr **errp) { sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr; union sctp_params param; bool addr_param_seen = false; sctp_walk_params(param, addip, addip_hdr.params) { size_t length = ntohs(param.p->length); *errp = param.p; switch (param.p->type) { case SCTP_PARAM_ERR_CAUSE: break; case SCTP_PARAM_IPV4_ADDRESS: if (length != sizeof(sctp_ipv4addr_param_t)) return false; addr_param_seen = true; break; case SCTP_PARAM_IPV6_ADDRESS: if (length != sizeof(sctp_ipv6addr_param_t)) return false; addr_param_seen = true; break; case SCTP_PARAM_ADD_IP: case SCTP_PARAM_DEL_IP: case SCTP_PARAM_SET_PRIMARY: /* In ASCONF chunks, these need to be first. */ if (addr_param_needed && !addr_param_seen) return false; length = ntohs(param.addip->param_hdr.length); if (length < sizeof(sctp_addip_param_t) + sizeof(sctp_paramhdr_t)) return false; break; case SCTP_PARAM_SUCCESS_REPORT: case SCTP_PARAM_ADAPTATION_LAYER_IND: if (length != sizeof(sctp_addip_param_t)) return false; break; default: /* This is unkown to us, reject! */ return false; } } /* Remaining sanity checks. */ if (addr_param_needed && !addr_param_seen) return false; if (!addr_param_needed && addr_param_seen) return false; if (param.v != chunk->chunk_end) return false; return true; }
166,334
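The SCTP commit message above pins the off-by-one on walking parameters by their raw length instead of the WORD_ROUND()'ed (4-byte padded) length, and on trusting unchecked lengths. A compact sketch of the safe walk it calls for; the names (demo_walk_params, DEMO_WORD_ROUND) are invented and the real fix is the sctp_walk_params()-based code in the row above.

#include <arpa/inet.h>   /* ntohs */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define DEMO_WORD_ROUND(len) (((len) + 3) & ~(size_t)3)

struct demo_param_hdr { uint16_t type; uint16_t length; };

static bool demo_walk_params(const uint8_t *buf, size_t buflen)
{
    size_t off = 0;
    while (off + sizeof(struct demo_param_hdr) <= buflen) {
        uint16_t nlen;
        memcpy(&nlen, buf + off + offsetof(struct demo_param_hdr, length),
               sizeof(nlen));
        size_t len = ntohs(nlen);
        if (len < sizeof(struct demo_param_hdr) || len > buflen - off)
            return false;               /* missized parameter: reject the chunk */
        off += DEMO_WORD_ROUND(len);    /* padded advance, as in the fix */
    }
    return true;                        /* walked cleanly to the end */
}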
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void DisconnectWindowLinux::Hide() { NOTIMPLEMENTED(); } Commit Message: Initial implementation of DisconnectWindow on Linux. BUG=None TEST=Manual Review URL: http://codereview.chromium.org/7089016 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@88889 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
void DisconnectWindowLinux::Hide() { DCHECK(disconnect_window_); gtk_widget_hide(disconnect_window_); } gboolean DisconnectWindowLinux::OnWindowDelete(GtkWidget* widget, GdkEvent* event) { // Don't allow the window to be closed. return TRUE; } void DisconnectWindowLinux::OnDisconnectClicked(GtkButton* sender) { DCHECK(host_); host_->Shutdown(); }
170,473
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: InvalidState AXNodeObject::getInvalidState() const { const AtomicString& attributeValue = getAOMPropertyOrARIAAttribute(AOMStringProperty::kInvalid); if (equalIgnoringCase(attributeValue, "false")) return InvalidStateFalse; if (equalIgnoringCase(attributeValue, "true")) return InvalidStateTrue; if (equalIgnoringCase(attributeValue, "spelling")) return InvalidStateSpelling; if (equalIgnoringCase(attributeValue, "grammar")) return InvalidStateGrammar; if (!attributeValue.isEmpty()) return InvalidStateOther; if (getNode() && getNode()->isElementNode() && toElement(getNode())->isFormControlElement()) { HTMLFormControlElement* element = toHTMLFormControlElement(getNode()); HeapVector<Member<HTMLFormControlElement>> invalidControls; bool isInvalid = !element->checkValidity(&invalidControls, CheckValidityDispatchNoEvent); return isInvalid ? InvalidStateTrue : InvalidStateFalse; } return AXObject::getInvalidState(); } Commit Message: Switch to equalIgnoringASCIICase throughout modules/accessibility BUG=627682 Review-Url: https://codereview.chromium.org/2793913007 Cr-Commit-Position: refs/heads/master@{#461858} CWE ID: CWE-254
InvalidState AXNodeObject::getInvalidState() const { const AtomicString& attributeValue = getAOMPropertyOrARIAAttribute(AOMStringProperty::kInvalid); if (equalIgnoringASCIICase(attributeValue, "false")) return InvalidStateFalse; if (equalIgnoringASCIICase(attributeValue, "true")) return InvalidStateTrue; if (equalIgnoringASCIICase(attributeValue, "spelling")) return InvalidStateSpelling; if (equalIgnoringASCIICase(attributeValue, "grammar")) return InvalidStateGrammar; if (!attributeValue.isEmpty()) return InvalidStateOther; if (getNode() && getNode()->isElementNode() && toElement(getNode())->isFormControlElement()) { HTMLFormControlElement* element = toHTMLFormControlElement(getNode()); HeapVector<Member<HTMLFormControlElement>> invalidControls; bool isInvalid = !element->checkValidity(&invalidControls, CheckValidityDispatchNoEvent); return isInvalid ? InvalidStateTrue : InvalidStateFalse; } return AXObject::getInvalidState(); }
171,912
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int jas_matrix_resize(jas_matrix_t *matrix, int numrows, int numcols) { int size; int i; size = numrows * numcols; if (size > matrix->datasize_ || numrows > matrix->maxrows_) { return -1; } matrix->numrows_ = numrows; matrix->numcols_ = numcols; for (i = 0; i < numrows; ++i) { matrix->rows_[i] = &matrix->data_[numcols * i]; } return 0; } Commit Message: The generation of the configuration file jas_config.h has been completely reworked in order to avoid pollution of the global namespace. Some problematic types like uchar, ulong, and friends have been replaced with names with a jas_ prefix. An option max_samples has been added to the BMP and JPEG decoders to restrict the maximum size of image that they can decode. This change was made as a (possibly temporary) fix to address security concerns. A max_samples command-line option has also been added to imginfo. Whether an image component (for jas_image_t) is stored in memory or on disk is now based on the component size (rather than the image size). Some debug log message were added. Some new integer overflow checks were added. Some new safe integer add/multiply functions were added. More pre-C99 cruft was removed. JasPer has numerous "hacks" to handle pre-C99 compilers. JasPer now assumes C99 support. So, this pre-C99 cruft is unnecessary and can be removed. The regression jasper-doublefree-mem_close.jpg has been re-enabled. Theoretically, it should work more predictably now. CWE ID: CWE-190
int jas_matrix_resize(jas_matrix_t *matrix, int numrows, int numcols) int jas_matrix_resize(jas_matrix_t *matrix, jas_matind_t numrows, jas_matind_t numcols) { jas_matind_t size; jas_matind_t i; size = numrows * numcols; if (size > matrix->datasize_ || numrows > matrix->maxrows_) { return -1; } matrix->numrows_ = numrows; matrix->numcols_ = numcols; for (i = 0; i < numrows; ++i) { matrix->rows_[i] = &matrix->data_[numcols * i]; } return 0; }
168,705
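The JasPer commit message above mentions adding safe integer add/multiply helpers and new overflow checks before sizing allocations. A minimal illustration of such a helper; the name demo_safe_size_mul is invented, not JasPer's API.

#include <stdbool.h>
#include <stddef.h>

/* Returns false and leaves *result untouched if x * y would overflow size_t. */
static bool demo_safe_size_mul(size_t x, size_t y, size_t *result)
{
    if (x != 0 && y > (size_t)-1 / x)
        return false;
    *result = x * y;
    return true;
}

/* Typical use before allocating numrows * numcols elements:
 *   size_t n;
 *   if (!demo_safe_size_mul(numrows, numcols, &n)) return -1;
 */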
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: WebGLObject::WebGLObject(WebGLRenderingContext* context) : m_object(0) , m_attachmentCount(0) , m_deleted(false) { } Commit Message: Unreviewed, build fix for unused argument warning after r104954. https://bugs.webkit.org/show_bug.cgi?id=75906 Also fixed up somebody's bad merge in Source/WebCore/ChangeLog. * html/canvas/WebGLObject.cpp: (WebCore::WebGLObject::WebGLObject): git-svn-id: svn://svn.chromium.org/blink/trunk@104959 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-119
WebGLObject::WebGLObject(WebGLRenderingContext* context) WebGLObject::WebGLObject(WebGLRenderingContext*) : m_object(0) , m_attachmentCount(0) , m_deleted(false) { }
170,251
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void Splash::scaleMaskYdXu(SplashImageMaskSource src, void *srcData, int srcWidth, int srcHeight, int scaledWidth, int scaledHeight, SplashBitmap *dest) { Guchar *lineBuf; Guint *pixBuf; Guint pix; Guchar *destPtr; int yp, yq, xp, xq, yt, y, yStep, xt, x, xStep, d; int i, j; yp = srcHeight / scaledHeight; lineBuf = (Guchar *)gmalloc(srcWidth); pixBuf = (Guint *)gmallocn(srcWidth, sizeof(int)); yt = 0; destPtr = dest->data; for (y = 0; y < scaledHeight; ++y) { yt = 0; destPtr = dest->data; for (y = 0; y < scaledHeight; ++y) { } memset(pixBuf, 0, srcWidth * sizeof(int)); for (i = 0; i < yStep; ++i) { (*src)(srcData, lineBuf); for (j = 0; j < srcWidth; ++j) { pixBuf[j] += lineBuf[j]; } } xt = 0; d = (255 << 23) / yStep; for (x = 0; x < srcWidth; ++x) { if ((xt += xq) >= srcWidth) { xt -= srcWidth; xStep = xp + 1; } else { xStep = xp; } pix = pixBuf[x]; pix = (pix * d) >> 23; for (i = 0; i < xStep; ++i) { *destPtr++ = (Guchar)pix; } } } gfree(pixBuf); gfree(lineBuf); } Commit Message: CWE ID: CWE-119
void Splash::scaleMaskYdXu(SplashImageMaskSource src, void *srcData, int srcWidth, int srcHeight, int scaledWidth, int scaledHeight, SplashBitmap *dest) { Guchar *lineBuf; Guint *pixBuf; Guint pix; Guchar *destPtr; int yp, yq, xp, xq, yt, y, yStep, xt, x, xStep, d; int i, j; destPtr = dest->data; if (destPtr == NULL) { error(errInternal, -1, "dest->data is NULL in Splash::scaleMaskYdXu"); return; } yp = srcHeight / scaledHeight; lineBuf = (Guchar *)gmalloc(srcWidth); pixBuf = (Guint *)gmallocn(srcWidth, sizeof(int)); yt = 0; destPtr = dest->data; for (y = 0; y < scaledHeight; ++y) { yt = 0; for (y = 0; y < scaledHeight; ++y) { } memset(pixBuf, 0, srcWidth * sizeof(int)); for (i = 0; i < yStep; ++i) { (*src)(srcData, lineBuf); for (j = 0; j < srcWidth; ++j) { pixBuf[j] += lineBuf[j]; } } xt = 0; d = (255 << 23) / yStep; for (x = 0; x < srcWidth; ++x) { if ((xt += xq) >= srcWidth) { xt -= srcWidth; xStep = xp + 1; } else { xStep = xp; } pix = pixBuf[x]; pix = (pix * d) >> 23; for (i = 0; i < xStep; ++i) { *destPtr++ = (Guchar)pix; } } } gfree(pixBuf); gfree(lineBuf); }
164,737
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool WebPageSerializer::serialize(WebFrame* frame, bool recursive, WebPageSerializerClient* client, const WebVector<WebURL>& links, const WebVector<WebString>& localPaths, const WebString& localDirectoryName) { ASSERT(frame); ASSERT(client); ASSERT(links.size() == localPaths.size()); LinkLocalPathMap m_localLinks; for (size_t i = 0; i < links.size(); i++) { KURL url = links[i]; ASSERT(!m_localLinks.contains(url.string())); m_localLinks.set(url.string(), localPaths[i]); } Vector<SerializedResource> resources; PageSerializer serializer(&resources, &m_localLinks, localDirectoryName); serializer.serialize(toWebViewImpl(frame->view())->page()); for (Vector<SerializedResource>::const_iterator iter = resources.begin(); iter != resources.end(); ++iter) { client->didSerializeDataForFrame(iter->url, WebCString(iter->data->data(), iter->data->size()), WebPageSerializerClient::CurrentFrameIsFinished); } client->didSerializeDataForFrame(KURL(), WebCString("", 0), WebPageSerializerClient::AllFramesAreFinished); return true; } Commit Message: Revert 162155 "This review merges the two existing page serializ..." Change r162155 broke the world even though it was landed using the CQ. > This review merges the two existing page serializers, WebPageSerializerImpl and > PageSerializer, into one, PageSerializer. In addition to this it moves all > the old tests from WebPageNewSerializerTest and WebPageSerializerTest to the > PageSerializerTest structure and splits out one test for MHTML into a new > MHTMLTest file. > > Saving as 'Webpage, Complete', 'Webpage, HTML Only' and as MHTML when the > 'Save Page as MHTML' flag is enabled now uses the same code, and should thus > have the same feature set. Meaning that both modes now should be a bit better. > > Detailed list of changes: > > - PageSerializerTest: Prepare for more DTD test > - PageSerializerTest: Remove now unneccesary input image test > - PageSerializerTest: Remove unused WebPageSerializer/Impl code > - PageSerializerTest: Move data URI morph test > - PageSerializerTest: Move data URI test > - PageSerializerTest: Move namespace test > - PageSerializerTest: Move SVG Image test > - MHTMLTest: Move MHTML specific test to own test file > - PageSerializerTest: Delete duplicate XML header test > - PageSerializerTest: Move blank frame test > - PageSerializerTest: Move CSS test > - PageSerializerTest: Add frameset/frame test > - PageSerializerTest: Move old iframe test > - PageSerializerTest: Move old elements test > - Use PageSerizer for saving web pages > - PageSerializerTest: Test for rewriting links > - PageSerializer: Add rewrite link accumulator > - PageSerializer: Serialize images in iframes/frames src > - PageSerializer: XHTML fix for meta tags > - PageSerializer: Add presentation CSS > - PageSerializer: Rename out parameter > > BUG= > [email protected] > > Review URL: https://codereview.chromium.org/68613003 [email protected] Review URL: https://codereview.chromium.org/73673003 git-svn-id: svn://svn.chromium.org/blink/trunk@162156 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-119
bool WebPageSerializer::serialize(WebFrame* frame, bool recursive, WebPageSerializerClient* client, const WebVector<WebURL>& links, const WebVector<WebString>& localPaths, const WebString& localDirectoryName) { WebPageSerializerImpl serializerImpl( frame, recursive, client, links, localPaths, localDirectoryName); return serializerImpl.serialize(); } bool WebPageSerializer::retrieveAllResources(WebView* view, const WebVector<WebCString>& supportedSchemes, WebVector<WebURL>* resourceURLs, WebVector<WebURL>* frameURLs) { WebFrameImpl* mainFrame = toWebFrameImpl(view->mainFrame()); if (!mainFrame) return false; Vector<Frame*> framesToVisit; Vector<Frame*> visitedFrames; Vector<KURL> frameKURLs; Vector<KURL> resourceKURLs; // Let's retrieve the resources from every frame in this page. framesToVisit.append(mainFrame->frame()); while (!framesToVisit.isEmpty()) { Frame* frame = framesToVisit[0]; framesToVisit.remove(0); retrieveResourcesForFrame(frame, supportedSchemes, &visitedFrames, &framesToVisit, &frameKURLs, &resourceKURLs); } // Converts the results to WebURLs. WebVector<WebURL> resultResourceURLs(resourceKURLs.size()); for (size_t i = 0; i < resourceKURLs.size(); ++i) { resultResourceURLs[i] = resourceKURLs[i]; // A frame's src can point to the same URL as another resource, keep the // resource URL only in such cases. size_t index = frameKURLs.find(resourceKURLs[i]); if (index != kNotFound) frameKURLs.remove(index); } *resourceURLs = resultResourceURLs; WebVector<WebURL> resultFrameURLs(frameKURLs.size()); for (size_t i = 0; i < frameKURLs.size(); ++i) resultFrameURLs[i] = frameKURLs[i]; *frameURLs = resultFrameURLs; return true; }
171,571
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: FileMetricsProviderTest() : create_large_files_(GetParam()), task_runner_(new base::TestSimpleTaskRunner()), thread_task_runner_handle_(task_runner_), statistics_recorder_( base::StatisticsRecorder::CreateTemporaryForTesting()), prefs_(new TestingPrefServiceSimple) { EXPECT_TRUE(temp_dir_.CreateUniqueTempDir()); FileMetricsProvider::RegisterPrefs(prefs_->registry(), kMetricsName); FileMetricsProvider::SetTaskRunnerForTesting(task_runner_); base::GlobalHistogramAllocator::GetCreateHistogramResultHistogram(); } Commit Message: Remove UMA.CreatePersistentHistogram.Result This histogram isn't showing anything meaningful and the problems it could show are better observed by looking at the allocators directly. Bug: 831013 Change-Id: Ibe968597758230192e53a7675e7390e968c9e5b9 Reviewed-on: https://chromium-review.googlesource.com/1008047 Commit-Queue: Brian White <[email protected]> Reviewed-by: Alexei Svitkine <[email protected]> Cr-Commit-Position: refs/heads/master@{#549986} CWE ID: CWE-264
FileMetricsProviderTest() : create_large_files_(GetParam()), task_runner_(new base::TestSimpleTaskRunner()), thread_task_runner_handle_(task_runner_), statistics_recorder_( base::StatisticsRecorder::CreateTemporaryForTesting()), prefs_(new TestingPrefServiceSimple) { EXPECT_TRUE(temp_dir_.CreateUniqueTempDir()); FileMetricsProvider::RegisterPrefs(prefs_->registry(), kMetricsName); FileMetricsProvider::SetTaskRunnerForTesting(task_runner_); }
172,141
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: juniper_atm1_print(netdissect_options *ndo, const struct pcap_pkthdr *h, register const u_char *p) { int llc_hdrlen; struct juniper_l2info_t l2info; l2info.pictype = DLT_JUNIPER_ATM1; if (juniper_parse_header(ndo, p, h, &l2info) == 0) return l2info.header_len; p+=l2info.header_len; if (l2info.cookie[0] == 0x80) { /* OAM cell ? */ oam_print(ndo, p, l2info.length, ATM_OAM_NOHEC); return l2info.header_len; } if (EXTRACT_24BITS(p) == 0xfefe03 || /* NLPID encaps ? */ EXTRACT_24BITS(p) == 0xaaaa03) { /* SNAP encaps ? */ llc_hdrlen = llc_print(ndo, p, l2info.length, l2info.caplen, NULL, NULL); if (llc_hdrlen > 0) return l2info.header_len; } if (p[0] == 0x03) { /* Cisco style NLPID encaps ? */ isoclns_print(ndo, p + 1, l2info.length - 1); /* FIXME check if frame was recognized */ return l2info.header_len; } if (ip_heuristic_guess(ndo, p, l2info.length) != 0) /* last try - vcmux encaps ? */ return l2info.header_len; return l2info.header_len; } Commit Message: CVE-2017-12993/Juniper: Add more bounds checks. This fixes a buffer over-read discovered by Kamil Frankowicz. Add tests using the capture files supplied by the reporter(s). CWE ID: CWE-125
juniper_atm1_print(netdissect_options *ndo, const struct pcap_pkthdr *h, register const u_char *p) { int llc_hdrlen; struct juniper_l2info_t l2info; l2info.pictype = DLT_JUNIPER_ATM1; if (juniper_parse_header(ndo, p, h, &l2info) == 0) return l2info.header_len; p+=l2info.header_len; if (l2info.cookie[0] == 0x80) { /* OAM cell ? */ oam_print(ndo, p, l2info.length, ATM_OAM_NOHEC); return l2info.header_len; } ND_TCHECK2(p[0], 3); if (EXTRACT_24BITS(p) == 0xfefe03 || /* NLPID encaps ? */ EXTRACT_24BITS(p) == 0xaaaa03) { /* SNAP encaps ? */ llc_hdrlen = llc_print(ndo, p, l2info.length, l2info.caplen, NULL, NULL); if (llc_hdrlen > 0) return l2info.header_len; } if (p[0] == 0x03) { /* Cisco style NLPID encaps ? */ isoclns_print(ndo, p + 1, l2info.length - 1); /* FIXME check if frame was recognized */ return l2info.header_len; } if (ip_heuristic_guess(ndo, p, l2info.length) != 0) /* last try - vcmux encaps ? */ return l2info.header_len; return l2info.header_len; trunc: ND_PRINT((ndo, "[|juniper_atm1]")); return l2info.header_len; }
167,914
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: grub_ext2_read_block (grub_fshelp_node_t node, grub_disk_addr_t fileblock) { struct grub_ext2_data *data = node->data; struct grub_ext2_inode *inode = &node->inode; int blknr = -1; unsigned int blksz = EXT2_BLOCK_SIZE (data); int log2_blksz = LOG2_EXT2_BLOCK_SIZE (data); if (grub_le_to_cpu32(inode->flags) & EXT4_EXTENTS_FLAG) { #ifndef _MSC_VER char buf[EXT2_BLOCK_SIZE (data)]; #else char * buf = grub_malloc (EXT2_BLOCK_SIZE(data)); #endif struct grub_ext4_extent_header *leaf; struct grub_ext4_extent *ext; int i; leaf = grub_ext4_find_leaf (data, buf, (struct grub_ext4_extent_header *) inode->blocks.dir_blocks, fileblock); if (! leaf) { grub_error (GRUB_ERR_BAD_FS, "invalid extent"); return -1; } ext = (struct grub_ext4_extent *) (leaf + 1); for (i = 0; i < grub_le_to_cpu16 (leaf->entries); i++) { if (fileblock < grub_le_to_cpu32 (ext[i].block)) break; } if (--i >= 0) { fileblock -= grub_le_to_cpu32 (ext[i].block); if (fileblock >= grub_le_to_cpu16 (ext[i].len)) return 0; else { grub_disk_addr_t start; start = grub_le_to_cpu16 (ext[i].start_hi); start = (start << 32) + grub_le_to_cpu32 (ext[i].start); return fileblock + start; } } else { grub_error (GRUB_ERR_BAD_FS, "something wrong with extent"); return -1; } } /* Direct blocks. */ if (fileblock < INDIRECT_BLOCKS) blknr = grub_le_to_cpu32 (inode->blocks.dir_blocks[fileblock]); /* Indirect. */ else if (fileblock < INDIRECT_BLOCKS + blksz / 4) { grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.indir_block)) << log2_blksz, 0, blksz, indir)) return grub_errno; blknr = grub_le_to_cpu32 (indir[fileblock - INDIRECT_BLOCKS]); grub_free (indir); } /* Double indirect. */ else if (fileblock < (grub_disk_addr_t)(INDIRECT_BLOCKS + blksz / 4) \ * (grub_disk_addr_t)(blksz / 4 + 1)) { unsigned int perblock = blksz / 4; unsigned int rblock = fileblock - (INDIRECT_BLOCKS + blksz / 4); grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.double_indir_block)) << log2_blksz, 0, blksz, indir)) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (indir[rblock / perblock])) << log2_blksz, 0, blksz, indir)) return grub_errno; blknr = grub_le_to_cpu32 (indir[rblock % perblock]); grub_free (indir); } /* triple indirect. */ else { grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, "ext2fs doesn't support triple indirect blocks"); } return blknr; } Commit Message: Fix ext2 buffer overflow in r2_sbu_grub_memmove CWE ID: CWE-787
grub_ext2_read_block (grub_fshelp_node_t node, grub_disk_addr_t fileblock) { struct grub_ext2_data *data = node->data; struct grub_ext2_inode *inode = &node->inode; int blknr = -1; unsigned int blksz = EXT2_BLOCK_SIZE (data); int log2_blksz = LOG2_EXT2_BLOCK_SIZE (data); if (grub_le_to_cpu32(inode->flags) & EXT4_EXTENTS_FLAG) { #ifndef _MSC_VER char buf[EXT2_BLOCK_SIZE (data)]; #else char * buf = grub_malloc (EXT2_BLOCK_SIZE(data)); #endif struct grub_ext4_extent_header *leaf; struct grub_ext4_extent *ext; int i; leaf = grub_ext4_find_leaf (data, buf, (struct grub_ext4_extent_header *) inode->blocks.dir_blocks, fileblock); if (! leaf) { grub_error (GRUB_ERR_BAD_FS, "invalid extent"); return -1; } ext = (struct grub_ext4_extent *) (leaf + 1); for (i = 0; i < grub_le_to_cpu16 (leaf->entries); i++) { if (fileblock < grub_le_to_cpu32 (ext[i].block)) break; } if (--i >= 0) { fileblock -= grub_le_to_cpu32 (ext[i].block); if (fileblock >= grub_le_to_cpu16 (ext[i].len)) return 0; else { grub_disk_addr_t start; start = grub_le_to_cpu16 (ext[i].start_hi); start = (start << 32) + grub_le_to_cpu32 (ext[i].start); return fileblock + start; } } else { grub_error (GRUB_ERR_BAD_FS, "something wrong with extent"); return -1; } } /* Direct blocks. */ if (fileblock < INDIRECT_BLOCKS) { blknr = grub_le_to_cpu32 (inode->blocks.dir_blocks[fileblock]); /* Indirect. */ } else if (fileblock < INDIRECT_BLOCKS + blksz / 4) { grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.indir_block)) << log2_blksz, 0, blksz, indir)) return grub_errno; blknr = grub_le_to_cpu32 (indir[fileblock - INDIRECT_BLOCKS]); grub_free (indir); } /* Double indirect. */ else if (fileblock < (grub_disk_addr_t)(INDIRECT_BLOCKS + blksz / 4) \ * (grub_disk_addr_t)(blksz / 4 + 1)) { unsigned int perblock = blksz / 4; unsigned int rblock = fileblock - (INDIRECT_BLOCKS + blksz / 4); grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.double_indir_block)) << log2_blksz, 0, blksz, indir)) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (indir[rblock / perblock])) << log2_blksz, 0, blksz, indir)) return grub_errno; blknr = grub_le_to_cpu32 (indir[rblock % perblock]); grub_free (indir); } /* triple indirect. */ else { grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, "ext2fs doesn't support triple indirect blocks"); } return blknr; }
168,083
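The commit message for this row points at an overflow in r2_sbu_grub_memmove, which is not shown in the excerpt; the visible change only adds braces around the direct-block branch. Assuming the underlying defect is an unchecked copy length, a clamp-before-memmove helper along these lines, with hypothetical names, captures the usual remedy:

#include <string.h>
#include <stddef.h>

/* Copy at most dst_cap bytes; report failure instead of overflowing.
 * Purely illustrative, not code from the GRUB or radare2 trees. */
static int safe_block_copy(void *dst, size_t dst_cap,
                           const void *src, size_t len)
{
    if (len > dst_cap)
        return -1;              /* treat as a corrupt filesystem */
    memmove(dst, src, len);
    return 0;
}

Failing loudly is usually better than silently truncating the copy, because a shortened block would leave the caller's offsets out of step with the data it thinks it has.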
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: void UserSelectionScreen::OnBeforeUserRemoved(const AccountId& account_id) { for (user_manager::UserList::iterator it = users_.begin(); it != users_.end(); ++it) { if ((*it)->GetAccountId() == account_id) { users_.erase(it); break; } } } Commit Message: cros: Check initial auth type when showing views login. Bug: 859611 Change-Id: I0298db9bbf4aed6bd40600aef2e1c5794e8cd058 Reviewed-on: https://chromium-review.googlesource.com/1123056 Reviewed-by: Xiaoyin Hu <[email protected]> Commit-Queue: Jacob Dufault <[email protected]> Cr-Commit-Position: refs/heads/master@{#572224} CWE ID:
void UserSelectionScreen::OnBeforeUserRemoved(const AccountId& account_id) { for (auto it = users_.cbegin(); it != users_.cend(); ++it) { if ((*it)->GetAccountId() == account_id) { users_.erase(it); break; } } }
172,202
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: bool SyncManager::ReceivedExperiment(browser_sync::Experiments* experiments) const { ReadTransaction trans(FROM_HERE, GetUserShare()); ReadNode node(&trans); if (node.InitByTagLookup(kNigoriTag) != sync_api::BaseNode::INIT_OK) { DVLOG(1) << "Couldn't find Nigori node."; return false; } bool found_experiment = false; if (node.GetNigoriSpecifics().sync_tabs()) { experiments->sync_tabs = true; found_experiment = true; } if (node.GetNigoriSpecifics().sync_tab_favicons()) { experiments->sync_tab_favicons = true; found_experiment = true; } return found_experiment; } Commit Message: [Sync] Cleanup all tab sync enabling logic now that its on by default. BUG=none TEST= Review URL: https://chromiumcodereview.appspot.com/10443046 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@139462 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-362
bool SyncManager::ReceivedExperiment(browser_sync::Experiments* experiments) const { ReadTransaction trans(FROM_HERE, GetUserShare()); ReadNode node(&trans); if (node.InitByTagLookup(kNigoriTag) != sync_api::BaseNode::INIT_OK) { DVLOG(1) << "Couldn't find Nigori node."; return false; } bool found_experiment = false; if (node.GetNigoriSpecifics().sync_tab_favicons()) { experiments->sync_tab_favicons = true; found_experiment = true; } return found_experiment; }
170,796
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static void command_timed_out(UNUSED_ATTR void *context) { pthread_mutex_lock(&commands_pending_response_lock); if (list_is_empty(commands_pending_response)) { LOG_ERROR("%s with no commands pending response", __func__); } else { waiting_command_t *wait_entry = list_front(commands_pending_response); pthread_mutex_unlock(&commands_pending_response_lock); LOG_ERROR("%s hci layer timeout waiting for response to a command. opcode: 0x%x", __func__, wait_entry->opcode); } LOG_ERROR("%s restarting the bluetooth process.", __func__); usleep(10000); kill(getpid(), SIGKILL); } Commit Message: DO NOT MERGE Fix potential DoS caused by delivering signal to BT process Bug: 28885210 Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360 Conflicts: btif/co/bta_hh_co.c btif/src/btif_core.c Merge conflict resolution of ag/1161415 (referencing ag/1164670) - Directly into mnc-mr2-release CWE ID: CWE-284
static void command_timed_out(UNUSED_ATTR void *context) { pthread_mutex_lock(&commands_pending_response_lock); if (list_is_empty(commands_pending_response)) { LOG_ERROR("%s with no commands pending response", __func__); } else { waiting_command_t *wait_entry = list_front(commands_pending_response); pthread_mutex_unlock(&commands_pending_response_lock); LOG_ERROR("%s hci layer timeout waiting for response to a command. opcode: 0x%x", __func__, wait_entry->opcode); } LOG_ERROR("%s restarting the bluetooth process.", __func__); TEMP_FAILURE_RETRY(usleep(10000)); kill(getpid(), SIGKILL); }
173,478
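The only functional change in the fixed handler above is wrapping usleep in TEMP_FAILURE_RETRY, so a signal landing on the process makes the call retry instead of returning early with EINTR. For toolchains that lack the glibc macro, the same retry idiom can be spelled out directly; this is a sketch, not Android's actual helper:

#include <errno.h>
#include <unistd.h>

/* Retry usleep until it completes without being interrupted by a signal,
 * mirroring what TEMP_FAILURE_RETRY(usleep(usec)) expands to. */
static int usleep_retry(useconds_t usec)
{
    int rc;
    do {
        rc = usleep(usec);
    } while (rc == -1 && errno == EINTR);
    return rc;
}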
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: ModuleExport MagickBooleanType ReadPSDLayers(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, j, number_layers; size=GetPSDSize(psd_info,image); if (size == 0) { /* Skip layers & masks. */ (void) ReadBlobLong(image); count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); status=MagickFalse; if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0)) return(MagickTrue); else { count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); if ((count != 0) && (LocaleNCompare(type,"Lr16",4) == 0)) size=GetPSDSize(psd_info,image); else return(MagickTrue); } } status=MagickTrue; if (size != 0) { layer_info=(LayerInfo *) NULL; number_layers=(short) ReadBlobShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. */ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->matte=MagickTrue; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) ResetMagickMemory(layer_info,0,(size_t) number_layers* sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t x, y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); layer_info[i].page.y=ReadBlobSignedLong(image); layer_info[i].page.x=ReadBlobSignedLong(image); y=ReadBlobSignedLong(image); x=ReadBlobSignedLong(image); layer_info[i].page.width=(size_t) (x-layer_info[i].page.x); layer_info[i].page.height=(size_t) (y-layer_info[i].page.y); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); if ((count == 0) || 
(LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } (void) ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey); ReversePSDString(image,layer_info[i].blendkey,4); layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. */ layer_info[i].mask.page.y=ReadBlobSignedLong(image); layer_info[i].mask.page.x=ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) (ReadBlobLong(image)- layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) (ReadBlobLong(image)- layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width, (double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); /* We read it, but don't use it... */ for (j=0; j < (ssize_t) length; j+=8) { size_t blend_source=ReadBlobLong(image); size_t blend_dest=ReadBlobLong(image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " source(%x), dest(%x)",(unsigned int) blend_source,(unsigned int) blend_dest); } } /* Layer name. 
*/ length=(MagickSizeType) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. */ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping == MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=0; j < layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType) number_layers); if (status == MagickFalse) break; } } if (status != MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers > 0) { for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; } layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); } else layer_info=DestroyLayerInfo(layer_info,number_layers); } return(status); } Commit Message: Fix improper cast that could cause an overflow as demonstrated in #347. CWE ID: CWE-119
ModuleExport MagickBooleanType ReadPSDLayers(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, j, number_layers; size=GetPSDSize(psd_info,image); if (size == 0) { /* Skip layers & masks. */ (void) ReadBlobLong(image); count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); status=MagickFalse; if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0)) return(MagickTrue); else { count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); if ((count != 0) && (LocaleNCompare(type,"Lr16",4) == 0)) size=GetPSDSize(psd_info,image); else return(MagickTrue); } } status=MagickTrue; if (size != 0) { layer_info=(LayerInfo *) NULL; number_layers=(short) ReadBlobShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. */ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->matte=MagickTrue; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) ResetMagickMemory(layer_info,0,(size_t) number_layers* sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t x, y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); layer_info[i].page.y=ReadBlobSignedLong(image); layer_info[i].page.x=ReadBlobSignedLong(image); y=ReadBlobSignedLong(image); x=ReadBlobSignedLong(image); layer_info[i].page.width=(size_t) (x-layer_info[i].page.x); layer_info[i].page.height=(size_t) (y-layer_info[i].page.y); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); if ((count == 0) || 
(LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } (void) ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey); ReversePSDString(image,layer_info[i].blendkey,4); layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. */ layer_info[i].mask.page.y=ReadBlobSignedLong(image); layer_info[i].mask.page.x=ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) (ReadBlobLong(image)- layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) (ReadBlobLong(image)- layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width, (double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); /* We read it, but don't use it... */ for (j=0; j < (ssize_t) length; j+=8) { size_t blend_source=ReadBlobLong(image); size_t blend_dest=ReadBlobLong(image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " source(%x), dest(%x)",(unsigned int) blend_source,(unsigned int) blend_dest); } } /* Layer name. 
*/ length=(MagickSizeType) (unsigned char) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. */ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping == MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=0; j < layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType) number_layers); if (status == MagickFalse) break; } } if (status != MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers > 0) { for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; } layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); } else layer_info=DestroyLayerInfo(layer_info,number_layers); } return(status); }
170,101
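The decisive difference between the two ReadPSDLayers versions is the extra (unsigned char) cast on the layer-name length: ReadBlobByte returns an int that is negative at end of file, and widening that negative value straight to MagickSizeType yields an enormous length for the following ReadBlob. A tiny illustration of why the narrowing cast matters, using a hypothetical reader result:

#include <stdint.h>

/* Readers in the ReadBlobByte style return an int, and -1 on end of file.
 * Casting that int directly to a 64-bit unsigned size turns -1 into
 * 18446744073709551615; narrowing through unsigned char first keeps the
 * value in 0..255, all a one-byte Pascal-style length can ever hold. */
static uint64_t name_length(int byte_from_stream)
{
    return (uint64_t)(unsigned char)byte_from_stream;
}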
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: krb5_gss_context_time(minor_status, context_handle, time_rec) OM_uint32 *minor_status; gss_ctx_id_t context_handle; OM_uint32 *time_rec; { krb5_error_code code; krb5_gss_ctx_id_rec *ctx; krb5_timestamp now; krb5_deltat lifetime; ctx = (krb5_gss_ctx_id_rec *) context_handle; if (! ctx->established) { *minor_status = KG_CTX_INCOMPLETE; return(GSS_S_NO_CONTEXT); } if ((code = krb5_timeofday(ctx->k5_context, &now))) { *minor_status = code; save_error_info(*minor_status, ctx->k5_context); return(GSS_S_FAILURE); } if ((lifetime = ctx->krb_times.endtime - now) <= 0) { *time_rec = 0; *minor_status = 0; return(GSS_S_CONTEXT_EXPIRED); } else { *time_rec = lifetime; *minor_status = 0; return(GSS_S_COMPLETE); } } Commit Message: Fix gss_process_context_token() [CVE-2014-5352] [MITKRB5-SA-2015-001] The krb5 gss_process_context_token() should not actually delete the context; that leaves the caller with a dangling pointer and no way to know that it is invalid. Instead, mark the context as terminated, and check for terminated contexts in the GSS functions which expect established contexts. Also add checks in export_sec_context and pseudo_random, and adjust t_prf.c for the pseudo_random check. ticket: 8055 (new) target_version: 1.13.1 tags: pullup CWE ID:
krb5_gss_context_time(minor_status, context_handle, time_rec) OM_uint32 *minor_status; gss_ctx_id_t context_handle; OM_uint32 *time_rec; { krb5_error_code code; krb5_gss_ctx_id_rec *ctx; krb5_timestamp now; krb5_deltat lifetime; ctx = (krb5_gss_ctx_id_rec *) context_handle; if (ctx->terminated || !ctx->established) { *minor_status = KG_CTX_INCOMPLETE; return(GSS_S_NO_CONTEXT); } if ((code = krb5_timeofday(ctx->k5_context, &now))) { *minor_status = code; save_error_info(*minor_status, ctx->k5_context); return(GSS_S_FAILURE); } if ((lifetime = ctx->krb_times.endtime - now) <= 0) { *time_rec = 0; *minor_status = 0; return(GSS_S_CONTEXT_EXPIRED); } else { *time_rec = lifetime; *minor_status = 0; return(GSS_S_COMPLETE); } }
166,813
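The commit message above spells out the pattern: a context that receives a delete token is no longer freed out from under the caller, it is merely marked terminated, and every entry point that needs a live context, like krb5_gss_context_time, now tests that flag first. A compact sketch of the tombstone idea with hypothetical field names:

#include <stddef.h>

/* Hypothetical context carrying both lifecycle flags. */
struct gss_ctx_rec {
    int established;    /* handshake finished */
    int terminated;     /* delete token seen; storage still owned by caller */
};

/* Reject contexts that were never established or were already torn down,
 * so no later code touches state behind what would otherwise be a dangling
 * pointer. */
static int ctx_is_usable(const struct gss_ctx_rec *ctx)
{
    return ctx != NULL && ctx->established && !ctx->terminated;
}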
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: void DiceResponseHandler::ProcessDiceSignoutHeader( const std::vector<signin::DiceResponseParams::AccountInfo>& account_infos) { VLOG(1) << "Start processing Dice signout response"; if (account_consistency_ == signin::AccountConsistencyMethod::kDiceFixAuthErrors) { return; } std::string primary_account = signin_manager_->GetAuthenticatedAccountId(); bool primary_account_signed_out = false; for (const auto& account_info : account_infos) { std::string signed_out_account = account_tracker_service_->PickAccountIdForAccount(account_info.gaia_id, account_info.email); if (signed_out_account == primary_account) { primary_account_signed_out = true; RecordDiceResponseHeader(kSignoutPrimary); RecordGaiaSignoutMetrics( (account_info.session_index == 0) ? kChromePrimaryAccountIsFirstGaiaAccount : kChromePrimaryAccountIsSecondaryGaiaAccount); if (account_consistency_ == signin::AccountConsistencyMethod::kDice) { token_service_->UpdateCredentials( primary_account, MutableProfileOAuth2TokenServiceDelegate::kInvalidRefreshToken); } else { continue; } } else { token_service_->RevokeCredentials(signed_out_account); } for (auto it = token_fetchers_.begin(); it != token_fetchers_.end(); ++it) { std::string token_fetcher_account_id = account_tracker_service_->PickAccountIdForAccount( it->get()->gaia_id(), it->get()->email()); if (token_fetcher_account_id == signed_out_account) { token_fetchers_.erase(it); break; } } } if (!primary_account_signed_out) { RecordDiceResponseHeader(kSignoutSecondary); RecordGaiaSignoutMetrics(primary_account.empty() ? kNoChromePrimaryAccount : kChromePrimaryAccountIsNotInGaiaAccounts); } } Commit Message: [signin] Add metrics to track the source for refresh token updated events This CL add a source for update and revoke credentials operations. It then surfaces the source in the chrome://signin-internals page. This CL also records the following histograms that track refresh token events: * Signin.RefreshTokenUpdated.ToValidToken.Source * Signin.RefreshTokenUpdated.ToInvalidToken.Source * Signin.RefreshTokenRevoked.Source These histograms are needed to validate the assumptions of how often tokens are revoked by the browser and the sources for the token revocations. Bug: 896182 Change-Id: I2fcab80ee8e5699708e695bc3289fa6d34859a90 Reviewed-on: https://chromium-review.googlesource.com/c/1286464 Reviewed-by: Jochen Eisinger <[email protected]> Reviewed-by: David Roger <[email protected]> Reviewed-by: Ilya Sherman <[email protected]> Commit-Queue: Mihai Sardarescu <[email protected]> Cr-Commit-Position: refs/heads/master@{#606181} CWE ID: CWE-20
void DiceResponseHandler::ProcessDiceSignoutHeader( const std::vector<signin::DiceResponseParams::AccountInfo>& account_infos) { VLOG(1) << "Start processing Dice signout response"; if (account_consistency_ == signin::AccountConsistencyMethod::kDiceFixAuthErrors) { return; } std::string primary_account = signin_manager_->GetAuthenticatedAccountId(); bool primary_account_signed_out = false; for (const auto& account_info : account_infos) { std::string signed_out_account = account_tracker_service_->PickAccountIdForAccount(account_info.gaia_id, account_info.email); if (signed_out_account == primary_account) { primary_account_signed_out = true; RecordDiceResponseHeader(kSignoutPrimary); RecordGaiaSignoutMetrics( (account_info.session_index == 0) ? kChromePrimaryAccountIsFirstGaiaAccount : kChromePrimaryAccountIsSecondaryGaiaAccount); if (account_consistency_ == signin::AccountConsistencyMethod::kDice) { token_service_->UpdateCredentials( primary_account, MutableProfileOAuth2TokenServiceDelegate::kInvalidRefreshToken, signin_metrics::SourceForRefreshTokenOperation:: kDiceResponseHandler_Signout); } else { continue; } } else { token_service_->RevokeCredentials( signed_out_account, signin_metrics::SourceForRefreshTokenOperation:: kDiceResponseHandler_Signout); } for (auto it = token_fetchers_.begin(); it != token_fetchers_.end(); ++it) { std::string token_fetcher_account_id = account_tracker_service_->PickAccountIdForAccount( it->get()->gaia_id(), it->get()->email()); if (token_fetcher_account_id == signed_out_account) { token_fetchers_.erase(it); break; } } } if (!primary_account_signed_out) { RecordDiceResponseHeader(kSignoutSecondary); RecordGaiaSignoutMetrics(primary_account.empty() ? kNoChromePrimaryAccount : kChromePrimaryAccountIsNotInGaiaAccounts); } }
172,567
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: bool MultibufferDataSource::DidGetOpaqueResponseViaServiceWorker() const { return url_data()->has_opaque_data(); } Commit Message: Simplify "WouldTaintOrigin" concept in media/blink Currently WebMediaPlayer has three predicates: - DidGetOpaqueResponseFromServiceWorker - HasSingleSecurityOrigin - DidPassCORSAccessCheck . These are used to determine whether the response body is available for scripts. They are known to be confusing, and actually MediaElementAudioSourceHandler::WouldTaintOrigin misuses them. This CL merges the three predicates to one, WouldTaintOrigin, to remove the confusion. Now the "response type" concept is available and we don't need a custom CORS check, so this CL removes BaseAudioContext::WouldTaintOrigin. This CL also renames URLData::has_opaque_data_ and its (direct and indirect) data accessors to match the spec. Bug: 849942, 875153 Change-Id: I6acf50169d7445c4ff614e80ac606f79ee577d2a Reviewed-on: https://chromium-review.googlesource.com/c/1238098 Reviewed-by: Fredrik Hubinette <[email protected]> Reviewed-by: Kinuko Yasuda <[email protected]> Reviewed-by: Raymond Toy <[email protected]> Commit-Queue: Yutaka Hirano <[email protected]> Cr-Commit-Position: refs/heads/master@{#598258} CWE ID: CWE-732
bool MultibufferDataSource::IsCorsCrossOrigin() const { return url_data()->is_cors_cross_origin(); }
172,623
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: my_object_rec_arrays (MyObject *obj, GPtrArray *in, GPtrArray **ret, GError **error) { char **strs; GArray *ints; guint v_UINT; if (in->len != 2) { g_set_error (error, MY_OBJECT_ERROR, MY_OBJECT_ERROR_FOO, "invalid array len"); return FALSE; } strs = g_ptr_array_index (in, 0); if (!*strs || strcmp (*strs, "foo")) { g_set_error (error, MY_OBJECT_ERROR, MY_OBJECT_ERROR_FOO, "invalid string 0"); return FALSE; } strs++; if (!*strs || strcmp (*strs, "bar")) { g_set_error (error, MY_OBJECT_ERROR, MY_OBJECT_ERROR_FOO, "invalid string 1"); return FALSE; } strs++; if (*strs) { g_set_error (error, MY_OBJECT_ERROR, MY_OBJECT_ERROR_FOO, "invalid string array len in pos 0"); return FALSE; } strs = g_ptr_array_index (in, 1); if (!*strs || strcmp (*strs, "baz")) { g_set_error (error, MY_OBJECT_ERROR, MY_OBJECT_ERROR_FOO, "invalid string 0"); return FALSE; } strs++; if (!*strs || strcmp (*strs, "whee")) { g_set_error (error, MY_OBJECT_ERROR, MY_OBJECT_ERROR_FOO, "invalid string 1"); return FALSE; } strs++; if (!*strs || strcmp (*strs, "moo")) { g_set_error (error, MY_OBJECT_ERROR, MY_OBJECT_ERROR_FOO, "invalid string 2"); return FALSE; } strs++; if (*strs) { g_set_error (error, MY_OBJECT_ERROR, MY_OBJECT_ERROR_FOO, "invalid string array len in pos 1"); return FALSE; } *ret = g_ptr_array_new (); ints = g_array_new (TRUE, TRUE, sizeof (guint)); v_UINT = 10; g_array_append_val (ints, v_UINT); v_UINT = 42; g_array_append_val (ints, v_UINT); v_UINT = 27; g_array_append_val (ints, v_UINT); g_ptr_array_add (*ret, ints); ints = g_array_new (TRUE, TRUE, sizeof (guint)); v_UINT = 30; g_array_append_val (ints, v_UINT); g_ptr_array_add (*ret, ints); return TRUE; } Commit Message: CWE ID: CWE-264
my_object_rec_arrays (MyObject *obj, GPtrArray *in, GPtrArray **ret, GError **error)
165,116
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: main(int argc, char **argv) { FILE *fp = stdout; const char *file_name = NULL; int color_type = 8; /* invalid */ int bit_depth = 32; /* invalid */ unsigned int colors[5]; unsigned int filters = PNG_ALL_FILTERS; png_fixed_point gamma = 0; /* not set */ chunk_insert *head_insert = NULL; chunk_insert **insert_ptr = &head_insert; memset(colors, 0, sizeof colors); while (--argc > 0) { char *arg = *++argv; if (strcmp(arg, "--sRGB") == 0) { gamma = PNG_DEFAULT_sRGB; continue; } if (strcmp(arg, "--linear") == 0) { gamma = PNG_FP_1; continue; } if (strcmp(arg, "--1.8") == 0) { gamma = PNG_GAMMA_MAC_18; continue; } if (strcmp(arg, "--nofilters") == 0) { filters = PNG_FILTER_NONE; continue; } if (strncmp(arg, "--color=", 8) == 0) { parse_color(arg+8, colors); continue; } if (argc >= 3 && strcmp(arg, "--insert") == 0) { png_const_charp what = *++argv; png_charp param = *++argv; chunk_insert *new_insert; argc -= 2; new_insert = find_insert(what, param); if (new_insert != NULL) { *insert_ptr = new_insert; insert_ptr = &new_insert->next; } continue; } if (arg[0] == '-') { fprintf(stderr, "makepng: %s: invalid option\n", arg); exit(1); } if (strcmp(arg, "palette") == 0) { color_type = PNG_COLOR_TYPE_PALETTE; continue; } if (strncmp(arg, "gray", 4) == 0) { if (arg[4] == 0) { color_type = PNG_COLOR_TYPE_GRAY; continue; } else if (strcmp(arg+4, "a") == 0 || strcmp(arg+4, "alpha") == 0 || strcmp(arg+4, "-alpha") == 0) { color_type = PNG_COLOR_TYPE_GRAY_ALPHA; continue; } } if (strncmp(arg, "rgb", 3) == 0) { if (arg[3] == 0) { color_type = PNG_COLOR_TYPE_RGB; continue; } else if (strcmp(arg+3, "a") == 0 || strcmp(arg+3, "alpha") == 0 || strcmp(arg+3, "-alpha") == 0) { color_type = PNG_COLOR_TYPE_RGB_ALPHA; continue; } } if (color_type == 8 && isdigit(arg[0])) { color_type = atoi(arg); if (color_type < 0 || color_type > 6 || color_type == 1 || color_type == 5) { fprintf(stderr, "makepng: %s: not a valid color type\n", arg); exit(1); } continue; } if (bit_depth == 32 && isdigit(arg[0])) { bit_depth = atoi(arg); if (bit_depth <= 0 || bit_depth > 16 || (bit_depth & -bit_depth) != bit_depth) { fprintf(stderr, "makepng: %s: not a valid bit depth\n", arg); exit(1); } continue; } if (argc == 1) /* It's the file name */ { fp = fopen(arg, "wb"); if (fp == NULL) { fprintf(stderr, "%s: %s: could not open\n", arg, strerror(errno)); exit(1); } file_name = arg; continue; } fprintf(stderr, "makepng: %s: unknown argument\n", arg); exit(1); } /* argument while loop */ if (color_type == 8 || bit_depth == 32) { fprintf(stderr, "usage: makepng [--sRGB|--linear|--1.8] " "[--color=...] color-type bit-depth [file-name]\n" " Make a test PNG file, by default writes to stdout.\n"); exit(1); } /* Check the colors */ { const unsigned int lim = (color_type == PNG_COLOR_TYPE_PALETTE ? 255U : (1U<<bit_depth)-1); unsigned int i; for (i=1; i<=colors[0]; ++i) if (colors[i] > lim) { fprintf(stderr, "makepng: --color=...: %u out of range [0..%u]\n", colors[i], lim); exit(1); } } /* Restrict the filters for more speed to those we know are used for the * generated images. 
*/ if (filters == PNG_ALL_FILTERS) { if ((color_type & PNG_COLOR_MASK_PALETTE) != 0 || bit_depth < 8) filters = PNG_FILTER_NONE; else if (color_type & PNG_COLOR_MASK_COLOR) /* rgb */ { if (bit_depth == 8) filters &= ~(PNG_FILTER_NONE | PNG_FILTER_AVG); else filters = PNG_FILTER_SUB | PNG_FILTER_PAETH; } else /* gray 8 or 16-bit */ filters &= ~PNG_FILTER_NONE; } { int ret = write_png(&file_name, fp, color_type, bit_depth, gamma, head_insert, filters, colors); if (ret != 0 && file_name != NULL) remove(file_name); return ret; } } Commit Message: DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82) CWE ID:
main(int argc, char **argv) { FILE *fp = stdout; const char *file_name = NULL; int color_type = 8; /* invalid */ int bit_depth = 32; /* invalid */ int small = 0; /* make full size images */ int tRNS = 0; /* don't output a tRNS chunk */ unsigned int colors[5]; unsigned int filters = PNG_ALL_FILTERS; png_fixed_point gamma = 0; /* not set */ chunk_insert *head_insert = NULL; chunk_insert **insert_ptr = &head_insert; memset(colors, 0, sizeof colors); while (--argc > 0) { char *arg = *++argv; if (strcmp(arg, "--small") == 0) { small = 1; continue; } if (strcmp(arg, "--tRNS") == 0) { tRNS = 1; continue; } if (strcmp(arg, "--sRGB") == 0) { gamma = PNG_DEFAULT_sRGB; continue; } if (strcmp(arg, "--linear") == 0) { gamma = PNG_FP_1; continue; } if (strcmp(arg, "--1.8") == 0) { gamma = PNG_GAMMA_MAC_18; continue; } if (strcmp(arg, "--nofilters") == 0) { filters = PNG_FILTER_NONE; continue; } if (strncmp(arg, "--color=", 8) == 0) { parse_color(arg+8, colors); continue; } if (argc >= 3 && strcmp(arg, "--insert") == 0) { png_const_charp what = *++argv; png_charp param = *++argv; chunk_insert *new_insert; argc -= 2; new_insert = find_insert(what, param); if (new_insert != NULL) { *insert_ptr = new_insert; insert_ptr = &new_insert->next; } continue; } if (arg[0] == '-') { fprintf(stderr, "makepng: %s: invalid option\n", arg); exit(1); } if (strcmp(arg, "palette") == 0) { color_type = PNG_COLOR_TYPE_PALETTE; continue; } if (strncmp(arg, "gray", 4) == 0) { if (arg[4] == 0) { color_type = PNG_COLOR_TYPE_GRAY; continue; } else if (strcmp(arg+4, "a") == 0 || strcmp(arg+4, "alpha") == 0 || strcmp(arg+4, "-alpha") == 0) { color_type = PNG_COLOR_TYPE_GRAY_ALPHA; continue; } } if (strncmp(arg, "rgb", 3) == 0) { if (arg[3] == 0) { color_type = PNG_COLOR_TYPE_RGB; continue; } else if (strcmp(arg+3, "a") == 0 || strcmp(arg+3, "alpha") == 0 || strcmp(arg+3, "-alpha") == 0) { color_type = PNG_COLOR_TYPE_RGB_ALPHA; continue; } } if (color_type == 8 && isdigit(arg[0])) { color_type = atoi(arg); if (color_type < 0 || color_type > 6 || color_type == 1 || color_type == 5) { fprintf(stderr, "makepng: %s: not a valid color type\n", arg); exit(1); } continue; } if (bit_depth == 32 && isdigit(arg[0])) { bit_depth = atoi(arg); if (bit_depth <= 0 || bit_depth > 16 || (bit_depth & -bit_depth) != bit_depth) { fprintf(stderr, "makepng: %s: not a valid bit depth\n", arg); exit(1); } continue; } if (argc == 1) /* It's the file name */ { fp = fopen(arg, "wb"); if (fp == NULL) { fprintf(stderr, "%s: %s: could not open\n", arg, strerror(errno)); exit(1); } file_name = arg; continue; } fprintf(stderr, "makepng: %s: unknown argument\n", arg); exit(1); } /* argument while loop */ if (color_type == 8 || bit_depth == 32) { fprintf(stderr, "usage: makepng [--small] [--sRGB|--linear|--1.8] " "[--color=...] color-type bit-depth [file-name]\n" " Make a test PNG file, by default writes to stdout.\n" " Other options are available, UTSL.\n"); exit(1); } /* Check the colors */ { const unsigned int lim = (color_type == PNG_COLOR_TYPE_PALETTE ? 255U : (1U<<bit_depth)-1); unsigned int i; for (i=1; i<=colors[0]; ++i) if (colors[i] > lim) { fprintf(stderr, "makepng: --color=...: %u out of range [0..%u]\n", colors[i], lim); exit(1); } } /* small and colors are incomparible (will probably crash if both are used at * the same time!) */ if (small && colors[0] != 0) { fprintf(stderr, "makepng: --color --small: only one at a time!\n"); exit(1); } /* Restrict the filters for more speed to those we know are used for the * generated images. 
*/ if (filters == PNG_ALL_FILTERS && !small/*small provides defaults*/) { if ((color_type & PNG_COLOR_MASK_PALETTE) != 0 || bit_depth < 8) filters = PNG_FILTER_NONE; else if (color_type & PNG_COLOR_MASK_COLOR) /* rgb */ { if (bit_depth == 8) filters &= ~(PNG_FILTER_NONE | PNG_FILTER_AVG); else filters = PNG_FILTER_SUB | PNG_FILTER_PAETH; } else /* gray 8 or 16-bit */ filters &= ~PNG_FILTER_NONE; } /* Insert standard copyright and licence text. */ { static png_const_charp copyright[] = { COPYRIGHT, /* ISO-Latin-1 */ NULL }; static png_const_charp licensing[] = { IMAGE_LICENSING, /* UTF-8 */ NULL }; chunk_insert *new_insert; new_insert = add_tEXt("Copyright", copyright); if (new_insert != NULL) { *insert_ptr = new_insert; insert_ptr = &new_insert->next; } new_insert = add_iTXt("Licensing", "en", NULL, licensing); if (new_insert != NULL) { *insert_ptr = new_insert; insert_ptr = &new_insert->next; } } { int ret = write_png(&file_name, fp, color_type, bit_depth, gamma, head_insert, filters, colors, small, tRNS); if (ret != 0 && file_name != NULL) remove(file_name); return ret; } }
173,584
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static inline int mk_vhost_fdt_open(int id, unsigned int hash, struct session_request *sr) { int i; int fd; struct vhost_fdt_hash_table *ht = NULL; struct vhost_fdt_hash_chain *hc; if (config->fdt == MK_FALSE) { return open(sr->real_path.data, sr->file_info.flags_read_only); } ht = mk_vhost_fdt_table_lookup(id, sr->host_conf); if (mk_unlikely(!ht)) { return open(sr->real_path.data, sr->file_info.flags_read_only); } /* We got the hash table, now look around the chains array */ hc = mk_vhost_fdt_chain_lookup(hash, ht); if (hc) { /* Increment the readers and return the shared FD */ hc->readers++; return hc->fd; } /* * Get here means that no entry exists in the hash table for the * requested file descriptor and hash, we must try to open the file * and register the entry in the table. */ fd = open(sr->real_path.data, sr->file_info.flags_read_only); if (fd == -1) { return -1; } /* If chains are full, just return the new FD, bad luck... */ if (ht->av_slots <= 0) { return fd; } /* Register the new entry in an available slot */ for (i = 0; i < VHOST_FDT_HASHTABLE_CHAINS; i++) { hc = &ht->chain[i]; if (hc->fd == -1) { hc->fd = fd; hc->hash = hash; hc->readers++; ht->av_slots--; sr->vhost_fdt_id = id; sr->vhost_fdt_hash = hash; return fd; } } return -1; } Commit Message: Request: new request session flag to mark those files opened by FDT This patch aims to fix a potential DDoS problem that can be caused in the server quering repetitive non-existent resources. When serving a static file, the core use Vhost FDT mechanism, but if it sends a static error page it does a direct open(2). When closing the resources for the same request it was just calling mk_vhost_close() which did not clear properly the file descriptor. This patch adds a new field on the struct session_request called 'fd_is_fdt', which contains MK_TRUE or MK_FALSE depending of how fd_file was opened. Thanks to Matthew Daley <[email protected]> for report and troubleshoot this problem. Signed-off-by: Eduardo Silva <[email protected]> CWE ID: CWE-20
static inline int mk_vhost_fdt_open(int id, unsigned int hash, struct session_request *sr) { int i; int fd; struct vhost_fdt_hash_table *ht = NULL; struct vhost_fdt_hash_chain *hc; if (config->fdt == MK_FALSE) { return open(sr->real_path.data, sr->file_info.flags_read_only); } ht = mk_vhost_fdt_table_lookup(id, sr->host_conf); if (mk_unlikely(!ht)) { return open(sr->real_path.data, sr->file_info.flags_read_only); } /* We got the hash table, now look around the chains array */ hc = mk_vhost_fdt_chain_lookup(hash, ht); if (hc) { /* Increment the readers and return the shared FD */ hc->readers++; return hc->fd; } /* * Get here means that no entry exists in the hash table for the * requested file descriptor and hash, we must try to open the file * and register the entry in the table. */ fd = open(sr->real_path.data, sr->file_info.flags_read_only); if (fd == -1) { return -1; } /* If chains are full, just return the new FD, bad luck... */ if (ht->av_slots <= 0) { return fd; } /* Register the new entry in an available slot */ for (i = 0; i < VHOST_FDT_HASHTABLE_CHAINS; i++) { hc = &ht->chain[i]; if (hc->fd == -1) { hc->fd = fd; hc->hash = hash; hc->readers++; ht->av_slots--; sr->vhost_fdt_id = id; sr->vhost_fdt_hash = hash; sr->fd_is_fdt = MK_TRUE; return fd; } } return -1; }
166,279
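The fixed mk_vhost_fdt_open sets sr->fd_is_fdt = MK_TRUE only on the path that hands back a descriptor from the shared table, so the close path can tell a pooled descriptor from one returned by a plain open(2) and will neither double-close nor leak either kind. A small sketch of that ownership-flag pattern, with invented names:

#include <unistd.h>

/* Hypothetical request state: the descriptor plus a record of its origin. */
struct request_fd {
    int fd;
    int from_pool;      /* 1 if fd is shared through the descriptor table */
};

/* Close only descriptors we own outright; pooled ones go back through the
 * table so their reader count stays consistent. */
static void request_fd_close(struct request_fd *r,
                             void (*pool_release)(int fd))
{
    if (r->fd < 0)
        return;
    if (r->from_pool)
        pool_release(r->fd);
    else
        close(r->fd);
    r->fd = -1;
}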
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static Image *ReadTTFImage(const ImageInfo *image_info,ExceptionInfo *exception) { char buffer[MaxTextExtent], *text; const char *Text = (char *) "abcdefghijklmnopqrstuvwxyz\n" "ABCDEFGHIJKLMNOPQRSTUVWXYZ\n" "0123456789.:,;(*!?}^)#${%^&-+@\n"; const TypeInfo *type_info; DrawInfo *draw_info; Image *image; MagickBooleanType status; PixelPacket background_color; register ssize_t i, x; register PixelPacket *q; ssize_t y; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); image->columns=800; image->rows=480; type_info=GetTypeInfo(image_info->filename,exception); if ((type_info != (const TypeInfo *) NULL) && (type_info->glyphs != (char *) NULL)) (void) CopyMagickString(image->filename,type_info->glyphs,MaxTextExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Color canvas with background color */ background_color=image_info->background_color; for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) *q++=background_color; if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } (void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent); (void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent); /* Prepare drawing commands */ y=20; draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL); draw_info->font=AcquireString(image->filename); ConcatenateString(&draw_info->primitive,"push graphic-context\n"); (void) FormatLocaleString(buffer,MaxTextExtent," viewbox 0 0 %.20g %.20g\n", (double) image->columns,(double) image->rows); ConcatenateString(&draw_info->primitive,buffer); ConcatenateString(&draw_info->primitive," font-size 18\n"); (void) FormatLocaleString(buffer,MaxTextExtent," text 10,%.20g '",(double) y); ConcatenateString(&draw_info->primitive,buffer); text=EscapeString(Text,'"'); ConcatenateString(&draw_info->primitive,text); text=DestroyString(text); (void) FormatLocaleString(buffer,MaxTextExtent,"'\n"); ConcatenateString(&draw_info->primitive,buffer); y+=20*(ssize_t) MultilineCensus((char *) Text)+20; for (i=12; i <= 72; i+=6) { y+=i+12; ConcatenateString(&draw_info->primitive," font-size 18\n"); (void) FormatLocaleString(buffer,MaxTextExtent," text 10,%.20g '%.20g'\n", (double) y,(double) i); ConcatenateString(&draw_info->primitive,buffer); (void) FormatLocaleString(buffer,MaxTextExtent," font-size %.20g\n", (double) i); ConcatenateString(&draw_info->primitive,buffer); (void) FormatLocaleString(buffer,MaxTextExtent," text 50,%.20g " "'That which does not destroy me, only makes me stronger.'\n",(double) y); ConcatenateString(&draw_info->primitive,buffer); if (i >= 24) i+=6; } ConcatenateString(&draw_info->primitive,"pop graphic-context"); (void) DrawImage(image,draw_info); /* Relinquish resources. */ draw_info=DestroyDrawInfo(draw_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); } Commit Message: CWE ID: CWE-119
static Image *ReadTTFImage(const ImageInfo *image_info,ExceptionInfo *exception) { char buffer[MaxTextExtent], *text; const char *Text = (char *) "abcdefghijklmnopqrstuvwxyz\n" "ABCDEFGHIJKLMNOPQRSTUVWXYZ\n" "0123456789.:,;(*!?}^)#${%^&-+@\n"; const TypeInfo *type_info; DrawInfo *draw_info; Image *image; MagickBooleanType status; PixelPacket background_color; register ssize_t i, x; register PixelPacket *q; ssize_t y; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); image->columns=800; image->rows=480; type_info=GetTypeInfo(image_info->filename,exception); if ((type_info != (const TypeInfo *) NULL) && (type_info->glyphs != (char *) NULL)) (void) CopyMagickString(image->filename,type_info->glyphs,MaxTextExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } /* Color canvas with background color */ background_color=image_info->background_color; for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) *q++=background_color; if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } (void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent); (void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent); /* Prepare drawing commands */ y=20; draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL); draw_info->font=AcquireString(image->filename); ConcatenateString(&draw_info->primitive,"push graphic-context\n"); (void) FormatLocaleString(buffer,MaxTextExtent," viewbox 0 0 %.20g %.20g\n", (double) image->columns,(double) image->rows); ConcatenateString(&draw_info->primitive,buffer); ConcatenateString(&draw_info->primitive," font-size 18\n"); (void) FormatLocaleString(buffer,MaxTextExtent," text 10,%.20g '",(double) y); ConcatenateString(&draw_info->primitive,buffer); text=EscapeString(Text,'"'); ConcatenateString(&draw_info->primitive,text); text=DestroyString(text); (void) FormatLocaleString(buffer,MaxTextExtent,"'\n"); ConcatenateString(&draw_info->primitive,buffer); y+=20*(ssize_t) MultilineCensus((char *) Text)+20; for (i=12; i <= 72; i+=6) { y+=i+12; ConcatenateString(&draw_info->primitive," font-size 18\n"); (void) FormatLocaleString(buffer,MaxTextExtent," text 10,%.20g '%.20g'\n", (double) y,(double) i); ConcatenateString(&draw_info->primitive,buffer); (void) FormatLocaleString(buffer,MaxTextExtent," font-size %.20g\n", (double) i); ConcatenateString(&draw_info->primitive,buffer); (void) FormatLocaleString(buffer,MaxTextExtent," text 50,%.20g " "'That which does not destroy me, only makes me stronger.'\n",(double) y); ConcatenateString(&draw_info->primitive,buffer); if (i >= 24) i+=6; } ConcatenateString(&draw_info->primitive,"pop graphic-context"); (void) DrawImage(image,draw_info); /* Relinquish resources. */ draw_info=DestroyDrawInfo(draw_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); }
168,612
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void *Type_MLU_Read(struct _cms_typehandler_struct* self, cmsIOHANDLER* io, cmsUInt32Number* nItems, cmsUInt32Number SizeOfTag) { cmsMLU* mlu; cmsUInt32Number Count, RecLen, NumOfWchar; cmsUInt32Number SizeOfHeader; cmsUInt32Number Len, Offset; cmsUInt32Number i; wchar_t* Block; cmsUInt32Number BeginOfThisString, EndOfThisString, LargestPosition; *nItems = 0; if (!_cmsReadUInt32Number(io, &Count)) return NULL; if (!_cmsReadUInt32Number(io, &RecLen)) return NULL; if (RecLen != 12) { cmsSignalError(self->ContextID, cmsERROR_UNKNOWN_EXTENSION, "multiLocalizedUnicodeType of len != 12 is not supported."); return NULL; } mlu = cmsMLUalloc(self ->ContextID, Count); if (mlu == NULL) return NULL; mlu ->UsedEntries = Count; SizeOfHeader = 12 * Count + sizeof(_cmsTagBase); LargestPosition = 0; for (i=0; i < Count; i++) { if (!_cmsReadUInt16Number(io, &mlu ->Entries[i].Language)) goto Error; if (!_cmsReadUInt16Number(io, &mlu ->Entries[i].Country)) goto Error; if (!_cmsReadUInt32Number(io, &Len)) goto Error; if (!_cmsReadUInt32Number(io, &Offset)) goto Error; if (Offset < (SizeOfHeader + 8)) goto Error; BeginOfThisString = Offset - SizeOfHeader - 8; mlu ->Entries[i].Len = (Len * sizeof(wchar_t)) / sizeof(cmsUInt16Number); mlu ->Entries[i].StrW = (BeginOfThisString * sizeof(wchar_t)) / sizeof(cmsUInt16Number); EndOfThisString = BeginOfThisString + Len; if (EndOfThisString > LargestPosition) LargestPosition = EndOfThisString; } SizeOfTag = (LargestPosition * sizeof(wchar_t)) / sizeof(cmsUInt16Number); if (SizeOfTag == 0) { Block = NULL; NumOfWchar = 0; } else { Block = (wchar_t*) _cmsMalloc(self ->ContextID, SizeOfTag); if (Block == NULL) goto Error; NumOfWchar = SizeOfTag / sizeof(wchar_t); if (!_cmsReadWCharArray(io, NumOfWchar, Block)) goto Error; } mlu ->MemPool = Block; mlu ->PoolSize = SizeOfTag; mlu ->PoolUsed = SizeOfTag; *nItems = 1; return (void*) mlu; Error: if (mlu) cmsMLUfree(mlu); return NULL; } Commit Message: Added an extra check to MLU bounds Thanks to Ibrahim el-sayed for spotting the bug CWE ID: CWE-125
void *Type_MLU_Read(struct _cms_typehandler_struct* self, cmsIOHANDLER* io, cmsUInt32Number* nItems, cmsUInt32Number SizeOfTag) { cmsMLU* mlu; cmsUInt32Number Count, RecLen, NumOfWchar; cmsUInt32Number SizeOfHeader; cmsUInt32Number Len, Offset; cmsUInt32Number i; wchar_t* Block; cmsUInt32Number BeginOfThisString, EndOfThisString, LargestPosition; *nItems = 0; if (!_cmsReadUInt32Number(io, &Count)) return NULL; if (!_cmsReadUInt32Number(io, &RecLen)) return NULL; if (RecLen != 12) { cmsSignalError(self->ContextID, cmsERROR_UNKNOWN_EXTENSION, "multiLocalizedUnicodeType of len != 12 is not supported."); return NULL; } mlu = cmsMLUalloc(self ->ContextID, Count); if (mlu == NULL) return NULL; mlu ->UsedEntries = Count; SizeOfHeader = 12 * Count + sizeof(_cmsTagBase); LargestPosition = 0; for (i=0; i < Count; i++) { if (!_cmsReadUInt16Number(io, &mlu ->Entries[i].Language)) goto Error; if (!_cmsReadUInt16Number(io, &mlu ->Entries[i].Country)) goto Error; if (!_cmsReadUInt32Number(io, &Len)) goto Error; if (!_cmsReadUInt32Number(io, &Offset)) goto Error; if (Offset < (SizeOfHeader + 8)) goto Error; if ((Offset + Len) > SizeOfTag + 8) goto Error; BeginOfThisString = Offset - SizeOfHeader - 8; mlu ->Entries[i].Len = (Len * sizeof(wchar_t)) / sizeof(cmsUInt16Number); mlu ->Entries[i].StrW = (BeginOfThisString * sizeof(wchar_t)) / sizeof(cmsUInt16Number); EndOfThisString = BeginOfThisString + Len; if (EndOfThisString > LargestPosition) LargestPosition = EndOfThisString; } SizeOfTag = (LargestPosition * sizeof(wchar_t)) / sizeof(cmsUInt16Number); if (SizeOfTag == 0) { Block = NULL; NumOfWchar = 0; } else { Block = (wchar_t*) _cmsMalloc(self ->ContextID, SizeOfTag); if (Block == NULL) goto Error; NumOfWchar = SizeOfTag / sizeof(wchar_t); if (!_cmsReadWCharArray(io, NumOfWchar, Block)) goto Error; } mlu ->MemPool = Block; mlu ->PoolSize = SizeOfTag; mlu ->PoolUsed = SizeOfTag; *nItems = 1; return (void*) mlu; Error: if (mlu) cmsMLUfree(mlu); return NULL; }
168,512
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: v8::Handle<v8::Value> V8DataView::setUint8Callback(const v8::Arguments& args) { INC_STATS("DOM.DataView.setUint8"); if (args.Length() < 2) return V8Proxy::throwNotEnoughArgumentsError(); DataView* imp = V8DataView::toNative(args.Holder()); ExceptionCode ec = 0; EXCEPTION_BLOCK(unsigned, byteOffset, toUInt32(args[0])); EXCEPTION_BLOCK(int, value, toInt32(args[1])); imp->setUint8(byteOffset, static_cast<uint8_t>(value), ec); if (UNLIKELY(ec)) V8Proxy::setDOMException(ec, args.GetIsolate()); return v8::Handle<v8::Value>(); } Commit Message: [V8] Pass Isolate to throwNotEnoughArgumentsError() https://bugs.webkit.org/show_bug.cgi?id=86983 Reviewed by Adam Barth. The objective is to pass Isolate around in V8 bindings. This patch passes Isolate to throwNotEnoughArgumentsError(). No tests. No change in behavior. * bindings/scripts/CodeGeneratorV8.pm: (GenerateArgumentsCountCheck): (GenerateEventConstructorCallback): * bindings/scripts/test/V8/V8Float64Array.cpp: (WebCore::Float64ArrayV8Internal::fooCallback): * bindings/scripts/test/V8/V8TestActiveDOMObject.cpp: (WebCore::TestActiveDOMObjectV8Internal::excitingFunctionCallback): (WebCore::TestActiveDOMObjectV8Internal::postMessageCallback): * bindings/scripts/test/V8/V8TestCustomNamedGetter.cpp: (WebCore::TestCustomNamedGetterV8Internal::anotherFunctionCallback): * bindings/scripts/test/V8/V8TestEventConstructor.cpp: (WebCore::V8TestEventConstructor::constructorCallback): * bindings/scripts/test/V8/V8TestEventTarget.cpp: (WebCore::TestEventTargetV8Internal::itemCallback): (WebCore::TestEventTargetV8Internal::dispatchEventCallback): * bindings/scripts/test/V8/V8TestInterface.cpp: (WebCore::TestInterfaceV8Internal::supplementalMethod2Callback): (WebCore::V8TestInterface::constructorCallback): * bindings/scripts/test/V8/V8TestMediaQueryListListener.cpp: (WebCore::TestMediaQueryListListenerV8Internal::methodCallback): * bindings/scripts/test/V8/V8TestNamedConstructor.cpp: (WebCore::V8TestNamedConstructorConstructorCallback): * bindings/scripts/test/V8/V8TestObj.cpp: (WebCore::TestObjV8Internal::voidMethodWithArgsCallback): (WebCore::TestObjV8Internal::intMethodWithArgsCallback): (WebCore::TestObjV8Internal::objMethodWithArgsCallback): (WebCore::TestObjV8Internal::methodWithSequenceArgCallback): (WebCore::TestObjV8Internal::methodReturningSequenceCallback): (WebCore::TestObjV8Internal::methodThatRequiresAllArgsAndThrowsCallback): (WebCore::TestObjV8Internal::serializedValueCallback): (WebCore::TestObjV8Internal::idbKeyCallback): (WebCore::TestObjV8Internal::optionsObjectCallback): (WebCore::TestObjV8Internal::methodWithNonOptionalArgAndOptionalArgCallback): (WebCore::TestObjV8Internal::methodWithNonOptionalArgAndTwoOptionalArgsCallback): (WebCore::TestObjV8Internal::methodWithCallbackArgCallback): (WebCore::TestObjV8Internal::methodWithNonCallbackArgAndCallbackArgCallback): (WebCore::TestObjV8Internal::overloadedMethod1Callback): (WebCore::TestObjV8Internal::overloadedMethod2Callback): (WebCore::TestObjV8Internal::overloadedMethod3Callback): (WebCore::TestObjV8Internal::overloadedMethod4Callback): (WebCore::TestObjV8Internal::overloadedMethod5Callback): (WebCore::TestObjV8Internal::overloadedMethod6Callback): (WebCore::TestObjV8Internal::overloadedMethod7Callback): (WebCore::TestObjV8Internal::overloadedMethod11Callback): (WebCore::TestObjV8Internal::overloadedMethod12Callback): (WebCore::TestObjV8Internal::enabledAtRuntimeMethod1Callback): (WebCore::TestObjV8Internal::enabledAtRuntimeMethod2Callback): (WebCore::TestObjV8Internal::convert1Callback): (WebCore::TestObjV8Internal::convert2Callback): (WebCore::TestObjV8Internal::convert3Callback): (WebCore::TestObjV8Internal::convert4Callback): (WebCore::TestObjV8Internal::convert5Callback): (WebCore::TestObjV8Internal::strictFunctionCallback): (WebCore::V8TestObj::constructorCallback): * bindings/scripts/test/V8/V8TestSerializedScriptValueInterface.cpp: (WebCore::TestSerializedScriptValueInterfaceV8Internal::acceptTransferListCallback): (WebCore::V8TestSerializedScriptValueInterface::constructorCallback): * bindings/v8/ScriptController.cpp: (WebCore::setValueAndClosePopupCallback): * bindings/v8/V8Proxy.cpp: (WebCore::V8Proxy::throwNotEnoughArgumentsError): * bindings/v8/V8Proxy.h: (V8Proxy): * bindings/v8/custom/V8AudioContextCustom.cpp: (WebCore::V8AudioContext::constructorCallback): * bindings/v8/custom/V8DataViewCustom.cpp: (WebCore::V8DataView::getInt8Callback): (WebCore::V8DataView::getUint8Callback): (WebCore::V8DataView::setInt8Callback): (WebCore::V8DataView::setUint8Callback): * bindings/v8/custom/V8DirectoryEntryCustom.cpp: (WebCore::V8DirectoryEntry::getDirectoryCallback): (WebCore::V8DirectoryEntry::getFileCallback): * bindings/v8/custom/V8IntentConstructor.cpp: (WebCore::V8Intent::constructorCallback): * bindings/v8/custom/V8SVGLengthCustom.cpp: (WebCore::V8SVGLength::convertToSpecifiedUnitsCallback): * bindings/v8/custom/V8WebGLRenderingContextCustom.cpp: (WebCore::getObjectParameter): (WebCore::V8WebGLRenderingContext::getAttachedShadersCallback): (WebCore::V8WebGLRenderingContext::getExtensionCallback): (WebCore::V8WebGLRenderingContext::getFramebufferAttachmentParameterCallback): (WebCore::V8WebGLRenderingContext::getParameterCallback): (WebCore::V8WebGLRenderingContext::getProgramParameterCallback): (WebCore::V8WebGLRenderingContext::getShaderParameterCallback): (WebCore::V8WebGLRenderingContext::getUniformCallback): (WebCore::vertexAttribAndUniformHelperf): (WebCore::uniformHelperi): (WebCore::uniformMatrixHelper): * bindings/v8/custom/V8WebKitMutationObserverCustom.cpp: (WebCore::V8WebKitMutationObserver::constructorCallback): (WebCore::V8WebKitMutationObserver::observeCallback): * bindings/v8/custom/V8WebSocketCustom.cpp: (WebCore::V8WebSocket::constructorCallback): (WebCore::V8WebSocket::sendCallback): * bindings/v8/custom/V8XMLHttpRequestCustom.cpp: (WebCore::V8XMLHttpRequest::openCallback): git-svn-id: svn://svn.chromium.org/blink/trunk@117736 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID:
v8::Handle<v8::Value> V8DataView::setUint8Callback(const v8::Arguments& args) { INC_STATS("DOM.DataView.setUint8"); if (args.Length() < 2) return V8Proxy::throwNotEnoughArgumentsError(args.GetIsolate()); DataView* imp = V8DataView::toNative(args.Holder()); ExceptionCode ec = 0; EXCEPTION_BLOCK(unsigned, byteOffset, toUInt32(args[0])); EXCEPTION_BLOCK(int, value, toInt32(args[1])); imp->setUint8(byteOffset, static_cast<uint8_t>(value), ec); if (UNLIKELY(ec)) V8Proxy::setDOMException(ec, args.GetIsolate()); return v8::Handle<v8::Value>(); }
171,115
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: ssize_t NuPlayer::NuPlayerStreamListener::read( void *data, size_t size, sp<AMessage> *extra) { CHECK_GT(size, 0u); extra->clear(); Mutex::Autolock autoLock(mLock); if (mEOS) { return 0; } if (mQueue.empty()) { mSendDataNotification = true; return -EWOULDBLOCK; } QueueEntry *entry = &*mQueue.begin(); if (entry->mIsCommand) { switch (entry->mCommand) { case EOS: { mQueue.erase(mQueue.begin()); entry = NULL; mEOS = true; return 0; } case DISCONTINUITY: { *extra = entry->mExtra; mQueue.erase(mQueue.begin()); entry = NULL; return INFO_DISCONTINUITY; } default: TRESPASS(); break; } } size_t copy = entry->mSize; if (copy > size) { copy = size; } memcpy(data, (const uint8_t *)mBuffers.editItemAt(entry->mIndex)->pointer() + entry->mOffset, copy); entry->mOffset += copy; entry->mSize -= copy; if (entry->mSize == 0) { mSource->onBufferAvailable(entry->mIndex); mQueue.erase(mQueue.begin()); entry = NULL; } return copy; } Commit Message: NuPlayerStreamListener: NULL and bounds check before memcpy Bug: 27533704 Change-Id: I992a7709b92b1cbc3114c97bec48a3fc5b22ba6e CWE ID: CWE-264
ssize_t NuPlayer::NuPlayerStreamListener::read( void *data, size_t size, sp<AMessage> *extra) { CHECK_GT(size, 0u); extra->clear(); Mutex::Autolock autoLock(mLock); if (mEOS) { return 0; } if (mQueue.empty()) { mSendDataNotification = true; return -EWOULDBLOCK; } QueueEntry *entry = &*mQueue.begin(); if (entry->mIsCommand) { switch (entry->mCommand) { case EOS: { mQueue.erase(mQueue.begin()); entry = NULL; mEOS = true; return 0; } case DISCONTINUITY: { *extra = entry->mExtra; mQueue.erase(mQueue.begin()); entry = NULL; return INFO_DISCONTINUITY; } default: TRESPASS(); break; } } size_t copy = entry->mSize; if (copy > size) { copy = size; } if (entry->mIndex >= mBuffers.size()) { return ERROR_MALFORMED; } sp<IMemory> mem = mBuffers.editItemAt(entry->mIndex); if (mem == NULL || mem->size() < copy || mem->size() - copy < entry->mOffset) { return ERROR_MALFORMED; } memcpy(data, (const uint8_t *)mem->pointer() + entry->mOffset, copy); entry->mOffset += copy; entry->mSize -= copy; if (entry->mSize == 0) { mSource->onBufferAvailable(entry->mIndex); mQueue.erase(mQueue.begin()); entry = NULL; } return copy; }
173,884
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void RenderWidgetHostViewAura::AcceleratedSurfaceRelease( uint64 surface_handle) { DCHECK(image_transport_clients_.find(surface_handle) != image_transport_clients_.end()); if (current_surface_ == surface_handle) { current_surface_ = 0; UpdateExternalTexture(); } image_transport_clients_.erase(surface_handle); } Commit Message: Implement TextureImageTransportSurface using texture mailbox This has a couple of advantages: - allow tearing down and recreating the UI parent context without losing the renderer contexts - do not require a context to be able to generate textures when creating the GLSurfaceHandle - clearer ownership semantics that potentially allows for more robust and easier lost context handling/thumbnailing/etc., since a texture is at any given time owned by either: UI parent, mailbox, or TextureImageTransportSurface - simplify frontbuffer protection logic; the frontbuffer textures are now owned by RWHV where they are refcounted The TextureImageTransportSurface informs RenderWidgetHostView of the mailbox names for the front- and backbuffer textures by associating them with a surface_handle (1 or 2) in the AcceleratedSurfaceNew message. During SwapBuffers() or PostSubBuffer() cycles, it then uses produceTextureCHROMIUM() and consumeTextureCHROMIUM() to transfer ownership between renderer and browser compositor. RWHV sends back the surface_handle of the buffer being returned with the Swap ACK (or 0 if no buffer is being returned in which case TextureImageTransportSurface will allocate a new texture - note that this could be used to simply keep textures for thumbnailing). BUG=154815,139616 [email protected] Review URL: https://chromiumcodereview.appspot.com/11194042 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@171569 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID:
void RenderWidgetHostViewAura::AcceleratedSurfaceRelease() { // This really tells us to release the frontbuffer. if (current_surface_ && ShouldReleaseFrontSurface()) { ui::Compositor* compositor = GetCompositor(); if (compositor) { // We need to wait for a commit to clear to guarantee that all we // will not issue any more GL referencing the previous surface. can_lock_compositor_ = NO_PENDING_COMMIT; scoped_refptr<ui::Texture> surface_ref = image_transport_clients_[current_surface_]; on_compositing_did_commit_callbacks_.push_back( base::Bind(&RenderWidgetHostViewAura:: SetSurfaceNotInUseByCompositor, AsWeakPtr(), surface_ref)); if (!compositor->HasObserver(this)) compositor->AddObserver(this); } image_transport_clients_.erase(current_surface_); current_surface_ = 0; UpdateExternalTexture(); } }
171,375
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void perf_remove_from_owner(struct perf_event *event) { struct task_struct *owner; rcu_read_lock(); owner = ACCESS_ONCE(event->owner); /* * Matches the smp_wmb() in perf_event_exit_task(). If we observe * !owner it means the list deletion is complete and we can indeed * free this event, otherwise we need to serialize on * owner->perf_event_mutex. */ smp_read_barrier_depends(); if (owner) { /* * Since delayed_put_task_struct() also drops the last * task reference we can safely take a new reference * while holding the rcu_read_lock(). */ get_task_struct(owner); } rcu_read_unlock(); if (owner) { mutex_lock(&owner->perf_event_mutex); /* * We have to re-check the event->owner field, if it is cleared * we raced with perf_event_exit_task(), acquiring the mutex * ensured they're done, and we can proceed with freeing the * event. */ if (event->owner) list_del_init(&event->owner_entry); mutex_unlock(&owner->perf_event_mutex); put_task_struct(owner); } } Commit Message: perf: Fix event->ctx locking There have been a few reported issues wrt. the lack of locking around changing event->ctx. This patch tries to address those. It avoids the whole rwsem thing; and while it appears to work, please give it some thought in review. What I did fail at is sensible runtime checks on the use of event->ctx, the RCU use makes it very hard. Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Cc: Paul E. McKenney <[email protected]> Cc: Jiri Olsa <[email protected]> Cc: Arnaldo Carvalho de Melo <[email protected]> Cc: Linus Torvalds <[email protected]> Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]> CWE ID: CWE-264
static void perf_remove_from_owner(struct perf_event *event) { struct task_struct *owner; rcu_read_lock(); owner = ACCESS_ONCE(event->owner); /* * Matches the smp_wmb() in perf_event_exit_task(). If we observe * !owner it means the list deletion is complete and we can indeed * free this event, otherwise we need to serialize on * owner->perf_event_mutex. */ smp_read_barrier_depends(); if (owner) { /* * Since delayed_put_task_struct() also drops the last * task reference we can safely take a new reference * while holding the rcu_read_lock(). */ get_task_struct(owner); } rcu_read_unlock(); if (owner) { /* * If we're here through perf_event_exit_task() we're already * holding ctx->mutex which would be an inversion wrt. the * normal lock order. * * However we can safely take this lock because its the child * ctx->mutex. */ mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING); /* * We have to re-check the event->owner field, if it is cleared * we raced with perf_event_exit_task(), acquiring the mutex * ensured they're done, and we can proceed with freeing the * event. */ if (event->owner) list_del_init(&event->owner_entry); mutex_unlock(&owner->perf_event_mutex); put_task_struct(owner); } }
166,994
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ContainerNode::notifyNodeInsertedInternal(Node& root, NodeVector& postInsertionNotificationTargets) { EventDispatchForbiddenScope assertNoEventDispatch; ScriptForbiddenScope forbidScript; for (Node& node : NodeTraversal::inclusiveDescendantsOf(root)) { if (!inDocument() && !node.isContainerNode()) continue; if (Node::InsertionShouldCallDidNotifySubtreeInsertions == node.insertedInto(this)) postInsertionNotificationTargets.append(&node); for (ShadowRoot* shadowRoot = node.youngestShadowRoot(); shadowRoot; shadowRoot = shadowRoot->olderShadowRoot()) notifyNodeInsertedInternal(*shadowRoot, postInsertionNotificationTargets); } } Commit Message: Fix an optimisation in ContainerNode::notifyNodeInsertedInternal [email protected] BUG=544020 Review URL: https://codereview.chromium.org/1420653003 Cr-Commit-Position: refs/heads/master@{#355240} CWE ID:
void ContainerNode::notifyNodeInsertedInternal(Node& root, NodeVector& postInsertionNotificationTargets) { EventDispatchForbiddenScope assertNoEventDispatch; ScriptForbiddenScope forbidScript; for (Node& node : NodeTraversal::inclusiveDescendantsOf(root)) { // into detached subtrees that are not in a shadow tree. if (!inDocument() && !isInShadowTree() && !node.isContainerNode()) continue; if (Node::InsertionShouldCallDidNotifySubtreeInsertions == node.insertedInto(this)) postInsertionNotificationTargets.append(&node); for (ShadowRoot* shadowRoot = node.youngestShadowRoot(); shadowRoot; shadowRoot = shadowRoot->olderShadowRoot()) notifyNodeInsertedInternal(*shadowRoot, postInsertionNotificationTargets); } }
171,772
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: mm_zalloc(struct mm_master *mm, u_int ncount, u_int size) { if (size == 0 || ncount == 0 || ncount > SIZE_MAX / size) fatal("%s: mm_zalloc(%u, %u)", __func__, ncount, size); return mm_malloc(mm, size * ncount); } Commit Message: Remove support for pre-authentication compression. Doing compression early in the protocol probably seemed reasonable in the 1990s, but today it's clearly a bad idea in terms of both cryptography (cf. multiple compression oracle attacks in TLS) and attack surface. Moreover, to support it across privilege-separation zlib needed the assistance of a complex shared-memory manager that made the required attack surface considerably larger. Prompted by Guido Vranken pointing out a compiler-elided security check in the shared memory manager found by Stack (http://css.csail.mit.edu/stack/); ok deraadt@ markus@ NB. pre-auth authentication has been disabled by default in sshd for >10 years. CWE ID: CWE-119
mm_zalloc(struct mm_master *mm, u_int ncount, u_int size)
168,647