instruction (stringclasses, 1 value) | input (stringlengths, 222 to 112k) | output (stringlengths, 21 to 113k) | __index_level_0__ (int64, 15 to 30k) |
---|---|---|---|
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void HTMLElement::setInnerHTML(const String& html, ExceptionCode& ec)
{
RefPtr<DocumentFragment> fragment = createFragmentFromSource(html, this, ec);
if (fragment)
replaceChildrenWithFragment(this, fragment.release(), ec);
}
Commit Message: There are too many poorly named functions to create a fragment from markup
https://bugs.webkit.org/show_bug.cgi?id=87339
Reviewed by Eric Seidel.
Source/WebCore:
Moved all functions that create a fragment from markup to markup.h/cpp.
There should be no behavioral change.
* dom/Range.cpp:
(WebCore::Range::createContextualFragment):
* dom/Range.h: Removed createDocumentFragmentForElement.
* dom/ShadowRoot.cpp:
(WebCore::ShadowRoot::setInnerHTML):
* editing/markup.cpp:
(WebCore::createFragmentFromMarkup):
(WebCore::createFragmentForInnerOuterHTML): Renamed from createFragmentFromSource.
(WebCore::createFragmentForTransformToFragment): Moved from XSLTProcessor.
(WebCore::removeElementPreservingChildren): Moved from Range.
(WebCore::createContextualFragment): Ditto.
* editing/markup.h:
* html/HTMLElement.cpp:
(WebCore::HTMLElement::setInnerHTML):
(WebCore::HTMLElement::setOuterHTML):
(WebCore::HTMLElement::insertAdjacentHTML):
* inspector/DOMPatchSupport.cpp:
(WebCore::DOMPatchSupport::patchNode): Added a FIXME since this code should be using
one of the functions listed in markup.h
* xml/XSLTProcessor.cpp:
(WebCore::XSLTProcessor::transformToFragment):
Source/WebKit/qt:
Replace calls to Range::createDocumentFragmentForElement by calls to
createContextualDocumentFragment.
* Api/qwebelement.cpp:
(QWebElement::appendInside):
(QWebElement::prependInside):
(QWebElement::prependOutside):
(QWebElement::appendOutside):
(QWebElement::encloseContentsWith):
(QWebElement::encloseWith):
git-svn-id: svn://svn.chromium.org/blink/trunk@118414 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-264 | void HTMLElement::setInnerHTML(const String& html, ExceptionCode& ec)
{
if (RefPtr<DocumentFragment> fragment = createFragmentForInnerOuterHTML(html, this, ec))
replaceChildrenWithFragment(this, fragment.release(), ec);
}
| 6,744 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int wasm_dis(WasmOp *op, const unsigned char *buf, int buf_len) {
op->len = 1;
op->op = buf[0];
if (op->op > 0xbf) {
return 1;
}
WasmOpDef *opdef = &opcodes[op->op];
switch (op->op) {
case WASM_OP_TRAP:
case WASM_OP_NOP:
case WASM_OP_ELSE:
case WASM_OP_RETURN:
case WASM_OP_DROP:
case WASM_OP_SELECT:
case WASM_OP_I32EQZ:
case WASM_OP_I32EQ:
case WASM_OP_I32NE:
case WASM_OP_I32LTS:
case WASM_OP_I32LTU:
case WASM_OP_I32GTS:
case WASM_OP_I32GTU:
case WASM_OP_I32LES:
case WASM_OP_I32LEU:
case WASM_OP_I32GES:
case WASM_OP_I32GEU:
case WASM_OP_I64EQZ:
case WASM_OP_I64EQ:
case WASM_OP_I64NE:
case WASM_OP_I64LTS:
case WASM_OP_I64LTU:
case WASM_OP_I64GTS:
case WASM_OP_I64GTU:
case WASM_OP_I64LES:
case WASM_OP_I64LEU:
case WASM_OP_I64GES:
case WASM_OP_I64GEU:
case WASM_OP_F32EQ:
case WASM_OP_F32NE:
case WASM_OP_F32LT:
case WASM_OP_F32GT:
case WASM_OP_F32LE:
case WASM_OP_F32GE:
case WASM_OP_F64EQ:
case WASM_OP_F64NE:
case WASM_OP_F64LT:
case WASM_OP_F64GT:
case WASM_OP_F64LE:
case WASM_OP_F64GE:
case WASM_OP_I32CLZ:
case WASM_OP_I32CTZ:
case WASM_OP_I32POPCNT:
case WASM_OP_I32ADD:
case WASM_OP_I32SUB:
case WASM_OP_I32MUL:
case WASM_OP_I32DIVS:
case WASM_OP_I32DIVU:
case WASM_OP_I32REMS:
case WASM_OP_I32REMU:
case WASM_OP_I32AND:
case WASM_OP_I32OR:
case WASM_OP_I32XOR:
case WASM_OP_I32SHL:
case WASM_OP_I32SHRS:
case WASM_OP_I32SHRU:
case WASM_OP_I32ROTL:
case WASM_OP_I32ROTR:
case WASM_OP_I64CLZ:
case WASM_OP_I64CTZ:
case WASM_OP_I64POPCNT:
case WASM_OP_I64ADD:
case WASM_OP_I64SUB:
case WASM_OP_I64MUL:
case WASM_OP_I64DIVS:
case WASM_OP_I64DIVU:
case WASM_OP_I64REMS:
case WASM_OP_I64REMU:
case WASM_OP_I64AND:
case WASM_OP_I64OR:
case WASM_OP_I64XOR:
case WASM_OP_I64SHL:
case WASM_OP_I64SHRS:
case WASM_OP_I64SHRU:
case WASM_OP_I64ROTL:
case WASM_OP_I64ROTR:
case WASM_OP_F32ABS:
case WASM_OP_F32NEG:
case WASM_OP_F32CEIL:
case WASM_OP_F32FLOOR:
case WASM_OP_F32TRUNC:
case WASM_OP_F32NEAREST:
case WASM_OP_F32SQRT:
case WASM_OP_F32ADD:
case WASM_OP_F32SUB:
case WASM_OP_F32MUL:
case WASM_OP_F32DIV:
case WASM_OP_F32MIN:
case WASM_OP_F32MAX:
case WASM_OP_F32COPYSIGN:
case WASM_OP_F64ABS:
case WASM_OP_F64NEG:
case WASM_OP_F64CEIL:
case WASM_OP_F64FLOOR:
case WASM_OP_F64TRUNC:
case WASM_OP_F64NEAREST:
case WASM_OP_F64SQRT:
case WASM_OP_F64ADD:
case WASM_OP_F64SUB:
case WASM_OP_F64MUL:
case WASM_OP_F64DIV:
case WASM_OP_F64MIN:
case WASM_OP_F64MAX:
case WASM_OP_F64COPYSIGN:
case WASM_OP_I32WRAPI64:
case WASM_OP_I32TRUNCSF32:
case WASM_OP_I32TRUNCUF32:
case WASM_OP_I32TRUNCSF64:
case WASM_OP_I32TRUNCUF64:
case WASM_OP_I64EXTENDSI32:
case WASM_OP_I64EXTENDUI32:
case WASM_OP_I64TRUNCSF32:
case WASM_OP_I64TRUNCUF32:
case WASM_OP_I64TRUNCSF64:
case WASM_OP_I64TRUNCUF64:
case WASM_OP_F32CONVERTSI32:
case WASM_OP_F32CONVERTUI32:
case WASM_OP_F32CONVERTSI64:
case WASM_OP_F32CONVERTUI64:
case WASM_OP_F32DEMOTEF64:
case WASM_OP_F64CONVERTSI32:
case WASM_OP_F64CONVERTUI32:
case WASM_OP_F64CONVERTSI64:
case WASM_OP_F64CONVERTUI64:
case WASM_OP_F64PROMOTEF32:
case WASM_OP_I32REINTERPRETF32:
case WASM_OP_I64REINTERPRETF64:
case WASM_OP_F32REINTERPRETI32:
case WASM_OP_F64REINTERPRETI64:
case WASM_OP_END:
{
snprintf (op->txt, R_ASM_BUFSIZE, "%s", opdef->txt);
}
break;
case WASM_OP_BLOCK:
case WASM_OP_LOOP:
case WASM_OP_IF:
{
st32 val = 0;
size_t n = read_i32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
switch (0x80 - val) {
case R_BIN_WASM_VALUETYPE_EMPTY:
snprintf (op->txt, R_ASM_BUFSIZE, "%s", opdef->txt);
break;
case R_BIN_WASM_VALUETYPE_i32:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result i32)", opdef->txt);
break;
case R_BIN_WASM_VALUETYPE_i64:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result i64)", opdef->txt);
break;
case R_BIN_WASM_VALUETYPE_f32:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result f32)", opdef->txt);
break;
case R_BIN_WASM_VALUETYPE_f64:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result f64)", opdef->txt);
break;
default:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result ?)", opdef->txt);
break;
}
op->len += n;
}
break;
case WASM_OP_BR:
case WASM_OP_BRIF:
case WASM_OP_CALL:
{
ut32 val = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d", opdef->txt, val);
op->len += n;
}
break;
case WASM_OP_BRTABLE:
{
ut32 count = 0, *table = NULL, def = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &count);
if (!(n > 0 && n < buf_len)) {
goto err;
}
if (!(table = calloc (count, sizeof (ut32)))) {
goto err;
}
int i = 0;
op->len += n;
for (i = 0; i < count; i++) {
n = read_u32_leb128 (buf + op->len, buf + buf_len, &table[i]);
if (!(op->len + n <= buf_len)) {
goto beach;
}
op->len += n;
}
n = read_u32_leb128 (buf + op->len, buf + buf_len, &def);
if (!(n > 0 && n + op->len < buf_len)) {
goto beach;
}
op->len += n;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d ", opdef->txt, count);
for (i = 0; i < count && strlen (op->txt) + 10 < R_ASM_BUFSIZE; i++) {
int optxtlen = strlen (op->txt);
snprintf (op->txt + optxtlen, R_ASM_BUFSIZE - optxtlen, "%d ", table[i]);
}
snprintf (op->txt + strlen (op->txt), R_ASM_BUFSIZE, "%d", def);
free (table);
break;
beach:
free (table);
goto err;
}
break;
case WASM_OP_CALLINDIRECT:
{
ut32 val = 0, reserved = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
op->len += n;
n = read_u32_leb128 (buf + op->len, buf + buf_len, &reserved);
if (!(n == 1 && op->len + n <= buf_len)) goto err;
reserved &= 0x1;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d %d", opdef->txt, val, reserved);
op->len += n;
}
break;
case WASM_OP_GETLOCAL:
case WASM_OP_SETLOCAL:
case WASM_OP_TEELOCAL:
case WASM_OP_GETGLOBAL:
case WASM_OP_SETGLOBAL:
{
ut32 val = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d", opdef->txt, val);
op->len += n;
}
break;
case WASM_OP_I32LOAD:
case WASM_OP_I64LOAD:
case WASM_OP_F32LOAD:
case WASM_OP_F64LOAD:
case WASM_OP_I32LOAD8S:
case WASM_OP_I32LOAD8U:
case WASM_OP_I32LOAD16S:
case WASM_OP_I32LOAD16U:
case WASM_OP_I64LOAD8S:
case WASM_OP_I64LOAD8U:
case WASM_OP_I64LOAD16S:
case WASM_OP_I64LOAD16U:
case WASM_OP_I64LOAD32S:
case WASM_OP_I64LOAD32U:
case WASM_OP_I32STORE:
case WASM_OP_I64STORE:
case WASM_OP_F32STORE:
case WASM_OP_F64STORE:
case WASM_OP_I32STORE8:
case WASM_OP_I32STORE16:
case WASM_OP_I64STORE8:
case WASM_OP_I64STORE16:
case WASM_OP_I64STORE32:
{
ut32 flag = 0, offset = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &flag);
if (!(n > 0 && n < buf_len)) goto err;
op->len += n;
n = read_u32_leb128 (buf + op->len, buf + buf_len, &offset);
if (!(n > 0 && op->len + n <= buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d %d", opdef->txt, flag, offset);
op->len += n;
}
break;
case WASM_OP_CURRENTMEMORY:
case WASM_OP_GROWMEMORY:
{
ut32 reserved = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &reserved);
if (!(n == 1 && n < buf_len)) goto err;
reserved &= 0x1;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d", opdef->txt, reserved);
op->len += n;
}
break;
case WASM_OP_I32CONST:
{
st32 val = 0;
size_t n = read_i32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %" PFMT32d, opdef->txt, val);
op->len += n;
}
break;
case WASM_OP_I64CONST:
{
st64 val = 0;
size_t n = read_i64_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %" PFMT64d, opdef->txt, val);
op->len += n;
}
break;
case WASM_OP_F32CONST:
{
ut32 val = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
long double d = (long double)val;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %" LDBLFMT, opdef->txt, d);
op->len += n;
}
break;
case WASM_OP_F64CONST:
{
ut64 val = 0;
size_t n = read_u64_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
long double d = (long double)val;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %" LDBLFMT, opdef->txt, d);
op->len += n;
}
break;
default:
goto err;
}
return op->len;
err:
op->len = 1;
snprintf (op->txt, R_ASM_BUFSIZE, "invalid");
return op->len;
}
Commit Message: Fix #9969 - Stack overflow in wasm disassembler
CWE ID: CWE-119 | int wasm_dis(WasmOp *op, const unsigned char *buf, int buf_len) {
op->len = 1;
op->op = buf[0];
if (op->op > 0xbf) {
return 1;
}
WasmOpDef *opdef = &opcodes[op->op];
switch (op->op) {
case WASM_OP_TRAP:
case WASM_OP_NOP:
case WASM_OP_ELSE:
case WASM_OP_RETURN:
case WASM_OP_DROP:
case WASM_OP_SELECT:
case WASM_OP_I32EQZ:
case WASM_OP_I32EQ:
case WASM_OP_I32NE:
case WASM_OP_I32LTS:
case WASM_OP_I32LTU:
case WASM_OP_I32GTS:
case WASM_OP_I32GTU:
case WASM_OP_I32LES:
case WASM_OP_I32LEU:
case WASM_OP_I32GES:
case WASM_OP_I32GEU:
case WASM_OP_I64EQZ:
case WASM_OP_I64EQ:
case WASM_OP_I64NE:
case WASM_OP_I64LTS:
case WASM_OP_I64LTU:
case WASM_OP_I64GTS:
case WASM_OP_I64GTU:
case WASM_OP_I64LES:
case WASM_OP_I64LEU:
case WASM_OP_I64GES:
case WASM_OP_I64GEU:
case WASM_OP_F32EQ:
case WASM_OP_F32NE:
case WASM_OP_F32LT:
case WASM_OP_F32GT:
case WASM_OP_F32LE:
case WASM_OP_F32GE:
case WASM_OP_F64EQ:
case WASM_OP_F64NE:
case WASM_OP_F64LT:
case WASM_OP_F64GT:
case WASM_OP_F64LE:
case WASM_OP_F64GE:
case WASM_OP_I32CLZ:
case WASM_OP_I32CTZ:
case WASM_OP_I32POPCNT:
case WASM_OP_I32ADD:
case WASM_OP_I32SUB:
case WASM_OP_I32MUL:
case WASM_OP_I32DIVS:
case WASM_OP_I32DIVU:
case WASM_OP_I32REMS:
case WASM_OP_I32REMU:
case WASM_OP_I32AND:
case WASM_OP_I32OR:
case WASM_OP_I32XOR:
case WASM_OP_I32SHL:
case WASM_OP_I32SHRS:
case WASM_OP_I32SHRU:
case WASM_OP_I32ROTL:
case WASM_OP_I32ROTR:
case WASM_OP_I64CLZ:
case WASM_OP_I64CTZ:
case WASM_OP_I64POPCNT:
case WASM_OP_I64ADD:
case WASM_OP_I64SUB:
case WASM_OP_I64MUL:
case WASM_OP_I64DIVS:
case WASM_OP_I64DIVU:
case WASM_OP_I64REMS:
case WASM_OP_I64REMU:
case WASM_OP_I64AND:
case WASM_OP_I64OR:
case WASM_OP_I64XOR:
case WASM_OP_I64SHL:
case WASM_OP_I64SHRS:
case WASM_OP_I64SHRU:
case WASM_OP_I64ROTL:
case WASM_OP_I64ROTR:
case WASM_OP_F32ABS:
case WASM_OP_F32NEG:
case WASM_OP_F32CEIL:
case WASM_OP_F32FLOOR:
case WASM_OP_F32TRUNC:
case WASM_OP_F32NEAREST:
case WASM_OP_F32SQRT:
case WASM_OP_F32ADD:
case WASM_OP_F32SUB:
case WASM_OP_F32MUL:
case WASM_OP_F32DIV:
case WASM_OP_F32MIN:
case WASM_OP_F32MAX:
case WASM_OP_F32COPYSIGN:
case WASM_OP_F64ABS:
case WASM_OP_F64NEG:
case WASM_OP_F64CEIL:
case WASM_OP_F64FLOOR:
case WASM_OP_F64TRUNC:
case WASM_OP_F64NEAREST:
case WASM_OP_F64SQRT:
case WASM_OP_F64ADD:
case WASM_OP_F64SUB:
case WASM_OP_F64MUL:
case WASM_OP_F64DIV:
case WASM_OP_F64MIN:
case WASM_OP_F64MAX:
case WASM_OP_F64COPYSIGN:
case WASM_OP_I32WRAPI64:
case WASM_OP_I32TRUNCSF32:
case WASM_OP_I32TRUNCUF32:
case WASM_OP_I32TRUNCSF64:
case WASM_OP_I32TRUNCUF64:
case WASM_OP_I64EXTENDSI32:
case WASM_OP_I64EXTENDUI32:
case WASM_OP_I64TRUNCSF32:
case WASM_OP_I64TRUNCUF32:
case WASM_OP_I64TRUNCSF64:
case WASM_OP_I64TRUNCUF64:
case WASM_OP_F32CONVERTSI32:
case WASM_OP_F32CONVERTUI32:
case WASM_OP_F32CONVERTSI64:
case WASM_OP_F32CONVERTUI64:
case WASM_OP_F32DEMOTEF64:
case WASM_OP_F64CONVERTSI32:
case WASM_OP_F64CONVERTUI32:
case WASM_OP_F64CONVERTSI64:
case WASM_OP_F64CONVERTUI64:
case WASM_OP_F64PROMOTEF32:
case WASM_OP_I32REINTERPRETF32:
case WASM_OP_I64REINTERPRETF64:
case WASM_OP_F32REINTERPRETI32:
case WASM_OP_F64REINTERPRETI64:
case WASM_OP_END:
{
snprintf (op->txt, R_ASM_BUFSIZE, "%s", opdef->txt);
}
break;
case WASM_OP_BLOCK:
case WASM_OP_LOOP:
case WASM_OP_IF:
{
st32 val = 0;
size_t n = read_i32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
switch (0x80 - val) {
case R_BIN_WASM_VALUETYPE_EMPTY:
snprintf (op->txt, R_ASM_BUFSIZE, "%s", opdef->txt);
break;
case R_BIN_WASM_VALUETYPE_i32:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result i32)", opdef->txt);
break;
case R_BIN_WASM_VALUETYPE_i64:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result i64)", opdef->txt);
break;
case R_BIN_WASM_VALUETYPE_f32:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result f32)", opdef->txt);
break;
case R_BIN_WASM_VALUETYPE_f64:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result f64)", opdef->txt);
break;
default:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result ?)", opdef->txt);
break;
}
op->len += n;
}
break;
case WASM_OP_BR:
case WASM_OP_BRIF:
case WASM_OP_CALL:
{
ut32 val = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d", opdef->txt, val);
op->len += n;
}
break;
case WASM_OP_BRTABLE:
{
ut32 count = 0, *table = NULL, def = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &count);
if (!(n > 0 && n < buf_len)) {
goto err;
}
if (!(table = calloc (count, sizeof (ut32)))) {
goto err;
}
int i = 0;
op->len += n;
for (i = 0; i < count; i++) {
n = read_u32_leb128 (buf + op->len, buf + buf_len, &table[i]);
if (!(op->len + n <= buf_len)) {
goto beach;
}
op->len += n;
}
n = read_u32_leb128 (buf + op->len, buf + buf_len, &def);
if (!(n > 0 && n + op->len < buf_len)) {
goto beach;
}
op->len += n;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d ", opdef->txt, count);
char *txt = op->txt;
int txtLen = strlen (op->txt);
int txtLeft = R_ASM_BUFSIZE - txtLen;
txt += txtLen;
for (i = 0; i < count && txtLen + 10 < R_ASM_BUFSIZE; i++) {
snprintf (txt, txtLeft, "%d ", table[i]);
txtLen = strlen (txt);
txt += txtLen;
txtLeft -= txtLen;
}
snprintf (txt, txtLeft - 1, "%d", def);
free (table);
break;
beach:
free (table);
goto err;
}
break;
case WASM_OP_CALLINDIRECT:
{
ut32 val = 0, reserved = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
op->len += n;
n = read_u32_leb128 (buf + op->len, buf + buf_len, &reserved);
if (!(n == 1 && op->len + n <= buf_len)) goto err;
reserved &= 0x1;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d %d", opdef->txt, val, reserved);
op->len += n;
}
break;
case WASM_OP_GETLOCAL:
case WASM_OP_SETLOCAL:
case WASM_OP_TEELOCAL:
case WASM_OP_GETGLOBAL:
case WASM_OP_SETGLOBAL:
{
ut32 val = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d", opdef->txt, val);
op->len += n;
}
break;
case WASM_OP_I32LOAD:
case WASM_OP_I64LOAD:
case WASM_OP_F32LOAD:
case WASM_OP_F64LOAD:
case WASM_OP_I32LOAD8S:
case WASM_OP_I32LOAD8U:
case WASM_OP_I32LOAD16S:
case WASM_OP_I32LOAD16U:
case WASM_OP_I64LOAD8S:
case WASM_OP_I64LOAD8U:
case WASM_OP_I64LOAD16S:
case WASM_OP_I64LOAD16U:
case WASM_OP_I64LOAD32S:
case WASM_OP_I64LOAD32U:
case WASM_OP_I32STORE:
case WASM_OP_I64STORE:
case WASM_OP_F32STORE:
case WASM_OP_F64STORE:
case WASM_OP_I32STORE8:
case WASM_OP_I32STORE16:
case WASM_OP_I64STORE8:
case WASM_OP_I64STORE16:
case WASM_OP_I64STORE32:
{
ut32 flag = 0, offset = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &flag);
if (!(n > 0 && n < buf_len)) goto err;
op->len += n;
n = read_u32_leb128 (buf + op->len, buf + buf_len, &offset);
if (!(n > 0 && op->len + n <= buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d %d", opdef->txt, flag, offset);
op->len += n;
}
break;
case WASM_OP_CURRENTMEMORY:
case WASM_OP_GROWMEMORY:
{
ut32 reserved = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &reserved);
if (!(n == 1 && n < buf_len)) goto err;
reserved &= 0x1;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d", opdef->txt, reserved);
op->len += n;
}
break;
case WASM_OP_I32CONST:
{
st32 val = 0;
size_t n = read_i32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %" PFMT32d, opdef->txt, val);
op->len += n;
}
break;
case WASM_OP_I64CONST:
{
st64 val = 0;
size_t n = read_i64_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %" PFMT64d, opdef->txt, val);
op->len += n;
}
break;
case WASM_OP_F32CONST:
{
ut32 val = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
long double d = (long double)val;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %" LDBLFMT, opdef->txt, d);
op->len += n;
}
break;
case WASM_OP_F64CONST:
{
ut64 val = 0;
size_t n = read_u64_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
long double d = (long double)val;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %" LDBLFMT, opdef->txt, d);
op->len += n;
}
break;
default:
goto err;
}
return op->len;
err:
op->len = 1;
snprintf (op->txt, R_ASM_BUFSIZE, "invalid");
return op->len;
}
| 19,372 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int FindStartOffsetOfFileInZipFile(const char* zip_file, const char* filename) {
FileDescriptor fd;
if (!fd.OpenReadOnly(zip_file)) {
LOG_ERRNO("%s: open failed trying to open zip file %s\n",
__FUNCTION__, zip_file);
return CRAZY_OFFSET_FAILED;
}
struct stat stat_buf;
if (stat(zip_file, &stat_buf) == -1) {
LOG_ERRNO("%s: stat failed trying to stat zip file %s\n",
__FUNCTION__, zip_file);
return CRAZY_OFFSET_FAILED;
}
if (stat_buf.st_size > kMaxZipFileLength) {
LOG("%s: The size %ld of %s is too large to map\n",
__FUNCTION__, stat_buf.st_size, zip_file);
return CRAZY_OFFSET_FAILED;
}
void* mem = fd.Map(NULL, stat_buf.st_size, PROT_READ, MAP_PRIVATE, 0);
if (mem == MAP_FAILED) {
LOG_ERRNO("%s: mmap failed trying to mmap zip file %s\n",
__FUNCTION__, zip_file);
return CRAZY_OFFSET_FAILED;
}
ScopedMMap scoped_mmap(mem, stat_buf.st_size);
uint8_t* mem_bytes = static_cast<uint8_t*>(mem);
int off;
for (off = stat_buf.st_size - sizeof(kEndOfCentralDirectoryMarker);
off >= 0; --off) {
if (ReadUInt32(mem_bytes, off) == kEndOfCentralDirectoryMarker) {
break;
}
}
if (off == -1) {
LOG("%s: Failed to find end of central directory in %s\n",
__FUNCTION__, zip_file);
return CRAZY_OFFSET_FAILED;
}
uint32_t length_of_central_dir = ReadUInt32(
mem_bytes, off + kOffsetOfCentralDirLengthInEndOfCentralDirectory);
uint32_t start_of_central_dir = ReadUInt32(
mem_bytes, off + kOffsetOfStartOfCentralDirInEndOfCentralDirectory);
if (start_of_central_dir > off) {
LOG("%s: Found out of range offset %u for start of directory in %s\n",
__FUNCTION__, start_of_central_dir, zip_file);
return CRAZY_OFFSET_FAILED;
}
uint32_t end_of_central_dir = start_of_central_dir + length_of_central_dir;
if (end_of_central_dir > off) {
LOG("%s: Found out of range offset %u for end of directory in %s\n",
__FUNCTION__, end_of_central_dir, zip_file);
return CRAZY_OFFSET_FAILED;
}
uint32_t num_entries = ReadUInt16(
mem_bytes, off + kOffsetNumOfEntriesInEndOfCentralDirectory);
off = start_of_central_dir;
const int target_len = strlen(filename);
int n = 0;
for (; n < num_entries && off < end_of_central_dir; ++n) {
uint32_t marker = ReadUInt32(mem_bytes, off);
if (marker != kCentralDirHeaderMarker) {
LOG("%s: Failed to find central directory header marker in %s. "
"Found 0x%x but expected 0x%x\n", __FUNCTION__,
zip_file, marker, kCentralDirHeaderMarker);
return CRAZY_OFFSET_FAILED;
}
uint32_t file_name_length =
ReadUInt16(mem_bytes, off + kOffsetFilenameLengthInCentralDirectory);
uint32_t extra_field_length =
ReadUInt16(mem_bytes, off + kOffsetExtraFieldLengthInCentralDirectory);
uint32_t comment_field_length =
ReadUInt16(mem_bytes, off + kOffsetCommentLengthInCentralDirectory);
uint32_t header_length = kOffsetFilenameInCentralDirectory +
file_name_length + extra_field_length + comment_field_length;
uint32_t local_header_offset =
ReadUInt32(mem_bytes, off + kOffsetLocalHeaderOffsetInCentralDirectory);
uint8_t* filename_bytes =
mem_bytes + off + kOffsetFilenameInCentralDirectory;
if (file_name_length == target_len &&
memcmp(filename_bytes, filename, target_len) == 0) {
uint32_t marker = ReadUInt32(mem_bytes, local_header_offset);
if (marker != kLocalHeaderMarker) {
LOG("%s: Failed to find local file header marker in %s. "
"Found 0x%x but expected 0x%x\n", __FUNCTION__,
zip_file, marker, kLocalHeaderMarker);
return CRAZY_OFFSET_FAILED;
}
uint32_t compression_method =
ReadUInt16(
mem_bytes,
local_header_offset + kOffsetCompressionMethodInLocalHeader);
if (compression_method != kCompressionMethodStored) {
LOG("%s: %s is compressed within %s. "
"Found compression method %u but expected %u\n", __FUNCTION__,
filename, zip_file, compression_method, kCompressionMethodStored);
return CRAZY_OFFSET_FAILED;
}
uint32_t file_name_length =
ReadUInt16(
mem_bytes,
local_header_offset + kOffsetFilenameLengthInLocalHeader);
uint32_t extra_field_length =
ReadUInt16(
mem_bytes,
local_header_offset + kOffsetExtraFieldLengthInLocalHeader);
uint32_t header_length =
kOffsetFilenameInLocalHeader + file_name_length + extra_field_length;
return local_header_offset + header_length;
}
off += header_length;
}
if (n < num_entries) {
LOG("%s: Did not find all the expected entries in the central directory. "
"Found %d but expected %d\n", __FUNCTION__, n, num_entries);
}
if (off < end_of_central_dir) {
LOG("%s: There are %d extra bytes at the end of the central directory.\n",
__FUNCTION__, end_of_central_dir - off);
}
LOG("%s: Did not find %s in %s\n", __FUNCTION__, filename, zip_file);
return CRAZY_OFFSET_FAILED;
}
Commit Message: crazy linker: Alter search for zip EOCD start
When loading directly from APK, begin searching backwards
for the zip EOCD record signature at size of EOCD record
bytes before the end of the file.
BUG=537205
[email protected]
Review URL: https://codereview.chromium.org/1390553002 .
Cr-Commit-Position: refs/heads/master@{#352577}
CWE ID: CWE-20 | int FindStartOffsetOfFileInZipFile(const char* zip_file, const char* filename) {
FileDescriptor fd;
if (!fd.OpenReadOnly(zip_file)) {
LOG_ERRNO("%s: open failed trying to open zip file %s\n",
__FUNCTION__, zip_file);
return CRAZY_OFFSET_FAILED;
}
struct stat stat_buf;
if (stat(zip_file, &stat_buf) == -1) {
LOG_ERRNO("%s: stat failed trying to stat zip file %s\n",
__FUNCTION__, zip_file);
return CRAZY_OFFSET_FAILED;
}
if (stat_buf.st_size > kMaxZipFileLength) {
LOG("%s: The size %ld of %s is too large to map\n",
__FUNCTION__, stat_buf.st_size, zip_file);
return CRAZY_OFFSET_FAILED;
}
void* mem = fd.Map(NULL, stat_buf.st_size, PROT_READ, MAP_PRIVATE, 0);
if (mem == MAP_FAILED) {
LOG_ERRNO("%s: mmap failed trying to mmap zip file %s\n",
__FUNCTION__, zip_file);
return CRAZY_OFFSET_FAILED;
}
ScopedMMap scoped_mmap(mem, stat_buf.st_size);
// central directory marker. The earliest occurrence we accept is
// size of end of central directory bytes back from from the end of the
// file.
uint8_t* mem_bytes = static_cast<uint8_t*>(mem);
int off = stat_buf.st_size - kEndOfCentralDirectoryRecordSize;
for (; off >= 0; --off) {
if (ReadUInt32(mem_bytes, off) == kEndOfCentralDirectoryMarker) {
break;
}
}
if (off == -1) {
LOG("%s: Failed to find end of central directory in %s\n",
__FUNCTION__, zip_file);
return CRAZY_OFFSET_FAILED;
}
uint32_t length_of_central_dir = ReadUInt32(
mem_bytes, off + kOffsetOfCentralDirLengthInEndOfCentralDirectory);
uint32_t start_of_central_dir = ReadUInt32(
mem_bytes, off + kOffsetOfStartOfCentralDirInEndOfCentralDirectory);
if (start_of_central_dir > off) {
LOG("%s: Found out of range offset %u for start of directory in %s\n",
__FUNCTION__, start_of_central_dir, zip_file);
return CRAZY_OFFSET_FAILED;
}
uint32_t end_of_central_dir = start_of_central_dir + length_of_central_dir;
if (end_of_central_dir > off) {
LOG("%s: Found out of range offset %u for end of directory in %s\n",
__FUNCTION__, end_of_central_dir, zip_file);
return CRAZY_OFFSET_FAILED;
}
uint32_t num_entries = ReadUInt16(
mem_bytes, off + kOffsetNumOfEntriesInEndOfCentralDirectory);
off = start_of_central_dir;
const int target_len = strlen(filename);
int n = 0;
for (; n < num_entries && off < end_of_central_dir; ++n) {
uint32_t marker = ReadUInt32(mem_bytes, off);
if (marker != kCentralDirHeaderMarker) {
LOG("%s: Failed to find central directory header marker in %s. "
"Found 0x%x but expected 0x%x\n", __FUNCTION__,
zip_file, marker, kCentralDirHeaderMarker);
return CRAZY_OFFSET_FAILED;
}
uint32_t file_name_length =
ReadUInt16(mem_bytes, off + kOffsetFilenameLengthInCentralDirectory);
uint32_t extra_field_length =
ReadUInt16(mem_bytes, off + kOffsetExtraFieldLengthInCentralDirectory);
uint32_t comment_field_length =
ReadUInt16(mem_bytes, off + kOffsetCommentLengthInCentralDirectory);
uint32_t header_length = kOffsetFilenameInCentralDirectory +
file_name_length + extra_field_length + comment_field_length;
uint32_t local_header_offset =
ReadUInt32(mem_bytes, off + kOffsetLocalHeaderOffsetInCentralDirectory);
uint8_t* filename_bytes =
mem_bytes + off + kOffsetFilenameInCentralDirectory;
if (file_name_length == target_len &&
memcmp(filename_bytes, filename, target_len) == 0) {
uint32_t marker = ReadUInt32(mem_bytes, local_header_offset);
if (marker != kLocalHeaderMarker) {
LOG("%s: Failed to find local file header marker in %s. "
"Found 0x%x but expected 0x%x\n", __FUNCTION__,
zip_file, marker, kLocalHeaderMarker);
return CRAZY_OFFSET_FAILED;
}
uint32_t compression_method =
ReadUInt16(
mem_bytes,
local_header_offset + kOffsetCompressionMethodInLocalHeader);
if (compression_method != kCompressionMethodStored) {
LOG("%s: %s is compressed within %s. "
"Found compression method %u but expected %u\n", __FUNCTION__,
filename, zip_file, compression_method, kCompressionMethodStored);
return CRAZY_OFFSET_FAILED;
}
uint32_t file_name_length =
ReadUInt16(
mem_bytes,
local_header_offset + kOffsetFilenameLengthInLocalHeader);
uint32_t extra_field_length =
ReadUInt16(
mem_bytes,
local_header_offset + kOffsetExtraFieldLengthInLocalHeader);
uint32_t header_length =
kOffsetFilenameInLocalHeader + file_name_length + extra_field_length;
return local_header_offset + header_length;
}
off += header_length;
}
if (n < num_entries) {
LOG("%s: Did not find all the expected entries in the central directory. "
"Found %d but expected %d\n", __FUNCTION__, n, num_entries);
}
if (off < end_of_central_dir) {
LOG("%s: There are %d extra bytes at the end of the central directory.\n",
__FUNCTION__, end_of_central_dir - off);
}
LOG("%s: Did not find %s in %s\n", __FUNCTION__, filename, zip_file);
return CRAZY_OFFSET_FAILED;
}
| 22,022 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int jpc_dec_tileinit(jpc_dec_t *dec, jpc_dec_tile_t *tile)
{
jpc_dec_tcomp_t *tcomp;
int compno;
int rlvlno;
jpc_dec_rlvl_t *rlvl;
jpc_dec_band_t *band;
jpc_dec_prc_t *prc;
int bndno;
jpc_tsfb_band_t *bnd;
int bandno;
jpc_dec_ccp_t *ccp;
int prccnt;
jpc_dec_cblk_t *cblk;
int cblkcnt;
uint_fast32_t tlprcxstart;
uint_fast32_t tlprcystart;
uint_fast32_t brprcxend;
uint_fast32_t brprcyend;
uint_fast32_t tlcbgxstart;
uint_fast32_t tlcbgystart;
uint_fast32_t brcbgxend;
uint_fast32_t brcbgyend;
uint_fast32_t cbgxstart;
uint_fast32_t cbgystart;
uint_fast32_t cbgxend;
uint_fast32_t cbgyend;
uint_fast32_t tlcblkxstart;
uint_fast32_t tlcblkystart;
uint_fast32_t brcblkxend;
uint_fast32_t brcblkyend;
uint_fast32_t cblkxstart;
uint_fast32_t cblkystart;
uint_fast32_t cblkxend;
uint_fast32_t cblkyend;
uint_fast32_t tmpxstart;
uint_fast32_t tmpystart;
uint_fast32_t tmpxend;
uint_fast32_t tmpyend;
jpc_dec_cp_t *cp;
jpc_tsfb_band_t bnds[64];
jpc_pchg_t *pchg;
int pchgno;
jpc_dec_cmpt_t *cmpt;
cp = tile->cp;
tile->realmode = 0;
if (cp->mctid == JPC_MCT_ICT) {
tile->realmode = 1;
}
for (compno = 0, tcomp = tile->tcomps, cmpt = dec->cmpts; compno <
dec->numcomps; ++compno, ++tcomp, ++cmpt) {
ccp = &tile->cp->ccps[compno];
if (ccp->qmfbid == JPC_COX_INS) {
tile->realmode = 1;
}
tcomp->numrlvls = ccp->numrlvls;
if (!(tcomp->rlvls = jas_alloc2(tcomp->numrlvls,
sizeof(jpc_dec_rlvl_t)))) {
return -1;
}
if (!(tcomp->data = jas_seq2d_create(JPC_CEILDIV(tile->xstart,
cmpt->hstep), JPC_CEILDIV(tile->ystart, cmpt->vstep),
JPC_CEILDIV(tile->xend, cmpt->hstep), JPC_CEILDIV(tile->yend,
cmpt->vstep)))) {
return -1;
}
if (!(tcomp->tsfb = jpc_cod_gettsfb(ccp->qmfbid,
tcomp->numrlvls - 1))) {
return -1;
}
{
jpc_tsfb_getbands(tcomp->tsfb, jas_seq2d_xstart(tcomp->data),
jas_seq2d_ystart(tcomp->data), jas_seq2d_xend(tcomp->data),
jas_seq2d_yend(tcomp->data), bnds);
}
for (rlvlno = 0, rlvl = tcomp->rlvls; rlvlno < tcomp->numrlvls;
++rlvlno, ++rlvl) {
rlvl->bands = 0;
rlvl->xstart = JPC_CEILDIVPOW2(tcomp->xstart,
tcomp->numrlvls - 1 - rlvlno);
rlvl->ystart = JPC_CEILDIVPOW2(tcomp->ystart,
tcomp->numrlvls - 1 - rlvlno);
rlvl->xend = JPC_CEILDIVPOW2(tcomp->xend,
tcomp->numrlvls - 1 - rlvlno);
rlvl->yend = JPC_CEILDIVPOW2(tcomp->yend,
tcomp->numrlvls - 1 - rlvlno);
rlvl->prcwidthexpn = ccp->prcwidthexpns[rlvlno];
rlvl->prcheightexpn = ccp->prcheightexpns[rlvlno];
tlprcxstart = JPC_FLOORDIVPOW2(rlvl->xstart,
rlvl->prcwidthexpn) << rlvl->prcwidthexpn;
tlprcystart = JPC_FLOORDIVPOW2(rlvl->ystart,
rlvl->prcheightexpn) << rlvl->prcheightexpn;
brprcxend = JPC_CEILDIVPOW2(rlvl->xend,
rlvl->prcwidthexpn) << rlvl->prcwidthexpn;
brprcyend = JPC_CEILDIVPOW2(rlvl->yend,
rlvl->prcheightexpn) << rlvl->prcheightexpn;
rlvl->numhprcs = (brprcxend - tlprcxstart) >>
rlvl->prcwidthexpn;
rlvl->numvprcs = (brprcyend - tlprcystart) >>
rlvl->prcheightexpn;
rlvl->numprcs = rlvl->numhprcs * rlvl->numvprcs;
if (rlvl->xstart >= rlvl->xend || rlvl->ystart >= rlvl->yend) {
rlvl->bands = 0;
rlvl->numprcs = 0;
rlvl->numhprcs = 0;
rlvl->numvprcs = 0;
continue;
}
if (!rlvlno) {
tlcbgxstart = tlprcxstart;
tlcbgystart = tlprcystart;
brcbgxend = brprcxend;
brcbgyend = brprcyend;
rlvl->cbgwidthexpn = rlvl->prcwidthexpn;
rlvl->cbgheightexpn = rlvl->prcheightexpn;
} else {
tlcbgxstart = JPC_CEILDIVPOW2(tlprcxstart, 1);
tlcbgystart = JPC_CEILDIVPOW2(tlprcystart, 1);
brcbgxend = JPC_CEILDIVPOW2(brprcxend, 1);
brcbgyend = JPC_CEILDIVPOW2(brprcyend, 1);
rlvl->cbgwidthexpn = rlvl->prcwidthexpn - 1;
rlvl->cbgheightexpn = rlvl->prcheightexpn - 1;
}
rlvl->cblkwidthexpn = JAS_MIN(ccp->cblkwidthexpn,
rlvl->cbgwidthexpn);
rlvl->cblkheightexpn = JAS_MIN(ccp->cblkheightexpn,
rlvl->cbgheightexpn);
rlvl->numbands = (!rlvlno) ? 1 : 3;
if (!(rlvl->bands = jas_alloc2(rlvl->numbands,
sizeof(jpc_dec_band_t)))) {
return -1;
}
for (bandno = 0, band = rlvl->bands;
bandno < rlvl->numbands; ++bandno, ++band) {
bndno = (!rlvlno) ? 0 : (3 * (rlvlno - 1) +
bandno + 1);
bnd = &bnds[bndno];
band->orient = bnd->orient;
band->stepsize = ccp->stepsizes[bndno];
band->analgain = JPC_NOMINALGAIN(ccp->qmfbid,
tcomp->numrlvls - 1, rlvlno, band->orient);
band->absstepsize = jpc_calcabsstepsize(band->stepsize,
cmpt->prec + band->analgain);
band->numbps = ccp->numguardbits +
JPC_QCX_GETEXPN(band->stepsize) - 1;
band->roishift = (ccp->roishift + band->numbps >= JPC_PREC) ?
(JPC_PREC - 1 - band->numbps) : ccp->roishift;
band->data = 0;
band->prcs = 0;
if (bnd->xstart == bnd->xend || bnd->ystart == bnd->yend) {
continue;
}
if (!(band->data = jas_seq2d_create(0, 0, 0, 0))) {
return -1;
}
jas_seq2d_bindsub(band->data, tcomp->data, bnd->locxstart,
bnd->locystart, bnd->locxend, bnd->locyend);
jas_seq2d_setshift(band->data, bnd->xstart, bnd->ystart);
assert(rlvl->numprcs);
if (!(band->prcs = jas_alloc2(rlvl->numprcs,
sizeof(jpc_dec_prc_t)))) {
return -1;
}
/************************************************/
cbgxstart = tlcbgxstart;
cbgystart = tlcbgystart;
for (prccnt = rlvl->numprcs, prc = band->prcs;
prccnt > 0; --prccnt, ++prc) {
cbgxend = cbgxstart + (1 << rlvl->cbgwidthexpn);
cbgyend = cbgystart + (1 << rlvl->cbgheightexpn);
prc->xstart = JAS_MAX(cbgxstart, JAS_CAST(uint_fast32_t,
jas_seq2d_xstart(band->data)));
prc->ystart = JAS_MAX(cbgystart, JAS_CAST(uint_fast32_t,
jas_seq2d_ystart(band->data)));
prc->xend = JAS_MIN(cbgxend, JAS_CAST(uint_fast32_t,
jas_seq2d_xend(band->data)));
prc->yend = JAS_MIN(cbgyend, JAS_CAST(uint_fast32_t,
jas_seq2d_yend(band->data)));
if (prc->xend > prc->xstart && prc->yend > prc->ystart) {
tlcblkxstart = JPC_FLOORDIVPOW2(prc->xstart,
rlvl->cblkwidthexpn) << rlvl->cblkwidthexpn;
tlcblkystart = JPC_FLOORDIVPOW2(prc->ystart,
rlvl->cblkheightexpn) << rlvl->cblkheightexpn;
brcblkxend = JPC_CEILDIVPOW2(prc->xend,
rlvl->cblkwidthexpn) << rlvl->cblkwidthexpn;
brcblkyend = JPC_CEILDIVPOW2(prc->yend,
rlvl->cblkheightexpn) << rlvl->cblkheightexpn;
prc->numhcblks = (brcblkxend - tlcblkxstart) >>
rlvl->cblkwidthexpn;
prc->numvcblks = (brcblkyend - tlcblkystart) >>
rlvl->cblkheightexpn;
prc->numcblks = prc->numhcblks * prc->numvcblks;
assert(prc->numcblks > 0);
if (!(prc->incltagtree = jpc_tagtree_create(
prc->numhcblks, prc->numvcblks))) {
return -1;
}
if (!(prc->numimsbstagtree = jpc_tagtree_create(
prc->numhcblks, prc->numvcblks))) {
return -1;
}
if (!(prc->cblks = jas_alloc2(prc->numcblks,
sizeof(jpc_dec_cblk_t)))) {
return -1;
}
cblkxstart = cbgxstart;
cblkystart = cbgystart;
for (cblkcnt = prc->numcblks, cblk = prc->cblks;
cblkcnt > 0;) {
cblkxend = cblkxstart + (1 << rlvl->cblkwidthexpn);
cblkyend = cblkystart + (1 << rlvl->cblkheightexpn);
tmpxstart = JAS_MAX(cblkxstart, prc->xstart);
tmpystart = JAS_MAX(cblkystart, prc->ystart);
tmpxend = JAS_MIN(cblkxend, prc->xend);
tmpyend = JAS_MIN(cblkyend, prc->yend);
if (tmpxend > tmpxstart && tmpyend > tmpystart) {
cblk->firstpassno = -1;
cblk->mqdec = 0;
cblk->nulldec = 0;
cblk->flags = 0;
cblk->numpasses = 0;
cblk->segs.head = 0;
cblk->segs.tail = 0;
cblk->curseg = 0;
cblk->numimsbs = 0;
cblk->numlenbits = 3;
cblk->flags = 0;
if (!(cblk->data = jas_seq2d_create(0, 0, 0,
0))) {
return -1;
}
jas_seq2d_bindsub(cblk->data, band->data,
tmpxstart, tmpystart, tmpxend, tmpyend);
++cblk;
--cblkcnt;
}
cblkxstart += 1 << rlvl->cblkwidthexpn;
if (cblkxstart >= cbgxend) {
cblkxstart = cbgxstart;
cblkystart += 1 << rlvl->cblkheightexpn;
}
}
} else {
prc->cblks = 0;
prc->incltagtree = 0;
prc->numimsbstagtree = 0;
}
cbgxstart += 1 << rlvl->cbgwidthexpn;
if (cbgxstart >= brcbgxend) {
cbgxstart = tlcbgxstart;
cbgystart += 1 << rlvl->cbgheightexpn;
}
}
/********************************************/
}
}
}
if (!(tile->pi = jpc_dec_pi_create(dec, tile))) {
return -1;
}
for (pchgno = 0; pchgno < jpc_pchglist_numpchgs(tile->cp->pchglist);
++pchgno) {
pchg = jpc_pchg_copy(jpc_pchglist_get(tile->cp->pchglist, pchgno));
assert(pchg);
jpc_pi_addpchg(tile->pi, pchg);
}
jpc_pi_init(tile->pi);
return 0;
}
Commit Message: Fixed an array overflow problem in the JPC decoder.
CWE ID: CWE-119 | static int jpc_dec_tileinit(jpc_dec_t *dec, jpc_dec_tile_t *tile)
{
jpc_dec_tcomp_t *tcomp;
int compno;
int rlvlno;
jpc_dec_rlvl_t *rlvl;
jpc_dec_band_t *band;
jpc_dec_prc_t *prc;
int bndno;
jpc_tsfb_band_t *bnd;
int bandno;
jpc_dec_ccp_t *ccp;
int prccnt;
jpc_dec_cblk_t *cblk;
int cblkcnt;
uint_fast32_t tlprcxstart;
uint_fast32_t tlprcystart;
uint_fast32_t brprcxend;
uint_fast32_t brprcyend;
uint_fast32_t tlcbgxstart;
uint_fast32_t tlcbgystart;
uint_fast32_t brcbgxend;
uint_fast32_t brcbgyend;
uint_fast32_t cbgxstart;
uint_fast32_t cbgystart;
uint_fast32_t cbgxend;
uint_fast32_t cbgyend;
uint_fast32_t tlcblkxstart;
uint_fast32_t tlcblkystart;
uint_fast32_t brcblkxend;
uint_fast32_t brcblkyend;
uint_fast32_t cblkxstart;
uint_fast32_t cblkystart;
uint_fast32_t cblkxend;
uint_fast32_t cblkyend;
uint_fast32_t tmpxstart;
uint_fast32_t tmpystart;
uint_fast32_t tmpxend;
uint_fast32_t tmpyend;
jpc_dec_cp_t *cp;
jpc_tsfb_band_t bnds[JPC_MAXBANDS];
jpc_pchg_t *pchg;
int pchgno;
jpc_dec_cmpt_t *cmpt;
cp = tile->cp;
tile->realmode = 0;
if (cp->mctid == JPC_MCT_ICT) {
tile->realmode = 1;
}
for (compno = 0, tcomp = tile->tcomps, cmpt = dec->cmpts; compno <
dec->numcomps; ++compno, ++tcomp, ++cmpt) {
ccp = &tile->cp->ccps[compno];
if (ccp->qmfbid == JPC_COX_INS) {
tile->realmode = 1;
}
tcomp->numrlvls = ccp->numrlvls;
if (!(tcomp->rlvls = jas_alloc2(tcomp->numrlvls,
sizeof(jpc_dec_rlvl_t)))) {
return -1;
}
if (!(tcomp->data = jas_seq2d_create(JPC_CEILDIV(tile->xstart,
cmpt->hstep), JPC_CEILDIV(tile->ystart, cmpt->vstep),
JPC_CEILDIV(tile->xend, cmpt->hstep), JPC_CEILDIV(tile->yend,
cmpt->vstep)))) {
return -1;
}
if (!(tcomp->tsfb = jpc_cod_gettsfb(ccp->qmfbid,
tcomp->numrlvls - 1))) {
return -1;
}
{
jpc_tsfb_getbands(tcomp->tsfb, jas_seq2d_xstart(tcomp->data),
jas_seq2d_ystart(tcomp->data), jas_seq2d_xend(tcomp->data),
jas_seq2d_yend(tcomp->data), bnds);
}
for (rlvlno = 0, rlvl = tcomp->rlvls; rlvlno < tcomp->numrlvls;
++rlvlno, ++rlvl) {
rlvl->bands = 0;
rlvl->xstart = JPC_CEILDIVPOW2(tcomp->xstart,
tcomp->numrlvls - 1 - rlvlno);
rlvl->ystart = JPC_CEILDIVPOW2(tcomp->ystart,
tcomp->numrlvls - 1 - rlvlno);
rlvl->xend = JPC_CEILDIVPOW2(tcomp->xend,
tcomp->numrlvls - 1 - rlvlno);
rlvl->yend = JPC_CEILDIVPOW2(tcomp->yend,
tcomp->numrlvls - 1 - rlvlno);
rlvl->prcwidthexpn = ccp->prcwidthexpns[rlvlno];
rlvl->prcheightexpn = ccp->prcheightexpns[rlvlno];
tlprcxstart = JPC_FLOORDIVPOW2(rlvl->xstart,
rlvl->prcwidthexpn) << rlvl->prcwidthexpn;
tlprcystart = JPC_FLOORDIVPOW2(rlvl->ystart,
rlvl->prcheightexpn) << rlvl->prcheightexpn;
brprcxend = JPC_CEILDIVPOW2(rlvl->xend,
rlvl->prcwidthexpn) << rlvl->prcwidthexpn;
brprcyend = JPC_CEILDIVPOW2(rlvl->yend,
rlvl->prcheightexpn) << rlvl->prcheightexpn;
rlvl->numhprcs = (brprcxend - tlprcxstart) >>
rlvl->prcwidthexpn;
rlvl->numvprcs = (brprcyend - tlprcystart) >>
rlvl->prcheightexpn;
rlvl->numprcs = rlvl->numhprcs * rlvl->numvprcs;
if (rlvl->xstart >= rlvl->xend || rlvl->ystart >= rlvl->yend) {
rlvl->bands = 0;
rlvl->numprcs = 0;
rlvl->numhprcs = 0;
rlvl->numvprcs = 0;
continue;
}
if (!rlvlno) {
tlcbgxstart = tlprcxstart;
tlcbgystart = tlprcystart;
brcbgxend = brprcxend;
brcbgyend = brprcyend;
rlvl->cbgwidthexpn = rlvl->prcwidthexpn;
rlvl->cbgheightexpn = rlvl->prcheightexpn;
} else {
tlcbgxstart = JPC_CEILDIVPOW2(tlprcxstart, 1);
tlcbgystart = JPC_CEILDIVPOW2(tlprcystart, 1);
brcbgxend = JPC_CEILDIVPOW2(brprcxend, 1);
brcbgyend = JPC_CEILDIVPOW2(brprcyend, 1);
rlvl->cbgwidthexpn = rlvl->prcwidthexpn - 1;
rlvl->cbgheightexpn = rlvl->prcheightexpn - 1;
}
rlvl->cblkwidthexpn = JAS_MIN(ccp->cblkwidthexpn,
rlvl->cbgwidthexpn);
rlvl->cblkheightexpn = JAS_MIN(ccp->cblkheightexpn,
rlvl->cbgheightexpn);
rlvl->numbands = (!rlvlno) ? 1 : 3;
if (!(rlvl->bands = jas_alloc2(rlvl->numbands,
sizeof(jpc_dec_band_t)))) {
return -1;
}
for (bandno = 0, band = rlvl->bands;
bandno < rlvl->numbands; ++bandno, ++band) {
bndno = (!rlvlno) ? 0 : (3 * (rlvlno - 1) +
bandno + 1);
bnd = &bnds[bndno];
band->orient = bnd->orient;
band->stepsize = ccp->stepsizes[bndno];
band->analgain = JPC_NOMINALGAIN(ccp->qmfbid,
tcomp->numrlvls - 1, rlvlno, band->orient);
band->absstepsize = jpc_calcabsstepsize(band->stepsize,
cmpt->prec + band->analgain);
band->numbps = ccp->numguardbits +
JPC_QCX_GETEXPN(band->stepsize) - 1;
band->roishift = (ccp->roishift + band->numbps >= JPC_PREC) ?
(JPC_PREC - 1 - band->numbps) : ccp->roishift;
band->data = 0;
band->prcs = 0;
if (bnd->xstart == bnd->xend || bnd->ystart == bnd->yend) {
continue;
}
if (!(band->data = jas_seq2d_create(0, 0, 0, 0))) {
return -1;
}
jas_seq2d_bindsub(band->data, tcomp->data, bnd->locxstart,
bnd->locystart, bnd->locxend, bnd->locyend);
jas_seq2d_setshift(band->data, bnd->xstart, bnd->ystart);
assert(rlvl->numprcs);
if (!(band->prcs = jas_alloc2(rlvl->numprcs,
sizeof(jpc_dec_prc_t)))) {
return -1;
}
/************************************************/
cbgxstart = tlcbgxstart;
cbgystart = tlcbgystart;
for (prccnt = rlvl->numprcs, prc = band->prcs;
prccnt > 0; --prccnt, ++prc) {
cbgxend = cbgxstart + (1 << rlvl->cbgwidthexpn);
cbgyend = cbgystart + (1 << rlvl->cbgheightexpn);
prc->xstart = JAS_MAX(cbgxstart, JAS_CAST(uint_fast32_t,
jas_seq2d_xstart(band->data)));
prc->ystart = JAS_MAX(cbgystart, JAS_CAST(uint_fast32_t,
jas_seq2d_ystart(band->data)));
prc->xend = JAS_MIN(cbgxend, JAS_CAST(uint_fast32_t,
jas_seq2d_xend(band->data)));
prc->yend = JAS_MIN(cbgyend, JAS_CAST(uint_fast32_t,
jas_seq2d_yend(band->data)));
if (prc->xend > prc->xstart && prc->yend > prc->ystart) {
tlcblkxstart = JPC_FLOORDIVPOW2(prc->xstart,
rlvl->cblkwidthexpn) << rlvl->cblkwidthexpn;
tlcblkystart = JPC_FLOORDIVPOW2(prc->ystart,
rlvl->cblkheightexpn) << rlvl->cblkheightexpn;
brcblkxend = JPC_CEILDIVPOW2(prc->xend,
rlvl->cblkwidthexpn) << rlvl->cblkwidthexpn;
brcblkyend = JPC_CEILDIVPOW2(prc->yend,
rlvl->cblkheightexpn) << rlvl->cblkheightexpn;
prc->numhcblks = (brcblkxend - tlcblkxstart) >>
rlvl->cblkwidthexpn;
prc->numvcblks = (brcblkyend - tlcblkystart) >>
rlvl->cblkheightexpn;
prc->numcblks = prc->numhcblks * prc->numvcblks;
assert(prc->numcblks > 0);
if (!(prc->incltagtree = jpc_tagtree_create(
prc->numhcblks, prc->numvcblks))) {
return -1;
}
if (!(prc->numimsbstagtree = jpc_tagtree_create(
prc->numhcblks, prc->numvcblks))) {
return -1;
}
if (!(prc->cblks = jas_alloc2(prc->numcblks,
sizeof(jpc_dec_cblk_t)))) {
return -1;
}
cblkxstart = cbgxstart;
cblkystart = cbgystart;
for (cblkcnt = prc->numcblks, cblk = prc->cblks;
cblkcnt > 0;) {
cblkxend = cblkxstart + (1 << rlvl->cblkwidthexpn);
cblkyend = cblkystart + (1 << rlvl->cblkheightexpn);
tmpxstart = JAS_MAX(cblkxstart, prc->xstart);
tmpystart = JAS_MAX(cblkystart, prc->ystart);
tmpxend = JAS_MIN(cblkxend, prc->xend);
tmpyend = JAS_MIN(cblkyend, prc->yend);
if (tmpxend > tmpxstart && tmpyend > tmpystart) {
cblk->firstpassno = -1;
cblk->mqdec = 0;
cblk->nulldec = 0;
cblk->flags = 0;
cblk->numpasses = 0;
cblk->segs.head = 0;
cblk->segs.tail = 0;
cblk->curseg = 0;
cblk->numimsbs = 0;
cblk->numlenbits = 3;
cblk->flags = 0;
if (!(cblk->data = jas_seq2d_create(0, 0, 0,
0))) {
return -1;
}
jas_seq2d_bindsub(cblk->data, band->data,
tmpxstart, tmpystart, tmpxend, tmpyend);
++cblk;
--cblkcnt;
}
cblkxstart += 1 << rlvl->cblkwidthexpn;
if (cblkxstart >= cbgxend) {
cblkxstart = cbgxstart;
cblkystart += 1 << rlvl->cblkheightexpn;
}
}
} else {
prc->cblks = 0;
prc->incltagtree = 0;
prc->numimsbstagtree = 0;
}
cbgxstart += 1 << rlvl->cbgwidthexpn;
if (cbgxstart >= brcbgxend) {
cbgxstart = tlcbgxstart;
cbgystart += 1 << rlvl->cbgheightexpn;
}
}
/********************************************/
}
}
}
if (!(tile->pi = jpc_dec_pi_create(dec, tile))) {
return -1;
}
for (pchgno = 0; pchgno < jpc_pchglist_numpchgs(tile->cp->pchglist);
++pchgno) {
pchg = jpc_pchg_copy(jpc_pchglist_get(tile->cp->pchglist, pchgno));
assert(pchg);
jpc_pi_addpchg(tile->pi, pchg);
}
jpc_pi_init(tile->pi);
return 0;
}
| 28,149 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void smp_proc_master_id(tSMP_CB* p_cb, tSMP_INT_DATA* p_data) {
uint8_t* p = p_data->p_data;
tBTM_LE_PENC_KEYS le_key;
SMP_TRACE_DEBUG("%s", __func__);
smp_update_key_mask(p_cb, SMP_SEC_KEY_TYPE_ENC, true);
STREAM_TO_UINT16(le_key.ediv, p);
STREAM_TO_ARRAY(le_key.rand, p, BT_OCTET8_LEN);
/* store the encryption keys from peer device */
memcpy(le_key.ltk, p_cb->ltk, BT_OCTET16_LEN);
le_key.sec_level = p_cb->sec_level;
le_key.key_size = p_cb->loc_enc_size;
if ((p_cb->peer_auth_req & SMP_AUTH_BOND) &&
(p_cb->loc_auth_req & SMP_AUTH_BOND))
btm_sec_save_le_key(p_cb->pairing_bda, BTM_LE_KEY_PENC,
(tBTM_LE_KEY_VALUE*)&le_key, true);
smp_key_distribution(p_cb, NULL);
}
Commit Message: Add packet length check in smp_proc_master_id
Bug: 111937027
Test: manual
Change-Id: I1144c9879e84fa79d68ad9d5fece4f58e2a3b075
(cherry picked from commit c8294662d07a98e9b8b1cab1ab681ec0805ce4e8)
CWE ID: CWE-200 | void smp_proc_master_id(tSMP_CB* p_cb, tSMP_INT_DATA* p_data) {
uint8_t* p = p_data->p_data;
tBTM_LE_PENC_KEYS le_key;
SMP_TRACE_DEBUG("%s", __func__);
if (p_cb->rcvd_cmd_len < 11) { // 1(Code) + 2(EDIV) + 8(Rand)
android_errorWriteLog(0x534e4554, "111937027");
SMP_TRACE_ERROR("%s: Invalid command length: %d, should be at least 11",
__func__, p_cb->rcvd_cmd_len);
return;
}
smp_update_key_mask(p_cb, SMP_SEC_KEY_TYPE_ENC, true);
STREAM_TO_UINT16(le_key.ediv, p);
STREAM_TO_ARRAY(le_key.rand, p, BT_OCTET8_LEN);
/* store the encryption keys from peer device */
memcpy(le_key.ltk, p_cb->ltk, BT_OCTET16_LEN);
le_key.sec_level = p_cb->sec_level;
le_key.key_size = p_cb->loc_enc_size;
if ((p_cb->peer_auth_req & SMP_AUTH_BOND) &&
(p_cb->loc_auth_req & SMP_AUTH_BOND))
btm_sec_save_le_key(p_cb->pairing_bda, BTM_LE_KEY_PENC,
(tBTM_LE_KEY_VALUE*)&le_key, true);
smp_key_distribution(p_cb, NULL);
}
| 3,034 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void ResourceDispatcherHostImpl::InitializeURLRequest(
net::URLRequest* request,
const Referrer& referrer,
bool is_download,
int render_process_host_id,
int render_view_routing_id,
int render_frame_routing_id,
PreviewsState previews_state,
ResourceContext* context) {
DCHECK(io_thread_task_runner_->BelongsToCurrentThread());
DCHECK(!request->is_pending());
Referrer::SetReferrerForRequest(request, referrer);
ResourceRequestInfoImpl* info = CreateRequestInfo(
render_process_host_id, render_view_routing_id, render_frame_routing_id,
previews_state, is_download, context);
info->AssociateWithRequest(request);
}
Commit Message: When turning a download into a navigation, navigate the right frame
Code changes from Nate Chapin <[email protected]>
Bug: 926105
Change-Id: I098599394e6ebe7d2fce5af838014297a337d294
Reviewed-on: https://chromium-review.googlesource.com/c/1454962
Reviewed-by: Camille Lamy <[email protected]>
Commit-Queue: Jochen Eisinger <[email protected]>
Cr-Commit-Position: refs/heads/master@{#629547}
CWE ID: CWE-284 | void ResourceDispatcherHostImpl::InitializeURLRequest(
net::URLRequest* request,
const Referrer& referrer,
bool is_download,
int render_process_host_id,
int render_view_routing_id,
int render_frame_routing_id,
int frame_tree_node_id,
PreviewsState previews_state,
ResourceContext* context) {
DCHECK(io_thread_task_runner_->BelongsToCurrentThread());
DCHECK(!request->is_pending());
Referrer::SetReferrerForRequest(request, referrer);
ResourceRequestInfoImpl* info = CreateRequestInfo(
render_process_host_id, render_view_routing_id, render_frame_routing_id,
frame_tree_node_id, previews_state, is_download, context);
info->AssociateWithRequest(request);
}
| 29,238 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: long FS_FOpenFileRead(const char *filename, fileHandle_t *file, qboolean uniqueFILE)
{
searchpath_t *search;
long len;
if(!fs_searchpaths)
Com_Error(ERR_FATAL, "Filesystem call made without initialization");
for(search = fs_searchpaths; search; search = search->next)
{
len = FS_FOpenFileReadDir(filename, search, file, uniqueFILE, qfalse);
if(file == NULL)
{
if(len > 0)
return len;
}
else
{
if(len >= 0 && *file)
return len;
}
}
#ifdef FS_MISSING
if(missingFiles)
fprintf(missingFiles, "%s\n", filename);
#endif
if(file)
{
*file = 0;
return -1;
}
else
{
return 0;
}
}
Commit Message: All: Don't load .pk3s as .dlls, and don't load user config files from .pk3s
CWE ID: CWE-269 | long FS_FOpenFileRead(const char *filename, fileHandle_t *file, qboolean uniqueFILE)
{
searchpath_t *search;
long len;
qboolean isLocalConfig;
if(!fs_searchpaths)
Com_Error(ERR_FATAL, "Filesystem call made without initialization");
isLocalConfig = !strcmp(filename, "autoexec.cfg") || !strcmp(filename, Q3CONFIG_CFG);
for(search = fs_searchpaths; search; search = search->next)
{
// autoexec.cfg and wolfconfig_mp.cfg can only be loaded outside of pk3 files.
if (isLocalConfig && search->pack)
continue;
len = FS_FOpenFileReadDir(filename, search, file, uniqueFILE, qfalse);
if(file == NULL)
{
if(len > 0)
return len;
}
else
{
if(len >= 0 && *file)
return len;
}
}
#ifdef FS_MISSING
if(missingFiles)
fprintf(missingFiles, "%s\n", filename);
#endif
if(file)
{
*file = 0;
return -1;
}
else
{
return 0;
}
}
| 14,733 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: SYSCALL_DEFINE5(add_key, const char __user *, _type,
const char __user *, _description,
const void __user *, _payload,
size_t, plen,
key_serial_t, ringid)
{
key_ref_t keyring_ref, key_ref;
char type[32], *description;
void *payload;
long ret;
ret = -EINVAL;
if (plen > 1024 * 1024 - 1)
goto error;
/* draw all the data into kernel space */
ret = key_get_type_from_user(type, _type, sizeof(type));
if (ret < 0)
goto error;
description = NULL;
if (_description) {
description = strndup_user(_description, KEY_MAX_DESC_SIZE);
if (IS_ERR(description)) {
ret = PTR_ERR(description);
goto error;
}
if (!*description) {
kfree(description);
description = NULL;
} else if ((description[0] == '.') &&
(strncmp(type, "keyring", 7) == 0)) {
ret = -EPERM;
goto error2;
}
}
/* pull the payload in if one was supplied */
payload = NULL;
if (_payload) {
ret = -ENOMEM;
payload = kvmalloc(plen, GFP_KERNEL);
if (!payload)
goto error2;
ret = -EFAULT;
if (copy_from_user(payload, _payload, plen) != 0)
goto error3;
}
/* find the target keyring (which must be writable) */
keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error3;
}
/* create or update the requested key and add it to the target
* keyring */
key_ref = key_create_or_update(keyring_ref, type, description,
payload, plen, KEY_PERM_UNDEF,
KEY_ALLOC_IN_QUOTA);
if (!IS_ERR(key_ref)) {
ret = key_ref_to_ptr(key_ref)->serial;
key_ref_put(key_ref);
}
else {
ret = PTR_ERR(key_ref);
}
key_ref_put(keyring_ref);
error3:
kvfree(payload);
error2:
kfree(description);
error:
return ret;
}
Commit Message: KEYS: fix dereferencing NULL payload with nonzero length
sys_add_key() and the KEYCTL_UPDATE operation of sys_keyctl() allowed a
NULL payload with nonzero length to be passed to the key type's
->preparse(), ->instantiate(), and/or ->update() methods. Various key
types including asymmetric, cifs.idmap, cifs.spnego, and pkcs7_test did
not handle this case, allowing an unprivileged user to trivially cause a
NULL pointer dereference (kernel oops) if one of these key types was
present. Fix it by doing the copy_from_user() when 'plen' is nonzero
rather than when '_payload' is non-NULL, causing the syscall to fail
with EFAULT as expected when an invalid buffer is specified.
Cc: [email protected] # 2.6.10+
Signed-off-by: Eric Biggers <[email protected]>
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]>
CWE ID: CWE-476 | SYSCALL_DEFINE5(add_key, const char __user *, _type,
const char __user *, _description,
const void __user *, _payload,
size_t, plen,
key_serial_t, ringid)
{
key_ref_t keyring_ref, key_ref;
char type[32], *description;
void *payload;
long ret;
ret = -EINVAL;
if (plen > 1024 * 1024 - 1)
goto error;
/* draw all the data into kernel space */
ret = key_get_type_from_user(type, _type, sizeof(type));
if (ret < 0)
goto error;
description = NULL;
if (_description) {
description = strndup_user(_description, KEY_MAX_DESC_SIZE);
if (IS_ERR(description)) {
ret = PTR_ERR(description);
goto error;
}
if (!*description) {
kfree(description);
description = NULL;
} else if ((description[0] == '.') &&
(strncmp(type, "keyring", 7) == 0)) {
ret = -EPERM;
goto error2;
}
}
/* pull the payload in if one was supplied */
payload = NULL;
if (plen) {
ret = -ENOMEM;
payload = kvmalloc(plen, GFP_KERNEL);
if (!payload)
goto error2;
ret = -EFAULT;
if (copy_from_user(payload, _payload, plen) != 0)
goto error3;
}
/* find the target keyring (which must be writable) */
keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error3;
}
/* create or update the requested key and add it to the target
* keyring */
key_ref = key_create_or_update(keyring_ref, type, description,
payload, plen, KEY_PERM_UNDEF,
KEY_ALLOC_IN_QUOTA);
if (!IS_ERR(key_ref)) {
ret = key_ref_to_ptr(key_ref)->serial;
key_ref_put(key_ref);
}
else {
ret = PTR_ERR(key_ref);
}
key_ref_put(keyring_ref);
error3:
kvfree(payload);
error2:
kfree(description);
error:
return ret;
}
| 24,726 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void RunAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
uint32_t max_error = 0;
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
for (int j = 0; j < kNumCoeffs; ++j) {
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
test_input_block[j] = src[j] - dst[j];
}
REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
test_temp_block, pitch_));
REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
for (int j = 0; j < kNumCoeffs; ++j) {
const uint32_t diff = dst[j] - src[j];
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
total_error += error;
}
}
EXPECT_GE(1u, max_error)
<< "Error: 16x16 FHT/IHT has an individual round trip error > 1";
EXPECT_GE(count_test_block , total_error)
<< "Error: 16x16 FHT/IHT has average round trip error > 1 per block";
}
Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478
DO NOT MERGE - libvpx: Pull from upstream
Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06
BUG=23452792
Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec
CWE ID: CWE-119 | void RunAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
uint32_t max_error = 0;
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
#if CONFIG_VP9_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
#endif
// Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
if (bit_depth_ == VPX_BITS_8) {
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
test_input_block[j] = src[j] - dst[j];
#if CONFIG_VP9_HIGHBITDEPTH
} else {
src16[j] = rnd.Rand16() & mask_;
dst16[j] = rnd.Rand16() & mask_;
test_input_block[j] = src16[j] - dst16[j];
#endif
}
}
ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
test_temp_block, pitch_));
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(
RunInvTxfm(test_temp_block, dst, pitch_));
#if CONFIG_VP9_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(
RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
#endif
}
for (int j = 0; j < kNumCoeffs; ++j) {
#if CONFIG_VP9_HIGHBITDEPTH
const uint32_t diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
const uint32_t diff = dst[j] - src[j];
#endif
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
total_error += error;
}
}
EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error)
<< "Error: 16x16 FHT/IHT has an individual round trip error > 1";
EXPECT_GE(count_test_block << 2 * (bit_depth_ - 8), total_error)
<< "Error: 16x16 FHT/IHT has average round trip error > 1 per block";
}
| 2,902 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: WandExport MagickBooleanType MogrifyImageList(ImageInfo *image_info,
const int argc,const char **argv,Image **images,ExceptionInfo *exception)
{
ChannelType
channel;
const char
*option;
ImageInfo
*mogrify_info;
MagickStatusType
status;
QuantizeInfo
*quantize_info;
register ssize_t
i;
ssize_t
count,
index;
/*
Apply options to the image list.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(images != (Image **) NULL);
assert((*images)->previous == (Image *) NULL);
assert((*images)->signature == MagickCoreSignature);
if ((*images)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
(*images)->filename);
if ((argc <= 0) || (*argv == (char *) NULL))
return(MagickTrue);
mogrify_info=CloneImageInfo(image_info);
quantize_info=AcquireQuantizeInfo(mogrify_info);
channel=mogrify_info->channel;
status=MagickTrue;
for (i=0; i < (ssize_t) argc; i++)
{
if (*images == (Image *) NULL)
break;
option=argv[i];
if (IsCommandOption(option) == MagickFalse)
continue;
count=ParseCommandOption(MagickCommandOptions,MagickFalse,option);
count=MagickMax(count,0L);
if ((i+count) >= (ssize_t) argc)
break;
status=MogrifyImageInfo(mogrify_info,(int) count+1,argv+i,exception);
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("affinity",option+1) == 0)
{
(void) SyncImagesSettings(mogrify_info,*images);
if (*option == '+')
{
(void) RemapImages(quantize_info,*images,(Image *) NULL);
InheritException(exception,&(*images)->exception);
break;
}
i++;
break;
}
if (LocaleCompare("append",option+1) == 0)
{
Image
*append_image;
(void) SyncImagesSettings(mogrify_info,*images);
append_image=AppendImages(*images,*option == '-' ? MagickTrue :
MagickFalse,exception);
if (append_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=append_image;
break;
}
if (LocaleCompare("average",option+1) == 0)
{
Image
*average_image;
/*
Average an image sequence (deprecated).
*/
(void) SyncImagesSettings(mogrify_info,*images);
average_image=EvaluateImages(*images,MeanEvaluateOperator,
exception);
if (average_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=average_image;
break;
}
break;
}
case 'c':
{
if (LocaleCompare("channel",option+1) == 0)
{
if (*option == '+')
{
channel=DefaultChannels;
break;
}
channel=(ChannelType) ParseChannelOption(argv[i+1]);
break;
}
if (LocaleCompare("clut",option+1) == 0)
{
Image
*clut_image,
*image;
(void) SyncImagesSettings(mogrify_info,*images);
image=RemoveFirstImageFromList(images);
clut_image=RemoveFirstImageFromList(images);
if (clut_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
(void) ClutImageChannel(image,channel,clut_image);
clut_image=DestroyImage(clut_image);
InheritException(exception,&image->exception);
*images=DestroyImageList(*images);
*images=image;
break;
}
if (LocaleCompare("coalesce",option+1) == 0)
{
Image
*coalesce_image;
(void) SyncImagesSettings(mogrify_info,*images);
coalesce_image=CoalesceImages(*images,exception);
if (coalesce_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=coalesce_image;
break;
}
if (LocaleCompare("combine",option+1) == 0)
{
Image
*combine_image;
(void) SyncImagesSettings(mogrify_info,*images);
combine_image=CombineImages(*images,channel,exception);
if (combine_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=combine_image;
break;
}
if (LocaleCompare("compare",option+1) == 0)
{
const char
*option;
double
distortion;
Image
*difference_image,
*image,
*reconstruct_image;
MetricType
metric;
/*
Mathematically and visually annotate the difference between an
image and its reconstruction.
*/
(void) SyncImagesSettings(mogrify_info,*images);
image=RemoveFirstImageFromList(images);
reconstruct_image=RemoveFirstImageFromList(images);
if (reconstruct_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
metric=UndefinedMetric;
option=GetImageOption(image_info,"metric");
if (option != (const char *) NULL)
metric=(MetricType) ParseCommandOption(MagickMetricOptions,
MagickFalse,option);
difference_image=CompareImageChannels(image,reconstruct_image,
channel,metric,&distortion,exception);
if (difference_image == (Image *) NULL)
break;
if (*images != (Image *) NULL)
*images=DestroyImageList(*images);
*images=difference_image;
break;
}
if (LocaleCompare("complex",option+1) == 0)
{
ComplexOperator
op;
Image
*complex_images;
(void) SyncImageSettings(mogrify_info,*images);
op=(ComplexOperator) ParseCommandOption(MagickComplexOptions,
MagickFalse,argv[i+1]);
complex_images=ComplexImages(*images,op,exception);
if (complex_images == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=complex_images;
break;
}
if (LocaleCompare("composite",option+1) == 0)
{
Image
*mask_image,
*composite_image,
*image;
RectangleInfo
geometry;
(void) SyncImagesSettings(mogrify_info,*images);
image=RemoveFirstImageFromList(images);
composite_image=RemoveFirstImageFromList(images);
if (composite_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
(void) TransformImage(&composite_image,(char *) NULL,
composite_image->geometry);
SetGeometry(composite_image,&geometry);
(void) ParseAbsoluteGeometry(composite_image->geometry,&geometry);
GravityAdjustGeometry(image->columns,image->rows,image->gravity,
&geometry);
mask_image=RemoveFirstImageFromList(images);
if (mask_image != (Image *) NULL)
{
if ((image->compose == DisplaceCompositeOp) ||
(image->compose == DistortCompositeOp))
{
/*
Merge Y displacement into X displacement image.
*/
(void) CompositeImage(composite_image,CopyGreenCompositeOp,
mask_image,0,0);
mask_image=DestroyImage(mask_image);
}
else
{
/*
Set a blending mask for the composition.
*/
if (image->mask != (Image *) NULL)
image->mask=DestroyImage(image->mask);
image->mask=mask_image;
(void) NegateImage(image->mask,MagickFalse);
}
}
(void) CompositeImageChannel(image,channel,image->compose,
composite_image,geometry.x,geometry.y);
if (mask_image != (Image *) NULL)
{
image->mask=DestroyImage(image->mask);
mask_image=image->mask;
}
composite_image=DestroyImage(composite_image);
InheritException(exception,&image->exception);
*images=DestroyImageList(*images);
*images=image;
break;
}
if (LocaleCompare("copy",option+1) == 0)
{
Image
*source_image;
OffsetInfo
offset;
RectangleInfo
geometry;
/*
Copy image pixels.
*/
(void) SyncImageSettings(mogrify_info,*images);
(void) ParsePageGeometry(*images,argv[i+2],&geometry,exception);
offset.x=geometry.x;
offset.y=geometry.y;
source_image=(*images);
if (source_image->next != (Image *) NULL)
source_image=source_image->next;
(void) ParsePageGeometry(source_image,argv[i+1],&geometry,
exception);
status=CopyImagePixels(*images,source_image,&geometry,&offset,
exception);
break;
}
break;
}
case 'd':
{
if (LocaleCompare("deconstruct",option+1) == 0)
{
Image
*deconstruct_image;
(void) SyncImagesSettings(mogrify_info,*images);
deconstruct_image=DeconstructImages(*images,exception);
if (deconstruct_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=deconstruct_image;
break;
}
if (LocaleCompare("delete",option+1) == 0)
{
if (*option == '+')
DeleteImages(images,"-1",exception);
else
DeleteImages(images,argv[i+1],exception);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
if (*option == '+')
{
quantize_info->dither=MagickFalse;
break;
}
quantize_info->dither=MagickTrue;
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("duplicate",option+1) == 0)
{
Image
*duplicate_images;
if (*option == '+')
duplicate_images=DuplicateImages(*images,1,"-1",exception);
else
{
const char
*p;
size_t
number_duplicates;
number_duplicates=(size_t) StringToLong(argv[i+1]);
p=strchr(argv[i+1],',');
if (p == (const char *) NULL)
duplicate_images=DuplicateImages(*images,number_duplicates,
"-1",exception);
else
duplicate_images=DuplicateImages(*images,number_duplicates,p,
exception);
}
AppendImageToList(images, duplicate_images);
(void) SyncImagesSettings(mogrify_info,*images);
break;
}
break;
}
case 'e':
{
if (LocaleCompare("evaluate-sequence",option+1) == 0)
{
Image
*evaluate_image;
MagickEvaluateOperator
op;
(void) SyncImageSettings(mogrify_info,*images);
op=(MagickEvaluateOperator) ParseCommandOption(
MagickEvaluateOptions,MagickFalse,argv[i+1]);
evaluate_image=EvaluateImages(*images,op,exception);
if (evaluate_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=evaluate_image;
break;
}
break;
}
case 'f':
{
if (LocaleCompare("fft",option+1) == 0)
{
Image
*fourier_image;
/*
Implements the discrete Fourier transform (DFT).
*/
(void) SyncImageSettings(mogrify_info,*images);
fourier_image=ForwardFourierTransformImage(*images,*option == '-' ?
MagickTrue : MagickFalse,exception);
if (fourier_image == (Image *) NULL)
break;
*images=DestroyImageList(*images);
*images=fourier_image;
break;
}
if (LocaleCompare("flatten",option+1) == 0)
{
Image
*flatten_image;
(void) SyncImagesSettings(mogrify_info,*images);
flatten_image=MergeImageLayers(*images,FlattenLayer,exception);
if (flatten_image == (Image *) NULL)
break;
*images=DestroyImageList(*images);
*images=flatten_image;
break;
}
if (LocaleCompare("fx",option+1) == 0)
{
Image
*fx_image;
(void) SyncImagesSettings(mogrify_info,*images);
fx_image=FxImageChannel(*images,channel,argv[i+1],exception);
if (fx_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=fx_image;
break;
}
break;
}
case 'h':
{
if (LocaleCompare("hald-clut",option+1) == 0)
{
Image
*hald_image,
*image;
(void) SyncImagesSettings(mogrify_info,*images);
image=RemoveFirstImageFromList(images);
hald_image=RemoveFirstImageFromList(images);
if (hald_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
(void) HaldClutImageChannel(image,channel,hald_image);
hald_image=DestroyImage(hald_image);
InheritException(exception,&image->exception);
if (*images != (Image *) NULL)
*images=DestroyImageList(*images);
*images=image;
break;
}
break;
}
case 'i':
{
if (LocaleCompare("ift",option+1) == 0)
{
Image
*fourier_image,
*magnitude_image,
*phase_image;
/*
Implements the inverse fourier discrete Fourier transform (DFT).
*/
(void) SyncImagesSettings(mogrify_info,*images);
magnitude_image=RemoveFirstImageFromList(images);
phase_image=RemoveFirstImageFromList(images);
if (phase_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
fourier_image=InverseFourierTransformImage(magnitude_image,
phase_image,*option == '-' ? MagickTrue : MagickFalse,exception);
if (fourier_image == (Image *) NULL)
break;
if (*images != (Image *) NULL)
*images=DestroyImageList(*images);
*images=fourier_image;
break;
}
if (LocaleCompare("insert",option+1) == 0)
{
Image
*p,
*q;
index=0;
if (*option != '+')
index=(ssize_t) StringToLong(argv[i+1]);
p=RemoveLastImageFromList(images);
if (p == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"NoSuchImage","`%s'",argv[i+1]);
status=MagickFalse;
break;
}
q=p;
if (index == 0)
PrependImageToList(images,q);
else
if (index == (ssize_t) GetImageListLength(*images))
AppendImageToList(images,q);
else
{
q=GetImageFromList(*images,index-1);
if (q == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"NoSuchImage","`%s'",argv[i+1]);
status=MagickFalse;
break;
}
InsertImageInList(&q,p);
}
*images=GetFirstImageInList(q);
break;
}
break;
}
case 'l':
{
if (LocaleCompare("layers",option+1) == 0)
{
Image
*layers;
ImageLayerMethod
method;
(void) SyncImagesSettings(mogrify_info,*images);
layers=(Image *) NULL;
method=(ImageLayerMethod) ParseCommandOption(MagickLayerOptions,
MagickFalse,argv[i+1]);
switch (method)
{
case CoalesceLayer:
{
layers=CoalesceImages(*images,exception);
break;
}
case CompareAnyLayer:
case CompareClearLayer:
case CompareOverlayLayer:
default:
{
layers=CompareImageLayers(*images,method,exception);
break;
}
case MergeLayer:
case FlattenLayer:
case MosaicLayer:
case TrimBoundsLayer:
{
layers=MergeImageLayers(*images,method,exception);
break;
}
case DisposeLayer:
{
layers=DisposeImages(*images,exception);
break;
}
case OptimizeImageLayer:
{
layers=OptimizeImageLayers(*images,exception);
break;
}
case OptimizePlusLayer:
{
layers=OptimizePlusImageLayers(*images,exception);
break;
}
case OptimizeTransLayer:
{
OptimizeImageTransparency(*images,exception);
break;
}
case RemoveDupsLayer:
{
RemoveDuplicateLayers(images,exception);
break;
}
case RemoveZeroLayer:
{
RemoveZeroDelayLayers(images,exception);
break;
}
case OptimizeLayer:
{
/*
General Purpose, GIF Animation Optimizer.
*/
layers=CoalesceImages(*images,exception);
if (layers == (Image *) NULL)
{
status=MagickFalse;
break;
}
InheritException(exception,&layers->exception);
*images=DestroyImageList(*images);
*images=layers;
layers=OptimizeImageLayers(*images,exception);
if (layers == (Image *) NULL)
{
status=MagickFalse;
break;
}
InheritException(exception,&layers->exception);
*images=DestroyImageList(*images);
*images=layers;
layers=(Image *) NULL;
OptimizeImageTransparency(*images,exception);
InheritException(exception,&(*images)->exception);
(void) RemapImages(quantize_info,*images,(Image *) NULL);
break;
}
case CompositeLayer:
{
CompositeOperator
compose;
Image
*source;
RectangleInfo
geometry;
/*
Split image sequence at the first 'NULL:' image.
*/
source=(*images);
while (source != (Image *) NULL)
{
source=GetNextImageInList(source);
if ((source != (Image *) NULL) &&
(LocaleCompare(source->magick,"NULL") == 0))
break;
}
if (source != (Image *) NULL)
{
if ((GetPreviousImageInList(source) == (Image *) NULL) ||
(GetNextImageInList(source) == (Image *) NULL))
source=(Image *) NULL;
else
{
/*
Separate the two lists, junk the null: image.
*/
source=SplitImageList(source->previous);
DeleteImageFromList(&source);
}
}
if (source == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"MissingNullSeparator","layers Composite");
status=MagickFalse;
break;
}
/*
Adjust offset with gravity and virtual canvas.
*/
SetGeometry(*images,&geometry);
(void) ParseAbsoluteGeometry((*images)->geometry,&geometry);
geometry.width=source->page.width != 0 ?
source->page.width : source->columns;
geometry.height=source->page.height != 0 ?
source->page.height : source->rows;
GravityAdjustGeometry((*images)->page.width != 0 ?
(*images)->page.width : (*images)->columns,
(*images)->page.height != 0 ? (*images)->page.height :
(*images)->rows,(*images)->gravity,&geometry);
compose=OverCompositeOp;
option=GetImageOption(mogrify_info,"compose");
if (option != (const char *) NULL)
compose=(CompositeOperator) ParseCommandOption(
MagickComposeOptions,MagickFalse,option);
CompositeLayers(*images,compose,source,geometry.x,geometry.y,
exception);
source=DestroyImageList(source);
break;
}
}
if (layers == (Image *) NULL)
break;
InheritException(exception,&layers->exception);
*images=DestroyImageList(*images);
*images=layers;
break;
}
break;
}
case 'm':
{
if (LocaleCompare("map",option+1) == 0)
{
(void) SyncImagesSettings(mogrify_info,*images);
if (*option == '+')
{
(void) RemapImages(quantize_info,*images,(Image *) NULL);
InheritException(exception,&(*images)->exception);
break;
}
i++;
break;
}
if (LocaleCompare("maximum",option+1) == 0)
{
Image
*maximum_image;
/*
Maximum image sequence (deprecated).
*/
(void) SyncImagesSettings(mogrify_info,*images);
maximum_image=EvaluateImages(*images,MaxEvaluateOperator,exception);
if (maximum_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=maximum_image;
break;
}
if (LocaleCompare("minimum",option+1) == 0)
{
Image
*minimum_image;
/*
Minimum image sequence (deprecated).
*/
(void) SyncImagesSettings(mogrify_info,*images);
minimum_image=EvaluateImages(*images,MinEvaluateOperator,exception);
if (minimum_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=minimum_image;
break;
}
if (LocaleCompare("morph",option+1) == 0)
{
Image
*morph_image;
(void) SyncImagesSettings(mogrify_info,*images);
morph_image=MorphImages(*images,StringToUnsignedLong(argv[i+1]),
exception);
if (morph_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=morph_image;
break;
}
if (LocaleCompare("mosaic",option+1) == 0)
{
Image
*mosaic_image;
(void) SyncImagesSettings(mogrify_info,*images);
mosaic_image=MergeImageLayers(*images,MosaicLayer,exception);
if (mosaic_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=mosaic_image;
break;
}
break;
}
case 'p':
{
if (LocaleCompare("poly",option+1) == 0)
{
char
*args,
token[MaxTextExtent];
const char
*p;
double
*arguments;
Image
*polynomial_image;
register ssize_t
x;
size_t
number_arguments;
/*
Polynomial image.
*/
(void) SyncImageSettings(mogrify_info,*images);
args=InterpretImageProperties(mogrify_info,*images,argv[i+1]);
InheritException(exception,&(*images)->exception);
if (args == (char *) NULL)
break;
p=(char *) args;
for (x=0; *p != '\0'; x++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
}
number_arguments=(size_t) x;
arguments=(double *) AcquireQuantumMemory(number_arguments,
sizeof(*arguments));
if (arguments == (double *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,
"MemoryAllocationFailed",(*images)->filename);
(void) memset(arguments,0,number_arguments*
sizeof(*arguments));
p=(char *) args;
for (x=0; (x < (ssize_t) number_arguments) && (*p != '\0'); x++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
arguments[x]=StringToDouble(token,(char **) NULL);
}
args=DestroyString(args);
polynomial_image=PolynomialImageChannel(*images,channel,
number_arguments >> 1,arguments,exception);
arguments=(double *) RelinquishMagickMemory(arguments);
if (polynomial_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=polynomial_image;
break;
}
if (LocaleCompare("print",option+1) == 0)
{
char
*string;
(void) SyncImagesSettings(mogrify_info,*images);
string=InterpretImageProperties(mogrify_info,*images,argv[i+1]);
if (string == (char *) NULL)
break;
InheritException(exception,&(*images)->exception);
(void) FormatLocaleFile(stdout,"%s",string);
string=DestroyString(string);
}
if (LocaleCompare("process",option+1) == 0)
{
char
**arguments;
int
j,
number_arguments;
(void) SyncImagesSettings(mogrify_info,*images);
arguments=StringToArgv(argv[i+1],&number_arguments);
if (arguments == (char **) NULL)
break;
if ((argc > 1) && (strchr(arguments[1],'=') != (char *) NULL))
{
char
breaker,
quote,
*token;
const char
*arguments;
int
next,
status;
size_t
length;
TokenInfo
*token_info;
/*
Support old style syntax, filter="-option arg".
*/
length=strlen(argv[i+1]);
token=(char *) NULL;
if (~length >= (MaxTextExtent-1))
token=(char *) AcquireQuantumMemory(length+MaxTextExtent,
sizeof(*token));
if (token == (char *) NULL)
break;
next=0;
arguments=argv[i+1];
token_info=AcquireTokenInfo();
            status=Tokenizer(token_info,0,token,length,arguments,"","=",
              "\"",'\0',&breaker,&next,&quote);
token_info=DestroyTokenInfo(token_info);
if (status == 0)
{
const char
*argv;
argv=(&(arguments[next]));
(void) InvokeDynamicImageFilter(token,&(*images),1,&argv,
exception);
}
token=DestroyString(token);
break;
}
(void) SubstituteString(&arguments[1],"-","");
(void) InvokeDynamicImageFilter(arguments[1],&(*images),
number_arguments-2,(const char **) arguments+2,exception);
for (j=0; j < number_arguments; j++)
arguments[j]=DestroyString(arguments[j]);
arguments=(char **) RelinquishMagickMemory(arguments);
break;
}
break;
}
case 'r':
{
if (LocaleCompare("reverse",option+1) == 0)
{
ReverseImageList(images);
InheritException(exception,&(*images)->exception);
break;
}
break;
}
case 's':
{
if (LocaleCompare("smush",option+1) == 0)
{
Image
*smush_image;
ssize_t
offset;
(void) SyncImagesSettings(mogrify_info,*images);
offset=(ssize_t) StringToLong(argv[i+1]);
smush_image=SmushImages(*images,*option == '-' ? MagickTrue :
MagickFalse,offset,exception);
if (smush_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=smush_image;
break;
}
if (LocaleCompare("swap",option+1) == 0)
{
Image
*p,
*q,
*u,
*v;
ssize_t
swap_index;
index=(-1);
swap_index=(-2);
if (*option != '+')
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
swap_index=(-1);
flags=ParseGeometry(argv[i+1],&geometry_info);
index=(ssize_t) geometry_info.rho;
if ((flags & SigmaValue) != 0)
swap_index=(ssize_t) geometry_info.sigma;
}
p=GetImageFromList(*images,index);
q=GetImageFromList(*images,swap_index);
if ((p == (Image *) NULL) || (q == (Image *) NULL))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"NoSuchImage","`%s'",(*images)->filename);
status=MagickFalse;
break;
}
if (p == q)
break;
u=CloneImage(p,0,0,MagickTrue,exception);
if (u == (Image *) NULL)
break;
v=CloneImage(q,0,0,MagickTrue,exception);
if (v == (Image *) NULL)
{
u=DestroyImage(u);
break;
}
ReplaceImageInList(&p,v);
ReplaceImageInList(&q,u);
*images=GetFirstImageInList(q);
break;
}
break;
}
case 'w':
{
if (LocaleCompare("write",option+1) == 0)
{
char
key[MaxTextExtent];
Image
*write_images;
ImageInfo
*write_info;
(void) SyncImagesSettings(mogrify_info,*images);
(void) FormatLocaleString(key,MaxTextExtent,"cache:%s",argv[i+1]);
(void) DeleteImageRegistry(key);
write_images=(*images);
if (*option == '+')
write_images=CloneImageList(*images,exception);
write_info=CloneImageInfo(mogrify_info);
status&=WriteImages(write_info,write_images,argv[i+1],exception);
write_info=DestroyImageInfo(write_info);
if (*option == '+')
write_images=DestroyImageList(write_images);
break;
}
break;
}
default:
break;
}
i+=count;
}
quantize_info=DestroyQuantizeInfo(quantize_info);
mogrify_info=DestroyImageInfo(mogrify_info);
status&=MogrifyImageInfo(image_info,argc,argv,exception);
return(status != 0 ? MagickTrue : MagickFalse);
}
Commit Message: https://github.com/ImageMagick/ImageMagick/issues/1616
CWE ID: CWE-399 | WandExport MagickBooleanType MogrifyImageList(ImageInfo *image_info,
const int argc,const char **argv,Image **images,ExceptionInfo *exception)
{
ChannelType
channel;
const char
*option;
ImageInfo
*mogrify_info;
MagickStatusType
status;
QuantizeInfo
*quantize_info;
register ssize_t
i;
ssize_t
count,
index;
/*
Apply options to the image list.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(images != (Image **) NULL);
assert((*images)->previous == (Image *) NULL);
assert((*images)->signature == MagickCoreSignature);
if ((*images)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
(*images)->filename);
if ((argc <= 0) || (*argv == (char *) NULL))
return(MagickTrue);
mogrify_info=CloneImageInfo(image_info);
quantize_info=AcquireQuantizeInfo(mogrify_info);
channel=mogrify_info->channel;
status=MagickTrue;
for (i=0; i < (ssize_t) argc; i++)
{
if (*images == (Image *) NULL)
break;
option=argv[i];
if (IsCommandOption(option) == MagickFalse)
continue;
count=ParseCommandOption(MagickCommandOptions,MagickFalse,option);
count=MagickMax(count,0L);
if ((i+count) >= (ssize_t) argc)
break;
status=MogrifyImageInfo(mogrify_info,(int) count+1,argv+i,exception);
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("affinity",option+1) == 0)
{
(void) SyncImagesSettings(mogrify_info,*images);
if (*option == '+')
{
(void) RemapImages(quantize_info,*images,(Image *) NULL);
InheritException(exception,&(*images)->exception);
break;
}
i++;
break;
}
if (LocaleCompare("append",option+1) == 0)
{
Image
*append_image;
(void) SyncImagesSettings(mogrify_info,*images);
append_image=AppendImages(*images,*option == '-' ? MagickTrue :
MagickFalse,exception);
if (append_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=append_image;
break;
}
if (LocaleCompare("average",option+1) == 0)
{
Image
*average_image;
/*
Average an image sequence (deprecated).
*/
(void) SyncImagesSettings(mogrify_info,*images);
average_image=EvaluateImages(*images,MeanEvaluateOperator,
exception);
if (average_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=average_image;
break;
}
break;
}
case 'c':
{
if (LocaleCompare("channel",option+1) == 0)
{
if (*option == '+')
{
channel=DefaultChannels;
break;
}
channel=(ChannelType) ParseChannelOption(argv[i+1]);
break;
}
if (LocaleCompare("clut",option+1) == 0)
{
Image
*clut_image,
*image;
(void) SyncImagesSettings(mogrify_info,*images);
image=RemoveFirstImageFromList(images);
clut_image=RemoveFirstImageFromList(images);
if (clut_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
(void) ClutImageChannel(image,channel,clut_image);
clut_image=DestroyImage(clut_image);
InheritException(exception,&image->exception);
*images=DestroyImageList(*images);
*images=image;
break;
}
if (LocaleCompare("coalesce",option+1) == 0)
{
Image
*coalesce_image;
(void) SyncImagesSettings(mogrify_info,*images);
coalesce_image=CoalesceImages(*images,exception);
if (coalesce_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=coalesce_image;
break;
}
if (LocaleCompare("combine",option+1) == 0)
{
Image
*combine_image;
(void) SyncImagesSettings(mogrify_info,*images);
combine_image=CombineImages(*images,channel,exception);
if (combine_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=combine_image;
break;
}
if (LocaleCompare("compare",option+1) == 0)
{
const char
*option;
double
distortion;
Image
*difference_image,
*image,
*reconstruct_image;
MetricType
metric;
/*
Mathematically and visually annotate the difference between an
image and its reconstruction.
*/
(void) SyncImagesSettings(mogrify_info,*images);
image=RemoveFirstImageFromList(images);
reconstruct_image=RemoveFirstImageFromList(images);
if (reconstruct_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
metric=UndefinedMetric;
option=GetImageOption(image_info,"metric");
if (option != (const char *) NULL)
metric=(MetricType) ParseCommandOption(MagickMetricOptions,
MagickFalse,option);
difference_image=CompareImageChannels(image,reconstruct_image,
channel,metric,&distortion,exception);
if (difference_image == (Image *) NULL)
break;
reconstruct_image=DestroyImage(reconstruct_image);
image=DestroyImage(image);
if (*images != (Image *) NULL)
*images=DestroyImageList(*images);
*images=difference_image;
break;
}
if (LocaleCompare("complex",option+1) == 0)
{
ComplexOperator
op;
Image
*complex_images;
(void) SyncImageSettings(mogrify_info,*images);
op=(ComplexOperator) ParseCommandOption(MagickComplexOptions,
MagickFalse,argv[i+1]);
complex_images=ComplexImages(*images,op,exception);
if (complex_images == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=complex_images;
break;
}
if (LocaleCompare("composite",option+1) == 0)
{
Image
*mask_image,
*composite_image,
*image;
RectangleInfo
geometry;
(void) SyncImagesSettings(mogrify_info,*images);
image=RemoveFirstImageFromList(images);
composite_image=RemoveFirstImageFromList(images);
if (composite_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
(void) TransformImage(&composite_image,(char *) NULL,
composite_image->geometry);
SetGeometry(composite_image,&geometry);
(void) ParseAbsoluteGeometry(composite_image->geometry,&geometry);
GravityAdjustGeometry(image->columns,image->rows,image->gravity,
&geometry);
mask_image=RemoveFirstImageFromList(images);
if (mask_image != (Image *) NULL)
{
if ((image->compose == DisplaceCompositeOp) ||
(image->compose == DistortCompositeOp))
{
/*
Merge Y displacement into X displacement image.
*/
(void) CompositeImage(composite_image,CopyGreenCompositeOp,
mask_image,0,0);
mask_image=DestroyImage(mask_image);
}
else
{
/*
Set a blending mask for the composition.
*/
if (image->mask != (Image *) NULL)
image->mask=DestroyImage(image->mask);
image->mask=mask_image;
(void) NegateImage(image->mask,MagickFalse);
}
}
(void) CompositeImageChannel(image,channel,image->compose,
composite_image,geometry.x,geometry.y);
if (mask_image != (Image *) NULL)
{
image->mask=DestroyImage(image->mask);
mask_image=image->mask;
}
composite_image=DestroyImage(composite_image);
InheritException(exception,&image->exception);
*images=DestroyImageList(*images);
*images=image;
break;
}
if (LocaleCompare("copy",option+1) == 0)
{
Image
*source_image;
OffsetInfo
offset;
RectangleInfo
geometry;
/*
Copy image pixels.
*/
(void) SyncImageSettings(mogrify_info,*images);
(void) ParsePageGeometry(*images,argv[i+2],&geometry,exception);
offset.x=geometry.x;
offset.y=geometry.y;
source_image=(*images);
if (source_image->next != (Image *) NULL)
source_image=source_image->next;
(void) ParsePageGeometry(source_image,argv[i+1],&geometry,
exception);
status=CopyImagePixels(*images,source_image,&geometry,&offset,
exception);
break;
}
break;
}
case 'd':
{
if (LocaleCompare("deconstruct",option+1) == 0)
{
Image
*deconstruct_image;
(void) SyncImagesSettings(mogrify_info,*images);
deconstruct_image=DeconstructImages(*images,exception);
if (deconstruct_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=deconstruct_image;
break;
}
if (LocaleCompare("delete",option+1) == 0)
{
if (*option == '+')
DeleteImages(images,"-1",exception);
else
DeleteImages(images,argv[i+1],exception);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
if (*option == '+')
{
quantize_info->dither=MagickFalse;
break;
}
quantize_info->dither=MagickTrue;
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("duplicate",option+1) == 0)
{
Image
*duplicate_images;
if (*option == '+')
duplicate_images=DuplicateImages(*images,1,"-1",exception);
else
{
const char
*p;
size_t
number_duplicates;
number_duplicates=(size_t) StringToLong(argv[i+1]);
p=strchr(argv[i+1],',');
if (p == (const char *) NULL)
duplicate_images=DuplicateImages(*images,number_duplicates,
"-1",exception);
else
duplicate_images=DuplicateImages(*images,number_duplicates,p,
exception);
}
AppendImageToList(images, duplicate_images);
(void) SyncImagesSettings(mogrify_info,*images);
break;
}
break;
}
case 'e':
{
if (LocaleCompare("evaluate-sequence",option+1) == 0)
{
Image
*evaluate_image;
MagickEvaluateOperator
op;
(void) SyncImageSettings(mogrify_info,*images);
op=(MagickEvaluateOperator) ParseCommandOption(
MagickEvaluateOptions,MagickFalse,argv[i+1]);
evaluate_image=EvaluateImages(*images,op,exception);
if (evaluate_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=evaluate_image;
break;
}
break;
}
case 'f':
{
if (LocaleCompare("fft",option+1) == 0)
{
Image
*fourier_image;
/*
Implements the discrete Fourier transform (DFT).
*/
(void) SyncImageSettings(mogrify_info,*images);
fourier_image=ForwardFourierTransformImage(*images,*option == '-' ?
MagickTrue : MagickFalse,exception);
if (fourier_image == (Image *) NULL)
break;
*images=DestroyImageList(*images);
*images=fourier_image;
break;
}
if (LocaleCompare("flatten",option+1) == 0)
{
Image
*flatten_image;
(void) SyncImagesSettings(mogrify_info,*images);
flatten_image=MergeImageLayers(*images,FlattenLayer,exception);
if (flatten_image == (Image *) NULL)
break;
*images=DestroyImageList(*images);
*images=flatten_image;
break;
}
if (LocaleCompare("fx",option+1) == 0)
{
Image
*fx_image;
(void) SyncImagesSettings(mogrify_info,*images);
fx_image=FxImageChannel(*images,channel,argv[i+1],exception);
if (fx_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=fx_image;
break;
}
break;
}
case 'h':
{
if (LocaleCompare("hald-clut",option+1) == 0)
{
Image
*hald_image,
*image;
(void) SyncImagesSettings(mogrify_info,*images);
image=RemoveFirstImageFromList(images);
hald_image=RemoveFirstImageFromList(images);
if (hald_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
(void) HaldClutImageChannel(image,channel,hald_image);
hald_image=DestroyImage(hald_image);
InheritException(exception,&image->exception);
if (*images != (Image *) NULL)
*images=DestroyImageList(*images);
*images=image;
break;
}
break;
}
case 'i':
{
if (LocaleCompare("ift",option+1) == 0)
{
Image
*fourier_image,
*magnitude_image,
*phase_image;
/*
Implements the inverse fourier discrete Fourier transform (DFT).
*/
(void) SyncImagesSettings(mogrify_info,*images);
magnitude_image=RemoveFirstImageFromList(images);
phase_image=RemoveFirstImageFromList(images);
if (phase_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
fourier_image=InverseFourierTransformImage(magnitude_image,
phase_image,*option == '-' ? MagickTrue : MagickFalse,exception);
if (fourier_image == (Image *) NULL)
break;
if (*images != (Image *) NULL)
*images=DestroyImageList(*images);
*images=fourier_image;
break;
}
if (LocaleCompare("insert",option+1) == 0)
{
Image
*p,
*q;
index=0;
if (*option != '+')
index=(ssize_t) StringToLong(argv[i+1]);
p=RemoveLastImageFromList(images);
if (p == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"NoSuchImage","`%s'",argv[i+1]);
status=MagickFalse;
break;
}
q=p;
if (index == 0)
PrependImageToList(images,q);
else
if (index == (ssize_t) GetImageListLength(*images))
AppendImageToList(images,q);
else
{
q=GetImageFromList(*images,index-1);
if (q == (Image *) NULL)
{
p=DestroyImage(p);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"NoSuchImage","`%s'",argv[i+1]);
status=MagickFalse;
break;
}
InsertImageInList(&q,p);
}
*images=GetFirstImageInList(q);
break;
}
break;
}
case 'l':
{
if (LocaleCompare("layers",option+1) == 0)
{
Image
*layers;
ImageLayerMethod
method;
(void) SyncImagesSettings(mogrify_info,*images);
layers=(Image *) NULL;
method=(ImageLayerMethod) ParseCommandOption(MagickLayerOptions,
MagickFalse,argv[i+1]);
switch (method)
{
case CoalesceLayer:
{
layers=CoalesceImages(*images,exception);
break;
}
case CompareAnyLayer:
case CompareClearLayer:
case CompareOverlayLayer:
default:
{
layers=CompareImageLayers(*images,method,exception);
break;
}
case MergeLayer:
case FlattenLayer:
case MosaicLayer:
case TrimBoundsLayer:
{
layers=MergeImageLayers(*images,method,exception);
break;
}
case DisposeLayer:
{
layers=DisposeImages(*images,exception);
break;
}
case OptimizeImageLayer:
{
layers=OptimizeImageLayers(*images,exception);
break;
}
case OptimizePlusLayer:
{
layers=OptimizePlusImageLayers(*images,exception);
break;
}
case OptimizeTransLayer:
{
OptimizeImageTransparency(*images,exception);
break;
}
case RemoveDupsLayer:
{
RemoveDuplicateLayers(images,exception);
break;
}
case RemoveZeroLayer:
{
RemoveZeroDelayLayers(images,exception);
break;
}
case OptimizeLayer:
{
/*
General Purpose, GIF Animation Optimizer.
*/
layers=CoalesceImages(*images,exception);
if (layers == (Image *) NULL)
{
status=MagickFalse;
break;
}
InheritException(exception,&layers->exception);
*images=DestroyImageList(*images);
*images=layers;
layers=OptimizeImageLayers(*images,exception);
if (layers == (Image *) NULL)
{
status=MagickFalse;
break;
}
InheritException(exception,&layers->exception);
*images=DestroyImageList(*images);
*images=layers;
layers=(Image *) NULL;
OptimizeImageTransparency(*images,exception);
InheritException(exception,&(*images)->exception);
(void) RemapImages(quantize_info,*images,(Image *) NULL);
break;
}
case CompositeLayer:
{
CompositeOperator
compose;
Image
*source;
RectangleInfo
geometry;
/*
Split image sequence at the first 'NULL:' image.
*/
source=(*images);
while (source != (Image *) NULL)
{
source=GetNextImageInList(source);
if ((source != (Image *) NULL) &&
(LocaleCompare(source->magick,"NULL") == 0))
break;
}
if (source != (Image *) NULL)
{
if ((GetPreviousImageInList(source) == (Image *) NULL) ||
(GetNextImageInList(source) == (Image *) NULL))
source=(Image *) NULL;
else
{
/*
Separate the two lists, junk the null: image.
*/
source=SplitImageList(source->previous);
DeleteImageFromList(&source);
}
}
if (source == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"MissingNullSeparator","layers Composite");
status=MagickFalse;
break;
}
/*
Adjust offset with gravity and virtual canvas.
*/
SetGeometry(*images,&geometry);
(void) ParseAbsoluteGeometry((*images)->geometry,&geometry);
geometry.width=source->page.width != 0 ?
source->page.width : source->columns;
geometry.height=source->page.height != 0 ?
source->page.height : source->rows;
GravityAdjustGeometry((*images)->page.width != 0 ?
(*images)->page.width : (*images)->columns,
(*images)->page.height != 0 ? (*images)->page.height :
(*images)->rows,(*images)->gravity,&geometry);
compose=OverCompositeOp;
option=GetImageOption(mogrify_info,"compose");
if (option != (const char *) NULL)
compose=(CompositeOperator) ParseCommandOption(
MagickComposeOptions,MagickFalse,option);
CompositeLayers(*images,compose,source,geometry.x,geometry.y,
exception);
source=DestroyImageList(source);
break;
}
}
if (layers == (Image *) NULL)
break;
InheritException(exception,&layers->exception);
*images=DestroyImageList(*images);
*images=layers;
break;
}
break;
}
case 'm':
{
if (LocaleCompare("map",option+1) == 0)
{
(void) SyncImagesSettings(mogrify_info,*images);
if (*option == '+')
{
(void) RemapImages(quantize_info,*images,(Image *) NULL);
InheritException(exception,&(*images)->exception);
break;
}
i++;
break;
}
if (LocaleCompare("maximum",option+1) == 0)
{
Image
*maximum_image;
/*
Maximum image sequence (deprecated).
*/
(void) SyncImagesSettings(mogrify_info,*images);
maximum_image=EvaluateImages(*images,MaxEvaluateOperator,exception);
if (maximum_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=maximum_image;
break;
}
if (LocaleCompare("minimum",option+1) == 0)
{
Image
*minimum_image;
/*
Minimum image sequence (deprecated).
*/
(void) SyncImagesSettings(mogrify_info,*images);
minimum_image=EvaluateImages(*images,MinEvaluateOperator,exception);
if (minimum_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=minimum_image;
break;
}
if (LocaleCompare("morph",option+1) == 0)
{
Image
*morph_image;
(void) SyncImagesSettings(mogrify_info,*images);
morph_image=MorphImages(*images,StringToUnsignedLong(argv[i+1]),
exception);
if (morph_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=morph_image;
break;
}
if (LocaleCompare("mosaic",option+1) == 0)
{
Image
*mosaic_image;
(void) SyncImagesSettings(mogrify_info,*images);
mosaic_image=MergeImageLayers(*images,MosaicLayer,exception);
if (mosaic_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=mosaic_image;
break;
}
break;
}
case 'p':
{
if (LocaleCompare("poly",option+1) == 0)
{
char
*args,
token[MaxTextExtent];
const char
*p;
double
*arguments;
Image
*polynomial_image;
register ssize_t
x;
size_t
number_arguments;
/*
Polynomial image.
*/
(void) SyncImageSettings(mogrify_info,*images);
args=InterpretImageProperties(mogrify_info,*images,argv[i+1]);
InheritException(exception,&(*images)->exception);
if (args == (char *) NULL)
break;
p=(char *) args;
for (x=0; *p != '\0'; x++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
}
number_arguments=(size_t) x;
arguments=(double *) AcquireQuantumMemory(number_arguments,
sizeof(*arguments));
if (arguments == (double *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,
"MemoryAllocationFailed",(*images)->filename);
(void) memset(arguments,0,number_arguments*
sizeof(*arguments));
p=(char *) args;
for (x=0; (x < (ssize_t) number_arguments) && (*p != '\0'); x++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
arguments[x]=StringToDouble(token,(char **) NULL);
}
args=DestroyString(args);
polynomial_image=PolynomialImageChannel(*images,channel,
number_arguments >> 1,arguments,exception);
arguments=(double *) RelinquishMagickMemory(arguments);
if (polynomial_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=polynomial_image;
break;
}
if (LocaleCompare("print",option+1) == 0)
{
char
*string;
(void) SyncImagesSettings(mogrify_info,*images);
string=InterpretImageProperties(mogrify_info,*images,argv[i+1]);
if (string == (char *) NULL)
break;
InheritException(exception,&(*images)->exception);
(void) FormatLocaleFile(stdout,"%s",string);
string=DestroyString(string);
}
if (LocaleCompare("process",option+1) == 0)
{
char
**arguments;
int
j,
number_arguments;
(void) SyncImagesSettings(mogrify_info,*images);
arguments=StringToArgv(argv[i+1],&number_arguments);
if (arguments == (char **) NULL)
break;
if ((argc > 1) && (strchr(arguments[1],'=') != (char *) NULL))
{
char
breaker,
quote,
*token;
const char
*arguments;
int
next,
status;
size_t
length;
TokenInfo
*token_info;
/*
Support old style syntax, filter="-option arg".
*/
length=strlen(argv[i+1]);
token=(char *) NULL;
if (~length >= (MaxTextExtent-1))
token=(char *) AcquireQuantumMemory(length+MaxTextExtent,
sizeof(*token));
if (token == (char *) NULL)
break;
next=0;
arguments=argv[i+1];
token_info=AcquireTokenInfo();
            status=Tokenizer(token_info,0,token,length,arguments,"","=",
              "\"",'\0',&breaker,&next,&quote);
token_info=DestroyTokenInfo(token_info);
if (status == 0)
{
const char
*argv;
argv=(&(arguments[next]));
(void) InvokeDynamicImageFilter(token,&(*images),1,&argv,
exception);
}
token=DestroyString(token);
break;
}
(void) SubstituteString(&arguments[1],"-","");
(void) InvokeDynamicImageFilter(arguments[1],&(*images),
number_arguments-2,(const char **) arguments+2,exception);
for (j=0; j < number_arguments; j++)
arguments[j]=DestroyString(arguments[j]);
arguments=(char **) RelinquishMagickMemory(arguments);
break;
}
break;
}
case 'r':
{
if (LocaleCompare("reverse",option+1) == 0)
{
ReverseImageList(images);
InheritException(exception,&(*images)->exception);
break;
}
break;
}
case 's':
{
if (LocaleCompare("smush",option+1) == 0)
{
Image
*smush_image;
ssize_t
offset;
(void) SyncImagesSettings(mogrify_info,*images);
offset=(ssize_t) StringToLong(argv[i+1]);
smush_image=SmushImages(*images,*option == '-' ? MagickTrue :
MagickFalse,offset,exception);
if (smush_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=smush_image;
break;
}
if (LocaleCompare("swap",option+1) == 0)
{
Image
*p,
*q,
*u,
*v;
ssize_t
swap_index;
index=(-1);
swap_index=(-2);
if (*option != '+')
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
swap_index=(-1);
flags=ParseGeometry(argv[i+1],&geometry_info);
index=(ssize_t) geometry_info.rho;
if ((flags & SigmaValue) != 0)
swap_index=(ssize_t) geometry_info.sigma;
}
p=GetImageFromList(*images,index);
q=GetImageFromList(*images,swap_index);
if ((p == (Image *) NULL) || (q == (Image *) NULL))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"NoSuchImage","`%s'",(*images)->filename);
status=MagickFalse;
break;
}
if (p == q)
break;
u=CloneImage(p,0,0,MagickTrue,exception);
if (u == (Image *) NULL)
break;
v=CloneImage(q,0,0,MagickTrue,exception);
if (v == (Image *) NULL)
{
u=DestroyImage(u);
break;
}
ReplaceImageInList(&p,v);
ReplaceImageInList(&q,u);
*images=GetFirstImageInList(q);
break;
}
break;
}
case 'w':
{
if (LocaleCompare("write",option+1) == 0)
{
char
key[MaxTextExtent];
Image
*write_images;
ImageInfo
*write_info;
(void) SyncImagesSettings(mogrify_info,*images);
(void) FormatLocaleString(key,MaxTextExtent,"cache:%s",argv[i+1]);
(void) DeleteImageRegistry(key);
write_images=(*images);
if (*option == '+')
write_images=CloneImageList(*images,exception);
write_info=CloneImageInfo(mogrify_info);
status&=WriteImages(write_info,write_images,argv[i+1],exception);
write_info=DestroyImageInfo(write_info);
if (*option == '+')
write_images=DestroyImageList(write_images);
break;
}
break;
}
default:
break;
}
i+=count;
}
quantize_info=DestroyQuantizeInfo(quantize_info);
mogrify_info=DestroyImageInfo(mogrify_info);
status&=MogrifyImageInfo(image_info,argc,argv,exception);
return(status != 0 ? MagickTrue : MagickFalse);
}
| 13,493 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static spl_filesystem_object * spl_filesystem_object_create_info(spl_filesystem_object *source, char *file_path, int file_path_len, int use_copy, zend_class_entry *ce, zval *return_value TSRMLS_DC) /* {{{ */
{
spl_filesystem_object *intern;
zval *arg1;
zend_error_handling error_handling;
if (!file_path || !file_path_len) {
#if defined(PHP_WIN32)
zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot create SplFileInfo for empty path");
if (file_path && !use_copy) {
efree(file_path);
}
#else
if (file_path && !use_copy) {
efree(file_path);
}
file_path_len = 1;
file_path = "/";
#endif
return NULL;
}
zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);
ce = ce ? ce : source->info_class;
zend_update_class_constants(ce TSRMLS_CC);
return_value->value.obj = spl_filesystem_object_new_ex(ce, &intern TSRMLS_CC);
Z_TYPE_P(return_value) = IS_OBJECT;
if (ce->constructor->common.scope != spl_ce_SplFileInfo) {
MAKE_STD_ZVAL(arg1);
ZVAL_STRINGL(arg1, file_path, file_path_len, use_copy);
zend_call_method_with_1_params(&return_value, ce, &ce->constructor, "__construct", NULL, arg1);
zval_ptr_dtor(&arg1);
} else {
spl_filesystem_info_set_filename(intern, file_path, file_path_len, use_copy TSRMLS_CC);
}
zend_restore_error_handling(&error_handling TSRMLS_CC);
return intern;
} /* }}} */
Commit Message: Fix bug #72262 - do not overflow int
CWE ID: CWE-190 | static spl_filesystem_object * spl_filesystem_object_create_info(spl_filesystem_object *source, char *file_path, int file_path_len, int use_copy, zend_class_entry *ce, zval *return_value TSRMLS_DC) /* {{{ */
{
spl_filesystem_object *intern;
zval *arg1;
zend_error_handling error_handling;
if (!file_path || !file_path_len) {
#if defined(PHP_WIN32)
zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot create SplFileInfo for empty path");
if (file_path && !use_copy) {
efree(file_path);
}
#else
if (file_path && !use_copy) {
efree(file_path);
}
file_path_len = 1;
file_path = "/";
#endif
return NULL;
}
zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);
ce = ce ? ce : source->info_class;
zend_update_class_constants(ce TSRMLS_CC);
return_value->value.obj = spl_filesystem_object_new_ex(ce, &intern TSRMLS_CC);
Z_TYPE_P(return_value) = IS_OBJECT;
if (ce->constructor->common.scope != spl_ce_SplFileInfo) {
MAKE_STD_ZVAL(arg1);
ZVAL_STRINGL(arg1, file_path, file_path_len, use_copy);
zend_call_method_with_1_params(&return_value, ce, &ce->constructor, "__construct", NULL, arg1);
zval_ptr_dtor(&arg1);
} else {
spl_filesystem_info_set_filename(intern, file_path, file_path_len, use_copy TSRMLS_CC);
}
zend_restore_error_handling(&error_handling TSRMLS_CC);
return intern;
} /* }}} */
| 29,250 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: process_db_args(krb5_context context, char **db_args, xargs_t *xargs,
OPERATION optype)
{
int i=0;
krb5_error_code st=0;
char *arg=NULL, *arg_val=NULL;
char **dptr=NULL;
unsigned int arg_val_len=0;
if (db_args) {
for (i=0; db_args[i]; ++i) {
arg = strtok_r(db_args[i], "=", &arg_val);
if (strcmp(arg, TKTPOLICY_ARG) == 0) {
dptr = &xargs->tktpolicydn;
} else {
if (strcmp(arg, USERDN_ARG) == 0) {
if (optype == MODIFY_PRINCIPAL ||
xargs->dn != NULL || xargs->containerdn != NULL ||
xargs->linkdn != NULL) {
st = EINVAL;
k5_setmsg(context, st, _("%s option not supported"),
arg);
goto cleanup;
}
dptr = &xargs->dn;
} else if (strcmp(arg, CONTAINERDN_ARG) == 0) {
if (optype == MODIFY_PRINCIPAL ||
xargs->dn != NULL || xargs->containerdn != NULL) {
st = EINVAL;
k5_setmsg(context, st, _("%s option not supported"),
arg);
goto cleanup;
}
dptr = &xargs->containerdn;
} else if (strcmp(arg, LINKDN_ARG) == 0) {
if (xargs->dn != NULL || xargs->linkdn != NULL) {
st = EINVAL;
k5_setmsg(context, st, _("%s option not supported"),
arg);
goto cleanup;
}
dptr = &xargs->linkdn;
} else {
st = EINVAL;
k5_setmsg(context, st, _("unknown option: %s"), arg);
goto cleanup;
}
xargs->dn_from_kbd = TRUE;
if (arg_val == NULL || strlen(arg_val) == 0) {
st = EINVAL;
k5_setmsg(context, st, _("%s option value missing"), arg);
goto cleanup;
}
}
if (arg_val == NULL) {
st = EINVAL;
k5_setmsg(context, st, _("%s option value missing"), arg);
goto cleanup;
}
arg_val_len = strlen(arg_val) + 1;
if (strcmp(arg, TKTPOLICY_ARG) == 0) {
if ((st = krb5_ldap_name_to_policydn (context,
arg_val,
dptr)) != 0)
goto cleanup;
} else {
*dptr = k5memdup(arg_val, arg_val_len, &st);
if (*dptr == NULL)
goto cleanup;
}
}
}
cleanup:
return st;
}
Commit Message: Fix LDAP null deref on empty arg [CVE-2016-3119]
In the LDAP KDB module's process_db_args(), strtok_r() may return NULL
if there is an empty string in the db_args array. Check for this case
and avoid dereferencing a null pointer.
CVE-2016-3119:
In MIT krb5 1.6 and later, an authenticated attacker with permission
to modify a principal entry can cause kadmind to dereference a null
pointer by supplying an empty DB argument to the modify_principal
command, if kadmind is configured to use the LDAP KDB module.
CVSSv2 Vector: AV:N/AC:H/Au:S/C:N/I:N/A:C/E:H/RL:OF/RC:ND
ticket: 8383 (new)
target_version: 1.14-next
target_version: 1.13-next
tags: pullup
CWE ID: | process_db_args(krb5_context context, char **db_args, xargs_t *xargs,
OPERATION optype)
{
int i=0;
krb5_error_code st=0;
char *arg=NULL, *arg_val=NULL;
char **dptr=NULL;
unsigned int arg_val_len=0;
if (db_args) {
for (i=0; db_args[i]; ++i) {
arg = strtok_r(db_args[i], "=", &arg_val);
arg = (arg != NULL) ? arg : "";
if (strcmp(arg, TKTPOLICY_ARG) == 0) {
dptr = &xargs->tktpolicydn;
} else {
if (strcmp(arg, USERDN_ARG) == 0) {
if (optype == MODIFY_PRINCIPAL ||
xargs->dn != NULL || xargs->containerdn != NULL ||
xargs->linkdn != NULL) {
st = EINVAL;
k5_setmsg(context, st, _("%s option not supported"),
arg);
goto cleanup;
}
dptr = &xargs->dn;
} else if (strcmp(arg, CONTAINERDN_ARG) == 0) {
if (optype == MODIFY_PRINCIPAL ||
xargs->dn != NULL || xargs->containerdn != NULL) {
st = EINVAL;
k5_setmsg(context, st, _("%s option not supported"),
arg);
goto cleanup;
}
dptr = &xargs->containerdn;
} else if (strcmp(arg, LINKDN_ARG) == 0) {
if (xargs->dn != NULL || xargs->linkdn != NULL) {
st = EINVAL;
k5_setmsg(context, st, _("%s option not supported"),
arg);
goto cleanup;
}
dptr = &xargs->linkdn;
} else {
st = EINVAL;
k5_setmsg(context, st, _("unknown option: %s"), arg);
goto cleanup;
}
xargs->dn_from_kbd = TRUE;
if (arg_val == NULL || strlen(arg_val) == 0) {
st = EINVAL;
k5_setmsg(context, st, _("%s option value missing"), arg);
goto cleanup;
}
}
if (arg_val == NULL) {
st = EINVAL;
k5_setmsg(context, st, _("%s option value missing"), arg);
goto cleanup;
}
arg_val_len = strlen(arg_val) + 1;
if (strcmp(arg, TKTPOLICY_ARG) == 0) {
if ((st = krb5_ldap_name_to_policydn (context,
arg_val,
dptr)) != 0)
goto cleanup;
} else {
*dptr = k5memdup(arg_val, arg_val_len, &st);
if (*dptr == NULL)
goto cleanup;
}
}
}
cleanup:
return st;
}
| 10,593 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void WebGraphicsContext3DCommandBufferImpl::FlipVertically(
uint8* framebuffer,
unsigned int width,
unsigned int height) {
uint8* scanline = scanline_.get();
if (!scanline)
return;
unsigned int row_bytes = width * 4;
unsigned int count = height / 2;
for (unsigned int i = 0; i < count; i++) {
uint8* row_a = framebuffer + i * row_bytes;
uint8* row_b = framebuffer + (height - i - 1) * row_bytes;
memcpy(scanline, row_b, row_bytes);
memcpy(row_b, row_a, row_bytes);
memcpy(row_a, scanline, row_bytes);
}
}
Commit Message: Fix mismanagement in handling of temporary scanline for vertical flip.
BUG=116637
TEST=manual test from bug report with ASAN
Review URL: https://chromiumcodereview.appspot.com/9617038
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@125301 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | void WebGraphicsContext3DCommandBufferImpl::FlipVertically(
uint8* framebuffer,
unsigned int width,
unsigned int height) {
if (width == 0)
return;
scanline_.resize(width * 4);
uint8* scanline = &scanline_[0];
unsigned int row_bytes = width * 4;
unsigned int count = height / 2;
for (unsigned int i = 0; i < count; i++) {
uint8* row_a = framebuffer + i * row_bytes;
uint8* row_b = framebuffer + (height - i - 1) * row_bytes;
memcpy(scanline, row_b, row_bytes);
memcpy(row_b, row_a, row_bytes);
memcpy(row_a, scanline, row_bytes);
}
}
| 3,594 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int itacns_add_data_files(sc_pkcs15_card_t *p15card)
{
const size_t array_size =
sizeof(itacns_data_files)/sizeof(itacns_data_files[0]);
unsigned int i;
int rv;
sc_pkcs15_data_t *p15_personaldata = NULL;
sc_pkcs15_data_info_t dinfo;
struct sc_pkcs15_object *objs[32];
struct sc_pkcs15_data_info *cinfo;
for(i=0; i < array_size; i++) {
sc_path_t path;
sc_pkcs15_data_info_t data;
sc_pkcs15_object_t obj;
if (itacns_data_files[i].cie_only &&
p15card->card->type != SC_CARD_TYPE_ITACNS_CIE_V2)
continue;
sc_format_path(itacns_data_files[i].path, &path);
memset(&data, 0, sizeof(data));
memset(&obj, 0, sizeof(obj));
strlcpy(data.app_label, itacns_data_files[i].label,
sizeof(data.app_label));
strlcpy(obj.label, itacns_data_files[i].label,
sizeof(obj.label));
data.path = path;
rv = sc_pkcs15emu_add_data_object(p15card, &obj, &data);
SC_TEST_RET(p15card->card->ctx, SC_LOG_DEBUG_NORMAL, rv,
"Could not add data file");
}
/*
* If we got this far, we can read the Personal Data file and glean
* the user's full name. Thus we can use it to put together a
* user-friendlier card name.
*/
memset(&dinfo, 0, sizeof(dinfo));
strcpy(dinfo.app_label, "EF_DatiPersonali");
/* Find EF_DatiPersonali */
rv = sc_pkcs15_get_objects(p15card, SC_PKCS15_TYPE_DATA_OBJECT,
objs, 32);
if(rv < 0) {
sc_debug(p15card->card->ctx, SC_LOG_DEBUG_NORMAL,
"Data enumeration failed");
return SC_SUCCESS;
}
for(i=0; i<32; i++) {
cinfo = (struct sc_pkcs15_data_info *) objs[i]->data;
if(!strcmp("EF_DatiPersonali", objs[i]->label))
break;
}
if(i>=32) {
sc_debug(p15card->card->ctx, SC_LOG_DEBUG_NORMAL,
"Could not find EF_DatiPersonali: "
"keeping generic card name");
return SC_SUCCESS;
}
rv = sc_pkcs15_read_data_object(p15card, cinfo, &p15_personaldata);
if (rv) {
sc_debug(p15card->card->ctx, SC_LOG_DEBUG_NORMAL,
"Could not read EF_DatiPersonali: "
"keeping generic card name");
}
{
char fullname[160];
if(get_name_from_EF_DatiPersonali(p15_personaldata->data,
fullname, sizeof(fullname))) {
sc_debug(p15card->card->ctx, SC_LOG_DEBUG_NORMAL,
"Could not parse EF_DatiPersonali: "
"keeping generic card name");
sc_pkcs15_free_data_object(p15_personaldata);
return SC_SUCCESS;
}
set_string(&p15card->tokeninfo->label, fullname);
}
sc_pkcs15_free_data_object(p15_personaldata);
return SC_SUCCESS;
}
Commit Message: fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes.
CWE ID: CWE-125 | static int itacns_add_data_files(sc_pkcs15_card_t *p15card)
{
const size_t array_size =
sizeof(itacns_data_files)/sizeof(itacns_data_files[0]);
unsigned int i;
int rv;
sc_pkcs15_data_t *p15_personaldata = NULL;
sc_pkcs15_data_info_t dinfo;
struct sc_pkcs15_object *objs[32];
struct sc_pkcs15_data_info *cinfo;
for(i=0; i < array_size; i++) {
sc_path_t path;
sc_pkcs15_data_info_t data;
sc_pkcs15_object_t obj;
if (itacns_data_files[i].cie_only &&
p15card->card->type != SC_CARD_TYPE_ITACNS_CIE_V2)
continue;
sc_format_path(itacns_data_files[i].path, &path);
memset(&data, 0, sizeof(data));
memset(&obj, 0, sizeof(obj));
strlcpy(data.app_label, itacns_data_files[i].label,
sizeof(data.app_label));
strlcpy(obj.label, itacns_data_files[i].label,
sizeof(obj.label));
data.path = path;
rv = sc_pkcs15emu_add_data_object(p15card, &obj, &data);
SC_TEST_RET(p15card->card->ctx, SC_LOG_DEBUG_NORMAL, rv,
"Could not add data file");
}
/*
* If we got this far, we can read the Personal Data file and glean
* the user's full name. Thus we can use it to put together a
* user-friendlier card name.
*/
memset(&dinfo, 0, sizeof(dinfo));
strcpy(dinfo.app_label, "EF_DatiPersonali");
/* Find EF_DatiPersonali */
rv = sc_pkcs15_get_objects(p15card, SC_PKCS15_TYPE_DATA_OBJECT,
objs, 32);
if(rv < 0) {
sc_debug(p15card->card->ctx, SC_LOG_DEBUG_NORMAL,
"Data enumeration failed");
return SC_SUCCESS;
}
for(i=0; i<32; i++) {
cinfo = (struct sc_pkcs15_data_info *) objs[i]->data;
if(!strcmp("EF_DatiPersonali", objs[i]->label))
break;
}
if(i>=32) {
sc_debug(p15card->card->ctx, SC_LOG_DEBUG_NORMAL,
"Could not find EF_DatiPersonali: "
"keeping generic card name");
return SC_SUCCESS;
}
rv = sc_pkcs15_read_data_object(p15card, cinfo, &p15_personaldata);
if (rv) {
sc_debug(p15card->card->ctx, SC_LOG_DEBUG_NORMAL,
"Could not read EF_DatiPersonali: "
"keeping generic card name");
return SC_SUCCESS;
}
{
char fullname[160];
if(get_name_from_EF_DatiPersonali(p15_personaldata->data,
fullname, sizeof(fullname))) {
sc_debug(p15card->card->ctx, SC_LOG_DEBUG_NORMAL,
"Could not parse EF_DatiPersonali: "
"keeping generic card name");
sc_pkcs15_free_data_object(p15_personaldata);
return SC_SUCCESS;
}
set_string(&p15card->tokeninfo->label, fullname);
}
sc_pkcs15_free_data_object(p15_personaldata);
return SC_SUCCESS;
}
| 11,261 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static sent_status_t send_data_to_app(int fd, BT_HDR *p_buf) {
if (p_buf->len == 0)
return SENT_ALL;
ssize_t sent = send(fd, p_buf->data + p_buf->offset, p_buf->len, MSG_DONTWAIT);
if (sent == -1) {
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
return SENT_NONE;
LOG_ERROR("%s error writing RFCOMM data back to app: %s", __func__, strerror(errno));
return SENT_FAILED;
}
if (sent == 0)
return SENT_FAILED;
if (sent == p_buf->len)
return SENT_ALL;
p_buf->offset += sent;
p_buf->len -= sent;
return SENT_PARTIAL;
}
Commit Message: DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
CWE ID: CWE-284 | static sent_status_t send_data_to_app(int fd, BT_HDR *p_buf) {
if (p_buf->len == 0)
return SENT_ALL;
ssize_t sent = TEMP_FAILURE_RETRY(send(fd, p_buf->data + p_buf->offset, p_buf->len, MSG_DONTWAIT));
if (sent == -1) {
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
return SENT_NONE;
LOG_ERROR("%s error writing RFCOMM data back to app: %s", __func__, strerror(errno));
return SENT_FAILED;
}
if (sent == 0)
return SENT_FAILED;
if (sent == p_buf->len)
return SENT_ALL;
p_buf->offset += sent;
p_buf->len -= sent;
return SENT_PARTIAL;
}
| 8,067 |
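Background on the fix above: TEMP_FAILURE_RETRY is the glibc/Bionic helper that repeats a system call while it fails with EINTR, so a signal delivered to the Bluetooth process can no longer make send() give up spuriously. A minimal sketch of the same behaviour, written as a hypothetical helper rather than the real macro (which lives in <unistd.h> and wraps arbitrary expressions):

#include <errno.h>
#include <sys/socket.h>

/* Hypothetical helper illustrating what TEMP_FAILURE_RETRY(send(...)) does:
 * repeat the call only while it was interrupted by a signal (EINTR). */
static ssize_t send_retry_on_eintr(int fd, const void *buf, size_t len, int flags)
{
    ssize_t rc;
    do {
        rc = send(fd, buf, len, flags);
    } while (rc == -1 && errno == EINTR);
    return rc;
}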
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: PHP_METHOD(Phar, count)
{
PHAR_ARCHIVE_OBJECT();
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RETURN_LONG(zend_hash_num_elements(&phar_obj->arc.archive->manifest));
}
Commit Message:
CWE ID: CWE-20 | PHP_METHOD(Phar, count)
{
PHAR_ARCHIVE_OBJECT();
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RETURN_LONG(zend_hash_num_elements(&phar_obj->arc.archive->manifest));
}
| 25,566 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: IMPEG2D_ERROR_CODES_T impeg2d_dec_pic_ext_data(dec_state_t *ps_dec)
{
stream_t *ps_stream;
UWORD32 u4_start_code;
IMPEG2D_ERROR_CODES_T e_error;
e_error = (IMPEG2D_ERROR_CODES_T)IVD_ERROR_NONE;
ps_stream = &ps_dec->s_bit_stream;
u4_start_code = impeg2d_bit_stream_nxt(ps_stream,START_CODE_LEN);
while ( (u4_start_code == EXTENSION_START_CODE ||
u4_start_code == USER_DATA_START_CODE) &&
(IMPEG2D_ERROR_CODES_T)IVD_ERROR_NONE == e_error)
{
if(u4_start_code == USER_DATA_START_CODE)
{
impeg2d_dec_user_data(ps_dec);
}
else
{
impeg2d_bit_stream_flush(ps_stream,START_CODE_LEN);
u4_start_code = impeg2d_bit_stream_nxt(ps_stream,EXT_ID_LEN);
switch(u4_start_code)
{
case QUANT_MATRIX_EXT_ID:
impeg2d_dec_quant_matrix_ext(ps_dec);
break;
case COPYRIGHT_EXT_ID:
impeg2d_dec_copyright_ext(ps_dec);
break;
case PIC_DISPLAY_EXT_ID:
impeg2d_dec_pic_disp_ext(ps_dec);
break;
case CAMERA_PARAM_EXT_ID:
impeg2d_dec_cam_param_ext(ps_dec);
break;
case ITU_T_EXT_ID:
impeg2d_dec_itu_t_ext(ps_dec);
break;
case PIC_SPATIAL_SCALABLE_EXT_ID:
case PIC_TEMPORAL_SCALABLE_EXT_ID:
e_error = IMPEG2D_SCALABLITY_NOT_SUP;
break;
default:
/* In case its a reserved extension code */
impeg2d_bit_stream_flush(ps_stream,EXT_ID_LEN);
impeg2d_next_start_code(ps_dec);
break;
}
}
u4_start_code = impeg2d_bit_stream_nxt(ps_stream,START_CODE_LEN);
}
return e_error;
}
Commit Message: Fixed bit stream access to make sure that it is not read beyond the allocated size.
Bug: 25765591
Change-Id: I98c23a3c3f84f6710f29bffe5ed73adcf51d47f6
CWE ID: CWE-254 | IMPEG2D_ERROR_CODES_T impeg2d_dec_pic_ext_data(dec_state_t *ps_dec)
{
stream_t *ps_stream;
UWORD32 u4_start_code;
IMPEG2D_ERROR_CODES_T e_error;
e_error = (IMPEG2D_ERROR_CODES_T)IVD_ERROR_NONE;
ps_stream = &ps_dec->s_bit_stream;
u4_start_code = impeg2d_bit_stream_nxt(ps_stream,START_CODE_LEN);
while ( (u4_start_code == EXTENSION_START_CODE ||
u4_start_code == USER_DATA_START_CODE) &&
(IMPEG2D_ERROR_CODES_T)IVD_ERROR_NONE == e_error &&
(ps_stream->u4_offset < ps_stream->u4_max_offset))
{
if(u4_start_code == USER_DATA_START_CODE)
{
impeg2d_dec_user_data(ps_dec);
}
else
{
impeg2d_bit_stream_flush(ps_stream,START_CODE_LEN);
u4_start_code = impeg2d_bit_stream_nxt(ps_stream,EXT_ID_LEN);
switch(u4_start_code)
{
case QUANT_MATRIX_EXT_ID:
impeg2d_dec_quant_matrix_ext(ps_dec);
break;
case COPYRIGHT_EXT_ID:
impeg2d_dec_copyright_ext(ps_dec);
break;
case PIC_DISPLAY_EXT_ID:
impeg2d_dec_pic_disp_ext(ps_dec);
break;
case CAMERA_PARAM_EXT_ID:
impeg2d_dec_cam_param_ext(ps_dec);
break;
case ITU_T_EXT_ID:
impeg2d_dec_itu_t_ext(ps_dec);
break;
case PIC_SPATIAL_SCALABLE_EXT_ID:
case PIC_TEMPORAL_SCALABLE_EXT_ID:
e_error = IMPEG2D_SCALABLITY_NOT_SUP;
break;
default:
/* In case its a reserved extension code */
impeg2d_bit_stream_flush(ps_stream,EXT_ID_LEN);
impeg2d_next_start_code(ps_dec);
break;
}
}
u4_start_code = impeg2d_bit_stream_nxt(ps_stream,START_CODE_LEN);
}
return e_error;
}
| 29,517 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: PHP_FUNCTION(locale_get_display_variant)
{
get_icu_disp_value_src_php( LOC_VARIANT_TAG , INTERNAL_FUNCTION_PARAM_PASSTHRU );
}
Commit Message: Fix bug #72241: get_icu_value_internal out-of-bounds read
CWE ID: CWE-125 | PHP_FUNCTION(locale_get_display_variant)
{
get_icu_disp_value_src_php( LOC_VARIANT_TAG , INTERNAL_FUNCTION_PARAM_PASSTHRU );
}
| 13,912 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: xmlParseExternalSubset(xmlParserCtxtPtr ctxt, const xmlChar *ExternalID,
const xmlChar *SystemID) {
xmlDetectSAX2(ctxt);
GROW;
if ((ctxt->encoding == (const xmlChar *)XML_CHAR_ENCODING_NONE) &&
(ctxt->input->end - ctxt->input->cur >= 4)) {
xmlChar start[4];
xmlCharEncoding enc;
start[0] = RAW;
start[1] = NXT(1);
start[2] = NXT(2);
start[3] = NXT(3);
enc = xmlDetectCharEncoding(start, 4);
if (enc != XML_CHAR_ENCODING_NONE)
xmlSwitchEncoding(ctxt, enc);
}
if (CMP5(CUR_PTR, '<', '?', 'x', 'm', 'l')) {
xmlParseTextDecl(ctxt);
if (ctxt->errNo == XML_ERR_UNSUPPORTED_ENCODING) {
/*
* The XML REC instructs us to stop parsing right here
*/
ctxt->instate = XML_PARSER_EOF;
return;
}
}
if (ctxt->myDoc == NULL) {
ctxt->myDoc = xmlNewDoc(BAD_CAST "1.0");
if (ctxt->myDoc == NULL) {
xmlErrMemory(ctxt, "New Doc failed");
return;
}
ctxt->myDoc->properties = XML_DOC_INTERNAL;
}
if ((ctxt->myDoc != NULL) && (ctxt->myDoc->intSubset == NULL))
xmlCreateIntSubset(ctxt->myDoc, NULL, ExternalID, SystemID);
ctxt->instate = XML_PARSER_DTD;
ctxt->external = 1;
while (((RAW == '<') && (NXT(1) == '?')) ||
((RAW == '<') && (NXT(1) == '!')) ||
(RAW == '%') || IS_BLANK_CH(CUR)) {
const xmlChar *check = CUR_PTR;
unsigned int cons = ctxt->input->consumed;
GROW;
if ((RAW == '<') && (NXT(1) == '!') && (NXT(2) == '[')) {
xmlParseConditionalSections(ctxt);
} else if (IS_BLANK_CH(CUR)) {
NEXT;
} else if (RAW == '%') {
xmlParsePEReference(ctxt);
} else
xmlParseMarkupDecl(ctxt);
/*
* Pop-up of finished entities.
*/
while ((RAW == 0) && (ctxt->inputNr > 1))
xmlPopInput(ctxt);
if ((CUR_PTR == check) && (cons == ctxt->input->consumed)) {
xmlFatalErr(ctxt, XML_ERR_EXT_SUBSET_NOT_FINISHED, NULL);
break;
}
}
if (RAW != 0) {
xmlFatalErr(ctxt, XML_ERR_EXT_SUBSET_NOT_FINISHED, NULL);
}
}
Commit Message: libxml: XML_PARSER_EOF checks from upstream
BUG=229019
TBR=cpu
Review URL: https://chromiumcodereview.appspot.com/14053009
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@196804 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | xmlParseExternalSubset(xmlParserCtxtPtr ctxt, const xmlChar *ExternalID,
const xmlChar *SystemID) {
xmlDetectSAX2(ctxt);
GROW;
if ((ctxt->encoding == (const xmlChar *)XML_CHAR_ENCODING_NONE) &&
(ctxt->input->end - ctxt->input->cur >= 4)) {
xmlChar start[4];
xmlCharEncoding enc;
start[0] = RAW;
start[1] = NXT(1);
start[2] = NXT(2);
start[3] = NXT(3);
enc = xmlDetectCharEncoding(start, 4);
if (enc != XML_CHAR_ENCODING_NONE)
xmlSwitchEncoding(ctxt, enc);
}
if (CMP5(CUR_PTR, '<', '?', 'x', 'm', 'l')) {
xmlParseTextDecl(ctxt);
if (ctxt->errNo == XML_ERR_UNSUPPORTED_ENCODING) {
/*
* The XML REC instructs us to stop parsing right here
*/
ctxt->instate = XML_PARSER_EOF;
return;
}
}
if (ctxt->myDoc == NULL) {
ctxt->myDoc = xmlNewDoc(BAD_CAST "1.0");
if (ctxt->myDoc == NULL) {
xmlErrMemory(ctxt, "New Doc failed");
return;
}
ctxt->myDoc->properties = XML_DOC_INTERNAL;
}
if ((ctxt->myDoc != NULL) && (ctxt->myDoc->intSubset == NULL))
xmlCreateIntSubset(ctxt->myDoc, NULL, ExternalID, SystemID);
ctxt->instate = XML_PARSER_DTD;
ctxt->external = 1;
while (((RAW == '<') && (NXT(1) == '?')) ||
((RAW == '<') && (NXT(1) == '!')) ||
(RAW == '%') || IS_BLANK_CH(CUR)) {
const xmlChar *check = CUR_PTR;
unsigned int cons = ctxt->input->consumed;
GROW;
if ((RAW == '<') && (NXT(1) == '!') && (NXT(2) == '[')) {
xmlParseConditionalSections(ctxt);
} else if (IS_BLANK_CH(CUR)) {
NEXT;
} else if (RAW == '%') {
xmlParsePEReference(ctxt);
} else
xmlParseMarkupDecl(ctxt);
/*
* Pop-up of finished entities.
*/
while ((RAW == 0) && (ctxt->inputNr > 1))
xmlPopInput(ctxt);
if ((CUR_PTR == check) && (cons == ctxt->input->consumed)) {
xmlFatalErr(ctxt, XML_ERR_EXT_SUBSET_NOT_FINISHED, NULL);
break;
}
}
if (RAW != 0) {
xmlFatalErr(ctxt, XML_ERR_EXT_SUBSET_NOT_FINISHED, NULL);
}
}
| 8,082 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void HTMLImportsController::Dispose() {
for (const auto& loader : loaders_)
loader->Dispose();
loaders_.clear();
if (root_) {
root_->Dispose();
root_.Clear();
}
}
Commit Message: Speculative fix for crashes in HTMLImportsController::Dispose().
Copy the loaders_ vector before iterating it.
This CL has no tests because we don't know stable reproduction.
Bug: 843151
Change-Id: I3d5e184657cbce56dcfca0c717d7a0c464e20efe
Reviewed-on: https://chromium-review.googlesource.com/1245017
Reviewed-by: Keishi Hattori <[email protected]>
Commit-Queue: Kent Tamura <[email protected]>
Cr-Commit-Position: refs/heads/master@{#594226}
CWE ID: CWE-416 | void HTMLImportsController::Dispose() {
// TODO(tkent): We copy loaders_ before iteration to avoid crashes.
// This copy should be unnecessary. loaders_ is not modified during
// the iteration. Also, null-check for |loader| should be
// unnecessary. crbug.com/843151.
LoaderList list;
list.swap(loaders_);
for (const auto& loader : list) {
if (loader)
loader->Dispose();
}
if (root_) {
root_->Dispose();
root_.Clear();
}
}
| 8,734 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void ip4_datagram_release_cb(struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
const struct ip_options_rcu *inet_opt;
__be32 daddr = inet->inet_daddr;
struct flowi4 fl4;
struct rtable *rt;
if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0))
return;
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,
inet->inet_saddr, inet->inet_dport,
inet->inet_sport, sk->sk_protocol,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
if (!IS_ERR(rt))
__sk_dst_set(sk, &rt->dst);
rcu_read_unlock();
}
Commit Message: ipv4: fix a race in ip4_datagram_release_cb()
Alexey gave a AddressSanitizer[1] report that finally gave a good hint
at where was the origin of various problems already reported by Dormando
in the past [2]
Problem comes from the fact that UDP can have a lockless TX path, and
concurrent threads can manipulate sk_dst_cache, while another thread,
is holding socket lock and calls __sk_dst_set() in
ip4_datagram_release_cb() (this was added in linux-3.8)
It seems that all we need to do is to use sk_dst_check() and
sk_dst_set() so that all the writers hold same spinlock
(sk->sk_dst_lock) to prevent corruptions.
TCP stack do not need this protection, as all sk_dst_cache writers hold
the socket lock.
[1]
https://code.google.com/p/address-sanitizer/wiki/AddressSanitizerForKernel
AddressSanitizer: heap-use-after-free in ipv4_dst_check
Read of size 2 by thread T15453:
[<ffffffff817daa3a>] ipv4_dst_check+0x1a/0x90 ./net/ipv4/route.c:1116
[<ffffffff8175b789>] __sk_dst_check+0x89/0xe0 ./net/core/sock.c:531
[<ffffffff81830a36>] ip4_datagram_release_cb+0x46/0x390 ??:0
[<ffffffff8175eaea>] release_sock+0x17a/0x230 ./net/core/sock.c:2413
[<ffffffff81830882>] ip4_datagram_connect+0x462/0x5d0 ??:0
[<ffffffff81846d06>] inet_dgram_connect+0x76/0xd0 ./net/ipv4/af_inet.c:534
[<ffffffff817580ac>] SYSC_connect+0x15c/0x1c0 ./net/socket.c:1701
[<ffffffff817596ce>] SyS_connect+0xe/0x10 ./net/socket.c:1682
[<ffffffff818b0a29>] system_call_fastpath+0x16/0x1b
./arch/x86/kernel/entry_64.S:629
Freed by thread T15455:
[<ffffffff8178d9b8>] dst_destroy+0xa8/0x160 ./net/core/dst.c:251
[<ffffffff8178de25>] dst_release+0x45/0x80 ./net/core/dst.c:280
[<ffffffff818304c1>] ip4_datagram_connect+0xa1/0x5d0 ??:0
[<ffffffff81846d06>] inet_dgram_connect+0x76/0xd0 ./net/ipv4/af_inet.c:534
[<ffffffff817580ac>] SYSC_connect+0x15c/0x1c0 ./net/socket.c:1701
[<ffffffff817596ce>] SyS_connect+0xe/0x10 ./net/socket.c:1682
[<ffffffff818b0a29>] system_call_fastpath+0x16/0x1b
./arch/x86/kernel/entry_64.S:629
Allocated by thread T15453:
[<ffffffff8178d291>] dst_alloc+0x81/0x2b0 ./net/core/dst.c:171
[<ffffffff817db3b7>] rt_dst_alloc+0x47/0x50 ./net/ipv4/route.c:1406
[< inlined >] __ip_route_output_key+0x3e8/0xf70
__mkroute_output ./net/ipv4/route.c:1939
[<ffffffff817dde08>] __ip_route_output_key+0x3e8/0xf70 ./net/ipv4/route.c:2161
[<ffffffff817deb34>] ip_route_output_flow+0x14/0x30 ./net/ipv4/route.c:2249
[<ffffffff81830737>] ip4_datagram_connect+0x317/0x5d0 ??:0
[<ffffffff81846d06>] inet_dgram_connect+0x76/0xd0 ./net/ipv4/af_inet.c:534
[<ffffffff817580ac>] SYSC_connect+0x15c/0x1c0 ./net/socket.c:1701
[<ffffffff817596ce>] SyS_connect+0xe/0x10 ./net/socket.c:1682
[<ffffffff818b0a29>] system_call_fastpath+0x16/0x1b
./arch/x86/kernel/entry_64.S:629
[2]
<4>[196727.311203] general protection fault: 0000 [#1] SMP
<4>[196727.311224] Modules linked in: xt_TEE xt_dscp xt_DSCP macvlan bridge coretemp crc32_pclmul ghash_clmulni_intel gpio_ich microcode ipmi_watchdog ipmi_devintf sb_edac edac_core lpc_ich mfd_core tpm_tis tpm tpm_bios ipmi_si ipmi_msghandler isci igb libsas i2c_algo_bit ixgbe ptp pps_core mdio
<4>[196727.311333] CPU: 17 PID: 0 Comm: swapper/17 Not tainted 3.10.26 #1
<4>[196727.311344] Hardware name: Supermicro X9DRi-LN4+/X9DR3-LN4+/X9DRi-LN4+/X9DR3-LN4+, BIOS 3.0 07/05/2013
<4>[196727.311364] task: ffff885e6f069700 ti: ffff885e6f072000 task.ti: ffff885e6f072000
<4>[196727.311377] RIP: 0010:[<ffffffff815f8c7f>] [<ffffffff815f8c7f>] ipv4_dst_destroy+0x4f/0x80
<4>[196727.311399] RSP: 0018:ffff885effd23a70 EFLAGS: 00010282
<4>[196727.311409] RAX: dead000000200200 RBX: ffff8854c398ecc0 RCX: 0000000000000040
<4>[196727.311423] RDX: dead000000100100 RSI: dead000000100100 RDI: dead000000200200
<4>[196727.311437] RBP: ffff885effd23a80 R08: ffffffff815fd9e0 R09: ffff885d5a590800
<4>[196727.311451] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000
<4>[196727.311464] R13: ffffffff81c8c280 R14: 0000000000000000 R15: ffff880e85ee16ce
<4>[196727.311510] FS: 0000000000000000(0000) GS:ffff885effd20000(0000) knlGS:0000000000000000
<4>[196727.311554] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
<4>[196727.311581] CR2: 00007a46751eb000 CR3: 0000005e65688000 CR4: 00000000000407e0
<4>[196727.311625] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
<4>[196727.311669] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
<4>[196727.311713] Stack:
<4>[196727.311733] ffff8854c398ecc0 ffff8854c398ecc0 ffff885effd23ab0 ffffffff815b7f42
<4>[196727.311784] ffff88be6595bc00 ffff8854c398ecc0 0000000000000000 ffff8854c398ecc0
<4>[196727.311834] ffff885effd23ad0 ffffffff815b86c6 ffff885d5a590800 ffff8816827821c0
<4>[196727.311885] Call Trace:
<4>[196727.311907] <IRQ>
<4>[196727.311912] [<ffffffff815b7f42>] dst_destroy+0x32/0xe0
<4>[196727.311959] [<ffffffff815b86c6>] dst_release+0x56/0x80
<4>[196727.311986] [<ffffffff81620bd5>] tcp_v4_do_rcv+0x2a5/0x4a0
<4>[196727.312013] [<ffffffff81622b5a>] tcp_v4_rcv+0x7da/0x820
<4>[196727.312041] [<ffffffff815fd9e0>] ? ip_rcv_finish+0x360/0x360
<4>[196727.312070] [<ffffffff815de02d>] ? nf_hook_slow+0x7d/0x150
<4>[196727.312097] [<ffffffff815fd9e0>] ? ip_rcv_finish+0x360/0x360
<4>[196727.312125] [<ffffffff815fda92>] ip_local_deliver_finish+0xb2/0x230
<4>[196727.312154] [<ffffffff815fdd9a>] ip_local_deliver+0x4a/0x90
<4>[196727.312183] [<ffffffff815fd799>] ip_rcv_finish+0x119/0x360
<4>[196727.312212] [<ffffffff815fe00b>] ip_rcv+0x22b/0x340
<4>[196727.312242] [<ffffffffa0339680>] ? macvlan_broadcast+0x160/0x160 [macvlan]
<4>[196727.312275] [<ffffffff815b0c62>] __netif_receive_skb_core+0x512/0x640
<4>[196727.312308] [<ffffffff811427fb>] ? kmem_cache_alloc+0x13b/0x150
<4>[196727.312338] [<ffffffff815b0db1>] __netif_receive_skb+0x21/0x70
<4>[196727.312368] [<ffffffff815b0fa1>] netif_receive_skb+0x31/0xa0
<4>[196727.312397] [<ffffffff815b1ae8>] napi_gro_receive+0xe8/0x140
<4>[196727.312433] [<ffffffffa00274f1>] ixgbe_poll+0x551/0x11f0 [ixgbe]
<4>[196727.312463] [<ffffffff815fe00b>] ? ip_rcv+0x22b/0x340
<4>[196727.312491] [<ffffffff815b1691>] net_rx_action+0x111/0x210
<4>[196727.312521] [<ffffffff815b0db1>] ? __netif_receive_skb+0x21/0x70
<4>[196727.312552] [<ffffffff810519d0>] __do_softirq+0xd0/0x270
<4>[196727.312583] [<ffffffff816cef3c>] call_softirq+0x1c/0x30
<4>[196727.312613] [<ffffffff81004205>] do_softirq+0x55/0x90
<4>[196727.312640] [<ffffffff81051c85>] irq_exit+0x55/0x60
<4>[196727.312668] [<ffffffff816cf5c3>] do_IRQ+0x63/0xe0
<4>[196727.312696] [<ffffffff816c5aaa>] common_interrupt+0x6a/0x6a
<4>[196727.312722] <EOI>
<1>[196727.313071] RIP [<ffffffff815f8c7f>] ipv4_dst_destroy+0x4f/0x80
<4>[196727.313100] RSP <ffff885effd23a70>
<4>[196727.313377] ---[ end trace 64b3f14fae0f2e29 ]---
<0>[196727.380908] Kernel panic - not syncing: Fatal exception in interrupt
Reported-by: Alexey Preobrazhensky <[email protected]>
Reported-by: dormando <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Fixes: 8141ed9fcedb2 ("ipv4: Add a socket release callback for datagram sockets")
Cc: Steffen Klassert <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-416 | void ip4_datagram_release_cb(struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
const struct ip_options_rcu *inet_opt;
__be32 daddr = inet->inet_daddr;
struct dst_entry *dst;
struct flowi4 fl4;
struct rtable *rt;
rcu_read_lock();
dst = __sk_dst_get(sk);
if (!dst || !dst->obsolete || dst->ops->check(dst, 0)) {
rcu_read_unlock();
return;
}
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,
inet->inet_saddr, inet->inet_dport,
inet->inet_sport, sk->sk_protocol,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
dst = !IS_ERR(rt) ? &rt->dst : NULL;
sk_dst_set(sk, dst);
rcu_read_unlock();
}
| 4,723 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: HRESULT WaitForLoginUIAndGetResult(
CGaiaCredentialBase::UIProcessInfo* uiprocinfo,
std::string* json_result,
DWORD* exit_code,
BSTR* status_text) {
LOGFN(INFO);
DCHECK(uiprocinfo);
DCHECK(json_result);
DCHECK(exit_code);
DCHECK(status_text);
const int kBufferSize = 4096;
std::vector<char> output_buffer(kBufferSize, '\0');
base::ScopedClosureRunner zero_buffer_on_exit(
base::BindOnce(base::IgnoreResult(&RtlSecureZeroMemory),
&output_buffer[0], kBufferSize));
HRESULT hr = WaitForProcess(uiprocinfo->procinfo.process_handle(),
uiprocinfo->parent_handles, exit_code,
&output_buffer[0], kBufferSize);
LOGFN(INFO) << "exit_code=" << exit_code;
if (*exit_code == kUiecAbort) {
LOGFN(ERROR) << "Aborted hr=" << putHR(hr);
return E_ABORT;
} else if (*exit_code != kUiecSuccess) {
LOGFN(ERROR) << "Error hr=" << putHR(hr);
*status_text =
CGaiaCredentialBase::AllocErrorString(IDS_INVALID_UI_RESPONSE_BASE);
return E_FAIL;
}
*json_result = std::string(&output_buffer[0]);
return S_OK;
}
Commit Message: [GCPW] Disallow sign in of consumer accounts when mdm is enabled.
Unless the registry key "mdm_aca" is explicitly set to 1, always
fail sign in of consumer accounts when mdm enrollment is enabled.
Consumer accounts are defined as accounts with gmail.com or
googlemail.com domain.
Bug: 944049
Change-Id: Icb822f3737d90931de16a8d3317616dd2b159edd
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1532903
Commit-Queue: Tien Mai <[email protected]>
Reviewed-by: Roger Tawa <[email protected]>
Cr-Commit-Position: refs/heads/master@{#646278}
CWE ID: CWE-284 | HRESULT WaitForLoginUIAndGetResult(
CGaiaCredentialBase::UIProcessInfo* uiprocinfo,
std::string* json_result,
DWORD* exit_code,
BSTR* status_text) {
LOGFN(INFO);
DCHECK(uiprocinfo);
DCHECK(json_result);
DCHECK(exit_code);
DCHECK(status_text);
const int kBufferSize = 4096;
std::vector<char> output_buffer(kBufferSize, '\0');
base::ScopedClosureRunner zero_buffer_on_exit(
base::BindOnce(base::IgnoreResult(&RtlSecureZeroMemory),
&output_buffer[0], kBufferSize));
HRESULT hr = WaitForProcess(uiprocinfo->procinfo.process_handle(),
uiprocinfo->parent_handles, exit_code,
&output_buffer[0], kBufferSize);
LOGFN(INFO) << "exit_code=" << *exit_code;
if (*exit_code == kUiecAbort) {
LOGFN(ERROR) << "Aborted hr=" << putHR(hr);
return E_ABORT;
} else if (*exit_code != kUiecSuccess) {
LOGFN(ERROR) << "Error hr=" << putHR(hr);
*status_text =
CGaiaCredentialBase::AllocErrorString(IDS_INVALID_UI_RESPONSE_BASE);
return E_FAIL;
}
*json_result = std::string(&output_buffer[0]);
return S_OK;
}
| 8,157 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: R_API void r_anal_bb_free(RAnalBlock *bb) {
if (!bb) {
return;
}
r_anal_cond_free (bb->cond);
R_FREE (bb->fingerprint);
r_anal_diff_free (bb->diff);
bb->diff = NULL;
R_FREE (bb->op_bytes);
r_anal_switch_op_free (bb->switch_op);
bb->switch_op = NULL;
bb->fingerprint = NULL;
bb->cond = NULL;
R_FREE (bb->label);
R_FREE (bb->op_pos);
R_FREE (bb->parent_reg_arena);
if (bb->prev) {
if (bb->prev->jumpbb == bb) {
bb->prev->jumpbb = NULL;
}
if (bb->prev->failbb == bb) {
bb->prev->failbb = NULL;
}
bb->prev = NULL;
}
if (bb->jumpbb) {
bb->jumpbb->prev = NULL;
bb->jumpbb = NULL;
}
if (bb->failbb) {
bb->failbb->prev = NULL;
bb->failbb = NULL;
}
R_FREE (bb);
}
Commit Message: Fix #10293 - Use-after-free in r_anal_bb_free()
CWE ID: CWE-416 | R_API void r_anal_bb_free(RAnalBlock *bb) {
if (!bb) {
return;
}
r_anal_cond_free (bb->cond);
R_FREE (bb->fingerprint);
r_anal_diff_free (bb->diff);
bb->diff = NULL;
R_FREE (bb->op_bytes);
r_anal_switch_op_free (bb->switch_op);
bb->switch_op = NULL;
bb->fingerprint = NULL;
bb->cond = NULL;
R_FREE (bb->label);
R_FREE (bb->op_pos);
R_FREE (bb->parent_reg_arena);
if (bb->prev) {
if (bb->prev->jumpbb == bb) {
bb->prev->jumpbb = NULL;
}
if (bb->prev->failbb == bb) {
bb->prev->failbb = NULL;
}
bb->prev = NULL;
}
if (bb->jumpbb) {
bb->jumpbb->prev = NULL;
bb->jumpbb = NULL;
}
if (bb->failbb) {
bb->failbb->prev = NULL;
bb->failbb = NULL;
}
if (bb->next) {
// avoid double free
bb->next->prev = NULL;
}
R_FREE (bb); // double free
}
| 6,749 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void BluetoothDeviceChromeOS::DisplayPasskey(
const dbus::ObjectPath& device_path,
uint32 passkey,
uint16 entered) {
DCHECK(agent_.get());
DCHECK(device_path == object_path_);
VLOG(1) << object_path_.value() << ": DisplayPasskey: " << passkey
<< " (" << entered << " entered)";
if (entered == 0)
UMA_HISTOGRAM_ENUMERATION("Bluetooth.PairingMethod",
UMA_PAIRING_METHOD_DISPLAY_PASSKEY,
UMA_PAIRING_METHOD_COUNT);
DCHECK(pairing_delegate_);
if (entered == 0)
pairing_delegate_->DisplayPasskey(this, passkey);
pairing_delegate_->KeysEntered(this, entered);
pairing_delegate_used_ = true;
}
Commit Message: Refactor to support default Bluetooth pairing delegate
In order to support a default pairing delegate we need to move the agent
service provider delegate implementation from BluetoothDevice to
BluetoothAdapter while retaining the existing API.
BUG=338492
TEST=device_unittests, unit_tests, browser_tests
Review URL: https://codereview.chromium.org/148293003
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@252216 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: | void BluetoothDeviceChromeOS::DisplayPasskey(
| 13,066 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: PHP_METHOD(Phar, loadPhar)
{
char *fname, *alias = NULL, *error;
size_t fname_len, alias_len = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s|s!", &fname, &fname_len, &alias, &alias_len) == FAILURE) {
return;
}
phar_request_initialize();
RETVAL_BOOL(phar_open_from_filename(fname, fname_len, alias, alias_len, REPORT_ERRORS, NULL, &error) == SUCCESS);
if (error) {
zend_throw_exception_ex(phar_ce_PharException, 0, "%s", error);
efree(error);
}
} /* }}} */
/* {{{ proto string Phar::apiVersion()
Commit Message:
CWE ID: CWE-20 | PHP_METHOD(Phar, loadPhar)
{
char *fname, *alias = NULL, *error;
size_t fname_len, alias_len = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "p|s!", &fname, &fname_len, &alias, &alias_len) == FAILURE) {
return;
}
phar_request_initialize();
RETVAL_BOOL(phar_open_from_filename(fname, fname_len, alias, alias_len, REPORT_ERRORS, NULL, &error) == SUCCESS);
if (error) {
zend_throw_exception_ex(phar_ce_PharException, 0, "%s", error);
efree(error);
}
} /* }}} */
/* {{{ proto string Phar::apiVersion()
| 138 |
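Note on the one-character fix above ("s|s!" to "p|s!"): the 'p' specifier tells zend_parse_parameters to treat the argument as a path, which additionally rejects strings containing embedded NUL bytes. A rough illustration of the extra check, using an invented helper name rather than the actual Zend source:

#include <string.h>
#include <stddef.h>

/* Invented helper showing the validation 'p' adds over 's': a filename such
 * as "evil.phar\0.jpg" would silently truncate at the NUL once handed to C
 * file APIs, so path arguments must contain no embedded NUL bytes. */
static int is_acceptable_path_arg(const char *str, size_t len)
{
    return memchr(str, '\0', len) == NULL;
}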
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: icmp_print(netdissect_options *ndo, const u_char *bp, u_int plen, const u_char *bp2,
int fragmented)
{
char *cp;
const struct icmp *dp;
const struct icmp_ext_t *ext_dp;
const struct ip *ip;
const char *str, *fmt;
const struct ip *oip;
const struct udphdr *ouh;
const uint8_t *obj_tptr;
uint32_t raw_label;
const u_char *snapend_save;
const struct icmp_mpls_ext_object_header_t *icmp_mpls_ext_object_header;
u_int hlen, dport, mtu, obj_tlen, obj_class_num, obj_ctype;
char buf[MAXHOSTNAMELEN + 100];
struct cksum_vec vec[1];
dp = (const struct icmp *)bp;
ext_dp = (const struct icmp_ext_t *)bp;
ip = (const struct ip *)bp2;
str = buf;
ND_TCHECK(dp->icmp_code);
switch (dp->icmp_type) {
case ICMP_ECHO:
case ICMP_ECHOREPLY:
ND_TCHECK(dp->icmp_seq);
(void)snprintf(buf, sizeof(buf), "echo %s, id %u, seq %u",
dp->icmp_type == ICMP_ECHO ?
"request" : "reply",
EXTRACT_16BITS(&dp->icmp_id),
EXTRACT_16BITS(&dp->icmp_seq));
break;
case ICMP_UNREACH:
ND_TCHECK(dp->icmp_ip.ip_dst);
switch (dp->icmp_code) {
case ICMP_UNREACH_PROTOCOL:
ND_TCHECK(dp->icmp_ip.ip_p);
(void)snprintf(buf, sizeof(buf),
"%s protocol %d unreachable",
ipaddr_string(ndo, &dp->icmp_ip.ip_dst),
dp->icmp_ip.ip_p);
break;
case ICMP_UNREACH_PORT:
ND_TCHECK(dp->icmp_ip.ip_p);
oip = &dp->icmp_ip;
hlen = IP_HL(oip) * 4;
ouh = (const struct udphdr *)(((const u_char *)oip) + hlen);
ND_TCHECK(ouh->uh_dport);
dport = EXTRACT_16BITS(&ouh->uh_dport);
switch (oip->ip_p) {
case IPPROTO_TCP:
(void)snprintf(buf, sizeof(buf),
"%s tcp port %s unreachable",
ipaddr_string(ndo, &oip->ip_dst),
tcpport_string(ndo, dport));
break;
case IPPROTO_UDP:
(void)snprintf(buf, sizeof(buf),
"%s udp port %s unreachable",
ipaddr_string(ndo, &oip->ip_dst),
udpport_string(ndo, dport));
break;
default:
(void)snprintf(buf, sizeof(buf),
"%s protocol %d port %d unreachable",
ipaddr_string(ndo, &oip->ip_dst),
oip->ip_p, dport);
break;
}
break;
case ICMP_UNREACH_NEEDFRAG:
{
register const struct mtu_discovery *mp;
mp = (const struct mtu_discovery *)(const u_char *)&dp->icmp_void;
mtu = EXTRACT_16BITS(&mp->nexthopmtu);
if (mtu) {
(void)snprintf(buf, sizeof(buf),
"%s unreachable - need to frag (mtu %d)",
ipaddr_string(ndo, &dp->icmp_ip.ip_dst), mtu);
} else {
(void)snprintf(buf, sizeof(buf),
"%s unreachable - need to frag",
ipaddr_string(ndo, &dp->icmp_ip.ip_dst));
}
}
break;
default:
fmt = tok2str(unreach2str, "#%d %%s unreachable",
dp->icmp_code);
(void)snprintf(buf, sizeof(buf), fmt,
ipaddr_string(ndo, &dp->icmp_ip.ip_dst));
break;
}
break;
case ICMP_REDIRECT:
ND_TCHECK(dp->icmp_ip.ip_dst);
fmt = tok2str(type2str, "redirect-#%d %%s to net %%s",
dp->icmp_code);
(void)snprintf(buf, sizeof(buf), fmt,
ipaddr_string(ndo, &dp->icmp_ip.ip_dst),
ipaddr_string(ndo, &dp->icmp_gwaddr));
break;
case ICMP_ROUTERADVERT:
{
register const struct ih_rdiscovery *ihp;
register const struct id_rdiscovery *idp;
u_int lifetime, num, size;
(void)snprintf(buf, sizeof(buf), "router advertisement");
cp = buf + strlen(buf);
ihp = (const struct ih_rdiscovery *)&dp->icmp_void;
ND_TCHECK(*ihp);
(void)strncpy(cp, " lifetime ", sizeof(buf) - (cp - buf));
cp = buf + strlen(buf);
lifetime = EXTRACT_16BITS(&ihp->ird_lifetime);
if (lifetime < 60) {
(void)snprintf(cp, sizeof(buf) - (cp - buf), "%u",
lifetime);
} else if (lifetime < 60 * 60) {
(void)snprintf(cp, sizeof(buf) - (cp - buf), "%u:%02u",
lifetime / 60, lifetime % 60);
} else {
(void)snprintf(cp, sizeof(buf) - (cp - buf),
"%u:%02u:%02u",
lifetime / 3600,
(lifetime % 3600) / 60,
lifetime % 60);
}
cp = buf + strlen(buf);
num = ihp->ird_addrnum;
(void)snprintf(cp, sizeof(buf) - (cp - buf), " %d:", num);
cp = buf + strlen(buf);
size = ihp->ird_addrsiz;
if (size != 2) {
(void)snprintf(cp, sizeof(buf) - (cp - buf),
" [size %d]", size);
break;
}
idp = (const struct id_rdiscovery *)&dp->icmp_data;
while (num-- > 0) {
ND_TCHECK(*idp);
(void)snprintf(cp, sizeof(buf) - (cp - buf), " {%s %u}",
ipaddr_string(ndo, &idp->ird_addr),
EXTRACT_32BITS(&idp->ird_pref));
cp = buf + strlen(buf);
++idp;
}
}
break;
case ICMP_TIMXCEED:
ND_TCHECK(dp->icmp_ip.ip_dst);
switch (dp->icmp_code) {
case ICMP_TIMXCEED_INTRANS:
str = "time exceeded in-transit";
break;
case ICMP_TIMXCEED_REASS:
str = "ip reassembly time exceeded";
break;
default:
(void)snprintf(buf, sizeof(buf), "time exceeded-#%d",
dp->icmp_code);
break;
}
break;
case ICMP_PARAMPROB:
if (dp->icmp_code)
(void)snprintf(buf, sizeof(buf),
"parameter problem - code %d", dp->icmp_code);
else {
ND_TCHECK(dp->icmp_pptr);
(void)snprintf(buf, sizeof(buf),
"parameter problem - octet %d", dp->icmp_pptr);
}
break;
case ICMP_MASKREPLY:
ND_TCHECK(dp->icmp_mask);
(void)snprintf(buf, sizeof(buf), "address mask is 0x%08x",
EXTRACT_32BITS(&dp->icmp_mask));
break;
case ICMP_TSTAMP:
ND_TCHECK(dp->icmp_seq);
(void)snprintf(buf, sizeof(buf),
"time stamp query id %u seq %u",
EXTRACT_16BITS(&dp->icmp_id),
EXTRACT_16BITS(&dp->icmp_seq));
break;
case ICMP_TSTAMPREPLY:
ND_TCHECK(dp->icmp_ttime);
(void)snprintf(buf, sizeof(buf),
"time stamp reply id %u seq %u: org %s",
EXTRACT_16BITS(&dp->icmp_id),
EXTRACT_16BITS(&dp->icmp_seq),
icmp_tstamp_print(EXTRACT_32BITS(&dp->icmp_otime)));
(void)snprintf(buf+strlen(buf),sizeof(buf)-strlen(buf),", recv %s",
icmp_tstamp_print(EXTRACT_32BITS(&dp->icmp_rtime)));
(void)snprintf(buf+strlen(buf),sizeof(buf)-strlen(buf),", xmit %s",
icmp_tstamp_print(EXTRACT_32BITS(&dp->icmp_ttime)));
break;
default:
str = tok2str(icmp2str, "type-#%d", dp->icmp_type);
break;
}
ND_PRINT((ndo, "ICMP %s, length %u", str, plen));
if (ndo->ndo_vflag && !fragmented) { /* don't attempt checksumming if this is a frag */
uint16_t sum, icmp_sum;
if (ND_TTEST2(*bp, plen)) {
vec[0].ptr = (const uint8_t *)(const void *)dp;
vec[0].len = plen;
sum = in_cksum(vec, 1);
if (sum != 0) {
icmp_sum = EXTRACT_16BITS(&dp->icmp_cksum);
ND_PRINT((ndo, " (wrong icmp cksum %x (->%x)!)",
icmp_sum,
in_cksum_shouldbe(icmp_sum, sum)));
}
}
}
/*
* print the remnants of the IP packet.
* save the snaplength as this may get overidden in the IP printer.
*/
if (ndo->ndo_vflag >= 1 && ICMP_ERRTYPE(dp->icmp_type)) {
bp += 8;
ND_PRINT((ndo, "\n\t"));
ip = (const struct ip *)bp;
snapend_save = ndo->ndo_snapend;
ip_print(ndo, bp, EXTRACT_16BITS(&ip->ip_len));
ndo->ndo_snapend = snapend_save;
}
/*
* Attempt to decode the MPLS extensions only for some ICMP types.
*/
if (ndo->ndo_vflag >= 1 && plen > ICMP_EXTD_MINLEN && ICMP_MPLS_EXT_TYPE(dp->icmp_type)) {
ND_TCHECK(*ext_dp);
/*
* Check first if the mpls extension header shows a non-zero length.
* If the length field is not set then silently verify the checksum
* to check if an extension header is present. This is expedient,
* however not all implementations set the length field proper.
*/
if (!ext_dp->icmp_length &&
ND_TTEST2(ext_dp->icmp_ext_version_res, plen - ICMP_EXTD_MINLEN)) {
vec[0].ptr = (const uint8_t *)(const void *)&ext_dp->icmp_ext_version_res;
vec[0].len = plen - ICMP_EXTD_MINLEN;
if (in_cksum(vec, 1)) {
return;
}
}
ND_PRINT((ndo, "\n\tMPLS extension v%u",
ICMP_MPLS_EXT_EXTRACT_VERSION(*(ext_dp->icmp_ext_version_res))));
/*
* Sanity checking of the header.
*/
if (ICMP_MPLS_EXT_EXTRACT_VERSION(*(ext_dp->icmp_ext_version_res)) !=
ICMP_MPLS_EXT_VERSION) {
ND_PRINT((ndo, " packet not supported"));
return;
}
hlen = plen - ICMP_EXTD_MINLEN;
if (ND_TTEST2(ext_dp->icmp_ext_version_res, hlen)) {
vec[0].ptr = (const uint8_t *)(const void *)&ext_dp->icmp_ext_version_res;
vec[0].len = hlen;
ND_PRINT((ndo, ", checksum 0x%04x (%scorrect), length %u",
EXTRACT_16BITS(ext_dp->icmp_ext_checksum),
in_cksum(vec, 1) ? "in" : "",
hlen));
}
hlen -= 4; /* subtract common header size */
obj_tptr = (const uint8_t *)ext_dp->icmp_ext_data;
while (hlen > sizeof(struct icmp_mpls_ext_object_header_t)) {
icmp_mpls_ext_object_header = (const struct icmp_mpls_ext_object_header_t *)obj_tptr;
ND_TCHECK(*icmp_mpls_ext_object_header);
obj_tlen = EXTRACT_16BITS(icmp_mpls_ext_object_header->length);
obj_class_num = icmp_mpls_ext_object_header->class_num;
obj_ctype = icmp_mpls_ext_object_header->ctype;
obj_tptr += sizeof(struct icmp_mpls_ext_object_header_t);
ND_PRINT((ndo, "\n\t %s Object (%u), Class-Type: %u, length %u",
tok2str(icmp_mpls_ext_obj_values,"unknown",obj_class_num),
obj_class_num,
obj_ctype,
obj_tlen));
hlen-=sizeof(struct icmp_mpls_ext_object_header_t); /* length field includes tlv header */
/* infinite loop protection */
if ((obj_class_num == 0) ||
(obj_tlen < sizeof(struct icmp_mpls_ext_object_header_t))) {
return;
}
obj_tlen-=sizeof(struct icmp_mpls_ext_object_header_t);
switch (obj_class_num) {
case 1:
switch(obj_ctype) {
case 1:
ND_TCHECK2(*obj_tptr, 4);
raw_label = EXTRACT_32BITS(obj_tptr);
ND_PRINT((ndo, "\n\t label %u, exp %u", MPLS_LABEL(raw_label), MPLS_EXP(raw_label)));
if (MPLS_STACK(raw_label))
ND_PRINT((ndo, ", [S]"));
ND_PRINT((ndo, ", ttl %u", MPLS_TTL(raw_label)));
break;
default:
print_unknown_data(ndo, obj_tptr, "\n\t ", obj_tlen);
}
break;
/*
* FIXME those are the defined objects that lack a decoder
* you are welcome to contribute code ;-)
*/
case 2:
default:
print_unknown_data(ndo, obj_tptr, "\n\t ", obj_tlen);
break;
}
if (hlen < obj_tlen)
break;
hlen -= obj_tlen;
obj_tptr += obj_tlen;
}
}
return;
trunc:
ND_PRINT((ndo, "[|icmp]"));
}
Commit Message: CVE-2017-13012/ICMP: Add a missing bounds check.
Check before fetching the length from the included packet's IPv4 header.
This fixes a buffer over-read discovered by Bhargava Shastry,
SecT/TU Berlin.
Add a test using the capture file supplied by the reporter(s), modified
so the capture file won't be rejected as an invalid capture.
CWE ID: CWE-125 | icmp_print(netdissect_options *ndo, const u_char *bp, u_int plen, const u_char *bp2,
int fragmented)
{
char *cp;
const struct icmp *dp;
const struct icmp_ext_t *ext_dp;
const struct ip *ip;
const char *str, *fmt;
const struct ip *oip;
const struct udphdr *ouh;
const uint8_t *obj_tptr;
uint32_t raw_label;
const u_char *snapend_save;
const struct icmp_mpls_ext_object_header_t *icmp_mpls_ext_object_header;
u_int hlen, dport, mtu, obj_tlen, obj_class_num, obj_ctype;
char buf[MAXHOSTNAMELEN + 100];
struct cksum_vec vec[1];
dp = (const struct icmp *)bp;
ext_dp = (const struct icmp_ext_t *)bp;
ip = (const struct ip *)bp2;
str = buf;
ND_TCHECK(dp->icmp_code);
switch (dp->icmp_type) {
case ICMP_ECHO:
case ICMP_ECHOREPLY:
ND_TCHECK(dp->icmp_seq);
(void)snprintf(buf, sizeof(buf), "echo %s, id %u, seq %u",
dp->icmp_type == ICMP_ECHO ?
"request" : "reply",
EXTRACT_16BITS(&dp->icmp_id),
EXTRACT_16BITS(&dp->icmp_seq));
break;
case ICMP_UNREACH:
ND_TCHECK(dp->icmp_ip.ip_dst);
switch (dp->icmp_code) {
case ICMP_UNREACH_PROTOCOL:
ND_TCHECK(dp->icmp_ip.ip_p);
(void)snprintf(buf, sizeof(buf),
"%s protocol %d unreachable",
ipaddr_string(ndo, &dp->icmp_ip.ip_dst),
dp->icmp_ip.ip_p);
break;
case ICMP_UNREACH_PORT:
ND_TCHECK(dp->icmp_ip.ip_p);
oip = &dp->icmp_ip;
hlen = IP_HL(oip) * 4;
ouh = (const struct udphdr *)(((const u_char *)oip) + hlen);
ND_TCHECK(ouh->uh_dport);
dport = EXTRACT_16BITS(&ouh->uh_dport);
switch (oip->ip_p) {
case IPPROTO_TCP:
(void)snprintf(buf, sizeof(buf),
"%s tcp port %s unreachable",
ipaddr_string(ndo, &oip->ip_dst),
tcpport_string(ndo, dport));
break;
case IPPROTO_UDP:
(void)snprintf(buf, sizeof(buf),
"%s udp port %s unreachable",
ipaddr_string(ndo, &oip->ip_dst),
udpport_string(ndo, dport));
break;
default:
(void)snprintf(buf, sizeof(buf),
"%s protocol %d port %d unreachable",
ipaddr_string(ndo, &oip->ip_dst),
oip->ip_p, dport);
break;
}
break;
case ICMP_UNREACH_NEEDFRAG:
{
register const struct mtu_discovery *mp;
mp = (const struct mtu_discovery *)(const u_char *)&dp->icmp_void;
mtu = EXTRACT_16BITS(&mp->nexthopmtu);
if (mtu) {
(void)snprintf(buf, sizeof(buf),
"%s unreachable - need to frag (mtu %d)",
ipaddr_string(ndo, &dp->icmp_ip.ip_dst), mtu);
} else {
(void)snprintf(buf, sizeof(buf),
"%s unreachable - need to frag",
ipaddr_string(ndo, &dp->icmp_ip.ip_dst));
}
}
break;
default:
fmt = tok2str(unreach2str, "#%d %%s unreachable",
dp->icmp_code);
(void)snprintf(buf, sizeof(buf), fmt,
ipaddr_string(ndo, &dp->icmp_ip.ip_dst));
break;
}
break;
case ICMP_REDIRECT:
ND_TCHECK(dp->icmp_ip.ip_dst);
fmt = tok2str(type2str, "redirect-#%d %%s to net %%s",
dp->icmp_code);
(void)snprintf(buf, sizeof(buf), fmt,
ipaddr_string(ndo, &dp->icmp_ip.ip_dst),
ipaddr_string(ndo, &dp->icmp_gwaddr));
break;
case ICMP_ROUTERADVERT:
{
register const struct ih_rdiscovery *ihp;
register const struct id_rdiscovery *idp;
u_int lifetime, num, size;
(void)snprintf(buf, sizeof(buf), "router advertisement");
cp = buf + strlen(buf);
ihp = (const struct ih_rdiscovery *)&dp->icmp_void;
ND_TCHECK(*ihp);
(void)strncpy(cp, " lifetime ", sizeof(buf) - (cp - buf));
cp = buf + strlen(buf);
lifetime = EXTRACT_16BITS(&ihp->ird_lifetime);
if (lifetime < 60) {
(void)snprintf(cp, sizeof(buf) - (cp - buf), "%u",
lifetime);
} else if (lifetime < 60 * 60) {
(void)snprintf(cp, sizeof(buf) - (cp - buf), "%u:%02u",
lifetime / 60, lifetime % 60);
} else {
(void)snprintf(cp, sizeof(buf) - (cp - buf),
"%u:%02u:%02u",
lifetime / 3600,
(lifetime % 3600) / 60,
lifetime % 60);
}
cp = buf + strlen(buf);
num = ihp->ird_addrnum;
(void)snprintf(cp, sizeof(buf) - (cp - buf), " %d:", num);
cp = buf + strlen(buf);
size = ihp->ird_addrsiz;
if (size != 2) {
(void)snprintf(cp, sizeof(buf) - (cp - buf),
" [size %d]", size);
break;
}
idp = (const struct id_rdiscovery *)&dp->icmp_data;
while (num-- > 0) {
ND_TCHECK(*idp);
(void)snprintf(cp, sizeof(buf) - (cp - buf), " {%s %u}",
ipaddr_string(ndo, &idp->ird_addr),
EXTRACT_32BITS(&idp->ird_pref));
cp = buf + strlen(buf);
++idp;
}
}
break;
case ICMP_TIMXCEED:
ND_TCHECK(dp->icmp_ip.ip_dst);
switch (dp->icmp_code) {
case ICMP_TIMXCEED_INTRANS:
str = "time exceeded in-transit";
break;
case ICMP_TIMXCEED_REASS:
str = "ip reassembly time exceeded";
break;
default:
(void)snprintf(buf, sizeof(buf), "time exceeded-#%d",
dp->icmp_code);
break;
}
break;
case ICMP_PARAMPROB:
if (dp->icmp_code)
(void)snprintf(buf, sizeof(buf),
"parameter problem - code %d", dp->icmp_code);
else {
ND_TCHECK(dp->icmp_pptr);
(void)snprintf(buf, sizeof(buf),
"parameter problem - octet %d", dp->icmp_pptr);
}
break;
case ICMP_MASKREPLY:
ND_TCHECK(dp->icmp_mask);
(void)snprintf(buf, sizeof(buf), "address mask is 0x%08x",
EXTRACT_32BITS(&dp->icmp_mask));
break;
case ICMP_TSTAMP:
ND_TCHECK(dp->icmp_seq);
(void)snprintf(buf, sizeof(buf),
"time stamp query id %u seq %u",
EXTRACT_16BITS(&dp->icmp_id),
EXTRACT_16BITS(&dp->icmp_seq));
break;
case ICMP_TSTAMPREPLY:
ND_TCHECK(dp->icmp_ttime);
(void)snprintf(buf, sizeof(buf),
"time stamp reply id %u seq %u: org %s",
EXTRACT_16BITS(&dp->icmp_id),
EXTRACT_16BITS(&dp->icmp_seq),
icmp_tstamp_print(EXTRACT_32BITS(&dp->icmp_otime)));
(void)snprintf(buf+strlen(buf),sizeof(buf)-strlen(buf),", recv %s",
icmp_tstamp_print(EXTRACT_32BITS(&dp->icmp_rtime)));
(void)snprintf(buf+strlen(buf),sizeof(buf)-strlen(buf),", xmit %s",
icmp_tstamp_print(EXTRACT_32BITS(&dp->icmp_ttime)));
break;
default:
str = tok2str(icmp2str, "type-#%d", dp->icmp_type);
break;
}
ND_PRINT((ndo, "ICMP %s, length %u", str, plen));
if (ndo->ndo_vflag && !fragmented) { /* don't attempt checksumming if this is a frag */
uint16_t sum, icmp_sum;
if (ND_TTEST2(*bp, plen)) {
vec[0].ptr = (const uint8_t *)(const void *)dp;
vec[0].len = plen;
sum = in_cksum(vec, 1);
if (sum != 0) {
icmp_sum = EXTRACT_16BITS(&dp->icmp_cksum);
ND_PRINT((ndo, " (wrong icmp cksum %x (->%x)!)",
icmp_sum,
in_cksum_shouldbe(icmp_sum, sum)));
}
}
}
/*
* print the remnants of the IP packet.
* save the snaplength as this may get overidden in the IP printer.
*/
if (ndo->ndo_vflag >= 1 && ICMP_ERRTYPE(dp->icmp_type)) {
bp += 8;
ND_PRINT((ndo, "\n\t"));
ip = (const struct ip *)bp;
snapend_save = ndo->ndo_snapend;
ND_TCHECK_16BITS(&ip->ip_len);
ip_print(ndo, bp, EXTRACT_16BITS(&ip->ip_len));
ndo->ndo_snapend = snapend_save;
}
/*
* Attempt to decode the MPLS extensions only for some ICMP types.
*/
if (ndo->ndo_vflag >= 1 && plen > ICMP_EXTD_MINLEN && ICMP_MPLS_EXT_TYPE(dp->icmp_type)) {
ND_TCHECK(*ext_dp);
/*
* Check first if the mpls extension header shows a non-zero length.
* If the length field is not set then silently verify the checksum
* to check if an extension header is present. This is expedient,
* however not all implementations set the length field proper.
*/
if (!ext_dp->icmp_length &&
ND_TTEST2(ext_dp->icmp_ext_version_res, plen - ICMP_EXTD_MINLEN)) {
vec[0].ptr = (const uint8_t *)(const void *)&ext_dp->icmp_ext_version_res;
vec[0].len = plen - ICMP_EXTD_MINLEN;
if (in_cksum(vec, 1)) {
return;
}
}
ND_PRINT((ndo, "\n\tMPLS extension v%u",
ICMP_MPLS_EXT_EXTRACT_VERSION(*(ext_dp->icmp_ext_version_res))));
/*
* Sanity checking of the header.
*/
if (ICMP_MPLS_EXT_EXTRACT_VERSION(*(ext_dp->icmp_ext_version_res)) !=
ICMP_MPLS_EXT_VERSION) {
ND_PRINT((ndo, " packet not supported"));
return;
}
hlen = plen - ICMP_EXTD_MINLEN;
if (ND_TTEST2(ext_dp->icmp_ext_version_res, hlen)) {
vec[0].ptr = (const uint8_t *)(const void *)&ext_dp->icmp_ext_version_res;
vec[0].len = hlen;
ND_PRINT((ndo, ", checksum 0x%04x (%scorrect), length %u",
EXTRACT_16BITS(ext_dp->icmp_ext_checksum),
in_cksum(vec, 1) ? "in" : "",
hlen));
}
hlen -= 4; /* subtract common header size */
obj_tptr = (const uint8_t *)ext_dp->icmp_ext_data;
while (hlen > sizeof(struct icmp_mpls_ext_object_header_t)) {
icmp_mpls_ext_object_header = (const struct icmp_mpls_ext_object_header_t *)obj_tptr;
ND_TCHECK(*icmp_mpls_ext_object_header);
obj_tlen = EXTRACT_16BITS(icmp_mpls_ext_object_header->length);
obj_class_num = icmp_mpls_ext_object_header->class_num;
obj_ctype = icmp_mpls_ext_object_header->ctype;
obj_tptr += sizeof(struct icmp_mpls_ext_object_header_t);
ND_PRINT((ndo, "\n\t %s Object (%u), Class-Type: %u, length %u",
tok2str(icmp_mpls_ext_obj_values,"unknown",obj_class_num),
obj_class_num,
obj_ctype,
obj_tlen));
hlen-=sizeof(struct icmp_mpls_ext_object_header_t); /* length field includes tlv header */
/* infinite loop protection */
if ((obj_class_num == 0) ||
(obj_tlen < sizeof(struct icmp_mpls_ext_object_header_t))) {
return;
}
obj_tlen-=sizeof(struct icmp_mpls_ext_object_header_t);
switch (obj_class_num) {
case 1:
switch(obj_ctype) {
case 1:
ND_TCHECK2(*obj_tptr, 4);
raw_label = EXTRACT_32BITS(obj_tptr);
ND_PRINT((ndo, "\n\t label %u, exp %u", MPLS_LABEL(raw_label), MPLS_EXP(raw_label)));
if (MPLS_STACK(raw_label))
ND_PRINT((ndo, ", [S]"));
ND_PRINT((ndo, ", ttl %u", MPLS_TTL(raw_label)));
break;
default:
print_unknown_data(ndo, obj_tptr, "\n\t ", obj_tlen);
}
break;
/*
* FIXME those are the defined objects that lack a decoder
* you are welcome to contribute code ;-)
*/
case 2:
default:
print_unknown_data(ndo, obj_tptr, "\n\t ", obj_tlen);
break;
}
if (hlen < obj_tlen)
break;
hlen -= obj_tlen;
obj_tptr += obj_tlen;
}
}
return;
trunc:
ND_PRINT((ndo, "[|icmp]"));
}
| 1,068 |
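On the added ND_TCHECK_16BITS(&ip->ip_len) above: tcpdump's ND_TCHECK* macros verify that the bytes about to be read lie within the captured data (ndo_snapend) and jump to the trunc: label otherwise. A simplified stand-alone sketch of that bounds check, with an illustrative name rather than the real macro:

#include <stddef.h>

/* Illustrative version of the check ND_TCHECK_16BITS performs before
 * EXTRACT_16BITS is allowed to dereference two bytes of the embedded
 * IPv4 header's ip_len field. */
static int within_capture(const unsigned char *p, size_t need,
                          const unsigned char *snapend)
{
    return p + need <= snapend;
}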
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void DownloadController::OnDownloadStarted(
DownloadItem* download_item) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
WebContents* web_contents = download_item->GetWebContents();
if (!web_contents)
return;
download_item->AddObserver(this);
ChromeDownloadDelegate::FromWebContents(web_contents)->OnDownloadStarted(
download_item->GetTargetFilePath().BaseName().value(),
download_item->GetMimeType());
}
Commit Message: Clean up Android DownloadManager code as most download now go through Chrome Network stack
The only exception is OMA DRM download.
And it only applies to context menu download interception.
Clean up the remaining unused code now.
BUG=647755
Review-Url: https://codereview.chromium.org/2371773003
Cr-Commit-Position: refs/heads/master@{#421332}
CWE ID: CWE-254 | void DownloadController::OnDownloadStarted(
DownloadItem* download_item) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
WebContents* web_contents = download_item->GetWebContents();
if (!web_contents)
return;
download_item->AddObserver(this);
ChromeDownloadDelegate::FromWebContents(web_contents)->OnDownloadStarted(
download_item->GetTargetFilePath().BaseName().value());
}
| 24,084 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int32_t PPB_Flash_MessageLoop_Impl::InternalRun(
const RunFromHostProxyCallback& callback) {
if (state_->run_called()) {
if (!callback.is_null())
callback.Run(PP_ERROR_FAILED);
return PP_ERROR_FAILED;
}
state_->set_run_called();
state_->set_run_callback(callback);
scoped_refptr<State> state_protector(state_);
{
base::MessageLoop::ScopedNestableTaskAllower allow(
base::MessageLoop::current());
base::MessageLoop::current()->Run();
}
return state_protector->result();
}
Commit Message: Fix PPB_Flash_MessageLoop.
This CL suspends script callbacks and resource loads while running nested message loop using PPB_Flash_MessageLoop.
BUG=569496
Review URL: https://codereview.chromium.org/1559113002
Cr-Commit-Position: refs/heads/master@{#374529}
CWE ID: CWE-264 | int32_t PPB_Flash_MessageLoop_Impl::InternalRun(
const RunFromHostProxyCallback& callback) {
if (state_->run_called()) {
if (!callback.is_null())
callback.Run(PP_ERROR_FAILED);
return PP_ERROR_FAILED;
}
state_->set_run_called();
state_->set_run_callback(callback);
scoped_refptr<State> state_protector(state_);
{
base::MessageLoop::ScopedNestableTaskAllower allow(
base::MessageLoop::current());
blink::WebView::willEnterModalLoop();
base::MessageLoop::current()->Run();
blink::WebView::didExitModalLoop();
}
return state_protector->result();
}
| 1,000 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: do_decrypt (const RIJNDAEL_context *ctx, unsigned char *bx,
const unsigned char *ax)
{
#ifdef USE_AMD64_ASM
return _gcry_aes_amd64_decrypt_block(ctx->keyschdec, bx, ax, ctx->rounds,
&dec_tables);
#elif defined(USE_ARM_ASM)
return _gcry_aes_arm_decrypt_block(ctx->keyschdec, bx, ax, ctx->rounds,
&dec_tables);
#else
return do_decrypt_fn (ctx, bx, ax);
#endif /*!USE_ARM_ASM && !USE_AMD64_ASM*/
}
Commit Message: AES: move look-up tables to .data section and unshare between processes
* cipher/rijndael-internal.h (ATTR_ALIGNED_64): New.
* cipher/rijndael-tables.h (encT): Move to 'enc_tables' structure.
(enc_tables): New structure for encryption table with counters before
and after.
(encT): New macro.
(dec_tables): Add counters before and after encryption table; Move
from .rodata to .data section.
(do_encrypt): Change 'encT' to 'enc_tables.T'.
(do_decrypt): Change '&dec_tables' to 'dec_tables.T'.
* cipher/cipher-gcm.c (prefetch_table): Make inline; Handle input
with length not multiple of 256.
(prefetch_enc, prefetch_dec): Modify pre- and post-table counters
to unshare look-up table pages between processes.
--
GnuPG-bug-id: 4541
Signed-off-by: Jussi Kivilinna <[email protected]>
CWE ID: CWE-310 | do_decrypt (const RIJNDAEL_context *ctx, unsigned char *bx,
const unsigned char *ax)
{
#ifdef USE_AMD64_ASM
return _gcry_aes_amd64_decrypt_block(ctx->keyschdec, bx, ax, ctx->rounds,
dec_tables.T);
#elif defined(USE_ARM_ASM)
return _gcry_aes_arm_decrypt_block(ctx->keyschdec, bx, ax, ctx->rounds,
dec_tables.T);
#else
return do_decrypt_fn (ctx, bx, ax);
#endif /*!USE_ARM_ASM && !USE_AMD64_ASM*/
}
| 8,828 |
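The do_decrypt change above is only the call-site half of the fix; the main point of the commit message is the table layout. A rough sketch of that idea, with invented names rather than the exact libgcrypt definitions: the look-up table sits in a writable, 64-byte-aligned structure with counters on both sides, and the prefetch routine reads every cache line and bumps the counters, so each process dirties (and therefore privately copies) the table pages instead of sharing them, closing the cross-process Flush+Reload channel.

#include <stdint.h>
#include <stddef.h>

typedef struct {
    volatile uint32_t counter_head;   /* written below => pages get unshared */
    uint32_t pad[15];                 /* keep T on its own cache lines */
    uint32_t T[256 * 4];              /* the AES look-up table proper */
    volatile uint32_t counter_tail;
} aes_tables_t __attribute__((aligned(64)));

static aes_tables_t dec_tables;       /* filled with the usual tables elsewhere */

static void prefetch_dec_tables(void)
{
    const volatile uint8_t *p = (const volatile uint8_t *)&dec_tables;
    uint8_t tmp = 0;
    size_t i;
    for (i = 0; i < sizeof(dec_tables); i += 64)
        tmp ^= p[i];                  /* pull every cache line in */
    (void)tmp;
    dec_tables.counter_head++;        /* write => copy-on-write, per-process copy */
    dec_tables.counter_tail++;
}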
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: OMX_ERRORTYPE SoftAACEncoder::internalSetParameter(
OMX_INDEXTYPE index, const OMX_PTR params) {
switch (index) {
case OMX_IndexParamStandardComponentRole:
{
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
(const OMX_PARAM_COMPONENTROLETYPE *)params;
if (strncmp((const char *)roleParams->cRole,
"audio_encoder.aac",
OMX_MAX_STRINGNAME_SIZE - 1)) {
return OMX_ErrorUndefined;
}
return OMX_ErrorNone;
}
case OMX_IndexParamAudioPortFormat:
{
const OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
(const OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
if (formatParams->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
if (formatParams->nIndex > 0) {
return OMX_ErrorNoMore;
}
if ((formatParams->nPortIndex == 0
&& formatParams->eEncoding != OMX_AUDIO_CodingPCM)
|| (formatParams->nPortIndex == 1
&& formatParams->eEncoding != OMX_AUDIO_CodingAAC)) {
return OMX_ErrorUndefined;
}
return OMX_ErrorNone;
}
case OMX_IndexParamAudioAac:
{
OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
(OMX_AUDIO_PARAM_AACPROFILETYPE *)params;
if (aacParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
mBitRate = aacParams->nBitRate;
mNumChannels = aacParams->nChannels;
mSampleRate = aacParams->nSampleRate;
if (setAudioParams() != OK) {
return OMX_ErrorUndefined;
}
return OMX_ErrorNone;
}
case OMX_IndexParamAudioPcm:
{
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
if (pcmParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
mNumChannels = pcmParams->nChannels;
mSampleRate = pcmParams->nSamplingRate;
if (setAudioParams() != OK) {
return OMX_ErrorUndefined;
}
return OMX_ErrorNone;
}
default:
return SimpleSoftOMXComponent::internalSetParameter(index, params);
}
}
Commit Message: DO NOT MERGE Verify OMX buffer sizes prior to access
Bug: 27207275
Change-Id: I4412825d1ee233d993af0a67708bea54304ff62d
CWE ID: CWE-119 | OMX_ERRORTYPE SoftAACEncoder::internalSetParameter(
OMX_INDEXTYPE index, const OMX_PTR params) {
switch (index) {
case OMX_IndexParamStandardComponentRole:
{
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
(const OMX_PARAM_COMPONENTROLETYPE *)params;
if (!isValidOMXParam(roleParams)) {
return OMX_ErrorBadParameter;
}
if (strncmp((const char *)roleParams->cRole,
"audio_encoder.aac",
OMX_MAX_STRINGNAME_SIZE - 1)) {
return OMX_ErrorUndefined;
}
return OMX_ErrorNone;
}
case OMX_IndexParamAudioPortFormat:
{
const OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
(const OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
if (!isValidOMXParam(formatParams)) {
return OMX_ErrorBadParameter;
}
if (formatParams->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
if (formatParams->nIndex > 0) {
return OMX_ErrorNoMore;
}
if ((formatParams->nPortIndex == 0
&& formatParams->eEncoding != OMX_AUDIO_CodingPCM)
|| (formatParams->nPortIndex == 1
&& formatParams->eEncoding != OMX_AUDIO_CodingAAC)) {
return OMX_ErrorUndefined;
}
return OMX_ErrorNone;
}
case OMX_IndexParamAudioAac:
{
OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
(OMX_AUDIO_PARAM_AACPROFILETYPE *)params;
if (!isValidOMXParam(aacParams)) {
return OMX_ErrorBadParameter;
}
if (aacParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
mBitRate = aacParams->nBitRate;
mNumChannels = aacParams->nChannels;
mSampleRate = aacParams->nSampleRate;
if (setAudioParams() != OK) {
return OMX_ErrorUndefined;
}
return OMX_ErrorNone;
}
case OMX_IndexParamAudioPcm:
{
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
if (!isValidOMXParam(pcmParams)) {
return OMX_ErrorBadParameter;
}
if (pcmParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
mNumChannels = pcmParams->nChannels;
mSampleRate = pcmParams->nSamplingRate;
if (setAudioParams() != OK) {
return OMX_ErrorUndefined;
}
return OMX_ErrorNone;
}
default:
return SimpleSoftOMXComponent::internalSetParameter(index, params);
}
}
| 21,069 |
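For reference on the isValidOMXParam() calls added above: the AOSP helper essentially checks that the caller-supplied OMX parameter structure is at least as large as the type it is cast to, since every OMX_*TYPE begins with an nSize field filled in by the caller. A hedged sketch of the idea follows; the real helper is a template with additional static_asserts, so this is not the exact AOSP code.

// Sketch only: the core size check behind isValidOMXParam().
template <typename T>
static bool isValidOMXParamSketch(const T *params) {
    // If the caller allocated fewer bytes than sizeof(T), reading fields
    // beyond nSize/nVersion would run past the end of the buffer.
    return params != nullptr && params->nSize >= sizeof(T);
}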
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void impeg2d_peek_next_start_code(dec_state_t *ps_dec)
{
stream_t *ps_stream;
ps_stream = &ps_dec->s_bit_stream;
impeg2d_bit_stream_flush_to_byte_boundary(ps_stream);
while ((impeg2d_bit_stream_nxt(ps_stream,START_CODE_PREFIX_LEN) != START_CODE_PREFIX)
&& (ps_dec->s_bit_stream.u4_offset <= ps_dec->s_bit_stream.u4_max_offset))
{
impeg2d_bit_stream_get(ps_stream,8);
}
return;
}
Commit Message: Fixed bit stream access to make sure that it is not read beyond the allocated size.
Bug: 25765591
Change-Id: I98c23a3c3f84f6710f29bffe5ed73adcf51d47f6
CWE ID: CWE-254 | void impeg2d_peek_next_start_code(dec_state_t *ps_dec)
{
stream_t *ps_stream;
ps_stream = &ps_dec->s_bit_stream;
impeg2d_bit_stream_flush_to_byte_boundary(ps_stream);
while ((impeg2d_bit_stream_nxt(ps_stream,START_CODE_PREFIX_LEN) != START_CODE_PREFIX)
&& (ps_dec->s_bit_stream.u4_offset < ps_dec->s_bit_stream.u4_max_offset))
{
impeg2d_bit_stream_get(ps_stream,8);
}
return;
}
| 3,196 |
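The only functional change in this fix is the loop bound: u4_offset <= u4_max_offset became u4_offset < u4_max_offset, so the byte-flush loop can no longer execute once the offset has already reached the end of the allocated stream. The standalone sketch below uses made-up names unrelated to the decoder's API and simply demonstrates why an inclusive comparison performs exactly one extra access.

#include <stdio.h>
#include <stddef.h>

/* Count how many positions a scan loop would touch in a buffer of `len`
 * bytes.  With the inclusive (<=) bound the loop body also runs when
 * off == len, i.e. one element past the end of the allocation. */
static size_t count_accesses(size_t len, int inclusive)
{
    size_t off = 0, touched = 0;
    while (inclusive ? (off <= len) : (off < len)) {
        touched++;          /* a real parser would read buf[off] here */
        off++;
    }
    return touched;
}

int main(void)
{
    printf("strict <    : %zu accesses\n", count_accesses(4, 0));  /* 4 */
    printf("inclusive <=: %zu accesses\n", count_accesses(4, 1));  /* 5, one past the end */
    return 0;
}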
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: char *M_fs_path_tmpdir(M_fs_system_t sys_type)
{
char *d = NULL;
char *out = NULL;
M_fs_error_t res;
#ifdef _WIN32
size_t len = M_fs_path_get_path_max(M_FS_SYSTEM_WINDOWS)+1;
d = M_malloc_zero(len);
/* Return is length without NULL. */
if (GetTempPath((DWORD)len, d) >= len) {
M_free(d);
d = NULL;
}
#elif defined(__APPLE__)
d = M_fs_path_mac_tmpdir();
#else
const char *const_temp;
/* Try Unix env var. */
# ifdef HAVE_SECURE_GETENV
const_temp = secure_getenv("TMPDIR");
# else
const_temp = getenv("TMPDIR");
# endif
if (!M_str_isempty(const_temp) && M_fs_perms_can_access(const_temp, M_FS_FILE_MODE_READ|M_FS_FILE_MODE_WRITE) == M_FS_ERROR_SUCCESS) {
d = M_strdup(const_temp);
}
/* Fallback to some "standard" system paths. */
if (d == NULL) {
const_temp = "/tmp";
if (!M_str_isempty(const_temp) && M_fs_perms_can_access(const_temp, M_FS_FILE_MODE_READ|M_FS_FILE_MODE_WRITE) == M_FS_ERROR_SUCCESS) {
d = M_strdup(const_temp);
}
}
if (d == NULL) {
const_temp = "/var/tmp";
if (!M_str_isempty(const_temp) && M_fs_perms_can_access(const_temp, M_FS_FILE_MODE_READ|M_FS_FILE_MODE_WRITE) == M_FS_ERROR_SUCCESS) {
d = M_strdup(const_temp);
}
}
#endif
if (d != NULL) {
res = M_fs_path_norm(&out, d, M_FS_PATH_NORM_ABSOLUTE, sys_type);
if (res != M_FS_ERROR_SUCCESS) {
out = NULL;
}
}
M_free(d);
return out;
}
Commit Message: fs: Don't try to delete the file when copying. It could cause a security issue if the file exists and doesn't allow other's to read/write. delete could allow someone to create the file and have access to the data.
CWE ID: CWE-732 | char *M_fs_path_tmpdir(M_fs_system_t sys_type)
{
char *d = NULL;
char *out = NULL;
M_fs_error_t res;
#ifdef _WIN32
size_t len = M_fs_path_get_path_max(M_FS_SYSTEM_WINDOWS)+1;
d = M_malloc_zero(len);
/* Return is length without NULL. */
if (GetTempPath((DWORD)len, d) >= len) {
M_free(d);
d = NULL;
}
#elif defined(__APPLE__)
d = M_fs_path_mac_tmpdir();
#else
const char *const_temp;
/* Unix doens't have a fancy function to get the standard
* temporary directory an application can use. Instead there
* is a convoluted set of possible paths that could be used.
*
* We're going to go though each one in a priority order and
* verify if we can read and write the directory. If so then
* that's the one that will be used. We are fine using access
* here because it doesn't matter if the path ends up being
* changed out from underneath us later on. When it's used
* at that time it will fail. Right now we just want to get
* a path that can be tried. */
/* Try Unix env vars.
*
* This is not ideal but a valid way to set the temporary directory
* for a user. Per Single Unix Specification 4 and probably other things.
*/
# ifdef HAVE_SECURE_GETENV
const_temp = secure_getenv("TMPDIR");
# else
const_temp = getenv("TMPDIR");
# endif
if (!M_str_isempty(const_temp) && M_fs_perms_can_access(const_temp, M_FS_FILE_MODE_READ|M_FS_FILE_MODE_WRITE) == M_FS_ERROR_SUCCESS) {
d = M_strdup(const_temp);
}
/* Fallback to some "standard" system paths. */
if (d == NULL) {
const_temp = "/tmp";
if (!M_str_isempty(const_temp) && M_fs_perms_can_access(const_temp, M_FS_FILE_MODE_READ|M_FS_FILE_MODE_WRITE) == M_FS_ERROR_SUCCESS) {
d = M_strdup(const_temp);
}
}
if (d == NULL) {
const_temp = "/var/tmp";
if (!M_str_isempty(const_temp) && M_fs_perms_can_access(const_temp, M_FS_FILE_MODE_READ|M_FS_FILE_MODE_WRITE) == M_FS_ERROR_SUCCESS) {
d = M_strdup(const_temp);
}
}
#endif
if (d != NULL) {
res = M_fs_path_norm(&out, d, M_FS_PATH_NORM_ABSOLUTE, sys_type);
if (res != M_FS_ERROR_SUCCESS) {
out = NULL;
}
}
M_free(d);
return out;
}
| 28,592 |
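The fixed version adds comments spelling out why probing candidate directories with an access check is acceptable in this spot: the check only selects a candidate, and whatever later opens or creates files there still has to handle the directory changing underneath the process. Below is a rough POSIX-only sketch of that candidate walk; the helper name pick_tmpdir, the plain getenv() call (the library prefers secure_getenv() when it is available), and the fixed three-entry candidate list are illustrative assumptions, not the mstdlib API.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

const char *pick_tmpdir(void)
{
    const char *candidates[3];
    int i;

    candidates[0] = getenv("TMPDIR");   /* sketch: real code may use secure_getenv() */
    candidates[1] = "/tmp";
    candidates[2] = "/var/tmp";

    for (i = 0; i < 3; i++) {
        const char *d = candidates[i];
        /* access() only screens candidates; the eventual open/create must
         * still cope with the directory changing or disappearing later. */
        if (d != NULL && *d != '\0' && access(d, R_OK | W_OK) == 0)
            return d;
    }
    return NULL;
}

int main(void)
{
    const char *d = pick_tmpdir();
    printf("temporary directory candidate: %s\n", d ? d : "(none)");
    return 0;
}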
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: device_has_capability (NMDevice *self, NMDeviceCapabilities caps)
{
return NM_FLAGS_ANY (NM_DEVICE_GET_PRIVATE (self)->capabilities, caps);
}
void
nm_device_dbus_export (NMDevice *self)
{
static guint32 devcount = 0;
NMDevicePrivate *priv;
g_return_if_fail (NM_IS_DEVICE (self));
priv = NM_DEVICE_GET_PRIVATE (self);
g_return_if_fail (priv->path == NULL);
priv->path = g_strdup_printf ("/org/freedesktop/NetworkManager/Devices/%d", devcount++);
_LOGI (LOGD_DEVICE, "exported as %s", priv->path);
nm_dbus_manager_register_object (nm_dbus_manager_get (), priv->path, self);
}
const char *
nm_device_get_path (NMDevice *self)
{
g_return_val_if_fail (self != NULL, NULL);
return NM_DEVICE_GET_PRIVATE (self)->path;
}
const char *
nm_device_get_udi (NMDevice *self)
{
g_return_val_if_fail (self != NULL, NULL);
return NM_DEVICE_GET_PRIVATE (self)->udi;
}
const char *
nm_device_get_iface (NMDevice *self)
{
g_return_val_if_fail (NM_IS_DEVICE (self), 0);
return NM_DEVICE_GET_PRIVATE (self)->iface;
}
int
nm_device_get_ifindex (NMDevice *self)
{
g_return_val_if_fail (self != NULL, 0);
return NM_DEVICE_GET_PRIVATE (self)->ifindex;
}
gboolean
nm_device_is_software (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
return priv->is_software;
}
const char *
nm_device_get_ip_iface (NMDevice *self)
{
NMDevicePrivate *priv;
g_return_val_if_fail (self != NULL, NULL);
priv = NM_DEVICE_GET_PRIVATE (self);
/* If it's not set, default to iface */
return priv->ip_iface ? priv->ip_iface : priv->iface;
}
int
nm_device_get_ip_ifindex (NMDevice *self)
{
NMDevicePrivate *priv;
g_return_val_if_fail (self != NULL, 0);
priv = NM_DEVICE_GET_PRIVATE (self);
/* If it's not set, default to iface */
return priv->ip_iface ? priv->ip_ifindex : priv->ifindex;
}
void
nm_device_set_ip_iface (NMDevice *self, const char *iface)
{
NMDevicePrivate *priv;
char *old_ip_iface;
g_return_if_fail (NM_IS_DEVICE (self));
priv = NM_DEVICE_GET_PRIVATE (self);
if (!g_strcmp0 (iface, priv->ip_iface))
return;
old_ip_iface = priv->ip_iface;
priv->ip_ifindex = 0;
priv->ip_iface = g_strdup (iface);
if (priv->ip_iface) {
priv->ip_ifindex = nm_platform_link_get_ifindex (priv->ip_iface);
if (priv->ip_ifindex > 0) {
if (nm_platform_check_support_user_ipv6ll ())
nm_platform_link_set_user_ipv6ll_enabled (priv->ip_ifindex, TRUE);
if (!nm_platform_link_is_up (priv->ip_ifindex))
nm_platform_link_set_up (priv->ip_ifindex);
} else {
/* Device IP interface must always be a kernel network interface */
_LOGW (LOGD_HW, "failed to look up interface index");
}
}
/* We don't care about any saved values from the old iface */
g_hash_table_remove_all (priv->ip6_saved_properties);
/* Emit change notification */
if (g_strcmp0 (old_ip_iface, priv->ip_iface))
g_object_notify (G_OBJECT (self), NM_DEVICE_IP_IFACE);
g_free (old_ip_iface);
}
static gboolean
get_ip_iface_identifier (NMDevice *self, NMUtilsIPv6IfaceId *out_iid)
{
NMLinkType link_type;
const guint8 *hwaddr = NULL;
size_t hwaddr_len = 0;
int ifindex;
gboolean success;
/* If we get here, we *must* have a kernel netdev, which implies an ifindex */
ifindex = nm_device_get_ip_ifindex (self);
g_assert (ifindex);
link_type = nm_platform_link_get_type (ifindex);
g_return_val_if_fail (link_type > NM_LINK_TYPE_UNKNOWN, 0);
hwaddr = nm_platform_link_get_address (ifindex, &hwaddr_len);
if (!hwaddr_len)
return FALSE;
success = nm_utils_get_ipv6_interface_identifier (link_type,
hwaddr,
hwaddr_len,
out_iid);
if (!success) {
_LOGW (LOGD_HW, "failed to generate interface identifier "
"for link type %u hwaddr_len %zu", link_type, hwaddr_len);
}
return success;
}
static gboolean
nm_device_get_ip_iface_identifier (NMDevice *self, NMUtilsIPv6IfaceId *iid)
{
return NM_DEVICE_GET_CLASS (self)->get_ip_iface_identifier (self, iid);
}
const char *
nm_device_get_driver (NMDevice *self)
{
g_return_val_if_fail (self != NULL, NULL);
return NM_DEVICE_GET_PRIVATE (self)->driver;
}
const char *
nm_device_get_driver_version (NMDevice *self)
{
g_return_val_if_fail (self != NULL, NULL);
return NM_DEVICE_GET_PRIVATE (self)->driver_version;
}
NMDeviceType
nm_device_get_device_type (NMDevice *self)
{
g_return_val_if_fail (NM_IS_DEVICE (self), NM_DEVICE_TYPE_UNKNOWN);
return NM_DEVICE_GET_PRIVATE (self)->type;
}
/**
* nm_device_get_priority():
* @self: the #NMDevice
*
* Returns: the device's routing priority. Lower numbers means a "better"
* device, eg higher priority.
*/
int
nm_device_get_priority (NMDevice *self)
{
g_return_val_if_fail (NM_IS_DEVICE (self), 1000);
/* Device 'priority' is used for the default route-metric and is based on
* the device type. The settings ipv4.route-metric and ipv6.route-metric
* can overwrite this default.
*
* Currently for both IPv4 and IPv6 we use the same default values.
*
* The route-metric is used for the metric of the routes of device.
* This also applies to the default route. Therefore it affects also
* which device is the "best".
*
* For comparison, note that iproute2 by default adds IPv4 routes with
* metric 0, and IPv6 routes with metric 1024. The latter is the IPv6
* "user default" in the kernel (NM_PLATFORM_ROUTE_METRIC_DEFAULT_IP6).
* In kernel, the full uint32_t range is available for route
* metrics (except for IPv6, where 0 means 1024).
*/
switch (nm_device_get_device_type (self)) {
/* 50 is reserved for VPN (NM_VPN_ROUTE_METRIC_DEFAULT) */
case NM_DEVICE_TYPE_ETHERNET:
return 100;
case NM_DEVICE_TYPE_INFINIBAND:
return 150;
case NM_DEVICE_TYPE_ADSL:
return 200;
case NM_DEVICE_TYPE_WIMAX:
return 250;
case NM_DEVICE_TYPE_BOND:
return 300;
case NM_DEVICE_TYPE_TEAM:
return 350;
case NM_DEVICE_TYPE_VLAN:
return 400;
case NM_DEVICE_TYPE_BRIDGE:
return 425;
case NM_DEVICE_TYPE_MODEM:
return 450;
case NM_DEVICE_TYPE_BT:
return 550;
case NM_DEVICE_TYPE_WIFI:
return 600;
case NM_DEVICE_TYPE_OLPC_MESH:
return 650;
case NM_DEVICE_TYPE_GENERIC:
return 950;
case NM_DEVICE_TYPE_UNKNOWN:
return 10000;
case NM_DEVICE_TYPE_UNUSED1:
case NM_DEVICE_TYPE_UNUSED2:
/* omit default: to get compiler warning about missing switch cases */
break;
}
return 11000;
}
guint32
nm_device_get_ip4_route_metric (NMDevice *self)
{
NMConnection *connection;
gint64 route_metric = -1;
g_return_val_if_fail (NM_IS_DEVICE (self), G_MAXUINT32);
connection = nm_device_get_connection (self);
if (connection)
route_metric = nm_setting_ip_config_get_route_metric (nm_connection_get_setting_ip4_config (connection));
return route_metric >= 0 ? route_metric : nm_device_get_priority (self);
}
guint32
nm_device_get_ip6_route_metric (NMDevice *self)
{
NMConnection *connection;
gint64 route_metric = -1;
g_return_val_if_fail (NM_IS_DEVICE (self), G_MAXUINT32);
connection = nm_device_get_connection (self);
if (connection)
route_metric = nm_setting_ip_config_get_route_metric (nm_connection_get_setting_ip6_config (connection));
return route_metric >= 0 ? route_metric : nm_device_get_priority (self);
}
const NMPlatformIP4Route *
nm_device_get_ip4_default_route (NMDevice *self, gboolean *out_is_assumed)
{
NMDevicePrivate *priv;
g_return_val_if_fail (NM_IS_DEVICE (self), NULL);
priv = NM_DEVICE_GET_PRIVATE (self);
if (out_is_assumed)
*out_is_assumed = priv->default_route.v4_is_assumed;
return priv->default_route.v4_has ? &priv->default_route.v4 : NULL;
}
const NMPlatformIP6Route *
nm_device_get_ip6_default_route (NMDevice *self, gboolean *out_is_assumed)
{
NMDevicePrivate *priv;
g_return_val_if_fail (NM_IS_DEVICE (self), NULL);
priv = NM_DEVICE_GET_PRIVATE (self);
if (out_is_assumed)
*out_is_assumed = priv->default_route.v6_is_assumed;
return priv->default_route.v6_has ? &priv->default_route.v6 : NULL;
}
const char *
nm_device_get_type_desc (NMDevice *self)
{
g_return_val_if_fail (self != NULL, NULL);
return NM_DEVICE_GET_PRIVATE (self)->type_desc;
}
gboolean
nm_device_has_carrier (NMDevice *self)
{
return NM_DEVICE_GET_PRIVATE (self)->carrier;
}
NMActRequest *
nm_device_get_act_request (NMDevice *self)
{
g_return_val_if_fail (self != NULL, NULL);
return NM_DEVICE_GET_PRIVATE (self)->act_request;
}
NMConnection *
nm_device_get_connection (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
return priv->act_request ? nm_act_request_get_connection (priv->act_request) : NULL;
}
RfKillType
nm_device_get_rfkill_type (NMDevice *self)
{
g_return_val_if_fail (NM_IS_DEVICE (self), FALSE);
return NM_DEVICE_GET_PRIVATE (self)->rfkill_type;
}
static const char *
nm_device_get_physical_port_id (NMDevice *self)
{
return NM_DEVICE_GET_PRIVATE (self)->physical_port_id;
}
/***********************************************************/
static gboolean
nm_device_uses_generated_assumed_connection (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection;
if ( priv->act_request
&& nm_active_connection_get_assumed (NM_ACTIVE_CONNECTION (priv->act_request))) {
connection = nm_act_request_get_connection (priv->act_request);
if ( connection
&& nm_settings_connection_get_nm_generated_assumed (NM_SETTINGS_CONNECTION (connection)))
return TRUE;
}
return FALSE;
}
gboolean
nm_device_uses_assumed_connection (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if ( priv->act_request
&& nm_active_connection_get_assumed (NM_ACTIVE_CONNECTION (priv->act_request)))
return TRUE;
return FALSE;
}
static SlaveInfo *
find_slave_info (NMDevice *self, NMDevice *slave)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
SlaveInfo *info;
GSList *iter;
for (iter = priv->slaves; iter; iter = g_slist_next (iter)) {
info = iter->data;
if (info->slave == slave)
return info;
}
return NULL;
}
static void
free_slave_info (SlaveInfo *info)
{
g_signal_handler_disconnect (info->slave, info->watch_id);
g_clear_object (&info->slave);
memset (info, 0, sizeof (*info));
g_free (info);
}
/**
* nm_device_enslave_slave:
* @self: the master device
* @slave: the slave device to enslave
* @connection: (allow-none): the slave device's connection
*
* If @self is capable of enslaving other devices (ie it's a bridge, bond, team,
* etc) then this function enslaves @slave.
*
* Returns: %TRUE on success, %FALSE on failure or if this device cannot enslave
* other devices.
*/
static gboolean
nm_device_enslave_slave (NMDevice *self, NMDevice *slave, NMConnection *connection)
{
SlaveInfo *info;
gboolean success = FALSE;
gboolean configure;
g_return_val_if_fail (self != NULL, FALSE);
g_return_val_if_fail (slave != NULL, FALSE);
g_return_val_if_fail (NM_DEVICE_GET_CLASS (self)->enslave_slave != NULL, FALSE);
info = find_slave_info (self, slave);
if (!info)
return FALSE;
if (info->enslaved)
success = TRUE;
else {
configure = (info->configure && connection != NULL);
if (configure)
g_return_val_if_fail (nm_device_get_state (slave) >= NM_DEVICE_STATE_DISCONNECTED, FALSE);
success = NM_DEVICE_GET_CLASS (self)->enslave_slave (self, slave, connection, configure);
info->enslaved = success;
}
nm_device_slave_notify_enslave (info->slave, success);
/* Ensure the device's hardware address is up-to-date; it often changes
* when slaves change.
*/
nm_device_update_hw_address (self);
/* Restart IP configuration if we're waiting for slaves. Do this
* after updating the hardware address as IP config may need the
* new address.
*/
if (success) {
if (NM_DEVICE_GET_PRIVATE (self)->ip4_state == IP_WAIT)
nm_device_activate_stage3_ip4_start (self);
if (NM_DEVICE_GET_PRIVATE (self)->ip6_state == IP_WAIT)
nm_device_activate_stage3_ip6_start (self);
}
return success;
}
/**
* nm_device_release_one_slave:
* @self: the master device
* @slave: the slave device to release
* @configure: whether @self needs to actually release @slave
* @reason: the state change reason for the @slave
*
* If @self is capable of enslaving other devices (ie it's a bridge, bond, team,
* etc) then this function releases the previously enslaved @slave and/or
* updates the state of @self and @slave to reflect its release.
*
* Returns: %TRUE on success, %FALSE on failure, if this device cannot enslave
* other devices, or if @slave was never enslaved.
*/
static gboolean
nm_device_release_one_slave (NMDevice *self, NMDevice *slave, gboolean configure, NMDeviceStateReason reason)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
SlaveInfo *info;
gboolean success = FALSE;
g_return_val_if_fail (slave != NULL, FALSE);
g_return_val_if_fail (NM_DEVICE_GET_CLASS (self)->release_slave != NULL, FALSE);
info = find_slave_info (self, slave);
if (!info)
return FALSE;
priv->slaves = g_slist_remove (priv->slaves, info);
if (info->enslaved) {
success = NM_DEVICE_GET_CLASS (self)->release_slave (self, slave, configure);
/* The release_slave() implementation logs success/failure (in the
* correct device-specific log domain), so we don't have to do anything.
*/
}
if (!configure) {
g_warn_if_fail (reason == NM_DEVICE_STATE_REASON_NONE || reason == NM_DEVICE_STATE_REASON_REMOVED);
reason = NM_DEVICE_STATE_REASON_NONE;
} else if (reason == NM_DEVICE_STATE_REASON_NONE) {
g_warn_if_reached ();
reason = NM_DEVICE_STATE_REASON_UNKNOWN;
}
nm_device_slave_notify_release (info->slave, reason);
free_slave_info (info);
/* Ensure the device's hardware address is up-to-date; it often changes
* when slaves change.
*/
nm_device_update_hw_address (self);
return success;
}
static gboolean
is_software_external (NMDevice *self)
{
return nm_device_is_software (self)
&& !nm_device_get_is_nm_owned (self);
}
/**
* nm_device_finish_init:
* @self: the master device
*
* Whatever needs to be done post-initialization, when the device has a DBus
* object name.
*/
void
nm_device_finish_init (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
g_assert (priv->initialized == FALSE);
/* Do not manage externally created software devices until they are IFF_UP */
if ( is_software_external (self)
&& !nm_platform_link_is_up (priv->ifindex)
&& priv->ifindex > 0)
nm_device_set_initial_unmanaged_flag (self, NM_UNMANAGED_EXTERNAL_DOWN, TRUE);
if (priv->master)
nm_device_enslave_slave (priv->master, self, NULL);
priv->initialized = TRUE;
}
static void
carrier_changed (NMDevice *self, gboolean carrier)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (!nm_device_get_managed (self))
return;
nm_device_recheck_available_connections (self);
/* ignore-carrier devices ignore all carrier-down events */
if (priv->ignore_carrier && !carrier)
return;
if (priv->is_master) {
/* Bridge/bond/team carrier does not affect its own activation,
* but when carrier comes on, if there are slaves waiting,
* it will restart them.
*/
if (!carrier)
return;
if (nm_device_activate_ip4_state_in_wait (self))
nm_device_activate_stage3_ip4_start (self);
if (nm_device_activate_ip6_state_in_wait (self))
nm_device_activate_stage3_ip6_start (self);
return;
} else if (nm_device_get_enslaved (self) && !carrier) {
/* Slaves don't deactivate when they lose carrier; for
* bonds/teams in particular that would be actively
* counterproductive.
*/
return;
}
if (carrier) {
g_warn_if_fail (priv->state >= NM_DEVICE_STATE_UNAVAILABLE);
if (priv->state == NM_DEVICE_STATE_UNAVAILABLE) {
nm_device_queue_state (self, NM_DEVICE_STATE_DISCONNECTED,
NM_DEVICE_STATE_REASON_CARRIER);
} else if (priv->state == NM_DEVICE_STATE_DISCONNECTED) {
/* If the device is already in DISCONNECTED state without a carrier
* (probably because it is tagged for carrier ignore) ensure that
* when the carrier appears, auto connections are rechecked for
* the device.
*/
nm_device_emit_recheck_auto_activate (self);
}
} else {
g_return_if_fail (priv->state >= NM_DEVICE_STATE_UNAVAILABLE);
if (priv->state == NM_DEVICE_STATE_UNAVAILABLE) {
if (nm_device_queued_state_peek (self) >= NM_DEVICE_STATE_DISCONNECTED)
nm_device_queued_state_clear (self);
} else {
nm_device_queue_state (self, NM_DEVICE_STATE_UNAVAILABLE,
NM_DEVICE_STATE_REASON_CARRIER);
}
}
}
#define LINK_DISCONNECT_DELAY 4
static gboolean
link_disconnect_action_cb (gpointer user_data)
{
NMDevice *self = NM_DEVICE (user_data);
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
_LOGD (LOGD_DEVICE, "link disconnected (calling deferred action) (id=%u)", priv->carrier_defer_id);
priv->carrier_defer_id = 0;
_LOGI (LOGD_DEVICE, "link disconnected (calling deferred action)");
NM_DEVICE_GET_CLASS (self)->carrier_changed (self, FALSE);
return FALSE;
}
static void
link_disconnect_action_cancel (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->carrier_defer_id) {
g_source_remove (priv->carrier_defer_id);
_LOGD (LOGD_DEVICE, "link disconnected (canceling deferred action) (id=%u)", priv->carrier_defer_id);
priv->carrier_defer_id = 0;
}
}
void
nm_device_set_carrier (NMDevice *self, gboolean carrier)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMDeviceClass *klass = NM_DEVICE_GET_CLASS (self);
NMDeviceState state = nm_device_get_state (self);
if (priv->carrier == carrier)
return;
priv->carrier = carrier;
g_object_notify (G_OBJECT (self), NM_DEVICE_CARRIER);
if (priv->carrier) {
_LOGI (LOGD_DEVICE, "link connected");
link_disconnect_action_cancel (self);
klass->carrier_changed (self, TRUE);
if (priv->carrier_wait_id) {
g_source_remove (priv->carrier_wait_id);
priv->carrier_wait_id = 0;
nm_device_remove_pending_action (self, "carrier wait", TRUE);
_carrier_wait_check_queued_act_request (self);
}
} else if (state <= NM_DEVICE_STATE_DISCONNECTED) {
_LOGI (LOGD_DEVICE, "link disconnected");
klass->carrier_changed (self, FALSE);
} else {
_LOGI (LOGD_DEVICE, "link disconnected (deferring action for %d seconds)", LINK_DISCONNECT_DELAY);
priv->carrier_defer_id = g_timeout_add_seconds (LINK_DISCONNECT_DELAY,
link_disconnect_action_cb, self);
_LOGD (LOGD_DEVICE, "link disconnected (deferring action for %d seconds) (id=%u)",
LINK_DISCONNECT_DELAY, priv->carrier_defer_id);
}
}
static void
update_for_ip_ifname_change (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
g_hash_table_remove_all (priv->ip6_saved_properties);
if (priv->dhcp4_client) {
if (!nm_device_dhcp4_renew (self, FALSE)) {
nm_device_state_changed (self,
NM_DEVICE_STATE_FAILED,
NM_DEVICE_STATE_REASON_DHCP_FAILED);
return;
}
}
if (priv->dhcp6_client) {
if (!nm_device_dhcp6_renew (self, FALSE)) {
nm_device_state_changed (self,
NM_DEVICE_STATE_FAILED,
NM_DEVICE_STATE_REASON_DHCP_FAILED);
return;
}
}
if (priv->rdisc) {
/* FIXME: todo */
}
if (priv->dnsmasq_manager) {
/* FIXME: todo */
}
}
static void
device_set_master (NMDevice *self, int ifindex)
{
NMDevice *master;
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
master = nm_manager_get_device_by_ifindex (nm_manager_get (), ifindex);
if (master && NM_DEVICE_GET_CLASS (master)->enslave_slave) {
g_clear_object (&priv->master);
priv->master = g_object_ref (master);
nm_device_master_add_slave (master, self, FALSE);
} else if (master) {
_LOGI (LOGD_DEVICE, "enslaved to non-master-type device %s; ignoring",
nm_device_get_iface (master));
} else {
_LOGW (LOGD_DEVICE, "enslaved to unknown device %d %s",
ifindex,
nm_platform_link_get_name (ifindex));
}
}
static void
device_link_changed (NMDevice *self, NMPlatformLink *info)
{
NMDeviceClass *klass = NM_DEVICE_GET_CLASS (self);
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMUtilsIPv6IfaceId token_iid;
gboolean ip_ifname_changed = FALSE;
if (info->udi && g_strcmp0 (info->udi, priv->udi)) {
/* Update UDI to what udev gives us */
g_free (priv->udi);
priv->udi = g_strdup (info->udi);
g_object_notify (G_OBJECT (self), NM_DEVICE_UDI);
}
/* Update MTU if it has changed. */
if (priv->mtu != info->mtu) {
priv->mtu = info->mtu;
g_object_notify (G_OBJECT (self), NM_DEVICE_MTU);
}
if (info->name[0] && strcmp (priv->iface, info->name) != 0) {
_LOGI (LOGD_DEVICE, "interface index %d renamed iface from '%s' to '%s'",
priv->ifindex, priv->iface, info->name);
g_free (priv->iface);
priv->iface = g_strdup (info->name);
/* If the device has no explicit ip_iface, then changing iface changes ip_iface too. */
ip_ifname_changed = !priv->ip_iface;
g_object_notify (G_OBJECT (self), NM_DEVICE_IFACE);
if (ip_ifname_changed)
g_object_notify (G_OBJECT (self), NM_DEVICE_IP_IFACE);
/* Re-match available connections against the new interface name */
nm_device_recheck_available_connections (self);
/* Let any connections that use the new interface name have a chance
* to auto-activate on the device.
*/
nm_device_emit_recheck_auto_activate (self);
}
/* Update slave status for external changes */
if (priv->enslaved && info->master != nm_device_get_ifindex (priv->master))
nm_device_release_one_slave (priv->master, self, FALSE, NM_DEVICE_STATE_REASON_NONE);
if (info->master && !priv->enslaved) {
device_set_master (self, info->master);
if (priv->master)
nm_device_enslave_slave (priv->master, self, NULL);
}
if (priv->rdisc && nm_platform_link_get_ipv6_token (priv->ifindex, &token_iid)) {
_LOGD (LOGD_DEVICE, "IPv6 tokenized identifier present on device %s", priv->iface);
if (nm_rdisc_set_iid (priv->rdisc, token_iid))
nm_rdisc_start (priv->rdisc);
}
if (klass->link_changed)
klass->link_changed (self, info);
/* Update DHCP, etc, if needed */
if (ip_ifname_changed)
update_for_ip_ifname_change (self);
if (priv->up != info->up) {
priv->up = info->up;
/* Manage externally-created software interfaces only when they are IFF_UP */
g_assert (priv->ifindex > 0);
if (is_software_external (self)) {
gboolean external_down = nm_device_get_unmanaged_flag (self, NM_UNMANAGED_EXTERNAL_DOWN);
if (external_down && info->up) {
if (nm_device_get_state (self) < NM_DEVICE_STATE_DISCONNECTED) {
/* Ensure the assume check is queued before any queued state changes
* from the transition to UNAVAILABLE.
*/
nm_device_queue_recheck_assume (self);
/* Resetting the EXTERNAL_DOWN flag may change the device's state
* to UNAVAILABLE. To ensure that the state change doesn't touch
* the device before assumption occurs, pass
* NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED as the reason.
*/
nm_device_set_unmanaged (self,
NM_UNMANAGED_EXTERNAL_DOWN,
FALSE,
NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED);
} else {
/* Don't trigger a state change; if the device is in a
* state higher than UNAVAILABLE, it is already IFF_UP
* or an explicit activation request was received.
*/
priv->unmanaged_flags &= ~NM_UNMANAGED_EXTERNAL_DOWN;
}
} else if (!external_down && !info->up && nm_device_get_state (self) <= NM_DEVICE_STATE_DISCONNECTED) {
/* If the device is already disconnected and is set !IFF_UP,
* unmanage it.
*/
nm_device_set_unmanaged (self,
NM_UNMANAGED_EXTERNAL_DOWN,
TRUE,
NM_DEVICE_STATE_REASON_USER_REQUESTED);
}
}
}
}
static void
device_ip_link_changed (NMDevice *self, NMPlatformLink *info)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (info->name[0] && g_strcmp0 (priv->ip_iface, info->name)) {
_LOGI (LOGD_DEVICE, "interface index %d renamed ip_iface (%d) from '%s' to '%s'",
priv->ifindex, nm_device_get_ip_ifindex (self),
priv->ip_iface, info->name);
g_free (priv->ip_iface);
priv->ip_iface = g_strdup (info->name);
g_object_notify (G_OBJECT (self), NM_DEVICE_IP_IFACE);
update_for_ip_ifname_change (self);
}
}
static void
link_changed_cb (NMPlatform *platform,
int ifindex,
NMPlatformLink *info,
NMPlatformSignalChangeType change_type,
NMPlatformReason reason,
NMDevice *self)
{
if (change_type != NM_PLATFORM_SIGNAL_CHANGED)
return;
/* We don't filter by 'reason' because we are interested in *all* link
* changes. For example a call to nm_platform_link_set_up() may result
* in an internal carrier change (i.e. we ask the kernel to set IFF_UP
* and it results in also setting IFF_LOWER_UP.
*/
if (ifindex == nm_device_get_ifindex (self))
device_link_changed (self, info);
else if (ifindex == nm_device_get_ip_ifindex (self))
device_ip_link_changed (self, info);
}
static void
link_changed (NMDevice *self, NMPlatformLink *info)
{
/* Update carrier from link event if applicable. */
if ( device_has_capability (self, NM_DEVICE_CAP_CARRIER_DETECT)
&& !device_has_capability (self, NM_DEVICE_CAP_NONSTANDARD_CARRIER))
nm_device_set_carrier (self, info->connected);
}
/**
* nm_device_notify_component_added():
* @self: the #NMDevice
* @component: the component being added by a plugin
*
* Called by the manager to notify the device that a new component has
* been found. The device implementation should return %TRUE if it
* wishes to claim the component, or %FALSE if it cannot.
*
* Returns: %TRUE to claim the component, %FALSE if the component cannot be
* claimed.
*/
gboolean
nm_device_notify_component_added (NMDevice *self, GObject *component)
{
if (NM_DEVICE_GET_CLASS (self)->component_added)
return NM_DEVICE_GET_CLASS (self)->component_added (self, component);
return FALSE;
}
/**
* nm_device_owns_iface():
* @self: the #NMDevice
* @iface: an interface name
*
* Called by the manager to ask if the device or any of its components owns
* @iface. For example, a WWAN implementation would return %TRUE for an
* ethernet interface name that was owned by the WWAN device's modem component,
* because that ethernet interface is controlled by the WWAN device and cannot
* be used independently of the WWAN device.
*
 * Returns: %TRUE if @self or its components own the interface name,
* %FALSE if not
*/
gboolean
nm_device_owns_iface (NMDevice *self, const char *iface)
{
if (NM_DEVICE_GET_CLASS (self)->owns_iface)
return NM_DEVICE_GET_CLASS (self)->owns_iface (self, iface);
return FALSE;
}
NMConnection *
nm_device_new_default_connection (NMDevice *self)
{
if (NM_DEVICE_GET_CLASS (self)->new_default_connection)
return NM_DEVICE_GET_CLASS (self)->new_default_connection (self);
return NULL;
}
static void
slave_state_changed (NMDevice *slave,
NMDeviceState slave_new_state,
NMDeviceState slave_old_state,
NMDeviceStateReason reason,
NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
gboolean release = FALSE;
_LOGD (LOGD_DEVICE, "slave %s state change %d (%s) -> %d (%s)",
nm_device_get_iface (slave),
slave_old_state,
state_to_string (slave_old_state),
slave_new_state,
state_to_string (slave_new_state));
/* Don't try to enslave slaves until the master is ready */
if (priv->state < NM_DEVICE_STATE_CONFIG)
return;
if (slave_new_state == NM_DEVICE_STATE_IP_CONFIG)
nm_device_enslave_slave (self, slave, nm_device_get_connection (slave));
else if (slave_new_state > NM_DEVICE_STATE_ACTIVATED)
release = TRUE;
else if ( slave_new_state <= NM_DEVICE_STATE_DISCONNECTED
&& slave_old_state > NM_DEVICE_STATE_DISCONNECTED) {
/* Catch failures due to unavailable or unmanaged */
release = TRUE;
}
if (release) {
nm_device_release_one_slave (self, slave, TRUE, reason);
/* Bridge/bond/team interfaces are left up until manually deactivated */
if (priv->slaves == NULL && priv->state == NM_DEVICE_STATE_ACTIVATED)
_LOGD (LOGD_DEVICE, "last slave removed; remaining activated");
}
}
/**
* nm_device_master_add_slave:
* @self: the master device
* @slave: the slave device to enslave
* @configure: pass %TRUE if the slave should be configured by the master, or
* %FALSE if it is already configured outside NetworkManager
*
* If @self is capable of enslaving other devices (ie it's a bridge, bond, team,
* etc) then this function adds @slave to the slave list for later enslavement.
*
* Returns: %TRUE on success, %FALSE on failure
*/
static gboolean
nm_device_master_add_slave (NMDevice *self, NMDevice *slave, gboolean configure)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
SlaveInfo *info;
g_return_val_if_fail (self != NULL, FALSE);
g_return_val_if_fail (slave != NULL, FALSE);
g_return_val_if_fail (NM_DEVICE_GET_CLASS (self)->enslave_slave != NULL, FALSE);
if (configure)
g_return_val_if_fail (nm_device_get_state (slave) >= NM_DEVICE_STATE_DISCONNECTED, FALSE);
if (!find_slave_info (self, slave)) {
info = g_malloc0 (sizeof (SlaveInfo));
info->slave = g_object_ref (slave);
info->configure = configure;
info->watch_id = g_signal_connect (slave, "state-changed",
G_CALLBACK (slave_state_changed), self);
priv->slaves = g_slist_append (priv->slaves, info);
}
nm_device_queue_recheck_assume (self);
return TRUE;
}
/**
* nm_device_master_get_slaves:
* @self: the master device
*
* Returns: any slaves of which @self is the master. Caller owns returned list.
*/
GSList *
nm_device_master_get_slaves (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
GSList *slaves = NULL, *iter;
for (iter = priv->slaves; iter; iter = g_slist_next (iter))
slaves = g_slist_prepend (slaves, ((SlaveInfo *) iter->data)->slave);
return slaves;
}
/**
* nm_device_master_get_slave_by_ifindex:
* @self: the master device
* @ifindex: the slave's interface index
*
* Returns: the slave with the given @ifindex of which @self is the master,
* or %NULL if no device with @ifindex is a slave of @self.
*/
NMDevice *
nm_device_master_get_slave_by_ifindex (NMDevice *self, int ifindex)
{
GSList *iter;
for (iter = NM_DEVICE_GET_PRIVATE (self)->slaves; iter; iter = g_slist_next (iter)) {
SlaveInfo *info = iter->data;
if (nm_device_get_ip_ifindex (info->slave) == ifindex)
return info->slave;
}
return NULL;
}
/**
* nm_device_master_check_slave_physical_port:
* @self: the master device
* @slave: a slave device
* @log_domain: domain to log a warning in
*
* Checks if @self already has a slave with the same #NMDevice:physical-port-id
* as @slave, and logs a warning if so.
*/
void
nm_device_master_check_slave_physical_port (NMDevice *self, NMDevice *slave,
guint64 log_domain)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
const char *slave_physical_port_id, *existing_physical_port_id;
SlaveInfo *info;
GSList *iter;
slave_physical_port_id = nm_device_get_physical_port_id (slave);
if (!slave_physical_port_id)
return;
for (iter = priv->slaves; iter; iter = iter->next) {
info = iter->data;
if (info->slave == slave)
continue;
existing_physical_port_id = nm_device_get_physical_port_id (info->slave);
if (!g_strcmp0 (slave_physical_port_id, existing_physical_port_id)) {
_LOGW (log_domain, "slave %s shares a physical port with existing slave %s",
nm_device_get_ip_iface (slave),
nm_device_get_ip_iface (info->slave));
/* Since this function will get called for every slave, we only have
* to warn about the first match we find; if there are other matches
* later in the list, we will have already warned about them matching
* @existing earlier.
*/
return;
}
}
}
/* release all slaves */
static void
nm_device_master_release_slaves (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMDeviceStateReason reason;
/* Don't release the slaves if this connection doesn't belong to NM. */
if (nm_device_uses_generated_assumed_connection (self))
return;
reason = priv->state_reason;
if (priv->state == NM_DEVICE_STATE_FAILED)
reason = NM_DEVICE_STATE_REASON_DEPENDENCY_FAILED;
while (priv->slaves) {
SlaveInfo *info = priv->slaves->data;
nm_device_release_one_slave (self, info->slave, TRUE, reason);
}
}
/**
* nm_device_get_master:
* @self: the device
*
* If @self has been enslaved by another device, this returns that
* device. Otherwise it returns %NULL. (In particular, note that if
* @self is in the process of activating as a slave, but has not yet
* been enslaved by its master, this will return %NULL.)
*
* Returns: (transfer none): @self's master, or %NULL
*/
NMDevice *
nm_device_get_master (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->enslaved)
return priv->master;
else
return NULL;
}
/**
* nm_device_slave_notify_enslave:
* @self: the slave device
* @success: whether the enslaving operation succeeded
*
* Notifies a slave that either it has been enslaved, or else its master tried
* to enslave it and failed.
*/
static void
nm_device_slave_notify_enslave (NMDevice *self, gboolean success)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection = nm_device_get_connection (self);
gboolean activating = (priv->state == NM_DEVICE_STATE_IP_CONFIG);
g_assert (priv->master);
if (!priv->enslaved) {
if (success) {
if (activating) {
_LOGI (LOGD_DEVICE, "Activation: connection '%s' enslaved, continuing activation",
nm_connection_get_id (connection));
} else
_LOGI (LOGD_DEVICE, "enslaved to %s", nm_device_get_iface (priv->master));
priv->enslaved = TRUE;
g_object_notify (G_OBJECT (self), NM_DEVICE_MASTER);
} else if (activating) {
_LOGW (LOGD_DEVICE, "Activation: connection '%s' could not be enslaved",
nm_connection_get_id (connection));
}
}
if (activating) {
priv->ip4_state = IP_DONE;
priv->ip6_state = IP_DONE;
nm_device_queue_state (self,
success ? NM_DEVICE_STATE_SECONDARIES : NM_DEVICE_STATE_FAILED,
NM_DEVICE_STATE_REASON_NONE);
} else
nm_device_queue_recheck_assume (self);
}
/**
* nm_device_slave_notify_release:
* @self: the slave device
* @reason: the reason associated with the state change
*
* Notifies a slave that it has been released, and why.
*/
static void
nm_device_slave_notify_release (NMDevice *self, NMDeviceStateReason reason)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection = nm_device_get_connection (self);
NMDeviceState new_state;
const char *master_status;
if ( reason != NM_DEVICE_STATE_REASON_NONE
&& priv->state > NM_DEVICE_STATE_DISCONNECTED
&& priv->state <= NM_DEVICE_STATE_ACTIVATED) {
if (reason == NM_DEVICE_STATE_REASON_DEPENDENCY_FAILED) {
new_state = NM_DEVICE_STATE_FAILED;
master_status = "failed";
} else if (reason == NM_DEVICE_STATE_REASON_USER_REQUESTED) {
new_state = NM_DEVICE_STATE_DEACTIVATING;
master_status = "deactivated by user request";
} else {
new_state = NM_DEVICE_STATE_DISCONNECTED;
master_status = "deactivated";
}
_LOGD (LOGD_DEVICE, "Activation: connection '%s' master %s",
nm_connection_get_id (connection),
master_status);
nm_device_queue_state (self, new_state, reason);
} else if (priv->master)
_LOGI (LOGD_DEVICE, "released from master %s", nm_device_get_iface (priv->master));
else
_LOGD (LOGD_DEVICE, "released from master%s", priv->enslaved ? "" : " (was not enslaved)");
if (priv->enslaved) {
priv->enslaved = FALSE;
g_object_notify (G_OBJECT (self), NM_DEVICE_MASTER);
}
}
/**
* nm_device_get_enslaved:
* @self: the #NMDevice
*
* Returns: %TRUE if the device is enslaved to a master device (eg bridge or
* bond or team), %FALSE if not
*/
gboolean
nm_device_get_enslaved (NMDevice *self)
{
return NM_DEVICE_GET_PRIVATE (self)->enslaved;
}
/**
* nm_device_removed:
* @self: the #NMDevice
*
* Called by the manager when the device was removed. Releases the device from
* the master in case it's enslaved.
*/
void
nm_device_removed (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->enslaved)
nm_device_release_one_slave (priv->master, self, FALSE, NM_DEVICE_STATE_REASON_REMOVED);
}
static gboolean
is_available (NMDevice *self, NMDeviceCheckDevAvailableFlags flags)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->carrier || priv->ignore_carrier)
return TRUE;
if (NM_FLAGS_HAS (flags, NM_DEVICE_CHECK_DEV_AVAILABLE_IGNORE_CARRIER))
return TRUE;
return FALSE;
}
/**
* nm_device_is_available:
* @self: the #NMDevice
* @flags: additional flags to influence the check. Flags have the
* meaning to increase the availability of a device.
*
* Checks if @self would currently be capable of activating a
* connection. In particular, it checks that the device is ready (eg,
* is not missing firmware), that it has carrier (if necessary), and
* that any necessary external software (eg, ModemManager,
* wpa_supplicant) is available.
*
* @self can only be in a state higher than
* %NM_DEVICE_STATE_UNAVAILABLE when nm_device_is_available() returns
* %TRUE. (But note that it can still be %NM_DEVICE_STATE_UNMANAGED
* when it is available.)
*
* Returns: %TRUE or %FALSE
*/
gboolean
nm_device_is_available (NMDevice *self, NMDeviceCheckDevAvailableFlags flags)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->firmware_missing)
return FALSE;
return NM_DEVICE_GET_CLASS (self)->is_available (self, flags);
}
gboolean
nm_device_get_enabled (NMDevice *self)
{
g_return_val_if_fail (NM_IS_DEVICE (self), FALSE);
if (NM_DEVICE_GET_CLASS (self)->get_enabled)
return NM_DEVICE_GET_CLASS (self)->get_enabled (self);
return TRUE;
}
void
nm_device_set_enabled (NMDevice *self, gboolean enabled)
{
g_return_if_fail (NM_IS_DEVICE (self));
if (NM_DEVICE_GET_CLASS (self)->set_enabled)
NM_DEVICE_GET_CLASS (self)->set_enabled (self, enabled);
}
/**
* nm_device_get_autoconnect:
* @self: the #NMDevice
*
* Returns: %TRUE if the device allows autoconnect connections, or %FALSE if the
* device is explicitly blocking all autoconnect connections. Does not take
* into account transient conditions like companion devices that may wish to
* block the device.
*/
gboolean
nm_device_get_autoconnect (NMDevice *self)
{
g_return_val_if_fail (NM_IS_DEVICE (self), FALSE);
return NM_DEVICE_GET_PRIVATE (self)->autoconnect;
}
static void
nm_device_set_autoconnect (NMDevice *self, gboolean autoconnect)
{
NMDevicePrivate *priv;
g_return_if_fail (NM_IS_DEVICE (self));
priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->autoconnect == autoconnect)
return;
if (autoconnect) {
/* Default-unmanaged devices never autoconnect */
if (!nm_device_get_default_unmanaged (self)) {
priv->autoconnect = TRUE;
g_object_notify (G_OBJECT (self), NM_DEVICE_AUTOCONNECT);
}
} else {
priv->autoconnect = FALSE;
g_object_notify (G_OBJECT (self), NM_DEVICE_AUTOCONNECT);
}
}
static gboolean
autoconnect_allowed_accumulator (GSignalInvocationHint *ihint,
GValue *return_accu,
const GValue *handler_return, gpointer data)
{
if (!g_value_get_boolean (handler_return))
g_value_set_boolean (return_accu, FALSE);
return TRUE;
}
/**
* nm_device_autoconnect_allowed:
* @self: the #NMDevice
*
* Returns: %TRUE if the device can be auto-connected immediately, taking
* transient conditions into account (like companion devices that may wish to
* block autoconnect for a time).
*/
gboolean
nm_device_autoconnect_allowed (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
GValue instance = G_VALUE_INIT;
GValue retval = G_VALUE_INIT;
if (priv->state < NM_DEVICE_STATE_DISCONNECTED || !priv->autoconnect)
return FALSE;
/* The 'autoconnect-allowed' signal is emitted on a device to allow
* other listeners to block autoconnect on the device if they wish.
* This is mainly used by the OLPC Mesh devices to block autoconnect
* on their companion WiFi device as they share radio resources and
* cannot be connected at the same time.
*/
g_value_init (&instance, G_TYPE_OBJECT);
g_value_set_object (&instance, self);
g_value_init (&retval, G_TYPE_BOOLEAN);
if (priv->autoconnect)
g_value_set_boolean (&retval, TRUE);
else
g_value_set_boolean (&retval, FALSE);
/* Use g_signal_emitv() rather than g_signal_emit() to avoid the return
* value being changed if no handlers are connected */
g_signal_emitv (&instance, signals[AUTOCONNECT_ALLOWED], 0, &retval);
g_value_unset (&instance);
return g_value_get_boolean (&retval);
}
static gboolean
can_auto_connect (NMDevice *self,
NMConnection *connection,
char **specific_object)
{
NMSettingConnection *s_con;
s_con = nm_connection_get_setting_connection (connection);
if (!nm_setting_connection_get_autoconnect (s_con))
return FALSE;
return nm_device_check_connection_available (self, connection, NM_DEVICE_CHECK_CON_AVAILABLE_NONE, NULL);
}
/**
* nm_device_can_auto_connect:
* @self: an #NMDevice
* @connection: a #NMConnection
* @specific_object: (out) (transfer full): on output, the path of an
* object associated with the returned connection, to be passed to
* nm_manager_activate_connection(), or %NULL.
*
* Checks if @connection can be auto-activated on @self right now.
* This requires, at a minimum, that the connection be compatible with
* @self, and that it have the #NMSettingConnection:autoconnect property
* set, and that the device allow auto connections. Some devices impose
* additional requirements. (Eg, a Wi-Fi connection can only be activated
* if its SSID was seen in the last scan.)
*
* Returns: %TRUE, if the @connection can be auto-activated.
**/
gboolean
nm_device_can_auto_connect (NMDevice *self,
NMConnection *connection,
char **specific_object)
{
g_return_val_if_fail (NM_IS_DEVICE (self), FALSE);
g_return_val_if_fail (NM_IS_CONNECTION (connection), FALSE);
g_return_val_if_fail (specific_object && !*specific_object, FALSE);
if (nm_device_autoconnect_allowed (self))
return NM_DEVICE_GET_CLASS (self)->can_auto_connect (self, connection, specific_object);
return FALSE;
}
static gboolean
device_has_config (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
/* Check for IP configuration. */
if (priv->ip4_config && nm_ip4_config_get_num_addresses (priv->ip4_config))
return TRUE;
if (priv->ip6_config && nm_ip6_config_get_num_addresses (priv->ip6_config))
return TRUE;
/* The existence of a software device is good enough. */
if (nm_device_is_software (self))
return TRUE;
/* Slaves are also configured by definition */
if (nm_platform_link_get_master (priv->ifindex) > 0)
return TRUE;
return FALSE;
}
/**
* nm_device_master_update_slave_connection:
* @self: the master #NMDevice
* @slave: the slave #NMDevice
* @connection: the #NMConnection to update with the slave settings
* @GError: (out): error description
*
* Reads the slave configuration for @slave and updates @connection with those
* properties. This invokes a virtual function on the master device @self.
*
* Returns: %TRUE if the configuration was read and @connection updated,
* %FALSE on failure.
*/
gboolean
nm_device_master_update_slave_connection (NMDevice *self,
NMDevice *slave,
NMConnection *connection,
GError **error)
{
NMDeviceClass *klass;
gboolean success;
g_return_val_if_fail (self, FALSE);
g_return_val_if_fail (NM_IS_DEVICE (self), FALSE);
g_return_val_if_fail (slave, FALSE);
g_return_val_if_fail (connection, FALSE);
g_return_val_if_fail (!error || !*error, FALSE);
g_return_val_if_fail (nm_connection_get_setting_connection (connection), FALSE);
g_return_val_if_fail (nm_device_get_iface (self), FALSE);
klass = NM_DEVICE_GET_CLASS (self);
if (klass->master_update_slave_connection) {
success = klass->master_update_slave_connection (self, slave, connection, error);
g_return_val_if_fail (!error || (success && !*error) || *error, success);
return success;
}
g_set_error (error,
NM_DEVICE_ERROR,
NM_DEVICE_ERROR_FAILED,
"master device '%s' cannot update a slave connection for slave device '%s' (master type not supported?)",
nm_device_get_iface (self), nm_device_get_iface (slave));
return FALSE;
}
NMConnection *
nm_device_generate_connection (NMDevice *self, NMDevice *master)
{
NMDeviceClass *klass = NM_DEVICE_GET_CLASS (self);
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
const char *ifname = nm_device_get_iface (self);
NMConnection *connection;
NMSetting *s_con;
NMSetting *s_ip4;
NMSetting *s_ip6;
gs_free char *uuid = NULL;
const char *ip4_method, *ip6_method;
GError *error = NULL;
/* If update_connection() is not implemented, just fail. */
if (!klass->update_connection)
return NULL;
/* Return NULL if device is unconfigured. */
if (!device_has_config (self)) {
_LOGD (LOGD_DEVICE, "device has no existing configuration");
return NULL;
}
connection = nm_simple_connection_new ();
s_con = nm_setting_connection_new ();
uuid = nm_utils_uuid_generate ();
g_object_set (s_con,
NM_SETTING_CONNECTION_UUID, uuid,
NM_SETTING_CONNECTION_ID, ifname,
NM_SETTING_CONNECTION_AUTOCONNECT, FALSE,
NM_SETTING_CONNECTION_INTERFACE_NAME, ifname,
NM_SETTING_CONNECTION_TIMESTAMP, (guint64) time (NULL),
NULL);
if (klass->connection_type)
g_object_set (s_con, NM_SETTING_CONNECTION_TYPE, klass->connection_type, NULL);
nm_connection_add_setting (connection, s_con);
/* If the device is a slave, update various slave settings */
if (master) {
if (!nm_device_master_update_slave_connection (master,
self,
connection,
&error))
{
_LOGE (LOGD_DEVICE, "master device '%s' failed to update slave connection: %s",
nm_device_get_iface (master), error ? error->message : "(unknown error)");
g_error_free (error);
g_object_unref (connection);
return NULL;
}
} else {
/* Only regular and master devices get IP configuration; slaves do not */
s_ip4 = nm_ip4_config_create_setting (priv->ip4_config);
nm_connection_add_setting (connection, s_ip4);
s_ip6 = nm_ip6_config_create_setting (priv->ip6_config);
nm_connection_add_setting (connection, s_ip6);
}
klass->update_connection (self, connection);
/* Check the connection in case of update_connection() bug. */
if (!nm_connection_verify (connection, &error)) {
_LOGE (LOGD_DEVICE, "Generated connection does not verify: %s", error->message);
g_clear_error (&error);
g_object_unref (connection);
return NULL;
}
/* Ignore the connection if it has no IP configuration,
* no slave configuration, and is not a master interface.
*/
ip4_method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG);
ip6_method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG);
if ( g_strcmp0 (ip4_method, NM_SETTING_IP4_CONFIG_METHOD_DISABLED) == 0
&& g_strcmp0 (ip6_method, NM_SETTING_IP6_CONFIG_METHOD_IGNORE) == 0
&& !nm_setting_connection_get_master (NM_SETTING_CONNECTION (s_con))
&& !priv->slaves) {
_LOGD (LOGD_DEVICE, "ignoring generated connection (no IP and not in master-slave relationship)");
g_object_unref (connection);
connection = NULL;
}
return connection;
}
gboolean
nm_device_complete_connection (NMDevice *self,
NMConnection *connection,
const char *specific_object,
const GSList *existing_connections,
GError **error)
{
gboolean success = FALSE;
g_return_val_if_fail (self != NULL, FALSE);
g_return_val_if_fail (connection != NULL, FALSE);
if (!NM_DEVICE_GET_CLASS (self)->complete_connection) {
g_set_error (error, NM_DEVICE_ERROR, NM_DEVICE_ERROR_INVALID_CONNECTION,
"Device class %s had no complete_connection method",
G_OBJECT_TYPE_NAME (self));
return FALSE;
}
success = NM_DEVICE_GET_CLASS (self)->complete_connection (self,
connection,
specific_object,
existing_connections,
error);
if (success)
success = nm_connection_verify (connection, error);
return success;
}
static gboolean
check_connection_compatible (NMDevice *self, NMConnection *connection)
{
NMSettingConnection *s_con;
const char *config_iface, *device_iface;
s_con = nm_connection_get_setting_connection (connection);
g_assert (s_con);
config_iface = nm_setting_connection_get_interface_name (s_con);
device_iface = nm_device_get_iface (self);
if (config_iface && strcmp (config_iface, device_iface) != 0)
return FALSE;
return TRUE;
}
/**
* nm_device_check_connection_compatible:
* @self: an #NMDevice
* @connection: an #NMConnection
*
* Checks if @connection could potentially be activated on @self.
* This means only that @self has the proper capabilities, and that
* @connection is not locked to some other device. It does not
* necessarily mean that @connection could be activated on @self
* right now. (Eg, it might refer to a Wi-Fi network that is not
* currently available.)
*
* Returns: #TRUE if @connection could potentially be activated on
* @self.
*/
gboolean
nm_device_check_connection_compatible (NMDevice *self, NMConnection *connection)
{
g_return_val_if_fail (NM_IS_DEVICE (self), FALSE);
g_return_val_if_fail (NM_IS_CONNECTION (connection), FALSE);
return NM_DEVICE_GET_CLASS (self)->check_connection_compatible (self, connection);
}
/**
* nm_device_can_assume_connections:
* @self: #NMDevice instance
*
* This is a convenience function to determine whether connection assumption
* is available for this device.
*
* Returns: %TRUE if the device is capable of assuming connections, %FALSE if not
*/
static gboolean
nm_device_can_assume_connections (NMDevice *self)
{
return !!NM_DEVICE_GET_CLASS (self)->update_connection;
}
/**
* nm_device_can_assume_active_connection:
* @self: #NMDevice instance
*
* This is a convenience function to determine whether the device's active
* connection can be assumed if NetworkManager restarts. This method returns
* %TRUE if and only if the device can assume connections, and the device has
* an active connection, and that active connection can be assumed.
*
* Returns: %TRUE if the device's active connection can be assumed, or %FALSE
* if there is no active connection or the active connection cannot be
* assumed.
*/
gboolean
nm_device_can_assume_active_connection (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection;
const char *method;
const char *assumable_ip6_methods[] = {
NM_SETTING_IP6_CONFIG_METHOD_IGNORE,
NM_SETTING_IP6_CONFIG_METHOD_AUTO,
NM_SETTING_IP6_CONFIG_METHOD_DHCP,
NM_SETTING_IP6_CONFIG_METHOD_LINK_LOCAL,
NM_SETTING_IP6_CONFIG_METHOD_MANUAL,
NULL
};
const char *assumable_ip4_methods[] = {
NM_SETTING_IP4_CONFIG_METHOD_DISABLED,
NM_SETTING_IP6_CONFIG_METHOD_AUTO,
NM_SETTING_IP6_CONFIG_METHOD_MANUAL,
NULL
};
if (!nm_device_can_assume_connections (self))
return FALSE;
connection = nm_device_get_connection (self);
if (!connection)
return FALSE;
/* Can't assume connections that aren't yet configured
* FIXME: what about bridges/bonds waiting for slaves?
*/
if (priv->state < NM_DEVICE_STATE_IP_CONFIG)
return FALSE;
if (priv->ip4_state != IP_DONE && priv->ip6_state != IP_DONE)
return FALSE;
method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG);
if (!_nm_utils_string_in_list (method, assumable_ip6_methods))
return FALSE;
method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG);
if (!_nm_utils_string_in_list (method, assumable_ip4_methods))
return FALSE;
return TRUE;
}
static gboolean
nm_device_emit_recheck_assume (gpointer self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
priv->recheck_assume_id = 0;
if (!nm_device_get_act_request (self)) {
_LOGD (LOGD_DEVICE, "emit RECHECK_ASSUME signal");
g_signal_emit (self, signals[RECHECK_ASSUME], 0);
}
return G_SOURCE_REMOVE;
}
void
nm_device_queue_recheck_assume (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (nm_device_can_assume_connections (self) && !priv->recheck_assume_id)
priv->recheck_assume_id = g_idle_add (nm_device_emit_recheck_assume, self);
}
void
nm_device_emit_recheck_auto_activate (NMDevice *self)
{
g_signal_emit (self, signals[RECHECK_AUTO_ACTIVATE], 0);
}
static void
dnsmasq_state_changed_cb (NMDnsMasqManager *manager, guint32 status, gpointer user_data)
{
NMDevice *self = NM_DEVICE (user_data);
switch (status) {
case NM_DNSMASQ_STATUS_DEAD:
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_SHARED_START_FAILED);
break;
default:
break;
}
}
static void
activation_source_clear (NMDevice *self, gboolean remove_source, int family)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
guint *act_source_id;
gpointer *act_source_func;
if (family == AF_INET6) {
act_source_id = &priv->act_source6_id;
act_source_func = &priv->act_source6_func;
} else {
act_source_id = &priv->act_source_id;
act_source_func = &priv->act_source_func;
}
if (*act_source_id) {
if (remove_source)
g_source_remove (*act_source_id);
*act_source_id = 0;
*act_source_func = NULL;
}
}
static void
activation_source_schedule (NMDevice *self, GSourceFunc func, int family)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
guint *act_source_id;
gpointer *act_source_func;
if (family == AF_INET6) {
act_source_id = &priv->act_source6_id;
act_source_func = &priv->act_source6_func;
} else {
act_source_id = &priv->act_source_id;
act_source_func = &priv->act_source_func;
}
if (*act_source_id)
_LOGE (LOGD_DEVICE, "activation stage already scheduled");
/* Don't bother rescheduling the same function that's about to
* run anyway. Fixes issues with crappy wireless drivers sending
* streams of associate events before NM has had a chance to process
* the first one.
*/
if (!*act_source_id || (*act_source_func != func)) {
activation_source_clear (self, TRUE, family);
*act_source_id = g_idle_add (func, self);
*act_source_func = func;
}
}
static gboolean
get_ip_config_may_fail (NMDevice *self, int family)
{
NMConnection *connection;
NMSettingIPConfig *s_ip = NULL;
g_return_val_if_fail (self != NULL, TRUE);
connection = nm_device_get_connection (self);
g_assert (connection);
/* Fail the connection if the failed IP method is required to complete */
switch (family) {
case AF_INET:
s_ip = nm_connection_get_setting_ip4_config (connection);
break;
case AF_INET6:
s_ip = nm_connection_get_setting_ip6_config (connection);
break;
default:
g_assert_not_reached ();
}
return nm_setting_ip_config_get_may_fail (s_ip);
}
static void
master_ready_cb (NMActiveConnection *active,
GParamSpec *pspec,
NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMActiveConnection *master;
g_assert (priv->state == NM_DEVICE_STATE_PREPARE);
/* Notify a master device that it has a new slave */
g_assert (nm_active_connection_get_master_ready (active));
master = nm_active_connection_get_master (active);
priv->master = g_object_ref (nm_active_connection_get_device (master));
nm_device_master_add_slave (priv->master,
self,
nm_active_connection_get_assumed (active) ? FALSE : TRUE);
_LOGD (LOGD_DEVICE, "master connection ready; master device %s",
nm_device_get_iface (priv->master));
if (priv->master_ready_id) {
g_signal_handler_disconnect (active, priv->master_ready_id);
priv->master_ready_id = 0;
}
nm_device_activate_schedule_stage2_device_config (self);
}
static NMActStageReturn
act_stage1_prepare (NMDevice *self, NMDeviceStateReason *reason)
{
return NM_ACT_STAGE_RETURN_SUCCESS;
}
/*
* nm_device_activate_stage1_device_prepare
*
* Prepare for device activation
*
*/
static gboolean
nm_device_activate_stage1_device_prepare (gpointer user_data)
{
NMDevice *self = NM_DEVICE (user_data);
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMActStageReturn ret = NM_ACT_STAGE_RETURN_SUCCESS;
NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE;
NMActiveConnection *active = NM_ACTIVE_CONNECTION (priv->act_request);
/* Clear the activation source ID now that this stage has run */
activation_source_clear (self, FALSE, 0);
priv->ip4_state = priv->ip6_state = IP_NONE;
/* Notify the new ActiveConnection along with the state change */
g_object_notify (G_OBJECT (self), NM_DEVICE_ACTIVE_CONNECTION);
_LOGI (LOGD_DEVICE, "Activation: Stage 1 of 5 (Device Prepare) started...");
nm_device_state_changed (self, NM_DEVICE_STATE_PREPARE, NM_DEVICE_STATE_REASON_NONE);
/* Assumed connections were already set up outside NetworkManager */
if (!nm_active_connection_get_assumed (active)) {
ret = NM_DEVICE_GET_CLASS (self)->act_stage1_prepare (self, &reason);
if (ret == NM_ACT_STAGE_RETURN_POSTPONE) {
goto out;
} else if (ret == NM_ACT_STAGE_RETURN_FAILURE) {
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason);
goto out;
}
g_assert (ret == NM_ACT_STAGE_RETURN_SUCCESS);
}
if (nm_active_connection_get_master (active)) {
/* If the master connection is ready for slaves, attach ourselves */
if (nm_active_connection_get_master_ready (active))
master_ready_cb (active, NULL, self);
else {
_LOGD (LOGD_DEVICE, "waiting for master connection to become ready");
/* Attach a signal handler and wait for the master connection to begin activating */
g_assert (priv->master_ready_id == 0);
priv->master_ready_id = g_signal_connect (active,
"notify::" NM_ACTIVE_CONNECTION_INT_MASTER_READY,
(GCallback) master_ready_cb,
self);
/* Postpone */
}
} else
nm_device_activate_schedule_stage2_device_config (self);
out:
_LOGI (LOGD_DEVICE, "Activation: Stage 1 of 5 (Device Prepare) complete.");
return FALSE;
}
/*
* nm_device_activate_schedule_stage1_device_prepare
*
* Prepare a device for activation
*
*/
void
nm_device_activate_schedule_stage1_device_prepare (NMDevice *self)
{
NMDevicePrivate *priv;
g_return_if_fail (NM_IS_DEVICE (self));
priv = NM_DEVICE_GET_PRIVATE (self);
g_return_if_fail (priv->act_request);
activation_source_schedule (self, nm_device_activate_stage1_device_prepare, 0);
_LOGI (LOGD_DEVICE, "Activation: Stage 1 of 5 (Device Prepare) scheduled...");
}
static NMActStageReturn
act_stage2_config (NMDevice *self, NMDeviceStateReason *reason)
{
/* Nothing to do */
return NM_ACT_STAGE_RETURN_SUCCESS;
}
/*
* nm_device_activate_stage2_device_config
*
* Determine device parameters and set those on the device, ie
* for wireless devices, set SSID, keys, etc.
*
*/
static gboolean
nm_device_activate_stage2_device_config (gpointer user_data)
{
NMDevice *self = NM_DEVICE (user_data);
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMActStageReturn ret;
NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE;
gboolean no_firmware = FALSE;
NMActiveConnection *active = NM_ACTIVE_CONNECTION (priv->act_request);
GSList *iter;
/* Clear the activation source ID now that this stage has run */
activation_source_clear (self, FALSE, 0);
_LOGI (LOGD_DEVICE, "Activation: Stage 2 of 5 (Device Configure) starting...");
nm_device_state_changed (self, NM_DEVICE_STATE_CONFIG, NM_DEVICE_STATE_REASON_NONE);
/* Assumed connections were already set up outside NetworkManager */
if (!nm_active_connection_get_assumed (active)) {
if (!nm_device_bring_up (self, FALSE, &no_firmware)) {
if (no_firmware)
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_FIRMWARE_MISSING);
else
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_CONFIG_FAILED);
goto out;
}
ret = NM_DEVICE_GET_CLASS (self)->act_stage2_config (self, &reason);
if (ret == NM_ACT_STAGE_RETURN_POSTPONE)
goto out;
else if (ret == NM_ACT_STAGE_RETURN_FAILURE) {
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason);
goto out;
}
g_assert (ret == NM_ACT_STAGE_RETURN_SUCCESS);
}
/* If we have slaves that aren't yet enslaved, do that now */
for (iter = priv->slaves; iter; iter = g_slist_next (iter)) {
SlaveInfo *info = iter->data;
NMDeviceState slave_state = nm_device_get_state (info->slave);
if (slave_state == NM_DEVICE_STATE_IP_CONFIG)
nm_device_enslave_slave (self, info->slave, nm_device_get_connection (info->slave));
else if ( nm_device_uses_generated_assumed_connection (self)
&& slave_state <= NM_DEVICE_STATE_DISCONNECTED)
nm_device_queue_recheck_assume (info->slave);
}
_LOGI (LOGD_DEVICE, "Activation: Stage 2 of 5 (Device Configure) successful.");
nm_device_activate_schedule_stage3_ip_config_start (self);
out:
_LOGI (LOGD_DEVICE, "Activation: Stage 2 of 5 (Device Configure) complete.");
return FALSE;
}
/*
* nm_device_activate_schedule_stage2_device_config
*
* Schedule setup of the hardware device
*
*/
void
nm_device_activate_schedule_stage2_device_config (NMDevice *self)
{
NMDevicePrivate *priv;
g_return_if_fail (NM_IS_DEVICE (self));
priv = NM_DEVICE_GET_PRIVATE (self);
g_return_if_fail (priv->act_request);
activation_source_schedule (self, nm_device_activate_stage2_device_config, 0);
_LOGI (LOGD_DEVICE, "Activation: Stage 2 of 5 (Device Configure) scheduled...");
}
/*********************************************/
/* avahi-autoipd stuff */
static void
aipd_timeout_remove (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->aipd_timeout) {
g_source_remove (priv->aipd_timeout);
priv->aipd_timeout = 0;
}
}
static void
aipd_cleanup (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->aipd_watch) {
g_source_remove (priv->aipd_watch);
priv->aipd_watch = 0;
}
if (priv->aipd_pid > 0) {
nm_utils_kill_child_sync (priv->aipd_pid, SIGKILL, LOGD_AUTOIP4, "avahi-autoipd", NULL, 0, 0);
priv->aipd_pid = -1;
}
aipd_timeout_remove (self);
}
static NMIP4Config *
aipd_get_ip4_config (NMDevice *self, guint32 lla)
{
NMIP4Config *config = NULL;
NMPlatformIP4Address address;
NMPlatformIP4Route route;
config = nm_ip4_config_new (nm_device_get_ip_ifindex (self));
g_assert (config);
memset (&address, 0, sizeof (address));
address.address = lla;
address.plen = 16;
address.source = NM_IP_CONFIG_SOURCE_IP4LL;
nm_ip4_config_add_address (config, &address);
/* Add a multicast route for link-local connections: destination= 224.0.0.0, netmask=240.0.0.0 */
memset (&route, 0, sizeof (route));
route.network = htonl (0xE0000000L);
route.plen = 4;
route.source = NM_IP_CONFIG_SOURCE_IP4LL;
route.metric = nm_device_get_ip4_route_metric (self);
nm_ip4_config_add_route (config, &route);
return config;
}
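/* Illustration (not from the original source): the multicast route added in
 * aipd_get_ip4_config() in numbers. htonl (0xE0000000) is 224.0.0.0 and
 * plen 4 corresponds to netmask 240.0.0.0, i.e. the whole 224.0.0.0/4
 * multicast range is routed via the link-local interface, presumably so that
 * multicast traffic (e.g. mDNS) keeps flowing on a 169.254.x.x-only link.
 */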
#define IPV4LL_NETWORK (htonl (0xA9FE0000L))
#define IPV4LL_NETMASK (htonl (0xFFFF0000L))
void
nm_device_handle_autoip4_event (NMDevice *self,
const char *event,
const char *address)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection = NULL;
const char *method;
NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE;
g_return_if_fail (event != NULL);
if (priv->act_request == NULL)
return;
connection = nm_act_request_get_connection (priv->act_request);
g_assert (connection);
/* Ignore if the connection isn't an AutoIP connection */
method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG);
if (g_strcmp0 (method, NM_SETTING_IP4_CONFIG_METHOD_LINK_LOCAL) != 0)
return;
if (strcmp (event, "BIND") == 0) {
guint32 lla;
NMIP4Config *config;
if (inet_pton (AF_INET, address, &lla) <= 0) {
_LOGE (LOGD_AUTOIP4, "invalid address %s received from avahi-autoipd.", address);
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_AUTOIP_ERROR);
return;
}
if ((lla & IPV4LL_NETMASK) != IPV4LL_NETWORK) {
_LOGE (LOGD_AUTOIP4, "invalid address %s received from avahi-autoipd (not link-local).", address);
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_AUTOIP_ERROR);
return;
}
config = aipd_get_ip4_config (self, lla);
if (config == NULL) {
_LOGE (LOGD_AUTOIP4, "failed to get autoip config");
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_UNAVAILABLE);
return;
}
if (priv->ip4_state == IP_CONF) {
aipd_timeout_remove (self);
nm_device_activate_schedule_ip4_config_result (self, config);
} else if (priv->ip4_state == IP_DONE) {
if (!ip4_config_merge_and_apply (self, config, TRUE, &reason)) {
_LOGE (LOGD_AUTOIP4, "failed to update IP4 config for autoip change.");
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason);
}
} else
g_assert_not_reached ();
g_object_unref (config);
} else {
_LOGW (LOGD_AUTOIP4, "autoip address %s no longer valid because '%s'.", address, event);
/* The address is gone; terminate the connection or fail activation */
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_EXPIRED);
}
}
static void
aipd_watch_cb (GPid pid, gint status, gpointer user_data)
{
NMDevice *self = NM_DEVICE (user_data);
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMDeviceState state;
if (!priv->aipd_watch)
return;
priv->aipd_watch = 0;
if (WIFEXITED (status))
_LOGD (LOGD_AUTOIP4, "avahi-autoipd exited with error code %d", WEXITSTATUS (status));
else if (WIFSTOPPED (status))
_LOGW (LOGD_AUTOIP4, "avahi-autoipd stopped unexpectedly with signal %d", WSTOPSIG (status));
else if (WIFSIGNALED (status))
_LOGW (LOGD_AUTOIP4, "avahi-autoipd died with signal %d", WTERMSIG (status));
else
_LOGW (LOGD_AUTOIP4, "avahi-autoipd died from an unknown cause");
aipd_cleanup (self);
state = nm_device_get_state (self);
if (nm_device_is_activating (self) || (state == NM_DEVICE_STATE_ACTIVATED))
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_AUTOIP_FAILED);
}
static gboolean
aipd_timeout_cb (gpointer user_data)
{
NMDevice *self = NM_DEVICE (user_data);
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->aipd_timeout) {
_LOGI (LOGD_AUTOIP4, "avahi-autoipd timed out.");
priv->aipd_timeout = 0;
aipd_cleanup (self);
if (priv->ip4_state == IP_CONF)
nm_device_activate_schedule_ip4_config_timeout (self);
}
return FALSE;
}
/* default to installed helper, but can be modified for testing */
const char *nm_device_autoipd_helper_path = LIBEXECDIR "/nm-avahi-autoipd.action";
static NMActStageReturn
aipd_start (NMDevice *self, NMDeviceStateReason *reason)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
const char *argv[6];
char *cmdline;
const char *aipd_binary;
int i = 0;
GError *error = NULL;
aipd_cleanup (self);
/* Find avahi-autoipd */
aipd_binary = nm_utils_find_helper ("avahi-autoipd", NULL, NULL);
if (!aipd_binary) {
_LOGW (LOGD_DEVICE | LOGD_AUTOIP4,
"Activation: Stage 3 of 5 (IP Configure Start) failed"
" to start avahi-autoipd: not found");
*reason = NM_DEVICE_STATE_REASON_AUTOIP_START_FAILED;
return NM_ACT_STAGE_RETURN_FAILURE;
}
argv[i++] = aipd_binary;
argv[i++] = "--script";
argv[i++] = nm_device_autoipd_helper_path;
if (nm_logging_enabled (LOGL_DEBUG, LOGD_AUTOIP4))
argv[i++] = "--debug";
argv[i++] = nm_device_get_ip_iface (self);
argv[i++] = NULL;
cmdline = g_strjoinv (" ", (char **) argv);
_LOGD (LOGD_AUTOIP4, "running: %s", cmdline);
g_free (cmdline);
if (!g_spawn_async ("/", (char **) argv, NULL, G_SPAWN_DO_NOT_REAP_CHILD,
nm_utils_setpgid, NULL, &(priv->aipd_pid), &error)) {
_LOGW (LOGD_DEVICE | LOGD_AUTOIP4,
"Activation: Stage 3 of 5 (IP Configure Start) failed"
" to start avahi-autoipd: %s",
error && error->message ? error->message : "(unknown)");
g_clear_error (&error);
aipd_cleanup (self);
return NM_ACT_STAGE_RETURN_FAILURE;
}
_LOGI (LOGD_DEVICE | LOGD_AUTOIP4,
"Activation: Stage 3 of 5 (IP Configure Start) started"
" avahi-autoipd...");
/* Monitor the child process so we know when it dies */
priv->aipd_watch = g_child_watch_add (priv->aipd_pid, aipd_watch_cb, self);
/* Start a timeout to bound the address attempt */
priv->aipd_timeout = g_timeout_add_seconds (20, aipd_timeout_cb, self);
return NM_ACT_STAGE_RETURN_POSTPONE;
}
/*********************************************/
static gboolean
_device_get_default_route_from_platform (NMDevice *self, int addr_family, NMPlatformIPRoute *out_route)
{
gboolean success = FALSE;
int ifindex = nm_device_get_ip_ifindex (self);
GArray *routes;
if (addr_family == AF_INET)
routes = nm_platform_ip4_route_get_all (ifindex, NM_PLATFORM_GET_ROUTE_MODE_ONLY_DEFAULT);
else
routes = nm_platform_ip6_route_get_all (ifindex, NM_PLATFORM_GET_ROUTE_MODE_ONLY_DEFAULT);
if (routes) {
guint route_metric = G_MAXUINT32, m;
const NMPlatformIPRoute *route = NULL, *r;
guint i;
/* if there are several default routes, find the one with the best metric */
for (i = 0; i < routes->len; i++) {
if (addr_family == AF_INET) {
r = (const NMPlatformIPRoute *) &g_array_index (routes, NMPlatformIP4Route, i);
m = r->metric;
} else {
r = (const NMPlatformIPRoute *) &g_array_index (routes, NMPlatformIP6Route, i);
m = nm_utils_ip6_route_metric_normalize (r->metric);
}
if (!route || m < route_metric) {
route = r;
route_metric = m;
}
}
if (route) {
if (addr_family == AF_INET)
*((NMPlatformIP4Route *) out_route) = *((NMPlatformIP4Route *) route);
else
*((NMPlatformIP6Route *) out_route) = *((NMPlatformIP6Route *) route);
success = TRUE;
}
g_array_free (routes, TRUE);
}
return success;
}
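/* Illustration (not from the original source): why the IPv6 branch above runs
 * the metric through nm_utils_ip6_route_metric_normalize(). As the priority
 * comment later in this file notes, the kernel treats an IPv6 route metric of
 * 0 as 1024, so comparing raw values would mislead the "best metric" scan:
 *
 *   route A: raw metric 0    -> effectively 1024 in the kernel
 *   route B: raw metric 512  -> effectively 512
 *
 * Without normalization A would win; with it B is correctly chosen. The
 * mapping is assumed here to be "0 becomes 1024, other values unchanged".
 */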
/*********************************************/
static void
ensure_con_ipx_config (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
int ip_ifindex = nm_device_get_ip_ifindex (self);
NMConnection *connection;
g_assert (!!priv->con_ip4_config == !!priv->con_ip6_config);
if (priv->con_ip4_config)
return;
connection = nm_device_get_connection (self);
if (!connection)
return;
priv->con_ip4_config = nm_ip4_config_new (ip_ifindex);
priv->con_ip6_config = nm_ip6_config_new (ip_ifindex);
nm_ip4_config_merge_setting (priv->con_ip4_config,
nm_connection_get_setting_ip4_config (connection),
nm_device_get_ip4_route_metric (self));
nm_ip6_config_merge_setting (priv->con_ip6_config,
nm_connection_get_setting_ip6_config (connection),
nm_device_get_ip6_route_metric (self));
if (nm_device_uses_assumed_connection (self)) {
/* For assumed connections ignore all addresses and routes. */
nm_ip4_config_reset_addresses (priv->con_ip4_config);
nm_ip4_config_reset_routes (priv->con_ip4_config);
nm_ip6_config_reset_addresses (priv->con_ip6_config);
nm_ip6_config_reset_routes (priv->con_ip6_config);
}
}
/*********************************************/
/* DHCPv4 stuff */
static void
dhcp4_cleanup (NMDevice *self, gboolean stop, gboolean release)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->dhcp4_client) {
/* Stop any ongoing DHCP transaction on this device */
if (priv->dhcp4_state_sigid) {
g_signal_handler_disconnect (priv->dhcp4_client, priv->dhcp4_state_sigid);
priv->dhcp4_state_sigid = 0;
}
nm_device_remove_pending_action (self, PENDING_ACTION_DHCP4, FALSE);
if (stop)
nm_dhcp_client_stop (priv->dhcp4_client, release);
g_clear_object (&priv->dhcp4_client);
}
if (priv->dhcp4_config) {
g_clear_object (&priv->dhcp4_config);
g_object_notify (G_OBJECT (self), NM_DEVICE_DHCP4_CONFIG);
}
}
static gboolean
ip4_config_merge_and_apply (NMDevice *self,
NMIP4Config *config,
gboolean commit,
NMDeviceStateReason *out_reason)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection;
gboolean success;
NMIP4Config *composite;
gboolean has_direct_route;
const guint32 default_route_metric = nm_device_get_ip4_route_metric (self);
guint32 gateway;
/* Merge all the configs into the composite config */
if (config) {
g_clear_object (&priv->dev_ip4_config);
priv->dev_ip4_config = g_object_ref (config);
}
composite = nm_ip4_config_new (nm_device_get_ip_ifindex (self));
ensure_con_ipx_config (self);
if (priv->dev_ip4_config)
nm_ip4_config_merge (composite, priv->dev_ip4_config);
if (priv->vpn4_config)
nm_ip4_config_merge (composite, priv->vpn4_config);
if (priv->ext_ip4_config)
nm_ip4_config_merge (composite, priv->ext_ip4_config);
/* Merge WWAN config *last* to ensure modem-given settings overwrite
* any external stuff set by pppd or other scripts.
*/
if (priv->wwan_ip4_config)
nm_ip4_config_merge (composite, priv->wwan_ip4_config);
/* Merge user overrides into the composite config. For assumed connections,
* con_ip4_config is empty. */
if (priv->con_ip4_config)
nm_ip4_config_merge (composite, priv->con_ip4_config);
connection = nm_device_get_connection (self);
/* Add the default route.
*
* We keep track of the default route of a device in a private field.
* NMDevice needs to know the default route at this point, because the gateway
* might require a direct route (see below).
*
* But also, we don't want to add the default route to priv->ip4_config,
 * because the default route from the setting might not be the same one that
 * NMDefaultRouteManager eventually configures (because it might
* tweak the effective metric).
*/
/* unless we come to a different conclusion below, we have no default route and
* the route is assumed. */
priv->default_route.v4_has = FALSE;
priv->default_route.v4_is_assumed = TRUE;
if (!commit) {
/* during a non-commit event, we always pickup whatever is configured. */
goto END_ADD_DEFAULT_ROUTE;
}
if (nm_device_uses_assumed_connection (self))
goto END_ADD_DEFAULT_ROUTE;
/* we are about to commit (for a non-assumed connection). Enforce whatever we have
* configured. */
priv->default_route.v4_is_assumed = FALSE;
if ( !connection
|| !nm_default_route_manager_ip4_connection_has_default_route (nm_default_route_manager_get (), connection))
goto END_ADD_DEFAULT_ROUTE;
if (!nm_ip4_config_get_num_addresses (composite)) {
/* without addresses we can have no default route. */
goto END_ADD_DEFAULT_ROUTE;
}
gateway = nm_ip4_config_get_gateway (composite);
if ( !gateway
&& nm_device_get_device_type (self) != NM_DEVICE_TYPE_MODEM)
goto END_ADD_DEFAULT_ROUTE;
has_direct_route = ( gateway == 0
|| nm_ip4_config_get_subnet_for_host (composite, gateway)
|| nm_ip4_config_get_direct_route_for_host (composite, gateway));
priv->default_route.v4_has = TRUE;
memset (&priv->default_route.v4, 0, sizeof (priv->default_route.v4));
priv->default_route.v4.source = NM_IP_CONFIG_SOURCE_USER;
priv->default_route.v4.gateway = gateway;
priv->default_route.v4.metric = default_route_metric;
priv->default_route.v4.mss = nm_ip4_config_get_mss (composite);
if (!has_direct_route) {
NMPlatformIP4Route r = priv->default_route.v4;
/* add a direct route to the gateway */
r.network = gateway;
r.plen = 32;
r.gateway = 0;
nm_ip4_config_add_route (composite, &r);
}
END_ADD_DEFAULT_ROUTE:
if (priv->default_route.v4_is_assumed) {
/* If above does not explicitly assign a default route, we always pick up the
* default route based on what is currently configured.
* That means that even managed connections with never-default, can
* get a default route (if configured externally).
*/
priv->default_route.v4_has = _device_get_default_route_from_platform (self, AF_INET, (NMPlatformIPRoute *) &priv->default_route.v4);
}
/* Allow setting MTU etc */
if (commit) {
if (NM_DEVICE_GET_CLASS (self)->ip4_config_pre_commit)
NM_DEVICE_GET_CLASS (self)->ip4_config_pre_commit (self, composite);
}
success = nm_device_set_ip4_config (self, composite, default_route_metric, commit, out_reason);
g_object_unref (composite);
return success;
}
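/* Illustration (not from the original source): the "direct route to the
 * gateway" branch of ip4_config_merge_and_apply(), with made-up addresses.
 *
 *   composite address: 192.0.2.5/24   (covers 192.0.2.0 - 192.0.2.255)
 *   gateway:           198.51.100.1   (outside every configured subnet)
 *
 * Neither nm_ip4_config_get_subnet_for_host() nor
 * nm_ip4_config_get_direct_route_for_host() matches, so has_direct_route is
 * FALSE and a host route 198.51.100.1/32 (gateway 0) is added to the
 * composite config; only then can the default route via 198.51.100.1 be
 * installed. Had the gateway been 192.0.2.1, the /24 would already cover it
 * and no extra route would be needed.
 */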
static void
dhcp4_lease_change (NMDevice *self, NMIP4Config *config)
{
NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE;
g_return_if_fail (config != NULL);
if (!ip4_config_merge_and_apply (self, config, TRUE, &reason)) {
_LOGW (LOGD_DHCP4, "failed to update IPv4 config for DHCP change.");
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason);
} else {
/* Notify dispatcher scripts of new DHCP4 config */
nm_dispatcher_call (DISPATCHER_ACTION_DHCP4_CHANGE,
nm_device_get_connection (self),
self,
NULL,
NULL,
NULL);
}
}
static void
dhcp4_fail (NMDevice *self, gboolean timeout)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
dhcp4_cleanup (self, TRUE, FALSE);
if (timeout || (priv->ip4_state == IP_CONF))
nm_device_activate_schedule_ip4_config_timeout (self);
else if (priv->ip4_state == IP_DONE)
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_EXPIRED);
else
g_warn_if_reached ();
}
static void
dhcp4_update_config (NMDevice *self, NMDhcp4Config *config, GHashTable *options)
{
GHashTableIter iter;
const char *key, *value;
/* Update the DHCP4 config object with new DHCP options */
nm_dhcp4_config_reset (config);
g_hash_table_iter_init (&iter, options);
while (g_hash_table_iter_next (&iter, (gpointer) &key, (gpointer) &value))
nm_dhcp4_config_add_option (config, key, value);
g_object_notify (G_OBJECT (self), NM_DEVICE_DHCP4_CONFIG);
}
static void
dhcp4_state_changed (NMDhcpClient *client,
NMDhcpState state,
NMIP4Config *ip4_config,
GHashTable *options,
gpointer user_data)
{
NMDevice *self = NM_DEVICE (user_data);
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
g_return_if_fail (nm_dhcp_client_get_ipv6 (client) == FALSE);
g_return_if_fail (!ip4_config || NM_IS_IP4_CONFIG (ip4_config));
_LOGD (LOGD_DHCP4, "new DHCPv4 client state %d", state);
switch (state) {
case NM_DHCP_STATE_BOUND:
if (!ip4_config) {
_LOGW (LOGD_DHCP4, "failed to get IPv4 config in response to DHCP event.");
nm_device_state_changed (self,
NM_DEVICE_STATE_FAILED,
NM_DEVICE_STATE_REASON_IP_CONFIG_UNAVAILABLE);
break;
}
dhcp4_update_config (self, priv->dhcp4_config, options);
if (priv->ip4_state == IP_CONF)
nm_device_activate_schedule_ip4_config_result (self, ip4_config);
else if (priv->ip4_state == IP_DONE)
dhcp4_lease_change (self, ip4_config);
break;
case NM_DHCP_STATE_TIMEOUT:
dhcp4_fail (self, TRUE);
break;
case NM_DHCP_STATE_EXPIRE:
/* Ignore expiry before we even have a lease (NAK, old lease, etc) */
if (priv->ip4_state == IP_CONF)
break;
/* Fall through */
case NM_DHCP_STATE_DONE:
case NM_DHCP_STATE_FAIL:
dhcp4_fail (self, FALSE);
break;
default:
break;
}
}
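/* Illustration (not from the original source): how the DHCPv4 states above
 * map onto device behaviour, assuming the usual event order from the client.
 *
 *   BOUND while ip4_state == IP_CONF  -> schedule the IPv4 config result
 *   BOUND while ip4_state == IP_DONE  -> dhcp4_lease_change(), re-apply config
 *   TIMEOUT                           -> dhcp4_fail (TRUE): IPv4 config timeout
 *   EXPIRE before any lease (IP_CONF) -> ignored
 *   DONE/FAIL (and EXPIRE after a lease)
 *                                     -> dhcp4_fail (FALSE): falls back to the
 *                                        IPv4 config timeout while configuring,
 *                                        FAILED once the device was done
 */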
static NMActStageReturn
dhcp4_start (NMDevice *self,
NMConnection *connection,
NMDeviceStateReason *reason)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMSettingIPConfig *s_ip4;
const guint8 *hw_addr;
size_t hw_addr_len = 0;
GByteArray *tmp = NULL;
s_ip4 = nm_connection_get_setting_ip4_config (connection);
/* Clear old exported DHCP options */
if (priv->dhcp4_config)
g_object_unref (priv->dhcp4_config);
priv->dhcp4_config = nm_dhcp4_config_new ();
hw_addr = nm_platform_link_get_address (nm_device_get_ip_ifindex (self), &hw_addr_len);
if (hw_addr_len) {
tmp = g_byte_array_sized_new (hw_addr_len);
g_byte_array_append (tmp, hw_addr, hw_addr_len);
}
/* Begin DHCP on the interface */
g_warn_if_fail (priv->dhcp4_client == NULL);
priv->dhcp4_client = nm_dhcp_manager_start_ip4 (nm_dhcp_manager_get (),
nm_device_get_ip_iface (self),
nm_device_get_ip_ifindex (self),
tmp,
nm_connection_get_uuid (connection),
nm_device_get_ip4_route_metric (self),
nm_setting_ip_config_get_dhcp_send_hostname (s_ip4),
nm_setting_ip_config_get_dhcp_hostname (s_ip4),
nm_setting_ip4_config_get_dhcp_client_id (NM_SETTING_IP4_CONFIG (s_ip4)),
priv->dhcp_timeout,
priv->dhcp_anycast_address,
NULL);
if (tmp)
g_byte_array_free (tmp, TRUE);
if (!priv->dhcp4_client) {
*reason = NM_DEVICE_STATE_REASON_DHCP_START_FAILED;
return NM_ACT_STAGE_RETURN_FAILURE;
}
priv->dhcp4_state_sigid = g_signal_connect (priv->dhcp4_client,
NM_DHCP_CLIENT_SIGNAL_STATE_CHANGED,
G_CALLBACK (dhcp4_state_changed),
self);
nm_device_add_pending_action (self, PENDING_ACTION_DHCP4, TRUE);
/* DHCP devices will be notified by the DHCP manager when stuff happens */
return NM_ACT_STAGE_RETURN_POSTPONE;
}
gboolean
nm_device_dhcp4_renew (NMDevice *self, gboolean release)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMActStageReturn ret;
NMDeviceStateReason reason;
NMConnection *connection;
g_return_val_if_fail (priv->dhcp4_client != NULL, FALSE);
_LOGI (LOGD_DHCP4, "DHCPv4 lease renewal requested");
/* Terminate old DHCP instance and release the old lease */
dhcp4_cleanup (self, TRUE, release);
connection = nm_device_get_connection (self);
g_assert (connection);
/* Start DHCP again on the interface */
ret = dhcp4_start (self, connection, &reason);
return (ret != NM_ACT_STAGE_RETURN_FAILURE);
}
/*********************************************/
static GHashTable *shared_ips = NULL;
static void
release_shared_ip (gpointer data)
{
g_hash_table_remove (shared_ips, data);
}
static gboolean
reserve_shared_ip (NMDevice *self, NMSettingIPConfig *s_ip4, NMPlatformIP4Address *address)
{
if (G_UNLIKELY (shared_ips == NULL))
shared_ips = g_hash_table_new (g_direct_hash, g_direct_equal);
memset (address, 0, sizeof (*address));
if (s_ip4 && nm_setting_ip_config_get_num_addresses (s_ip4)) {
/* Use the first user-supplied address */
NMIPAddress *user = nm_setting_ip_config_get_address (s_ip4, 0);
g_assert (user);
nm_ip_address_get_address_binary (user, &address->address);
address->plen = nm_ip_address_get_prefix (user);
} else {
/* Find an unused address in the 10.42.x.x range */
guint32 start = (guint32) ntohl (0x0a2a0001); /* 10.42.0.1 */
guint32 count = 0;
while (g_hash_table_lookup (shared_ips, GUINT_TO_POINTER (start + count))) {
count += ntohl (0x100);
if (count > ntohl (0xFE00)) {
_LOGE (LOGD_SHARING, "ran out of shared IP addresses!");
return FALSE;
}
}
address->address = start + count;
address->plen = 24;
g_hash_table_insert (shared_ips,
GUINT_TO_POINTER (address->address),
GUINT_TO_POINTER (TRUE));
}
return TRUE;
}
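/* Illustration (not from the original source): how the scan in
 * reserve_shared_ip() walks the 10.42.x.x space. The ntohl() calls keep the
 * arithmetic consistent with addresses stored in network byte order, so each
 * step advances the third octet:
 *
 *   start + 0 * ntohl (0x100)  -> 10.42.0.1
 *   start + 1 * ntohl (0x100)  -> 10.42.1.1
 *   start + 2 * ntohl (0x100)  -> 10.42.2.1
 *
 * Every shared connection therefore gets its own 10.42.N.0/24 subnet, and the
 * ntohl (0xFE00) bound stops the scan after roughly 254 concurrently shared
 * networks.
 */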
static NMIP4Config *
shared4_new_config (NMDevice *self, NMConnection *connection, NMDeviceStateReason *reason)
{
NMIP4Config *config = NULL;
NMPlatformIP4Address address;
g_return_val_if_fail (self != NULL, NULL);
if (!reserve_shared_ip (self, nm_connection_get_setting_ip4_config (connection), &address)) {
*reason = NM_DEVICE_STATE_REASON_IP_CONFIG_UNAVAILABLE;
return NULL;
}
config = nm_ip4_config_new (nm_device_get_ip_ifindex (self));
address.source = NM_IP_CONFIG_SOURCE_SHARED;
nm_ip4_config_add_address (config, &address);
/* Remove the address lock when the object gets disposed */
g_object_set_data_full (G_OBJECT (config), "shared-ip",
GUINT_TO_POINTER (address.address),
release_shared_ip);
return config;
}
/*********************************************/
static gboolean
connection_ip4_method_requires_carrier (NMConnection *connection,
gboolean *out_ip4_enabled)
{
const char *method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG);
static const char *ip4_carrier_methods[] = {
NM_SETTING_IP4_CONFIG_METHOD_AUTO,
NM_SETTING_IP4_CONFIG_METHOD_LINK_LOCAL,
NULL
};
if (out_ip4_enabled)
*out_ip4_enabled = !!strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_DISABLED);
return _nm_utils_string_in_list (method, ip4_carrier_methods);
}
static gboolean
connection_ip6_method_requires_carrier (NMConnection *connection,
gboolean *out_ip6_enabled)
{
const char *method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG);
static const char *ip6_carrier_methods[] = {
NM_SETTING_IP6_CONFIG_METHOD_AUTO,
NM_SETTING_IP6_CONFIG_METHOD_DHCP,
NM_SETTING_IP6_CONFIG_METHOD_LINK_LOCAL,
NULL
};
if (out_ip6_enabled)
*out_ip6_enabled = !!strcmp (method, NM_SETTING_IP6_CONFIG_METHOD_IGNORE);
return _nm_utils_string_in_list (method, ip6_carrier_methods);
}
static gboolean
connection_requires_carrier (NMConnection *connection)
{
NMSettingIPConfig *s_ip4, *s_ip6;
gboolean ip4_carrier_wanted, ip6_carrier_wanted;
gboolean ip4_used = FALSE, ip6_used = FALSE;
ip4_carrier_wanted = connection_ip4_method_requires_carrier (connection, &ip4_used);
if (ip4_carrier_wanted) {
/* If IPv4 wants a carrier and cannot fail, the whole connection
* requires a carrier regardless of the IPv6 method.
*/
s_ip4 = nm_connection_get_setting_ip4_config (connection);
if (s_ip4 && !nm_setting_ip_config_get_may_fail (s_ip4))
return TRUE;
}
ip6_carrier_wanted = connection_ip6_method_requires_carrier (connection, &ip6_used);
if (ip6_carrier_wanted) {
/* If IPv6 wants a carrier and cannot fail, the whole connection
* requires a carrier regardless of the IPv4 method.
*/
s_ip6 = nm_connection_get_setting_ip6_config (connection);
if (s_ip6 && !nm_setting_ip_config_get_may_fail (s_ip6))
return TRUE;
}
/* If an IP version wants a carrier and the other IP version isn't
* used, the connection requires carrier since it will just fail without one.
*/
if (ip4_carrier_wanted && !ip6_used)
return TRUE;
if (ip6_carrier_wanted && !ip4_used)
return TRUE;
/* If both want a carrier, the whole connection wants a carrier */
return ip4_carrier_wanted && ip6_carrier_wanted;
}
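/* Illustration (not from the original source): connection_requires_carrier()
 * summarised, where "wants carrier" means the method is in the
 * ipX_carrier_methods lists and "may-fail" is the setting's may-fail property.
 *
 *   IPv4 wants carrier and may-fail=no                     -> carrier required
 *   IPv6 wants carrier and may-fail=no                     -> carrier required
 *   IPv4 wants carrier, IPv6 method is "ignore"            -> carrier required
 *   IPv6 wants carrier, IPv4 method is "disabled"          -> carrier required
 *   both methods want carrier                              -> carrier required
 *   only one wants carrier, it may fail, the other is used -> no carrier needed
 */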
static gboolean
have_any_ready_slaves (NMDevice *self, const GSList *slaves)
{
const GSList *iter;
/* Any enslaved slave is "ready" in the generic case as it's
 * at least >= NM_DEVICE_STATE_IP_CONFIG and has had Layer 2
* properties set up.
*/
for (iter = slaves; iter; iter = g_slist_next (iter)) {
if (nm_device_get_enslaved (iter->data))
return TRUE;
}
return FALSE;
}
static gboolean
ip4_requires_slaves (NMConnection *connection)
{
const char *method;
method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG);
return strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_AUTO) == 0;
}
static NMActStageReturn
act_stage3_ip4_config_start (NMDevice *self,
NMIP4Config **out_config,
NMDeviceStateReason *reason)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection;
NMActStageReturn ret = NM_ACT_STAGE_RETURN_FAILURE;
const char *method;
GSList *slaves;
gboolean ready_slaves;
g_return_val_if_fail (reason != NULL, NM_ACT_STAGE_RETURN_FAILURE);
connection = nm_device_get_connection (self);
g_assert (connection);
if ( connection_ip4_method_requires_carrier (connection, NULL)
&& priv->is_master
&& !priv->carrier) {
_LOGI (LOGD_IP4 | LOGD_DEVICE,
"IPv4 config waiting until carrier is on");
return NM_ACT_STAGE_RETURN_WAIT;
}
if (priv->is_master && ip4_requires_slaves (connection)) {
/* If the master has no ready slaves, and depends on slaves for
* a successful IPv4 attempt, then postpone IPv4 addressing.
*/
slaves = nm_device_master_get_slaves (self);
ready_slaves = NM_DEVICE_GET_CLASS (self)->have_any_ready_slaves (self, slaves);
g_slist_free (slaves);
if (ready_slaves == FALSE) {
_LOGI (LOGD_DEVICE | LOGD_IP4,
"IPv4 config waiting until slaves are ready");
return NM_ACT_STAGE_RETURN_WAIT;
}
}
method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG);
/* Start IPv4 addressing based on the method requested */
if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_AUTO) == 0)
ret = dhcp4_start (self, connection, reason);
else if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_LINK_LOCAL) == 0)
ret = aipd_start (self, reason);
else if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_MANUAL) == 0) {
/* Use only IPv4 config from the connection data */
*out_config = nm_ip4_config_new (nm_device_get_ip_ifindex (self));
g_assert (*out_config);
ret = NM_ACT_STAGE_RETURN_SUCCESS;
} else if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_SHARED) == 0) {
*out_config = shared4_new_config (self, connection, reason);
if (*out_config) {
priv->dnsmasq_manager = nm_dnsmasq_manager_new (nm_device_get_ip_iface (self));
ret = NM_ACT_STAGE_RETURN_SUCCESS;
} else
ret = NM_ACT_STAGE_RETURN_FAILURE;
} else if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_DISABLED) == 0) {
/* Nothing to do... */
ret = NM_ACT_STAGE_RETURN_STOP;
} else
_LOGW (LOGD_IP4, "unhandled IPv4 config method '%s'; will fail", method);
return ret;
}
/*********************************************/
/* DHCPv6 stuff */
static void
dhcp6_cleanup (NMDevice *self, gboolean stop, gboolean release)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
priv->dhcp6_mode = NM_RDISC_DHCP_LEVEL_NONE;
g_clear_object (&priv->dhcp6_ip6_config);
if (priv->dhcp6_client) {
if (priv->dhcp6_state_sigid) {
g_signal_handler_disconnect (priv->dhcp6_client, priv->dhcp6_state_sigid);
priv->dhcp6_state_sigid = 0;
}
if (stop)
nm_dhcp_client_stop (priv->dhcp6_client, release);
g_clear_object (&priv->dhcp6_client);
}
nm_device_remove_pending_action (self, PENDING_ACTION_DHCP6, FALSE);
if (priv->dhcp6_config) {
g_clear_object (&priv->dhcp6_config);
g_object_notify (G_OBJECT (self), NM_DEVICE_DHCP6_CONFIG);
}
}
static gboolean
ip6_config_merge_and_apply (NMDevice *self,
gboolean commit,
NMDeviceStateReason *out_reason)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection;
gboolean success;
NMIP6Config *composite;
gboolean has_direct_route;
const struct in6_addr *gateway;
/* If no config was passed in, create a new one */
composite = nm_ip6_config_new (nm_device_get_ip_ifindex (self));
ensure_con_ipx_config (self);
g_assert (composite);
/* Merge all the IP configs into the composite config */
if (priv->ac_ip6_config)
nm_ip6_config_merge (composite, priv->ac_ip6_config);
if (priv->dhcp6_ip6_config)
nm_ip6_config_merge (composite, priv->dhcp6_ip6_config);
if (priv->vpn6_config)
nm_ip6_config_merge (composite, priv->vpn6_config);
if (priv->ext_ip6_config)
nm_ip6_config_merge (composite, priv->ext_ip6_config);
/* Merge WWAN config *last* to ensure modem-given settings overwrite
* any external stuff set by pppd or other scripts.
*/
if (priv->wwan_ip6_config)
nm_ip6_config_merge (composite, priv->wwan_ip6_config);
/* Merge user overrides into the composite config. For assumed connections,
* con_ip6_config is empty. */
if (priv->con_ip6_config)
nm_ip6_config_merge (composite, priv->con_ip6_config);
connection = nm_device_get_connection (self);
/* Add the default route.
*
* We keep track of the default route of a device in a private field.
* NMDevice needs to know the default route at this point, because the gateway
* might require a direct route (see below).
*
* But also, we don't want to add the default route to priv->ip6_config,
 * because the default route from the setting might not be the same one that
 * NMDefaultRouteManager eventually configures (because it might
* tweak the effective metric).
*/
/* unless we come to a different conclusion below, we have no default route and
* the route is assumed. */
priv->default_route.v6_has = FALSE;
priv->default_route.v6_is_assumed = TRUE;
if (!commit) {
/* during a non-commit event, we always pickup whatever is configured. */
goto END_ADD_DEFAULT_ROUTE;
}
if (nm_device_uses_assumed_connection (self))
goto END_ADD_DEFAULT_ROUTE;
/* we are about to commit (for a non-assumed connection). Enforce whatever we have
* configured. */
priv->default_route.v6_is_assumed = FALSE;
if ( !connection
|| !nm_default_route_manager_ip6_connection_has_default_route (nm_default_route_manager_get (), connection))
goto END_ADD_DEFAULT_ROUTE;
if (!nm_ip6_config_get_num_addresses (composite)) {
/* without addresses we can have no default route. */
goto END_ADD_DEFAULT_ROUTE;
}
gateway = nm_ip6_config_get_gateway (composite);
if (!gateway)
goto END_ADD_DEFAULT_ROUTE;
has_direct_route = nm_ip6_config_get_direct_route_for_host (composite, gateway) != NULL;
priv->default_route.v6_has = TRUE;
memset (&priv->default_route.v6, 0, sizeof (priv->default_route.v6));
priv->default_route.v6.source = NM_IP_CONFIG_SOURCE_USER;
priv->default_route.v6.gateway = *gateway;
priv->default_route.v6.metric = nm_device_get_ip6_route_metric (self);
priv->default_route.v6.mss = nm_ip6_config_get_mss (composite);
if (!has_direct_route) {
NMPlatformIP6Route r = priv->default_route.v6;
/* add a direct route to the gateway */
r.network = *gateway;
r.plen = 128;
r.gateway = in6addr_any;
nm_ip6_config_add_route (composite, &r);
}
END_ADD_DEFAULT_ROUTE:
if (priv->default_route.v6_is_assumed) {
/* If above does not explicitly assign a default route, we always pick up the
* default route based on what is currently configured.
* That means that even managed connections with never-default, can
* get a default route (if configured externally).
*/
priv->default_route.v6_has = _device_get_default_route_from_platform (self, AF_INET6, (NMPlatformIPRoute *) &priv->default_route.v6);
}
nm_ip6_config_addresses_sort (composite,
priv->rdisc ? priv->rdisc_use_tempaddr : NM_SETTING_IP6_CONFIG_PRIVACY_UNKNOWN);
/* Allow setting MTU etc */
if (commit) {
if (NM_DEVICE_GET_CLASS (self)->ip6_config_pre_commit)
NM_DEVICE_GET_CLASS (self)->ip6_config_pre_commit (self, composite);
}
success = nm_device_set_ip6_config (self, composite, commit, out_reason);
g_object_unref (composite);
return success;
}
static void
dhcp6_lease_change (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection;
NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE;
if (priv->dhcp6_ip6_config == NULL) {
_LOGW (LOGD_DHCP6, "failed to get DHCPv6 config for rebind");
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_EXPIRED);
return;
}
g_assert (priv->dhcp6_client); /* sanity check */
connection = nm_device_get_connection (self);
g_assert (connection);
/* Apply the updated config */
if (ip6_config_merge_and_apply (self, TRUE, &reason) == FALSE) {
_LOGW (LOGD_DHCP6, "failed to update IPv6 config in response to DHCP event.");
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason);
} else {
/* Notify dispatcher scripts of new DHCPv6 config */
nm_dispatcher_call (DISPATCHER_ACTION_DHCP6_CHANGE, connection, self, NULL, NULL, NULL);
}
}
static void
dhcp6_fail (NMDevice *self, gboolean timeout)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
dhcp6_cleanup (self, TRUE, FALSE);
if (priv->dhcp6_mode == NM_RDISC_DHCP_LEVEL_MANAGED) {
if (timeout || (priv->ip6_state == IP_CONF))
nm_device_activate_schedule_ip6_config_timeout (self);
else if (priv->ip6_state == IP_DONE)
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_EXPIRED);
else
g_warn_if_reached ();
} else {
/* not a hard failure; just live with the RA info */
if (priv->ip6_state == IP_CONF)
nm_device_activate_schedule_ip6_config_result (self);
}
}
static void
dhcp6_timeout (NMDevice *self, NMDhcpClient *client)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->dhcp6_mode == NM_RDISC_DHCP_LEVEL_MANAGED)
dhcp6_fail (self, TRUE);
else {
/* not a hard failure; just live with the RA info */
dhcp6_cleanup (self, TRUE, FALSE);
if (priv->ip6_state == IP_CONF)
nm_device_activate_schedule_ip6_config_result (self);
}
}
static void
dhcp6_update_config (NMDevice *self, NMDhcp6Config *config, GHashTable *options)
{
GHashTableIter iter;
const char *key, *value;
/* Update the DHCP6 config object with new DHCP options */
nm_dhcp6_config_reset (config);
g_hash_table_iter_init (&iter, options);
while (g_hash_table_iter_next (&iter, (gpointer) &key, (gpointer) &value))
nm_dhcp6_config_add_option (config, key, value);
g_object_notify (G_OBJECT (self), NM_DEVICE_DHCP6_CONFIG);
}
static void
dhcp6_state_changed (NMDhcpClient *client,
NMDhcpState state,
NMIP6Config *ip6_config,
GHashTable *options,
gpointer user_data)
{
NMDevice *self = NM_DEVICE (user_data);
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
g_return_if_fail (nm_dhcp_client_get_ipv6 (client) == TRUE);
g_return_if_fail (!ip6_config || NM_IS_IP6_CONFIG (ip6_config));
_LOGD (LOGD_DHCP6, "new DHCPv6 client state %d", state);
switch (state) {
case NM_DHCP_STATE_BOUND:
g_clear_object (&priv->dhcp6_ip6_config);
if (ip6_config) {
priv->dhcp6_ip6_config = g_object_ref (ip6_config);
dhcp6_update_config (self, priv->dhcp6_config, options);
}
if (priv->ip6_state == IP_CONF) {
if (priv->dhcp6_ip6_config == NULL) {
/* FIXME: Initial DHCP failed; should we fail IPv6 entirely then? */
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_DHCP_FAILED);
break;
}
nm_device_activate_schedule_ip6_config_result (self);
} else if (priv->ip6_state == IP_DONE)
dhcp6_lease_change (self);
break;
case NM_DHCP_STATE_TIMEOUT:
dhcp6_timeout (self, client);
break;
case NM_DHCP_STATE_EXPIRE:
/* Ignore expiry before we even have a lease (NAK, old lease, etc) */
if (priv->ip6_state != IP_CONF)
dhcp6_fail (self, FALSE);
break;
case NM_DHCP_STATE_DONE:
/* In IPv6 info-only mode, the client doesn't handle leases so it
* may exit right after getting a response from the server. That's
* normal. In that case we just ignore the exit.
*/
if (priv->dhcp6_mode == NM_RDISC_DHCP_LEVEL_OTHERCONF)
break;
/* Otherwise, fall through */
case NM_DHCP_STATE_FAIL:
dhcp6_fail (self, FALSE);
break;
default:
break;
}
}
static gboolean
dhcp6_start_with_link_ready (NMDevice *self, NMConnection *connection)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMSettingIPConfig *s_ip6;
GByteArray *tmp = NULL;
const guint8 *hw_addr;
size_t hw_addr_len = 0;
g_assert (connection);
s_ip6 = nm_connection_get_setting_ip6_config (connection);
g_assert (s_ip6);
hw_addr = nm_platform_link_get_address (nm_device_get_ip_ifindex (self), &hw_addr_len);
if (hw_addr_len) {
tmp = g_byte_array_sized_new (hw_addr_len);
g_byte_array_append (tmp, hw_addr, hw_addr_len);
}
priv->dhcp6_client = nm_dhcp_manager_start_ip6 (nm_dhcp_manager_get (),
nm_device_get_ip_iface (self),
nm_device_get_ip_ifindex (self),
tmp,
nm_connection_get_uuid (connection),
nm_device_get_ip6_route_metric (self),
nm_setting_ip_config_get_dhcp_send_hostname (s_ip6),
nm_setting_ip_config_get_dhcp_hostname (s_ip6),
priv->dhcp_timeout,
priv->dhcp_anycast_address,
(priv->dhcp6_mode == NM_RDISC_DHCP_LEVEL_OTHERCONF) ? TRUE : FALSE,
nm_setting_ip6_config_get_ip6_privacy (NM_SETTING_IP6_CONFIG (s_ip6)));
if (tmp)
g_byte_array_free (tmp, TRUE);
if (priv->dhcp6_client) {
priv->dhcp6_state_sigid = g_signal_connect (priv->dhcp6_client,
NM_DHCP_CLIENT_SIGNAL_STATE_CHANGED,
G_CALLBACK (dhcp6_state_changed),
self);
}
return !!priv->dhcp6_client;
}
static gboolean
dhcp6_start (NMDevice *self, gboolean wait_for_ll, NMDeviceStateReason *reason)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection;
NMSettingIPConfig *s_ip6;
g_clear_object (&priv->dhcp6_config);
priv->dhcp6_config = nm_dhcp6_config_new ();
g_warn_if_fail (priv->dhcp6_ip6_config == NULL);
g_clear_object (&priv->dhcp6_ip6_config);
connection = nm_device_get_connection (self);
g_assert (connection);
s_ip6 = nm_connection_get_setting_ip6_config (connection);
if (!nm_setting_ip_config_get_may_fail (s_ip6) ||
!strcmp (nm_setting_ip_config_get_method (s_ip6), NM_SETTING_IP6_CONFIG_METHOD_DHCP))
nm_device_add_pending_action (self, PENDING_ACTION_DHCP6, TRUE);
if (wait_for_ll) {
NMActStageReturn ret;
/* ensure link local is ready... */
ret = linklocal6_start (self);
if (ret == NM_ACT_STAGE_RETURN_POSTPONE) {
/* success; wait for the LL address to show up */
return TRUE;
}
/* success; already have the LL address; kick off DHCP */
g_assert (ret == NM_ACT_STAGE_RETURN_SUCCESS);
}
if (!dhcp6_start_with_link_ready (self, connection)) {
*reason = NM_DEVICE_STATE_REASON_DHCP_START_FAILED;
return FALSE;
}
return TRUE;
}
gboolean
nm_device_dhcp6_renew (NMDevice *self, gboolean release)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
g_return_val_if_fail (priv->dhcp6_client != NULL, FALSE);
_LOGI (LOGD_DHCP6, "DHCPv6 lease renewal requested");
/* Terminate old DHCP instance and release the old lease */
dhcp6_cleanup (self, TRUE, release);
/* Start DHCP again on the interface */
return dhcp6_start (self, FALSE, NULL);
}
/******************************************/
static gboolean
have_ip6_address (const NMIP6Config *ip6_config, gboolean linklocal)
{
guint i;
if (!ip6_config)
return FALSE;
linklocal = !!linklocal;
for (i = 0; i < nm_ip6_config_get_num_addresses (ip6_config); i++) {
const NMPlatformIP6Address *addr = nm_ip6_config_get_address (ip6_config, i);
if ((IN6_IS_ADDR_LINKLOCAL (&addr->address) == linklocal) &&
!(addr->flags & IFA_F_TENTATIVE))
return TRUE;
}
return FALSE;
}
static void
linklocal6_cleanup (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->linklocal6_timeout_id) {
g_source_remove (priv->linklocal6_timeout_id);
priv->linklocal6_timeout_id = 0;
}
}
static gboolean
linklocal6_timeout_cb (gpointer user_data)
{
NMDevice *self = user_data;
linklocal6_cleanup (self);
_LOGD (LOGD_DEVICE, "linklocal6: waiting for link-local addresses failed due to timeout");
nm_device_activate_schedule_ip6_config_timeout (self);
return G_SOURCE_REMOVE;
}
static void
linklocal6_complete (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection;
const char *method;
g_assert (priv->linklocal6_timeout_id);
g_assert (have_ip6_address (priv->ip6_config, TRUE));
linklocal6_cleanup (self);
connection = nm_device_get_connection (self);
g_assert (connection);
method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG);
_LOGD (LOGD_DEVICE, "linklocal6: waiting for link-local addresses successful, continue with method %s", method);
if (strcmp (method, NM_SETTING_IP6_CONFIG_METHOD_AUTO) == 0) {
if (!addrconf6_start_with_link_ready (self)) {
/* Time out IPv6 instead of failing the entire activation */
nm_device_activate_schedule_ip6_config_timeout (self);
}
} else if (strcmp (method, NM_SETTING_IP6_CONFIG_METHOD_DHCP) == 0) {
if (!dhcp6_start_with_link_ready (self, connection)) {
/* Time out IPv6 instead of failing the entire activation */
nm_device_activate_schedule_ip6_config_timeout (self);
}
} else if (strcmp (method, NM_SETTING_IP6_CONFIG_METHOD_LINK_LOCAL) == 0)
nm_device_activate_schedule_ip6_config_result (self);
else
g_return_if_fail (FALSE);
}
static void
check_and_add_ipv6ll_addr (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
int ip_ifindex = nm_device_get_ip_ifindex (self);
NMUtilsIPv6IfaceId iid;
struct in6_addr lladdr;
guint i, n;
if (priv->nm_ipv6ll == FALSE)
return;
if (priv->ip6_config) {
n = nm_ip6_config_get_num_addresses (priv->ip6_config);
for (i = 0; i < n; i++) {
const NMPlatformIP6Address *addr;
addr = nm_ip6_config_get_address (priv->ip6_config, i);
if (IN6_IS_ADDR_LINKLOCAL (&addr->address)) {
/* Already have an LL address, nothing to do */
return;
}
}
}
if (!nm_device_get_ip_iface_identifier (self, &iid)) {
_LOGW (LOGD_IP6, "failed to get interface identifier; IPv6 may be broken");
return;
}
memset (&lladdr, 0, sizeof (lladdr));
lladdr.s6_addr16[0] = htons (0xfe80);
nm_utils_ipv6_addr_set_interface_identfier (&lladdr, iid);
_LOGD (LOGD_IP6, "adding IPv6LL address %s", nm_utils_inet6_ntop (&lladdr, NULL));
if (!nm_platform_ip6_address_add (ip_ifindex,
lladdr,
in6addr_any,
64,
NM_PLATFORM_LIFETIME_PERMANENT,
NM_PLATFORM_LIFETIME_PERMANENT,
0)) {
_LOGW (LOGD_IP6, "failed to add IPv6 link-local address %s",
nm_utils_inet6_ntop (&lladdr, NULL));
}
}
static NMActStageReturn
linklocal6_start (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection;
const char *method;
linklocal6_cleanup (self);
if (have_ip6_address (priv->ip6_config, TRUE))
return NM_ACT_STAGE_RETURN_SUCCESS;
connection = nm_device_get_connection (self);
g_assert (connection);
method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG);
_LOGD (LOGD_DEVICE, "linklocal6: starting IPv6 with method '%s', but the device has no link-local addresses configured. Wait.", method);
check_and_add_ipv6ll_addr (self);
priv->linklocal6_timeout_id = g_timeout_add_seconds (5, linklocal6_timeout_cb, self);
return NM_ACT_STAGE_RETURN_POSTPONE;
}
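/* Illustration (not from the original source): the link-local wait that
 * linklocal6_start() sets up, using the helpers above.
 *
 *   1. If priv->ip6_config already holds a non-tentative fe80:: address,
 *      return SUCCESS and the caller continues immediately.
 *   2. Otherwise check_and_add_ipv6ll_addr() may add an address derived from
 *      the interface identifier, a 5 second timer is armed and POSTPONE is
 *      returned.
 *   3. When the address appears, linklocal6_complete() kicks off the
 *      configured method (addrconf6, DHCPv6 or the link-local result); if the
 *      timer fires first, linklocal6_timeout_cb() schedules the IPv6 config
 *      timeout instead.
 */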
/******************************************/
static void
print_support_extended_ifa_flags (NMSettingIP6ConfigPrivacy use_tempaddr)
{
static gint8 warn = 0;
static gint8 s_libnl = -1, s_kernel;
if (warn >= 2)
return;
if (s_libnl == -1) {
s_libnl = !!nm_platform_check_support_libnl_extended_ifa_flags ();
s_kernel = !!nm_platform_check_support_kernel_extended_ifa_flags ();
if (s_libnl && s_kernel) {
nm_log_dbg (LOGD_IP6, "kernel and libnl support extended IFA_FLAGS (needed by NM for IPv6 private addresses)");
warn = 2;
return;
}
}
if ( use_tempaddr != NM_SETTING_IP6_CONFIG_PRIVACY_PREFER_TEMP_ADDR
&& use_tempaddr != NM_SETTING_IP6_CONFIG_PRIVACY_PREFER_PUBLIC_ADDR) {
if (warn == 0) {
nm_log_dbg (LOGD_IP6, "%s%s%s %s not support extended IFA_FLAGS (needed by NM for IPv6 private addresses)",
!s_kernel ? "kernel" : "",
!s_kernel && !s_libnl ? " and " : "",
!s_libnl ? "libnl" : "",
!s_kernel && !s_libnl ? "do" : "does");
warn = 1;
}
return;
}
if (!s_libnl && !s_kernel) {
nm_log_warn (LOGD_IP6, "libnl and the kernel do not support extended IFA_FLAGS needed by NM for "
"IPv6 private addresses. This feature is not available");
} else if (!s_libnl) {
nm_log_warn (LOGD_IP6, "libnl does not support extended IFA_FLAGS needed by NM for "
"IPv6 private addresses. This feature is not available");
} else if (!s_kernel) {
nm_log_warn (LOGD_IP6, "The kernel does not support extended IFA_FLAGS needed by NM for "
"IPv6 private addresses. This feature is not available");
}
warn = 2;
}
static void
rdisc_config_changed (NMRDisc *rdisc, NMRDiscConfigMap changed, NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
/* ... (the snippet is truncated here in the dump; only the tail of the loop
 * that copies router-discovery addresses into priv->ac_ip6_config
 * survives) ... */
address.preferred = discovered_address->preferred;
if (address.preferred > address.lifetime)
address.preferred = address.lifetime;
address.source = NM_IP_CONFIG_SOURCE_RDISC;
address.flags = ifa_flags;
nm_ip6_config_add_address (priv->ac_ip6_config, &address);
}
}
Commit Message:
CWE ID: CWE-20 | device_has_capability (NMDevice *self, NMDeviceCapabilities caps)
{
/* ... (gap in the dump: the body of device_has_capability and the signature of
 * the following helper, which registers the device on D-Bus, are missing) ... */
{
static guint32 devcount = 0;
NMDevicePrivate *priv;
g_return_if_fail (NM_IS_DEVICE (self));
priv = NM_DEVICE_GET_PRIVATE (self);
g_return_if_fail (priv->path == NULL);
priv->path = g_strdup_printf ("/org/freedesktop/NetworkManager/Devices/%d", devcount++);
_LOGI (LOGD_DEVICE, "exported as %s", priv->path);
nm_dbus_manager_register_object (nm_dbus_manager_get (), priv->path, self);
}
const char *
nm_device_get_path (NMDevice *self)
{
g_return_val_if_fail (self != NULL, NULL);
return NM_DEVICE_GET_PRIVATE (self)->path;
}
const char *
nm_device_get_udi (NMDevice *self)
{
g_return_val_if_fail (self != NULL, NULL);
return NM_DEVICE_GET_PRIVATE (self)->udi;
}
const char *
nm_device_get_iface (NMDevice *self)
{
g_return_val_if_fail (NM_IS_DEVICE (self), 0);
return NM_DEVICE_GET_PRIVATE (self)->iface;
}
int
nm_device_get_ifindex (NMDevice *self)
{
g_return_val_if_fail (self != NULL, 0);
return NM_DEVICE_GET_PRIVATE (self)->ifindex;
}
gboolean
nm_device_is_software (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
return priv->is_software;
}
const char *
nm_device_get_ip_iface (NMDevice *self)
{
NMDevicePrivate *priv;
g_return_val_if_fail (self != NULL, NULL);
priv = NM_DEVICE_GET_PRIVATE (self);
/* If it's not set, default to iface */
return priv->ip_iface ? priv->ip_iface : priv->iface;
}
int
nm_device_get_ip_ifindex (NMDevice *self)
{
NMDevicePrivate *priv;
g_return_val_if_fail (self != NULL, 0);
priv = NM_DEVICE_GET_PRIVATE (self);
/* If it's not set, default to iface */
return priv->ip_iface ? priv->ip_ifindex : priv->ifindex;
}
void
nm_device_set_ip_iface (NMDevice *self, const char *iface)
{
NMDevicePrivate *priv;
char *old_ip_iface;
g_return_if_fail (NM_IS_DEVICE (self));
priv = NM_DEVICE_GET_PRIVATE (self);
if (!g_strcmp0 (iface, priv->ip_iface))
return;
old_ip_iface = priv->ip_iface;
priv->ip_ifindex = 0;
priv->ip_iface = g_strdup (iface);
if (priv->ip_iface) {
priv->ip_ifindex = nm_platform_link_get_ifindex (priv->ip_iface);
if (priv->ip_ifindex > 0) {
if (nm_platform_check_support_user_ipv6ll ())
nm_platform_link_set_user_ipv6ll_enabled (priv->ip_ifindex, TRUE);
if (!nm_platform_link_is_up (priv->ip_ifindex))
nm_platform_link_set_up (priv->ip_ifindex);
} else {
/* Device IP interface must always be a kernel network interface */
_LOGW (LOGD_HW, "failed to look up interface index");
}
}
/* We don't care about any saved values from the old iface */
g_hash_table_remove_all (priv->ip6_saved_properties);
/* Emit change notification */
if (g_strcmp0 (old_ip_iface, priv->ip_iface))
g_object_notify (G_OBJECT (self), NM_DEVICE_IP_IFACE);
g_free (old_ip_iface);
}
static gboolean
get_ip_iface_identifier (NMDevice *self, NMUtilsIPv6IfaceId *out_iid)
{
NMLinkType link_type;
const guint8 *hwaddr = NULL;
size_t hwaddr_len = 0;
int ifindex;
gboolean success;
/* If we get here, we *must* have a kernel netdev, which implies an ifindex */
ifindex = nm_device_get_ip_ifindex (self);
g_assert (ifindex);
link_type = nm_platform_link_get_type (ifindex);
g_return_val_if_fail (link_type > NM_LINK_TYPE_UNKNOWN, 0);
hwaddr = nm_platform_link_get_address (ifindex, &hwaddr_len);
if (!hwaddr_len)
return FALSE;
success = nm_utils_get_ipv6_interface_identifier (link_type,
hwaddr,
hwaddr_len,
out_iid);
if (!success) {
_LOGW (LOGD_HW, "failed to generate interface identifier "
"for link type %u hwaddr_len %zu", link_type, hwaddr_len);
}
return success;
}
static gboolean
nm_device_get_ip_iface_identifier (NMDevice *self, NMUtilsIPv6IfaceId *iid)
{
return NM_DEVICE_GET_CLASS (self)->get_ip_iface_identifier (self, iid);
}
const char *
nm_device_get_driver (NMDevice *self)
{
g_return_val_if_fail (self != NULL, NULL);
return NM_DEVICE_GET_PRIVATE (self)->driver;
}
const char *
nm_device_get_driver_version (NMDevice *self)
{
g_return_val_if_fail (self != NULL, NULL);
return NM_DEVICE_GET_PRIVATE (self)->driver_version;
}
NMDeviceType
nm_device_get_device_type (NMDevice *self)
{
g_return_val_if_fail (NM_IS_DEVICE (self), NM_DEVICE_TYPE_UNKNOWN);
return NM_DEVICE_GET_PRIVATE (self)->type;
}
/**
* nm_device_get_priority():
* @self: the #NMDevice
*
 * Returns: the device's routing priority. Lower numbers mean a "better"
 * device, e.g. higher priority.
*/
int
nm_device_get_priority (NMDevice *self)
{
g_return_val_if_fail (NM_IS_DEVICE (self), 1000);
/* Device 'priority' is used for the default route-metric and is based on
* the device type. The settings ipv4.route-metric and ipv6.route-metric
* can overwrite this default.
*
* Currently for both IPv4 and IPv6 we use the same default values.
*
* The route-metric is used for the metric of the routes of device.
 * This also applies to the default route. Therefore it also affects
 * which device is the "best".
*
* For comparison, note that iproute2 by default adds IPv4 routes with
* metric 0, and IPv6 routes with metric 1024. The latter is the IPv6
* "user default" in the kernel (NM_PLATFORM_ROUTE_METRIC_DEFAULT_IP6).
* In kernel, the full uint32_t range is available for route
* metrics (except for IPv6, where 0 means 1024).
*/
switch (nm_device_get_device_type (self)) {
/* 50 is reserved for VPN (NM_VPN_ROUTE_METRIC_DEFAULT) */
case NM_DEVICE_TYPE_ETHERNET:
return 100;
case NM_DEVICE_TYPE_INFINIBAND:
return 150;
case NM_DEVICE_TYPE_ADSL:
return 200;
case NM_DEVICE_TYPE_WIMAX:
return 250;
case NM_DEVICE_TYPE_BOND:
return 300;
case NM_DEVICE_TYPE_TEAM:
return 350;
case NM_DEVICE_TYPE_VLAN:
return 400;
case NM_DEVICE_TYPE_BRIDGE:
return 425;
case NM_DEVICE_TYPE_MODEM:
return 450;
case NM_DEVICE_TYPE_BT:
return 550;
case NM_DEVICE_TYPE_WIFI:
return 600;
case NM_DEVICE_TYPE_OLPC_MESH:
return 650;
case NM_DEVICE_TYPE_GENERIC:
return 950;
case NM_DEVICE_TYPE_UNKNOWN:
return 10000;
case NM_DEVICE_TYPE_UNUSED1:
case NM_DEVICE_TYPE_UNUSED2:
/* omit default: to get compiler warning about missing switch cases */
break;
}
return 11000;
}
guint32
nm_device_get_ip4_route_metric (NMDevice *self)
{
NMConnection *connection;
gint64 route_metric = -1;
g_return_val_if_fail (NM_IS_DEVICE (self), G_MAXUINT32);
connection = nm_device_get_connection (self);
if (connection)
route_metric = nm_setting_ip_config_get_route_metric (nm_connection_get_setting_ip4_config (connection));
return route_metric >= 0 ? route_metric : nm_device_get_priority (self);
}
guint32
nm_device_get_ip6_route_metric (NMDevice *self)
{
NMConnection *connection;
gint64 route_metric = -1;
g_return_val_if_fail (NM_IS_DEVICE (self), G_MAXUINT32);
connection = nm_device_get_connection (self);
if (connection)
route_metric = nm_setting_ip_config_get_route_metric (nm_connection_get_setting_ip6_config (connection));
return route_metric >= 0 ? route_metric : nm_device_get_priority (self);
}
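/* Illustration (not from the original source): how the two getters above
 * resolve the route metric, with made-up connections.
 *
 *   ethernet device, ipv4.route-metric unset (-1) -> 100 (device priority)
 *   ethernet device, ipv4.route-metric = 50       -> 50
 *   wifi device, ipv6.route-metric unset (-1)     -> 600 (device priority)
 *
 * A per-connection route-metric >= 0 simply overrides the type-based default
 * from nm_device_get_priority().
 */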
const NMPlatformIP4Route *
nm_device_get_ip4_default_route (NMDevice *self, gboolean *out_is_assumed)
{
NMDevicePrivate *priv;
g_return_val_if_fail (NM_IS_DEVICE (self), NULL);
priv = NM_DEVICE_GET_PRIVATE (self);
if (out_is_assumed)
*out_is_assumed = priv->default_route.v4_is_assumed;
return priv->default_route.v4_has ? &priv->default_route.v4 : NULL;
}
const NMPlatformIP6Route *
nm_device_get_ip6_default_route (NMDevice *self, gboolean *out_is_assumed)
{
NMDevicePrivate *priv;
g_return_val_if_fail (NM_IS_DEVICE (self), NULL);
priv = NM_DEVICE_GET_PRIVATE (self);
if (out_is_assumed)
*out_is_assumed = priv->default_route.v6_is_assumed;
return priv->default_route.v6_has ? &priv->default_route.v6 : NULL;
}
const char *
nm_device_get_type_desc (NMDevice *self)
{
g_return_val_if_fail (self != NULL, NULL);
return NM_DEVICE_GET_PRIVATE (self)->type_desc;
}
gboolean
nm_device_has_carrier (NMDevice *self)
{
return NM_DEVICE_GET_PRIVATE (self)->carrier;
}
NMActRequest *
nm_device_get_act_request (NMDevice *self)
{
g_return_val_if_fail (self != NULL, NULL);
return NM_DEVICE_GET_PRIVATE (self)->act_request;
}
NMConnection *
nm_device_get_connection (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
return priv->act_request ? nm_act_request_get_connection (priv->act_request) : NULL;
}
RfKillType
nm_device_get_rfkill_type (NMDevice *self)
{
g_return_val_if_fail (NM_IS_DEVICE (self), FALSE);
return NM_DEVICE_GET_PRIVATE (self)->rfkill_type;
}
static const char *
nm_device_get_physical_port_id (NMDevice *self)
{
return NM_DEVICE_GET_PRIVATE (self)->physical_port_id;
}
/***********************************************************/
static gboolean
nm_device_uses_generated_assumed_connection (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection;
if ( priv->act_request
&& nm_active_connection_get_assumed (NM_ACTIVE_CONNECTION (priv->act_request))) {
connection = nm_act_request_get_connection (priv->act_request);
if ( connection
&& nm_settings_connection_get_nm_generated_assumed (NM_SETTINGS_CONNECTION (connection)))
return TRUE;
}
return FALSE;
}
gboolean
nm_device_uses_assumed_connection (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if ( priv->act_request
&& nm_active_connection_get_assumed (NM_ACTIVE_CONNECTION (priv->act_request)))
return TRUE;
return FALSE;
}
static SlaveInfo *
find_slave_info (NMDevice *self, NMDevice *slave)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
SlaveInfo *info;
GSList *iter;
for (iter = priv->slaves; iter; iter = g_slist_next (iter)) {
info = iter->data;
if (info->slave == slave)
return info;
}
return NULL;
}
static void
free_slave_info (SlaveInfo *info)
{
g_signal_handler_disconnect (info->slave, info->watch_id);
g_clear_object (&info->slave);
memset (info, 0, sizeof (*info));
g_free (info);
}
/**
* nm_device_enslave_slave:
* @self: the master device
* @slave: the slave device to enslave
* @connection: (allow-none): the slave device's connection
*
* If @self is capable of enslaving other devices (ie it's a bridge, bond, team,
* etc) then this function enslaves @slave.
*
* Returns: %TRUE on success, %FALSE on failure or if this device cannot enslave
* other devices.
*/
static gboolean
nm_device_enslave_slave (NMDevice *self, NMDevice *slave, NMConnection *connection)
{
SlaveInfo *info;
gboolean success = FALSE;
gboolean configure;
g_return_val_if_fail (self != NULL, FALSE);
g_return_val_if_fail (slave != NULL, FALSE);
g_return_val_if_fail (NM_DEVICE_GET_CLASS (self)->enslave_slave != NULL, FALSE);
info = find_slave_info (self, slave);
if (!info)
return FALSE;
if (info->enslaved)
success = TRUE;
else {
configure = (info->configure && connection != NULL);
if (configure)
g_return_val_if_fail (nm_device_get_state (slave) >= NM_DEVICE_STATE_DISCONNECTED, FALSE);
success = NM_DEVICE_GET_CLASS (self)->enslave_slave (self, slave, connection, configure);
info->enslaved = success;
}
nm_device_slave_notify_enslave (info->slave, success);
/* Ensure the device's hardware address is up-to-date; it often changes
* when slaves change.
*/
nm_device_update_hw_address (self);
/* Restart IP configuration if we're waiting for slaves. Do this
* after updating the hardware address as IP config may need the
* new address.
*/
if (success) {
if (NM_DEVICE_GET_PRIVATE (self)->ip4_state == IP_WAIT)
nm_device_activate_stage3_ip4_start (self);
if (NM_DEVICE_GET_PRIVATE (self)->ip6_state == IP_WAIT)
nm_device_activate_stage3_ip6_start (self);
}
return success;
}
/**
* nm_device_release_one_slave:
* @self: the master device
* @slave: the slave device to release
* @configure: whether @self needs to actually release @slave
* @reason: the state change reason for the @slave
*
* If @self is capable of enslaving other devices (ie it's a bridge, bond, team,
* etc) then this function releases the previously enslaved @slave and/or
* updates the state of @self and @slave to reflect its release.
*
* Returns: %TRUE on success, %FALSE on failure, if this device cannot enslave
* other devices, or if @slave was never enslaved.
*/
static gboolean
nm_device_release_one_slave (NMDevice *self, NMDevice *slave, gboolean configure, NMDeviceStateReason reason)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
SlaveInfo *info;
gboolean success = FALSE;
g_return_val_if_fail (slave != NULL, FALSE);
g_return_val_if_fail (NM_DEVICE_GET_CLASS (self)->release_slave != NULL, FALSE);
info = find_slave_info (self, slave);
if (!info)
return FALSE;
priv->slaves = g_slist_remove (priv->slaves, info);
if (info->enslaved) {
success = NM_DEVICE_GET_CLASS (self)->release_slave (self, slave, configure);
/* The release_slave() implementation logs success/failure (in the
* correct device-specific log domain), so we don't have to do anything.
*/
}
if (!configure) {
g_warn_if_fail (reason == NM_DEVICE_STATE_REASON_NONE || reason == NM_DEVICE_STATE_REASON_REMOVED);
reason = NM_DEVICE_STATE_REASON_NONE;
} else if (reason == NM_DEVICE_STATE_REASON_NONE) {
g_warn_if_reached ();
reason = NM_DEVICE_STATE_REASON_UNKNOWN;
}
nm_device_slave_notify_release (info->slave, reason);
free_slave_info (info);
/* Ensure the device's hardware address is up-to-date; it often changes
* when slaves change.
*/
nm_device_update_hw_address (self);
return success;
}
static gboolean
is_software_external (NMDevice *self)
{
return nm_device_is_software (self)
&& !nm_device_get_is_nm_owned (self);
}
/**
* nm_device_finish_init:
* @self: the master device
*
* Whatever needs to be done post-initialization, when the device has a DBus
* object name.
*/
void
nm_device_finish_init (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
g_assert (priv->initialized == FALSE);
/* Do not manage externally created software devices until they are IFF_UP */
if ( is_software_external (self)
&& !nm_platform_link_is_up (priv->ifindex)
&& priv->ifindex > 0)
nm_device_set_initial_unmanaged_flag (self, NM_UNMANAGED_EXTERNAL_DOWN, TRUE);
if (priv->master)
nm_device_enslave_slave (priv->master, self, NULL);
priv->initialized = TRUE;
}
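/* React to a carrier change on a managed device: recheck available
 * connections, restart IP configuration that is waiting on a master when
 * carrier returns, and queue DISCONNECTED/UNAVAILABLE transitions. Slaves
 * and ignore-carrier devices skip the carrier-loss handling.
 */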
static void
carrier_changed (NMDevice *self, gboolean carrier)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (!nm_device_get_managed (self))
return;
nm_device_recheck_available_connections (self);
/* ignore-carrier devices ignore all carrier-down events */
if (priv->ignore_carrier && !carrier)
return;
if (priv->is_master) {
/* Bridge/bond/team carrier does not affect its own activation,
* but when carrier comes on, if there are slaves waiting,
* it will restart them.
*/
if (!carrier)
return;
if (nm_device_activate_ip4_state_in_wait (self))
nm_device_activate_stage3_ip4_start (self);
if (nm_device_activate_ip6_state_in_wait (self))
nm_device_activate_stage3_ip6_start (self);
return;
} else if (nm_device_get_enslaved (self) && !carrier) {
/* Slaves don't deactivate when they lose carrier; for
* bonds/teams in particular that would be actively
* counterproductive.
*/
return;
}
if (carrier) {
g_warn_if_fail (priv->state >= NM_DEVICE_STATE_UNAVAILABLE);
if (priv->state == NM_DEVICE_STATE_UNAVAILABLE) {
nm_device_queue_state (self, NM_DEVICE_STATE_DISCONNECTED,
NM_DEVICE_STATE_REASON_CARRIER);
} else if (priv->state == NM_DEVICE_STATE_DISCONNECTED) {
/* If the device is already in DISCONNECTED state without a carrier
* (probably because it is tagged for carrier ignore) ensure that
* when the carrier appears, auto connections are rechecked for
* the device.
*/
nm_device_emit_recheck_auto_activate (self);
}
} else {
g_return_if_fail (priv->state >= NM_DEVICE_STATE_UNAVAILABLE);
if (priv->state == NM_DEVICE_STATE_UNAVAILABLE) {
if (nm_device_queued_state_peek (self) >= NM_DEVICE_STATE_DISCONNECTED)
nm_device_queued_state_clear (self);
} else {
nm_device_queue_state (self, NM_DEVICE_STATE_UNAVAILABLE,
NM_DEVICE_STATE_REASON_CARRIER);
}
}
}
#define LINK_DISCONNECT_DELAY 4
static gboolean
link_disconnect_action_cb (gpointer user_data)
{
NMDevice *self = NM_DEVICE (user_data);
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
_LOGD (LOGD_DEVICE, "link disconnected (calling deferred action) (id=%u)", priv->carrier_defer_id);
priv->carrier_defer_id = 0;
_LOGI (LOGD_DEVICE, "link disconnected (calling deferred action)");
NM_DEVICE_GET_CLASS (self)->carrier_changed (self, FALSE);
return FALSE;
}
static void
link_disconnect_action_cancel (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->carrier_defer_id) {
g_source_remove (priv->carrier_defer_id);
_LOGD (LOGD_DEVICE, "link disconnected (canceling deferred action) (id=%u)", priv->carrier_defer_id);
priv->carrier_defer_id = 0;
}
}
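/**
 * nm_device_set_carrier:
 * @self: the #NMDevice
 * @carrier: the new carrier state
 *
 * Updates the device's carrier state. Carrier gain is acted on immediately;
 * carrier loss is acted on immediately only while the device is at most
 * DISCONNECTED, otherwise it is deferred by %LINK_DISCONNECT_DELAY seconds
 * to ride out short link flaps.
 */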
void
nm_device_set_carrier (NMDevice *self, gboolean carrier)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMDeviceClass *klass = NM_DEVICE_GET_CLASS (self);
NMDeviceState state = nm_device_get_state (self);
if (priv->carrier == carrier)
return;
priv->carrier = carrier;
g_object_notify (G_OBJECT (self), NM_DEVICE_CARRIER);
if (priv->carrier) {
_LOGI (LOGD_DEVICE, "link connected");
link_disconnect_action_cancel (self);
klass->carrier_changed (self, TRUE);
if (priv->carrier_wait_id) {
g_source_remove (priv->carrier_wait_id);
priv->carrier_wait_id = 0;
nm_device_remove_pending_action (self, "carrier wait", TRUE);
_carrier_wait_check_queued_act_request (self);
}
} else if (state <= NM_DEVICE_STATE_DISCONNECTED) {
_LOGI (LOGD_DEVICE, "link disconnected");
klass->carrier_changed (self, FALSE);
} else {
_LOGI (LOGD_DEVICE, "link disconnected (deferring action for %d seconds)", LINK_DISCONNECT_DELAY);
priv->carrier_defer_id = g_timeout_add_seconds (LINK_DISCONNECT_DELAY,
link_disconnect_action_cb, self);
_LOGD (LOGD_DEVICE, "link disconnected (deferring action for %d seconds) (id=%u)",
LINK_DISCONNECT_DELAY, priv->carrier_defer_id);
}
}
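/* The IP interface name changed: drop the saved IPv6 sysctl values and renew
 * any running DHCPv4/DHCPv6 transactions on the new name, failing the device
 * if a renewal cannot be started.
 */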
static void
update_for_ip_ifname_change (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
g_hash_table_remove_all (priv->ip6_saved_properties);
if (priv->dhcp4_client) {
if (!nm_device_dhcp4_renew (self, FALSE)) {
nm_device_state_changed (self,
NM_DEVICE_STATE_FAILED,
NM_DEVICE_STATE_REASON_DHCP_FAILED);
return;
}
}
if (priv->dhcp6_client) {
if (!nm_device_dhcp6_renew (self, FALSE)) {
nm_device_state_changed (self,
NM_DEVICE_STATE_FAILED,
NM_DEVICE_STATE_REASON_DHCP_FAILED);
return;
}
}
if (priv->rdisc) {
/* FIXME: todo */
}
if (priv->dnsmasq_manager) {
/* FIXME: todo */
}
}
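/* Look up the device with @ifindex and record it as our master if it is a
 * master-type device (implements enslave_slave); otherwise log and ignore.
 */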
static void
device_set_master (NMDevice *self, int ifindex)
{
NMDevice *master;
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
master = nm_manager_get_device_by_ifindex (nm_manager_get (), ifindex);
if (master && NM_DEVICE_GET_CLASS (master)->enslave_slave) {
g_clear_object (&priv->master);
priv->master = g_object_ref (master);
nm_device_master_add_slave (master, self, FALSE);
} else if (master) {
_LOGI (LOGD_DEVICE, "enslaved to non-master-type device %s; ignoring",
nm_device_get_iface (master));
} else {
_LOGW (LOGD_DEVICE, "enslaved to unknown device %d %s",
ifindex,
nm_platform_link_get_name (ifindex));
}
}
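/* Handle a platform change on the device's own link: sync UDI, MTU and
 * interface name, update enslavement to match the kernel's view of our
 * master, refresh the IPv6 token, and (un)manage externally created
 * software devices based on their IFF_UP state.
 */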
static void
device_link_changed (NMDevice *self, NMPlatformLink *info)
{
NMDeviceClass *klass = NM_DEVICE_GET_CLASS (self);
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMUtilsIPv6IfaceId token_iid;
gboolean ip_ifname_changed = FALSE;
if (info->udi && g_strcmp0 (info->udi, priv->udi)) {
/* Update UDI to what udev gives us */
g_free (priv->udi);
priv->udi = g_strdup (info->udi);
g_object_notify (G_OBJECT (self), NM_DEVICE_UDI);
}
/* Update MTU if it has changed. */
if (priv->mtu != info->mtu) {
priv->mtu = info->mtu;
g_object_notify (G_OBJECT (self), NM_DEVICE_MTU);
}
if (info->name[0] && strcmp (priv->iface, info->name) != 0) {
_LOGI (LOGD_DEVICE, "interface index %d renamed iface from '%s' to '%s'",
priv->ifindex, priv->iface, info->name);
g_free (priv->iface);
priv->iface = g_strdup (info->name);
/* If the device has no explicit ip_iface, then changing iface changes ip_iface too. */
ip_ifname_changed = !priv->ip_iface;
g_object_notify (G_OBJECT (self), NM_DEVICE_IFACE);
if (ip_ifname_changed)
g_object_notify (G_OBJECT (self), NM_DEVICE_IP_IFACE);
/* Re-match available connections against the new interface name */
nm_device_recheck_available_connections (self);
/* Let any connections that use the new interface name have a chance
* to auto-activate on the device.
*/
nm_device_emit_recheck_auto_activate (self);
}
/* Update slave status for external changes */
if (priv->enslaved && info->master != nm_device_get_ifindex (priv->master))
nm_device_release_one_slave (priv->master, self, FALSE, NM_DEVICE_STATE_REASON_NONE);
if (info->master && !priv->enslaved) {
device_set_master (self, info->master);
if (priv->master)
nm_device_enslave_slave (priv->master, self, NULL);
}
if (priv->rdisc && nm_platform_link_get_ipv6_token (priv->ifindex, &token_iid)) {
_LOGD (LOGD_DEVICE, "IPv6 tokenized identifier present on device %s", priv->iface);
if (nm_rdisc_set_iid (priv->rdisc, token_iid))
nm_rdisc_start (priv->rdisc);
}
if (klass->link_changed)
klass->link_changed (self, info);
/* Update DHCP, etc, if needed */
if (ip_ifname_changed)
update_for_ip_ifname_change (self);
if (priv->up != info->up) {
priv->up = info->up;
/* Manage externally-created software interfaces only when they are IFF_UP */
g_assert (priv->ifindex > 0);
if (is_software_external (self)) {
gboolean external_down = nm_device_get_unmanaged_flag (self, NM_UNMANAGED_EXTERNAL_DOWN);
if (external_down && info->up) {
if (nm_device_get_state (self) < NM_DEVICE_STATE_DISCONNECTED) {
/* Ensure the assume check is queued before any queued state changes
* from the transition to UNAVAILABLE.
*/
nm_device_queue_recheck_assume (self);
/* Resetting the EXTERNAL_DOWN flag may change the device's state
* to UNAVAILABLE. To ensure that the state change doesn't touch
* the device before assumption occurs, pass
* NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED as the reason.
*/
nm_device_set_unmanaged (self,
NM_UNMANAGED_EXTERNAL_DOWN,
FALSE,
NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED);
} else {
/* Don't trigger a state change; if the device is in a
* state higher than UNAVAILABLE, it is already IFF_UP
* or an explicit activation request was received.
*/
priv->unmanaged_flags &= ~NM_UNMANAGED_EXTERNAL_DOWN;
}
} else if (!external_down && !info->up && nm_device_get_state (self) <= NM_DEVICE_STATE_DISCONNECTED) {
/* If the device is already disconnected and is set !IFF_UP,
* unmanage it.
*/
nm_device_set_unmanaged (self,
NM_UNMANAGED_EXTERNAL_DOWN,
TRUE,
NM_DEVICE_STATE_REASON_USER_REQUESTED);
}
}
}
}
static void
device_ip_link_changed (NMDevice *self, NMPlatformLink *info)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (info->name[0] && g_strcmp0 (priv->ip_iface, info->name)) {
_LOGI (LOGD_DEVICE, "interface index %d renamed ip_iface (%d) from '%s' to '%s'",
priv->ifindex, nm_device_get_ip_ifindex (self),
priv->ip_iface, info->name);
g_free (priv->ip_iface);
priv->ip_iface = g_strdup (info->name);
g_object_notify (G_OBJECT (self), NM_DEVICE_IP_IFACE);
update_for_ip_ifname_change (self);
}
}
static void
link_changed_cb (NMPlatform *platform,
int ifindex,
NMPlatformLink *info,
NMPlatformSignalChangeType change_type,
NMPlatformReason reason,
NMDevice *self)
{
if (change_type != NM_PLATFORM_SIGNAL_CHANGED)
return;
/* We don't filter by 'reason' because we are interested in *all* link
* changes. For example a call to nm_platform_link_set_up() may result
* in an internal carrier change (i.e. we ask the kernel to set IFF_UP
	 * and it results in also setting IFF_LOWER_UP).
*/
if (ifindex == nm_device_get_ifindex (self))
device_link_changed (self, info);
else if (ifindex == nm_device_get_ip_ifindex (self))
device_ip_link_changed (self, info);
}
static void
link_changed (NMDevice *self, NMPlatformLink *info)
{
/* Update carrier from link event if applicable. */
if ( device_has_capability (self, NM_DEVICE_CAP_CARRIER_DETECT)
&& !device_has_capability (self, NM_DEVICE_CAP_NONSTANDARD_CARRIER))
nm_device_set_carrier (self, info->connected);
}
/**
* nm_device_notify_component_added():
* @self: the #NMDevice
* @component: the component being added by a plugin
*
* Called by the manager to notify the device that a new component has
* been found. The device implementation should return %TRUE if it
* wishes to claim the component, or %FALSE if it cannot.
*
* Returns: %TRUE to claim the component, %FALSE if the component cannot be
* claimed.
*/
gboolean
nm_device_notify_component_added (NMDevice *self, GObject *component)
{
if (NM_DEVICE_GET_CLASS (self)->component_added)
return NM_DEVICE_GET_CLASS (self)->component_added (self, component);
return FALSE;
}
/**
* nm_device_owns_iface():
* @self: the #NMDevice
* @iface: an interface name
*
* Called by the manager to ask if the device or any of its components owns
* @iface. For example, a WWAN implementation would return %TRUE for an
* ethernet interface name that was owned by the WWAN device's modem component,
* because that ethernet interface is controlled by the WWAN device and cannot
* be used independently of the WWAN device.
*
 * Returns: %TRUE if @self or its components own the interface name,
* %FALSE if not
*/
gboolean
nm_device_owns_iface (NMDevice *self, const char *iface)
{
if (NM_DEVICE_GET_CLASS (self)->owns_iface)
return NM_DEVICE_GET_CLASS (self)->owns_iface (self, iface);
return FALSE;
}
NMConnection *
nm_device_new_default_connection (NMDevice *self)
{
if (NM_DEVICE_GET_CLASS (self)->new_default_connection)
return NM_DEVICE_GET_CLASS (self)->new_default_connection (self);
return NULL;
}
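/* Track state changes of our slaves: enslave a slave once it reaches
 * IP_CONFIG and release it again when it fails, deactivates or becomes
 * unavailable/unmanaged. Nothing is done before the master reached CONFIG.
 */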
static void
slave_state_changed (NMDevice *slave,
NMDeviceState slave_new_state,
NMDeviceState slave_old_state,
NMDeviceStateReason reason,
NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
gboolean release = FALSE;
_LOGD (LOGD_DEVICE, "slave %s state change %d (%s) -> %d (%s)",
nm_device_get_iface (slave),
slave_old_state,
state_to_string (slave_old_state),
slave_new_state,
state_to_string (slave_new_state));
/* Don't try to enslave slaves until the master is ready */
if (priv->state < NM_DEVICE_STATE_CONFIG)
return;
if (slave_new_state == NM_DEVICE_STATE_IP_CONFIG)
nm_device_enslave_slave (self, slave, nm_device_get_connection (slave));
else if (slave_new_state > NM_DEVICE_STATE_ACTIVATED)
release = TRUE;
else if ( slave_new_state <= NM_DEVICE_STATE_DISCONNECTED
&& slave_old_state > NM_DEVICE_STATE_DISCONNECTED) {
/* Catch failures due to unavailable or unmanaged */
release = TRUE;
}
if (release) {
nm_device_release_one_slave (self, slave, TRUE, reason);
/* Bridge/bond/team interfaces are left up until manually deactivated */
if (priv->slaves == NULL && priv->state == NM_DEVICE_STATE_ACTIVATED)
_LOGD (LOGD_DEVICE, "last slave removed; remaining activated");
}
}
/**
* nm_device_master_add_slave:
* @self: the master device
* @slave: the slave device to enslave
* @configure: pass %TRUE if the slave should be configured by the master, or
* %FALSE if it is already configured outside NetworkManager
*
* If @self is capable of enslaving other devices (ie it's a bridge, bond, team,
* etc) then this function adds @slave to the slave list for later enslavement.
*
* Returns: %TRUE on success, %FALSE on failure
*/
static gboolean
nm_device_master_add_slave (NMDevice *self, NMDevice *slave, gboolean configure)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
SlaveInfo *info;
g_return_val_if_fail (self != NULL, FALSE);
g_return_val_if_fail (slave != NULL, FALSE);
g_return_val_if_fail (NM_DEVICE_GET_CLASS (self)->enslave_slave != NULL, FALSE);
if (configure)
g_return_val_if_fail (nm_device_get_state (slave) >= NM_DEVICE_STATE_DISCONNECTED, FALSE);
if (!find_slave_info (self, slave)) {
info = g_malloc0 (sizeof (SlaveInfo));
info->slave = g_object_ref (slave);
info->configure = configure;
info->watch_id = g_signal_connect (slave, "state-changed",
G_CALLBACK (slave_state_changed), self);
priv->slaves = g_slist_append (priv->slaves, info);
}
nm_device_queue_recheck_assume (self);
return TRUE;
}
/**
* nm_device_master_get_slaves:
* @self: the master device
*
* Returns: any slaves of which @self is the master. Caller owns returned list.
*/
GSList *
nm_device_master_get_slaves (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
GSList *slaves = NULL, *iter;
for (iter = priv->slaves; iter; iter = g_slist_next (iter))
slaves = g_slist_prepend (slaves, ((SlaveInfo *) iter->data)->slave);
return slaves;
}
/**
* nm_device_master_get_slave_by_ifindex:
* @self: the master device
* @ifindex: the slave's interface index
*
* Returns: the slave with the given @ifindex of which @self is the master,
* or %NULL if no device with @ifindex is a slave of @self.
*/
NMDevice *
nm_device_master_get_slave_by_ifindex (NMDevice *self, int ifindex)
{
GSList *iter;
for (iter = NM_DEVICE_GET_PRIVATE (self)->slaves; iter; iter = g_slist_next (iter)) {
SlaveInfo *info = iter->data;
if (nm_device_get_ip_ifindex (info->slave) == ifindex)
return info->slave;
}
return NULL;
}
/**
* nm_device_master_check_slave_physical_port:
* @self: the master device
* @slave: a slave device
* @log_domain: domain to log a warning in
*
* Checks if @self already has a slave with the same #NMDevice:physical-port-id
* as @slave, and logs a warning if so.
*/
void
nm_device_master_check_slave_physical_port (NMDevice *self, NMDevice *slave,
guint64 log_domain)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
const char *slave_physical_port_id, *existing_physical_port_id;
SlaveInfo *info;
GSList *iter;
slave_physical_port_id = nm_device_get_physical_port_id (slave);
if (!slave_physical_port_id)
return;
for (iter = priv->slaves; iter; iter = iter->next) {
info = iter->data;
if (info->slave == slave)
continue;
existing_physical_port_id = nm_device_get_physical_port_id (info->slave);
if (!g_strcmp0 (slave_physical_port_id, existing_physical_port_id)) {
_LOGW (log_domain, "slave %s shares a physical port with existing slave %s",
nm_device_get_ip_iface (slave),
nm_device_get_ip_iface (info->slave));
/* Since this function will get called for every slave, we only have
* to warn about the first match we find; if there are other matches
* later in the list, we will have already warned about them matching
* @existing earlier.
*/
return;
}
}
}
/* release all slaves */
static void
nm_device_master_release_slaves (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMDeviceStateReason reason;
/* Don't release the slaves if this connection doesn't belong to NM. */
if (nm_device_uses_generated_assumed_connection (self))
return;
reason = priv->state_reason;
if (priv->state == NM_DEVICE_STATE_FAILED)
reason = NM_DEVICE_STATE_REASON_DEPENDENCY_FAILED;
while (priv->slaves) {
SlaveInfo *info = priv->slaves->data;
nm_device_release_one_slave (self, info->slave, TRUE, reason);
}
}
/**
* nm_device_get_master:
* @self: the device
*
* If @self has been enslaved by another device, this returns that
* device. Otherwise it returns %NULL. (In particular, note that if
* @self is in the process of activating as a slave, but has not yet
* been enslaved by its master, this will return %NULL.)
*
* Returns: (transfer none): @self's master, or %NULL
*/
NMDevice *
nm_device_get_master (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->enslaved)
return priv->master;
else
return NULL;
}
/**
* nm_device_slave_notify_enslave:
* @self: the slave device
* @success: whether the enslaving operation succeeded
*
* Notifies a slave that either it has been enslaved, or else its master tried
* to enslave it and failed.
*/
static void
nm_device_slave_notify_enslave (NMDevice *self, gboolean success)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection = nm_device_get_connection (self);
gboolean activating = (priv->state == NM_DEVICE_STATE_IP_CONFIG);
g_assert (priv->master);
if (!priv->enslaved) {
if (success) {
if (activating) {
_LOGI (LOGD_DEVICE, "Activation: connection '%s' enslaved, continuing activation",
nm_connection_get_id (connection));
} else
_LOGI (LOGD_DEVICE, "enslaved to %s", nm_device_get_iface (priv->master));
priv->enslaved = TRUE;
g_object_notify (G_OBJECT (self), NM_DEVICE_MASTER);
} else if (activating) {
_LOGW (LOGD_DEVICE, "Activation: connection '%s' could not be enslaved",
nm_connection_get_id (connection));
}
}
if (activating) {
priv->ip4_state = IP_DONE;
priv->ip6_state = IP_DONE;
nm_device_queue_state (self,
success ? NM_DEVICE_STATE_SECONDARIES : NM_DEVICE_STATE_FAILED,
NM_DEVICE_STATE_REASON_NONE);
} else
nm_device_queue_recheck_assume (self);
}
/**
* nm_device_slave_notify_release:
* @self: the slave device
* @reason: the reason associated with the state change
*
* Notifies a slave that it has been released, and why.
*/
static void
nm_device_slave_notify_release (NMDevice *self, NMDeviceStateReason reason)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection = nm_device_get_connection (self);
NMDeviceState new_state;
const char *master_status;
if ( reason != NM_DEVICE_STATE_REASON_NONE
&& priv->state > NM_DEVICE_STATE_DISCONNECTED
&& priv->state <= NM_DEVICE_STATE_ACTIVATED) {
if (reason == NM_DEVICE_STATE_REASON_DEPENDENCY_FAILED) {
new_state = NM_DEVICE_STATE_FAILED;
master_status = "failed";
} else if (reason == NM_DEVICE_STATE_REASON_USER_REQUESTED) {
new_state = NM_DEVICE_STATE_DEACTIVATING;
master_status = "deactivated by user request";
} else {
new_state = NM_DEVICE_STATE_DISCONNECTED;
master_status = "deactivated";
}
_LOGD (LOGD_DEVICE, "Activation: connection '%s' master %s",
nm_connection_get_id (connection),
master_status);
nm_device_queue_state (self, new_state, reason);
} else if (priv->master)
_LOGI (LOGD_DEVICE, "released from master %s", nm_device_get_iface (priv->master));
else
_LOGD (LOGD_DEVICE, "released from master%s", priv->enslaved ? "" : " (was not enslaved)");
if (priv->enslaved) {
priv->enslaved = FALSE;
g_object_notify (G_OBJECT (self), NM_DEVICE_MASTER);
}
}
/**
* nm_device_get_enslaved:
* @self: the #NMDevice
*
* Returns: %TRUE if the device is enslaved to a master device (eg bridge or
* bond or team), %FALSE if not
*/
gboolean
nm_device_get_enslaved (NMDevice *self)
{
return NM_DEVICE_GET_PRIVATE (self)->enslaved;
}
/**
* nm_device_removed:
* @self: the #NMDevice
*
* Called by the manager when the device was removed. Releases the device from
* the master in case it's enslaved.
*/
void
nm_device_removed (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->enslaved)
nm_device_release_one_slave (priv->master, self, FALSE, NM_DEVICE_STATE_REASON_REMOVED);
}
static gboolean
is_available (NMDevice *self, NMDeviceCheckDevAvailableFlags flags)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->carrier || priv->ignore_carrier)
return TRUE;
if (NM_FLAGS_HAS (flags, NM_DEVICE_CHECK_DEV_AVAILABLE_IGNORE_CARRIER))
return TRUE;
return FALSE;
}
/**
* nm_device_is_available:
* @self: the #NMDevice
* @flags: additional flags to influence the check. Flags have the
* meaning to increase the availability of a device.
*
* Checks if @self would currently be capable of activating a
* connection. In particular, it checks that the device is ready (eg,
* is not missing firmware), that it has carrier (if necessary), and
* that any necessary external software (eg, ModemManager,
* wpa_supplicant) is available.
*
* @self can only be in a state higher than
* %NM_DEVICE_STATE_UNAVAILABLE when nm_device_is_available() returns
* %TRUE. (But note that it can still be %NM_DEVICE_STATE_UNMANAGED
* when it is available.)
*
* Returns: %TRUE or %FALSE
*/
gboolean
nm_device_is_available (NMDevice *self, NMDeviceCheckDevAvailableFlags flags)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->firmware_missing)
return FALSE;
return NM_DEVICE_GET_CLASS (self)->is_available (self, flags);
}
gboolean
nm_device_get_enabled (NMDevice *self)
{
g_return_val_if_fail (NM_IS_DEVICE (self), FALSE);
if (NM_DEVICE_GET_CLASS (self)->get_enabled)
return NM_DEVICE_GET_CLASS (self)->get_enabled (self);
return TRUE;
}
void
nm_device_set_enabled (NMDevice *self, gboolean enabled)
{
g_return_if_fail (NM_IS_DEVICE (self));
if (NM_DEVICE_GET_CLASS (self)->set_enabled)
NM_DEVICE_GET_CLASS (self)->set_enabled (self, enabled);
}
/**
* nm_device_get_autoconnect:
* @self: the #NMDevice
*
* Returns: %TRUE if the device allows autoconnect connections, or %FALSE if the
* device is explicitly blocking all autoconnect connections. Does not take
* into account transient conditions like companion devices that may wish to
* block the device.
*/
gboolean
nm_device_get_autoconnect (NMDevice *self)
{
g_return_val_if_fail (NM_IS_DEVICE (self), FALSE);
return NM_DEVICE_GET_PRIVATE (self)->autoconnect;
}
static void
nm_device_set_autoconnect (NMDevice *self, gboolean autoconnect)
{
NMDevicePrivate *priv;
g_return_if_fail (NM_IS_DEVICE (self));
priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->autoconnect == autoconnect)
return;
if (autoconnect) {
/* Default-unmanaged devices never autoconnect */
if (!nm_device_get_default_unmanaged (self)) {
priv->autoconnect = TRUE;
g_object_notify (G_OBJECT (self), NM_DEVICE_AUTOCONNECT);
}
} else {
priv->autoconnect = FALSE;
g_object_notify (G_OBJECT (self), NM_DEVICE_AUTOCONNECT);
}
}
static gboolean
autoconnect_allowed_accumulator (GSignalInvocationHint *ihint,
GValue *return_accu,
const GValue *handler_return, gpointer data)
{
if (!g_value_get_boolean (handler_return))
g_value_set_boolean (return_accu, FALSE);
return TRUE;
}
/**
* nm_device_autoconnect_allowed:
* @self: the #NMDevice
*
* Returns: %TRUE if the device can be auto-connected immediately, taking
* transient conditions into account (like companion devices that may wish to
* block autoconnect for a time).
*/
gboolean
nm_device_autoconnect_allowed (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
GValue instance = G_VALUE_INIT;
GValue retval = G_VALUE_INIT;
if (priv->state < NM_DEVICE_STATE_DISCONNECTED || !priv->autoconnect)
return FALSE;
/* The 'autoconnect-allowed' signal is emitted on a device to allow
* other listeners to block autoconnect on the device if they wish.
* This is mainly used by the OLPC Mesh devices to block autoconnect
* on their companion WiFi device as they share radio resources and
* cannot be connected at the same time.
*/
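	/* Hypothetical sketch of such a listener (handler and data names are
	 * made up): a companion device connects to this device's
	 * "autoconnect-allowed" signal and returns FALSE from its handler to
	 * veto autoconnect while it is busy, e.g.:
	 *
	 *   g_signal_connect (device, "autoconnect-allowed",
	 *                     G_CALLBACK (companion_autoconnect_allowed_cb),
	 *                     companion);
	 */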
g_value_init (&instance, G_TYPE_OBJECT);
g_value_set_object (&instance, self);
g_value_init (&retval, G_TYPE_BOOLEAN);
if (priv->autoconnect)
g_value_set_boolean (&retval, TRUE);
else
g_value_set_boolean (&retval, FALSE);
/* Use g_signal_emitv() rather than g_signal_emit() to avoid the return
* value being changed if no handlers are connected */
g_signal_emitv (&instance, signals[AUTOCONNECT_ALLOWED], 0, &retval);
g_value_unset (&instance);
return g_value_get_boolean (&retval);
}
static gboolean
can_auto_connect (NMDevice *self,
NMConnection *connection,
char **specific_object)
{
NMSettingConnection *s_con;
s_con = nm_connection_get_setting_connection (connection);
if (!nm_setting_connection_get_autoconnect (s_con))
return FALSE;
return nm_device_check_connection_available (self, connection, NM_DEVICE_CHECK_CON_AVAILABLE_NONE, NULL);
}
/**
* nm_device_can_auto_connect:
* @self: an #NMDevice
* @connection: a #NMConnection
* @specific_object: (out) (transfer full): on output, the path of an
* object associated with the returned connection, to be passed to
* nm_manager_activate_connection(), or %NULL.
*
* Checks if @connection can be auto-activated on @self right now.
* This requires, at a minimum, that the connection be compatible with
* @self, and that it have the #NMSettingConnection:autoconnect property
* set, and that the device allow auto connections. Some devices impose
* additional requirements. (Eg, a Wi-Fi connection can only be activated
* if its SSID was seen in the last scan.)
*
* Returns: %TRUE, if the @connection can be auto-activated.
**/
gboolean
nm_device_can_auto_connect (NMDevice *self,
NMConnection *connection,
char **specific_object)
{
g_return_val_if_fail (NM_IS_DEVICE (self), FALSE);
g_return_val_if_fail (NM_IS_CONNECTION (connection), FALSE);
g_return_val_if_fail (specific_object && !*specific_object, FALSE);
if (nm_device_autoconnect_allowed (self))
return NM_DEVICE_GET_CLASS (self)->can_auto_connect (self, connection, specific_object);
return FALSE;
}
static gboolean
device_has_config (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
/* Check for IP configuration. */
if (priv->ip4_config && nm_ip4_config_get_num_addresses (priv->ip4_config))
return TRUE;
if (priv->ip6_config && nm_ip6_config_get_num_addresses (priv->ip6_config))
return TRUE;
/* The existence of a software device is good enough. */
if (nm_device_is_software (self))
return TRUE;
/* Slaves are also configured by definition */
if (nm_platform_link_get_master (priv->ifindex) > 0)
return TRUE;
return FALSE;
}
/**
* nm_device_master_update_slave_connection:
* @self: the master #NMDevice
* @slave: the slave #NMDevice
* @connection: the #NMConnection to update with the slave settings
 * @error: (out): error description
*
* Reads the slave configuration for @slave and updates @connection with those
* properties. This invokes a virtual function on the master device @self.
*
* Returns: %TRUE if the configuration was read and @connection updated,
* %FALSE on failure.
*/
gboolean
nm_device_master_update_slave_connection (NMDevice *self,
NMDevice *slave,
NMConnection *connection,
GError **error)
{
NMDeviceClass *klass;
gboolean success;
g_return_val_if_fail (self, FALSE);
g_return_val_if_fail (NM_IS_DEVICE (self), FALSE);
g_return_val_if_fail (slave, FALSE);
g_return_val_if_fail (connection, FALSE);
g_return_val_if_fail (!error || !*error, FALSE);
g_return_val_if_fail (nm_connection_get_setting_connection (connection), FALSE);
g_return_val_if_fail (nm_device_get_iface (self), FALSE);
klass = NM_DEVICE_GET_CLASS (self);
if (klass->master_update_slave_connection) {
success = klass->master_update_slave_connection (self, slave, connection, error);
g_return_val_if_fail (!error || (success && !*error) || *error, success);
return success;
}
g_set_error (error,
NM_DEVICE_ERROR,
NM_DEVICE_ERROR_FAILED,
"master device '%s' cannot update a slave connection for slave device '%s' (master type not supported?)",
nm_device_get_iface (self), nm_device_get_iface (slave));
return FALSE;
}
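/**
 * nm_device_generate_connection:
 * @self: the #NMDevice
 * @master: (allow-none): the master device, if @self is enslaved
 *
 * Builds an #NMConnection describing the device's current, externally
 * created configuration, so that the device can be assumed.
 *
 * Returns: the generated connection, or %NULL if the device class cannot
 *   update connections, the device has no existing configuration, the
 *   generated connection does not verify, or it carries neither IP
 *   configuration nor a master/slave relationship.
 */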
NMConnection *
nm_device_generate_connection (NMDevice *self, NMDevice *master)
{
NMDeviceClass *klass = NM_DEVICE_GET_CLASS (self);
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
const char *ifname = nm_device_get_iface (self);
NMConnection *connection;
NMSetting *s_con;
NMSetting *s_ip4;
NMSetting *s_ip6;
gs_free char *uuid = NULL;
const char *ip4_method, *ip6_method;
GError *error = NULL;
/* If update_connection() is not implemented, just fail. */
if (!klass->update_connection)
return NULL;
/* Return NULL if device is unconfigured. */
if (!device_has_config (self)) {
_LOGD (LOGD_DEVICE, "device has no existing configuration");
return NULL;
}
connection = nm_simple_connection_new ();
s_con = nm_setting_connection_new ();
uuid = nm_utils_uuid_generate ();
g_object_set (s_con,
NM_SETTING_CONNECTION_UUID, uuid,
NM_SETTING_CONNECTION_ID, ifname,
NM_SETTING_CONNECTION_AUTOCONNECT, FALSE,
NM_SETTING_CONNECTION_INTERFACE_NAME, ifname,
NM_SETTING_CONNECTION_TIMESTAMP, (guint64) time (NULL),
NULL);
if (klass->connection_type)
g_object_set (s_con, NM_SETTING_CONNECTION_TYPE, klass->connection_type, NULL);
nm_connection_add_setting (connection, s_con);
/* If the device is a slave, update various slave settings */
if (master) {
if (!nm_device_master_update_slave_connection (master,
self,
connection,
&error))
{
_LOGE (LOGD_DEVICE, "master device '%s' failed to update slave connection: %s",
nm_device_get_iface (master), error ? error->message : "(unknown error)");
g_error_free (error);
g_object_unref (connection);
return NULL;
}
} else {
/* Only regular and master devices get IP configuration; slaves do not */
s_ip4 = nm_ip4_config_create_setting (priv->ip4_config);
nm_connection_add_setting (connection, s_ip4);
s_ip6 = nm_ip6_config_create_setting (priv->ip6_config);
nm_connection_add_setting (connection, s_ip6);
}
klass->update_connection (self, connection);
/* Check the connection in case of update_connection() bug. */
if (!nm_connection_verify (connection, &error)) {
_LOGE (LOGD_DEVICE, "Generated connection does not verify: %s", error->message);
g_clear_error (&error);
g_object_unref (connection);
return NULL;
}
/* Ignore the connection if it has no IP configuration,
* no slave configuration, and is not a master interface.
*/
ip4_method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG);
ip6_method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG);
if ( g_strcmp0 (ip4_method, NM_SETTING_IP4_CONFIG_METHOD_DISABLED) == 0
&& g_strcmp0 (ip6_method, NM_SETTING_IP6_CONFIG_METHOD_IGNORE) == 0
&& !nm_setting_connection_get_master (NM_SETTING_CONNECTION (s_con))
&& !priv->slaves) {
_LOGD (LOGD_DEVICE, "ignoring generated connection (no IP and not in master-slave relationship)");
g_object_unref (connection);
connection = NULL;
}
return connection;
}
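/**
 * nm_device_complete_connection:
 * @self: the #NMDevice
 * @connection: the partial #NMConnection to complete
 * @specific_object: (allow-none): an object path associated with the activation
 * @existing_connections: the list of existing connections, passed to the
 *   device class implementation
 * @error: (out): error description on failure
 *
 * Asks the device class to fill in the missing parts of @connection and
 * then verifies the result.
 *
 * Returns: %TRUE if the connection was completed and verifies, %FALSE otherwise.
 */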
gboolean
nm_device_complete_connection (NMDevice *self,
NMConnection *connection,
const char *specific_object,
const GSList *existing_connections,
GError **error)
{
gboolean success = FALSE;
g_return_val_if_fail (self != NULL, FALSE);
g_return_val_if_fail (connection != NULL, FALSE);
if (!NM_DEVICE_GET_CLASS (self)->complete_connection) {
g_set_error (error, NM_DEVICE_ERROR, NM_DEVICE_ERROR_INVALID_CONNECTION,
"Device class %s had no complete_connection method",
G_OBJECT_TYPE_NAME (self));
return FALSE;
}
success = NM_DEVICE_GET_CLASS (self)->complete_connection (self,
connection,
specific_object,
existing_connections,
error);
if (success)
success = nm_connection_verify (connection, error);
return success;
}
static gboolean
check_connection_compatible (NMDevice *self, NMConnection *connection)
{
NMSettingConnection *s_con;
const char *config_iface, *device_iface;
s_con = nm_connection_get_setting_connection (connection);
g_assert (s_con);
config_iface = nm_setting_connection_get_interface_name (s_con);
device_iface = nm_device_get_iface (self);
if (config_iface && strcmp (config_iface, device_iface) != 0)
return FALSE;
return TRUE;
}
/**
* nm_device_check_connection_compatible:
* @self: an #NMDevice
* @connection: an #NMConnection
*
* Checks if @connection could potentially be activated on @self.
* This means only that @self has the proper capabilities, and that
* @connection is not locked to some other device. It does not
* necessarily mean that @connection could be activated on @self
* right now. (Eg, it might refer to a Wi-Fi network that is not
* currently available.)
*
* Returns: #TRUE if @connection could potentially be activated on
* @self.
*/
gboolean
nm_device_check_connection_compatible (NMDevice *self, NMConnection *connection)
{
g_return_val_if_fail (NM_IS_DEVICE (self), FALSE);
g_return_val_if_fail (NM_IS_CONNECTION (connection), FALSE);
return NM_DEVICE_GET_CLASS (self)->check_connection_compatible (self, connection);
}
/**
* nm_device_can_assume_connections:
* @self: #NMDevice instance
*
* This is a convenience function to determine whether connection assumption
* is available for this device.
*
* Returns: %TRUE if the device is capable of assuming connections, %FALSE if not
*/
static gboolean
nm_device_can_assume_connections (NMDevice *self)
{
return !!NM_DEVICE_GET_CLASS (self)->update_connection;
}
/**
* nm_device_can_assume_active_connection:
* @self: #NMDevice instance
*
* This is a convenience function to determine whether the device's active
* connection can be assumed if NetworkManager restarts. This method returns
* %TRUE if and only if the device can assume connections, and the device has
* an active connection, and that active connection can be assumed.
*
* Returns: %TRUE if the device's active connection can be assumed, or %FALSE
* if there is no active connection or the active connection cannot be
* assumed.
*/
gboolean
nm_device_can_assume_active_connection (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection;
const char *method;
const char *assumable_ip6_methods[] = {
NM_SETTING_IP6_CONFIG_METHOD_IGNORE,
NM_SETTING_IP6_CONFIG_METHOD_AUTO,
NM_SETTING_IP6_CONFIG_METHOD_DHCP,
NM_SETTING_IP6_CONFIG_METHOD_LINK_LOCAL,
NM_SETTING_IP6_CONFIG_METHOD_MANUAL,
NULL
};
const char *assumable_ip4_methods[] = {
NM_SETTING_IP4_CONFIG_METHOD_DISABLED,
		NM_SETTING_IP4_CONFIG_METHOD_AUTO,
		NM_SETTING_IP4_CONFIG_METHOD_MANUAL,
NULL
};
if (!nm_device_can_assume_connections (self))
return FALSE;
connection = nm_device_get_connection (self);
if (!connection)
return FALSE;
/* Can't assume connections that aren't yet configured
* FIXME: what about bridges/bonds waiting for slaves?
*/
if (priv->state < NM_DEVICE_STATE_IP_CONFIG)
return FALSE;
if (priv->ip4_state != IP_DONE && priv->ip6_state != IP_DONE)
return FALSE;
method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG);
if (!_nm_utils_string_in_list (method, assumable_ip6_methods))
return FALSE;
method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG);
if (!_nm_utils_string_in_list (method, assumable_ip4_methods))
return FALSE;
return TRUE;
}
static gboolean
nm_device_emit_recheck_assume (gpointer self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
priv->recheck_assume_id = 0;
if (!nm_device_get_act_request (self)) {
_LOGD (LOGD_DEVICE, "emit RECHECK_ASSUME signal");
g_signal_emit (self, signals[RECHECK_ASSUME], 0);
}
return G_SOURCE_REMOVE;
}
void
nm_device_queue_recheck_assume (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (nm_device_can_assume_connections (self) && !priv->recheck_assume_id)
priv->recheck_assume_id = g_idle_add (nm_device_emit_recheck_assume, self);
}
void
nm_device_emit_recheck_auto_activate (NMDevice *self)
{
g_signal_emit (self, signals[RECHECK_AUTO_ACTIVATE], 0);
}
static void
dnsmasq_state_changed_cb (NMDnsMasqManager *manager, guint32 status, gpointer user_data)
{
NMDevice *self = NM_DEVICE (user_data);
switch (status) {
case NM_DNSMASQ_STATUS_DEAD:
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_SHARED_START_FAILED);
break;
default:
break;
}
}
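/* Forget the scheduled activation idle source for the given address family
 * (AF_INET6, anything else means IPv4), optionally also removing it from
 * the main loop.
 */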
static void
activation_source_clear (NMDevice *self, gboolean remove_source, int family)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
guint *act_source_id;
gpointer *act_source_func;
if (family == AF_INET6) {
act_source_id = &priv->act_source6_id;
act_source_func = &priv->act_source6_func;
} else {
act_source_id = &priv->act_source_id;
act_source_func = &priv->act_source_func;
}
if (*act_source_id) {
if (remove_source)
g_source_remove (*act_source_id);
*act_source_id = 0;
*act_source_func = NULL;
}
}
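/* Schedule an activation stage as an idle callback for the given address
 * family. A different pending stage is replaced; if the same function is
 * already scheduled it is left alone.
 */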
static void
activation_source_schedule (NMDevice *self, GSourceFunc func, int family)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
guint *act_source_id;
gpointer *act_source_func;
if (family == AF_INET6) {
act_source_id = &priv->act_source6_id;
act_source_func = &priv->act_source6_func;
} else {
act_source_id = &priv->act_source_id;
act_source_func = &priv->act_source_func;
}
if (*act_source_id)
_LOGE (LOGD_DEVICE, "activation stage already scheduled");
/* Don't bother rescheduling the same function that's about to
* run anyway. Fixes issues with crappy wireless drivers sending
* streams of associate events before NM has had a chance to process
* the first one.
*/
if (!*act_source_id || (*act_source_func != func)) {
activation_source_clear (self, TRUE, family);
*act_source_id = g_idle_add (func, self);
*act_source_func = func;
}
}
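/* Whether the connection's ipv4/ipv6 may-fail property allows activation to
 * proceed even though this address family failed to configure.
 */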
static gboolean
get_ip_config_may_fail (NMDevice *self, int family)
{
NMConnection *connection;
NMSettingIPConfig *s_ip = NULL;
g_return_val_if_fail (self != NULL, TRUE);
connection = nm_device_get_connection (self);
g_assert (connection);
/* Fail the connection if the failed IP method is required to complete */
switch (family) {
case AF_INET:
s_ip = nm_connection_get_setting_ip4_config (connection);
break;
case AF_INET6:
s_ip = nm_connection_get_setting_ip6_config (connection);
break;
default:
g_assert_not_reached ();
}
return nm_setting_ip_config_get_may_fail (s_ip);
}
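/* Called once the master's active connection is ready for slaves: attach
 * ourselves to the master device and continue with stage 2 of activation.
 */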
static void
master_ready_cb (NMActiveConnection *active,
GParamSpec *pspec,
NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMActiveConnection *master;
g_assert (priv->state == NM_DEVICE_STATE_PREPARE);
/* Notify a master device that it has a new slave */
g_assert (nm_active_connection_get_master_ready (active));
master = nm_active_connection_get_master (active);
priv->master = g_object_ref (nm_active_connection_get_device (master));
nm_device_master_add_slave (priv->master,
self,
nm_active_connection_get_assumed (active) ? FALSE : TRUE);
_LOGD (LOGD_DEVICE, "master connection ready; master device %s",
nm_device_get_iface (priv->master));
if (priv->master_ready_id) {
g_signal_handler_disconnect (active, priv->master_ready_id);
priv->master_ready_id = 0;
}
nm_device_activate_schedule_stage2_device_config (self);
}
static NMActStageReturn
act_stage1_prepare (NMDevice *self, NMDeviceStateReason *reason)
{
return NM_ACT_STAGE_RETURN_SUCCESS;
}
/*
* nm_device_activate_stage1_device_prepare
*
* Prepare for device activation
*
*/
static gboolean
nm_device_activate_stage1_device_prepare (gpointer user_data)
{
NMDevice *self = NM_DEVICE (user_data);
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMActStageReturn ret = NM_ACT_STAGE_RETURN_SUCCESS;
NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE;
NMActiveConnection *active = NM_ACTIVE_CONNECTION (priv->act_request);
/* Clear the activation source ID now that this stage has run */
activation_source_clear (self, FALSE, 0);
priv->ip4_state = priv->ip6_state = IP_NONE;
/* Notify the new ActiveConnection along with the state change */
g_object_notify (G_OBJECT (self), NM_DEVICE_ACTIVE_CONNECTION);
_LOGI (LOGD_DEVICE, "Activation: Stage 1 of 5 (Device Prepare) started...");
nm_device_state_changed (self, NM_DEVICE_STATE_PREPARE, NM_DEVICE_STATE_REASON_NONE);
/* Assumed connections were already set up outside NetworkManager */
if (!nm_active_connection_get_assumed (active)) {
ret = NM_DEVICE_GET_CLASS (self)->act_stage1_prepare (self, &reason);
if (ret == NM_ACT_STAGE_RETURN_POSTPONE) {
goto out;
} else if (ret == NM_ACT_STAGE_RETURN_FAILURE) {
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason);
goto out;
}
g_assert (ret == NM_ACT_STAGE_RETURN_SUCCESS);
}
if (nm_active_connection_get_master (active)) {
/* If the master connection is ready for slaves, attach ourselves */
if (nm_active_connection_get_master_ready (active))
master_ready_cb (active, NULL, self);
else {
_LOGD (LOGD_DEVICE, "waiting for master connection to become ready");
/* Attach a signal handler and wait for the master connection to begin activating */
g_assert (priv->master_ready_id == 0);
priv->master_ready_id = g_signal_connect (active,
"notify::" NM_ACTIVE_CONNECTION_INT_MASTER_READY,
(GCallback) master_ready_cb,
self);
/* Postpone */
}
} else
nm_device_activate_schedule_stage2_device_config (self);
out:
_LOGI (LOGD_DEVICE, "Activation: Stage 1 of 5 (Device Prepare) complete.");
return FALSE;
}
/*
* nm_device_activate_schedule_stage1_device_prepare
*
* Prepare a device for activation
*
*/
void
nm_device_activate_schedule_stage1_device_prepare (NMDevice *self)
{
NMDevicePrivate *priv;
g_return_if_fail (NM_IS_DEVICE (self));
priv = NM_DEVICE_GET_PRIVATE (self);
g_return_if_fail (priv->act_request);
activation_source_schedule (self, nm_device_activate_stage1_device_prepare, 0);
_LOGI (LOGD_DEVICE, "Activation: Stage 1 of 5 (Device Prepare) scheduled...");
}
static NMActStageReturn
act_stage2_config (NMDevice *self, NMDeviceStateReason *reason)
{
/* Nothing to do */
return NM_ACT_STAGE_RETURN_SUCCESS;
}
/*
* nm_device_activate_stage2_device_config
*
* Determine device parameters and set those on the device, ie
* for wireless devices, set SSID, keys, etc.
*
*/
static gboolean
nm_device_activate_stage2_device_config (gpointer user_data)
{
NMDevice *self = NM_DEVICE (user_data);
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMActStageReturn ret;
NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE;
gboolean no_firmware = FALSE;
NMActiveConnection *active = NM_ACTIVE_CONNECTION (priv->act_request);
GSList *iter;
/* Clear the activation source ID now that this stage has run */
activation_source_clear (self, FALSE, 0);
_LOGI (LOGD_DEVICE, "Activation: Stage 2 of 5 (Device Configure) starting...");
nm_device_state_changed (self, NM_DEVICE_STATE_CONFIG, NM_DEVICE_STATE_REASON_NONE);
/* Assumed connections were already set up outside NetworkManager */
if (!nm_active_connection_get_assumed (active)) {
if (!nm_device_bring_up (self, FALSE, &no_firmware)) {
if (no_firmware)
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_FIRMWARE_MISSING);
else
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_CONFIG_FAILED);
goto out;
}
ret = NM_DEVICE_GET_CLASS (self)->act_stage2_config (self, &reason);
if (ret == NM_ACT_STAGE_RETURN_POSTPONE)
goto out;
else if (ret == NM_ACT_STAGE_RETURN_FAILURE) {
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason);
goto out;
}
g_assert (ret == NM_ACT_STAGE_RETURN_SUCCESS);
}
/* If we have slaves that aren't yet enslaved, do that now */
for (iter = priv->slaves; iter; iter = g_slist_next (iter)) {
SlaveInfo *info = iter->data;
NMDeviceState slave_state = nm_device_get_state (info->slave);
if (slave_state == NM_DEVICE_STATE_IP_CONFIG)
nm_device_enslave_slave (self, info->slave, nm_device_get_connection (info->slave));
else if ( nm_device_uses_generated_assumed_connection (self)
&& slave_state <= NM_DEVICE_STATE_DISCONNECTED)
nm_device_queue_recheck_assume (info->slave);
}
_LOGI (LOGD_DEVICE, "Activation: Stage 2 of 5 (Device Configure) successful.");
nm_device_activate_schedule_stage3_ip_config_start (self);
out:
_LOGI (LOGD_DEVICE, "Activation: Stage 2 of 5 (Device Configure) complete.");
return FALSE;
}
/*
* nm_device_activate_schedule_stage2_device_config
*
* Schedule setup of the hardware device
*
*/
void
nm_device_activate_schedule_stage2_device_config (NMDevice *self)
{
NMDevicePrivate *priv;
g_return_if_fail (NM_IS_DEVICE (self));
priv = NM_DEVICE_GET_PRIVATE (self);
g_return_if_fail (priv->act_request);
activation_source_schedule (self, nm_device_activate_stage2_device_config, 0);
_LOGI (LOGD_DEVICE, "Activation: Stage 2 of 5 (Device Configure) scheduled...");
}
/*********************************************/
/* avahi-autoipd stuff */
static void
aipd_timeout_remove (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->aipd_timeout) {
g_source_remove (priv->aipd_timeout);
priv->aipd_timeout = 0;
}
}
static void
aipd_cleanup (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->aipd_watch) {
g_source_remove (priv->aipd_watch);
priv->aipd_watch = 0;
}
if (priv->aipd_pid > 0) {
nm_utils_kill_child_sync (priv->aipd_pid, SIGKILL, LOGD_AUTOIP4, "avahi-autoipd", NULL, 0, 0);
priv->aipd_pid = -1;
}
aipd_timeout_remove (self);
}
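/* Build an NMIP4Config holding the link-local address @lla reported by
 * avahi-autoipd plus the 224.0.0.0/4 multicast route.
 */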
static NMIP4Config *
aipd_get_ip4_config (NMDevice *self, guint32 lla)
{
NMIP4Config *config = NULL;
NMPlatformIP4Address address;
NMPlatformIP4Route route;
config = nm_ip4_config_new (nm_device_get_ip_ifindex (self));
g_assert (config);
memset (&address, 0, sizeof (address));
address.address = lla;
address.plen = 16;
address.source = NM_IP_CONFIG_SOURCE_IP4LL;
nm_ip4_config_add_address (config, &address);
	/* Add a multicast route for link-local connections: destination = 224.0.0.0, netmask = 240.0.0.0 */
memset (&route, 0, sizeof (route));
route.network = htonl (0xE0000000L);
route.plen = 4;
route.source = NM_IP_CONFIG_SOURCE_IP4LL;
route.metric = nm_device_get_ip4_route_metric (self);
nm_ip4_config_add_route (config, &route);
return config;
}
#define IPV4LL_NETWORK (htonl (0xA9FE0000L))
#define IPV4LL_NETMASK (htonl (0xFFFF0000L))
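/**
 * nm_device_handle_autoip4_event:
 * @self: the #NMDevice
 * @event: the event reported by the avahi-autoipd helper (e.g. "BIND")
 * @address: the IPv4 link-local address the event refers to
 *
 * Applies avahi-autoipd events to the device: "BIND" installs the reported
 * link-local address, any other event fails the activation or terminates
 * the connection because the address is no longer valid.
 */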
void
nm_device_handle_autoip4_event (NMDevice *self,
const char *event,
const char *address)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection = NULL;
const char *method;
NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE;
g_return_if_fail (event != NULL);
if (priv->act_request == NULL)
return;
connection = nm_act_request_get_connection (priv->act_request);
g_assert (connection);
/* Ignore if the connection isn't an AutoIP connection */
method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG);
if (g_strcmp0 (method, NM_SETTING_IP4_CONFIG_METHOD_LINK_LOCAL) != 0)
return;
if (strcmp (event, "BIND") == 0) {
guint32 lla;
NMIP4Config *config;
if (inet_pton (AF_INET, address, &lla) <= 0) {
_LOGE (LOGD_AUTOIP4, "invalid address %s received from avahi-autoipd.", address);
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_AUTOIP_ERROR);
return;
}
if ((lla & IPV4LL_NETMASK) != IPV4LL_NETWORK) {
_LOGE (LOGD_AUTOIP4, "invalid address %s received from avahi-autoipd (not link-local).", address);
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_AUTOIP_ERROR);
return;
}
config = aipd_get_ip4_config (self, lla);
if (config == NULL) {
_LOGE (LOGD_AUTOIP4, "failed to get autoip config");
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_UNAVAILABLE);
return;
}
if (priv->ip4_state == IP_CONF) {
aipd_timeout_remove (self);
nm_device_activate_schedule_ip4_config_result (self, config);
} else if (priv->ip4_state == IP_DONE) {
if (!ip4_config_merge_and_apply (self, config, TRUE, &reason)) {
_LOGE (LOGD_AUTOIP4, "failed to update IP4 config for autoip change.");
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason);
}
} else
g_assert_not_reached ();
g_object_unref (config);
} else {
_LOGW (LOGD_AUTOIP4, "autoip address %s no longer valid because '%s'.", address, event);
/* The address is gone; terminate the connection or fail activation */
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_EXPIRED);
}
}
static void
aipd_watch_cb (GPid pid, gint status, gpointer user_data)
{
NMDevice *self = NM_DEVICE (user_data);
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMDeviceState state;
if (!priv->aipd_watch)
return;
priv->aipd_watch = 0;
if (WIFEXITED (status))
_LOGD (LOGD_AUTOIP4, "avahi-autoipd exited with error code %d", WEXITSTATUS (status));
else if (WIFSTOPPED (status))
_LOGW (LOGD_AUTOIP4, "avahi-autoipd stopped unexpectedly with signal %d", WSTOPSIG (status));
else if (WIFSIGNALED (status))
_LOGW (LOGD_AUTOIP4, "avahi-autoipd died with signal %d", WTERMSIG (status));
else
_LOGW (LOGD_AUTOIP4, "avahi-autoipd died from an unknown cause");
aipd_cleanup (self);
state = nm_device_get_state (self);
if (nm_device_is_activating (self) || (state == NM_DEVICE_STATE_ACTIVATED))
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_AUTOIP_FAILED);
}
static gboolean
aipd_timeout_cb (gpointer user_data)
{
NMDevice *self = NM_DEVICE (user_data);
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->aipd_timeout) {
_LOGI (LOGD_AUTOIP4, "avahi-autoipd timed out.");
priv->aipd_timeout = 0;
aipd_cleanup (self);
if (priv->ip4_state == IP_CONF)
nm_device_activate_schedule_ip4_config_timeout (self);
}
return FALSE;
}
/* default to installed helper, but can be modified for testing */
const char *nm_device_autoipd_helper_path = LIBEXECDIR "/nm-avahi-autoipd.action";
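/* Spawn avahi-autoipd with the nm-avahi-autoipd.action helper script, watch
 * the child for unexpected exit and bound the address attempt with a 20
 * second timeout.
 */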
static NMActStageReturn
aipd_start (NMDevice *self, NMDeviceStateReason *reason)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
const char *argv[6];
char *cmdline;
const char *aipd_binary;
int i = 0;
GError *error = NULL;
aipd_cleanup (self);
/* Find avahi-autoipd */
aipd_binary = nm_utils_find_helper ("avahi-autoipd", NULL, NULL);
if (!aipd_binary) {
_LOGW (LOGD_DEVICE | LOGD_AUTOIP4,
"Activation: Stage 3 of 5 (IP Configure Start) failed"
" to start avahi-autoipd: not found");
*reason = NM_DEVICE_STATE_REASON_AUTOIP_START_FAILED;
return NM_ACT_STAGE_RETURN_FAILURE;
}
argv[i++] = aipd_binary;
argv[i++] = "--script";
argv[i++] = nm_device_autoipd_helper_path;
if (nm_logging_enabled (LOGL_DEBUG, LOGD_AUTOIP4))
argv[i++] = "--debug";
argv[i++] = nm_device_get_ip_iface (self);
argv[i++] = NULL;
cmdline = g_strjoinv (" ", (char **) argv);
_LOGD (LOGD_AUTOIP4, "running: %s", cmdline);
g_free (cmdline);
if (!g_spawn_async ("/", (char **) argv, NULL, G_SPAWN_DO_NOT_REAP_CHILD,
nm_utils_setpgid, NULL, &(priv->aipd_pid), &error)) {
_LOGW (LOGD_DEVICE | LOGD_AUTOIP4,
"Activation: Stage 3 of 5 (IP Configure Start) failed"
" to start avahi-autoipd: %s",
error && error->message ? error->message : "(unknown)");
g_clear_error (&error);
aipd_cleanup (self);
return NM_ACT_STAGE_RETURN_FAILURE;
}
_LOGI (LOGD_DEVICE | LOGD_AUTOIP4,
"Activation: Stage 3 of 5 (IP Configure Start) started"
" avahi-autoipd...");
/* Monitor the child process so we know when it dies */
priv->aipd_watch = g_child_watch_add (priv->aipd_pid, aipd_watch_cb, self);
/* Start a timeout to bound the address attempt */
priv->aipd_timeout = g_timeout_add_seconds (20, aipd_timeout_cb, self);
return NM_ACT_STAGE_RETURN_POSTPONE;
}
/*********************************************/
static gboolean
_device_get_default_route_from_platform (NMDevice *self, int addr_family, NMPlatformIPRoute *out_route)
{
gboolean success = FALSE;
int ifindex = nm_device_get_ip_ifindex (self);
GArray *routes;
if (addr_family == AF_INET)
routes = nm_platform_ip4_route_get_all (ifindex, NM_PLATFORM_GET_ROUTE_MODE_ONLY_DEFAULT);
else
routes = nm_platform_ip6_route_get_all (ifindex, NM_PLATFORM_GET_ROUTE_MODE_ONLY_DEFAULT);
if (routes) {
guint route_metric = G_MAXUINT32, m;
const NMPlatformIPRoute *route = NULL, *r;
guint i;
/* if there are several default routes, find the one with the best metric */
for (i = 0; i < routes->len; i++) {
if (addr_family == AF_INET) {
r = (const NMPlatformIPRoute *) &g_array_index (routes, NMPlatformIP4Route, i);
m = r->metric;
} else {
r = (const NMPlatformIPRoute *) &g_array_index (routes, NMPlatformIP6Route, i);
m = nm_utils_ip6_route_metric_normalize (r->metric);
}
if (!route || m < route_metric) {
route = r;
route_metric = m;
}
}
if (route) {
if (addr_family == AF_INET)
*((NMPlatformIP4Route *) out_route) = *((NMPlatformIP4Route *) route);
else
*((NMPlatformIP6Route *) out_route) = *((NMPlatformIP6Route *) route);
success = TRUE;
}
g_array_free (routes, TRUE);
}
return success;
}
/*********************************************/
static void
ensure_con_ipx_config (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
int ip_ifindex = nm_device_get_ip_ifindex (self);
NMConnection *connection;
g_assert (!!priv->con_ip4_config == !!priv->con_ip6_config);
if (priv->con_ip4_config)
return;
connection = nm_device_get_connection (self);
if (!connection)
return;
priv->con_ip4_config = nm_ip4_config_new (ip_ifindex);
priv->con_ip6_config = nm_ip6_config_new (ip_ifindex);
nm_ip4_config_merge_setting (priv->con_ip4_config,
nm_connection_get_setting_ip4_config (connection),
nm_device_get_ip4_route_metric (self));
nm_ip6_config_merge_setting (priv->con_ip6_config,
nm_connection_get_setting_ip6_config (connection),
nm_device_get_ip6_route_metric (self));
if (nm_device_uses_assumed_connection (self)) {
/* For assumed connections ignore all addresses and routes. */
nm_ip4_config_reset_addresses (priv->con_ip4_config);
nm_ip4_config_reset_routes (priv->con_ip4_config);
nm_ip6_config_reset_addresses (priv->con_ip6_config);
nm_ip6_config_reset_routes (priv->con_ip6_config);
}
}
/*********************************************/
/* DHCPv4 stuff */
static void
dhcp4_cleanup (NMDevice *self, gboolean stop, gboolean release)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->dhcp4_client) {
/* Stop any ongoing DHCP transaction on this device */
if (priv->dhcp4_state_sigid) {
g_signal_handler_disconnect (priv->dhcp4_client, priv->dhcp4_state_sigid);
priv->dhcp4_state_sigid = 0;
}
nm_device_remove_pending_action (self, PENDING_ACTION_DHCP4, FALSE);
if (stop)
nm_dhcp_client_stop (priv->dhcp4_client, release);
g_clear_object (&priv->dhcp4_client);
}
if (priv->dhcp4_config) {
g_clear_object (&priv->dhcp4_config);
g_object_notify (G_OBJECT (self), NM_DEVICE_DHCP4_CONFIG);
}
}
static gboolean
ip4_config_merge_and_apply (NMDevice *self,
NMIP4Config *config,
gboolean commit,
NMDeviceStateReason *out_reason)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection;
gboolean success;
NMIP4Config *composite;
gboolean has_direct_route;
const guint32 default_route_metric = nm_device_get_ip4_route_metric (self);
guint32 gateway;
/* Merge all the configs into the composite config */
if (config) {
g_clear_object (&priv->dev_ip4_config);
priv->dev_ip4_config = g_object_ref (config);
}
composite = nm_ip4_config_new (nm_device_get_ip_ifindex (self));
ensure_con_ipx_config (self);
if (priv->dev_ip4_config)
nm_ip4_config_merge (composite, priv->dev_ip4_config);
if (priv->vpn4_config)
nm_ip4_config_merge (composite, priv->vpn4_config);
if (priv->ext_ip4_config)
nm_ip4_config_merge (composite, priv->ext_ip4_config);
/* Merge WWAN config *last* to ensure modem-given settings overwrite
* any external stuff set by pppd or other scripts.
*/
if (priv->wwan_ip4_config)
nm_ip4_config_merge (composite, priv->wwan_ip4_config);
/* Merge user overrides into the composite config. For assumed connections,
* con_ip4_config is empty. */
if (priv->con_ip4_config)
nm_ip4_config_merge (composite, priv->con_ip4_config);
connection = nm_device_get_connection (self);
/* Add the default route.
*
* We keep track of the default route of a device in a private field.
* NMDevice needs to know the default route at this point, because the gateway
* might require a direct route (see below).
*
* But also, we don't want to add the default route to priv->ip4_config,
* because the default route from the setting might not be the same one that
* NMDefaultRouteManager eventually configures (because it might
* tweak the effective metric).
*/
/* unless we come to a different conclusion below, we have no default route and
* the route is assumed. */
priv->default_route.v4_has = FALSE;
priv->default_route.v4_is_assumed = TRUE;
if (!commit) {
/* during a non-commit event, we always pick up whatever is configured. */
goto END_ADD_DEFAULT_ROUTE;
}
if (nm_device_uses_assumed_connection (self))
goto END_ADD_DEFAULT_ROUTE;
/* we are about to commit (for a non-assumed connection). Enforce whatever we have
* configured. */
priv->default_route.v4_is_assumed = FALSE;
if ( !connection
|| !nm_default_route_manager_ip4_connection_has_default_route (nm_default_route_manager_get (), connection))
goto END_ADD_DEFAULT_ROUTE;
if (!nm_ip4_config_get_num_addresses (composite)) {
/* without addresses we can have no default route. */
goto END_ADD_DEFAULT_ROUTE;
}
gateway = nm_ip4_config_get_gateway (composite);
if ( !gateway
&& nm_device_get_device_type (self) != NM_DEVICE_TYPE_MODEM)
goto END_ADD_DEFAULT_ROUTE;
has_direct_route = ( gateway == 0
|| nm_ip4_config_get_subnet_for_host (composite, gateway)
|| nm_ip4_config_get_direct_route_for_host (composite, gateway));
priv->default_route.v4_has = TRUE;
memset (&priv->default_route.v4, 0, sizeof (priv->default_route.v4));
priv->default_route.v4.source = NM_IP_CONFIG_SOURCE_USER;
priv->default_route.v4.gateway = gateway;
priv->default_route.v4.metric = default_route_metric;
priv->default_route.v4.mss = nm_ip4_config_get_mss (composite);
if (!has_direct_route) {
NMPlatformIP4Route r = priv->default_route.v4;
/* add a direct route to the gateway */
r.network = gateway;
r.plen = 32;
r.gateway = 0;
nm_ip4_config_add_route (composite, &r);
}
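	/* Example with illustrative values: if only 10.0.0.0/24 is configured and the
	 * gateway is 203.0.113.1, the gateway is not on-link, so the /32 host route
	 * added above is what lets the default route point at it. */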
END_ADD_DEFAULT_ROUTE:
if (priv->default_route.v4_is_assumed) {
/* If above does not explicitly assign a default route, we always pick up the
* default route based on what is currently configured.
* That means that even managed connections with never-default, can
* get a default route (if configured externally).
*/
priv->default_route.v4_has = _device_get_default_route_from_platform (self, AF_INET, (NMPlatformIPRoute *) &priv->default_route.v4);
}
/* Allow setting MTU etc */
if (commit) {
if (NM_DEVICE_GET_CLASS (self)->ip4_config_pre_commit)
NM_DEVICE_GET_CLASS (self)->ip4_config_pre_commit (self, composite);
}
success = nm_device_set_ip4_config (self, composite, default_route_metric, commit, out_reason);
g_object_unref (composite);
return success;
}
static void
dhcp4_lease_change (NMDevice *self, NMIP4Config *config)
{
NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE;
g_return_if_fail (config != NULL);
if (!ip4_config_merge_and_apply (self, config, TRUE, &reason)) {
_LOGW (LOGD_DHCP4, "failed to update IPv4 config for DHCP change.");
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason);
} else {
/* Notify dispatcher scripts of new DHCP4 config */
nm_dispatcher_call (DISPATCHER_ACTION_DHCP4_CHANGE,
nm_device_get_connection (self),
self,
NULL,
NULL,
NULL);
}
}
static void
dhcp4_fail (NMDevice *self, gboolean timeout)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
dhcp4_cleanup (self, TRUE, FALSE);
if (timeout || (priv->ip4_state == IP_CONF))
nm_device_activate_schedule_ip4_config_timeout (self);
else if (priv->ip4_state == IP_DONE)
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_EXPIRED);
else
g_warn_if_reached ();
}
static void
dhcp4_update_config (NMDevice *self, NMDhcp4Config *config, GHashTable *options)
{
GHashTableIter iter;
const char *key, *value;
/* Update the DHCP4 config object with new DHCP options */
nm_dhcp4_config_reset (config);
g_hash_table_iter_init (&iter, options);
while (g_hash_table_iter_next (&iter, (gpointer) &key, (gpointer) &value))
nm_dhcp4_config_add_option (config, key, value);
g_object_notify (G_OBJECT (self), NM_DEVICE_DHCP4_CONFIG);
}
static void
dhcp4_state_changed (NMDhcpClient *client,
NMDhcpState state,
NMIP4Config *ip4_config,
GHashTable *options,
gpointer user_data)
{
NMDevice *self = NM_DEVICE (user_data);
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
g_return_if_fail (nm_dhcp_client_get_ipv6 (client) == FALSE);
g_return_if_fail (!ip4_config || NM_IS_IP4_CONFIG (ip4_config));
_LOGD (LOGD_DHCP4, "new DHCPv4 client state %d", state);
switch (state) {
case NM_DHCP_STATE_BOUND:
if (!ip4_config) {
_LOGW (LOGD_DHCP4, "failed to get IPv4 config in response to DHCP event.");
nm_device_state_changed (self,
NM_DEVICE_STATE_FAILED,
NM_DEVICE_STATE_REASON_IP_CONFIG_UNAVAILABLE);
break;
}
dhcp4_update_config (self, priv->dhcp4_config, options);
if (priv->ip4_state == IP_CONF)
nm_device_activate_schedule_ip4_config_result (self, ip4_config);
else if (priv->ip4_state == IP_DONE)
dhcp4_lease_change (self, ip4_config);
break;
case NM_DHCP_STATE_TIMEOUT:
dhcp4_fail (self, TRUE);
break;
case NM_DHCP_STATE_EXPIRE:
/* Ignore expiry before we even have a lease (NAK, old lease, etc) */
if (priv->ip4_state == IP_CONF)
break;
/* Fall through */
case NM_DHCP_STATE_DONE:
case NM_DHCP_STATE_FAIL:
dhcp4_fail (self, FALSE);
break;
default:
break;
}
}
static NMActStageReturn
dhcp4_start (NMDevice *self,
NMConnection *connection,
NMDeviceStateReason *reason)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMSettingIPConfig *s_ip4;
const guint8 *hw_addr;
size_t hw_addr_len = 0;
GByteArray *tmp = NULL;
s_ip4 = nm_connection_get_setting_ip4_config (connection);
/* Clear old exported DHCP options */
if (priv->dhcp4_config)
g_object_unref (priv->dhcp4_config);
priv->dhcp4_config = nm_dhcp4_config_new ();
hw_addr = nm_platform_link_get_address (nm_device_get_ip_ifindex (self), &hw_addr_len);
if (hw_addr_len) {
tmp = g_byte_array_sized_new (hw_addr_len);
g_byte_array_append (tmp, hw_addr, hw_addr_len);
}
/* Begin DHCP on the interface */
g_warn_if_fail (priv->dhcp4_client == NULL);
priv->dhcp4_client = nm_dhcp_manager_start_ip4 (nm_dhcp_manager_get (),
nm_device_get_ip_iface (self),
nm_device_get_ip_ifindex (self),
tmp,
nm_connection_get_uuid (connection),
nm_device_get_ip4_route_metric (self),
nm_setting_ip_config_get_dhcp_send_hostname (s_ip4),
nm_setting_ip_config_get_dhcp_hostname (s_ip4),
nm_setting_ip4_config_get_dhcp_client_id (NM_SETTING_IP4_CONFIG (s_ip4)),
priv->dhcp_timeout,
priv->dhcp_anycast_address,
NULL);
if (tmp)
g_byte_array_free (tmp, TRUE);
if (!priv->dhcp4_client) {
*reason = NM_DEVICE_STATE_REASON_DHCP_START_FAILED;
return NM_ACT_STAGE_RETURN_FAILURE;
}
priv->dhcp4_state_sigid = g_signal_connect (priv->dhcp4_client,
NM_DHCP_CLIENT_SIGNAL_STATE_CHANGED,
G_CALLBACK (dhcp4_state_changed),
self);
nm_device_add_pending_action (self, PENDING_ACTION_DHCP4, TRUE);
/* DHCP devices will be notified by the DHCP manager when stuff happens */
return NM_ACT_STAGE_RETURN_POSTPONE;
}
gboolean
nm_device_dhcp4_renew (NMDevice *self, gboolean release)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMActStageReturn ret;
NMDeviceStateReason reason;
NMConnection *connection;
g_return_val_if_fail (priv->dhcp4_client != NULL, FALSE);
_LOGI (LOGD_DHCP4, "DHCPv4 lease renewal requested");
/* Terminate old DHCP instance and release the old lease */
dhcp4_cleanup (self, TRUE, release);
connection = nm_device_get_connection (self);
g_assert (connection);
/* Start DHCP again on the interface */
ret = dhcp4_start (self, connection, &reason);
return (ret != NM_ACT_STAGE_RETURN_FAILURE);
}
/*********************************************/
static GHashTable *shared_ips = NULL;
static void
release_shared_ip (gpointer data)
{
g_hash_table_remove (shared_ips, data);
}
static gboolean
reserve_shared_ip (NMDevice *self, NMSettingIPConfig *s_ip4, NMPlatformIP4Address *address)
{
if (G_UNLIKELY (shared_ips == NULL))
shared_ips = g_hash_table_new (g_direct_hash, g_direct_equal);
memset (address, 0, sizeof (*address));
if (s_ip4 && nm_setting_ip_config_get_num_addresses (s_ip4)) {
/* Use the first user-supplied address */
NMIPAddress *user = nm_setting_ip_config_get_address (s_ip4, 0);
g_assert (user);
nm_ip_address_get_address_binary (user, &address->address);
address->plen = nm_ip_address_get_prefix (user);
} else {
/* Find an unused address in the 10.42.x.x range */
guint32 start = (guint32) ntohl (0x0a2a0001); /* 10.42.0.1 */
guint32 count = 0;
while (g_hash_table_lookup (shared_ips, GUINT_TO_POINTER (start + count))) {
count += ntohl (0x100);
if (count > ntohl (0xFE00)) {
_LOGE (LOGD_SHARING, "ran out of shared IP addresses!");
return FALSE;
}
}
address->address = start + count;
address->plen = 24;
g_hash_table_insert (shared_ips,
GUINT_TO_POINTER (address->address),
GUINT_TO_POINTER (TRUE));
}
return TRUE;
}
static NMIP4Config *
shared4_new_config (NMDevice *self, NMConnection *connection, NMDeviceStateReason *reason)
{
NMIP4Config *config = NULL;
NMPlatformIP4Address address;
g_return_val_if_fail (self != NULL, NULL);
if (!reserve_shared_ip (self, nm_connection_get_setting_ip4_config (connection), &address)) {
*reason = NM_DEVICE_STATE_REASON_IP_CONFIG_UNAVAILABLE;
return NULL;
}
config = nm_ip4_config_new (nm_device_get_ip_ifindex (self));
address.source = NM_IP_CONFIG_SOURCE_SHARED;
nm_ip4_config_add_address (config, &address);
/* Remove the address lock when the object gets disposed */
g_object_set_data_full (G_OBJECT (config), "shared-ip",
GUINT_TO_POINTER (address.address),
release_shared_ip);
return config;
}
/*********************************************/
static gboolean
connection_ip4_method_requires_carrier (NMConnection *connection,
gboolean *out_ip4_enabled)
{
const char *method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG);
static const char *ip4_carrier_methods[] = {
NM_SETTING_IP4_CONFIG_METHOD_AUTO,
NM_SETTING_IP4_CONFIG_METHOD_LINK_LOCAL,
NULL
};
if (out_ip4_enabled)
*out_ip4_enabled = !!strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_DISABLED);
return _nm_utils_string_in_list (method, ip4_carrier_methods);
}
static gboolean
connection_ip6_method_requires_carrier (NMConnection *connection,
gboolean *out_ip6_enabled)
{
const char *method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG);
static const char *ip6_carrier_methods[] = {
NM_SETTING_IP6_CONFIG_METHOD_AUTO,
NM_SETTING_IP6_CONFIG_METHOD_DHCP,
NM_SETTING_IP6_CONFIG_METHOD_LINK_LOCAL,
NULL
};
if (out_ip6_enabled)
*out_ip6_enabled = !!strcmp (method, NM_SETTING_IP6_CONFIG_METHOD_IGNORE);
return _nm_utils_string_in_list (method, ip6_carrier_methods);
}
static gboolean
connection_requires_carrier (NMConnection *connection)
{
NMSettingIPConfig *s_ip4, *s_ip6;
gboolean ip4_carrier_wanted, ip6_carrier_wanted;
gboolean ip4_used = FALSE, ip6_used = FALSE;
ip4_carrier_wanted = connection_ip4_method_requires_carrier (connection, &ip4_used);
if (ip4_carrier_wanted) {
/* If IPv4 wants a carrier and cannot fail, the whole connection
* requires a carrier regardless of the IPv6 method.
*/
s_ip4 = nm_connection_get_setting_ip4_config (connection);
if (s_ip4 && !nm_setting_ip_config_get_may_fail (s_ip4))
return TRUE;
}
ip6_carrier_wanted = connection_ip6_method_requires_carrier (connection, &ip6_used);
if (ip6_carrier_wanted) {
/* If IPv6 wants a carrier and cannot fail, the whole connection
* requires a carrier regardless of the IPv4 method.
*/
s_ip6 = nm_connection_get_setting_ip6_config (connection);
if (s_ip6 && !nm_setting_ip_config_get_may_fail (s_ip6))
return TRUE;
}
/* If an IP version wants a carrier and the other IP version isn't
* used, the connection requires carrier since it will just fail without one.
*/
if (ip4_carrier_wanted && !ip6_used)
return TRUE;
if (ip6_carrier_wanted && !ip4_used)
return TRUE;
/* If both want a carrier, the whole connection wants a carrier */
return ip4_carrier_wanted && ip6_carrier_wanted;
}
static gboolean
have_any_ready_slaves (NMDevice *self, const GSList *slaves)
{
const GSList *iter;
/* Any enslaved slave is "ready" in the generic case as it's
* at least >= NM_DEVICE_STATE_IP_CONFIG and has had Layer 2
* properties set up.
*/
for (iter = slaves; iter; iter = g_slist_next (iter)) {
if (nm_device_get_enslaved (iter->data))
return TRUE;
}
return FALSE;
}
static gboolean
ip4_requires_slaves (NMConnection *connection)
{
const char *method;
method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG);
return strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_AUTO) == 0;
}
static NMActStageReturn
act_stage3_ip4_config_start (NMDevice *self,
NMIP4Config **out_config,
NMDeviceStateReason *reason)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection;
NMActStageReturn ret = NM_ACT_STAGE_RETURN_FAILURE;
const char *method;
GSList *slaves;
gboolean ready_slaves;
g_return_val_if_fail (reason != NULL, NM_ACT_STAGE_RETURN_FAILURE);
connection = nm_device_get_connection (self);
g_assert (connection);
if ( connection_ip4_method_requires_carrier (connection, NULL)
&& priv->is_master
&& !priv->carrier) {
_LOGI (LOGD_IP4 | LOGD_DEVICE,
"IPv4 config waiting until carrier is on");
return NM_ACT_STAGE_RETURN_WAIT;
}
if (priv->is_master && ip4_requires_slaves (connection)) {
/* If the master has no ready slaves, and depends on slaves for
* a successful IPv4 attempt, then postpone IPv4 addressing.
*/
slaves = nm_device_master_get_slaves (self);
ready_slaves = NM_DEVICE_GET_CLASS (self)->have_any_ready_slaves (self, slaves);
g_slist_free (slaves);
if (ready_slaves == FALSE) {
_LOGI (LOGD_DEVICE | LOGD_IP4,
"IPv4 config waiting until slaves are ready");
return NM_ACT_STAGE_RETURN_WAIT;
}
}
method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP4_CONFIG);
/* Start IPv4 addressing based on the method requested */
if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_AUTO) == 0)
ret = dhcp4_start (self, connection, reason);
else if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_LINK_LOCAL) == 0)
ret = aipd_start (self, reason);
else if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_MANUAL) == 0) {
/* Use only IPv4 config from the connection data */
*out_config = nm_ip4_config_new (nm_device_get_ip_ifindex (self));
g_assert (*out_config);
ret = NM_ACT_STAGE_RETURN_SUCCESS;
} else if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_SHARED) == 0) {
*out_config = shared4_new_config (self, connection, reason);
if (*out_config) {
priv->dnsmasq_manager = nm_dnsmasq_manager_new (nm_device_get_ip_iface (self));
ret = NM_ACT_STAGE_RETURN_SUCCESS;
} else
ret = NM_ACT_STAGE_RETURN_FAILURE;
} else if (strcmp (method, NM_SETTING_IP4_CONFIG_METHOD_DISABLED) == 0) {
/* Nothing to do... */
ret = NM_ACT_STAGE_RETURN_STOP;
} else
_LOGW (LOGD_IP4, "unhandled IPv4 config method '%s'; will fail", method);
return ret;
}
/*********************************************/
/* DHCPv6 stuff */
static void
dhcp6_cleanup (NMDevice *self, gboolean stop, gboolean release)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
priv->dhcp6_mode = NM_RDISC_DHCP_LEVEL_NONE;
g_clear_object (&priv->dhcp6_ip6_config);
if (priv->dhcp6_client) {
if (priv->dhcp6_state_sigid) {
g_signal_handler_disconnect (priv->dhcp6_client, priv->dhcp6_state_sigid);
priv->dhcp6_state_sigid = 0;
}
if (stop)
nm_dhcp_client_stop (priv->dhcp6_client, release);
g_clear_object (&priv->dhcp6_client);
}
nm_device_remove_pending_action (self, PENDING_ACTION_DHCP6, FALSE);
if (priv->dhcp6_config) {
g_clear_object (&priv->dhcp6_config);
g_object_notify (G_OBJECT (self), NM_DEVICE_DHCP6_CONFIG);
}
}
static gboolean
ip6_config_merge_and_apply (NMDevice *self,
gboolean commit,
NMDeviceStateReason *out_reason)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection;
gboolean success;
NMIP6Config *composite;
gboolean has_direct_route;
const struct in6_addr *gateway;
/* If no config was passed in, create a new one */
composite = nm_ip6_config_new (nm_device_get_ip_ifindex (self));
ensure_con_ipx_config (self);
g_assert (composite);
/* Merge all the IP configs into the composite config */
if (priv->ac_ip6_config)
nm_ip6_config_merge (composite, priv->ac_ip6_config);
if (priv->dhcp6_ip6_config)
nm_ip6_config_merge (composite, priv->dhcp6_ip6_config);
if (priv->vpn6_config)
nm_ip6_config_merge (composite, priv->vpn6_config);
if (priv->ext_ip6_config)
nm_ip6_config_merge (composite, priv->ext_ip6_config);
/* Merge WWAN config *last* to ensure modem-given settings overwrite
* any external stuff set by pppd or other scripts.
*/
if (priv->wwan_ip6_config)
nm_ip6_config_merge (composite, priv->wwan_ip6_config);
/* Merge user overrides into the composite config. For assumed connections,
* con_ip6_config is empty. */
if (priv->con_ip6_config)
nm_ip6_config_merge (composite, priv->con_ip6_config);
connection = nm_device_get_connection (self);
/* Add the default route.
*
* We keep track of the default route of a device in a private field.
* NMDevice needs to know the default route at this point, because the gateway
* might require a direct route (see below).
*
* But also, we don't want to add the default route to priv->ip6_config,
* because the default route from the setting might not be the same one that
* NMDefaultRouteManager eventually configures (because it might
* tweak the effective metric).
*/
/* unless we come to a different conclusion below, we have no default route and
* the route is assumed. */
priv->default_route.v6_has = FALSE;
priv->default_route.v6_is_assumed = TRUE;
if (!commit) {
/* during a non-commit event, we always pick up whatever is configured. */
goto END_ADD_DEFAULT_ROUTE;
}
if (nm_device_uses_assumed_connection (self))
goto END_ADD_DEFAULT_ROUTE;
/* we are about to commit (for a non-assumed connection). Enforce whatever we have
* configured. */
priv->default_route.v6_is_assumed = FALSE;
if ( !connection
|| !nm_default_route_manager_ip6_connection_has_default_route (nm_default_route_manager_get (), connection))
goto END_ADD_DEFAULT_ROUTE;
if (!nm_ip6_config_get_num_addresses (composite)) {
/* without addresses we can have no default route. */
goto END_ADD_DEFAULT_ROUTE;
}
gateway = nm_ip6_config_get_gateway (composite);
if (!gateway)
goto END_ADD_DEFAULT_ROUTE;
has_direct_route = nm_ip6_config_get_direct_route_for_host (composite, gateway) != NULL;
priv->default_route.v6_has = TRUE;
memset (&priv->default_route.v6, 0, sizeof (priv->default_route.v6));
priv->default_route.v6.source = NM_IP_CONFIG_SOURCE_USER;
priv->default_route.v6.gateway = *gateway;
priv->default_route.v6.metric = nm_device_get_ip6_route_metric (self);
priv->default_route.v6.mss = nm_ip6_config_get_mss (composite);
if (!has_direct_route) {
NMPlatformIP6Route r = priv->default_route.v6;
/* add a direct route to the gateway */
r.network = *gateway;
r.plen = 128;
r.gateway = in6addr_any;
nm_ip6_config_add_route (composite, &r);
}
END_ADD_DEFAULT_ROUTE:
if (priv->default_route.v6_is_assumed) {
/* If above does not explicitly assign a default route, we always pick up the
* default route based on what is currently configured.
* That means that even managed connections with never-default, can
* get a default route (if configured externally).
*/
priv->default_route.v6_has = _device_get_default_route_from_platform (self, AF_INET6, (NMPlatformIPRoute *) &priv->default_route.v6);
}
nm_ip6_config_addresses_sort (composite,
priv->rdisc ? priv->rdisc_use_tempaddr : NM_SETTING_IP6_CONFIG_PRIVACY_UNKNOWN);
/* Allow setting MTU etc */
if (commit) {
if (NM_DEVICE_GET_CLASS (self)->ip6_config_pre_commit)
NM_DEVICE_GET_CLASS (self)->ip6_config_pre_commit (self, composite);
}
success = nm_device_set_ip6_config (self, composite, commit, out_reason);
g_object_unref (composite);
return success;
}
static void
dhcp6_lease_change (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection;
NMDeviceStateReason reason = NM_DEVICE_STATE_REASON_NONE;
if (priv->dhcp6_ip6_config == NULL) {
_LOGW (LOGD_DHCP6, "failed to get DHCPv6 config for rebind");
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_EXPIRED);
return;
}
g_assert (priv->dhcp6_client); /* sanity check */
connection = nm_device_get_connection (self);
g_assert (connection);
/* Apply the updated config */
if (ip6_config_merge_and_apply (self, TRUE, &reason) == FALSE) {
_LOGW (LOGD_DHCP6, "failed to update IPv6 config in response to DHCP event.");
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, reason);
} else {
/* Notify dispatcher scripts of new DHCPv6 config */
nm_dispatcher_call (DISPATCHER_ACTION_DHCP6_CHANGE, connection, self, NULL, NULL, NULL);
}
}
static void
dhcp6_fail (NMDevice *self, gboolean timeout)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
dhcp6_cleanup (self, TRUE, FALSE);
if (priv->dhcp6_mode == NM_RDISC_DHCP_LEVEL_MANAGED) {
if (timeout || (priv->ip6_state == IP_CONF))
nm_device_activate_schedule_ip6_config_timeout (self);
else if (priv->ip6_state == IP_DONE)
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_IP_CONFIG_EXPIRED);
else
g_warn_if_reached ();
} else {
/* not a hard failure; just live with the RA info */
if (priv->ip6_state == IP_CONF)
nm_device_activate_schedule_ip6_config_result (self);
}
}
static void
dhcp6_timeout (NMDevice *self, NMDhcpClient *client)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->dhcp6_mode == NM_RDISC_DHCP_LEVEL_MANAGED)
dhcp6_fail (self, TRUE);
else {
/* not a hard failure; just live with the RA info */
dhcp6_cleanup (self, TRUE, FALSE);
if (priv->ip6_state == IP_CONF)
nm_device_activate_schedule_ip6_config_result (self);
}
}
static void
dhcp6_update_config (NMDevice *self, NMDhcp6Config *config, GHashTable *options)
{
GHashTableIter iter;
const char *key, *value;
/* Update the DHCP6 config object with new DHCP options */
nm_dhcp6_config_reset (config);
g_hash_table_iter_init (&iter, options);
while (g_hash_table_iter_next (&iter, (gpointer) &key, (gpointer) &value))
nm_dhcp6_config_add_option (config, key, value);
g_object_notify (G_OBJECT (self), NM_DEVICE_DHCP6_CONFIG);
}
static void
dhcp6_state_changed (NMDhcpClient *client,
NMDhcpState state,
NMIP6Config *ip6_config,
GHashTable *options,
gpointer user_data)
{
NMDevice *self = NM_DEVICE (user_data);
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
g_return_if_fail (nm_dhcp_client_get_ipv6 (client) == TRUE);
g_return_if_fail (!ip6_config || NM_IS_IP6_CONFIG (ip6_config));
_LOGD (LOGD_DHCP6, "new DHCPv6 client state %d", state);
switch (state) {
case NM_DHCP_STATE_BOUND:
g_clear_object (&priv->dhcp6_ip6_config);
if (ip6_config) {
priv->dhcp6_ip6_config = g_object_ref (ip6_config);
dhcp6_update_config (self, priv->dhcp6_config, options);
}
if (priv->ip6_state == IP_CONF) {
if (priv->dhcp6_ip6_config == NULL) {
/* FIXME: Initial DHCP failed; should we fail IPv6 entirely then? */
nm_device_state_changed (self, NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_DHCP_FAILED);
break;
}
nm_device_activate_schedule_ip6_config_result (self);
} else if (priv->ip6_state == IP_DONE)
dhcp6_lease_change (self);
break;
case NM_DHCP_STATE_TIMEOUT:
dhcp6_timeout (self, client);
break;
case NM_DHCP_STATE_EXPIRE:
/* Ignore expiry before we even have a lease (NAK, old lease, etc) */
if (priv->ip6_state != IP_CONF)
dhcp6_fail (self, FALSE);
break;
case NM_DHCP_STATE_DONE:
/* In IPv6 info-only mode, the client doesn't handle leases so it
* may exit right after getting a response from the server. That's
* normal. In that case we just ignore the exit.
*/
if (priv->dhcp6_mode == NM_RDISC_DHCP_LEVEL_OTHERCONF)
break;
/* Otherwise, fall through */
case NM_DHCP_STATE_FAIL:
dhcp6_fail (self, FALSE);
break;
default:
break;
}
}
static gboolean
dhcp6_start_with_link_ready (NMDevice *self, NMConnection *connection)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMSettingIPConfig *s_ip6;
GByteArray *tmp = NULL;
const guint8 *hw_addr;
size_t hw_addr_len = 0;
g_assert (connection);
s_ip6 = nm_connection_get_setting_ip6_config (connection);
g_assert (s_ip6);
hw_addr = nm_platform_link_get_address (nm_device_get_ip_ifindex (self), &hw_addr_len);
if (hw_addr_len) {
tmp = g_byte_array_sized_new (hw_addr_len);
g_byte_array_append (tmp, hw_addr, hw_addr_len);
}
priv->dhcp6_client = nm_dhcp_manager_start_ip6 (nm_dhcp_manager_get (),
nm_device_get_ip_iface (self),
nm_device_get_ip_ifindex (self),
tmp,
nm_connection_get_uuid (connection),
nm_device_get_ip6_route_metric (self),
nm_setting_ip_config_get_dhcp_send_hostname (s_ip6),
nm_setting_ip_config_get_dhcp_hostname (s_ip6),
priv->dhcp_timeout,
priv->dhcp_anycast_address,
(priv->dhcp6_mode == NM_RDISC_DHCP_LEVEL_OTHERCONF) ? TRUE : FALSE,
nm_setting_ip6_config_get_ip6_privacy (NM_SETTING_IP6_CONFIG (s_ip6)));
if (tmp)
g_byte_array_free (tmp, TRUE);
if (priv->dhcp6_client) {
priv->dhcp6_state_sigid = g_signal_connect (priv->dhcp6_client,
NM_DHCP_CLIENT_SIGNAL_STATE_CHANGED,
G_CALLBACK (dhcp6_state_changed),
self);
}
return !!priv->dhcp6_client;
}
static gboolean
dhcp6_start (NMDevice *self, gboolean wait_for_ll, NMDeviceStateReason *reason)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection;
NMSettingIPConfig *s_ip6;
g_clear_object (&priv->dhcp6_config);
priv->dhcp6_config = nm_dhcp6_config_new ();
g_warn_if_fail (priv->dhcp6_ip6_config == NULL);
g_clear_object (&priv->dhcp6_ip6_config);
connection = nm_device_get_connection (self);
g_assert (connection);
s_ip6 = nm_connection_get_setting_ip6_config (connection);
if (!nm_setting_ip_config_get_may_fail (s_ip6) ||
!strcmp (nm_setting_ip_config_get_method (s_ip6), NM_SETTING_IP6_CONFIG_METHOD_DHCP))
nm_device_add_pending_action (self, PENDING_ACTION_DHCP6, TRUE);
if (wait_for_ll) {
NMActStageReturn ret;
/* ensure link local is ready... */
ret = linklocal6_start (self);
if (ret == NM_ACT_STAGE_RETURN_POSTPONE) {
/* success; wait for the LL address to show up */
return TRUE;
}
/* success; already have the LL address; kick off DHCP */
g_assert (ret == NM_ACT_STAGE_RETURN_SUCCESS);
}
if (!dhcp6_start_with_link_ready (self, connection)) {
*reason = NM_DEVICE_STATE_REASON_DHCP_START_FAILED;
return FALSE;
}
return TRUE;
}
gboolean
nm_device_dhcp6_renew (NMDevice *self, gboolean release)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
g_return_val_if_fail (priv->dhcp6_client != NULL, FALSE);
_LOGI (LOGD_DHCP6, "DHCPv6 lease renewal requested");
/* Terminate old DHCP instance and release the old lease */
dhcp6_cleanup (self, TRUE, release);
/* Start DHCP again on the interface */
return dhcp6_start (self, FALSE, NULL);
}
/******************************************/
static gboolean
have_ip6_address (const NMIP6Config *ip6_config, gboolean linklocal)
{
guint i;
if (!ip6_config)
return FALSE;
linklocal = !!linklocal;
for (i = 0; i < nm_ip6_config_get_num_addresses (ip6_config); i++) {
const NMPlatformIP6Address *addr = nm_ip6_config_get_address (ip6_config, i);
if ((IN6_IS_ADDR_LINKLOCAL (&addr->address) == linklocal) &&
!(addr->flags & IFA_F_TENTATIVE))
return TRUE;
}
return FALSE;
}
static void
linklocal6_cleanup (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
if (priv->linklocal6_timeout_id) {
g_source_remove (priv->linklocal6_timeout_id);
priv->linklocal6_timeout_id = 0;
}
}
static gboolean
linklocal6_timeout_cb (gpointer user_data)
{
NMDevice *self = user_data;
linklocal6_cleanup (self);
_LOGD (LOGD_DEVICE, "linklocal6: waiting for link-local addresses failed due to timeout");
nm_device_activate_schedule_ip6_config_timeout (self);
return G_SOURCE_REMOVE;
}
static void
linklocal6_complete (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection;
const char *method;
g_assert (priv->linklocal6_timeout_id);
g_assert (have_ip6_address (priv->ip6_config, TRUE));
linklocal6_cleanup (self);
connection = nm_device_get_connection (self);
g_assert (connection);
method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG);
_LOGD (LOGD_DEVICE, "linklocal6: waiting for link-local addresses successful, continue with method %s", method);
if (strcmp (method, NM_SETTING_IP6_CONFIG_METHOD_AUTO) == 0) {
if (!addrconf6_start_with_link_ready (self)) {
/* Time out IPv6 instead of failing the entire activation */
nm_device_activate_schedule_ip6_config_timeout (self);
}
} else if (strcmp (method, NM_SETTING_IP6_CONFIG_METHOD_DHCP) == 0) {
if (!dhcp6_start_with_link_ready (self, connection)) {
/* Time out IPv6 instead of failing the entire activation */
nm_device_activate_schedule_ip6_config_timeout (self);
}
} else if (strcmp (method, NM_SETTING_IP6_CONFIG_METHOD_LINK_LOCAL) == 0)
nm_device_activate_schedule_ip6_config_result (self);
else
g_return_if_fail (FALSE);
}
static void
check_and_add_ipv6ll_addr (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
int ip_ifindex = nm_device_get_ip_ifindex (self);
NMUtilsIPv6IfaceId iid;
struct in6_addr lladdr;
guint i, n;
if (priv->nm_ipv6ll == FALSE)
return;
if (priv->ip6_config) {
n = nm_ip6_config_get_num_addresses (priv->ip6_config);
for (i = 0; i < n; i++) {
const NMPlatformIP6Address *addr;
addr = nm_ip6_config_get_address (priv->ip6_config, i);
if (IN6_IS_ADDR_LINKLOCAL (&addr->address)) {
/* Already have an LL address, nothing to do */
return;
}
}
}
if (!nm_device_get_ip_iface_identifier (self, &iid)) {
_LOGW (LOGD_IP6, "failed to get interface identifier; IPv6 may be broken");
return;
}
memset (&lladdr, 0, sizeof (lladdr));
lladdr.s6_addr16[0] = htons (0xfe80);
nm_utils_ipv6_addr_set_interface_identfier (&lladdr, iid);
_LOGD (LOGD_IP6, "adding IPv6LL address %s", nm_utils_inet6_ntop (&lladdr, NULL));
if (!nm_platform_ip6_address_add (ip_ifindex,
lladdr,
in6addr_any,
64,
NM_PLATFORM_LIFETIME_PERMANENT,
NM_PLATFORM_LIFETIME_PERMANENT,
0)) {
_LOGW (LOGD_IP6, "failed to add IPv6 link-local address %s",
nm_utils_inet6_ntop (&lladdr, NULL));
}
}
static NMActStageReturn
linklocal6_start (NMDevice *self)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
NMConnection *connection;
const char *method;
linklocal6_cleanup (self);
if (have_ip6_address (priv->ip6_config, TRUE))
return NM_ACT_STAGE_RETURN_SUCCESS;
connection = nm_device_get_connection (self);
g_assert (connection);
method = nm_utils_get_ip_config_method (connection, NM_TYPE_SETTING_IP6_CONFIG);
_LOGD (LOGD_DEVICE, "linklocal6: starting IPv6 with method '%s', but the device has no link-local addresses configured. Wait.", method);
check_and_add_ipv6ll_addr (self);
priv->linklocal6_timeout_id = g_timeout_add_seconds (5, linklocal6_timeout_cb, self);
return NM_ACT_STAGE_RETURN_POSTPONE;
}
/******************************************/
static void
print_support_extended_ifa_flags (NMSettingIP6ConfigPrivacy use_tempaddr)
{
static gint8 warn = 0;
static gint8 s_libnl = -1, s_kernel;
if (warn >= 2)
return;
if (s_libnl == -1) {
s_libnl = !!nm_platform_check_support_libnl_extended_ifa_flags ();
s_kernel = !!nm_platform_check_support_kernel_extended_ifa_flags ();
if (s_libnl && s_kernel) {
nm_log_dbg (LOGD_IP6, "kernel and libnl support extended IFA_FLAGS (needed by NM for IPv6 private addresses)");
warn = 2;
return;
}
}
if ( use_tempaddr != NM_SETTING_IP6_CONFIG_PRIVACY_PREFER_TEMP_ADDR
&& use_tempaddr != NM_SETTING_IP6_CONFIG_PRIVACY_PREFER_PUBLIC_ADDR) {
if (warn == 0) {
nm_log_dbg (LOGD_IP6, "%s%s%s %s not support extended IFA_FLAGS (needed by NM for IPv6 private addresses)",
!s_kernel ? "kernel" : "",
!s_kernel && !s_libnl ? " and " : "",
!s_libnl ? "libnl" : "",
!s_kernel && !s_libnl ? "do" : "does");
warn = 1;
}
return;
}
if (!s_libnl && !s_kernel) {
nm_log_warn (LOGD_IP6, "libnl and the kernel do not support extended IFA_FLAGS needed by NM for "
"IPv6 private addresses. This feature is not available");
} else if (!s_libnl) {
nm_log_warn (LOGD_IP6, "libnl does not support extended IFA_FLAGS needed by NM for "
"IPv6 private addresses. This feature is not available");
} else if (!s_kernel) {
nm_log_warn (LOGD_IP6, "The kernel does not support extended IFA_FLAGS needed by NM for "
"IPv6 private addresses. This feature is not available");
}
warn = 2;
}
static void nm_device_ipv6_set_mtu (NMDevice *self, guint32 mtu);
static void
nm_device_set_mtu (NMDevice *self, guint32 mtu)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
int ifindex = nm_device_get_ifindex (self);
if (mtu)
priv->mtu = mtu;
/* Ensure the IPv6 MTU is still alright. */
if (priv->ip6_mtu)
nm_device_ipv6_set_mtu (self, priv->ip6_mtu);
if (priv->mtu != nm_platform_link_get_mtu (ifindex))
nm_platform_link_set_mtu (ifindex, priv->mtu);
}
static void
nm_device_ipv6_set_mtu (NMDevice *self, guint32 mtu)
{
NMDevicePrivate *priv = NM_DEVICE_GET_PRIVATE (self);
guint32 plat_mtu = nm_device_ipv6_sysctl_get_int32 (self, "mtu", priv->mtu);
char val[16];
priv->ip6_mtu = mtu ?: plat_mtu;
if (priv->ip6_mtu && priv->mtu < priv->ip6_mtu) {
_LOGW (LOGD_DEVICE | LOGD_IP6, "Lowering IPv6 MTU (%d) to match device MTU (%d)",
priv->ip6_mtu, priv->mtu);
priv->ip6_mtu = priv->mtu;
}
if (priv->ip6_mtu < 1280) {
_LOGW (LOGD_DEVICE | LOGD_IP6, "IPv6 MTU (%d) smaller than 1280, adjusting",
priv->ip6_mtu);
priv->ip6_mtu = 1280;
}
if (priv->mtu < priv->ip6_mtu) {
_LOGW (LOGD_DEVICE | LOGD_IP6, "Raising device MTU (%d) to match IPv6 MTU (%d)",
priv->mtu, priv->ip6_mtu);
nm_device_set_mtu (self, priv->ip6_mtu);
}
if (priv->ip6_mtu != plat_mtu) {
g_snprintf (val, sizeof (val), "%d", mtu);
nm_device_ipv6_sysctl_set (self, "mtu", val);
}
}
static void
rdisc_config_changed (NMRDisc *rdisc, NMRDiscConfigMap changed, NMDevice *self)
{
address.preferred = discovered_address->preferred;
if (address.preferred > address.lifetime)
address.preferred = address.lifetime;
address.source = NM_IP_CONFIG_SOURCE_RDISC;
address.flags = ifa_flags;
nm_ip6_config_add_address (priv->ac_ip6_config, &address);
}
}
| 26,722 |
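Aside: the avahi-autoipd BIND handler near the top of the listing above rejects addresses outside the IPv4 link-local range with (lla & IPV4LL_NETMASK) != IPV4LL_NETWORK. Those macros are defined elsewhere in nm-device.c and are not shown in this row; the standalone sketch below assumes the usual RFC 3927 values (169.254.0.0/16), and its names and test values are illustrative rather than taken from the file.

#include <stdio.h>
#include <arpa/inet.h>

/* Illustrative only: the real code uses the IPV4LL_NETWORK/IPV4LL_NETMASK
 * macros from nm-device.c; 169.254.0.0/16 (RFC 3927) is assumed here. */
static int
is_ipv4_link_local (const char *str)
{
	struct in_addr addr;

	if (inet_pton (AF_INET, str, &addr) <= 0)
		return 0;                         /* not a parseable IPv4 address */

	/* both operands are in network byte order, as in the handler above */
	return (addr.s_addr & htonl (0xFFFF0000u)) == htonl (0xA9FE0000u);
}

int
main (void)
{
	printf ("%d\n", is_ipv4_link_local ("169.254.12.34"));  /* prints 1 */
	printf ("%d\n", is_ipv4_link_local ("10.42.0.1"));      /* prints 0 */
	return 0;
}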
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
size_t *sent)
{
int rc = 0;
int i = 0;
struct msghdr smb_msg;
unsigned int remaining;
size_t first_vec = 0;
struct socket *ssocket = server->ssocket;
*sent = 0;
if (ssocket == NULL)
return -ENOTSOCK; /* BB eventually add reconnect code here */
smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
smb_msg.msg_namelen = sizeof(struct sockaddr);
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
if (server->noblocksnd)
smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
else
smb_msg.msg_flags = MSG_NOSIGNAL;
remaining = 0;
for (i = 0; i < n_vec; i++)
remaining += iov[i].iov_len;
i = 0;
while (remaining) {
/*
* If blocking send, we try 3 times, since each can block
* for 5 seconds. For nonblocking we have to try more
* but wait increasing amounts of time allowing time for
* socket to clear. The overall time we wait in either
* case to send on the socket is about 15 seconds.
* Similarly we wait for 15 seconds for a response from
* the server in SendReceive[2] for the server to send
* a response back for most types of requests (except
* SMB Write past end of file which can be slow, and
* blocking lock operations). NFS waits slightly longer
* than CIFS, but this can make it take longer for
* nonresponsive servers to be detected and 15 seconds
* is more than enough time for modern networks to
* send a packet. In most cases if we fail to send
* after the retries we will kill the socket and
* reconnect which may clear the network problem.
*/
rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
n_vec - first_vec, remaining);
if (rc == -ENOSPC || rc == -EAGAIN) {
/*
* Catch if a low level driver returns -ENOSPC. This
* WARN_ON will be removed by 3.10 if no one reports
* seeing this.
*/
WARN_ON_ONCE(rc == -ENOSPC);
i++;
if (i >= 14 || (!server->noblocksnd && (i > 2))) {
cERROR(1, "sends on sock %p stuck for 15 "
"seconds", ssocket);
rc = -EAGAIN;
break;
}
msleep(1 << i);
continue;
}
if (rc < 0)
break;
/* send was at least partially successful */
*sent += rc;
if (rc == remaining) {
remaining = 0;
break;
}
if (rc > remaining) {
cERROR(1, "sent %d requested %d", rc, remaining);
break;
}
if (rc == 0) {
/* should never happen, letting socket clear before
retrying is our only obvious option here */
cERROR(1, "tcp sent no data");
msleep(500);
continue;
}
remaining -= rc;
/* the line below resets i */
for (i = first_vec; i < n_vec; i++) {
if (iov[i].iov_len) {
if (rc > iov[i].iov_len) {
rc -= iov[i].iov_len;
iov[i].iov_len = 0;
} else {
iov[i].iov_base += rc;
iov[i].iov_len -= rc;
first_vec = i;
break;
}
}
}
i = 0; /* in case we get ENOSPC on the next send */
rc = 0;
}
return rc;
}
Commit Message: cifs: move check for NULL socket into smb_send_rqst
Cai reported this oops:
[90701.616664] BUG: unable to handle kernel NULL pointer dereference at 0000000000000028
[90701.625438] IP: [<ffffffff814a343e>] kernel_setsockopt+0x2e/0x60
[90701.632167] PGD fea319067 PUD 103fda4067 PMD 0
[90701.637255] Oops: 0000 [#1] SMP
[90701.640878] Modules linked in: des_generic md4 nls_utf8 cifs dns_resolver binfmt_misc tun sg igb iTCO_wdt iTCO_vendor_support lpc_ich pcspkr i2c_i801 i2c_core i7core_edac edac_core ioatdma dca mfd_core coretemp kvm_intel kvm crc32c_intel microcode sr_mod cdrom ata_generic sd_mod pata_acpi crc_t10dif ata_piix libata megaraid_sas dm_mirror dm_region_hash dm_log dm_mod
[90701.677655] CPU 10
[90701.679808] Pid: 9627, comm: ls Tainted: G W 3.7.1+ #10 QCI QSSC-S4R/QSSC-S4R
[90701.688950] RIP: 0010:[<ffffffff814a343e>] [<ffffffff814a343e>] kernel_setsockopt+0x2e/0x60
[90701.698383] RSP: 0018:ffff88177b431bb8 EFLAGS: 00010206
[90701.704309] RAX: ffff88177b431fd8 RBX: 00007ffffffff000 RCX: ffff88177b431bec
[90701.712271] RDX: 0000000000000003 RSI: 0000000000000006 RDI: 0000000000000000
[90701.720223] RBP: ffff88177b431bc8 R08: 0000000000000004 R09: 0000000000000000
[90701.728185] R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000001
[90701.736147] R13: ffff88184ef92000 R14: 0000000000000023 R15: ffff88177b431c88
[90701.744109] FS: 00007fd56a1a47c0(0000) GS:ffff88105fc40000(0000) knlGS:0000000000000000
[90701.753137] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
[90701.759550] CR2: 0000000000000028 CR3: 000000104f15f000 CR4: 00000000000007e0
[90701.767512] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[90701.775465] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
[90701.783428] Process ls (pid: 9627, threadinfo ffff88177b430000, task ffff88185ca4cb60)
[90701.792261] Stack:
[90701.794505] 0000000000000023 ffff88177b431c50 ffff88177b431c38 ffffffffa014fcb1
[90701.802809] ffff88184ef921bc 0000000000000000 00000001ffffffff ffff88184ef921c0
[90701.811123] ffff88177b431c08 ffffffff815ca3d9 ffff88177b431c18 ffff880857758000
[90701.819433] Call Trace:
[90701.822183] [<ffffffffa014fcb1>] smb_send_rqst+0x71/0x1f0 [cifs]
[90701.828991] [<ffffffff815ca3d9>] ? schedule+0x29/0x70
[90701.834736] [<ffffffffa014fe6d>] smb_sendv+0x3d/0x40 [cifs]
[90701.841062] [<ffffffffa014fe96>] smb_send+0x26/0x30 [cifs]
[90701.847291] [<ffffffffa015801f>] send_nt_cancel+0x6f/0xd0 [cifs]
[90701.854102] [<ffffffffa015075e>] SendReceive+0x18e/0x360 [cifs]
[90701.860814] [<ffffffffa0134a78>] CIFSFindFirst+0x1a8/0x3f0 [cifs]
[90701.867724] [<ffffffffa013f731>] ? build_path_from_dentry+0xf1/0x260 [cifs]
[90701.875601] [<ffffffffa013f731>] ? build_path_from_dentry+0xf1/0x260 [cifs]
[90701.883477] [<ffffffffa01578e6>] cifs_query_dir_first+0x26/0x30 [cifs]
[90701.890869] [<ffffffffa015480d>] initiate_cifs_search+0xed/0x250 [cifs]
[90701.898354] [<ffffffff81195970>] ? fillonedir+0x100/0x100
[90701.904486] [<ffffffffa01554cb>] cifs_readdir+0x45b/0x8f0 [cifs]
[90701.911288] [<ffffffff81195970>] ? fillonedir+0x100/0x100
[90701.917410] [<ffffffff81195970>] ? fillonedir+0x100/0x100
[90701.923533] [<ffffffff81195970>] ? fillonedir+0x100/0x100
[90701.929657] [<ffffffff81195848>] vfs_readdir+0xb8/0xe0
[90701.935490] [<ffffffff81195b9f>] sys_getdents+0x8f/0x110
[90701.941521] [<ffffffff815d3b99>] system_call_fastpath+0x16/0x1b
[90701.948222] Code: 66 90 55 65 48 8b 04 25 f0 c6 00 00 48 89 e5 53 48 83 ec 08 83 fe 01 48 8b 98 48 e0 ff ff 48 c7 80 48 e0 ff ff ff ff ff ff 74 22 <48> 8b 47 28 ff 50 68 65 48 8b 14 25 f0 c6 00 00 48 89 9a 48 e0
[90701.970313] RIP [<ffffffff814a343e>] kernel_setsockopt+0x2e/0x60
[90701.977125] RSP <ffff88177b431bb8>
[90701.981018] CR2: 0000000000000028
[90701.984809] ---[ end trace 24bd602971110a43 ]---
This is likely due to a race vs. a reconnection event.
The current code checks for a NULL socket in smb_send_kvec, but that's
too late. By the time that check is done, the socket will already have
been passed to kernel_setsockopt. Move the check into smb_send_rqst, so
that it's checked earlier.
In truth, this is a bit of a half-assed fix. The -ENOTSOCK error
return here looks like it could bubble back up to userspace. The locking
rules around the ssocket pointer are really unclear as well. There are
cases where the ssocket pointer is changed without holding the srv_mutex,
but I'm not clear whether there's a potential race here yet or not.
This code seems like it could benefit from some fundamental re-think of
how the socket handling should behave. Until then though, this patch
should at least fix the above oops in most cases.
Cc: <[email protected]> # 3.7+
Reported-and-Tested-by: CAI Qian <[email protected]>
Signed-off-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]>
CWE ID: CWE-362 | smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
size_t *sent)
{
int rc = 0;
int i = 0;
struct msghdr smb_msg;
unsigned int remaining;
size_t first_vec = 0;
struct socket *ssocket = server->ssocket;
*sent = 0;
smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
smb_msg.msg_namelen = sizeof(struct sockaddr);
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
if (server->noblocksnd)
smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
else
smb_msg.msg_flags = MSG_NOSIGNAL;
remaining = 0;
for (i = 0; i < n_vec; i++)
remaining += iov[i].iov_len;
i = 0;
while (remaining) {
/*
* If blocking send, we try 3 times, since each can block
* for 5 seconds. For nonblocking we have to try more
* but wait increasing amounts of time allowing time for
* socket to clear. The overall time we wait in either
* case to send on the socket is about 15 seconds.
* Similarly we wait for 15 seconds for a response from
* the server in SendReceive[2] for the server to send
* a response back for most types of requests (except
* SMB Write past end of file which can be slow, and
* blocking lock operations). NFS waits slightly longer
* than CIFS, but this can make it take longer for
* nonresponsive servers to be detected and 15 seconds
* is more than enough time for modern networks to
* send a packet. In most cases if we fail to send
* after the retries we will kill the socket and
* reconnect which may clear the network problem.
*/
rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
n_vec - first_vec, remaining);
if (rc == -ENOSPC || rc == -EAGAIN) {
/*
* Catch if a low level driver returns -ENOSPC. This
* WARN_ON will be removed by 3.10 if no one reports
* seeing this.
*/
WARN_ON_ONCE(rc == -ENOSPC);
i++;
if (i >= 14 || (!server->noblocksnd && (i > 2))) {
cERROR(1, "sends on sock %p stuck for 15 "
"seconds", ssocket);
rc = -EAGAIN;
break;
}
msleep(1 << i);
continue;
}
if (rc < 0)
break;
/* send was at least partially successful */
*sent += rc;
if (rc == remaining) {
remaining = 0;
break;
}
if (rc > remaining) {
cERROR(1, "sent %d requested %d", rc, remaining);
break;
}
if (rc == 0) {
/* should never happen, letting socket clear before
retrying is our only obvious option here */
cERROR(1, "tcp sent no data");
msleep(500);
continue;
}
remaining -= rc;
/* the line below resets i */
for (i = first_vec; i < n_vec; i++) {
if (iov[i].iov_len) {
if (rc > iov[i].iov_len) {
rc -= iov[i].iov_len;
iov[i].iov_len = 0;
} else {
iov[i].iov_base += rc;
iov[i].iov_len -= rc;
first_vec = i;
break;
}
}
}
i = 0; /* in case we get ENOSPC on the next send */
rc = 0;
}
return rc;
}
| 21,645 |
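Note that the fixed smb_send_kvec() above simply drops the ssocket NULL check; per the commit message, the check moves up into smb_send_rqst(), which is not shown in this row. The snippet below is a rough, compilable illustration of the ordering the commit describes, using invented stand-in types and helpers rather than the real CIFS structures or the verbatim upstream patch.

#include <errno.h>
#include <stdio.h>

/* Toy stand-ins, not the real CIFS structures or helpers. */
struct toy_server { void *ssocket; };

static int toy_setsockopt(void *sock) { (void)sock; return 0; }
static int toy_send_kvec(void *sock)  { (void)sock; return 0; }

/* The point of the fix: validate the socket pointer in the caller, before
 * any helper (the setsockopt call was the crash site) can dereference it. */
static int toy_send_rqst(struct toy_server *server)
{
	void *ssocket = server->ssocket;

	if (ssocket == NULL)
		return -ENOTSOCK;       /* checked up front, not inside the send helper */

	toy_setsockopt(ssocket);
	return toy_send_kvec(ssocket);
}

int main(void)
{
	struct toy_server gone = { .ssocket = NULL };

	printf("%d\n", toy_send_rqst(&gone));   /* -ENOTSOCK instead of an oops */
	return 0;
}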
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: const Block* SimpleBlock::GetBlock() const
{
return &m_block;
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | const Block* SimpleBlock::GetBlock() const
{
return &m_block;
}
| 6,575 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int error = 0;
if (sk->sk_state & PPPOX_BOUND) {
error = -EIO;
goto end;
}
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &error);
if (error < 0)
goto end;
m->msg_namelen = 0;
if (skb) {
total_len = min_t(size_t, total_len, skb->len);
error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
if (error == 0) {
consume_skb(skb);
return total_len;
}
}
kfree_skb(skb);
end:
return error;
}
Commit Message: net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-20 | static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int error = 0;
if (sk->sk_state & PPPOX_BOUND) {
error = -EIO;
goto end;
}
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &error);
if (error < 0)
goto end;
if (skb) {
total_len = min_t(size_t, total_len, skb->len);
error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
if (error == 0) {
consume_skb(skb);
return total_len;
}
}
kfree_skb(skb);
end:
return error;
}
| 18,808 |
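For contrast with the fixed pppoe_recvmsg() above, which never fills msg_name and therefore just leaves the caller-provided zero in msg_namelen, here is a small userspace illustration of what the commit message asks of handlers that do return an address: fill msg_name first, and only then set msg_namelen to the proper size. The function name and the address values are invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Handlers following the new contract set msg_namelen only when they
 * actually fill msg_name; otherwise the caller's zero stays in place. */
static void
fill_source_address(struct msghdr *msg, uint32_t saddr_be, uint16_t sport_be)
{
	struct sockaddr_in *sin = msg->msg_name;

	if (sin == NULL)
		return;                          /* plain read(): no address wanted */

	memset(sin, 0, sizeof(*sin));
	sin->sin_family = AF_INET;
	sin->sin_port = sport_be;            /* assumed already in network byte order */
	sin->sin_addr.s_addr = saddr_be;     /* assumed already in network byte order */
	msg->msg_namelen = sizeof(*sin);     /* <= sizeof(struct sockaddr_storage) */
}

int
main(void)
{
	struct sockaddr_in addr;
	struct msghdr msg = { .msg_name = &addr, .msg_namelen = 0 };

	fill_source_address(&msg, htonl(0xC0A80001), htons(1700)); /* 192.168.0.1:1700 */
	printf("namelen=%u\n", (unsigned) msg.msg_namelen);
	return 0;
}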
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: WORD32 ixheaacd_real_synth_filt(ia_esbr_hbe_txposer_struct *ptr_hbe_txposer,
WORD32 num_columns, FLOAT32 qmf_buf_real[][64],
FLOAT32 qmf_buf_imag[][64]) {
WORD32 i, j, k, l, idx;
FLOAT32 g[640];
FLOAT32 w[640];
FLOAT32 synth_out[128];
FLOAT32 accu_r;
WORD32 synth_size = ptr_hbe_txposer->synth_size;
FLOAT32 *ptr_cos_tab_trans_qmf =
(FLOAT32 *)&ixheaacd_cos_table_trans_qmf[0][0] +
ptr_hbe_txposer->k_start * 32;
FLOAT32 *buffer = ptr_hbe_txposer->synth_buf;
for (idx = 0; idx < num_columns; idx++) {
FLOAT32 loc_qmf_buf[64];
FLOAT32 *synth_buf_r = loc_qmf_buf;
FLOAT32 *out_buf = ptr_hbe_txposer->ptr_input_buf +
(idx + 1) * ptr_hbe_txposer->synth_size;
FLOAT32 *synth_cos_tab = ptr_hbe_txposer->synth_cos_tab;
const FLOAT32 *interp_window_coeff = ptr_hbe_txposer->synth_wind_coeff;
if (ptr_hbe_txposer->k_start < 0) return -1;
for (k = 0; k < synth_size; k++) {
WORD32 ki = ptr_hbe_txposer->k_start + k;
synth_buf_r[k] = (FLOAT32)(
ptr_cos_tab_trans_qmf[(k << 1) + 0] * qmf_buf_real[idx][ki] +
ptr_cos_tab_trans_qmf[(k << 1) + 1] * qmf_buf_imag[idx][ki]);
synth_buf_r[k + ptr_hbe_txposer->synth_size] = 0;
}
for (l = (20 * synth_size - 1); l >= 2 * synth_size; l--) {
buffer[l] = buffer[l - 2 * synth_size];
}
if (synth_size == 20) {
FLOAT32 *psynth_cos_tab = synth_cos_tab;
for (l = 0; l < (synth_size + 1); l++) {
accu_r = 0.0;
for (k = 0; k < synth_size; k++) {
accu_r += synth_buf_r[k] * psynth_cos_tab[k];
}
buffer[0 + l] = accu_r;
buffer[synth_size - l] = accu_r;
psynth_cos_tab = psynth_cos_tab + synth_size;
}
for (l = (synth_size + 1); l < (2 * synth_size - synth_size / 2); l++) {
accu_r = 0.0;
for (k = 0; k < synth_size; k++) {
accu_r += synth_buf_r[k] * psynth_cos_tab[k];
}
buffer[0 + l] = accu_r;
buffer[3 * synth_size - l] = -accu_r;
psynth_cos_tab = psynth_cos_tab + synth_size;
}
accu_r = 0.0;
for (k = 0; k < synth_size; k++) {
accu_r += synth_buf_r[k] * psynth_cos_tab[k];
}
buffer[3 * synth_size >> 1] = accu_r;
} else {
FLOAT32 tmp;
FLOAT32 *ptr_u = synth_out;
WORD32 kmax = (synth_size >> 1);
FLOAT32 *syn_buf = &buffer[kmax];
kmax += synth_size;
if (ixheaacd_real_synth_fft != NULL)
(*ixheaacd_real_synth_fft)(synth_buf_r, synth_out, synth_size * 2);
else
return -1;
for (k = 0; k < kmax; k++) {
tmp = ((*ptr_u++) * (*synth_cos_tab++));
tmp -= ((*ptr_u++) * (*synth_cos_tab++));
*syn_buf++ = tmp;
}
syn_buf = &buffer[0];
kmax -= synth_size;
for (k = 0; k < kmax; k++) {
tmp = ((*ptr_u++) * (*synth_cos_tab++));
tmp -= ((*ptr_u++) * (*synth_cos_tab++));
*syn_buf++ = tmp;
}
}
for (i = 0; i < 5; i++) {
memcpy(&g[(2 * i + 0) * synth_size], &buffer[(4 * i + 0) * synth_size],
sizeof(FLOAT32) * synth_size);
memcpy(&g[(2 * i + 1) * synth_size], &buffer[(4 * i + 3) * synth_size],
sizeof(FLOAT32) * synth_size);
}
for (k = 0; k < 10 * synth_size; k++) {
w[k] = g[k] * interp_window_coeff[k];
}
for (i = 0; i < synth_size; i++) {
accu_r = 0.0;
for (j = 0; j < 10; j++) {
accu_r = accu_r + w[synth_size * j + i];
}
out_buf[i] = (FLOAT32)accu_r;
}
}
return 0;
}
Commit Message: Fix for stack corruption in esbr
Bug: 110769924
Test: poc from bug before/after
Change-Id: I99c6e89902064849ea1310c271064bdeccf7f20e
(cherry picked from commit 7e90d745c22695236437297cd8167a9312427a4a)
(cherry picked from commit 5464927f0c1fc721fa03d1c5be77b0b43dfffc50)
CWE ID: CWE-787 | WORD32 ixheaacd_real_synth_filt(ia_esbr_hbe_txposer_struct *ptr_hbe_txposer,
WORD32 num_columns, FLOAT32 qmf_buf_real[][64],
FLOAT32 qmf_buf_imag[][64]) {
WORD32 i, j, k, l, idx;
FLOAT32 g[640];
FLOAT32 w[640];
FLOAT32 synth_out[128];
FLOAT32 accu_r;
WORD32 synth_size = ptr_hbe_txposer->synth_size;
FLOAT32 *ptr_cos_tab_trans_qmf =
(FLOAT32 *)&ixheaacd_cos_table_trans_qmf[0][0] +
ptr_hbe_txposer->k_start * 32;
FLOAT32 *buffer = ptr_hbe_txposer->synth_buf;
for (idx = 0; idx < num_columns; idx++) {
FLOAT32 loc_qmf_buf[64];
FLOAT32 *synth_buf_r = loc_qmf_buf;
FLOAT32 *out_buf = ptr_hbe_txposer->ptr_input_buf +
(idx + 1) * ptr_hbe_txposer->synth_size;
FLOAT32 *synth_cos_tab = ptr_hbe_txposer->synth_cos_tab;
const FLOAT32 *interp_window_coeff = ptr_hbe_txposer->synth_wind_coeff;
if (ptr_hbe_txposer->k_start < 0) return -1;
for (k = 0; k < synth_size; k++) {
WORD32 ki = ptr_hbe_txposer->k_start + k;
synth_buf_r[k] = (FLOAT32)(
ptr_cos_tab_trans_qmf[(k << 1) + 0] * qmf_buf_real[idx][ki] +
ptr_cos_tab_trans_qmf[(k << 1) + 1] * qmf_buf_imag[idx][ki]);
synth_buf_r[k + ptr_hbe_txposer->synth_size] = 0;
}
for (l = (20 * synth_size - 1); l >= 2 * synth_size; l--) {
buffer[l] = buffer[l - 2 * synth_size];
}
if (synth_size == 20) {
FLOAT32 *psynth_cos_tab = synth_cos_tab;
for (l = 0; l < (synth_size + 1); l++) {
accu_r = 0.0;
for (k = 0; k < synth_size; k++) {
accu_r += synth_buf_r[k] * psynth_cos_tab[k];
}
buffer[0 + l] = accu_r;
buffer[synth_size - l] = accu_r;
psynth_cos_tab = psynth_cos_tab + synth_size;
}
for (l = (synth_size + 1); l < (2 * synth_size - synth_size / 2); l++) {
accu_r = 0.0;
for (k = 0; k < synth_size; k++) {
accu_r += synth_buf_r[k] * psynth_cos_tab[k];
}
buffer[0 + l] = accu_r;
buffer[3 * synth_size - l] = -accu_r;
psynth_cos_tab = psynth_cos_tab + synth_size;
}
accu_r = 0.0;
for (k = 0; k < synth_size; k++) {
accu_r += synth_buf_r[k] * psynth_cos_tab[k];
}
buffer[3 * synth_size >> 1] = accu_r;
} else {
FLOAT32 tmp;
FLOAT32 *ptr_u = synth_out;
WORD32 kmax = (synth_size >> 1);
FLOAT32 *syn_buf = &buffer[kmax];
kmax += synth_size;
if (ptr_hbe_txposer->ixheaacd_real_synth_fft != NULL)
(*(ptr_hbe_txposer->ixheaacd_real_synth_fft))(synth_buf_r, synth_out,
synth_size * 2);
else
return -1;
for (k = 0; k < kmax; k++) {
tmp = ((*ptr_u++) * (*synth_cos_tab++));
tmp -= ((*ptr_u++) * (*synth_cos_tab++));
*syn_buf++ = tmp;
}
syn_buf = &buffer[0];
kmax -= synth_size;
for (k = 0; k < kmax; k++) {
tmp = ((*ptr_u++) * (*synth_cos_tab++));
tmp -= ((*ptr_u++) * (*synth_cos_tab++));
*syn_buf++ = tmp;
}
}
for (i = 0; i < 5; i++) {
memcpy(&g[(2 * i + 0) * synth_size], &buffer[(4 * i + 0) * synth_size],
sizeof(FLOAT32) * synth_size);
memcpy(&g[(2 * i + 1) * synth_size], &buffer[(4 * i + 3) * synth_size],
sizeof(FLOAT32) * synth_size);
}
for (k = 0; k < 10 * synth_size; k++) {
w[k] = g[k] * interp_window_coeff[k];
}
for (i = 0; i < synth_size; i++) {
accu_r = 0.0;
for (j = 0; j < 10; j++) {
accu_r = accu_r + w[synth_size * j + i];
}
out_buf[i] = (FLOAT32)accu_r;
}
}
return 0;
}
| 3,549 |
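
A note on the fix above: the only functional change is that the FFT callback is read from ptr_hbe_txposer->ixheaacd_real_synth_fft instead of a file-scope pointer, so the transform invoked is the one configured for this transposer instance. The sketch below illustrates that per-instance-callback pattern in isolation; struct txposer, fft_stub and run_synth are invented names, and the stack-corruption rationale is an assumption, since the entry's commit message only says "Fix for stack corruption in esbr".

#include <stdio.h>

/* Per-instance state: the transform callback lives next to the sizes it
 * was configured for, instead of in a file-scope variable that another
 * configuration path might have overwritten. */
struct txposer {
    int synth_size;
    void (*synth_fft)(const float *in, float *out, int len);
};

static void fft_stub(const float *in, float *out, int len)
{
    for (int i = 0; i < len; i++)
        out[i] = in[i];                 /* placeholder for the real FFT */
}

static int run_synth(struct txposer *tx, const float *in, float *out)
{
    if (!tx->synth_fft)
        return -1;                      /* same NULL guard as the fixed code */
    tx->synth_fft(in, out, tx->synth_size * 2);
    return 0;
}

int main(void)
{
    float in[64] = { 1.0f }, out[128];
    struct txposer tx = { .synth_size = 32, .synth_fft = fft_stub };
    printf("rc=%d out[0]=%.1f\n", run_synth(&tx, in, out), out[0]);
    return 0;
}
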
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
{
int result = parse_rock_ridge_inode_internal(de, inode, 0);
/*
* if rockridge flag was reset and we didn't look for attributes
* behind eventual XA attributes, have a look there
*/
if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1)
&& (ISOFS_SB(inode->i_sb)->s_rock == 2)) {
result = parse_rock_ridge_inode_internal(de, inode, 14);
}
return result;
}
Commit Message: isofs: Fix unbounded recursion when processing relocated directories
We did not check relocated directory in any way when processing Rock
Ridge 'CL' tag. Thus a corrupted isofs image can possibly have a CL
entry pointing to another CL entry leading to possibly unbounded
recursion in kernel code and thus stack overflow or deadlocks (if there
is a loop created from CL entries).
Fix the problem by not allowing CL entry to point to a directory entry
with CL entry (such use makes no good sense anyway) and by checking
whether CL entry doesn't point to itself.
CC: [email protected]
Reported-by: Chris Evans <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
CWE ID: CWE-20 | int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
int relocated)
{
int flags = relocated ? RR_RELOC_DE : 0;
int result = parse_rock_ridge_inode_internal(de, inode, flags);
/*
* if rockridge flag was reset and we didn't look for attributes
* behind eventual XA attributes, have a look there
*/
if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1)
&& (ISOFS_SB(inode->i_sb)->s_rock == 2)) {
result = parse_rock_ridge_inode_internal(de, inode,
flags | RR_REGARD_XA);
}
return result;
}
| 1,184 |
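
The fix above threads a relocated flag (RR_RELOC_DE) through the Rock Ridge parser so a directory reached via a CL relocation may not itself carry a CL entry, and a CL entry may not point at itself, which bounds the recursion. A minimal user-space sketch of the same guard follows; parse_entry and dirent_rec are invented names, not the kernel's real parser:

#include <stdbool.h>
#include <stdio.h>

#define RR_RELOC_DE 0x1 /* caller reached this entry through a CL relocation */

struct dirent_rec { bool has_cl; int cl_target; };

/* Refuses to recurse when a relocated directory entry itself carries a CL
 * pointer, or when a CL entry points back at the entry being parsed. */
static bool parse_entry(const struct dirent_rec *all, int idx, int flags)
{
    const struct dirent_rec *de = &all[idx];

    if (de->has_cl) {
        if (flags & RR_RELOC_DE)       /* CL inside a relocated dir: reject */
            return false;
        if (de->cl_target == idx)      /* CL pointing at itself: reject */
            return false;
        return parse_entry(all, de->cl_target, flags | RR_RELOC_DE);
    }
    return true;                       /* ordinary entry, nothing to follow */
}

int main(void)
{
    struct dirent_rec fs[2] = { { true, 1 }, { true, 0 } }; /* CL loop 0 -> 1 -> 0 */
    printf("accepted: %d\n", parse_entry(fs, 0, 0));        /* prints 0: loop rejected */
    return 0;
}
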
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
unsigned long addr = compute_effective_address(regs, insn, 0);
int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
struct fpustate *f = FPUSTATE;
int asi = decode_asi(insn, regs);
int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
save_and_clear_fpu();
current_thread_info()->xfsr[0] &= ~0x1c000;
if (freg & 3) {
current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
do_fpother(regs);
return 0;
}
if (insn & 0x200000) {
/* STQ */
u64 first = 0, second = 0;
if (current_thread_info()->fpsaved[0] & flag) {
first = *(u64 *)&f->regs[freg];
second = *(u64 *)&f->regs[freg+2];
}
if (asi < 0x80) {
do_privact(regs);
return 1;
}
switch (asi) {
case ASI_P:
case ASI_S: break;
case ASI_PL:
case ASI_SL:
{
/* Need to convert endians */
u64 tmp = __swab64p(&first);
first = __swab64p(&second);
second = tmp;
break;
}
default:
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
if (put_user (first >> 32, (u32 __user *)addr) ||
__put_user ((u32)first, (u32 __user *)(addr + 4)) ||
__put_user (second >> 32, (u32 __user *)(addr + 8)) ||
__put_user ((u32)second, (u32 __user *)(addr + 12))) {
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
} else {
/* LDF, LDDF, LDQF */
u32 data[4] __attribute__ ((aligned(8)));
int size, i;
int err;
if (asi < 0x80) {
do_privact(regs);
return 1;
} else if (asi > ASI_SNFL) {
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
switch (insn & 0x180000) {
case 0x000000: size = 1; break;
case 0x100000: size = 4; break;
default: size = 2; break;
}
for (i = 0; i < size; i++)
data[i] = 0;
err = get_user (data[0], (u32 __user *) addr);
if (!err) {
for (i = 1; i < size; i++)
err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
}
if (err && !(asi & 0x2 /* NF */)) {
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
if (asi & 0x8) /* Little */ {
u64 tmp;
switch (size) {
case 1: data[0] = le32_to_cpup(data + 0); break;
default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
break;
case 4: tmp = le64_to_cpup((u64 *)(data + 0));
*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
*(u64 *)(data + 2) = tmp;
break;
}
}
if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
current_thread_info()->fpsaved[0] = FPRS_FEF;
current_thread_info()->gsr[0] = 0;
}
if (!(current_thread_info()->fpsaved[0] & flag)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
memcpy(f->regs + freg, data, size * 4);
current_thread_info()->fpsaved[0] |= flag;
}
advance(regs);
return 1;
}
Commit Message: perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: Michael Cree <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Deng-Cheng Zhu <[email protected]>
Cc: Anton Blanchard <[email protected]>
Cc: Eric B Munson <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Paul Mundt <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Jason Wessel <[email protected]>
Cc: Don Zickus <[email protected]>
Link: http://lkml.kernel.org/n/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
CWE ID: CWE-399 | int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
unsigned long addr = compute_effective_address(regs, insn, 0);
int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
struct fpustate *f = FPUSTATE;
int asi = decode_asi(insn, regs);
int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
save_and_clear_fpu();
current_thread_info()->xfsr[0] &= ~0x1c000;
if (freg & 3) {
current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
do_fpother(regs);
return 0;
}
if (insn & 0x200000) {
/* STQ */
u64 first = 0, second = 0;
if (current_thread_info()->fpsaved[0] & flag) {
first = *(u64 *)&f->regs[freg];
second = *(u64 *)&f->regs[freg+2];
}
if (asi < 0x80) {
do_privact(regs);
return 1;
}
switch (asi) {
case ASI_P:
case ASI_S: break;
case ASI_PL:
case ASI_SL:
{
/* Need to convert endians */
u64 tmp = __swab64p(&first);
first = __swab64p(&second);
second = tmp;
break;
}
default:
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
if (put_user (first >> 32, (u32 __user *)addr) ||
__put_user ((u32)first, (u32 __user *)(addr + 4)) ||
__put_user (second >> 32, (u32 __user *)(addr + 8)) ||
__put_user ((u32)second, (u32 __user *)(addr + 12))) {
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
} else {
/* LDF, LDDF, LDQF */
u32 data[4] __attribute__ ((aligned(8)));
int size, i;
int err;
if (asi < 0x80) {
do_privact(regs);
return 1;
} else if (asi > ASI_SNFL) {
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
switch (insn & 0x180000) {
case 0x000000: size = 1; break;
case 0x100000: size = 4; break;
default: size = 2; break;
}
for (i = 0; i < size; i++)
data[i] = 0;
err = get_user (data[0], (u32 __user *) addr);
if (!err) {
for (i = 1; i < size; i++)
err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
}
if (err && !(asi & 0x2 /* NF */)) {
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
if (asi & 0x8) /* Little */ {
u64 tmp;
switch (size) {
case 1: data[0] = le32_to_cpup(data + 0); break;
default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
break;
case 4: tmp = le64_to_cpup((u64 *)(data + 0));
*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
*(u64 *)(data + 2) = tmp;
break;
}
}
if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
current_thread_info()->fpsaved[0] = FPRS_FEF;
current_thread_info()->gsr[0] = 0;
}
if (!(current_thread_info()->fpsaved[0] & flag)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
memcpy(f->regs + freg, data, size * 4);
current_thread_info()->fpsaved[0] |= flag;
}
advance(regs);
return 1;
}
| 2,411 |
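
The only change in the fixed handle_ldf_stq above is the perf_sw_event() call: the commit removes the old nmi argument, so callers simply drop the literal 0. A toy sketch of that calling-convention change; perf_sw_event here is a stand-in with a simplified signature, not the kernel's real prototype:

#include <stdio.h>

struct pt_regs;                     /* opaque stand-in for the real register frame */

/* Old shape: callers had to say whether they were in NMI context.
 *   void perf_sw_event(unsigned type, unsigned nr, int nmi,
 *                      struct pt_regs *regs, unsigned long addr);
 * New shape after the commit: the context question is answered inside the
 * implementation, so the parameter disappears from every call site. */
static void perf_sw_event(unsigned int type, unsigned int nr,
                          struct pt_regs *regs, unsigned long addr)
{
    (void)regs; (void)addr;
    printf("sw event %u count %u\n", type, nr);
}

#define PERF_COUNT_SW_EMULATION_FAULTS 8u

int main(void)
{
    /* Caller before:  perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
     * Caller after:   the literal 0 for "nmi" is simply dropped.              */
    perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, NULL, 0);
    return 0;
}
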
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool BrowserCommandController::ExecuteCommandWithDisposition(
int id, WindowOpenDisposition disposition) {
if (!SupportsCommand(id) || !IsCommandEnabled(id))
return false;
if (browser_->tab_strip_model()->active_index() == TabStripModel::kNoTab)
return true;
DCHECK(command_updater_.IsCommandEnabled(id)) << "Invalid/disabled command "
<< id;
switch (id) {
case IDC_BACK:
GoBack(browser_, disposition);
break;
case IDC_FORWARD:
GoForward(browser_, disposition);
break;
case IDC_RELOAD:
Reload(browser_, disposition);
break;
case IDC_RELOAD_CLEARING_CACHE:
ClearCache(browser_);
FALLTHROUGH;
case IDC_RELOAD_BYPASSING_CACHE:
ReloadBypassingCache(browser_, disposition);
break;
case IDC_HOME:
Home(browser_, disposition);
break;
case IDC_OPEN_CURRENT_URL:
OpenCurrentURL(browser_);
break;
case IDC_STOP:
Stop(browser_);
break;
case IDC_NEW_WINDOW:
NewWindow(browser_);
break;
case IDC_NEW_INCOGNITO_WINDOW:
NewIncognitoWindow(profile());
break;
case IDC_CLOSE_WINDOW:
base::RecordAction(base::UserMetricsAction("CloseWindowByKey"));
CloseWindow(browser_);
break;
case IDC_NEW_TAB: {
NewTab(browser_);
#if BUILDFLAG(ENABLE_DESKTOP_IN_PRODUCT_HELP)
auto* new_tab_tracker =
feature_engagement::NewTabTrackerFactory::GetInstance()
->GetForProfile(profile());
new_tab_tracker->OnNewTabOpened();
new_tab_tracker->CloseBubble();
#endif
break;
}
case IDC_CLOSE_TAB:
base::RecordAction(base::UserMetricsAction("CloseTabByKey"));
CloseTab(browser_);
break;
case IDC_SELECT_NEXT_TAB:
base::RecordAction(base::UserMetricsAction("Accel_SelectNextTab"));
SelectNextTab(browser_);
break;
case IDC_SELECT_PREVIOUS_TAB:
base::RecordAction(base::UserMetricsAction("Accel_SelectPreviousTab"));
SelectPreviousTab(browser_);
break;
case IDC_MOVE_TAB_NEXT:
MoveTabNext(browser_);
break;
case IDC_MOVE_TAB_PREVIOUS:
MoveTabPrevious(browser_);
break;
case IDC_SELECT_TAB_0:
case IDC_SELECT_TAB_1:
case IDC_SELECT_TAB_2:
case IDC_SELECT_TAB_3:
case IDC_SELECT_TAB_4:
case IDC_SELECT_TAB_5:
case IDC_SELECT_TAB_6:
case IDC_SELECT_TAB_7:
base::RecordAction(base::UserMetricsAction("Accel_SelectNumberedTab"));
SelectNumberedTab(browser_, id - IDC_SELECT_TAB_0);
break;
case IDC_SELECT_LAST_TAB:
base::RecordAction(base::UserMetricsAction("Accel_SelectNumberedTab"));
SelectLastTab(browser_);
break;
case IDC_DUPLICATE_TAB:
DuplicateTab(browser_);
break;
case IDC_RESTORE_TAB:
RestoreTab(browser_);
break;
case IDC_SHOW_AS_TAB:
ConvertPopupToTabbedBrowser(browser_);
break;
case IDC_FULLSCREEN:
chrome::ToggleFullscreenMode(browser_);
break;
case IDC_OPEN_IN_PWA_WINDOW:
base::RecordAction(base::UserMetricsAction("OpenActiveTabInPwaWindow"));
ReparentSecureActiveTabIntoPwaWindow(browser_);
break;
#if defined(OS_CHROMEOS)
case IDC_VISIT_DESKTOP_OF_LRU_USER_2:
case IDC_VISIT_DESKTOP_OF_LRU_USER_3:
ExecuteVisitDesktopCommand(id, window()->GetNativeWindow());
break;
#endif
#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
case IDC_MINIMIZE_WINDOW:
browser_->window()->Minimize();
break;
case IDC_MAXIMIZE_WINDOW:
browser_->window()->Maximize();
break;
case IDC_RESTORE_WINDOW:
browser_->window()->Restore();
break;
case IDC_USE_SYSTEM_TITLE_BAR: {
PrefService* prefs = profile()->GetPrefs();
prefs->SetBoolean(prefs::kUseCustomChromeFrame,
!prefs->GetBoolean(prefs::kUseCustomChromeFrame));
break;
}
#endif
#if defined(OS_MACOSX)
case IDC_TOGGLE_FULLSCREEN_TOOLBAR:
chrome::ToggleFullscreenToolbar(browser_);
break;
case IDC_TOGGLE_JAVASCRIPT_APPLE_EVENTS: {
PrefService* prefs = profile()->GetPrefs();
prefs->SetBoolean(prefs::kAllowJavascriptAppleEvents,
!prefs->GetBoolean(prefs::kAllowJavascriptAppleEvents));
break;
}
#endif
case IDC_EXIT:
Exit();
break;
case IDC_SAVE_PAGE:
SavePage(browser_);
break;
case IDC_BOOKMARK_PAGE:
#if BUILDFLAG(ENABLE_DESKTOP_IN_PRODUCT_HELP)
feature_engagement::BookmarkTrackerFactory::GetInstance()
->GetForProfile(profile())
->OnBookmarkAdded();
#endif
BookmarkCurrentPageAllowingExtensionOverrides(browser_);
break;
case IDC_BOOKMARK_ALL_TABS:
#if BUILDFLAG(ENABLE_DESKTOP_IN_PRODUCT_HELP)
feature_engagement::BookmarkTrackerFactory::GetInstance()
->GetForProfile(profile())
->OnBookmarkAdded();
#endif
BookmarkAllTabs(browser_);
break;
case IDC_VIEW_SOURCE:
browser_->tab_strip_model()
->GetActiveWebContents()
->GetMainFrame()
->ViewSource();
break;
case IDC_EMAIL_PAGE_LOCATION:
EmailPageLocation(browser_);
break;
case IDC_PRINT:
Print(browser_);
break;
#if BUILDFLAG(ENABLE_PRINTING)
case IDC_BASIC_PRINT:
base::RecordAction(base::UserMetricsAction("Accel_Advanced_Print"));
BasicPrint(browser_);
break;
#endif // ENABLE_PRINTING
case IDC_SAVE_CREDIT_CARD_FOR_PAGE:
SaveCreditCard(browser_);
break;
case IDC_MIGRATE_LOCAL_CREDIT_CARD_FOR_PAGE:
MigrateLocalCards(browser_);
break;
case IDC_TRANSLATE_PAGE:
Translate(browser_);
break;
case IDC_MANAGE_PASSWORDS_FOR_PAGE:
ManagePasswordsForPage(browser_);
break;
case IDC_CUT:
case IDC_COPY:
case IDC_PASTE:
CutCopyPaste(browser_, id);
break;
case IDC_FIND:
Find(browser_);
break;
case IDC_FIND_NEXT:
FindNext(browser_);
break;
case IDC_FIND_PREVIOUS:
FindPrevious(browser_);
break;
case IDC_ZOOM_PLUS:
Zoom(browser_, content::PAGE_ZOOM_IN);
break;
case IDC_ZOOM_NORMAL:
Zoom(browser_, content::PAGE_ZOOM_RESET);
break;
case IDC_ZOOM_MINUS:
Zoom(browser_, content::PAGE_ZOOM_OUT);
break;
case IDC_FOCUS_TOOLBAR:
base::RecordAction(base::UserMetricsAction("Accel_Focus_Toolbar"));
FocusToolbar(browser_);
break;
case IDC_FOCUS_LOCATION:
base::RecordAction(base::UserMetricsAction("Accel_Focus_Location"));
FocusLocationBar(browser_);
break;
case IDC_FOCUS_SEARCH:
base::RecordAction(base::UserMetricsAction("Accel_Focus_Search"));
FocusSearch(browser_);
break;
case IDC_FOCUS_MENU_BAR:
FocusAppMenu(browser_);
break;
case IDC_FOCUS_BOOKMARKS:
base::RecordAction(base::UserMetricsAction("Accel_Focus_Bookmarks"));
FocusBookmarksToolbar(browser_);
break;
case IDC_FOCUS_INACTIVE_POPUP_FOR_ACCESSIBILITY:
FocusInactivePopupForAccessibility(browser_);
break;
case IDC_FOCUS_NEXT_PANE:
FocusNextPane(browser_);
break;
case IDC_FOCUS_PREVIOUS_PANE:
FocusPreviousPane(browser_);
break;
case IDC_OPEN_FILE:
browser_->OpenFile();
break;
case IDC_CREATE_SHORTCUT:
CreateBookmarkAppFromCurrentWebContents(browser_,
true /* force_shortcut_app */);
break;
case IDC_INSTALL_PWA:
CreateBookmarkAppFromCurrentWebContents(browser_,
false /* force_shortcut_app */);
break;
case IDC_DEV_TOOLS:
ToggleDevToolsWindow(browser_, DevToolsToggleAction::Show());
break;
case IDC_DEV_TOOLS_CONSOLE:
ToggleDevToolsWindow(browser_, DevToolsToggleAction::ShowConsolePanel());
break;
case IDC_DEV_TOOLS_DEVICES:
InspectUI::InspectDevices(browser_);
break;
case IDC_DEV_TOOLS_INSPECT:
ToggleDevToolsWindow(browser_, DevToolsToggleAction::Inspect());
break;
case IDC_DEV_TOOLS_TOGGLE:
ToggleDevToolsWindow(browser_, DevToolsToggleAction::Toggle());
break;
case IDC_TASK_MANAGER:
OpenTaskManager(browser_);
break;
#if defined(OS_CHROMEOS)
case IDC_TAKE_SCREENSHOT:
TakeScreenshot();
break;
#endif
#if defined(GOOGLE_CHROME_BUILD)
case IDC_FEEDBACK:
OpenFeedbackDialog(browser_, kFeedbackSourceBrowserCommand);
break;
#endif
case IDC_SHOW_BOOKMARK_BAR:
ToggleBookmarkBar(browser_);
break;
case IDC_PROFILING_ENABLED:
Profiling::Toggle();
break;
case IDC_SHOW_BOOKMARK_MANAGER:
ShowBookmarkManager(browser_);
break;
case IDC_SHOW_APP_MENU:
base::RecordAction(base::UserMetricsAction("Accel_Show_App_Menu"));
ShowAppMenu(browser_);
break;
case IDC_SHOW_AVATAR_MENU:
ShowAvatarMenu(browser_);
break;
case IDC_SHOW_HISTORY:
ShowHistory(browser_);
break;
case IDC_SHOW_DOWNLOADS:
ShowDownloads(browser_);
break;
case IDC_MANAGE_EXTENSIONS:
ShowExtensions(browser_, std::string());
break;
case IDC_OPTIONS:
ShowSettings(browser_);
break;
case IDC_EDIT_SEARCH_ENGINES:
ShowSearchEngineSettings(browser_);
break;
case IDC_VIEW_PASSWORDS:
ShowPasswordManager(browser_);
break;
case IDC_CLEAR_BROWSING_DATA:
ShowClearBrowsingDataDialog(browser_);
break;
case IDC_IMPORT_SETTINGS:
ShowImportDialog(browser_);
break;
case IDC_TOGGLE_REQUEST_TABLET_SITE:
ToggleRequestTabletSite(browser_);
break;
case IDC_ABOUT:
ShowAboutChrome(browser_);
break;
case IDC_UPGRADE_DIALOG:
OpenUpdateChromeDialog(browser_);
break;
case IDC_HELP_PAGE_VIA_KEYBOARD:
ShowHelp(browser_, HELP_SOURCE_KEYBOARD);
break;
case IDC_HELP_PAGE_VIA_MENU:
ShowHelp(browser_, HELP_SOURCE_MENU);
break;
case IDC_SHOW_BETA_FORUM:
ShowBetaForum(browser_);
break;
case IDC_SHOW_SIGNIN:
ShowBrowserSigninOrSettings(
browser_, signin_metrics::AccessPoint::ACCESS_POINT_MENU);
break;
case IDC_DISTILL_PAGE:
DistillCurrentPage(browser_);
break;
case IDC_ROUTE_MEDIA:
RouteMedia(browser_);
break;
case IDC_WINDOW_MUTE_SITE:
MuteSite(browser_);
break;
case IDC_WINDOW_PIN_TAB:
PinTab(browser_);
break;
case IDC_COPY_URL:
CopyURL(browser_);
break;
case IDC_OPEN_IN_CHROME:
OpenInChrome(browser_);
break;
case IDC_SITE_SETTINGS:
ShowSiteSettings(
browser_,
browser_->tab_strip_model()->GetActiveWebContents()->GetVisibleURL());
break;
case IDC_HOSTED_APP_MENU_APP_INFO:
ShowPageInfoDialog(browser_->tab_strip_model()->GetActiveWebContents(),
bubble_anchor_util::kAppMenuButton);
break;
default:
LOG(WARNING) << "Received Unimplemented Command: " << id;
break;
}
return true;
}
Commit Message: mac: Do not let synthetic events toggle "Allow JavaScript From AppleEvents"
Bug: 891697
Change-Id: I49eb77963515637df739c9d2ce83530d4e21cf15
Reviewed-on: https://chromium-review.googlesource.com/c/1308771
Reviewed-by: Elly Fong-Jones <[email protected]>
Commit-Queue: Robert Sesek <[email protected]>
Cr-Commit-Position: refs/heads/master@{#604268}
CWE ID: CWE-20 | bool BrowserCommandController::ExecuteCommandWithDisposition(
int id, WindowOpenDisposition disposition) {
if (!SupportsCommand(id) || !IsCommandEnabled(id))
return false;
if (browser_->tab_strip_model()->active_index() == TabStripModel::kNoTab)
return true;
DCHECK(command_updater_.IsCommandEnabled(id)) << "Invalid/disabled command "
<< id;
switch (id) {
case IDC_BACK:
GoBack(browser_, disposition);
break;
case IDC_FORWARD:
GoForward(browser_, disposition);
break;
case IDC_RELOAD:
Reload(browser_, disposition);
break;
case IDC_RELOAD_CLEARING_CACHE:
ClearCache(browser_);
FALLTHROUGH;
case IDC_RELOAD_BYPASSING_CACHE:
ReloadBypassingCache(browser_, disposition);
break;
case IDC_HOME:
Home(browser_, disposition);
break;
case IDC_OPEN_CURRENT_URL:
OpenCurrentURL(browser_);
break;
case IDC_STOP:
Stop(browser_);
break;
case IDC_NEW_WINDOW:
NewWindow(browser_);
break;
case IDC_NEW_INCOGNITO_WINDOW:
NewIncognitoWindow(profile());
break;
case IDC_CLOSE_WINDOW:
base::RecordAction(base::UserMetricsAction("CloseWindowByKey"));
CloseWindow(browser_);
break;
case IDC_NEW_TAB: {
NewTab(browser_);
#if BUILDFLAG(ENABLE_DESKTOP_IN_PRODUCT_HELP)
auto* new_tab_tracker =
feature_engagement::NewTabTrackerFactory::GetInstance()
->GetForProfile(profile());
new_tab_tracker->OnNewTabOpened();
new_tab_tracker->CloseBubble();
#endif
break;
}
case IDC_CLOSE_TAB:
base::RecordAction(base::UserMetricsAction("CloseTabByKey"));
CloseTab(browser_);
break;
case IDC_SELECT_NEXT_TAB:
base::RecordAction(base::UserMetricsAction("Accel_SelectNextTab"));
SelectNextTab(browser_);
break;
case IDC_SELECT_PREVIOUS_TAB:
base::RecordAction(base::UserMetricsAction("Accel_SelectPreviousTab"));
SelectPreviousTab(browser_);
break;
case IDC_MOVE_TAB_NEXT:
MoveTabNext(browser_);
break;
case IDC_MOVE_TAB_PREVIOUS:
MoveTabPrevious(browser_);
break;
case IDC_SELECT_TAB_0:
case IDC_SELECT_TAB_1:
case IDC_SELECT_TAB_2:
case IDC_SELECT_TAB_3:
case IDC_SELECT_TAB_4:
case IDC_SELECT_TAB_5:
case IDC_SELECT_TAB_6:
case IDC_SELECT_TAB_7:
base::RecordAction(base::UserMetricsAction("Accel_SelectNumberedTab"));
SelectNumberedTab(browser_, id - IDC_SELECT_TAB_0);
break;
case IDC_SELECT_LAST_TAB:
base::RecordAction(base::UserMetricsAction("Accel_SelectNumberedTab"));
SelectLastTab(browser_);
break;
case IDC_DUPLICATE_TAB:
DuplicateTab(browser_);
break;
case IDC_RESTORE_TAB:
RestoreTab(browser_);
break;
case IDC_SHOW_AS_TAB:
ConvertPopupToTabbedBrowser(browser_);
break;
case IDC_FULLSCREEN:
chrome::ToggleFullscreenMode(browser_);
break;
case IDC_OPEN_IN_PWA_WINDOW:
base::RecordAction(base::UserMetricsAction("OpenActiveTabInPwaWindow"));
ReparentSecureActiveTabIntoPwaWindow(browser_);
break;
#if defined(OS_CHROMEOS)
case IDC_VISIT_DESKTOP_OF_LRU_USER_2:
case IDC_VISIT_DESKTOP_OF_LRU_USER_3:
ExecuteVisitDesktopCommand(id, window()->GetNativeWindow());
break;
#endif
#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
case IDC_MINIMIZE_WINDOW:
browser_->window()->Minimize();
break;
case IDC_MAXIMIZE_WINDOW:
browser_->window()->Maximize();
break;
case IDC_RESTORE_WINDOW:
browser_->window()->Restore();
break;
case IDC_USE_SYSTEM_TITLE_BAR: {
PrefService* prefs = profile()->GetPrefs();
prefs->SetBoolean(prefs::kUseCustomChromeFrame,
!prefs->GetBoolean(prefs::kUseCustomChromeFrame));
break;
}
#endif
#if defined(OS_MACOSX)
case IDC_TOGGLE_FULLSCREEN_TOOLBAR:
chrome::ToggleFullscreenToolbar(browser_);
break;
case IDC_TOGGLE_JAVASCRIPT_APPLE_EVENTS: {
chrome::ToggleJavaScriptFromAppleEventsAllowed(browser_);
break;
}
#endif
case IDC_EXIT:
Exit();
break;
case IDC_SAVE_PAGE:
SavePage(browser_);
break;
case IDC_BOOKMARK_PAGE:
#if BUILDFLAG(ENABLE_DESKTOP_IN_PRODUCT_HELP)
feature_engagement::BookmarkTrackerFactory::GetInstance()
->GetForProfile(profile())
->OnBookmarkAdded();
#endif
BookmarkCurrentPageAllowingExtensionOverrides(browser_);
break;
case IDC_BOOKMARK_ALL_TABS:
#if BUILDFLAG(ENABLE_DESKTOP_IN_PRODUCT_HELP)
feature_engagement::BookmarkTrackerFactory::GetInstance()
->GetForProfile(profile())
->OnBookmarkAdded();
#endif
BookmarkAllTabs(browser_);
break;
case IDC_VIEW_SOURCE:
browser_->tab_strip_model()
->GetActiveWebContents()
->GetMainFrame()
->ViewSource();
break;
case IDC_EMAIL_PAGE_LOCATION:
EmailPageLocation(browser_);
break;
case IDC_PRINT:
Print(browser_);
break;
#if BUILDFLAG(ENABLE_PRINTING)
case IDC_BASIC_PRINT:
base::RecordAction(base::UserMetricsAction("Accel_Advanced_Print"));
BasicPrint(browser_);
break;
#endif // ENABLE_PRINTING
case IDC_SAVE_CREDIT_CARD_FOR_PAGE:
SaveCreditCard(browser_);
break;
case IDC_MIGRATE_LOCAL_CREDIT_CARD_FOR_PAGE:
MigrateLocalCards(browser_);
break;
case IDC_TRANSLATE_PAGE:
Translate(browser_);
break;
case IDC_MANAGE_PASSWORDS_FOR_PAGE:
ManagePasswordsForPage(browser_);
break;
case IDC_CUT:
case IDC_COPY:
case IDC_PASTE:
CutCopyPaste(browser_, id);
break;
case IDC_FIND:
Find(browser_);
break;
case IDC_FIND_NEXT:
FindNext(browser_);
break;
case IDC_FIND_PREVIOUS:
FindPrevious(browser_);
break;
case IDC_ZOOM_PLUS:
Zoom(browser_, content::PAGE_ZOOM_IN);
break;
case IDC_ZOOM_NORMAL:
Zoom(browser_, content::PAGE_ZOOM_RESET);
break;
case IDC_ZOOM_MINUS:
Zoom(browser_, content::PAGE_ZOOM_OUT);
break;
case IDC_FOCUS_TOOLBAR:
base::RecordAction(base::UserMetricsAction("Accel_Focus_Toolbar"));
FocusToolbar(browser_);
break;
case IDC_FOCUS_LOCATION:
base::RecordAction(base::UserMetricsAction("Accel_Focus_Location"));
FocusLocationBar(browser_);
break;
case IDC_FOCUS_SEARCH:
base::RecordAction(base::UserMetricsAction("Accel_Focus_Search"));
FocusSearch(browser_);
break;
case IDC_FOCUS_MENU_BAR:
FocusAppMenu(browser_);
break;
case IDC_FOCUS_BOOKMARKS:
base::RecordAction(base::UserMetricsAction("Accel_Focus_Bookmarks"));
FocusBookmarksToolbar(browser_);
break;
case IDC_FOCUS_INACTIVE_POPUP_FOR_ACCESSIBILITY:
FocusInactivePopupForAccessibility(browser_);
break;
case IDC_FOCUS_NEXT_PANE:
FocusNextPane(browser_);
break;
case IDC_FOCUS_PREVIOUS_PANE:
FocusPreviousPane(browser_);
break;
case IDC_OPEN_FILE:
browser_->OpenFile();
break;
case IDC_CREATE_SHORTCUT:
CreateBookmarkAppFromCurrentWebContents(browser_,
true /* force_shortcut_app */);
break;
case IDC_INSTALL_PWA:
CreateBookmarkAppFromCurrentWebContents(browser_,
false /* force_shortcut_app */);
break;
case IDC_DEV_TOOLS:
ToggleDevToolsWindow(browser_, DevToolsToggleAction::Show());
break;
case IDC_DEV_TOOLS_CONSOLE:
ToggleDevToolsWindow(browser_, DevToolsToggleAction::ShowConsolePanel());
break;
case IDC_DEV_TOOLS_DEVICES:
InspectUI::InspectDevices(browser_);
break;
case IDC_DEV_TOOLS_INSPECT:
ToggleDevToolsWindow(browser_, DevToolsToggleAction::Inspect());
break;
case IDC_DEV_TOOLS_TOGGLE:
ToggleDevToolsWindow(browser_, DevToolsToggleAction::Toggle());
break;
case IDC_TASK_MANAGER:
OpenTaskManager(browser_);
break;
#if defined(OS_CHROMEOS)
case IDC_TAKE_SCREENSHOT:
TakeScreenshot();
break;
#endif
#if defined(GOOGLE_CHROME_BUILD)
case IDC_FEEDBACK:
OpenFeedbackDialog(browser_, kFeedbackSourceBrowserCommand);
break;
#endif
case IDC_SHOW_BOOKMARK_BAR:
ToggleBookmarkBar(browser_);
break;
case IDC_PROFILING_ENABLED:
Profiling::Toggle();
break;
case IDC_SHOW_BOOKMARK_MANAGER:
ShowBookmarkManager(browser_);
break;
case IDC_SHOW_APP_MENU:
base::RecordAction(base::UserMetricsAction("Accel_Show_App_Menu"));
ShowAppMenu(browser_);
break;
case IDC_SHOW_AVATAR_MENU:
ShowAvatarMenu(browser_);
break;
case IDC_SHOW_HISTORY:
ShowHistory(browser_);
break;
case IDC_SHOW_DOWNLOADS:
ShowDownloads(browser_);
break;
case IDC_MANAGE_EXTENSIONS:
ShowExtensions(browser_, std::string());
break;
case IDC_OPTIONS:
ShowSettings(browser_);
break;
case IDC_EDIT_SEARCH_ENGINES:
ShowSearchEngineSettings(browser_);
break;
case IDC_VIEW_PASSWORDS:
ShowPasswordManager(browser_);
break;
case IDC_CLEAR_BROWSING_DATA:
ShowClearBrowsingDataDialog(browser_);
break;
case IDC_IMPORT_SETTINGS:
ShowImportDialog(browser_);
break;
case IDC_TOGGLE_REQUEST_TABLET_SITE:
ToggleRequestTabletSite(browser_);
break;
case IDC_ABOUT:
ShowAboutChrome(browser_);
break;
case IDC_UPGRADE_DIALOG:
OpenUpdateChromeDialog(browser_);
break;
case IDC_HELP_PAGE_VIA_KEYBOARD:
ShowHelp(browser_, HELP_SOURCE_KEYBOARD);
break;
case IDC_HELP_PAGE_VIA_MENU:
ShowHelp(browser_, HELP_SOURCE_MENU);
break;
case IDC_SHOW_BETA_FORUM:
ShowBetaForum(browser_);
break;
case IDC_SHOW_SIGNIN:
ShowBrowserSigninOrSettings(
browser_, signin_metrics::AccessPoint::ACCESS_POINT_MENU);
break;
case IDC_DISTILL_PAGE:
DistillCurrentPage(browser_);
break;
case IDC_ROUTE_MEDIA:
RouteMedia(browser_);
break;
case IDC_WINDOW_MUTE_SITE:
MuteSite(browser_);
break;
case IDC_WINDOW_PIN_TAB:
PinTab(browser_);
break;
case IDC_COPY_URL:
CopyURL(browser_);
break;
case IDC_OPEN_IN_CHROME:
OpenInChrome(browser_);
break;
case IDC_SITE_SETTINGS:
ShowSiteSettings(
browser_,
browser_->tab_strip_model()->GetActiveWebContents()->GetVisibleURL());
break;
case IDC_HOSTED_APP_MENU_APP_INFO:
ShowPageInfoDialog(browser_->tab_strip_model()->GetActiveWebContents(),
bubble_anchor_util::kAppMenuButton);
break;
default:
LOG(WARNING) << "Received Unimplemented Command: " << id;
break;
}
return true;
}
| 24,851 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
{
unsigned long hstart, hend;
if (!vma->anon_vma)
/*
* Not yet faulted in so we will register later in the
* page fault if needed.
*/
return 0;
if (vma->vm_file || vma->vm_ops)
/* khugepaged not yet working on file or special mappings */
return 0;
VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
if (hstart < hend)
return khugepaged_enter(vma);
return 0;
}
Commit Message: mm: thp: fix /dev/zero MAP_PRIVATE and vm_flags cleanups
The huge_memory.c THP page fault was allowed to run if vm_ops was null
(which would succeed for /dev/zero MAP_PRIVATE, as the f_op->mmap wouldn't
setup a special vma->vm_ops and it would fallback to regular anonymous
memory) but other THP logics weren't fully activated for vmas with vm_file
not NULL (/dev/zero has a not NULL vma->vm_file).
So this removes the vm_file checks so that /dev/zero also can safely use
THP (the other albeit safer approach to fix this bug would have been to
prevent the THP initial page fault to run if vm_file was set).
After removing the vm_file checks, this also makes huge_memory.c stricter
in khugepaged for the DEBUG_VM=y case. It doesn't replace the vm_file
check with a is_pfn_mapping check (but it keeps checking for VM_PFNMAP
under VM_BUG_ON) because for a is_cow_mapping() mapping VM_PFNMAP should
only be allowed to exist before the first page fault, and in turn when
vma->anon_vma is null (so preventing khugepaged registration). So I tend
to think the previous comment saying if vm_file was set, VM_PFNMAP might
have been set and we could still be registered in khugepaged (despite
anon_vma was not NULL to be registered in khugepaged) was too paranoid.
The is_linear_pfn_mapping check is also I think superfluous (as described
by comment) but under DEBUG_VM it is safe to stay.
Addresses https://bugzilla.kernel.org/show_bug.cgi?id=33682
Signed-off-by: Andrea Arcangeli <[email protected]>
Reported-by: Caspar Zhang <[email protected]>
Acked-by: Mel Gorman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38.x]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
CWE ID: CWE-399 | int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
{
unsigned long hstart, hend;
if (!vma->anon_vma)
/*
* Not yet faulted in so we will register later in the
* page fault if needed.
*/
return 0;
if (vma->vm_ops)
/* khugepaged not yet working on file or special mappings */
return 0;
/*
	 * If is_pfn_mapping() is true is_linear_pfn_mapping() must be
* true too, verify it here.
*/
VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
if (hstart < hend)
return khugepaged_enter(vma);
return 0;
}
| 3,287 |
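
After the fix above, khugepaged eligibility no longer looks at vma->vm_file, only at vma->vm_ops and the VM_NO_THP flags, which is what lets MAP_PRIVATE /dev/zero mappings use THP. A user-space sketch of that predicate with simplified stand-in fields; vma_stub and thp_candidate are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define VM_NO_THP 0x1ul   /* stand-in for the kernel's "never THP" vm_flags bits */

struct vma_stub {
    void *anon_vma;       /* non-NULL once anonymous pages were faulted in */
    void *vm_file;        /* backing file, if any (/dev/zero sets this)    */
    void *vm_ops;         /* special mapping operations, if any            */
    unsigned long vm_flags;
};

/* The old rule rejected any vma with vm_file set; the fixed rule only cares
 * about vm_ops, so a file-backed-but-anonymous /dev/zero private mapping
 * qualifies for khugepaged registration. */
static bool thp_candidate(const struct vma_stub *vma)
{
    if (!vma->anon_vma)
        return false;              /* nothing faulted in yet */
    if (vma->vm_ops)
        return false;              /* special mapping, khugepaged skips it */
    return !(vma->vm_flags & VM_NO_THP);
}

int main(void)
{
    struct vma_stub devzero = { .anon_vma = &devzero, .vm_file = &devzero,
                                .vm_ops = NULL, .vm_flags = 0 };
    printf("private /dev/zero mapping eligible: %d\n", thp_candidate(&devzero));
    return 0;
}
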
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int udf_symlink_filler(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
struct buffer_head *bh = NULL;
unsigned char *symlink;
int err;
unsigned char *p = kmap(page);
struct udf_inode_info *iinfo;
uint32_t pos;
/* We don't support symlinks longer than one block */
if (inode->i_size > inode->i_sb->s_blocksize) {
err = -ENAMETOOLONG;
goto out_unmap;
}
iinfo = UDF_I(inode);
pos = udf_block_map(inode, 0);
down_read(&iinfo->i_data_sem);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
symlink = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
} else {
bh = sb_bread(inode->i_sb, pos);
if (!bh) {
err = -EIO;
goto out_unlock_inode;
}
symlink = bh->b_data;
}
udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
brelse(bh);
up_read(&iinfo->i_data_sem);
SetPageUptodate(page);
kunmap(page);
unlock_page(page);
return 0;
out_unlock_inode:
up_read(&iinfo->i_data_sem);
SetPageError(page);
out_unmap:
kunmap(page);
unlock_page(page);
return err;
}
Commit Message: udf: Check path length when reading symlink
Symlink reading code does not check whether the resulting path fits into
the page provided by the generic code. This isn't as easy as just
checking the symlink size because of various encoding conversions we
perform on path. So we have to check whether there is still enough space
in the buffer on the fly.
CC: [email protected]
Reported-by: Carl Henrik Lunde <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
CWE ID: CWE-17 | static int udf_symlink_filler(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
struct buffer_head *bh = NULL;
unsigned char *symlink;
int err;
unsigned char *p = kmap(page);
struct udf_inode_info *iinfo;
uint32_t pos;
/* We don't support symlinks longer than one block */
if (inode->i_size > inode->i_sb->s_blocksize) {
err = -ENAMETOOLONG;
goto out_unmap;
}
iinfo = UDF_I(inode);
pos = udf_block_map(inode, 0);
down_read(&iinfo->i_data_sem);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
symlink = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
} else {
bh = sb_bread(inode->i_sb, pos);
if (!bh) {
err = -EIO;
goto out_unlock_inode;
}
symlink = bh->b_data;
}
err = udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p, PAGE_SIZE);
brelse(bh);
if (err)
goto out_unlock_inode;
up_read(&iinfo->i_data_sem);
SetPageUptodate(page);
kunmap(page);
unlock_page(page);
return 0;
out_unlock_inode:
up_read(&iinfo->i_data_sem);
SetPageError(page);
out_unmap:
kunmap(page);
unlock_page(page);
return err;
}
| 1,660 |
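
The fixed udf_symlink_filler above passes PAGE_SIZE into udf_pc_to_char() and fails with an error instead of letting the expanded path overrun the page, since path-component decoding can make the result longer than the on-disk symlink. The sketch below shows the underlying pattern of checking remaining destination space while expanding; expand_path and its ':' separator are invented and do not reflect UDF's real path-component encoding:

#include <stdio.h>
#include <string.h>

/* Expand src into dst, refusing to write past dstlen. Returns 0 on success,
 * -1 (ENAMETOOLONG-style) when the expansion would overflow. Here the
 * "expansion" is just prefixing each component with '/', but the point is
 * the running "used" check, not the encoding itself. */
static int expand_path(const char *src, char *dst, size_t dstlen)
{
    size_t used = 0;
    const char *p = src;

    while (*p) {
        size_t comp = strcspn(p, ":");         /* toy component separator */
        if (used + comp + 2 > dstlen)          /* +1 for '/', +1 for NUL */
            return -1;
        dst[used++] = '/';
        memcpy(dst + used, p, comp);
        used += comp;
        p += comp + (p[comp] == ':');
    }
    dst[used] = '\0';
    return 0;
}

int main(void)
{
    char out[16];
    printf("fits:      %d\n", expand_path("usr:share", out, sizeof(out)));
    printf("overflows: %d\n", expand_path("usr:share:doc:udf", out, sizeof(out)));
    return 0;
}
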
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: _handle_carbons(xmpp_stanza_t *const stanza)
{
xmpp_stanza_t *carbons = xmpp_stanza_get_child_by_ns(stanza, STANZA_NS_CARBONS);
if (!carbons) {
return FALSE;
}
const char *name = xmpp_stanza_get_name(carbons);
if (!name) {
log_error("Unable to retrieve stanza name for Carbon");
return TRUE;
}
if (g_strcmp0(name, "private") == 0) {
log_info("Carbon received with private element.");
return FALSE;
}
if ((g_strcmp0(name, "received") != 0) && (g_strcmp0(name, "sent") != 0)) {
log_warning("Carbon received with unrecognised stanza name: %s", name);
return TRUE;
}
xmpp_stanza_t *forwarded = xmpp_stanza_get_child_by_ns(carbons, STANZA_NS_FORWARD);
if (!forwarded) {
log_warning("Carbon received with no forwarded element");
return TRUE;
}
xmpp_stanza_t *message = xmpp_stanza_get_child_by_name(forwarded, STANZA_NAME_MESSAGE);
if (!message) {
log_warning("Carbon received with no message element");
return TRUE;
}
char *message_txt = xmpp_message_get_body(message);
if (!message_txt) {
log_warning("Carbon received with no message.");
return TRUE;
}
const gchar *to = xmpp_stanza_get_to(message);
const gchar *from = xmpp_stanza_get_from(message);
if (!to) to = from;
Jid *jid_from = jid_create(from);
Jid *jid_to = jid_create(to);
Jid *my_jid = jid_create(connection_get_fulljid());
char *enc_message = NULL;
xmpp_stanza_t *x = xmpp_stanza_get_child_by_ns(message, STANZA_NS_ENCRYPTED);
if (x) {
enc_message = xmpp_stanza_get_text(x);
}
if (g_strcmp0(my_jid->barejid, jid_to->barejid) == 0) {
sv_ev_incoming_carbon(jid_from->barejid, jid_from->resourcepart, message_txt, enc_message);
} else {
sv_ev_outgoing_carbon(jid_to->barejid, message_txt, enc_message);
}
xmpp_ctx_t *ctx = connection_get_ctx();
xmpp_free(ctx, message_txt);
xmpp_free(ctx, enc_message);
jid_destroy(jid_from);
jid_destroy(jid_to);
jid_destroy(my_jid);
return TRUE;
}
Commit Message: Add carbons from check
CWE ID: CWE-346 | _handle_carbons(xmpp_stanza_t *const stanza)
{
xmpp_stanza_t *carbons = xmpp_stanza_get_child_by_ns(stanza, STANZA_NS_CARBONS);
if (!carbons) {
return FALSE;
}
const char *name = xmpp_stanza_get_name(carbons);
if (!name) {
log_error("Unable to retrieve stanza name for Carbon");
return TRUE;
}
if (g_strcmp0(name, "private") == 0) {
log_info("Carbon received with private element.");
return FALSE;
}
if ((g_strcmp0(name, "received") != 0) && (g_strcmp0(name, "sent") != 0)) {
log_warning("Carbon received with unrecognised stanza name: %s", name);
return TRUE;
}
xmpp_stanza_t *forwarded = xmpp_stanza_get_child_by_ns(carbons, STANZA_NS_FORWARD);
if (!forwarded) {
log_warning("Carbon received with no forwarded element");
return TRUE;
}
xmpp_stanza_t *message = xmpp_stanza_get_child_by_name(forwarded, STANZA_NAME_MESSAGE);
if (!message) {
log_warning("Carbon received with no message element");
return TRUE;
}
char *message_txt = xmpp_message_get_body(message);
if (!message_txt) {
log_warning("Carbon received with no message.");
return TRUE;
}
Jid *my_jid = jid_create(connection_get_fulljid());
const char *const stanza_from = xmpp_stanza_get_from(stanza);
Jid *msg_jid = jid_create(stanza_from);
if (g_strcmp0(my_jid->barejid, msg_jid->barejid) != 0) {
log_warning("Invalid carbon received, from: %s", stanza_from);
return TRUE;
}
const gchar *to = xmpp_stanza_get_to(message);
const gchar *from = xmpp_stanza_get_from(message);
if (!to) to = from;
Jid *jid_from = jid_create(from);
Jid *jid_to = jid_create(to);
char *enc_message = NULL;
xmpp_stanza_t *x = xmpp_stanza_get_child_by_ns(message, STANZA_NS_ENCRYPTED);
if (x) {
enc_message = xmpp_stanza_get_text(x);
}
if (g_strcmp0(my_jid->barejid, jid_to->barejid) == 0) {
sv_ev_incoming_carbon(jid_from->barejid, jid_from->resourcepart, message_txt, enc_message);
} else {
sv_ev_outgoing_carbon(jid_to->barejid, message_txt, enc_message);
}
xmpp_ctx_t *ctx = connection_get_ctx();
xmpp_free(ctx, message_txt);
xmpp_free(ctx, enc_message);
jid_destroy(jid_from);
jid_destroy(jid_to);
jid_destroy(my_jid);
return TRUE;
}
| 27,308 |
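
The added check in _handle_carbons above is the XEP-0280 requirement that a carbon is only honoured when the wrapping message stanza comes from the user's own bare JID; without it any contact can forge sent/received carbons (the CWE-346 origin-validation issue). A stand-alone sketch of that sender check, using a plain string comparison in place of profanity's Jid helpers; barejid and carbon_from_self are invented names:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Toy helper: strip the resource ("/laptop") to get the bare JID. */
static void barejid(const char *full, char *out, size_t outlen)
{
    size_t n = strcspn(full, "/");
    if (n >= outlen)
        n = outlen - 1;
    memcpy(out, full, n);
    out[n] = '\0';
}

/* Return true only when the carbon wrapper was sent by our own account. */
static bool carbon_from_self(const char *my_fulljid, const char *stanza_from)
{
    char mine[256], sender[256];

    if (!stanza_from)
        return false;               /* no from at all: reject */
    barejid(my_fulljid, mine, sizeof(mine));
    barejid(stanza_from, sender, sizeof(sender));
    return strcmp(mine, sender) == 0;
}

int main(void)
{
    printf("own server copy:   %d\n",
           carbon_from_self("alice@example.org/desk", "alice@example.org"));
    printf("forged by mallory: %d\n",
           carbon_from_self("alice@example.org/desk", "mallory@evil.test"));
    return 0;
}
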
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: create_spnego_ctx(void)
{
spnego_gss_ctx_id_t spnego_ctx = NULL;
spnego_ctx = (spnego_gss_ctx_id_t)
malloc(sizeof (spnego_gss_ctx_id_rec));
if (spnego_ctx == NULL) {
return (NULL);
}
spnego_ctx->magic_num = SPNEGO_MAGIC_ID;
spnego_ctx->ctx_handle = GSS_C_NO_CONTEXT;
spnego_ctx->mech_set = NULL;
spnego_ctx->internal_mech = NULL;
spnego_ctx->optionStr = NULL;
spnego_ctx->DER_mechTypes.length = 0;
spnego_ctx->DER_mechTypes.value = NULL;
spnego_ctx->default_cred = GSS_C_NO_CREDENTIAL;
spnego_ctx->mic_reqd = 0;
spnego_ctx->mic_sent = 0;
spnego_ctx->mic_rcvd = 0;
spnego_ctx->mech_complete = 0;
spnego_ctx->nego_done = 0;
spnego_ctx->internal_name = GSS_C_NO_NAME;
spnego_ctx->actual_mech = GSS_C_NO_OID;
check_spnego_options(spnego_ctx);
return (spnego_ctx);
}
Commit Message: Fix SPNEGO context aliasing bugs [CVE-2015-2695]
The SPNEGO mechanism currently replaces its context handle with the
mechanism context handle upon establishment, under the assumption that
most GSS functions are only called after context establishment. This
assumption is incorrect, and can lead to aliasing violations for some
programs. Maintain the SPNEGO context structure after context
establishment and refer to it in all GSS methods. Add initiate and
opened flags to the SPNEGO context structure for use in
gss_inquire_context() prior to context establishment.
CVE-2015-2695:
In MIT krb5 1.5 and later, applications which call
gss_inquire_context() on a partially-established SPNEGO context can
cause the GSS-API library to read from a pointer using the wrong type,
generally causing a process crash. This bug may go unnoticed, because
the most common SPNEGO authentication scenario establishes the context
after just one call to gss_accept_sec_context(). Java server
applications using the native JGSS provider are vulnerable to this
bug. A carefully crafted SPNEGO packet might allow the
gss_inquire_context() call to succeed with attacker-determined
results, but applications should not make access control decisions
based on gss_inquire_context() results prior to context establishment.
CVSSv2 Vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:POC/RL:OF/RC:C
[[email protected]: several bugfixes, style changes, and edge-case
behavior changes; commit message and CVE description]
ticket: 8244
target_version: 1.14
tags: pullup
CWE ID: CWE-18 | create_spnego_ctx(int initiate)
{
spnego_gss_ctx_id_t spnego_ctx = NULL;
spnego_ctx = (spnego_gss_ctx_id_t)
malloc(sizeof (spnego_gss_ctx_id_rec));
if (spnego_ctx == NULL) {
return (NULL);
}
spnego_ctx->magic_num = SPNEGO_MAGIC_ID;
spnego_ctx->ctx_handle = GSS_C_NO_CONTEXT;
spnego_ctx->mech_set = NULL;
spnego_ctx->internal_mech = NULL;
spnego_ctx->optionStr = NULL;
spnego_ctx->DER_mechTypes.length = 0;
spnego_ctx->DER_mechTypes.value = NULL;
spnego_ctx->default_cred = GSS_C_NO_CREDENTIAL;
spnego_ctx->mic_reqd = 0;
spnego_ctx->mic_sent = 0;
spnego_ctx->mic_rcvd = 0;
spnego_ctx->mech_complete = 0;
spnego_ctx->nego_done = 0;
spnego_ctx->opened = 0;
spnego_ctx->initiate = initiate;
spnego_ctx->internal_name = GSS_C_NO_NAME;
spnego_ctx->actual_mech = GSS_C_NO_OID;
check_spnego_options(spnego_ctx);
return (spnego_ctx);
}
| 25,083 |
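
The initiate and opened fields added above exist because, after this fix, the SPNEGO layer keeps its own context structure for the lifetime of the context instead of swapping in the mechanism's handle, so a query such as gss_inquire_context() made before establishment reads well-typed SPNEGO fields rather than casting a foreign pointer. The sketch below shows that wrapper-context idea with invented names (spnego_ctx, mech_ctx, inquire), not the real GSS-API types:

#include <stdio.h>
#include <stdlib.h>

struct mech_ctx { int dummy; };           /* stand-in for the inner mechanism */

struct spnego_ctx {
    unsigned magic;                       /* detects handles of the wrong type   */
    int initiate;                         /* set at creation, valid before open  */
    int opened;                           /* true only once negotiation finished */
    struct mech_ctx *inner;               /* never handed out in place of self   */
};
#define SPNEGO_MAGIC 0x53504e47u

static struct spnego_ctx *spnego_create(int initiate)
{
    struct spnego_ctx *sc = calloc(1, sizeof(*sc));
    if (!sc)
        return NULL;
    sc->magic = SPNEGO_MAGIC;
    sc->initiate = initiate;
    return sc;
}

/* inquire() works even before the inner mechanism exists, because it only
 * reads the wrapper's own fields instead of casting sc->inner blindly. */
static void inquire(const struct spnego_ctx *sc)
{
    if (!sc || sc->magic != SPNEGO_MAGIC) {
        puts("bad handle");
        return;
    }
    printf("initiator=%d open=%d\n", sc->initiate, sc->opened);
}

int main(void)
{
    struct spnego_ctx *sc = spnego_create(1);
    inquire(sc);                          /* safe mid-negotiation */
    free(sc);
    return 0;
}
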
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
struct sk_buff *skb;
int copied, error = -EINVAL;
msg->msg_namelen = 0;
if (sock->state != SS_CONNECTED)
return -ENOTCONN;
/* only handle MSG_DONTWAIT and MSG_PEEK */
if (flags & ~(MSG_DONTWAIT | MSG_PEEK))
return -EOPNOTSUPP;
vcc = ATM_SD(sock);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags))
return 0;
skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error);
if (!skb)
return error;
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (error)
return error;
sock_recv_ts_and_drops(msg, sk, skb);
if (!(flags & MSG_PEEK)) {
pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc),
skb->truesize);
atm_return(vcc, skb->truesize);
}
skb_free_datagram(sk, skb);
return copied;
}
Commit Message: net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-20 | int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
struct sk_buff *skb;
int copied, error = -EINVAL;
if (sock->state != SS_CONNECTED)
return -ENOTCONN;
/* only handle MSG_DONTWAIT and MSG_PEEK */
if (flags & ~(MSG_DONTWAIT | MSG_PEEK))
return -EOPNOTSUPP;
vcc = ATM_SD(sock);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags))
return 0;
skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error);
if (!skb)
return error;
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (error)
return error;
sock_recv_ts_and_drops(msg, sk, skb);
if (!(flags & MSG_PEEK)) {
pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc),
skb->truesize);
atm_return(vcc, skb->truesize);
}
skb_free_datagram(sk, skb);
return copied;
}
| 17,688 |
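
The commit message above defines the new recvmsg contract: the socket core pre-sets msg_namelen to 0 and a handler only touches it when it actually fills msg_name, which is why the fixed vcc_recvmsg simply deletes its msg->msg_namelen = 0 line. A user-space imitation of that convention; fake_msghdr and the two handlers are invented for illustration and do not mirror the kernel structures:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

struct fake_msghdr {
    void *msg_name;        /* NULL when the caller did not ask for an address */
    int   msg_namelen;     /* core sets this to 0 before calling the handler  */
};

/* Handler rule after the rework: leave msg_namelen alone unless you fill
 * msg_name, and then set it to the exact number of valid bytes (at most
 * sizeof(struct sockaddr_storage)), so uninitialised stack never leaks. */
static void handler_with_address(struct fake_msghdr *msg)
{
    if (msg->msg_name) {
        struct sockaddr_in *sin = msg->msg_name;
        memset(sin, 0, sizeof(*sin));
        sin->sin_family = AF_INET;
        msg->msg_namelen = sizeof(*sin);
    }
}

static void handler_without_address(struct fake_msghdr *msg)
{
    (void)msg;             /* nothing to report: msg_namelen stays 0 */
}

int main(void)
{
    struct sockaddr_storage ss;
    struct fake_msghdr m1 = { &ss, 0 }, m2 = { NULL, 0 };

    handler_with_address(&m1);
    handler_without_address(&m2);
    printf("with address: namelen=%d, without: namelen=%d\n",
           m1.msg_namelen, m2.msg_namelen);
    return 0;
}
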
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
int listener)
{
struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);
if (!cma_xprt)
return NULL;
svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
INIT_LIST_HEAD(&cma_xprt->sc_ctxts);
INIT_LIST_HEAD(&cma_xprt->sc_maps);
init_waitqueue_head(&cma_xprt->sc_send_wait);
spin_lock_init(&cma_xprt->sc_lock);
spin_lock_init(&cma_xprt->sc_rq_dto_lock);
spin_lock_init(&cma_xprt->sc_frmr_q_lock);
spin_lock_init(&cma_xprt->sc_ctxt_lock);
spin_lock_init(&cma_xprt->sc_map_lock);
/*
* Note that this implies that the underlying transport support
* has some form of congestion control (see RFC 7530 section 3.1
* paragraph 2). For now, we assume that all supported RDMA
* transports are suitable here.
*/
set_bit(XPT_CONG_CTRL, &cma_xprt->sc_xprt.xpt_flags);
if (listener)
set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
return cma_xprt;
}
Commit Message: Merge tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux
Pull nfsd updates from Bruce Fields:
"Another RDMA update from Chuck Lever, and a bunch of miscellaneous
bugfixes"
* tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux: (26 commits)
nfsd: Fix up the "supattr_exclcreat" attributes
nfsd: encoders mustn't use unitialized values in error cases
nfsd: fix undefined behavior in nfsd4_layout_verify
lockd: fix lockd shutdown race
NFSv4: Fix callback server shutdown
SUNRPC: Refactor svc_set_num_threads()
NFSv4.x/callback: Create the callback service through svc_create_pooled
lockd: remove redundant check on block
svcrdma: Clean out old XDR encoders
svcrdma: Remove the req_map cache
svcrdma: Remove unused RDMA Write completion handler
svcrdma: Reduce size of sge array in struct svc_rdma_op_ctxt
svcrdma: Clean up RPC-over-RDMA backchannel reply processing
svcrdma: Report Write/Reply chunk overruns
svcrdma: Clean up RDMA_ERROR path
svcrdma: Use rdma_rw API in RPC reply path
svcrdma: Introduce local rdma_rw API helpers
svcrdma: Clean up svc_rdma_get_inv_rkey()
svcrdma: Add helper to save pages under I/O
svcrdma: Eliminate RPCRDMA_SQ_DEPTH_MULT
...
CWE ID: CWE-404 | static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
int listener)
{
struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);
if (!cma_xprt)
return NULL;
svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
INIT_LIST_HEAD(&cma_xprt->sc_ctxts);
INIT_LIST_HEAD(&cma_xprt->sc_rw_ctxts);
init_waitqueue_head(&cma_xprt->sc_send_wait);
spin_lock_init(&cma_xprt->sc_lock);
spin_lock_init(&cma_xprt->sc_rq_dto_lock);
spin_lock_init(&cma_xprt->sc_frmr_q_lock);
spin_lock_init(&cma_xprt->sc_ctxt_lock);
spin_lock_init(&cma_xprt->sc_rw_ctxt_lock);
/*
* Note that this implies that the underlying transport support
* has some form of congestion control (see RFC 7530 section 3.1
* paragraph 2). For now, we assume that all supported RDMA
* transports are suitable here.
*/
set_bit(XPT_CONG_CTRL, &cma_xprt->sc_xprt.xpt_flags);
if (listener)
set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
return cma_xprt;
}
| 3,659 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct rxrpc_skb_priv *sp;
struct rxrpc_call *call = NULL, *continue_call = NULL;
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
struct sk_buff *skb;
long timeo;
int copy, ret, ullen, offset, copied = 0;
u32 abort_code;
DEFINE_WAIT(wait);
_enter(",,,%zu,%d", len, flags);
if (flags & (MSG_OOB | MSG_TRUNC))
return -EOPNOTSUPP;
ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);
timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
msg->msg_flags |= MSG_MORE;
lock_sock(&rx->sk);
for (;;) {
/* return immediately if a client socket has no outstanding
* calls */
if (RB_EMPTY_ROOT(&rx->calls)) {
if (copied)
goto out;
if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
release_sock(&rx->sk);
if (continue_call)
rxrpc_put_call(continue_call);
return -ENODATA;
}
}
/* get the next message on the Rx queue */
skb = skb_peek(&rx->sk.sk_receive_queue);
if (!skb) {
/* nothing remains on the queue */
if (copied &&
(msg->msg_flags & MSG_PEEK || timeo == 0))
goto out;
/* wait for a message to turn up */
release_sock(&rx->sk);
prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
TASK_INTERRUPTIBLE);
ret = sock_error(&rx->sk);
if (ret)
goto wait_error;
if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
if (signal_pending(current))
goto wait_interrupted;
timeo = schedule_timeout(timeo);
}
finish_wait(sk_sleep(&rx->sk), &wait);
lock_sock(&rx->sk);
continue;
}
peek_next_packet:
sp = rxrpc_skb(skb);
call = sp->call;
ASSERT(call != NULL);
_debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);
/* make sure we wait for the state to be updated in this call */
spin_lock_bh(&call->lock);
spin_unlock_bh(&call->lock);
if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
_debug("packet from released call");
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
BUG();
rxrpc_free_skb(skb);
continue;
}
/* determine whether to continue last data receive */
if (continue_call) {
_debug("maybe cont");
if (call != continue_call ||
skb->mark != RXRPC_SKB_MARK_DATA) {
release_sock(&rx->sk);
rxrpc_put_call(continue_call);
_leave(" = %d [noncont]", copied);
return copied;
}
}
rxrpc_get_call(call);
/* copy the peer address and timestamp */
if (!continue_call) {
if (msg->msg_name && msg->msg_namelen > 0)
memcpy(msg->msg_name,
&call->conn->trans->peer->srx,
sizeof(call->conn->trans->peer->srx));
sock_recv_ts_and_drops(msg, &rx->sk, skb);
}
/* receive the message */
if (skb->mark != RXRPC_SKB_MARK_DATA)
goto receive_non_data_message;
_debug("recvmsg DATA #%u { %d, %d }",
ntohl(sp->hdr.seq), skb->len, sp->offset);
if (!continue_call) {
/* only set the control data once per recvmsg() */
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
ullen, &call->user_call_ID);
if (ret < 0)
goto copy_error;
ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
}
ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
call->rx_data_recv = ntohl(sp->hdr.seq);
ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);
offset = sp->offset;
copy = skb->len - offset;
if (copy > len - copied)
copy = len - copied;
if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
ret = skb_copy_datagram_iovec(skb, offset,
msg->msg_iov, copy);
} else {
ret = skb_copy_and_csum_datagram_iovec(skb, offset,
msg->msg_iov);
if (ret == -EINVAL)
goto csum_copy_error;
}
if (ret < 0)
goto copy_error;
/* handle piecemeal consumption of data packets */
_debug("copied %d+%d", copy, copied);
offset += copy;
copied += copy;
if (!(flags & MSG_PEEK))
sp->offset = offset;
if (sp->offset < skb->len) {
_debug("buffer full");
ASSERTCMP(copied, ==, len);
break;
}
/* we transferred the whole data packet */
if (sp->hdr.flags & RXRPC_LAST_PACKET) {
_debug("last");
if (call->conn->out_clientflag) {
/* last byte of reply received */
ret = copied;
goto terminal_message;
}
/* last bit of request received */
if (!(flags & MSG_PEEK)) {
_debug("eat packet");
if (skb_dequeue(&rx->sk.sk_receive_queue) !=
skb)
BUG();
rxrpc_free_skb(skb);
}
msg->msg_flags &= ~MSG_MORE;
break;
}
/* move on to the next data message */
_debug("next");
if (!continue_call)
continue_call = sp->call;
else
rxrpc_put_call(call);
call = NULL;
if (flags & MSG_PEEK) {
_debug("peek next");
skb = skb->next;
if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue)
break;
goto peek_next_packet;
}
_debug("eat packet");
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
BUG();
rxrpc_free_skb(skb);
}
/* end of non-terminal data packet reception for the moment */
_debug("end rcv data");
out:
release_sock(&rx->sk);
if (call)
rxrpc_put_call(call);
if (continue_call)
rxrpc_put_call(continue_call);
_leave(" = %d [data]", copied);
return copied;
/* handle non-DATA messages such as aborts, incoming connections and
* final ACKs */
receive_non_data_message:
_debug("non-data");
if (skb->mark == RXRPC_SKB_MARK_NEW_CALL) {
_debug("RECV NEW CALL");
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &abort_code);
if (ret < 0)
goto copy_error;
if (!(flags & MSG_PEEK)) {
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
BUG();
rxrpc_free_skb(skb);
}
goto out;
}
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
ullen, &call->user_call_ID);
if (ret < 0)
goto copy_error;
ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
switch (skb->mark) {
case RXRPC_SKB_MARK_DATA:
BUG();
case RXRPC_SKB_MARK_FINAL_ACK:
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &abort_code);
break;
case RXRPC_SKB_MARK_BUSY:
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_BUSY, 0, &abort_code);
break;
case RXRPC_SKB_MARK_REMOTE_ABORT:
abort_code = call->abort_code;
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code);
break;
case RXRPC_SKB_MARK_NET_ERROR:
_debug("RECV NET ERROR %d", sp->error);
abort_code = sp->error;
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &abort_code);
break;
case RXRPC_SKB_MARK_LOCAL_ERROR:
_debug("RECV LOCAL ERROR %d", sp->error);
abort_code = sp->error;
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4,
&abort_code);
break;
default:
BUG();
break;
}
if (ret < 0)
goto copy_error;
terminal_message:
_debug("terminal");
msg->msg_flags &= ~MSG_MORE;
msg->msg_flags |= MSG_EOR;
if (!(flags & MSG_PEEK)) {
_net("free terminal skb %p", skb);
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
BUG();
rxrpc_free_skb(skb);
rxrpc_remove_user_ID(rx, call);
}
release_sock(&rx->sk);
rxrpc_put_call(call);
if (continue_call)
rxrpc_put_call(continue_call);
_leave(" = %d", ret);
return ret;
copy_error:
_debug("copy error");
release_sock(&rx->sk);
rxrpc_put_call(call);
if (continue_call)
rxrpc_put_call(continue_call);
_leave(" = %d", ret);
return ret;
csum_copy_error:
_debug("csum error");
release_sock(&rx->sk);
if (continue_call)
rxrpc_put_call(continue_call);
rxrpc_kill_skb(skb);
skb_kill_datagram(&rx->sk, skb, flags);
rxrpc_put_call(call);
return -EAGAIN;
wait_interrupted:
ret = sock_intr_errno(timeo);
wait_error:
finish_wait(sk_sleep(&rx->sk), &wait);
if (continue_call)
rxrpc_put_call(continue_call);
if (copied)
copied = ret;
_leave(" = %d [waitfail %d]", copied, ret);
return copied;
}
Commit Message: net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-20 | int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct rxrpc_skb_priv *sp;
struct rxrpc_call *call = NULL, *continue_call = NULL;
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
struct sk_buff *skb;
long timeo;
int copy, ret, ullen, offset, copied = 0;
u32 abort_code;
DEFINE_WAIT(wait);
_enter(",,,%zu,%d", len, flags);
if (flags & (MSG_OOB | MSG_TRUNC))
return -EOPNOTSUPP;
ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);
timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
msg->msg_flags |= MSG_MORE;
lock_sock(&rx->sk);
for (;;) {
/* return immediately if a client socket has no outstanding
* calls */
if (RB_EMPTY_ROOT(&rx->calls)) {
if (copied)
goto out;
if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
release_sock(&rx->sk);
if (continue_call)
rxrpc_put_call(continue_call);
return -ENODATA;
}
}
/* get the next message on the Rx queue */
skb = skb_peek(&rx->sk.sk_receive_queue);
if (!skb) {
/* nothing remains on the queue */
if (copied &&
(msg->msg_flags & MSG_PEEK || timeo == 0))
goto out;
/* wait for a message to turn up */
release_sock(&rx->sk);
prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
TASK_INTERRUPTIBLE);
ret = sock_error(&rx->sk);
if (ret)
goto wait_error;
if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
if (signal_pending(current))
goto wait_interrupted;
timeo = schedule_timeout(timeo);
}
finish_wait(sk_sleep(&rx->sk), &wait);
lock_sock(&rx->sk);
continue;
}
peek_next_packet:
sp = rxrpc_skb(skb);
call = sp->call;
ASSERT(call != NULL);
_debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);
/* make sure we wait for the state to be updated in this call */
spin_lock_bh(&call->lock);
spin_unlock_bh(&call->lock);
if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
_debug("packet from released call");
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
BUG();
rxrpc_free_skb(skb);
continue;
}
/* determine whether to continue last data receive */
if (continue_call) {
_debug("maybe cont");
if (call != continue_call ||
skb->mark != RXRPC_SKB_MARK_DATA) {
release_sock(&rx->sk);
rxrpc_put_call(continue_call);
_leave(" = %d [noncont]", copied);
return copied;
}
}
rxrpc_get_call(call);
/* copy the peer address and timestamp */
if (!continue_call) {
if (msg->msg_name) {
size_t len =
sizeof(call->conn->trans->peer->srx);
memcpy(msg->msg_name,
&call->conn->trans->peer->srx, len);
msg->msg_namelen = len;
}
sock_recv_ts_and_drops(msg, &rx->sk, skb);
}
/* receive the message */
if (skb->mark != RXRPC_SKB_MARK_DATA)
goto receive_non_data_message;
_debug("recvmsg DATA #%u { %d, %d }",
ntohl(sp->hdr.seq), skb->len, sp->offset);
if (!continue_call) {
/* only set the control data once per recvmsg() */
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
ullen, &call->user_call_ID);
if (ret < 0)
goto copy_error;
ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
}
ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
call->rx_data_recv = ntohl(sp->hdr.seq);
ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);
offset = sp->offset;
copy = skb->len - offset;
if (copy > len - copied)
copy = len - copied;
if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
ret = skb_copy_datagram_iovec(skb, offset,
msg->msg_iov, copy);
} else {
ret = skb_copy_and_csum_datagram_iovec(skb, offset,
msg->msg_iov);
if (ret == -EINVAL)
goto csum_copy_error;
}
if (ret < 0)
goto copy_error;
/* handle piecemeal consumption of data packets */
_debug("copied %d+%d", copy, copied);
offset += copy;
copied += copy;
if (!(flags & MSG_PEEK))
sp->offset = offset;
if (sp->offset < skb->len) {
_debug("buffer full");
ASSERTCMP(copied, ==, len);
break;
}
/* we transferred the whole data packet */
if (sp->hdr.flags & RXRPC_LAST_PACKET) {
_debug("last");
if (call->conn->out_clientflag) {
/* last byte of reply received */
ret = copied;
goto terminal_message;
}
/* last bit of request received */
if (!(flags & MSG_PEEK)) {
_debug("eat packet");
if (skb_dequeue(&rx->sk.sk_receive_queue) !=
skb)
BUG();
rxrpc_free_skb(skb);
}
msg->msg_flags &= ~MSG_MORE;
break;
}
/* move on to the next data message */
_debug("next");
if (!continue_call)
continue_call = sp->call;
else
rxrpc_put_call(call);
call = NULL;
if (flags & MSG_PEEK) {
_debug("peek next");
skb = skb->next;
if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue)
break;
goto peek_next_packet;
}
_debug("eat packet");
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
BUG();
rxrpc_free_skb(skb);
}
/* end of non-terminal data packet reception for the moment */
_debug("end rcv data");
out:
release_sock(&rx->sk);
if (call)
rxrpc_put_call(call);
if (continue_call)
rxrpc_put_call(continue_call);
_leave(" = %d [data]", copied);
return copied;
/* handle non-DATA messages such as aborts, incoming connections and
* final ACKs */
receive_non_data_message:
_debug("non-data");
if (skb->mark == RXRPC_SKB_MARK_NEW_CALL) {
_debug("RECV NEW CALL");
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &abort_code);
if (ret < 0)
goto copy_error;
if (!(flags & MSG_PEEK)) {
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
BUG();
rxrpc_free_skb(skb);
}
goto out;
}
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
ullen, &call->user_call_ID);
if (ret < 0)
goto copy_error;
ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
switch (skb->mark) {
case RXRPC_SKB_MARK_DATA:
BUG();
case RXRPC_SKB_MARK_FINAL_ACK:
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &abort_code);
break;
case RXRPC_SKB_MARK_BUSY:
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_BUSY, 0, &abort_code);
break;
case RXRPC_SKB_MARK_REMOTE_ABORT:
abort_code = call->abort_code;
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code);
break;
case RXRPC_SKB_MARK_NET_ERROR:
_debug("RECV NET ERROR %d", sp->error);
abort_code = sp->error;
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &abort_code);
break;
case RXRPC_SKB_MARK_LOCAL_ERROR:
_debug("RECV LOCAL ERROR %d", sp->error);
abort_code = sp->error;
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4,
&abort_code);
break;
default:
BUG();
break;
}
if (ret < 0)
goto copy_error;
terminal_message:
_debug("terminal");
msg->msg_flags &= ~MSG_MORE;
msg->msg_flags |= MSG_EOR;
if (!(flags & MSG_PEEK)) {
_net("free terminal skb %p", skb);
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
BUG();
rxrpc_free_skb(skb);
rxrpc_remove_user_ID(rx, call);
}
release_sock(&rx->sk);
rxrpc_put_call(call);
if (continue_call)
rxrpc_put_call(continue_call);
_leave(" = %d", ret);
return ret;
copy_error:
_debug("copy error");
release_sock(&rx->sk);
rxrpc_put_call(call);
if (continue_call)
rxrpc_put_call(continue_call);
_leave(" = %d", ret);
return ret;
csum_copy_error:
_debug("csum error");
release_sock(&rx->sk);
if (continue_call)
rxrpc_put_call(continue_call);
rxrpc_kill_skb(skb);
skb_kill_datagram(&rx->sk, skb, flags);
rxrpc_put_call(call);
return -EAGAIN;
wait_interrupted:
ret = sock_intr_errno(timeo);
wait_error:
finish_wait(sk_sleep(&rx->sk), &wait);
if (continue_call)
rxrpc_put_call(continue_call);
if (copied)
copied = ret;
_leave(" = %d [waitfail %d]", copied, ret);
return copied;
}
| 12,312 |
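The rework described in the commit message above reduces to a simple contract: the socket core clears msg_namelen before calling the handler, and the handler sets it only when it actually copies an address into msg_name. Below is a minimal userspace sketch of that contract; the struct names and the helper are stand-ins invented for illustration, not code from net/rxrpc.

#include <stddef.h>
#include <string.h>

/* Stand-ins for the kernel's struct msghdr / struct sockaddr_storage. */
struct peer_addr  { unsigned char bytes[128]; };
struct toy_msghdr { void *msg_name; size_t msg_namelen; };

/* The caller hands in msg_namelen == 0.  The handler reports a length only
 * for bytes it really wrote, so uninitialized stack contents can never be
 * returned to user space as an "address". */
static void report_peer_address(struct toy_msghdr *msg, const struct peer_addr *peer)
{
    if (msg->msg_name) {
        memcpy(msg->msg_name, peer, sizeof(*peer));
        msg->msg_namelen = sizeof(*peer);
    }
    /* else: nothing copied, msg_namelen stays 0 */
}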
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
u32 off, u32 cnt)
{
struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
if (cnt == 1)
return 0;
new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
if (!new_data)
return -ENOMEM;
memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
memcpy(new_data + off + cnt - 1, old_data + off,
sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
env->insn_aux_data = new_data;
vfree(old_data);
return 0;
}
Commit Message: bpf: fix branch pruning logic
when the verifier detects that register contains a runtime constant
and it's compared with another constant it will prune exploration
of the branch that is guaranteed not to be taken at runtime.
This is all correct, but malicious program may be constructed
in such a way that it always has a constant comparison and
the other branch is never taken under any conditions.
In this case such path through the program will not be explored
by the verifier. It won't be taken at run-time either, but since
all instructions are JITed the malicious program may cause JITs
to complain about using reserved fields, etc.
To fix the issue we have to track the instructions explored by
the verifier and sanitize instructions that are dead at run time
with NOPs. We cannot reject such dead code, since llvm generates
it for valid C code, since it doesn't do as much data flow
analysis as the verifier does.
Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)")
Signed-off-by: Alexei Starovoitov <[email protected]>
Acked-by: Daniel Borkmann <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
CWE ID: CWE-20 | static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
u32 off, u32 cnt)
{
struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
int i;
if (cnt == 1)
return 0;
new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
if (!new_data)
return -ENOMEM;
memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
memcpy(new_data + off + cnt - 1, old_data + off,
sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
for (i = off; i < off + cnt - 1; i++)
new_data[i].seen = true;
env->insn_aux_data = new_data;
vfree(old_data);
return 0;
}
| 13,747 |
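The branch-pruning fix has two parts: every instruction the verifier walks is flagged as seen (the loop added to adjust_insn_aux_data above keeps that flag intact when instructions are patched in), and a later pass rewrites never-seen instructions as no-ops so the JIT is never handed unverified code. A simplified sketch of that second pass, with invented structure names and a hypothetical NOP encoding:

#include <stdbool.h>
#include <stddef.h>

/* Simplified stand-ins for the verifier's per-instruction metadata. */
struct insn_aux { bool seen; };
struct insn     { unsigned char opcode; };

#define OP_NOP 0x00   /* hypothetical "no operation" encoding */

/* Any instruction the verifier never reached is unreachable at run time;
 * turning it into a NOP keeps the JIT from translating unchecked bytes. */
static void sanitize_dead_code(struct insn *prog, const struct insn_aux *aux,
                               size_t insn_cnt)
{
    for (size_t i = 0; i < insn_cnt; i++)
        if (!aux[i].seen)
            prog[i].opcode = OP_NOP;
}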
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
skb_orphan(skb);
if (!(dev->flags & IFF_UP))
return NET_RX_DROP;
if (skb->len > (dev->mtu + dev->hard_header_len))
return NET_RX_DROP;
skb_set_dev(skb, dev);
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, dev);
return netif_rx(skb);
}
Commit Message: veth: Dont kfree_skb() after dev_forward_skb()
In case of congestion, netif_rx() frees the skb, so we must assume
dev_forward_skb() also consume skb.
Bug introduced by commit 445409602c092
(veth: move loopback logic to common location)
We must change dev_forward_skb() to always consume skb, and veth to not
double free it.
Bug report : http://marc.info/?l=linux-netdev&m=127310770900442&w=3
Reported-by: Martín Ferrari <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-399 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
skb_orphan(skb);
if (!(dev->flags & IFF_UP) ||
(skb->len > (dev->mtu + dev->hard_header_len))) {
kfree_skb(skb);
return NET_RX_DROP;
}
skb_set_dev(skb, dev);
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, dev);
return netif_rx(skb);
}
| 11,212 |
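The fix above changes an ownership rule: because netif_rx() already frees the buffer on congestion, dev_forward_skb() must also free it on its own drop path, and the caller (veth) must stop calling kfree_skb() after a failed forward. A toy C sketch of that "callee always consumes" convention, using invented packet helpers rather than real kernel APIs:

#include <stdlib.h>

struct pkt { unsigned char *data; size_t len; };

static void consume_pkt(struct pkt *p) { free(p->data); free(p); }

/* The forwarding helper owns the packet on every path: it frees it when it
 * drops, and the receive path it hands off to frees it as well.  Callers
 * therefore never free the packet again, avoiding the double free. */
static int forward_pkt(struct pkt *p, int link_up, size_t mtu)
{
    if (!link_up || p->len > mtu) {
        consume_pkt(p);      /* drop path consumes the packet itself */
        return -1;
    }
    consume_pkt(p);          /* stand-in for handing off to netif_rx() */
    return 0;
}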
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: GDataEntry* GDataFile::FromDocumentEntry(
GDataDirectory* parent,
DocumentEntry* doc,
GDataDirectoryService* directory_service) {
DCHECK(doc->is_hosted_document() || doc->is_file());
GDataFile* file = new GDataFile(parent, directory_service);
file->title_ = UTF16ToUTF8(doc->title());
if (doc->is_file()) {
file->file_info_.size = doc->file_size();
file->file_md5_ = doc->file_md5();
const Link* upload_link = doc->GetLinkByType(Link::RESUMABLE_EDIT_MEDIA);
if (upload_link)
file->upload_url_ = upload_link->href();
} else {
file->document_extension_ = doc->GetHostedDocumentExtension();
file->file_info_.size = 0;
}
file->kind_ = doc->kind();
const Link* edit_link = doc->GetLinkByType(Link::EDIT);
if (edit_link)
file->edit_url_ = edit_link->href();
file->content_url_ = doc->content_url();
file->content_mime_type_ = doc->content_mime_type();
file->resource_id_ = doc->resource_id();
file->is_hosted_document_ = doc->is_hosted_document();
file->file_info_.last_modified = doc->updated_time();
file->file_info_.last_accessed = doc->updated_time();
file->file_info_.creation_time = doc->published_time();
file->deleted_ = doc->deleted();
const Link* parent_link = doc->GetLinkByType(Link::PARENT);
if (parent_link)
file->parent_resource_id_ = ExtractResourceId(parent_link->href());
file->SetBaseNameFromTitle();
const Link* thumbnail_link = doc->GetLinkByType(Link::THUMBNAIL);
if (thumbnail_link)
file->thumbnail_url_ = thumbnail_link->href();
const Link* alternate_link = doc->GetLinkByType(Link::ALTERNATE);
if (alternate_link)
file->alternate_url_ = alternate_link->href();
return file;
}
Commit Message: Remove parent* arg from GDataEntry ctor.
* Remove static FromDocumentEntry from GDataEntry, GDataFile, GDataDirectory. Replace with InitFromDocumentEntry.
* Move common code from GDataFile::InitFromDocumentEntry and GDataDirectory::InitFromDocumentEntry to GDataEntry::InitFromDocumentEntry.
* Add GDataDirectoryService::FromDocumentEntry and use this everywhere.
* Make ctors of GDataFile, GDataDirectory private, so these must be created by GDataDirectoryService's CreateGDataFile and
CreateGDataDirectory. Make GDataEntry ctor protected.
BUG=141494
TEST=unit tests.
Review URL: https://chromiumcodereview.appspot.com/10854083
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@151008 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | void GDataFile::InitFromDocumentEntry(DocumentEntry* doc) {
GDataEntry::InitFromDocumentEntry(doc);
if (doc->is_file()) {
file_info_.size = doc->file_size();
file_md5_ = doc->file_md5();
const Link* upload_link = doc->GetLinkByType(Link::RESUMABLE_EDIT_MEDIA);
if (upload_link)
upload_url_ = upload_link->href();
} else {
document_extension_ = doc->GetHostedDocumentExtension();
file_info_.size = 0;
}
kind_ = doc->kind();
content_mime_type_ = doc->content_mime_type();
is_hosted_document_ = doc->is_hosted_document();
SetBaseNameFromTitle();
const Link* thumbnail_link = doc->GetLinkByType(Link::THUMBNAIL);
if (thumbnail_link)
thumbnail_url_ = thumbnail_link->href();
const Link* alternate_link = doc->GetLinkByType(Link::ALTERNATE);
if (alternate_link)
alternate_url_ = alternate_link->href();
}
| 2,056 |
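The Chromium change above is a structural refactor: the per-class static FromDocumentEntry() builders become an InitFromDocumentEntry() step, and creation is funnelled through the directory service. A rough C analogue of that shape, with all names invented for illustration:

#include <stdlib.h>

struct doc_entry { int kind; long size; };
struct fs_entry  { int kind; long size; };

/* Initialization is now a separate step applied to an already-allocated
 * object, instead of a static builder on each subclass. */
static void fs_entry_init_from_doc(struct fs_entry *e, const struct doc_entry *d)
{
    e->kind = d->kind;
    e->size = d->size;
}

/* Only the central "directory service" creates entries, so every object
 * goes through the same allocation + init path. */
static struct fs_entry *directory_service_create(const struct doc_entry *d)
{
    struct fs_entry *e = calloc(1, sizeof(*e));
    if (e)
        fs_entry_init_from_doc(e, d);
    return e;
}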
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool GLES2DecoderPassthroughImpl::IsEmulatedQueryTarget(GLenum target) const {
switch (target) {
case GL_COMMANDS_COMPLETED_CHROMIUM:
case GL_READBACK_SHADOW_COPIES_UPDATED_CHROMIUM:
case GL_COMMANDS_ISSUED_CHROMIUM:
case GL_LATENCY_QUERY_CHROMIUM:
case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM:
case GL_GET_ERROR_QUERY_CHROMIUM:
return true;
default:
return false;
}
}
Commit Message: Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM
This makes the query of GL_COMPLETION_STATUS_KHR to programs much
cheaper by minimizing the round-trip to the GPU thread.
Bug: 881152, 957001
Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630
Commit-Queue: Kenneth Russell <[email protected]>
Reviewed-by: Kentaro Hara <[email protected]>
Reviewed-by: Geoff Lang <[email protected]>
Reviewed-by: Kenneth Russell <[email protected]>
Cr-Commit-Position: refs/heads/master@{#657568}
CWE ID: CWE-416 | bool GLES2DecoderPassthroughImpl::IsEmulatedQueryTarget(GLenum target) const {
switch (target) {
case GL_COMMANDS_COMPLETED_CHROMIUM:
case GL_READBACK_SHADOW_COPIES_UPDATED_CHROMIUM:
case GL_COMMANDS_ISSUED_CHROMIUM:
case GL_LATENCY_QUERY_CHROMIUM:
case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM:
case GL_GET_ERROR_QUERY_CHROMIUM:
case GL_PROGRAM_COMPLETION_QUERY_CHROMIUM:
return true;
default:
return false;
}
}
| 2,440 |
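The fix above is small: program-completion queries join the set of query targets the passthrough decoder answers itself, which spares GL_COMPLETION_STATUS_KHR a round trip to the GPU thread. A sketch of that classification pattern, with invented enum values standing in for the GL_*_CHROMIUM targets:

#include <stdbool.h>

enum query_target {
    QT_COMMANDS_ISSUED,
    QT_GET_ERROR,
    QT_PROGRAM_COMPLETION,   /* newly emulated target in the fix */
    QT_SAMPLES_PASSED        /* still handled by the driver */
};

/* Targets listed in the switch are serviced locally by the decoder; every
 * other target is forwarded to the underlying GL implementation. */
static bool is_emulated_query_target(enum query_target t)
{
    switch (t) {
    case QT_COMMANDS_ISSUED:
    case QT_GET_ERROR:
    case QT_PROGRAM_COMPLETION:
        return true;
    default:
        return false;
    }
}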
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool CreateIpcChannel(
const std::string& channel_name,
const std::string& pipe_security_descriptor,
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
IPC::Listener* delegate,
scoped_ptr<IPC::ChannelProxy>* channel_out) {
SECURITY_ATTRIBUTES security_attributes;
security_attributes.nLength = sizeof(security_attributes);
security_attributes.bInheritHandle = FALSE;
ULONG security_descriptor_length = 0;
if (!ConvertStringSecurityDescriptorToSecurityDescriptor(
UTF8ToUTF16(pipe_security_descriptor).c_str(),
SDDL_REVISION_1,
reinterpret_cast<PSECURITY_DESCRIPTOR*>(
&security_attributes.lpSecurityDescriptor),
&security_descriptor_length)) {
LOG_GETLASTERROR(ERROR) <<
"Failed to create a security descriptor for the Chromoting IPC channel";
return false;
}
std::string pipe_name(kChromePipeNamePrefix);
pipe_name.append(channel_name);
base::win::ScopedHandle pipe;
pipe.Set(CreateNamedPipe(
UTF8ToUTF16(pipe_name).c_str(),
PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED | FILE_FLAG_FIRST_PIPE_INSTANCE,
PIPE_TYPE_BYTE | PIPE_READMODE_BYTE,
1,
IPC::Channel::kReadBufferSize,
IPC::Channel::kReadBufferSize,
5000,
&security_attributes));
if (!pipe.IsValid()) {
LOG_GETLASTERROR(ERROR) <<
"Failed to create the server end of the Chromoting IPC channel";
LocalFree(security_attributes.lpSecurityDescriptor);
return false;
}
LocalFree(security_attributes.lpSecurityDescriptor);
channel_out->reset(new IPC::ChannelProxy(
IPC::ChannelHandle(pipe),
IPC::Channel::MODE_SERVER,
delegate,
io_task_runner));
return true;
}
Commit Message: Validate and report peer's PID to WorkerProcessIpcDelegate so it will be able to duplicate handles to and from the worker process.
As a side effect WorkerProcessLauncher::Delegate is now responsible for retrieving the client's PID and deciding whether a launch failed due to a permanent error condition.
BUG=134694
Review URL: https://chromiumcodereview.appspot.com/11143025
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@162778 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | bool CreateIpcChannel(
const std::string& channel_name,
const std::string& pipe_security_descriptor,
base::win::ScopedHandle* pipe_out) {
SECURITY_ATTRIBUTES security_attributes;
security_attributes.nLength = sizeof(security_attributes);
security_attributes.bInheritHandle = FALSE;
ULONG security_descriptor_length = 0;
if (!ConvertStringSecurityDescriptorToSecurityDescriptor(
UTF8ToUTF16(pipe_security_descriptor).c_str(),
SDDL_REVISION_1,
reinterpret_cast<PSECURITY_DESCRIPTOR*>(
&security_attributes.lpSecurityDescriptor),
&security_descriptor_length)) {
LOG_GETLASTERROR(ERROR) <<
"Failed to create a security descriptor for the Chromoting IPC channel";
return false;
}
std::string pipe_name(kChromePipeNamePrefix);
pipe_name.append(channel_name);
base::win::ScopedHandle pipe;
pipe.Set(CreateNamedPipe(
UTF8ToUTF16(pipe_name).c_str(),
PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED | FILE_FLAG_FIRST_PIPE_INSTANCE,
PIPE_TYPE_BYTE | PIPE_READMODE_BYTE,
1,
IPC::Channel::kReadBufferSize,
IPC::Channel::kReadBufferSize,
5000,
&security_attributes));
if (!pipe.IsValid()) {
LOG_GETLASTERROR(ERROR) <<
"Failed to create the server end of the Chromoting IPC channel";
LocalFree(security_attributes.lpSecurityDescriptor);
return false;
}
LocalFree(security_attributes.lpSecurityDescriptor);
*pipe_out = pipe.Pass();
return true;
}
| 10,446 |
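After the refactor above, CreateIpcChannel() only creates the named pipe and hands the raw handle back, so the caller can validate the connecting peer (for example its PID) before wrapping the handle in a channel proxy. A rough POSIX-flavoured sketch of that split, with invented names; the real code uses CreateNamedPipe on Windows:

#include <fcntl.h>
#include <stdbool.h>

/* The helper only creates the endpoint and transfers ownership of the raw
 * descriptor; peer validation and channel setup stay with the caller. */
static bool create_ipc_endpoint(const char *path, int *fd_out)
{
    int fd = open(path, O_RDWR | O_CREAT, 0600);  /* stand-in for CreateNamedPipe */
    if (fd < 0)
        return false;
    *fd_out = fd;
    return true;
}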
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: UINT32 UIPC_Read(tUIPC_CH_ID ch_id, UINT16 *p_msg_evt, UINT8 *p_buf, UINT32 len)
{
int n;
int n_read = 0;
int fd = uipc_main.ch[ch_id].fd;
struct pollfd pfd;
UNUSED(p_msg_evt);
if (ch_id >= UIPC_CH_NUM)
{
BTIF_TRACE_ERROR("UIPC_Read : invalid ch id %d", ch_id);
return 0;
}
if (fd == UIPC_DISCONNECTED)
{
BTIF_TRACE_ERROR("UIPC_Read : channel %d closed", ch_id);
return 0;
}
while (n_read < (int)len)
{
pfd.fd = fd;
pfd.events = POLLIN|POLLHUP;
/* make sure there is data prior to attempting read to avoid blocking
a read for more than poll timeout */
if (poll(&pfd, 1, uipc_main.ch[ch_id].read_poll_tmo_ms) == 0)
{
BTIF_TRACE_EVENT("poll timeout (%d ms)", uipc_main.ch[ch_id].read_poll_tmo_ms);
break;
}
if (pfd.revents & (POLLHUP|POLLNVAL) )
{
BTIF_TRACE_EVENT("poll : channel detached remotely");
UIPC_LOCK();
uipc_close_locked(ch_id);
UIPC_UNLOCK();
return 0;
}
n = recv(fd, p_buf+n_read, len-n_read, 0);
if (n == 0)
{
BTIF_TRACE_EVENT("UIPC_Read : channel detached remotely");
UIPC_LOCK();
uipc_close_locked(ch_id);
UIPC_UNLOCK();
return 0;
}
if (n < 0)
{
BTIF_TRACE_EVENT("UIPC_Read : read failed (%s)", strerror(errno));
return 0;
}
n_read+=n;
}
return n_read;
}
Commit Message: DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
CWE ID: CWE-284 | UINT32 UIPC_Read(tUIPC_CH_ID ch_id, UINT16 *p_msg_evt, UINT8 *p_buf, UINT32 len)
{
int n;
int n_read = 0;
int fd = uipc_main.ch[ch_id].fd;
struct pollfd pfd;
UNUSED(p_msg_evt);
if (ch_id >= UIPC_CH_NUM)
{
BTIF_TRACE_ERROR("UIPC_Read : invalid ch id %d", ch_id);
return 0;
}
if (fd == UIPC_DISCONNECTED)
{
BTIF_TRACE_ERROR("UIPC_Read : channel %d closed", ch_id);
return 0;
}
while (n_read < (int)len)
{
pfd.fd = fd;
pfd.events = POLLIN|POLLHUP;
/* make sure there is data prior to attempting read to avoid blocking
a read for more than poll timeout */
if (TEMP_FAILURE_RETRY(poll(&pfd, 1, uipc_main.ch[ch_id].read_poll_tmo_ms)) == 0)
{
BTIF_TRACE_EVENT("poll timeout (%d ms)", uipc_main.ch[ch_id].read_poll_tmo_ms);
break;
}
if (pfd.revents & (POLLHUP|POLLNVAL) )
{
BTIF_TRACE_EVENT("poll : channel detached remotely");
UIPC_LOCK();
uipc_close_locked(ch_id);
UIPC_UNLOCK();
return 0;
}
n = TEMP_FAILURE_RETRY(recv(fd, p_buf+n_read, len-n_read, 0));
if (n == 0)
{
BTIF_TRACE_EVENT("UIPC_Read : channel detached remotely");
UIPC_LOCK();
uipc_close_locked(ch_id);
UIPC_UNLOCK();
return 0;
}
if (n < 0)
{
BTIF_TRACE_EVENT("UIPC_Read : read failed (%s)", strerror(errno));
return 0;
}
n_read+=n;
}
return n_read;
}
| 20,103 |
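The fix above wraps the blocking poll() and recv() calls in TEMP_FAILURE_RETRY, a glibc/Bionic convenience macro that retries a call which failed with EINTR. Without it, a stream of signals aimed at the Bluetooth process makes every blocking call return early, which is the denial of service the commit message refers to. A sketch of the macro as it is commonly defined (it relies on the GCC statement-expression extension) and a typical use:

#include <errno.h>
#include <unistd.h>

#ifndef TEMP_FAILURE_RETRY
#define TEMP_FAILURE_RETRY(expr)                      \
    ({ long _rc;                                      \
       do { _rc = (long)(expr); }                     \
       while (_rc == -1L && errno == EINTR);          \
       _rc; })
#endif

/* A read that is not aborted by signal delivery. */
static long read_retrying(int fd, void *buf, size_t len)
{
    return TEMP_FAILURE_RETRY(read(fd, buf, len));
}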
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool Block::IsInvisible() const
{
return bool(int(m_flags & 0x08) != 0);
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | const Block::Frame& Block::GetFrame(int idx) const {
assert(idx >= 0);
assert(idx < m_frame_count);
const Frame& f = m_frames[idx];
assert(f.pos > 0);
assert(f.len > 0);
return f;
}
| 14,936 |
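The output of the entry above is the bounds-checked accessor that the upstream libwebm pull adds alongside the unchanged IsInvisible(): frame lookups assert the index and the parsed offsets before anything dereferences them. The same pattern in plain C, with invented struct and function names:

#include <assert.h>

struct frame { long long pos; long len; };

/* Validate both the caller-supplied index and the values parsed from the
 * file before handing the frame back; out-of-range data from a malformed
 * WebM file must never drive a later read. */
static const struct frame *get_frame(const struct frame *frames, int count, int idx)
{
    assert(frames != 0);
    assert(idx >= 0 && idx < count);
    assert(frames[idx].pos > 0 && frames[idx].len > 0);
    return &frames[idx];
}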
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int ParseCaffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config)
{
uint32_t chan_chunk = 0, channel_layout = 0, bcount;
unsigned char *channel_identities = NULL;
unsigned char *channel_reorder = NULL;
int64_t total_samples = 0, infilesize;
CAFFileHeader caf_file_header;
CAFChunkHeader caf_chunk_header;
CAFAudioFormat caf_audio_format;
int i;
infilesize = DoGetFileSize (infile);
memcpy (&caf_file_header, fourcc, 4);
if ((!DoReadFile (infile, ((char *) &caf_file_header) + 4, sizeof (CAFFileHeader) - 4, &bcount) ||
bcount != sizeof (CAFFileHeader) - 4)) {
error_line ("%s is not a valid .CAF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &caf_file_header, sizeof (CAFFileHeader))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackBigEndianToNative (&caf_file_header, CAFFileHeaderFormat);
if (caf_file_header.mFileVersion != 1) {
error_line ("%s: can't handle version %d .CAF files!", infilename, caf_file_header.mFileVersion);
return WAVPACK_SOFT_ERROR;
}
while (1) {
if (!DoReadFile (infile, &caf_chunk_header, sizeof (CAFChunkHeader), &bcount) ||
bcount != sizeof (CAFChunkHeader)) {
error_line ("%s is not a valid .CAF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &caf_chunk_header, sizeof (CAFChunkHeader))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackBigEndianToNative (&caf_chunk_header, CAFChunkHeaderFormat);
if (!strncmp (caf_chunk_header.mChunkType, "desc", 4)) {
int supported = TRUE;
if (caf_chunk_header.mChunkSize != sizeof (CAFAudioFormat) ||
!DoReadFile (infile, &caf_audio_format, (uint32_t) caf_chunk_header.mChunkSize, &bcount) ||
bcount != caf_chunk_header.mChunkSize) {
error_line ("%s is not a valid .CAF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &caf_audio_format, (uint32_t) caf_chunk_header.mChunkSize)) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackBigEndianToNative (&caf_audio_format, CAFAudioFormatFormat);
if (debug_logging_mode) {
char formatstr [5];
memcpy (formatstr, caf_audio_format.mFormatID, 4);
formatstr [4] = 0;
error_line ("format = %s, flags = %x, sampling rate = %g",
formatstr, caf_audio_format.mFormatFlags, caf_audio_format.mSampleRate);
error_line ("packet = %d bytes and %d frames",
caf_audio_format.mBytesPerPacket, caf_audio_format.mFramesPerPacket);
error_line ("channels per frame = %d, bits per channel = %d",
caf_audio_format.mChannelsPerFrame, caf_audio_format.mBitsPerChannel);
}
if (strncmp (caf_audio_format.mFormatID, "lpcm", 4) || (caf_audio_format.mFormatFlags & ~3))
supported = FALSE;
else if (caf_audio_format.mSampleRate < 1.0 || caf_audio_format.mSampleRate > 16777215.0 ||
caf_audio_format.mSampleRate != floor (caf_audio_format.mSampleRate))
supported = FALSE;
else if (!caf_audio_format.mChannelsPerFrame || caf_audio_format.mChannelsPerFrame > 256)
supported = FALSE;
else if (caf_audio_format.mBitsPerChannel < 1 || caf_audio_format.mBitsPerChannel > 32 ||
((caf_audio_format.mFormatFlags & CAF_FORMAT_FLOAT) && caf_audio_format.mBitsPerChannel != 32))
supported = FALSE;
else if (caf_audio_format.mFramesPerPacket != 1 ||
caf_audio_format.mBytesPerPacket / caf_audio_format.mChannelsPerFrame < (caf_audio_format.mBitsPerChannel + 7) / 8 ||
caf_audio_format.mBytesPerPacket / caf_audio_format.mChannelsPerFrame > 4 ||
caf_audio_format.mBytesPerPacket % caf_audio_format.mChannelsPerFrame)
supported = FALSE;
if (!supported) {
error_line ("%s is an unsupported .CAF format!", infilename);
return WAVPACK_SOFT_ERROR;
}
config->bytes_per_sample = caf_audio_format.mBytesPerPacket / caf_audio_format.mChannelsPerFrame;
config->float_norm_exp = (caf_audio_format.mFormatFlags & CAF_FORMAT_FLOAT) ? 127 : 0;
config->bits_per_sample = caf_audio_format.mBitsPerChannel;
config->num_channels = caf_audio_format.mChannelsPerFrame;
config->sample_rate = (int) caf_audio_format.mSampleRate;
if (!(caf_audio_format.mFormatFlags & CAF_FORMAT_LITTLE_ENDIAN) && config->bytes_per_sample > 1)
config->qmode |= QMODE_BIG_ENDIAN;
if (config->bytes_per_sample == 1)
config->qmode |= QMODE_SIGNED_BYTES;
if (debug_logging_mode) {
if (config->float_norm_exp == 127)
error_line ("data format: 32-bit %s-endian floating point", (config->qmode & QMODE_BIG_ENDIAN) ? "big" : "little");
else
error_line ("data format: %d-bit %s-endian integers stored in %d byte(s)",
config->bits_per_sample, (config->qmode & QMODE_BIG_ENDIAN) ? "big" : "little", config->bytes_per_sample);
}
}
else if (!strncmp (caf_chunk_header.mChunkType, "chan", 4)) {
CAFChannelLayout *caf_channel_layout;
if (caf_chunk_header.mChunkSize < 0 || caf_chunk_header.mChunkSize > 1024 ||
caf_chunk_header.mChunkSize < sizeof (CAFChannelLayout)) {
error_line ("this .CAF file has an invalid 'chan' chunk!");
return WAVPACK_SOFT_ERROR;
}
if (debug_logging_mode)
error_line ("'chan' chunk is %d bytes", (int) caf_chunk_header.mChunkSize);
caf_channel_layout = malloc ((size_t) caf_chunk_header.mChunkSize);
if (!DoReadFile (infile, caf_channel_layout, (uint32_t) caf_chunk_header.mChunkSize, &bcount) ||
bcount != caf_chunk_header.mChunkSize) {
error_line ("%s is not a valid .CAF file!", infilename);
free (caf_channel_layout);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, caf_channel_layout, (uint32_t) caf_chunk_header.mChunkSize)) {
error_line ("%s", WavpackGetErrorMessage (wpc));
free (caf_channel_layout);
return WAVPACK_SOFT_ERROR;
}
WavpackBigEndianToNative (caf_channel_layout, CAFChannelLayoutFormat);
chan_chunk = 1;
if (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED)) {
error_line ("this CAF file already has channel order information!");
free (caf_channel_layout);
return WAVPACK_SOFT_ERROR;
}
switch (caf_channel_layout->mChannelLayoutTag) {
case kCAFChannelLayoutTag_UseChannelDescriptions:
{
CAFChannelDescription *descriptions = (CAFChannelDescription *) (caf_channel_layout + 1);
int num_descriptions = caf_channel_layout->mNumberChannelDescriptions;
int label, cindex = 0, idents = 0;
if (caf_chunk_header.mChunkSize != sizeof (CAFChannelLayout) + sizeof (CAFChannelDescription) * num_descriptions ||
num_descriptions != config->num_channels) {
error_line ("channel descriptions in 'chan' chunk are the wrong size!");
free (caf_channel_layout);
return WAVPACK_SOFT_ERROR;
}
if (num_descriptions >= 256) {
error_line ("%d channel descriptions is more than we can handle...ignoring!");
break;
}
channel_reorder = malloc (num_descriptions);
memset (channel_reorder, -1, num_descriptions);
channel_identities = malloc (num_descriptions+1);
for (i = 0; i < num_descriptions; ++i) {
WavpackBigEndianToNative (descriptions + i, CAFChannelDescriptionFormat);
if (debug_logging_mode)
error_line ("chan %d --> %d", i + 1, descriptions [i].mChannelLabel);
}
for (label = 1; label <= 18; ++label)
for (i = 0; i < num_descriptions; ++i)
if (descriptions [i].mChannelLabel == label) {
config->channel_mask |= 1 << (label - 1);
channel_reorder [i] = cindex++;
break;
}
for (i = 0; i < num_descriptions; ++i)
if (channel_reorder [i] == (unsigned char) -1) {
uint32_t clabel = descriptions [i].mChannelLabel;
if (clabel == 0 || clabel == 0xffffffff || clabel == 100)
channel_identities [idents++] = 0xff;
else if ((clabel >= 33 && clabel <= 44) || (clabel >= 200 && clabel <= 207) || (clabel >= 301 && clabel <= 305))
channel_identities [idents++] = clabel >= 301 ? clabel - 80 : clabel;
else {
error_line ("warning: unknown channel descriptions label: %d", clabel);
channel_identities [idents++] = 0xff;
}
channel_reorder [i] = cindex++;
}
for (i = 0; i < num_descriptions; ++i)
if (channel_reorder [i] != i)
break;
if (i == num_descriptions) {
free (channel_reorder); // no reordering required, so don't
channel_reorder = NULL;
}
else {
config->qmode |= QMODE_REORDERED_CHANS; // reordering required, put channel count into layout
channel_layout = num_descriptions;
}
if (!idents) { // if no non-MS channels, free the identities string
free (channel_identities);
channel_identities = NULL;
}
else
channel_identities [idents] = 0; // otherwise NULL terminate it
if (debug_logging_mode) {
error_line ("layout_tag = 0x%08x, so generated bitmap of 0x%08x from %d descriptions, %d non-MS",
caf_channel_layout->mChannelLayoutTag, config->channel_mask,
caf_channel_layout->mNumberChannelDescriptions, idents);
if (channel_reorder && num_descriptions <= 8) {
char reorder_string [] = "12345678";
for (i = 0; i < num_descriptions; ++i)
reorder_string [i] = channel_reorder [i] + '1';
reorder_string [i] = 0;
error_line ("reordering string = \"%s\"\n", reorder_string);
}
}
}
break;
case kCAFChannelLayoutTag_UseChannelBitmap:
config->channel_mask = caf_channel_layout->mChannelBitmap;
if (debug_logging_mode)
error_line ("layout_tag = 0x%08x, so using supplied bitmap of 0x%08x",
caf_channel_layout->mChannelLayoutTag, caf_channel_layout->mChannelBitmap);
break;
default:
for (i = 0; i < NUM_LAYOUTS; ++i)
if (caf_channel_layout->mChannelLayoutTag == layouts [i].mChannelLayoutTag) {
config->channel_mask = layouts [i].mChannelBitmap;
channel_layout = layouts [i].mChannelLayoutTag;
if (layouts [i].mChannelReorder) {
channel_reorder = (unsigned char *) strdup (layouts [i].mChannelReorder);
config->qmode |= QMODE_REORDERED_CHANS;
}
if (layouts [i].mChannelIdentities)
channel_identities = (unsigned char *) strdup (layouts [i].mChannelIdentities);
if (debug_logging_mode)
error_line ("layout_tag 0x%08x found in table, bitmap = 0x%08x, reorder = %s, identities = %s",
channel_layout, config->channel_mask, channel_reorder ? "yes" : "no", channel_identities ? "yes" : "no");
break;
}
if (i == NUM_LAYOUTS && debug_logging_mode)
error_line ("layout_tag 0x%08x not found in table...all channels unassigned",
caf_channel_layout->mChannelLayoutTag);
break;
}
free (caf_channel_layout);
}
else if (!strncmp (caf_chunk_header.mChunkType, "data", 4)) { // on the data chunk, get size and exit loop
uint32_t mEditCount;
if (!DoReadFile (infile, &mEditCount, sizeof (mEditCount), &bcount) ||
bcount != sizeof (mEditCount)) {
error_line ("%s is not a valid .CAF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &mEditCount, sizeof (mEditCount))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
if ((config->qmode & QMODE_IGNORE_LENGTH) || caf_chunk_header.mChunkSize == -1) {
config->qmode |= QMODE_IGNORE_LENGTH;
if (infilesize && DoGetFilePosition (infile) != -1)
total_samples = (infilesize - DoGetFilePosition (infile)) / caf_audio_format.mBytesPerPacket;
else
total_samples = -1;
}
else {
if (infilesize && infilesize - caf_chunk_header.mChunkSize > 16777216) {
error_line (".CAF file %s has over 16 MB of extra CAFF data, probably is corrupt!", infilename);
return WAVPACK_SOFT_ERROR;
}
if ((caf_chunk_header.mChunkSize - 4) % caf_audio_format.mBytesPerPacket) {
error_line (".CAF file %s has an invalid data chunk size, probably is corrupt!", infilename);
return WAVPACK_SOFT_ERROR;
}
total_samples = (caf_chunk_header.mChunkSize - 4) / caf_audio_format.mBytesPerPacket;
if (!total_samples) {
error_line ("this .CAF file has no audio samples, probably is corrupt!");
return WAVPACK_SOFT_ERROR;
}
if (total_samples > MAX_WAVPACK_SAMPLES) {
error_line ("%s has too many samples for WavPack!", infilename);
return WAVPACK_SOFT_ERROR;
}
}
break;
}
else { // just copy unknown chunks to output file
uint32_t bytes_to_copy = (uint32_t) caf_chunk_header.mChunkSize;
char *buff;
if (caf_chunk_header.mChunkSize < 0 || caf_chunk_header.mChunkSize > 1048576) {
error_line ("%s is not a valid .CAF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
buff = malloc (bytes_to_copy);
if (debug_logging_mode)
error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes",
caf_chunk_header.mChunkType [0], caf_chunk_header.mChunkType [1], caf_chunk_header.mChunkType [2],
caf_chunk_header.mChunkType [3], caf_chunk_header.mChunkSize);
if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) ||
bcount != bytes_to_copy ||
(!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, buff, bytes_to_copy))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
free (buff);
return WAVPACK_SOFT_ERROR;
}
free (buff);
}
}
if (!chan_chunk && !config->channel_mask && config->num_channels <= 2 && !(config->qmode & QMODE_CHANS_UNASSIGNED))
config->channel_mask = 0x5 - config->num_channels;
if (!WavpackSetConfiguration64 (wpc, config, total_samples, channel_identities)) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
if (channel_identities)
free (channel_identities);
if (channel_layout || channel_reorder) {
if (!WavpackSetChannelLayout (wpc, channel_layout, channel_reorder)) {
error_line ("problem with setting channel layout (should not happen)");
return WAVPACK_SOFT_ERROR;
}
if (channel_reorder)
free (channel_reorder);
}
return WAVPACK_NO_ERROR;
}
Commit Message: issue #66: make sure CAF files have a "desc" chunk
CWE ID: CWE-665 | int ParseCaffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config)
{
uint32_t chan_chunk = 0, desc_chunk = 0, channel_layout = 0, bcount;
unsigned char *channel_identities = NULL;
unsigned char *channel_reorder = NULL;
int64_t total_samples = 0, infilesize;
CAFFileHeader caf_file_header;
CAFChunkHeader caf_chunk_header;
CAFAudioFormat caf_audio_format;
int i;
infilesize = DoGetFileSize (infile);
memcpy (&caf_file_header, fourcc, 4);
if ((!DoReadFile (infile, ((char *) &caf_file_header) + 4, sizeof (CAFFileHeader) - 4, &bcount) ||
bcount != sizeof (CAFFileHeader) - 4)) {
error_line ("%s is not a valid .CAF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &caf_file_header, sizeof (CAFFileHeader))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackBigEndianToNative (&caf_file_header, CAFFileHeaderFormat);
if (caf_file_header.mFileVersion != 1) {
error_line ("%s: can't handle version %d .CAF files!", infilename, caf_file_header.mFileVersion);
return WAVPACK_SOFT_ERROR;
}
while (1) {
if (!DoReadFile (infile, &caf_chunk_header, sizeof (CAFChunkHeader), &bcount) ||
bcount != sizeof (CAFChunkHeader)) {
error_line ("%s is not a valid .CAF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &caf_chunk_header, sizeof (CAFChunkHeader))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackBigEndianToNative (&caf_chunk_header, CAFChunkHeaderFormat);
if (!strncmp (caf_chunk_header.mChunkType, "desc", 4)) {
int supported = TRUE;
if (caf_chunk_header.mChunkSize != sizeof (CAFAudioFormat) ||
!DoReadFile (infile, &caf_audio_format, (uint32_t) caf_chunk_header.mChunkSize, &bcount) ||
bcount != caf_chunk_header.mChunkSize) {
error_line ("%s is not a valid .CAF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &caf_audio_format, (uint32_t) caf_chunk_header.mChunkSize)) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackBigEndianToNative (&caf_audio_format, CAFAudioFormatFormat);
desc_chunk = 1;
if (debug_logging_mode) {
char formatstr [5];
memcpy (formatstr, caf_audio_format.mFormatID, 4);
formatstr [4] = 0;
error_line ("format = %s, flags = %x, sampling rate = %g",
formatstr, caf_audio_format.mFormatFlags, caf_audio_format.mSampleRate);
error_line ("packet = %d bytes and %d frames",
caf_audio_format.mBytesPerPacket, caf_audio_format.mFramesPerPacket);
error_line ("channels per frame = %d, bits per channel = %d",
caf_audio_format.mChannelsPerFrame, caf_audio_format.mBitsPerChannel);
}
if (strncmp (caf_audio_format.mFormatID, "lpcm", 4) || (caf_audio_format.mFormatFlags & ~3))
supported = FALSE;
else if (caf_audio_format.mSampleRate < 1.0 || caf_audio_format.mSampleRate > 16777215.0 ||
caf_audio_format.mSampleRate != floor (caf_audio_format.mSampleRate))
supported = FALSE;
else if (!caf_audio_format.mChannelsPerFrame || caf_audio_format.mChannelsPerFrame > 256)
supported = FALSE;
else if (caf_audio_format.mBitsPerChannel < 1 || caf_audio_format.mBitsPerChannel > 32 ||
((caf_audio_format.mFormatFlags & CAF_FORMAT_FLOAT) && caf_audio_format.mBitsPerChannel != 32))
supported = FALSE;
else if (caf_audio_format.mFramesPerPacket != 1 ||
caf_audio_format.mBytesPerPacket / caf_audio_format.mChannelsPerFrame < (caf_audio_format.mBitsPerChannel + 7) / 8 ||
caf_audio_format.mBytesPerPacket / caf_audio_format.mChannelsPerFrame > 4 ||
caf_audio_format.mBytesPerPacket % caf_audio_format.mChannelsPerFrame)
supported = FALSE;
if (!supported) {
error_line ("%s is an unsupported .CAF format!", infilename);
return WAVPACK_SOFT_ERROR;
}
config->bytes_per_sample = caf_audio_format.mBytesPerPacket / caf_audio_format.mChannelsPerFrame;
config->float_norm_exp = (caf_audio_format.mFormatFlags & CAF_FORMAT_FLOAT) ? 127 : 0;
config->bits_per_sample = caf_audio_format.mBitsPerChannel;
config->num_channels = caf_audio_format.mChannelsPerFrame;
config->sample_rate = (int) caf_audio_format.mSampleRate;
if (!(caf_audio_format.mFormatFlags & CAF_FORMAT_LITTLE_ENDIAN) && config->bytes_per_sample > 1)
config->qmode |= QMODE_BIG_ENDIAN;
if (config->bytes_per_sample == 1)
config->qmode |= QMODE_SIGNED_BYTES;
if (debug_logging_mode) {
if (config->float_norm_exp == 127)
error_line ("data format: 32-bit %s-endian floating point", (config->qmode & QMODE_BIG_ENDIAN) ? "big" : "little");
else
error_line ("data format: %d-bit %s-endian integers stored in %d byte(s)",
config->bits_per_sample, (config->qmode & QMODE_BIG_ENDIAN) ? "big" : "little", config->bytes_per_sample);
}
}
else if (!strncmp (caf_chunk_header.mChunkType, "chan", 4)) {
CAFChannelLayout *caf_channel_layout;
if (caf_chunk_header.mChunkSize < 0 || caf_chunk_header.mChunkSize > 1024 ||
caf_chunk_header.mChunkSize < sizeof (CAFChannelLayout)) {
error_line ("this .CAF file has an invalid 'chan' chunk!");
return WAVPACK_SOFT_ERROR;
}
if (debug_logging_mode)
error_line ("'chan' chunk is %d bytes", (int) caf_chunk_header.mChunkSize);
caf_channel_layout = malloc ((size_t) caf_chunk_header.mChunkSize);
if (!DoReadFile (infile, caf_channel_layout, (uint32_t) caf_chunk_header.mChunkSize, &bcount) ||
bcount != caf_chunk_header.mChunkSize) {
error_line ("%s is not a valid .CAF file!", infilename);
free (caf_channel_layout);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, caf_channel_layout, (uint32_t) caf_chunk_header.mChunkSize)) {
error_line ("%s", WavpackGetErrorMessage (wpc));
free (caf_channel_layout);
return WAVPACK_SOFT_ERROR;
}
WavpackBigEndianToNative (caf_channel_layout, CAFChannelLayoutFormat);
chan_chunk = 1;
if (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED)) {
error_line ("this CAF file already has channel order information!");
free (caf_channel_layout);
return WAVPACK_SOFT_ERROR;
}
switch (caf_channel_layout->mChannelLayoutTag) {
case kCAFChannelLayoutTag_UseChannelDescriptions:
{
CAFChannelDescription *descriptions = (CAFChannelDescription *) (caf_channel_layout + 1);
int num_descriptions = caf_channel_layout->mNumberChannelDescriptions;
int label, cindex = 0, idents = 0;
if (caf_chunk_header.mChunkSize != sizeof (CAFChannelLayout) + sizeof (CAFChannelDescription) * num_descriptions ||
num_descriptions != config->num_channels) {
error_line ("channel descriptions in 'chan' chunk are the wrong size!");
free (caf_channel_layout);
return WAVPACK_SOFT_ERROR;
}
if (num_descriptions >= 256) {
error_line ("%d channel descriptions is more than we can handle...ignoring!");
break;
}
channel_reorder = malloc (num_descriptions);
memset (channel_reorder, -1, num_descriptions);
channel_identities = malloc (num_descriptions+1);
for (i = 0; i < num_descriptions; ++i) {
WavpackBigEndianToNative (descriptions + i, CAFChannelDescriptionFormat);
if (debug_logging_mode)
error_line ("chan %d --> %d", i + 1, descriptions [i].mChannelLabel);
}
for (label = 1; label <= 18; ++label)
for (i = 0; i < num_descriptions; ++i)
if (descriptions [i].mChannelLabel == label) {
config->channel_mask |= 1 << (label - 1);
channel_reorder [i] = cindex++;
break;
}
for (i = 0; i < num_descriptions; ++i)
if (channel_reorder [i] == (unsigned char) -1) {
uint32_t clabel = descriptions [i].mChannelLabel;
if (clabel == 0 || clabel == 0xffffffff || clabel == 100)
channel_identities [idents++] = 0xff;
else if ((clabel >= 33 && clabel <= 44) || (clabel >= 200 && clabel <= 207) || (clabel >= 301 && clabel <= 305))
channel_identities [idents++] = clabel >= 301 ? clabel - 80 : clabel;
else {
error_line ("warning: unknown channel descriptions label: %d", clabel);
channel_identities [idents++] = 0xff;
}
channel_reorder [i] = cindex++;
}
for (i = 0; i < num_descriptions; ++i)
if (channel_reorder [i] != i)
break;
if (i == num_descriptions) {
free (channel_reorder); // no reordering required, so don't
channel_reorder = NULL;
}
else {
config->qmode |= QMODE_REORDERED_CHANS; // reordering required, put channel count into layout
channel_layout = num_descriptions;
}
if (!idents) { // if no non-MS channels, free the identities string
free (channel_identities);
channel_identities = NULL;
}
else
channel_identities [idents] = 0; // otherwise NULL terminate it
if (debug_logging_mode) {
error_line ("layout_tag = 0x%08x, so generated bitmap of 0x%08x from %d descriptions, %d non-MS",
caf_channel_layout->mChannelLayoutTag, config->channel_mask,
caf_channel_layout->mNumberChannelDescriptions, idents);
if (channel_reorder && num_descriptions <= 8) {
char reorder_string [] = "12345678";
for (i = 0; i < num_descriptions; ++i)
reorder_string [i] = channel_reorder [i] + '1';
reorder_string [i] = 0;
error_line ("reordering string = \"%s\"\n", reorder_string);
}
}
}
break;
case kCAFChannelLayoutTag_UseChannelBitmap:
config->channel_mask = caf_channel_layout->mChannelBitmap;
if (debug_logging_mode)
error_line ("layout_tag = 0x%08x, so using supplied bitmap of 0x%08x",
caf_channel_layout->mChannelLayoutTag, caf_channel_layout->mChannelBitmap);
break;
default:
for (i = 0; i < NUM_LAYOUTS; ++i)
if (caf_channel_layout->mChannelLayoutTag == layouts [i].mChannelLayoutTag) {
config->channel_mask = layouts [i].mChannelBitmap;
channel_layout = layouts [i].mChannelLayoutTag;
if (layouts [i].mChannelReorder) {
channel_reorder = (unsigned char *) strdup (layouts [i].mChannelReorder);
config->qmode |= QMODE_REORDERED_CHANS;
}
if (layouts [i].mChannelIdentities)
channel_identities = (unsigned char *) strdup (layouts [i].mChannelIdentities);
if (debug_logging_mode)
error_line ("layout_tag 0x%08x found in table, bitmap = 0x%08x, reorder = %s, identities = %s",
channel_layout, config->channel_mask, channel_reorder ? "yes" : "no", channel_identities ? "yes" : "no");
break;
}
if (i == NUM_LAYOUTS && debug_logging_mode)
error_line ("layout_tag 0x%08x not found in table...all channels unassigned",
caf_channel_layout->mChannelLayoutTag);
break;
}
free (caf_channel_layout);
}
else if (!strncmp (caf_chunk_header.mChunkType, "data", 4)) { // on the data chunk, get size and exit loop
uint32_t mEditCount;
if (!desc_chunk || !DoReadFile (infile, &mEditCount, sizeof (mEditCount), &bcount) ||
bcount != sizeof (mEditCount)) {
error_line ("%s is not a valid .CAF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &mEditCount, sizeof (mEditCount))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
if ((config->qmode & QMODE_IGNORE_LENGTH) || caf_chunk_header.mChunkSize == -1) {
config->qmode |= QMODE_IGNORE_LENGTH;
if (infilesize && DoGetFilePosition (infile) != -1)
total_samples = (infilesize - DoGetFilePosition (infile)) / caf_audio_format.mBytesPerPacket;
else
total_samples = -1;
}
else {
if (infilesize && infilesize - caf_chunk_header.mChunkSize > 16777216) {
error_line (".CAF file %s has over 16 MB of extra CAFF data, probably is corrupt!", infilename);
return WAVPACK_SOFT_ERROR;
}
if ((caf_chunk_header.mChunkSize - 4) % caf_audio_format.mBytesPerPacket) {
error_line (".CAF file %s has an invalid data chunk size, probably is corrupt!", infilename);
return WAVPACK_SOFT_ERROR;
}
total_samples = (caf_chunk_header.mChunkSize - 4) / caf_audio_format.mBytesPerPacket;
if (!total_samples) {
error_line ("this .CAF file has no audio samples, probably is corrupt!");
return WAVPACK_SOFT_ERROR;
}
if (total_samples > MAX_WAVPACK_SAMPLES) {
error_line ("%s has too many samples for WavPack!", infilename);
return WAVPACK_SOFT_ERROR;
}
}
break;
}
else { // just copy unknown chunks to output file
uint32_t bytes_to_copy = (uint32_t) caf_chunk_header.mChunkSize;
char *buff;
if (caf_chunk_header.mChunkSize < 0 || caf_chunk_header.mChunkSize > 1048576) {
error_line ("%s is not a valid .CAF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
buff = malloc (bytes_to_copy);
if (debug_logging_mode)
error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes",
caf_chunk_header.mChunkType [0], caf_chunk_header.mChunkType [1], caf_chunk_header.mChunkType [2],
caf_chunk_header.mChunkType [3], caf_chunk_header.mChunkSize);
if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) ||
bcount != bytes_to_copy ||
(!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, buff, bytes_to_copy))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
free (buff);
return WAVPACK_SOFT_ERROR;
}
free (buff);
}
}
if (!chan_chunk && !config->channel_mask && config->num_channels <= 2 && !(config->qmode & QMODE_CHANS_UNASSIGNED))
config->channel_mask = 0x5 - config->num_channels;
if (!WavpackSetConfiguration64 (wpc, config, total_samples, channel_identities)) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
if (channel_identities)
free (channel_identities);
if (channel_layout || channel_reorder) {
if (!WavpackSetChannelLayout (wpc, channel_layout, channel_reorder)) {
error_line ("problem with setting channel layout (should not happen)");
return WAVPACK_SOFT_ERROR;
}
if (channel_reorder)
free (channel_reorder);
}
return WAVPACK_NO_ERROR;
}
| 18,801 |
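The CWE-665 fix above introduces a desc_chunk flag and refuses to process the "data" chunk until a "desc" chunk has populated caf_audio_format, so the sample count is never computed from an uninitialized structure. A compact sketch of that ordering guard, with invented names:

#include <stdbool.h>
#include <stdio.h>

struct audio_desc { unsigned bytes_per_packet; };

/* The data-chunk handler may only run after the format description has
 * been parsed; otherwise the file is rejected instead of reading (and
 * dividing by) uninitialized fields. */
static int handle_data_chunk(bool desc_seen, const struct audio_desc *desc,
                             long long chunk_size, long long *samples_out)
{
    if (!desc_seen || desc->bytes_per_packet == 0) {
        fprintf(stderr, "data chunk before desc chunk: not a valid CAF file\n");
        return -1;
    }
    *samples_out = (chunk_size - 4) / desc->bytes_per_packet;
    return 0;
}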
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_surface *user_srf;
struct vmw_surface *srf;
struct vmw_resource *res;
struct vmw_resource *tmp;
union drm_vmw_gb_surface_create_arg *arg =
(union drm_vmw_gb_surface_create_arg *)data;
struct drm_vmw_gb_surface_create_req *req = &arg->req;
struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
uint32_t size;
uint32_t backup_handle;
if (req->multisample_count != 0)
return -EINVAL;
if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
return -EINVAL;
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
128;
size = vmw_user_surface_size + 128;
/* Define a surface based on the parameters. */
ret = vmw_surface_gb_priv_define(dev,
size,
req->svga3d_flags,
req->format,
req->drm_surface_flags & drm_vmw_surface_flag_scanout,
req->mip_levels,
req->multisample_count,
req->array_size,
req->base_size,
&srf);
if (unlikely(ret != 0))
return ret;
user_srf = container_of(srf, struct vmw_user_surface, srf);
if (drm_is_primary_client(file_priv))
user_srf->master = drm_master_get(file_priv->master);
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
res = &user_srf->srf.res;
if (req->buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
&res->backup,
&user_srf->backup_base);
if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
res->backup_size) {
DRM_ERROR("Surface backup buffer is too small.\n");
vmw_dmabuf_unreference(&res->backup);
ret = -EINVAL;
goto out_unlock;
}
} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
res->backup_size,
req->drm_surface_flags &
drm_vmw_surface_flag_shareable,
&backup_handle,
&res->backup,
&user_srf->backup_base);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->drm_surface_flags &
drm_vmw_surface_flag_shareable,
VMW_RES_SURFACE,
&vmw_user_surface_base_release, NULL);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
vmw_resource_unreference(&res);
goto out_unlock;
}
rep->handle = user_srf->prime.base.hash.key;
rep->backup_size = res->backup_size;
if (res->backup) {
rep->buffer_map_handle =
drm_vma_node_offset_addr(&res->backup->base.vma_node);
rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
rep->buffer_handle = backup_handle;
} else {
rep->buffer_map_handle = 0;
rep->buffer_size = 0;
rep->buffer_handle = SVGA3D_INVALID_ID;
}
vmw_resource_unreference(&res);
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
Commit Message: drm/vmwgfx: Make sure backup_handle is always valid
When vmw_gb_surface_define_ioctl() is called with an existing buffer,
we end up returning an uninitialized variable in the backup_handle.
The fix is to first initialize backup_handle to 0 just to be sure, and
second, when a user-provided buffer is found, we will use the
req->buffer_handle as the backup_handle.
Cc: <[email protected]>
Reported-by: Murray McAllister <[email protected]>
Signed-off-by: Sinclair Yeh <[email protected]>
Reviewed-by: Deepak Rawat <[email protected]>
CWE ID: CWE-200 | int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_surface *user_srf;
struct vmw_surface *srf;
struct vmw_resource *res;
struct vmw_resource *tmp;
union drm_vmw_gb_surface_create_arg *arg =
(union drm_vmw_gb_surface_create_arg *)data;
struct drm_vmw_gb_surface_create_req *req = &arg->req;
struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
uint32_t size;
uint32_t backup_handle = 0;
if (req->multisample_count != 0)
return -EINVAL;
if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
return -EINVAL;
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
128;
size = vmw_user_surface_size + 128;
/* Define a surface based on the parameters. */
ret = vmw_surface_gb_priv_define(dev,
size,
req->svga3d_flags,
req->format,
req->drm_surface_flags & drm_vmw_surface_flag_scanout,
req->mip_levels,
req->multisample_count,
req->array_size,
req->base_size,
&srf);
if (unlikely(ret != 0))
return ret;
user_srf = container_of(srf, struct vmw_user_surface, srf);
if (drm_is_primary_client(file_priv))
user_srf->master = drm_master_get(file_priv->master);
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
res = &user_srf->srf.res;
if (req->buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
&res->backup,
&user_srf->backup_base);
if (ret == 0) {
if (res->backup->base.num_pages * PAGE_SIZE <
res->backup_size) {
DRM_ERROR("Surface backup buffer is too small.\n");
vmw_dmabuf_unreference(&res->backup);
ret = -EINVAL;
goto out_unlock;
} else {
backup_handle = req->buffer_handle;
}
}
} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
res->backup_size,
req->drm_surface_flags &
drm_vmw_surface_flag_shareable,
&backup_handle,
&res->backup,
&user_srf->backup_base);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->drm_surface_flags &
drm_vmw_surface_flag_shareable,
VMW_RES_SURFACE,
&vmw_user_surface_base_release, NULL);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
vmw_resource_unreference(&res);
goto out_unlock;
}
rep->handle = user_srf->prime.base.hash.key;
rep->backup_size = res->backup_size;
if (res->backup) {
rep->buffer_map_handle =
drm_vma_node_offset_addr(&res->backup->base.vma_node);
rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
rep->buffer_handle = backup_handle;
} else {
rep->buffer_map_handle = 0;
rep->buffer_size = 0;
rep->buffer_handle = SVGA3D_INVALID_ID;
}
vmw_resource_unreference(&res);
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
| 9,887 |
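
The fix above comes down to giving backup_handle a defined value on every path before it is copied back to userspace. A minimal userspace sketch of the same CWE-200 pattern (not the vmwgfx ioctl, just a hypothetical fill_reply() helper) for illustration:

#include <stdio.h>
#include <string.h>

struct reply {
    unsigned int handle;
    unsigned int size;
};

/* Buggy variant: 'handle' is assigned on only one branch, so the other
 * branch copies whatever happened to be on the stack to the caller. */
static void fill_reply_buggy(struct reply *out, int have_buffer)
{
    unsigned int handle;            /* uninitialized */

    if (have_buffer)
        handle = 42;
    out->handle = handle;           /* stack garbage when !have_buffer */
    out->size = 4096;
}

/* Fixed variant: initialize up front, exactly like backup_handle = 0. */
static void fill_reply_fixed(struct reply *out, int have_buffer)
{
    unsigned int handle = 0;

    if (have_buffer)
        handle = 42;
    out->handle = handle;
    out->size = 4096;
}

int main(void)
{
    struct reply r;

    memset(&r, 0xAA, sizeof(r));
    fill_reply_buggy(&r, 0);
    printf("buggy handle: %u\n", r.handle);  /* indeterminate value */
    fill_reply_fixed(&r, 0);
    printf("fixed handle: %u\n", r.handle);  /* always 0 */
    return 0;
}

Compilers can flag the buggy variant with -Wmaybe-uninitialized, but that warning is heuristic; initializing at declaration, as the fix does, removes the dependence on it.
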
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: UserInitiatedInfo CreateUserInitiatedInfo(
content::NavigationHandle* navigation_handle,
PageLoadTracker* committed_load) {
if (!navigation_handle->IsRendererInitiated())
return UserInitiatedInfo::BrowserInitiated();
return UserInitiatedInfo::RenderInitiated(
navigation_handle->HasUserGesture());
}
Commit Message: Add boolean to UserIntiatedInfo noting if an input event led to navigation.
Also refactor UkmPageLoadMetricsObserver to use this new boolean to
report the user initiated metric in RecordPageLoadExtraInfoMetrics, so
that it works correctly in the case when the page load failed.
Bug: 925104
Change-Id: Ie08e7d3912cb1da484190d838005e95e57a209ff
Reviewed-on: https://chromium-review.googlesource.com/c/1450460
Commit-Queue: Annie Sullivan <[email protected]>
Reviewed-by: Bryan McQuade <[email protected]>
Cr-Commit-Position: refs/heads/master@{#630870}
CWE ID: CWE-79 | UserInitiatedInfo CreateUserInitiatedInfo(
content::NavigationHandle* navigation_handle,
PageLoadTracker* committed_load) {
if (!navigation_handle->IsRendererInitiated())
return UserInitiatedInfo::BrowserInitiated();
return UserInitiatedInfo::RenderInitiated(
navigation_handle->HasUserGesture(),
!navigation_handle->NavigationInputStart().is_null());
}
| 26,030 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
struct dccp_net *dn;
struct dccp_hdr _dh, *dh;
const char *msg;
u_int8_t state;
dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
BUG_ON(dh == NULL);
state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
switch (state) {
default:
dn = dccp_pernet(net);
if (dn->dccp_loose == 0) {
msg = "nf_ct_dccp: not picking up existing connection ";
goto out_invalid;
}
case CT_DCCP_REQUEST:
break;
case CT_DCCP_INVALID:
msg = "nf_ct_dccp: invalid state transition ";
goto out_invalid;
}
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
ct->proto.dccp.state = CT_DCCP_NONE;
ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST;
ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL;
ct->proto.dccp.handshake_seq = 0;
return true;
out_invalid:
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
NULL, "%s", msg);
return false;
}
Commit Message: netfilter: nf_conntrack_dccp: fix skb_header_pointer API usages
Some occurences in the netfilter tree use skb_header_pointer() in
the following way ...
struct dccp_hdr _dh, *dh;
...
skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
... where dh itself is a pointer that is being passed as the copy
buffer. Instead, we need to use &_dh as the fourth argument so that
we're copying the data into an actual buffer that sits on the stack.
Currently, we probably could overwrite memory on the stack (e.g.
with a possibly mal-formed DCCP packet), but unintentionally, as
we only want the buffer to be placed into _dh variable.
Fixes: 2bc780499aa3 ("[NETFILTER]: nf_conntrack: add DCCP protocol support")
Signed-off-by: Daniel Borkmann <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
CWE ID: CWE-20 | static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
struct dccp_net *dn;
struct dccp_hdr _dh, *dh;
const char *msg;
u_int8_t state;
dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
BUG_ON(dh == NULL);
state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
switch (state) {
default:
dn = dccp_pernet(net);
if (dn->dccp_loose == 0) {
msg = "nf_ct_dccp: not picking up existing connection ";
goto out_invalid;
}
case CT_DCCP_REQUEST:
break;
case CT_DCCP_INVALID:
msg = "nf_ct_dccp: invalid state transition ";
goto out_invalid;
}
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
ct->proto.dccp.state = CT_DCCP_NONE;
ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST;
ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL;
ct->proto.dccp.handshake_seq = 0;
return true;
out_invalid:
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
NULL, "%s", msg);
return false;
}
| 12,092 |
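
The mistake fixed above can be reproduced with any API that copies into a caller-supplied buffer: passing the address of the pointer variable instead of the address of the buffer makes the copy land on top of the pointer itself. A small stand-alone sketch, with copy_header() standing in for skb_header_pointer() (the real kernel helper is not used here):

#include <stdio.h>
#include <string.h>

struct hdr_demo {
    unsigned short sport;
    unsigned short dport;
    unsigned char  type;
};

/* Stand-in for skb_header_pointer(): copy len bytes of packet data into
 * the caller-provided buffer and hand back a pointer to it. */
static void *copy_header(const unsigned char *pkt, size_t len, void *buffer)
{
    memcpy(buffer, pkt, len);
    return buffer;
}

int main(void)
{
    unsigned char pkt[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0xAA, 0xBB, 0xCC };
    struct hdr_demo _dh, *dh;

    /* Wrong (the pre-fix pattern): &dh is the address of the pointer, so
     * the packet bytes would overwrite the pointer variable itself.
     *
     *     dh = copy_header(pkt, sizeof(_dh), &dh);
     */

    /* Right (the fixed pattern): &_dh is the real buffer on the stack. */
    dh = copy_header(pkt, sizeof(_dh), &_dh);
    printf("type byte: 0x%02x\n", dh->type);
    return 0;
}
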
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void TraceEvent::AppendAsJSON(
std::string* out,
const ArgumentFilterPredicate& argument_filter_predicate) const {
int64 time_int64 = timestamp_.ToInternalValue();
int process_id = TraceLog::GetInstance()->process_id();
const char* category_group_name =
TraceLog::GetCategoryGroupName(category_group_enabled_);
DCHECK(!strchr(name_, '"'));
StringAppendF(out, "{\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64
","
"\"ph\":\"%c\",\"cat\":\"%s\",\"name\":\"%s\",\"args\":",
process_id, thread_id_, time_int64, phase_, category_group_name,
name_);
bool strip_args = arg_names_[0] && !argument_filter_predicate.is_null() &&
!argument_filter_predicate.Run(category_group_name, name_);
if (strip_args) {
*out += "\"__stripped__\"";
} else {
*out += "{";
for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
if (i > 0)
*out += ",";
*out += "\"";
*out += arg_names_[i];
*out += "\":";
if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
convertable_values_[i]->AppendAsTraceFormat(out);
else
AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
}
*out += "}";
}
if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
int64 duration = duration_.ToInternalValue();
if (duration != -1)
StringAppendF(out, ",\"dur\":%" PRId64, duration);
if (!thread_timestamp_.is_null()) {
int64 thread_duration = thread_duration_.ToInternalValue();
if (thread_duration != -1)
StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
}
}
if (!thread_timestamp_.is_null()) {
int64 thread_time_int64 = thread_timestamp_.ToInternalValue();
StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
}
if (flags_ & TRACE_EVENT_FLAG_ASYNC_TTS) {
StringAppendF(out, ", \"use_async_tts\":1");
}
if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));
if (flags_ & TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
StringAppendF(out, ",\"bp\":\"e\"");
if ((flags_ & TRACE_EVENT_FLAG_FLOW_OUT) ||
(flags_ & TRACE_EVENT_FLAG_FLOW_IN)) {
StringAppendF(out, ",\"bind_id\":\"0x%" PRIx64 "\"",
static_cast<uint64>(bind_id_));
}
if (flags_ & TRACE_EVENT_FLAG_FLOW_IN)
StringAppendF(out, ",\"flow_in\":true");
if (flags_ & TRACE_EVENT_FLAG_FLOW_OUT)
StringAppendF(out, ",\"flow_out\":true");
if (flags_ & TRACE_EVENT_FLAG_HAS_CONTEXT_ID)
StringAppendF(out, ",\"cid\":\"0x%" PRIx64 "\"",
static_cast<uint64>(context_id_));
if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
char scope = '?';
switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
case TRACE_EVENT_SCOPE_GLOBAL:
scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
break;
case TRACE_EVENT_SCOPE_PROCESS:
scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
break;
case TRACE_EVENT_SCOPE_THREAD:
scope = TRACE_EVENT_SCOPE_NAME_THREAD;
break;
}
StringAppendF(out, ",\"s\":\"%c\"", scope);
}
*out += "}";
}
Commit Message: Tracing: Add support for PII whitelisting of individual trace event arguments
R=dsinclair,shatch
BUG=546093
Review URL: https://codereview.chromium.org/1415013003
Cr-Commit-Position: refs/heads/master@{#356690}
CWE ID: CWE-399 | void TraceEvent::AppendAsJSON(
std::string* out,
const ArgumentFilterPredicate& argument_filter_predicate) const {
int64 time_int64 = timestamp_.ToInternalValue();
int process_id = TraceLog::GetInstance()->process_id();
const char* category_group_name =
TraceLog::GetCategoryGroupName(category_group_enabled_);
DCHECK(!strchr(name_, '"'));
StringAppendF(out, "{\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64
","
"\"ph\":\"%c\",\"cat\":\"%s\",\"name\":\"%s\",\"args\":",
process_id, thread_id_, time_int64, phase_, category_group_name,
name_);
// TODO(oysteine): The dual predicates here is a bit ugly; if the filtering
// capabilities need to grow even more precise we should rethink this
// approach
ArgumentNameFilterPredicate argument_name_filter_predicate;
bool strip_args =
arg_names_[0] && !argument_filter_predicate.is_null() &&
!argument_filter_predicate.Run(category_group_name, name_,
&argument_name_filter_predicate);
if (strip_args) {
*out += "\"__stripped__\"";
} else {
*out += "{";
for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
if (i > 0)
*out += ",";
*out += "\"";
*out += arg_names_[i];
*out += "\":";
if (argument_name_filter_predicate.is_null() ||
argument_name_filter_predicate.Run(arg_names_[i])) {
if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
convertable_values_[i]->AppendAsTraceFormat(out);
else
AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
} else {
*out += "\"__stripped__\"";
}
}
*out += "}";
}
if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
int64 duration = duration_.ToInternalValue();
if (duration != -1)
StringAppendF(out, ",\"dur\":%" PRId64, duration);
if (!thread_timestamp_.is_null()) {
int64 thread_duration = thread_duration_.ToInternalValue();
if (thread_duration != -1)
StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
}
}
if (!thread_timestamp_.is_null()) {
int64 thread_time_int64 = thread_timestamp_.ToInternalValue();
StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
}
if (flags_ & TRACE_EVENT_FLAG_ASYNC_TTS) {
StringAppendF(out, ", \"use_async_tts\":1");
}
if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));
if (flags_ & TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
StringAppendF(out, ",\"bp\":\"e\"");
if ((flags_ & TRACE_EVENT_FLAG_FLOW_OUT) ||
(flags_ & TRACE_EVENT_FLAG_FLOW_IN)) {
StringAppendF(out, ",\"bind_id\":\"0x%" PRIx64 "\"",
static_cast<uint64>(bind_id_));
}
if (flags_ & TRACE_EVENT_FLAG_FLOW_IN)
StringAppendF(out, ",\"flow_in\":true");
if (flags_ & TRACE_EVENT_FLAG_FLOW_OUT)
StringAppendF(out, ",\"flow_out\":true");
if (flags_ & TRACE_EVENT_FLAG_HAS_CONTEXT_ID)
StringAppendF(out, ",\"cid\":\"0x%" PRIx64 "\"",
static_cast<uint64>(context_id_));
if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
char scope = '?';
switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
case TRACE_EVENT_SCOPE_GLOBAL:
scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
break;
case TRACE_EVENT_SCOPE_PROCESS:
scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
break;
case TRACE_EVENT_SCOPE_THREAD:
scope = TRACE_EVENT_SCOPE_NAME_THREAD;
break;
}
StringAppendF(out, ",\"s\":\"%c\"", scope);
}
*out += "}";
}
| 2,872 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: dissect_u3v(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data)
{
gint offset = 0;
proto_tree *u3v_tree = NULL, *ccd_tree_flag, *u3v_telegram_tree = NULL, *ccd_tree = NULL;
gint data_length = 0;
gint req_id = 0;
gint command_id = -1;
gint status = 0;
guint prefix = 0;
proto_item *ti = NULL;
proto_item *item = NULL;
const char *command_string;
usb_conv_info_t *usb_conv_info;
gint stream_detected = FALSE;
gint control_detected = FALSE;
u3v_conv_info_t *u3v_conv_info = NULL;
gencp_transaction_t *gencp_trans = NULL;
usb_conv_info = (usb_conv_info_t *)data;
/* decide if this packet belongs to U3V protocol */
u3v_conv_info = (u3v_conv_info_t *)usb_conv_info->class_data;
if (!u3v_conv_info) {
u3v_conv_info = wmem_new0(wmem_file_scope(), u3v_conv_info_t);
usb_conv_info->class_data = u3v_conv_info;
}
prefix = tvb_get_letohl(tvb, 0);
if ((tvb_reported_length(tvb) >= 4) && ( ( U3V_CONTROL_PREFIX == prefix ) || ( U3V_EVENT_PREFIX == prefix ) ) ) {
control_detected = TRUE;
}
if (((tvb_reported_length(tvb) >= 4) && (( U3V_STREAM_LEADER_PREFIX == prefix ) || ( U3V_STREAM_TRAILER_PREFIX == prefix )))
|| (usb_conv_info->endpoint == u3v_conv_info->ep_stream)) {
stream_detected = TRUE;
}
/* initialize interface class/subclass in case no descriptors have been dissected yet */
if ( control_detected || stream_detected){
if ( usb_conv_info->interfaceClass == IF_CLASS_UNKNOWN &&
usb_conv_info->interfaceSubclass == IF_SUBCLASS_UNKNOWN){
usb_conv_info->interfaceClass = IF_CLASS_MISCELLANEOUS;
usb_conv_info->interfaceSubclass = IF_SUBCLASS_MISC_U3V;
}
}
if ( control_detected ) {
/* Set the protocol column */
col_set_str(pinfo->cinfo, COL_PROTOCOL, "U3V");
/* Clear out stuff in the info column */
col_clear(pinfo->cinfo, COL_INFO);
/* Adds "USB3Vision" heading to protocol tree */
/* We will add fields to this using the u3v_tree pointer */
ti = proto_tree_add_item(tree, proto_u3v, tvb, offset, -1, ENC_NA);
u3v_tree = proto_item_add_subtree(ti, ett_u3v);
prefix = tvb_get_letohl(tvb, offset);
command_id = tvb_get_letohs(tvb, offset+6);
/* decode CCD ( DCI/DCE command data layout) */
if ((prefix == U3V_CONTROL_PREFIX || prefix == U3V_EVENT_PREFIX) && ((command_id % 2) == 0)) {
command_string = val_to_str(command_id,command_names,"Unknown Command (0x%x)");
item = proto_tree_add_item(u3v_tree, hf_u3v_ccd_cmd, tvb, offset, 8, ENC_NA);
proto_item_append_text(item, ": %s", command_string);
ccd_tree = proto_item_add_subtree(item, ett_u3v_cmd);
/* Add the prefix code: */
proto_tree_add_item(ccd_tree, hf_u3v_gencp_prefix, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
/* Add the flags */
item = proto_tree_add_item(ccd_tree, hf_u3v_flag, tvb, offset, 2, ENC_LITTLE_ENDIAN);
ccd_tree_flag = proto_item_add_subtree(item, ett_u3v_flags);
proto_tree_add_item(ccd_tree_flag, hf_u3v_acknowledge_required_flag, tvb, offset, 2, ENC_LITTLE_ENDIAN);
offset += 2;
col_append_fstr(pinfo->cinfo, COL_INFO, "> %s ", command_string);
} else if (prefix == U3V_CONTROL_PREFIX && ((command_id % 2) == 1)) {
command_string = val_to_str(command_id,command_names,"Unknown Acknowledge (0x%x)");
item = proto_tree_add_item(u3v_tree, hf_u3v_ccd_ack, tvb, offset, 8, ENC_NA);
proto_item_append_text(item, ": %s", command_string);
ccd_tree = proto_item_add_subtree(item, ett_u3v_ack);
/* Add the prefix code: */
proto_tree_add_item(ccd_tree, hf_u3v_gencp_prefix, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
/* Add the status: */
proto_tree_add_item(ccd_tree, hf_u3v_status, tvb, offset, 2,ENC_LITTLE_ENDIAN);
status = tvb_get_letohs(tvb, offset);
offset += 2;
col_append_fstr(pinfo->cinfo, COL_INFO, "< %s %s",
command_string,
val_to_str(status, status_names_short, "Unknown status (0x%04X)"));
} else {
return 0;
}
/* Add the command id*/
proto_tree_add_item(ccd_tree, hf_u3v_command_id, tvb, offset, 2,ENC_LITTLE_ENDIAN);
offset += 2;
/* Parse the second part of both the command and the acknowledge header:
0 15 16 31
-------- -------- -------- --------
| status | acknowledge |
-------- -------- -------- --------
| length | req_id |
-------- -------- -------- --------
Add the data length
Number of valid data bytes in this message, not including this header. This
represents the number of bytes of payload appended after this header */
proto_tree_add_item(ccd_tree, hf_u3v_length, tvb, offset, 2, ENC_LITTLE_ENDIAN);
data_length = tvb_get_letohs(tvb, offset);
offset += 2;
/* Add the request ID */
proto_tree_add_item(ccd_tree, hf_u3v_request_id, tvb, offset, 2, ENC_LITTLE_ENDIAN);
req_id = tvb_get_letohs(tvb, offset);
offset += 2;
/* Add telegram subtree */
u3v_telegram_tree = proto_item_add_subtree(u3v_tree, ett_u3v);
if (!PINFO_FD_VISITED(pinfo)) {
if ((command_id % 2) == 0) {
/* This is a command */
gencp_trans = wmem_new(wmem_file_scope(), gencp_transaction_t);
gencp_trans->cmd_frame = pinfo->fd->num;
gencp_trans->ack_frame = 0;
gencp_trans->cmd_time = pinfo->fd->abs_ts;
/* add reference to current packet */
p_add_proto_data(wmem_file_scope(), pinfo, proto_u3v, req_id, gencp_trans);
/* add reference to current */
u3v_conv_info->trans_info = gencp_trans;
} else {
gencp_trans = u3v_conv_info->trans_info;
if (gencp_trans) {
gencp_trans->ack_frame = pinfo->fd->num;
/* add reference to current packet */
p_add_proto_data(wmem_file_scope(), pinfo, proto_u3v, req_id, gencp_trans);
}
}
} else {
gencp_trans = (gencp_transaction_t*)p_get_proto_data(wmem_file_scope(),pinfo, proto_u3v, req_id);
}
if (!gencp_trans) {
/* create a "fake" gencp_trans structure */
gencp_trans = wmem_new(wmem_packet_scope(), gencp_transaction_t);
gencp_trans->cmd_frame = 0;
gencp_trans->ack_frame = 0;
gencp_trans->cmd_time = pinfo->fd->abs_ts;
}
/* dissect depending on command? */
switch (command_id) {
case U3V_READMEM_CMD:
dissect_u3v_read_mem_cmd(u3v_telegram_tree, tvb, pinfo, offset, data_length,u3v_conv_info,gencp_trans);
break;
case U3V_WRITEMEM_CMD:
dissect_u3v_write_mem_cmd(u3v_telegram_tree, tvb, pinfo, offset, data_length,u3v_conv_info,gencp_trans);
break;
case U3V_EVENT_CMD:
dissect_u3v_event_cmd(u3v_telegram_tree, tvb, pinfo, offset, data_length);
break;
case U3V_READMEM_ACK:
if ( U3V_STATUS_GENCP_SUCCESS == status ) {
dissect_u3v_read_mem_ack(u3v_telegram_tree, tvb, pinfo, offset, data_length,u3v_conv_info,gencp_trans);
}
break;
case U3V_WRITEMEM_ACK:
dissect_u3v_write_mem_ack(u3v_telegram_tree, tvb, pinfo, offset, data_length, u3v_conv_info,gencp_trans);
break;
case U3V_PENDING_ACK:
dissect_u3v_pending_ack(u3v_telegram_tree, tvb, pinfo, offset, data_length, u3v_conv_info,gencp_trans);
break;
default:
proto_tree_add_item(u3v_telegram_tree, hf_u3v_payloaddata, tvb, offset, data_length, ENC_NA);
break;
}
return data_length + 12;
} else if ( stream_detected ) {
/* this is streaming data */
/* init this stream configuration */
u3v_conv_info = (u3v_conv_info_t *)usb_conv_info->class_data;
u3v_conv_info->ep_stream = usb_conv_info->endpoint;
/* Set the protocol column */
col_set_str(pinfo->cinfo, COL_PROTOCOL, "U3V");
/* Clear out stuff in the info column */
col_clear(pinfo->cinfo, COL_INFO);
/* Adds "USB3Vision" heading to protocol tree */
/* We will add fields to this using the u3v_tree pointer */
ti = proto_tree_add_item(tree, proto_u3v, tvb, offset, -1, ENC_NA);
u3v_tree = proto_item_add_subtree(ti, ett_u3v);
if(tvb_captured_length(tvb) >=4) {
prefix = tvb_get_letohl(tvb, offset);
switch (prefix) {
case U3V_STREAM_LEADER_PREFIX:
dissect_u3v_stream_leader(u3v_tree, tvb, pinfo, usb_conv_info);
break;
case U3V_STREAM_TRAILER_PREFIX:
dissect_u3v_stream_trailer(u3v_tree, tvb, pinfo, usb_conv_info);
break;
default:
dissect_u3v_stream_payload(u3v_tree, tvb, pinfo, usb_conv_info);
break;
}
}
return tvb_captured_length(tvb);
}
return 0;
}
Commit Message: Make class "type" for USB conversations.
USB dissectors can't assume that only their own class type has been passed around in the conversation. Make an explicit check that the expected class type matches the dissector and stop/prevent dissection if there isn't a match.
Bug: 12356
Change-Id: Ib23973a4ebd0fbb51952ffc118daf95e3389a209
Reviewed-on: https://code.wireshark.org/review/15212
Petri-Dish: Michael Mann <[email protected]>
Reviewed-by: Martin Kaiser <[email protected]>
Petri-Dish: Martin Kaiser <[email protected]>
Tested-by: Petri Dish Buildbot <[email protected]>
Reviewed-by: Michael Mann <[email protected]>
CWE ID: CWE-476 | dissect_u3v(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data)
{
gint offset = 0;
proto_tree *u3v_tree = NULL, *ccd_tree_flag, *u3v_telegram_tree = NULL, *ccd_tree = NULL;
gint data_length = 0;
gint req_id = 0;
gint command_id = -1;
gint status = 0;
guint prefix = 0;
proto_item *ti = NULL;
proto_item *item = NULL;
const char *command_string;
usb_conv_info_t *usb_conv_info;
gint stream_detected = FALSE;
gint control_detected = FALSE;
u3v_conv_info_t *u3v_conv_info = NULL;
gencp_transaction_t *gencp_trans = NULL;
usb_conv_info = (usb_conv_info_t *)data;
/* decide if this packet belongs to U3V protocol */
u3v_conv_info = (u3v_conv_info_t *)usb_conv_info->class_data;
if (!u3v_conv_info) {
u3v_conv_info = wmem_new0(wmem_file_scope(), u3v_conv_info_t);
usb_conv_info->class_data = u3v_conv_info;
usb_conv_info->class_data_type = USB_CONV_U3V;
} else if (usb_conv_info->class_data_type != USB_CONV_U3V) {
/* Don't dissect if another USB type is in the conversation */
return 0;
}
prefix = tvb_get_letohl(tvb, 0);
if ((tvb_reported_length(tvb) >= 4) && ( ( U3V_CONTROL_PREFIX == prefix ) || ( U3V_EVENT_PREFIX == prefix ) ) ) {
control_detected = TRUE;
}
if (((tvb_reported_length(tvb) >= 4) && (( U3V_STREAM_LEADER_PREFIX == prefix ) || ( U3V_STREAM_TRAILER_PREFIX == prefix )))
|| (usb_conv_info->endpoint == u3v_conv_info->ep_stream)) {
stream_detected = TRUE;
}
/* initialize interface class/subclass in case no descriptors have been dissected yet */
if ( control_detected || stream_detected){
if ( usb_conv_info->interfaceClass == IF_CLASS_UNKNOWN &&
usb_conv_info->interfaceSubclass == IF_SUBCLASS_UNKNOWN){
usb_conv_info->interfaceClass = IF_CLASS_MISCELLANEOUS;
usb_conv_info->interfaceSubclass = IF_SUBCLASS_MISC_U3V;
}
}
if ( control_detected ) {
/* Set the protocol column */
col_set_str(pinfo->cinfo, COL_PROTOCOL, "U3V");
/* Clear out stuff in the info column */
col_clear(pinfo->cinfo, COL_INFO);
/* Adds "USB3Vision" heading to protocol tree */
/* We will add fields to this using the u3v_tree pointer */
ti = proto_tree_add_item(tree, proto_u3v, tvb, offset, -1, ENC_NA);
u3v_tree = proto_item_add_subtree(ti, ett_u3v);
prefix = tvb_get_letohl(tvb, offset);
command_id = tvb_get_letohs(tvb, offset+6);
/* decode CCD ( DCI/DCE command data layout) */
if ((prefix == U3V_CONTROL_PREFIX || prefix == U3V_EVENT_PREFIX) && ((command_id % 2) == 0)) {
command_string = val_to_str(command_id,command_names,"Unknown Command (0x%x)");
item = proto_tree_add_item(u3v_tree, hf_u3v_ccd_cmd, tvb, offset, 8, ENC_NA);
proto_item_append_text(item, ": %s", command_string);
ccd_tree = proto_item_add_subtree(item, ett_u3v_cmd);
/* Add the prefix code: */
proto_tree_add_item(ccd_tree, hf_u3v_gencp_prefix, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
/* Add the flags */
item = proto_tree_add_item(ccd_tree, hf_u3v_flag, tvb, offset, 2, ENC_LITTLE_ENDIAN);
ccd_tree_flag = proto_item_add_subtree(item, ett_u3v_flags);
proto_tree_add_item(ccd_tree_flag, hf_u3v_acknowledge_required_flag, tvb, offset, 2, ENC_LITTLE_ENDIAN);
offset += 2;
col_append_fstr(pinfo->cinfo, COL_INFO, "> %s ", command_string);
} else if (prefix == U3V_CONTROL_PREFIX && ((command_id % 2) == 1)) {
command_string = val_to_str(command_id,command_names,"Unknown Acknowledge (0x%x)");
item = proto_tree_add_item(u3v_tree, hf_u3v_ccd_ack, tvb, offset, 8, ENC_NA);
proto_item_append_text(item, ": %s", command_string);
ccd_tree = proto_item_add_subtree(item, ett_u3v_ack);
/* Add the prefix code: */
proto_tree_add_item(ccd_tree, hf_u3v_gencp_prefix, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
/* Add the status: */
proto_tree_add_item(ccd_tree, hf_u3v_status, tvb, offset, 2,ENC_LITTLE_ENDIAN);
status = tvb_get_letohs(tvb, offset);
offset += 2;
col_append_fstr(pinfo->cinfo, COL_INFO, "< %s %s",
command_string,
val_to_str(status, status_names_short, "Unknown status (0x%04X)"));
} else {
return 0;
}
/* Add the command id*/
proto_tree_add_item(ccd_tree, hf_u3v_command_id, tvb, offset, 2,ENC_LITTLE_ENDIAN);
offset += 2;
/* Parse the second part of both the command and the acknowledge header:
0 15 16 31
-------- -------- -------- --------
| status | acknowledge |
-------- -------- -------- --------
| length | req_id |
-------- -------- -------- --------
Add the data length
Number of valid data bytes in this message, not including this header. This
represents the number of bytes of payload appended after this header */
proto_tree_add_item(ccd_tree, hf_u3v_length, tvb, offset, 2, ENC_LITTLE_ENDIAN);
data_length = tvb_get_letohs(tvb, offset);
offset += 2;
/* Add the request ID */
proto_tree_add_item(ccd_tree, hf_u3v_request_id, tvb, offset, 2, ENC_LITTLE_ENDIAN);
req_id = tvb_get_letohs(tvb, offset);
offset += 2;
/* Add telegram subtree */
u3v_telegram_tree = proto_item_add_subtree(u3v_tree, ett_u3v);
if (!PINFO_FD_VISITED(pinfo)) {
if ((command_id % 2) == 0) {
/* This is a command */
gencp_trans = wmem_new(wmem_file_scope(), gencp_transaction_t);
gencp_trans->cmd_frame = pinfo->fd->num;
gencp_trans->ack_frame = 0;
gencp_trans->cmd_time = pinfo->fd->abs_ts;
/* add reference to current packet */
p_add_proto_data(wmem_file_scope(), pinfo, proto_u3v, req_id, gencp_trans);
/* add reference to current */
u3v_conv_info->trans_info = gencp_trans;
} else {
gencp_trans = u3v_conv_info->trans_info;
if (gencp_trans) {
gencp_trans->ack_frame = pinfo->fd->num;
/* add reference to current packet */
p_add_proto_data(wmem_file_scope(), pinfo, proto_u3v, req_id, gencp_trans);
}
}
} else {
gencp_trans = (gencp_transaction_t*)p_get_proto_data(wmem_file_scope(),pinfo, proto_u3v, req_id);
}
if (!gencp_trans) {
/* create a "fake" gencp_trans structure */
gencp_trans = wmem_new(wmem_packet_scope(), gencp_transaction_t);
gencp_trans->cmd_frame = 0;
gencp_trans->ack_frame = 0;
gencp_trans->cmd_time = pinfo->fd->abs_ts;
}
/* dissect depending on command? */
switch (command_id) {
case U3V_READMEM_CMD:
dissect_u3v_read_mem_cmd(u3v_telegram_tree, tvb, pinfo, offset, data_length,u3v_conv_info,gencp_trans);
break;
case U3V_WRITEMEM_CMD:
dissect_u3v_write_mem_cmd(u3v_telegram_tree, tvb, pinfo, offset, data_length,u3v_conv_info,gencp_trans);
break;
case U3V_EVENT_CMD:
dissect_u3v_event_cmd(u3v_telegram_tree, tvb, pinfo, offset, data_length);
break;
case U3V_READMEM_ACK:
if ( U3V_STATUS_GENCP_SUCCESS == status ) {
dissect_u3v_read_mem_ack(u3v_telegram_tree, tvb, pinfo, offset, data_length,u3v_conv_info,gencp_trans);
}
break;
case U3V_WRITEMEM_ACK:
dissect_u3v_write_mem_ack(u3v_telegram_tree, tvb, pinfo, offset, data_length, u3v_conv_info,gencp_trans);
break;
case U3V_PENDING_ACK:
dissect_u3v_pending_ack(u3v_telegram_tree, tvb, pinfo, offset, data_length, u3v_conv_info,gencp_trans);
break;
default:
proto_tree_add_item(u3v_telegram_tree, hf_u3v_payloaddata, tvb, offset, data_length, ENC_NA);
break;
}
return data_length + 12;
} else if ( stream_detected ) {
/* this is streaming data */
/* init this stream configuration */
u3v_conv_info = (u3v_conv_info_t *)usb_conv_info->class_data;
u3v_conv_info->ep_stream = usb_conv_info->endpoint;
/* Set the protocol column */
col_set_str(pinfo->cinfo, COL_PROTOCOL, "U3V");
/* Clear out stuff in the info column */
col_clear(pinfo->cinfo, COL_INFO);
/* Adds "USB3Vision" heading to protocol tree */
/* We will add fields to this using the u3v_tree pointer */
ti = proto_tree_add_item(tree, proto_u3v, tvb, offset, -1, ENC_NA);
u3v_tree = proto_item_add_subtree(ti, ett_u3v);
if(tvb_captured_length(tvb) >=4) {
prefix = tvb_get_letohl(tvb, offset);
switch (prefix) {
case U3V_STREAM_LEADER_PREFIX:
dissect_u3v_stream_leader(u3v_tree, tvb, pinfo, usb_conv_info);
break;
case U3V_STREAM_TRAILER_PREFIX:
dissect_u3v_stream_trailer(u3v_tree, tvb, pinfo, usb_conv_info);
break;
default:
dissect_u3v_stream_payload(u3v_tree, tvb, pinfo, usb_conv_info);
break;
}
}
return tvb_captured_length(tvb);
}
return 0;
}
| 16,389 |
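
The fix above pairs the opaque class_data pointer with a type tag so each dissector only casts per-conversation data it actually owns. A generic sketch of that pattern; conv_info, CONV_FOO and CONV_BAR are invented names for illustration, not Wireshark API:

#include <stdio.h>
#include <stdlib.h>

enum conv_data_type { CONV_NONE = 0, CONV_FOO, CONV_BAR };

struct conv_info {
    void *class_data;                   /* opaque per-conversation state */
    enum conv_data_type class_data_type;
};

struct foo_state { int packets; };

/* Return this dissector's own state, allocating and tagging it on first
 * use, and refuse to reinterpret state owned by a different dissector. */
static struct foo_state *foo_get_state(struct conv_info *conv)
{
    if (!conv->class_data) {
        conv->class_data = calloc(1, sizeof(struct foo_state));
        if (conv->class_data)
            conv->class_data_type = CONV_FOO;
    } else if (conv->class_data_type != CONV_FOO) {
        return NULL;                    /* someone else's data: hands off */
    }
    return conv->class_data;
}

int main(void)
{
    struct conv_info conv = { 0 };
    struct foo_state *st = foo_get_state(&conv);

    if (st)
        st->packets++;

    /* Simulate another protocol having claimed the conversation. */
    conv.class_data_type = CONV_BAR;
    printf("second lookup: %p\n", (void *)foo_get_state(&conv)); /* (nil) */

    free(conv.class_data);
    return 0;
}
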
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static Image *ReadARTImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
MagickBooleanType
status;
size_t
length;
ssize_t
count,
y;
unsigned char
*pixels;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image->depth=1;
image->endian=MSBEndian;
(void) ReadBlobLSBShort(image);
image->columns=(size_t) ReadBlobLSBShort(image);
(void) ReadBlobLSBShort(image);
image->rows=(size_t) ReadBlobLSBShort(image);
if ((image->columns == 0) || (image->rows == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize image colormap.
*/
if (AcquireImageColormap(image,2) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image_info->ping != MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
Convert bi-level image to pixel packets.
*/
SetImageColorspace(image,GRAYColorspace);
quantum_type=IndexQuantum;
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixels=GetQuantumPixels(quantum_info);
length=GetQuantumExtent(image,quantum_info,quantum_type);
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*restrict q;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
count=ReadBlob(image,length,pixels);
if (count != (ssize_t) length)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
count=ReadBlob(image,(size_t) (-(ssize_t) length) & 0x01,pixels);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
SetQuantumImageType(image,quantum_type);
quantum_info=DestroyQuantumInfo(quantum_info);
if (EOFBlob(image) != MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
Commit Message:
CWE ID: CWE-119 | static Image *ReadARTImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
MagickBooleanType
status;
size_t
length;
ssize_t
count,
y;
unsigned char
*pixels;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image->depth=1;
image->endian=MSBEndian;
(void) ReadBlobLSBShort(image);
image->columns=(size_t) ReadBlobLSBShort(image);
(void) ReadBlobLSBShort(image);
image->rows=(size_t) ReadBlobLSBShort(image);
if ((image->columns == 0) || (image->rows == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize image colormap.
*/
if (AcquireImageColormap(image,2) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image_info->ping != MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
/*
Convert bi-level image to pixel packets.
*/
SetImageColorspace(image,GRAYColorspace);
quantum_type=IndexQuantum;
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixels=GetQuantumPixels(quantum_info);
length=GetQuantumExtent(image,quantum_info,quantum_type);
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*restrict q;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
count=ReadBlob(image,length,pixels);
if (count != (ssize_t) length)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
count=ReadBlob(image,(size_t) (-(ssize_t) length) & 0x01,pixels);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
SetQuantumImageType(image,quantum_type);
quantum_info=DestroyQuantumInfo(quantum_info);
if (EOFBlob(image) != MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
| 12,937 |
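
The SetImageExtent() call added above makes the reader validate the header-supplied geometry and size the pixel cache before any row is decoded. A generic sketch of the same precaution, bounding each dimension and checking the width by height multiplication before allocating; alloc_canvas() and MAX_DIMENSION are made up for the example:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_DIMENSION 65535u    /* arbitrary per-axis cap for the sketch */

/* Allocate a one-byte-per-pixel canvas only if the claimed dimensions are
 * sane and width * height cannot overflow size_t. */
static unsigned char *alloc_canvas(uint32_t width, uint32_t height)
{
    if (width == 0 || height == 0)
        return NULL;
    if (width > MAX_DIMENSION || height > MAX_DIMENSION)
        return NULL;
    if ((size_t)width > SIZE_MAX / height)
        return NULL;            /* multiplication would overflow */
    return calloc((size_t)width * height, 1);
}

int main(void)
{
    /* Dimensions as they might arrive from an untrusted file header. */
    unsigned char *canvas = alloc_canvas(640, 480);

    if (!canvas) {
        fprintf(stderr, "rejecting image geometry\n");
        return 1;
    }
    /* ... row-by-row decode would run here, inside the checked bounds ... */
    free(canvas);
    return 0;
}
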
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int __init pf_init(void)
{ /* preliminary initialisation */
struct pf_unit *pf;
int unit;
if (disable)
return -EINVAL;
pf_init_units();
if (pf_detect())
return -ENODEV;
pf_busy = 0;
if (register_blkdev(major, name)) {
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
put_disk(pf->disk);
return -EBUSY;
}
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
struct gendisk *disk = pf->disk;
if (!pf->present)
continue;
disk->private_data = pf;
add_disk(disk);
}
return 0;
}
Commit Message: paride/pf: Fix potential NULL pointer dereference
Syzkaller report this:
pf: pf version 1.04, major 47, cluster 64, nice 0
pf: No ATAPI disk detected
kasan: CONFIG_KASAN_INLINE enabled
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN PTI
CPU: 0 PID: 9887 Comm: syz-executor.0 Tainted: G C 5.1.0-rc3+ #8
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
RIP: 0010:pf_init+0x7af/0x1000 [pf]
Code: 46 77 d2 48 89 d8 48 c1 e8 03 80 3c 28 00 74 08 48 89 df e8 03 25 a6 d2 4c 8b 23 49 8d bc 24 80 05 00 00 48 89 f8 48 c1 e8 03 <80> 3c 28 00 74 05 e8 e6 24 a6 d2 49 8b bc 24 80 05 00 00 e8 79 34
RSP: 0018:ffff8881abcbf998 EFLAGS: 00010202
RAX: 00000000000000b0 RBX: ffffffffc1e4a8a8 RCX: ffffffffaec50788
RDX: 0000000000039b10 RSI: ffffc9000153c000 RDI: 0000000000000580
RBP: dffffc0000000000 R08: ffffed103ee44e59 R09: ffffed103ee44e59
R10: 0000000000000001 R11: ffffed103ee44e58 R12: 0000000000000000
R13: ffffffffc1e4b028 R14: 0000000000000000 R15: 0000000000000020
FS: 00007f1b78a91700(0000) GS:ffff8881f7200000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007f6d72b207f8 CR3: 00000001d5790004 CR4: 00000000007606f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
PKRU: 55555554
Call Trace:
? 0xffffffffc1e50000
do_one_initcall+0xbc/0x47d init/main.c:901
do_init_module+0x1b5/0x547 kernel/module.c:3456
load_module+0x6405/0x8c10 kernel/module.c:3804
__do_sys_finit_module+0x162/0x190 kernel/module.c:3898
do_syscall_64+0x9f/0x450 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x462e99
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f1b78a90c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99
RDX: 0000000000000000 RSI: 0000000020000180 RDI: 0000000000000003
RBP: 00007f1b78a90c70 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f1b78a916bc
R13: 00000000004bcefa R14: 00000000006f6fb0 R15: 0000000000000004
Modules linked in: pf(+) paride gpio_tps65218 tps65218 i2c_cht_wc ati_remote dc395x act_meta_skbtcindex act_ife ife ecdh_generic rc_xbox_dvd sky81452_regulator v4l2_fwnode leds_blinkm snd_usb_hiface comedi(C) aes_ti slhc cfi_cmdset_0020 mtd cfi_util sx8654 mdio_gpio of_mdio fixed_phy mdio_bitbang libphy alcor_pci matrix_keymap hid_uclogic usbhid scsi_transport_fc videobuf2_v4l2 videobuf2_dma_sg snd_soc_pcm179x_spi snd_soc_pcm179x_codec i2c_demux_pinctrl mdev snd_indigodj isl6405 mii enc28j60 cmac adt7316_i2c(C) adt7316(C) fmc_trivial fmc nf_reject_ipv4 authenc rc_dtt200u rtc_ds1672 dvb_usb_dibusb_mc dvb_usb_dibusb_mc_common dib3000mc dibx000_common dvb_usb_dibusb_common dvb_usb dvb_core videobuf2_common videobuf2_vmalloc videobuf2_memops regulator_haptic adf7242 mac802154 ieee802154 s5h1409 da9034_ts snd_intel8x0m wmi cx24120 usbcore sdhci_cadence sdhci_pltfm sdhci mmc_core joydev i2c_algo_bit scsi_transport_iscsi iscsi_boot_sysfs ves1820 lockd grace nfs_acl auth_rpcgss sunrp
c
ip_vs snd_soc_adau7002 snd_cs4281 snd_rawmidi gameport snd_opl3_lib snd_seq_device snd_hwdep snd_ac97_codec ad7418 hid_primax hid snd_soc_cs4265 snd_soc_core snd_pcm_dmaengine snd_pcm snd_timer ac97_bus snd_compress snd soundcore ti_adc108s102 eeprom_93cx6 i2c_algo_pca mlxreg_hotplug st_pressure st_sensors industrialio_triggered_buffer kfifo_buf industrialio v4l2_common videodev media snd_soc_adau_utils rc_pinnacle_grey rc_core pps_gpio leds_lm3692x nandcore ledtrig_pattern iptable_security iptable_raw iptable_mangle iptable_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 iptable_filter bpfilter ip6_vti ip_vti ip_gre ipip sit tunnel4 ip_tunnel hsr veth netdevsim vxcan batman_adv cfg80211 rfkill chnl_net caif nlmon dummy team bonding vcan bridge stp llc ip6_gre gre ip6_tunnel tunnel6 tun mousedev ppdev tpm kvm_intel kvm irqbypass crct10dif_pclmul crc32_pclmul crc32c_intel ghash_clmulni_intel aesni_intel ide_pci_generic aes_x86_64 piix crypto_simd input_leds psmouse cryp
td
glue_helper ide_core intel_agp serio_raw intel_gtt agpgart ata_generic i2c_piix4 pata_acpi parport_pc parport rtc_cmos floppy sch_fq_codel ip_tables x_tables sha1_ssse3 sha1_generic ipv6 [last unloaded: paride]
Dumping ftrace buffer:
(ftrace buffer empty)
---[ end trace 7a818cf5f210d79e ]---
If alloc_disk fails in pf_init_units, pf->disk will be
NULL; however, pf_detect and pf_exit do not check
this before freeing it, which may result in a NULL pointer dereference.
Also, when register_blkdev fails, blk_cleanup_queue() and
blk_mq_free_tag_set() should be called to free resources.
Reported-by: Hulk Robot <[email protected]>
Fixes: 6ce59025f118 ("paride/pf: cleanup queues when detection fails")
Signed-off-by: YueHaibing <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
CWE ID: CWE-476 | static int __init pf_init(void)
{ /* preliminary initialisation */
struct pf_unit *pf;
int unit;
if (disable)
return -EINVAL;
pf_init_units();
if (pf_detect())
return -ENODEV;
pf_busy = 0;
if (register_blkdev(major, name)) {
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
if (!pf->disk)
continue;
blk_cleanup_queue(pf->disk->queue);
blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
}
return -EBUSY;
}
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
struct gendisk *disk = pf->disk;
if (!pf->present)
continue;
disk->private_data = pf;
add_disk(disk);
}
return 0;
}
| 4,902 |
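
The guard added above (if (!pf->disk) continue;) is the usual shape for tearing down a partially initialized array: skip members whose allocation never happened. A small sketch with a hypothetical unit struct, where release() dereferences its argument the way put_disk() and blk_cleanup_queue() would:

#include <stdio.h>
#include <stdlib.h>

#define NUNITS 4

struct unit { char *buf; };

static struct unit units[NUNITS];

static void release(struct unit *u)
{
    u->buf[0] = '\0';           /* dereferences, so NULL here would crash */
    free(u->buf);
    u->buf = NULL;
}

/* Teardown that tolerates members which were never set up. */
static void cleanup_units(void)
{
    for (int i = 0; i < NUNITS; i++) {
        if (!units[i].buf)
            continue;           /* the guard the fix adds */
        release(&units[i]);
    }
}

int main(void)
{
    /* Simulate a partial init: the third allocation "fails". */
    for (int i = 0; i < NUNITS; i++)
        units[i].buf = (i == 2) ? NULL : malloc(64);

    cleanup_units();            /* safe despite the hole at index 2 */
    printf("cleanup done\n");
    return 0;
}
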
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags, int *addr_len)
{
struct inet_sock *isk = inet_sk(sk);
int family = sk->sk_family;
struct sk_buff *skb;
int copied, err;
pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num);
err = -EOPNOTSUPP;
if (flags & MSG_OOB)
goto out;
if (flags & MSG_ERRQUEUE) {
if (family == AF_INET) {
return ip_recv_error(sk, msg, len);
#if IS_ENABLED(CONFIG_IPV6)
} else if (family == AF_INET6) {
return pingv6_ops.ipv6_recv_error(sk, msg, len);
#endif
}
}
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (copied > len) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
/* Don't bother checking the checksum */
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto done;
sock_recv_timestamp(msg, sk, skb);
/* Copy the address and add cmsg data. */
if (family == AF_INET) {
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
sin->sin_family = AF_INET;
sin->sin_port = 0 /* skb->h.uh->source */;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
*addr_len = sizeof(*sin);
if (isk->cmsg_flags)
ip_cmsg_recv(msg, skb);
#if IS_ENABLED(CONFIG_IPV6)
} else if (family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6hdr *ip6 = ipv6_hdr(skb);
struct sockaddr_in6 *sin6 =
(struct sockaddr_in6 *)msg->msg_name;
sin6->sin6_family = AF_INET6;
sin6->sin6_port = 0;
sin6->sin6_addr = ip6->saddr;
sin6->sin6_flowinfo = 0;
if (np->sndflow)
sin6->sin6_flowinfo = ip6_flowinfo(ip6);
sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
IP6CB(skb)->iif);
*addr_len = sizeof(*sin6);
if (inet6_sk(sk)->rxopt.all)
pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb);
#endif
} else {
BUG();
}
err = copied;
done:
skb_free_datagram(sk, skb);
out:
pr_debug("ping_recvmsg -> %d\n", err);
return err;
}
Commit Message: ping: prevent NULL pointer dereference on write to msg_name
A plain read() on a socket does set msg->msg_name to NULL. So check for
NULL pointer first.
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: | int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags, int *addr_len)
{
struct inet_sock *isk = inet_sk(sk);
int family = sk->sk_family;
struct sk_buff *skb;
int copied, err;
pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num);
err = -EOPNOTSUPP;
if (flags & MSG_OOB)
goto out;
if (flags & MSG_ERRQUEUE) {
if (family == AF_INET) {
return ip_recv_error(sk, msg, len);
#if IS_ENABLED(CONFIG_IPV6)
} else if (family == AF_INET6) {
return pingv6_ops.ipv6_recv_error(sk, msg, len);
#endif
}
}
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (copied > len) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
/* Don't bother checking the checksum */
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto done;
sock_recv_timestamp(msg, sk, skb);
/* Copy the address and add cmsg data. */
if (family == AF_INET) {
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
if (sin) {
sin->sin_family = AF_INET;
sin->sin_port = 0 /* skb->h.uh->source */;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
*addr_len = sizeof(*sin);
}
if (isk->cmsg_flags)
ip_cmsg_recv(msg, skb);
#if IS_ENABLED(CONFIG_IPV6)
} else if (family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6hdr *ip6 = ipv6_hdr(skb);
struct sockaddr_in6 *sin6 =
(struct sockaddr_in6 *)msg->msg_name;
if (sin6) {
sin6->sin6_family = AF_INET6;
sin6->sin6_port = 0;
sin6->sin6_addr = ip6->saddr;
sin6->sin6_flowinfo = 0;
if (np->sndflow)
sin6->sin6_flowinfo = ip6_flowinfo(ip6);
sin6->sin6_scope_id =
ipv6_iface_scope_id(&sin6->sin6_addr,
IP6CB(skb)->iif);
*addr_len = sizeof(*sin6);
}
if (inet6_sk(sk)->rxopt.all)
pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb);
#endif
} else {
BUG();
}
err = copied;
done:
skb_free_datagram(sk, skb);
out:
pr_debug("ping_recvmsg -> %d\n", err);
return err;
}
| 17,687 |
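
The fix above treats msg_name as an optional out-parameter: a plain read() on the socket leaves it NULL, so the address copy-out is simply skipped rather than dereferenced. A generic sketch of that contract with a made-up lookup_peer() helper:

#include <stdio.h>
#include <string.h>

struct peer_addr {
    unsigned int   ip;
    unsigned short port;
};

/* Fill *out with the peer address only if the caller asked for it; a NULL
 * out-pointer means "not interested", not an error. */
static int lookup_peer(struct peer_addr *out)
{
    if (out) {
        memset(out, 0, sizeof(*out));
        out->ip = 0x7f000001u;      /* 127.0.0.1, hard-coded for the sketch */
        out->port = 4242;
    }
    return 0;                       /* success either way */
}

int main(void)
{
    struct peer_addr a;

    lookup_peer(&a);                /* recvfrom()-style caller wants it */
    printf("port %u\n", a.port);

    lookup_peer(NULL);              /* read()-style caller does not */
    return 0;
}
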
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int parse_options(char *options, struct super_block *sb,
unsigned long *journal_devnum,
unsigned int *journal_ioprio,
ext4_fsblk_t *n_blocks_count, int is_remount)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
char *p;
substring_t args[MAX_OPT_ARGS];
int data_opt = 0;
int option;
#ifdef CONFIG_QUOTA
int qfmt;
#endif
if (!options)
return 1;
while ((p = strsep(&options, ",")) != NULL) {
int token;
if (!*p)
continue;
/*
* Initialize args struct so we know whether arg was
* found; some options take optional arguments.
*/
args[0].to = args[0].from = 0;
token = match_token(p, tokens, args);
switch (token) {
case Opt_bsd_df:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
clear_opt(sbi->s_mount_opt, MINIX_DF);
break;
case Opt_minix_df:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
set_opt(sbi->s_mount_opt, MINIX_DF);
break;
case Opt_grpid:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
set_opt(sbi->s_mount_opt, GRPID);
break;
case Opt_nogrpid:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
clear_opt(sbi->s_mount_opt, GRPID);
break;
case Opt_resuid:
if (match_int(&args[0], &option))
return 0;
sbi->s_resuid = option;
break;
case Opt_resgid:
if (match_int(&args[0], &option))
return 0;
sbi->s_resgid = option;
break;
case Opt_sb:
/* handled by get_sb_block() instead of here */
/* *sb_block = match_int(&args[0]); */
break;
case Opt_err_panic:
clear_opt(sbi->s_mount_opt, ERRORS_CONT);
clear_opt(sbi->s_mount_opt, ERRORS_RO);
set_opt(sbi->s_mount_opt, ERRORS_PANIC);
break;
case Opt_err_ro:
clear_opt(sbi->s_mount_opt, ERRORS_CONT);
clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
set_opt(sbi->s_mount_opt, ERRORS_RO);
break;
case Opt_err_cont:
clear_opt(sbi->s_mount_opt, ERRORS_RO);
clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
set_opt(sbi->s_mount_opt, ERRORS_CONT);
break;
case Opt_nouid32:
set_opt(sbi->s_mount_opt, NO_UID32);
break;
case Opt_debug:
set_opt(sbi->s_mount_opt, DEBUG);
break;
case Opt_oldalloc:
set_opt(sbi->s_mount_opt, OLDALLOC);
break;
case Opt_orlov:
clear_opt(sbi->s_mount_opt, OLDALLOC);
break;
#ifdef CONFIG_EXT4_FS_XATTR
case Opt_user_xattr:
set_opt(sbi->s_mount_opt, XATTR_USER);
break;
case Opt_nouser_xattr:
clear_opt(sbi->s_mount_opt, XATTR_USER);
break;
#else
case Opt_user_xattr:
case Opt_nouser_xattr:
ext4_msg(sb, KERN_ERR, "(no)user_xattr options not supported");
break;
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
case Opt_acl:
set_opt(sbi->s_mount_opt, POSIX_ACL);
break;
case Opt_noacl:
clear_opt(sbi->s_mount_opt, POSIX_ACL);
break;
#else
case Opt_acl:
case Opt_noacl:
ext4_msg(sb, KERN_ERR, "(no)acl options not supported");
break;
#endif
case Opt_journal_update:
/* @@@ FIXME */
/* Eventually we will want to be able to create
a journal file here. For now, only allow the
user to specify an existing inode to be the
journal file. */
if (is_remount) {
ext4_msg(sb, KERN_ERR,
"Cannot specify journal on remount");
return 0;
}
set_opt(sbi->s_mount_opt, UPDATE_JOURNAL);
break;
case Opt_journal_dev:
if (is_remount) {
ext4_msg(sb, KERN_ERR,
"Cannot specify journal on remount");
return 0;
}
if (match_int(&args[0], &option))
return 0;
*journal_devnum = option;
break;
case Opt_journal_checksum:
set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
break;
case Opt_journal_async_commit:
set_opt(sbi->s_mount_opt, JOURNAL_ASYNC_COMMIT);
set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
break;
case Opt_noload:
set_opt(sbi->s_mount_opt, NOLOAD);
break;
case Opt_commit:
if (match_int(&args[0], &option))
return 0;
if (option < 0)
return 0;
if (option == 0)
option = JBD2_DEFAULT_MAX_COMMIT_AGE;
sbi->s_commit_interval = HZ * option;
break;
case Opt_max_batch_time:
if (match_int(&args[0], &option))
return 0;
if (option < 0)
return 0;
if (option == 0)
option = EXT4_DEF_MAX_BATCH_TIME;
sbi->s_max_batch_time = option;
break;
case Opt_min_batch_time:
if (match_int(&args[0], &option))
return 0;
if (option < 0)
return 0;
sbi->s_min_batch_time = option;
break;
case Opt_data_journal:
data_opt = EXT4_MOUNT_JOURNAL_DATA;
goto datacheck;
case Opt_data_ordered:
data_opt = EXT4_MOUNT_ORDERED_DATA;
goto datacheck;
case Opt_data_writeback:
data_opt = EXT4_MOUNT_WRITEBACK_DATA;
datacheck:
if (is_remount) {
if (test_opt(sb, DATA_FLAGS) != data_opt) {
ext4_msg(sb, KERN_ERR,
"Cannot change data mode on remount");
return 0;
}
} else {
clear_opt(sbi->s_mount_opt, DATA_FLAGS);
sbi->s_mount_opt |= data_opt;
}
break;
case Opt_data_err_abort:
set_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
break;
case Opt_data_err_ignore:
clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
break;
#ifdef CONFIG_QUOTA
case Opt_usrjquota:
if (!set_qf_name(sb, USRQUOTA, &args[0]))
return 0;
break;
case Opt_grpjquota:
if (!set_qf_name(sb, GRPQUOTA, &args[0]))
return 0;
break;
case Opt_offusrjquota:
if (!clear_qf_name(sb, USRQUOTA))
return 0;
break;
case Opt_offgrpjquota:
if (!clear_qf_name(sb, GRPQUOTA))
return 0;
break;
case Opt_jqfmt_vfsold:
qfmt = QFMT_VFS_OLD;
goto set_qf_format;
case Opt_jqfmt_vfsv0:
qfmt = QFMT_VFS_V0;
goto set_qf_format;
case Opt_jqfmt_vfsv1:
qfmt = QFMT_VFS_V1;
set_qf_format:
if (sb_any_quota_loaded(sb) &&
sbi->s_jquota_fmt != qfmt) {
ext4_msg(sb, KERN_ERR, "Cannot change "
"journaled quota options when "
"quota turned on");
return 0;
}
sbi->s_jquota_fmt = qfmt;
break;
case Opt_quota:
case Opt_usrquota:
set_opt(sbi->s_mount_opt, QUOTA);
set_opt(sbi->s_mount_opt, USRQUOTA);
break;
case Opt_grpquota:
set_opt(sbi->s_mount_opt, QUOTA);
set_opt(sbi->s_mount_opt, GRPQUOTA);
break;
case Opt_noquota:
if (sb_any_quota_loaded(sb)) {
ext4_msg(sb, KERN_ERR, "Cannot change quota "
"options when quota turned on");
return 0;
}
clear_opt(sbi->s_mount_opt, QUOTA);
clear_opt(sbi->s_mount_opt, USRQUOTA);
clear_opt(sbi->s_mount_opt, GRPQUOTA);
break;
#else
case Opt_quota:
case Opt_usrquota:
case Opt_grpquota:
ext4_msg(sb, KERN_ERR,
"quota options not supported");
break;
case Opt_usrjquota:
case Opt_grpjquota:
case Opt_offusrjquota:
case Opt_offgrpjquota:
case Opt_jqfmt_vfsold:
case Opt_jqfmt_vfsv0:
case Opt_jqfmt_vfsv1:
ext4_msg(sb, KERN_ERR,
"journaled quota options not supported");
break;
case Opt_noquota:
break;
#endif
case Opt_abort:
sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
break;
case Opt_nobarrier:
clear_opt(sbi->s_mount_opt, BARRIER);
break;
case Opt_barrier:
if (args[0].from) {
if (match_int(&args[0], &option))
return 0;
} else
option = 1; /* No argument, default to 1 */
if (option)
set_opt(sbi->s_mount_opt, BARRIER);
else
clear_opt(sbi->s_mount_opt, BARRIER);
break;
case Opt_ignore:
break;
case Opt_resize:
if (!is_remount) {
ext4_msg(sb, KERN_ERR,
"resize option only available "
"for remount");
return 0;
}
if (match_int(&args[0], &option) != 0)
return 0;
*n_blocks_count = option;
break;
case Opt_nobh:
set_opt(sbi->s_mount_opt, NOBH);
break;
case Opt_bh:
clear_opt(sbi->s_mount_opt, NOBH);
break;
case Opt_i_version:
set_opt(sbi->s_mount_opt, I_VERSION);
sb->s_flags |= MS_I_VERSION;
break;
case Opt_nodelalloc:
clear_opt(sbi->s_mount_opt, DELALLOC);
break;
case Opt_stripe:
if (match_int(&args[0], &option))
return 0;
if (option < 0)
return 0;
sbi->s_stripe = option;
break;
case Opt_delalloc:
set_opt(sbi->s_mount_opt, DELALLOC);
break;
case Opt_block_validity:
set_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
break;
case Opt_noblock_validity:
clear_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
break;
case Opt_inode_readahead_blks:
if (match_int(&args[0], &option))
return 0;
if (option < 0 || option > (1 << 30))
return 0;
if (!is_power_of_2(option)) {
ext4_msg(sb, KERN_ERR,
"EXT4-fs: inode_readahead_blks"
" must be a power of 2");
return 0;
}
sbi->s_inode_readahead_blks = option;
break;
case Opt_journal_ioprio:
if (match_int(&args[0], &option))
return 0;
if (option < 0 || option > 7)
break;
*journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE,
option);
break;
case Opt_noauto_da_alloc:
set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
break;
case Opt_auto_da_alloc:
if (args[0].from) {
if (match_int(&args[0], &option))
return 0;
} else
option = 1; /* No argument, default to 1 */
if (option)
clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
else
set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
break;
case Opt_discard:
set_opt(sbi->s_mount_opt, DISCARD);
break;
case Opt_nodiscard:
clear_opt(sbi->s_mount_opt, DISCARD);
break;
default:
ext4_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" "
"or missing value", p);
return 0;
}
}
#ifdef CONFIG_QUOTA
if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
clear_opt(sbi->s_mount_opt, USRQUOTA);
if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
clear_opt(sbi->s_mount_opt, GRPQUOTA);
if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
ext4_msg(sb, KERN_ERR, "old and new quota "
"format mixing");
return 0;
}
if (!sbi->s_jquota_fmt) {
ext4_msg(sb, KERN_ERR, "journaled quota format "
"not specified");
return 0;
}
} else {
if (sbi->s_jquota_fmt) {
ext4_msg(sb, KERN_ERR, "journaled quota format "
"specified with no journaling "
"enabled");
return 0;
}
}
#endif
return 1;
}
Commit Message: ext4: use ext4_get_block_write in buffer write
Allocate uninitialized extent before ext4 buffer write and
convert the extent to initialized after io completes.
The purpose is to make sure an extent can only be marked
initialized after it has been written with new data so
we can safely drop the i_mutex lock in ext4 DIO read without
exposing stale data. This helps to improve multi-thread DIO
read performance on high-speed disks.
Skip the nobh and data=journal mount cases to make things simple for now.
Signed-off-by: Jiaying Zhang <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]>
CWE ID: | static int parse_options(char *options, struct super_block *sb,
unsigned long *journal_devnum,
unsigned int *journal_ioprio,
ext4_fsblk_t *n_blocks_count, int is_remount)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
char *p;
substring_t args[MAX_OPT_ARGS];
int data_opt = 0;
int option;
#ifdef CONFIG_QUOTA
int qfmt;
#endif
if (!options)
return 1;
while ((p = strsep(&options, ",")) != NULL) {
int token;
if (!*p)
continue;
/*
* Initialize args struct so we know whether arg was
* found; some options take optional arguments.
*/
args[0].to = args[0].from = 0;
token = match_token(p, tokens, args);
switch (token) {
case Opt_bsd_df:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
clear_opt(sbi->s_mount_opt, MINIX_DF);
break;
case Opt_minix_df:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
set_opt(sbi->s_mount_opt, MINIX_DF);
break;
case Opt_grpid:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
set_opt(sbi->s_mount_opt, GRPID);
break;
case Opt_nogrpid:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
clear_opt(sbi->s_mount_opt, GRPID);
break;
case Opt_resuid:
if (match_int(&args[0], &option))
return 0;
sbi->s_resuid = option;
break;
case Opt_resgid:
if (match_int(&args[0], &option))
return 0;
sbi->s_resgid = option;
break;
case Opt_sb:
/* handled by get_sb_block() instead of here */
/* *sb_block = match_int(&args[0]); */
break;
case Opt_err_panic:
clear_opt(sbi->s_mount_opt, ERRORS_CONT);
clear_opt(sbi->s_mount_opt, ERRORS_RO);
set_opt(sbi->s_mount_opt, ERRORS_PANIC);
break;
case Opt_err_ro:
clear_opt(sbi->s_mount_opt, ERRORS_CONT);
clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
set_opt(sbi->s_mount_opt, ERRORS_RO);
break;
case Opt_err_cont:
clear_opt(sbi->s_mount_opt, ERRORS_RO);
clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
set_opt(sbi->s_mount_opt, ERRORS_CONT);
break;
case Opt_nouid32:
set_opt(sbi->s_mount_opt, NO_UID32);
break;
case Opt_debug:
set_opt(sbi->s_mount_opt, DEBUG);
break;
case Opt_oldalloc:
set_opt(sbi->s_mount_opt, OLDALLOC);
break;
case Opt_orlov:
clear_opt(sbi->s_mount_opt, OLDALLOC);
break;
#ifdef CONFIG_EXT4_FS_XATTR
case Opt_user_xattr:
set_opt(sbi->s_mount_opt, XATTR_USER);
break;
case Opt_nouser_xattr:
clear_opt(sbi->s_mount_opt, XATTR_USER);
break;
#else
case Opt_user_xattr:
case Opt_nouser_xattr:
ext4_msg(sb, KERN_ERR, "(no)user_xattr options not supported");
break;
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
case Opt_acl:
set_opt(sbi->s_mount_opt, POSIX_ACL);
break;
case Opt_noacl:
clear_opt(sbi->s_mount_opt, POSIX_ACL);
break;
#else
case Opt_acl:
case Opt_noacl:
ext4_msg(sb, KERN_ERR, "(no)acl options not supported");
break;
#endif
case Opt_journal_update:
/* @@@ FIXME */
/* Eventually we will want to be able to create
a journal file here. For now, only allow the
user to specify an existing inode to be the
journal file. */
if (is_remount) {
ext4_msg(sb, KERN_ERR,
"Cannot specify journal on remount");
return 0;
}
set_opt(sbi->s_mount_opt, UPDATE_JOURNAL);
break;
case Opt_journal_dev:
if (is_remount) {
ext4_msg(sb, KERN_ERR,
"Cannot specify journal on remount");
return 0;
}
if (match_int(&args[0], &option))
return 0;
*journal_devnum = option;
break;
case Opt_journal_checksum:
set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
break;
case Opt_journal_async_commit:
set_opt(sbi->s_mount_opt, JOURNAL_ASYNC_COMMIT);
set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
break;
case Opt_noload:
set_opt(sbi->s_mount_opt, NOLOAD);
break;
case Opt_commit:
if (match_int(&args[0], &option))
return 0;
if (option < 0)
return 0;
if (option == 0)
option = JBD2_DEFAULT_MAX_COMMIT_AGE;
sbi->s_commit_interval = HZ * option;
break;
case Opt_max_batch_time:
if (match_int(&args[0], &option))
return 0;
if (option < 0)
return 0;
if (option == 0)
option = EXT4_DEF_MAX_BATCH_TIME;
sbi->s_max_batch_time = option;
break;
case Opt_min_batch_time:
if (match_int(&args[0], &option))
return 0;
if (option < 0)
return 0;
sbi->s_min_batch_time = option;
break;
case Opt_data_journal:
data_opt = EXT4_MOUNT_JOURNAL_DATA;
goto datacheck;
case Opt_data_ordered:
data_opt = EXT4_MOUNT_ORDERED_DATA;
goto datacheck;
case Opt_data_writeback:
data_opt = EXT4_MOUNT_WRITEBACK_DATA;
datacheck:
if (is_remount) {
if (test_opt(sb, DATA_FLAGS) != data_opt) {
ext4_msg(sb, KERN_ERR,
"Cannot change data mode on remount");
return 0;
}
} else {
clear_opt(sbi->s_mount_opt, DATA_FLAGS);
sbi->s_mount_opt |= data_opt;
}
break;
case Opt_data_err_abort:
set_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
break;
case Opt_data_err_ignore:
clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
break;
#ifdef CONFIG_QUOTA
case Opt_usrjquota:
if (!set_qf_name(sb, USRQUOTA, &args[0]))
return 0;
break;
case Opt_grpjquota:
if (!set_qf_name(sb, GRPQUOTA, &args[0]))
return 0;
break;
case Opt_offusrjquota:
if (!clear_qf_name(sb, USRQUOTA))
return 0;
break;
case Opt_offgrpjquota:
if (!clear_qf_name(sb, GRPQUOTA))
return 0;
break;
case Opt_jqfmt_vfsold:
qfmt = QFMT_VFS_OLD;
goto set_qf_format;
case Opt_jqfmt_vfsv0:
qfmt = QFMT_VFS_V0;
goto set_qf_format;
case Opt_jqfmt_vfsv1:
qfmt = QFMT_VFS_V1;
set_qf_format:
if (sb_any_quota_loaded(sb) &&
sbi->s_jquota_fmt != qfmt) {
ext4_msg(sb, KERN_ERR, "Cannot change "
"journaled quota options when "
"quota turned on");
return 0;
}
sbi->s_jquota_fmt = qfmt;
break;
case Opt_quota:
case Opt_usrquota:
set_opt(sbi->s_mount_opt, QUOTA);
set_opt(sbi->s_mount_opt, USRQUOTA);
break;
case Opt_grpquota:
set_opt(sbi->s_mount_opt, QUOTA);
set_opt(sbi->s_mount_opt, GRPQUOTA);
break;
case Opt_noquota:
if (sb_any_quota_loaded(sb)) {
ext4_msg(sb, KERN_ERR, "Cannot change quota "
"options when quota turned on");
return 0;
}
clear_opt(sbi->s_mount_opt, QUOTA);
clear_opt(sbi->s_mount_opt, USRQUOTA);
clear_opt(sbi->s_mount_opt, GRPQUOTA);
break;
#else
case Opt_quota:
case Opt_usrquota:
case Opt_grpquota:
ext4_msg(sb, KERN_ERR,
"quota options not supported");
break;
case Opt_usrjquota:
case Opt_grpjquota:
case Opt_offusrjquota:
case Opt_offgrpjquota:
case Opt_jqfmt_vfsold:
case Opt_jqfmt_vfsv0:
case Opt_jqfmt_vfsv1:
ext4_msg(sb, KERN_ERR,
"journaled quota options not supported");
break;
case Opt_noquota:
break;
#endif
case Opt_abort:
sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
break;
case Opt_nobarrier:
clear_opt(sbi->s_mount_opt, BARRIER);
break;
case Opt_barrier:
if (args[0].from) {
if (match_int(&args[0], &option))
return 0;
} else
option = 1; /* No argument, default to 1 */
if (option)
set_opt(sbi->s_mount_opt, BARRIER);
else
clear_opt(sbi->s_mount_opt, BARRIER);
break;
case Opt_ignore:
break;
case Opt_resize:
if (!is_remount) {
ext4_msg(sb, KERN_ERR,
"resize option only available "
"for remount");
return 0;
}
if (match_int(&args[0], &option) != 0)
return 0;
*n_blocks_count = option;
break;
case Opt_nobh:
set_opt(sbi->s_mount_opt, NOBH);
break;
case Opt_bh:
clear_opt(sbi->s_mount_opt, NOBH);
break;
case Opt_i_version:
set_opt(sbi->s_mount_opt, I_VERSION);
sb->s_flags |= MS_I_VERSION;
break;
case Opt_nodelalloc:
clear_opt(sbi->s_mount_opt, DELALLOC);
break;
case Opt_stripe:
if (match_int(&args[0], &option))
return 0;
if (option < 0)
return 0;
sbi->s_stripe = option;
break;
case Opt_delalloc:
set_opt(sbi->s_mount_opt, DELALLOC);
break;
case Opt_block_validity:
set_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
break;
case Opt_noblock_validity:
clear_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
break;
case Opt_inode_readahead_blks:
if (match_int(&args[0], &option))
return 0;
if (option < 0 || option > (1 << 30))
return 0;
if (!is_power_of_2(option)) {
ext4_msg(sb, KERN_ERR,
"EXT4-fs: inode_readahead_blks"
" must be a power of 2");
return 0;
}
sbi->s_inode_readahead_blks = option;
break;
case Opt_journal_ioprio:
if (match_int(&args[0], &option))
return 0;
if (option < 0 || option > 7)
break;
*journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE,
option);
break;
case Opt_noauto_da_alloc:
set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
break;
case Opt_auto_da_alloc:
if (args[0].from) {
if (match_int(&args[0], &option))
return 0;
} else
option = 1; /* No argument, default to 1 */
if (option)
clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
else
set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
break;
case Opt_discard:
set_opt(sbi->s_mount_opt, DISCARD);
break;
case Opt_nodiscard:
clear_opt(sbi->s_mount_opt, DISCARD);
break;
case Opt_dioread_nolock:
set_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
break;
case Opt_dioread_lock:
clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
break;
default:
ext4_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" "
"or missing value", p);
return 0;
}
}
#ifdef CONFIG_QUOTA
if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
clear_opt(sbi->s_mount_opt, USRQUOTA);
if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
clear_opt(sbi->s_mount_opt, GRPQUOTA);
if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
ext4_msg(sb, KERN_ERR, "old and new quota "
"format mixing");
return 0;
}
if (!sbi->s_jquota_fmt) {
ext4_msg(sb, KERN_ERR, "journaled quota format "
"not specified");
return 0;
}
} else {
if (sbi->s_jquota_fmt) {
ext4_msg(sb, KERN_ERR, "journaled quota format "
"specified with no journaling "
"enabled");
return 0;
}
}
#endif
return 1;
}
| 9,191 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
struct msg_queue *msq;
int id, retval;
key_t key = params->key;
int msgflg = params->flg;
msq = ipc_rcu_alloc(sizeof(*msq));
if (!msq)
return -ENOMEM;
msq->q_perm.mode = msgflg & S_IRWXUGO;
msq->q_perm.key = key;
msq->q_perm.security = NULL;
retval = security_msg_queue_alloc(msq);
if (retval) {
ipc_rcu_putref(msq, ipc_rcu_free);
return retval;
}
/* ipc_addid() locks msq upon success. */
id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
if (id < 0) {
ipc_rcu_putref(msq, msg_rcu_free);
return id;
}
msq->q_stime = msq->q_rtime = 0;
msq->q_ctime = get_seconds();
msq->q_cbytes = msq->q_qnum = 0;
msq->q_qbytes = ns->msg_ctlmnb;
msq->q_lspid = msq->q_lrpid = 0;
INIT_LIST_HEAD(&msq->q_messages);
INIT_LIST_HEAD(&msq->q_receivers);
INIT_LIST_HEAD(&msq->q_senders);
ipc_unlock_object(&msq->q_perm);
rcu_read_unlock();
return msq->q_perm.id;
}
Commit Message: Initialize msg/shm IPC objects before doing ipc_addid()
As reported by Dmitry Vyukov, we really shouldn't do ipc_addid() before
having initialized the IPC object state. Yes, we initialize the IPC
object in a locked state, but with all the lockless RCU lookup work,
that IPC object lock no longer means that the state cannot be seen.
We already did this for the IPC semaphore code (see commit e8577d1f0329:
"ipc/sem.c: fully initialize sem_array before making it visible") but we
clearly forgot about msg and shm.
Reported-by: Dmitry Vyukov <[email protected]>
Cc: Manfred Spraul <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
CWE ID: CWE-362 | static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
struct msg_queue *msq;
int id, retval;
key_t key = params->key;
int msgflg = params->flg;
msq = ipc_rcu_alloc(sizeof(*msq));
if (!msq)
return -ENOMEM;
msq->q_perm.mode = msgflg & S_IRWXUGO;
msq->q_perm.key = key;
msq->q_perm.security = NULL;
retval = security_msg_queue_alloc(msq);
if (retval) {
ipc_rcu_putref(msq, ipc_rcu_free);
return retval;
}
msq->q_stime = msq->q_rtime = 0;
msq->q_ctime = get_seconds();
msq->q_cbytes = msq->q_qnum = 0;
msq->q_qbytes = ns->msg_ctlmnb;
msq->q_lspid = msq->q_lrpid = 0;
INIT_LIST_HEAD(&msq->q_messages);
INIT_LIST_HEAD(&msq->q_receivers);
INIT_LIST_HEAD(&msq->q_senders);
/* ipc_addid() locks msq upon success. */
id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
if (id < 0) {
ipc_rcu_putref(msq, msg_rcu_free);
return id;
}
ipc_unlock_object(&msq->q_perm);
rcu_read_unlock();
return msq->q_perm.id;
}
| 28,898 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: image_transform_png_set_@_mod(PNG_CONST image_transform *this,
image_pixel *that, png_const_structp pp,
PNG_CONST transform_display *display)
{
this->next->mod(this->next, that, pp, display);
}
Commit Message: DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
CWE ID: | image_transform_png_set_@_mod(PNG_CONST image_transform *this,
| 896 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: ShelfLayoutManager::AutoHideState ShelfLayoutManager::CalculateAutoHideState(
VisibilityState visibility_state) const {
if (visibility_state != AUTO_HIDE || !launcher_widget())
return AUTO_HIDE_HIDDEN;
Shell* shell = Shell::GetInstance();
if (shell->GetAppListTargetVisibility())
return AUTO_HIDE_SHOWN;
if (shell->system_tray() && shell->system_tray()->should_show_launcher())
return AUTO_HIDE_SHOWN;
if (launcher_ && launcher_->IsShowingMenu())
return AUTO_HIDE_SHOWN;
if (launcher_widget()->IsActive() || status_->IsActive())
return AUTO_HIDE_SHOWN;
if (event_filter_.get() && event_filter_->in_mouse_drag())
return AUTO_HIDE_HIDDEN;
aura::RootWindow* root = launcher_widget()->GetNativeView()->GetRootWindow();
bool mouse_over_launcher =
launcher_widget()->GetWindowScreenBounds().Contains(
root->last_mouse_location());
return mouse_over_launcher ? AUTO_HIDE_SHOWN : AUTO_HIDE_HIDDEN;
}
Commit Message: ash: Add launcher overflow bubble.
- Host a LauncherView in bubble to display overflown items;
- Mouse wheel and two-finger scroll to scroll the LauncherView in bubble in case overflow bubble is overflown;
- Fit bubble when items are added/removed;
- Keep launcher bar on screen when the bubble is shown;
BUG=128054
TEST=Verify launcher overflown items are in a bubble instead of menu.
Review URL: https://chromiumcodereview.appspot.com/10659003
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@146460 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | ShelfLayoutManager::AutoHideState ShelfLayoutManager::CalculateAutoHideState(
VisibilityState visibility_state) const {
if (visibility_state != AUTO_HIDE || !launcher_widget())
return AUTO_HIDE_HIDDEN;
Shell* shell = Shell::GetInstance();
if (shell->GetAppListTargetVisibility())
return AUTO_HIDE_SHOWN;
if (shell->system_tray() && shell->system_tray()->should_show_launcher())
return AUTO_HIDE_SHOWN;
if (launcher_ && launcher_->IsShowingMenu())
return AUTO_HIDE_SHOWN;
if (launcher_ && launcher_->IsShowingOverflowBubble())
return AUTO_HIDE_SHOWN;
if (launcher_widget()->IsActive() || status_->IsActive())
return AUTO_HIDE_SHOWN;
if (event_filter_.get() && event_filter_->in_mouse_drag())
return AUTO_HIDE_HIDDEN;
aura::RootWindow* root = launcher_widget()->GetNativeView()->GetRootWindow();
bool mouse_over_launcher =
launcher_widget()->GetWindowScreenBounds().Contains(
root->last_mouse_location());
return mouse_over_launcher ? AUTO_HIDE_SHOWN : AUTO_HIDE_HIDDEN;
}
| 18,548 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: xsltStylePreCompute(xsltStylesheetPtr style, xmlNodePtr inst) {
/*
* URGENT TODO: Normally inst->psvi Should never be reserved here,
* BUT: since if we include the same stylesheet from
* multiple imports, then the stylesheet will be parsed
* again. We simply must not try to compute the stylesheet again.
* TODO: Get to the point where we don't need to query the
* namespace- and local-name of the node, but can evaluate this
* using cctxt->style->inode->category;
*/
if ((inst == NULL) || (inst->type != XML_ELEMENT_NODE) ||
(inst->psvi != NULL))
return;
if (IS_XSLT_ELEM(inst)) {
xsltStylePreCompPtr cur;
if (IS_XSLT_NAME(inst, "apply-templates")) {
xsltCheckInstructionElement(style, inst);
xsltApplyTemplatesComp(style, inst);
} else if (IS_XSLT_NAME(inst, "with-param")) {
xsltCheckParentElement(style, inst, BAD_CAST "apply-templates",
BAD_CAST "call-template");
xsltWithParamComp(style, inst);
} else if (IS_XSLT_NAME(inst, "value-of")) {
xsltCheckInstructionElement(style, inst);
xsltValueOfComp(style, inst);
} else if (IS_XSLT_NAME(inst, "copy")) {
xsltCheckInstructionElement(style, inst);
xsltCopyComp(style, inst);
} else if (IS_XSLT_NAME(inst, "copy-of")) {
xsltCheckInstructionElement(style, inst);
xsltCopyOfComp(style, inst);
} else if (IS_XSLT_NAME(inst, "if")) {
xsltCheckInstructionElement(style, inst);
xsltIfComp(style, inst);
} else if (IS_XSLT_NAME(inst, "when")) {
xsltCheckParentElement(style, inst, BAD_CAST "choose", NULL);
xsltWhenComp(style, inst);
} else if (IS_XSLT_NAME(inst, "choose")) {
xsltCheckInstructionElement(style, inst);
xsltChooseComp(style, inst);
} else if (IS_XSLT_NAME(inst, "for-each")) {
xsltCheckInstructionElement(style, inst);
xsltForEachComp(style, inst);
} else if (IS_XSLT_NAME(inst, "apply-imports")) {
xsltCheckInstructionElement(style, inst);
xsltApplyImportsComp(style, inst);
} else if (IS_XSLT_NAME(inst, "attribute")) {
xmlNodePtr parent = inst->parent;
if ((parent == NULL) || (parent->ns == NULL) ||
((parent->ns != inst->ns) &&
(!xmlStrEqual(parent->ns->href, inst->ns->href))) ||
(!xmlStrEqual(parent->name, BAD_CAST "attribute-set"))) {
xsltCheckInstructionElement(style, inst);
}
xsltAttributeComp(style, inst);
} else if (IS_XSLT_NAME(inst, "element")) {
xsltCheckInstructionElement(style, inst);
xsltElementComp(style, inst);
} else if (IS_XSLT_NAME(inst, "text")) {
xsltCheckInstructionElement(style, inst);
xsltTextComp(style, inst);
} else if (IS_XSLT_NAME(inst, "sort")) {
xsltCheckParentElement(style, inst, BAD_CAST "apply-templates",
BAD_CAST "for-each");
xsltSortComp(style, inst);
} else if (IS_XSLT_NAME(inst, "comment")) {
xsltCheckInstructionElement(style, inst);
xsltCommentComp(style, inst);
} else if (IS_XSLT_NAME(inst, "number")) {
xsltCheckInstructionElement(style, inst);
xsltNumberComp(style, inst);
} else if (IS_XSLT_NAME(inst, "processing-instruction")) {
xsltCheckInstructionElement(style, inst);
xsltProcessingInstructionComp(style, inst);
} else if (IS_XSLT_NAME(inst, "call-template")) {
xsltCheckInstructionElement(style, inst);
xsltCallTemplateComp(style, inst);
} else if (IS_XSLT_NAME(inst, "param")) {
if (xsltCheckTopLevelElement(style, inst, 0) == 0)
xsltCheckInstructionElement(style, inst);
xsltParamComp(style, inst);
} else if (IS_XSLT_NAME(inst, "variable")) {
if (xsltCheckTopLevelElement(style, inst, 0) == 0)
xsltCheckInstructionElement(style, inst);
xsltVariableComp(style, inst);
} else if (IS_XSLT_NAME(inst, "otherwise")) {
xsltCheckParentElement(style, inst, BAD_CAST "choose", NULL);
xsltCheckInstructionElement(style, inst);
return;
} else if (IS_XSLT_NAME(inst, "template")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if (IS_XSLT_NAME(inst, "output")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if (IS_XSLT_NAME(inst, "preserve-space")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if (IS_XSLT_NAME(inst, "strip-space")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if ((IS_XSLT_NAME(inst, "stylesheet")) ||
(IS_XSLT_NAME(inst, "transform"))) {
xmlNodePtr parent = inst->parent;
if ((parent == NULL) || (parent->type != XML_DOCUMENT_NODE)) {
xsltTransformError(NULL, style, inst,
"element %s only allowed only as root element\n",
inst->name);
style->errors++;
}
return;
} else if (IS_XSLT_NAME(inst, "key")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if (IS_XSLT_NAME(inst, "message")) {
xsltCheckInstructionElement(style, inst);
return;
} else if (IS_XSLT_NAME(inst, "attribute-set")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if (IS_XSLT_NAME(inst, "namespace-alias")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if (IS_XSLT_NAME(inst, "include")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if (IS_XSLT_NAME(inst, "import")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if (IS_XSLT_NAME(inst, "decimal-format")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if (IS_XSLT_NAME(inst, "fallback")) {
xsltCheckInstructionElement(style, inst);
return;
} else if (IS_XSLT_NAME(inst, "document")) {
xsltCheckInstructionElement(style, inst);
inst->psvi = (void *) xsltDocumentComp(style, inst,
(xsltTransformFunction) xsltDocumentElem);
} else {
xsltTransformError(NULL, style, inst,
"xsltStylePreCompute: unknown xsl:%s\n", inst->name);
if (style != NULL) style->warnings++;
}
cur = (xsltStylePreCompPtr) inst->psvi;
/*
* A ns-list is build for every XSLT item in the
* node-tree. This is needed for XPath expressions.
*/
if (cur != NULL) {
int i = 0;
cur->nsList = xmlGetNsList(inst->doc, inst);
if (cur->nsList != NULL) {
while (cur->nsList[i] != NULL)
i++;
}
cur->nsNr = i;
}
} else {
inst->psvi =
(void *) xsltPreComputeExtModuleElement(style, inst);
/*
* Unknown element, maybe registered at the context
* level. Mark it for later recognition.
*/
if (inst->psvi == NULL)
inst->psvi = (void *) xsltExtMarker;
}
}
Commit Message: Roll libxslt to 891681e3e948f31732229f53cb6db7215f740fc7
BUG=583156,583171
Review URL: https://codereview.chromium.org/1853083002
Cr-Commit-Position: refs/heads/master@{#385338}
CWE ID: CWE-119 | xsltStylePreCompute(xsltStylesheetPtr style, xmlNodePtr inst) {
/*
* URGENT TODO: Normally inst->psvi Should never be reserved here,
* BUT: since if we include the same stylesheet from
* multiple imports, then the stylesheet will be parsed
* again. We simply must not try to compute the stylesheet again.
* TODO: Get to the point where we don't need to query the
* namespace- and local-name of the node, but can evaluate this
* using cctxt->style->inode->category;
*/
if ((inst == NULL) || (inst->type != XML_ELEMENT_NODE) ||
(inst->psvi != NULL))
return;
if (IS_XSLT_ELEM(inst)) {
xsltStylePreCompPtr cur;
if (IS_XSLT_NAME(inst, "apply-templates")) {
xsltCheckInstructionElement(style, inst);
xsltApplyTemplatesComp(style, inst);
} else if (IS_XSLT_NAME(inst, "with-param")) {
xsltCheckParentElement(style, inst, BAD_CAST "apply-templates",
BAD_CAST "call-template");
xsltWithParamComp(style, inst);
} else if (IS_XSLT_NAME(inst, "value-of")) {
xsltCheckInstructionElement(style, inst);
xsltValueOfComp(style, inst);
} else if (IS_XSLT_NAME(inst, "copy")) {
xsltCheckInstructionElement(style, inst);
xsltCopyComp(style, inst);
} else if (IS_XSLT_NAME(inst, "copy-of")) {
xsltCheckInstructionElement(style, inst);
xsltCopyOfComp(style, inst);
} else if (IS_XSLT_NAME(inst, "if")) {
xsltCheckInstructionElement(style, inst);
xsltIfComp(style, inst);
} else if (IS_XSLT_NAME(inst, "when")) {
xsltCheckParentElement(style, inst, BAD_CAST "choose", NULL);
xsltWhenComp(style, inst);
} else if (IS_XSLT_NAME(inst, "choose")) {
xsltCheckInstructionElement(style, inst);
xsltChooseComp(style, inst);
} else if (IS_XSLT_NAME(inst, "for-each")) {
xsltCheckInstructionElement(style, inst);
xsltForEachComp(style, inst);
} else if (IS_XSLT_NAME(inst, "apply-imports")) {
xsltCheckInstructionElement(style, inst);
xsltApplyImportsComp(style, inst);
} else if (IS_XSLT_NAME(inst, "attribute")) {
xmlNodePtr parent = inst->parent;
if ((parent == NULL) ||
(parent->type != XML_ELEMENT_NODE) || (parent->ns == NULL) ||
((parent->ns != inst->ns) &&
(!xmlStrEqual(parent->ns->href, inst->ns->href))) ||
(!xmlStrEqual(parent->name, BAD_CAST "attribute-set"))) {
xsltCheckInstructionElement(style, inst);
}
xsltAttributeComp(style, inst);
} else if (IS_XSLT_NAME(inst, "element")) {
xsltCheckInstructionElement(style, inst);
xsltElementComp(style, inst);
} else if (IS_XSLT_NAME(inst, "text")) {
xsltCheckInstructionElement(style, inst);
xsltTextComp(style, inst);
} else if (IS_XSLT_NAME(inst, "sort")) {
xsltCheckParentElement(style, inst, BAD_CAST "apply-templates",
BAD_CAST "for-each");
xsltSortComp(style, inst);
} else if (IS_XSLT_NAME(inst, "comment")) {
xsltCheckInstructionElement(style, inst);
xsltCommentComp(style, inst);
} else if (IS_XSLT_NAME(inst, "number")) {
xsltCheckInstructionElement(style, inst);
xsltNumberComp(style, inst);
} else if (IS_XSLT_NAME(inst, "processing-instruction")) {
xsltCheckInstructionElement(style, inst);
xsltProcessingInstructionComp(style, inst);
} else if (IS_XSLT_NAME(inst, "call-template")) {
xsltCheckInstructionElement(style, inst);
xsltCallTemplateComp(style, inst);
} else if (IS_XSLT_NAME(inst, "param")) {
if (xsltCheckTopLevelElement(style, inst, 0) == 0)
xsltCheckInstructionElement(style, inst);
xsltParamComp(style, inst);
} else if (IS_XSLT_NAME(inst, "variable")) {
if (xsltCheckTopLevelElement(style, inst, 0) == 0)
xsltCheckInstructionElement(style, inst);
xsltVariableComp(style, inst);
} else if (IS_XSLT_NAME(inst, "otherwise")) {
xsltCheckParentElement(style, inst, BAD_CAST "choose", NULL);
xsltCheckInstructionElement(style, inst);
return;
} else if (IS_XSLT_NAME(inst, "template")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if (IS_XSLT_NAME(inst, "output")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if (IS_XSLT_NAME(inst, "preserve-space")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if (IS_XSLT_NAME(inst, "strip-space")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if ((IS_XSLT_NAME(inst, "stylesheet")) ||
(IS_XSLT_NAME(inst, "transform"))) {
xmlNodePtr parent = inst->parent;
if ((parent == NULL) || (parent->type != XML_DOCUMENT_NODE)) {
xsltTransformError(NULL, style, inst,
"element %s only allowed only as root element\n",
inst->name);
style->errors++;
}
return;
} else if (IS_XSLT_NAME(inst, "key")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if (IS_XSLT_NAME(inst, "message")) {
xsltCheckInstructionElement(style, inst);
return;
} else if (IS_XSLT_NAME(inst, "attribute-set")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if (IS_XSLT_NAME(inst, "namespace-alias")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if (IS_XSLT_NAME(inst, "include")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if (IS_XSLT_NAME(inst, "import")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if (IS_XSLT_NAME(inst, "decimal-format")) {
xsltCheckTopLevelElement(style, inst, 1);
return;
} else if (IS_XSLT_NAME(inst, "fallback")) {
xsltCheckInstructionElement(style, inst);
return;
} else if (IS_XSLT_NAME(inst, "document")) {
xsltCheckInstructionElement(style, inst);
inst->psvi = (void *) xsltDocumentComp(style, inst,
(xsltTransformFunction) xsltDocumentElem);
} else {
xsltTransformError(NULL, style, inst,
"xsltStylePreCompute: unknown xsl:%s\n", inst->name);
if (style != NULL) style->warnings++;
}
cur = (xsltStylePreCompPtr) inst->psvi;
/*
* A ns-list is build for every XSLT item in the
* node-tree. This is needed for XPath expressions.
*/
if (cur != NULL) {
int i = 0;
cur->nsList = xmlGetNsList(inst->doc, inst);
if (cur->nsList != NULL) {
while (cur->nsList[i] != NULL)
i++;
}
cur->nsNr = i;
}
} else {
inst->psvi =
(void *) xsltPreComputeExtModuleElement(style, inst);
/*
* Unknown element, maybe registered at the context
* level. Mark it for later recognition.
*/
if (inst->psvi == NULL)
inst->psvi = (void *) xsltExtMarker;
}
}
| 16,139 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int cJSON_GetArraySize( cJSON *array )
{
cJSON *c = array->child;
int i = 0;
while ( c ) {
++i;
c = c->next;
}
return i;
}
Commit Message: Fix a buffer overflow / heap corruption issue that could occur if a
malformed JSON string was passed on the control channel. This issue,
present in the cJSON library, was already fixed upstream, so was
addressed here in iperf3 by importing a newer version of cJSON (plus
local ESnet modifications).
Discovered and reported by Dave McDaniel, Cisco Talos.
Based on a patch by @dopheide-esnet, with input from @DaveGamble.
Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001,
CVE-2016-4303
(cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40)
Signed-off-by: Bruce A. Mah <[email protected]>
CWE ID: CWE-119 | int cJSON_GetArraySize( cJSON *array )
| 17,126 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: EncodedJSValue JSC_HOST_CALL JSWorkerConstructor::constructJSWorker(ExecState* exec)
{
JSWorkerConstructor* jsConstructor = jsCast<JSWorkerConstructor*>(exec->callee());
if (!exec->argumentCount())
return throwVMError(exec, createTypeError(exec, "Not enough arguments"));
UString scriptURL = exec->argument(0).toString(exec)->value(exec);
if (exec->hadException())
return JSValue::encode(JSValue());
DOMWindow* window = asJSDOMWindow(exec->lexicalGlobalObject())->impl();
ExceptionCode ec = 0;
RefPtr<Worker> worker = Worker::create(window->document(), ustringToString(scriptURL), ec);
if (ec) {
setDOMException(exec, ec);
return JSValue::encode(JSValue());
}
return JSValue::encode(asObject(toJS(exec, jsConstructor->globalObject(), worker.release())));
}
Commit Message: [JSC] Implement a helper method createNotEnoughArgumentsError()
https://bugs.webkit.org/show_bug.cgi?id=85102
Reviewed by Geoffrey Garen.
In bug 84787, kbr@ requested to avoid hard-coding
createTypeError(exec, "Not enough arguments") here and there.
This patch implements createNotEnoughArgumentsError(exec)
and uses it in JSC bindings.
c.f. a corresponding bug for V8 bindings is bug 85097.
Source/JavaScriptCore:
* runtime/Error.cpp:
(JSC::createNotEnoughArgumentsError):
(JSC):
* runtime/Error.h:
(JSC):
Source/WebCore:
Test: bindings/scripts/test/TestObj.idl
* bindings/scripts/CodeGeneratorJS.pm: Modified as described above.
(GenerateArgumentsCountCheck):
* bindings/js/JSDataViewCustom.cpp: Ditto.
(WebCore::getDataViewMember):
(WebCore::setDataViewMember):
* bindings/js/JSDeprecatedPeerConnectionCustom.cpp:
(WebCore::JSDeprecatedPeerConnectionConstructor::constructJSDeprecatedPeerConnection):
* bindings/js/JSDirectoryEntryCustom.cpp:
(WebCore::JSDirectoryEntry::getFile):
(WebCore::JSDirectoryEntry::getDirectory):
* bindings/js/JSSharedWorkerCustom.cpp:
(WebCore::JSSharedWorkerConstructor::constructJSSharedWorker):
* bindings/js/JSWebKitMutationObserverCustom.cpp:
(WebCore::JSWebKitMutationObserverConstructor::constructJSWebKitMutationObserver):
(WebCore::JSWebKitMutationObserver::observe):
* bindings/js/JSWorkerCustom.cpp:
(WebCore::JSWorkerConstructor::constructJSWorker):
* bindings/scripts/test/JS/JSFloat64Array.cpp: Updated run-bindings-tests.
(WebCore::jsFloat64ArrayPrototypeFunctionFoo):
* bindings/scripts/test/JS/JSTestActiveDOMObject.cpp:
(WebCore::jsTestActiveDOMObjectPrototypeFunctionExcitingFunction):
(WebCore::jsTestActiveDOMObjectPrototypeFunctionPostMessage):
* bindings/scripts/test/JS/JSTestCustomNamedGetter.cpp:
(WebCore::jsTestCustomNamedGetterPrototypeFunctionAnotherFunction):
* bindings/scripts/test/JS/JSTestEventTarget.cpp:
(WebCore::jsTestEventTargetPrototypeFunctionItem):
(WebCore::jsTestEventTargetPrototypeFunctionAddEventListener):
(WebCore::jsTestEventTargetPrototypeFunctionRemoveEventListener):
(WebCore::jsTestEventTargetPrototypeFunctionDispatchEvent):
* bindings/scripts/test/JS/JSTestInterface.cpp:
(WebCore::JSTestInterfaceConstructor::constructJSTestInterface):
(WebCore::jsTestInterfacePrototypeFunctionSupplementalMethod2):
* bindings/scripts/test/JS/JSTestMediaQueryListListener.cpp:
(WebCore::jsTestMediaQueryListListenerPrototypeFunctionMethod):
* bindings/scripts/test/JS/JSTestNamedConstructor.cpp:
(WebCore::JSTestNamedConstructorNamedConstructor::constructJSTestNamedConstructor):
* bindings/scripts/test/JS/JSTestObj.cpp:
(WebCore::JSTestObjConstructor::constructJSTestObj):
(WebCore::jsTestObjPrototypeFunctionVoidMethodWithArgs):
(WebCore::jsTestObjPrototypeFunctionIntMethodWithArgs):
(WebCore::jsTestObjPrototypeFunctionObjMethodWithArgs):
(WebCore::jsTestObjPrototypeFunctionMethodWithSequenceArg):
(WebCore::jsTestObjPrototypeFunctionMethodReturningSequence):
(WebCore::jsTestObjPrototypeFunctionMethodThatRequiresAllArgsAndThrows):
(WebCore::jsTestObjPrototypeFunctionSerializedValue):
(WebCore::jsTestObjPrototypeFunctionIdbKey):
(WebCore::jsTestObjPrototypeFunctionOptionsObject):
(WebCore::jsTestObjPrototypeFunctionAddEventListener):
(WebCore::jsTestObjPrototypeFunctionRemoveEventListener):
(WebCore::jsTestObjPrototypeFunctionMethodWithNonOptionalArgAndOptionalArg):
(WebCore::jsTestObjPrototypeFunctionMethodWithNonOptionalArgAndTwoOptionalArgs):
(WebCore::jsTestObjPrototypeFunctionMethodWithCallbackArg):
(WebCore::jsTestObjPrototypeFunctionMethodWithNonCallbackArgAndCallbackArg):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod1):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod2):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod3):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod4):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod5):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod6):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod7):
(WebCore::jsTestObjConstructorFunctionClassMethod2):
(WebCore::jsTestObjConstructorFunctionOverloadedMethod11):
(WebCore::jsTestObjConstructorFunctionOverloadedMethod12):
(WebCore::jsTestObjPrototypeFunctionMethodWithUnsignedLongArray):
(WebCore::jsTestObjPrototypeFunctionConvert1):
(WebCore::jsTestObjPrototypeFunctionConvert2):
(WebCore::jsTestObjPrototypeFunctionConvert3):
(WebCore::jsTestObjPrototypeFunctionConvert4):
(WebCore::jsTestObjPrototypeFunctionConvert5):
(WebCore::jsTestObjPrototypeFunctionStrictFunction):
* bindings/scripts/test/JS/JSTestSerializedScriptValueInterface.cpp:
(WebCore::JSTestSerializedScriptValueInterfaceConstructor::constructJSTestSerializedScriptValueInterface):
(WebCore::jsTestSerializedScriptValueInterfacePrototypeFunctionAcceptTransferList):
git-svn-id: svn://svn.chromium.org/blink/trunk@115536 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-20 | EncodedJSValue JSC_HOST_CALL JSWorkerConstructor::constructJSWorker(ExecState* exec)
{
JSWorkerConstructor* jsConstructor = jsCast<JSWorkerConstructor*>(exec->callee());
if (!exec->argumentCount())
return throwVMError(exec, createNotEnoughArgumentsError(exec));
UString scriptURL = exec->argument(0).toString(exec)->value(exec);
if (exec->hadException())
return JSValue::encode(JSValue());
DOMWindow* window = asJSDOMWindow(exec->lexicalGlobalObject())->impl();
ExceptionCode ec = 0;
RefPtr<Worker> worker = Worker::create(window->document(), ustringToString(scriptURL), ec);
if (ec) {
setDOMException(exec, ec);
return JSValue::encode(JSValue());
}
return JSValue::encode(asObject(toJS(exec, jsConstructor->globalObject(), worker.release())));
}
| 15,729 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: GF_Err urn_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i, to_read;
char *tmpName;
GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s;
if (! ptr->size ) return GF_OK;
to_read = (u32) ptr->size;
tmpName = (char*)gf_malloc(sizeof(char) * to_read);
if (!tmpName) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, tmpName, to_read);
i = 0;
while ( (tmpName[i] != 0) && (i < to_read) ) {
i++;
}
if (i == to_read) {
gf_free(tmpName);
return GF_ISOM_INVALID_FILE;
}
if (i == to_read - 1) {
ptr->nameURN = tmpName;
ptr->location = NULL;
return GF_OK;
}
ptr->nameURN = (char*)gf_malloc(sizeof(char) * (i+1));
if (!ptr->nameURN) {
gf_free(tmpName);
return GF_OUT_OF_MEM;
}
ptr->location = (char*)gf_malloc(sizeof(char) * (to_read - i - 1));
if (!ptr->location) {
gf_free(tmpName);
gf_free(ptr->nameURN);
ptr->nameURN = NULL;
return GF_OUT_OF_MEM;
}
memcpy(ptr->nameURN, tmpName, i + 1);
memcpy(ptr->location, tmpName + i + 1, (to_read - i - 1));
gf_free(tmpName);
return GF_OK;
}
Commit Message: fixed 2 possible heap overflows (inc. #1088)
CWE ID: CWE-125 | GF_Err urn_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i, to_read;
char *tmpName;
GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s;
if (! ptr->size ) return GF_OK;
to_read = (u32) ptr->size;
tmpName = (char*)gf_malloc(sizeof(char) * to_read);
if (!tmpName) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, tmpName, to_read);
i = 0;
while ( (i < to_read) && (tmpName[i] != 0) ) {
i++;
}
if (i == to_read) {
gf_free(tmpName);
return GF_ISOM_INVALID_FILE;
}
if (i == to_read - 1) {
ptr->nameURN = tmpName;
ptr->location = NULL;
return GF_OK;
}
ptr->nameURN = (char*)gf_malloc(sizeof(char) * (i+1));
if (!ptr->nameURN) {
gf_free(tmpName);
return GF_OUT_OF_MEM;
}
ptr->location = (char*)gf_malloc(sizeof(char) * (to_read - i - 1));
if (!ptr->location) {
gf_free(tmpName);
gf_free(ptr->nameURN);
ptr->nameURN = NULL;
return GF_OUT_OF_MEM;
}
memcpy(ptr->nameURN, tmpName, i + 1);
memcpy(ptr->location, tmpName + i + 1, (to_read - i - 1));
gf_free(tmpName);
return GF_OK;
}
| 9,635 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool attach()
{
GRefPtr<WebKitWebViewBase> inspectorView = webkit_web_inspector_get_web_view(m_inspector);
if (m_inspectorWindow) {
gtk_container_remove(GTK_CONTAINER(m_inspectorWindow), GTK_WIDGET(inspectorView.get()));
gtk_widget_destroy(m_inspectorWindow);
m_inspectorWindow = 0;
}
GtkWidget* pane;
if (gtk_bin_get_child(GTK_BIN(m_parentWindow)) == GTK_WIDGET(m_webView)) {
GRefPtr<WebKitWebView> inspectedView = m_webView;
gtk_container_remove(GTK_CONTAINER(m_parentWindow), GTK_WIDGET(m_webView));
pane = gtk_paned_new(GTK_ORIENTATION_VERTICAL);
gtk_paned_add1(GTK_PANED(pane), GTK_WIDGET(m_webView));
gtk_container_add(GTK_CONTAINER(m_parentWindow), pane);
gtk_widget_show_all(pane);
} else
pane = gtk_bin_get_child(GTK_BIN(m_parentWindow));
gtk_paned_add2(GTK_PANED(pane), GTK_WIDGET(inspectorView.get()));
return InspectorTest::attach();
}
Commit Message: [GTK] Inspector should set a default attached height before being attached
https://bugs.webkit.org/show_bug.cgi?id=90767
Reviewed by Xan Lopez.
We are currently using the minimum attached height in
WebKitWebViewBase as the default height for the inspector when
attached. It would be easier for WebKitWebViewBase and embedders
implementing attach() if the inspector already had an attached
height set when it's being attached.
* UIProcess/API/gtk/WebKitWebViewBase.cpp:
(webkitWebViewBaseContainerAdd): Don't initialize
inspectorViewHeight.
(webkitWebViewBaseSetInspectorViewHeight): Allow to set the
inspector view height before having an inpector view, but only
queue a resize when the view already has an inspector view.
* UIProcess/API/gtk/tests/TestInspector.cpp:
(testInspectorDefault):
(testInspectorManualAttachDetach):
* UIProcess/gtk/WebInspectorProxyGtk.cpp:
(WebKit::WebInspectorProxy::platformAttach): Set the default
attached height before attach the inspector view.
git-svn-id: svn://svn.chromium.org/blink/trunk@124479 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-399 | bool attach()
{
GRefPtr<WebKitWebViewBase> inspectorView = webkit_web_inspector_get_web_view(m_inspector);
if (m_inspectorWindow) {
gtk_container_remove(GTK_CONTAINER(m_inspectorWindow), GTK_WIDGET(inspectorView.get()));
gtk_widget_destroy(m_inspectorWindow);
m_inspectorWindow = 0;
}
GtkWidget* pane;
if (gtk_bin_get_child(GTK_BIN(m_parentWindow)) == GTK_WIDGET(m_webView)) {
GRefPtr<WebKitWebView> inspectedView = m_webView;
gtk_container_remove(GTK_CONTAINER(m_parentWindow), GTK_WIDGET(m_webView));
pane = gtk_paned_new(GTK_ORIENTATION_VERTICAL);
gtk_paned_add1(GTK_PANED(pane), GTK_WIDGET(m_webView));
gtk_container_add(GTK_CONTAINER(m_parentWindow), pane);
gtk_widget_show_all(pane);
} else
pane = gtk_bin_get_child(GTK_BIN(m_parentWindow));
gtk_paned_set_position(GTK_PANED(pane), webkit_web_inspector_get_attached_height(m_inspector));
gtk_paned_add2(GTK_PANED(pane), GTK_WIDGET(inspectorView.get()));
return InspectorTest::attach();
}
| 18,363 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: doshn(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num,
size_t size, off_t fsize, int *flags, int mach, int strtab)
{
Elf32_Shdr sh32;
Elf64_Shdr sh64;
int stripped = 1;
size_t nbadcap = 0;
void *nbuf;
off_t noff, coff, name_off;
uint64_t cap_hw1 = 0; /* SunOS 5.x hardware capabilites */
uint64_t cap_sf1 = 0; /* SunOS 5.x software capabilites */
char name[50];
if (size != xsh_sizeof) {
if (file_printf(ms, ", corrupted section header size") == -1)
return -1;
return 0;
}
/* Read offset of name section to be able to read section names later */
if (pread(fd, xsh_addr, xsh_sizeof, off + size * strtab) == -1) {
file_badread(ms);
return -1;
}
name_off = xsh_offset;
for ( ; num; num--) {
/* Read the name of this section. */
if (pread(fd, name, sizeof(name), name_off + xsh_name) == -1) {
file_badread(ms);
return -1;
}
name[sizeof(name) - 1] = '\0';
if (strcmp(name, ".debug_info") == 0)
stripped = 0;
if (pread(fd, xsh_addr, xsh_sizeof, off) == -1) {
file_badread(ms);
return -1;
}
off += size;
/* Things we can determine before we seek */
switch (xsh_type) {
case SHT_SYMTAB:
#if 0
case SHT_DYNSYM:
#endif
stripped = 0;
break;
default:
if (fsize != SIZE_UNKNOWN && xsh_offset > fsize) {
/* Perhaps warn here */
continue;
}
break;
}
/* Things we can determine when we seek */
switch (xsh_type) {
case SHT_NOTE:
if ((nbuf = malloc(xsh_size)) == NULL) {
file_error(ms, errno, "Cannot allocate memory"
" for note");
return -1;
}
if (pread(fd, nbuf, xsh_size, xsh_offset) == -1) {
file_badread(ms);
free(nbuf);
return -1;
}
noff = 0;
for (;;) {
if (noff >= (off_t)xsh_size)
break;
noff = donote(ms, nbuf, (size_t)noff,
xsh_size, clazz, swap, 4, flags);
if (noff == 0)
break;
}
free(nbuf);
break;
case SHT_SUNW_cap:
switch (mach) {
case EM_SPARC:
case EM_SPARCV9:
case EM_IA_64:
case EM_386:
case EM_AMD64:
break;
default:
goto skip;
}
if (nbadcap > 5)
break;
if (lseek(fd, xsh_offset, SEEK_SET) == (off_t)-1) {
file_badseek(ms);
return -1;
}
coff = 0;
for (;;) {
Elf32_Cap cap32;
Elf64_Cap cap64;
char cbuf[/*CONSTCOND*/
MAX(sizeof cap32, sizeof cap64)];
if ((coff += xcap_sizeof) > (off_t)xsh_size)
break;
if (read(fd, cbuf, (size_t)xcap_sizeof) !=
(ssize_t)xcap_sizeof) {
file_badread(ms);
return -1;
}
if (cbuf[0] == 'A') {
#ifdef notyet
char *p = cbuf + 1;
uint32_t len, tag;
memcpy(&len, p, sizeof(len));
p += 4;
len = getu32(swap, len);
if (memcmp("gnu", p, 3) != 0) {
if (file_printf(ms,
", unknown capability %.3s", p)
== -1)
return -1;
break;
}
p += strlen(p) + 1;
tag = *p++;
memcpy(&len, p, sizeof(len));
p += 4;
len = getu32(swap, len);
if (tag != 1) {
if (file_printf(ms, ", unknown gnu"
" capability tag %d", tag)
== -1)
return -1;
break;
}
#endif
break;
}
(void)memcpy(xcap_addr, cbuf, xcap_sizeof);
switch (xcap_tag) {
case CA_SUNW_NULL:
break;
case CA_SUNW_HW_1:
cap_hw1 |= xcap_val;
break;
case CA_SUNW_SF_1:
cap_sf1 |= xcap_val;
break;
default:
if (file_printf(ms,
", with unknown capability "
"0x%" INT64_T_FORMAT "x = 0x%"
INT64_T_FORMAT "x",
(unsigned long long)xcap_tag,
(unsigned long long)xcap_val) == -1)
return -1;
if (nbadcap++ > 2)
coff = xsh_size;
break;
}
}
/*FALLTHROUGH*/
skip:
default:
break;
}
}
if (file_printf(ms, ", %sstripped", stripped ? "" : "not ") == -1)
return -1;
if (cap_hw1) {
const cap_desc_t *cdp;
switch (mach) {
case EM_SPARC:
case EM_SPARC32PLUS:
case EM_SPARCV9:
cdp = cap_desc_sparc;
break;
case EM_386:
case EM_IA_64:
case EM_AMD64:
cdp = cap_desc_386;
break;
default:
cdp = NULL;
break;
}
if (file_printf(ms, ", uses") == -1)
return -1;
if (cdp) {
while (cdp->cd_name) {
if (cap_hw1 & cdp->cd_mask) {
if (file_printf(ms,
" %s", cdp->cd_name) == -1)
return -1;
cap_hw1 &= ~cdp->cd_mask;
}
++cdp;
}
if (cap_hw1)
if (file_printf(ms,
" unknown hardware capability 0x%"
INT64_T_FORMAT "x",
(unsigned long long)cap_hw1) == -1)
return -1;
} else {
if (file_printf(ms,
" hardware capability 0x%" INT64_T_FORMAT "x",
(unsigned long long)cap_hw1) == -1)
return -1;
}
}
if (cap_sf1) {
if (cap_sf1 & SF1_SUNW_FPUSED) {
if (file_printf(ms,
(cap_sf1 & SF1_SUNW_FPKNWN)
? ", uses frame pointer"
: ", not known to use frame pointer") == -1)
return -1;
}
cap_sf1 &= ~SF1_SUNW_MASK;
if (cap_sf1)
if (file_printf(ms,
", with unknown software capability 0x%"
INT64_T_FORMAT "x",
(unsigned long long)cap_sf1) == -1)
return -1;
}
return 0;
}
Commit Message: Bail out on partial reads, from Alexander Cherepanov
CWE ID: CWE-20 | doshn(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num,
size_t size, off_t fsize, int *flags, int mach, int strtab)
{
Elf32_Shdr sh32;
Elf64_Shdr sh64;
int stripped = 1;
size_t nbadcap = 0;
void *nbuf;
off_t noff, coff, name_off;
uint64_t cap_hw1 = 0; /* SunOS 5.x hardware capabilites */
uint64_t cap_sf1 = 0; /* SunOS 5.x software capabilites */
char name[50];
ssize_t namesize;
if (size != xsh_sizeof) {
if (file_printf(ms, ", corrupted section header size") == -1)
return -1;
return 0;
}
/* Read offset of name section to be able to read section names later */
if (pread(fd, xsh_addr, xsh_sizeof, off + size * strtab) < (ssize_t)xsh_sizeof) {
file_badread(ms);
return -1;
}
name_off = xsh_offset;
for ( ; num; num--) {
/* Read the name of this section. */
if ((namesize = pread(fd, name, sizeof(name) - 1, name_off + xsh_name)) == -1) {
file_badread(ms);
return -1;
}
name[namesize] = '\0';
if (strcmp(name, ".debug_info") == 0)
stripped = 0;
if (pread(fd, xsh_addr, xsh_sizeof, off) < (ssize_t)xsh_sizeof) {
file_badread(ms);
return -1;
}
off += size;
/* Things we can determine before we seek */
switch (xsh_type) {
case SHT_SYMTAB:
#if 0
case SHT_DYNSYM:
#endif
stripped = 0;
break;
default:
if (fsize != SIZE_UNKNOWN && xsh_offset > fsize) {
/* Perhaps warn here */
continue;
}
break;
}
/* Things we can determine when we seek */
switch (xsh_type) {
case SHT_NOTE:
if ((nbuf = malloc(xsh_size)) == NULL) {
file_error(ms, errno, "Cannot allocate memory"
" for note");
return -1;
}
if (pread(fd, nbuf, xsh_size, xsh_offset) < (ssize_t)xsh_size) {
file_badread(ms);
free(nbuf);
return -1;
}
noff = 0;
for (;;) {
if (noff >= (off_t)xsh_size)
break;
noff = donote(ms, nbuf, (size_t)noff,
xsh_size, clazz, swap, 4, flags);
if (noff == 0)
break;
}
free(nbuf);
break;
case SHT_SUNW_cap:
switch (mach) {
case EM_SPARC:
case EM_SPARCV9:
case EM_IA_64:
case EM_386:
case EM_AMD64:
break;
default:
goto skip;
}
if (nbadcap > 5)
break;
if (lseek(fd, xsh_offset, SEEK_SET) == (off_t)-1) {
file_badseek(ms);
return -1;
}
coff = 0;
for (;;) {
Elf32_Cap cap32;
Elf64_Cap cap64;
char cbuf[/*CONSTCOND*/
MAX(sizeof cap32, sizeof cap64)];
if ((coff += xcap_sizeof) > (off_t)xsh_size)
break;
if (read(fd, cbuf, (size_t)xcap_sizeof) !=
(ssize_t)xcap_sizeof) {
file_badread(ms);
return -1;
}
if (cbuf[0] == 'A') {
#ifdef notyet
char *p = cbuf + 1;
uint32_t len, tag;
memcpy(&len, p, sizeof(len));
p += 4;
len = getu32(swap, len);
if (memcmp("gnu", p, 3) != 0) {
if (file_printf(ms,
", unknown capability %.3s", p)
== -1)
return -1;
break;
}
p += strlen(p) + 1;
tag = *p++;
memcpy(&len, p, sizeof(len));
p += 4;
len = getu32(swap, len);
if (tag != 1) {
if (file_printf(ms, ", unknown gnu"
" capability tag %d", tag)
== -1)
return -1;
break;
}
#endif
break;
}
(void)memcpy(xcap_addr, cbuf, xcap_sizeof);
switch (xcap_tag) {
case CA_SUNW_NULL:
break;
case CA_SUNW_HW_1:
cap_hw1 |= xcap_val;
break;
case CA_SUNW_SF_1:
cap_sf1 |= xcap_val;
break;
default:
if (file_printf(ms,
", with unknown capability "
"0x%" INT64_T_FORMAT "x = 0x%"
INT64_T_FORMAT "x",
(unsigned long long)xcap_tag,
(unsigned long long)xcap_val) == -1)
return -1;
if (nbadcap++ > 2)
coff = xsh_size;
break;
}
}
/*FALLTHROUGH*/
skip:
default:
break;
}
}
if (file_printf(ms, ", %sstripped", stripped ? "" : "not ") == -1)
return -1;
if (cap_hw1) {
const cap_desc_t *cdp;
switch (mach) {
case EM_SPARC:
case EM_SPARC32PLUS:
case EM_SPARCV9:
cdp = cap_desc_sparc;
break;
case EM_386:
case EM_IA_64:
case EM_AMD64:
cdp = cap_desc_386;
break;
default:
cdp = NULL;
break;
}
if (file_printf(ms, ", uses") == -1)
return -1;
if (cdp) {
while (cdp->cd_name) {
if (cap_hw1 & cdp->cd_mask) {
if (file_printf(ms,
" %s", cdp->cd_name) == -1)
return -1;
cap_hw1 &= ~cdp->cd_mask;
}
++cdp;
}
if (cap_hw1)
if (file_printf(ms,
" unknown hardware capability 0x%"
INT64_T_FORMAT "x",
(unsigned long long)cap_hw1) == -1)
return -1;
} else {
if (file_printf(ms,
" hardware capability 0x%" INT64_T_FORMAT "x",
(unsigned long long)cap_hw1) == -1)
return -1;
}
}
if (cap_sf1) {
if (cap_sf1 & SF1_SUNW_FPUSED) {
if (file_printf(ms,
(cap_sf1 & SF1_SUNW_FPKNWN)
? ", uses frame pointer"
: ", not known to use frame pointer") == -1)
return -1;
}
cap_sf1 &= ~SF1_SUNW_MASK;
if (cap_sf1)
if (file_printf(ms,
", with unknown software capability 0x%"
INT64_T_FORMAT "x",
(unsigned long long)cap_sf1) == -1)
return -1;
}
return 0;
}
| 29,350 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void WillDispatchTabUpdatedEvent(WebContents* contents,
Profile* profile,
const Extension* extension,
ListValue* event_args) {
DictionaryValue* tab_value = ExtensionTabUtil::CreateTabValue(
contents, extension);
}
Commit Message: Do not pass URLs in onUpdated events to extensions unless they have the
"tabs" permission.
BUG=168442
Review URL: https://chromiumcodereview.appspot.com/11824004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@176406 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-264 | static void WillDispatchTabUpdatedEvent(WebContents* contents,
static void WillDispatchTabUpdatedEvent(
WebContents* contents,
const DictionaryValue* changed_properties,
Profile* profile,
const Extension* extension,
ListValue* event_args) {
// Overwrite the second argument with the appropriate properties dictionary,
// depending on extension permissions.
DictionaryValue* properties_value = changed_properties->DeepCopy();
ExtensionTabUtil::ScrubTabValueForExtension(contents, extension,
properties_value);
event_args->Set(1, properties_value);
// Overwrite the third arg with our tab value as seen by this extension.
DictionaryValue* tab_value = ExtensionTabUtil::CreateTabValue(
contents, extension);
}
| 19,342 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void gdImageGifCtx(gdImagePtr im, gdIOCtxPtr out)
{
gdImagePtr pim = 0, tim = im;
int interlace, BitsPerPixel;
interlace = im->interlace;
if (im->trueColor) {
/* Expensive, but the only way that produces an
acceptable result: mix down to a palette
based temporary image. */
pim = gdImageCreatePaletteFromTrueColor(im, 1, 256);
if (!pim) {
return;
}
tim = pim;
}
BitsPerPixel = colorstobpp(tim->colorsTotal);
/* All set, let's do it. */
GIFEncode(
out, tim->sx, tim->sy, tim->interlace, 0, tim->transparent, BitsPerPixel,
tim->red, tim->green, tim->blue, tim);
if (pim) {
/* Destroy palette based temporary image. */
gdImageDestroy( pim);
}
}
Commit Message: Sync with upstream
Even though libgd/libgd#492 is not a relevant bug fix for PHP, since
the binding doesn't use the `gdImage*Ptr()` functions at all, we're
porting the fix to stay in sync here.
CWE ID: CWE-415 | void gdImageGifCtx(gdImagePtr im, gdIOCtxPtr out)
{
_gdImageGifCtx(im, out);
}
/* returns 0 on success, 1 on failure */
static int _gdImageGifCtx(gdImagePtr im, gdIOCtxPtr out)
{
gdImagePtr pim = 0, tim = im;
int interlace, BitsPerPixel;
interlace = im->interlace;
if (im->trueColor) {
/* Expensive, but the only way that produces an
acceptable result: mix down to a palette
based temporary image. */
pim = gdImageCreatePaletteFromTrueColor(im, 1, 256);
if (!pim) {
return 1;
}
tim = pim;
}
BitsPerPixel = colorstobpp(tim->colorsTotal);
/* All set, let's do it. */
GIFEncode(
out, tim->sx, tim->sy, tim->interlace, 0, tim->transparent, BitsPerPixel,
tim->red, tim->green, tim->blue, tim);
if (pim) {
/* Destroy palette based temporary image. */
gdImageDestroy( pim);
}
return 0;
}
| 21,574 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static double pcerr(PNG_CONST png_modifier *pm, int in_depth, int out_depth)
{
/* Percentage error permitted in the linear values. Note that the specified
* value is a percentage but this routine returns a simple number.
*/
if (pm->assume_16_bit_calculations ||
(pm->calculations_use_input_precision ? in_depth : out_depth) == 16)
return pm->maxpc16 * .01;
else
return pm->maxpc8 * .01;
}
Commit Message: DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
CWE ID: | static double pcerr(PNG_CONST png_modifier *pm, int in_depth, int out_depth)
static double pcerr(const png_modifier *pm, int in_depth, int out_depth)
{
/* Percentage error permitted in the linear values. Note that the specified
* value is a percentage but this routine returns a simple number.
*/
if (pm->assume_16_bit_calculations ||
(pm->calculations_use_input_precision ? in_depth : out_depth) == 16)
return pm->maxpc16 * .01;
else
return pm->maxpc8 * .01;
}
| 2,777 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void CopyToOMX(const OMX_BUFFERHEADERTYPE *header) {
if (!mIsBackup) {
return;
}
memcpy(header->pBuffer + header->nOffset,
(const OMX_U8 *)mMem->pointer() + header->nOffset,
header->nFilledLen);
}
Commit Message: DO NOT MERGE: IOMX: work against metadata buffer spoofing
- Prohibit direct set/getParam/Settings for extensions meant for
OMXNodeInstance alone. This disallows enabling metadata mode
without the knowledge of OMXNodeInstance.
- Use a backup buffer for metadata mode buffers and do not directly
share with clients.
- Disallow setting up metadata mode/tunneling/input surface
after first sendCommand.
- Disallow store-meta for input cross process.
- Disallow emptyBuffer for surface input (via IOMX).
- Fix checking for input surface.
Bug: 29422020
Change-Id: I801c77b80e703903f62e42d76fd2e76a34e4bc8e
(cherry picked from commit 7c3c2fa3e233c656fc8c2fc2a6634b3ecf8a23e8)
CWE ID: CWE-200 | void CopyToOMX(const OMX_BUFFERHEADERTYPE *header) {
if (!mCopyToOmx) {
return;
}
memcpy(header->pBuffer + header->nOffset,
(const OMX_U8 *)mMem->pointer() + header->nOffset,
header->nFilledLen);
}
| 27,791 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static OPJ_BOOL bmp_read_rle4_data(FILE* IN, OPJ_UINT8* pData,
OPJ_UINT32 stride, OPJ_UINT32 width, OPJ_UINT32 height)
{
OPJ_UINT32 x, y;
OPJ_UINT8 *pix;
const OPJ_UINT8 *beyond;
beyond = pData + stride * height;
pix = pData;
x = y = 0U;
while (y < height) {
int c = getc(IN);
if (c == EOF) {
break;
}
if (c) { /* encoded mode */
int j;
OPJ_UINT8 c1 = (OPJ_UINT8)getc(IN);
for (j = 0; (j < c) && (x < width) &&
((OPJ_SIZE_T)pix < (OPJ_SIZE_T)beyond); j++, x++, pix++) {
*pix = (OPJ_UINT8)((j & 1) ? (c1 & 0x0fU) : ((c1 >> 4) & 0x0fU));
}
} else { /* absolute mode */
c = getc(IN);
if (c == EOF) {
break;
}
if (c == 0x00) { /* EOL */
x = 0;
y++;
pix = pData + y * stride;
} else if (c == 0x01) { /* EOP */
break;
} else if (c == 0x02) { /* MOVE by dxdy */
c = getc(IN);
x += (OPJ_UINT32)c;
c = getc(IN);
y += (OPJ_UINT32)c;
pix = pData + y * stride + x;
} else { /* 03 .. 255 : absolute mode */
int j;
OPJ_UINT8 c1 = 0U;
for (j = 0; (j < c) && (x < width) &&
((OPJ_SIZE_T)pix < (OPJ_SIZE_T)beyond); j++, x++, pix++) {
if ((j & 1) == 0) {
c1 = (OPJ_UINT8)getc(IN);
}
*pix = (OPJ_UINT8)((j & 1) ? (c1 & 0x0fU) : ((c1 >> 4) & 0x0fU));
}
if (((c & 3) == 1) || ((c & 3) == 2)) { /* skip padding byte */
getc(IN);
}
}
}
} /* while(y < height) */
return OPJ_TRUE;
}
Commit Message: convertbmp: detect invalid file dimensions early
width/length dimensions read from bmp headers are not necessarily
valid. For instance they may have been maliciously set to very large
values with the intention to cause DoS (large memory allocation, stack
overflow). In these cases we want to detect the invalid size as early
as possible.
This commit introduces a counter which verifies that the number of
written bytes corresponds to the advertized width/length.
See commit 8ee335227bbc for details.
Signed-off-by: Young Xiao <[email protected]>
CWE ID: CWE-400 | static OPJ_BOOL bmp_read_rle4_data(FILE* IN, OPJ_UINT8* pData,
OPJ_UINT32 stride, OPJ_UINT32 width, OPJ_UINT32 height)
{
OPJ_UINT32 x, y, written;
OPJ_UINT8 *pix;
const OPJ_UINT8 *beyond;
beyond = pData + stride * height;
pix = pData;
x = y = written = 0U;
while (y < height) {
int c = getc(IN);
if (c == EOF) {
break;
}
if (c) { /* encoded mode */
int j;
OPJ_UINT8 c1 = (OPJ_UINT8)getc(IN);
for (j = 0; (j < c) && (x < width) &&
((OPJ_SIZE_T)pix < (OPJ_SIZE_T)beyond); j++, x++, pix++) {
*pix = (OPJ_UINT8)((j & 1) ? (c1 & 0x0fU) : ((c1 >> 4) & 0x0fU));
written++;
}
} else { /* absolute mode */
c = getc(IN);
if (c == EOF) {
break;
}
if (c == 0x00) { /* EOL */
x = 0;
y++;
pix = pData + y * stride;
} else if (c == 0x01) { /* EOP */
break;
} else if (c == 0x02) { /* MOVE by dxdy */
c = getc(IN);
x += (OPJ_UINT32)c;
c = getc(IN);
y += (OPJ_UINT32)c;
pix = pData + y * stride + x;
} else { /* 03 .. 255 : absolute mode */
int j;
OPJ_UINT8 c1 = 0U;
for (j = 0; (j < c) && (x < width) &&
((OPJ_SIZE_T)pix < (OPJ_SIZE_T)beyond); j++, x++, pix++) {
if ((j & 1) == 0) {
c1 = (OPJ_UINT8)getc(IN);
}
*pix = (OPJ_UINT8)((j & 1) ? (c1 & 0x0fU) : ((c1 >> 4) & 0x0fU));
written++;
}
if (((c & 3) == 1) || ((c & 3) == 2)) { /* skip padding byte */
getc(IN);
}
}
}
} /* while(y < height) */
if (written != width * height) {
fprintf(stderr, "warning, image's actual size does not match advertized one\n");
return OPJ_FALSE;
}
return OPJ_TRUE;
}
| 3,236 |
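The convertbmp hardening in the record above counts written pixels, but its commit message also argues for rejecting implausible dimensions before any decoding or allocation happens. The sketch below shows that early check in isolation; the kMaxDim cap, the struct layout, and the function name are assumptions for illustration, not OpenJPEG's actual API.

#include <cstdint>
#include <limits>

// Hypothetical cap on BMP dimensions; a real reader would pick a project-specific limit.
constexpr std::uint32_t kMaxDim = 1u << 15;

struct ParsedBmpHeader {   // hypothetical parsed header fields
    std::uint32_t width;
    std::uint32_t height;
    std::uint32_t stride;  // bytes per destination row
};

// Reject zero, oversized, or overflow-prone dimensions before allocating
// the stride * height destination buffer.
bool bmp_dimensions_plausible(const ParsedBmpHeader& h) {
    if (h.width == 0 || h.height == 0)
        return false;
    if (h.width > kMaxDim || h.height > kMaxDim)
        return false;
    const std::uint64_t bytes =
        static_cast<std::uint64_t>(h.stride) * h.height;
    return bytes <= std::numeric_limits<std::uint32_t>::max();
}

Running such a check right after parsing the header keeps a hostile width/height pair from ever reaching the allocator or the RLE loop.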
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool GDataCache::CreateCacheDirectories(
const std::vector<FilePath>& paths_to_create) {
bool success = true;
for (size_t i = 0; i < paths_to_create.size(); ++i) {
if (file_util::DirectoryExists(paths_to_create[i]))
continue;
if (!file_util::CreateDirectory(paths_to_create[i])) {
success = false;
PLOG(ERROR) << "Error creating directory " << paths_to_create[i].value();
} else {
DVLOG(1) << "Created directory " << paths_to_create[i].value();
}
}
return success;
}
Commit Message: Revert 144993 - gdata: Remove invalid files in the cache directories
Broke linux_chromeos_valgrind:
http://build.chromium.org/p/chromium.memory.fyi/builders/Chromium%20OS%20%28valgrind%29%285%29/builds/8628/steps/memory%20test%3A%20unit/logs/stdio
In theory, we shouldn't have any invalid files left in the
cache directories, but things can go wrong and invalid files
may be left if the device shuts down unexpectedly, for instance.
Besides, it's good to be defensive.
BUG=134862
TEST=added unit tests
Review URL: https://chromiumcodereview.appspot.com/10693020
[email protected]
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@145029 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | bool GDataCache::CreateCacheDirectories(
| 6,609 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int parse_csum_name(const char *name, int len)
{
if (len < 0 && name)
len = strlen(name);
if (!name || (len == 4 && strncasecmp(name, "auto", 4) == 0)) {
if (protocol_version >= 30)
return CSUM_MD5;
if (protocol_version >= 27)
return CSUM_MD4_OLD;
if (protocol_version >= 21)
return CSUM_MD4_BUSTED;
return CSUM_ARCHAIC;
}
if (len == 3 && strncasecmp(name, "md4", 3) == 0)
return CSUM_MD4;
if (len == 3 && strncasecmp(name, "md5", 3) == 0)
return CSUM_MD5;
if (len == 4 && strncasecmp(name, "none", 4) == 0)
return CSUM_NONE;
rprintf(FERROR, "unknown checksum name: %s\n", name);
exit_cleanup(RERR_UNSUPPORTED);
}
Commit Message:
CWE ID: CWE-354 | int parse_csum_name(const char *name, int len)
{
if (len < 0 && name)
len = strlen(name);
if (!name || (len == 4 && strncasecmp(name, "auto", 4) == 0)) {
if (protocol_version >= 30)
return CSUM_MD5;
if (protocol_version >= 27)
return CSUM_MD4_OLD;
if (protocol_version >= 21)
return CSUM_MD4_BUSTED;
return CSUM_MD4_ARCHAIC;
}
if (len == 3 && strncasecmp(name, "md4", 3) == 0)
return CSUM_MD4;
if (len == 3 && strncasecmp(name, "md5", 3) == 0)
return CSUM_MD5;
if (len == 4 && strncasecmp(name, "none", 4) == 0)
return CSUM_NONE;
rprintf(FERROR, "unknown checksum name: %s\n", name);
exit_cleanup(RERR_UNSUPPORTED);
}
| 6,844 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool FrameFetchContext::UpdateTimingInfoForIFrameNavigation(
ResourceTimingInfo* info) {
if (IsDetached())
return false;
if (!GetFrame()->Owner())
return false;
if (!GetFrame()->should_send_resource_timing_info_to_parent())
return false;
if (MasterDocumentLoader()->LoadType() == WebFrameLoadType::kBackForward)
return false;
return true;
}
Commit Message: Do not forward resource timing to parent frame after back-forward navigation
LocalFrame has |should_send_resource_timing_info_to_parent_| flag not to
send timing info to parent except for the first navigation. This flag is
cleared when the first timing is sent to parent, however this does not happen
if iframe's first navigation was by back-forward navigation. For such
iframes, we shouldn't send timings to parent at all.
Bug: 876822
Change-Id: I128b51a82ef278c439548afc8283ae63abdef5c5
Reviewed-on: https://chromium-review.googlesource.com/1186215
Reviewed-by: Kinuko Yasuda <[email protected]>
Commit-Queue: Kunihiko Sakamoto <[email protected]>
Cr-Commit-Position: refs/heads/master@{#585736}
CWE ID: CWE-200 | bool FrameFetchContext::UpdateTimingInfoForIFrameNavigation(
ResourceTimingInfo* info) {
if (IsDetached())
return false;
if (!GetFrame()->Owner())
return false;
if (!GetFrame()->should_send_resource_timing_info_to_parent())
return false;
// location may have been changed after initial navigation,
if (MasterDocumentLoader()->LoadType() == WebFrameLoadType::kBackForward) {
// ...and do not report subsequent navigations in the iframe too.
GetFrame()->SetShouldSendResourceTimingInfoToParent(false);
return false;
}
return true;
}
| 7,136 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static Image *ReadPNGImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
logging,
status;
MngInfo
*mng_info;
char
magic_number[MaxTextExtent];
ssize_t
count;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter ReadPNGImage()");
image=AcquireImage(image_info);
mng_info=(MngInfo *) NULL;
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
ThrowReaderException(FileOpenError,"UnableToOpenFile");
/*
Verify PNG signature.
*/
count=ReadBlob(image,8,(unsigned char *) magic_number);
if (count < 8 || memcmp(magic_number,"\211PNG\r\n\032\n",8) != 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Allocate a MngInfo structure.
*/
mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo));
if (mng_info == (MngInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
/*
Initialize members of the MngInfo structure.
*/
(void) ResetMagickMemory(mng_info,0,sizeof(MngInfo));
mng_info->image=image;
image=ReadOnePNGImage(mng_info,image_info,exception);
mng_info=MngInfoFreeStruct(mng_info);
if (image == (Image *) NULL)
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"exit ReadPNGImage() with error");
return((Image *) NULL);
}
(void) CloseBlob(image);
if ((image->columns == 0) || (image->rows == 0))
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"exit ReadPNGImage() with error.");
ThrowReaderException(CorruptImageError,"CorruptImage");
}
if ((IssRGBColorspace(image->colorspace) != MagickFalse) &&
((image->gamma < .45) || (image->gamma > .46)) &&
!(image->chromaticity.red_primary.x>0.6399f &&
image->chromaticity.red_primary.x<0.6401f &&
image->chromaticity.red_primary.y>0.3299f &&
image->chromaticity.red_primary.y<0.3301f &&
image->chromaticity.green_primary.x>0.2999f &&
image->chromaticity.green_primary.x<0.3001f &&
image->chromaticity.green_primary.y>0.5999f &&
image->chromaticity.green_primary.y<0.6001f &&
image->chromaticity.blue_primary.x>0.1499f &&
image->chromaticity.blue_primary.x<0.1501f &&
image->chromaticity.blue_primary.y>0.0599f &&
image->chromaticity.blue_primary.y<0.0601f &&
image->chromaticity.white_point.x>0.3126f &&
image->chromaticity.white_point.x<0.3128f &&
image->chromaticity.white_point.y>0.3289f &&
image->chromaticity.white_point.y<0.3291f))
SetImageColorspace(image,RGBColorspace);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" page.w: %.20g, page.h: %.20g,page.x: %.20g, page.y: %.20g.",
(double) image->page.width,(double) image->page.height,
(double) image->page.x,(double) image->page.y);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit ReadPNGImage()");
return(image);
}
Commit Message: ...
CWE ID: CWE-754 | static Image *ReadPNGImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
logging,
status;
MngInfo
*mng_info;
char
magic_number[MaxTextExtent];
ssize_t
count;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter ReadPNGImage()");
image=AcquireImage(image_info);
mng_info=(MngInfo *) NULL;
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
ThrowReaderException(FileOpenError,"UnableToOpenFile");
/*
Verify PNG signature.
*/
count=ReadBlob(image,8,(unsigned char *) magic_number);
if (count < 8 || memcmp(magic_number,"\211PNG\r\n\032\n",8) != 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Verify that file size large enough to contain a PNG datastream.
*/
if (GetBlobSize(image) < 61)
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
/*
Allocate a MngInfo structure.
*/
mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo));
if (mng_info == (MngInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
/*
Initialize members of the MngInfo structure.
*/
(void) ResetMagickMemory(mng_info,0,sizeof(MngInfo));
mng_info->image=image;
image=ReadOnePNGImage(mng_info,image_info,exception);
mng_info=MngInfoFreeStruct(mng_info);
if (image == (Image *) NULL)
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"exit ReadPNGImage() with error");
return((Image *) NULL);
}
(void) CloseBlob(image);
if ((image->columns == 0) || (image->rows == 0))
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"exit ReadPNGImage() with error.");
ThrowReaderException(CorruptImageError,"CorruptImage");
}
if ((IssRGBColorspace(image->colorspace) != MagickFalse) &&
((image->gamma < .45) || (image->gamma > .46)) &&
!(image->chromaticity.red_primary.x>0.6399f &&
image->chromaticity.red_primary.x<0.6401f &&
image->chromaticity.red_primary.y>0.3299f &&
image->chromaticity.red_primary.y<0.3301f &&
image->chromaticity.green_primary.x>0.2999f &&
image->chromaticity.green_primary.x<0.3001f &&
image->chromaticity.green_primary.y>0.5999f &&
image->chromaticity.green_primary.y<0.6001f &&
image->chromaticity.blue_primary.x>0.1499f &&
image->chromaticity.blue_primary.x<0.1501f &&
image->chromaticity.blue_primary.y>0.0599f &&
image->chromaticity.blue_primary.y<0.0601f &&
image->chromaticity.white_point.x>0.3126f &&
image->chromaticity.white_point.x<0.3128f &&
image->chromaticity.white_point.y>0.3289f &&
image->chromaticity.white_point.y<0.3291f))
SetImageColorspace(image,RGBColorspace);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" page.w: %.20g, page.h: %.20g,page.x: %.20g, page.y: %.20g.",
(double) image->page.width,(double) image->page.height,
(double) image->page.x,(double) image->page.y);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit ReadPNGImage()");
return(image);
}
| 9,974 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: DictionaryValue* ExtensionTabUtil::CreateTabValue(
const WebContents* contents,
TabStripModel* tab_strip,
int tab_index,
IncludePrivacySensitiveFields include_privacy_sensitive_fields) {
NOTIMPLEMENTED();
return NULL;
}
Commit Message: Do not pass URLs in onUpdated events to extensions unless they have the
"tabs" permission.
BUG=168442
Review URL: https://chromiumcodereview.appspot.com/11824004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@176406 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-264 | DictionaryValue* ExtensionTabUtil::CreateTabValue(
const WebContents* contents,
TabStripModel* tab_strip,
int tab_index) {
NOTIMPLEMENTED();
return NULL;
}
| 16,524 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void WebGLRenderingContextBase::TexImageImpl(
TexImageFunctionID function_id,
GLenum target,
GLint level,
GLint internalformat,
GLint xoffset,
GLint yoffset,
GLint zoffset,
GLenum format,
GLenum type,
Image* image,
WebGLImageConversion::ImageHtmlDomSource dom_source,
bool flip_y,
bool premultiply_alpha,
const IntRect& source_image_rect,
GLsizei depth,
GLint unpack_image_height) {
const char* func_name = GetTexImageFunctionName(function_id);
if (type == GL_UNSIGNED_INT_10F_11F_11F_REV) {
type = GL_FLOAT;
}
Vector<uint8_t> data;
IntRect sub_rect = source_image_rect;
if (sub_rect == SentinelEmptyRect()) {
sub_rect = SafeGetImageSize(image);
}
bool selecting_sub_rectangle = false;
if (!ValidateTexImageSubRectangle(func_name, function_id, image, sub_rect,
depth, unpack_image_height,
&selecting_sub_rectangle)) {
return;
}
IntRect adjusted_source_image_rect = sub_rect;
if (flip_y) {
adjusted_source_image_rect.SetY(image->height() -
adjusted_source_image_rect.MaxY());
}
WebGLImageConversion::ImageExtractor image_extractor(
image, dom_source, premultiply_alpha,
unpack_colorspace_conversion_ == GL_NONE);
if (!image_extractor.ImagePixelData()) {
SynthesizeGLError(GL_INVALID_VALUE, func_name, "bad image data");
return;
}
WebGLImageConversion::DataFormat source_data_format =
image_extractor.ImageSourceFormat();
WebGLImageConversion::AlphaOp alpha_op = image_extractor.ImageAlphaOp();
const void* image_pixel_data = image_extractor.ImagePixelData();
bool need_conversion = true;
if (type == GL_UNSIGNED_BYTE &&
source_data_format == WebGLImageConversion::kDataFormatRGBA8 &&
format == GL_RGBA && alpha_op == WebGLImageConversion::kAlphaDoNothing &&
!flip_y && !selecting_sub_rectangle && depth == 1) {
need_conversion = false;
} else {
if (!WebGLImageConversion::PackImageData(
image, image_pixel_data, format, type, flip_y, alpha_op,
source_data_format, image_extractor.ImageWidth(),
image_extractor.ImageHeight(), adjusted_source_image_rect, depth,
image_extractor.ImageSourceUnpackAlignment(), unpack_image_height,
data)) {
SynthesizeGLError(GL_INVALID_VALUE, func_name, "packImage error");
return;
}
}
ScopedUnpackParametersResetRestore temporary_reset_unpack(this);
if (function_id == kTexImage2D) {
TexImage2DBase(target, level, internalformat,
adjusted_source_image_rect.Width(),
adjusted_source_image_rect.Height(), 0, format, type,
need_conversion ? data.data() : image_pixel_data);
} else if (function_id == kTexSubImage2D) {
ContextGL()->TexSubImage2D(
target, level, xoffset, yoffset, adjusted_source_image_rect.Width(),
adjusted_source_image_rect.Height(), format, type,
need_conversion ? data.data() : image_pixel_data);
} else {
if (function_id == kTexImage3D) {
ContextGL()->TexImage3D(
target, level, internalformat, adjusted_source_image_rect.Width(),
adjusted_source_image_rect.Height(), depth, 0, format, type,
need_conversion ? data.data() : image_pixel_data);
} else {
DCHECK_EQ(function_id, kTexSubImage3D);
ContextGL()->TexSubImage3D(
target, level, xoffset, yoffset, zoffset,
adjusted_source_image_rect.Width(),
adjusted_source_image_rect.Height(), depth, format, type,
need_conversion ? data.data() : image_pixel_data);
}
}
}
Commit Message: Tighten about IntRect use in WebGL with overflow detection
BUG=784183
TEST=test case in the bug in ASAN build
[email protected]
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: Ie25ca328af99de7828e28e6a6e3d775f1bebc43f
Reviewed-on: https://chromium-review.googlesource.com/811826
Reviewed-by: Kenneth Russell <[email protected]>
Commit-Queue: Zhenyao Mo <[email protected]>
Cr-Commit-Position: refs/heads/master@{#522213}
CWE ID: CWE-125 | void WebGLRenderingContextBase::TexImageImpl(
TexImageFunctionID function_id,
GLenum target,
GLint level,
GLint internalformat,
GLint xoffset,
GLint yoffset,
GLint zoffset,
GLenum format,
GLenum type,
Image* image,
WebGLImageConversion::ImageHtmlDomSource dom_source,
bool flip_y,
bool premultiply_alpha,
const IntRect& source_image_rect,
GLsizei depth,
GLint unpack_image_height) {
const char* func_name = GetTexImageFunctionName(function_id);
if (type == GL_UNSIGNED_INT_10F_11F_11F_REV) {
type = GL_FLOAT;
}
Vector<uint8_t> data;
IntRect sub_rect = source_image_rect;
if (sub_rect.IsValid() && sub_rect == SentinelEmptyRect()) {
sub_rect = SafeGetImageSize(image);
}
bool selecting_sub_rectangle = false;
if (!ValidateTexImageSubRectangle(func_name, function_id, image, sub_rect,
depth, unpack_image_height,
&selecting_sub_rectangle)) {
return;
}
IntRect adjusted_source_image_rect = sub_rect;
if (flip_y) {
adjusted_source_image_rect.SetY(image->height() -
adjusted_source_image_rect.MaxY());
}
WebGLImageConversion::ImageExtractor image_extractor(
image, dom_source, premultiply_alpha,
unpack_colorspace_conversion_ == GL_NONE);
if (!image_extractor.ImagePixelData()) {
SynthesizeGLError(GL_INVALID_VALUE, func_name, "bad image data");
return;
}
WebGLImageConversion::DataFormat source_data_format =
image_extractor.ImageSourceFormat();
WebGLImageConversion::AlphaOp alpha_op = image_extractor.ImageAlphaOp();
const void* image_pixel_data = image_extractor.ImagePixelData();
bool need_conversion = true;
if (type == GL_UNSIGNED_BYTE &&
source_data_format == WebGLImageConversion::kDataFormatRGBA8 &&
format == GL_RGBA && alpha_op == WebGLImageConversion::kAlphaDoNothing &&
!flip_y && !selecting_sub_rectangle && depth == 1) {
need_conversion = false;
} else {
if (!WebGLImageConversion::PackImageData(
image, image_pixel_data, format, type, flip_y, alpha_op,
source_data_format, image_extractor.ImageWidth(),
image_extractor.ImageHeight(), adjusted_source_image_rect, depth,
image_extractor.ImageSourceUnpackAlignment(), unpack_image_height,
data)) {
SynthesizeGLError(GL_INVALID_VALUE, func_name, "packImage error");
return;
}
}
ScopedUnpackParametersResetRestore temporary_reset_unpack(this);
if (function_id == kTexImage2D) {
TexImage2DBase(target, level, internalformat,
adjusted_source_image_rect.Width(),
adjusted_source_image_rect.Height(), 0, format, type,
need_conversion ? data.data() : image_pixel_data);
} else if (function_id == kTexSubImage2D) {
ContextGL()->TexSubImage2D(
target, level, xoffset, yoffset, adjusted_source_image_rect.Width(),
adjusted_source_image_rect.Height(), format, type,
need_conversion ? data.data() : image_pixel_data);
} else {
if (function_id == kTexImage3D) {
ContextGL()->TexImage3D(
target, level, internalformat, adjusted_source_image_rect.Width(),
adjusted_source_image_rect.Height(), depth, 0, format, type,
need_conversion ? data.data() : image_pixel_data);
} else {
DCHECK_EQ(function_id, kTexSubImage3D);
ContextGL()->TexSubImage3D(
target, level, xoffset, yoffset, zoffset,
adjusted_source_image_rect.Width(),
adjusted_source_image_rect.Height(), depth, format, type,
need_conversion ? data.data() : image_pixel_data);
}
}
}
| 16,447 |
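The Blink patch in the record above only trusts the sentinel comparison after sub_rect.IsValid(); the underlying hazard is that a hostile rectangle can make x + width or y + height wrap around in 32-bit arithmetic. Below is a minimal validity test with the same shape as that check; the Rect struct and the function name are illustrative stand-ins, not Blink's IntRect.

#include <cstdint>
#include <limits>

struct Rect {                       // stand-in for an IntRect-like type
    std::int32_t x, y, width, height;
};

// Valid means: non-negative extent and no int32 overflow when computing
// the right/bottom edges, so MaxX()/MaxY()-style accessors cannot wrap.
bool rect_is_valid(const Rect& r) {
    if (r.width < 0 || r.height < 0)
        return false;
    const std::int64_t max_x = static_cast<std::int64_t>(r.x) + r.width;
    const std::int64_t max_y = static_cast<std::int64_t>(r.y) + r.height;
    return max_x <= std::numeric_limits<std::int32_t>::max() &&
           max_y <= std::numeric_limits<std::int32_t>::max();
}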
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
struct dentry *dir;
struct fscrypt_info *ci;
int dir_has_key, cached_with_key;
if (flags & LOOKUP_RCU)
return -ECHILD;
dir = dget_parent(dentry);
if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
dput(dir);
return 0;
}
ci = d_inode(dir)->i_crypt_info;
if (ci && ci->ci_keyring_key &&
(ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
(1 << KEY_FLAG_REVOKED) |
(1 << KEY_FLAG_DEAD))))
ci = NULL;
/* this should eventually be an flag in d_flags */
spin_lock(&dentry->d_lock);
cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
spin_unlock(&dentry->d_lock);
dir_has_key = (ci != NULL);
dput(dir);
/*
* If the dentry was cached without the key, and it is a
* negative dentry, it might be a valid name. We can't check
* if the key has since been made available due to locking
* reasons, so we fail the validation so ext4_lookup() can do
* this check.
*
* We also fail the validation if the dentry was created with
* the key present, but we no longer have the key, or vice versa.
*/
if ((!cached_with_key && d_is_negative(dentry)) ||
(!cached_with_key && dir_has_key) ||
(cached_with_key && !dir_has_key))
return 0;
return 1;
}
Commit Message: fscrypt: remove broken support for detecting keyring key revocation
Filesystem encryption ostensibly supported revoking a keyring key that
had been used to "unlock" encrypted files, causing those files to become
"locked" again. This was, however, buggy for several reasons, the most
severe of which was that when key revocation happened to be detected for
an inode, its fscrypt_info was immediately freed, even while other
threads could be using it for encryption or decryption concurrently.
This could be exploited to crash the kernel or worse.
This patch fixes the use-after-free by removing the code which detects
the keyring key having been revoked, invalidated, or expired. Instead,
an encrypted inode that is "unlocked" now simply remains unlocked until
it is evicted from memory. Note that this is no worse than the case for
block device-level encryption, e.g. dm-crypt, and it still remains
possible for a privileged user to evict unused pages, inodes, and
dentries by running 'sync; echo 3 > /proc/sys/vm/drop_caches', or by
simply unmounting the filesystem. In fact, one of those actions was
already needed anyway for key revocation to work even somewhat sanely.
This change is not expected to break any applications.
In the future I'd like to implement a real API for fscrypt key
revocation that interacts sanely with ongoing filesystem operations ---
waiting for existing operations to complete and blocking new operations,
and invalidating and sanitizing key material and plaintext from the VFS
caches. But this is a hard problem, and for now this bug must be fixed.
This bug affected almost all versions of ext4, f2fs, and ubifs
encryption, and it was potentially reachable in any kernel configured
with encryption support (CONFIG_EXT4_ENCRYPTION=y,
CONFIG_EXT4_FS_ENCRYPTION=y, CONFIG_F2FS_FS_ENCRYPTION=y, or
CONFIG_UBIFS_FS_ENCRYPTION=y). Note that older kernels did not use the
shared fs/crypto/ code, but due to the potential security implications
of this bug, it may still be worthwhile to backport this fix to them.
Fixes: b7236e21d55f ("ext4 crypto: reorganize how we store keys in the inode")
Cc: [email protected] # v4.2+
Signed-off-by: Eric Biggers <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
Acked-by: Michael Halcrow <[email protected]>
CWE ID: CWE-416 | static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
struct dentry *dir;
int dir_has_key, cached_with_key;
if (flags & LOOKUP_RCU)
return -ECHILD;
dir = dget_parent(dentry);
if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
dput(dir);
return 0;
}
/* this should eventually be an flag in d_flags */
spin_lock(&dentry->d_lock);
cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
spin_unlock(&dentry->d_lock);
dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
dput(dir);
/*
* If the dentry was cached without the key, and it is a
* negative dentry, it might be a valid name. We can't check
* if the key has since been made available due to locking
* reasons, so we fail the validation so ext4_lookup() can do
* this check.
*
* We also fail the validation if the dentry was created with
* the key present, but we no longer have the key, or vice versa.
*/
if ((!cached_with_key && d_is_negative(dentry)) ||
(!cached_with_key && dir_has_key) ||
(cached_with_key && !dir_has_key))
return 0;
return 1;
}
| 26,308 |
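The essence of the fscrypt fix above is that the revalidate path stops dereferencing the shared fscrypt_info (whose keyring-key flags it used to inspect) and only asks whether the pointer is non-NULL. A user-space analogue of that discipline, with hypothetical names and an atomic pointer standing in for inode->i_crypt_info, is sketched here; it is not kernel code.

#include <atomic>

struct Inode {                                   // illustrative stand-in
    std::atomic<const void*> crypt_info{nullptr};
};

// Safe even if another thread tears down the info object concurrently:
// we load the pointer once and never follow it, so there is nothing to
// use after free.
bool dir_has_key(const Inode& dir) {
    return dir.crypt_info.load(std::memory_order_acquire) != nullptr;
}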
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void ID3::Iterator::getstring(String8 *id, bool otherdata) const {
id->setTo("");
const uint8_t *frameData = mFrameData;
if (frameData == NULL) {
return;
}
uint8_t encoding = *frameData;
if (mParent.mVersion == ID3_V1 || mParent.mVersion == ID3_V1_1) {
if (mOffset == 126 || mOffset == 127) {
char tmp[16];
sprintf(tmp, "%d", (int)*frameData);
id->setTo(tmp);
return;
}
id->setTo((const char*)frameData, mFrameSize);
return;
}
if (mFrameSize < getHeaderLength() + 1) {
return;
}
size_t n = mFrameSize - getHeaderLength() - 1;
if (otherdata) {
frameData += 4;
int32_t i = n - 4;
while(--i >= 0 && *++frameData != 0) ;
int skipped = (frameData - mFrameData);
if (skipped >= (int)n) {
return;
}
n -= skipped;
}
if (encoding == 0x00) {
id->setTo((const char*)frameData + 1, n);
} else if (encoding == 0x03) {
id->setTo((const char *)(frameData + 1), n);
} else if (encoding == 0x02) {
int len = n / 2;
const char16_t *framedata = (const char16_t *) (frameData + 1);
char16_t *framedatacopy = NULL;
#if BYTE_ORDER == LITTLE_ENDIAN
framedatacopy = new char16_t[len];
for (int i = 0; i < len; i++) {
framedatacopy[i] = bswap_16(framedata[i]);
}
framedata = framedatacopy;
#endif
id->setTo(framedata, len);
if (framedatacopy != NULL) {
delete[] framedatacopy;
}
} else if (encoding == 0x01) {
int len = n / 2;
const char16_t *framedata = (const char16_t *) (frameData + 1);
char16_t *framedatacopy = NULL;
if (*framedata == 0xfffe) {
framedatacopy = new char16_t[len];
for (int i = 0; i < len; i++) {
framedatacopy[i] = bswap_16(framedata[i]);
}
framedata = framedatacopy;
}
if (*framedata == 0xfeff) {
framedata++;
len--;
}
bool eightBit = true;
for (int i = 0; i < len; i++) {
if (framedata[i] > 0xff) {
eightBit = false;
break;
}
}
if (eightBit) {
char *frame8 = new char[len];
for (int i = 0; i < len; i++) {
frame8[i] = framedata[i];
}
id->setTo(frame8, len);
delete [] frame8;
} else {
id->setTo(framedata, len);
}
if (framedatacopy != NULL) {
delete[] framedatacopy;
}
}
}
Commit Message: better validation lengths of strings in ID3 tags
Validate lengths on strings in ID3 tags, particularly around 0.
Also added code to handle cases when we can't get memory for
copies of strings we want to extract from these tags.
Affects L/M/N/master, same patch for all of them.
Bug: 30744884
Change-Id: I2675a817a39f0927ec1f7e9f9c09f2e61020311e
Test: play mp3 file which caused a <0 length.
(cherry picked from commit d23c01546c4f82840a01a380def76ab6cae5d43f)
CWE ID: CWE-20 | void ID3::Iterator::getstring(String8 *id, bool otherdata) const {
id->setTo("");
const uint8_t *frameData = mFrameData;
if (frameData == NULL) {
return;
}
uint8_t encoding = *frameData;
if (mParent.mVersion == ID3_V1 || mParent.mVersion == ID3_V1_1) {
if (mOffset == 126 || mOffset == 127) {
char tmp[16];
sprintf(tmp, "%d", (int)*frameData);
id->setTo(tmp);
return;
}
id->setTo((const char*)frameData, mFrameSize);
return;
}
if (mFrameSize < getHeaderLength() + 1) {
return;
}
size_t n = mFrameSize - getHeaderLength() - 1;
if (otherdata) {
frameData += 4;
int32_t i = n - 4;
while(--i >= 0 && *++frameData != 0) ;
int skipped = (frameData - mFrameData);
if (skipped >= (int)n) {
return;
}
n -= skipped;
}
if (n <= 0) {
return;
}
if (encoding == 0x00) {
id->setTo((const char*)frameData + 1, n);
} else if (encoding == 0x03) {
id->setTo((const char *)(frameData + 1), n);
} else if (encoding == 0x02) {
int len = n / 2;
const char16_t *framedata = (const char16_t *) (frameData + 1);
char16_t *framedatacopy = NULL;
#if BYTE_ORDER == LITTLE_ENDIAN
if (len > 0) {
framedatacopy = new (std::nothrow) char16_t[len];
if (framedatacopy == NULL) {
return;
}
for (int i = 0; i < len; i++) {
framedatacopy[i] = bswap_16(framedata[i]);
}
framedata = framedatacopy;
}
#endif
id->setTo(framedata, len);
if (framedatacopy != NULL) {
delete[] framedatacopy;
}
} else if (encoding == 0x01) {
int len = n / 2;
const char16_t *framedata = (const char16_t *) (frameData + 1);
char16_t *framedatacopy = NULL;
if (*framedata == 0xfffe) {
// endianness marker != host endianness, convert & skip
if (len <= 1) {
return; // nothing after the marker
}
framedatacopy = new (std::nothrow) char16_t[len];
if (framedatacopy == NULL) {
return;
}
for (int i = 0; i < len; i++) {
framedatacopy[i] = bswap_16(framedata[i]);
}
framedata = framedatacopy;
// and skip over the marker
framedata++;
len--;
} else if (*framedata == 0xfeff) {
// endianness marker == host endianness, skip it
if (len <= 1) {
return; // nothing after the marker
}
framedata++;
len--;
}
bool eightBit = true;
for (int i = 0; i < len; i++) {
if (framedata[i] > 0xff) {
eightBit = false;
break;
}
}
if (eightBit) {
char *frame8 = new (std::nothrow) char[len];
if (frame8 != NULL) {
for (int i = 0; i < len; i++) {
frame8[i] = framedata[i];
}
id->setTo(frame8, len);
delete [] frame8;
} else {
id->setTo(framedata, len);
}
} else {
id->setTo(framedata, len);
}
if (framedatacopy != NULL) {
delete[] framedatacopy;
}
}
}
| 12,793 |
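Two habits recur in the hardened getstring() above: bail out as soon as a derived length is zero or negative, and allocate scratch buffers with std::nothrow so an allocation failure degrades to an empty result instead of aborting. The following self-contained sketch shows that pattern on its own; it is unrelated to the actual ID3 parser.

#include <cstddef>
#include <new>
#include <string>

// Copy n bytes into a std::string, tolerating bad lengths and allocation
// failure. Returns an empty string in every error case.
std::string copy_checked(const unsigned char* data, long n) {
    if (data == nullptr || n <= 0)
        return {};
    char* tmp = new (std::nothrow) char[n];      // may yield nullptr
    if (tmp == nullptr)
        return {};
    for (long i = 0; i < n; ++i)
        tmp[i] = static_cast<char>(data[i]);
    std::string out(tmp, static_cast<std::size_t>(n));
    delete[] tmp;
    return out;
}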
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int _snd_timer_stop(struct snd_timer_instance * timeri,
int keep_flag, int event)
{
struct snd_timer *timer;
unsigned long flags;
if (snd_BUG_ON(!timeri))
return -ENXIO;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
if (!keep_flag) {
spin_lock_irqsave(&slave_active_lock, flags);
timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
spin_unlock_irqrestore(&slave_active_lock, flags);
}
goto __end;
}
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
!(--timer->running)) {
timer->hw.stop(timer);
if (timer->flags & SNDRV_TIMER_FLG_RESCHED) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
snd_timer_reschedule(timer, 0);
if (timer->flags & SNDRV_TIMER_FLG_CHANGE) {
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
}
}
if (!keep_flag)
timeri->flags &=
~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
spin_unlock_irqrestore(&timer->lock, flags);
__end:
if (event != SNDRV_TIMER_EVENT_RESOLUTION)
snd_timer_notify1(timeri, event);
return 0;
}
Commit Message: ALSA: timer: Harden slave timer list handling
A slave timer instance might be still accessible in a racy way while
operating the master instance as it lacks of locking. Since the
master operation is mostly protected with timer->lock, we should cope
with it while changing the slave instance, too. Also, some linked
lists (active_list and ack_list) of slave instances aren't unlinked
immediately at stopping or closing, and this may lead to unexpected
accesses.
This patch tries to address these issues. It adds spin lock of
timer->lock (either from master or slave, which is equivalent) in a
few places. For avoiding a deadlock, we ensure that the global
slave_active_lock is always locked at first before each timer lock.
Also, ack and active_list of slave instances are properly unlinked at
snd_timer_stop() and snd_timer_close().
Last but not least, remove the superfluous call of _snd_timer_stop()
at removing slave links. This is a noop, and calling it may confuse
readers wrt locking. Further cleanup will follow in a later patch.
Actually we've got reports of use-after-free by syzkaller fuzzer, and
this hopefully fixes these issues.
Reported-by: Dmitry Vyukov <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
CWE ID: CWE-20 | static int _snd_timer_stop(struct snd_timer_instance * timeri,
int keep_flag, int event)
{
struct snd_timer *timer;
unsigned long flags;
if (snd_BUG_ON(!timeri))
return -ENXIO;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
if (!keep_flag) {
spin_lock_irqsave(&slave_active_lock, flags);
timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
spin_unlock_irqrestore(&slave_active_lock, flags);
}
goto __end;
}
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
!(--timer->running)) {
timer->hw.stop(timer);
if (timer->flags & SNDRV_TIMER_FLG_RESCHED) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
snd_timer_reschedule(timer, 0);
if (timer->flags & SNDRV_TIMER_FLG_CHANGE) {
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
}
}
if (!keep_flag)
timeri->flags &=
~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
spin_unlock_irqrestore(&timer->lock, flags);
__end:
if (event != SNDRV_TIMER_EVENT_RESOLUTION)
snd_timer_notify1(timeri, event);
return 0;
}
| 26,819 |
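The ALSA commit above closes the race by imposing a lock order (the global slave_active_lock is always taken before any per-timer lock) and by unlinking ack_list/active_list entries while a lock is held. The sketch below shows the same discipline with standard mutexes; the names are placeholders, not kernel code.

#include <list>
#include <mutex>

std::mutex g_registry_lock;          // analogue of the global slave lock

struct Timer {
    std::mutex lock;                 // analogue of timer->lock
    std::list<int> active;           // analogue of active_list
};

// Always global lock first, then the per-object lock, on every path that
// can touch the lists, so two threads cannot acquire them in opposite order.
void stop_timer(Timer& t) {
    std::lock_guard<std::mutex> g(g_registry_lock);
    std::lock_guard<std::mutex> l(t.lock);
    t.active.clear();                // unlink while still protected
}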
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void DataReductionProxyConfig::InitializeOnIOThread(
scoped_refptr<network::SharedURLLoaderFactory> url_loader_factory,
WarmupURLFetcher::CreateCustomProxyConfigCallback
create_custom_proxy_config_callback,
NetworkPropertiesManager* manager,
const std::string& user_agent) {
DCHECK(thread_checker_.CalledOnValidThread());
network_properties_manager_ = manager;
network_properties_manager_->ResetWarmupURLFetchMetrics();
secure_proxy_checker_.reset(new SecureProxyChecker(url_loader_factory));
warmup_url_fetcher_.reset(new WarmupURLFetcher(
create_custom_proxy_config_callback,
base::BindRepeating(
&DataReductionProxyConfig::HandleWarmupFetcherResponse,
base::Unretained(this)),
base::BindRepeating(&DataReductionProxyConfig::GetHttpRttEstimate,
base::Unretained(this)),
ui_task_runner_, user_agent));
AddDefaultProxyBypassRules();
network_connection_tracker_->AddNetworkConnectionObserver(this);
network_connection_tracker_->GetConnectionType(
&connection_type_,
base::BindOnce(&DataReductionProxyConfig::OnConnectionChanged,
weak_factory_.GetWeakPtr()));
}
Commit Message: Disable all DRP URL fetches when holdback is enabled
Disable secure proxy checker, warmup url fetcher
and client config fetch when the client is in DRP
(Data Reduction Proxy) holdback.
This CL does not disable pingbacks when client is in the
holdback, but the pingback code is going away soon.
Change-Id: Icbb59d814d1452123869c609e0770d1439c1db51
Bug: 984964
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1709965
Commit-Queue: Tarun Bansal <[email protected]>
Reviewed-by: Robert Ogden <[email protected]>
Cr-Commit-Position: refs/heads/master@{#679649}
CWE ID: CWE-416 | void DataReductionProxyConfig::InitializeOnIOThread(
scoped_refptr<network::SharedURLLoaderFactory> url_loader_factory,
WarmupURLFetcher::CreateCustomProxyConfigCallback
create_custom_proxy_config_callback,
NetworkPropertiesManager* manager,
const std::string& user_agent) {
DCHECK(thread_checker_.CalledOnValidThread());
network_properties_manager_ = manager;
network_properties_manager_->ResetWarmupURLFetchMetrics();
if (!params::IsIncludedInHoldbackFieldTrial()) {
secure_proxy_checker_.reset(new SecureProxyChecker(url_loader_factory));
warmup_url_fetcher_.reset(new WarmupURLFetcher(
create_custom_proxy_config_callback,
base::BindRepeating(
&DataReductionProxyConfig::HandleWarmupFetcherResponse,
base::Unretained(this)),
base::BindRepeating(&DataReductionProxyConfig::GetHttpRttEstimate,
base::Unretained(this)),
ui_task_runner_, user_agent));
}
AddDefaultProxyBypassRules();
network_connection_tracker_->AddNetworkConnectionObserver(this);
network_connection_tracker_->GetConnectionType(
&connection_type_,
base::BindOnce(&DataReductionProxyConfig::OnConnectionChanged,
weak_factory_.GetWeakPtr()));
}
| 20,196 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: PS_SERIALIZER_DECODE_FUNC(php_serialize) /* {{{ */
{
const char *endptr = val + vallen;
zval *session_vars;
php_unserialize_data_t var_hash;
PHP_VAR_UNSERIALIZE_INIT(var_hash);
ALLOC_INIT_ZVAL(session_vars);
if (php_var_unserialize(&session_vars, &val, endptr, &var_hash TSRMLS_CC)) {
var_push_dtor(&var_hash, &session_vars);
}
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
if (PS(http_session_vars)) {
zval_ptr_dtor(&PS(http_session_vars));
}
if (Z_TYPE_P(session_vars) == IS_NULL) {
array_init(session_vars);
}
PS(http_session_vars) = session_vars;
ZEND_SET_GLOBAL_VAR_WITH_LENGTH("_SESSION", sizeof("_SESSION"), PS(http_session_vars), Z_REFCOUNT_P(PS(http_session_vars)) + 1, 1);
return SUCCESS;
}
/* }}} */
Commit Message:
CWE ID: CWE-416 | PS_SERIALIZER_DECODE_FUNC(php_serialize) /* {{{ */
{
const char *endptr = val + vallen;
zval *session_vars;
php_unserialize_data_t var_hash;
PHP_VAR_UNSERIALIZE_INIT(var_hash);
ALLOC_INIT_ZVAL(session_vars);
if (php_var_unserialize(&session_vars, &val, endptr, &var_hash TSRMLS_CC)) {
var_push_dtor(&var_hash, &session_vars);
}
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
if (PS(http_session_vars)) {
zval_ptr_dtor(&PS(http_session_vars));
}
if (Z_TYPE_P(session_vars) == IS_NULL) {
array_init(session_vars);
}
PS(http_session_vars) = session_vars;
ZEND_SET_GLOBAL_VAR_WITH_LENGTH("_SESSION", sizeof("_SESSION"), PS(http_session_vars), Z_REFCOUNT_P(PS(http_session_vars)) + 1, 1);
return SUCCESS;
}
/* }}} */
| 25,721 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: ChildProcessTerminationInfo ChildProcessLauncherHelper::GetTerminationInfo(
const ChildProcessLauncherHelper::Process& process,
bool known_dead) {
ChildProcessTerminationInfo info;
if (!java_peer_avaiable_on_client_thread_)
return info;
Java_ChildProcessLauncherHelperImpl_getTerminationInfo(
AttachCurrentThread(), java_peer_, reinterpret_cast<intptr_t>(&info));
base::android::ApplicationState app_state =
base::android::ApplicationStatusListener::GetState();
bool app_foreground =
app_state == base::android::APPLICATION_STATE_HAS_RUNNING_ACTIVITIES ||
app_state == base::android::APPLICATION_STATE_HAS_PAUSED_ACTIVITIES;
if (app_foreground &&
(info.binding_state == base::android::ChildBindingState::MODERATE ||
info.binding_state == base::android::ChildBindingState::STRONG)) {
info.status = base::TERMINATION_STATUS_OOM_PROTECTED;
} else {
info.status = base::TERMINATION_STATUS_NORMAL_TERMINATION;
}
return info;
}
Commit Message: android: Stop child process in GetTerminationInfo
Android currently abuses TerminationStatus to pass whether process is
"oom protected" rather than whether it has died or not. This confuses
cross-platform code about the state process.
Only TERMINATION_STATUS_STILL_RUNNING is treated as still running, which
android never passes. Also it appears to be ok to kill the process in
getTerminationInfo as it's only called when the child process is dead or
dying. Also posix kills the process on some calls.
Bug: 940245
Change-Id: Id165711848c279bbe77ef8a784c8cf0b14051877
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1516284
Reviewed-by: Robert Sesek <[email protected]>
Reviewed-by: ssid <[email protected]>
Commit-Queue: Bo <[email protected]>
Cr-Commit-Position: refs/heads/master@{#639639}
CWE ID: CWE-664 | ChildProcessTerminationInfo ChildProcessLauncherHelper::GetTerminationInfo(
const ChildProcessLauncherHelper::Process& process,
bool known_dead) {
ChildProcessTerminationInfo info;
if (!java_peer_avaiable_on_client_thread_)
return info;
Java_ChildProcessLauncherHelperImpl_getTerminationInfoAndStop(
AttachCurrentThread(), java_peer_, reinterpret_cast<intptr_t>(&info));
base::android::ApplicationState app_state =
base::android::ApplicationStatusListener::GetState();
bool app_foreground =
app_state == base::android::APPLICATION_STATE_HAS_RUNNING_ACTIVITIES ||
app_state == base::android::APPLICATION_STATE_HAS_PAUSED_ACTIVITIES;
if (app_foreground &&
(info.binding_state == base::android::ChildBindingState::MODERATE ||
info.binding_state == base::android::ChildBindingState::STRONG)) {
info.status = base::TERMINATION_STATUS_OOM_PROTECTED;
} else {
info.status = base::TERMINATION_STATUS_NORMAL_TERMINATION;
}
return info;
}
| 27,906 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: dophn_exec(struct magic_set *ms, int clazz, int swap, int fd, off_t off,
int num, size_t size, off_t fsize, int *flags, int sh_num)
{
Elf32_Phdr ph32;
Elf64_Phdr ph64;
const char *linking_style = "statically";
const char *interp = "";
unsigned char nbuf[BUFSIZ];
char ibuf[BUFSIZ];
ssize_t bufsize;
size_t offset, align, len;
if (size != xph_sizeof) {
if (file_printf(ms, ", corrupted program header size") == -1)
return -1;
return 0;
}
for ( ; num; num--) {
if (pread(fd, xph_addr, xph_sizeof, off) < (ssize_t)xph_sizeof) {
file_badread(ms);
return -1;
}
off += size;
bufsize = 0;
align = 4;
/* Things we can determine before we seek */
switch (xph_type) {
case PT_DYNAMIC:
linking_style = "dynamically";
break;
case PT_NOTE:
if (sh_num) /* Did this through section headers */
continue;
if (((align = xph_align) & 0x80000000UL) != 0 ||
align < 4) {
if (file_printf(ms,
", invalid note alignment 0x%lx",
(unsigned long)align) == -1)
return -1;
align = 4;
}
/*FALLTHROUGH*/
case PT_INTERP:
len = xph_filesz < sizeof(nbuf) ? xph_filesz
: sizeof(nbuf);
bufsize = pread(fd, nbuf, len, xph_offset);
if (bufsize == -1) {
file_badread(ms);
return -1;
}
break;
default:
if (fsize != SIZE_UNKNOWN && xph_offset > fsize) {
/* Maybe warn here? */
continue;
}
break;
}
/* Things we can determine when we seek */
switch (xph_type) {
case PT_INTERP:
if (bufsize && nbuf[0]) {
nbuf[bufsize - 1] = '\0';
interp = (const char *)nbuf;
} else
interp = "*empty*";
break;
case PT_NOTE:
/*
* This is a PT_NOTE section; loop through all the notes
* in the section.
*/
offset = 0;
for (;;) {
if (offset >= (size_t)bufsize)
break;
offset = donote(ms, nbuf, offset,
(size_t)bufsize, clazz, swap, align,
flags);
if (offset == 0)
break;
}
break;
default:
break;
}
}
if (file_printf(ms, ", %s linked", linking_style)
== -1)
return -1;
if (interp[0])
if (file_printf(ms, ", interpreter %s",
file_printable(ibuf, sizeof(ibuf), interp)) == -1)
return -1;
return 0;
}
Commit Message: - Add a limit to the number of ELF notes processed (Suggested by Alexander
Cherepanov)
- Restructure ELF note printing so that we don't print the same message
multiple times on repeated notes of the same kind.
CWE ID: CWE-399 | dophn_exec(struct magic_set *ms, int clazz, int swap, int fd, off_t off,
int num, size_t size, off_t fsize, int sh_num, int *flags,
uint16_t *notecount)
{
Elf32_Phdr ph32;
Elf64_Phdr ph64;
const char *linking_style = "statically";
const char *interp = "";
unsigned char nbuf[BUFSIZ];
char ibuf[BUFSIZ];
ssize_t bufsize;
size_t offset, align, len;
if (size != xph_sizeof) {
if (file_printf(ms, ", corrupted program header size") == -1)
return -1;
return 0;
}
for ( ; num; num--) {
if (pread(fd, xph_addr, xph_sizeof, off) < (ssize_t)xph_sizeof) {
file_badread(ms);
return -1;
}
off += size;
bufsize = 0;
align = 4;
/* Things we can determine before we seek */
switch (xph_type) {
case PT_DYNAMIC:
linking_style = "dynamically";
break;
case PT_NOTE:
if (sh_num) /* Did this through section headers */
continue;
if (((align = xph_align) & 0x80000000UL) != 0 ||
align < 4) {
if (file_printf(ms,
", invalid note alignment 0x%lx",
(unsigned long)align) == -1)
return -1;
align = 4;
}
/*FALLTHROUGH*/
case PT_INTERP:
len = xph_filesz < sizeof(nbuf) ? xph_filesz
: sizeof(nbuf);
bufsize = pread(fd, nbuf, len, xph_offset);
if (bufsize == -1) {
file_badread(ms);
return -1;
}
break;
default:
if (fsize != SIZE_UNKNOWN && xph_offset > fsize) {
/* Maybe warn here? */
continue;
}
break;
}
/* Things we can determine when we seek */
switch (xph_type) {
case PT_INTERP:
if (bufsize && nbuf[0]) {
nbuf[bufsize - 1] = '\0';
interp = (const char *)nbuf;
} else
interp = "*empty*";
break;
case PT_NOTE:
/*
* This is a PT_NOTE section; loop through all the notes
* in the section.
*/
offset = 0;
for (;;) {
if (offset >= (size_t)bufsize)
break;
offset = donote(ms, nbuf, offset,
(size_t)bufsize, clazz, swap, align,
flags, notecount);
if (offset == 0)
break;
}
break;
default:
break;
}
}
if (file_printf(ms, ", %s linked", linking_style)
== -1)
return -1;
if (interp[0])
if (file_printf(ms, ", interpreter %s",
file_printable(ibuf, sizeof(ibuf), interp)) == -1)
return -1;
return 0;
}
| 20,515 |
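The file(1) change above threads a notecount through the note-parsing loops so one crafted binary cannot force unbounded work. Below is a reduced sketch of a counted note loop; kMaxNotes, the fixed record size, and the parser stub are assumptions rather than file's real internals.

#include <cstddef>
#include <cstdint>

constexpr std::uint16_t kMaxNotes = 1024;    // hypothetical cap

// Placeholder note parser: a real one would honour the namesz/descsz fields.
std::size_t parse_one_note(const unsigned char*, std::size_t off,
                           std::size_t size) {
    const std::size_t next = off + 16;
    return next <= size ? next : 0;
}

// Stops as soon as the shared counter reaches the cap, even across
// multiple PT_NOTE segments or SHT_NOTE sections.
bool parse_notes(const unsigned char* buf, std::size_t size,
                 std::uint16_t* notecount) {
    std::size_t off = 0;
    while (off < size) {
        if (*notecount >= kMaxNotes)
            return false;
        ++*notecount;
        off = parse_one_note(buf, off, size);
        if (off == 0)
            break;
    }
    return true;
}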
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void ContextImpl::CreateFrame(
fidl::InterfaceHandle<chromium::web::FrameObserver> observer,
fidl::InterfaceRequest<chromium::web::Frame> frame_request) {
auto web_contents = content::WebContents::Create(
content::WebContents::CreateParams(browser_context_, nullptr));
frame_bindings_.AddBinding(
std::make_unique<FrameImpl>(std::move(web_contents), observer.Bind()),
std::move(frame_request));
}
Commit Message: [fuchsia] Implement browser tests for WebRunner Context service.
Tests may interact with the WebRunner FIDL services and the underlying
browser objects for end to end testing of service and browser
functionality.
* Add a browser test launcher main() for WebRunner.
* Add some simple navigation tests.
* Wire up GoBack()/GoForward() FIDL calls.
* Add embedded test server resources and initialization logic.
* Add missing deletion & notification calls to BrowserContext dtor.
* Use FIDL events for navigation state changes.
* Bug fixes:
** Move BrowserContext and Screen deletion to PostMainMessageLoopRun(),
so that they may use the MessageLoop during teardown.
** Fix Frame dtor to allow for null WindowTreeHosts (headless case)
** Fix std::move logic in Frame ctor which lead to no WebContents
observer being registered.
Bug: 871594
Change-Id: I36bcbd2436d534d366c6be4eeb54b9f9feadd1ac
Reviewed-on: https://chromium-review.googlesource.com/1164539
Commit-Queue: Kevin Marshall <[email protected]>
Reviewed-by: Wez <[email protected]>
Reviewed-by: Fabrice de Gans-Riberi <[email protected]>
Reviewed-by: Scott Violet <[email protected]>
Cr-Commit-Position: refs/heads/master@{#584155}
CWE ID: CWE-264 | void ContextImpl::CreateFrame(
fidl::InterfaceRequest<chromium::web::Frame> frame_request) {
auto web_contents = content::WebContents::Create(
content::WebContents::CreateParams(browser_context_, nullptr));
frames_.insert(std::make_unique<FrameImpl>(std::move(web_contents), this,
std::move(frame_request)));
}
void ContextImpl::DestroyFrame(FrameImpl* frame) {
DCHECK(frames_.find(frame) != frames_.end());
frames_.erase(frames_.find(frame));
}
FrameImpl* ContextImpl::GetFrameImplForTest(
chromium::web::FramePtr* frame_ptr) {
DCHECK(frame_ptr);
// Find the FrameImpl whose channel is connected to |frame_ptr| by inspecting
// the related_koids of active FrameImpls.
zx_info_handle_basic_t handle_info;
zx_status_t status = frame_ptr->channel().get_info(
ZX_INFO_HANDLE_BASIC, &handle_info, sizeof(zx_info_handle_basic_t),
nullptr, nullptr);
ZX_CHECK(status == ZX_OK, status) << "zx_object_get_info";
zx_handle_t client_handle_koid = handle_info.koid;
for (const std::unique_ptr<FrameImpl>& frame : frames_) {
status = frame->GetBindingChannelForTest()->get_info(
ZX_INFO_HANDLE_BASIC, &handle_info, sizeof(zx_info_handle_basic_t),
nullptr, nullptr);
ZX_CHECK(status == ZX_OK, status) << "zx_object_get_info";
if (client_handle_koid == handle_info.related_koid)
return frame.get();
}
return nullptr;
}
| 24,172 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void PrintWebViewHelper::OnPrintForPrintPreview(
const base::DictionaryValue& job_settings) {
if (prep_frame_view_)
return;
if (!render_view()->GetWebView())
return;
blink::WebFrame* main_frame = render_view()->GetWebView()->mainFrame();
if (!main_frame)
return;
blink::WebDocument document = main_frame->document();
blink::WebElement pdf_element = document.getElementById("pdf-viewer");
if (pdf_element.isNull()) {
NOTREACHED();
return;
}
blink::WebLocalFrame* plugin_frame = pdf_element.document().frame();
blink::WebElement plugin_element = pdf_element;
if (pdf_element.hasHTMLTagName("iframe")) {
plugin_frame = blink::WebLocalFrame::fromFrameOwnerElement(pdf_element);
plugin_element = delegate_->GetPdfElement(plugin_frame);
if (plugin_element.isNull()) {
NOTREACHED();
return;
}
}
base::AutoReset<bool> set_printing_flag(&print_for_preview_, true);
if (!UpdatePrintSettings(plugin_frame, plugin_element, job_settings)) {
LOG(ERROR) << "UpdatePrintSettings failed";
DidFinishPrinting(FAIL_PRINT);
return;
}
PrintMsg_Print_Params& print_params = print_pages_params_->params;
print_params.printable_area = gfx::Rect(print_params.page_size);
if (!RenderPagesForPrint(plugin_frame, plugin_element)) {
LOG(ERROR) << "RenderPagesForPrint failed";
DidFinishPrinting(FAIL_PRINT);
}
}
Commit Message: Crash on nested IPC handlers in PrintWebViewHelper
Class is not designed to handle nested IPC. Regular flows also does not
expect them. Still during printing of plugging them may show message
boxes and start nested message loops.
For now we are going just crash. If stats show us that this case is
frequent we will have to do something more complicated.
BUG=502562
Review URL: https://codereview.chromium.org/1228693002
Cr-Commit-Position: refs/heads/master@{#338100}
CWE ID: | void PrintWebViewHelper::OnPrintForPrintPreview(
const base::DictionaryValue& job_settings) {
CHECK_LE(ipc_nesting_level_, 1);
if (prep_frame_view_)
return;
if (!render_view()->GetWebView())
return;
blink::WebFrame* main_frame = render_view()->GetWebView()->mainFrame();
if (!main_frame)
return;
blink::WebDocument document = main_frame->document();
blink::WebElement pdf_element = document.getElementById("pdf-viewer");
if (pdf_element.isNull()) {
NOTREACHED();
return;
}
blink::WebLocalFrame* plugin_frame = pdf_element.document().frame();
blink::WebElement plugin_element = pdf_element;
if (pdf_element.hasHTMLTagName("iframe")) {
plugin_frame = blink::WebLocalFrame::fromFrameOwnerElement(pdf_element);
plugin_element = delegate_->GetPdfElement(plugin_frame);
if (plugin_element.isNull()) {
NOTREACHED();
return;
}
}
base::AutoReset<bool> set_printing_flag(&print_for_preview_, true);
if (!UpdatePrintSettings(plugin_frame, plugin_element, job_settings)) {
LOG(ERROR) << "UpdatePrintSettings failed";
DidFinishPrinting(FAIL_PRINT);
return;
}
PrintMsg_Print_Params& print_params = print_pages_params_->params;
print_params.printable_area = gfx::Rect(print_params.page_size);
if (!RenderPagesForPrint(plugin_frame, plugin_element)) {
LOG(ERROR) << "RenderPagesForPrint failed";
DidFinishPrinting(FAIL_PRINT);
}
}
| 23,533 |
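The CHECK_LE(ipc_nesting_level_, 1) added above relies on the helper tracking how deeply its IPC handlers are nested. A compact RAII version of that guard is sketched here, independent of Chromium's classes; the assert stands in for the crash-on-nesting behaviour the commit describes.

#include <cassert>

// Bumps a nesting counter for the lifetime of one message dispatch.
class NestingGuard {
public:
    explicit NestingGuard(int& level) : level_(level) { ++level_; }
    ~NestingGuard() { --level_; }
    int depth() const { return level_; }
private:
    int& level_;
};

void handle_print_message(int& ipc_nesting_level) {
    NestingGuard guard(ipc_nesting_level);
    // Refuse to run a printing handler re-entered from a nested message loop.
    assert(guard.depth() <= 1 && "nested IPC during printing is unsupported");
    // ... printing work would go here ...
}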
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: check(FILE *fp, int argc, const char **argv, png_uint_32p flags/*out*/,
display *d, int set_callback)
{
int i, npasses, ipass;
png_uint_32 height;
d->keep = PNG_HANDLE_CHUNK_AS_DEFAULT;
d->before_IDAT = 0;
d->after_IDAT = 0;
/* Some of these errors are permanently fatal and cause an exit here, others
* are per-test and cause an error return.
*/
d->png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, d, error,
warning);
if (d->png_ptr == NULL)
{
fprintf(stderr, "%s(%s): could not allocate png struct\n", d->file,
d->test);
/* Terminate here, this error is not test specific. */
exit(1);
}
d->info_ptr = png_create_info_struct(d->png_ptr);
d->end_ptr = png_create_info_struct(d->png_ptr);
if (d->info_ptr == NULL || d->end_ptr == NULL)
{
fprintf(stderr, "%s(%s): could not allocate png info\n", d->file,
d->test);
clean_display(d);
exit(1);
}
png_init_io(d->png_ptr, fp);
# ifdef PNG_READ_USER_CHUNKS_SUPPORTED
/* This is only done if requested by the caller; it interferes with the
* standard store/save mechanism.
*/
if (set_callback)
png_set_read_user_chunk_fn(d->png_ptr, d, read_callback);
# else
UNUSED(set_callback)
# endif
/* Handle each argument in turn; multiple settings are possible for the same
* chunk and multiple calls will occur (the last one should override all
* preceding ones).
*/
for (i=0; i<argc; ++i)
{
const char *equals = strchr(argv[i], '=');
if (equals != NULL)
{
int chunk, option;
if (strcmp(equals+1, "default") == 0)
option = PNG_HANDLE_CHUNK_AS_DEFAULT;
else if (strcmp(equals+1, "discard") == 0)
option = PNG_HANDLE_CHUNK_NEVER;
else if (strcmp(equals+1, "if-safe") == 0)
option = PNG_HANDLE_CHUNK_IF_SAFE;
else if (strcmp(equals+1, "save") == 0)
option = PNG_HANDLE_CHUNK_ALWAYS;
else
{
fprintf(stderr, "%s(%s): %s: unrecognized chunk option\n", d->file,
d->test, argv[i]);
display_exit(d);
}
switch (equals - argv[i])
{
case 4: /* chunk name */
chunk = find(argv[i]);
if (chunk >= 0)
{
/* These #if tests have the effect of skipping the arguments
* if SAVE support is unavailable - we can't do a useful test
* in this case, so we just check the arguments! This could
* be improved in the future by using the read callback.
*/
png_byte name[5];
memcpy(name, chunk_info[chunk].name, 5);
png_set_keep_unknown_chunks(d->png_ptr, option, name, 1);
chunk_info[chunk].keep = option;
continue;
}
break;
case 7: /* default */
if (memcmp(argv[i], "default", 7) == 0)
{
png_set_keep_unknown_chunks(d->png_ptr, option, NULL, 0);
d->keep = option;
continue;
}
break;
case 3: /* all */
if (memcmp(argv[i], "all", 3) == 0)
{
png_set_keep_unknown_chunks(d->png_ptr, option, NULL, -1);
d->keep = option;
for (chunk = 0; chunk < NINFO; ++chunk)
if (chunk_info[chunk].all)
chunk_info[chunk].keep = option;
continue;
}
break;
default: /* some misplaced = */
break;
}
}
fprintf(stderr, "%s(%s): %s: unrecognized chunk argument\n", d->file,
d->test, argv[i]);
display_exit(d);
}
png_read_info(d->png_ptr, d->info_ptr);
switch (png_get_interlace_type(d->png_ptr, d->info_ptr))
{
case PNG_INTERLACE_NONE:
npasses = 1;
break;
case PNG_INTERLACE_ADAM7:
npasses = PNG_INTERLACE_ADAM7_PASSES;
break;
default:
/* Hard error because it is not test specific */
fprintf(stderr, "%s(%s): invalid interlace type\n", d->file, d->test);
clean_display(d);
exit(1);
}
/* Skip the image data, if IDAT is not being handled then don't do this
* because it will cause a CRC error.
*/
if (chunk_info[0/*IDAT*/].keep == PNG_HANDLE_CHUNK_AS_DEFAULT)
{
png_start_read_image(d->png_ptr);
height = png_get_image_height(d->png_ptr, d->info_ptr);
if (npasses > 1)
{
png_uint_32 width = png_get_image_width(d->png_ptr, d->info_ptr);
for (ipass=0; ipass<npasses; ++ipass)
{
png_uint_32 wPass = PNG_PASS_COLS(width, ipass);
if (wPass > 0)
{
png_uint_32 y;
for (y=0; y<height; ++y) if (PNG_ROW_IN_INTERLACE_PASS(y, ipass))
png_read_row(d->png_ptr, NULL, NULL);
}
}
} /* interlaced */
else /* not interlaced */
{
png_uint_32 y;
for (y=0; y<height; ++y)
png_read_row(d->png_ptr, NULL, NULL);
}
}
png_read_end(d->png_ptr, d->end_ptr);
flags[0] = get_valid(d, d->info_ptr);
flags[1] = get_unknown(d, d->info_ptr, 0/*before IDAT*/);
/* Only png_read_png sets PNG_INFO_IDAT! */
flags[chunk_info[0/*IDAT*/].keep != PNG_HANDLE_CHUNK_AS_DEFAULT] |=
PNG_INFO_IDAT;
flags[2] = get_valid(d, d->end_ptr);
flags[3] = get_unknown(d, d->end_ptr, 1/*after IDAT*/);
clean_display(d);
return d->keep;
}
Commit Message: DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
CWE ID: | check(FILE *fp, int argc, const char **argv, png_uint_32p flags/*out*/,
display *d, int set_callback)
{
int i, npasses, ipass;
png_uint_32 height;
d->keep = PNG_HANDLE_CHUNK_AS_DEFAULT;
d->before_IDAT = 0;
d->after_IDAT = 0;
/* Some of these errors are permanently fatal and cause an exit here, others
* are per-test and cause an error return.
*/
d->png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, d, error,
warning);
if (d->png_ptr == NULL)
{
fprintf(stderr, "%s(%s): could not allocate png struct\n", d->file,
d->test);
/* Terminate here, this error is not test specific. */
exit(1);
}
d->info_ptr = png_create_info_struct(d->png_ptr);
d->end_ptr = png_create_info_struct(d->png_ptr);
if (d->info_ptr == NULL || d->end_ptr == NULL)
{
fprintf(stderr, "%s(%s): could not allocate png info\n", d->file,
d->test);
clean_display(d);
exit(1);
}
png_init_io(d->png_ptr, fp);
# ifdef PNG_READ_USER_CHUNKS_SUPPORTED
/* This is only done if requested by the caller; it interferes with the
* standard store/save mechanism.
*/
if (set_callback)
png_set_read_user_chunk_fn(d->png_ptr, d, read_callback);
# else
UNUSED(set_callback)
# endif
/* Handle each argument in turn; multiple settings are possible for the same
* chunk and multiple calls will occur (the last one should override all
* preceding ones).
*/
for (i=0; i<argc; ++i)
{
const char *equals = strchr(argv[i], '=');
if (equals != NULL)
{
int chunk, option;
if (strcmp(equals+1, "default") == 0)
option = PNG_HANDLE_CHUNK_AS_DEFAULT;
else if (strcmp(equals+1, "discard") == 0)
option = PNG_HANDLE_CHUNK_NEVER;
else if (strcmp(equals+1, "if-safe") == 0)
option = PNG_HANDLE_CHUNK_IF_SAFE;
else if (strcmp(equals+1, "save") == 0)
option = PNG_HANDLE_CHUNK_ALWAYS;
else
{
fprintf(stderr, "%s(%s): %s: unrecognized chunk option\n", d->file,
d->test, argv[i]);
display_exit(d);
}
switch (equals - argv[i])
{
case 4: /* chunk name */
chunk = find(argv[i]);
if (chunk >= 0)
{
/* These #if tests have the effect of skipping the arguments
* if SAVE support is unavailable - we can't do a useful test
* in this case, so we just check the arguments! This could
* be improved in the future by using the read callback.
*/
# if PNG_LIBPNG_VER >= 10700 &&\
!defined(PNG_SAVE_UNKNOWN_CHUNKS_SUPPORTED)
if (option < PNG_HANDLE_CHUNK_IF_SAFE)
# endif /* 1.7+ SAVE_UNKNOWN_CHUNKS */
{
png_byte name[5];
memcpy(name, chunk_info[chunk].name, 5);
png_set_keep_unknown_chunks(d->png_ptr, option, name, 1);
chunk_info[chunk].keep = option;
}
continue;
}
break;
case 7: /* default */
if (memcmp(argv[i], "default", 7) == 0)
{
# if PNG_LIBPNG_VER >= 10700 &&\
!defined(PNG_SAVE_UNKNOWN_CHUNKS_SUPPORTED)
if (option < PNG_HANDLE_CHUNK_IF_SAFE)
# endif /* 1.7+ SAVE_UNKNOWN_CHUNKS */
png_set_keep_unknown_chunks(d->png_ptr, option, NULL, 0);
d->keep = option;
continue;
}
break;
case 3: /* all */
if (memcmp(argv[i], "all", 3) == 0)
{
# if PNG_LIBPNG_VER >= 10700 &&\
!defined(PNG_SAVE_UNKNOWN_CHUNKS_SUPPORTED)
if (option < PNG_HANDLE_CHUNK_IF_SAFE)
# endif /* 1.7+ SAVE_UNKNOWN_CHUNKS */
png_set_keep_unknown_chunks(d->png_ptr, option, NULL, -1);
d->keep = option;
for (chunk = 0; chunk < NINFO; ++chunk)
if (chunk_info[chunk].all)
chunk_info[chunk].keep = option;
continue;
}
break;
default: /* some misplaced = */
break;
}
}
fprintf(stderr, "%s(%s): %s: unrecognized chunk argument\n", d->file,
d->test, argv[i]);
display_exit(d);
}
png_read_info(d->png_ptr, d->info_ptr);
switch (png_get_interlace_type(d->png_ptr, d->info_ptr))
{
case PNG_INTERLACE_NONE:
npasses = 1;
break;
case PNG_INTERLACE_ADAM7:
npasses = PNG_INTERLACE_ADAM7_PASSES;
break;
default:
/* Hard error because it is not test specific */
fprintf(stderr, "%s(%s): invalid interlace type\n", d->file, d->test);
clean_display(d);
exit(1);
}
/* Skip the image data, if IDAT is not being handled then don't do this
* because it will cause a CRC error.
*/
if (chunk_info[0/*IDAT*/].keep == PNG_HANDLE_CHUNK_AS_DEFAULT)
{
png_start_read_image(d->png_ptr);
height = png_get_image_height(d->png_ptr, d->info_ptr);
if (npasses > 1)
{
png_uint_32 width = png_get_image_width(d->png_ptr, d->info_ptr);
for (ipass=0; ipass<npasses; ++ipass)
{
png_uint_32 wPass = PNG_PASS_COLS(width, ipass);
if (wPass > 0)
{
png_uint_32 y;
for (y=0; y<height; ++y) if (PNG_ROW_IN_INTERLACE_PASS(y, ipass))
png_read_row(d->png_ptr, NULL, NULL);
}
}
} /* interlaced */
else /* not interlaced */
{
png_uint_32 y;
for (y=0; y<height; ++y)
png_read_row(d->png_ptr, NULL, NULL);
}
}
png_read_end(d->png_ptr, d->end_ptr);
flags[0] = get_valid(d, d->info_ptr);
flags[1] = get_unknown(d, d->info_ptr, 0/*before IDAT*/);
/* Only png_read_png sets PNG_INFO_IDAT! */
flags[chunk_info[0/*IDAT*/].keep != PNG_HANDLE_CHUNK_AS_DEFAULT] |=
PNG_INFO_IDAT;
flags[2] = get_valid(d, d->end_ptr);
flags[3] = get_unknown(d, d->end_ptr, 1/*after IDAT*/);
clean_display(d);
return d->keep;
}
| 7,379 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int wait_for_key_construction(struct key *key, bool intr)
{
int ret;
ret = wait_on_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT,
intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (ret)
return -ERESTARTSYS;
if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
smp_rmb();
return key->reject_error;
}
return key_validate(key);
}
Commit Message: KEYS: Fix race between updating and finding a negative key
Consolidate KEY_FLAG_INSTANTIATED, KEY_FLAG_NEGATIVE and the rejection
error into one field such that:
(1) The instantiation state can be modified/read atomically.
(2) The error can be accessed atomically with the state.
(3) The error isn't stored unioned with the payload pointers.
This deals with the problem that the state is spread over three different
objects (two bits and a separate variable) and reading or updating them
atomically isn't practical, given that not only can uninstantiated keys
change into instantiated or rejected keys, but rejected keys can also turn
into instantiated keys - and someone accessing the key might not be using
any locking.
The main side effect of this problem is that what was held in the payload
may change, depending on the state. For instance, you might observe the
key to be in the rejected state. You then read the cached error, but if
the key semaphore wasn't locked, the key might've become instantiated
between the two reads - and you might now have something in hand that isn't
actually an error code.
The state is now KEY_IS_UNINSTANTIATED, KEY_IS_POSITIVE or a negative error
code if the key is negatively instantiated. The key_is_instantiated()
function is replaced with key_is_positive() to avoid confusion as negative
keys are also 'instantiated'.
Additionally, barriering is included:
(1) Order payload-set before state-set during instantiation.
(2) Order state-read before payload-read when using the key.
Further separate barriering is necessary if RCU is being used to access the
payload content after reading the payload pointers.
Fixes: 146aa8b1453b ("KEYS: Merge the type-specific data with the payload data")
Cc: [email protected] # v4.4+
Reported-by: Eric Biggers <[email protected]>
Signed-off-by: David Howells <[email protected]>
Reviewed-by: Eric Biggers <[email protected]>
CWE ID: CWE-20 | int wait_for_key_construction(struct key *key, bool intr)
{
int ret;
ret = wait_on_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT,
intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (ret)
return -ERESTARTSYS;
ret = key_read_state(key);
if (ret < 0)
return ret;
return key_validate(key);
}
| 18,204 |
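The commit message in the row above states two ordering rules for the consolidated key state: payload-set must happen before state-set during instantiation, and state-read must happen before payload-read when using the key. The sketch below illustrates just that publish/consume pattern; the struct, field and function names are illustrative stand-ins rather than the real security/keys/ internals, and C11 release/acquire atomics are used instead of the kernel's own barrier helpers so the example stays self-contained.

#include <errno.h>
#include <stdatomic.h>
#include <stddef.h>

/* Illustrative stand-in for a key whose state and payload must stay
 * consistent without a lock. The ordering rules are the ones named in
 * the commit message above:
 *   (1) payload-set is ordered before state-set during instantiation
 *   (2) state-read is ordered before payload-read when using the key */
struct example_key {
	void *payload;      /* meaningful only once the state is positive */
	atomic_int state;   /* <0: rejected with -errno, 0: uninstantiated, >0: positive */
};

static void mark_instantiated(struct example_key *k, void *data)
{
	k->payload = data;                                           /* (1) publish the payload first */
	atomic_store_explicit(&k->state, 1, memory_order_release);   /* ...then the state */
}

static void mark_rejected(struct example_key *k, int error)     /* error is a negative errno */
{
	atomic_store_explicit(&k->state, error, memory_order_release);
}

static void *use_key(struct example_key *k, int *err)
{
	int state = atomic_load_explicit(&k->state, memory_order_acquire);  /* (2) read state first */
	if (state <= 0) {
		*err = state ? state : -EAGAIN;   /* the negative state doubles as the error code */
		return NULL;
	}
	*err = 0;
	return k->payload;                    /* safe: ordered after the acquire above */
}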
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void put_crypt_info(struct fscrypt_info *ci)
{
if (!ci)
return;
key_put(ci->ci_keyring_key);
crypto_free_skcipher(ci->ci_ctfm);
kmem_cache_free(fscrypt_info_cachep, ci);
}
Commit Message: fscrypt: remove broken support for detecting keyring key revocation
Filesystem encryption ostensibly supported revoking a keyring key that
had been used to "unlock" encrypted files, causing those files to become
"locked" again. This was, however, buggy for several reasons, the most
severe of which was that when key revocation happened to be detected for
an inode, its fscrypt_info was immediately freed, even while other
threads could be using it for encryption or decryption concurrently.
This could be exploited to crash the kernel or worse.
This patch fixes the use-after-free by removing the code which detects
the keyring key having been revoked, invalidated, or expired. Instead,
an encrypted inode that is "unlocked" now simply remains unlocked until
it is evicted from memory. Note that this is no worse than the case for
block device-level encryption, e.g. dm-crypt, and it still remains
possible for a privileged user to evict unused pages, inodes, and
dentries by running 'sync; echo 3 > /proc/sys/vm/drop_caches', or by
simply unmounting the filesystem. In fact, one of those actions was
already needed anyway for key revocation to work even somewhat sanely.
This change is not expected to break any applications.
In the future I'd like to implement a real API for fscrypt key
revocation that interacts sanely with ongoing filesystem operations ---
waiting for existing operations to complete and blocking new operations,
and invalidating and sanitizing key material and plaintext from the VFS
caches. But this is a hard problem, and for now this bug must be fixed.
This bug affected almost all versions of ext4, f2fs, and ubifs
encryption, and it was potentially reachable in any kernel configured
with encryption support (CONFIG_EXT4_ENCRYPTION=y,
CONFIG_EXT4_FS_ENCRYPTION=y, CONFIG_F2FS_FS_ENCRYPTION=y, or
CONFIG_UBIFS_FS_ENCRYPTION=y). Note that older kernels did not use the
shared fs/crypto/ code, but due to the potential security implications
of this bug, it may still be worthwhile to backport this fix to them.
Fixes: b7236e21d55f ("ext4 crypto: reorganize how we store keys in the inode")
Cc: [email protected] # v4.2+
Signed-off-by: Eric Biggers <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
Acked-by: Michael Halcrow <[email protected]>
CWE ID: CWE-416 | static void put_crypt_info(struct fscrypt_info *ci)
{
if (!ci)
return;
crypto_free_skcipher(ci->ci_ctfm);
kmem_cache_free(fscrypt_info_cachep, ci);
}
| 25,336 |
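The fscrypt commit message above pins the severity on one point: the per-inode crypto info was freed as soon as key revocation was detected, while other threads could still be using it for encryption or decryption. The fragment below is a generic illustration of that hazard and of the shape of the fix (free only at inode eviction, when no concurrent user can still hold the pointer); inode_like, crypt_info and the revocation flag are made-up names, not the real fscrypt API.

#include <stdlib.h>

struct crypt_info {               /* stand-in for the per-inode key material */
	unsigned char raw_key[64];
};

struct inode_like {
	struct crypt_info *ci;        /* set once when the inode is "unlocked" */
};

/* Thread A: the removed behaviour, freeing ci the moment revocation is seen. */
static void broken_revocation_check(struct inode_like *inode, int key_revoked)
{
	if (key_revoked) {
		free(inode->ci);          /* BUG: another thread may still be dereferencing this */
		inode->ci = NULL;
	}
}

/* Thread B: an encryption/decryption path running concurrently. */
static void do_crypto(struct inode_like *inode, unsigned char *buf, size_t len)
{
	struct crypt_info *ci = inode->ci;
	for (size_t i = 0; i < len; i++)                  /* uses ci->raw_key; a use-after-free */
		buf[i] ^= ci->raw_key[i % sizeof(ci->raw_key)]; /* if thread A freed it meanwhile  */
}

/* Fix shape per the commit message: the info now lives until the inode is
 * evicted from memory, a point where no operation can still be using it. */
static void on_inode_eviction(struct inode_like *inode)
{
	free(inode->ci);
	inode->ci = NULL;
}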
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: PHP_NAMED_FUNCTION(zif_locale_set_default)
{
char* locale_name = NULL;
int len=0;
if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s",
&locale_name ,&len ) == FAILURE)
{
intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR,
"locale_set_default: unable to parse input params", 0 TSRMLS_CC );
RETURN_FALSE;
}
if(len == 0) {
locale_name = (char *)uloc_getDefault() ;
len = strlen(locale_name);
}
zend_alter_ini_entry(LOCALE_INI_NAME, sizeof(LOCALE_INI_NAME), locale_name, len, PHP_INI_USER, PHP_INI_STAGE_RUNTIME);
RETURN_TRUE;
}
Commit Message: Fix bug #72241: get_icu_value_internal out-of-bounds read
CWE ID: CWE-125 | PHP_NAMED_FUNCTION(zif_locale_set_default)
{
char* locale_name = NULL;
int len=0;
if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s",
&locale_name ,&len ) == FAILURE)
{
intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR,
"locale_set_default: unable to parse input params", 0 TSRMLS_CC );
RETURN_FALSE;
}
if(len == 0) {
locale_name = (char *)uloc_getDefault() ;
len = strlen(locale_name);
}
zend_alter_ini_entry(LOCALE_INI_NAME, sizeof(LOCALE_INI_NAME), locale_name, len, PHP_INI_USER, PHP_INI_STAGE_RUNTIME);
RETURN_TRUE;
}
| 15,873 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void NetworkHandler::DeleteCookies(
const std::string& name,
Maybe<std::string> url,
Maybe<std::string> domain,
Maybe<std::string> path,
std::unique_ptr<DeleteCookiesCallback> callback) {
if (!process_) {
callback->sendFailure(Response::InternalError());
return;
}
if (!url.isJust() && !domain.isJust()) {
callback->sendFailure(Response::InvalidParams(
"At least one of the url and domain needs to be specified"));
}
BrowserThread::PostTask(
BrowserThread::IO, FROM_HERE,
base::BindOnce(
&DeleteCookiesOnIO,
base::Unretained(
process_->GetStoragePartition()->GetURLRequestContext()),
name, url.fromMaybe(""), domain.fromMaybe(""), path.fromMaybe(""),
base::BindOnce(&DeleteCookiesCallback::sendSuccess,
std::move(callback))));
}
Commit Message: DevTools: speculative fix for crash in NetworkHandler::Disable
This keeps BrowserContext* and StoragePartition* instead of
RenderProcessHost* in an attemp to resolve UAF of RenderProcessHost
upon closure of DevTools front-end.
Bug: 801117, 783067, 780694
Change-Id: I6c2cca60cc0c29f0949d189cf918769059f80c1b
Reviewed-on: https://chromium-review.googlesource.com/876657
Commit-Queue: Andrey Kosyakov <[email protected]>
Reviewed-by: Dmitry Gozman <[email protected]>
Cr-Commit-Position: refs/heads/master@{#531157}
CWE ID: CWE-20 | void NetworkHandler::DeleteCookies(
const std::string& name,
Maybe<std::string> url,
Maybe<std::string> domain,
Maybe<std::string> path,
std::unique_ptr<DeleteCookiesCallback> callback) {
if (!storage_partition_) {
callback->sendFailure(Response::InternalError());
return;
}
if (!url.isJust() && !domain.isJust()) {
callback->sendFailure(Response::InvalidParams(
"At least one of the url and domain needs to be specified"));
}
BrowserThread::PostTask(
BrowserThread::IO, FROM_HERE,
base::BindOnce(
&DeleteCookiesOnIO,
base::Unretained(storage_partition_->GetURLRequestContext()), name,
url.fromMaybe(""), domain.fromMaybe(""), path.fromMaybe(""),
base::BindOnce(&DeleteCookiesCallback::sendSuccess,
std::move(callback))));
}
| 9,993 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void Rp_test(js_State *J)
{
js_Regexp *re;
const char *text;
int opts;
Resub m;
re = js_toregexp(J, 0);
text = js_tostring(J, 1);
opts = 0;
if (re->flags & JS_REGEXP_G) {
if (re->last > strlen(text)) {
re->last = 0;
js_pushboolean(J, 0);
return;
}
if (re->last > 0) {
text += re->last;
opts |= REG_NOTBOL;
}
}
if (!js_regexec(re->prog, text, &m, opts)) {
if (re->flags & JS_REGEXP_G)
re->last = re->last + (m.sub[0].ep - text);
js_pushboolean(J, 1);
return;
}
if (re->flags & JS_REGEXP_G)
re->last = 0;
js_pushboolean(J, 0);
}
Commit Message: Bug 700937: Limit recursion in regexp matcher.
Also handle negative return code as an error in the JS bindings.
CWE ID: CWE-400 | static void Rp_test(js_State *J)
{
js_Regexp *re;
const char *text;
int result;
int opts;
Resub m;
re = js_toregexp(J, 0);
text = js_tostring(J, 1);
opts = 0;
if (re->flags & JS_REGEXP_G) {
if (re->last > strlen(text)) {
re->last = 0;
js_pushboolean(J, 0);
return;
}
if (re->last > 0) {
text += re->last;
opts |= REG_NOTBOL;
}
}
result = js_regexec(re->prog, text, &m, opts);
if (result < 0)
js_error(J, "regexec failed");
if (result == 0) {
if (re->flags & JS_REGEXP_G)
re->last = re->last + (m.sub[0].ep - text);
js_pushboolean(J, 1);
return;
}
if (re->flags & JS_REGEXP_G)
re->last = 0;
js_pushboolean(J, 0);
}
| 3,430 |
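Only the second half of the commit above ("handle negative return code as an error in the JS bindings") is visible in this row; the first half, "Limit recursion in regexp matcher", lives inside the matcher itself, which the row does not show. The sketch below illustrates that idea on a deliberately tiny backtracking matcher adapted from the classic Kernighan and Pike example; it is not mujs's real regcomp/regexec code, and MAX_DEPTH is an arbitrary illustrative cap. Depth is counted across the mutually recursive calls, and once the cap is hit a negative "error" result is propagated, giving the same match / no-match / error tri-state that the fixed Rp_test distinguishes with result < 0.

#include <stdio.h>

#define MAX_DEPTH 1000                /* illustrative cap, not mujs's real limit */

/* Return convention: 1 = match, 0 = no match, -1 = error (depth cap hit). */
static int matchhere(const char *re, const char *text, int depth);

static int matchstar(int c, const char *re, const char *text, int depth)
{
	do {                              /* try the rest of the pattern at each position */
		int r = matchhere(re, text, depth);
		if (r != 0)
			return r;                 /* propagate both "match" and "error" */
	} while (*text != '\0' && (*text++ == c || c == '.'));
	return 0;
}

static int matchhere(const char *re, const char *text, int depth)
{
	if (depth > MAX_DEPTH)
		return -1;                    /* report an error instead of overflowing the C stack */
	if (re[0] == '\0')
		return 1;
	if (re[1] == '*')
		return matchstar(re[0], re + 2, text, depth + 1);
	if (re[0] == '$' && re[1] == '\0')
		return *text == '\0';
	if (*text != '\0' && (re[0] == '.' || re[0] == *text))
		return matchhere(re + 1, text + 1, depth + 1);
	return 0;
}

int match(const char *re, const char *text)
{
	do {
		int r = matchhere(re, text, 0);
		if (r != 0)
			return r;                 /* 1 on match, -1 if the depth cap was hit */
	} while (*text++ != '\0');
	return 0;
}

int main(void)
{
	printf("%d\n", match("a*b", "aaab"));   /* 1: match    */
	printf("%d\n", match("a*c", "aaab"));   /* 0: no match */
	return 0;
}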
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void MaybeCreateIBus() {
if (ibus_) {
return;
}
ibus_init();
ibus_ = ibus_bus_new();
if (!ibus_) {
LOG(ERROR) << "ibus_bus_new() failed";
return;
}
ConnectIBusSignals();
ibus_bus_set_watch_dbus_signal(ibus_, TRUE);
ibus_bus_set_watch_ibus_signal(ibus_, TRUE);
if (ibus_bus_is_connected(ibus_)) {
LOG(INFO) << "IBus connection is ready.";
}
}
Commit Message: Remove use of libcros from InputMethodLibrary.
BUG=chromium-os:16238
TEST==confirm that input methods work as before on the netbook. Also confirm that the chrome builds and works on the desktop as before.
Review URL: http://codereview.chromium.org/7003086
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@89142 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | void MaybeCreateIBus() {
if (ibus_) {
return;
}
ibus_init();
ibus_ = ibus_bus_new();
if (!ibus_) {
LOG(ERROR) << "ibus_bus_new() failed";
return;
}
ConnectIBusSignals();
ibus_bus_set_watch_dbus_signal(ibus_, TRUE);
ibus_bus_set_watch_ibus_signal(ibus_, TRUE);
if (ibus_bus_is_connected(ibus_)) {
VLOG(1) << "IBus connection is ready.";
}
}
| 10,555 |