<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: pg_open_tzfile(const char *name, char *canonname)
{
const char *fname;
char fullname[MAXPGPATH];
int fullnamelen;
int orignamelen;
/*
* Loop to split the given name into directory levels; for each level,
* search using scan_directory_ci().
*/
strcpy(fullname, pg_TZDIR());
orignamelen = fullnamelen = strlen(fullname);
fname = name;
for (;;)
{
const char *slashptr;
int fnamelen;
slashptr = strchr(fname, '/');
if (slashptr)
fnamelen = slashptr - fname;
else
fnamelen = strlen(fname);
if (fullnamelen + 1 + fnamelen >= MAXPGPATH)
return -1; /* not gonna fit */
if (!scan_directory_ci(fullname, fname, fnamelen,
fullname + fullnamelen + 1,
MAXPGPATH - fullnamelen - 1))
return -1;
fullname[fullnamelen++] = '/';
fullnamelen += strlen(fullname + fullnamelen);
if (slashptr)
fname = slashptr + 1;
else
break;
}
if (canonname)
strlcpy(canonname, fullname + orignamelen + 1, TZ_STRLEN_MAX + 1);
return open(fullname, O_RDONLY | PG_BINARY, 0);
} ; Expected output conforming to this sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt()'</s>
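A minimal sketch of the strlcpy()-style mitigation the message recommends, for platforms where strlcpy() itself is unavailable; my_strlcpy is a hypothetical name, not the PostgreSQL API. It copies at most dstsize - 1 bytes, always NUL-terminates, and returns strlen(src) so callers can detect truncation:

#include <string.h>

/* Hypothetical strlcpy-style helper: bounded copy that always
 * NUL-terminates and reports the untruncated source length. */
static size_t my_strlcpy(char *dst, const char *src, size_t dstsize)
{
    size_t srclen = strlen(src);
    if (dstsize > 0) {
        size_t n = (srclen < dstsize) ? srclen : dstsize - 1;
        memcpy(dst, src, n);
        dst[n] = '\0';
    }
    return srclen;
}

With such a helper, the unbounded strcpy(fullname, pg_TZDIR()) in the code above becomes my_strlcpy(fullname, pg_TZDIR(), sizeof(fullname)), plus a truncation check on the return value.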
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: identify_system_timezone(void)
{
static char resultbuf[TZ_STRLEN_MAX + 1];
time_t tnow;
time_t t;
struct tztry tt;
struct tm *tm;
int thisyear;
int bestscore;
char tmptzdir[MAXPGPATH];
int std_ofs;
char std_zone_name[TZ_STRLEN_MAX + 1],
dst_zone_name[TZ_STRLEN_MAX + 1];
char cbuf[TZ_STRLEN_MAX + 1];
/* Initialize OS timezone library */
tzset();
/*
* Set up the list of dates to be probed to see how well our timezone
* matches the system zone. We first probe January and July of the
* current year; this serves to quickly eliminate the vast majority of the
* TZ database entries. If those dates match, we probe every week for 100
* years backwards from the current July. (Weekly resolution is good
* enough to identify DST transition rules, since everybody switches on
* Sundays.) This is sufficient to cover most of the Unix time_t range,
* and we don't want to look further than that since many systems won't
* have sane TZ behavior further back anyway. The further back the zone
* matches, the better we score it. This may seem like a rather random
* way of doing things, but experience has shown that system-supplied
* timezone definitions are likely to have DST behavior that is right for
* the recent past and not so accurate further back. Scoring in this way
* allows us to recognize zones that have some commonality with the Olson
* database, without insisting on exact match. (Note: we probe Thursdays,
* not Sundays, to avoid triggering DST-transition bugs in localtime
* itself.)
*/
tnow = time(NULL);
tm = localtime(&tnow);
if (!tm)
return NULL; /* give up if localtime is broken... */
thisyear = tm->tm_year + 1900;
t = build_time_t(thisyear, 1, 15);
/*
* Round back to GMT midnight Thursday. This depends on the knowledge
* that the time_t origin is Thu Jan 01 1970. (With a different origin
* we'd be probing some other day of the week, but it wouldn't matter
* anyway unless localtime() had DST-transition bugs.)
*/
t -= (t % T_WEEK);
tt.n_test_times = 0;
tt.test_times[tt.n_test_times++] = t;
t = build_time_t(thisyear, 7, 15);
t -= (t % T_WEEK);
tt.test_times[tt.n_test_times++] = t;
while (tt.n_test_times < MAX_TEST_TIMES)
{
t -= T_WEEK;
tt.test_times[tt.n_test_times++] = t;
}
/* Search for the best-matching timezone file */
strcpy(tmptzdir, pg_TZDIR());
bestscore = -1;
resultbuf[0] = '\0';
scan_available_timezones(tmptzdir, tmptzdir + strlen(tmptzdir) + 1,
&tt,
&bestscore, resultbuf);
if (bestscore > 0)
{
/* Ignore Olson's rather silly "Factory" zone; use GMT instead */
if (strcmp(resultbuf, "Factory") == 0)
return NULL;
return resultbuf;
}
/*
* Couldn't find a match in the database, so next we try constructed zone
* names (like "PST8PDT").
*
* First we need to determine the names of the local standard and daylight
* zones. The idea here is to scan forward from today until we have seen
* both zones, if both are in use.
*/
memset(std_zone_name, 0, sizeof(std_zone_name));
memset(dst_zone_name, 0, sizeof(dst_zone_name));
std_ofs = 0;
tnow = time(NULL);
/*
* Round back to a GMT midnight so results don't depend on local time of
* day
*/
tnow -= (tnow % T_DAY);
/*
* We have to look a little further ahead than one year, in case today is
* just past a DST boundary that falls earlier in the year than the next
* similar boundary. Arbitrarily scan up to 14 months.
*/
for (t = tnow; t <= tnow + T_MONTH * 14; t += T_MONTH)
{
tm = localtime(&t);
if (!tm)
continue;
if (tm->tm_isdst < 0)
continue;
if (tm->tm_isdst == 0 && std_zone_name[0] == '\0')
{
/* found STD zone */
memset(cbuf, 0, sizeof(cbuf));
strftime(cbuf, sizeof(cbuf) - 1, "%Z", tm); /* zone abbr */
strcpy(std_zone_name, cbuf);
std_ofs = get_timezone_offset(tm);
}
if (tm->tm_isdst > 0 && dst_zone_name[0] == '\0')
{
/* found DST zone */
memset(cbuf, 0, sizeof(cbuf));
strftime(cbuf, sizeof(cbuf) - 1, "%Z", tm); /* zone abbr */
strcpy(dst_zone_name, cbuf);
}
/* Done if found both */
if (std_zone_name[0] && dst_zone_name[0])
break;
}
/* We should have found a STD zone name by now... */
if (std_zone_name[0] == '\0')
{
#ifdef DEBUG_IDENTIFY_TIMEZONE
fprintf(stderr, "could not determine system time zone\n");
#endif
return NULL; /* go to GMT */
}
/* If we found DST then try STD<ofs>DST */
if (dst_zone_name[0] != '\0')
{
snprintf(resultbuf, sizeof(resultbuf), "%s%d%s",
std_zone_name, -std_ofs / 3600, dst_zone_name);
if (score_timezone(resultbuf, &tt) > 0)
return resultbuf;
}
/* Try just the STD timezone (works for GMT at least) */
strcpy(resultbuf, std_zone_name);
if (score_timezone(resultbuf, &tt) > 0)
return resultbuf;
/* Try STD<ofs> */
snprintf(resultbuf, sizeof(resultbuf), "%s%d",
std_zone_name, -std_ofs / 3600);
if (score_timezone(resultbuf, &tt) > 0)
return resultbuf;
/*
* Did not find the timezone. Fall back to a GMT zone. Note that the
* Olson timezone database names the GMT-offset zones in POSIX style: plus
* is west of Greenwich. It's unfortunate that this is the opposite of SQL
* conventions. Should we therefore change the names? Probably not...
*/
snprintf(resultbuf, sizeof(resultbuf), "Etc/GMT%s%d",
(-std_ofs > 0) ? "+" : "", -std_ofs / 3600);
#ifdef DEBUG_IDENTIFY_TIMEZONE
fprintf(stderr, "could not recognize system time zone, using \"%s\"\n",
resultbuf);
#endif
return resultbuf;
} ; Expected output conforming to this sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt()'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: ConvertTimeZoneAbbrevs(TimeZoneAbbrevTable *tbl,
struct tzEntry *abbrevs, int n)
{
datetkn *newtbl = tbl->abbrevs;
int i;
tbl->numabbrevs = n;
for (i = 0; i < n; i++)
{
strncpy(newtbl[i].token, abbrevs[i].abbrev, TOKMAXLEN);
newtbl[i].type = abbrevs[i].is_dst ? DTZ : TZ;
TOVAL(&newtbl[i], abbrevs[i].offset / MINS_PER_HOUR);
}
/* Check the ordering, if testing */
Assert(CheckDateTokenTable("timezone offset", newtbl, n));
} ; Expected output conforming to this sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt()'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: validate_exec(const char *path)
{
struct stat buf;
int is_r;
int is_x;
#ifdef WIN32
char path_exe[MAXPGPATH + sizeof(".exe") - 1];
/* Win32 requires a .exe suffix for stat() */
if (strlen(path) >= strlen(".exe") &&
pg_strcasecmp(path + strlen(path) - strlen(".exe"), ".exe") != 0)
{
strcpy(path_exe, path);
strcat(path_exe, ".exe");
path = path_exe;
}
#endif
/*
* Ensure that the file exists and is a regular file.
*
* XXX if you have a broken system where stat() looks at the symlink
* instead of the underlying file, you lose.
*/
if (stat(path, &buf) < 0)
return -1;
if (!S_ISREG(buf.st_mode))
return -1;
/*
* Ensure that the file is both executable and readable (required for
* dynamic loading).
*/
#ifndef WIN32
is_r = (access(path, R_OK) == 0);
is_x = (access(path, X_OK) == 0);
#else
is_r = buf.st_mode & S_IRUSR;
is_x = buf.st_mode & S_IXUSR;
#endif
return is_x ? (is_r ? 0 : -2) : -1;
} ; Expected output conforming to this sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt()'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: chkpass_eq(PG_FUNCTION_ARGS)
{
chkpass *a1 = (chkpass *) PG_GETARG_POINTER(0);
text *a2 = PG_GETARG_TEXT_PP(1);
char str[9];
text_to_cstring_buffer(a2, str, sizeof(str));
PG_RETURN_BOOL(strcmp(a1->password, crypt(str, a1->password)) == 0);
} ; Expected output conforming to this sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt()'</s>
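The crypt() half of the fix is a plain NULL test before the comparison. A sketch assuming a POSIX crypt(3); password_matches is an illustrative helper, not the actual chkpass patch:

#include <string.h>
#include <unistd.h>   /* crypt() may instead require <crypt.h> */

/* crypt(3) returns NULL on failure, e.g. when a FIPS-mode libc
 * refuses the hash algorithm; test before comparing. */
static int password_matches(const char *plain, const char *stored_hash)
{
    char *hashed = crypt(plain, stored_hash);
    if (hashed == NULL)
        return 0;   /* hashing failed: report a mismatch, do not crash */
    return strcmp(stored_hash, hashed) == 0;
}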
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: recoveryStopsAfter(XLogRecord *record)
{
uint8 record_info;
TimestampTz recordXtime;
record_info = record->xl_info & ~XLR_INFO_MASK;
/*
* There can be many restore points that share the same name; we stop
* at the first one.
*/
if (recoveryTarget == RECOVERY_TARGET_NAME &&
record->xl_rmid == RM_XLOG_ID && record_info == XLOG_RESTORE_POINT)
{
xl_restore_point *recordRestorePointData;
recordRestorePointData = (xl_restore_point *) XLogRecGetData(record);
if (strcmp(recordRestorePointData->rp_name, recoveryTargetName) == 0)
{
recoveryStopAfter = true;
recoveryStopXid = InvalidTransactionId;
(void) getRecordTimestamp(record, &recoveryStopTime);
strncpy(recoveryStopName, recordRestorePointData->rp_name, MAXFNAMELEN);
ereport(LOG,
(errmsg("recovery stopping at restore point \"%s\", time %s",
recoveryStopName,
timestamptz_to_str(recoveryStopTime))));
return true;
}
}
if (record->xl_rmid == RM_XACT_ID &&
(record_info == XLOG_XACT_COMMIT_COMPACT ||
record_info == XLOG_XACT_COMMIT ||
record_info == XLOG_XACT_ABORT))
{
/* Update the last applied transaction timestamp */
if (getRecordTimestamp(record, &recordXtime))
SetLatestXTime(recordXtime);
/*
* There can be only one transaction end record with this exact
* transactionid
*
* when testing for an xid, we MUST test for equality only, since
* transactions are numbered in the order they start, not the order
* they complete. A higher numbered xid will complete before you about
* 50% of the time...
*/
if (recoveryTarget == RECOVERY_TARGET_XID && recoveryTargetInclusive &&
record->xl_xid == recoveryTargetXid)
{
recoveryStopAfter = true;
recoveryStopXid = record->xl_xid;
recoveryStopTime = recordXtime;
recoveryStopName[0] = '\0';
if (record_info == XLOG_XACT_COMMIT_COMPACT || record_info == XLOG_XACT_COMMIT)
{
ereport(LOG,
(errmsg("recovery stopping after commit of transaction %u, time %s",
recoveryStopXid,
timestamptz_to_str(recoveryStopTime))));
}
else if (record_info == XLOG_XACT_ABORT)
{
ereport(LOG,
(errmsg("recovery stopping after abort of transaction %u, time %s",
recoveryStopXid,
timestamptz_to_str(recoveryStopTime))));
}
return true;
}
}
/* Check if we should stop as soon as reaching consistency */
if (recoveryTarget == RECOVERY_TARGET_IMMEDIATE && reachedConsistency)
{
ereport(LOG,
(errmsg("recovery stopping after reaching consistency")));
recoveryStopAfter = true;
recoveryStopXid = InvalidTransactionId;
recoveryStopTime = 0;
recoveryStopName[0] = '\0';
return true;
}
return false;
} ; Expected output conforming to this sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt()'</s>
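Note that strncpy(recoveryStopName, ..., MAXFNAMELEN) in the code above leaves the destination unterminated whenever the source is MAXFNAMELEN bytes or longer, and the result then feeds a "%s" format. A self-contained illustration of the pitfall, with arbitrary buffer sizes:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char name[8];

    /* strncpy() writes no terminator when the source fills the buffer */
    strncpy(name, "ABCDEFGHIJ", sizeof(name));
    name[sizeof(name) - 1] = '\0';   /* manual termination is required */
    printf("%s\n", name);            /* now safely prints "ABCDEFG" */
    return 0;
}

strlcpy()-style copying folds the termination into the copy itself, which is the change the commit message describes.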
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: XLogRestorePoint(const char *rpName)
{
XLogRecPtr RecPtr;
XLogRecData rdata;
xl_restore_point xlrec;
xlrec.rp_time = GetCurrentTimestamp();
strncpy(xlrec.rp_name, rpName, MAXFNAMELEN);
rdata.buffer = InvalidBuffer;
rdata.data = (char *) &xlrec;
rdata.len = sizeof(xl_restore_point);
rdata.next = NULL;
RecPtr = XLogInsert(RM_XLOG_ID, XLOG_RESTORE_POINT, &rdata);
ereport(LOG,
(errmsg("restore point \"%s\" created at %X/%X",
rpName, (uint32) (RecPtr >> 32), (uint32) RecPtr)));
return RecPtr;
} ; Expected output conforming to this sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt()'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: results_differ(const char *testname, const char *resultsfile, const char *default_expectfile)
{
char expectfile[MAXPGPATH];
char diff[MAXPGPATH];
char cmd[MAXPGPATH * 3];
char best_expect_file[MAXPGPATH];
FILE *difffile;
int best_line_count;
int i;
int l;
const char *platform_expectfile;
/*
* We can pass either the resultsfile or the expectfile, they should have
* the same type (filename.type) anyway.
*/
platform_expectfile = get_expectfile(testname, resultsfile);
strcpy(expectfile, default_expectfile);
if (platform_expectfile)
{
/*
* Replace everything after the last slash in expectfile with what the
* platform_expectfile contains.
*/
char *p = strrchr(expectfile, '/');
if (p)
strcpy(++p, platform_expectfile);
}
/* Name to use for temporary diff file */
snprintf(diff, sizeof(diff), "%s.diff", resultsfile);
/* OK, run the diff */
snprintf(cmd, sizeof(cmd),
SYSTEMQUOTE "diff %s \"%s\" \"%s\" > \"%s\"" SYSTEMQUOTE,
basic_diff_opts, expectfile, resultsfile, diff);
/* Is the diff file empty? */
if (run_diff(cmd, diff) == 0)
{
unlink(diff);
return false;
}
/* There may be secondary comparison files that match better */
best_line_count = file_line_count(diff);
strcpy(best_expect_file, expectfile);
for (i = 0; i <= 9; i++)
{
char *alt_expectfile;
alt_expectfile = get_alternative_expectfile(expectfile, i);
if (!file_exists(alt_expectfile))
continue;
snprintf(cmd, sizeof(cmd),
SYSTEMQUOTE "diff %s \"%s\" \"%s\" > \"%s\"" SYSTEMQUOTE,
basic_diff_opts, alt_expectfile, resultsfile, diff);
if (run_diff(cmd, diff) == 0)
{
unlink(diff);
return false;
}
l = file_line_count(diff);
if (l < best_line_count)
{
/* This diff was a better match than the last one */
best_line_count = l;
strcpy(best_expect_file, alt_expectfile);
}
free(alt_expectfile);
}
/*
* fall back on the canonical results file if we haven't tried it yet and
* haven't found a complete match yet.
*/
if (platform_expectfile)
{
snprintf(cmd, sizeof(cmd),
SYSTEMQUOTE "diff %s \"%s\" \"%s\" > \"%s\"" SYSTEMQUOTE,
basic_diff_opts, default_expectfile, resultsfile, diff);
if (run_diff(cmd, diff) == 0)
{
/* No diff = no changes = good */
unlink(diff);
return false;
}
l = file_line_count(diff);
if (l < best_line_count)
{
/* This diff was a better match than the last one */
best_line_count = l;
strcpy(best_expect_file, default_expectfile);
}
}
/*
* Use the best comparison file to generate the "pretty" diff, which we
* append to the diffs summary file.
*/
snprintf(cmd, sizeof(cmd),
SYSTEMQUOTE "diff %s \"%s\" \"%s\" >> \"%s\"" SYSTEMQUOTE,
pretty_diff_opts, best_expect_file, resultsfile, difffilename);
run_diff(cmd, difffilename);
/* And append a separator */
difffile = fopen(difffilename, "a");
if (difffile)
{
fprintf(difffile,
"\n======================================================================\n\n");
fclose(difffile);
}
unlink(diff);
return true;
} ; Expected output conforming to this sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt()'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: resolve_symlinks(char *path)
{
#ifdef HAVE_READLINK
struct stat buf;
char orig_wd[MAXPGPATH],
link_buf[MAXPGPATH];
char *fname;
/*
* To resolve a symlink properly, we have to chdir into its directory and
* then chdir to where the symlink points; otherwise we may fail to
* resolve relative links correctly (consider cases involving mount
* points, for example). After following the final symlink, we use
* getcwd() to figure out where the heck we're at.
*
* One might think we could skip all this if path doesn't point to a
* symlink to start with, but that's wrong. We also want to get rid of
* any directory symlinks that are present in the given path. We expect
* getcwd() to give us an accurate, symlink-free path.
*/
if (!getcwd(orig_wd, MAXPGPATH))
{
log_error(_("could not identify current directory: %s"),
strerror(errno));
return -1;
}
for (;;)
{
char *lsep;
int rllen;
lsep = last_dir_separator(path);
if (lsep)
{
*lsep = '\0';
if (chdir(path) == -1)
{
log_error4(_("could not change directory to \"%s\": %s"), path, strerror(errno));
return -1;
}
fname = lsep + 1;
}
else
fname = path;
if (lstat(fname, &buf) < 0 ||
!S_ISLNK(buf.st_mode))
break;
rllen = readlink(fname, link_buf, sizeof(link_buf));
if (rllen < 0 || rllen >= sizeof(link_buf))
{
log_error(_("could not read symbolic link \"%s\""), fname);
return -1;
}
link_buf[rllen] = '\0';
strcpy(path, link_buf);
}
/* must copy final component out of 'path' temporarily */
strcpy(link_buf, fname);
if (!getcwd(path, MAXPGPATH))
{
log_error(_("could not identify current directory: %s"),
strerror(errno));
return -1;
}
join_path_components(path, path, link_buf);
canonicalize_path(path);
if (chdir(orig_wd) == -1)
{
log_error4(_("could not change directory to \"%s\": %s"), orig_wd, strerror(errno));
return -1;
}
#endif /* HAVE_READLINK */
return 0;
} ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt()'</s>
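A related detail in resolve_symlinks(): readlink() returns a byte count and never NUL-terminates, so the link_buf[rllen] = '\0' line above is load-bearing. A self-contained sketch of the required pattern (the /proc/self/exe path is a Linux-specific example):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
    char target[256];
    /* reserve one byte for the terminator and add it by hand */
    ssize_t n = readlink("/proc/self/exe", target, sizeof(target) - 1);
    if (n < 0) {
        perror("readlink");
        return 1;
    }
    target[n] = '\0';
    printf("%s\n", target);
    return 0;
}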
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: file_softmagic(struct magic_set *ms, const unsigned char *buf, size_t nbytes,
int mode, int text)
{
struct mlist *ml;
int rv, printed_something = 0, need_separator = 0;
for (ml = ms->mlist[0]->next; ml != ms->mlist[0]; ml = ml->next)
if ((rv = match(ms, ml->magic, ml->nmagic, buf, nbytes, 0, mode,
text, 0, 0, &printed_something, &need_separator,
NULL)) != 0)
return rv;
return 0;
} ; Expected output conforming to this sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-755'], 'message': 'count indirect recursion as recursion.'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: file_ascmagic_with_encoding(struct magic_set *ms, const unsigned char *buf,
size_t nbytes, unichar *ubuf, size_t ulen, const char *code,
const char *type, int text)
{
unsigned char *utf8_buf = NULL, *utf8_end;
size_t mlen, i;
int rv = -1;
int mime = ms->flags & MAGIC_MIME;
const char *subtype = NULL;
const char *subtype_mime = NULL;
int has_escapes = 0;
int has_backspace = 0;
int seen_cr = 0;
int n_crlf = 0;
int n_lf = 0;
int n_cr = 0;
int n_nel = 0;
int executable = 0;
size_t last_line_end = (size_t)-1;
int has_long_lines = 0;
if (ms->flags & MAGIC_APPLE)
return 0;
nbytes = trim_nuls(buf, nbytes);
/* If we have fewer than 2 bytes, give up. */
if (nbytes <= 1) {
rv = 0;
goto done;
}
if (ulen > 0 && (ms->flags & MAGIC_NO_CHECK_SOFT) == 0) {
/* Convert ubuf to UTF-8 and try text soft magic */
/* malloc size is a conservative overestimate; could be
improved, or at least realloced after conversion. */
mlen = ulen * 6;
if ((utf8_buf = CAST(unsigned char *, malloc(mlen))) == NULL) {
file_oomem(ms, mlen);
goto done;
}
if ((utf8_end = encode_utf8(utf8_buf, mlen, ubuf, ulen))
== NULL)
goto done;
if ((rv = file_softmagic(ms, utf8_buf,
(size_t)(utf8_end - utf8_buf), TEXTTEST, text)) == 0)
rv = -1;
}
/* Now try to discover other details about the file. */
for (i = 0; i < ulen; i++) {
if (ubuf[i] == '\n') {
if (seen_cr)
n_crlf++;
else
n_lf++;
last_line_end = i;
} else if (seen_cr)
n_cr++;
seen_cr = (ubuf[i] == '\r');
if (seen_cr)
last_line_end = i;
if (ubuf[i] == 0x85) { /* X3.64/ECMA-43 "next line" character */
n_nel++;
last_line_end = i;
}
/* If this line is _longer_ than MAXLINELEN, remember it. */
if (i > last_line_end + MAXLINELEN)
has_long_lines = 1;
if (ubuf[i] == '\033')
has_escapes = 1;
if (ubuf[i] == '\b')
has_backspace = 1;
}
/* Beware, if the data has been truncated, the final CR could have
been followed by a LF. If we have HOWMANY bytes, it indicates
that the data might have been truncated, probably even before
this function was called. */
if (seen_cr && nbytes < HOWMANY)
n_cr++;
if (strcmp(type, "binary") == 0) {
rv = 0;
goto done;
}
if (mime) {
if (!file_printedlen(ms) && (mime & MAGIC_MIME_TYPE) != 0) {
if (subtype_mime) {
if (file_printf(ms, "%s", subtype_mime) == -1)
goto done;
} else {
if (file_printf(ms, "text/plain") == -1)
goto done;
}
}
} else {
if (file_printedlen(ms)) {
switch (file_replace(ms, " text$", ", ")) {
case 0:
switch (file_replace(ms, " text executable$",
", ")) {
case 0:
if (file_printf(ms, ", ") == -1)
goto done;
break;
case -1:
goto done;
default:
executable = 1;
break;
}
break;
case -1:
goto done;
default:
break;
}
}
if (file_printf(ms, "%s", code) == -1)
goto done;
if (subtype) {
if (file_printf(ms, " %s", subtype) == -1)
goto done;
}
if (file_printf(ms, " %s", type) == -1)
goto done;
if (executable)
if (file_printf(ms, " executable") == -1)
goto done;
if (has_long_lines)
if (file_printf(ms, ", with very long lines") == -1)
goto done;
/*
* Only report line terminators if we find one other than LF,
* or if we find none at all.
*/
if ((n_crlf == 0 && n_cr == 0 && n_nel == 0 && n_lf == 0) ||
(n_crlf != 0 || n_cr != 0 || n_nel != 0)) {
if (file_printf(ms, ", with") == -1)
goto done;
if (n_crlf == 0 && n_cr == 0 && n_nel == 0 && n_lf == 0) {
if (file_printf(ms, " no") == -1)
goto done;
} else {
if (n_crlf) {
if (file_printf(ms, " CRLF") == -1)
goto done;
if (n_cr || n_lf || n_nel)
if (file_printf(ms, ",") == -1)
goto done;
}
if (n_cr) {
if (file_printf(ms, " CR") == -1)
goto done;
if (n_lf || n_nel)
if (file_printf(ms, ",") == -1)
goto done;
}
if (n_lf) {
if (file_printf(ms, " LF") == -1)
goto done;
if (n_nel)
if (file_printf(ms, ",") == -1)
goto done;
}
if (n_nel)
if (file_printf(ms, " NEL") == -1)
goto done;
}
if (file_printf(ms, " line terminators") == -1)
goto done;
}
if (has_escapes)
if (file_printf(ms, ", with escape sequences") == -1)
goto done;
if (has_backspace)
if (file_printf(ms, ", with overstriking") == -1)
goto done;
}
rv = 1;
done:
free(utf8_buf);
return rv;
} ; Expected output conforming to this sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-755'], 'message': 'count indirect recursion as recursion.'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: match(struct magic_set *ms, struct magic *magic, uint32_t nmagic,
const unsigned char *s, size_t nbytes, size_t offset, int mode, int text,
int flip, int *returnval)
{
uint32_t magindex = 0;
unsigned int cont_level = 0;
int need_separator = 0;
int returnvalv = 0, e; /* if a match is found it is set to 1 */
int firstline = 1; /* a flag to print X\n X\n- X */
int printed_something = 0;
int print = (ms->flags & (MAGIC_MIME|MAGIC_APPLE)) == 0;
if (returnval == NULL)
returnval = &returnvalv;
if (file_check_mem(ms, cont_level) == -1)
return -1;
for (magindex = 0; magindex < nmagic; magindex++) {
int flush = 0;
struct magic *m = &magic[magindex];
if (m->type != FILE_NAME)
if ((IS_STRING(m->type) &&
((text && (m->str_flags & (STRING_BINTEST | STRING_TEXTTEST)) == STRING_BINTEST) ||
(!text && (m->str_flags & (STRING_TEXTTEST | STRING_BINTEST)) == STRING_TEXTTEST))) ||
(m->flag & mode) != mode) {
/* Skip sub-tests */
while (magindex + 1 < nmagic &&
magic[magindex + 1].cont_level != 0 &&
++magindex)
continue;
continue; /* Skip to next top-level test*/
}
ms->offset = m->offset;
ms->line = m->lineno;
/* if main entry matches, print it... */
switch (mget(ms, s, m, nbytes, offset, cont_level, mode, text,
flip, returnval)) {
case -1:
return -1;
case 0:
flush = m->reln != '!';
break;
default:
if (m->type == FILE_INDIRECT)
*returnval = 1;
switch (magiccheck(ms, m)) {
case -1:
return -1;
case 0:
flush++;
break;
default:
flush = 0;
break;
}
break;
}
if (flush) {
/*
* main entry didn't match,
* flush its continuations
*/
while (magindex < nmagic - 1 &&
magic[magindex + 1].cont_level != 0)
magindex++;
continue;
}
if ((e = handle_annotation(ms, m)) != 0) {
*returnval = 1;
return e;
}
/*
* If we are going to print something, we'll need to print
* a blank before we print something else.
*/
if (*m->desc) {
need_separator = 1;
printed_something = 1;
if (print_sep(ms, firstline) == -1)
return -1;
}
if (print && mprint(ms, m) == -1)
return -1;
ms->c.li[cont_level].off = moffset(ms, m);
/* and any continuations that match */
if (file_check_mem(ms, ++cont_level) == -1)
return -1;
while (magic[magindex+1].cont_level != 0 &&
++magindex < nmagic) {
m = &magic[magindex];
ms->line = m->lineno; /* for messages */
if (cont_level < m->cont_level)
continue;
if (cont_level > m->cont_level) {
/*
* We're at the end of the level
* "cont_level" continuations.
*/
cont_level = m->cont_level;
}
ms->offset = m->offset;
if (m->flag & OFFADD) {
ms->offset +=
ms->c.li[cont_level - 1].off;
}
#ifdef ENABLE_CONDITIONALS
if (m->cond == COND_ELSE ||
m->cond == COND_ELIF) {
if (ms->c.li[cont_level].last_match == 1)
continue;
}
#endif
switch (mget(ms, s, m, nbytes, offset, cont_level, mode,
text, flip, returnval)) {
case -1:
return -1;
case 0:
if (m->reln != '!')
continue;
flush = 1;
break;
default:
if (m->type == FILE_INDIRECT)
*returnval = 1;
flush = 0;
break;
}
switch (flush ? 1 : magiccheck(ms, m)) {
case -1:
return -1;
case 0:
#ifdef ENABLE_CONDITIONALS
ms->c.li[cont_level].last_match = 0;
#endif
break;
default:
#ifdef ENABLE_CONDITIONALS
ms->c.li[cont_level].last_match = 1;
#endif
if (m->type != FILE_DEFAULT)
ms->c.li[cont_level].got_match = 1;
else if (ms->c.li[cont_level].got_match) {
ms->c.li[cont_level].got_match = 0;
break;
}
if ((e = handle_annotation(ms, m)) != 0) {
*returnval = 1;
return e;
}
/*
* If we are going to print something,
* make sure that we have a separator first.
*/
if (*m->desc) {
if (!printed_something) {
printed_something = 1;
if (print_sep(ms, firstline)
== -1)
return -1;
}
}
/*
* This continuation matched. Print
* its message, with a blank before it
* if the previous item printed and
* this item isn't empty.
*/
/* space if previous printed */
if (need_separator
&& ((m->flag & NOSPACE) == 0)
&& *m->desc) {
if (print &&
file_printf(ms, " ") == -1)
return -1;
need_separator = 0;
}
if (print && mprint(ms, m) == -1)
return -1;
ms->c.li[cont_level].off = moffset(ms, m);
if (*m->desc)
need_separator = 1;
/*
* If we see any continuations
* at a higher level,
* process them.
*/
if (file_check_mem(ms, ++cont_level) == -1)
return -1;
break;
}
}
if (printed_something) {
firstline = 0;
if (print)
*returnval = 1;
}
if ((ms->flags & MAGIC_CONTINUE) == 0 && printed_something) {
return *returnval; /* don't keep searching */
}
}
return *returnval; /* This is hit if -k is set or there is no match */
} ; Expected output conforming to this sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-755'], 'message': 'limit recursion level for mget'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code:
private int
mget(struct magic_set *ms, const unsigned char *s, struct magic *m,
size_t nbytes, size_t o, unsigned int cont_level, int mode, int text,
int flip, int *returnval)
{
uint32_t offset = ms->offset;
uint32_t count = m->str_range;
int rv;
char *sbuf, *rbuf;
union VALUETYPE *p = &ms->ms_value;
struct mlist ml;
if (mcopy(ms, p, m->type, m->flag & INDIR, s, (uint32_t)(offset + o),
(uint32_t)nbytes, count) == -1)
return -1;
if ((ms->flags & MAGIC_DEBUG) != 0) {
fprintf(stderr, "mget(type=%d, flag=%x, offset=%u, o=%zu, "
"nbytes=%zu, count=%u)\n", m->type, m->flag, offset, o,
nbytes, count);
mdebug(offset, (char *)(void *)p, sizeof(union VALUETYPE));
#ifndef COMPILE_ONLY
file_mdump(m);
#endif
}
if (m->flag & INDIR) {
int off = m->in_offset;
if (m->in_op & FILE_OPINDIRECT) {
const union VALUETYPE *q = CAST(const union VALUETYPE *,
((const void *)(s + offset + off)));
switch (cvt_flip(m->in_type, flip)) {
case FILE_BYTE:
off = q->b;
break;
case FILE_SHORT:
off = q->h;
break;
case FILE_BESHORT:
off = (short)((q->hs[0]<<8)|(q->hs[1]));
break;
case FILE_LESHORT:
off = (short)((q->hs[1]<<8)|(q->hs[0]));
break;
case FILE_LONG:
off = q->l;
break;
case FILE_BELONG:
case FILE_BEID3:
off = (int32_t)((q->hl[0]<<24)|(q->hl[1]<<16)|
(q->hl[2]<<8)|(q->hl[3]));
break;
case FILE_LEID3:
case FILE_LELONG:
off = (int32_t)((q->hl[3]<<24)|(q->hl[2]<<16)|
(q->hl[1]<<8)|(q->hl[0]));
break;
case FILE_MELONG:
off = (int32_t)((q->hl[1]<<24)|(q->hl[0]<<16)|
(q->hl[3]<<8)|(q->hl[2]));
break;
}
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr, "indirect offs=%u\n", off);
}
switch (cvt_flip(m->in_type, flip)) {
case FILE_BYTE:
if (nbytes < (offset + 1))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = p->b & off;
break;
case FILE_OPOR:
offset = p->b | off;
break;
case FILE_OPXOR:
offset = p->b ^ off;
break;
case FILE_OPADD:
offset = p->b + off;
break;
case FILE_OPMINUS:
offset = p->b - off;
break;
case FILE_OPMULTIPLY:
offset = p->b * off;
break;
case FILE_OPDIVIDE:
offset = p->b / off;
break;
case FILE_OPMODULO:
offset = p->b % off;
break;
}
} else
offset = p->b;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_BESHORT:
if (nbytes < (offset + 2))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) &
off;
break;
case FILE_OPOR:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) |
off;
break;
case FILE_OPXOR:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) ^
off;
break;
case FILE_OPADD:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) +
off;
break;
case FILE_OPMINUS:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) *
off;
break;
case FILE_OPDIVIDE:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) /
off;
break;
case FILE_OPMODULO:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) %
off;
break;
}
} else
offset = (short)((p->hs[0]<<8)|
(p->hs[1]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_LESHORT:
if (nbytes < (offset + 2))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) &
off;
break;
case FILE_OPOR:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) |
off;
break;
case FILE_OPXOR:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) ^
off;
break;
case FILE_OPADD:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) +
off;
break;
case FILE_OPMINUS:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) *
off;
break;
case FILE_OPDIVIDE:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) /
off;
break;
case FILE_OPMODULO:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) %
off;
break;
}
} else
offset = (short)((p->hs[1]<<8)|
(p->hs[0]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_SHORT:
if (nbytes < (offset + 2))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = p->h & off;
break;
case FILE_OPOR:
offset = p->h | off;
break;
case FILE_OPXOR:
offset = p->h ^ off;
break;
case FILE_OPADD:
offset = p->h + off;
break;
case FILE_OPMINUS:
offset = p->h - off;
break;
case FILE_OPMULTIPLY:
offset = p->h * off;
break;
case FILE_OPDIVIDE:
offset = p->h / off;
break;
case FILE_OPMODULO:
offset = p->h % off;
break;
}
}
else
offset = p->h;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_BELONG:
case FILE_BEID3:
if (nbytes < (offset + 4))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) &
off;
break;
case FILE_OPOR:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) |
off;
break;
case FILE_OPXOR:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) ^
off;
break;
case FILE_OPADD:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) +
off;
break;
case FILE_OPMINUS:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) *
off;
break;
case FILE_OPDIVIDE:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) /
off;
break;
case FILE_OPMODULO:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) %
off;
break;
}
} else
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_LELONG:
case FILE_LEID3:
if (nbytes < (offset + 4))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) &
off;
break;
case FILE_OPOR:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) |
off;
break;
case FILE_OPXOR:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) ^
off;
break;
case FILE_OPADD:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) +
off;
break;
case FILE_OPMINUS:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) *
off;
break;
case FILE_OPDIVIDE:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) /
off;
break;
case FILE_OPMODULO:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) %
off;
break;
}
} else
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_MELONG:
if (nbytes < (offset + 4))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) &
off;
break;
case FILE_OPOR:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) |
off;
break;
case FILE_OPXOR:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) ^
off;
break;
case FILE_OPADD:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) +
off;
break;
case FILE_OPMINUS:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) *
off;
break;
case FILE_OPDIVIDE:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) /
off;
break;
case FILE_OPMODULO:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) %
off;
break;
}
} else
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_LONG:
if (nbytes < (offset + 4))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = p->l & off;
break;
case FILE_OPOR:
offset = p->l | off;
break;
case FILE_OPXOR:
offset = p->l ^ off;
break;
case FILE_OPADD:
offset = p->l + off;
break;
case FILE_OPMINUS:
offset = p->l - off;
break;
case FILE_OPMULTIPLY:
offset = p->l * off;
break;
case FILE_OPDIVIDE:
offset = p->l / off;
break;
case FILE_OPMODULO:
offset = p->l % off;
break;
}
} else
offset = p->l;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
}
switch (cvt_flip(m->in_type, flip)) {
case FILE_LEID3:
case FILE_BEID3:
offset = ((((offset >> 0) & 0x7f) << 0) |
(((offset >> 8) & 0x7f) << 7) |
(((offset >> 16) & 0x7f) << 14) |
(((offset >> 24) & 0x7f) << 21)) + 10;
break;
default:
break;
}
if (m->flag & INDIROFFADD) {
offset += ms->c.li[cont_level-1].off;
if (offset == 0) {
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr,
"indirect *zero* offset\n");
return 0;
}
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr, "indirect +offs=%u\n", offset);
}
if (mcopy(ms, p, m->type, 0, s, offset, nbytes, count) == -1)
return -1;
ms->offset = offset;
if ((ms->flags & MAGIC_DEBUG) != 0) {
mdebug(offset, (char *)(void *)p,
sizeof(union VALUETYPE));
#ifndef COMPILE_ONLY
file_mdump(m);
#endif
}
}
/* Verify we have enough data to match magic type */
switch (m->type) {
case FILE_BYTE:
if (nbytes < (offset + 1)) /* should always be true */
return 0;
break;
case FILE_SHORT:
case FILE_BESHORT:
case FILE_LESHORT:
if (nbytes < (offset + 2))
return 0;
break;
case FILE_LONG:
case FILE_BELONG:
case FILE_LELONG:
case FILE_MELONG:
case FILE_DATE:
case FILE_BEDATE:
case FILE_LEDATE:
case FILE_MEDATE:
case FILE_LDATE:
case FILE_BELDATE:
case FILE_LELDATE:
case FILE_MELDATE:
case FILE_FLOAT:
case FILE_BEFLOAT:
case FILE_LEFLOAT:
if (nbytes < (offset + 4))
return 0;
break;
case FILE_DOUBLE:
case FILE_BEDOUBLE:
case FILE_LEDOUBLE:
if (nbytes < (offset + 8))
return 0;
break;
case FILE_STRING:
case FILE_PSTRING:
case FILE_SEARCH:
if (nbytes < (offset + m->vallen))
return 0;
break;
case FILE_REGEX:
if (nbytes < offset)
return 0;
break;
case FILE_INDIRECT:
if (nbytes < offset)
return 0;
sbuf = ms->o.buf;
ms->o.buf = NULL;
ms->offset = 0;
rv = file_softmagic(ms, s + offset, nbytes - offset,
BINTEST, text);
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr, "indirect @offs=%u[%d]\n", offset, rv);
if (rv == 1) {
rbuf = ms->o.buf;
ms->o.buf = sbuf;
if ((ms->flags & (MAGIC_MIME|MAGIC_APPLE)) == 0 &&
file_printf(ms, m->desc, offset) == -1)
return -1;
if (file_printf(ms, "%s", rbuf) == -1)
return -1;
free(rbuf);
} else
ms->o.buf = sbuf;
return rv;
case FILE_USE:
if (nbytes < offset)
return 0;
sbuf = m->value.s;
if (*sbuf == '^') {
sbuf++;
flip = 1;
} else
flip = 0;
if (file_magicfind(ms, sbuf, &ml) == -1) {
file_error(ms, 0, "cannot find entry `%s'", sbuf);
return -1;
}
return match(ms, ml.magic, ml.nmagic, s, nbytes, offset + o,
mode, text, flip, returnval);
case FILE_NAME:
if (file_printf(ms, "%s", m->desc) == -1)
return -1;
return 1;
case FILE_DEFAULT: /* nothing to check */
default:
break;
}
if (!mconvert(ms, m, flip))
return 0; ; Expected output conforming to this sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-755'], 'message': 'limit recursion level for mget'</s>
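The fix named in the message is a recursion budget: file_softmagic() calls match(), match() calls mget(), and a FILE_USE or FILE_INDIRECT entry sends mget() straight back into match() or file_softmagic() on attacker-supplied input, so the cycle has to carry a depth counter. A sketch of the pattern; MAX_RECURSION_LEVEL, eval_entry, and eval_indirect are illustrative names, not the actual file(1) API:

#include <stddef.h>

#define MAX_RECURSION_LEVEL 10   /* illustrative cap */

static int eval_entry(const unsigned char *buf, size_t len, unsigned int level);

/* Indirect and "use" entries re-enter the evaluator; checking the level
 * here turns unbounded, possibly mutual, recursion into a clean error. */
static int eval_indirect(const unsigned char *buf, size_t len, unsigned int level)
{
    if (level >= MAX_RECURSION_LEVEL)
        return -1;        /* too deep: fail instead of overflowing the stack */
    return eval_entry(buf, len, level + 1);
}

static int eval_entry(const unsigned char *buf, size_t len, unsigned int level)
{
    (void)buf; (void)len; (void)level;
    /* ... a real evaluator parses one magic entry here and may call
     * eval_indirect() again for FILE_INDIRECT / FILE_USE entries ... */
    return 0;
}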
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
VirtIONet *n = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
int ret, i, link_down;
if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
return -EINVAL;
ret = virtio_load(vdev, f);
if (ret) {
return ret;
}
qemu_get_buffer(f, n->mac, ETH_ALEN);
n->vqs[0].tx_waiting = qemu_get_be32(f);
virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));
if (version_id >= 3)
n->status = qemu_get_be16(f);
if (version_id >= 4) {
if (version_id < 8) {
n->promisc = qemu_get_be32(f);
n->allmulti = qemu_get_be32(f);
} else {
n->promisc = qemu_get_byte(f);
n->allmulti = qemu_get_byte(f);
}
}
if (version_id >= 5) {
n->mac_table.in_use = qemu_get_be32(f);
/* MAC_TABLE_ENTRIES may be different from the saved image */
if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
qemu_get_buffer(f, n->mac_table.macs,
n->mac_table.in_use * ETH_ALEN);
} else if (n->mac_table.in_use) {
uint8_t *buf = g_malloc0(n->mac_table.in_use);
qemu_get_buffer(f, buf, n->mac_table.in_use * ETH_ALEN);
g_free(buf);
n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
n->mac_table.in_use = 0;
}
}
if (version_id >= 6)
qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
if (version_id >= 7) {
if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
error_report("virtio-net: saved image requires vnet_hdr=on");
return -1;
}
}
if (version_id >= 9) {
n->mac_table.multi_overflow = qemu_get_byte(f);
n->mac_table.uni_overflow = qemu_get_byte(f);
}
if (version_id >= 10) {
n->alluni = qemu_get_byte(f);
n->nomulti = qemu_get_byte(f);
n->nouni = qemu_get_byte(f);
n->nobcast = qemu_get_byte(f);
}
if (version_id >= 11) {
if (qemu_get_byte(f) && !peer_has_ufo(n)) {
error_report("virtio-net: saved image requires TUN_F_UFO support");
return -1;
}
}
if (n->max_queues > 1) {
if (n->max_queues != qemu_get_be16(f)) {
error_report("virtio-net: different max_queues ");
return -1;
}
n->curr_queues = qemu_get_be16(f);
if (n->curr_queues > n->max_queues) {
error_report("virtio-net: curr_queues %x > max_queues %x",
n->curr_queues, n->max_queues);
return -1;
}
for (i = 1; i < n->curr_queues; i++) {
n->vqs[i].tx_waiting = qemu_get_be32(f);
}
}
if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
n->curr_guest_offloads = qemu_get_be64(f);
} else {
n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
}
if (peer_has_vnet_hdr(n)) {
virtio_net_apply_guest_offloads(n);
}
virtio_net_set_queues(n);
/* Find the first multicast entry in the saved MAC filter */
for (i = 0; i < n->mac_table.in_use; i++) {
if (n->mac_table.macs[i * ETH_ALEN] & 1) {
break;
}
}
n->mac_table.first_multi = i;
/* nc.link_down can't be migrated, so infer link_down according
* to link status bit in n->status */
link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
for (i = 0; i < n->max_queues; i++) {
qemu_get_subqueue(n->nic, i)->link_down = link_down;
}
return 0;
} ; Expected output conforming to this sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'virtio-net: out-of-bounds buffer write on load
CVE-2013-4149 QEMU 1.3.0 out-of-bounds buffer write in
virtio_net_load()@hw/net/virtio-net.c
> } else if (n->mac_table.in_use) {
> uint8_t *buf = g_malloc0(n->mac_table.in_use);
We are allocating buffer of size n->mac_table.in_use
> qemu_get_buffer(f, buf, n->mac_table.in_use * ETH_ALEN);
and read to the n->mac_table.in_use size buffer n->mac_table.in_use *
ETH_ALEN bytes, corrupting memory.
If adversary controls state then memory written there is controlled
by adversary.
Reviewed-by: Michael Roth <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Juan Quintela <[email protected]>'</s>
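The fix implied by the message is to derive the allocation size and the read length from the same expression, so they cannot drift apart. A hedged sketch of the pattern outside QEMU; read_stream stands in for qemu_get_buffer(), and ETH_ALEN is 6 as in the source:

#include <stdint.h>
#include <stdlib.h>

#define ETH_ALEN 6

static void discard_mac_entries(void (*read_stream)(uint8_t *, size_t),
                                uint32_t in_use)
{
    size_t len = (size_t)in_use * ETH_ALEN;   /* one expression for both */
    uint8_t *buf = calloc(in_use, ETH_ALEN);  /* was, in effect, malloc(in_use) */
    if (buf != NULL) {
        read_stream(buf, len);                /* reads exactly len bytes */
        free(buf);
    }
}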
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
int num, i, ret;
uint32_t features;
uint32_t supported_features;
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
if (k->load_config) {
ret = k->load_config(qbus->parent, f);
if (ret)
return ret;
}
qemu_get_8s(f, &vdev->status);
qemu_get_8s(f, &vdev->isr);
qemu_get_be16s(f, &vdev->queue_sel);
qemu_get_be32s(f, &features);
if (virtio_set_features(vdev, features) < 0) {
supported_features = k->get_features(qbus->parent);
error_report("Features 0x%x unsupported. Allowed features: 0x%x",
features, supported_features);
return -1;
}
vdev->config_len = qemu_get_be32(f);
qemu_get_buffer(f, vdev->config, vdev->config_len);
num = qemu_get_be32(f);
for (i = 0; i < num; i++) {
vdev->vq[i].vring.num = qemu_get_be32(f);
if (k->has_variable_vring_alignment) {
vdev->vq[i].vring.align = qemu_get_be32(f);
}
vdev->vq[i].pa = qemu_get_be64(f);
qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
vdev->vq[i].signalled_used_valid = false;
vdev->vq[i].notification = true;
if (vdev->vq[i].pa) {
uint16_t nheads;
virtqueue_init(&vdev->vq[i]);
nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
/* Check it isn't doing very strange things with descriptor numbers. */
if (nheads > vdev->vq[i].vring.num) {
error_report("VQ %d size 0x%x Guest index 0x%x "
"inconsistent with Host index 0x%x: delta 0x%x",
i, vdev->vq[i].vring.num,
vring_avail_idx(&vdev->vq[i]),
vdev->vq[i].last_avail_idx, nheads);
return -1;
}
} else if (vdev->vq[i].last_avail_idx) {
error_report("VQ %d address 0x0 "
"inconsistent with Host index 0x%x",
i, vdev->vq[i].last_avail_idx);
return -1;
}
if (k->load_queue) {
ret = k->load_queue(qbus->parent, i, f);
if (ret)
return ret;
}
}
virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
return 0;
} ; Expected output conforming to this sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-94'], 'message': 'virtio: out-of-bounds buffer write on invalid state load
CVE-2013-4151 QEMU 1.0 out-of-bounds buffer write in
virtio_load@hw/virtio/virtio.c
So we have this code since way back when:
num = qemu_get_be32(f);
for (i = 0; i < num; i++) {
vdev->vq[i].vring.num = qemu_get_be32(f);
array of vqs has size VIRTIO_PCI_QUEUE_MAX, so
on invalid input this will write beyond end of buffer.
Signed-off-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Michael Roth <[email protected]>
Signed-off-by: Juan Quintela <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
int i, ret;
uint32_t num;
uint32_t features;
uint32_t supported_features;
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
if (k->load_config) {
ret = k->load_config(qbus->parent, f);
if (ret)
return ret;
}
qemu_get_8s(f, &vdev->status);
qemu_get_8s(f, &vdev->isr);
qemu_get_be16s(f, &vdev->queue_sel);
qemu_get_be32s(f, &features);
if (virtio_set_features(vdev, features) < 0) {
supported_features = k->get_features(qbus->parent);
error_report("Features 0x%x unsupported. Allowed features: 0x%x",
features, supported_features);
return -1;
}
vdev->config_len = qemu_get_be32(f);
qemu_get_buffer(f, vdev->config, vdev->config_len);
num = qemu_get_be32(f);
if (num > VIRTIO_PCI_QUEUE_MAX) {
error_report("Invalid number of PCI queues: 0x%x", num);
return -1;
}
for (i = 0; i < num; i++) {
vdev->vq[i].vring.num = qemu_get_be32(f);
if (k->has_variable_vring_alignment) {
vdev->vq[i].vring.align = qemu_get_be32(f);
}
vdev->vq[i].pa = qemu_get_be64(f);
qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
vdev->vq[i].signalled_used_valid = false;
vdev->vq[i].notification = true;
if (vdev->vq[i].pa) {
uint16_t nheads;
virtqueue_init(&vdev->vq[i]);
nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
/* Check it isn't doing very strange things with descriptor numbers. */
if (nheads > vdev->vq[i].vring.num) {
error_report("VQ %d size 0x%x Guest index 0x%x "
"inconsistent with Host index 0x%x: delta 0x%x",
i, vdev->vq[i].vring.num,
vring_avail_idx(&vdev->vq[i]),
vdev->vq[i].last_avail_idx, nheads);
return -1;
}
} else if (vdev->vq[i].last_avail_idx) {
error_report("VQ %d address 0x0 "
"inconsistent with Host index 0x%x",
i, vdev->vq[i].last_avail_idx);
return -1;
}
if (k->load_queue) {
ret = k->load_queue(qbus->parent, i, f);
if (ret)
return ret;
}
}
virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
return 0;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-94'], 'message': 'virtio: avoid buffer overrun on incoming migration
CVE-2013-6399
vdev->queue_sel is read from the wire, and later used in the
emulation code as an index into vdev->vq[]. If the value of
vdev->queue_sel exceeds the length of vdev->vq[], currently
allocated to be VIRTIO_PCI_QUEUE_MAX elements, subsequent PIO
operations such as VIRTIO_PCI_QUEUE_PFN can be used to overrun
the buffer with arbitrary data originating from the source.
Fix this by failing migration if the value from the wire exceeds
VIRTIO_PCI_QUEUE_MAX.
Signed-off-by: Michael Roth <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Peter Maydell <[email protected]>
Signed-off-by: Juan Quintela <[email protected]>'</s>
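A minimal sketch of the guard described above, placed where queue_sel comes off the wire in virtio_load(); failing the load with -1 mirrors the surrounding error paths, and the exact placement is an assumption:
qemu_get_be16s(f, &vdev->queue_sel);
if (vdev->queue_sel >= VIRTIO_PCI_QUEUE_MAX) {  /* would index past vdev->vq[] */
    return -1;
}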
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int usb_device_post_load(void *opaque, int version_id)
{
USBDevice *dev = opaque;
if (dev->state == USB_STATE_NOTATTACHED) {
dev->attached = 0;
} else {
dev->attached = 1;
}
if (dev->setup_index >= sizeof(dev->data_buf) ||
dev->setup_len >= sizeof(dev->data_buf)) {
return -EINVAL;
}
return 0;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'usb: sanity check setup_index+setup_len in post_load
CVE-2013-4541
s->setup_len and s->setup_index are fed into usb_packet_copy as
size/offset into s->data_buf; it's possible for an invalid state to
exploit this to load arbitrary data.
setup_len and setup_index should be checked to make sure
they are not negative.
Cc: Gerd Hoffmann <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Gerd Hoffmann <[email protected]>
Signed-off-by: Juan Quintela <[email protected]>'</s>
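A sketch of the stricter post_load check the message calls for, assuming setup_len and setup_index are signed ints so that negative wire values are rejected as well:
if (dev->setup_index < 0 || dev->setup_len < 0 ||
    dev->setup_index >= sizeof(dev->data_buf) ||
    dev->setup_len >= sizeof(dev->data_buf)) {
    return -EINVAL;   /* negative or oversized size/offset from the wire */
}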
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int ssd0323_load(QEMUFile *f, void *opaque, int version_id)
{
SSISlave *ss = SSI_SLAVE(opaque);
ssd0323_state *s = (ssd0323_state *)opaque;
int i;
if (version_id != 1)
return -EINVAL;
s->cmd_len = qemu_get_be32(f);
s->cmd = qemu_get_be32(f);
for (i = 0; i < 8; i++)
s->cmd_data[i] = qemu_get_be32(f);
s->row = qemu_get_be32(f);
s->row_start = qemu_get_be32(f);
s->row_end = qemu_get_be32(f);
s->col = qemu_get_be32(f);
s->col_start = qemu_get_be32(f);
s->col_end = qemu_get_be32(f);
s->redraw = qemu_get_be32(f);
s->remap = qemu_get_be32(f);
s->mode = qemu_get_be32(f);
qemu_get_buffer(f, s->framebuffer, sizeof(s->framebuffer));
ss->cs = qemu_get_be32(f);
return 0;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'ssd0323: fix buffer overrun on invalid state load
CVE-2013-4538
s->cmd_len is used as an index in ssd0323_transfer() to store a 32-bit field.
This field might then be supplied by the guest to overwrite a
return address somewhere. The same goes for the row/col fields, which are
indices into the framebuffer array.
To fix, validate after load.
Additionally, validate that the row/col_start/end are within bounds;
otherwise the guest can provoke an overrun by either setting the _end
field so large that the row++ increments just walk off the end of the
array, or by setting the _start value to something bogus and then
letting the "we hit end of row" logic reset row to row_start.
For completeness, validate mode as well.
Signed-off-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Peter Maydell <[email protected]>
Signed-off-by: Juan Quintela <[email protected]>'</s>
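A hedged sketch of the post-load validation the message describes; the 8-entry bound on cmd_len comes from the load loop above, while stopping at non-negativity for the row/col fields is a simplification (the real patch also bounds them against the framebuffer geometry):
if (s->cmd_len < 0 || s->cmd_len > 8) {          /* index into cmd_data[8] */
    return -EINVAL;
}
if (s->row < 0 || s->row_start < 0 || s->row_end < 0 ||
    s->col < 0 || s->col_start < 0 || s->col_end < 0) {
    return -EINVAL;                              /* framebuffer indices */
}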
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
size_t num_sg, int is_write)
{
unsigned int i;
hwaddr len;
for (i = 0; i < num_sg; i++) {
len = sg[i].iov_len;
sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
error_report("virtio: trying to map MMIO memory");
exit(1);
}
}
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'virtio: validate num_sg when mapping
CVE-2013-4535
CVE-2013-4536
In both virtio-block and virtio-serial,
VirtQueueElements are read in as buffers and passed to
virtqueue_map_sg(), where num_sg is taken from the wire and can force
writes to indices beyond VIRTQUEUE_MAX_SIZE.
To fix, validate num_sg.
Reported-by: Michael Roth <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Cc: Amit Shah <[email protected]>
Signed-off-by: Juan Quintela <[email protected]>'</s>
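The validation the message asks for amounts to one bounds check at the top of virtqueue_map_sg(); a minimal sketch:
if (num_sg > VIRTQUEUE_MAX_SIZE) {
    error_report("virtio: map attempt out of bounds: %zu", num_sg);
    exit(1);
}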
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static void openpic_realize(DeviceState *dev, Error **errp)
{
SysBusDevice *d = SYS_BUS_DEVICE(dev);
OpenPICState *opp = OPENPIC(dev);
int i, j;
int list_count = 0;
static const MemReg list_le[] = {
{"glb", &openpic_glb_ops_le,
OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
{"tmr", &openpic_tmr_ops_le,
OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
{"src", &openpic_src_ops_le,
OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
{"cpu", &openpic_cpu_ops_le,
OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
{NULL}
};
static const MemReg list_be[] = {
{"glb", &openpic_glb_ops_be,
OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
{"tmr", &openpic_tmr_ops_be,
OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
{"src", &openpic_src_ops_be,
OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
{"cpu", &openpic_cpu_ops_be,
OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
{NULL}
};
static const MemReg list_fsl[] = {
{"msi", &openpic_msi_ops_be,
OPENPIC_MSI_REG_START, OPENPIC_MSI_REG_SIZE},
{"summary", &openpic_summary_ops_be,
OPENPIC_SUMMARY_REG_START, OPENPIC_SUMMARY_REG_SIZE},
{NULL}
};
switch (opp->model) {
case OPENPIC_MODEL_FSL_MPIC_20:
default:
opp->fsl = &fsl_mpic_20;
opp->brr1 = 0x00400200;
opp->flags |= OPENPIC_FLAG_IDR_CRIT;
opp->nb_irqs = 80;
opp->mpic_mode_mask = GCR_MODE_MIXED;
fsl_common_init(opp);
map_list(opp, list_be, &list_count);
map_list(opp, list_fsl, &list_count);
break;
case OPENPIC_MODEL_FSL_MPIC_42:
opp->fsl = &fsl_mpic_42;
opp->brr1 = 0x00400402;
opp->flags |= OPENPIC_FLAG_ILR;
opp->nb_irqs = 196;
opp->mpic_mode_mask = GCR_MODE_PROXY;
fsl_common_init(opp);
map_list(opp, list_be, &list_count);
map_list(opp, list_fsl, &list_count);
break;
case OPENPIC_MODEL_RAVEN:
opp->nb_irqs = RAVEN_MAX_EXT;
opp->vid = VID_REVISION_1_3;
opp->vir = VIR_GENERIC;
opp->vector_mask = 0xFF;
opp->tfrr_reset = 4160000;
opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK;
opp->idr_reset = 0;
opp->max_irq = RAVEN_MAX_IRQ;
opp->irq_ipi0 = RAVEN_IPI_IRQ;
opp->irq_tim0 = RAVEN_TMR_IRQ;
opp->brr1 = -1;
opp->mpic_mode_mask = GCR_MODE_MIXED;
if (opp->nb_cpus != 1) {
error_setg(errp, "Only UP supported today");
return;
}
map_list(opp, list_le, &list_count);
break;
}
for (i = 0; i < opp->nb_cpus; i++) {
opp->dst[i].irqs = g_new(qemu_irq, OPENPIC_OUTPUT_NB);
for (j = 0; j < OPENPIC_OUTPUT_NB; j++) {
sysbus_init_irq(d, &opp->dst[i].irqs[j]);
}
}
register_savevm(dev, "openpic", 0, 2,
openpic_save, openpic_load, opp);
sysbus_init_mmio(d, &opp->mem);
qdev_init_gpio_in(dev, openpic_set_irq, opp->max_irq);
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'openpic: avoid buffer overrun on incoming migration
CVE-2013-4534
opp->nb_cpus is read from the wire and used to determine how many
IRQDest elements to read into opp->dst[]. If the value exceeds the
length of opp->dst[], MAX_CPU, opp->dst[] can be overrun with arbitrary
data from the wire.
Fix this by failing migration if the value read from the wire exceeds
MAX_CPU.
Signed-off-by: Michael Roth <[email protected]>
Reviewed-by: Alexander Graf <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Juan Quintela <[email protected]>'</s>
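A hedged sketch of the load-side guard; reading nb_cpus in openpic_load() this way is an assumption about where the value comes off the wire:
opp->nb_cpus = qemu_get_be32(f);
if (opp->nb_cpus > MAX_CPU) {   /* opp->dst[] holds at most MAX_CPU entries */
    return -EINVAL;             /* fail migration instead of overrunning */
}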
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: _gnutls_x509_verify_certificate (const gnutls_x509_crt_t * certificate_list,
int clist_size,
const gnutls_x509_crt_t * trusted_cas,
int tcas_size,
const gnutls_x509_crl_t * CRLs,
int crls_size, unsigned int flags)
{
int i = 0, ret;
unsigned int status = 0, output;
if (clist_size > 1)
{
/* Check if the last certificate in the path is self signed.
* In that case ignore it (a certificate is trusted only if it
* leads to a trusted party by us, not the server's).
*
* This prevents from verifying self signed certificates against
* themselves. This (although not bad) caused verification
* failures on some root self signed certificates that use the
* MD2 algorithm.
*/
if (gnutls_x509_crt_check_issuer (certificate_list[clist_size - 1],
certificate_list[clist_size - 1]) > 0)
{
clist_size--;
}
}
/* We want to shorten the chain by removing the cert that matches
* one of the certs we trust and all the certs after that i.e. if
* cert chain is A signed-by B signed-by C signed-by D (signed-by
* self-signed E but already removed above), and we trust B, remove
* B, C and D. We must leave the first cert on chain. */
if (clist_size > 1 && !(flags & GNUTLS_VERIFY_DO_NOT_ALLOW_SAME))
{
for (i = 1; i < clist_size; i++)
{
int j;
for (j = 0; j < tcas_size; j++)
{
if (check_if_same_cert (certificate_list[i],
trusted_cas[j]) == 0)
{
clist_size = i;
break;
}
}
/* clist_size may have been changed which gets out of loop */
}
}
/* Verify the last certificate in the certificate path
* against the trusted CA certificate list.
*
* If no CAs are present returns CERT_INVALID. Thus works
* in self signed etc certificates.
*/
ret = _gnutls_verify_certificate2 (certificate_list[clist_size - 1],
trusted_cas, tcas_size, flags, &output);
if (ret == 0)
{
/* if the last certificate in the certificate
* list is invalid, then the certificate is not
* trusted.
*/
gnutls_assert ();
status |= output;
status |= GNUTLS_CERT_INVALID;
return status;
}
/* Check for revoked certificates in the chain
*/
#ifdef ENABLE_PKI
for (i = 0; i < clist_size; i++)
{
ret = gnutls_x509_crt_check_revocation (certificate_list[i],
CRLs, crls_size);
if (ret == 1)
{ /* revoked */
status |= GNUTLS_CERT_REVOKED;
status |= GNUTLS_CERT_INVALID;
return status;
}
}
#endif
/* Verify the certificate path (chain)
*/
for (i = clist_size - 1; i > 0; i--)
{
if (i - 1 < 0)
break;
/* note that here we disable this V1 CA flag. So that no version 1
* certificates can exist in a supplied chain.
*/
if (!(flags & GNUTLS_VERIFY_ALLOW_ANY_X509_V1_CA_CRT))
flags ^= GNUTLS_VERIFY_ALLOW_X509_V1_CA_CRT;
if ((ret =
_gnutls_verify_certificate2 (certificate_list[i - 1],
&certificate_list[i], 1, flags,
NULL)) == 0)
{
status |= GNUTLS_CERT_INVALID;
return status;
}
}
return 0;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'Corrected bit disable (was flipping instead).
Initially reported by Daniel Kahn Gillmor on 9/1/2008. Many thanks to
[email protected] for bringing this to my attention.'</s>
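The one-line correction the message implies: clear the bit with AND-NOT rather than toggling it with XOR, so V1 CA certificates stay disabled no matter how often the loop runs:
if (!(flags & GNUTLS_VERIFY_ALLOW_ANY_X509_V1_CA_CRT))
    flags &= ~GNUTLS_VERIFY_ALLOW_X509_V1_CA_CRT;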
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
struct inet_frag_queue *qp_in, struct inet_frags *f,
void *arg)
{
struct inet_frag_bucket *hb;
struct inet_frag_queue *qp;
unsigned int hash;
read_lock(&f->lock); /* Protects against hash rebuild */
/*
* While we stayed w/o the lock other CPU could update
* the rnd seed, so we need to re-calculate the hash
* chain. Fortunately the qp_in can be used to get one.
*/
hash = f->hashfn(qp_in);
hb = &f->hash[hash];
spin_lock(&hb->chain_lock);
#ifdef CONFIG_SMP
/* With SMP race we have to recheck hash table, because
* such entry could be created on other cpu, while we
* released the hash bucket lock.
*/
hlist_for_each_entry(qp, &hb->chain, list) {
if (qp->net == nf && f->match(qp, arg)) {
atomic_inc(&qp->refcnt);
spin_unlock(&hb->chain_lock);
read_unlock(&f->lock);
qp_in->last_in |= INET_FRAG_COMPLETE;
inet_frag_put(qp_in, f);
return qp;
}
}
#endif
qp = qp_in;
if (!mod_timer(&qp->timer, jiffies + nf->timeout))
atomic_inc(&qp->refcnt);
atomic_inc(&qp->refcnt);
hlist_add_head(&qp->list, &hb->chain);
spin_unlock(&hb->chain_lock);
read_unlock(&f->lock);
inet_frag_lru_add(nf, qp);
return qp;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-362'], 'message': 'net: fix for a race condition in the inet frag code
I stumbled upon this very serious bug while hunting for another one,
it's a very subtle race condition between inet_frag_evictor,
inet_frag_intern and the IPv4/6 frag_queue and expire functions
(basically the users of inet_frag_kill/inet_frag_put).
What happens is that after a fragment has been added to the hash chain
but before it's been added to the lru_list (inet_frag_lru_add) in
inet_frag_intern, it may get deleted (either by an expired timer if
the system load is high or the timer sufficiently low, or by the
frag_queue function for different reasons) before it's added to the
lru_list, then after it gets added it's a matter of time for the
evictor to get to a piece of memory which has been freed leading to a
number of different bugs depending on what's left there.
I've been able to trigger this on both IPv4 and IPv6 (which is normal
as the frag code is the same), but it's been much more difficult to
trigger on IPv4 due to the protocol differences about how fragments
are treated.
The setup I used to reproduce this is: 2 machines with 4 x 10G bonded
in a RR bond, so the same flow can be seen on multiple cards at the
same time. Then I used multiple instances of ping/ping6 to generate
fragmented packets and flood the machines with them while running
other processes to load the attacked machine.
*It is very important to have the _same flow_ coming in on multiple CPUs
concurrently. Usually the attacked machine would die in less than 30
minutes; if configured properly to have many evictor calls and timeouts,
it could happen in 10 minutes or so.
An important point to make is that any caller (frag_queue or timer) of
inet_frag_kill will remove both the timer refcount and the
original/guarding refcount thus removing everything that's keeping the
frag from being freed at the next inet_frag_put. All of this could
happen before the frag was ever added to the LRU list, then it gets
added and the evictor uses a freed fragment.
An example for IPv6 would be if a fragment is being added and is at
the stage of being inserted in the hash after the hash lock is
released, but before inet_frag_lru_add executes (or is able to obtain
the lru lock) another overlapping fragment for the same flow arrives
at a different CPU which finds it in the hash, but since it's
overlapping it drops it invoking inet_frag_kill and thus removing all
guarding refcounts, and afterwards freeing it by invoking
inet_frag_put which removes the last refcount added previously by
inet_frag_find, then inet_frag_lru_add gets executed by
inet_frag_intern and we have a freed fragment in the lru_list.
The fix is simple, just move the lru_add under the hash chain locked
region so when a removing function is called it'll have to wait for
the fragment to be added to the lru_list, and then it'll remove it (it
works because the hash chain removal is done before the lru_list one
and there's no window between the two list adds when the frag can get
dropped). With this fix applied I couldn't kill the same machine in 24
hours with the same setup.
Fixes: 3ef0eb0db4bf ("net: frag, move LRU list maintenance outside of
rwlock")
CC: Florian Westphal <[email protected]>
CC: Jesper Dangaard Brouer <[email protected]>
CC: David S. Miller <[email protected]>
Signed-off-by: Nikolay Aleksandrov <[email protected]>
Acked-by: Jesper Dangaard Brouer <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
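A sketch of the reordering the fix describes: the LRU insertion moves inside the chain-locked region, closing the window between the two list adds in which the fragment could be killed and freed:
hlist_add_head(&qp->list, &hb->chain);
inet_frag_lru_add(nf, qp);          /* now under hb->chain_lock */
spin_unlock(&hb->chain_lock);
read_unlock(&f->lock);
return qp;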
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int ssh_bind_accept_fd(ssh_bind sshbind, ssh_session session, socket_t fd){
int i, rc;
if (session == NULL){
ssh_set_error(sshbind, SSH_FATAL,"session is null");
return SSH_ERROR;
}
session->server = 1;
session->version = 2;
/* copy options */
for (i = 0; i < 10; ++i) {
if (sshbind->wanted_methods[i]) {
session->opts.wanted_methods[i] = strdup(sshbind->wanted_methods[i]);
if (session->opts.wanted_methods[i] == NULL) {
return SSH_ERROR;
}
}
}
if (sshbind->bindaddr == NULL)
session->opts.bindaddr = NULL;
else {
SAFE_FREE(session->opts.bindaddr);
session->opts.bindaddr = strdup(sshbind->bindaddr);
if (session->opts.bindaddr == NULL) {
return SSH_ERROR;
}
}
session->common.log_verbosity = sshbind->common.log_verbosity;
if(sshbind->banner != NULL)
session->opts.custombanner = strdup(sshbind->banner);
ssh_socket_free(session->socket);
session->socket = ssh_socket_new(session);
if (session->socket == NULL) {
/* perhaps it may be better to copy the error from session to sshbind */
ssh_set_error_oom(sshbind);
return SSH_ERROR;
}
ssh_socket_set_fd(session->socket, fd);
ssh_socket_get_poll_handle_out(session->socket);
/* We must try to import any keys that could be imported in case
* we are not using ssh_bind_listen (which is the other place
* where keys can be imported) on this ssh_bind and are instead
* only using ssh_bind_accept_fd to manage sockets ourselves.
*/
rc = ssh_bind_import_keys(sshbind);
if (rc != SSH_OK) {
return SSH_ERROR;
}
#ifdef HAVE_ECC
if (sshbind->ecdsa) {
session->srv.ecdsa_key = ssh_key_dup(sshbind->ecdsa);
if (session->srv.ecdsa_key == NULL) {
ssh_set_error_oom(sshbind);
return SSH_ERROR;
}
}
#endif
if (sshbind->dsa) {
session->srv.dsa_key = ssh_key_dup(sshbind->dsa);
if (session->srv.dsa_key == NULL) {
ssh_set_error_oom(sshbind);
return SSH_ERROR;
}
}
if (sshbind->rsa) {
session->srv.rsa_key = ssh_key_dup(sshbind->rsa);
if (session->srv.rsa_key == NULL) {
ssh_set_error_oom(sshbind);
return SSH_ERROR;
}
}
return SSH_OK;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-310'], 'message': 'security: fix for vulnerability CVE-2014-0017
When accepting a new connection, a forking server based on libssh forks
and the child process handles the request. The RAND_bytes() function of
openssl doesn't reset its state after the fork, but simply adds the
current process id (getpid) to the PRNG state, which is not guaranteed
to be unique.
This can cause several children to end up with same PRNG state which is
a security issue.'</s>
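A hedged sketch of the remedy the message implies: mix fresh per-process entropy into the OpenSSL PRNG after the fork so sibling children diverge (the helper name and the choice of entropy source are assumptions, not the actual patch):
#include <sys/time.h>
#include <openssl/rand.h>

static void ssh_reseed(void) {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    RAND_add(&tv, sizeof(tv), 0.0);  /* stir a per-process timestamp into the pool */
}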
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code:
private int
mget(struct magic_set *ms, const unsigned char *s, struct magic *m,
size_t nbytes, size_t o, unsigned int cont_level, int mode, int text,
int flip, int recursion_level, int *printed_something,
int *need_separator, int *returnval)
{
uint32_t soffset, offset = ms->offset;
uint32_t count = m->str_range;
int rv, oneed_separator;
char *sbuf, *rbuf;
union VALUETYPE *p = &ms->ms_value;
struct mlist ml;
if (recursion_level >= 20) {
file_error(ms, 0, "recursion nesting exceeded");
return -1;
}
if (mcopy(ms, p, m->type, m->flag & INDIR, s, (uint32_t)(offset + o),
(uint32_t)nbytes, count) == -1)
return -1;
if ((ms->flags & MAGIC_DEBUG) != 0) {
fprintf(stderr, "mget(type=%d, flag=%x, offset=%u, o=%zu, "
"nbytes=%zu, count=%u)\n", m->type, m->flag, offset, o,
nbytes, count);
mdebug(offset, (char *)(void *)p, sizeof(union VALUETYPE));
}
if (m->flag & INDIR) {
int off = m->in_offset;
if (m->in_op & FILE_OPINDIRECT) {
const union VALUETYPE *q = CAST(const union VALUETYPE *,
((const void *)(s + offset + off)));
switch (cvt_flip(m->in_type, flip)) {
case FILE_BYTE:
off = q->b;
break;
case FILE_SHORT:
off = q->h;
break;
case FILE_BESHORT:
off = (short)((q->hs[0]<<8)|(q->hs[1]));
break;
case FILE_LESHORT:
off = (short)((q->hs[1]<<8)|(q->hs[0]));
break;
case FILE_LONG:
off = q->l;
break;
case FILE_BELONG:
case FILE_BEID3:
off = (int32_t)((q->hl[0]<<24)|(q->hl[1]<<16)|
(q->hl[2]<<8)|(q->hl[3]));
break;
case FILE_LEID3:
case FILE_LELONG:
off = (int32_t)((q->hl[3]<<24)|(q->hl[2]<<16)|
(q->hl[1]<<8)|(q->hl[0]));
break;
case FILE_MELONG:
off = (int32_t)((q->hl[1]<<24)|(q->hl[0]<<16)|
(q->hl[3]<<8)|(q->hl[2]));
break;
}
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr, "indirect offs=%u\n", off);
}
switch (cvt_flip(m->in_type, flip)) {
case FILE_BYTE:
if (nbytes < (offset + 1))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = p->b & off;
break;
case FILE_OPOR:
offset = p->b | off;
break;
case FILE_OPXOR:
offset = p->b ^ off;
break;
case FILE_OPADD:
offset = p->b + off;
break;
case FILE_OPMINUS:
offset = p->b - off;
break;
case FILE_OPMULTIPLY:
offset = p->b * off;
break;
case FILE_OPDIVIDE:
offset = p->b / off;
break;
case FILE_OPMODULO:
offset = p->b % off;
break;
}
} else
offset = p->b;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_BESHORT:
if (nbytes < (offset + 2))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) &
off;
break;
case FILE_OPOR:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) |
off;
break;
case FILE_OPXOR:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) ^
off;
break;
case FILE_OPADD:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) +
off;
break;
case FILE_OPMINUS:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) *
off;
break;
case FILE_OPDIVIDE:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) /
off;
break;
case FILE_OPMODULO:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) %
off;
break;
}
} else
offset = (short)((p->hs[0]<<8)|
(p->hs[1]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_LESHORT:
if (nbytes < (offset + 2))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) &
off;
break;
case FILE_OPOR:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) |
off;
break;
case FILE_OPXOR:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) ^
off;
break;
case FILE_OPADD:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) +
off;
break;
case FILE_OPMINUS:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) *
off;
break;
case FILE_OPDIVIDE:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) /
off;
break;
case FILE_OPMODULO:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) %
off;
break;
}
} else
offset = (short)((p->hs[1]<<8)|
(p->hs[0]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_SHORT:
if (nbytes < (offset + 2))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = p->h & off;
break;
case FILE_OPOR:
offset = p->h | off;
break;
case FILE_OPXOR:
offset = p->h ^ off;
break;
case FILE_OPADD:
offset = p->h + off;
break;
case FILE_OPMINUS:
offset = p->h - off;
break;
case FILE_OPMULTIPLY:
offset = p->h * off;
break;
case FILE_OPDIVIDE:
offset = p->h / off;
break;
case FILE_OPMODULO:
offset = p->h % off;
break;
}
}
else
offset = p->h;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_BELONG:
case FILE_BEID3:
if (nbytes < (offset + 4))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) &
off;
break;
case FILE_OPOR:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) |
off;
break;
case FILE_OPXOR:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) ^
off;
break;
case FILE_OPADD:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) +
off;
break;
case FILE_OPMINUS:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) *
off;
break;
case FILE_OPDIVIDE:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) /
off;
break;
case FILE_OPMODULO:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) %
off;
break;
}
} else
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_LELONG:
case FILE_LEID3:
if (nbytes < (offset + 4))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) &
off;
break;
case FILE_OPOR:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) |
off;
break;
case FILE_OPXOR:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) ^
off;
break;
case FILE_OPADD:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) +
off;
break;
case FILE_OPMINUS:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) *
off;
break;
case FILE_OPDIVIDE:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) /
off;
break;
case FILE_OPMODULO:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) %
off;
break;
}
} else
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_MELONG:
if (nbytes < (offset + 4))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) &
off;
break;
case FILE_OPOR:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) |
off;
break;
case FILE_OPXOR:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) ^
off;
break;
case FILE_OPADD:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) +
off;
break;
case FILE_OPMINUS:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) *
off;
break;
case FILE_OPDIVIDE:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) /
off;
break;
case FILE_OPMODULO:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) %
off;
break;
}
} else
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_LONG:
if (nbytes < (offset + 4))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = p->l & off;
break;
case FILE_OPOR:
offset = p->l | off;
break;
case FILE_OPXOR:
offset = p->l ^ off;
break;
case FILE_OPADD:
offset = p->l + off;
break;
case FILE_OPMINUS:
offset = p->l - off;
break;
case FILE_OPMULTIPLY:
offset = p->l * off;
break;
case FILE_OPDIVIDE:
offset = p->l / off;
break;
case FILE_OPMODULO:
offset = p->l % off;
break;
}
} else
offset = p->l;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
}
switch (cvt_flip(m->in_type, flip)) {
case FILE_LEID3:
case FILE_BEID3:
offset = ((((offset >> 0) & 0x7f) << 0) |
(((offset >> 8) & 0x7f) << 7) |
(((offset >> 16) & 0x7f) << 14) |
(((offset >> 24) & 0x7f) << 21)) + 10;
break;
default:
break;
}
if (m->flag & INDIROFFADD) {
offset += ms->c.li[cont_level-1].off;
if (offset == 0) {
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr,
"indirect *zero* offset\n");
return 0;
}
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr, "indirect +offs=%u\n", offset);
}
if (mcopy(ms, p, m->type, 0, s, offset, nbytes, count) == -1)
return -1;
ms->offset = offset;
if ((ms->flags & MAGIC_DEBUG) != 0) {
mdebug(offset, (char *)(void *)p,
sizeof(union VALUETYPE));
}
}
/* Verify we have enough data to match magic type */
switch (m->type) {
case FILE_BYTE:
if (nbytes < (offset + 1)) /* should always be true */
return 0;
break;
case FILE_SHORT:
case FILE_BESHORT:
case FILE_LESHORT:
if (nbytes < (offset + 2))
return 0;
break;
case FILE_LONG:
case FILE_BELONG:
case FILE_LELONG:
case FILE_MELONG:
case FILE_DATE:
case FILE_BEDATE:
case FILE_LEDATE:
case FILE_MEDATE:
case FILE_LDATE:
case FILE_BELDATE:
case FILE_LELDATE:
case FILE_MELDATE:
case FILE_FLOAT:
case FILE_BEFLOAT:
case FILE_LEFLOAT:
if (nbytes < (offset + 4))
return 0;
break;
case FILE_DOUBLE:
case FILE_BEDOUBLE:
case FILE_LEDOUBLE:
if (nbytes < (offset + 8))
return 0;
break;
case FILE_STRING:
case FILE_PSTRING:
case FILE_SEARCH:
if (nbytes < (offset + m->vallen))
return 0;
break;
case FILE_REGEX:
if (nbytes < offset)
return 0;
break;
case FILE_INDIRECT:
if (offset == 0)
return 0;
if (nbytes < offset)
return 0;
sbuf = ms->o.buf;
soffset = ms->offset;
ms->o.buf = NULL;
ms->offset = 0;
rv = file_softmagic(ms, s + offset, nbytes - offset,
recursion_level, BINTEST, text);
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr, "indirect @offs=%u[%d]\n", offset, rv);
rbuf = ms->o.buf;
ms->o.buf = sbuf;
ms->offset = soffset;
if (rv == 1) {
if ((ms->flags & (MAGIC_MIME|MAGIC_APPLE)) == 0 &&
file_printf(ms, m->desc, offset) == -1) {
if (rbuf) {
efree(rbuf);
}
return -1;
}
if (file_printf(ms, "%s", rbuf) == -1) {
if (rbuf) {
efree(rbuf);
}
return -1;
}
}
if (rbuf) {
efree(rbuf);
}
return rv;
case FILE_USE:
if (nbytes < offset)
return 0;
sbuf = m->value.s;
if (*sbuf == '^') {
sbuf++;
flip = !flip;
}
if (file_magicfind(ms, sbuf, &ml) == -1) {
file_error(ms, 0, "cannot find entry `%s'", sbuf);
return -1;
}
oneed_separator = *need_separator;
if (m->flag & NOSPACE)
*need_separator = 0;
rv = match(ms, ml.magic, ml.nmagic, s, nbytes, offset + o,
mode, text, flip, recursion_level, printed_something,
need_separator, returnval);
if (rv != 1)
*need_separator = oneed_separator;
return rv;
case FILE_NAME:
if (file_printf(ms, "%s", m->desc) == -1)
return -1;
return 1;
case FILE_DEFAULT: /* nothing to check */
default:
break;
}
if (!mconvert(ms, m, flip))
return 0; ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'Fixed Bug #66820 out-of-bounds memory access in fileinfo
Upstream fix:
https://github.com/glensc/file/commit/447558595a3650db2886cd2f416ad0beba965801
Notice, test changed, with upstream agreement:
-#define OFFSET_OOB(n, o, i) ((n) < (o) || (i) >= ((n) - (o)))
+#define OFFSET_OOB(n, o, i) ((n) < (o) || (i) > ((n) - (o)))'</s>
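A usage sketch of the corrected bounds macro quoted above; applying it to the string case is illustrative, not the full patch:
#define OFFSET_OOB(n, o, i) ((n) < (o) || (i) > ((n) - (o)))

if (OFFSET_OOB(nbytes, offset, m->vallen))
    return 0;   /* not enough data left to match this magic entry */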
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
struct dccp_net *dn;
struct dccp_hdr _dh, *dh;
const char *msg;
u_int8_t state;
dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
BUG_ON(dh == NULL);
state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
switch (state) {
default:
dn = dccp_pernet(net);
if (dn->dccp_loose == 0) {
msg = "nf_ct_dccp: not picking up existing connection ";
goto out_invalid;
}
case CT_DCCP_REQUEST:
break;
case CT_DCCP_INVALID:
msg = "nf_ct_dccp: invalid state transition ";
goto out_invalid;
}
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
ct->proto.dccp.state = CT_DCCP_NONE;
ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST;
ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL;
ct->proto.dccp.handshake_seq = 0;
return true;
out_invalid:
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
NULL, "%s", msg);
return false;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'netfilter: nf_conntrack_dccp: fix skb_header_pointer API usages
Some occurrences in the netfilter tree use skb_header_pointer() in
the following way ...
struct dccp_hdr _dh, *dh;
...
skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
... where dh itself is a pointer that is being passed as the copy
buffer. Instead, we need to use &_dh as the fourth argument so that
we're copying the data into an actual buffer that sits on the stack.
Currently, we probably could overwrite memory on the stack (e.g.
with a possibly malformed DCCP packet), but unintentionally, as
we only want the buffer to be placed into _dh variable.
Fixes: 2bc780499aa3 ("[NETFILTER]: nf_conntrack: add DCCP protocol support")
Signed-off-by: Daniel Borkmann <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>'</s>
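The corrected call from the message, applying to both DCCP call sites in these entries: passing &_dh makes skb_header_pointer() copy the header into the stack buffer instead of writing over the pointer variable itself:
dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);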
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int dccp_error(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb, unsigned int dataoff,
enum ip_conntrack_info *ctinfo,
u_int8_t pf, unsigned int hooknum)
{
struct dccp_hdr _dh, *dh;
unsigned int dccp_len = skb->len - dataoff;
unsigned int cscov;
const char *msg;
dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
if (dh == NULL) {
msg = "nf_ct_dccp: short packet ";
goto out_invalid;
}
if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) ||
dh->dccph_doff * 4 > dccp_len) {
msg = "nf_ct_dccp: truncated/malformed packet ";
goto out_invalid;
}
cscov = dccp_len;
if (dh->dccph_cscov) {
cscov = (dh->dccph_cscov - 1) * 4;
if (cscov > dccp_len) {
msg = "nf_ct_dccp: bad checksum coverage ";
goto out_invalid;
}
}
if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_DCCP,
pf)) {
msg = "nf_ct_dccp: bad checksum ";
goto out_invalid;
}
if (dh->dccph_type >= DCCP_PKT_INVALID) {
msg = "nf_ct_dccp: reserved packet type ";
goto out_invalid;
}
return NF_ACCEPT;
out_invalid:
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
return -NF_ACCEPT;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'netfilter: nf_conntrack_dccp: fix skb_header_pointer API usages
Some occurrences in the netfilter tree use skb_header_pointer() in
the following way ...
struct dccp_hdr _dh, *dh;
...
skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
... where dh itself is a pointer that is being passed as the copy
buffer. Instead, we need to use &_dh as the fourth argument so that
we're copying the data into an actual buffer that sits on the stack.
Currently, we probably could overwrite memory on the stack (e.g.
with a possibly malformed DCCP packet), but unintentionally, as
we only want the buffer to be placed into _dh variable.
Fixes: 2bc780499aa3 ("[NETFILTER]: nf_conntrack: add DCCP protocol support")
Signed-off-by: Daniel Borkmann <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVBochsState *s = bs->opaque;
uint32_t i;
struct bochs_header bochs;
int ret;
bs->read_only = 1; // no write support yet
ret = bdrv_pread(bs->file, 0, &bochs, sizeof(bochs));
if (ret < 0) {
return ret;
}
if (strcmp(bochs.magic, HEADER_MAGIC) ||
strcmp(bochs.type, REDOLOG_TYPE) ||
strcmp(bochs.subtype, GROWING_TYPE) ||
((le32_to_cpu(bochs.version) != HEADER_VERSION) &&
(le32_to_cpu(bochs.version) != HEADER_V1))) {
error_setg(errp, "Image not in Bochs format");
return -EINVAL;
}
if (le32_to_cpu(bochs.version) == HEADER_V1) {
bs->total_sectors = le64_to_cpu(bochs.extra.redolog_v1.disk) / 512;
} else {
bs->total_sectors = le64_to_cpu(bochs.extra.redolog.disk) / 512;
}
/* Limit to 1M entries to avoid unbounded allocation. This is what is
* needed for the largest image that bximage can create (~8 TB). */
s->catalog_size = le32_to_cpu(bochs.catalog);
if (s->catalog_size > 0x100000) {
error_setg(errp, "Catalog size is too large");
return -EFBIG;
}
s->catalog_bitmap = g_malloc(s->catalog_size * 4);
ret = bdrv_pread(bs->file, le32_to_cpu(bochs.header), s->catalog_bitmap,
s->catalog_size * 4);
if (ret < 0) {
goto fail;
}
for (i = 0; i < s->catalog_size; i++)
le32_to_cpus(&s->catalog_bitmap[i]);
s->data_offset = le32_to_cpu(bochs.header) + (s->catalog_size * 4);
s->bitmap_blocks = 1 + (le32_to_cpu(bochs.bitmap) - 1) / 512;
s->extent_blocks = 1 + (le32_to_cpu(bochs.extent) - 1) / 512;
s->extent_size = le32_to_cpu(bochs.extent);
if (s->catalog_size < bs->total_sectors / s->extent_size) {
error_setg(errp, "Catalog size is too small for this disk size");
ret = -EINVAL;
goto fail;
}
qemu_co_mutex_init(&s->lock);
return 0;
fail:
g_free(s->catalog_bitmap);
return ret;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-369'], 'message': 'bochs: Check extent_size header field (CVE-2014-0142)
This fixes two possible division by zero crashes: In bochs_open() and in
seek_to_sector().
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Stefan Hajnoczi <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>'</s>
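A minimal sketch of the extent_size guard; rejecting zero is the essential part for the divisions, though the actual patch may bound the value further:
s->extent_size = le32_to_cpu(bochs.extent);
if (s->extent_size == 0) {
    error_setg(errp, "Extent size may not be 0");   /* avoids the division */
    ret = -EINVAL;
    goto fail;
}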
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVQcowState *s = bs->opaque;
unsigned int len, i;
int ret = 0;
QCowHeader header;
QemuOpts *opts;
Error *local_err = NULL;
uint64_t ext_end;
uint64_t l1_vm_state_index;
const char *opt_overlap_check;
int overlap_check_template = 0;
ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read qcow2 header");
goto fail;
}
be32_to_cpus(&header.magic);
be32_to_cpus(&header.version);
be64_to_cpus(&header.backing_file_offset);
be32_to_cpus(&header.backing_file_size);
be64_to_cpus(&header.size);
be32_to_cpus(&header.cluster_bits);
be32_to_cpus(&header.crypt_method);
be64_to_cpus(&header.l1_table_offset);
be32_to_cpus(&header.l1_size);
be64_to_cpus(&header.refcount_table_offset);
be32_to_cpus(&header.refcount_table_clusters);
be64_to_cpus(&header.snapshots_offset);
be32_to_cpus(&header.nb_snapshots);
if (header.magic != QCOW_MAGIC) {
error_setg(errp, "Image is not in qcow2 format");
ret = -EINVAL;
goto fail;
}
if (header.version < 2 || header.version > 3) {
report_unsupported(bs, errp, "QCOW version %d", header.version);
ret = -ENOTSUP;
goto fail;
}
s->qcow_version = header.version;
/* Initialise cluster size */
if (header.cluster_bits < MIN_CLUSTER_BITS ||
header.cluster_bits > MAX_CLUSTER_BITS) {
error_setg(errp, "Unsupported cluster size: 2^%i", header.cluster_bits);
ret = -EINVAL;
goto fail;
}
s->cluster_bits = header.cluster_bits;
s->cluster_size = 1 << s->cluster_bits;
s->cluster_sectors = 1 << (s->cluster_bits - 9);
/* Initialise version 3 header fields */
if (header.version == 2) {
header.incompatible_features = 0;
header.compatible_features = 0;
header.autoclear_features = 0;
header.refcount_order = 4;
header.header_length = 72;
} else {
be64_to_cpus(&header.incompatible_features);
be64_to_cpus(&header.compatible_features);
be64_to_cpus(&header.autoclear_features);
be32_to_cpus(&header.refcount_order);
be32_to_cpus(&header.header_length);
if (header.header_length < 104) {
error_setg(errp, "qcow2 header too short");
ret = -EINVAL;
goto fail;
}
}
if (header.header_length > s->cluster_size) {
error_setg(errp, "qcow2 header exceeds cluster size");
ret = -EINVAL;
goto fail;
}
if (header.header_length > sizeof(header)) {
s->unknown_header_fields_size = header.header_length - sizeof(header);
s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields,
s->unknown_header_fields_size);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read unknown qcow2 header "
"fields");
goto fail;
}
}
if (header.backing_file_offset > s->cluster_size) {
error_setg(errp, "Invalid backing file offset");
ret = -EINVAL;
goto fail;
}
if (header.backing_file_offset) {
ext_end = header.backing_file_offset;
} else {
ext_end = 1 << header.cluster_bits;
}
/* Handle feature bits */
s->incompatible_features = header.incompatible_features;
s->compatible_features = header.compatible_features;
s->autoclear_features = header.autoclear_features;
if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {
void *feature_table = NULL;
qcow2_read_extensions(bs, header.header_length, ext_end,
&feature_table, NULL);
report_unsupported_feature(bs, errp, feature_table,
s->incompatible_features &
~QCOW2_INCOMPAT_MASK);
ret = -ENOTSUP;
g_free(feature_table);
goto fail;
}
if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
/* Corrupt images may not be written to unless they are being repaired
*/
if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) {
error_setg(errp, "qcow2: Image is corrupt; cannot be opened "
"read/write");
ret = -EACCES;
goto fail;
}
}
/* Check support for various header values */
if (header.refcount_order != 4) {
report_unsupported(bs, errp, "%d bit reference counts",
1 << header.refcount_order);
ret = -ENOTSUP;
goto fail;
}
s->refcount_order = header.refcount_order;
if (header.crypt_method > QCOW_CRYPT_AES) {
error_setg(errp, "Unsupported encryption method: %i",
header.crypt_method);
ret = -EINVAL;
goto fail;
}
s->crypt_method_header = header.crypt_method;
if (s->crypt_method_header) {
bs->encrypted = 1;
}
s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
s->l2_size = 1 << s->l2_bits;
bs->total_sectors = header.size / 512;
s->csize_shift = (62 - (s->cluster_bits - 8));
s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
s->refcount_table_offset = header.refcount_table_offset;
s->refcount_table_size =
header.refcount_table_clusters << (s->cluster_bits - 3);
if (header.refcount_table_clusters > qcow2_max_refcount_clusters(s)) {
error_setg(errp, "Reference count table too large");
ret = -EINVAL;
goto fail;
}
ret = validate_table_offset(bs, s->refcount_table_offset,
s->refcount_table_size, sizeof(uint64_t));
if (ret < 0) {
error_setg(errp, "Invalid reference count table offset");
goto fail;
}
/* Snapshot table offset/length */
if (header.nb_snapshots > QCOW_MAX_SNAPSHOTS) {
error_setg(errp, "Too many snapshots");
ret = -EINVAL;
goto fail;
}
ret = validate_table_offset(bs, header.snapshots_offset,
header.nb_snapshots,
sizeof(QCowSnapshotHeader));
if (ret < 0) {
error_setg(errp, "Invalid snapshot table offset");
goto fail;
}
s->snapshots_offset = header.snapshots_offset;
s->nb_snapshots = header.nb_snapshots;
/* read the level 1 table */
if (header.l1_size > 0x2000000) {
/* 32 MB L1 table is enough for 2 PB images at 64k cluster size
* (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
error_setg(errp, "Active L1 table too large");
ret = -EFBIG;
goto fail;
}
s->l1_size = header.l1_size;
l1_vm_state_index = size_to_l1(s, header.size);
if (l1_vm_state_index > INT_MAX) {
error_setg(errp, "Image is too big");
ret = -EFBIG;
goto fail;
}
s->l1_vm_state_index = l1_vm_state_index;
/* the L1 table must contain at least enough entries to put
header.size bytes */
if (s->l1_size < s->l1_vm_state_index) {
error_setg(errp, "L1 table is too small");
ret = -EINVAL;
goto fail;
}
ret = validate_table_offset(bs, header.l1_table_offset,
header.l1_size, sizeof(uint64_t));
if (ret < 0) {
error_setg(errp, "Invalid L1 table offset");
goto fail;
}
s->l1_table_offset = header.l1_table_offset;
if (s->l1_size > 0) {
s->l1_table = g_malloc0(
align_offset(s->l1_size * sizeof(uint64_t), 512));
ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
s->l1_size * sizeof(uint64_t));
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read L1 table");
goto fail;
}
for(i = 0;i < s->l1_size; i++) {
be64_to_cpus(&s->l1_table[i]);
}
}
/* alloc L2 table/refcount block cache */
s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE);
s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE);
s->cluster_cache = g_malloc(s->cluster_size);
/* one more sector for decompressed data alignment */
s->cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
+ 512);
s->cluster_cache_offset = -1;
s->flags = flags;
ret = qcow2_refcount_init(bs);
if (ret != 0) {
error_setg_errno(errp, -ret, "Could not initialize refcount handling");
goto fail;
}
QLIST_INIT(&s->cluster_allocs);
QTAILQ_INIT(&s->discards);
/* read qcow2 extensions */
if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL,
&local_err)) {
error_propagate(errp, local_err);
ret = -EINVAL;
goto fail;
}
/* read the backing file name */
if (header.backing_file_offset != 0) {
len = header.backing_file_size;
if (len > MIN(1023, s->cluster_size - header.backing_file_offset)) {
error_setg(errp, "Backing file name too long");
ret = -EINVAL;
goto fail;
}
ret = bdrv_pread(bs->file, header.backing_file_offset,
bs->backing_file, len);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read backing file name");
goto fail;
}
bs->backing_file[len] = '\0';
}
ret = qcow2_read_snapshots(bs);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read snapshots");
goto fail;
}
/* Clear unknown autoclear feature bits */
if (!bs->read_only && !(flags & BDRV_O_INCOMING) && s->autoclear_features) {
s->autoclear_features = 0;
ret = qcow2_update_header(bs);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not update qcow2 header");
goto fail;
}
}
/* Initialise locks */
qemu_co_mutex_init(&s->lock);
/* Repair image if dirty */
if (!(flags & (BDRV_O_CHECK | BDRV_O_INCOMING)) && !bs->read_only &&
(s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) {
BdrvCheckResult result = {0};
ret = qcow2_check(bs, &result, BDRV_FIX_ERRORS);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not repair dirty image");
goto fail;
}
}
/* Enable lazy_refcounts according to image and command line options */
opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort);
qemu_opts_absorb_qdict(opts, options, &local_err);
if (local_err) {
error_propagate(errp, local_err);
ret = -EINVAL;
goto fail;
}
s->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS,
(s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS));
s->discard_passthrough[QCOW2_DISCARD_NEVER] = false;
s->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true;
s->discard_passthrough[QCOW2_DISCARD_REQUEST] =
qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST,
flags & BDRV_O_UNMAP);
s->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] =
qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true);
s->discard_passthrough[QCOW2_DISCARD_OTHER] =
qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false);
opt_overlap_check = qemu_opt_get(opts, "overlap-check") ?: "cached";
if (!strcmp(opt_overlap_check, "none")) {
overlap_check_template = 0;
} else if (!strcmp(opt_overlap_check, "constant")) {
overlap_check_template = QCOW2_OL_CONSTANT;
} else if (!strcmp(opt_overlap_check, "cached")) {
overlap_check_template = QCOW2_OL_CACHED;
} else if (!strcmp(opt_overlap_check, "all")) {
overlap_check_template = QCOW2_OL_ALL;
} else {
error_setg(errp, "Unsupported value '%s' for qcow2 option "
"'overlap-check'. Allowed are either of the following: "
"none, constant, cached, all", opt_overlap_check);
qemu_opts_del(opts);
ret = -EINVAL;
goto fail;
}
s->overlap_check = 0;
for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) {
/* overlap-check defines a template bitmask, but every flag may be
* overwritten through the associated boolean option */
s->overlap_check |=
qemu_opt_get_bool(opts, overlap_bool_option_names[i],
overlap_check_template & (1 << i)) << i;
}
qemu_opts_del(opts);
if (s->use_lazy_refcounts && s->qcow_version < 3) {
error_setg(errp, "Lazy refcounts require a qcow2 image with at least "
"qemu 1.1 compatibility level");
ret = -EINVAL;
goto fail;
}
#ifdef DEBUG_ALLOC
{
BdrvCheckResult result = {0};
qcow2_check_refcounts(bs, &result, 0);
}
#endif
return ret;
fail:
g_free(s->unknown_header_fields);
cleanup_unknown_header_ext(bs);
qcow2_free_snapshots(bs);
qcow2_refcount_close(bs);
g_free(s->l1_table);
/* else pre-write overlap checks in cache_destroy may crash */
s->l1_table = NULL;
if (s->l2_table_cache) {
qcow2_cache_destroy(bs, s->l2_table_cache);
}
if (s->refcount_block_cache) {
qcow2_cache_destroy(bs, s->refcount_block_cache);
}
g_free(s->cluster_cache);
qemu_vfree(s->cluster_data);
return ret;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-476'], 'message': 'qcow2: Fix NULL dereference in qcow2_open() error path (CVE-2014-0146)
The qcow2 code assumes that s->snapshots is non-NULL if s->nb_snapshots
!= 0. By having the initialisation of both fields separated in
qcow2_open(), any error occurring in between would cause the error path
to dereference NULL in qcow2_free_snapshots() if the image had any
snapshots.
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>'</s>
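A hedged sketch of the invariant the message states: s->nb_snapshots must never be non-zero while s->snapshots is still NULL. One way to uphold it is to populate the table immediately after recording the count (this ordering is an illustration, not the actual patch):
s->snapshots_offset = header.snapshots_offset;
s->nb_snapshots = header.nb_snapshots;
ret = qcow2_read_snapshots(bs);   /* fills in s->snapshots right away */
if (ret < 0) {
    error_setg_errno(errp, -ret, "Could not read snapshots");
    goto fail;
}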
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int64_t qcow2_alloc_clusters(BlockDriverState *bs, int64_t size)
{
int64_t offset;
int ret;
BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
offset = alloc_clusters_noref(bs, size);
if (offset < 0) {
return offset;
}
ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER);
if (ret < 0) {
return ret;
}
return offset;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-190'], 'message': 'qcow2: Don't rely on free_cluster_index in alloc_refcount_block() (CVE-2014-0147)
free_cluster_index is only correct if update_refcount() was called from
an allocation function, and even there it's brittle because it's used to
protect unfinished allocations which still have a refcount of 0 - if it
moves in the wrong place, the unfinished allocation can be corrupted.
So not using it any more seems to be a good idea. Instead, use the
first requested cluster to do the calculations. Return -EAGAIN if
unfinished allocations could become invalid and let the caller restart
its search for some free clusters.
The context of creating a snapshot is one situation where
update_refcount() is called outside of a cluster allocation. For this
case, the change fixes a buffer overflow if a cluster is referenced in
an L2 table that cannot be represented by an existing refcount block.
(new_table[refcount_table_index] was out of bounds)
[Bump the qemu-iotests 026 refblock_alloc.write leak count from 10 to
11.
--Stefan]
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>'</s>
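A sketch of the -EAGAIN retry loop the message describes for qcow2_alloc_clusters(): restart the free-cluster search whenever update_refcount() reports that an unfinished allocation would have become invalid.
do {
    offset = alloc_clusters_noref(bs, size);
    if (offset < 0) {
        return offset;
    }
    ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER);
} while (ret == -EAGAIN);

if (ret < 0) {
    return ret;
}
return offset;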
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int alloc_refcount_block(BlockDriverState *bs,
int64_t cluster_index, uint16_t **refcount_block)
{
BDRVQcowState *s = bs->opaque;
unsigned int refcount_table_index;
int ret;
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC);
/* Find the refcount block for the given cluster */
refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
if (refcount_table_index < s->refcount_table_size) {
uint64_t refcount_block_offset =
s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;
/* If it's already there, we're done */
if (refcount_block_offset) {
return load_refcount_block(bs, refcount_block_offset,
(void**) refcount_block);
}
}
/*
* If we came here, we need to allocate something. Something is at least
* a cluster for the new refcount block. It may also include a new refcount
* table if the old refcount table is too small.
*
* Note that allocating clusters here needs some special care:
*
* - We can't use the normal qcow2_alloc_clusters(), it would try to
* increase the refcount and very likely we would end up with an endless
* recursion. Instead we must place the refcount blocks in a way that
* they can describe them themselves.
*
* - We need to consider that at this point we are inside update_refcounts
* and doing the initial refcount increase. This means that some clusters
* have already been allocated by the caller, but their refcount isn't
* accurate yet. free_cluster_index tells us where this allocation ends
* as long as we don't overwrite it by freeing clusters.
*
* - alloc_clusters_noref and qcow2_free_clusters may load a different
* refcount block into the cache
*/
*refcount_block = NULL;
/* We write to the refcount table, so we might depend on L2 tables */
ret = qcow2_cache_flush(bs, s->l2_table_cache);
if (ret < 0) {
return ret;
}
/* Allocate the refcount block itself and mark it as used */
int64_t new_block = alloc_clusters_noref(bs, s->cluster_size);
if (new_block < 0) {
return new_block;
}
#ifdef DEBUG_ALLOC2
fprintf(stderr, "qcow2: Allocate refcount block %d for %" PRIx64
" at %" PRIx64 "\n",
refcount_table_index, cluster_index << s->cluster_bits, new_block);
#endif
if (in_same_refcount_block(s, new_block, cluster_index << s->cluster_bits)) {
/* Zero the new refcount block before updating it */
ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
(void**) refcount_block);
if (ret < 0) {
goto fail_block;
}
memset(*refcount_block, 0, s->cluster_size);
/* The block describes itself, need to update the cache */
int block_index = (new_block >> s->cluster_bits) &
((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
(*refcount_block)[block_index] = cpu_to_be16(1);
} else {
/* Described somewhere else. This can recurse at most twice before we
* arrive at a block that describes itself. */
ret = update_refcount(bs, new_block, s->cluster_size, 1,
QCOW2_DISCARD_NEVER);
if (ret < 0) {
goto fail_block;
}
ret = qcow2_cache_flush(bs, s->refcount_block_cache);
if (ret < 0) {
goto fail_block;
}
/* Initialize the new refcount block only after updating its refcount,
* update_refcount uses the refcount cache itself */
ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
(void**) refcount_block);
if (ret < 0) {
goto fail_block;
}
memset(*refcount_block, 0, s->cluster_size);
}
/* Now the new refcount block needs to be written to disk */
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE);
qcow2_cache_entry_mark_dirty(s->refcount_block_cache, *refcount_block);
ret = qcow2_cache_flush(bs, s->refcount_block_cache);
if (ret < 0) {
goto fail_block;
}
/* If the refcount table is big enough, just hook the block up there */
if (refcount_table_index < s->refcount_table_size) {
uint64_t data64 = cpu_to_be64(new_block);
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP);
ret = bdrv_pwrite_sync(bs->file,
s->refcount_table_offset + refcount_table_index * sizeof(uint64_t),
&data64, sizeof(data64));
if (ret < 0) {
goto fail_block;
}
s->refcount_table[refcount_table_index] = new_block;
return 0;
}
ret = qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block);
if (ret < 0) {
goto fail_block;
}
/*
* If we come here, we need to grow the refcount table. Again, a new
* refcount table needs some space and we can't simply allocate to avoid
* endless recursion.
*
* Therefore let's grab new refcount blocks at the end of the image, which
* will describe themselves and the new refcount table. This way we can
* reference them only in the new table and do the switch to the new
* refcount table at once without producing an inconsistent state in
* between.
*/
BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_GROW);
/* Calculate the number of refcount blocks needed so far */
uint64_t refcount_block_clusters = 1 << (s->cluster_bits - REFCOUNT_SHIFT);
uint64_t blocks_used = (s->free_cluster_index +
refcount_block_clusters - 1) / refcount_block_clusters;
/* And now we need at least one block more for the new metadata */
uint64_t table_size = next_refcount_table_size(s, blocks_used + 1);
uint64_t last_table_size;
uint64_t blocks_clusters;
do {
uint64_t table_clusters =
size_to_clusters(s, table_size * sizeof(uint64_t));
blocks_clusters = 1 +
((table_clusters + refcount_block_clusters - 1)
/ refcount_block_clusters);
uint64_t meta_clusters = table_clusters + blocks_clusters;
last_table_size = table_size;
table_size = next_refcount_table_size(s, blocks_used +
((meta_clusters + refcount_block_clusters - 1)
/ refcount_block_clusters));
} while (last_table_size != table_size);
#ifdef DEBUG_ALLOC2
fprintf(stderr, "qcow2: Grow refcount table %" PRId32 " => %" PRId64 "\n",
s->refcount_table_size, table_size);
#endif
/* Create the new refcount table and blocks */
uint64_t meta_offset = (blocks_used * refcount_block_clusters) *
s->cluster_size;
uint64_t table_offset = meta_offset + blocks_clusters * s->cluster_size;
uint16_t *new_blocks = g_malloc0(blocks_clusters * s->cluster_size);
uint64_t *new_table = g_malloc0(table_size * sizeof(uint64_t));
assert(meta_offset >= (s->free_cluster_index * s->cluster_size));
/* Fill the new refcount table */
memcpy(new_table, s->refcount_table,
s->refcount_table_size * sizeof(uint64_t));
new_table[refcount_table_index] = new_block;
int i;
for (i = 0; i < blocks_clusters; i++) {
new_table[blocks_used + i] = meta_offset + (i * s->cluster_size);
}
/* Fill the refcount blocks */
uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t));
int block = 0;
for (i = 0; i < table_clusters + blocks_clusters; i++) {
new_blocks[block++] = cpu_to_be16(1);
}
/* Write refcount blocks to disk */
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS);
ret = bdrv_pwrite_sync(bs->file, meta_offset, new_blocks,
blocks_clusters * s->cluster_size);
g_free(new_blocks);
if (ret < 0) {
goto fail_table;
}
/* Write refcount table to disk */
for(i = 0; i < table_size; i++) {
cpu_to_be64s(&new_table[i]);
}
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE);
ret = bdrv_pwrite_sync(bs->file, table_offset, new_table,
table_size * sizeof(uint64_t));
if (ret < 0) {
goto fail_table;
}
for(i = 0; i < table_size; i++) {
be64_to_cpus(&new_table[i]);
}
/* Hook up the new refcount table in the qcow2 header */
uint8_t data[12];
cpu_to_be64w((uint64_t*)data, table_offset);
cpu_to_be32w((uint32_t*)(data + 8), table_clusters);
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE);
ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, refcount_table_offset),
data, sizeof(data));
if (ret < 0) {
goto fail_table;
}
/* And switch it in memory */
uint64_t old_table_offset = s->refcount_table_offset;
uint64_t old_table_size = s->refcount_table_size;
g_free(s->refcount_table);
s->refcount_table = new_table;
s->refcount_table_size = table_size;
s->refcount_table_offset = table_offset;
/* Free old table. Remember, we must not change free_cluster_index */
uint64_t old_free_cluster_index = s->free_cluster_index;
qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t),
QCOW2_DISCARD_OTHER);
s->free_cluster_index = old_free_cluster_index;
ret = load_refcount_block(bs, new_block, (void**) refcount_block);
if (ret < 0) {
return ret;
}
return 0;
fail_table:
g_free(new_table);
fail_block:
if (*refcount_block != NULL) {
qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block);
}
return ret;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-190'], 'message': 'qcow2: Don't rely on free_cluster_index in alloc_refcount_block() (CVE-2014-0147)
free_cluster_index is only correct if update_refcount() was called from
an allocation function, and even there it's brittle because it's used to
protect unfinished allocations which still have a refcount of 0 - if it
moves in the wrong place, the unfinished allocation can be corrupted.
So not using it any more seems to be a good idea. Instead, use the
first requested cluster to do the calculations. Return -EAGAIN if
unfinished allocations could become invalid and let the caller restart
its search for some free clusters.
The context of creating a snapshot is one situation where
update_refcount() is called outside of a cluster allocation. For this
case, the change fixes a buffer overflow if a cluster is referenced in
an L2 table that cannot be represented by an existing refcount block.
(new_table[refcount_table_index] was out of bounds)
[Bump the qemu-iotests 026 refblock_alloc.write leak count from 10 to
11.
--Stefan]
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>'</s>
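
The same message also states a caller-side contract. The hedged sketch below simulates it with a stand-in try_alloc: when growing the refcount structures would invalidate an unfinished allocation, the allocator reports -EAGAIN and the caller simply restarts its search for free clusters.

#include <errno.h>
#include <stdio.h>

/* Stand-in allocator: fails once with -EAGAIN, then succeeds. */
static int try_alloc(long long *offset)
{
    static int first = 1;
    if (first) { first = 0; return -EAGAIN; }
    *offset = 0x10000;
    return 0;
}

int main(void)
{
    long long offset;
    int ret;
    do {
        ret = try_alloc(&offset);   /* restart the search on -EAGAIN */
    } while (ret == -EAGAIN);
    if (ret == 0) {
        printf("allocated at %llx\n", offset);
    }
    return 0;
}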
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
{
BDRVBochsState *s = bs->opaque;
int64_t offset = sector_num * 512;
int64_t extent_index, extent_offset, bitmap_offset;
char bitmap_entry;
// seek to sector
extent_index = offset / s->extent_size;
extent_offset = (offset % s->extent_size) / 512;
if (s->catalog_bitmap[extent_index] == 0xffffffff) {
return -1; /* not allocated */
}
bitmap_offset = s->data_offset + (512 * s->catalog_bitmap[extent_index] *
(s->extent_blocks + s->bitmap_blocks));
/* read in bitmap for current extent */
if (bdrv_pread(bs->file, bitmap_offset + (extent_offset / 8),
&bitmap_entry, 1) != 1) {
return -1;
}
if (!((bitmap_entry >> (extent_offset % 8)) & 1)) {
return -1; /* not allocated */
}
return bitmap_offset + (512 * (s->bitmap_blocks + extent_offset));
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-190'], 'message': 'bochs: Use unsigned variables for offsets and sizes (CVE-2014-0147)
Gets us rid of integer overflows resulting in negative sizes which
aren't correctly checked.
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Stefan Hajnoczi <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>'</s>
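
The fix pattern generalizes to a short standalone sketch (names and the file-size check are illustrative): keep offset arithmetic in uint64_t and range-check the result, instead of mixing 32-bit signed intermediates that can wrap to negative values.

#include <stdint.h>
#include <stdio.h>

static int extent_offset_ok(uint32_t catalog_entry, uint32_t extent_blocks,
                            uint32_t bitmap_blocks, uint64_t file_size,
                            uint64_t *out)
{
    /* 512ULL forces the whole product into unsigned 64-bit math */
    uint64_t off = 512ULL * catalog_entry *
                   ((uint64_t)extent_blocks + bitmap_blocks);
    if (off > file_size) {       /* reject out-of-range offsets */
        return -1;
    }
    *out = off;
    return 0;
}

int main(void)
{
    uint64_t off;
    if (extent_offset_ok(4000000, 128, 1, 1ULL << 40, &off) == 0) {
        printf("offset = %llu\n", (unsigned long long)off);
    } else {
        printf("rejected\n");
    }
    return 0;
}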
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int qcow2_snapshot_load_tmp(BlockDriverState *bs,
const char *snapshot_id,
const char *name,
Error **errp)
{
int i, snapshot_index;
BDRVQcowState *s = bs->opaque;
QCowSnapshot *sn;
uint64_t *new_l1_table;
int new_l1_bytes;
int ret;
assert(bs->read_only);
/* Search the snapshot */
snapshot_index = find_snapshot_by_id_and_name(bs, snapshot_id, name);
if (snapshot_index < 0) {
error_setg(errp,
"Can't find snapshot");
return -ENOENT;
}
sn = &s->snapshots[snapshot_index];
/* Allocate and read in the snapshot's L1 table */
new_l1_bytes = s->l1_size * sizeof(uint64_t);
new_l1_table = g_malloc0(align_offset(new_l1_bytes, 512));
ret = bdrv_pread(bs->file, sn->l1_table_offset, new_l1_table, new_l1_bytes);
if (ret < 0) {
error_setg(errp, "Failed to read l1 table for snapshot");
g_free(new_l1_table);
return ret;
}
/* Switch the L1 table */
g_free(s->l1_table);
s->l1_size = sn->l1_size;
s->l1_table_offset = sn->l1_table_offset;
s->l1_table = new_l1_table;
for(i = 0;i < s->l1_size; i++) {
be64_to_cpus(&s->l1_table[i]);
}
return 0;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'qcow2: Fix L1 allocation size in qcow2_snapshot_load_tmp() (CVE-2014-0145)
For the L1 table to loaded for an internal snapshot, the code allocated
only enough memory to hold the currently active L1 table. If the
snapshot's L1 table is actually larger than the current one, this leads
to a buffer overflow.
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>'</s>
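
A hedged standalone sketch of the corrected allocation, with an illustrative struct: the buffer is sized from the snapshot's own l1_size, so a snapshot L1 table larger than the active one can no longer overflow it.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

struct snap { uint32_t l1_size; };   /* illustrative stand-in */

static uint64_t *alloc_snapshot_l1(const struct snap *sn)
{
    /* size from the snapshot's table, not the active s->l1_size */
    size_t bytes = (size_t)sn->l1_size * sizeof(uint64_t);
    return calloc(1, bytes ? bytes : 1);  /* then read 'bytes' from disk */
}

int main(void)
{
    struct snap sn = { .l1_size = 4096 };
    uint64_t *l1 = alloc_snapshot_l1(&sn);
    printf("%p\n", (void *)l1);
    free(l1);
    return 0;
}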
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVDMGState *s = bs->opaque;
uint64_t info_begin, info_end, last_in_offset, last_out_offset;
uint32_t count, tmp;
uint32_t max_compressed_size = 1, max_sectors_per_chunk = 1, i;
int64_t offset;
int ret;
bs->read_only = 1;
s->n_chunks = 0;
s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
/* read offset of info blocks */
offset = bdrv_getlength(bs->file);
if (offset < 0) {
ret = offset;
goto fail;
}
offset -= 0x1d8;
ret = read_uint64(bs, offset, &info_begin);
if (ret < 0) {
goto fail;
} else if (info_begin == 0) {
ret = -EINVAL;
goto fail;
}
ret = read_uint32(bs, info_begin, &tmp);
if (ret < 0) {
goto fail;
} else if (tmp != 0x100) {
ret = -EINVAL;
goto fail;
}
ret = read_uint32(bs, info_begin + 4, &count);
if (ret < 0) {
goto fail;
} else if (count == 0) {
ret = -EINVAL;
goto fail;
}
info_end = info_begin + count;
offset = info_begin + 0x100;
/* read offsets */
last_in_offset = last_out_offset = 0;
while (offset < info_end) {
uint32_t type;
ret = read_uint32(bs, offset, &count);
if (ret < 0) {
goto fail;
} else if (count == 0) {
ret = -EINVAL;
goto fail;
}
offset += 4;
ret = read_uint32(bs, offset, &type);
if (ret < 0) {
goto fail;
}
if (type == 0x6d697368 && count >= 244) {
size_t new_size;
uint32_t chunk_count;
offset += 4;
offset += 200;
chunk_count = (count - 204) / 40;
new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
s->types = g_realloc(s->types, new_size / 2);
s->offsets = g_realloc(s->offsets, new_size);
s->lengths = g_realloc(s->lengths, new_size);
s->sectors = g_realloc(s->sectors, new_size);
s->sectorcounts = g_realloc(s->sectorcounts, new_size);
for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
ret = read_uint32(bs, offset, &s->types[i]);
if (ret < 0) {
goto fail;
}
offset += 4;
if (s->types[i] != 0x80000005 && s->types[i] != 1 &&
s->types[i] != 2) {
if (s->types[i] == 0xffffffff && i > 0) {
last_in_offset = s->offsets[i - 1] + s->lengths[i - 1];
last_out_offset = s->sectors[i - 1] +
s->sectorcounts[i - 1];
}
chunk_count--;
i--;
offset += 36;
continue;
}
offset += 4;
ret = read_uint64(bs, offset, &s->sectors[i]);
if (ret < 0) {
goto fail;
}
s->sectors[i] += last_out_offset;
offset += 8;
ret = read_uint64(bs, offset, &s->sectorcounts[i]);
if (ret < 0) {
goto fail;
}
offset += 8;
ret = read_uint64(bs, offset, &s->offsets[i]);
if (ret < 0) {
goto fail;
}
s->offsets[i] += last_in_offset;
offset += 8;
ret = read_uint64(bs, offset, &s->lengths[i]);
if (ret < 0) {
goto fail;
}
offset += 8;
if (s->lengths[i] > max_compressed_size) {
max_compressed_size = s->lengths[i];
}
if (s->sectorcounts[i] > max_sectors_per_chunk) {
max_sectors_per_chunk = s->sectorcounts[i];
}
}
s->n_chunks += chunk_count;
}
}
/* initialize zlib engine */
s->compressed_chunk = g_malloc(max_compressed_size + 1);
s->uncompressed_chunk = g_malloc(512 * max_sectors_per_chunk);
if (inflateInit(&s->zstream) != Z_OK) {
ret = -EINVAL;
goto fail;
}
s->current_chunk = s->n_chunks;
qemu_co_mutex_init(&s->lock);
return 0;
fail:
g_free(s->types);
g_free(s->offsets);
g_free(s->lengths);
g_free(s->sectors);
g_free(s->sectorcounts);
g_free(s->compressed_chunk);
g_free(s->uncompressed_chunk);
return ret;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'dmg: sanitize chunk length and sectorcount (CVE-2014-0145)
Chunk length and sectorcount are used for decompression buffers as well
as the bdrv_pread() count argument. Ensure that they have reasonable
values so neither memory allocation nor conversion from uint64_t to int
will cause problems.
Signed-off-by: Stefan Hajnoczi <[email protected]>
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>'</s>
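
A standalone sketch of the sanitization idea; the caps are illustrative, not the exact limits QEMU chose: reject a chunk whose compressed length or sector count is implausible before either value sizes a buffer or is narrowed to int for a read.

#include <stdint.h>
#include <stdio.h>

enum {
    MAX_CHUNK_BYTES   = 64 << 20,          /* 64 MiB compressed cap  */
    MAX_CHUNK_SECTORS = (64 << 20) / 512   /* matching sector bound  */
};

static int chunk_sane(uint64_t length, uint64_t sectorcount)
{
    return length != 0 && length <= MAX_CHUNK_BYTES &&
           sectorcount != 0 && sectorcount <= MAX_CHUNK_SECTORS;
}

int main(void)
{
    printf("%d %d\n", chunk_sane(4096, 8), chunk_sane(UINT64_MAX, 1));
    return 0;
}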
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
struct nf_queue_entry *entry,
__be32 **packet_id_ptr)
{
size_t size;
size_t data_len = 0, cap_len = 0;
unsigned int hlen = 0;
struct sk_buff *skb;
struct nlattr *nla;
struct nfqnl_msg_packet_hdr *pmsg;
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
struct sk_buff *entskb = entry->skb;
struct net_device *indev;
struct net_device *outdev;
struct nf_conn *ct = NULL;
enum ip_conntrack_info uninitialized_var(ctinfo);
bool csum_verify;
size = nlmsg_total_size(sizeof(struct nfgenmsg))
+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
#endif
+ nla_total_size(sizeof(u_int32_t)) /* mark */
+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
+ nla_total_size(sizeof(u_int32_t)) /* skbinfo */
+ nla_total_size(sizeof(u_int32_t)); /* cap_len */
if (entskb->tstamp.tv64)
size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
if (entry->hook <= NF_INET_FORWARD ||
(entry->hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
csum_verify = !skb_csum_unnecessary(entskb);
else
csum_verify = false;
outdev = entry->outdev;
switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
case NFQNL_COPY_META:
case NFQNL_COPY_NONE:
break;
case NFQNL_COPY_PACKET:
if (!(queue->flags & NFQA_CFG_F_GSO) &&
entskb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_help(entskb))
return NULL;
data_len = ACCESS_ONCE(queue->copy_range);
if (data_len > entskb->len)
data_len = entskb->len;
hlen = skb_zerocopy_headlen(entskb);
hlen = min_t(unsigned int, hlen, data_len);
size += sizeof(struct nlattr) + hlen;
cap_len = entskb->len;
break;
}
if (queue->flags & NFQA_CFG_F_CONNTRACK)
ct = nfqnl_ct_get(entskb, &size, &ctinfo);
if (queue->flags & NFQA_CFG_F_UID_GID) {
size += (nla_total_size(sizeof(u_int32_t)) /* uid */
+ nla_total_size(sizeof(u_int32_t))); /* gid */
}
skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
GFP_ATOMIC);
if (!skb)
return NULL;
nlh = nlmsg_put(skb, 0, 0,
NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
sizeof(struct nfgenmsg), 0);
if (!nlh) {
kfree_skb(skb);
return NULL;
}
nfmsg = nlmsg_data(nlh);
nfmsg->nfgen_family = entry->pf;
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = htons(queue->queue_num);
nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
pmsg = nla_data(nla);
pmsg->hw_protocol = entskb->protocol;
pmsg->hook = entry->hook;
*packet_id_ptr = &pmsg->packet_id;
indev = entry->indev;
if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
goto nla_put_failure;
#else
if (entry->pf == PF_BRIDGE) {
/* Case 1: indev is physical input device, we need to
* look for bridge group (when called from
* netfilter_bridge) */
if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
htonl(indev->ifindex)) ||
/* this is the bridge group "brX" */
/* rcu_read_lock()ed by __nf_queue */
nla_put_be32(skb, NFQA_IFINDEX_INDEV,
htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
goto nla_put_failure;
} else {
/* Case 2: indev is bridge group, we need to look for
* physical device (when called from ipv4) */
if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
htonl(indev->ifindex)))
goto nla_put_failure;
if (entskb->nf_bridge && entskb->nf_bridge->physindev &&
nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
htonl(entskb->nf_bridge->physindev->ifindex)))
goto nla_put_failure;
}
#endif
}
if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
goto nla_put_failure;
#else
if (entry->pf == PF_BRIDGE) {
/* Case 1: outdev is physical output device, we need to
* look for bridge group (when called from
* netfilter_bridge) */
if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
htonl(outdev->ifindex)) ||
/* this is the bridge group "brX" */
/* rcu_read_lock()ed by __nf_queue */
nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
goto nla_put_failure;
} else {
/* Case 2: outdev is bridge group, we need to look for
* physical output device (when called from ipv4) */
if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
htonl(outdev->ifindex)))
goto nla_put_failure;
if (entskb->nf_bridge && entskb->nf_bridge->physoutdev &&
nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
htonl(entskb->nf_bridge->physoutdev->ifindex)))
goto nla_put_failure;
}
#endif
}
if (entskb->mark &&
nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
goto nla_put_failure;
if (indev && entskb->dev &&
entskb->mac_header != entskb->network_header) {
struct nfqnl_msg_packet_hw phw;
int len;
memset(&phw, 0, sizeof(phw));
len = dev_parse_header(entskb, phw.hw_addr);
if (len) {
phw.hw_addrlen = htons(len);
if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
goto nla_put_failure;
}
}
if (entskb->tstamp.tv64) {
struct nfqnl_msg_packet_timestamp ts;
struct timeval tv = ktime_to_timeval(entskb->tstamp);
ts.sec = cpu_to_be64(tv.tv_sec);
ts.usec = cpu_to_be64(tv.tv_usec);
if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
goto nla_put_failure;
}
if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
goto nla_put_failure;
if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
goto nla_put_failure;
if (cap_len > data_len &&
nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
goto nla_put_failure;
if (nfqnl_put_packet_info(skb, entskb, csum_verify))
goto nla_put_failure;
if (data_len) {
struct nlattr *nla;
if (skb_tailroom(skb) < sizeof(*nla) + hlen)
goto nla_put_failure;
nla = (struct nlattr *)skb_put(skb, sizeof(*nla));
nla->nla_type = NFQA_PAYLOAD;
nla->nla_len = nla_attr_size(data_len);
skb_zerocopy(skb, entskb, data_len, hlen);
}
nlh->nlmsg_len = skb->len;
return skb;
nla_put_failure:
kfree_skb(skb);
net_err_ratelimited("nf_queue: error creating packet message\n");
return NULL;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-416'], 'message': 'core, nfqueue, openvswitch: Orphan frags in skb_zerocopy and handle errors
skb_zerocopy can copy elements of the frags array between skbs, but it doesn't
orphan them. Also, it doesn't handle errors, so this patch takes care of that
as well, and modify the callers accordingly. skb_tx_error() is also added to
the callers so they will signal the failed delivery towards the creator of the
skb.
Signed-off-by: Zoltan Kiss <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
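
A caller-side sketch of the contract the message describes, with the kernel types stubbed so the shape is visible in isolation; it compiles as a translation unit but is not buildable against the real kernel as-is: skb_zerocopy() now reports errors, and the caller signals the failed delivery with skb_tx_error().

#include <stddef.h>

struct sk_buff;
int  skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen);
void skb_tx_error(struct sk_buff *skb);
void kfree_skb(struct sk_buff *skb);

static struct sk_buff *emit_payload(struct sk_buff *skb,
                                    struct sk_buff *entskb,
                                    int data_len, int hlen)
{
    if (data_len && skb_zerocopy(skb, entskb, data_len, hlen) < 0) {
        skb_tx_error(entskb);  /* tell the skb's creator delivery failed */
        kfree_skb(skb);
        return NULL;
    }
    return skb;
}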
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVParallelsState *s = bs->opaque;
int i;
struct parallels_header ph;
int ret;
bs->read_only = 1; // no write support yet
ret = bdrv_pread(bs->file, 0, &ph, sizeof(ph));
if (ret < 0) {
goto fail;
}
if (memcmp(ph.magic, HEADER_MAGIC, 16) ||
(le32_to_cpu(ph.version) != HEADER_VERSION)) {
error_setg(errp, "Image not in Parallels format");
ret = -EINVAL;
goto fail;
}
bs->total_sectors = le32_to_cpu(ph.nb_sectors);
s->tracks = le32_to_cpu(ph.tracks);
s->catalog_size = le32_to_cpu(ph.catalog_entries);
s->catalog_bitmap = g_malloc(s->catalog_size * 4);
ret = bdrv_pread(bs->file, 64, s->catalog_bitmap, s->catalog_size * 4);
if (ret < 0) {
goto fail;
}
for (i = 0; i < s->catalog_size; i++)
le32_to_cpus(&s->catalog_bitmap[i]);
qemu_co_mutex_init(&s->lock);
return 0;
fail:
g_free(s->catalog_bitmap);
return ret;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-190'], 'message': 'parallels: Fix catalog size integer overflow (CVE-2014-0143)
The first test case would cause a huge memory allocation, leading to a
qemu abort; the second one to a too small malloc() for the catalog
(smaller than s->catalog_size), which causes a read-only out-of-bounds
array access and on big endian hosts an endianess conversion for an
undefined memory area.
The sample image used here is not an original Parallels image. It was
created using an hexeditor on the basis of the struct that qemu uses.
Good enough for trying to crash the driver, but not for ensuring
compatibility.
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>'</s>
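
A hedged standalone sketch of the header validation (the cap is illustrative): bound the 32-bit entry count before the *4 multiplication, so the allocation can neither wrap nor balloon to a size that aborts g_malloc().

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define MAX_CATALOG_ENTRIES (1u << 24)   /* hypothetical upper bound */

static uint32_t *alloc_catalog(uint32_t catalog_entries)
{
    if (catalog_entries == 0 || catalog_entries > MAX_CATALOG_ENTRIES) {
        return NULL;  /* reject corrupt or hostile headers */
    }
    return calloc(catalog_entries, sizeof(uint32_t));
}

int main(void)
{
    printf("%p\n", (void *)alloc_catalog(UINT32_MAX)); /* rejected */
    return 0;
}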
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVQcowState *s = bs->opaque;
unsigned int len, i;
int ret = 0;
QCowHeader header;
QemuOpts *opts;
Error *local_err = NULL;
uint64_t ext_end;
uint64_t l1_vm_state_index;
const char *opt_overlap_check;
int overlap_check_template = 0;
ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read qcow2 header");
goto fail;
}
be32_to_cpus(&header.magic);
be32_to_cpus(&header.version);
be64_to_cpus(&header.backing_file_offset);
be32_to_cpus(&header.backing_file_size);
be64_to_cpus(&header.size);
be32_to_cpus(&header.cluster_bits);
be32_to_cpus(&header.crypt_method);
be64_to_cpus(&header.l1_table_offset);
be32_to_cpus(&header.l1_size);
be64_to_cpus(&header.refcount_table_offset);
be32_to_cpus(&header.refcount_table_clusters);
be64_to_cpus(&header.snapshots_offset);
be32_to_cpus(&header.nb_snapshots);
if (header.magic != QCOW_MAGIC) {
error_setg(errp, "Image is not in qcow2 format");
ret = -EINVAL;
goto fail;
}
if (header.version < 2 || header.version > 3) {
report_unsupported(bs, errp, "QCOW version %d", header.version);
ret = -ENOTSUP;
goto fail;
}
s->qcow_version = header.version;
/* Initialise cluster size */
if (header.cluster_bits < MIN_CLUSTER_BITS ||
header.cluster_bits > MAX_CLUSTER_BITS) {
error_setg(errp, "Unsupported cluster size: 2^%i", header.cluster_bits);
ret = -EINVAL;
goto fail;
}
s->cluster_bits = header.cluster_bits;
s->cluster_size = 1 << s->cluster_bits;
s->cluster_sectors = 1 << (s->cluster_bits - 9);
/* Initialise version 3 header fields */
if (header.version == 2) {
header.incompatible_features = 0;
header.compatible_features = 0;
header.autoclear_features = 0;
header.refcount_order = 4;
header.header_length = 72;
} else {
be64_to_cpus(&header.incompatible_features);
be64_to_cpus(&header.compatible_features);
be64_to_cpus(&header.autoclear_features);
be32_to_cpus(&header.refcount_order);
be32_to_cpus(&header.header_length);
if (header.header_length < 104) {
error_setg(errp, "qcow2 header too short");
ret = -EINVAL;
goto fail;
}
}
if (header.header_length > s->cluster_size) {
error_setg(errp, "qcow2 header exceeds cluster size");
ret = -EINVAL;
goto fail;
}
if (header.header_length > sizeof(header)) {
s->unknown_header_fields_size = header.header_length - sizeof(header);
s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields,
s->unknown_header_fields_size);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read unknown qcow2 header "
"fields");
goto fail;
}
}
if (header.backing_file_offset > s->cluster_size) {
error_setg(errp, "Invalid backing file offset");
ret = -EINVAL;
goto fail;
}
if (header.backing_file_offset) {
ext_end = header.backing_file_offset;
} else {
ext_end = 1 << header.cluster_bits;
}
/* Handle feature bits */
s->incompatible_features = header.incompatible_features;
s->compatible_features = header.compatible_features;
s->autoclear_features = header.autoclear_features;
if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {
void *feature_table = NULL;
qcow2_read_extensions(bs, header.header_length, ext_end,
&feature_table, NULL);
report_unsupported_feature(bs, errp, feature_table,
s->incompatible_features &
~QCOW2_INCOMPAT_MASK);
ret = -ENOTSUP;
g_free(feature_table);
goto fail;
}
if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
/* Corrupt images may not be written to unless they are being repaired
*/
if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) {
error_setg(errp, "qcow2: Image is corrupt; cannot be opened "
"read/write");
ret = -EACCES;
goto fail;
}
}
/* Check support for various header values */
if (header.refcount_order != 4) {
report_unsupported(bs, errp, "%d bit reference counts",
1 << header.refcount_order);
ret = -ENOTSUP;
goto fail;
}
s->refcount_order = header.refcount_order;
if (header.crypt_method > QCOW_CRYPT_AES) {
error_setg(errp, "Unsupported encryption method: %i",
header.crypt_method);
ret = -EINVAL;
goto fail;
}
s->crypt_method_header = header.crypt_method;
if (s->crypt_method_header) {
bs->encrypted = 1;
}
s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
s->l2_size = 1 << s->l2_bits;
bs->total_sectors = header.size / 512;
s->csize_shift = (62 - (s->cluster_bits - 8));
s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
s->refcount_table_offset = header.refcount_table_offset;
s->refcount_table_size =
header.refcount_table_clusters << (s->cluster_bits - 3);
if (header.refcount_table_clusters > qcow2_max_refcount_clusters(s)) {
error_setg(errp, "Reference count table too large");
ret = -EINVAL;
goto fail;
}
ret = validate_table_offset(bs, s->refcount_table_offset,
s->refcount_table_size, sizeof(uint64_t));
if (ret < 0) {
error_setg(errp, "Invalid reference count table offset");
goto fail;
}
/* Snapshot table offset/length */
if (header.nb_snapshots > QCOW_MAX_SNAPSHOTS) {
error_setg(errp, "Too many snapshots");
ret = -EINVAL;
goto fail;
}
ret = validate_table_offset(bs, header.snapshots_offset,
header.nb_snapshots,
sizeof(QCowSnapshotHeader));
if (ret < 0) {
error_setg(errp, "Invalid snapshot table offset");
goto fail;
}
/* read the level 1 table */
if (header.l1_size > 0x2000000) {
/* 32 MB L1 table is enough for 2 PB images at 64k cluster size
* (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
error_setg(errp, "Active L1 table too large");
ret = -EFBIG;
goto fail;
}
s->l1_size = header.l1_size;
l1_vm_state_index = size_to_l1(s, header.size);
if (l1_vm_state_index > INT_MAX) {
error_setg(errp, "Image is too big");
ret = -EFBIG;
goto fail;
}
s->l1_vm_state_index = l1_vm_state_index;
/* the L1 table must contain at least enough entries to put
header.size bytes */
if (s->l1_size < s->l1_vm_state_index) {
error_setg(errp, "L1 table is too small");
ret = -EINVAL;
goto fail;
}
ret = validate_table_offset(bs, header.l1_table_offset,
header.l1_size, sizeof(uint64_t));
if (ret < 0) {
error_setg(errp, "Invalid L1 table offset");
goto fail;
}
s->l1_table_offset = header.l1_table_offset;
if (s->l1_size > 0) {
s->l1_table = g_malloc0(
align_offset(s->l1_size * sizeof(uint64_t), 512));
ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
s->l1_size * sizeof(uint64_t));
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read L1 table");
goto fail;
}
for(i = 0;i < s->l1_size; i++) {
be64_to_cpus(&s->l1_table[i]);
}
}
/* alloc L2 table/refcount block cache */
s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE);
s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE);
s->cluster_cache = g_malloc(s->cluster_size);
/* one more sector for decompressed data alignment */
s->cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
+ 512);
s->cluster_cache_offset = -1;
s->flags = flags;
ret = qcow2_refcount_init(bs);
if (ret != 0) {
error_setg_errno(errp, -ret, "Could not initialize refcount handling");
goto fail;
}
QLIST_INIT(&s->cluster_allocs);
QTAILQ_INIT(&s->discards);
/* read qcow2 extensions */
if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL,
&local_err)) {
error_propagate(errp, local_err);
ret = -EINVAL;
goto fail;
}
/* read the backing file name */
if (header.backing_file_offset != 0) {
len = header.backing_file_size;
if (len > MIN(1023, s->cluster_size - header.backing_file_offset)) {
error_setg(errp, "Backing file name too long");
ret = -EINVAL;
goto fail;
}
ret = bdrv_pread(bs->file, header.backing_file_offset,
bs->backing_file, len);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read backing file name");
goto fail;
}
bs->backing_file[len] = '\0';
}
/* Internal snapshots */
s->snapshots_offset = header.snapshots_offset;
s->nb_snapshots = header.nb_snapshots;
ret = qcow2_read_snapshots(bs);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read snapshots");
goto fail;
}
/* Clear unknown autoclear feature bits */
if (!bs->read_only && !(flags & BDRV_O_INCOMING) && s->autoclear_features) {
s->autoclear_features = 0;
ret = qcow2_update_header(bs);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not update qcow2 header");
goto fail;
}
}
/* Initialise locks */
qemu_co_mutex_init(&s->lock);
/* Repair image if dirty */
if (!(flags & (BDRV_O_CHECK | BDRV_O_INCOMING)) && !bs->read_only &&
(s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) {
BdrvCheckResult result = {0};
ret = qcow2_check(bs, &result, BDRV_FIX_ERRORS);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not repair dirty image");
goto fail;
}
}
/* Enable lazy_refcounts according to image and command line options */
opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort);
qemu_opts_absorb_qdict(opts, options, &local_err);
if (local_err) {
error_propagate(errp, local_err);
ret = -EINVAL;
goto fail;
}
s->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS,
(s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS));
s->discard_passthrough[QCOW2_DISCARD_NEVER] = false;
s->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true;
s->discard_passthrough[QCOW2_DISCARD_REQUEST] =
qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST,
flags & BDRV_O_UNMAP);
s->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] =
qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true);
s->discard_passthrough[QCOW2_DISCARD_OTHER] =
qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false);
opt_overlap_check = qemu_opt_get(opts, "overlap-check") ?: "cached";
if (!strcmp(opt_overlap_check, "none")) {
overlap_check_template = 0;
} else if (!strcmp(opt_overlap_check, "constant")) {
overlap_check_template = QCOW2_OL_CONSTANT;
} else if (!strcmp(opt_overlap_check, "cached")) {
overlap_check_template = QCOW2_OL_CACHED;
} else if (!strcmp(opt_overlap_check, "all")) {
overlap_check_template = QCOW2_OL_ALL;
} else {
error_setg(errp, "Unsupported value '%s' for qcow2 option "
"'overlap-check'. Allowed are either of the following: "
"none, constant, cached, all", opt_overlap_check);
qemu_opts_del(opts);
ret = -EINVAL;
goto fail;
}
s->overlap_check = 0;
for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) {
/* overlap-check defines a template bitmask, but every flag may be
* overwritten through the associated boolean option */
s->overlap_check |=
qemu_opt_get_bool(opts, overlap_bool_option_names[i],
overlap_check_template & (1 << i)) << i;
}
qemu_opts_del(opts);
if (s->use_lazy_refcounts && s->qcow_version < 3) {
error_setg(errp, "Lazy refcounts require a qcow2 image with at least "
"qemu 1.1 compatibility level");
ret = -EINVAL;
goto fail;
}
#ifdef DEBUG_ALLOC
{
BdrvCheckResult result = {0};
qcow2_check_refcounts(bs, &result, 0);
}
#endif
return ret;
fail:
g_free(s->unknown_header_fields);
cleanup_unknown_header_ext(bs);
qcow2_free_snapshots(bs);
qcow2_refcount_close(bs);
g_free(s->l1_table);
/* else pre-write overlap checks in cache_destroy may crash */
s->l1_table = NULL;
if (s->l2_table_cache) {
qcow2_cache_destroy(bs, s->l2_table_cache);
}
if (s->refcount_block_cache) {
qcow2_cache_destroy(bs, s->refcount_block_cache);
}
g_free(s->cluster_cache);
qemu_vfree(s->cluster_data);
return ret;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-190'], 'message': 'qcow2: Check maximum L1 size in qcow2_snapshot_load_tmp() (CVE-2014-0143)
This avoids an unbounded allocation.
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>'</s>
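
A standalone sketch of the added bound, assuming the same 32 MB ceiling the open path above enforces ("Active L1 table too large"): apply it to a snapshot's L1 before any allocation is sized from it.

#include <stdint.h>
#include <stdio.h>

#define MAX_L1_BYTES (32u << 20)   /* 32 MB ceiling, as in the open path */

static int snapshot_l1_ok(uint64_t l1_entries)
{
    return l1_entries <= MAX_L1_BYTES / sizeof(uint64_t);
}

int main(void)
{
    printf("%d %d\n", snapshot_l1_ok(4096), snapshot_l1_ok(1ULL << 40));
    return 0;
}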
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
bool exact_size)
{
BDRVQcowState *s = bs->opaque;
int new_l1_size2, ret, i;
uint64_t *new_l1_table;
int64_t old_l1_table_offset, old_l1_size;
int64_t new_l1_table_offset, new_l1_size;
uint8_t data[12];
if (min_size <= s->l1_size)
return 0;
if (exact_size) {
new_l1_size = min_size;
} else {
/* Bump size up to reduce the number of times we have to grow */
new_l1_size = s->l1_size;
if (new_l1_size == 0) {
new_l1_size = 1;
}
while (min_size > new_l1_size) {
new_l1_size = (new_l1_size * 3 + 1) / 2;
}
}
if (new_l1_size > INT_MAX) {
return -EFBIG;
}
#ifdef DEBUG_ALLOC2
fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
s->l1_size, new_l1_size);
#endif
new_l1_size2 = sizeof(uint64_t) * new_l1_size;
new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
/* write new table (align to cluster) */
BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
if (new_l1_table_offset < 0) {
g_free(new_l1_table);
return new_l1_table_offset;
}
ret = qcow2_cache_flush(bs, s->refcount_block_cache);
if (ret < 0) {
goto fail;
}
/* the L1 position has not yet been updated, so these clusters must
* indeed be completely free */
ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
new_l1_size2);
if (ret < 0) {
goto fail;
}
BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
for(i = 0; i < s->l1_size; i++)
new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table, new_l1_size2);
if (ret < 0)
goto fail;
for(i = 0; i < s->l1_size; i++)
new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
/* set new table */
BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
cpu_to_be32w((uint32_t*)data, new_l1_size);
stq_be_p(data + 4, new_l1_table_offset);
ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size), data,sizeof(data));
if (ret < 0) {
goto fail;
}
g_free(s->l1_table);
old_l1_table_offset = s->l1_table_offset;
s->l1_table_offset = new_l1_table_offset;
s->l1_table = new_l1_table;
old_l1_size = s->l1_size;
s->l1_size = new_l1_size;
qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
QCOW2_DISCARD_OTHER);
return 0;
fail:
g_free(new_l1_table);
qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
QCOW2_DISCARD_OTHER);
return ret;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-190'], 'message': 'qcow2: Fix new L1 table size check (CVE-2014-0143)
The size in bytes is assigned to an int later, so check that instead of
the number of entries.
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>'</s>
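
The corrected check is easy to show in isolation; this sketch mirrors the byte-size test the message describes: validate the byte count that will later be stored in an int (new_l1_size2 above), not the raw entry count.

#include <stdint.h>
#include <limits.h>
#include <stdio.h>

static int l1_growth_ok(uint64_t new_l1_entries)
{
    /* entries * 8 must still fit in an int */
    return new_l1_entries <= INT_MAX / sizeof(uint64_t);
}

int main(void)
{
    printf("%d %d\n", l1_growth_ok(1 << 20), l1_growth_ok(1ULL << 31));
    return 0;
}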
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVBochsState *s = bs->opaque;
uint32_t i;
struct bochs_header bochs;
int ret;
bs->read_only = 1; // no write support yet
ret = bdrv_pread(bs->file, 0, &bochs, sizeof(bochs));
if (ret < 0) {
return ret;
}
if (strcmp(bochs.magic, HEADER_MAGIC) ||
strcmp(bochs.type, REDOLOG_TYPE) ||
strcmp(bochs.subtype, GROWING_TYPE) ||
((le32_to_cpu(bochs.version) != HEADER_VERSION) &&
(le32_to_cpu(bochs.version) != HEADER_V1))) {
error_setg(errp, "Image not in Bochs format");
return -EINVAL;
}
if (le32_to_cpu(bochs.version) == HEADER_V1) {
bs->total_sectors = le64_to_cpu(bochs.extra.redolog_v1.disk) / 512;
} else {
bs->total_sectors = le64_to_cpu(bochs.extra.redolog.disk) / 512;
}
s->catalog_size = le32_to_cpu(bochs.catalog);
s->catalog_bitmap = g_malloc(s->catalog_size * 4);
ret = bdrv_pread(bs->file, le32_to_cpu(bochs.header), s->catalog_bitmap,
s->catalog_size * 4);
if (ret < 0) {
goto fail;
}
for (i = 0; i < s->catalog_size; i++)
le32_to_cpus(&s->catalog_bitmap[i]);
s->data_offset = le32_to_cpu(bochs.header) + (s->catalog_size * 4);
s->bitmap_blocks = 1 + (le32_to_cpu(bochs.bitmap) - 1) / 512;
s->extent_blocks = 1 + (le32_to_cpu(bochs.extent) - 1) / 512;
s->extent_size = le32_to_cpu(bochs.extent);
qemu_co_mutex_init(&s->lock);
return 0;
fail:
g_free(s->catalog_bitmap);
return ret;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-190'], 'message': 'bochs: Check catalog_size header field (CVE-2014-0143)
It should neither become negative nor allow unbounded memory
allocations. This fixes aborts in g_malloc() and an s->catalog_bitmap
buffer overflow on big endian hosts.
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Stefan Hajnoczi <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>'</s>
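
A hedged sketch of the catalog_size validation; the bound shown is illustrative, not the exact test the commit added: keep the value small enough that catalog_size * 4 can neither go negative nor drive an unbounded allocation.

#include <stdint.h>
#include <limits.h>
#include <stdio.h>

static int catalog_size_ok(uint32_t catalog_size)
{
    return catalog_size <= (uint32_t)(INT_MAX / 4);
}

int main(void)
{
    printf("%d %d\n", catalog_size_ok(1024), catalog_size_ok(UINT32_MAX));
    return 0;
}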
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: cert_hostcheck(const char *match_pattern, const char *hostname)
{
if(!match_pattern || !*match_pattern ||
!hostname || !*hostname) /* sanity check */
return 0;
if(Curl_raw_equal(hostname, match_pattern)) /* trivial case */
return 1;
if(hostmatch(hostname,match_pattern) == HOST_MATCH)
return 1;
return 0;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-310'], 'message': 'use hostmatch function from latest curl (addresses CVE-2014-0139)'</s>
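
A rough standalone sketch of the stricter wildcard rule newer hostmatch implementations enforce; this is simplified, and the real curl code additionally refuses to wildcard-match literal IP addresses: the '*' may appear only in the left-most label, and at least two further labels must follow, so "*.com" cannot match.

#include <string.h>
#include <stdio.h>

static int wildcard_ok(const char *pattern)
{
    const char *wc  = strchr(pattern, '*');
    const char *dot = strchr(pattern, '.');
    if (!wc) return 1;                    /* no wildcard: nothing to do */
    if (!dot || wc > dot) return 0;       /* '*' only before first '.'  */
    return strchr(dot + 1, '.') != NULL;  /* require two labels after   */
}

int main(void)
{
    printf("%d %d\n", wildcard_ok("*.example.com"), wildcard_ok("*.com"));
    return 0;
}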
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int rds_ib_laddr_check(__be32 addr)
{
int ret;
struct rdma_cm_id *cm_id;
struct sockaddr_in sin;
/* Create a CMA ID and try to bind it. This catches both
* IB and iWARP capable NICs.
*/
cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
memset(&sin, 0, sizeof(sin));
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = addr;
/* rdma_bind_addr will only succeed for IB & iWARP devices */
ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
/* due to this, we will claim to support iWARP devices unless we
check node_type. */
if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA)
ret = -EADDRNOTAVAIL;
rdsdebug("addr %pI4 ret %d node type %d\n",
&addr, ret,
cm_id->device ? cm_id->device->node_type : -1);
rdma_destroy_id(cm_id);
return ret;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-399', 'CWE-476'], 'message': 'rds: prevent dereference of a NULL device
Binding might result in a NULL device, which is dereferenced
causing this BUG:
[ 1317.260548] BUG: unable to handle kernel NULL pointer dereference at 000000000000097
4
[ 1317.261847] IP: [<ffffffff84225f52>] rds_ib_laddr_check+0x82/0x110
[ 1317.263315] PGD 418bcb067 PUD 3ceb21067 PMD 0
[ 1317.263502] Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC
[ 1317.264179] Dumping ftrace buffer:
[ 1317.264774] (ftrace buffer empty)
[ 1317.265220] Modules linked in:
[ 1317.265824] CPU: 4 PID: 836 Comm: trinity-child46 Tainted: G W 3.13.0-rc4-
next-20131218-sasha-00013-g2cebb9b-dirty #4159
[ 1317.267415] task: ffff8803ddf33000 ti: ffff8803cd31a000 task.ti: ffff8803cd31a000
[ 1317.268399] RIP: 0010:[<ffffffff84225f52>] [<ffffffff84225f52>] rds_ib_laddr_check+
0x82/0x110
[ 1317.269670] RSP: 0000:ffff8803cd31bdf8 EFLAGS: 00010246
[ 1317.270230] RAX: 0000000000000000 RBX: ffff88020b0dd388 RCX: 0000000000000000
[ 1317.270230] RDX: ffffffff8439822e RSI: 00000000000c000a RDI: 0000000000000286
[ 1317.270230] RBP: ffff8803cd31be38 R08: 0000000000000000 R09: 0000000000000000
[ 1317.270230] R10: 0000000000000000 R11: 0000000000000001 R12: 0000000000000000
[ 1317.270230] R13: 0000000054086700 R14: 0000000000a25de0 R15: 0000000000000031
[ 1317.270230] FS: 00007ff40251d700(0000) GS:ffff88022e200000(0000) knlGS:000000000000
0000
[ 1317.270230] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
[ 1317.270230] CR2: 0000000000000974 CR3: 00000003cd478000 CR4: 00000000000006e0
[ 1317.270230] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 1317.270230] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000090602
[ 1317.270230] Stack:
[ 1317.270230] 0000000054086700 5408670000a25de0 5408670000000002 0000000000000000
[ 1317.270230] ffffffff84223542 00000000ea54c767 0000000000000000 ffffffff86d26160
[ 1317.270230] ffff8803cd31be68 ffffffff84223556 ffff8803cd31beb8 ffff8800c6765280
[ 1317.270230] Call Trace:
[ 1317.270230] [<ffffffff84223542>] ? rds_trans_get_preferred+0x42/0xa0
[ 1317.270230] [<ffffffff84223556>] rds_trans_get_preferred+0x56/0xa0
[ 1317.270230] [<ffffffff8421c9c3>] rds_bind+0x73/0xf0
[ 1317.270230] [<ffffffff83e4ce62>] SYSC_bind+0x92/0xf0
[ 1317.270230] [<ffffffff812493f8>] ? context_tracking_user_exit+0xb8/0x1d0
[ 1317.270230] [<ffffffff8119313d>] ? trace_hardirqs_on+0xd/0x10
[ 1317.270230] [<ffffffff8107a852>] ? syscall_trace_enter+0x32/0x290
[ 1317.270230] [<ffffffff83e4cece>] SyS_bind+0xe/0x10
[ 1317.270230] [<ffffffff843a6ad0>] tracesys+0xdd/0xe2
[ 1317.270230] Code: 00 8b 45 cc 48 8d 75 d0 48 c7 45 d8 00 00 00 00 66 c7 45 d0 02 00
89 45 d4 48 89 df e8 78 49 76 ff 41 89 c4 85 c0 75 0c 48 8b 03 <80> b8 74 09 00 00 01 7
4 06 41 bc 9d ff ff ff f6 05 2a b6 c2 02
[ 1317.270230] RIP [<ffffffff84225f52>] rds_ib_laddr_check+0x82/0x110
[ 1317.270230] RSP <ffff8803cd31bdf8>
[ 1317.270230] CR2: 0000000000000974
Signed-off-by: Sasha Levin <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
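
The guard the fix adds is small; this sketch stubs the RDMA types so the pattern stands alone: rdma_bind_addr() can leave cm_id->device NULL, so test it before reading node_type.

#include <errno.h>
#include <stddef.h>

struct ib_device  { int node_type; };          /* stubbed */
struct rdma_cm_id { struct ib_device *device; };
#define RDMA_NODE_IB_CA 1

static int laddr_check_result(int bind_ret, const struct rdma_cm_id *cm_id)
{
    if (bind_ret || !cm_id->device ||          /* NULL check added */
        cm_id->device->node_type != RDMA_NODE_IB_CA) {
        return -EADDRNOTAVAIL;
    }
    return 0;
}

int main(void)
{
    struct rdma_cm_id id = { NULL };
    return laddr_check_result(0, &id) == -EADDRNOTAVAIL ? 0 : 1;
}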
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
{
BDRVQcowState *s = bs->opaque;
QCowSnapshot *new_snapshot_list = NULL;
QCowSnapshot *old_snapshot_list = NULL;
QCowSnapshot sn1, *sn = &sn1;
int i, ret;
uint64_t *l1_table = NULL;
int64_t l1_table_offset;
memset(sn, 0, sizeof(*sn));
/* Generate an ID if it wasn't passed */
if (sn_info->id_str[0] == '\0') {
find_new_snapshot_id(bs, sn_info->id_str, sizeof(sn_info->id_str));
}
/* Check that the ID is unique */
if (find_snapshot_by_id_and_name(bs, sn_info->id_str, NULL) >= 0) {
return -EEXIST;
}
/* Populate sn with passed data */
sn->id_str = g_strdup(sn_info->id_str);
sn->name = g_strdup(sn_info->name);
sn->disk_size = bs->total_sectors * BDRV_SECTOR_SIZE;
sn->vm_state_size = sn_info->vm_state_size;
sn->date_sec = sn_info->date_sec;
sn->date_nsec = sn_info->date_nsec;
sn->vm_clock_nsec = sn_info->vm_clock_nsec;
/* Allocate the L1 table of the snapshot and copy the current one there. */
l1_table_offset = qcow2_alloc_clusters(bs, s->l1_size * sizeof(uint64_t));
if (l1_table_offset < 0) {
ret = l1_table_offset;
goto fail;
}
sn->l1_table_offset = l1_table_offset;
sn->l1_size = s->l1_size;
l1_table = g_malloc(s->l1_size * sizeof(uint64_t));
for(i = 0; i < s->l1_size; i++) {
l1_table[i] = cpu_to_be64(s->l1_table[i]);
}
ret = qcow2_pre_write_overlap_check(bs, 0, sn->l1_table_offset,
s->l1_size * sizeof(uint64_t));
if (ret < 0) {
goto fail;
}
ret = bdrv_pwrite(bs->file, sn->l1_table_offset, l1_table,
s->l1_size * sizeof(uint64_t));
if (ret < 0) {
goto fail;
}
g_free(l1_table);
l1_table = NULL;
/*
* Increase the refcounts of all clusters and make sure everything is
* stable on disk before updating the snapshot table to contain a pointer
* to the new L1 table.
*/
ret = qcow2_update_snapshot_refcount(bs, s->l1_table_offset, s->l1_size, 1);
if (ret < 0) {
goto fail;
}
/* Append the new snapshot to the snapshot list */
new_snapshot_list = g_malloc((s->nb_snapshots + 1) * sizeof(QCowSnapshot));
if (s->snapshots) {
memcpy(new_snapshot_list, s->snapshots,
s->nb_snapshots * sizeof(QCowSnapshot));
old_snapshot_list = s->snapshots;
}
s->snapshots = new_snapshot_list;
s->snapshots[s->nb_snapshots++] = *sn;
ret = qcow2_write_snapshots(bs);
if (ret < 0) {
g_free(s->snapshots);
s->snapshots = old_snapshot_list;
s->nb_snapshots--;
goto fail;
}
g_free(old_snapshot_list);
/* The VM state isn't needed any more in the active L1 table; in fact, it
* hurts by causing expensive COW for the next snapshot. */
qcow2_discard_clusters(bs, qcow2_vm_state_offset(s),
align_offset(sn->vm_state_size, s->cluster_size)
>> BDRV_SECTOR_BITS,
QCOW2_DISCARD_NEVER);
#ifdef DEBUG_ALLOC
{
BdrvCheckResult result = {0};
qcow2_check_refcounts(bs, &result, 0);
}
#endif
return 0;
fail:
g_free(sn->id_str);
g_free(sn->name);
g_free(l1_table);
return ret;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'qcow2: Validate snapshot table offset/size (CVE-2014-0144)
This avoids unbounded memory allocation and fixes a potential buffer
overflow on 32 bit hosts.
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>'</s>
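
A standalone sketch of the validation pattern, reusing the QCOW_MAX_SNAPSHOTS cap that also appears in the hardened open path shown earlier; the helper is illustrative: bound the snapshot count and keep the table's offset/size inside the image before deriving any allocation from them.

#include <stdint.h>
#include <stdio.h>

#define QCOW_MAX_SNAPSHOTS 65536

static int snapshot_table_ok(uint32_t nb_snapshots, uint64_t offset,
                             uint64_t entry_size, uint64_t file_size)
{
    uint64_t bytes = (uint64_t)nb_snapshots * entry_size;
    if (nb_snapshots > QCOW_MAX_SNAPSHOTS) return 0;
    return offset <= file_size && bytes <= file_size - offset;
}

int main(void)
{
    printf("%d\n", snapshot_table_ok(16, 65536, 80, 1ULL << 30));
    return 0;
}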
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int qcow2_refcount_init(BlockDriverState *bs)
{
BDRVQcowState *s = bs->opaque;
int ret, refcount_table_size2, i;
refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
s->refcount_table = g_malloc(refcount_table_size2);
if (s->refcount_table_size > 0) {
BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
ret = bdrv_pread(bs->file, s->refcount_table_offset,
s->refcount_table, refcount_table_size2);
if (ret != refcount_table_size2)
goto fail;
for(i = 0; i < s->refcount_table_size; i++)
be64_to_cpus(&s->refcount_table[i]);
}
return 0;
fail:
return -ENOMEM;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'qcow2: Check refcount table size (CVE-2014-0144)
Limit the in-memory reference count table size to 8 MB, it's enough in
practice. This fixes an unbounded allocation as well as a buffer
overflow in qcow2_refcount_init().
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>'</s>
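
A hedged sketch of the 8 MB cap the message describes; the helper below is illustrative, not QEMU's qcow2_refcount_init: bound the header's refcount_table_clusters before the shift that yields refcount_table_size, so the later size * 8 allocation stays small and well-defined.

#include <stdint.h>
#include <stdio.h>

#define MAX_REFTABLE_BYTES (8u << 20)   /* 8 MB cap from the fix */

static int reftable_ok(uint64_t clusters, unsigned cluster_bits)
{
    uint64_t entries = clusters << (cluster_bits - 3);
    return entries * sizeof(uint64_t) <= MAX_REFTABLE_BYTES;
}

int main(void)
{
    printf("%d %d\n", reftable_ok(1, 16), reftable_ok(1u << 20, 16));
    return 0;
}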
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVQcowState *s = bs->opaque;
int len, i, ret = 0;
QCowHeader header;
QemuOpts *opts;
Error *local_err = NULL;
uint64_t ext_end;
uint64_t l1_vm_state_index;
const char *opt_overlap_check;
int overlap_check_template = 0;
ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read qcow2 header");
goto fail;
}
be32_to_cpus(&header.magic);
be32_to_cpus(&header.version);
be64_to_cpus(&header.backing_file_offset);
be32_to_cpus(&header.backing_file_size);
be64_to_cpus(&header.size);
be32_to_cpus(&header.cluster_bits);
be32_to_cpus(&header.crypt_method);
be64_to_cpus(&header.l1_table_offset);
be32_to_cpus(&header.l1_size);
be64_to_cpus(&header.refcount_table_offset);
be32_to_cpus(&header.refcount_table_clusters);
be64_to_cpus(&header.snapshots_offset);
be32_to_cpus(&header.nb_snapshots);
if (header.magic != QCOW_MAGIC) {
error_setg(errp, "Image is not in qcow2 format");
ret = -EINVAL;
goto fail;
}
if (header.version < 2 || header.version > 3) {
report_unsupported(bs, errp, "QCOW version %d", header.version);
ret = -ENOTSUP;
goto fail;
}
s->qcow_version = header.version;
/* Initialise cluster size */
if (header.cluster_bits < MIN_CLUSTER_BITS ||
header.cluster_bits > MAX_CLUSTER_BITS) {
error_setg(errp, "Unsupported cluster size: 2^%i", header.cluster_bits);
ret = -EINVAL;
goto fail;
}
s->cluster_bits = header.cluster_bits;
s->cluster_size = 1 << s->cluster_bits;
s->cluster_sectors = 1 << (s->cluster_bits - 9);
/* Initialise version 3 header fields */
if (header.version == 2) {
header.incompatible_features = 0;
header.compatible_features = 0;
header.autoclear_features = 0;
header.refcount_order = 4;
header.header_length = 72;
} else {
be64_to_cpus(&header.incompatible_features);
be64_to_cpus(&header.compatible_features);
be64_to_cpus(&header.autoclear_features);
be32_to_cpus(&header.refcount_order);
be32_to_cpus(&header.header_length);
if (header.header_length < 104) {
error_setg(errp, "qcow2 header too short");
ret = -EINVAL;
goto fail;
}
}
if (header.header_length > s->cluster_size) {
error_setg(errp, "qcow2 header exceeds cluster size");
ret = -EINVAL;
goto fail;
}
if (header.header_length > sizeof(header)) {
s->unknown_header_fields_size = header.header_length - sizeof(header);
s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields,
s->unknown_header_fields_size);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read unknown qcow2 header "
"fields");
goto fail;
}
}
if (header.backing_file_offset) {
ext_end = header.backing_file_offset;
} else {
ext_end = 1 << header.cluster_bits;
}
/* Handle feature bits */
s->incompatible_features = header.incompatible_features;
s->compatible_features = header.compatible_features;
s->autoclear_features = header.autoclear_features;
if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {
void *feature_table = NULL;
qcow2_read_extensions(bs, header.header_length, ext_end,
&feature_table, NULL);
report_unsupported_feature(bs, errp, feature_table,
s->incompatible_features &
~QCOW2_INCOMPAT_MASK);
ret = -ENOTSUP;
g_free(feature_table);
goto fail;
}
if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
/* Corrupt images may not be written to unless they are being repaired
*/
if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) {
error_setg(errp, "qcow2: Image is corrupt; cannot be opened "
"read/write");
ret = -EACCES;
goto fail;
}
}
/* Check support for various header values */
if (header.refcount_order != 4) {
report_unsupported(bs, errp, "%d bit reference counts",
1 << header.refcount_order);
ret = -ENOTSUP;
goto fail;
}
s->refcount_order = header.refcount_order;
if (header.crypt_method > QCOW_CRYPT_AES) {
error_setg(errp, "Unsupported encryption method: %i",
header.crypt_method);
ret = -EINVAL;
goto fail;
}
s->crypt_method_header = header.crypt_method;
if (s->crypt_method_header) {
bs->encrypted = 1;
}
s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
s->l2_size = 1 << s->l2_bits;
bs->total_sectors = header.size / 512;
s->csize_shift = (62 - (s->cluster_bits - 8));
s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
s->refcount_table_offset = header.refcount_table_offset;
s->refcount_table_size =
header.refcount_table_clusters << (s->cluster_bits - 3);
s->snapshots_offset = header.snapshots_offset;
s->nb_snapshots = header.nb_snapshots;
/* read the level 1 table */
s->l1_size = header.l1_size;
l1_vm_state_index = size_to_l1(s, header.size);
if (l1_vm_state_index > INT_MAX) {
error_setg(errp, "Image is too big");
ret = -EFBIG;
goto fail;
}
s->l1_vm_state_index = l1_vm_state_index;
/* the L1 table must contain at least enough entries to put
header.size bytes */
if (s->l1_size < s->l1_vm_state_index) {
error_setg(errp, "L1 table is too small");
ret = -EINVAL;
goto fail;
}
s->l1_table_offset = header.l1_table_offset;
if (s->l1_size > 0) {
s->l1_table = g_malloc0(
align_offset(s->l1_size * sizeof(uint64_t), 512));
ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
s->l1_size * sizeof(uint64_t));
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read L1 table");
goto fail;
}
for(i = 0;i < s->l1_size; i++) {
be64_to_cpus(&s->l1_table[i]);
}
}
/* alloc L2 table/refcount block cache */
s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE);
s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE);
s->cluster_cache = g_malloc(s->cluster_size);
/* one more sector for decompressed data alignment */
s->cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
+ 512);
s->cluster_cache_offset = -1;
s->flags = flags;
ret = qcow2_refcount_init(bs);
if (ret != 0) {
error_setg_errno(errp, -ret, "Could not initialize refcount handling");
goto fail;
}
QLIST_INIT(&s->cluster_allocs);
QTAILQ_INIT(&s->discards);
/* read qcow2 extensions */
if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL,
&local_err)) {
error_propagate(errp, local_err);
ret = -EINVAL;
goto fail;
}
/* read the backing file name */
if (header.backing_file_offset != 0) {
len = header.backing_file_size;
if (len > 1023) {
len = 1023;
}
ret = bdrv_pread(bs->file, header.backing_file_offset,
bs->backing_file, len);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read backing file name");
goto fail;
}
bs->backing_file[len] = '\0';
}
ret = qcow2_read_snapshots(bs);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read snapshots");
goto fail;
}
/* Clear unknown autoclear feature bits */
if (!bs->read_only && !(flags & BDRV_O_INCOMING) && s->autoclear_features) {
s->autoclear_features = 0;
ret = qcow2_update_header(bs);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not update qcow2 header");
goto fail;
}
}
/* Initialise locks */
qemu_co_mutex_init(&s->lock);
/* Repair image if dirty */
if (!(flags & (BDRV_O_CHECK | BDRV_O_INCOMING)) && !bs->read_only &&
(s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) {
BdrvCheckResult result = {0};
ret = qcow2_check(bs, &result, BDRV_FIX_ERRORS);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not repair dirty image");
goto fail;
}
}
/* Enable lazy_refcounts according to image and command line options */
opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort);
qemu_opts_absorb_qdict(opts, options, &local_err);
if (local_err) {
error_propagate(errp, local_err);
ret = -EINVAL;
goto fail;
}
s->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS,
(s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS));
s->discard_passthrough[QCOW2_DISCARD_NEVER] = false;
s->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true;
s->discard_passthrough[QCOW2_DISCARD_REQUEST] =
qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST,
flags & BDRV_O_UNMAP);
s->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] =
qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true);
s->discard_passthrough[QCOW2_DISCARD_OTHER] =
qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false);
opt_overlap_check = qemu_opt_get(opts, "overlap-check") ?: "cached";
if (!strcmp(opt_overlap_check, "none")) {
overlap_check_template = 0;
} else if (!strcmp(opt_overlap_check, "constant")) {
overlap_check_template = QCOW2_OL_CONSTANT;
} else if (!strcmp(opt_overlap_check, "cached")) {
overlap_check_template = QCOW2_OL_CACHED;
} else if (!strcmp(opt_overlap_check, "all")) {
overlap_check_template = QCOW2_OL_ALL;
} else {
error_setg(errp, "Unsupported value '%s' for qcow2 option "
"'overlap-check'. Allowed are either of the following: "
"none, constant, cached, all", opt_overlap_check);
qemu_opts_del(opts);
ret = -EINVAL;
goto fail;
}
s->overlap_check = 0;
for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) {
/* overlap-check defines a template bitmask, but every flag may be
* overwritten through the associated boolean option */
s->overlap_check |=
qemu_opt_get_bool(opts, overlap_bool_option_names[i],
overlap_check_template & (1 << i)) << i;
}
qemu_opts_del(opts);
if (s->use_lazy_refcounts && s->qcow_version < 3) {
error_setg(errp, "Lazy refcounts require a qcow2 image with at least "
"qemu 1.1 compatibility level");
ret = -EINVAL;
goto fail;
}
#ifdef DEBUG_ALLOC
{
BdrvCheckResult result = {0};
qcow2_check_refcounts(bs, &result, 0);
}
#endif
return ret;
fail:
g_free(s->unknown_header_fields);
cleanup_unknown_header_ext(bs);
qcow2_free_snapshots(bs);
qcow2_refcount_close(bs);
g_free(s->l1_table);
/* else pre-write overlap checks in cache_destroy may crash */
s->l1_table = NULL;
if (s->l2_table_cache) {
qcow2_cache_destroy(bs, s->l2_table_cache);
}
if (s->refcount_block_cache) {
qcow2_cache_destroy(bs, s->refcount_block_cache);
}
g_free(s->cluster_cache);
qemu_vfree(s->cluster_data);
return ret;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'qcow2: Check backing_file_offset (CVE-2014-0144)
Header, header extension and the backing file name must all be stored in
the first cluster. Setting the backing file to a much higher value
allowed header extensions to become much bigger than we want them to be
(unbounded allocation).
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>'</s>
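For reference, the fix this message describes boils down to a single bounds check, placed before ext_end is derived from header.backing_file_offset in the code above. A sketch using the names from that function (the exact upstream wording may differ):

    /* The backing file name must be stored within the first cluster,
     * otherwise header extension parsing runs over an attacker-chosen
     * range (unbounded allocation). */
    if (header.backing_file_offset > (1 << header.cluster_bits)) {
        error_setg(errp, "Invalid backing file offset");
        ret = -EINVAL;
        goto fail;
    }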
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static size_t curl_read_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
{
CURLState *s = ((CURLState*)opaque);
size_t realsize = size * nmemb;
int i;
DPRINTF("CURL: Just reading %zd bytes\n", realsize);
if (!s || !s->orig_buf)
goto read_end;
memcpy(s->orig_buf + s->buf_off, ptr, realsize);
s->buf_off += realsize;
for(i=0; i<CURL_NUM_ACB; i++) {
CURLAIOCB *acb = s->acb[i];
if (!acb)
continue;
if ((s->buf_off >= acb->end)) {
qemu_iovec_from_buf(acb->qiov, 0, s->orig_buf + acb->start,
acb->end - acb->start);
acb->common.cb(acb->common.opaque, 0);
qemu_aio_release(acb);
s->acb[i] = NULL;
}
}
read_end:
return realsize;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'curl: check data size before memcpy to local buffer. (CVE-2014-0144)
curl_read_cb is callback function for libcurl when data arrives. The
data size passed in here is not guaranteed to be within the range of
request we submitted, so we may overflow the guest IO buffer. Check the
real size we have before memcpy to buffer to avoid overflow.
Signed-off-by: Fam Zheng <[email protected]>
Reviewed-by: Stefan Hajnoczi <[email protected]>
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>'</s>
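For reference, the guard this fix describes sits directly before the memcpy() in the callback above. Here s->buf_len is an assumed field recording the allocated size of s->orig_buf (the snippet does not show it), and MIN() is QEMU's usual helper:

    /* Never copy more than the space left in the download buffer. */
    if (s->buf_off >= s->buf_len)
        goto read_end;                          /* buffer already full */
    realsize = MIN(realsize, s->buf_len - s->buf_off);
    memcpy(s->orig_buf + s->buf_off, ptr, realsize);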
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVVdiState *s = bs->opaque;
VdiHeader header;
size_t bmap_size;
int ret;
logout("\n");
ret = bdrv_read(bs->file, 0, (uint8_t *)&header, 1);
if (ret < 0) {
goto fail;
}
vdi_header_to_cpu(&header);
#if defined(CONFIG_VDI_DEBUG)
vdi_header_print(&header);
#endif
if (header.disk_size % SECTOR_SIZE != 0) {
/* 'VBoxManage convertfromraw' can create images with odd disk sizes.
We accept them but round the disk size to the next multiple of
SECTOR_SIZE. */
logout("odd disk size %" PRIu64 " B, round up\n", header.disk_size);
header.disk_size += SECTOR_SIZE - 1;
header.disk_size &= ~(SECTOR_SIZE - 1);
}
if (header.signature != VDI_SIGNATURE) {
error_setg(errp, "Image not in VDI format (bad signature %08x)", header.signature);
ret = -EINVAL;
goto fail;
} else if (header.version != VDI_VERSION_1_1) {
error_setg(errp, "unsupported VDI image (version %u.%u)",
header.version >> 16, header.version & 0xffff);
ret = -ENOTSUP;
goto fail;
} else if (header.offset_bmap % SECTOR_SIZE != 0) {
/* We only support block maps which start on a sector boundary. */
error_setg(errp, "unsupported VDI image (unaligned block map offset "
"0x%x)", header.offset_bmap);
ret = -ENOTSUP;
goto fail;
} else if (header.offset_data % SECTOR_SIZE != 0) {
/* We only support data blocks which start on a sector boundary. */
error_setg(errp, "unsupported VDI image (unaligned data offset 0x%x)",
header.offset_data);
ret = -ENOTSUP;
goto fail;
} else if (header.sector_size != SECTOR_SIZE) {
error_setg(errp, "unsupported VDI image (sector size %u is not %u)",
header.sector_size, SECTOR_SIZE);
ret = -ENOTSUP;
goto fail;
} else if (header.block_size != 1 * MiB) {
error_setg(errp, "unsupported VDI image (sector size %u is not %u)",
header.block_size, 1 * MiB);
ret = -ENOTSUP;
goto fail;
} else if (header.disk_size >
(uint64_t)header.blocks_in_image * header.block_size) {
error_setg(errp, "unsupported VDI image (disk size %" PRIu64 ", "
"image bitmap has room for %" PRIu64 ")",
header.disk_size,
(uint64_t)header.blocks_in_image * header.block_size);
ret = -ENOTSUP;
goto fail;
} else if (!uuid_is_null(header.uuid_link)) {
error_setg(errp, "unsupported VDI image (non-NULL link UUID)");
ret = -ENOTSUP;
goto fail;
} else if (!uuid_is_null(header.uuid_parent)) {
error_setg(errp, "unsupported VDI image (non-NULL parent UUID)");
ret = -ENOTSUP;
goto fail;
}
bs->total_sectors = header.disk_size / SECTOR_SIZE;
s->block_size = header.block_size;
s->block_sectors = header.block_size / SECTOR_SIZE;
s->bmap_sector = header.offset_bmap / SECTOR_SIZE;
s->header = header;
bmap_size = header.blocks_in_image * sizeof(uint32_t);
bmap_size = (bmap_size + SECTOR_SIZE - 1) / SECTOR_SIZE;
s->bmap = g_malloc(bmap_size * SECTOR_SIZE);
ret = bdrv_read(bs->file, s->bmap_sector, (uint8_t *)s->bmap, bmap_size);
if (ret < 0) {
goto fail_free_bmap;
}
/* Disable migration when vdi images are used */
error_set(&s->migration_blocker,
QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
"vdi", bs->device_name, "live migration");
migrate_add_blocker(s->migration_blocker);
return 0;
fail_free_bmap:
g_free(s->bmap);
fail:
return ret;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'vdi: add bounds checks for blocks_in_image and disk_size header fields (CVE-2014-0144)
The maximum blocks_in_image is 0xffffffff / 4, which also limits the
maximum disk_size for a VDI image to 1024TB. Note that this is the maximum
size that QEMU will currently support with this driver, not necessarily the
maximum size allowed by the image format.
This also fixes an incorrect error message, a bug introduced by commit
5b7aa9b56d1bfc79916262f380c3fc7961becb50 (Reported by Stefan Weil)
Signed-off-by: Jeff Cody <[email protected]>
Signed-off-by: Kevin Wolf <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>'</s>
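For reference, the bounds named in this message translate to checks of roughly this shape, placed before bmap_size is computed in the function above (error wording is illustrative):

    /* blocks_in_image is bounded so the 4-byte-per-entry block map
     * cannot exceed a 32-bit size; with the mandatory 1 MiB block size
     * this also caps disk_size at about 1024 TB. */
    if (header.blocks_in_image > 0xffffffffU / 4) {
        error_setg(errp, "unsupported VDI image (too many blocks %u)",
                   header.blocks_in_image);
        ret = -ENOTSUP;
        goto fail;
    }
    if (header.disk_size > (uint64_t)(0xffffffffU / 4) * header.block_size) {
        error_setg(errp, "unsupported VDI image (disk size too large)");
        ret = -ENOTSUP;
        goto fail;
    }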
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVVPCState *s = bs->opaque;
int i;
VHDFooter *footer;
VHDDynDiskHeader *dyndisk_header;
uint8_t buf[HEADER_SIZE];
uint32_t checksum;
int disk_type = VHD_DYNAMIC;
int ret;
ret = bdrv_pread(bs->file, 0, s->footer_buf, HEADER_SIZE);
if (ret < 0) {
goto fail;
}
footer = (VHDFooter *) s->footer_buf;
if (strncmp(footer->creator, "conectix", 8)) {
int64_t offset = bdrv_getlength(bs->file);
if (offset < 0) {
ret = offset;
goto fail;
} else if (offset < HEADER_SIZE) {
ret = -EINVAL;
goto fail;
}
/* If a fixed disk, the footer is found only at the end of the file */
ret = bdrv_pread(bs->file, offset-HEADER_SIZE, s->footer_buf,
HEADER_SIZE);
if (ret < 0) {
goto fail;
}
if (strncmp(footer->creator, "conectix", 8)) {
error_setg(errp, "invalid VPC image");
ret = -EINVAL;
goto fail;
}
disk_type = VHD_FIXED;
}
checksum = be32_to_cpu(footer->checksum);
footer->checksum = 0;
if (vpc_checksum(s->footer_buf, HEADER_SIZE) != checksum)
fprintf(stderr, "block-vpc: The header checksum of '%s' is "
"incorrect.\n", bs->filename);
/* Write 'checksum' back to footer, or else will leave it with zero. */
footer->checksum = be32_to_cpu(checksum);
// The visible size of a image in Virtual PC depends on the geometry
// rather than on the size stored in the footer (the size in the footer
// is too large usually)
bs->total_sectors = (int64_t)
be16_to_cpu(footer->cyls) * footer->heads * footer->secs_per_cyl;
/* images created with disk2vhd report a far higher virtual size
* than expected with the cyls * heads * sectors_per_cyl formula.
* use the footer->size instead if the image was created with
* disk2vhd.
*/
if (!strncmp(footer->creator_app, "d2v", 4)) {
bs->total_sectors = be64_to_cpu(footer->size) / BDRV_SECTOR_SIZE;
}
/* Allow a maximum disk size of approximately 2 TB */
if (bs->total_sectors >= 65535LL * 255 * 255) {
ret = -EFBIG;
goto fail;
}
if (disk_type == VHD_DYNAMIC) {
ret = bdrv_pread(bs->file, be64_to_cpu(footer->data_offset), buf,
HEADER_SIZE);
if (ret < 0) {
goto fail;
}
dyndisk_header = (VHDDynDiskHeader *) buf;
if (strncmp(dyndisk_header->magic, "cxsparse", 8)) {
ret = -EINVAL;
goto fail;
}
s->block_size = be32_to_cpu(dyndisk_header->block_size);
s->bitmap_size = ((s->block_size / (8 * 512)) + 511) & ~511;
s->max_table_entries = be32_to_cpu(dyndisk_header->max_table_entries);
s->pagetable = g_malloc(s->max_table_entries * 4);
s->bat_offset = be64_to_cpu(dyndisk_header->table_offset);
ret = bdrv_pread(bs->file, s->bat_offset, s->pagetable,
s->max_table_entries * 4);
if (ret < 0) {
goto fail;
}
s->free_data_block_offset =
(s->bat_offset + (s->max_table_entries * 4) + 511) & ~511;
for (i = 0; i < s->max_table_entries; i++) {
be32_to_cpus(&s->pagetable[i]);
if (s->pagetable[i] != 0xFFFFFFFF) {
int64_t next = (512 * (int64_t) s->pagetable[i]) +
s->bitmap_size + s->block_size;
if (next > s->free_data_block_offset) {
s->free_data_block_offset = next;
}
}
}
if (s->free_data_block_offset > bdrv_getlength(bs->file)) {
error_setg(errp, "block-vpc: free_data_block_offset points after "
"the end of file. The image has been truncated.");
ret = -EINVAL;
goto fail;
}
s->last_bitmap_offset = (int64_t) -1;
#ifdef CACHE
s->pageentry_u8 = g_malloc(512);
s->pageentry_u32 = s->pageentry_u8;
s->pageentry_u16 = s->pageentry_u8;
s->last_pagetable = -1;
#endif
}
qemu_co_mutex_init(&s->lock);
/* Disable migration when VHD images are used */
error_set(&s->migration_blocker,
QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
"vpc", bs->device_name, "live migration");
migrate_add_blocker(s->migration_blocker);
return 0;
fail:
g_free(s->pagetable);
#ifdef CACHE
g_free(s->pageentry_u8);
#endif
return ret;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'vpc/vhd: add bounds check for max_table_entries and block_size (CVE-2014-0144)
This adds checks to make sure that max_table_entries and block_size
are in sane ranges. Memory is allocated based on max_table_entries,
and block_size is used to calculate indices into that allocated
memory, so if these values are incorrect that can lead to potential
unbounded memory allocation, or invalid memory accesses.
Also, the allocation of the pagetable is changed from g_malloc0()
to qemu_blockalign().
Signed-off-by: Jeff Cody <[email protected]>
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVCloopState *s = bs->opaque;
uint32_t offsets_size, max_compressed_block_size = 1, i;
int ret;
bs->read_only = 1;
/* read header */
ret = bdrv_pread(bs->file, 128, &s->block_size, 4);
if (ret < 0) {
return ret;
}
s->block_size = be32_to_cpu(s->block_size);
if (s->block_size % 512) {
error_setg(errp, "block_size %u must be a multiple of 512",
s->block_size);
return -EINVAL;
}
if (s->block_size == 0) {
error_setg(errp, "block_size cannot be zero");
return -EINVAL;
}
/* cloop's create_compressed_fs.c warns about block sizes beyond 256 KB but
* we can accept more. Prevent ridiculous values like 4 GB - 1 since we
* need a buffer this big.
*/
if (s->block_size > MAX_BLOCK_SIZE) {
error_setg(errp, "block_size %u must be %u MB or less",
s->block_size,
MAX_BLOCK_SIZE / (1024 * 1024));
return -EINVAL;
}
ret = bdrv_pread(bs->file, 128 + 4, &s->n_blocks, 4);
if (ret < 0) {
return ret;
}
s->n_blocks = be32_to_cpu(s->n_blocks);
/* read offsets */
if (s->n_blocks > UINT32_MAX / sizeof(uint64_t)) {
/* Prevent integer overflow */
error_setg(errp, "n_blocks %u must be %zu or less",
s->n_blocks,
UINT32_MAX / sizeof(uint64_t));
return -EINVAL;
}
offsets_size = s->n_blocks * sizeof(uint64_t);
s->offsets = g_malloc(offsets_size);
ret = bdrv_pread(bs->file, 128 + 4 + 4, s->offsets, offsets_size);
if (ret < 0) {
goto fail;
}
for(i=0;i<s->n_blocks;i++) {
s->offsets[i] = be64_to_cpu(s->offsets[i]);
if (i > 0) {
uint32_t size = s->offsets[i] - s->offsets[i - 1];
if (size > max_compressed_block_size) {
max_compressed_block_size = size;
}
}
}
/* initialize zlib engine */
s->compressed_block = g_malloc(max_compressed_block_size + 1);
s->uncompressed_block = g_malloc(s->block_size);
if (inflateInit(&s->zstream) != Z_OK) {
ret = -EINVAL;
goto fail;
}
s->current_block = s->n_blocks;
s->sectors_per_block = s->block_size/512;
bs->total_sectors = s->n_blocks * s->sectors_per_block;
qemu_co_mutex_init(&s->lock);
return 0;
fail:
g_free(s->offsets);
g_free(s->compressed_block);
g_free(s->uncompressed_block);
return ret;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'block/cloop: refuse images with huge offsets arrays (CVE-2014-0144)
Limit offsets_size to 512 MB so that:
1. g_malloc() does not abort due to an unreasonable size argument.
2. offsets_size does not overflow the bdrv_pread() int size argument.
This limit imposes a maximum image size of 16 TB at 256 KB block size.
Signed-off-by: Stefan Hajnoczi <[email protected]>
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>'</s>
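For reference, the 512 MB cap this message names is a single check between reading n_blocks and computing offsets_size:

    /* Keeps the g_malloc() argument reasonable and offsets_size within
     * bdrv_pread()'s int size parameter. */
    if (s->n_blocks > (512 * 1024 * 1024) / sizeof(uint64_t)) {
        error_setg(errp, "image requires too many offsets");
        return -EINVAL;
    }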
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static void xenvif_fatal_tx_err(struct xenvif *vif)
{
netdev_err(vif->dev, "fatal error; disabling device\n");
xenvif_carrier_off(vif);
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-399'], 'message': 'xen-netback: disable rogue vif in kthread context
When netback discovers frontend is sending malformed packet it will
disables the interface which serves that frontend.
However disabling a network interface involving taking a mutex which
cannot be done in softirq context, so we need to defer this process to
kthread context.
This patch does the following:
1. introduce a flag to indicate the interface is disabled.
2. check that flag in TX path, don't do any work if it's true.
3. check that flag in RX path, turn off that interface if it's true.
The reason to disable it in RX path is because RX uses kthread. After
this change the behavior of netback is still consistent -- it won't do
any TX work for a rogue frontend, and the interface will be eventually
turned off.
Also change a "continue" to "break" after xenvif_fatal_tx_err, as it
doesn't make sense to continue processing packets if frontend is rogue.
This is a fix for XSA-90.
Reported-by: Török Edwin <[email protected]>
Signed-off-by: Wei Liu <[email protected]>
Cc: Ian Campbell <[email protected]>
Reviewed-by: David Vrabel <[email protected]>
Acked-by: Ian Campbell <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
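For reference, points 1 and 3 of this message reduce to the softirq-context error path only setting a flag, with the RX kthread doing the actual (mutex-taking) disable. A sketch following the patch's description (vif->disabled and xenvif_kick_thread() are taken on trust from that description):

    static void xenvif_fatal_tx_err(struct xenvif *vif)
    {
        netdev_err(vif->dev, "fatal error; disabling device\n");
        vif->disabled = true;        /* checked by the TX path */
        xenvif_kick_thread(vif);     /* RX kthread turns the vif off */
    }

The TX/RX checks of vif->disabled and the continue-to-break change mentioned here apply to the related entries below.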
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
{
struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop;
struct sk_buff *skb;
int ret;
while (skb_queue_len(&vif->tx_queue) < budget) {
struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
u16 pending_idx;
RING_IDX idx;
int work_to_do;
unsigned int data_len;
pending_ring_idx_t index;
if (vif->tx.sring->req_prod - vif->tx.req_cons >
XEN_NETIF_TX_RING_SIZE) {
netdev_err(vif->dev,
"Impossible number of requests. "
"req_prod %d, req_cons %d, size %ld\n",
vif->tx.sring->req_prod, vif->tx.req_cons,
XEN_NETIF_TX_RING_SIZE);
xenvif_fatal_tx_err(vif);
continue;
}
work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
if (!work_to_do)
break;
idx = vif->tx.req_cons;
rmb(); /* Ensure that we see the request before we copy it. */
memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
/* Credit-based scheduling. */
if (txreq.size > vif->remaining_credit &&
tx_credit_exceeded(vif, txreq.size))
break;
vif->remaining_credit -= txreq.size;
work_to_do--;
vif->tx.req_cons = ++idx;
memset(extras, 0, sizeof(extras));
if (txreq.flags & XEN_NETTXF_extra_info) {
work_to_do = xenvif_get_extras(vif, extras,
work_to_do);
idx = vif->tx.req_cons;
if (unlikely(work_to_do < 0))
break;
}
ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
if (unlikely(ret < 0))
break;
idx += ret;
if (unlikely(txreq.size < ETH_HLEN)) {
netdev_dbg(vif->dev,
"Bad packet size: %d\n", txreq.size);
xenvif_tx_err(vif, &txreq, idx);
break;
}
/* No crossing a page as the payload mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
netdev_err(vif->dev,
"txreq.offset: %x, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(txreq.offset&~PAGE_MASK) + txreq.size);
xenvif_fatal_tx_err(vif);
break;
}
index = pending_index(vif->pending_cons);
pending_idx = vif->pending_ring[index];
data_len = (txreq.size > PKT_PROT_LEN &&
ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
PKT_PROT_LEN : txreq.size;
skb = xenvif_alloc_skb(data_len);
if (unlikely(skb == NULL)) {
netdev_dbg(vif->dev,
"Can't allocate a skb in start_xmit.\n");
xenvif_tx_err(vif, &txreq, idx);
break;
}
if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
if (xenvif_set_skb_gso(vif, skb, gso)) {
/* Failure in xenvif_set_skb_gso is fatal. */
kfree_skb(skb);
break;
}
}
xenvif_tx_create_gop(vif, pending_idx, &txreq, gop);
gop++;
XENVIF_TX_CB(skb)->pending_idx = pending_idx;
__skb_put(skb, data_len);
skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size) {
skb_shinfo(skb)->nr_frags++;
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
} else {
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
INVALID_PENDING_IDX);
}
vif->pending_cons++;
request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
if (request_gop == NULL) {
kfree_skb(skb);
xenvif_tx_err(vif, &txreq, idx);
break;
}
gop = request_gop;
__skb_queue_tail(&vif->tx_queue, skb);
vif->tx.req_cons = idx;
if ((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops))
break;
}
return gop - vif->tx_map_ops;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-399'], 'message': 'xen-netback: disable rogue vif in kthread context
When netback discovers frontend is sending malformed packet it will
disables the interface which serves that frontend.
However disabling a network interface involving taking a mutex which
cannot be done in softirq context, so we need to defer this process to
kthread context.
This patch does the following:
1. introduce a flag to indicate the interface is disabled.
2. check that flag in TX path, don't do any work if it's true.
3. check that flag in RX path, turn off that interface if it's true.
The reason to disable it in RX path is because RX uses kthread. After
this change the behavior of netback is still consistent -- it won't do
any TX work for a rogue frontend, and the interface will be eventually
turned off.
Also change a "continue" to "break" after xenvif_fatal_tx_err, as it
doesn't make sense to continue processing packets if frontend is rogue.
This is a fix for XSA-90.
Reported-by: Török Edwin <[email protected]>
Signed-off-by: Wei Liu <[email protected]>
Cc: Ian Campbell <[email protected]>
Reviewed-by: David Vrabel <[email protected]>
Acked-by: Ian Campbell <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
unsigned int handle)
{
int err;
struct net_device *dev;
struct xenvif *vif;
char name[IFNAMSIZ] = {};
int i;
snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
if (dev == NULL) {
pr_warn("Could not allocate netdev for %s\n", name);
return ERR_PTR(-ENOMEM);
}
SET_NETDEV_DEV(dev, parent);
vif = netdev_priv(dev);
vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
MAX_GRANT_COPY_OPS);
if (vif->grant_copy_op == NULL) {
pr_warn("Could not allocate grant copy space for %s\n", name);
free_netdev(dev);
return ERR_PTR(-ENOMEM);
}
vif->domid = domid;
vif->handle = handle;
vif->can_sg = 1;
vif->ip_csum = 1;
vif->dev = dev;
vif->credit_bytes = vif->remaining_credit = ~0UL;
vif->credit_usec = 0UL;
init_timer(&vif->credit_timeout);
vif->credit_window_start = get_jiffies_64();
init_timer(&vif->wake_queue);
dev->netdev_ops = &xenvif_netdev_ops;
dev->hw_features = NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
dev->features = dev->hw_features | NETIF_F_RXCSUM;
SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
skb_queue_head_init(&vif->rx_queue);
skb_queue_head_init(&vif->tx_queue);
vif->pending_cons = 0;
vif->pending_prod = MAX_PENDING_REQS;
for (i = 0; i < MAX_PENDING_REQS; i++)
vif->pending_ring[i] = i;
spin_lock_init(&vif->callback_lock);
spin_lock_init(&vif->response_lock);
/* If ballooning is disabled, this will consume real memory, so you
* better enable it. The long term solution would be to use just a
* bunch of valid page descriptors, without dependency on ballooning
*/
err = alloc_xenballooned_pages(MAX_PENDING_REQS,
vif->mmap_pages,
false);
if (err) {
netdev_err(dev, "Could not reserve mmap_pages\n");
return ERR_PTR(-ENOMEM);
}
for (i = 0; i < MAX_PENDING_REQS; i++) {
vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
{ .callback = xenvif_zerocopy_callback,
.ctx = NULL,
.desc = i };
vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
}
/*
* Initialise a dummy MAC address. We choose the numerically
* largest non-broadcast address to prevent the address getting
* stolen by an Ethernet bridge for STP purposes.
* (FE:FF:FF:FF:FF:FF)
*/
memset(dev->dev_addr, 0xFF, ETH_ALEN);
dev->dev_addr[0] &= ~0x01;
netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);
netif_carrier_off(dev);
err = register_netdev(dev);
if (err) {
netdev_warn(dev, "Could not register device: err=%d\n", err);
free_netdev(dev);
return ERR_PTR(err);
}
netdev_dbg(dev, "Successfully created xenvif\n");
__module_get(THIS_MODULE);
return vif;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-399'], 'message': 'xen-netback: disable rogue vif in kthread context
When netback discovers frontend is sending malformed packet it will
disables the interface which serves that frontend.
However disabling a network interface involving taking a mutex which
cannot be done in softirq context, so we need to defer this process to
kthread context.
This patch does the following:
1. introduce a flag to indicate the interface is disabled.
2. check that flag in TX path, don't do any work if it's true.
3. check that flag in RX path, turn off that interface if it's true.
The reason to disable it in RX path is because RX uses kthread. After
this change the behavior of netback is still consistent -- it won't do
any TX work for a rogue frontend, and the interface will be eventually
turned off.
Also change a "continue" to "break" after xenvif_fatal_tx_err, as it
doesn't make sense to continue processing packets if frontend is rogue.
This is a fix for XSA-90.
Reported-by: Török Edwin <[email protected]>
Signed-off-by: Wei Liu <[email protected]>
Cc: Ian Campbell <[email protected]>
Reviewed-by: David Vrabel <[email protected]>
Acked-by: Ian Campbell <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: get_ruser(pam_handle_t *pamh, char *ruserbuf, size_t ruserbuflen)
{
const void *ruser;
struct passwd *pwd;
if (ruserbuf == NULL || ruserbuflen < 1)
return -2;
/* Get the name of the source user. */
if (pam_get_item(pamh, PAM_RUSER, &ruser) != PAM_SUCCESS) {
ruser = NULL;
}
if ((ruser == NULL) || (strlen(ruser) == 0)) {
/* Barring that, use the current RUID. */
pwd = pam_modutil_getpwuid(pamh, getuid());
if (pwd != NULL) {
ruser = pwd->pw_name;
}
}
if (ruser == NULL || strlen(ruser) >= ruserbuflen) {
*ruserbuf = '\0';
return -1;
}
strcpy(ruserbuf, ruser);
return 0;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-22'], 'message': 'pam_timestamp: fix potential directory traversal issue (ticket #27)
pam_timestamp uses values of PAM_RUSER and PAM_TTY as components of
the timestamp pathname it creates, so extra care should be taken to
avoid potential directory traversal issues.
* modules/pam_timestamp/pam_timestamp.c (check_tty): Treat
"." and ".." tty values as invalid.
(get_ruser): Treat "." and ".." ruser values, as well as any ruser
value containing '/', as invalid.
Fixes CVE-2014-2583.
Reported-by: Sebastian Krahmer <[email protected]>'</s>
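For reference, the get_ruser() half of this fix is a validation of roughly this shape just before the final copy, matching the changelog lines above:

    /* ruser becomes a component of the timestamp pathname, so
     * path-like values must be rejected outright. */
    if (ruser == NULL || strlen(ruser) >= ruserbuflen ||
        strcmp(ruser, ".") == 0 || strcmp(ruser, "..") == 0 ||
        strchr(ruser, '/') != NULL) {
        *ruserbuf = '\0';
        return -1;
    }
    strcpy(ruserbuf, ruser);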
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
struct kvm_lapic_irq irqe;
int ret;
if (entry->fields.mask)
return -1;
ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
"vector=%x trig_mode=%x\n",
entry->fields.dest_id, entry->fields.dest_mode,
entry->fields.delivery_mode, entry->fields.vector,
entry->fields.trig_mode);
irqe.dest_id = entry->fields.dest_id;
irqe.vector = entry->fields.vector;
irqe.dest_mode = entry->fields.dest_mode;
irqe.trig_mode = entry->fields.trig_mode;
irqe.delivery_mode = entry->fields.delivery_mode << 8;
irqe.level = 1;
irqe.shorthand = 0;
if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
ioapic->irr &= ~(1 << irq);
if (irq == RTC_GSI && line_status) {
BUG_ON(ioapic->rtc_status.pending_eoi != 0);
ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
ioapic->rtc_status.dest_map);
ioapic->rtc_status.pending_eoi = ret;
} else
ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
entry->fields.remote_irr = 1;
return ret;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'KVM: ioapic: fix assignment of ioapic->rtc_status.pending_eoi (CVE-2014-0155)
QE reported that they got the BUG_ON in ioapic_service to trigger.
I cannot reproduce it, but there are two reasons why this could happen.
The less likely but also easiest one, is when kvm_irq_delivery_to_apic
does not deliver to any APIC and returns -1.
Because irqe.shorthand == 0, the kvm_for_each_vcpu loop in that
function is never reached. However, you can target the similar loop in
kvm_irq_delivery_to_apic_fast; just program a zero logical destination
address into the IOAPIC, or an out-of-range physical destination address.
Signed-off-by: Paolo Bonzini <[email protected]>'</s>
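For reference, the "less likely but also easiest" case above is closed by never storing a negative delivery result; a sketch consistent with this description:

    ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
                                   ioapic->rtc_status.dest_map);
    /* -1 means no APIC was targeted; a negative pending_eoi would
     * later trip the BUG_ON in ioapic_service(). */
    ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);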
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: apr_status_t modsecurity_tx_init(modsec_rec *msr) {
const char *s = NULL;
const apr_array_header_t *arr;
char *semicolon = NULL;
char *comma = NULL;
apr_table_entry_t *te;
int i;
/* Register TX cleanup */
apr_pool_cleanup_register(msr->mp, msr, modsecurity_tx_cleanup, apr_pool_cleanup_null);
/* Initialise C-L */
msr->request_content_length = -1;
s = apr_table_get(msr->request_headers, "Content-Length");
if (s != NULL) {
msr->request_content_length = strtol(s, NULL, 10);
}
/* Figure out whether this request has a body */
msr->reqbody_chunked = 0;
msr->reqbody_should_exist = 0;
if (msr->request_content_length == -1) {
/* There's no C-L, but is chunked encoding used? */
char *transfer_encoding = (char *)apr_table_get(msr->request_headers, "Transfer-Encoding");
if ((transfer_encoding != NULL)&&(strstr(transfer_encoding, "chunked") != NULL)) {
msr->reqbody_should_exist = 1;
msr->reqbody_chunked = 1;
}
} else {
/* C-L found */
msr->reqbody_should_exist = 1;
}
/* Initialise C-T */
msr->request_content_type = NULL;
s = apr_table_get(msr->request_headers, "Content-Type");
if (s != NULL) msr->request_content_type = s;
/* Decide what to do with the request body. */
if ((msr->request_content_type != NULL)
&& (strncasecmp(msr->request_content_type, "application/x-www-form-urlencoded", 33) == 0))
{
/* Always place POST requests with
* "application/x-www-form-urlencoded" payloads in memory.
*/
msr->msc_reqbody_storage = MSC_REQBODY_MEMORY;
msr->msc_reqbody_spilltodisk = 0;
msr->msc_reqbody_processor = "URLENCODED";
} else {
/* If the C-L is known and there's more data than
* our limit go to disk straight away.
*/
if ((msr->request_content_length != -1)
&& (msr->request_content_length > msr->txcfg->reqbody_inmemory_limit))
{
msr->msc_reqbody_storage = MSC_REQBODY_DISK;
}
/* In all other cases, try using the memory first
* but switch over to disk for larger bodies.
*/
msr->msc_reqbody_storage = MSC_REQBODY_MEMORY;
msr->msc_reqbody_spilltodisk = 1;
if (msr->request_content_type != NULL) {
if (strncasecmp(msr->request_content_type, "multipart/form-data", 19) == 0) {
msr->msc_reqbody_processor = "MULTIPART";
}
}
}
/* Check if we are forcing buffering, then use memory only. */
if (msr->txcfg->reqbody_buffering != REQUEST_BODY_FORCEBUF_OFF) {
msr->msc_reqbody_storage = MSC_REQBODY_MEMORY;
msr->msc_reqbody_spilltodisk = 0;
}
/* Initialise arguments */
msr->arguments = apr_table_make(msr->mp, 32);
if (msr->arguments == NULL) return -1;
if (msr->query_string != NULL) {
int invalid_count = 0;
if (parse_arguments(msr, msr->query_string, strlen(msr->query_string),
msr->txcfg->argument_separator, "QUERY_STRING", msr->arguments,
&invalid_count) < 0)
{
msr_log(msr, 1, "Initialisation: Error occurred while parsing QUERY_STRING arguments.");
return -1;
}
if (invalid_count) {
msr->urlencoded_error = 1;
}
}
msr->arguments_to_sanitize = apr_table_make(msr->mp, 16);
if (msr->arguments_to_sanitize == NULL) return -1;
msr->request_headers_to_sanitize = apr_table_make(msr->mp, 16);
if (msr->request_headers_to_sanitize == NULL) return -1;
msr->response_headers_to_sanitize = apr_table_make(msr->mp, 16);
if (msr->response_headers_to_sanitize == NULL) return -1;
msr->pattern_to_sanitize = apr_table_make(msr->mp, 32);
if (msr->pattern_to_sanitize == NULL) return -1;
/* remove targets */
msr->removed_targets = apr_table_make(msr->mp, 16);
if (msr->removed_targets == NULL) return -1;
/* Initialise cookies */
msr->request_cookies = apr_table_make(msr->mp, 16);
if (msr->request_cookies == NULL) return -1;
/* Initialize matched vars */
msr->matched_vars = apr_table_make(msr->mp, 8);
if (msr->matched_vars == NULL) return -1;
apr_table_clear(msr->matched_vars);
msr->perf_rules = apr_table_make(msr->mp, 8);
if (msr->perf_rules == NULL) return -1;
apr_table_clear(msr->perf_rules);
/* Locate the cookie headers and parse them */
arr = apr_table_elts(msr->request_headers);
te = (apr_table_entry_t *)arr->elts;
for (i = 0; i < arr->nelts; i++) {
if (strcasecmp(te[i].key, "Cookie") == 0) {
if (msr->txcfg->cookie_format == COOKIES_V0) {
semicolon = apr_pstrdup(msr->mp, te[i].val);
while((*semicolon != 0)&&(*semicolon != ';')) semicolon++;
if(*semicolon == ';') {
parse_cookies_v0(msr, te[i].val, msr->request_cookies, ";");
} else {
comma = apr_pstrdup(msr->mp, te[i].val);
while((*comma != 0)&&(*comma != ',')) comma++;
if(*comma == ',') {
comma++;
if(*comma == 0x20) {// looks like comma is the separator
if (msr->txcfg->debuglog_level >= 5) {
msr_log(msr, 5, "Cookie v0 parser: Using comma as a separator. Semi-colon was not identified!");
}
parse_cookies_v0(msr, te[i].val, msr->request_cookies, ",");
} else {
parse_cookies_v0(msr, te[i].val, msr->request_cookies, ";");
}
} else {
parse_cookies_v0(msr, te[i].val, msr->request_cookies, ";");
}
}
} else {
parse_cookies_v1(msr, te[i].val, msr->request_cookies);
}
}
}
/* Collections. */
msr->tx_vars = apr_table_make(msr->mp, 32);
if (msr->tx_vars == NULL) return -1;
msr->geo_vars = apr_table_make(msr->mp, 8);
if (msr->geo_vars == NULL) return -1;
msr->collections_original = apr_table_make(msr->mp, 8);
if (msr->collections_original == NULL) return -1;
msr->collections = apr_table_make(msr->mp, 8);
if (msr->collections == NULL) return -1;
msr->collections_dirty = apr_table_make(msr->mp, 8);
if (msr->collections_dirty == NULL) return -1;
/* Other */
msr->tcache = NULL;
msr->tcache_items = 0;
msr->matched_rules = apr_array_make(msr->mp, 16, sizeof(void *));
if (msr->matched_rules == NULL) return -1;
msr->matched_var = (msc_string *)apr_pcalloc(msr->mp, sizeof(msc_string));
if (msr->matched_var == NULL) return -1;
msr->highest_severity = 255; /* high, invalid value */
msr->removed_rules = apr_array_make(msr->mp, 16, sizeof(char *));
if (msr->removed_rules == NULL) return -1;
msr->removed_rules_tag = apr_array_make(msr->mp, 16, sizeof(char *));
if (msr->removed_rules_tag == NULL) return -1;
msr->removed_rules_msg = apr_array_make(msr->mp, 16, sizeof(char *));
if (msr->removed_rules_msg == NULL) return -1;
return 1;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-703', 'CWE-264'], 'message': 'Fix Chunked string case sensitive issue - CVE-2013-5705'</s>
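For reference, the bug is the case-sensitive strstr() probe for "chunked" in the Transfer-Encoding header in modsecurity_tx_init() above, which lets "Transfer-Encoding: Chunked" slip past inspection. The fix amounts to a case-insensitive match; in this sketch, m_strcasestr() stands in for whatever case-insensitive substring helper the codebase provides:

    if ((transfer_encoding != NULL) &&
        (m_strcasestr(transfer_encoding, "chunked") != NULL)) {
        msr->reqbody_should_exist = 1;
        msr->reqbody_chunked = 1;
    }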
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
struct ath_node *an)
{
struct ath_atx_tid *tid;
struct ath_atx_ac *ac;
struct ath_txq *txq;
bool buffered;
int tidno;
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
if (!tid->sched)
continue;
ac = tid->ac;
txq = ac->txq;
ath_txq_lock(sc, txq);
buffered = ath_tid_has_buffered(tid);
tid->sched = false;
list_del(&tid->list);
if (ac->sched) {
ac->sched = false;
list_del(&ac->list);
}
ath_txq_unlock(sc, txq);
ieee80211_sta_set_buffered(sta, tidno, buffered);
}
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-362', 'CWE-241'], 'message': 'ath9k: protect tid->sched check
We check tid->sched without a lock taken on ath_tx_aggr_sleep(). That
is race condition which can result of doing list_del(&tid->list) twice
(second time with poisoned list node) and cause crash like shown below:
[424271.637220] BUG: unable to handle kernel paging request at 00100104
[424271.637328] IP: [<f90fc072>] ath_tx_aggr_sleep+0x62/0xe0 [ath9k]
...
[424271.639953] Call Trace:
[424271.639998] [<f90f6900>] ? ath9k_get_survey+0x110/0x110 [ath9k]
[424271.640083] [<f90f6942>] ath9k_sta_notify+0x42/0x50 [ath9k]
[424271.640177] [<f809cfef>] sta_ps_start+0x8f/0x1c0 [mac80211]
[424271.640258] [<c10f730e>] ? free_compound_page+0x2e/0x40
[424271.640346] [<f809e915>] ieee80211_rx_handlers+0x9d5/0x2340 [mac80211]
[424271.640437] [<c112f048>] ? kmem_cache_free+0x1d8/0x1f0
[424271.640510] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640578] [<c10fc23c>] ? put_page+0x2c/0x40
[424271.640640] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640706] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640787] [<f809dde3>] ? ieee80211_rx_handlers_result+0x73/0x1d0 [mac80211]
[424271.640897] [<f80a07a0>] ieee80211_prepare_and_rx_handle+0x520/0xad0 [mac80211]
[424271.641009] [<f809e22d>] ? ieee80211_rx_handlers+0x2ed/0x2340 [mac80211]
[424271.641104] [<c13846ce>] ? ip_output+0x7e/0xd0
[424271.641182] [<f80a1057>] ieee80211_rx+0x307/0x7c0 [mac80211]
[424271.641266] [<f90fa6ee>] ath_rx_tasklet+0x88e/0xf70 [ath9k]
[424271.641358] [<f80a0f2c>] ? ieee80211_rx+0x1dc/0x7c0 [mac80211]
[424271.641445] [<f90f82db>] ath9k_tasklet+0xcb/0x130 [ath9k]
Bug report:
https://bugzilla.kernel.org/show_bug.cgi?id=70551
Reported-and-tested-by: Max Sydorenko <[email protected]>
Cc: [email protected]
Signed-off-by: Stanislaw Gruszka <[email protected]>
Signed-off-by: John W. Linville <[email protected]>'</s>
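For reference, the race is closed by taking the queue lock before testing tid->sched, so the loop body above becomes:

    ac = tid->ac;
    txq = ac->txq;
    ath_txq_lock(sc, txq);
    if (!tid->sched) {              /* now checked under the lock */
        ath_txq_unlock(sc, txq);
        continue;
    }
    buffered = ath_tid_has_buffered(tid);
    tid->sched = false;
    list_del(&tid->list);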
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int rds_iw_laddr_check(__be32 addr)
{
int ret;
struct rdma_cm_id *cm_id;
struct sockaddr_in sin;
/* Create a CMA ID and try to bind it. This catches both
* IB and iWARP capable NICs.
*/
cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
memset(&sin, 0, sizeof(sin));
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = addr;
/* rdma_bind_addr will only succeed for IB & iWARP devices */
ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
/* due to this, we will claim to support IB devices unless we
check node_type. */
if (ret || cm_id->device->node_type != RDMA_NODE_RNIC)
ret = -EADDRNOTAVAIL;
rdsdebug("addr %pI4 ret %d node type %d\n",
&addr, ret,
cm_id->device ? cm_id->device->node_type : -1);
rdma_destroy_id(cm_id);
return ret;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-476'], 'message': 'rds: prevent dereference of a NULL device in rds_iw_laddr_check
Binding might result in a NULL device which is later dereferenced
without checking.
Signed-off-by: Sasha Levin <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
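For reference, the fix is a one-line strengthening of the condition in rds_iw_laddr_check() above: rdma_bind_addr() can succeed yet leave cm_id->device NULL, so it must be tested before node_type is read:

    if (ret || !cm_id->device ||
        cm_id->device->node_type != RDMA_NODE_RNIC)
        ret = -EADDRNOTAVAIL;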
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static const char *check_secret(int module, const char *user, const char *group,
const char *challenge, const char *pass)
{
char line[1024];
char pass2[MAX_DIGEST_LEN*2];
const char *fname = lp_secrets_file(module);
STRUCT_STAT st;
int fd, ok = 1;
int user_len = strlen(user);
int group_len = group ? strlen(group) : 0;
char *err;
if (!fname || !*fname || (fd = open(fname, O_RDONLY)) < 0)
return "no secrets file";
if (do_fstat(fd, &st) == -1) {
rsyserr(FLOG, errno, "fstat(%s)", fname);
ok = 0;
} else if (lp_strict_modes(module)) {
if ((st.st_mode & 06) != 0) {
rprintf(FLOG, "secrets file must not be other-accessible (see strict modes option)\n");
ok = 0;
} else if (MY_UID() == 0 && st.st_uid != 0) {
rprintf(FLOG, "secrets file must be owned by root when running as root (see strict modes)\n");
ok = 0;
}
}
if (!ok) {
close(fd);
return "ignoring secrets file";
}
if (*user == '#') {
/* Reject attempt to match a comment. */
close(fd);
return "invalid username";
}
/* Try to find a line that starts with the user (or @group) name and a ':'. */
err = "secret not found";
while ((user || group) && read_line_old(fd, line, sizeof line, 1)) {
const char **ptr, *s;
int len;
if (*line == '@') {
ptr = &group;
len = group_len;
s = line+1;
} else {
ptr = &user;
len = user_len;
s = line;
}
if (!*ptr || strncmp(s, *ptr, len) != 0 || s[len] != ':')
continue;
generate_hash(s+len+1, challenge, pass2);
if (strcmp(pass, pass2) == 0) {
err = NULL;
break;
}
err = "password mismatch";
*ptr = NULL; /* Don't look for name again. */
}
close(fd);
memset(line, 0, sizeof line);
memset(pass2, 0, sizeof pass2);
return err;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'Avoid infinite wait reading secrets file.'</s>
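For reference, one plausible reading of this one-line message is that read_line_old() never reaches EOF on a FIFO or device node, so check_secret() should insist on a regular file after the fstat() above. This is a sketch of that reading, not necessarily the upstream change:

    } else if (!S_ISREG(st.st_mode)) {
        rprintf(FLOG, "secrets file must be a regular file\n");
        ok = 0;
    }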
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
int i, ret;
uint32_t num;
uint32_t features;
uint32_t supported_features;
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
if (k->load_config) {
ret = k->load_config(qbus->parent, f);
if (ret)
return ret;
}
qemu_get_8s(f, &vdev->status);
qemu_get_8s(f, &vdev->isr);
qemu_get_be16s(f, &vdev->queue_sel);
if (vdev->queue_sel >= VIRTIO_PCI_QUEUE_MAX) {
return -1;
}
qemu_get_be32s(f, &features);
if (virtio_set_features(vdev, features) < 0) {
supported_features = k->get_features(qbus->parent);
error_report("Features 0x%x unsupported. Allowed features: 0x%x",
features, supported_features);
return -1;
}
vdev->config_len = qemu_get_be32(f);
qemu_get_buffer(f, vdev->config, vdev->config_len);
num = qemu_get_be32(f);
if (num > VIRTIO_PCI_QUEUE_MAX) {
error_report("Invalid number of PCI queues: 0x%x", num);
return -1;
}
for (i = 0; i < num; i++) {
vdev->vq[i].vring.num = qemu_get_be32(f);
if (k->has_variable_vring_alignment) {
vdev->vq[i].vring.align = qemu_get_be32(f);
}
vdev->vq[i].pa = qemu_get_be64(f);
qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
vdev->vq[i].signalled_used_valid = false;
vdev->vq[i].notification = true;
if (vdev->vq[i].pa) {
uint16_t nheads;
virtqueue_init(&vdev->vq[i]);
nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
/* Check it isn't doing very strange things with descriptor numbers. */
if (nheads > vdev->vq[i].vring.num) {
error_report("VQ %d size 0x%x Guest index 0x%x "
"inconsistent with Host index 0x%x: delta 0x%x",
i, vdev->vq[i].vring.num,
vring_avail_idx(&vdev->vq[i]),
vdev->vq[i].last_avail_idx, nheads);
return -1;
}
} else if (vdev->vq[i].last_avail_idx) {
error_report("VQ %d address 0x0 "
"inconsistent with Host index 0x%x",
i, vdev->vq[i].last_avail_idx);
return -1;
}
if (k->load_queue) {
ret = k->load_queue(qbus->parent, i, f);
if (ret)
return ret;
}
}
virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
return 0;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'virtio: validate config_len on load
Malformed input can have config_len in migration stream
exceed the array size allocated on destination, the
result will be heap overflow.
To fix, that config_len matches on both sides.
CVE-2014-0182
Reported-by: "Dr. David Alan Gilbert" <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Juan Quintela <[email protected]>
--
v2: use %ix and %zx to print config_len values
Signed-off-by: Juan Quintela <[email protected]>'</s>
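For reference, the validation this message describes lands where virtio_load() above reads config_len from the stream and immediately trusts it. A strict sketch (the upstream patch may instead tolerate and skip excess bytes, but the bound is the point):

    uint32_t config_len = qemu_get_be32(f);
    if (config_len != vdev->config_len) {
        error_report("Unexpected config length 0x%x (expected 0x%zx)",
                     config_len, vdev->config_len);
        return -1;
    }
    qemu_get_buffer(f, vdev->config, vdev->config_len);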
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr)
{
const unsigned char *b = buf;
DECLARE_WAITQUEUE(wait, current);
int c;
ssize_t retval = 0;
/* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) {
retval = tty_check_change(tty);
if (retval)
return retval;
}
down_read(&tty->termios_rwsem);
/* Write out any echoed characters that are still pending */
process_echoes(tty);
add_wait_queue(&tty->write_wait, &wait);
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
if (tty_hung_up_p(file) || (tty->link && !tty->link->count)) {
retval = -EIO;
break;
}
if (O_OPOST(tty)) {
while (nr > 0) {
ssize_t num = process_output_block(tty, b, nr);
if (num < 0) {
if (num == -EAGAIN)
break;
retval = num;
goto break_out;
}
b += num;
nr -= num;
if (nr == 0)
break;
c = *b;
if (process_output(c, tty) < 0)
break;
b++; nr--;
}
if (tty->ops->flush_chars)
tty->ops->flush_chars(tty);
} else {
while (nr > 0) {
c = tty->ops->write(tty, b, nr);
if (c < 0) {
retval = c;
goto break_out;
}
if (!c)
break;
b += c;
nr -= c;
}
}
if (!nr)
break;
if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
break;
}
up_read(&tty->termios_rwsem);
schedule();
down_read(&tty->termios_rwsem);
}
break_out:
__set_current_state(TASK_RUNNING);
remove_wait_queue(&tty->write_wait, &wait);
if (b - buf != nr && tty->fasync)
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
up_read(&tty->termios_rwsem);
return (b - buf) ? b - buf : retval;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-362'], 'message': 'n_tty: Fix n_tty_write crash when echoing in raw mode
The tty atomic_write_lock does not provide an exclusion guarantee for
the tty driver if the termios settings are LECHO & !OPOST. And since
it is unexpected and not allowed to call TTY buffer helpers like
tty_insert_flip_string concurrently, this may lead to crashes when
concurrent writers call pty_write. In that case the following two
writers:
* the ECHOing from a workqueue and
* pty_write from the process
race and can overflow the corresponding TTY buffer like follows.
If we look into tty_insert_flip_string_fixed_flag, there is:
int space = __tty_buffer_request_room(port, goal, flags);
struct tty_buffer *tb = port->buf.tail;
...
memcpy(char_buf_ptr(tb, tb->used), chars, space);
...
tb->used += space;
so the race of the two can result in something like this:
A B
__tty_buffer_request_room
__tty_buffer_request_room
memcpy(buf(tb->used), ...)
tb->used += space;
memcpy(buf(tb->used), ...) ->BOOM
B's memcpy is past the tty_buffer due to the previous A's tb->used
increment.
Since the N_TTY line discipline input processing can output
concurrently with a tty write, obtain the N_TTY ldisc output_lock to
serialize echo output with normal tty writes. This ensures the tty
buffer helper tty_insert_flip_string is not called concurrently and
everything is fine.
Note that this is nicely reproducible by an ordinary user using
forkpty and some setup around that (raw termios + ECHO). And it is
present in kernels at least after commit
d945cb9cce20ac7143c2de8d88b187f62db99bdc (pty: Rework the pty layer to
use the normal buffering logic) in 2.6.31-rc3.
js: add more info to the commit log
js: switch to bool
js: lock unconditionally
js: lock only the tty->ops->write call
References: CVE-2014-0196
Reported-and-tested-by: Jiri Slaby <[email protected]>
Signed-off-by: Peter Hurley <[email protected]>
Signed-off-by: Jiri Slaby <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Alan Cox <[email protected]>
Cc: <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>'</s>
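For reference, "lock only the tty->ops->write call" from the notes above looks roughly like this inside the !O_OPOST branch of n_tty_write(); ldata is the ldisc private data obtained from tty->disc_data:

    struct n_tty_data *ldata = tty->disc_data;

    /* Serialize with echo output, which runs from a workqueue, so the
     * two writers cannot race __tty_buffer_request_room() as shown
     * above. */
    mutex_lock(&ldata->output_lock);
    c = tty->ops->write(tty, b, nr);
    mutex_unlock(&ldata->output_lock);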
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct rtcanmsg *r;
struct cgw_job *gwj;
u8 limhops = 0;
int err = 0;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (nlmsg_len(nlh) < sizeof(*r))
return -EINVAL;
r = nlmsg_data(nlh);
if (r->can_family != AF_CAN)
return -EPFNOSUPPORT;
/* so far we only support CAN -> CAN routings */
if (r->gwtype != CGW_TYPE_CAN_CAN)
return -EINVAL;
gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
if (!gwj)
return -ENOMEM;
gwj->handled_frames = 0;
gwj->dropped_frames = 0;
gwj->deleted_frames = 0;
gwj->flags = r->flags;
gwj->gwtype = r->gwtype;
err = cgw_parse_attr(nlh, &gwj->mod, CGW_TYPE_CAN_CAN, &gwj->ccgw,
&limhops);
if (err < 0)
goto out;
err = -ENODEV;
/* ifindex == 0 is not allowed for job creation */
if (!gwj->ccgw.src_idx || !gwj->ccgw.dst_idx)
goto out;
gwj->src.dev = __dev_get_by_index(&init_net, gwj->ccgw.src_idx);
if (!gwj->src.dev)
goto out;
if (gwj->src.dev->type != ARPHRD_CAN)
goto out;
gwj->dst.dev = __dev_get_by_index(&init_net, gwj->ccgw.dst_idx);
if (!gwj->dst.dev)
goto out;
if (gwj->dst.dev->type != ARPHRD_CAN)
goto out;
gwj->limit_hops = limhops;
ASSERT_RTNL();
err = cgw_register_filter(gwj);
if (!err)
hlist_add_head_rcu(&gwj->list, &cgw_list);
out:
if (err)
kmem_cache_free(cgw_cache, gwj);
return err;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'net: Use netlink_ns_capable to verify the permisions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
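For reference, the replacement this message describes is mechanical; for cgw_create_job() above the capability test becomes a check against the socket's opener rather than the current process:

    if (!netlink_capable(skb, CAP_NET_ADMIN))
        return -EPERM;

The same one-line change applies to the other bare capable()/ns_capable() call sites quoted in the entries below.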
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct cgw_job *gwj = NULL;
struct hlist_node *nx;
struct rtcanmsg *r;
struct cf_mod mod;
struct can_can_gw ccgw;
u8 limhops = 0;
int err = 0;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (nlmsg_len(nlh) < sizeof(*r))
return -EINVAL;
r = nlmsg_data(nlh);
if (r->can_family != AF_CAN)
return -EPFNOSUPPORT;
/* so far we only support CAN -> CAN routings */
if (r->gwtype != CGW_TYPE_CAN_CAN)
return -EINVAL;
err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
if (err < 0)
return err;
/* two interface indices both set to 0 => remove all entries */
if (!ccgw.src_idx && !ccgw.dst_idx) {
cgw_remove_all_jobs();
return 0;
}
err = -EINVAL;
ASSERT_RTNL();
/* remove only the first matching entry */
hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
if (gwj->flags != r->flags)
continue;
if (gwj->limit_hops != limhops)
continue;
if (memcmp(&gwj->mod, &mod, sizeof(mod)))
continue;
/* if (r->gwtype == CGW_TYPE_CAN_CAN) - is made sure here */
if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
continue;
hlist_del(&gwj->list);
cgw_unregister_filter(gwj);
kmem_cache_free(cgw_cache, gwj);
err = 0;
break;
}
return err;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'net: Use netlink_ns_capable to verify the permisions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
int num = 0, s_num = cb->args[0];
struct packet_diag_req *req;
struct net *net;
struct sock *sk;
bool may_report_filterinfo;
net = sock_net(skb->sk);
req = nlmsg_data(cb->nlh);
may_report_filterinfo = ns_capable(net->user_ns, CAP_NET_ADMIN);
mutex_lock(&net->packet.sklist_lock);
sk_for_each(sk, &net->packet.sklist) {
if (!net_eq(sock_net(sk), net))
continue;
if (num < s_num)
goto next;
if (sk_diag_fill(sk, skb, req,
may_report_filterinfo,
sk_user_ns(NETLINK_CB(cb->skb).sk),
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
sock_i_ino(sk)) < 0)
goto done;
next:
num++;
}
done:
mutex_unlock(&net->packet.sklist_lock);
cb->args[0] = num;
return skb->len;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'net: Use netlink_ns_capable to verify the permissions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct ifinfomsg *ifm;
struct net_device *dev;
int err;
struct nlattr *tb[IFLA_MAX+1];
char ifname[IFNAMSIZ];
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
if (err < 0)
goto errout;
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
else
ifname[0] = '\0';
err = -EINVAL;
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
else if (tb[IFLA_IFNAME])
dev = __dev_get_by_name(net, ifname);
else
goto errout;
if (dev == NULL) {
err = -ENODEV;
goto errout;
}
err = validate_linkmsg(dev, tb);
if (err < 0)
goto errout;
err = do_setlink(dev, ifm, tb, ifname, 0);
errout:
return err;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'net: Use netlink_ns_capable to verify the permissions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
return;
if (!capable(CAP_NET_ADMIN))
RCV_SKB_FAIL(-EPERM);
/* Eventually we might send routing messages too */
RCV_SKB_FAIL(-EINVAL);
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'net: Use netlink_ns_capable to verify the permissions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *rep_buf;
struct nlmsghdr *rep_nlh;
struct nlmsghdr *req_nlh = info->nlhdr;
struct tipc_genlmsghdr *req_userhdr = info->userhdr;
int hdr_space = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
u16 cmd;
if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
cmd = TIPC_CMD_NOT_NET_ADMIN;
else
cmd = req_userhdr->cmd;
rep_buf = tipc_cfg_do_cmd(req_userhdr->dest, cmd,
nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
hdr_space);
if (rep_buf) {
skb_push(rep_buf, hdr_space);
rep_nlh = nlmsg_hdr(rep_buf);
memcpy(rep_nlh, req_nlh, hdr_space);
rep_nlh->nlmsg_len = rep_buf->len;
genlmsg_unicast(&init_net, rep_buf, NETLINK_CB(skb).portid);
}
return 0;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'net: Use netlink_ns_capable to verify the permissions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct ndmsg *ndm;
struct nlattr *tb[NDA_MAX+1];
struct net_device *dev;
int err = -EINVAL;
__u8 *addr;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
if (err < 0)
return err;
ndm = nlmsg_data(nlh);
if (ndm->ndm_ifindex == 0) {
pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n");
return -EINVAL;
}
dev = __dev_get_by_index(net, ndm->ndm_ifindex);
if (dev == NULL) {
pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n");
return -ENODEV;
}
if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid address\n");
return -EINVAL;
}
addr = nla_data(tb[NDA_LLADDR]);
err = -EOPNOTSUPP;
/* Support fdb on master device the net/bridge default case */
if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
(dev->priv_flags & IFF_BRIDGE_PORT)) {
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
const struct net_device_ops *ops = br_dev->netdev_ops;
if (ops->ndo_fdb_del)
err = ops->ndo_fdb_del(ndm, tb, dev, addr);
if (err)
goto out;
else
ndm->ndm_flags &= ~NTF_MASTER;
}
/* Embedded bridge, macvlan, and any other device support */
if (ndm->ndm_flags & NTF_SELF) {
if (dev->netdev_ops->ndo_fdb_del)
err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr);
else
err = ndo_dflt_fdb_del(ndm, tb, dev, addr);
if (!err) {
rtnl_fdb_notify(dev, addr, RTM_DELNEIGH);
ndm->ndm_flags &= ~NTF_SELF;
}
}
out:
return err;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'net: Use netlink_ns_capable to verify the permissions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct nlattr *attrs[XFRMA_MAX+1];
const struct xfrm_link *link;
int type, err;
type = nlh->nlmsg_type;
if (type > XFRM_MSG_MAX)
return -EINVAL;
type -= XFRM_MSG_BASE;
link = &xfrm_dispatch[type];
/* All operations require privileges, even GET */
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
(nlh->nlmsg_flags & NLM_F_DUMP)) {
if (link->dump == NULL)
return -EINVAL;
{
struct netlink_dump_control c = {
.dump = link->dump,
.done = link->done,
};
return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
}
}
err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
xfrma_policy);
if (err < 0)
return err;
if (link->doit == NULL)
return -EINVAL;
return link->doit(skb, nlh, attrs);
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'net: Use netlink_ns_capable to verify the permissions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[IFA_MAX+1];
struct net_device *dev;
struct ifaddrmsg *ifm;
int err;
u8 pnaddr;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ASSERT_RTNL();
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_phonet_policy);
if (err < 0)
return err;
ifm = nlmsg_data(nlh);
if (tb[IFA_LOCAL] == NULL)
return -EINVAL;
pnaddr = nla_get_u8(tb[IFA_LOCAL]);
if (pnaddr & 3)
/* Phonet addresses only have 6 high-order bits */
return -EINVAL;
dev = __dev_get_by_index(net, ifm->ifa_index);
if (dev == NULL)
return -ENODEV;
if (nlh->nlmsg_type == RTM_NEWADDR)
err = phonet_address_add(dev, pnaddr);
else
err = phonet_address_del(dev, pnaddr);
if (!err)
phonet_address_notify(nlh->nlmsg_type, dev, pnaddr);
return err;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'net: Use netlink_ns_capable to verify the permissions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: scsi_nl_rcv_msg(struct sk_buff *skb)
{
struct nlmsghdr *nlh;
struct scsi_nl_hdr *hdr;
u32 rlen;
int err, tport;
while (skb->len >= NLMSG_HDRLEN) {
err = 0;
nlh = nlmsg_hdr(skb);
if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) ||
(skb->len < nlh->nlmsg_len)) {
printk(KERN_WARNING "%s: discarding partial skb\n",
__func__);
return;
}
rlen = NLMSG_ALIGN(nlh->nlmsg_len);
if (rlen > skb->len)
rlen = skb->len;
if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) {
err = -EBADMSG;
goto next_msg;
}
hdr = nlmsg_data(nlh);
if ((hdr->version != SCSI_NL_VERSION) ||
(hdr->magic != SCSI_NL_MAGIC)) {
err = -EPROTOTYPE;
goto next_msg;
}
if (!capable(CAP_SYS_ADMIN)) {
err = -EPERM;
goto next_msg;
}
if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) {
printk(KERN_WARNING "%s: discarding partial message\n",
__func__);
goto next_msg;
}
/*
* Deliver message to the appropriate transport
*/
tport = hdr->transport;
if (tport == SCSI_NL_TRANSPORT) {
switch (hdr->msgtype) {
case SCSI_NL_SHOST_VENDOR:
/* Locate the driver that corresponds to the message */
err = -ESRCH;
break;
default:
err = -EBADR;
break;
}
if (err)
printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
__func__, hdr->msgtype, err);
}
else
err = -ENOENT;
next_msg:
if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
netlink_ack(skb, nlh, err);
skb_pull(skb, rlen);
}
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'net: Use netlink_ns_capable to verify the permissions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int genl_family_rcv_msg(struct genl_family *family,
struct sk_buff *skb,
struct nlmsghdr *nlh)
{
const struct genl_ops *ops;
struct net *net = sock_net(skb->sk);
struct genl_info info;
struct genlmsghdr *hdr = nlmsg_data(nlh);
struct nlattr **attrbuf;
int hdrlen, err;
/* this family doesn't exist in this netns */
if (!family->netnsok && !net_eq(net, &init_net))
return -ENOENT;
hdrlen = GENL_HDRLEN + family->hdrsize;
if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
return -EINVAL;
ops = genl_get_cmd(hdr->cmd, family);
if (ops == NULL)
return -EOPNOTSUPP;
if ((ops->flags & GENL_ADMIN_PERM) &&
!capable(CAP_NET_ADMIN))
return -EPERM;
if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
int rc;
if (ops->dumpit == NULL)
return -EOPNOTSUPP;
if (!family->parallel_ops) {
struct netlink_dump_control c = {
.module = family->module,
/* we have const, but the netlink API doesn't */
.data = (void *)ops,
.dump = genl_lock_dumpit,
.done = genl_lock_done,
};
genl_unlock();
rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
genl_lock();
} else {
struct netlink_dump_control c = {
.module = family->module,
.dump = ops->dumpit,
.done = ops->done,
};
rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
}
return rc;
}
if (ops->doit == NULL)
return -EOPNOTSUPP;
if (family->maxattr && family->parallel_ops) {
attrbuf = kmalloc((family->maxattr+1) *
sizeof(struct nlattr *), GFP_KERNEL);
if (attrbuf == NULL)
return -ENOMEM;
} else
attrbuf = family->attrbuf;
if (attrbuf) {
err = nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
ops->policy);
if (err < 0)
goto out;
}
info.snd_seq = nlh->nlmsg_seq;
info.snd_portid = NETLINK_CB(skb).portid;
info.nlhdr = nlh;
info.genlhdr = nlmsg_data(nlh);
info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
info.attrs = attrbuf;
info.dst_sk = skb->sk;
genl_info_net_set(&info, net);
memset(&info.user_ptr, 0, sizeof(info.user_ptr));
if (family->pre_doit) {
err = family->pre_doit(ops, skb, &info);
if (err)
goto out;
}
err = ops->doit(skb, &info);
if (family->post_doit)
family->post_doit(ops, skb, &info);
out:
if (family->parallel_ops)
kfree(attrbuf);
return err;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'net: Use netlink_ns_capable to verify the permissions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static void cn_proc_mcast_ctl(struct cn_msg *msg,
struct netlink_skb_parms *nsp)
{
enum proc_cn_mcast_op *mc_op = NULL;
int err = 0;
if (msg->len != sizeof(*mc_op))
return;
/*
* Events are reported with respect to the initial pid
* and user namespaces so ignore requestors from
* other namespaces.
*/
if ((current_user_ns() != &init_user_ns) ||
(task_active_pid_ns(current) != &init_pid_ns))
return;
/* Can only change if privileged. */
if (!capable(CAP_NET_ADMIN)) {
err = EPERM;
goto out;
}
mc_op = (enum proc_cn_mcast_op *)msg->data;
switch (*mc_op) {
case PROC_CN_MCAST_LISTEN:
atomic_inc(&proc_event_num_listeners);
break;
case PROC_CN_MCAST_IGNORE:
atomic_dec(&proc_event_num_listeners);
break;
default:
err = EINVAL;
break;
}
out:
cn_proc_ack(err, msg->seq, msg->ack);
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'net: Use netlink_ns_capable to verify the permissions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct nlattr *attrs[CRYPTOCFGA_MAX+1];
const struct crypto_link *link;
int type, err;
type = nlh->nlmsg_type;
if (type > CRYPTO_MSG_MAX)
return -EINVAL;
type -= CRYPTO_MSG_BASE;
link = &crypto_dispatch[type];
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
(nlh->nlmsg_flags & NLM_F_DUMP))) {
struct crypto_alg *alg;
u16 dump_alloc = 0;
if (link->dump == NULL)
return -EINVAL;
list_for_each_entry(alg, &crypto_alg_list, cra_list)
dump_alloc += CRYPTO_REPORT_MAXSIZE;
{
struct netlink_dump_control c = {
.dump = link->dump,
.done = link->done,
.min_dump_alloc = dump_alloc,
};
return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
}
}
err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
crypto_policy);
if (err < 0)
return err;
if (link->doit == NULL)
return -EINVAL;
return link->doit(skb, nlh, attrs);
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'net: Use netlink_ns_capable to verify the permissions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
struct net *net = sock_net(skb->sk);
struct tcmsg *tcm;
struct nlattr *tca[TCA_MAX + 1];
struct net_device *dev;
u32 clid;
struct Qdisc *q, *p;
int err;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
replay:
/* Reinit, just in case something touches this. */
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
if (err < 0)
return err;
tcm = nlmsg_data(n);
clid = tcm->tcm_parent;
q = p = NULL;
dev = __dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev)
return -ENODEV;
if (clid) {
if (clid != TC_H_ROOT) {
if (clid != TC_H_INGRESS) {
p = qdisc_lookup(dev, TC_H_MAJ(clid));
if (!p)
return -ENOENT;
q = qdisc_leaf(p, clid);
} else if (dev_ingress_queue_create(dev)) {
q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
q = dev->qdisc;
}
/* It may be default qdisc, ignore it */
if (q && q->handle == 0)
q = NULL;
if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
if (tcm->tcm_handle) {
if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
return -EEXIST;
if (TC_H_MIN(tcm->tcm_handle))
return -EINVAL;
q = qdisc_lookup(dev, tcm->tcm_handle);
if (!q)
goto create_n_graft;
if (n->nlmsg_flags & NLM_F_EXCL)
return -EEXIST;
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
return -EINVAL;
if (q == p ||
(p && check_loop(q, p, 0)))
return -ELOOP;
atomic_inc(&q->refcnt);
goto graft;
} else {
if (!q)
goto create_n_graft;
/* This magic test requires explanation.
*
* We know, that some child q is already
* attached to this parent and have choice:
* either to change it or to create/graft new one.
*
* 1. We are allowed to create/graft only
* if CREATE and REPLACE flags are set.
*
* 2. If EXCL is set, requestor wanted to say,
* that qdisc tcm_handle is not expected
* to exist, so that we choose create/graft too.
*
* 3. The last case is when no flags are set.
* Alas, it is sort of hole in API, we
* cannot decide what to do unambiguously.
* For now we select create/graft, if
* user gave KIND, which does not match existing.
*/
if ((n->nlmsg_flags & NLM_F_CREATE) &&
(n->nlmsg_flags & NLM_F_REPLACE) &&
((n->nlmsg_flags & NLM_F_EXCL) ||
(tca[TCA_KIND] &&
nla_strcmp(tca[TCA_KIND], q->ops->id))))
goto create_n_graft;
}
}
} else {
if (!tcm->tcm_handle)
return -EINVAL;
q = qdisc_lookup(dev, tcm->tcm_handle);
}
/* Change qdisc parameters */
if (q == NULL)
return -ENOENT;
if (n->nlmsg_flags & NLM_F_EXCL)
return -EEXIST;
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
return -EINVAL;
err = qdisc_change(q, tca);
if (err == 0)
qdisc_notify(net, skb, n, clid, NULL, q);
return err;
create_n_graft:
if (!(n->nlmsg_flags & NLM_F_CREATE))
return -ENOENT;
if (clid == TC_H_INGRESS) {
if (dev_ingress_queue(dev))
q = qdisc_create(dev, dev_ingress_queue(dev), p,
tcm->tcm_parent, tcm->tcm_parent,
tca, &err);
else
err = -ENOENT;
} else {
struct netdev_queue *dev_queue;
if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
dev_queue = p->ops->cl_ops->select_queue(p, tcm);
else if (p)
dev_queue = p->dev_queue;
else
dev_queue = netdev_get_tx_queue(dev, 0);
q = qdisc_create(dev, dev_queue, p,
tcm->tcm_parent, tcm->tcm_handle,
tca, &err);
}
if (q == NULL) {
if (err == -EAGAIN)
goto replay;
return err;
}
graft:
err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
if (err) {
if (q)
qdisc_destroy(q);
return err;
}
return 0;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'net: Use netlink_ns_capable to verify the permissions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct net_device *netdev;
struct dcbmsg *dcb = nlmsg_data(nlh);
struct nlattr *tb[DCB_ATTR_MAX + 1];
u32 portid = skb ? NETLINK_CB(skb).portid : 0;
int ret = -EINVAL;
struct sk_buff *reply_skb;
struct nlmsghdr *reply_nlh = NULL;
const struct reply_func *fn;
if ((nlh->nlmsg_type == RTM_SETDCB) && !capable(CAP_NET_ADMIN))
return -EPERM;
ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
dcbnl_rtnl_policy);
if (ret < 0)
return ret;
if (dcb->cmd > DCB_CMD_MAX)
return -EINVAL;
/* check if a reply function has been defined for the command */
fn = &reply_funcs[dcb->cmd];
if (!fn->cb)
return -EOPNOTSUPP;
if (!tb[DCB_ATTR_IFNAME])
return -EINVAL;
netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME]));
if (!netdev)
return -ENODEV;
if (!netdev->dcbnl_ops)
return -EOPNOTSUPP;
reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
nlh->nlmsg_flags, &reply_nlh);
if (!reply_skb)
return -ENOBUFS;
ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
if (ret < 0) {
nlmsg_free(reply_skb);
goto out;
}
nlmsg_end(reply_skb, reply_nlh);
ret = rtnl_unicast(reply_skb, net, portid);
out:
return ret;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'net: Use netlink_ns_capable to verify the permissions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int raw_cmd_copyin(int cmd, void __user *param,
struct floppy_raw_cmd **rcmd)
{
struct floppy_raw_cmd *ptr;
int ret;
int i;
*rcmd = NULL;
loop:
ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_USER);
if (!ptr)
return -ENOMEM;
*rcmd = ptr;
ret = copy_from_user(ptr, param, sizeof(*ptr));
if (ret)
return -EFAULT;
ptr->next = NULL;
ptr->buffer_length = 0;
param += sizeof(struct floppy_raw_cmd);
if (ptr->cmd_count > 33)
/* the command may now also take up the space
* initially intended for the reply & the
* reply count. Needed for long 82078 commands
* such as RESTORE, which takes ... 17 command
* bytes. Murphy's law #137: When you reserve
* 16 bytes for a structure, you'll one day
* discover that you really need 17...
*/
return -EINVAL;
for (i = 0; i < 16; i++)
ptr->reply[i] = 0;
ptr->resultcode = 0;
ptr->kernel_data = NULL;
if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
if (ptr->length <= 0)
return -EINVAL;
ptr->kernel_data = (char *)fd_dma_mem_alloc(ptr->length);
fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length);
if (!ptr->kernel_data)
return -ENOMEM;
ptr->buffer_length = ptr->length;
}
if (ptr->flags & FD_RAW_WRITE) {
ret = fd_copyin(ptr->data, ptr->kernel_data, ptr->length);
if (ret)
return ret;
}
if (ptr->flags & FD_RAW_MORE) {
rcmd = &(ptr->next);
ptr->rate &= 0x43;
goto loop;
}
return 0;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264', 'CWE-754'], 'message': 'floppy: ignore kernel-only members in FDRAWCMD ioctl input
Always clear out these floppy_raw_cmd struct members after copying the
entire structure from userspace so that the in-kernel version is always
valid and never left in an indeterminate state.
Signed-off-by: Matthew Daley <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>'</s>
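Editor's note: a minimal sketch of the hardening this message describes — overwriting the kernel-only members of struct floppy_raw_cmd immediately after the userspace copy, so they can never carry attacker-chosen pointers or lengths into later logic. The member list is taken from the snippet above; the exact placement is illustrative, not the verbatim patch.

ret = copy_from_user(ptr, param, sizeof(*ptr));
if (ret)
        return -EFAULT;
/* Clear out everything userspace has no business setting, before
 * any member is consulted or any error path can bail out early. */
ptr->next = NULL;
ptr->buffer_length = 0;
ptr->kernel_data = NULL;
memset(ptr->reply, 0, sizeof(ptr->reply));
ptr->resultcode = 0;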
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct inode *inode = dentry->d_inode;
int error = -EACCES;
/* We don't need a base pointer in the /proc filesystem */
path_put(&nd->path);
/* Are we allowed to snoop on the tasks file descriptors? */
if (!proc_fd_access_allowed(inode))
goto out;
error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
nd->last_type = LAST_BIND;
out:
return ERR_PTR(error);
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20', 'CWE-362', 'CWE-416'], 'message': 'fix autofs/afs/etc. magic mountpoint breakage
We end up trying to kfree() nd.last.name on open("/mnt/tmp", O_CREAT)
if /mnt/tmp is an autofs direct mount. The reason is that nd.last_type
is bogus here; we want LAST_BIND for everything of that kind and we
get LAST_NORM left over from finding parent directory.
So make sure that it *is* set properly; set to LAST_BIND before
doing ->follow_link() - for normal symlinks it will be changed
by __vfs_follow_link() and everything else needs it set that way.
Signed-off-by: Al Viro <[email protected]>'</s>
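Editor's note: the message's point is that nd->last_type must already be LAST_BIND when control reaches proc_get_link(), not assigned afterwards, or an early exit leaves a stale LAST_NORM behind. A sketch of the reordered tail of the function above (illustrative, not the verbatim patch):

/* Mark the walk as LAST_BIND *before* resolving the link, so no
 * error path can leave LAST_NORM behind for a caller that then
 * tries to kfree() nd.last.name. */
nd->last_type = LAST_BIND;
error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
out:
return ERR_PTR(error);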
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
spin_unlock(&hb->lock);
drop_futex_key_refs(&q->key);
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'futex: Fix errors in nested key ref-counting
futex_wait() is leaking key references due to futex_wait_setup()
acquiring an additional reference via the queue_lock() routine. The
nested key ref-counting has been masking bugs and complicating code
analysis. queue_lock() is only called with a previously ref-counted
key, so remove the additional ref-counting from the queue_(un)lock()
functions.
Also futex_wait_requeue_pi() drops one key reference too many in
unqueue_me_pi(). Remove the key reference handling from
unqueue_me_pi(). This was paired with a queue_lock() in
futex_lock_pi(), so the count remains unchanged.
Document remaining nested key ref-counting sites.
Signed-off-by: Darren Hart <[email protected]>
Reported-and-tested-by: Matthieu Fertré<[email protected]>
Reported-by: Louis Rilling<[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Eric Dumazet <[email protected]>
Cc: John Kacur <[email protected]>
Cc: Rusty Russell <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: [email protected]'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
u32 val, ktime_t *abs_time, u32 bitset,
int clockrt, u32 __user *uaddr2)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
struct rt_mutex *pi_mutex = NULL;
struct futex_hash_bucket *hb;
union futex_key key2;
struct futex_q q;
int res, ret;
if (!bitset)
return -EINVAL;
if (abs_time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
/*
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
debug_rt_mutex_init_waiter(&rt_waiter);
rt_waiter.task = NULL;
key2 = FUTEX_KEY_INIT;
ret = get_futex_key(uaddr2, fshared, &key2);
if (unlikely(ret != 0))
goto out;
q.pi_state = NULL;
q.bitset = bitset;
q.rt_waiter = &rt_waiter;
q.requeue_pi_key = &key2;
/* Prepare to wait on uaddr. */
ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
if (ret)
goto out_key2;
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
spin_lock(&hb->lock);
ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
spin_unlock(&hb->lock);
if (ret)
goto out_put_keys;
/*
* In order for us to be here, we know our q.key == key2, and since
* we took the hb->lock above, we also know that futex_requeue() has
* completed and we no longer have to concern ourselves with a wakeup
* race with the atomic proxy lock acquisition by the requeue code.
*/
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
/*
* Got the lock. We might not be the anticipated owner if we
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current,
fshared);
spin_unlock(q.lock_ptr);
}
} else {
/*
* We have been woken up by futex_unlock_pi(), a timeout, or a
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
* the pi_state.
*/
WARN_ON(!&q.pi_state);
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
debug_rt_mutex_free_waiter(&rt_waiter);
spin_lock(q.lock_ptr);
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
*/
res = fixup_owner(uaddr2, fshared, &q, !ret);
/*
* If fixup_owner() returned an error, propagate that. If it
* acquired the lock, clear -ETIMEDOUT or -EINTR.
*/
if (res)
ret = (res < 0) ? res : 0;
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}
/*
* If fixup_pi_state_owner() faulted and was unable to handle the
* fault, unlock the rt_mutex and return the fault to userspace.
*/
if (ret == -EFAULT) {
if (rt_mutex_owner(pi_mutex) == current)
rt_mutex_unlock(pi_mutex);
} else if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
* futex_lock_pi() directly. We could restart this syscall, but
* it would detect that the user space "val" changed and return
* -EWOULDBLOCK. Save the overhead of the restart and return
* -EWOULDBLOCK directly.
*/
ret = -EWOULDBLOCK;
}
out_put_keys:
put_futex_key(fshared, &q.key);
out_key2:
put_futex_key(fshared, &key2);
out:
if (to) {
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
}
return ret;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'futex: Fix errors in nested key ref-counting
futex_wait() is leaking key references due to futex_wait_setup()
acquiring an additional reference via the queue_lock() routine. The
nested key ref-counting has been masking bugs and complicating code
analysis. queue_lock() is only called with a previously ref-counted
key, so remove the additional ref-counting from the queue_(un)lock()
functions.
Also futex_wait_requeue_pi() drops one key reference too many in
unqueue_me_pi(). Remove the key reference handling from
unqueue_me_pi(). This was paired with a queue_lock() in
futex_lock_pi(), so the count remains unchanged.
Document remaining nested key ref-counting sites.
Signed-off-by: Darren Hart <[email protected]>
Reported-and-tested-by: Matthieu Fertré<[email protected]>
Reported-by: Louis Rilling<[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Eric Dumazet <[email protected]>
Cc: John Kacur <[email protected]>
Cc: Rusty Russell <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: [email protected]'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
struct futex_hash_bucket *hb;
get_futex_key_refs(&q->key);
hb = hash_futex(&q->key);
q->lock_ptr = &hb->lock;
spin_lock(&hb->lock);
return hb;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'futex: Fix errors in nested key ref-counting
futex_wait() is leaking key references due to futex_wait_setup()
acquiring an additional reference via the queue_lock() routine. The
nested key ref-counting has been masking bugs and complicating code
analysis. queue_lock() is only called with a previously ref-counted
key, so remove the additional ref-counting from the queue_(un)lock()
functions.
Also futex_wait_requeue_pi() drops one key reference too many in
unqueue_me_pi(). Remove the key reference handling from
unqueue_me_pi(). This was paired with a queue_lock() in
futex_lock_pi(), so the count remains unchanged.
Document remaining nested key ref-counting sites.
Signed-off-by: Darren Hart <[email protected]>
Reported-and-tested-by: Matthieu Fertré<[email protected]>
Reported-by: Louis Rilling<[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Eric Dumazet <[email protected]>
Cc: John Kacur <[email protected]>
Cc: Rusty Russell <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: [email protected]'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
unsigned long, arg4, unsigned long, arg5)
{
struct task_struct *me = current;
unsigned char comm[sizeof(me->comm)];
long error;
error = security_task_prctl(option, arg2, arg3, arg4, arg5);
if (error != -ENOSYS)
return error;
error = 0;
switch (option) {
case PR_SET_PDEATHSIG:
if (!valid_signal(arg2)) {
error = -EINVAL;
break;
}
me->pdeath_signal = arg2;
error = 0;
break;
case PR_GET_PDEATHSIG:
error = put_user(me->pdeath_signal, (int __user *)arg2);
break;
case PR_GET_DUMPABLE:
error = get_dumpable(me->mm);
break;
case PR_SET_DUMPABLE:
if (arg2 < 0 || arg2 > 1) {
error = -EINVAL;
break;
}
set_dumpable(me->mm, arg2);
error = 0;
break;
case PR_SET_UNALIGN:
error = SET_UNALIGN_CTL(me, arg2);
break;
case PR_GET_UNALIGN:
error = GET_UNALIGN_CTL(me, arg2);
break;
case PR_SET_FPEMU:
error = SET_FPEMU_CTL(me, arg2);
break;
case PR_GET_FPEMU:
error = GET_FPEMU_CTL(me, arg2);
break;
case PR_SET_FPEXC:
error = SET_FPEXC_CTL(me, arg2);
break;
case PR_GET_FPEXC:
error = GET_FPEXC_CTL(me, arg2);
break;
case PR_GET_TIMING:
error = PR_TIMING_STATISTICAL;
break;
case PR_SET_TIMING:
if (arg2 != PR_TIMING_STATISTICAL)
error = -EINVAL;
else
error = 0;
break;
case PR_SET_NAME:
comm[sizeof(me->comm)-1] = 0;
if (strncpy_from_user(comm, (char __user *)arg2,
sizeof(me->comm) - 1) < 0)
return -EFAULT;
set_task_comm(me, comm);
proc_comm_connector(me);
return 0;
case PR_GET_NAME:
get_task_comm(comm, me);
if (copy_to_user((char __user *)arg2, comm,
sizeof(comm)))
return -EFAULT;
return 0;
case PR_GET_ENDIAN:
error = GET_ENDIAN(me, arg2);
break;
case PR_SET_ENDIAN:
error = SET_ENDIAN(me, arg2);
break;
case PR_GET_SECCOMP:
error = prctl_get_seccomp();
break;
case PR_SET_SECCOMP:
error = prctl_set_seccomp(arg2);
break;
case PR_GET_TSC:
error = GET_TSC_CTL(arg2);
break;
case PR_SET_TSC:
error = SET_TSC_CTL(arg2);
break;
case PR_TASK_PERF_EVENTS_DISABLE:
error = perf_event_task_disable();
break;
case PR_TASK_PERF_EVENTS_ENABLE:
error = perf_event_task_enable();
break;
case PR_GET_TIMERSLACK:
error = current->timer_slack_ns;
break;
case PR_SET_TIMERSLACK:
if (arg2 <= 0)
current->timer_slack_ns =
current->default_timer_slack_ns;
else
current->timer_slack_ns = arg2;
error = 0;
break;
case PR_MCE_KILL:
if (arg4 | arg5)
return -EINVAL;
switch (arg2) {
case PR_MCE_KILL_CLEAR:
if (arg3 != 0)
return -EINVAL;
current->flags &= ~PF_MCE_PROCESS;
break;
case PR_MCE_KILL_SET:
current->flags |= PF_MCE_PROCESS;
if (arg3 == PR_MCE_KILL_EARLY)
current->flags |= PF_MCE_EARLY;
else if (arg3 == PR_MCE_KILL_LATE)
current->flags &= ~PF_MCE_EARLY;
else if (arg3 == PR_MCE_KILL_DEFAULT)
current->flags &=
~(PF_MCE_EARLY|PF_MCE_PROCESS);
else
return -EINVAL;
break;
default:
return -EINVAL;
}
error = 0;
break;
case PR_MCE_KILL_GET:
if (arg2 | arg3 | arg4 | arg5)
return -EINVAL;
if (current->flags & PF_MCE_PROCESS)
error = (current->flags & PF_MCE_EARLY) ?
PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
else
error = PR_MCE_KILL_DEFAULT;
break;
case PR_SET_MM:
error = prctl_set_mm(arg2, arg3, arg4, arg5);
break;
case PR_SET_CHILD_SUBREAPER:
me->signal->is_child_subreaper = !!arg2;
error = 0;
break;
case PR_GET_CHILD_SUBREAPER:
error = put_user(me->signal->is_child_subreaper,
(int __user *) arg2);
break;
default:
error = -EINVAL;
break;
}
return error;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'Add PR_{GET,SET}_NO_NEW_PRIVS to prevent execve from granting privs
With this change, calling
prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
disables privilege granting operations at execve-time. For example, a
process will not be able to execute a setuid binary to change their uid
or gid if this bit is set. The same is true for file capabilities.
Additionally, LSM_UNSAFE_NO_NEW_PRIVS is defined to ensure that
LSMs respect the requested behavior.
To determine if the NO_NEW_PRIVS bit is set, a task may call
prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
It returns 1 if set and 0 if it is not set. If any of the arguments are
non-zero, it will return -1 and set errno to -EINVAL.
(PR_SET_NO_NEW_PRIVS behaves similarly.)
This functionality is desired for the proposed seccomp filter patch
series. By using PR_SET_NO_NEW_PRIVS, it allows a task to modify the
system call behavior for itself and its child tasks without being
able to impact the behavior of a more privileged task.
Another potential use is making certain privileged operations
unprivileged. For example, chroot may be considered "safe" if it cannot
affect privileged tasks.
Note, this patch causes execve to fail when PR_SET_NO_NEW_PRIVS is
set and AppArmor is in use. It is fixed in a subsequent patch.
Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Will Drewry <[email protected]>
Acked-by: Eric Paris <[email protected]>
Acked-by: Kees Cook <[email protected]>
v18: updated change desc
v17: using new define values as per 3.4
Signed-off-by: James Morris <[email protected]>'</s>
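Editor's note: the commit message spells out the userspace contract, so a self-contained usage sketch follows. It is Linux-specific and assumes a libc exposing sys/prctl.h; the fallback defines cover headers that predate the feature (the values match the v17 note above, "as per 3.4").

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#define PR_GET_NO_NEW_PRIVS 39
#endif

int main(void)
{
        /* One-way switch: after this, execve() may no longer grant
         * privileges (setuid bits, file capabilities) to this task
         * or its children. */
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
                perror("prctl(PR_SET_NO_NEW_PRIVS)");
                return 1;
        }
        /* Returns 1 once set; all extra arguments must be zero. */
        printf("no_new_privs = %d\n",
               prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));
        return 0;
}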
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int apparmor_bprm_set_creds(struct linux_binprm *bprm)
{
struct aa_task_cxt *cxt;
struct aa_profile *profile, *new_profile = NULL;
struct aa_namespace *ns;
char *buffer = NULL;
unsigned int state;
struct file_perms perms = {};
struct path_cond cond = {
bprm->file->f_path.dentry->d_inode->i_uid,
bprm->file->f_path.dentry->d_inode->i_mode
};
const char *name = NULL, *target = NULL, *info = NULL;
int error = cap_bprm_set_creds(bprm);
if (error)
return error;
if (bprm->cred_prepared)
return 0;
cxt = bprm->cred->security;
BUG_ON(!cxt);
profile = aa_get_profile(aa_newest_version(cxt->profile));
/*
* get the namespace from the replacement profile as replacement
* can change the namespace
*/
ns = profile->ns;
state = profile->file.start;
/* buffer freed below, name is pointer into buffer */
error = aa_path_name(&bprm->file->f_path, profile->path_flags, &buffer,
&name, &info);
if (error) {
if (profile->flags &
(PFLAG_IX_ON_NAME_ERROR | PFLAG_UNCONFINED))
error = 0;
name = bprm->filename;
goto audit;
}
/* Test for onexec first as onexec directives override other
* x transitions.
*/
if (unconfined(profile)) {
/* unconfined task */
if (cxt->onexec)
/* change_profile on exec already been granted */
new_profile = aa_get_profile(cxt->onexec);
else
new_profile = find_attach(ns, &ns->base.profiles, name);
if (!new_profile)
goto cleanup;
goto apply;
}
/* find exec permissions for name */
state = aa_str_perms(profile->file.dfa, state, name, &cond, &perms);
if (cxt->onexec) {
struct file_perms cp;
info = "change_profile onexec";
if (!(perms.allow & AA_MAY_ONEXEC))
goto audit;
/* test if this exec can be paired with change_profile onexec.
* onexec permission is linked to exec with a standard pairing
* exec\0change_profile
*/
state = aa_dfa_null_transition(profile->file.dfa, state);
cp = change_profile_perms(profile, cxt->onexec->ns,
cxt->onexec->base.name,
AA_MAY_ONEXEC, state);
if (!(cp.allow & AA_MAY_ONEXEC))
goto audit;
new_profile = aa_get_profile(aa_newest_version(cxt->onexec));
goto apply;
}
if (perms.allow & MAY_EXEC) {
/* exec permission determine how to transition */
new_profile = x_to_profile(profile, name, perms.xindex);
if (!new_profile) {
if (perms.xindex & AA_X_INHERIT) {
/* (p|c|n)ix - don't change profile but do
* use the newest version, which was picked
* up above when getting profile
*/
info = "ix fallback";
new_profile = aa_get_profile(profile);
goto x_clear;
} else if (perms.xindex & AA_X_UNCONFINED) {
new_profile = aa_get_profile(ns->unconfined);
info = "ux fallback";
} else {
error = -ENOENT;
info = "profile not found";
}
}
} else if (COMPLAIN_MODE(profile)) {
/* no exec permission - are we in learning mode */
new_profile = aa_new_null_profile(profile, 0);
if (!new_profile) {
error = -ENOMEM;
info = "could not create null profile";
} else {
error = -EACCES;
target = new_profile->base.hname;
}
perms.xindex |= AA_X_UNSAFE;
} else
/* fail exec */
error = -EACCES;
if (!new_profile)
goto audit;
if (bprm->unsafe & LSM_UNSAFE_SHARE) {
/* FIXME: currently don't mediate shared state */
;
}
if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
error = may_change_ptraced_domain(current, new_profile);
if (error) {
aa_put_profile(new_profile);
goto audit;
}
}
/* Determine if secure exec is needed.
* Can be at this point for the following reasons:
* 1. unconfined switching to confined
* 2. confined switching to different confinement
* 3. confined switching to unconfined
*
* Cases 2 and 3 are marked as requiring secure exec
* (unless policy specified "unsafe exec")
*
* bprm->unsafe is used to cache the AA_X_UNSAFE permission
* to avoid having to recompute in secureexec
*/
if (!(perms.xindex & AA_X_UNSAFE)) {
AA_DEBUG("scrubbing environment variables for %s profile=%s\n",
name, new_profile->base.hname);
bprm->unsafe |= AA_SECURE_X_NEEDED;
}
apply:
target = new_profile->base.hname;
/* when transitioning profiles clear unsafe personality bits */
bprm->per_clear |= PER_CLEAR_ON_SETID;
x_clear:
aa_put_profile(cxt->profile);
/* transfer new profile reference will be released when cxt is freed */
cxt->profile = new_profile;
/* clear out all temporary/transitional state from the context */
aa_put_profile(cxt->previous);
aa_put_profile(cxt->onexec);
cxt->previous = NULL;
cxt->onexec = NULL;
cxt->token = 0;
audit:
error = aa_audit_file(profile, &perms, GFP_KERNEL, OP_EXEC, MAY_EXEC,
name, target, cond.uid, info, error);
cleanup:
aa_put_profile(profile);
kfree(buffer);
return error;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'Add PR_{GET,SET}_NO_NEW_PRIVS to prevent execve from granting privs
With this change, calling
prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
disables privilege granting operations at execve-time. For example, a
process will not be able to execute a setuid binary to change their uid
or gid if this bit is set. The same is true for file capabilities.
Additionally, LSM_UNSAFE_NO_NEW_PRIVS is defined to ensure that
LSMs respect the requested behavior.
To determine if the NO_NEW_PRIVS bit is set, a task may call
prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
It returns 1 if set and 0 if it is not set. If any of the arguments are
non-zero, it will return -1 and set errno to -EINVAL.
(PR_SET_NO_NEW_PRIVS behaves similarly.)
This functionality is desired for the proposed seccomp filter patch
series. By using PR_SET_NO_NEW_PRIVS, it allows a task to modify the
system call behavior for itself and its child tasks without being
able to impact the behavior of a more privileged task.
Another potential use is making certain privileged operations
unprivileged. For example, chroot may be considered "safe" if it cannot
affect privileged tasks.
Note, this patch causes execve to fail when PR_SET_NO_NEW_PRIVS is
set and AppArmor is in use. It is fixed in a subsequent patch.
Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Will Drewry <[email protected]>
Acked-by: Eric Paris <[email protected]>
Acked-by: Kees Cook <[email protected]>
v18: updated change desc
v17: using new define values as per 3.4
Signed-off-by: James Morris <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int check_unsafe_exec(struct linux_binprm *bprm)
{
struct task_struct *p = current, *t;
unsigned n_fs;
int res = 0;
if (p->ptrace) {
if (p->ptrace & PT_PTRACE_CAP)
bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
else
bprm->unsafe |= LSM_UNSAFE_PTRACE;
}
n_fs = 1;
spin_lock(&p->fs->lock);
rcu_read_lock();
for (t = next_thread(p); t != p; t = next_thread(t)) {
if (t->fs == p->fs)
n_fs++;
}
rcu_read_unlock();
if (p->fs->users > n_fs) {
bprm->unsafe |= LSM_UNSAFE_SHARE;
} else {
res = -EAGAIN;
if (!p->fs->in_exec) {
p->fs->in_exec = 1;
res = 1;
}
}
spin_unlock(&p->fs->lock);
return res;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'Add PR_{GET,SET}_NO_NEW_PRIVS to prevent execve from granting privs
With this change, calling
prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
disables privilege granting operations at execve-time. For example, a
process will not be able to execute a setuid binary to change their uid
or gid if this bit is set. The same is true for file capabilities.
Additionally, LSM_UNSAFE_NO_NEW_PRIVS is defined to ensure that
LSMs respect the requested behavior.
To determine if the NO_NEW_PRIVS bit is set, a task may call
prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
It returns 1 if set and 0 if it is not set. If any of the arguments are
non-zero, it will return -1 and set errno to -EINVAL.
(PR_SET_NO_NEW_PRIVS behaves similarly.)
This functionality is desired for the proposed seccomp filter patch
series. By using PR_SET_NO_NEW_PRIVS, it allows a task to modify the
system call behavior for itself and its child tasks without being
able to impact the behavior of a more privileged task.
Another potential use is making certain privileged operations
unprivileged. For example, chroot may be considered "safe" if it cannot
affect privileged tasks.
Note, this patch causes execve to fail when PR_SET_NO_NEW_PRIVS is
set and AppArmor is in use. It is fixed in a subsequent patch.
Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Will Drewry <[email protected]>
Acked-by: Eric Paris <[email protected]>
Acked-by: Kees Cook <[email protected]>
v18: updated change desc
v17: using new define values as per 3.4
Signed-off-by: James Morris <[email protected]>'</s>
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int selinux_bprm_set_creds(struct linux_binprm *bprm)
{
const struct task_security_struct *old_tsec;
struct task_security_struct *new_tsec;
struct inode_security_struct *isec;
struct common_audit_data ad;
struct inode *inode = file_inode(bprm->file);
int rc;
rc = cap_bprm_set_creds(bprm);
if (rc)
return rc;
/* SELinux context only depends on initial program or script and not
* the script interpreter */
if (bprm->cred_prepared)
return 0;
old_tsec = current_security();
new_tsec = bprm->cred->security;
isec = inode->i_security;
/* Default to the current task SID. */
new_tsec->sid = old_tsec->sid;
new_tsec->osid = old_tsec->sid;
/* Reset fs, key, and sock SIDs on execve. */
new_tsec->create_sid = 0;
new_tsec->keycreate_sid = 0;
new_tsec->sockcreate_sid = 0;
if (old_tsec->exec_sid) {
new_tsec->sid = old_tsec->exec_sid;
/* Reset exec SID on execve. */
new_tsec->exec_sid = 0;
/*
* Minimize confusion: if no_new_privs or nosuid and a
* transition is explicitly requested, then fail the exec.
*/
if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS)
return -EPERM;
if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
return -EACCES;
} else {
/* Check for a default transition on this program. */
rc = security_transition_sid(old_tsec->sid, isec->sid,
SECCLASS_PROCESS, NULL,
&new_tsec->sid);
if (rc)
return rc;
}
ad.type = LSM_AUDIT_DATA_PATH;
ad.u.path = bprm->file->f_path;
if ((bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) ||
(bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS))
new_tsec->sid = old_tsec->sid;
if (new_tsec->sid == old_tsec->sid) {
rc = avc_has_perm(old_tsec->sid, isec->sid,
SECCLASS_FILE, FILE__EXECUTE_NO_TRANS, &ad);
if (rc)
return rc;
} else {
/* Check permissions for the transition. */
rc = avc_has_perm(old_tsec->sid, new_tsec->sid,
SECCLASS_PROCESS, PROCESS__TRANSITION, &ad);
if (rc)
return rc;
rc = avc_has_perm(new_tsec->sid, isec->sid,
SECCLASS_FILE, FILE__ENTRYPOINT, &ad);
if (rc)
return rc;
/* Check for shared state */
if (bprm->unsafe & LSM_UNSAFE_SHARE) {
rc = avc_has_perm(old_tsec->sid, new_tsec->sid,
SECCLASS_PROCESS, PROCESS__SHARE,
NULL);
if (rc)
return -EPERM;
}
/* Make sure that anyone attempting to ptrace over a task that
* changes its SID has the appropriate permit */
if (bprm->unsafe &
(LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
struct task_struct *tracer;
struct task_security_struct *sec;
u32 ptsid = 0;
rcu_read_lock();
tracer = ptrace_parent(current);
if (likely(tracer != NULL)) {
sec = __task_cred(tracer)->security;
ptsid = sec->sid;
}
rcu_read_unlock();
if (ptsid != 0) {
rc = avc_has_perm(ptsid, new_tsec->sid,
SECCLASS_PROCESS,
PROCESS__PTRACE, NULL);
if (rc)
return -EPERM;
}
}
/* Clear any possibly unsafe personality bits on exec: */
bprm->per_clear |= PER_CLEAR_ON_SETID;
}
return 0;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'selinux: Permit bounded transitions under NO_NEW_PRIVS or NOSUID.
If the callee SID is bounded by the caller SID, then allowing
the transition to occur poses no risk of privilege escalation and we can
therefore safely allow the transition to occur. Add this exemption
for both the case where a transition was explicitly requested by the
application and the case where an automatic transition is defined in
policy.
Signed-off-by: Stephen Smalley <[email protected]>
Reviewed-by: Andy Lutomirski <[email protected]>
Signed-off-by: Paul Moore <[email protected]>'</s>
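A hedged, self-contained sketch of the decision the fix introduces. The real patch consults SELinux's bounded-transition machinery; bounded_transition() below is a toy stand-in, not the kernel API.
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
/* Toy policy: a transition is "bounded" only when the SIDs match.
 * The kernel instead walks the typebounds relation in the loaded policy. */
static int bounded_transition(unsigned old_sid, unsigned new_sid)
{
    return (new_sid == old_sid) ? 0 : -EPERM;
}
/* Logic from the commit message: under no_new_privs or nosuid, a
 * requested transition is allowed iff it cannot gain privilege,
 * i.e. iff the callee SID is bounded by the caller SID. */
static int check_transition(unsigned old_sid, unsigned new_sid,
                            bool nnp_or_nosuid)
{
    if (!nnp_or_nosuid)
        return 0;               /* unrestricted exec */
    return bounded_transition(old_sid, new_sid);
}
int main(void)
{
    printf("bounded exec under nnp: %d\n", check_transition(7, 7, true));
    printf("unbounded exec under nnp: %d\n", check_transition(7, 9, true));
    return 0;
}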
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount)
{
const cdf_section_header_t *shp;
cdf_section_header_t sh;
const uint8_t *p, *q, *e;
int16_t s16;
int32_t s32;
uint32_t u32;
int64_t s64;
uint64_t u64;
cdf_timestamp_t tp;
size_t i, o, o4, nelements, j;
cdf_property_info_t *inp;
if (offs > UINT32_MAX / 4) {
errno = EFTYPE;
goto out;
}
shp = CAST(const cdf_section_header_t *, (const void *)
((const char *)sst->sst_tab + offs));
if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1)
goto out;
sh.sh_len = CDF_TOLE4(shp->sh_len);
#define CDF_SHLEN_LIMIT (UINT32_MAX / 8)
if (sh.sh_len > CDF_SHLEN_LIMIT) {
errno = EFTYPE;
goto out;
}
sh.sh_properties = CDF_TOLE4(shp->sh_properties);
#define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp)))
if (sh.sh_properties > CDF_PROP_LIMIT)
goto out;
DPRINTF(("section len: %u properties %u\n", sh.sh_len,
sh.sh_properties));
if (*maxcount) {
if (*maxcount > CDF_PROP_LIMIT)
goto out;
*maxcount += sh.sh_properties;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
} else {
*maxcount = sh.sh_properties;
inp = CAST(cdf_property_info_t *,
malloc(*maxcount * sizeof(*inp)));
}
if (inp == NULL)
goto out;
*info = inp;
inp += *count;
*count += sh.sh_properties;
p = CAST(const uint8_t *, (const void *)
((const char *)(const void *)sst->sst_tab +
offs + sizeof(sh)));
e = CAST(const uint8_t *, (const void *)
(((const char *)(const void *)shp) + sh.sh_len));
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
((const char *)(const void *)p + ofs
- 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran off the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2;
} else {
nelements = 1;
o = 1;
}
o4 = o * sizeof(uint32_t);
if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED))
goto unknown;
switch (inp[i].pi_type & CDF_TYPEMASK) {
case CDF_NULL:
case CDF_EMPTY:
break;
case CDF_SIGNED16:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s16, &q[o4], sizeof(s16));
inp[i].pi_s16 = CDF_TOLE2(s16);
break;
case CDF_SIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s32, &q[o4], sizeof(s32));
inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32);
break;
case CDF_BOOL:
case CDF_UNSIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
inp[i].pi_u32 = CDF_TOLE4(u32);
break;
case CDF_SIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s64, &q[o4], sizeof(s64));
inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64);
break;
case CDF_UNSIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64);
break;
case CDF_FLOAT:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
u32 = CDF_TOLE4(u32);
memcpy(&inp[i].pi_f, &u32, sizeof(inp[i].pi_f));
break;
case CDF_DOUBLE:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
u64 = CDF_TOLE8((uint64_t)u64);
memcpy(&inp[i].pi_d, &u64, sizeof(inp[i].pi_d));
break;
case CDF_LENGTH32_STRING:
case CDF_LENGTH32_WSTRING:
if (nelements > 1) {
size_t nelem = inp - *info;
if (*maxcount > CDF_PROP_LIMIT
|| nelements > CDF_PROP_LIMIT)
goto out;
*maxcount += nelements;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
if (inp == NULL)
goto out;
*info = inp;
inp = *info + nelem;
}
DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n",
nelements));
for (j = 0; j < nelements; j++, i++) {
uint32_t l = CDF_GETUINT32(q, o);
inp[i].pi_str.s_len = l;
inp[i].pi_str.s_buf = (const char *)
(const void *)(&q[o4 + sizeof(l)]);
DPRINTF(("l = %d, r = %" SIZE_T_FORMAT
"u, s = %s\n", l,
CDF_ROUND(l, sizeof(l)),
inp[i].pi_str.s_buf));
if (l & 1)
l++;
o += l >> 1;
if (q + o >= e)
goto out;
o4 = o * sizeof(uint32_t);
}
i--;
break;
case CDF_FILETIME:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&tp, &q[o4], sizeof(tp));
inp[i].pi_tp = CDF_TOLE8((uint64_t)tp);
break;
case CDF_CLIPBOARD:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
break;
default:
unknown:
DPRINTF(("Don't know how to deal with %x\n",
inp[i].pi_type));
break;
}
}
return 0;
out:
free(*info);
return -1;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'CVE-2014-0207: Prevent 0 element vectors and vectors longer than the number
of properties from accessing random memory.'</s>
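A minimal sketch of the validation implied by the message; names mirror the code above, but the exact upstream patch may differ in detail.
#include <stdint.h>
#include <stdio.h>
/* The vector element count read from the stream must be checked
 * before the CDF_LENGTH32_STRING loop above uses it: it must be
 * non-zero and cannot exceed the section's declared property count,
 * or the loop indexes inp[] past the allocated *maxcount entries. */
static int vector_count_ok(uint32_t nelements, uint32_t sh_properties)
{
    return nelements != 0 && nelements <= sh_properties;
}
int main(void)
{
    printf("%d\n", vector_count_ok(0, 10));     /* 0: empty vector rejected */
    printf("%d\n", vector_count_ok(5000, 10));  /* 0: oversized vector rejected */
    printf("%d\n", vector_count_ok(3, 10));     /* 1: plausible vector accepted */
    return 0;
}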
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static void sdap_initgr_nested_search(struct tevent_req *subreq)
{
struct tevent_req *req;
struct sdap_initgr_nested_state *state;
struct sysdb_attrs **groups;
size_t count;
int ret;
req = tevent_req_callback_data(subreq, struct tevent_req);
state = tevent_req_data(req, struct sdap_initgr_nested_state);
ret = sdap_get_generic_recv(subreq, state, &count, &groups);
talloc_zfree(subreq);
if (ret) {
tevent_req_error(req, ret);
return;
}
if (count == 1) {
state->groups[state->groups_cur] = talloc_steal(state->groups,
groups[0]);
state->groups_cur++;
} else {
DEBUG(SSSDBG_OP_FAILURE,
"Search for group %s, returned %zu results. Skipping\n",
state->group_dns[state->cur], count);
}
state->cur++;
/* note that state->memberof->num_values is the count of original
* memberOf which might not be only groups, but permissions, etc.
* Use state->groups_cur for group index cap */
if (state->cur < state->memberof->num_values) {
subreq = sdap_get_generic_send(state, state->ev,
state->opts, state->sh,
state->group_dns[state->cur],
LDAP_SCOPE_BASE,
state->filter, state->grp_attrs,
state->opts->group_map,
SDAP_OPTS_GROUP,
dp_opt_get_int(state->opts->basic,
SDAP_SEARCH_TIMEOUT),
false);
if (!subreq) {
tevent_req_error(req, ENOMEM);
return;
}
tevent_req_set_callback(subreq, sdap_initgr_nested_search, req);
} else {
sdap_initgr_nested_store(req);
}
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'IPA: process non-posix nested groups
Do not expect objectClass to be posixGroup but rather the more general
groupofnames.
Resolves:
https://fedorahosted.org/sssd/ticket/2343
Reviewed-by: Michal Židek <[email protected]>
(cherry picked from commit bc8c93ffe881271043492c938c626a9be948000e)'</s>
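Illustrative only: the message says the nested-group search should match the more general groupofnames class instead of posixGroup. The snippet shows the filter shape, not SSSD's actual option plumbing.
#include <stdio.h>
int main(void)
{
    char filter[128];
    /* was effectively (objectClass=posixGroup) before the fix */
    snprintf(filter, sizeof(filter), "(objectClass=%s)", "groupOfNames");
    printf("%s\n", filter);
    return 0;
}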
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: sdap_ad_tokengroups_get_posix_members(TALLOC_CTX *mem_ctx,
struct sdap_ad_tokengroups_initgr_posix_state *state,
size_t num_sids,
char **sids,
size_t *_num_missing,
char ***_missing,
size_t *_num_valid,
char ***_valid_groups)
{
TALLOC_CTX *tmp_ctx = NULL;
struct sss_domain_info *domain = NULL;
struct ldb_message *msg = NULL;
const char *attrs[] = {SYSDB_NAME, SYSDB_POSIX, NULL};
const char *is_posix = NULL;
const char *name = NULL;
char *sid = NULL;
char **valid_groups = NULL;
size_t num_valid_groups;
char **missing_sids = NULL;
size_t num_missing_sids;
size_t i;
errno_t ret;
tmp_ctx = talloc_new(NULL);
if (tmp_ctx == NULL) {
DEBUG(SSSDBG_CRIT_FAILURE, "talloc_new() failed\n");
ret = ENOMEM;
goto done;
}
num_valid_groups = 0;
valid_groups = talloc_zero_array(tmp_ctx, char*, num_sids + 1);
if (valid_groups == NULL) {
ret = ENOMEM;
goto done;
}
num_missing_sids = 0;
missing_sids = talloc_zero_array(tmp_ctx, char*, num_sids + 1);
if (missing_sids == NULL) {
ret = ENOMEM;
goto done;
}
/* For each SID check if it is already present in the cache. If yes, we
* will get name of the group and update the membership. Otherwise we need
* to remember the SID and download missing groups one by one. */
for (i = 0; i < num_sids; i++) {
sid = sids[i];
DEBUG(SSSDBG_TRACE_LIBS, "Processing membership SID [%s]\n", sid);
domain = sss_get_domain_by_sid_ldap_fallback(state->domain, sid);
if (domain == NULL) {
DEBUG(SSSDBG_MINOR_FAILURE, "Domain not found for SID %s\n", sid);
continue;
}
ret = sysdb_search_group_by_sid_str(tmp_ctx, domain->sysdb, domain,
sid, attrs, &msg);
if (ret == EOK) {
is_posix = ldb_msg_find_attr_as_string(msg, SYSDB_POSIX, NULL);
if (is_posix != NULL && strcmp(is_posix, "FALSE") == 0) {
/* skip non-posix group */
continue;
}
/* we will update membership of this group */
name = ldb_msg_find_attr_as_string(msg, SYSDB_NAME, NULL);
if (name == NULL) {
DEBUG(SSSDBG_MINOR_FAILURE,
"Could not retrieve group name from sysdb\n");
ret = EINVAL;
goto done;
}
valid_groups[num_valid_groups] = sysdb_group_strdn(valid_groups,
domain->name,
name);
if (valid_groups[num_valid_groups] == NULL) {
ret = ENOMEM;
goto done;
}
num_valid_groups++;
} else if (ret == ENOENT) {
if (_missing != NULL) {
/* we need to download this group */
missing_sids[num_missing_sids] = talloc_steal(missing_sids,
sid);
num_missing_sids++;
DEBUG(SSSDBG_TRACE_FUNC, "Missing SID %s will be downloaded\n",
sid);
}
/* else: We have downloaded missing groups but some of them may
* remain missing because they are outside of the search base. We
* will just ignore them and continue with the next group. */
} else {
DEBUG(SSSDBG_MINOR_FAILURE, "Could not look up SID %s in sysdb: "
"[%s]\n", sid, strerror(ret));
goto done;
}
}
valid_groups[num_valid_groups] = NULL;
missing_sids[num_missing_sids] = NULL;
/* return list of missing groups */
if (_missing != NULL) {
*_missing = talloc_steal(mem_ctx, missing_sids);
*_num_missing = num_missing_sids;
}
/* return list of missing groups */
if (_valid_groups != NULL) {
*_valid_groups = talloc_steal(mem_ctx, valid_groups);
*_num_valid = num_valid_groups;
}
ret = EOK;
done:
talloc_free(tmp_ctx);
return ret;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264'], 'message': 'AD: process non-posix nested groups using tokenGroups
When initgr is performed for AD supporting tokenGroups, do not skip
non-posix groups.
Resolves:
https://fedorahosted.org/sssd/ticket/2343
Reviewed-by: Michal Židek <[email protected]>
(cherry picked from commit 4932db6258ccfb612a3a28eb6a618c2f042b9d58)'</s>
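A hedged sketch of the behavioral change. The message only says non-posix groups must no longer be skipped; the exact condition modeled below (keep them when the domain maps IDs) is an assumption.
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
/* Old code: any cached group with posix == "FALSE" was skipped.
 * New behavior: such groups still flow into the initgroups result,
 * so nested non-posix AD groups are not lost. */
static bool skip_group(const char *is_posix, bool domain_maps_ids)
{
    if (is_posix != NULL && strcmp(is_posix, "FALSE") == 0)
        return !domain_maps_ids;   /* previously: always skipped */
    return false;
}
int main(void)
{
    printf("%d\n", skip_group("FALSE", true));  /* 0: now kept */
    printf("%d\n", skip_group("TRUE", true));   /* 0: kept as before */
    return 0;
}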
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int Adapter_output(AdapterObject *self, const char *data, int length,
int exception_when_aborted)
{
int i = 0;
int n = 0;
apr_status_t rv;
request_rec *r;
#if defined(MOD_WSGI_WITH_DAEMONS)
if (wsgi_inactivity_timeout) {
apr_thread_mutex_lock(wsgi_shutdown_lock);
wsgi_inactivity_shutdown_time = apr_time_now();
wsgi_inactivity_shutdown_time += wsgi_inactivity_timeout;
apr_thread_mutex_unlock(wsgi_shutdown_lock);
}
#endif
if (!self->status_line) {
PyErr_SetString(PyExc_RuntimeError, "response has not been started");
return 0;
}
r = self->r;
/* Have response headers yet been sent. */
if (self->headers) {
/*
* Apache prior to Apache 2.2.8 has a bug in it
* whereby it doesn't force '100 Continue'
* response before responding with headers if no
* read. So, force a zero length read before
* sending the headers if haven't yet attempted
* to read anything. This will ensure that if no
* request content has been read that any '100
* Continue' response will be flushed and sent
* back to the client if client was expecting
* one. Only want to do this for 2xx and 3xx
* status values. Note that even though the
* Apple-supplied version of Apache on MacOS X Leopard
* is newer than version 2.2.8, the header file
* has never been patched when they make updates
* and so anything compiled against it thinks it
* is older.
*/
#if (AP_SERVER_MAJORVERSION_NUMBER == 1) || \
(AP_SERVER_MAJORVERSION_NUMBER == 2 && \
AP_SERVER_MINORVERSION_NUMBER < 2) || \
(AP_SERVER_MAJORVERSION_NUMBER == 2 && \
AP_SERVER_MINORVERSION_NUMBER == 2 && \
AP_SERVER_PATCHLEVEL_NUMBER < 8)
if (!self->input->init) {
if (self->status >= 200 && self->status < 400) {
PyObject *args = NULL;
PyObject *result = NULL;
args = Py_BuildValue("(i)", 0);
result = Input_read(self->input, args);
if (PyErr_Occurred())
PyErr_Clear();
Py_DECREF(args);
Py_XDECREF(result);
}
}
#endif
/* Now setup response headers in request object. */
r->status = self->status;
r->status_line = self->status_line;
for (i = 0; i < PyList_Size(self->headers); i++) {
PyObject *tuple = NULL;
PyObject *object1 = NULL;
PyObject *object2 = NULL;
char *name = NULL;
char *value = NULL;
tuple = PyList_GetItem(self->headers, i);
if (!PyTuple_Check(tuple)) {
PyErr_Format(PyExc_TypeError, "list of tuple values "
"expected, value of type %.200s found",
tuple->ob_type->tp_name);
return 0;
}
if (PyTuple_Size(tuple) != 2) {
PyErr_Format(PyExc_ValueError, "tuple of length 2 "
"expected, length is %d",
(int)PyTuple_Size(tuple));
return 0;
}
object1 = PyTuple_GetItem(tuple, 0);
object2 = PyTuple_GetItem(tuple, 1);
if (PyString_Check(object1)) {
name = PyString_AsString(object1);
}
#if PY_MAJOR_VERSION >= 3
else if (PyUnicode_Check(object1)) {
PyObject *latin_object;
latin_object = PyUnicode_AsLatin1String(object1);
if (!latin_object) {
PyErr_Format(PyExc_TypeError, "header name "
"contained non 'latin-1' characters ");
return 0;
}
name = apr_pstrdup(r->pool, PyString_AsString(latin_object));
Py_DECREF(latin_object);
}
#endif
else {
PyErr_Format(PyExc_TypeError, "expected byte string object "
"for header name, value of type %.200s "
"found", object1->ob_type->tp_name);
return 0;
}
if (PyString_Check(object2)) {
value = PyString_AsString(object2);
}
#if PY_MAJOR_VERSION >= 3
else if (PyUnicode_Check(object2)) {
PyObject *latin_object;
latin_object = PyUnicode_AsLatin1String(object2);
if (!latin_object) {
PyErr_Format(PyExc_TypeError, "header value "
"contained non 'latin-1' characters ");
return 0;
}
value = apr_pstrdup(r->pool, PyString_AsString(latin_object));
Py_DECREF(latin_object);
}
#endif
else {
PyErr_Format(PyExc_TypeError, "expected byte string object "
"for header value, value of type %.200s "
"found", object2->ob_type->tp_name);
return 0;
}
if (strchr(name, '\n') != 0 || strchr(value, '\n') != 0) {
PyErr_Format(PyExc_ValueError, "embedded newline in "
"response header with name '%s' and value '%s'",
name, value);
return 0;
}
if (!strcasecmp(name, "Content-Type")) {
#if AP_SERVER_MAJORVERSION_NUMBER < 2
r->content_type = apr_pstrdup(r->pool, value);
#else
/*
* In a daemon child process we cannot call the
* function ap_set_content_type() as want to
* avoid adding any output filters based on the
* type of file being served as this will be
* done in the main Apache child process which
* proxied the request to the daemon process.
*/
if (*self->config->process_group)
r->content_type = apr_pstrdup(r->pool, value);
else
ap_set_content_type(r, value);
#endif
}
else if (!strcasecmp(name, "Content-Length")) {
char *v = value;
long l = 0;
errno = 0;
l = strtol(v, &v, 10);
if (*v || errno == ERANGE || l < 0) {
PyErr_SetString(PyExc_ValueError,
"invalid content length");
return 0;
}
ap_set_content_length(r, l);
self->content_length_set = 1;
self->content_length = l;
}
else if (!strcasecmp(name, "WWW-Authenticate")) {
apr_table_add(r->err_headers_out, name, value);
}
else {
apr_table_add(r->headers_out, name, value);
}
}
/* Need to force output of headers when using Apache 1.3. */
Py_BEGIN_ALLOW_THREADS
ap_send_http_header(r);
Py_END_ALLOW_THREADS
/*
* Reset flag indicating whether '100 Continue' response
* expected. If we don't do this then if an attempt to read
* input for the first time is after headers have been
* sent, then Apache will wrongly generate the '100 Continue'
* response into the response content. Not sure if this is
* a bug in Apache, or that it truly believes that input
* will never be read after the response headers have been
* sent.
*/
r->expecting_100 = 0;
/* No longer need headers now that they have been sent. */
Py_DECREF(self->headers);
self->headers = NULL;
}
/*
* If content length was specified, ensure that we don't
* actually output more data than was specified as being
* sent as otherwise technically in violation of HTTP RFC.
*/
if (length) {
int output_length = length;
if (self->content_length_set) {
if (self->output_length < self->content_length) {
if (self->output_length + length > self->content_length) {
length = self->content_length - self->output_length;
}
}
else
length = 0;
}
self->output_length += output_length;
}
/* Now output any data. */
if (length) {
#if defined(MOD_WSGI_WITH_BUCKETS)
apr_bucket *b;
/*
* When using Apache 2.X can use lower level
* bucket brigade APIs. This is preferred as
* ap_rwrite()/ap_rflush() will grow memory in
* the request pool on each call, which will
* result in an increase in memory use over time
* when streaming of data is being performed.
* The memory is still reclaimed, but only at
* the end of the request. Using bucket brigade
* API avoids this, and also avoids any copying
* of response data due to buffering performed
* by ap_rwrite().
*/
if (r->connection->aborted) {
if (!exception_when_aborted) {
ap_log_rerror(APLOG_MARK, WSGI_LOG_DEBUG(0), self->r,
"mod_wsgi (pid=%d): Client closed connection.",
getpid());
}
else
PyErr_SetString(PyExc_IOError, "client connection closed");
return 0;
}
if (!self->bb) {
self->bb = apr_brigade_create(r->pool,
r->connection->bucket_alloc);
}
b = apr_bucket_transient_create(data, length,
r->connection->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(self->bb, b);
b = apr_bucket_flush_create(r->connection->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(self->bb, b);
Py_BEGIN_ALLOW_THREADS
rv = ap_pass_brigade(r->output_filters, self->bb);
Py_END_ALLOW_THREADS
if (rv != APR_SUCCESS) {
PyErr_SetString(PyExc_IOError, "failed to write data");
return 0;
}
Py_BEGIN_ALLOW_THREADS
apr_brigade_cleanup(self->bb);
Py_END_ALLOW_THREADS
#else
/*
* In Apache 1.3, the bucket brigade system doesn't exist,
* so have no choice but to use ap_rwrite()/ap_rflush().
* It is not believed that Apache 1.3 suffers the memory
* accumulation problem when streaming lots of data.
*/
Py_BEGIN_ALLOW_THREADS
n = ap_rwrite(data, length, r);
Py_END_ALLOW_THREADS
if (n == -1) {
PyErr_SetString(PyExc_IOError, "failed to write data");
return 0;
}
Py_BEGIN_ALLOW_THREADS
n = ap_rflush(r);
Py_END_ALLOW_THREADS
if (n == -1) {
PyErr_SetString(PyExc_IOError, "failed to flush data");
return 0;
}
#endif
}
/*
* Check whether aborted connection was found when data
* being written, otherwise will not be flagged until next
* time that data is being written. Early detection is
* better as it may have been the last data block being
* written and application may think that data has all
* been written. In a streaming application, we also want
* to avoid any additional data processing to generate any
* successive data.
*/
if (r->connection->aborted) {
if (!exception_when_aborted) {
ap_log_rerror(APLOG_MARK, WSGI_LOG_DEBUG(0), self->r,
"mod_wsgi (pid=%d): Client closed connection.",
getpid());
}
else
PyErr_SetString(PyExc_IOError, "client connection closed");
return 0;
}
return 1;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-200'], 'message': 'Fix bug where content type returned for response could be corrupted when using embedded mode.'</s>
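The message points at the lifetime of the Content-Type string in embedded mode: value points into a Python string object, so the plausible fix is to duplicate it into the request pool, e.g. ap_set_content_type(r, apr_pstrdup(r->pool, value)). Treat that exact line as an assumption; below is a self-contained analogue of the safe pattern, with strdup() standing in for apr_pstrdup().
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
static const char *kept;   /* stands in for r->content_type */
static void set_content_type(const char *v)
{
    kept = strdup(v);      /* private copy owned by the "request" */
}
int main(void)
{
    char *tmp = strdup("text/html");
    set_content_type(tmp);
    free(tmp);             /* the original may be reclaimed, as a
                              Python string object would be */
    printf("%s\n", kept);  /* still valid: we kept our own copy */
    free((void *)kept);
    return 0;
}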
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static NTSTATUS vfswrap_fsctl(struct vfs_handle_struct *handle,
struct files_struct *fsp,
TALLOC_CTX *ctx,
uint32_t function,
uint16_t req_flags, /* Needed for UNICODE ... */
const uint8_t *_in_data,
uint32_t in_len,
uint8_t **_out_data,
uint32_t max_out_len,
uint32_t *out_len)
{
const char *in_data = (const char *)_in_data;
char **out_data = (char **)_out_data;
switch (function) {
case FSCTL_SET_SPARSE:
{
bool set_sparse = true;
NTSTATUS status;
if (in_len >= 1 && in_data[0] == 0) {
set_sparse = false;
}
status = file_set_sparse(handle->conn, fsp, set_sparse);
DEBUG(NT_STATUS_IS_OK(status) ? 10 : 9,
("FSCTL_SET_SPARSE: fname[%s] set[%u] - %s\n",
smb_fname_str_dbg(fsp->fsp_name), set_sparse,
nt_errstr(status)));
return status;
}
case FSCTL_CREATE_OR_GET_OBJECT_ID:
{
unsigned char objid[16];
char *return_data = NULL;
/* This should return the object-id on this file.
* I think I'll make this be the inode+dev. JRA.
*/
DEBUG(10,("FSCTL_CREATE_OR_GET_OBJECT_ID: called on %s\n",
fsp_fnum_dbg(fsp)));
*out_len = (max_out_len >= 64) ? 64 : max_out_len;
/* Hmmm, will this cause problems if less data is asked for? */
return_data = talloc_array(ctx, char, 64);
if (return_data == NULL) {
return NT_STATUS_NO_MEMORY;
}
/* For backwards compatibility only store the dev/inode. */
push_file_id_16(return_data, &fsp->file_id);
memcpy(return_data+16,create_volume_objectid(fsp->conn,objid),16);
push_file_id_16(return_data+32, &fsp->file_id);
*out_data = return_data;
return NT_STATUS_OK;
}
case FSCTL_GET_REPARSE_POINT:
{
/* Fail it with STATUS_NOT_A_REPARSE_POINT */
DEBUG(10, ("FSCTL_GET_REPARSE_POINT: called on %s. "
"Status: NOT_IMPLEMENTED\n", fsp_fnum_dbg(fsp)));
return NT_STATUS_NOT_A_REPARSE_POINT;
}
case FSCTL_SET_REPARSE_POINT:
{
/* Fail it with STATUS_NOT_A_REPARSE_POINT */
DEBUG(10, ("FSCTL_SET_REPARSE_POINT: called on %s. "
"Status: NOT_IMPLEMENTED\n", fsp_fnum_dbg(fsp)));
return NT_STATUS_NOT_A_REPARSE_POINT;
}
case FSCTL_GET_SHADOW_COPY_DATA:
{
/*
* This is called to retrieve the number of Shadow Copies (a.k.a. snapshots)
* and return their volume names. If max_data_count is 16, then it is just
* asking for the number of volumes and length of the combined names.
*
* pdata is the data allocated by our caller, but that uses
* total_data_count (which is 0 in our case) rather than max_data_count.
* Allocate the correct amount and return the pointer to let
* it be deallocated when we return.
*/
struct shadow_copy_data *shadow_data = NULL;
bool labels = False;
uint32 labels_data_count = 0;
uint32 i;
char *cur_pdata = NULL;
if (max_out_len < 16) {
DEBUG(0,("FSCTL_GET_SHADOW_COPY_DATA: max_data_count(%u) < 16 is invalid!\n",
max_out_len));
return NT_STATUS_INVALID_PARAMETER;
}
if (max_out_len > 16) {
labels = True;
}
shadow_data = talloc_zero(ctx, struct shadow_copy_data);
if (shadow_data == NULL) {
DEBUG(0,("TALLOC_ZERO() failed!\n"));
return NT_STATUS_NO_MEMORY;
}
/*
* Call the VFS routine to actually do the work.
*/
if (SMB_VFS_GET_SHADOW_COPY_DATA(fsp, shadow_data, labels)!=0) {
TALLOC_FREE(shadow_data);
if (errno == ENOSYS) {
DEBUG(5,("FSCTL_GET_SHADOW_COPY_DATA: connectpath %s, not supported.\n",
fsp->conn->connectpath));
return NT_STATUS_NOT_SUPPORTED;
} else {
DEBUG(0,("FSCTL_GET_SHADOW_COPY_DATA: connectpath %s, failed.\n",
fsp->conn->connectpath));
return NT_STATUS_UNSUCCESSFUL;
}
}
labels_data_count = (shadow_data->num_volumes * 2 *
sizeof(SHADOW_COPY_LABEL)) + 2;
if (!labels) {
*out_len = 16;
} else {
*out_len = 12 + labels_data_count + 4;
}
if (max_out_len < *out_len) {
DEBUG(0,("FSCTL_GET_SHADOW_COPY_DATA: max_data_count(%u) too small (%u) bytes needed!\n",
max_out_len, *out_len));
TALLOC_FREE(shadow_data);
return NT_STATUS_BUFFER_TOO_SMALL;
}
cur_pdata = talloc_array(ctx, char, *out_len);
if (cur_pdata == NULL) {
TALLOC_FREE(shadow_data);
return NT_STATUS_NO_MEMORY;
}
*out_data = cur_pdata;
/* num_volumes 4 bytes */
SIVAL(cur_pdata, 0, shadow_data->num_volumes);
if (labels) {
/* num_labels 4 bytes */
SIVAL(cur_pdata, 4, shadow_data->num_volumes);
}
/* needed_data_count 4 bytes */
SIVAL(cur_pdata, 8, labels_data_count + 4);
cur_pdata += 12;
DEBUG(10,("FSCTL_GET_SHADOW_COPY_DATA: %u volumes for path[%s].\n",
shadow_data->num_volumes, fsp_str_dbg(fsp)));
if (labels && shadow_data->labels) {
for (i=0; i<shadow_data->num_volumes; i++) {
srvstr_push(cur_pdata, req_flags,
cur_pdata, shadow_data->labels[i],
2 * sizeof(SHADOW_COPY_LABEL),
STR_UNICODE|STR_TERMINATE);
cur_pdata += 2 * sizeof(SHADOW_COPY_LABEL);
DEBUGADD(10,("Label[%u]: '%s'\n",i,shadow_data->labels[i]));
}
}
TALLOC_FREE(shadow_data);
return NT_STATUS_OK;
}
case FSCTL_FIND_FILES_BY_SID:
{
/* pretend this succeeded -
*
* we have to send back a list with all files owned by this SID
*
* but I have to check that --metze
*/
struct dom_sid sid;
uid_t uid;
size_t sid_len;
DEBUG(10, ("FSCTL_FIND_FILES_BY_SID: called on %s\n",
fsp_fnum_dbg(fsp)));
if (in_len < 8) {
/* NT_STATUS_BUFFER_TOO_SMALL maybe? */
return NT_STATUS_INVALID_PARAMETER;
}
sid_len = MIN(in_len - 4,SID_MAX_SIZE);
/* unknown 4 bytes: this is not the length of the sid :-( */
/*unknown = IVAL(pdata,0);*/
if (!sid_parse(in_data + 4, sid_len, &sid)) {
return NT_STATUS_INVALID_PARAMETER;
}
DEBUGADD(10, ("for SID: %s\n", sid_string_dbg(&sid)));
if (!sid_to_uid(&sid, &uid)) {
DEBUG(0,("sid_to_uid: failed, sid[%s] sid_len[%lu]\n",
sid_string_dbg(&sid),
(unsigned long)sid_len));
uid = (-1);
}
/* we can take a look at the find source :-)
*
* find ./ -uid $uid -name '*' is what we need here
*
*
* and send 4bytes len and then NULL terminated unicode strings
* for each file
*
* but I don't know how to deal with the paged results
* (maybe we can hang the result anywhere in the fsp struct)
*
* we don't send all files at once
* and at the next we should *not* start from the beginning,
* so we have to cache the result
*
* --metze
*/
/* this works for now... */
return NT_STATUS_OK;
}
case FSCTL_QUERY_ALLOCATED_RANGES:
{
/* FIXME: This is just a dummy reply, telling that all of the
* file is allocated. MKS cp needs that.
* Adding the real allocated ranges via FIEMAP on Linux
* and SEEK_DATA/SEEK_HOLE on Solaris is needed to make
* this FSCTL correct for sparse files.
*/
NTSTATUS status;
uint64_t offset, length;
char *out_data_tmp = NULL;
if (in_len != 16) {
DEBUG(0,("FSCTL_QUERY_ALLOCATED_RANGES: data_count(%u) != 16 is invalid!\n",
in_len));
return NT_STATUS_INVALID_PARAMETER;
}
if (max_out_len < 16) {
DEBUG(0,("FSCTL_QUERY_ALLOCATED_RANGES: max_out_len (%u) < 16 is invalid!\n",
max_out_len));
return NT_STATUS_INVALID_PARAMETER;
}
offset = BVAL(in_data,0);
length = BVAL(in_data,8);
if (offset + length < offset) {
/* No 64-bit integer wrap. */
return NT_STATUS_INVALID_PARAMETER;
}
/* Shouldn't this be SMB_VFS_STAT ... ? */
status = vfs_stat_fsp(fsp);
if (!NT_STATUS_IS_OK(status)) {
return status;
}
*out_len = 16;
out_data_tmp = talloc_array(ctx, char, *out_len);
if (out_data_tmp == NULL) {
DEBUG(10, ("unable to allocate memory for response\n"));
return NT_STATUS_NO_MEMORY;
}
if (offset > fsp->fsp_name->st.st_ex_size ||
fsp->fsp_name->st.st_ex_size == 0 ||
length == 0) {
memset(out_data_tmp, 0, *out_len);
} else {
uint64_t end = offset + length;
end = MIN(end, fsp->fsp_name->st.st_ex_size);
SBVAL(out_data_tmp, 0, 0);
SBVAL(out_data_tmp, 8, end);
}
*out_data = out_data_tmp;
return NT_STATUS_OK;
}
case FSCTL_IS_VOLUME_DIRTY:
{
DEBUG(10,("FSCTL_IS_VOLUME_DIRTY: called on %s "
"(but remotely not supported)\n", fsp_fnum_dbg(fsp)));
/*
* http://msdn.microsoft.com/en-us/library/cc232128%28PROT.10%29.aspx
* says we have to respond with NT_STATUS_INVALID_PARAMETER
*/
return NT_STATUS_INVALID_PARAMETER;
}
default:
/*
* Only print once ... unfortunately there could be lots of
* different FSCTLs that are called.
*/
if (!vfswrap_logged_ioctl_message) {
vfswrap_logged_ioctl_message = true;
DEBUG(2, ("%s (0x%x): Currently not implemented.\n",
__func__, function));
}
}
return NT_STATUS_NOT_SUPPORTED;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-665'], 'message': 'FSCTL_GET_SHADOW_COPY_DATA: Initialize output array to zero
Otherwise num_volumes and the end marker can return uninitialized data
to the client.
Signed-off-by: Christof Schmitt <[email protected]>
Reviewed-by: Jeremy Allison <[email protected]>
Reviewed-by: Simo Sorce <[email protected]>'</s>
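A minimal analogue of the fix: allocate the FSCTL reply buffer zero-initialized (Samba's talloc_zero_array() instead of talloc_array()); calloc() stands in for talloc_zero_array() here.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int main(void)
{
    size_t out_len = 64;
    char *cur_pdata = calloc(out_len, 1);      /* was: uninitialized alloc */
    if (cur_pdata == NULL)
        return 1;
    memcpy(cur_pdata, "\x01\x00\x00\x00", 4);  /* num_volumes = 1 */
    /* Every byte the code never writes (padding, end marker) is now
     * zero instead of stale heap contents that could leak to the
     * client. */
    printf("last byte: %d\n", cur_pdata[out_len - 1]);
    free(cur_pdata);
    return 0;
}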
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: asn1_get_bit_der (const unsigned char *der, int der_len,
int *ret_len, unsigned char *str, int str_size,
int *bit_len)
{
int len_len, len_byte;
if (der_len <= 0)
return ASN1_GENERIC_ERROR;
len_byte = asn1_get_length_der (der, der_len, &len_len) - 1;
if (len_byte < 0)
return ASN1_DER_ERROR;
*ret_len = len_byte + len_len + 1;
*bit_len = len_byte * 8 - der[len_len];
if (str_size >= len_byte)
memcpy (str, der + len_len + 1, len_byte);
else
{
return ASN1_MEM_ERROR;
}
return ASN1_SUCCESS;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-131'], 'message': 'Do not return illegal values in asn1_get_bit_der().'</s>
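A sketch of the added sanity check; the patched asn1_get_bit_der() appears later in this file with the same guard.
#include <stdio.h>
/* A DER BIT STRING's unused-bits octet must not exceed 8 * len_byte,
 * otherwise the computed bit length goes negative and callers receive
 * an illegal value. The patched code checks: if (*bit_len < 0) error. */
static int bit_len_ok(int len_byte, unsigned char unused_bits)
{
    int bit_len = len_byte * 8 - unused_bits;
    return bit_len >= 0;
}
int main(void)
{
    printf("%d\n", bit_len_ok(1, 3));    /* 1: valid */
    printf("%d\n", bit_len_ok(0, 200));  /* 0: rejected */
    return 0;
}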
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: asn1_read_value_type (asn1_node root, const char *name, void *ivalue,
int *len, unsigned int *etype)
{
asn1_node node, p, p2;
int len2, len3, result;
int value_size = *len;
unsigned char *value = ivalue;
unsigned type;
node = asn1_find_node (root, name);
if (node == NULL)
return ASN1_ELEMENT_NOT_FOUND;
type = type_field (node->type);
if ((type != ASN1_ETYPE_NULL) &&
(type != ASN1_ETYPE_CHOICE) &&
!(node->type & CONST_DEFAULT) && !(node->type & CONST_ASSIGN) &&
(node->value == NULL))
return ASN1_VALUE_NOT_FOUND;
if (etype)
*etype = type;
switch (type)
{
case ASN1_ETYPE_NULL:
PUT_STR_VALUE (value, value_size, "NULL");
break;
case ASN1_ETYPE_BOOLEAN:
if ((node->type & CONST_DEFAULT) && (node->value == NULL))
{
p = node->down;
while (type_field (p->type) != ASN1_ETYPE_DEFAULT)
p = p->right;
if (p->type & CONST_TRUE)
{
PUT_STR_VALUE (value, value_size, "TRUE");
}
else
{
PUT_STR_VALUE (value, value_size, "FALSE");
}
}
else if (node->value[0] == 'T')
{
PUT_STR_VALUE (value, value_size, "TRUE");
}
else
{
PUT_STR_VALUE (value, value_size, "FALSE");
}
break;
case ASN1_ETYPE_INTEGER:
case ASN1_ETYPE_ENUMERATED:
if ((node->type & CONST_DEFAULT) && (node->value == NULL))
{
p = node->down;
while (type_field (p->type) != ASN1_ETYPE_DEFAULT)
p = p->right;
if ((isdigit (p->value[0])) || (p->value[0] == '-')
|| (p->value[0] == '+'))
{
result = _asn1_convert_integer
(p->value, value, value_size, len);
if (result != ASN1_SUCCESS)
return result;
}
else
{ /* is an identifier like v1 */
p2 = node->down;
while (p2)
{
if (type_field (p2->type) == ASN1_ETYPE_CONSTANT)
{
if (!_asn1_strcmp (p2->name, p->value))
{
result = _asn1_convert_integer
(p2->value, value, value_size,
len);
if (result != ASN1_SUCCESS)
return result;
break;
}
}
p2 = p2->right;
}
}
}
else
{
len2 = -1;
result = asn1_get_octet_der
(node->value, node->value_len, &len2, value, value_size,
len);
if (result != ASN1_SUCCESS)
return result;
}
break;
case ASN1_ETYPE_OBJECT_ID:
if (node->type & CONST_ASSIGN)
{
value[0] = 0;
p = node->down;
while (p)
{
if (type_field (p->type) == ASN1_ETYPE_CONSTANT)
{
ADD_STR_VALUE (value, value_size, p->value);
if (p->right)
{
ADD_STR_VALUE (value, value_size, ".");
}
}
p = p->right;
}
*len = _asn1_strlen (value) + 1;
}
else if ((node->type & CONST_DEFAULT) && (node->value == NULL))
{
p = node->down;
while (type_field (p->type) != ASN1_ETYPE_DEFAULT)
p = p->right;
PUT_STR_VALUE (value, value_size, p->value);
}
else
{
PUT_STR_VALUE (value, value_size, node->value);
}
break;
case ASN1_ETYPE_GENERALIZED_TIME:
case ASN1_ETYPE_UTC_TIME:
PUT_AS_STR_VALUE (value, value_size, node->value, node->value_len);
break;
case ASN1_ETYPE_OCTET_STRING:
case ASN1_ETYPE_GENERALSTRING:
case ASN1_ETYPE_NUMERIC_STRING:
case ASN1_ETYPE_IA5_STRING:
case ASN1_ETYPE_TELETEX_STRING:
case ASN1_ETYPE_PRINTABLE_STRING:
case ASN1_ETYPE_UNIVERSAL_STRING:
case ASN1_ETYPE_BMP_STRING:
case ASN1_ETYPE_UTF8_STRING:
case ASN1_ETYPE_VISIBLE_STRING:
len2 = -1;
result = asn1_get_octet_der
(node->value, node->value_len, &len2, value, value_size,
len);
if (result != ASN1_SUCCESS)
return result;
break;
case ASN1_ETYPE_BIT_STRING:
len2 = -1;
result = asn1_get_bit_der
(node->value, node->value_len, &len2, value, value_size,
len);
if (result != ASN1_SUCCESS)
return result;
break;
case ASN1_ETYPE_CHOICE:
PUT_STR_VALUE (value, value_size, node->down->name);
break;
case ASN1_ETYPE_ANY:
len3 = -1;
len2 = asn1_get_length_der (node->value, node->value_len, &len3);
if (len2 < 0)
return ASN1_DER_ERROR;
PUT_VALUE (value, value_size, node->value + len3, len2);
break;
default:
return ASN1_ELEMENT_NOT_FOUND;
break;
}
return ASN1_SUCCESS;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-476'], 'message': 'safer usage of memcpy().'</s>
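A hedged, self-contained illustration of what "safer usage of memcpy()" plausibly means here: capacity checked before every copy, and zero-length copies skipped so a NULL pointer is never dereferenced. The real change lives in libtasn1's PUT_*/ADD_* macros.
#include <stdio.h>
#include <string.h>
static int put_value(void *dst, int dst_size, const void *src, int len)
{
    if (len > dst_size)
        return -1;               /* ASN1_MEM_ERROR, in libtasn1 terms */
    if (len > 0)
        memcpy(dst, src, len);   /* only touch memory when needed */
    return 0;
}
int main(void)
{
    char buf[4];
    printf("%d\n", put_value(buf, sizeof(buf), "abc", 4));     /* 0 */
    printf("%d\n", put_value(buf, sizeof(buf), "toolong", 8)); /* -1 */
    printf("%d\n", put_value(NULL, 0, NULL, 0));               /* 0, no deref */
    return 0;
}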
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: asn1_get_bit_der (const unsigned char *der, int der_len,
int *ret_len, unsigned char *str, int str_size,
int *bit_len)
{
int len_len = 0, len_byte;
if (der_len <= 0)
return ASN1_GENERIC_ERROR;
len_byte = asn1_get_length_der (der, der_len, &len_len) - 1;
if (len_byte < 0)
return ASN1_DER_ERROR;
*ret_len = len_byte + len_len + 1;
*bit_len = len_byte * 8 - der[len_len];
if (*bit_len < 0)
return ASN1_DER_ERROR;
if (str_size >= len_byte)
{
if (len_byte > 0)
memcpy (str, der + len_len + 1, len_byte);
}
else
{
return ASN1_MEM_ERROR;
}
return ASN1_SUCCESS;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-476'], 'message': 'Allow a NULL value in asn1_read_value() for all types.'</s>
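A self-contained analogue of the two-call convention this change enables: a NULL destination queries the required size instead of crashing. MEM_ERROR below is a stand-in for ASN1_MEM_ERROR.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define MEM_ERROR 4   /* stand-in for ASN1_MEM_ERROR */
static int read_value(const char *stored, char *out, int *len)
{
    int need = (int)strlen(stored) + 1;
    if (out == NULL || *len < need) {
        *len = need;          /* tell the caller how much to allocate */
        return MEM_ERROR;
    }
    memcpy(out, stored, need);
    *len = need;
    return 0;
}
int main(void)
{
    int len = 0;
    read_value("302a3005", NULL, &len);   /* size query, no dereference */
    char *buf = malloc(len);
    if (buf == NULL)
        return 1;
    printf("%d %s\n", read_value("302a3005", buf, &len), buf);
    free(buf);
    return 0;
}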
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: asn1_read_value_type (asn1_node root, const char *name, void *ivalue,
int *len, unsigned int *etype)
{
asn1_node node, p, p2;
int len2, len3, result;
int value_size = *len;
unsigned char *value = ivalue;
unsigned type;
node = asn1_find_node (root, name);
if (node == NULL)
return ASN1_ELEMENT_NOT_FOUND;
type = type_field (node->type);
if ((type != ASN1_ETYPE_NULL) &&
(type != ASN1_ETYPE_CHOICE) &&
!(node->type & CONST_DEFAULT) && !(node->type & CONST_ASSIGN) &&
(node->value == NULL))
return ASN1_VALUE_NOT_FOUND;
if (etype)
*etype = type;
switch (type)
{
case ASN1_ETYPE_NULL:
PUT_STR_VALUE (value, value_size, "NULL");
break;
case ASN1_ETYPE_BOOLEAN:
if ((node->type & CONST_DEFAULT) && (node->value == NULL))
{
p = node->down;
while (type_field (p->type) != ASN1_ETYPE_DEFAULT)
p = p->right;
if (p->type & CONST_TRUE)
{
PUT_STR_VALUE (value, value_size, "TRUE");
}
else
{
PUT_STR_VALUE (value, value_size, "FALSE");
}
}
else if (node->value[0] == 'T')
{
PUT_STR_VALUE (value, value_size, "TRUE");
}
else
{
PUT_STR_VALUE (value, value_size, "FALSE");
}
break;
case ASN1_ETYPE_INTEGER:
case ASN1_ETYPE_ENUMERATED:
if ((node->type & CONST_DEFAULT) && (node->value == NULL))
{
p = node->down;
while (type_field (p->type) != ASN1_ETYPE_DEFAULT)
p = p->right;
if ((isdigit (p->value[0])) || (p->value[0] == '-')
|| (p->value[0] == '+'))
{
result = _asn1_convert_integer
(p->value, value, value_size, len);
if (result != ASN1_SUCCESS)
return result;
}
else
{ /* is an identifier like v1 */
p2 = node->down;
while (p2)
{
if (type_field (p2->type) == ASN1_ETYPE_CONSTANT)
{
if (!_asn1_strcmp (p2->name, p->value))
{
result = _asn1_convert_integer
(p2->value, value, value_size,
len);
if (result != ASN1_SUCCESS)
return result;
break;
}
}
p2 = p2->right;
}
}
}
else
{
len2 = -1;
result = asn1_get_octet_der
(node->value, node->value_len, &len2, value, value_size,
len);
if (result != ASN1_SUCCESS)
return result;
}
break;
case ASN1_ETYPE_OBJECT_ID:
if (node->type & CONST_ASSIGN)
{
if (value)
value[0] = 0;
p = node->down;
while (p)
{
if (type_field (p->type) == ASN1_ETYPE_CONSTANT)
{
ADD_STR_VALUE (value, value_size, p->value);
if (p->right)
{
ADD_STR_VALUE (value, value_size, ".");
}
}
p = p->right;
}
*len = _asn1_strlen (value) + 1;
}
else if ((node->type & CONST_DEFAULT) && (node->value == NULL))
{
p = node->down;
while (type_field (p->type) != ASN1_ETYPE_DEFAULT)
p = p->right;
PUT_STR_VALUE (value, value_size, p->value);
}
else
{
PUT_STR_VALUE (value, value_size, node->value);
}
break;
case ASN1_ETYPE_GENERALIZED_TIME:
case ASN1_ETYPE_UTC_TIME:
PUT_AS_STR_VALUE (value, value_size, node->value, node->value_len);
break;
case ASN1_ETYPE_OCTET_STRING:
case ASN1_ETYPE_GENERALSTRING:
case ASN1_ETYPE_NUMERIC_STRING:
case ASN1_ETYPE_IA5_STRING:
case ASN1_ETYPE_TELETEX_STRING:
case ASN1_ETYPE_PRINTABLE_STRING:
case ASN1_ETYPE_UNIVERSAL_STRING:
case ASN1_ETYPE_BMP_STRING:
case ASN1_ETYPE_UTF8_STRING:
case ASN1_ETYPE_VISIBLE_STRING:
len2 = -1;
result = asn1_get_octet_der
(node->value, node->value_len, &len2, value, value_size,
len);
if (result != ASN1_SUCCESS)
return result;
break;
case ASN1_ETYPE_BIT_STRING:
len2 = -1;
result = asn1_get_bit_der
(node->value, node->value_len, &len2, value, value_size,
len);
if (result != ASN1_SUCCESS)
return result;
break;
case ASN1_ETYPE_CHOICE:
PUT_STR_VALUE (value, value_size, node->down->name);
break;
case ASN1_ETYPE_ANY:
len3 = -1;
len2 = asn1_get_length_der (node->value, node->value_len, &len3);
if (len2 < 0)
return ASN1_DER_ERROR;
PUT_VALUE (value, value_size, node->value + len3, len2);
break;
default:
return ASN1_ELEMENT_NOT_FOUND;
break;
}
return ASN1_SUCCESS;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-476'], 'message': 'Allow a NULL value in asn1_read_value() for all types.'</s>
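Generalizing the NULL guard visible above (the 'if (value) value[0] = 0;' line) to the append path: a hedged sketch whose bodies are illustrative, not libtasn1's actual macros.
#include <stdio.h>
#include <string.h>
/* When the caller passed no buffer, only the running length is
 * tracked and nothing is written. When a buffer exists, capacity is
 * checked before appending. */
static int add_str_value(char *value, int value_size, int *used,
                         const char *s)
{
    int n = (int)strlen(s);
    if (value != NULL && *used + n + 1 > value_size)
        return -1;                       /* would overflow caller buffer */
    if (value != NULL)
        memcpy(value + *used, s, n + 1);
    *used += n;                          /* length accounting always runs */
    return 0;
}
int main(void)
{
    int used = 0;
    add_str_value(NULL, 0, &used, "1.2.840");  /* NULL: just measure */
    printf("need %d bytes\n", used + 1);
    return 0;
}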
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
u32 val, ktime_t *abs_time, u32 bitset,
u32 __user *uaddr2)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
struct rt_mutex *pi_mutex = NULL;
struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
if (uaddr == uaddr2)
return -EINVAL;
if (!bitset)
return -EINVAL;
if (abs_time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
CLOCK_REALTIME : CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
/*
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
debug_rt_mutex_init_waiter(&rt_waiter);
RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
RB_CLEAR_NODE(&rt_waiter.tree_entry);
rt_waiter.task = NULL;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
q.bitset = bitset;
q.rt_waiter = &rt_waiter;
q.requeue_pi_key = &key2;
/*
* Prepare to wait on uaddr. On success, increments q.key (key1) ref
* count.
*/
ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
if (ret)
goto out_key2;
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
spin_lock(&hb->lock);
ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
spin_unlock(&hb->lock);
if (ret)
goto out_put_keys;
/*
* In order for us to be here, we know our q.key == key2, and since
* we took the hb->lock above, we also know that futex_requeue() has
* completed and we no longer have to concern ourselves with a wakeup
* race with the atomic proxy lock acquisition by the requeue code. The
* futex_requeue dropped our key1 reference and incremented our key2
* reference count.
*/
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
/*
* Got the lock. We might not be the anticipated owner if we
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
spin_unlock(q.lock_ptr);
}
} else {
/*
* We have been woken up by futex_unlock_pi(), a timeout, or a
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
* the pi_state.
*/
WARN_ON(!q.pi_state);
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
debug_rt_mutex_free_waiter(&rt_waiter);
spin_lock(q.lock_ptr);
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
*/
res = fixup_owner(uaddr2, &q, !ret);
/*
* If fixup_owner() returned an error, propagate that. If it
* acquired the lock, clear -ETIMEDOUT or -EINTR.
*/
if (res)
ret = (res < 0) ? res : 0;
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}
/*
* If fixup_pi_state_owner() faulted and was unable to handle the
* fault, unlock the rt_mutex and return the fault to userspace.
*/
if (ret == -EFAULT) {
if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
rt_mutex_unlock(pi_mutex);
} else if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
* futex_lock_pi() directly. We could restart this syscall, but
* it would detect that the user space "val" changed and return
* -EWOULDBLOCK. Save the overhead of the restart and return
* -EWOULDBLOCK directly.
*/
ret = -EWOULDBLOCK;
}
out_put_keys:
put_futex_key(&q.key);
out_key2:
put_futex_key(&key2);
out:
if (to) {
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
}
return ret;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-264', 'CWE-269'], 'message': 'futex-prevent-requeue-pi-on-same-futex.patch futex: Forbid uaddr == uaddr2 in futex_requeue(..., requeue_pi=1)
If uaddr == uaddr2, then we have broken the rule of only requeueing from
a non-pi futex to a pi futex with this call. If we attempt this, then
dangling pointers may be left for rt_waiter resulting in an exploitable
condition.
This change brings futex_requeue() in line with futex_wait_requeue_pi()
which performs the same check as per commit 6f7b0a2a5c0f ("futex: Forbid
uaddr == uaddr2 in futex_wait_requeue_pi()")
[ tglx: Compare the resulting keys as well, as uaddrs might be
different depending on the mapping ]
Fixes CVE-2014-3153.
Reported-by: Pinkie Pie
Signed-off-by: Will Drewry <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Cc: [email protected]
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Darren Hart <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>'</s>
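A user-space sketch of the CVE-2014-3153 guard named in the message; the patched futex_requeue() appears two entries below. match_futex() here is a stand-in for the kernel helper.
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
struct futex_key { unsigned long word; void *ptr; };
static bool match_futex(struct futex_key *a, struct futex_key *b)
{
    return a->word == b->word && a->ptr == b->ptr;
}
/* A PI requeue from a futex onto itself must be rejected, both by raw
 * address and, for shared futexes, by the resolved keys. */
static int requeue_pi_precheck(unsigned *uaddr1, unsigned *uaddr2,
                               struct futex_key *k1, struct futex_key *k2)
{
    if (uaddr1 == uaddr2)
        return -EINVAL;   /* same address: dangling rt_waiter risk */
    if (match_futex(k1, k2))
        return -EINVAL;   /* same object reached via two mappings */
    return 0;
}
int main(void)
{
    unsigned f = 0;
    struct futex_key k = { 1, &f };
    printf("%d\n", requeue_pi_precheck(&f, &f, &k, &k));  /* -EINVAL */
    return 0;
}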
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
struct task_struct *new_owner;
struct futex_pi_state *pi_state = this->pi_state;
u32 uninitialized_var(curval), newval;
if (!pi_state)
return -EINVAL;
/*
* If current does not own the pi_state then the futex is
* inconsistent and user space fiddled with the futex value.
*/
if (pi_state->owner != current)
return -EINVAL;
raw_spin_lock(&pi_state->pi_mutex.wait_lock);
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
/*
* It is possible that the next waiter (the one that brought
* this owner to the kernel) timed out and is no longer
* waiting on the lock.
*/
if (!new_owner)
new_owner = this->task;
/*
* We pass it to the next owner. (The WAITERS bit is always
* kept enabled while there is PI state around. We must also
* preserve the owner died bit.)
*/
if (!(uval & FUTEX_OWNER_DIED)) {
int ret = 0;
newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
ret = -EFAULT;
else if (curval != uval)
ret = -EINVAL;
if (ret) {
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
return ret;
}
}
raw_spin_lock_irq(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
raw_spin_unlock_irq(&pi_state->owner->pi_lock);
raw_spin_lock_irq(&new_owner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &new_owner->pi_state_list);
pi_state->owner = new_owner;
raw_spin_unlock_irq(&new_owner->pi_lock);
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
rt_mutex_unlock(&pi_state->pi_mutex);
return 0;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-269'], 'message': 'futex: Always cleanup owner tid in unlock_pi
If the owner died bit is set at futex_unlock_pi, we currently do not
cleanup the user space futex. So the owner TID of the current owner
(the unlocker) persists. That's observable inconsistant state,
especially when the ownership of the pi state got transferred.
Clean it up unconditionally.
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Will Drewry <[email protected]>
Cc: Darren Hart <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>'</s>
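A sketch of the unconditional TID update described above. Bit values are taken from linux/futex.h; whether the real patch also clears FUTEX_OWNER_DIED is deliberately left open here.
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#define FUTEX_WAITERS    0x80000000u
#define FUTEX_OWNER_DIED 0x40000000u
#define FUTEX_TID_MASK   0x3fffffffu
/* Compute the new user-space futex word unconditionally, so a stale
 * owner TID never survives an unlock just because FUTEX_OWNER_DIED
 * happened to be set. */
static uint32_t unlock_newval(uint32_t new_owner_tid)
{
    return FUTEX_WAITERS | (new_owner_tid & FUTEX_TID_MASK);
}
int main(void)
{
    uint32_t before = FUTEX_OWNER_DIED | 1234;  /* dead owner's TID */
    printf("0x%08" PRIx32 " -> 0x%08" PRIx32 "\n",
           before, unlock_newval(5678));
    return 0;
}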
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
u32 __user *uaddr2, int nr_wake, int nr_requeue,
u32 *cmpval, int requeue_pi)
{
union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
int drop_count = 0, task_count = 0, ret;
struct futex_pi_state *pi_state = NULL;
struct futex_hash_bucket *hb1, *hb2;
struct futex_q *this, *next;
if (requeue_pi) {
/*
* Requeue PI only works on two distinct uaddrs. This
* check is only valid for private futexes. See below.
*/
if (uaddr1 == uaddr2)
return -EINVAL;
/*
* requeue_pi requires a pi_state, try to allocate it now
* without any locks in case it fails.
*/
if (refill_pi_state_cache())
return -ENOMEM;
/*
* requeue_pi must wake as many tasks as it can, up to nr_wake
* + nr_requeue, since it acquires the rt_mutex prior to
* returning to userspace, so as to not leave the rt_mutex with
* waiters and no owner. However, second and third wake-ups
* cannot be predicted as they involve race conditions with the
* first wake and a fault while looking up the pi_state. Both
* pthread_cond_signal() and pthread_cond_broadcast() should
* use nr_wake=1.
*/
if (nr_wake != 1)
return -EINVAL;
}
retry:
if (pi_state != NULL) {
/*
* We will have to lookup the pi_state again, so free this one
* to keep the accounting correct.
*/
free_pi_state(pi_state);
pi_state = NULL;
}
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
requeue_pi ? VERIFY_WRITE : VERIFY_READ);
if (unlikely(ret != 0))
goto out_put_key1;
/*
* The check above which compares uaddrs is not sufficient for
* shared futexes. We need to compare the keys:
*/
if (requeue_pi && match_futex(&key1, &key2)) {
ret = -EINVAL;
goto out_put_keys;
}
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
retry_private:
hb_waiters_inc(hb2);
double_lock_hb(hb1, hb2);
if (likely(cmpval != NULL)) {
u32 curval;
ret = get_futex_value_locked(&curval, uaddr1);
if (unlikely(ret)) {
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
ret = get_user(curval, uaddr1);
if (ret)
goto out_put_keys;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&key2);
put_futex_key(&key1);
goto retry;
}
if (curval != *cmpval) {
ret = -EAGAIN;
goto out_unlock;
}
}
if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
/*
* Attempt to acquire uaddr2 and wake the top waiter. If we
* intend to requeue waiters, force setting the FUTEX_WAITERS
* bit. We force this here where we are able to easily handle
* faults rather in the requeue loop below.
*/
ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
&key2, &pi_state, nr_requeue);
/*
* At this point the top_waiter has either taken uaddr2 or is
* waiting on it. If the former, then the pi_state will not
* exist yet, look it up one more time to ensure we have a
* reference to it. If the lock was taken, ret contains the
* vpid of the top waiter task.
*/
if (ret > 0) {
WARN_ON(pi_state);
drop_count++;
task_count++;
/*
* If we acquired the lock, then the user
* space value of uaddr2 should be vpid. It
* cannot be changed by the top waiter as it
* is blocked on hb2 lock if it tries to do
* so. If something fiddled with it behind our
* back the pi state lookup might unearth
* it. So we rather use the known value than
* rereading and handing potential crap to
* lookup_pi_state.
*/
ret = lookup_pi_state(ret, hb2, &key2, &pi_state, NULL);
}
switch (ret) {
case 0:
break;
case -EFAULT:
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
put_futex_key(&key2);
put_futex_key(&key1);
ret = fault_in_user_writeable(uaddr2);
if (!ret)
goto retry;
goto out;
case -EAGAIN:
/* The owner was exiting, try again. */
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
put_futex_key(&key2);
put_futex_key(&key1);
cond_resched();
goto retry;
default:
goto out_unlock;
}
}
plist_for_each_entry_safe(this, next, &hb1->chain, list) {
if (task_count - nr_wake >= nr_requeue)
break;
if (!match_futex(&this->key, &key1))
continue;
/*
* FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
* be paired with each other and no other futex ops.
*
* We should never be requeueing a futex_q with a pi_state,
* which is awaiting a futex_unlock_pi().
*/
if ((requeue_pi && !this->rt_waiter) ||
(!requeue_pi && this->rt_waiter) ||
this->pi_state) {
ret = -EINVAL;
break;
}
/*
* Wake nr_wake waiters. For requeue_pi, if we acquired the
* lock, we already woke the top_waiter. If not, it will be
* woken by futex_unlock_pi().
*/
if (++task_count <= nr_wake && !requeue_pi) {
wake_futex(this);
continue;
}
/* Ensure we requeue to the expected futex for requeue_pi. */
if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
ret = -EINVAL;
break;
}
/*
* Requeue nr_requeue waiters and possibly one more in the case
* of requeue_pi if we couldn't acquire the lock atomically.
*/
if (requeue_pi) {
/* Prepare the waiter to take the rt_mutex. */
atomic_inc(&pi_state->refcount);
this->pi_state = pi_state;
ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
this->rt_waiter,
this->task, 1);
if (ret == 1) {
/* We got the lock. */
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
} else if (ret) {
/* -EDEADLK */
this->pi_state = NULL;
free_pi_state(pi_state);
goto out_unlock;
}
}
requeue_futex(this, hb1, hb2, &key2);
drop_count++;
}
out_unlock:
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
/*
* drop_futex_key_refs() must be called outside the spinlocks. During
* the requeue we moved futex_q's from the hash bucket at key1 to the
* one at key2 and updated their key pointer. We no longer need to
* hold the references to key1.
*/
while (--drop_count >= 0)
drop_futex_key_refs(&key1);
out_put_keys:
put_futex_key(&key2);
out_put_key1:
put_futex_key(&key1);
out:
if (pi_state != NULL)
free_pi_state(pi_state);
return ret ? ret : task_count;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-269'], 'message': 'futex: Make lookup_pi_state more robust
The current implementation of lookup_pi_state has ambiguous handling of
the TID value 0 in the user space futex. We can get into the kernel
even if the TID value is 0, because either there is a stale waiters bit
or the owner died bit is set or we are called from the requeue_pi path
or from user space just for fun.
The current code avoids an explicit sanity check for pid = 0 in case
that kernel internal state (waiters) are found for the user space
address. This can lead to state leakage and worse under some
circumstances.
Handle the cases explicit:
Waiter | pi_state | pi->owner | uTID | uODIED | ?
[1] NULL | --- | --- | 0 | 0/1 | Valid
[2] NULL | --- | --- | >0 | 0/1 | Valid
[3] Found | NULL | -- | Any | 0/1 | Invalid
[4] Found | Found | NULL | 0 | 1 | Valid
[5] Found | Found | NULL | >0 | 1 | Invalid
[6] Found | Found | task | 0 | 1 | Valid
[7] Found | Found | NULL | Any | 0 | Invalid
[8] Found | Found | task | ==taskTID | 0/1 | Valid
[9] Found | Found | task | 0 | 0 | Invalid
[10] Found | Found | task | !=taskTID | 0/1 | Invalid
[1] Indicates that the kernel can acquire the futex atomically. We
came came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
[2] Valid, if TID does not belong to a kernel thread. If no matching
thread is found then it indicates that the owner TID has died.
[3] Invalid. The waiter is queued on a non PI futex
[4] Valid state after exit_robust_list(), which sets the user space
value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
[5] The user space value got manipulated between exit_robust_list()
and exit_pi_state_list()
[6] Valid state after exit_pi_state_list() which sets the new owner in
the pi_state but cannot access the user space value.
[7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
[8] Owner and user space value match
[9] There is no transient state which sets the user space TID to 0
except exit_robust_list(), but this is indicated by the
FUTEX_OWNER_DIED bit. See [4]
[10] There is no transient state which leaves owner and user space
TID out of sync.
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Will Drewry <[email protected]>
Cc: Darren Hart <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>'</s>
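The state table above, transcribed into an explicit check: a user-space sketch whose field names only approximate the kernel's.
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
struct pi_check {
    bool waiter_found, pi_state_found, owner_set;
    unsigned utid, owner_tid;
    bool owner_died;
};
/* Returns 0 for the rows marked Valid, -EINVAL for the Invalid ones. */
static int validate(const struct pi_check *c)
{
    if (!c->waiter_found)
        return 0;                                        /* rows [1], [2] */
    if (!c->pi_state_found)
        return -EINVAL;                                  /* row [3] */
    if (!c->owner_set)
        return (c->owner_died && c->utid == 0)
               ? 0 : -EINVAL;                            /* [4] vs [5], [7] */
    if (c->owner_died)
        return (c->utid == 0 || c->utid == c->owner_tid)
               ? 0 : -EINVAL;                            /* [6], [8] vs [10] */
    return (c->utid == c->owner_tid) ? 0 : -EINVAL;      /* [8] vs [9], [10] */
}
int main(void)
{
    struct pi_check row9 = { true, true, true, 0, 42, false };
    printf("row [9]: %d\n", validate(&row9));            /* invalid: -EINVAL */
    return 0;
}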
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
union futex_key *key,
struct futex_pi_state **ps,
struct task_struct *task, int set_waiters)
{
int lock_taken, ret, force_take = 0;
u32 uval, newval, curval, vpid = task_pid_vnr(task);
retry:
ret = lock_taken = 0;
/*
* To avoid races, we attempt to take the lock here again
* (by doing a 0 -> TID atomic cmpxchg), while holding all
* the locks. It will most likely not succeed.
*/
newval = vpid;
if (set_waiters)
newval |= FUTEX_WAITERS;
if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
return -EFAULT;
/*
* Detect deadlocks.
*/
if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
return -EDEADLK;
/*
* Surprise - we got the lock, but we do not trust user space at all.
*/
if (unlikely(!curval)) {
/*
* We verify whether there is kernel state for this
* futex. If not, we can safely assume, that the 0 ->
* TID transition is correct. If state exists, we do
* not bother to fixup the user space state as it was
* corrupted already.
*/
return futex_top_waiter(hb, key) ? -EINVAL : 1;
}
uval = curval;
/*
* Set the FUTEX_WAITERS flag, so the owner will know it has someone
* to wake at the next unlock.
*/
newval = curval | FUTEX_WAITERS;
/*
* Should we force take the futex? See below.
*/
if (unlikely(force_take)) {
/*
* Keep the OWNER_DIED and the WAITERS bit and set the
* new TID value.
*/
newval = (curval & ~FUTEX_TID_MASK) | vpid;
force_take = 0;
lock_taken = 1;
}
if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
return -EFAULT;
if (unlikely(curval != uval))
goto retry;
/*
* We took the lock due to forced take over.
*/
if (unlikely(lock_taken))
return 1;
/*
* We dont have the lock. Look up the PI state (or create it if
* we are the first waiter):
*/
ret = lookup_pi_state(uval, hb, key, ps, task);
if (unlikely(ret)) {
switch (ret) {
case -ESRCH:
/*
* We failed to find an owner for this
* futex. So we have no pi_state to block
* on. This can happen in two cases:
*
* 1) The owner died
* 2) A stale FUTEX_WAITERS bit
*
* Re-read the futex value.
*/
if (get_futex_value_locked(&curval, uaddr))
return -EFAULT;
/*
* If the owner died or we have a stale
* WAITERS bit the owner TID in the user space
* futex is 0.
*/
if (!(curval & FUTEX_TID_MASK)) {
force_take = 1;
goto retry;
}
default:
break;
}
}
return ret;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-269'], 'message': 'futex: Make lookup_pi_state more robust
The current implementation of lookup_pi_state has ambiguous handling of
the TID value 0 in the user space futex. We can get into the kernel
even if the TID value is 0, because either there is a stale waiters bit
or the owner died bit is set or we are called from the requeue_pi path
or from user space just for fun.
The current code avoids an explicit sanity check for pid = 0 in case
that kernel internal state (waiters) is found for the user space
address. This can lead to state leakage and worse under some
circumstances.
Handle the cases explicitly:
     Waiter | pi_state | pi->owner | uTID      | uODIED | ?
[1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
[2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
[3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
[4]  Found  | Found    | NULL      | 0         | 1      | Valid
[5]  Found  | Found    | NULL      | >0        | 1      | Invalid
[6]  Found  | Found    | task      | 0         | 1      | Valid
[7]  Found  | Found    | NULL      | Any       | 0      | Invalid
[8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
[9]  Found  | Found    | task      | 0         | 0      | Invalid
[10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
[1] Indicates that the kernel can acquire the futex atomically. We
came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
[2] Valid, if TID does not belong to a kernel thread. If no matching
thread is found then it indicates that the owner TID has died.
[3] Invalid. The waiter is queued on a non-PI futex
[4] Valid state after exit_robust_list(), which sets the user space
value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
[5] The user space value got manipulated between exit_robust_list()
and exit_pi_state_list()
[6] Valid state after exit_pi_state_list() which sets the new owner in
the pi_state but cannot access the user space value.
[7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
[8] Owner and user space value match
[9] There is no transient state which sets the user space TID to 0
except exit_robust_list(), but this is indicated by the
FUTEX_OWNER_DIED bit. See [4]
[10] There is no transient state which leaves owner and user space
TID out of sync.
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Will Drewry <[email protected]>
Cc: Darren Hart <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>'</s>
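For context on why a TID of 0 is ambiguous here: in the PI futex convention the futex word is 0 when free and holds the owner's TID otherwise. A hypothetical user-space sketch of that 0 -> TID fast path (illustrative constants and naming, not taken from the patch):

#include <stdatomic.h>
#include <stdint.h>

#define FUTEX_WAITERS  0x80000000u
#define FUTEX_TID_MASK 0x3fffffffu

/* Uncontended fast path of a PI futex lock: 0 -> TID with no syscall.
 * A TID of 0 normally means "free", which is why a zero uTID combined
 * with kernel waiter state is suspicious. */
static int try_lock_pi_fastpath(_Atomic uint32_t *futex_word, uint32_t my_tid)
{
    uint32_t expected = 0;
    if (atomic_compare_exchange_strong(futex_word, &expected, my_tid))
        return 1;                                /* lock acquired */
    if ((expected & FUTEX_TID_MASK) == my_tid)
        return -1;                               /* we already own it */
    return 0;                  /* contended: fall back to FUTEX_LOCK_PI */
}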
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: */
private int
mconvert(struct magic_set *ms, struct magic *m, int flip)
{
union VALUETYPE *p = &ms->ms_value;
switch (cvt_flip(m->type, flip)) {
case FILE_BYTE:
cvt_8(p, m);
return 1;
case FILE_SHORT:
cvt_16(p, m);
return 1;
case FILE_LONG:
case FILE_DATE:
case FILE_LDATE:
cvt_32(p, m);
return 1;
case FILE_QUAD:
case FILE_QDATE:
case FILE_QLDATE:
case FILE_QWDATE:
cvt_64(p, m);
return 1;
case FILE_STRING:
case FILE_BESTRING16:
case FILE_LESTRING16: {
/* Null terminate and eat *trailing* return */
p->s[sizeof(p->s) - 1] = '\0';
return 1;
}
case FILE_PSTRING: {
char *ptr1 = p->s, *ptr2 = ptr1 + file_pstring_length_size(m);
size_t len = file_pstring_get_length(m, ptr1);
if (len >= sizeof(p->s))
len = sizeof(p->s) - 1;
while (len--)
*ptr1++ = *ptr2++;
*ptr1 = '\0';
return 1;
}
case FILE_BESHORT:
p->h = (short)((p->hs[0]<<8)|(p->hs[1]));
cvt_16(p, m);
return 1;
case FILE_BELONG:
case FILE_BEDATE:
case FILE_BELDATE:
p->l = (int32_t)
((p->hl[0]<<24)|(p->hl[1]<<16)|(p->hl[2]<<8)|(p->hl[3]));
cvt_32(p, m);
return 1;
case FILE_BEQUAD:
case FILE_BEQDATE:
case FILE_BEQLDATE:
case FILE_BEQWDATE:
p->q = (uint64_t)
(((uint64_t)p->hq[0]<<56)|((uint64_t)p->hq[1]<<48)|
((uint64_t)p->hq[2]<<40)|((uint64_t)p->hq[3]<<32)|
((uint64_t)p->hq[4]<<24)|((uint64_t)p->hq[5]<<16)|
((uint64_t)p->hq[6]<<8)|((uint64_t)p->hq[7]));
cvt_64(p, m);
return 1;
case FILE_LESHORT:
p->h = (short)((p->hs[1]<<8)|(p->hs[0]));
cvt_16(p, m);
return 1;
case FILE_LELONG:
case FILE_LEDATE:
case FILE_LELDATE:
p->l = (int32_t)
((p->hl[3]<<24)|(p->hl[2]<<16)|(p->hl[1]<<8)|(p->hl[0]));
cvt_32(p, m);
return 1;
case FILE_LEQUAD:
case FILE_LEQDATE:
case FILE_LEQLDATE:
case FILE_LEQWDATE:
p->q = (uint64_t)
(((uint64_t)p->hq[7]<<56)|((uint64_t)p->hq[6]<<48)|
((uint64_t)p->hq[5]<<40)|((uint64_t)p->hq[4]<<32)|
((uint64_t)p->hq[3]<<24)|((uint64_t)p->hq[2]<<16)|
((uint64_t)p->hq[1]<<8)|((uint64_t)p->hq[0]));
cvt_64(p, m);
return 1;
case FILE_MELONG:
case FILE_MEDATE:
case FILE_MELDATE:
p->l = (int32_t)
((p->hl[1]<<24)|(p->hl[0]<<16)|(p->hl[3]<<8)|(p->hl[2]));
cvt_32(p, m);
return 1;
case FILE_FLOAT:
cvt_float(p, m);
return 1;
case FILE_BEFLOAT:
p->l = ((uint32_t)p->hl[0]<<24)|((uint32_t)p->hl[1]<<16)|
((uint32_t)p->hl[2]<<8) |((uint32_t)p->hl[3]);
cvt_float(p, m);
return 1;
case FILE_LEFLOAT:
p->l = ((uint32_t)p->hl[3]<<24)|((uint32_t)p->hl[2]<<16)|
((uint32_t)p->hl[1]<<8) |((uint32_t)p->hl[0]);
cvt_float(p, m);
return 1;
case FILE_DOUBLE:
cvt_double(p, m);
return 1;
case FILE_BEDOUBLE:
p->q = ((uint64_t)p->hq[0]<<56)|((uint64_t)p->hq[1]<<48)|
((uint64_t)p->hq[2]<<40)|((uint64_t)p->hq[3]<<32)|
((uint64_t)p->hq[4]<<24)|((uint64_t)p->hq[5]<<16)|
((uint64_t)p->hq[6]<<8) |((uint64_t)p->hq[7]);
cvt_double(p, m);
return 1;
case FILE_LEDOUBLE:
p->q = ((uint64_t)p->hq[7]<<56)|((uint64_t)p->hq[6]<<48)|
((uint64_t)p->hq[5]<<40)|((uint64_t)p->hq[4]<<32)|
((uint64_t)p->hq[3]<<24)|((uint64_t)p->hq[2]<<16)|
((uint64_t)p->hq[1]<<8) |((uint64_t)p->hq[0]);
cvt_double(p, m);
return 1;
case FILE_REGEX:
case FILE_SEARCH:
case FILE_DEFAULT:
case FILE_NAME:
case FILE_USE:
return 1;
default:
file_magerror(ms, "invalid type %d in mconvert()", m->type);
return 0; ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'Fixed Bug #67410 fileinfo: mconvert incorrect handling of truncated pascal string size
Upstream
https://github.com/file/file/commit/27a14bc7ba285a0a5ebfdb55e54001aa11932b08'</s>
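The referenced upstream change clamps the copy length so that the pascal string's own length field is accounted for. A lightly annotated sketch close to that fix (the helper names appear in the sample above):

case FILE_PSTRING: {
    size_t sz = file_pstring_length_size(m);   /* length field: 1, 2 or 4 bytes */
    char *ptr1 = p->s, *ptr2 = ptr1 + sz;
    size_t len = file_pstring_get_length(m, ptr1);
    if (len >= sizeof(p->s)) {
        /* Deduct sz, not 1: the copy source starts sz bytes into the
         * buffer, so copying sizeof(p->s) - 1 bytes would read past
         * the end. */
        len = sizeof(p->s) - sz;
    }
    while (len--)
        *ptr1++ = *ptr2++;
    *ptr1 = '\0';
    return 1;
}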
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount)
{
const cdf_section_header_t *shp;
cdf_section_header_t sh;
const uint8_t *p, *q, *e;
int16_t s16;
int32_t s32;
uint32_t u32;
int64_t s64;
uint64_t u64;
cdf_timestamp_t tp;
size_t i, o, o4, nelements, j;
cdf_property_info_t *inp;
if (offs > UINT32_MAX / 4) {
errno = EFTYPE;
goto out;
}
shp = CAST(const cdf_section_header_t *, (const void *)
((const char *)sst->sst_tab + offs));
if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1)
goto out;
sh.sh_len = CDF_TOLE4(shp->sh_len);
#define CDF_SHLEN_LIMIT (UINT32_MAX / 8)
if (sh.sh_len > CDF_SHLEN_LIMIT) {
errno = EFTYPE;
goto out;
}
sh.sh_properties = CDF_TOLE4(shp->sh_properties);
#define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp)))
if (sh.sh_properties > CDF_PROP_LIMIT)
goto out;
DPRINTF(("section len: %u properties %u\n", sh.sh_len,
sh.sh_properties));
if (*maxcount) {
if (*maxcount > CDF_PROP_LIMIT)
goto out;
*maxcount += sh.sh_properties;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
} else {
*maxcount = sh.sh_properties;
inp = CAST(cdf_property_info_t *,
malloc(*maxcount * sizeof(*inp)));
}
if (inp == NULL)
goto out;
*info = inp;
inp += *count;
*count += sh.sh_properties;
p = CAST(const uint8_t *, (const void *)
((const char *)(const void *)sst->sst_tab +
offs + sizeof(sh)));
e = CAST(const uint8_t *, (const void *)
(((const char *)(const void *)shp) + sh.sh_len));
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
((const char *)(const void *)p + ofs
- 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
if (nelements == 0) {
DPRINTF(("CDF_VECTOR with nelements == 0\n"));
goto out;
}
o = 2;
} else {
nelements = 1;
o = 1;
}
o4 = o * sizeof(uint32_t);
if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED))
goto unknown;
switch (inp[i].pi_type & CDF_TYPEMASK) {
case CDF_NULL:
case CDF_EMPTY:
break;
case CDF_SIGNED16:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s16, &q[o4], sizeof(s16));
inp[i].pi_s16 = CDF_TOLE2(s16);
break;
case CDF_SIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s32, &q[o4], sizeof(s32));
inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32);
break;
case CDF_BOOL:
case CDF_UNSIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
inp[i].pi_u32 = CDF_TOLE4(u32);
break;
case CDF_SIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s64, &q[o4], sizeof(s64));
inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64);
break;
case CDF_UNSIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64);
break;
case CDF_FLOAT:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
u32 = CDF_TOLE4(u32);
memcpy(&inp[i].pi_f, &u32, sizeof(inp[i].pi_f));
break;
case CDF_DOUBLE:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
u64 = CDF_TOLE8((uint64_t)u64);
memcpy(&inp[i].pi_d, &u64, sizeof(inp[i].pi_d));
break;
case CDF_LENGTH32_STRING:
case CDF_LENGTH32_WSTRING:
if (nelements > 1) {
size_t nelem = inp - *info;
if (*maxcount > CDF_PROP_LIMIT
|| nelements > CDF_PROP_LIMIT)
goto out;
*maxcount += nelements;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
if (inp == NULL)
goto out;
*info = inp;
inp = *info + nelem;
}
DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n",
nelements));
for (j = 0; j < nelements && i < sh.sh_properties;
j++, i++)
{
uint32_t l = CDF_GETUINT32(q, o);
inp[i].pi_str.s_len = l;
inp[i].pi_str.s_buf = (const char *)
(const void *)(&q[o4 + sizeof(l)]);
DPRINTF(("l = %d, r = %" SIZE_T_FORMAT
"u, s = %s\n", l,
CDF_ROUND(l, sizeof(l)),
inp[i].pi_str.s_buf));
if (l & 1)
l++;
o += l >> 1;
if (q + o >= e)
goto out;
o4 = o * sizeof(uint32_t);
}
i--;
break;
case CDF_FILETIME:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&tp, &q[o4], sizeof(tp));
inp[i].pi_tp = CDF_TOLE8((uint64_t)tp);
break;
case CDF_CLIPBOARD:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
break;
default:
unknown:
DPRINTF(("Don't know how to deal with %x\n",
inp[i].pi_type));
break;
}
}
return 0;
out:
free(*info);
return -1;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'Add missing check offset test (Francisco Alonso, Jan Kaluza at RedHat)'</s>
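The missing test this label refers to is a bounds check on the per-property pointer q, which is derived from an attacker-controlled offset and can wrap below p as well as run past e. A sketch of the added guard in the shape of the surrounding loop (illustrative, not the verbatim diff):

q = (const uint8_t *)(const void *)
    ((const char *)(const void *)p + ofs - 2 * sizeof(uint32_t));
if (q < p) {
    DPRINTF(("Wrapped around %p < %p\n", q, p));
    goto out;   /* offset underflowed the section */
}
if (q > e) {
    DPRINTF(("Ran off the end %p > %p\n", q, e));
    goto out;   /* offset points past the section */
}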
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static void wsgi_setup_access(WSGIDaemonProcess *daemon)
{
/* Setup the umask for the effective user. */
if (daemon->group->umask != -1)
umask(daemon->group->umask);
/* Change to chroot environment. */
if (daemon->group->root) {
if (chroot(daemon->group->root) == -1) {
ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Unable to change root "
"directory to '%s'.", getpid(), daemon->group->root);
}
}
/* Setup the working directory.*/
if (daemon->group->home) {
if (chdir(daemon->group->home) == -1) {
ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Unable to change working "
"directory to '%s'.", getpid(), daemon->group->home);
}
}
else if (geteuid()) {
struct passwd *pwent;
pwent = getpwuid(geteuid());
if (pwent) {
if (chdir(pwent->pw_dir) == -1) {
ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Unable to change working "
"directory to '%s'.", getpid(), pwent->pw_dir);
}
}
else {
ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Unable to determine home "
"directory for uid=%ld.", getpid(), (long)geteuid());
}
}
else {
struct passwd *pwent;
pwent = getpwuid(daemon->group->uid);
if (pwent) {
if (chdir(pwent->pw_dir) == -1) {
ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Unable to change working "
"directory to '%s'.", getpid(), pwent->pw_dir);
}
}
else {
ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Unable to determine home "
"directory for uid=%ld.", getpid(),
(long)daemon->group->uid);
}
}
/* Don't bother switch user/group if not root. */
if (geteuid())
return;
/* Setup the daemon process real and effective group. */
if (setgid(daemon->group->gid) == -1) {
ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Unable to set group id to gid=%u.",
getpid(), (unsigned)daemon->group->gid);
}
else {
if (daemon->group->groups) {
if (setgroups(daemon->group->groups_count,
daemon->group->groups) == -1) {
ap_log_error(APLOG_MARK, APLOG_ALERT, errno,
wsgi_server, "mod_wsgi (pid=%d): Unable "
"to set supplementary groups for uname=%s "
"of '%s'.", getpid(), daemon->group->user,
daemon->group->groups_list);
}
}
else if (initgroups(daemon->group->user, daemon->group->gid) == -1) {
ap_log_error(APLOG_MARK, APLOG_ALERT, errno,
wsgi_server, "mod_wsgi (pid=%d): Unable "
"to set groups for uname=%s and gid=%u.", getpid(),
daemon->group->user, (unsigned)daemon->group->gid);
}
}
/* Setup the daemon process real and effective user. */
if (setuid(daemon->group->uid) == -1) {
ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Unable to change to uid=%ld.",
getpid(), (long)daemon->group->uid);
/*
* On true UNIX systems this should always succeed at
* this point. With certain Linux kernel versions though
* we can get back EAGAIN where the target user had
* reached their process limit. In that case will be left
* running as wrong user. Just exit on all failures to be
* safe. Don't die immediately to avoid a fork bomb.
*/
sleep(20);
exit(-1);
}
/*
* Linux prevents generation of core dumps after setuid()
* has been used. Attempt to reenable ability to dump core
* so that the CoreDumpDirectory directive still works.
*/
#if defined(HAVE_PRCTL) && defined(PR_SET_DUMPABLE)
/* This applies to Linux 2.4 and later. */
if (ap_coredumpdir_configured) {
if (prctl(PR_SET_DUMPABLE, 1)) {
ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Set dumpable failed. This child "
"will not coredump after software errors.", getpid());
}
}
#endif
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-254'], 'message': 'When there is any sort of error in setting up daemon process group, kill the process rather than risk running in an unexpected state.'</s>
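The hardened behaviour this message describes turns each logged failure in the privilege-setup path into a fatal exit. A sketch of the pattern for one of the calls above (same fields as the sample; the delay mirrors the sample's existing anti-fork-bomb comment):

if (daemon->group->root) {
    if (chroot(daemon->group->root) == -1) {
        ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
                     "mod_wsgi (pid=%d): Unable to change root "
                     "directory to '%s'.", getpid(), daemon->group->root);
        sleep(20);   /* don't die immediately, avoid a fork bomb */
        exit(-1);    /* never keep running in an unexpected state */
    }
}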
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int lz4_uncompress(const char *source, char *dest, int osize)
{
const BYTE *ip = (const BYTE *) source;
const BYTE *ref;
BYTE *op = (BYTE *) dest;
BYTE * const oend = op + osize;
BYTE *cpy;
unsigned token;
size_t length;
size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
#if LZ4_ARCH64
size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
#endif
while (1) {
/* get runlength */
token = *ip++;
length = (token >> ML_BITS);
if (length == RUN_MASK) {
size_t len;
len = *ip++;
for (; len == 255; length += 255)
len = *ip++;
length += len;
}
/* copy literals */
cpy = op + length;
if (unlikely(cpy > oend - COPYLENGTH)) {
/*
* Error: not enough place for another match
* (min 4) + 5 literals
*/
if (cpy != oend)
goto _output_error;
memcpy(op, ip, length);
ip += length;
break; /* EOF */
}
LZ4_WILDCOPY(ip, op, cpy);
ip -= (op - cpy);
op = cpy;
/* get offset */
LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
ip += 2;
/* Error: offset create reference outside destination buffer */
if (unlikely(ref < (BYTE *const) dest))
goto _output_error;
/* get matchlength */
length = token & ML_MASK;
if (length == ML_MASK) {
for (; *ip == 255; length += 255)
ip++;
length += *ip++;
}
/* copy repeated sequence */
if (unlikely((op - ref) < STEPSIZE)) {
#if LZ4_ARCH64
size_t dec64 = dec64table[op - ref];
#else
const int dec64 = 0;
#endif
op[0] = ref[0];
op[1] = ref[1];
op[2] = ref[2];
op[3] = ref[3];
op += 4;
ref += 4;
ref -= dec32table[op-ref];
PUT4(ref, op);
op += STEPSIZE - 4;
ref -= dec64;
} else {
LZ4_COPYSTEP(ref, op);
}
cpy = op + length - (STEPSIZE - 4);
if (cpy > (oend - COPYLENGTH)) {
/* Error: request to write beyond destination buffer */
if (cpy > oend)
goto _output_error;
LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
while (op < cpy)
*op++ = *ref++;
op = cpy;
/*
* Check EOF (should never happen, since last 5 bytes
* are supposed to be literals)
*/
if (op == oend)
goto _output_error;
continue;
}
LZ4_SECURECOPY(ref, op, cpy);
op = cpy; /* correction */
}
/* end of decoding */
return (int) (((char *)ip) - source);
/* write overflow error detected */
_output_error:
return (int) (-(((char *)ip) - source));
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'lz4: ensure length does not wrap
Given some pathologically compressed data, lz4 could possibly decide to
wrap a few internal variables, causing unknown things to happen. Catch
this before the wrapping happens and abort the decompression.
Reported-by: "Don A. Bailey" <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>'</s>
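The wrap the message refers to happens while accumulating the variable-length run and match lengths. A sketch of an overflow-safe accumulator in the spirit of the fix (a fragment reusing the sample's locals; the real patch also re-checks the ip and op bounds):

if (length == RUN_MASK) {
    size_t len;
    do {
        len = *ip++;
        if (unlikely(length > (size_t)-1 - len))
            goto _output_error;   /* encoded length would wrap size_t */
        length += len;
    } while (len == 255);
}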
|
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int main(int argc, char** argv) {
char userInput[50] = {0};
U32 timestamp = FUZ_GetMilliStart();
U32 seed=0;
int seedset=0;
int argNb;
int nbTests = NB_ATTEMPTS;
int testNb = 0;
int proba = FUZ_COMPRESSIBILITY_DEFAULT;
// Check command line
programName = argv[0];
for(argNb=1; argNb<argc; argNb++)
{
char* argument = argv[argNb];
if(!argument) continue; // Protection if argument empty
// Decode command (note : aggregated commands are allowed)
if (argument[0]=='-')
{
if (!strcmp(argument, "--no-prompt")) { no_prompt=1; seedset=1; displayLevel=1; continue; }
while (argument[1]!=0)
{
argument++;
switch(*argument)
{
case 'h':
return FUZ_usage();
case 'v':
argument++;
displayLevel=4;
break;
case 'i':
argument++;
nbTests=0;
while ((*argument>='0') && (*argument<='9'))
{
nbTests *= 10;
nbTests += *argument - '0';
argument++;
}
break;
case 's':
argument++;
seed=0; seedset=1;
while ((*argument>='0') && (*argument<='9'))
{
seed *= 10;
seed += *argument - '0';
argument++;
}
break;
case 't':
argument++;
testNb=0;
while ((*argument>='0') && (*argument<='9'))
{
testNb *= 10;
testNb += *argument - '0';
argument++;
}
break;
case 'p':
argument++;
proba=0;
while ((*argument>='0') && (*argument<='9'))
{
proba *= 10;
proba += *argument - '0';
argument++;
}
if (proba<0) proba=0;
if (proba>100) proba=100;
break;
default: ;
}
}
}
}
// Get Seed
printf("Starting LZ4 fuzzer (%i-bits, %s)\n", (int)(sizeof(size_t)*8), LZ4_VERSION);
if (!seedset)
{
printf("Select an Initialisation number (default : random) : ");
fflush(stdout);
if ( no_prompt || fgets(userInput, sizeof userInput, stdin) )
{
if ( sscanf(userInput, "%u", &seed) == 1 ) {}
else seed = FUZ_GetMilliSpan(timestamp);
}
}
printf("Seed = %u\n", seed);
if (proba!=FUZ_COMPRESSIBILITY_DEFAULT) printf("Compressibility : %i%%\n", proba);
//FUZ_SecurityTest();
if (nbTests<=0) nbTests=1;
return FUZ_test(seed, nbTests, testNb, ((double)proba) / 100);
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'Fixed : issue 52 (reported by Ludwig Strigeus)'</s>
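Since the label is input validation (CWE-20), note that the hand-rolled digit loops in this harness can themselves overflow int on long arguments. A hypothetical, safer helper using strtoul (not part of the referenced fix):

#include <stdlib.h>
#include <limits.h>

/* Parse a decimal flag argument, clamping instead of wrapping. */
static unsigned parse_u32_arg(const char **argp)
{
    char *end;
    unsigned long v = strtoul(*argp, &end, 10);
    if (v > UINT_MAX)
        v = UINT_MAX;
    *argp = end;   /* advance past the digits, as the loops above do */
    return (unsigned)v;
}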
|