func (string, 0-484k chars) | target (int64, 0 or 1) | cwe (sequence) | project (string, 2-29 chars) | commit_id (string, 40 chars) | hash (float64) | size (int64, 1-24k) | message (string, 0-13.3k chars)
---|---|---|---|---|---|---|---|
jas_matrix_t *jas_matrix_create(int numrows, int numcols)
{
jas_matrix_t *matrix;
int i;
if (!(matrix = jas_malloc(sizeof(jas_matrix_t)))) {
return 0;
}
matrix->flags_ = 0;
matrix->numrows_ = numrows;
matrix->numcols_ = numcols;
matrix->rows_ = 0;
matrix->maxrows_ = numrows;
matrix->data_ = 0;
matrix->datasize_ = numrows * numcols;
if (matrix->maxrows_ > 0) {
if (!(matrix->rows_ = jas_malloc(matrix->maxrows_ *
sizeof(jas_seqent_t *)))) {
jas_matrix_destroy(matrix);
return 0;
}
}
if (matrix->datasize_ > 0) {
if (!(matrix->data_ = jas_malloc(matrix->datasize_ *
sizeof(jas_seqent_t)))) {
jas_matrix_destroy(matrix);
return 0;
}
}
for (i = 0; i < numrows; ++i) {
matrix->rows_[i] = &matrix->data_[i * matrix->numcols_];
}
for (i = 0; i < matrix->datasize_; ++i) {
matrix->data_[i] = 0;
}
matrix->xstart_ = 0;
matrix->ystart_ = 0;
matrix->xend_ = matrix->numcols_;
matrix->yend_ = matrix->numrows_;
return matrix;
} | 1 | [
"CWE-189"
] | jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 60,119,243,757,426,160,000,000,000,000,000,000,000 | 47 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
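The fix commit above names jas_alloc2, jas_alloc3, and jas_realloc2 as overflow-checked replacements for raw jas_malloc(nmemb * size) calls like the ones in jas_matrix_create and jas_calloc. A minimal sketch of the two-factor variant, assuming only standard C; the actual JasPer helpers may differ in detail:

```c
#include <stdint.h>
#include <stdlib.h>

/* Sketch of an overflow-checked two-factor allocator in the spirit of
 * jas_alloc2 (illustrative; not the exact JasPer implementation). */
void *checked_alloc2(size_t nmemb, size_t size)
{
	/* Reject the request if nmemb * size would wrap past SIZE_MAX,
	 * instead of silently allocating an undersized buffer. */
	if (size && nmemb > SIZE_MAX / size) {
		return NULL;
	}
	return malloc(nmemb * size);
}
```

With this guard, a request such as numrows * numcols that wraps around fails cleanly rather than returning a too-small buffer that later writes overrun.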
void *jas_calloc(size_t nmemb, size_t size)
{
void *ptr;
size_t n;
n = nmemb * size;
if (!(ptr = jas_malloc(n * sizeof(char)))) {
return 0;
}
memset(ptr, 0, n);
return ptr;
} | 1 | [
"CWE-189"
] | jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 88,656,556,554,237,760,000,000,000,000,000,000,000 | 11 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
char *jas_strdup(const char *s)
{
int n;
char *p;
n = strlen(s) + 1;
if (!(p = jas_malloc(n * sizeof(char)))) {
return 0;
}
strcpy(p, s);
return p;
} | 1 | [
"CWE-189"
] | jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 210,565,341,194,386,930,000,000,000,000,000,000,000 | 11 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
static int jpc_siz_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate,
jas_stream_t *in)
{
jpc_siz_t *siz = &ms->parms.siz;
unsigned int i;
uint_fast8_t tmp;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
if (jpc_getuint16(in, &siz->caps) ||
jpc_getuint32(in, &siz->width) ||
jpc_getuint32(in, &siz->height) ||
jpc_getuint32(in, &siz->xoff) ||
jpc_getuint32(in, &siz->yoff) ||
jpc_getuint32(in, &siz->tilewidth) ||
jpc_getuint32(in, &siz->tileheight) ||
jpc_getuint32(in, &siz->tilexoff) ||
jpc_getuint32(in, &siz->tileyoff) ||
jpc_getuint16(in, &siz->numcomps)) {
return -1;
}
if (!siz->width || !siz->height || !siz->tilewidth ||
!siz->tileheight || !siz->numcomps) {
return -1;
}
if (!(siz->comps = jas_malloc(siz->numcomps * sizeof(jpc_sizcomp_t)))) {
return -1;
}
for (i = 0; i < siz->numcomps; ++i) {
if (jpc_getuint8(in, &tmp) ||
jpc_getuint8(in, &siz->comps[i].hsamp) ||
jpc_getuint8(in, &siz->comps[i].vsamp)) {
jas_free(siz->comps);
return -1;
}
siz->comps[i].sgnd = (tmp >> 7) & 1;
siz->comps[i].prec = (tmp & 0x7f) + 1;
}
if (jas_stream_eof(in)) {
jas_free(siz->comps);
return -1;
}
return 0;
} | 1 | [
"CWE-189"
] | jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 334,886,412,224,388,530,000,000,000,000,000,000,000 | 45 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
void jpc_qmfb_join_col(jpc_fix_t *a, int numrows, int stride,
int parity)
{
int bufsize = JPC_CEILDIVPOW2(numrows, 1);
jpc_fix_t joinbuf[QMFB_JOINBUFSIZE];
jpc_fix_t *buf = joinbuf;
register jpc_fix_t *srcptr;
register jpc_fix_t *dstptr;
register int n;
int hstartcol;
/* Allocate memory for the join buffer from the heap. */
if (bufsize > QMFB_JOINBUFSIZE) {
if (!(buf = jas_malloc(bufsize * sizeof(jpc_fix_t)))) {
/* We have no choice but to commit suicide. */
abort();
}
}
hstartcol = (numrows + 1 - parity) >> 1;
/* Save the samples from the lowpass channel. */
n = hstartcol;
srcptr = &a[0];
dstptr = buf;
while (n-- > 0) {
*dstptr = *srcptr;
srcptr += stride;
++dstptr;
}
/* Copy the samples from the highpass channel into place. */
srcptr = &a[hstartcol * stride];
dstptr = &a[(1 - parity) * stride];
n = numrows - hstartcol;
while (n-- > 0) {
*dstptr = *srcptr;
dstptr += 2 * stride;
srcptr += stride;
}
/* Copy the samples from the lowpass channel into place. */
srcptr = buf;
dstptr = &a[parity * stride];
n = hstartcol;
while (n-- > 0) {
*dstptr = *srcptr;
dstptr += 2 * stride;
++srcptr;
}
/* If the join buffer was allocated on the heap, free this memory. */
if (buf != joinbuf) {
jas_free(buf);
}
} | 1 | [
"CWE-189"
] | jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 282,878,809,360,809,600,000,000,000,000,000,000,000 | 56 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
static int jas_icclut16_input(jas_iccattrval_t *attrval, jas_stream_t *in,
int cnt)
{
int i;
int j;
int clutsize;
jas_icclut16_t *lut16 = &attrval->data.lut16;
lut16->clut = 0;
lut16->intabs = 0;
lut16->intabsbuf = 0;
lut16->outtabs = 0;
lut16->outtabsbuf = 0;
if (jas_iccgetuint8(in, &lut16->numinchans) ||
jas_iccgetuint8(in, &lut16->numoutchans) ||
jas_iccgetuint8(in, &lut16->clutlen) ||
jas_stream_getc(in) == EOF)
goto error;
for (i = 0; i < 3; ++i) {
for (j = 0; j < 3; ++j) {
if (jas_iccgetsint32(in, &lut16->e[i][j]))
goto error;
}
}
if (jas_iccgetuint16(in, &lut16->numintabents) ||
jas_iccgetuint16(in, &lut16->numouttabents))
goto error;
clutsize = jas_iccpowi(lut16->clutlen, lut16->numinchans) * lut16->numoutchans;
if (!(lut16->clut = jas_malloc(clutsize * sizeof(jas_iccuint16_t))) ||
!(lut16->intabsbuf = jas_malloc(lut16->numinchans *
lut16->numintabents * sizeof(jas_iccuint16_t))) ||
!(lut16->intabs = jas_malloc(lut16->numinchans *
sizeof(jas_iccuint16_t *))))
goto error;
for (i = 0; i < lut16->numinchans; ++i)
lut16->intabs[i] = &lut16->intabsbuf[i * lut16->numintabents];
if (!(lut16->outtabsbuf = jas_malloc(lut16->numoutchans *
lut16->numouttabents * sizeof(jas_iccuint16_t))) ||
!(lut16->outtabs = jas_malloc(lut16->numoutchans *
sizeof(jas_iccuint16_t *))))
goto error;
for (i = 0; i < lut16->numoutchans; ++i)
lut16->outtabs[i] = &lut16->outtabsbuf[i * lut16->numouttabents];
for (i = 0; i < lut16->numinchans; ++i) {
for (j = 0; j < JAS_CAST(int, lut16->numintabents); ++j) {
if (jas_iccgetuint16(in, &lut16->intabs[i][j]))
goto error;
}
}
for (i = 0; i < lut16->numoutchans; ++i) {
for (j = 0; j < JAS_CAST(int, lut16->numouttabents); ++j) {
if (jas_iccgetuint16(in, &lut16->outtabs[i][j]))
goto error;
}
}
for (i = 0; i < clutsize; ++i) {
if (jas_iccgetuint16(in, &lut16->clut[i]))
goto error;
}
if (JAS_CAST(int, 44 + 2 * (lut16->numinchans * lut16->numintabents +
lut16->numoutchans * lut16->numouttabents +
jas_iccpowi(lut16->clutlen, lut16->numinchans) *
lut16->numoutchans)) != cnt)
goto error;
return 0;
error:
jas_icclut16_destroy(attrval);
return -1;
} | 1 | [
"CWE-189"
] | jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 236,904,309,724,426,900,000,000,000,000,000,000,000 | 68 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
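jas_icclut16_input above multiplies three externally influenced quantities (for example numinchans * numintabents * sizeof(jas_iccuint16_t)) before allocating. A hedged sketch of a three-factor checker in the spirit of the jas_alloc3 named in the commit message (illustrative implementation):

```c
#include <stdint.h>
#include <stdlib.h>

/* Check a*b first, then (a*b)*c, so no intermediate product can wrap. */
void *checked_alloc3(size_t a, size_t b, size_t c)
{
	size_t n;

	if (b && a > SIZE_MAX / b) {
		return NULL;
	}
	n = a * b;
	if (c && n > SIZE_MAX / c) {
		return NULL;
	}
	return malloc(n * c);
}
```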
static jpc_enc_prc_t *prc_create(jpc_enc_prc_t *prc, jpc_enc_cp_t *cp, jpc_enc_band_t *band)
{
uint_fast32_t prcno;
uint_fast32_t prcxind;
uint_fast32_t prcyind;
uint_fast32_t cbgtlx;
uint_fast32_t cbgtly;
uint_fast32_t tlprctlx;
uint_fast32_t tlprctly;
uint_fast32_t tlcbgtlx;
uint_fast32_t tlcbgtly;
uint_fast16_t rlvlno;
jpc_enc_rlvl_t *rlvl;
uint_fast32_t tlcblktlx;
uint_fast32_t tlcblktly;
uint_fast32_t brcblkbrx;
uint_fast32_t brcblkbry;
uint_fast32_t cblkno;
jpc_enc_cblk_t *cblk;
jpc_enc_tcmpt_t *tcmpt;
prc->cblks = 0;
prc->incltree = 0;
prc->savincltree = 0;
prc->nlibtree = 0;
prc->savnlibtree = 0;
rlvl = band->rlvl;
tcmpt = rlvl->tcmpt;
rlvlno = rlvl - tcmpt->rlvls;
prcno = prc - band->prcs;
prcxind = prcno % rlvl->numhprcs;
prcyind = prcno / rlvl->numhprcs;
prc->band = band;
tlprctlx = JPC_FLOORTOMULTPOW2(rlvl->tlx, rlvl->prcwidthexpn);
tlprctly = JPC_FLOORTOMULTPOW2(rlvl->tly, rlvl->prcheightexpn);
if (!rlvlno) {
tlcbgtlx = tlprctlx;
tlcbgtly = tlprctly;
} else {
tlcbgtlx = JPC_CEILDIVPOW2(tlprctlx, 1);
tlcbgtly = JPC_CEILDIVPOW2(tlprctly, 1);
}
/* Compute the coordinates of the top-left and bottom-right
corners of the precinct. */
cbgtlx = tlcbgtlx + (prcxind << rlvl->cbgwidthexpn);
cbgtly = tlcbgtly + (prcyind << rlvl->cbgheightexpn);
prc->tlx = JAS_MAX(jas_seq2d_xstart(band->data), cbgtlx);
prc->tly = JAS_MAX(jas_seq2d_ystart(band->data), cbgtly);
prc->brx = JAS_MIN(jas_seq2d_xend(band->data), cbgtlx +
(1 << rlvl->cbgwidthexpn));
prc->bry = JAS_MIN(jas_seq2d_yend(band->data), cbgtly +
(1 << rlvl->cbgheightexpn));
if (prc->tlx < prc->brx && prc->tly < prc->bry) {
/* The precinct contains at least one code block. */
tlcblktlx = JPC_FLOORTOMULTPOW2(prc->tlx, rlvl->cblkwidthexpn);
tlcblktly = JPC_FLOORTOMULTPOW2(prc->tly, rlvl->cblkheightexpn);
brcblkbrx = JPC_CEILTOMULTPOW2(prc->brx, rlvl->cblkwidthexpn);
brcblkbry = JPC_CEILTOMULTPOW2(prc->bry, rlvl->cblkheightexpn);
prc->numhcblks = JPC_FLOORDIVPOW2(brcblkbrx - tlcblktlx,
rlvl->cblkwidthexpn);
prc->numvcblks = JPC_FLOORDIVPOW2(brcblkbry - tlcblktly,
rlvl->cblkheightexpn);
prc->numcblks = prc->numhcblks * prc->numvcblks;
if (!(prc->incltree = jpc_tagtree_create(prc->numhcblks,
prc->numvcblks))) {
goto error;
}
if (!(prc->nlibtree = jpc_tagtree_create(prc->numhcblks,
prc->numvcblks))) {
goto error;
}
if (!(prc->savincltree = jpc_tagtree_create(prc->numhcblks,
prc->numvcblks))) {
goto error;
}
if (!(prc->savnlibtree = jpc_tagtree_create(prc->numhcblks,
prc->numvcblks))) {
goto error;
}
if (!(prc->cblks = jas_malloc(prc->numcblks * sizeof(jpc_enc_cblk_t)))) {
goto error;
}
for (cblkno = 0, cblk = prc->cblks; cblkno < prc->numcblks;
++cblkno, ++cblk) {
cblk->passes = 0;
cblk->stream = 0;
cblk->mqenc = 0;
cblk->data = 0;
cblk->flags = 0;
cblk->prc = prc;
}
for (cblkno = 0, cblk = prc->cblks; cblkno < prc->numcblks;
++cblkno, ++cblk) {
if (!cblk_create(cblk, cp, prc)) {
goto error;
}
}
} else {
/* The precinct does not contain any code blocks. */
prc->tlx = prc->brx;
prc->tly = prc->bry;
prc->numcblks = 0;
prc->numhcblks = 0;
prc->numvcblks = 0;
prc->cblks = 0;
prc->incltree = 0;
prc->nlibtree = 0;
prc->savincltree = 0;
prc->savnlibtree = 0;
}
return prc;
error:
prc_destroy(prc);
return 0;
} | 1 | [
"CWE-189"
] | jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 179,756,962,391,814,660,000,000,000,000,000,000,000 | 124 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
void jpc_qmfb_join_colgrp(jpc_fix_t *a, int numrows, int stride,
int parity)
{
int bufsize = JPC_CEILDIVPOW2(numrows, 1);
jpc_fix_t joinbuf[QMFB_JOINBUFSIZE * JPC_QMFB_COLGRPSIZE];
jpc_fix_t *buf = joinbuf;
jpc_fix_t *srcptr;
jpc_fix_t *dstptr;
register jpc_fix_t *srcptr2;
register jpc_fix_t *dstptr2;
register int n;
register int i;
int hstartcol;
/* Allocate memory for the join buffer from the heap. */
if (bufsize > QMFB_JOINBUFSIZE) {
if (!(buf = jas_malloc(bufsize * JPC_QMFB_COLGRPSIZE * sizeof(jpc_fix_t)))) {
/* We have no choice but to commit suicide. */
abort();
}
}
hstartcol = (numrows + 1 - parity) >> 1;
/* Save the samples from the lowpass channel. */
n = hstartcol;
srcptr = &a[0];
dstptr = buf;
while (n-- > 0) {
dstptr2 = dstptr;
srcptr2 = srcptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
*dstptr2 = *srcptr2;
++dstptr2;
++srcptr2;
}
srcptr += stride;
dstptr += JPC_QMFB_COLGRPSIZE;
}
/* Copy the samples from the highpass channel into place. */
srcptr = &a[hstartcol * stride];
dstptr = &a[(1 - parity) * stride];
n = numrows - hstartcol;
while (n-- > 0) {
dstptr2 = dstptr;
srcptr2 = srcptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
*dstptr2 = *srcptr2;
++dstptr2;
++srcptr2;
}
dstptr += 2 * stride;
srcptr += stride;
}
/* Copy the samples from the lowpass channel into place. */
srcptr = buf;
dstptr = &a[parity * stride];
n = hstartcol;
while (n-- > 0) {
dstptr2 = dstptr;
srcptr2 = srcptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
*dstptr2 = *srcptr2;
++dstptr2;
++srcptr2;
}
dstptr += 2 * stride;
srcptr += JPC_QMFB_COLGRPSIZE;
}
/* If the join buffer was allocated on the heap, free this memory. */
if (buf != joinbuf) {
jas_free(buf);
}
} | 1 | [
"CWE-189"
] | jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 13,372,672,139,245,177,000,000,000,000,000,000,000 | 77 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
jpc_pi_t *jpc_dec_pi_create(jpc_dec_t *dec, jpc_dec_tile_t *tile)
{
jpc_pi_t *pi;
int compno;
jpc_picomp_t *picomp;
jpc_pirlvl_t *pirlvl;
jpc_dec_tcomp_t *tcomp;
int rlvlno;
jpc_dec_rlvl_t *rlvl;
int prcno;
int *prclyrno;
jpc_dec_cmpt_t *cmpt;
if (!(pi = jpc_pi_create0())) {
return 0;
}
pi->numcomps = dec->numcomps;
if (!(pi->picomps = jas_malloc(pi->numcomps * sizeof(jpc_picomp_t)))) {
jpc_pi_destroy(pi);
return 0;
}
for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno,
++picomp) {
picomp->pirlvls = 0;
}
for (compno = 0, tcomp = tile->tcomps, picomp = pi->picomps;
compno < pi->numcomps; ++compno, ++tcomp, ++picomp) {
picomp->numrlvls = tcomp->numrlvls;
if (!(picomp->pirlvls = jas_malloc(picomp->numrlvls *
sizeof(jpc_pirlvl_t)))) {
jpc_pi_destroy(pi);
return 0;
}
for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno <
picomp->numrlvls; ++rlvlno, ++pirlvl) {
pirlvl->prclyrnos = 0;
}
for (rlvlno = 0, pirlvl = picomp->pirlvls, rlvl = tcomp->rlvls;
rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl, ++rlvl) {
/* XXX sizeof(long) should be sizeof different type */
pirlvl->numprcs = rlvl->numprcs;
if (!(pirlvl->prclyrnos = jas_malloc(pirlvl->numprcs *
sizeof(long)))) {
jpc_pi_destroy(pi);
return 0;
}
}
}
pi->maxrlvls = 0;
for (compno = 0, tcomp = tile->tcomps, picomp = pi->picomps, cmpt =
dec->cmpts; compno < pi->numcomps; ++compno, ++tcomp, ++picomp,
++cmpt) {
picomp->hsamp = cmpt->hstep;
picomp->vsamp = cmpt->vstep;
for (rlvlno = 0, pirlvl = picomp->pirlvls, rlvl = tcomp->rlvls;
rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl, ++rlvl) {
pirlvl->prcwidthexpn = rlvl->prcwidthexpn;
pirlvl->prcheightexpn = rlvl->prcheightexpn;
for (prcno = 0, prclyrno = pirlvl->prclyrnos;
prcno < pirlvl->numprcs; ++prcno, ++prclyrno) {
*prclyrno = 0;
}
pirlvl->numhprcs = rlvl->numhprcs;
}
if (pi->maxrlvls < tcomp->numrlvls) {
pi->maxrlvls = tcomp->numrlvls;
}
}
pi->numlyrs = tile->cp->numlyrs;
pi->xstart = tile->xstart;
pi->ystart = tile->ystart;
pi->xend = tile->xend;
pi->yend = tile->yend;
pi->picomp = 0;
pi->pirlvl = 0;
pi->x = 0;
pi->y = 0;
pi->compno = 0;
pi->rlvlno = 0;
pi->prcno = 0;
pi->lyrno = 0;
pi->xstep = 0;
pi->ystep = 0;
pi->pchgno = -1;
pi->defaultpchg.prgord = tile->cp->prgord;
pi->defaultpchg.compnostart = 0;
pi->defaultpchg.compnoend = pi->numcomps;
pi->defaultpchg.rlvlnostart = 0;
pi->defaultpchg.rlvlnoend = pi->maxrlvls;
pi->defaultpchg.lyrnoend = pi->numlyrs;
pi->pchg = 0;
pi->valid = 0;
return pi;
} | 1 | [
"CWE-189"
] | jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 52,529,377,815,656,390,000,000,000,000,000,000,000 | 102 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
static int jp2_bpcc_getdata(jp2_box_t *box, jas_stream_t *in)
{
jp2_bpcc_t *bpcc = &box->data.bpcc;
unsigned int i;
bpcc->numcmpts = box->datalen;
if (!(bpcc->bpcs = jas_malloc(bpcc->numcmpts * sizeof(uint_fast8_t)))) {
return -1;
}
for (i = 0; i < bpcc->numcmpts; ++i) {
if (jp2_getuint8(in, &bpcc->bpcs[i])) {
return -1;
}
}
return 0;
} | 1 | [
"CWE-189"
] | jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 174,232,309,941,156,660,000,000,000,000,000,000,000 | 15 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
static jpc_enc_tcmpt_t *tcmpt_create(jpc_enc_tcmpt_t *tcmpt, jpc_enc_cp_t *cp,
jas_image_t *image, jpc_enc_tile_t *tile)
{
uint_fast16_t cmptno;
uint_fast16_t rlvlno;
jpc_enc_rlvl_t *rlvl;
uint_fast32_t tlx;
uint_fast32_t tly;
uint_fast32_t brx;
uint_fast32_t bry;
uint_fast32_t cmpttlx;
uint_fast32_t cmpttly;
jpc_enc_ccp_t *ccp;
jpc_tsfb_band_t bandinfos[JPC_MAXBANDS];
tcmpt->tile = tile;
tcmpt->tsfb = 0;
tcmpt->data = 0;
tcmpt->rlvls = 0;
/* Deduce the component number. */
cmptno = tcmpt - tile->tcmpts;
ccp = &cp->ccps[cmptno];
/* Compute the coordinates of the top-left and bottom-right
corners of this tile-component. */
tlx = JPC_CEILDIV(tile->tlx, ccp->sampgrdstepx);
tly = JPC_CEILDIV(tile->tly, ccp->sampgrdstepy);
brx = JPC_CEILDIV(tile->brx, ccp->sampgrdstepx);
bry = JPC_CEILDIV(tile->bry, ccp->sampgrdstepy);
/* Create a sequence to hold the tile-component sample data. */
if (!(tcmpt->data = jas_seq2d_create(tlx, tly, brx, bry))) {
goto error;
}
/* Get the image data associated with this tile-component. */
cmpttlx = JPC_CEILDIV(cp->imgareatlx, ccp->sampgrdstepx);
cmpttly = JPC_CEILDIV(cp->imgareatly, ccp->sampgrdstepy);
if (jas_image_readcmpt(image, cmptno, tlx - cmpttlx, tly - cmpttly,
brx - tlx, bry - tly, tcmpt->data)) {
goto error;
}
tcmpt->synweight = 0;
tcmpt->qmfbid = cp->tccp.qmfbid;
tcmpt->numrlvls = cp->tccp.maxrlvls;
tcmpt->numbands = 3 * tcmpt->numrlvls - 2;
if (!(tcmpt->tsfb = jpc_cod_gettsfb(tcmpt->qmfbid, tcmpt->numrlvls - 1))) {
goto error;
}
for (rlvlno = 0; rlvlno < tcmpt->numrlvls; ++rlvlno) {
tcmpt->prcwidthexpns[rlvlno] = cp->tccp.prcwidthexpns[rlvlno];
tcmpt->prcheightexpns[rlvlno] = cp->tccp.prcheightexpns[rlvlno];
}
tcmpt->cblkwidthexpn = cp->tccp.cblkwidthexpn;
tcmpt->cblkheightexpn = cp->tccp.cblkheightexpn;
tcmpt->cblksty = cp->tccp.cblksty;
tcmpt->csty = cp->tccp.csty;
tcmpt->numstepsizes = tcmpt->numbands;
assert(tcmpt->numstepsizes <= JPC_MAXBANDS);
memset(tcmpt->stepsizes, 0, tcmpt->numstepsizes * sizeof(uint_fast16_t));
/* Retrieve information about the various bands. */
jpc_tsfb_getbands(tcmpt->tsfb, jas_seq2d_xstart(tcmpt->data),
jas_seq2d_ystart(tcmpt->data), jas_seq2d_xend(tcmpt->data),
jas_seq2d_yend(tcmpt->data), bandinfos);
if (!(tcmpt->rlvls = jas_malloc(tcmpt->numrlvls * sizeof(jpc_enc_rlvl_t)))) {
goto error;
}
for (rlvlno = 0, rlvl = tcmpt->rlvls; rlvlno < tcmpt->numrlvls;
++rlvlno, ++rlvl) {
rlvl->bands = 0;
rlvl->tcmpt = tcmpt;
}
for (rlvlno = 0, rlvl = tcmpt->rlvls; rlvlno < tcmpt->numrlvls;
++rlvlno, ++rlvl) {
if (!rlvl_create(rlvl, cp, tcmpt, bandinfos)) {
goto error;
}
}
return tcmpt;
error:
tcmpt_destroy(tcmpt);
return 0;
} | 1 | [
"CWE-189"
] | jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 150,086,594,326,513,160,000,000,000,000,000,000,000 | 94 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
jas_stream_t *jas_stream_memopen(char *buf, int bufsize)
{
jas_stream_t *stream;
jas_stream_memobj_t *obj;
if (!(stream = jas_stream_create())) {
return 0;
}
/* A stream associated with a memory buffer is always opened
for both reading and writing in binary mode. */
stream->openmode_ = JAS_STREAM_READ | JAS_STREAM_WRITE | JAS_STREAM_BINARY;
/* Since the stream data is already resident in memory, buffering
is not necessary. */
/* But... It still may be faster to use buffering anyways. */
jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0);
/* Select the operations for a memory stream. */
stream->ops_ = &jas_stream_memops;
/* Allocate memory for the underlying memory stream object. */
if (!(obj = jas_malloc(sizeof(jas_stream_memobj_t)))) {
jas_stream_destroy(stream);
return 0;
}
stream->obj_ = (void *) obj;
/* Initialize a few important members of the memory stream object. */
obj->myalloc_ = 0;
obj->buf_ = 0;
/* If the buffer size specified is nonpositive, then the buffer
is allocated internally and automatically grown as needed. */
if (bufsize <= 0) {
obj->bufsize_ = 1024;
obj->growable_ = 1;
} else {
obj->bufsize_ = bufsize;
obj->growable_ = 0;
}
if (buf) {
obj->buf_ = (unsigned char *) buf;
} else {
obj->buf_ = jas_malloc(obj->bufsize_ * sizeof(char));
obj->myalloc_ = 1;
}
if (!obj->buf_) {
jas_stream_close(stream);
return 0;
}
if (bufsize > 0 && buf) {
/* If a buffer was supplied by the caller and its length is positive,
make the associated buffer data appear in the stream initially. */
obj->len_ = bufsize;
} else {
/* The stream is initially empty. */
obj->len_ = 0;
}
obj->pos_ = 0;
return stream;
} | 1 | [
"CWE-189"
] | jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 234,582,491,881,735,780,000,000,000,000,000,000,000 | 64 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
void jpc_qmfb_split_colres(jpc_fix_t *a, int numrows, int numcols,
int stride, int parity)
{
int bufsize = JPC_CEILDIVPOW2(numrows, 1);
jpc_fix_t splitbuf[QMFB_SPLITBUFSIZE * JPC_QMFB_COLGRPSIZE];
jpc_fix_t *buf = splitbuf;
jpc_fix_t *srcptr;
jpc_fix_t *dstptr;
register jpc_fix_t *srcptr2;
register jpc_fix_t *dstptr2;
register int n;
register int i;
int m;
int hstartcol;
/* Get a buffer. */
if (bufsize > QMFB_SPLITBUFSIZE) {
if (!(buf = jas_malloc(bufsize * sizeof(jpc_fix_t)))) {
/* We have no choice but to commit suicide in this case. */
abort();
}
}
if (numrows >= 2) {
hstartcol = (numrows + 1 - parity) >> 1;
m = (parity) ? hstartcol : (numrows - hstartcol);
/* Save the samples destined for the highpass channel. */
n = m;
dstptr = buf;
srcptr = &a[(1 - parity) * stride];
while (n-- > 0) {
dstptr2 = dstptr;
srcptr2 = srcptr;
for (i = 0; i < numcols; ++i) {
*dstptr2 = *srcptr2;
++dstptr2;
++srcptr2;
}
dstptr += numcols;
srcptr += stride << 1;
}
/* Copy the appropriate samples into the lowpass channel. */
dstptr = &a[(1 - parity) * stride];
srcptr = &a[(2 - parity) * stride];
n = numrows - m - (!parity);
while (n-- > 0) {
dstptr2 = dstptr;
srcptr2 = srcptr;
for (i = 0; i < numcols; ++i) {
*dstptr2 = *srcptr2;
++dstptr2;
++srcptr2;
}
dstptr += stride;
srcptr += stride << 1;
}
/* Copy the saved samples into the highpass channel. */
dstptr = &a[hstartcol * stride];
srcptr = buf;
n = m;
while (n-- > 0) {
dstptr2 = dstptr;
srcptr2 = srcptr;
for (i = 0; i < numcols; ++i) {
*dstptr2 = *srcptr2;
++dstptr2;
++srcptr2;
}
dstptr += stride;
srcptr += numcols;
}
}
/* If the split buffer was allocated on the heap, free this memory. */
if (buf != splitbuf) {
jas_free(buf);
}
} | 1 | [
"CWE-189"
] | jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 168,239,743,450,930,700,000,000,000,000,000,000,000 | 80 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
static int jp2_pclr_getdata(jp2_box_t *box, jas_stream_t *in)
{
jp2_pclr_t *pclr = &box->data.pclr;
int lutsize;
unsigned int i;
unsigned int j;
int_fast32_t x;
pclr->lutdata = 0;
if (jp2_getuint16(in, &pclr->numlutents) ||
jp2_getuint8(in, &pclr->numchans)) {
return -1;
}
lutsize = pclr->numlutents * pclr->numchans;
if (!(pclr->lutdata = jas_malloc(lutsize * sizeof(int_fast32_t)))) {
return -1;
}
if (!(pclr->bpc = jas_malloc(pclr->numchans * sizeof(uint_fast8_t)))) {
return -1;
}
for (i = 0; i < pclr->numchans; ++i) {
if (jp2_getuint8(in, &pclr->bpc[i])) {
return -1;
}
}
for (i = 0; i < pclr->numlutents; ++i) {
for (j = 0; j < pclr->numchans; ++j) {
if (jp2_getint(in, (pclr->bpc[j] & 0x80) != 0,
(pclr->bpc[j] & 0x7f) + 1, &x)) {
return -1;
}
pclr->lutdata[i * pclr->numchans + j] = x;
}
}
return 0;
} | 1 | [
"CWE-189"
] | jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 239,259,977,131,698,830,000,000,000,000,000,000,000 | 37 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
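In jp2_pclr_getdata the product numlutents * numchans is computed into a plain int before allocation, so it can wrap. The commit message also names jas_realloc2; the same checked-multiply pattern applies to reallocation, sketched here (illustrative, not the exact JasPer code):

```c
#include <stdint.h>
#include <stdlib.h>

/* Overflow-checked realloc in the spirit of jas_realloc2. */
void *checked_realloc2(void *ptr, size_t nmemb, size_t size)
{
	if (size && nmemb > SIZE_MAX / size) {
		return NULL;
	}
	return realloc(ptr, nmemb * size);
}
```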
int jas_stream_printf(jas_stream_t *stream, const char *fmt, ...)
{
va_list ap;
char buf[4096];
int ret;
va_start(ap, fmt);
ret = vsprintf(buf, fmt, ap);
jas_stream_puts(stream, buf);
va_end(ap);
return ret;
} | 1 | [
"CWE-119"
] | jasper | d678ccd27b8a062e3bfd4c80d8ce2676a8166a27 | 194,542,991,776,402,800,000,000,000,000,000,000,000 | 12 | CVE-2008-3522 |
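jas_stream_printf above expands an arbitrary format into a fixed 4096-byte stack buffer with vsprintf, so a long expansion overflows the stack (CVE-2008-3522). A common remediation, shown here as a hedged sketch rather than the exact upstream patch, is to bound the expansion with vsnprintf:

```c
#include <stdarg.h>
#include <stdio.h>

/* Sketch: bound the format expansion so it can never exceed the buffer
 * (illustrative; not necessarily the upstream fix for CVE-2008-3522). */
int bounded_stream_printf(char *buf, size_t bufsize, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = vsnprintf(buf, bufsize, fmt, ap); /* truncates, never overflows */
	va_end(ap);
	return ret;
}
```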
static inline int nla_ok(const struct nlattr *nla, int remaining)
{
return remaining >= sizeof(*nla) &&
nla->nla_len >= sizeof(*nla) &&
nla->nla_len <= remaining;
} | 1 | [] | linux-2.6 | 1045b03e07d85f3545118510a587035536030c1c | 202,512,783,775,518,600,000,000,000,000,000,000,000 | 6 | netlink: fix overrun in attribute iteration
kmemcheck reported this:
kmemcheck: Caught 16-bit read from uninitialized memory (f6c1ba30)
0500110001508abf050010000500000002017300140000006f72672e66726565
i i i i i i i i i i i i i u u u u u u u u u u u u u u u u u u u
^
Pid: 3462, comm: wpa_supplicant Not tainted (2.6.27-rc3-00054-g6397ab9-dirty #13)
EIP: 0060:[<c05de64a>] EFLAGS: 00010296 CPU: 0
EIP is at nla_parse+0x5a/0xf0
EAX: 00000008 EBX: fffffffd ECX: c06f16c0 EDX: 00000005
ESI: 00000010 EDI: f6c1ba30 EBP: f6367c6c ESP: c0a11e88
DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
CR0: 8005003b CR2: f781cc84 CR3: 3632f000 CR4: 000006d0
DR0: c0ead9bc DR1: 00000000 DR2: 00000000 DR3: 00000000
DR6: ffff4ff0 DR7: 00000400
[<c05d4b23>] rtnl_setlink+0x63/0x130
[<c05d5f75>] rtnetlink_rcv_msg+0x165/0x200
[<c05ddf66>] netlink_rcv_skb+0x76/0xa0
[<c05d5dfe>] rtnetlink_rcv+0x1e/0x30
[<c05dda21>] netlink_unicast+0x281/0x290
[<c05ddbe9>] netlink_sendmsg+0x1b9/0x2b0
[<c05beef2>] sock_sendmsg+0xd2/0x100
[<c05bf945>] sys_sendto+0xa5/0xd0
[<c05bf9a6>] sys_send+0x36/0x40
[<c05c03d6>] sys_socketcall+0x1e6/0x2c0
[<c020353b>] sysenter_do_call+0x12/0x3f
[<ffffffff>] 0xffffffff
This is the line in nla_ok():
/**
* nla_ok - check if the netlink attribute fits into the remaining bytes
* @nla: netlink attribute
* @remaining: number of bytes remaining in attribute stream
*/
static inline int nla_ok(const struct nlattr *nla, int remaining)
{
return remaining >= sizeof(*nla) &&
nla->nla_len >= sizeof(*nla) &&
nla->nla_len <= remaining;
}
It turns out that remaining can become negative due to alignment in
nla_next(). But GCC promotes "remaining" to unsigned in the test
against sizeof(*nla) above. Therefore the test succeeds, and the
nla_for_each_attr() may access memory outside the received buffer.
A short example illustrating this point is here:
#include <stdio.h>
main(void)
{
printf("%d\n", -1 >= sizeof(int));
}
...which prints "1".
This patch adds a cast in front of the sizeof so that GCC will make
a signed comparison and fix the illegal memory dereference. With the
patch applied, there is no kmemcheck report.
Signed-off-by: Vegard Nossum <[email protected]>
Acked-by: Thomas Graf <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
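The commit message spells out the fix: cast the sizeof so the comparison against remaining stays signed. A compilable userspace illustration with a stand-in attribute struct (the field layout mirrors the two nlattr header fields):

```c
#include <stdio.h>

/* Stand-in mirroring the two header fields of struct nlattr. */
struct attr {
	unsigned short nla_len;
	unsigned short nla_type;
};

/* Without the (int) cast, a negative `remaining` is promoted to unsigned
 * in the first test, the test wrongly succeeds, and the subsequent
 * nla_len reads can touch memory outside the received buffer. */
static int attr_ok(const struct attr *nla, int remaining)
{
	return remaining >= (int) sizeof(*nla) &&
	       nla->nla_len >= sizeof(*nla) &&
	       nla->nla_len <= remaining;
}

int main(void)
{
	struct attr a = { sizeof(struct attr), 0 };
	/* Prints 0: a negative remainder is rejected before any field read. */
	printf("%d\n", attr_ok(&a, -2));
	return 0;
}
```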
static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
struct splice_desc *sd)
{
struct file *file = sd->u.file;
struct address_space *mapping = file->f_mapping;
unsigned int offset, this_len;
struct page *page;
pgoff_t index;
int ret;
/*
* make sure the data in this buffer is uptodate
*/
ret = buf->ops->confirm(pipe, buf);
if (unlikely(ret))
return ret;
index = sd->pos >> PAGE_CACHE_SHIFT;
offset = sd->pos & ~PAGE_CACHE_MASK;
this_len = sd->len;
if (this_len + offset > PAGE_CACHE_SIZE)
this_len = PAGE_CACHE_SIZE - offset;
find_page:
page = find_lock_page(mapping, index);
if (!page) {
ret = -ENOMEM;
page = page_cache_alloc_cold(mapping);
if (unlikely(!page))
goto out_ret;
/*
* This will also lock the page
*/
ret = add_to_page_cache_lru(page, mapping, index,
GFP_KERNEL);
if (unlikely(ret))
goto out;
}
ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
if (unlikely(ret)) {
loff_t isize = i_size_read(mapping->host);
if (ret != AOP_TRUNCATED_PAGE)
unlock_page(page);
page_cache_release(page);
if (ret == AOP_TRUNCATED_PAGE)
goto find_page;
/*
* prepare_write() may have instantiated a few blocks
* outside i_size. Trim these off again.
*/
if (sd->pos + this_len > isize)
vmtruncate(mapping->host, isize);
goto out_ret;
}
if (buf->page != page) {
/*
* Careful, ->map() uses KM_USER0!
*/
char *src = buf->ops->map(pipe, buf, 1);
char *dst = kmap_atomic(page, KM_USER1);
memcpy(dst + offset, src + buf->offset, this_len);
flush_dcache_page(page);
kunmap_atomic(dst, KM_USER1);
buf->ops->unmap(pipe, buf, src);
}
ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
if (ret) {
if (ret == AOP_TRUNCATED_PAGE) {
page_cache_release(page);
goto find_page;
}
if (ret < 0)
goto out;
/*
* Partial write has happened, so 'ret' is already initialized to the
* number of bytes written; there is nothing more we have to do here.
*/
} else
ret = this_len;
/*
* Return the number of bytes written and mark page as
* accessed, we are now done!
*/
mark_page_accessed(page);
out:
page_cache_release(page);
unlock_page(page);
out_ret:
return ret;
} | 1 | [
"CWE-399"
] | linux-2.6 | 6a860c979b35469e4d77da781a96bdb2ca05ae64 | 14,782,008,078,368,307,000,000,000,000,000,000,000 | 99 | splice: fix bad unlock_page() in error case
If add_to_page_cache_lru() fails, the page will not be locked. But
splice jumps to an error path that does a page release and unlock,
causing a BUG() in unlock_page().
Fix this by adding one more label that just releases the page. This bug
was actually triggered on EL5 by gurudas pai <[email protected]>
using fio.
Signed-off-by: Jens Axboe <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
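The commit message describes the shape of the fix: a second label that only releases the page, taken when add_to_page_cache_lru() fails before the page is locked. A compilable sketch of that control flow with stubbed page helpers (names illustrative, not the exact upstream diff):

```c
#include <stdio.h>

struct page { int refs; };                       /* stand-in type */
static void unlock_page(struct page *p)        { (void) p; }
static void page_cache_release(struct page *p) { p->refs--; }

/* A failure from add_to_page_cache_lru() leaves the page unlocked, so
 * the error path must skip unlock_page(); the fix adds a label that
 * performs the release only. */
static int finish(struct page *page, int lru_err)
{
	if (lru_err)
		goto out_release;        /* allocated but never locked */
	/* ... work on the locked page ... */
	unlock_page(page);
out_release:
	page_cache_release(page);
	return lru_err;
}

int main(void)
{
	struct page p = { 1 };
	printf("%d\n", finish(&p, -12)); /* early -ENOMEM-style failure */
	return 0;
}
```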
static int sctp_setsockopt_hmac_ident(struct sock *sk,
char __user *optval,
int optlen)
{
struct sctp_hmacalgo *hmacs;
int err;
if (!sctp_auth_enable)
return -EACCES;
if (optlen < sizeof(struct sctp_hmacalgo))
return -EINVAL;
hmacs = kmalloc(optlen, GFP_KERNEL);
if (!hmacs)
return -ENOMEM;
if (copy_from_user(hmacs, optval, optlen)) {
err = -EFAULT;
goto out;
}
if (hmacs->shmac_num_idents == 0 ||
hmacs->shmac_num_idents > SCTP_AUTH_NUM_HMACS) {
err = -EINVAL;
goto out;
}
err = sctp_auth_ep_set_hmacs(sctp_sk(sk)->ep, hmacs);
out:
kfree(hmacs);
return err;
} | 1 | [
"CWE-200"
] | linux-2.6 | d97240552cd98c4b07322f30f66fd9c3ba4171de | 45,738,712,490,204,000,000,000,000,000,000,000,000 | 33 | sctp: fix random memory dereference with SCTP_HMAC_IDENT option.
The number of identifiers needs to be checked against the option
length. Also, the identifier index provided needs to be verified
to make sure that it doesn't exceed the bounds of the array.
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
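Per the commit message, the identifier count must be checked against the option length: sctp_setsockopt_hmac_ident above trusts shmac_num_idents even when fewer bytes were actually copied from userspace. A hedged sketch of the missing consistency check (constant and layout illustrative):

```c
#include <stddef.h>
#include <stdint.h>

#define AUTH_NUM_HMACS 4              /* illustrative bound */

/* The identifiers are uint16_t values following a fixed header; the
 * count claimed in the header must fit in the bytes the caller sent. */
int hmac_idents_fit(size_t optlen, size_t hdrlen, uint16_t num_idents)
{
	if (num_idents == 0 || num_idents > AUTH_NUM_HMACS)
		return 0;
	return optlen >= hdrlen + (size_t) num_idents * sizeof(uint16_t);
}
```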
int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
struct sctp_hmacalgo *hmacs)
{
int has_sha1 = 0;
__u16 id;
int i;
/* Scan the list looking for unsupported id. Also make sure that
* SHA1 is specified.
*/
for (i = 0; i < hmacs->shmac_num_idents; i++) {
id = hmacs->shmac_idents[i];
if (SCTP_AUTH_HMAC_ID_SHA1 == id)
has_sha1 = 1;
if (!sctp_hmac_list[id].hmac_name)
return -EOPNOTSUPP;
}
if (!has_sha1)
return -EINVAL;
memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0],
hmacs->shmac_num_idents * sizeof(__u16));
ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) +
hmacs->shmac_num_idents * sizeof(__u16));
return 0;
} | 1 | [
"CWE-200"
] | linux-2.6 | d97240552cd98c4b07322f30f66fd9c3ba4171de | 6,605,163,766,667,128,000,000,000,000,000,000,000 | 29 | sctp: fix random memory dereference with SCTP_HMAC_IDENT option.
The number of identifiers needs to be checked against the option
length. Also, the identifier index provided needs to be verified
to make sure that it doesn't exceed the bounds of the array.
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
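The same commit message also calls out that each identifier must be bounds-checked before indexing: sctp_auth_ep_set_hmacs above indexes sctp_hmac_list[id] with an arbitrary 16-bit id taken from userspace. A hedged sketch of the guard (table size illustrative):

```c
#include <stdint.h>

#define HMAC_LIST_ENTRIES 4           /* illustrative table size */

/* Reject any identifier that would index past the algorithm table,
 * before it is ever used as sctp_hmac_list[id]. */
int hmac_id_in_range(uint16_t id)
{
	return id < HMAC_LIST_ENTRIES;
}
```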
int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
struct file *filp)
{
int err;
struct iattr newattrs;
/* Not pretty: "inode->i_size" shouldn't really be signed. But it is. */
if (length < 0)
return -EINVAL;
newattrs.ia_size = length;
newattrs.ia_valid = ATTR_SIZE | time_attrs;
if (filp) {
newattrs.ia_file = filp;
newattrs.ia_valid |= ATTR_FILE;
}
mutex_lock(&dentry->d_inode->i_mutex);
err = notify_change(dentry, &newattrs);
mutex_unlock(&dentry->d_inode->i_mutex);
return err;
} | 1 | [
"CWE-264"
] | linux-2.6 | 7b82dc0e64e93f430182f36b46b79fcee87d3532 | 291,990,395,039,251,430,000,000,000,000,000,000,000 | 22 | Remove suid/sgid bits on [f]truncate()
.. to match what we do on write(). This way, people who write to files
by using [f]truncate + writable mmap have the same semantics as if they
were using the write() family of system calls.
Signed-off-by: Linus Torvalds <[email protected]> |
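The commit message defines the policy: truncation should strip the set-user-ID bit, and the set-group-ID bit when the group-execute bit is set, matching what the write() path does. A userspace analogue of that mode computation (a sketch of the policy, not the kernel patch itself):

```c
#include <sys/stat.h>

/* Policy sketch: writing via [f]truncate drops S_ISUID always, and
 * S_ISGID only when group-execute is set (without group-execute,
 * S_ISGID denotes mandatory locking rather than setgid-on-exec). */
mode_t mode_after_truncate(mode_t mode)
{
	mode &= ~(mode_t) S_ISUID;
	if (mode & S_IXGRP)
		mode &= ~(mode_t) S_ISGID;
	return mode;
}
```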
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
loff_t *ppos, size_t len, unsigned int flags)
{
struct address_space *mapping = out->f_mapping;
ssize_t ret;
ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
if (ret > 0) {
struct inode *inode = mapping->host;
*ppos += ret;
/*
* If file or inode is SYNC and we actually wrote some data,
* sync it.
*/
if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
int err;
mutex_lock(&inode->i_mutex);
err = generic_osync_inode(inode, mapping,
OSYNC_METADATA|OSYNC_DATA);
mutex_unlock(&inode->i_mutex);
if (err)
ret = err;
}
}
return ret;
} | 1 | [
"CWE-264"
] | linux-2.6 | 8c34e2d63231d4bf4852bac8521883944d770fe3 | 75,070,500,229,133,030,000,000,000,000,000,000,000 | 31 | [PATCH] Remove SUID when splicing into an inode
Originally from Mark Fasheh <[email protected]>
generic_file_splice_write() does not remove S_ISUID or S_ISGID. This is
inconsistent with the way we generally write to files.
Signed-off-by: Mark Fasheh <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out,
loff_t *ppos, size_t len, unsigned int flags)
{
struct address_space *mapping = out->f_mapping;
struct inode *inode = mapping->host;
ssize_t ret;
int err;
ret = __splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
if (ret > 0) {
*ppos += ret;
/*
* If file or inode is SYNC and we actually wrote some data,
* sync it.
*/
if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
err = generic_osync_inode(inode, mapping,
OSYNC_METADATA|OSYNC_DATA);
if (err)
ret = err;
}
}
return ret;
} | 1 | [
"CWE-264"
] | linux-2.6 | 8c34e2d63231d4bf4852bac8521883944d770fe3 | 76,296,563,285,025,260,000,000,000,000,000,000,000 | 27 | [PATCH] Remove SUID when splicing into an inode
Originally from Mark Fasheh <[email protected]>
generic_file_splice_write() does not remove S_ISUID or S_ISGID. This is
inconsistent with the way we generally write to files.
Signed-off-by: Mark Fasheh <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
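For both splice variants above, the commit message says the fix is to behave like write() and remove the suid bits when splicing into the inode. A sketch of where such a call would sit, with the VFS helper stubbed (remove_suid existed in kernels of this era; its exact placement here is an assumption, not the upstream diff):

```c
struct dentry { int dummy; };                 /* stand-in type */
static int remove_suid(struct dentry *dentry) /* stubbed VFS helper */
{
	(void) dentry;
	return 0;
}

/* Sketch: strip the suid/sgid bits before moving pipe data into the
 * file, mirroring the write() path (placement illustrative). */
long splice_write_like(struct dentry *dentry)
{
	int err = remove_suid(dentry);

	if (err)
		return err;
	/* ... __splice_from_pipe() and the O_SYNC handling as above ... */
	return 0;
}
```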
_dbus_validate_signature_with_reason (const DBusString *type_str,
int type_pos,
int len)
{
const unsigned char *p;
const unsigned char *end;
int last;
int struct_depth;
int array_depth;
int dict_entry_depth;
DBusValidity result;
int element_count;
DBusList *element_count_stack;
result = DBUS_VALID;
element_count_stack = NULL;
if (!_dbus_list_append (&element_count_stack, _DBUS_INT_TO_POINTER (0)))
{
result = DBUS_VALIDITY_UNKNOWN_OOM_ERROR;
goto out;
}
_dbus_assert (type_str != NULL);
_dbus_assert (type_pos < _DBUS_INT32_MAX - len);
_dbus_assert (len >= 0);
_dbus_assert (type_pos >= 0);
if (len > DBUS_MAXIMUM_SIGNATURE_LENGTH)
{
result = DBUS_INVALID_SIGNATURE_TOO_LONG;
goto out;
}
p = _dbus_string_get_const_data_len (type_str, type_pos, 0);
end = _dbus_string_get_const_data_len (type_str, type_pos + len, 0);
struct_depth = 0;
array_depth = 0;
dict_entry_depth = 0;
last = DBUS_TYPE_INVALID;
while (p != end)
{
switch (*p)
{
case DBUS_TYPE_BYTE:
case DBUS_TYPE_BOOLEAN:
case DBUS_TYPE_INT16:
case DBUS_TYPE_UINT16:
case DBUS_TYPE_INT32:
case DBUS_TYPE_UINT32:
case DBUS_TYPE_INT64:
case DBUS_TYPE_UINT64:
case DBUS_TYPE_DOUBLE:
case DBUS_TYPE_STRING:
case DBUS_TYPE_OBJECT_PATH:
case DBUS_TYPE_SIGNATURE:
case DBUS_TYPE_VARIANT:
break;
case DBUS_TYPE_ARRAY:
array_depth += 1;
if (array_depth > DBUS_MAXIMUM_TYPE_RECURSION_DEPTH)
{
result = DBUS_INVALID_EXCEEDED_MAXIMUM_ARRAY_RECURSION;
goto out;
}
break;
case DBUS_STRUCT_BEGIN_CHAR:
struct_depth += 1;
if (struct_depth > DBUS_MAXIMUM_TYPE_RECURSION_DEPTH)
{
result = DBUS_INVALID_EXCEEDED_MAXIMUM_STRUCT_RECURSION;
goto out;
}
if (!_dbus_list_append (&element_count_stack,
_DBUS_INT_TO_POINTER (0)))
{
result = DBUS_VALIDITY_UNKNOWN_OOM_ERROR;
goto out;
}
break;
case DBUS_STRUCT_END_CHAR:
if (struct_depth == 0)
{
result = DBUS_INVALID_STRUCT_ENDED_BUT_NOT_STARTED;
goto out;
}
if (last == DBUS_STRUCT_BEGIN_CHAR)
{
result = DBUS_INVALID_STRUCT_HAS_NO_FIELDS;
goto out;
}
_dbus_list_pop_last (&element_count_stack);
struct_depth -= 1;
break;
case DBUS_DICT_ENTRY_BEGIN_CHAR:
if (last != DBUS_TYPE_ARRAY)
{
result = DBUS_INVALID_DICT_ENTRY_NOT_INSIDE_ARRAY;
goto out;
}
dict_entry_depth += 1;
if (dict_entry_depth > DBUS_MAXIMUM_TYPE_RECURSION_DEPTH)
{
result = DBUS_INVALID_EXCEEDED_MAXIMUM_DICT_ENTRY_RECURSION;
goto out;
}
if (!_dbus_list_append (&element_count_stack,
_DBUS_INT_TO_POINTER (0)))
{
result = DBUS_VALIDITY_UNKNOWN_OOM_ERROR;
goto out;
}
break;
case DBUS_DICT_ENTRY_END_CHAR:
if (dict_entry_depth == 0)
{
result = DBUS_INVALID_DICT_ENTRY_ENDED_BUT_NOT_STARTED;
goto out;
}
dict_entry_depth -= 1;
element_count =
_DBUS_POINTER_TO_INT (_dbus_list_pop_last (&element_count_stack));
if (element_count != 2)
{
if (element_count == 0)
result = DBUS_INVALID_DICT_ENTRY_HAS_NO_FIELDS;
else if (element_count == 1)
result = DBUS_INVALID_DICT_ENTRY_HAS_ONLY_ONE_FIELD;
else
result = DBUS_INVALID_DICT_ENTRY_HAS_TOO_MANY_FIELDS;
goto out;
}
break;
case DBUS_TYPE_STRUCT: /* doesn't appear in signatures */
case DBUS_TYPE_DICT_ENTRY: /* ditto */
default:
result = DBUS_INVALID_UNKNOWN_TYPECODE;
goto out;
}
if (*p != DBUS_TYPE_ARRAY &&
*p != DBUS_DICT_ENTRY_BEGIN_CHAR &&
*p != DBUS_STRUCT_BEGIN_CHAR)
{
element_count =
_DBUS_POINTER_TO_INT (_dbus_list_pop_last (&element_count_stack));
++element_count;
if (!_dbus_list_append (&element_count_stack,
_DBUS_INT_TO_POINTER (element_count)))
{
result = DBUS_VALIDITY_UNKNOWN_OOM_ERROR;
goto out;
}
}
if (array_depth > 0)
{
if (*p == DBUS_TYPE_ARRAY && p != end)
{
const char *p1;
p1 = p + 1;
if (*p1 == DBUS_STRUCT_END_CHAR ||
*p1 == DBUS_DICT_ENTRY_END_CHAR)
{
result = DBUS_INVALID_MISSING_ARRAY_ELEMENT_TYPE;
goto out;
}
}
else
{
array_depth = 0;
}
}
if (last == DBUS_DICT_ENTRY_BEGIN_CHAR &&
!dbus_type_is_basic (*p))
{
result = DBUS_INVALID_DICT_KEY_MUST_BE_BASIC_TYPE;
goto out;
}
last = *p;
++p;
}
if (array_depth > 0)
{
result = DBUS_INVALID_MISSING_ARRAY_ELEMENT_TYPE;
goto out;
}
if (struct_depth > 0)
{
result = DBUS_INVALID_STRUCT_STARTED_BUT_NOT_ENDED;
goto out;
}
if (dict_entry_depth > 0)
{
result = DBUS_INVALID_DICT_ENTRY_STARTED_BUT_NOT_ENDED;
goto out;
}
_dbus_assert (last != DBUS_TYPE_ARRAY);
_dbus_assert (last != DBUS_STRUCT_BEGIN_CHAR);
_dbus_assert (last != DBUS_DICT_ENTRY_BEGIN_CHAR);
result = DBUS_VALID;
out:
_dbus_list_clear (&element_count_stack);
return result;
} | 1 | [
"CWE-20"
] | dbus | 7b10b46c5c8658449783ce45f1273dd35c353bce | 213,008,185,461,133,900,000,000,000,000,000,000,000 | 239 | Bug 17803: Panic from dbus_signature_validate
* dbus/dbus-marshal-validate.c: Ensure we validate
a basic type before calling is_basic on it.
* dbus-marshal-validate-util.c: Test. |
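The commit note says the fix ensures a type code is validated before dbus_type_is_basic() sees it, since is_basic asserts (panics) on unknown codes. A hedged sketch of such a guard using libdbus's public predicates (assuming dbus_type_is_valid(), which the library exposes):

```c
#include <dbus/dbus.h>

/* Sketch: a dict-entry key check that cannot trip the assertion inside
 * dbus_type_is_basic() on an unknown type code (illustrative). */
dbus_bool_t dict_key_code_ok(int typecode)
{
	if (!dbus_type_is_valid(typecode))
		return FALSE;         /* reject before is_basic can assert */
	return dbus_type_is_basic(typecode);
}
```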
_dbus_marshal_validate_test (void)
{
DBusString str;
int i;
const char *valid_paths[] = {
"/",
"/foo/bar",
"/foo",
"/foo/bar/baz"
};
const char *invalid_paths[] = {
"bar",
"bar/baz",
"/foo/bar/",
"/foo/"
"foo/",
"boo//blah",
"//",
"///",
"foo///blah/",
"Hello World",
"",
" ",
"foo bar"
};
const char *valid_interfaces[] = {
"org.freedesktop.Foo",
"Bar.Baz",
"Blah.Blah.Blah.Blah.Blah",
"a.b",
"a.b.c.d.e.f.g",
"a0.b1.c2.d3.e4.f5.g6",
"abc123.foo27"
};
const char *invalid_interfaces[] = {
".",
"",
"..",
".Foo.Bar",
"..Foo.Bar",
"Foo.Bar.",
"Foo.Bar..",
"Foo",
"9foo.bar.baz",
"foo.bar..baz",
"foo.bar...baz",
"foo.bar.b..blah",
":",
":0-1",
"10",
":11.34324",
"0.0.0",
"0..0",
"foo.Bar.%",
"foo.Bar!!",
"!Foo.bar.bz",
"foo.$.blah",
"",
" ",
"foo bar"
};
const char *valid_unique_names[] = {
":0",
":a",
":",
":.a",
":.1",
":0.1",
":000.2222",
":.blah",
":abce.freedesktop.blah"
};
const char *invalid_unique_names[] = {
//":-",
":!",
//":0-10",
":blah.",
":blah.",
":blah..org",
":blah.org..",
":..blah.org",
"",
" ",
"foo bar"
};
const char *valid_members[] = {
"Hello",
"Bar",
"foobar",
"_foobar",
"foo89"
};
const char *invalid_members[] = {
"9Hello",
"10",
"1",
"foo-bar",
"blah.org",
".blah",
"blah.",
"Hello.",
"!foo",
"",
" ",
"foo bar"
};
const char *valid_signatures[] = {
"",
"sss",
"i",
"b"
};
const char *invalid_signatures[] = {
" ",
"not a valid signature",
"123",
".",
"("
};
/* Signature with reason */
run_validity_tests (signature_tests, _DBUS_N_ELEMENTS (signature_tests),
_dbus_validate_signature_with_reason);
/* Path validation */
i = 0;
while (i < (int) _DBUS_N_ELEMENTS (valid_paths))
{
_dbus_string_init_const (&str, valid_paths[i]);
if (!_dbus_validate_path (&str, 0,
_dbus_string_get_length (&str)))
{
_dbus_warn ("Path \"%s\" should have been valid\n", valid_paths[i]);
_dbus_assert_not_reached ("invalid path");
}
++i;
}
i = 0;
while (i < (int) _DBUS_N_ELEMENTS (invalid_paths))
{
_dbus_string_init_const (&str, invalid_paths[i]);
if (_dbus_validate_path (&str, 0,
_dbus_string_get_length (&str)))
{
_dbus_warn ("Path \"%s\" should have been invalid\n", invalid_paths[i]);
_dbus_assert_not_reached ("valid path");
}
++i;
}
/* Interface validation */
i = 0;
while (i < (int) _DBUS_N_ELEMENTS (valid_interfaces))
{
_dbus_string_init_const (&str, valid_interfaces[i]);
if (!_dbus_validate_interface (&str, 0,
_dbus_string_get_length (&str)))
{
_dbus_warn ("Interface \"%s\" should have been valid\n", valid_interfaces[i]);
_dbus_assert_not_reached ("invalid interface");
}
++i;
}
i = 0;
while (i < (int) _DBUS_N_ELEMENTS (invalid_interfaces))
{
_dbus_string_init_const (&str, invalid_interfaces[i]);
if (_dbus_validate_interface (&str, 0,
_dbus_string_get_length (&str)))
{
_dbus_warn ("Interface \"%s\" should have been invalid\n", invalid_interfaces[i]);
_dbus_assert_not_reached ("valid interface");
}
++i;
}
/* Bus name validation (check that valid interfaces are valid bus names,
* and invalid interfaces are invalid services except if they start with ':')
*/
i = 0;
while (i < (int) _DBUS_N_ELEMENTS (valid_interfaces))
{
_dbus_string_init_const (&str, valid_interfaces[i]);
if (!_dbus_validate_bus_name (&str, 0,
_dbus_string_get_length (&str)))
{
_dbus_warn ("Bus name \"%s\" should have been valid\n", valid_interfaces[i]);
_dbus_assert_not_reached ("invalid bus name");
}
++i;
}
i = 0;
while (i < (int) _DBUS_N_ELEMENTS (invalid_interfaces))
{
if (invalid_interfaces[i][0] != ':')
{
_dbus_string_init_const (&str, invalid_interfaces[i]);
if (_dbus_validate_bus_name (&str, 0,
_dbus_string_get_length (&str)))
{
_dbus_warn ("Bus name \"%s\" should have been invalid\n", invalid_interfaces[i]);
_dbus_assert_not_reached ("valid bus name");
}
}
++i;
}
/* unique name validation */
i = 0;
while (i < (int) _DBUS_N_ELEMENTS (valid_unique_names))
{
_dbus_string_init_const (&str, valid_unique_names[i]);
if (!_dbus_validate_bus_name (&str, 0,
_dbus_string_get_length (&str)))
{
_dbus_warn ("Bus name \"%s\" should have been valid\n", valid_unique_names[i]);
_dbus_assert_not_reached ("invalid unique name");
}
++i;
}
i = 0;
while (i < (int) _DBUS_N_ELEMENTS (invalid_unique_names))
{
_dbus_string_init_const (&str, invalid_unique_names[i]);
if (_dbus_validate_bus_name (&str, 0,
_dbus_string_get_length (&str)))
{
_dbus_warn ("Bus name \"%s\" should have been invalid\n", invalid_unique_names[i]);
_dbus_assert_not_reached ("valid unique name");
}
++i;
}
/* Error name validation (currently identical to interfaces)
*/
i = 0;
while (i < (int) _DBUS_N_ELEMENTS (valid_interfaces))
{
_dbus_string_init_const (&str, valid_interfaces[i]);
if (!_dbus_validate_error_name (&str, 0,
_dbus_string_get_length (&str)))
{
_dbus_warn ("Error name \"%s\" should have been valid\n", valid_interfaces[i]);
_dbus_assert_not_reached ("invalid error name");
}
++i;
}
i = 0;
while (i < (int) _DBUS_N_ELEMENTS (invalid_interfaces))
{
if (invalid_interfaces[i][0] != ':')
{
_dbus_string_init_const (&str, invalid_interfaces[i]);
if (_dbus_validate_error_name (&str, 0,
_dbus_string_get_length (&str)))
{
_dbus_warn ("Error name \"%s\" should have been invalid\n", invalid_interfaces[i]);
_dbus_assert_not_reached ("valid error name");
}
}
++i;
}
/* Member validation */
i = 0;
while (i < (int) _DBUS_N_ELEMENTS (valid_members))
{
_dbus_string_init_const (&str, valid_members[i]);
if (!_dbus_validate_member (&str, 0,
_dbus_string_get_length (&str)))
{
_dbus_warn ("Member \"%s\" should have been valid\n", valid_members[i]);
_dbus_assert_not_reached ("invalid member");
}
++i;
}
i = 0;
while (i < (int) _DBUS_N_ELEMENTS (invalid_members))
{
_dbus_string_init_const (&str, invalid_members[i]);
if (_dbus_validate_member (&str, 0,
_dbus_string_get_length (&str)))
{
_dbus_warn ("Member \"%s\" should have been invalid\n", invalid_members[i]);
_dbus_assert_not_reached ("valid member");
}
++i;
}
/* Signature validation */
i = 0;
while (i < (int) _DBUS_N_ELEMENTS (valid_signatures))
{
_dbus_string_init_const (&str, valid_signatures[i]);
if (!_dbus_validate_signature (&str, 0,
_dbus_string_get_length (&str)))
{
_dbus_warn ("Signature \"%s\" should have been valid\n", valid_signatures[i]);
_dbus_assert_not_reached ("invalid signature");
}
++i;
}
i = 0;
while (i < (int) _DBUS_N_ELEMENTS (invalid_signatures))
{
_dbus_string_init_const (&str, invalid_signatures[i]);
if (_dbus_validate_signature (&str, 0,
_dbus_string_get_length (&str)))
{
_dbus_warn ("Signature \"%s\" should have been invalid\n", invalid_signatures[i]);
_dbus_assert_not_reached ("valid signature");
}
++i;
}
/* Validate claimed length longer than real length */
_dbus_string_init_const (&str, "abc.efg");
if (_dbus_validate_bus_name (&str, 0, 8))
_dbus_assert_not_reached ("validated too-long string");
if (_dbus_validate_interface (&str, 0, 8))
_dbus_assert_not_reached ("validated too-long string");
if (_dbus_validate_error_name (&str, 0, 8))
_dbus_assert_not_reached ("validated too-long string");
_dbus_string_init_const (&str, "abc");
if (_dbus_validate_member (&str, 0, 4))
_dbus_assert_not_reached ("validated too-long string");
_dbus_string_init_const (&str, "sss");
if (_dbus_validate_signature (&str, 0, 4))
_dbus_assert_not_reached ("validated too-long signature");
/* Validate string exceeding max name length */
if (!_dbus_string_init (&str))
_dbus_assert_not_reached ("no memory");
while (_dbus_string_get_length (&str) <= DBUS_MAXIMUM_NAME_LENGTH)
if (!_dbus_string_append (&str, "abc.def"))
_dbus_assert_not_reached ("no memory");
if (_dbus_validate_bus_name (&str, 0, _dbus_string_get_length (&str)))
_dbus_assert_not_reached ("validated overmax string");
if (_dbus_validate_interface (&str, 0, _dbus_string_get_length (&str)))
_dbus_assert_not_reached ("validated overmax string");
if (_dbus_validate_error_name (&str, 0, _dbus_string_get_length (&str)))
_dbus_assert_not_reached ("validated overmax string");
/* overlong member */
_dbus_string_set_length (&str, 0);
while (_dbus_string_get_length (&str) <= DBUS_MAXIMUM_NAME_LENGTH)
if (!_dbus_string_append (&str, "abc"))
_dbus_assert_not_reached ("no memory");
if (_dbus_validate_member (&str, 0, _dbus_string_get_length (&str)))
_dbus_assert_not_reached ("validated overmax string");
/* overlong unique name */
_dbus_string_set_length (&str, 0);
_dbus_string_append (&str, ":");
while (_dbus_string_get_length (&str) <= DBUS_MAXIMUM_NAME_LENGTH)
if (!_dbus_string_append (&str, "abc"))
_dbus_assert_not_reached ("no memory");
if (_dbus_validate_bus_name (&str, 0, _dbus_string_get_length (&str)))
_dbus_assert_not_reached ("validated overmax string");
_dbus_string_free (&str);
/* Body validation; test basic validation of valid bodies for both endian */
{
int sequence;
DBusString signature;
DBusString body;
if (!_dbus_string_init (&signature) || !_dbus_string_init (&body))
_dbus_assert_not_reached ("oom");
sequence = 0;
while (dbus_internal_do_not_use_generate_bodies (sequence,
DBUS_LITTLE_ENDIAN,
&signature, &body))
{
DBusValidity validity;
validity = _dbus_validate_body_with_reason (&signature, 0,
DBUS_LITTLE_ENDIAN,
NULL, &body, 0,
_dbus_string_get_length (&body));
if (validity != DBUS_VALID)
{
_dbus_warn ("invalid code %d expected valid on sequence %d little endian\n",
validity, sequence);
_dbus_verbose_bytes_of_string (&signature, 0, _dbus_string_get_length (&signature));
_dbus_verbose_bytes_of_string (&body, 0, _dbus_string_get_length (&body));
_dbus_assert_not_reached ("test failed");
}
_dbus_string_set_length (&signature, 0);
_dbus_string_set_length (&body, 0);
++sequence;
}
sequence = 0;
while (dbus_internal_do_not_use_generate_bodies (sequence,
DBUS_BIG_ENDIAN,
&signature, &body))
{
DBusValidity validity;
validity = _dbus_validate_body_with_reason (&signature, 0,
DBUS_BIG_ENDIAN,
NULL, &body, 0,
_dbus_string_get_length (&body));
if (validity != DBUS_VALID)
{
_dbus_warn ("invalid code %d expected valid on sequence %d big endian\n",
validity, sequence);
_dbus_verbose_bytes_of_string (&signature, 0, _dbus_string_get_length (&signature));
_dbus_verbose_bytes_of_string (&body, 0, _dbus_string_get_length (&body));
_dbus_assert_not_reached ("test failed");
}
_dbus_string_set_length (&signature, 0);
_dbus_string_set_length (&body, 0);
++sequence;
}
_dbus_string_free (&signature);
_dbus_string_free (&body);
}
return TRUE;
} | 1 | [
"CWE-20"
] | dbus | 7b10b46c5c8658449783ce45f1273dd35c353bce | 213,135,535,826,009,350,000,000,000,000,000,000,000 | 478 | Bug 17803: Panic from dbus_signature_validate
* dbus/dbus-marshal-validate.c: Ensure we validate
a basic type before calling is_basic on it.
* dbus-marshal-validate-util.c: Test. |
sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *asconf_ack = arg;
struct sctp_chunk *last_asconf = asoc->addip_last_asconf;
struct sctp_chunk *abort;
struct sctp_paramhdr *err_param = NULL;
sctp_addiphdr_t *addip_hdr;
__u32 sent_serial, rcvd_serial;
if (!sctp_vtag_verify(asconf_ack, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
/* ADD-IP, Section 4.1.2:
* This chunk MUST be sent in an authenticated way by using
* the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
* is received unauthenticated it MUST be silently discarded as
* described in [I-D.ietf-tsvwg-sctp-auth].
*/
if (!sctp_addip_noauth && !asconf_ack->auth)
return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
/* Make sure that the ADDIP chunk has a valid length. */
if (!sctp_chunk_length_valid(asconf_ack, sizeof(sctp_addip_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data;
rcvd_serial = ntohl(addip_hdr->serial);
/* Verify the ASCONF-ACK chunk before processing it. */
if (!sctp_verify_asconf(asoc,
(sctp_paramhdr_t *)addip_hdr->params,
(void *)asconf_ack->chunk_end,
&err_param))
return sctp_sf_violation_paramlen(ep, asoc, type,
(void *)&err_param, commands);
if (last_asconf) {
addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr;
sent_serial = ntohl(addip_hdr->serial);
} else {
sent_serial = asoc->addip_serial - 1;
}
/* D0) If an endpoint receives an ASCONF-ACK that is greater than or
* equal to the next serial number to be used but no ASCONF chunk is
* outstanding the endpoint MUST ABORT the association. Note that a
* sequence number is greater than if it is no more than 2^^31-1
* larger than the current sequence number (using serial arithmetic).
*/
if (ADDIP_SERIAL_gte(rcvd_serial, sent_serial + 1) &&
!(asoc->addip_last_asconf)) {
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
if (abort) {
sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(abort));
}
/* We are going to ABORT, so we might as well stop
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
if ((rcvd_serial == sent_serial) && asoc->addip_last_asconf) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
asconf_ack))
return SCTP_DISPOSITION_CONSUME;
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
if (abort) {
sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(abort));
}
/* We are going to ABORT, so we might as well stop
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
return SCTP_DISPOSITION_DISCARD;
} | 1 | [
"CWE-20"
] | linux-2.6 | ba0166708ef4da7eeb61dd92bbba4d5a749d6561 | 238,014,936,619,603,620,000,000,000,000,000,000,000 | 110 | sctp: Fix kernel panic while process protocol violation parameter
Since the call to sctp_sf_abort_violation() needs a parameter 'arg' of
'struct sctp_chunk' type, it will read the chunk type and chunk length from
the chunk_hdr member of the chunk. But sctp_sf_violation_paramlen() was
always called with a 'struct sctp_paramhdr' typed parameter, which was then
passed on to sctp_sf_abort_violation(). This may cause a kernel panic.
sctp_sf_violation_paramlen()
|-- sctp_sf_abort_violation()
|-- sctp_make_abort_violation()
This patch fixes this problem. It also fixes two places which called
sctp_sf_violation_paramlen() with the wrong parameter type.
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
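A simplified, self-contained C sketch of the type confusion described above (the struct layouts and signatures here are stand-ins, not the kernel's; the helper shown in the next record is the real entry point):

/* The abort path dereferences fields that only exist in a chunk, so
 * handing it a parameter header reads garbage and can oops. */
struct sctp_chunk_s    { int chunk_type; };	/* stand-in */
struct sctp_paramhdr_s { short type, length; };	/* stand-in */

static void abort_violation(struct sctp_chunk_s *chunk)
{
	(void)chunk->chunk_type;	/* oopses if given a paramhdr */
}

static void violation_paramlen_fixed(struct sctp_chunk_s *chunk,
				     struct sctp_paramhdr_s *param)
{
	(void)param;			/* report the parameter separately */
	abort_violation(chunk);		/* always hand over the real chunk */
}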
static sctp_disposition_t sctp_sf_violation_paramlen(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands) {
static const char err_str[] = "The following parameter had invalid length:";
return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
sizeof(err_str));
} | 1 | [
"CWE-20"
] | linux-2.6 | ba0166708ef4da7eeb61dd92bbba4d5a749d6561 | 99,481,638,255,279,320,000,000,000,000,000,000,000 | 11 | sctp: Fix kernel panic while process protocol violation parameter
Since the call to sctp_sf_abort_violation() needs a parameter 'arg' of
'struct sctp_chunk' type, it will read the chunk type and chunk length from
the chunk_hdr member of the chunk. But sctp_sf_violation_paramlen() was
always called with a 'struct sctp_paramhdr' typed parameter, which was then
passed on to sctp_sf_abort_violation(). This may cause a kernel panic.
sctp_sf_violation_paramlen()
|-- sctp_sf_abort_violation()
|-- sctp_make_abort_violation()
This patch fixes this problem. It also fixes two places which called
sctp_sf_violation_paramlen() with the wrong parameter type.
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
struct sctp_paramhdr *param,
const struct sctp_chunk *chunk,
struct sctp_chunk **errp)
{
static const char error[] = "The following parameter had invalid length:";
size_t payload_len = WORD_ROUND(sizeof(error)) +
sizeof(sctp_paramhdr_t);
/* This is a fatal error. Any accumulated non-fatal errors are
* not reported.
*/
if (*errp)
sctp_chunk_free(*errp);
/* Create an error chunk and fill it in with our payload. */
*errp = sctp_make_op_error_space(asoc, chunk, payload_len);
if (*errp) {
sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
sizeof(error) + sizeof(sctp_paramhdr_t));
sctp_addto_chunk(*errp, sizeof(error), error);
sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param);
}
return 0;
} | 1 | [
"CWE-20"
] | linux-2.6 | ba0166708ef4da7eeb61dd92bbba4d5a749d6561 | 98,589,033,401,389,590,000,000,000,000,000,000,000 | 28 | sctp: Fix kernel panic while process protocol violation parameter
Since the call to sctp_sf_abort_violation() needs a parameter 'arg' of
'struct sctp_chunk' type, it will read the chunk type and chunk length from
the chunk_hdr member of the chunk. But sctp_sf_violation_paramlen() was
always called with a 'struct sctp_paramhdr' typed parameter, which was then
passed on to sctp_sf_abort_violation(). This may cause a kernel panic.
sctp_sf_violation_paramlen()
|-- sctp_sf_abort_violation()
|-- sctp_make_abort_violation()
This patch fixes this problem. It also fixes two places which called
sctp_sf_violation_paramlen() with the wrong parameter type.
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_chunk *asconf_ack = NULL;
struct sctp_paramhdr *err_param = NULL;
sctp_addiphdr_t *hdr;
union sctp_addr_param *addr_param;
__u32 serial;
int length;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
/* ADD-IP: Section 4.1.1
* This chunk MUST be sent in an authenticated way by using
* the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
* is received unauthenticated it MUST be silently discarded as
* described in [I-D.ietf-tsvwg-sctp-auth].
*/
if (!sctp_addip_noauth && !chunk->auth)
return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
/* Make sure that the ASCONF ADDIP chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_addip_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
hdr = (sctp_addiphdr_t *)chunk->skb->data;
serial = ntohl(hdr->serial);
addr_param = (union sctp_addr_param *)hdr->params;
length = ntohs(addr_param->p.length);
if (length < sizeof(sctp_paramhdr_t))
return sctp_sf_violation_paramlen(ep, asoc, type,
(void *)addr_param, commands);
/* Verify the ASCONF chunk before processing it. */
if (!sctp_verify_asconf(asoc,
(sctp_paramhdr_t *)((void *)addr_param + length),
(void *)chunk->chunk_end,
&err_param))
return sctp_sf_violation_paramlen(ep, asoc, type,
(void *)&err_param, commands);
/* ADDIP 5.2 E1) Compare the value of the serial number to the value
* the endpoint stored in a new association variable
* 'Peer-Serial-Number'.
*/
if (serial == asoc->peer.addip_serial + 1) {
/* If this is the first instance of ASCONF in the packet,
* we can clean our old ASCONF-ACKs.
*/
if (!chunk->has_asconf)
sctp_assoc_clean_asconf_ack_cache(asoc);
/* ADDIP 5.2 E4) When the Sequence Number matches the next one
* expected, process the ASCONF as described below and after
* processing the ASCONF Chunk, append an ASCONF-ACK Chunk to
* the response packet and cache a copy of it (in the event it
* later needs to be retransmitted).
*
* Essentially, do V1-V5.
*/
asconf_ack = sctp_process_asconf((struct sctp_association *)
asoc, chunk);
if (!asconf_ack)
return SCTP_DISPOSITION_NOMEM;
} else if (serial < asoc->peer.addip_serial + 1) {
/* ADDIP 5.2 E2)
* If the value found in the Sequence Number is less than the
* ('Peer- Sequence-Number' + 1), simply skip to the next
* ASCONF, and include in the outbound response packet
* any previously cached ASCONF-ACK response that was
* sent and saved that matches the Sequence Number of the
* ASCONF. Note: It is possible that no cached ASCONF-ACK
* Chunk exists. This will occur when an older ASCONF
* arrives out of order. In such a case, the receiver
* should skip the ASCONF Chunk and not include ASCONF-ACK
* Chunk for that chunk.
*/
asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial);
if (!asconf_ack)
return SCTP_DISPOSITION_DISCARD;
} else {
/* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since
* it must be either a stale packet or from an attacker.
*/
return SCTP_DISPOSITION_DISCARD;
}
/* ADDIP 5.2 E6) The destination address of the SCTP packet
* containing the ASCONF-ACK Chunks MUST be the source address of
* the SCTP packet that held the ASCONF Chunks.
*
* To do this properly, we'll set the destination address of the chunk
* and at the transmit time, will try look up the transport to use.
* Since ASCONFs may be bundled, the correct transport may not be
* created until we process the entire packet, thus this workaround.
*/
asconf_ack->dest = chunk->source;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
return SCTP_DISPOSITION_CONSUME;
} | 1 | [
"CWE-20"
] | linux-2.6 | ba0166708ef4da7eeb61dd92bbba4d5a749d6561 | 163,511,805,060,768,300,000,000,000,000,000,000,000 | 110 | sctp: Fix kernel panic while process protocol violation parameter
Since the call to sctp_sf_abort_violation() needs a parameter 'arg' of
'struct sctp_chunk' type, it will read the chunk type and chunk length from
the chunk_hdr member of the chunk. But sctp_sf_violation_paramlen() was
always called with a 'struct sctp_paramhdr' typed parameter, which was then
passed on to sctp_sf_abort_violation(). This may cause a kernel panic.
sctp_sf_violation_paramlen()
|-- sctp_sf_abort_violation()
|-- sctp_make_abort_violation()
This patch fixes this problem. It also fixes two places which called
sctp_sf_violation_paramlen() with the wrong parameter type.
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int rtc_dev_release(struct inode *inode, struct file *file)
{
struct rtc_device *rtc = file->private_data;
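/* note: nothing below detaches this file from rtc->async_queue (the SIGIO list) */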
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
clear_uie(rtc);
#endif
rtc_irq_set_state(rtc, NULL, 0);
if (rtc->ops->release)
rtc->ops->release(rtc->dev.parent);
clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
return 0;
} | 1 | [] | linux-2.6 | 2e4a75cdcb89ff53bb182dda3a6dcdc14befe007 | 335,404,817,289,562,520,000,000,000,000,000,000,000 | 15 | rtc: fix kernel panic on second use of SIGIO notification
When userspace uses SIGIO notification and forgets to disable it before
closing the file descriptor, rtc->async_queue contains a stale pointer to a
struct file. When user space enables SIGIO notification again in a
different process, the kernel dereferences this (poisoned) pointer and
crashes.
So disable SIGIO notification on close.
Kernel panic:
(second run of qemu (requires echo 1024 > /sys/class/rtc/rtc0/max_user_freq))
general protection fault: 0000 [1] PREEMPT
CPU 0
Modules linked in: af_packet snd_pcm_oss snd_mixer_oss snd_seq_oss snd_seq_midi_event snd_seq usbhid tuner tea5767 tda8290 tuner_xc2028 xc5000 tda9887 tuner_simple tuner_types mt20xx tea5761 tda9875 uhci_hcd ehci_hcd usbcore bttv snd_via82xx snd_ac97_codec ac97_bus snd_pcm snd_timer ir_common compat_ioctl32 snd_page_alloc videodev v4l1_compat snd_mpu401_uart snd_rawmidi v4l2_common videobuf_dma_sg videobuf_core snd_seq_device snd btcx_risc soundcore tveeprom i2c_viapro
Pid: 5781, comm: qemu-system-x86 Not tainted 2.6.27-rc6 #363
RIP: 0010:[<ffffffff8024f891>] [<ffffffff8024f891>] __lock_acquire+0x3db/0x73f
RSP: 0000:ffffffff80674cb8 EFLAGS: 00010002
RAX: ffff8800224c62f0 RBX: 0000000000000046 RCX: 0000000000000002
RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff8800224c62f0
RBP: ffffffff80674d08 R08: 0000000000000002 R09: 0000000000000001
R10: ffffffff80238941 R11: 0000000000000001 R12: 0000000000000000
R13: 6b6b6b6b6b6b6b6b R14: ffff88003a450080 R15: 0000000000000000
FS: 00007f98b69516f0(0000) GS:ffffffff80623200(0000) knlGS:00000000f7cc86d0
CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
CR2: 0000000000a87000 CR3: 0000000022598000 CR4: 00000000000006e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
Process qemu-system-x86 (pid: 5781, threadinfo ffff880028812000, task ffff88003a450080)
Stack: ffffffff80674cf8 0000000180238440 0000000200000002 0000000000000000
ffff8800224c62f0 0000000000000046 0000000000000000 0000000000000002
0000000000000002 0000000000000000 ffffffff80674d68 ffffffff8024fc7a
Call Trace:
<IRQ> [<ffffffff8024fc7a>] lock_acquire+0x85/0xa9
[<ffffffff8029cb62>] ? send_sigio+0x2a/0x184
[<ffffffff80491d1f>] _read_lock+0x3e/0x4a
[<ffffffff8029cb62>] ? send_sigio+0x2a/0x184
[<ffffffff8029cb62>] send_sigio+0x2a/0x184
[<ffffffff8024fb97>] ? __lock_acquire+0x6e1/0x73f
[<ffffffff8029cd4d>] ? kill_fasync+0x2c/0x4e
[<ffffffff8029cd10>] __kill_fasync+0x54/0x65
[<ffffffff8029cd5b>] kill_fasync+0x3a/0x4e
[<ffffffff80402896>] rtc_update_irq+0x9c/0xa5
[<ffffffff80404640>] cmos_interrupt+0xae/0xc0
[<ffffffff8025d1c1>] handle_IRQ_event+0x25/0x5a
[<ffffffff8025e5e4>] handle_edge_irq+0xdd/0x123
[<ffffffff8020da34>] do_IRQ+0xe4/0x144
[<ffffffff8020bad6>] ret_from_intr+0x0/0xf
<EOI> [<ffffffff8026fdc2>] ? __alloc_pages_internal+0xe7/0x3ad
[<ffffffff8033fe67>] ? clear_page_c+0x7/0x10
[<ffffffff8026fc10>] ? get_page_from_freelist+0x385/0x450
[<ffffffff8026fdc2>] ? __alloc_pages_internal+0xe7/0x3ad
[<ffffffff80280aac>] ? anon_vma_prepare+0x2e/0xf6
[<ffffffff80279400>] ? handle_mm_fault+0x227/0x6a5
[<ffffffff80494716>] ? do_page_fault+0x494/0x83f
[<ffffffff8049251d>] ? error_exit+0x0/0xa9
Code: cc 41 39 45 28 74 24 e8 5e 1d 0f 00 85 c0 0f 84 6a 03 00 00 83 3d 8f a9 aa 00 00 be 47 03 00 00 0f 84 6a 02 00 00 e9 53 03 00 00 <41> ff 85 38 01 00 00 45 8b be 90 06 00 00 41 83 ff 2f 76 24 e8
RIP [<ffffffff8024f891>] __lock_acquire+0x3db/0x73f
RSP <ffffffff80674cb8>
---[ end trace 431877d860448760 ]---
Kernel panic - not syncing: Aiee, killing interrupt handler!
Signed-off-by: Marcin Slusarz <[email protected]>
Acked-by: Alessandro Zummo <[email protected]>
Acked-by: David Brownell <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
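A sketch of the fix the message implies for rtc_dev_release() above, reusing rtc_dev_fasync() (shown in the next record) to detach the file on close; the placement before the IRQ teardown, and omitting the UIE-emulation cleanup for brevity, are choices made here:

/* Drop this file from rtc->async_queue before the last close completes,
 * so no stale struct file pointer survives in the SIGIO list. */
static int rtc_dev_release(struct inode *inode, struct file *file)
{
	struct rtc_device *rtc = file->private_data;

	/* same effect as fasync_helper(-1, file, 0, &rtc->async_queue) */
	rtc_dev_fasync(-1, file, 0);

	rtc_irq_set_state(rtc, NULL, 0);
	if (rtc->ops->release)
		rtc->ops->release(rtc->dev.parent);

	clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
	return 0;
}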
static int rtc_dev_fasync(int fd, struct file *file, int on)
{
struct rtc_device *rtc = file->private_data;
return fasync_helper(fd, file, on, &rtc->async_queue);
} | 1 | [] | linux-2.6 | 2e4a75cdcb89ff53bb182dda3a6dcdc14befe007 | 30,417,915,765,676,503,000,000,000,000,000,000,000 | 5 | rtc: fix kernel panic on second use of SIGIO notification
When userspace uses SIGIO notification and forgets to disable it before
closing the file descriptor, rtc->async_queue contains a stale pointer to a
struct file. When user space enables SIGIO notification again in a
different process, the kernel dereferences this (poisoned) pointer and
crashes.
So disable SIGIO notification on close.
Kernel panic:
(second run of qemu (requires echo 1024 > /sys/class/rtc/rtc0/max_user_freq))
general protection fault: 0000 [1] PREEMPT
CPU 0
Modules linked in: af_packet snd_pcm_oss snd_mixer_oss snd_seq_oss snd_seq_midi_event snd_seq usbhid tuner tea5767 tda8290 tuner_xc2028 xc5000 tda9887 tuner_simple tuner_types mt20xx tea5761 tda9875 uhci_hcd ehci_hcd usbcore bttv snd_via82xx snd_ac97_codec ac97_bus snd_pcm snd_timer ir_common compat_ioctl32 snd_page_alloc videodev v4l1_compat snd_mpu401_uart snd_rawmidi v4l2_common videobuf_dma_sg videobuf_core snd_seq_device snd btcx_risc soundcore tveeprom i2c_viapro
Pid: 5781, comm: qemu-system-x86 Not tainted 2.6.27-rc6 #363
RIP: 0010:[<ffffffff8024f891>] [<ffffffff8024f891>] __lock_acquire+0x3db/0x73f
RSP: 0000:ffffffff80674cb8 EFLAGS: 00010002
RAX: ffff8800224c62f0 RBX: 0000000000000046 RCX: 0000000000000002
RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff8800224c62f0
RBP: ffffffff80674d08 R08: 0000000000000002 R09: 0000000000000001
R10: ffffffff80238941 R11: 0000000000000001 R12: 0000000000000000
R13: 6b6b6b6b6b6b6b6b R14: ffff88003a450080 R15: 0000000000000000
FS: 00007f98b69516f0(0000) GS:ffffffff80623200(0000) knlGS:00000000f7cc86d0
CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
CR2: 0000000000a87000 CR3: 0000000022598000 CR4: 00000000000006e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
Process qemu-system-x86 (pid: 5781, threadinfo ffff880028812000, task ffff88003a450080)
Stack: ffffffff80674cf8 0000000180238440 0000000200000002 0000000000000000
ffff8800224c62f0 0000000000000046 0000000000000000 0000000000000002
0000000000000002 0000000000000000 ffffffff80674d68 ffffffff8024fc7a
Call Trace:
<IRQ> [<ffffffff8024fc7a>] lock_acquire+0x85/0xa9
[<ffffffff8029cb62>] ? send_sigio+0x2a/0x184
[<ffffffff80491d1f>] _read_lock+0x3e/0x4a
[<ffffffff8029cb62>] ? send_sigio+0x2a/0x184
[<ffffffff8029cb62>] send_sigio+0x2a/0x184
[<ffffffff8024fb97>] ? __lock_acquire+0x6e1/0x73f
[<ffffffff8029cd4d>] ? kill_fasync+0x2c/0x4e
[<ffffffff8029cd10>] __kill_fasync+0x54/0x65
[<ffffffff8029cd5b>] kill_fasync+0x3a/0x4e
[<ffffffff80402896>] rtc_update_irq+0x9c/0xa5
[<ffffffff80404640>] cmos_interrupt+0xae/0xc0
[<ffffffff8025d1c1>] handle_IRQ_event+0x25/0x5a
[<ffffffff8025e5e4>] handle_edge_irq+0xdd/0x123
[<ffffffff8020da34>] do_IRQ+0xe4/0x144
[<ffffffff8020bad6>] ret_from_intr+0x0/0xf
<EOI> [<ffffffff8026fdc2>] ? __alloc_pages_internal+0xe7/0x3ad
[<ffffffff8033fe67>] ? clear_page_c+0x7/0x10
[<ffffffff8026fc10>] ? get_page_from_freelist+0x385/0x450
[<ffffffff8026fdc2>] ? __alloc_pages_internal+0xe7/0x3ad
[<ffffffff80280aac>] ? anon_vma_prepare+0x2e/0xf6
[<ffffffff80279400>] ? handle_mm_fault+0x227/0x6a5
[<ffffffff80494716>] ? do_page_fault+0x494/0x83f
[<ffffffff8049251d>] ? error_exit+0x0/0xa9
Code: cc 41 39 45 28 74 24 e8 5e 1d 0f 00 85 c0 0f 84 6a 03 00 00 83 3d 8f a9 aa 00 00 be 47 03 00 00 0f 84 6a 02 00 00 e9 53 03 00 00 <41> ff 85 38 01 00 00 45 8b be 90 06 00 00 41 83 ff 2f 76 24 e8
RIP [<ffffffff8024f891>] __lock_acquire+0x3db/0x73f
RSP <ffffffff80674cb8>
---[ end trace 431877d860448760 ]---
Kernel panic - not syncing: Aiee, killing interrupt handler!
Signed-off-by: Marcin Slusarz <[email protected]>
Acked-by: Alessandro Zummo <[email protected]>
Acked-by: David Brownell <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
const union sctp_addr *addr,
const gfp_t gfp,
const int peer_state)
{
struct sctp_transport *peer;
struct sctp_sock *sp;
unsigned short port;
sp = sctp_sk(asoc->base.sk);
/* AF_INET and AF_INET6 share common port field. */
port = ntohs(addr->v4.sin_port);
SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
" port: %d state:%d\n",
asoc,
addr,
port,
peer_state);
/* Set the port if it has not been set yet. */
if (0 == asoc->peer.port)
asoc->peer.port = port;
/* Check to see if this is a duplicate. */
peer = sctp_assoc_lookup_paddr(asoc, addr);
if (peer) {
if (peer->state == SCTP_UNKNOWN) {
if (peer_state == SCTP_ACTIVE)
peer->state = SCTP_ACTIVE;
if (peer_state == SCTP_UNCONFIRMED)
peer->state = SCTP_UNCONFIRMED;
}
return peer;
}
peer = sctp_transport_new(addr, gfp);
if (!peer)
return NULL;
sctp_transport_set_owner(peer, asoc);
/* Initialize the peer's heartbeat interval based on the
* association configured value.
*/
peer->hbinterval = asoc->hbinterval;
/* Set the path max_retrans. */
peer->pathmaxrxt = asoc->pathmaxrxt;
/* Initialize the peer's SACK delay timeout based on the
* association configured value.
*/
peer->sackdelay = asoc->sackdelay;
peer->sackfreq = asoc->sackfreq;
/* Enable/disable heartbeat, SACK delay, and path MTU discovery
* based on association setting.
*/
peer->param_flags = asoc->param_flags;
/* Initialize the pmtu of the transport. */
if (peer->param_flags & SPP_PMTUD_ENABLE)
sctp_transport_pmtu(peer);
else if (asoc->pathmtu)
peer->pathmtu = asoc->pathmtu;
else
peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
/* If this is the first transport addr on this association,
* initialize the association PMTU to the peer's PMTU.
* If not and the current association PMTU is higher than the new
* peer's PMTU, reset the association PMTU to the new peer's PMTU.
*/
if (asoc->pathmtu)
asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
else
asoc->pathmtu = peer->pathmtu;
SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to "
"%d\n", asoc, asoc->pathmtu);
peer->pmtu_pending = 0;
asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu);
/* The asoc->peer.port might not be meaningful yet, but
* initialize the packet structure anyway.
*/
sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
asoc->peer.port);
/* 7.2.1 Slow-Start
*
* o The initial cwnd before DATA transmission or after a sufficiently
* long idle period MUST be set to
* min(4*MTU, max(2*MTU, 4380 bytes))
*
* o The initial value of ssthresh MAY be arbitrarily high
* (for example, implementations MAY use the size of the
* receiver advertised window).
*/
peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
/* At this point, we may not have the receiver's advertised window,
* so initialize ssthresh to the default value and it will be set
* later when we process the INIT.
*/
peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;
peer->partial_bytes_acked = 0;
peer->flight_size = 0;
/* Set the transport's RTO.initial value */
peer->rto = asoc->rto_initial;
/* Set the peer's active state. */
peer->state = peer_state;
/* Attach the remote transport to our asoc. */
list_add_tail(&peer->transports, &asoc->peer.transport_addr_list);
asoc->peer.transport_count++;
/* If we do not yet have a primary path, set one. */
if (!asoc->peer.primary_path) {
sctp_assoc_set_primary(asoc, peer);
asoc->peer.retran_path = peer;
}
if (asoc->peer.active_path == asoc->peer.retran_path) {
asoc->peer.retran_path = peer;
}
return peer;
} | 1 | [
"CWE-287"
] | linux-2.6 | add52379dde2e5300e2d574b172e62c6cf43b3d3 | 254,469,762,198,362,900,000,000,000,000,000,000,000 | 135 | sctp: Fix oops when INIT-ACK indicates that peer doesn't support AUTH
If an INIT-ACK is received with a SupportedExtensions parameter which
indicates that the peer does not support AUTH, the packet will be
silently ignored, and sctp_process_init() cleans up all of the
transports in the association.
When the T1-Init timer expires, an OOPS happens while we try to choose
a different init transport.
The solution is to only clean up the non-active transports, i.e
the ones that the peer added. However, that introduces a problem
with sctp_connectx(), because we don't mark the proper state for
the transports provided by the user. So, we'll simply mark
user-provided transports as ACTIVE. That will allow INIT
retransmissions to work properly in the sctp_connectx() context
and prevent the crash.
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
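A hedged sketch of the narrowed cleanup the message describes, applied to the clean_up loop in sctp_process_init() below: remove only transports the peer added, so user-provided ones survive for INIT retransmission. Treating any non-ACTIVE transport as peer-added is the assumption here:

/* Release only peer-added transports; user-provided ones were marked
 * SCTP_ACTIVE at sctp_connectx() time and must stay on the list. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
	transport = list_entry(pos, struct sctp_transport, transports);
	if (transport->state != SCTP_ACTIVE)
		sctp_assoc_rm_peer(asoc, transport);
}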
int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
const union sctp_addr *peer_addr,
sctp_init_chunk_t *peer_init, gfp_t gfp)
{
union sctp_params param;
struct sctp_transport *transport;
struct list_head *pos, *temp;
char *cookie;
/* We must include the address that the INIT packet came from.
* This is the only address that matters for an INIT packet.
* When processing a COOKIE ECHO, we retrieve the from address
* of the INIT from the cookie.
*/
/* This implementation defaults to making the first transport
* added as the primary transport. The source address seems to
* be a better choice than any of the embedded addresses.
*/
if (peer_addr) {
if(!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
goto nomem;
}
/* Process the initialization parameters. */
sctp_walk_params(param, peer_init, init_hdr.params) {
if (!sctp_process_param(asoc, param, peer_addr, gfp))
goto clean_up;
}
/* AUTH: After processing the parameters, make sure that we
* have all the required info to potentially do authentications.
*/
if (asoc->peer.auth_capable && (!asoc->peer.peer_random ||
!asoc->peer.peer_hmacs))
asoc->peer.auth_capable = 0;
/* In a non-backward compatible mode, if the peer claims
* support for ADD-IP but not AUTH, the ADD-IP spec states
* that we MUST ABORT the association. Section 6. The section
* also give us an option to silently ignore the packet, which
* is what we'll do here.
*/
if (!sctp_addip_noauth &&
(asoc->peer.asconf_capable && !asoc->peer.auth_capable)) {
asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP |
SCTP_PARAM_DEL_IP |
SCTP_PARAM_SET_PRIMARY);
asoc->peer.asconf_capable = 0;
goto clean_up;
}
/* Walk list of transports, removing transports in the UNKNOWN state. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
transport = list_entry(pos, struct sctp_transport, transports);
if (transport->state == SCTP_UNKNOWN) {
sctp_assoc_rm_peer(asoc, transport);
}
}
/* The fixed INIT headers are always in network byte
* order.
*/
asoc->peer.i.init_tag =
ntohl(peer_init->init_hdr.init_tag);
asoc->peer.i.a_rwnd =
ntohl(peer_init->init_hdr.a_rwnd);
asoc->peer.i.num_outbound_streams =
ntohs(peer_init->init_hdr.num_outbound_streams);
asoc->peer.i.num_inbound_streams =
ntohs(peer_init->init_hdr.num_inbound_streams);
asoc->peer.i.initial_tsn =
ntohl(peer_init->init_hdr.initial_tsn);
/* Apply the upper bounds for output streams based on peer's
* number of inbound streams.
*/
if (asoc->c.sinit_num_ostreams >
ntohs(peer_init->init_hdr.num_inbound_streams)) {
asoc->c.sinit_num_ostreams =
ntohs(peer_init->init_hdr.num_inbound_streams);
}
if (asoc->c.sinit_max_instreams >
ntohs(peer_init->init_hdr.num_outbound_streams)) {
asoc->c.sinit_max_instreams =
ntohs(peer_init->init_hdr.num_outbound_streams);
}
/* Copy Initiation tag from INIT to VT_peer in cookie. */
asoc->c.peer_vtag = asoc->peer.i.init_tag;
/* Peer Rwnd : Current calculated value of the peer's rwnd. */
asoc->peer.rwnd = asoc->peer.i.a_rwnd;
/* Copy cookie in case we need to resend COOKIE-ECHO. */
cookie = asoc->peer.cookie;
if (cookie) {
asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp);
if (!asoc->peer.cookie)
goto clean_up;
}
/* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily
* high (for example, implementations MAY use the size of the receiver
* advertised window).
*/
list_for_each_entry(transport, &asoc->peer.transport_addr_list,
transports) {
transport->ssthresh = asoc->peer.i.a_rwnd;
}
/* Set up the TSN tracking pieces. */
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE,
asoc->peer.i.initial_tsn);
/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
*
* The stream sequence number in all the streams shall start
* from 0 when the association is established. Also, when the
* stream sequence number reaches the value 65535 the next
* stream sequence number shall be set to 0.
*/
/* Allocate storage for the negotiated streams if it is not a temporary
* association.
*/
if (!asoc->temp) {
int error;
asoc->ssnmap = sctp_ssnmap_new(asoc->c.sinit_max_instreams,
asoc->c.sinit_num_ostreams, gfp);
if (!asoc->ssnmap)
goto clean_up;
error = sctp_assoc_set_id(asoc, gfp);
if (error)
goto clean_up;
}
/* ADDIP Section 4.1 ASCONF Chunk Procedures
*
* When an endpoint has an ASCONF signaled change to be sent to the
* remote endpoint it should do the following:
* ...
* A2) A serial number should be assigned to the Chunk. The serial
* number should be a monotonically increasing number. All serial
* numbers are defined to be initialized at the start of the
* association to the same value as the Initial TSN.
*/
asoc->peer.addip_serial = asoc->peer.i.initial_tsn - 1;
return 1;
clean_up:
/* Release the transport structures. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
transport = list_entry(pos, struct sctp_transport, transports);
list_del_init(pos);
sctp_transport_free(transport);
}
asoc->peer.transport_count = 0;
nomem:
return 0;
} | 1 | [
"CWE-287"
] | linux-2.6 | add52379dde2e5300e2d574b172e62c6cf43b3d3 | 305,211,551,040,565,700,000,000,000,000,000,000,000 | 167 | sctp: Fix oops when INIT-ACK indicates that peer doesn't support AUTH
If an INIT-ACK is received with a SupportedExtensions parameter which
indicates that the peer does not support AUTH, the packet will be
silently ignored, and sctp_process_init() cleans up all of the
transports in the association.
When the T1-Init timer expires, an OOPS happens while we try to choose
a different init transport.
The solution is to only clean up the non-active transports, i.e
the ones that the peer added. However, that introduces a problem
with sctp_connectx(), because we don't mark the proper state for
the transports provided by the user. So, we'll simply mark
user-provided transports as ACTIVE. That will allow INIT
retransmissions to work properly in the sctp_connectx() context
and prevent the crash.
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
loff_t *ppos, size_t len, unsigned int flags)
{
int ret;
if (unlikely(!out->f_op || !out->f_op->splice_write))
return -EINVAL;
if (unlikely(!(out->f_mode & FMODE_WRITE)))
return -EBADF;
ret = rw_verify_area(WRITE, out, ppos, len);
if (unlikely(ret < 0))
return ret;
return out->f_op->splice_write(pipe, out, ppos, len, flags);
} | 1 | [
"CWE-264"
] | linux-2.6 | efc968d450e013049a662d22727cf132618dcb2f | 104,054,770,320,161,400,000,000,000,000,000,000,000 | 17 | Don't allow splice() to files opened with O_APPEND
This is debatable, but while we're debating it, let's disallow the
combination of splice and an O_APPEND destination.
It's not entirely clear what the semantics of O_APPEND should be, and
POSIX apparently expects pwrite() to ignore O_APPEND, for example. So
we could make up any semantics we want, including the old ones.
But Miklos convinced me that we should at least give it some thought,
and that accepting writes at arbitrary offsets is wrong at least for
IS_APPEND() files (which always have O_APPEND set, even if the reverse
isn't true: you can obviously have O_APPEND set on a regular file).
So disallow O_APPEND entirely for now. I doubt anybody cares, and this
way we have one less gray area to worry about.
Reported-and-argued-for-by: Miklos Szeredi <[email protected]>
Acked-by: Jens Axboe <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
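A minimal sketch of the guard, assuming it sits next to the existing FMODE_WRITE test in do_splice_from() above (the exact error code is a choice, not mandated by the message):

/* Refuse to splice into a file opened with O_APPEND: the append
 * semantics for writes at arbitrary offsets are unclear. */
if (unlikely(out->f_flags & O_APPEND))
	return -EINVAL;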
int hfsplus_find_cat(struct super_block *sb, u32 cnid,
struct hfs_find_data *fd)
{
hfsplus_cat_entry tmp;
int err;
u16 type;
hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
if (err)
return err;
type = be16_to_cpu(tmp.type);
if (type != HFSPLUS_FOLDER_THREAD && type != HFSPLUS_FILE_THREAD) {
printk(KERN_ERR "hfs: found bad thread record in catalog\n");
return -EIO;
}
hfsplus_cat_build_key_uni(fd->search_key, be32_to_cpu(tmp.thread.parentID),
&tmp.thread.nodeName);
return hfs_brec_find(fd);
} | 1 | [
"CWE-119"
] | linux-2.6 | efc7ffcb4237f8cb9938909041c4ed38f6e1bf40 | 256,655,408,888,579,640,000,000,000,000,000,000,000 | 22 | hfsplus: fix Buffer overflow with a corrupted image
When an hfsplus image gets corrupted, it might happen that the catalog
namelength field gets b0rked. If we mount such an image, the memcpy() in
hfsplus_cat_build_key_uni() writes more than the 255 that fit in the name
field. Depending on the size of the overwritten data, we either only get
memory corruption or also trigger an oops like this:
[ 221.628020] BUG: unable to handle kernel paging request at c82b0000
[ 221.629066] IP: [<c022d4b1>] hfsplus_find_cat+0x10d/0x151
[ 221.629066] *pde = 0ea29163 *pte = 082b0160
[ 221.629066] Oops: 0002 [#1] PREEMPT DEBUG_PAGEALLOC
[ 221.629066] Modules linked in:
[ 221.629066]
[ 221.629066] Pid: 4845, comm: mount Not tainted (2.6.27-rc4-00123-gd3ee1b4-dirty #28)
[ 221.629066] EIP: 0060:[<c022d4b1>] EFLAGS: 00010206 CPU: 0
[ 221.629066] EIP is at hfsplus_find_cat+0x10d/0x151
[ 221.629066] EAX: 00000029 EBX: 00016210 ECX: 000042c2 EDX: 00000002
[ 221.629066] ESI: c82d70ca EDI: c82b0000 EBP: c82d1bcc ESP: c82d199c
[ 221.629066] DS: 007b ES: 007b FS: 0000 GS: 0033 SS: 0068
[ 221.629066] Process mount (pid: 4845, ti=c82d1000 task=c8224060 task.ti=c82d1000)
[ 221.629066] Stack: c080b3c4 c82aa8f8 c82d19c2 00016210 c080b3be c82d1bd4 c82aa8f0 00000300
[ 221.629066] 01000000 750008b1 74006e00 74006900 65006c00 c82d6400 c013bd35 c8224060
[ 221.629066] 00000036 00000046 c82d19f0 00000082 c8224548 c8224060 00000036 c0d653cc
[ 221.629066] Call Trace:
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c0107aa3>] ? native_sched_clock+0x82/0x96
[ 221.629066] [<c01302d2>] ? __kernel_text_address+0x1b/0x27
[ 221.629066] [<c010487a>] ? dump_trace+0xca/0xd6
[ 221.629066] [<c0109e32>] ? save_stack_address+0x0/0x2c
[ 221.629066] [<c0109eaf>] ? save_stack_trace+0x1c/0x3a
[ 221.629066] [<c013b571>] ? save_trace+0x37/0x8d
[ 221.629066] [<c013b62e>] ? add_lock_to_list+0x67/0x8d
[ 221.629066] [<c013ea1c>] ? validate_chain+0x8a4/0x9f4
[ 221.629066] [<c013553d>] ? down+0xc/0x2f
[ 221.629066] [<c013f1f6>] ? __lock_acquire+0x68a/0x6e0
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c0107aa3>] ? native_sched_clock+0x82/0x96
[ 221.629066] [<c013da5d>] ? mark_held_locks+0x43/0x5a
[ 221.629066] [<c013dc3a>] ? trace_hardirqs_on+0xb/0xd
[ 221.629066] [<c013dbf4>] ? trace_hardirqs_on_caller+0xf4/0x12f
[ 221.629066] [<c06abec8>] ? _spin_unlock_irqrestore+0x42/0x58
[ 221.629066] [<c013555c>] ? down+0x2b/0x2f
[ 221.629066] [<c022aa68>] ? hfsplus_iget+0xa0/0x154
[ 221.629066] [<c022b0b9>] ? hfsplus_fill_super+0x280/0x447
[ 221.629066] [<c0107aa3>] ? native_sched_clock+0x82/0x96
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013f1f6>] ? __lock_acquire+0x68a/0x6e0
[ 221.629066] [<c041c9e4>] ? string+0x2b/0x74
[ 221.629066] [<c041cd16>] ? vsnprintf+0x2e9/0x512
[ 221.629066] [<c010487a>] ? dump_trace+0xca/0xd6
[ 221.629066] [<c0109eaf>] ? save_stack_trace+0x1c/0x3a
[ 221.629066] [<c0109eaf>] ? save_stack_trace+0x1c/0x3a
[ 221.629066] [<c013b571>] ? save_trace+0x37/0x8d
[ 221.629066] [<c013b62e>] ? add_lock_to_list+0x67/0x8d
[ 221.629066] [<c013ea1c>] ? validate_chain+0x8a4/0x9f4
[ 221.629066] [<c01354d3>] ? up+0xc/0x2f
[ 221.629066] [<c013f1f6>] ? __lock_acquire+0x68a/0x6e0
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c0107aa3>] ? native_sched_clock+0x82/0x96
[ 221.629066] [<c041cfb7>] ? snprintf+0x1b/0x1d
[ 221.629066] [<c01ba466>] ? disk_name+0x25/0x67
[ 221.629066] [<c0183960>] ? get_sb_bdev+0xcd/0x10b
[ 221.629066] [<c016ad92>] ? kstrdup+0x2a/0x4c
[ 221.629066] [<c022a7b3>] ? hfsplus_get_sb+0x13/0x15
[ 221.629066] [<c022ae39>] ? hfsplus_fill_super+0x0/0x447
[ 221.629066] [<c0183583>] ? vfs_kern_mount+0x3b/0x76
[ 221.629066] [<c0183602>] ? do_kern_mount+0x32/0xba
[ 221.629066] [<c01960d4>] ? do_new_mount+0x46/0x74
[ 221.629066] [<c0196277>] ? do_mount+0x175/0x193
[ 221.629066] [<c013dbf4>] ? trace_hardirqs_on_caller+0xf4/0x12f
[ 221.629066] [<c01663b2>] ? __get_free_pages+0x1e/0x24
[ 221.629066] [<c06ac07b>] ? lock_kernel+0x19/0x8c
[ 221.629066] [<c01962e6>] ? sys_mount+0x51/0x9b
[ 221.629066] [<c01962f9>] ? sys_mount+0x64/0x9b
[ 221.629066] [<c01038bd>] ? sysenter_do_call+0x12/0x31
[ 221.629066] =======================
[ 221.629066] Code: 89 c2 c1 e2 08 c1 e8 08 09 c2 8b 85 e8 fd ff ff 66 89 50 06 89 c7 53 83 c7 08 56 57 68 c4 b3 80 c0 e8 8c 5c ef ff 89 d9 c1 e9 02 <f3> a5 89 d9 83 e1 03 74 02 f3 a4 83 c3 06 8b 95 e8 fd ff ff 0f
[ 221.629066] EIP: [<c022d4b1>] hfsplus_find_cat+0x10d/0x151 SS:ESP 0068:c82d199c
[ 221.629066] ---[ end trace e417a1d67f0d0066 ]---
Since hfsplus_cat_build_key_uni() returns void and only has one callsite,
the check is performed at the callsite.
Signed-off-by: Eric Sesterhenn <[email protected]>
Reviewed-by: Pekka Enberg <[email protected]>
Cc: Roman Zippel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
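A sketch of the callsite check the message describes for hfsplus_find_cat() above, assuming nodeName.length is the big-endian unicode length field of the on-disk thread record:

/* Bound the on-disk name length before it is copied into the
 * fixed-size search key (at most 255 unicode characters fit). */
if (be16_to_cpu(tmp.thread.nodeName.length) > 255) {
	printk(KERN_ERR "hfs: catalog name length corrupted\n");
	return -EIO;
}
hfsplus_cat_build_key_uni(fd->search_key,
			  be32_to_cpu(tmp.thread.parentID),
			  &tmp.thread.nodeName);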
int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max)
{
struct page *page;
struct address_space *mapping;
__be32 *pptr, *curr, *end;
u32 mask, start, len, n;
__be32 val;
int i;
len = *max;
if (!len)
return size;
dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
pptr = kmap(page);
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
i = offset % 32;
offset &= ~(PAGE_CACHE_BITS - 1);
if ((size ^ offset) / PAGE_CACHE_BITS)
end = pptr + PAGE_CACHE_BITS / 32;
else
end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
/* scan the first partial u32 for zero bits */
val = *curr;
if (~val) {
n = be32_to_cpu(val);
mask = (1U << 31) >> i;
for (; i < 32; mask >>= 1, i++) {
if (!(n & mask))
goto found;
}
}
curr++;
/* scan complete u32s for the first zero bit */
while (1) {
while (curr < end) {
val = *curr;
if (~val) {
n = be32_to_cpu(val);
mask = 1 << 31;
for (i = 0; i < 32; mask >>= 1, i++) {
if (!(n & mask))
goto found;
}
}
curr++;
}
kunmap(page);
offset += PAGE_CACHE_BITS;
if (offset >= size)
break;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
NULL);
curr = pptr = kmap(page);
if ((size ^ offset) / PAGE_CACHE_BITS)
end = pptr + PAGE_CACHE_BITS / 32;
else
end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
}
dprint(DBG_BITMAP, "bitmap full\n");
start = size;
goto out;
found:
start = offset + (curr - pptr) * 32 + i;
if (start >= size) {
dprint(DBG_BITMAP, "bitmap full\n");
goto out;
}
/* do any partial u32 at the start */
len = min(size - start, len);
while (1) {
n |= mask;
if (++i >= 32)
break;
mask >>= 1;
if (!--len || n & mask)
goto done;
}
if (!--len)
goto done;
*curr++ = cpu_to_be32(n);
/* do full u32s */
while (1) {
while (curr < end) {
n = be32_to_cpu(*curr);
if (len < 32)
goto last;
if (n) {
len = 32;
goto last;
}
*curr++ = cpu_to_be32(0xffffffff);
len -= 32;
}
set_page_dirty(page);
kunmap(page);
offset += PAGE_CACHE_BITS;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
NULL);
pptr = kmap(page);
curr = pptr;
end = pptr + PAGE_CACHE_BITS / 32;
}
last:
/* do any partial u32 at end */
mask = 1U << 31;
for (i = 0; i < len; i++) {
if (n & mask)
break;
n |= mask;
mask >>= 1;
}
done:
*curr = cpu_to_be32(n);
set_page_dirty(page);
kunmap(page);
*max = offset + (curr - pptr) * 32 + i - start;
HFSPLUS_SB(sb).free_blocks -= *max;
sb->s_dirt = 1;
dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
out:
mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
return start;
} | 1 | [
"CWE-20"
] | linux-2.6 | 649f1ee6c705aab644035a7998d7b574193a598a | 162,883,961,593,441,360,000,000,000,000,000,000,000 | 130 | hfsplus: check read_mapping_page() return value
While testing more corrupted images with hfsplus, I came across
one which triggered the following bug:
[15840.675016] BUG: unable to handle kernel paging request at fffffffb
[15840.675016] IP: [<c0116a4f>] kmap+0x15/0x56
[15840.675016] *pde = 00008067 *pte = 00000000
[15840.675016] Oops: 0000 [#1] PREEMPT DEBUG_PAGEALLOC
[15840.675016] Modules linked in:
[15840.675016]
[15840.675016] Pid: 11575, comm: ln Not tainted (2.6.27-rc4-00123-gd3ee1b4-dirty #29)
[15840.675016] EIP: 0060:[<c0116a4f>] EFLAGS: 00010202 CPU: 0
[15840.675016] EIP is at kmap+0x15/0x56
[15840.675016] EAX: 00000246 EBX: fffffffb ECX: 00000000 EDX: cab919c0
[15840.675016] ESI: 000007dd EDI: cab0bcf4 EBP: cab0bc98 ESP: cab0bc94
[15840.675016] DS: 007b ES: 007b FS: 0000 GS: 0033 SS: 0068
[15840.675016] Process ln (pid: 11575, ti=cab0b000 task=cab919c0 task.ti=cab0b000)
[15840.675016] Stack: 00000000 cab0bcdc c0231cfb 00000000 cab0bce0 00000800 ca9290c0 fffffffb
[15840.675016] cab145d0 cab919c0 cab15998 22222222 22222222 22222222 00000001 cab15960
[15840.675016] 000007dd cab0bcf4 cab0bd04 c022cb3a cab0bcf4 cab15a6c ca9290c0 00000000
[15840.675016] Call Trace:
[15840.675016] [<c0231cfb>] ? hfsplus_block_allocate+0x6f/0x2d3
[15840.675016] [<c022cb3a>] ? hfsplus_file_extend+0xc4/0x1db
[15840.675016] [<c022ce41>] ? hfsplus_get_block+0x8c/0x19d
[15840.675016] [<c06adde4>] ? sub_preempt_count+0x9d/0xab
[15840.675016] [<c019ece6>] ? __block_prepare_write+0x147/0x311
[15840.675016] [<c0161934>] ? __grab_cache_page+0x52/0x73
[15840.675016] [<c019ef4f>] ? block_write_begin+0x79/0xd5
[15840.675016] [<c022cdb5>] ? hfsplus_get_block+0x0/0x19d
[15840.675016] [<c019f22a>] ? cont_write_begin+0x27f/0x2af
[15840.675016] [<c022cdb5>] ? hfsplus_get_block+0x0/0x19d
[15840.675016] [<c0139ebe>] ? tick_program_event+0x28/0x4c
[15840.675016] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[15840.675016] [<c022b723>] ? hfsplus_write_begin+0x2d/0x32
[15840.675016] [<c022cdb5>] ? hfsplus_get_block+0x0/0x19d
[15840.675016] [<c0161988>] ? pagecache_write_begin+0x33/0x107
[15840.675016] [<c01879e5>] ? __page_symlink+0x3c/0xae
[15840.675016] [<c019ad34>] ? __mark_inode_dirty+0x12f/0x137
[15840.675016] [<c0187a70>] ? page_symlink+0x19/0x1e
[15840.675016] [<c022e6eb>] ? hfsplus_symlink+0x41/0xa6
[15840.675016] [<c01886a9>] ? vfs_symlink+0x99/0x101
[15840.675016] [<c018a2f6>] ? sys_symlinkat+0x6b/0xad
[15840.675016] [<c018a348>] ? sys_symlink+0x10/0x12
[15840.675016] [<c01038bd>] ? sysenter_do_call+0x12/0x31
[15840.675016] =======================
[15840.675016] Code: 00 00 75 10 83 3d 88 2f ec c0 02 75 07 89 d0 e8 12 56 05 00 5d c3 55 ba 06 00 00 00 89 e5 53 89 c3 b8 3d eb 7e c0 e8 16 74 00 00 <8b> 03 c1 e8 1e 69 c0 d8 02 00 00 05 b8 69 8e c0 2b 80 c4 02 00
[15840.675016] EIP: [<c0116a4f>] kmap+0x15/0x56 SS:ESP 0068:cab0bc94
[15840.675016] ---[ end trace 4fea40dad6b70e5f ]---
This happens because the return value of read_mapping_page() is passed on
to kmap() unchecked. The bug is triggered after the first
read_mapping_page() in hfsplus_block_allocate(); this patch fixes all
three usages in this function but leaves the ones further down in the
file unchanged.
Signed-off-by: Eric Sesterhenn <[email protected]>
Cc: Roman Zippel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
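A sketch of the check for the first read_mapping_page() call in hfsplus_block_allocate() above; read_mapping_page() returns an ERR_PTR on failure, so it must be tested before kmap(). Treating the failure like a full bitmap (start = size) is one plausible recovery, not the only one:

page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
if (IS_ERR(page)) {
	start = size;	/* bail out through the existing exit path */
	goto out;
}
pptr = kmap(page);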
int hfs_cat_find_brec(struct super_block *sb, u32 cnid,
struct hfs_find_data *fd)
{
hfs_cat_rec rec;
int res, len, type;
hfs_cat_build_key(sb, fd->search_key, cnid, NULL);
res = hfs_brec_read(fd, &rec, sizeof(rec));
if (res)
return res;
type = rec.type;
if (type != HFS_CDR_THD && type != HFS_CDR_FTH) {
printk(KERN_ERR "hfs: found bad thread record in catalog\n");
return -EIO;
}
fd->search_key->cat.ParID = rec.thread.ParID;
len = fd->search_key->cat.CName.len = rec.thread.CName.len;
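/* note: len comes straight from the on-disk record and is used unchecked below */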
memcpy(fd->search_key->cat.CName.name, rec.thread.CName.name, len);
return hfs_brec_find(fd);
} | 1 | [] | linux-2.6 | d38b7aa7fc3371b52d036748028db50b585ade2e | 71,919,391,830,078,220,000,000,000,000,000,000,000 | 22 | hfs: fix namelength memory corruption
Fix a stack corruption caused by a corrupted hfs filesystem. If the
catalog name length is corrupted, the memcpy overwrites the catalog btree
structure. Since the field is limited to HFS_NAMELEN bytes in the
structure and the file format, we throw an error if it is too long.
Cc: Roman Zippel <[email protected]>
Signed-off-by: Eric Sesterhenn <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
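A sketch of the bound check in hfs_cat_find_brec() above, rejecting thread records whose name length exceeds HFS_NAMELEN before the memcpy runs:

fd->search_key->cat.ParID = rec.thread.ParID;
len = fd->search_key->cat.CName.len = rec.thread.CName.len;
if (len > HFS_NAMELEN) {
	printk(KERN_ERR "hfs: bad catalog namelength\n");
	return -EIO;
}
memcpy(fd->search_key->cat.CName.name, rec.thread.CName.name, len);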
void __scm_destroy(struct scm_cookie *scm)
{
struct scm_fp_list *fpl = scm->fp;
int i;
if (fpl) {
scm->fp = NULL;
for (i=fpl->count-1; i>=0; i--)
fput(fpl->fp[i]);
kfree(fpl);
}
} | 1 | [] | linux-2.6 | f8d570a4745835f2238a33b537218a1bb03fc671 | 10,851,480,685,579,340,000,000,000,000,000,000,000 | 12 | net: Fix recursive descent in __scm_destroy().
__scm_destroy() walks the list of file descriptors in the scm_fp_list
pointed to by the scm_cookie argument.
Those, in turn, can close sockets and invoke __scm_destroy() again.
There is nothing which limits how deeply this can occur.
The idea for how to fix this is from Linus. Basically, we do all of
the fput()s at the top level by collecting all of the scm_fp_list
objects hit by an fput(). Inside of the initial __scm_destroy() we
keep running the list until it is empty.
Signed-off-by: David S. Miller <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
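A sketch of the iterative teardown the message describes for __scm_destroy() above. It assumes two additions the prose implies: a per-task pointer (current->scm_work_list) and a list node embedded in struct scm_fp_list; nested invocations then queue their list instead of recursing:

void __scm_destroy(struct scm_cookie *scm)
{
	struct scm_fp_list *fpl = scm->fp;
	int i;

	if (!fpl)
		return;
	scm->fp = NULL;

	if (current->scm_work_list) {
		/* nested call from fput(): defer to the outermost one */
		list_add_tail(&fpl->list, current->scm_work_list);
	} else {
		LIST_HEAD(work_list);

		current->scm_work_list = &work_list;
		list_add(&fpl->list, &work_list);
		while (!list_empty(&work_list)) {
			fpl = list_first_entry(&work_list,
					       struct scm_fp_list, list);
			list_del(&fpl->list);
			for (i = fpl->count - 1; i >= 0; i--)
				fput(fpl->fp[i]);	/* may queue more */
			kfree(fpl);
		}
		current->scm_work_list = NULL;
	}
}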
static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
{
int *fdp = (int*)CMSG_DATA(cmsg);
struct scm_fp_list *fpl = *fplp;
struct file **fpp;
int i, num;
num = (cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)))/sizeof(int);
if (num <= 0)
return 0;
if (num > SCM_MAX_FD)
return -EINVAL;
if (!fpl)
{
fpl = kmalloc(sizeof(struct scm_fp_list), GFP_KERNEL);
if (!fpl)
return -ENOMEM;
*fplp = fpl;
fpl->count = 0;
}
fpp = &fpl->fp[fpl->count];
if (fpl->count + num > SCM_MAX_FD)
return -EINVAL;
/*
* Verify the descriptors and increment the usage count.
*/
for (i=0; i< num; i++)
{
int fd = fdp[i];
struct file *file;
if (fd < 0 || !(file = fget(fd)))
return -EBADF;
*fpp++ = file;
fpl->count++;
}
return num;
} | 1 | [] | linux-2.6 | f8d570a4745835f2238a33b537218a1bb03fc671 | 29,905,897,789,343,096,000,000,000,000,000,000,000 | 44 | net: Fix recursive descent in __scm_destroy().
__scm_destroy() walks the list of file descriptors in the scm_fp_list
pointed to by the scm_cookie argument.
Those, in turn, can close sockets and invoke __scm_destroy() again.
There is nothing which limits how deeply this can occur.
The idea for how to fix this is from Linus. Basically, we do all of
the fput()s at the top level by collecting all of the scm_fp_list
objects hit by an fput(). Inside of the initial __scm_destroy() we
keep running the list until it is empty.
Signed-off-by: David S. Miller <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
{
struct scm_fp_list *new_fpl;
int i;
if (!fpl)
return NULL;
new_fpl = kmalloc(sizeof(*fpl), GFP_KERNEL);
if (new_fpl) {
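/* take an extra reference on each file so the duplicate owns its own refs */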
for (i=fpl->count-1; i>=0; i--)
get_file(fpl->fp[i]);
memcpy(new_fpl, fpl, sizeof(*fpl));
}
return new_fpl;
} | 1 | [] | linux-2.6 | f8d570a4745835f2238a33b537218a1bb03fc671 | 98,295,612,003,832,760,000,000,000,000,000,000,000 | 16 | net: Fix recursive descent in __scm_destroy().
__scm_destroy() walks the list of file descriptors in the scm_fp_list
pointed to by the scm_cookie argument.
Those, in turn, can close sockets and invoke __scm_destroy() again.
There is nothing which limits how deeply this can occur.
The idea for how to fix this is from Linus. Basically, we do all of
the fput()s at the top level by collecting all of the scm_fp_list
objects hit by an fput(). Inside of the initial __scm_destroy() we
keep running the list until it is empty.
Signed-off-by: David S. Miller <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static void inc_inflight_move_tail(struct unix_sock *u)
{
atomic_long_inc(&u->inflight);
/*
* If this is still a candidate, move it to the end of the
* list, so that it's checked even if it was already passed
* over
*/
if (u->gc_candidate)
list_move_tail(&u->link, &gc_candidates);
} | 1 | [] | linux-2.6 | 6209344f5a3795d34b7f2c0061f49802283b6bdd | 200,788,920,948,014,500,000,000,000,000,000,000,000 | 11 | net: unix: fix inflight counting bug in garbage collector
Previously I assumed that the receive queues of candidates don't
change during the GC. This is only half true: nothing can be received
from the queues (see comment in unix_gc()), but buffers could be added
through the other half of the socket pair, which may still have file
descriptors referring to it.
This can result in inc_inflight_move_tail() erroneously increasing the
"inflight" counter for a unix socket for which dec_inflight() wasn't
previously called. This in turn can trigger the "BUG_ON(total_refs <
inflight_refs)" in a later garbage collection run.
Fix this by only manipulating the "inflight" counter for sockets which
are candidates themselves. Duplicating the file references in
unix_attach_fds() is also needed to prevent a socket becoming a
candidate for GC while the skb that contains it is not yet queued.
Reported-by: Andrea Bittau <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
CC: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
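A sketch of the duplication fix for unix_attach_fds() (shown in the next record): copy the file references with scm_fp_dup() before the skb is queued, so a socket in the list cannot become a GC candidate while the skb is in flight. Changing the return type to int (so callers can handle -ENOMEM) is part of the sketch:

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	/* duplicate the fp list: the skb owns its own file references */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	for (i = scm->fp->count - 1; i >= 0; i--)
		unix_inflight(scm->fp->fp[i]);
	skb->destructor = unix_destruct_fds;
	return 0;
}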
static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
int i;
for (i=scm->fp->count-1; i>=0; i--)
unix_inflight(scm->fp->fp[i]);
UNIXCB(skb).fp = scm->fp;
skb->destructor = unix_destruct_fds;
scm->fp = NULL;
} | 1 | [] | linux-2.6 | 6209344f5a3795d34b7f2c0061f49802283b6bdd | 11,999,274,033,051,870,000,000,000,000,000,000,000 | 9 | net: unix: fix inflight counting bug in garbage collector
Previously I assumed that the receive queues of candidates don't
change during the GC. This is only half true: nothing can be received
from the queues (see comment in unix_gc()), but buffers could be added
through the other half of the socket pair, which may still have file
descriptors referring to it.
This can result in inc_inflight_move_tail() erroneously increasing the
"inflight" counter for a unix socket for which dec_inflight() wasn't
previously called. This in turn can trigger the "BUG_ON(total_refs <
inflight_refs)" in a later garbage collection run.
Fix this by only manipulating the "inflight" counter for sockets which
are candidates themselves. Duplicating the file references in
unix_attach_fds() is also needed to prevent a socket becoming a
candidate for GC while the skb that contains it is not yet queued.
Reported-by: Andrea Bittau <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
CC: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
struct sk_buff *skb;
struct sk_buff *next;
spin_lock(&x->sk_receive_queue.lock);
receive_queue_for_each_skb(x, next, skb) {
/*
* Do we have file descriptors ?
*/
if (UNIXCB(skb).fp) {
bool hit = false;
/*
* Process the descriptors of this socket
*/
int nfd = UNIXCB(skb).fp->count;
struct file **fp = UNIXCB(skb).fp->fp;
while (nfd--) {
/*
* Get the socket the fd matches
* if it indeed does so
*/
struct sock *sk = unix_get_socket(*fp++);
if (sk) {
hit = true;
func(unix_sk(sk));
}
}
if (hit && hitlist != NULL) {
__skb_unlink(skb, &x->sk_receive_queue);
__skb_queue_tail(hitlist, skb);
}
}
}
spin_unlock(&x->sk_receive_queue.lock);
} | 1 | [] | linux-2.6 | 6209344f5a3795d34b7f2c0061f49802283b6bdd | 175,844,469,987,644,700,000,000,000,000,000,000,000 | 37 | net: unix: fix inflight counting bug in garbage collector
Previously I assumed that the receive queues of candidates don't
change during the GC. This is only half true: nothing can be received
from the queues (see comment in unix_gc()), but buffers could be added
through the other half of the socket pair, which may still have file
descriptors referring to it.
This can result in inc_inflight_move_tail() erroneously increasing the
"inflight" counter for a unix socket for which dec_inflight() wasn't
previously called. This in turn can trigger the "BUG_ON(total_refs <
inflight_refs)" in a later garbage collection run.
Fix this by only manipulating the "inflight" counter for sockets which
are candidates themselves. Duplicating the file references in
unix_attach_fds() is also needed to prevent a socket becoming a
candidate for GC while the skb that contains it is not yet queued.
Reported-by: Andrea Bittau <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
CC: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
void unix_gc(void)
{
static bool gc_in_progress = false;
struct unix_sock *u;
struct unix_sock *next;
struct sk_buff_head hitlist;
struct list_head cursor;
spin_lock(&unix_gc_lock);
/* Avoid a recursive GC. */
if (gc_in_progress)
goto out;
gc_in_progress = true;
/*
* First, select candidates for garbage collection. Only
* in-flight sockets are considered, and from those only ones
* which don't have any external reference.
*
* Holding unix_gc_lock will protect these candidates from
* being detached, and hence from gaining an external
* reference. This also means, that since there are no
* possible receivers, the receive queues of these sockets are
* static during the GC, even though the dequeue is done
* before the detach without atomicity guarantees.
*/
list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
long total_refs;
long inflight_refs;
total_refs = file_count(u->sk.sk_socket->file);
inflight_refs = atomic_long_read(&u->inflight);
BUG_ON(inflight_refs < 1);
BUG_ON(total_refs < inflight_refs);
if (total_refs == inflight_refs) {
list_move_tail(&u->link, &gc_candidates);
u->gc_candidate = 1;
}
}
/*
* Now remove all internal in-flight reference to children of
* the candidates.
*/
list_for_each_entry(u, &gc_candidates, link)
scan_children(&u->sk, dec_inflight, NULL);
/*
* Restore the references for children of all candidates,
* which have remaining references. Do this recursively, so
* only those remain, which form cyclic references.
*
* Use a "cursor" link, to make the list traversal safe, even
* though elements might be moved about.
*/
list_add(&cursor, &gc_candidates);
while (cursor.next != &gc_candidates) {
u = list_entry(cursor.next, struct unix_sock, link);
/* Move cursor to after the current position. */
list_move(&cursor, &u->link);
if (atomic_long_read(&u->inflight) > 0) {
list_move_tail(&u->link, &gc_inflight_list);
u->gc_candidate = 0;
scan_children(&u->sk, inc_inflight_move_tail, NULL);
}
}
list_del(&cursor);
/*
* Now gc_candidates contains only garbage. Restore original
* inflight counters for these as well, and remove the skbuffs
* which are creating the cycle(s).
*/
skb_queue_head_init(&hitlist);
list_for_each_entry(u, &gc_candidates, link)
scan_children(&u->sk, inc_inflight, &hitlist);
spin_unlock(&unix_gc_lock);
/* Here we are. Hitlist is filled. Die. */
__skb_queue_purge(&hitlist);
spin_lock(&unix_gc_lock);
/* All candidates should have been detached by now. */
BUG_ON(!list_empty(&gc_candidates));
gc_in_progress = false;
out:
spin_unlock(&unix_gc_lock);
} | 1 | [] | linux-2.6 | 6209344f5a3795d34b7f2c0061f49802283b6bdd | 64,534,971,667,528,470,000,000,000,000,000,000,000 | 96 | net: unix: fix inflight counting bug in garbage collector
Previously I assumed that the receive queues of candidates don't
change during the GC. This is only half true: nothing can be received
from the queues (see comment in unix_gc()), but buffers could be added
through the other half of the socket pair, which may still have file
descriptors referring to it.
This can result in inc_inflight_move_tail() erroneously increasing the
"inflight" counter for a unix socket for which dec_inflight() wasn't
previously called. This in turn can trigger the "BUG_ON(total_refs <
inflight_refs)" in a later garbage collection run.
Fix this by only manipulating the "inflight" counter for sockets which
are candidates themselves. Duplicating the file references in
unix_attach_fds() is also needed to prevent a socket becoming a
candidate for GC while the skb that contains it is not yet queued.
Reported-by: Andrea Bittau <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
CC: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct unix_sock *u = unix_sk(sk);
struct sockaddr_un *sunaddr=msg->msg_name;
struct sock *other = NULL;
int namelen = 0; /* fake GCC */
int err;
unsigned hash;
struct sk_buff *skb;
long timeo;
struct scm_cookie tmp_scm;
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
err = scm_send(sock, msg, siocb->scm);
if (err < 0)
return err;
err = -EOPNOTSUPP;
if (msg->msg_flags&MSG_OOB)
goto out;
if (msg->msg_namelen) {
err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
if (err < 0)
goto out;
namelen = err;
} else {
sunaddr = NULL;
err = -ENOTCONN;
other = unix_peer_get(sk);
if (!other)
goto out;
}
if (test_bit(SOCK_PASSCRED, &sock->flags)
&& !u->addr && (err = unix_autobind(sock)) != 0)
goto out;
err = -EMSGSIZE;
if (len > sk->sk_sndbuf - 32)
goto out;
skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
if (skb==NULL)
goto out;
memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
if (siocb->scm->fp)
unix_attach_fds(siocb->scm, skb);
unix_get_secdata(siocb->scm, skb);
skb_reset_transport_header(skb);
err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
if (err)
goto out_free;
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
restart:
if (!other) {
err = -ECONNRESET;
if (sunaddr == NULL)
goto out_free;
other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
hash, &err);
if (other==NULL)
goto out_free;
}
unix_state_lock(other);
err = -EPERM;
if (!unix_may_send(sk, other))
goto out_unlock;
if (sock_flag(other, SOCK_DEAD)) {
/*
* Check with 1003.1g - what should
* datagram error
*/
unix_state_unlock(other);
sock_put(other);
err = 0;
unix_state_lock(sk);
if (unix_peer(sk) == other) {
unix_peer(sk)=NULL;
unix_state_unlock(sk);
unix_dgram_disconnected(sk, other);
sock_put(other);
err = -ECONNREFUSED;
} else {
unix_state_unlock(sk);
}
other = NULL;
if (err)
goto out_free;
goto restart;
}
err = -EPIPE;
if (other->sk_shutdown & RCV_SHUTDOWN)
goto out_unlock;
if (sk->sk_type != SOCK_SEQPACKET) {
err = security_unix_may_send(sk->sk_socket, other->sk_socket);
if (err)
goto out_unlock;
}
if (unix_peer(other) != sk && unix_recvq_full(other)) {
if (!timeo) {
err = -EAGAIN;
goto out_unlock;
}
timeo = unix_wait_for_peer(other, timeo);
err = sock_intr_errno(timeo);
if (signal_pending(current))
goto out_free;
goto restart;
}
skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_unlock(other);
other->sk_data_ready(other, len);
sock_put(other);
scm_destroy(siocb->scm);
return len;
out_unlock:
unix_state_unlock(other);
out_free:
kfree_skb(skb);
out:
if (other)
sock_put(other);
scm_destroy(siocb->scm);
return err;
} | 1 | [] | linux-2.6 | 6209344f5a3795d34b7f2c0061f49802283b6bdd | 167,783,015,965,478,600,000,000,000,000,000,000,000 | 149 | net: unix: fix inflight counting bug in garbage collector
Previously I assumed that the receive queues of candidates don't
change during the GC. This is only half true: nothing can be received
from the queues (see comment in unix_gc()), but buffers could be added
through the other half of the socket pair, which may still have file
descriptors referring to it.
This can result in inc_inflight_move_tail() erroneously increasing the
"inflight" counter for a unix socket for which dec_inflight() wasn't
previously called. This in turn can trigger the "BUG_ON(total_refs <
inflight_refs)" in a later garbage collection run.
Fix this by only manipulating the "inflight" counter for sockets which
are candidates themselves. Duplicating the file references in
unix_attach_fds() is also needed to prevent a socket becoming a
candidate for GC while the skb that contains it is not yet queued.
Reported-by: Andrea Bittau <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
CC: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
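The candidate-only half of the fix lands in the descriptor-scanning helper of the garbage collector rather than in the send path shown above. A fragment-level sketch of the patched check, assuming the scan_inflight() helper from net/unix/garbage.c of that era:
if (sk) {
	struct unix_sock *u = unix_sk(sk);
	/*
	 * Ignore non-candidates: they can have been added to the
	 * receive queues after the GC started, so their inflight
	 * count was never decremented by this GC run.
	 */
	if (u->gc_candidate) {
		hit = true;
		func(u);
	}
}
With this guard, inc_inflight_move_tail() can no longer inflate the counter of a socket that dec_inflight() never touched, which is exactly the BUG_ON scenario described above.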
static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
struct sock *sk = sock->sk;
struct sock *other = NULL;
struct sockaddr_un *sunaddr=msg->msg_name;
int err,size;
struct sk_buff *skb;
int sent=0;
struct scm_cookie tmp_scm;
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
err = scm_send(sock, msg, siocb->scm);
if (err < 0)
return err;
err = -EOPNOTSUPP;
if (msg->msg_flags&MSG_OOB)
goto out_err;
if (msg->msg_namelen) {
err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
goto out_err;
} else {
sunaddr = NULL;
err = -ENOTCONN;
other = unix_peer(sk);
if (!other)
goto out_err;
}
if (sk->sk_shutdown & SEND_SHUTDOWN)
goto pipe_err;
while(sent < len)
{
/*
* Optimisation for the fact that under 0.01% of X
* messages typically need breaking up.
*/
size = len-sent;
/* Keep two messages in the pipe so it schedules better */
if (size > ((sk->sk_sndbuf >> 1) - 64))
size = (sk->sk_sndbuf >> 1) - 64;
if (size > SKB_MAX_ALLOC)
size = SKB_MAX_ALLOC;
/*
* Grab a buffer
*/
skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err);
if (skb==NULL)
goto out_err;
/*
* If you pass two values to the sock_alloc_send_skb
* it tries to grab the large buffer with GFP_NOFS
* (which can fail easily), and if it fails grab the
* fallback size buffer which is under a page and will
* succeed. [Alan]
*/
size = min_t(int, size, skb_tailroom(skb));
memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
if (siocb->scm->fp)
unix_attach_fds(siocb->scm, skb);
if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
kfree_skb(skb);
goto out_err;
}
unix_state_lock(other);
if (sock_flag(other, SOCK_DEAD) ||
(other->sk_shutdown & RCV_SHUTDOWN))
goto pipe_err_free;
skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_unlock(other);
other->sk_data_ready(other, size);
sent+=size;
}
scm_destroy(siocb->scm);
siocb->scm = NULL;
return sent;
pipe_err_free:
unix_state_unlock(other);
kfree_skb(skb);
pipe_err:
if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL))
send_sig(SIGPIPE,current,0);
err = -EPIPE;
out_err:
scm_destroy(siocb->scm);
siocb->scm = NULL;
return sent ? : err;
} | 1 | [] | linux-2.6 | 6209344f5a3795d34b7f2c0061f49802283b6bdd | 190,972,759,458,050,800,000,000,000,000,000,000,000 | 108 | net: unix: fix inflight counting bug in garbage collector
Previously I assumed that the receive queues of candidates don't
change during the GC. This is only half true: nothing can be received
from the queues (see comment in unix_gc()), but buffers could be added
through the other half of the socket pair, which may still have file
descriptors referring to it.
This can result in inc_inflight_move_tail() erroneously increasing the
"inflight" counter for a unix socket for which dec_inflight() wasn't
previously called. This in turn can trigger the "BUG_ON(total_refs <
inflight_refs)" in a later garbage collection run.
Fix this by only manipulating the "inflight" counter for sockets which
are candidates themselves. Duplicating the file references in
unix_attach_fds() is also needed to prevent a socket becoming a
candidate for GC while the skb that contains it is not yet queued.
Reported-by: Andrea Bittau <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
CC: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk), *newu, *otheru;
struct sock *newsk = NULL;
struct sock *other = NULL;
struct sk_buff *skb = NULL;
unsigned hash;
int st;
int err;
long timeo;
err = unix_mkname(sunaddr, addr_len, &hash);
if (err < 0)
goto out;
addr_len = err;
if (test_bit(SOCK_PASSCRED, &sock->flags)
&& !u->addr && (err = unix_autobind(sock)) != 0)
goto out;
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
/* First of all allocate resources.
If we will make it after state is locked,
we will have to recheck all again in any case.
*/
err = -ENOMEM;
/* create new sock for complete connection */
newsk = unix_create1(NULL);
if (newsk == NULL)
goto out;
/* Allocate skb for sending to listening sock */
skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
if (skb == NULL)
goto out;
restart:
/* Find listening sock. */
other = unix_find_other(sunaddr, addr_len, sk->sk_type, hash, &err);
if (!other)
goto out;
/* Latch state of peer */
unix_state_lock(other);
/* Apparently VFS overslept socket death. Retry. */
if (sock_flag(other, SOCK_DEAD)) {
unix_state_unlock(other);
sock_put(other);
goto restart;
}
err = -ECONNREFUSED;
if (other->sk_state != TCP_LISTEN)
goto out_unlock;
if (skb_queue_len(&other->sk_receive_queue) >
other->sk_max_ack_backlog) {
err = -EAGAIN;
if (!timeo)
goto out_unlock;
timeo = unix_wait_for_peer(other, timeo);
err = sock_intr_errno(timeo);
if (signal_pending(current))
goto out;
sock_put(other);
goto restart;
}
/* Latch our state.
It is a tricky place. We need to grab the write lock and cannot
drop the lock on the peer. It is dangerous because deadlock is
possible. The connect-to-self case and simultaneous
attempts to connect are eliminated by checking the socket
state. other is TCP_LISTEN; if sk is TCP_LISTEN too, we
check this before attempting to grab the lock.
Well, and we have to recheck the state after socket locked.
*/
st = sk->sk_state;
switch (st) {
case TCP_CLOSE:
/* This is ok... continue with connect */
break;
case TCP_ESTABLISHED:
/* Socket is already connected */
err = -EISCONN;
goto out_unlock;
default:
err = -EINVAL;
goto out_unlock;
}
unix_state_lock_nested(sk);
if (sk->sk_state != st) {
unix_state_unlock(sk);
unix_state_unlock(other);
sock_put(other);
goto restart;
}
err = security_unix_stream_connect(sock, other->sk_socket, newsk);
if (err) {
unix_state_unlock(sk);
goto out_unlock;
}
/* The way is open! Quickly set all the necessary fields... */
sock_hold(sk);
unix_peer(newsk) = sk;
newsk->sk_state = TCP_ESTABLISHED;
newsk->sk_type = sk->sk_type;
newsk->sk_peercred.pid = current->tgid;
newsk->sk_peercred.uid = current->euid;
newsk->sk_peercred.gid = current->egid;
newu = unix_sk(newsk);
newsk->sk_sleep = &newu->peer_wait;
otheru = unix_sk(other);
/* copy address information from listening to new sock*/
if (otheru->addr) {
atomic_inc(&otheru->addr->refcnt);
newu->addr = otheru->addr;
}
if (otheru->dentry) {
newu->dentry = dget(otheru->dentry);
newu->mnt = mntget(otheru->mnt);
}
/* Set credentials */
sk->sk_peercred = other->sk_peercred;
sock->state = SS_CONNECTED;
sk->sk_state = TCP_ESTABLISHED;
sock_hold(newsk);
smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */
unix_peer(sk) = newsk;
unix_state_unlock(sk);
/* take ten and send info to listening sock */
spin_lock(&other->sk_receive_queue.lock);
__skb_queue_tail(&other->sk_receive_queue, skb);
/* Undo the artificially decreased inflight after the embryo
* is installed in the listening socket. */
atomic_inc(&newu->inflight);
spin_unlock(&other->sk_receive_queue.lock);
unix_state_unlock(other);
other->sk_data_ready(other, 0);
sock_put(other);
return 0;
out_unlock:
if (other)
unix_state_unlock(other);
out:
if (skb)
kfree_skb(skb);
if (newsk)
unix_release_sock(newsk, 0);
if (other)
sock_put(other);
return err;
} | 1 | [] | linux-2.6 | 1fd05ba5a2f2aa8e7b9b52ef55df850e2e7d54c9 | 36,809,652,256,356,650,000,000,000,000,000,000,000 | 178 | [AF_UNIX]: Rewrite garbage collector, fixes race.
Throw out the old mark & sweep garbage collector and put in a
refcounting cycle detecting one.
The old one had a race with recvmsg, which resulted in false positives
and hence data loss. The old algorithm operated on all unix sockets
in the system, so any additional locking would have meant performance
problems for all users of these.
The new algorithm instead only operates on "in flight" sockets, which
are very rare, and the additional locking for these doesn't negatively
impact the vast majority of users.
In fact it's probable that there weren't *any* heavy senders of
sockets over sockets; otherwise the above race would have been
discovered long ago.
The patch works OK with the app that exposed the race with the old
code. The garbage collection has also been verified to work in a few
simple cases.
Signed-off-by: Miklos Szeredi <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
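The heart of the replacement collector is the candidate test: a socket can only be garbage if every file reference to it is itself sitting in an in-flight SCM_RIGHTS message. A simplified sketch of that first pass (locking elided, names as in net/unix/garbage.c of that era):
struct unix_sock *u, *next;
LIST_HEAD(gc_candidates);
list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
	int total_refs = file_count(u->sk.sk_socket->file);
	int inflight_refs = atomic_read(&u->inflight);
	BUG_ON(inflight_refs < 1);
	BUG_ON(total_refs < inflight_refs);
	if (total_refs == inflight_refs) {
		/* Nothing outside in-flight messages refers to it. */
		list_move_tail(&u->link, &gc_candidates);
		u->gc_candidate = 1;
	}
}
Because only in-flight sockets live on gc_inflight_list, this pass is proportional to the number of sockets-in-sockets, not to every unix socket in the system - which is the scalability point made above.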
static inline int empty_stack(void)
{
return gc_current == GC_HEAD;
} | 1 | [] | linux-2.6 | 1fd05ba5a2f2aa8e7b9b52ef55df850e2e7d54c9 | 142,163,827,764,239,930,000,000,000,000,000,000,000 | 4 | [AF_UNIX]: Rewrite garbage collector, fixes race.
Throw out the old mark & sweep garbage collector and put in a
refcounting cycle detecting one.
The old one had a race with recvmsg, which resulted in false positives
and hence data loss. The old algorithm operated on all unix sockets
in the system, so any additional locking would have meant performance
problems for all users of these.
The new algorithm instead only operates on "in flight" sockets, which
are very rare, and the additional locking for these doesn't negatively
impact the vast majority of users.
In fact it's probable that there weren't *any* heavy senders of
sockets over sockets; otherwise the above race would have been
discovered long ago.
The patch works OK with the app that exposed the race with the old
code. The garbage collection has also been verified to work in a few
simple cases.
Signed-off-by: Miklos Szeredi <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static void maybe_unmark_and_push(struct sock *x)
{
struct unix_sock *u = unix_sk(x);
if (u->gc_tree != GC_ORPHAN)
return;
sock_hold(x);
u->gc_tree = gc_current;
gc_current = x;
} | 1 | [] | linux-2.6 | 1fd05ba5a2f2aa8e7b9b52ef55df850e2e7d54c9 | 75,883,221,676,963,210,000,000,000,000,000,000,000 | 10 | [AF_UNIX]: Rewrite garbage collector, fixes race.
Throw out the old mark & sweep garbage collector and put in a
refcounting cycle detecting one.
The old one had a race with recvmsg, which resulted in false positives
and hence data loss. The old algorithm operated on all unix sockets
in the system, so any additional locking would have meant performance
problems for all users of these.
The new algorithm instead only operates on "in flight" sockets, which
are very rare, and the additional locking for these doesn't negatively
impact the vast majority of users.
In fact it's probable that there weren't *any* heavy senders of
sockets over sockets; otherwise the above race would have been
discovered long ago.
The patch works OK with the app that exposed the race with the old
code. The garbage collection has also been verified to work in a few
simple cases.
Signed-off-by: Miklos Szeredi <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
void unix_gc(void)
{
static DEFINE_MUTEX(unix_gc_sem);
int i;
struct sock *s;
struct sk_buff_head hitlist;
struct sk_buff *skb;
/*
* Avoid a recursive GC.
*/
if (!mutex_trylock(&unix_gc_sem))
return;
spin_lock(&unix_table_lock);
forall_unix_sockets(i, s)
{
unix_sk(s)->gc_tree = GC_ORPHAN;
}
/*
* Everything is now marked
*/
/* Invariant to be maintained:
- everything unmarked is either:
-- (a) on the stack, or
-- (b) has all of its children unmarked
- everything on the stack is always unmarked
- nothing is ever pushed onto the stack twice, because:
-- nothing previously unmarked is ever pushed on the stack
*/
/*
* Push root set
*/
forall_unix_sockets(i, s)
{
int open_count = 0;
/*
* If all instances of the descriptor are not
* in flight we are in use.
*
* Special case: when socket s is an embryo, it may be
* hashed but still not in the queue of the listening socket.
* In this case (see unix_create1()) we set an artificial
* negative inflight counter to close the race window.
* It is a trick of course, and a dirty one.
*/
if (s->sk_socket && s->sk_socket->file)
open_count = file_count(s->sk_socket->file);
if (open_count > atomic_read(&unix_sk(s)->inflight))
maybe_unmark_and_push(s);
}
/*
* Mark phase
*/
while (!empty_stack())
{
struct sock *x = pop_stack();
struct sock *sk;
spin_lock(&x->sk_receive_queue.lock);
skb = skb_peek(&x->sk_receive_queue);
/*
* Loop through all but first born
*/
while (skb && skb != (struct sk_buff *)&x->sk_receive_queue) {
/*
* Do we have file descriptors ?
*/
if(UNIXCB(skb).fp)
{
/*
* Process the descriptors of this socket
*/
int nfd=UNIXCB(skb).fp->count;
struct file **fp = UNIXCB(skb).fp->fp;
while(nfd--)
{
/*
* Get the socket the fd matches if
* it indeed does so
*/
if((sk=unix_get_socket(*fp++))!=NULL)
{
maybe_unmark_and_push(sk);
}
}
}
/* We have to scan not-yet-accepted ones too */
if (x->sk_state == TCP_LISTEN)
maybe_unmark_and_push(skb->sk);
skb=skb->next;
}
spin_unlock(&x->sk_receive_queue.lock);
sock_put(x);
}
skb_queue_head_init(&hitlist);
forall_unix_sockets(i, s)
{
struct unix_sock *u = unix_sk(s);
if (u->gc_tree == GC_ORPHAN) {
struct sk_buff *nextsk;
spin_lock(&s->sk_receive_queue.lock);
skb = skb_peek(&s->sk_receive_queue);
while (skb &&
skb != (struct sk_buff *)&s->sk_receive_queue) {
nextsk = skb->next;
/*
* Do we have file descriptors ?
*/
if (UNIXCB(skb).fp) {
__skb_unlink(skb,
&s->sk_receive_queue);
__skb_queue_tail(&hitlist, skb);
}
skb = nextsk;
}
spin_unlock(&s->sk_receive_queue.lock);
}
u->gc_tree = GC_ORPHAN;
}
spin_unlock(&unix_table_lock);
/*
* Here we are. Hitlist is filled. Die.
*/
__skb_queue_purge(&hitlist);
mutex_unlock(&unix_gc_sem);
} | 1 | [] | linux-2.6 | 1fd05ba5a2f2aa8e7b9b52ef55df850e2e7d54c9 | 33,298,602,564,084,207,000,000,000,000,000,000,000 | 143 | [AF_UNIX]: Rewrite garbage collector, fixes race.
Throw out the old mark & sweep garbage collector and put in a
refcounting cycle detecting one.
The old one had a race with recvmsg, which resulted in false positives
and hence data loss. The old algorithm operated on all unix sockets
in the system, so any additional locking would have meant performance
problems for all users of these.
The new algorithm instead only operates on "in flight" sockets, which
are very rare, and the additional locking for these doesn't negatively
impact the vast majority of users.
In fact it's probable that there weren't *any* heavy senders of
sockets over sockets; otherwise the above race would have been
discovered long ago.
The patch works OK with the app that exposed the race with the old
code. The garbage collection has also been verified to work in a few
simple cases.
Signed-off-by: Miklos Szeredi <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
void unix_inflight(struct file *fp)
{
struct sock *s = unix_get_socket(fp);
if(s) {
atomic_inc(&unix_sk(s)->inflight);
atomic_inc(&unix_tot_inflight);
}
} | 1 | [] | linux-2.6 | 1fd05ba5a2f2aa8e7b9b52ef55df850e2e7d54c9 | 202,854,582,157,076,370,000,000,000,000,000,000,000 | 8 | [AF_UNIX]: Rewrite garbage collector, fixes race.
Throw out the old mark & sweep garbage collector and put in a
refcounting cycle detecting one.
The old one had a race with recvmsg, which resulted in false positives
and hence data loss. The old algorithm operated on all unix sockets
in the system, so any additional locking would have meant performance
problems for all users of these.
The new algorithm instead only operates on "in flight" sockets, which
are very rare, and the additional locking for these doesn't negatively
impact the vast majority of users.
In fact it's probable that there weren't *any* heavy senders of
sockets over sockets; otherwise the above race would have been
discovered long ago.
The patch works OK with the app that exposed the race with the old
code. The garbage collection has also been verified to work in a few
simple cases.
Signed-off-by: Miklos Szeredi <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
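In the rewrite, the counting helpers also maintain the gc_inflight_list that the candidate pass sketched earlier iterates. A sketch of the replacement pair, assuming the unix_gc_lock spinlock the rewrite introduces:
void unix_inflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		struct unix_sock *u = unix_sk(s);
		spin_lock(&unix_gc_lock);
		if (atomic_inc_return(&u->inflight) == 1) {
			/* First in-flight reference: track the socket. */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			BUG_ON(list_empty(&u->link));
		}
		atomic_inc(&unix_tot_inflight);
		spin_unlock(&unix_gc_lock);
	}
}
void unix_notinflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		struct unix_sock *u = unix_sk(s);
		spin_lock(&unix_gc_lock);
		BUG_ON(list_empty(&u->link));
		/* Last in-flight reference gone: stop tracking it. */
		if (atomic_dec_and_test(&u->inflight))
			list_del_init(&u->link);
		atomic_dec(&unix_tot_inflight);
		spin_unlock(&unix_gc_lock);
	}
}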
static inline struct sock *pop_stack(void)
{
struct sock *p = gc_current;
gc_current = unix_sk(p)->gc_tree;
return p;
} | 1 | [] | linux-2.6 | 1fd05ba5a2f2aa8e7b9b52ef55df850e2e7d54c9 | 253,841,771,000,451,100,000,000,000,000,000,000,000 | 6 | [AF_UNIX]: Rewrite garbage collector, fixes race.
Throw out the old mark & sweep garbage collector and put in a
refcounting cycle detecting one.
The old one had a race with recvmsg, which resulted in false positives
and hence data loss. The old algorithm operated on all unix sockets
in the system, so any additional locking would have meant performance
problems for all users of these.
The new algorithm instead only operates on "in flight" sockets, which
are very rare, and the additional locking for these doesn't negatively
impact the vast majority of users.
In fact it's probable that there weren't *any* heavy senders of
sockets over sockets; otherwise the above race would have been
discovered long ago.
The patch works OK with the app that exposed the race with the old
code. The garbage collection has also been verified to work in a few
simple cases.
Signed-off-by: Miklos Szeredi <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static struct sock * unix_create1(struct socket *sock)
{
struct sock *sk = NULL;
struct unix_sock *u;
if (atomic_read(&unix_nr_socks) >= 2*get_max_files())
goto out;
sk = sk_alloc(PF_UNIX, GFP_KERNEL, &unix_proto, 1);
if (!sk)
goto out;
atomic_inc(&unix_nr_socks);
sock_init_data(sock,sk);
lockdep_set_class(&sk->sk_receive_queue.lock,
&af_unix_sk_receive_queue_lock_key);
sk->sk_write_space = unix_write_space;
sk->sk_max_ack_backlog = sysctl_unix_max_dgram_qlen;
sk->sk_destruct = unix_sock_destructor;
u = unix_sk(sk);
u->dentry = NULL;
u->mnt = NULL;
spin_lock_init(&u->lock);
atomic_set(&u->inflight, sock ? 0 : -1);
mutex_init(&u->readlock); /* single task reading lock */
init_waitqueue_head(&u->peer_wait);
unix_insert_socket(unix_sockets_unbound, sk);
out:
return sk;
} | 1 | [] | linux-2.6 | 1fd05ba5a2f2aa8e7b9b52ef55df850e2e7d54c9 | 220,022,929,393,850,460,000,000,000,000,000,000,000 | 32 | [AF_UNIX]: Rewrite garbage collector, fixes race.
Throw out the old mark & sweep garbage collector and put in a
refcounting cycle detecting one.
The old one had a race with recvmsg, which resulted in false positives
and hence data loss. The old algorithm operated on all unix sockets
in the system, so any additional locking would have meant performance
problems for all users of these.
The new algorithm instead only operates on "in flight" sockets, which
are very rare, and the additional locking for these doesn't negatively
impact the vast majority of users.
In fact it's probable that there weren't *any* heavy senders of
sockets over sockets; otherwise the above race would have been
discovered long ago.
The patch works OK with the app that exposed the race with the old
code. The garbage collection has also been verified to work in a few
simple cases.
Signed-off-by: Miklos Szeredi <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
void unix_notinflight(struct file *fp)
{
struct sock *s = unix_get_socket(fp);
if(s) {
atomic_dec(&unix_sk(s)->inflight);
atomic_dec(&unix_tot_inflight);
}
} | 1 | [] | linux-2.6 | 1fd05ba5a2f2aa8e7b9b52ef55df850e2e7d54c9 | 282,904,169,275,275,830,000,000,000,000,000,000,000 | 8 | [AF_UNIX]: Rewrite garbage collector, fixes race.
Throw out the old mark & sweep garbage collector and put in a
refcounting cycle detecting one.
The old one had a race with recvmsg, which resulted in false positives
and hence data loss. The old algorithm operated on all unix sockets
in the system, so any additional locking would have meant performance
problems for all users of these.
The new algorithm instead only operates on "in flight" sockets, which
are very rare, and the additional locking for these doesn't negatively
impact the vast majority of users.
In fact it's probable that there weren't *any* heavy senders of
sockets over sockets; otherwise the above race would have been
discovered long ago.
The patch works OK with the app that exposed the race with the old
code. The garbage collection has also been verified to work in a few
simple cases.
Signed-off-by: Miklos Szeredi <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int uvc_parse_format(struct uvc_device *dev,
struct uvc_streaming *streaming, struct uvc_format *format,
__u32 **intervals, unsigned char *buffer, int buflen)
{
struct usb_interface *intf = streaming->intf;
struct usb_host_interface *alts = intf->cur_altsetting;
struct uvc_format_desc *fmtdesc;
struct uvc_frame *frame;
const unsigned char *start = buffer;
unsigned int interval;
unsigned int i, n;
__u8 ftype;
format->type = buffer[2];
format->index = buffer[3];
switch (buffer[2]) {
case VS_FORMAT_UNCOMPRESSED:
case VS_FORMAT_FRAME_BASED:
if (buflen < 27) {
uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d FORMAT error\n",
dev->udev->devnum,
alts->desc.bInterfaceNumber);
return -EINVAL;
}
/* Find the format descriptor from its GUID. */
fmtdesc = uvc_format_by_guid(&buffer[5]);
if (fmtdesc != NULL) {
strncpy(format->name, fmtdesc->name,
sizeof format->name);
format->fcc = fmtdesc->fcc;
} else {
uvc_printk(KERN_INFO, "Unknown video format "
UVC_GUID_FORMAT "\n",
UVC_GUID_ARGS(&buffer[5]));
snprintf(format->name, sizeof format->name,
UVC_GUID_FORMAT, UVC_GUID_ARGS(&buffer[5]));
format->fcc = 0;
}
format->bpp = buffer[21];
if (buffer[2] == VS_FORMAT_UNCOMPRESSED) {
ftype = VS_FRAME_UNCOMPRESSED;
} else {
ftype = VS_FRAME_FRAME_BASED;
if (buffer[27])
format->flags = UVC_FMT_FLAG_COMPRESSED;
}
break;
case VS_FORMAT_MJPEG:
if (buflen < 11) {
uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d FORMAT error\n",
dev->udev->devnum,
alts->desc.bInterfaceNumber);
return -EINVAL;
}
strncpy(format->name, "MJPEG", sizeof format->name);
format->fcc = V4L2_PIX_FMT_MJPEG;
format->flags = UVC_FMT_FLAG_COMPRESSED;
format->bpp = 0;
ftype = VS_FRAME_MJPEG;
break;
case VS_FORMAT_DV:
if (buflen < 9) {
uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d FORMAT error\n",
dev->udev->devnum,
alts->desc.bInterfaceNumber);
return -EINVAL;
}
switch (buffer[8] & 0x7f) {
case 0:
strncpy(format->name, "SD-DV", sizeof format->name);
break;
case 1:
strncpy(format->name, "SDL-DV", sizeof format->name);
break;
case 2:
strncpy(format->name, "HD-DV", sizeof format->name);
break;
default:
uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d: unknown DV format %u\n",
dev->udev->devnum,
alts->desc.bInterfaceNumber, buffer[8]);
return -EINVAL;
}
strncat(format->name, buffer[8] & (1 << 7) ? " 60Hz" : " 50Hz",
sizeof format->name);
format->fcc = V4L2_PIX_FMT_DV;
format->flags = UVC_FMT_FLAG_COMPRESSED | UVC_FMT_FLAG_STREAM;
format->bpp = 0;
ftype = 0;
/* Create a dummy frame descriptor. */
frame = &format->frame[0];
memset(&format->frame[0], 0, sizeof format->frame[0]);
frame->bFrameIntervalType = 1;
frame->dwDefaultFrameInterval = 1;
frame->dwFrameInterval = *intervals;
*(*intervals)++ = 1;
format->nframes = 1;
break;
case VS_FORMAT_MPEG2TS:
case VS_FORMAT_STREAM_BASED:
/* Not supported yet. */
default:
uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d unsupported format %u\n",
dev->udev->devnum, alts->desc.bInterfaceNumber,
buffer[2]);
return -EINVAL;
}
uvc_trace(UVC_TRACE_DESCR, "Found format %s.\n", format->name);
buflen -= buffer[0];
buffer += buffer[0];
/* Parse the frame descriptors. Only uncompressed, MJPEG and frame
* based formats have frame descriptors.
*/
while (buflen > 2 && buffer[2] == ftype) {
frame = &format->frame[format->nframes];
if (ftype != VS_FRAME_FRAME_BASED)
n = buflen > 25 ? buffer[25] : 0;
else
n = buflen > 21 ? buffer[21] : 0;
n = n ? n : 3;
if (buflen < 26 + 4*n) {
uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d FRAME error\n", dev->udev->devnum,
alts->desc.bInterfaceNumber);
return -EINVAL;
}
frame->bFrameIndex = buffer[3];
frame->bmCapabilities = buffer[4];
frame->wWidth = le16_to_cpup((__le16 *)&buffer[5]);
frame->wHeight = le16_to_cpup((__le16 *)&buffer[7]);
frame->dwMinBitRate = le32_to_cpup((__le32 *)&buffer[9]);
frame->dwMaxBitRate = le32_to_cpup((__le32 *)&buffer[13]);
if (ftype != VS_FRAME_FRAME_BASED) {
frame->dwMaxVideoFrameBufferSize =
le32_to_cpup((__le32 *)&buffer[17]);
frame->dwDefaultFrameInterval =
le32_to_cpup((__le32 *)&buffer[21]);
frame->bFrameIntervalType = buffer[25];
} else {
frame->dwMaxVideoFrameBufferSize = 0;
frame->dwDefaultFrameInterval =
le32_to_cpup((__le32 *)&buffer[17]);
frame->bFrameIntervalType = buffer[21];
}
frame->dwFrameInterval = *intervals;
/* Several UVC chipsets screw up dwMaxVideoFrameBufferSize
* completely. Observed behaviours range from setting the
value to 1.1x the actual frame size, to hardwiring the
* 16 low bits to 0. This results in a higher than necessary
* memory usage as well as a wrong image size information. For
* uncompressed formats this can be fixed by computing the
* value from the frame size.
*/
if (!(format->flags & UVC_FMT_FLAG_COMPRESSED))
frame->dwMaxVideoFrameBufferSize = format->bpp
* frame->wWidth * frame->wHeight / 8;
/* Some bogus devices report dwMinFrameInterval equal to
* dwMaxFrameInterval and have dwFrameIntervalStep set to
* zero. Setting all null intervals to 1 fixes the problem and
* some other divisions by zero which could happen.
*/
for (i = 0; i < n; ++i) {
interval = le32_to_cpup((__le32 *)&buffer[26+4*i]);
*(*intervals)++ = interval ? interval : 1;
}
/* Make sure that the default frame interval stays between
* the boundaries.
*/
n -= frame->bFrameIntervalType ? 1 : 2;
frame->dwDefaultFrameInterval =
min(frame->dwFrameInterval[n],
max(frame->dwFrameInterval[0],
frame->dwDefaultFrameInterval));
uvc_trace(UVC_TRACE_DESCR, "- %ux%u (%u.%u fps)\n",
frame->wWidth, frame->wHeight,
10000000/frame->dwDefaultFrameInterval,
(100000000/frame->dwDefaultFrameInterval)%10);
format->nframes++;
buflen -= buffer[0];
buffer += buffer[0];
}
if (buflen > 2 && buffer[2] == VS_STILL_IMAGE_FRAME) {
buflen -= buffer[0];
buffer += buffer[0];
}
if (buflen > 2 && buffer[2] == VS_COLORFORMAT) {
if (buflen < 6) {
uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d COLORFORMAT error\n",
dev->udev->devnum,
alts->desc.bInterfaceNumber);
return -EINVAL;
}
format->colorspace = uvc_colorspace(buffer[3]);
buflen -= buffer[0];
buffer += buffer[0];
}
return buffer - start;
} | 1 | [
"CWE-120"
] | linux-2.6 | 233548a2fd934a0220db8b1521c0bc88c82e5e53 | 292,611,018,362,037,460,000,000,000,000,000,000,000 | 233 | V4L/DVB (8207): uvcvideo: Fix a buffer overflow in format descriptor parsing
Thanks to Oliver Neukum for catching and reporting this bug.
Signed-off-by: Laurent Pinchart <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
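The overflow here comes from trusting descriptor-supplied counts when indexing the preallocated format->frame[] and interval arrays. One defensive shape for the frame-parsing loop - a sketch, not the literal upstream diff, with nframes_alloc standing in for whatever bound the caller used when sizing the arrays:
while (buflen > 2 && buffer[2] == ftype) {
	/* Hypothetical guard: never parse more frame descriptors
	 * than were allocated for this format. */
	if (format->nframes >= nframes_alloc) {
		uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
			"interface %d: too many frame descriptors, "
			"ignoring the rest\n", dev->udev->devnum,
			alts->desc.bInterfaceNumber);
		break;
	}
	frame = &format->frame[format->nframes];
	/* ... existing length checks and field parsing unchanged ... */
}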
static int get_file_caps(struct linux_binprm *bprm)
{
struct dentry *dentry;
int rc = 0;
struct vfs_cap_data vcaps;
struct inode *inode;
if (bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID) {
bprm_clear_caps(bprm);
return 0;
}
dentry = dget(bprm->file->f_dentry);
inode = dentry->d_inode;
if (!inode->i_op || !inode->i_op->getxattr)
goto out;
rc = inode->i_op->getxattr(dentry, XATTR_NAME_CAPS, &vcaps,
XATTR_CAPS_SZ);
if (rc == -ENODATA || rc == -EOPNOTSUPP) {
/* no data, that's ok */
rc = 0;
goto out;
}
if (rc < 0)
goto out;
rc = cap_from_disk(&vcaps, bprm, rc);
if (rc == -EINVAL)
printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n",
__func__, rc, bprm->filename);
out:
dput(dentry);
if (rc)
bprm_clear_caps(bprm);
return rc;
} | 1 | [] | linux-2.6 | 3318a386e4ca68c76e0294363d29bdc46fcad670 | 27,356,392,919,344,324,000,000,000,000,000,000,000 | 39 | file caps: always start with clear bprm->caps_*
Linux doesn't honor setuid on scripts; however, it mistakenly
behaves differently for file capabilities.
This patch fixes that behavior by making sure that get_file_caps()
begins with empty bprm->caps_*. That way when a script is loaded,
its bprm->caps_* may be filled when binfmt_misc calls prepare_binprm(),
but they will be cleared again when binfmt_elf calls prepare_binprm()
next to read the interpreter's file capabilities.
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: David Howells <[email protected]>
Acked-by: Andrew G. Morgan <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
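The fix is a one-line reordering: clear the caps before any early return, so the NOSUID shortcut can no longer leave a script's capabilities behind for the interpreter. A trimmed sketch of the patched entry point (the xattr path is elided, not changed):
static int get_file_caps(struct linux_binprm *bprm)
{
	/*
	 * Start from a clean slate: whatever an earlier
	 * prepare_binprm() call (e.g. for a script routed through
	 * binfmt_misc) computed must not leak into the
	 * interpreter's credentials.
	 */
	bprm_clear_caps(bprm);
	if (bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)
		return 0;
	/* ... unchanged: read XATTR_NAME_CAPS, feed cap_from_disk() ... */
	return 0;
}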
static inline int get_file_caps(struct linux_binprm *bprm)
{
bprm_clear_caps(bprm);
return 0;
} | 1 | [] | linux-2.6 | 3318a386e4ca68c76e0294363d29bdc46fcad670 | 183,133,645,747,739,020,000,000,000,000,000,000,000 | 5 | file caps: always start with clear bprm->caps_*
Linux doesn't honor setuid on scripts; however, it mistakenly
behaves differently for file capabilities.
This patch fixes that behavior by making sure that get_file_caps()
begins with empty bprm->caps_*. That way when a script is loaded,
its bprm->caps_* may be filled when binfmt_misc calls prepare_binprm(),
but they will be cleared again when binfmt_elf calls prepare_binprm()
next to read the interpreter's file capabilities.
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: David Howells <[email protected]>
Acked-by: Andrew G. Morgan <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static int lbs_process_bss(struct bss_descriptor *bss,
uint8_t **pbeaconinfo, int *bytesleft)
{
struct ieeetypes_fhparamset *pFH;
struct ieeetypes_dsparamset *pDS;
struct ieeetypes_cfparamset *pCF;
struct ieeetypes_ibssparamset *pibss;
DECLARE_MAC_BUF(mac);
struct ieeetypes_countryinfoset *pcountryinfo;
uint8_t *pos, *end, *p;
uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0;
uint16_t beaconsize = 0;
int ret;
lbs_deb_enter(LBS_DEB_SCAN);
if (*bytesleft >= sizeof(beaconsize)) {
/* Extract & convert beacon size from the command buffer */
beaconsize = get_unaligned_le16(*pbeaconinfo);
*bytesleft -= sizeof(beaconsize);
*pbeaconinfo += sizeof(beaconsize);
}
if (beaconsize == 0 || beaconsize > *bytesleft) {
*pbeaconinfo += *bytesleft;
*bytesleft = 0;
ret = -1;
goto done;
}
/* Initialize the current working beacon pointer for this BSS iteration */
pos = *pbeaconinfo;
end = pos + beaconsize;
/* Advance the return beacon pointer past the current beacon */
*pbeaconinfo += beaconsize;
*bytesleft -= beaconsize;
memcpy(bss->bssid, pos, ETH_ALEN);
lbs_deb_scan("process_bss: BSSID %s\n", print_mac(mac, bss->bssid));
pos += ETH_ALEN;
if ((end - pos) < 12) {
lbs_deb_scan("process_bss: Not enough bytes left\n");
ret = -1;
goto done;
}
/*
* next 4 fields are RSSI, time stamp, beacon interval,
* and capability information
*/
/* RSSI is 1 byte long */
bss->rssi = *pos;
lbs_deb_scan("process_bss: RSSI %d\n", *pos);
pos++;
/* time stamp is 8 bytes long */
pos += 8;
/* beacon interval is 2 bytes long */
bss->beaconperiod = get_unaligned_le16(pos);
pos += 2;
/* capability information is 2 bytes long */
bss->capability = get_unaligned_le16(pos);
lbs_deb_scan("process_bss: capabilities 0x%04x\n", bss->capability);
pos += 2;
if (bss->capability & WLAN_CAPABILITY_PRIVACY)
lbs_deb_scan("process_bss: WEP enabled\n");
if (bss->capability & WLAN_CAPABILITY_IBSS)
bss->mode = IW_MODE_ADHOC;
else
bss->mode = IW_MODE_INFRA;
/* rest of the current buffer are IE's */
lbs_deb_scan("process_bss: IE len %zd\n", end - pos);
lbs_deb_hex(LBS_DEB_SCAN, "process_bss: IE info", pos, end - pos);
/* process variable IE */
while (pos <= end - 2) {
struct ieee80211_info_element * elem = (void *)pos;
if (pos + elem->len > end) {
lbs_deb_scan("process_bss: error in processing IE, "
"bytes left < IE length\n");
break;
}
switch (elem->id) {
case MFIE_TYPE_SSID:
bss->ssid_len = elem->len;
memcpy(bss->ssid, elem->data, elem->len);
lbs_deb_scan("got SSID IE: '%s', len %u\n",
escape_essid(bss->ssid, bss->ssid_len),
bss->ssid_len);
break;
case MFIE_TYPE_RATES:
n_basic_rates = min_t(uint8_t, MAX_RATES, elem->len);
memcpy(bss->rates, elem->data, n_basic_rates);
got_basic_rates = 1;
lbs_deb_scan("got RATES IE\n");
break;
case MFIE_TYPE_FH_SET:
pFH = (struct ieeetypes_fhparamset *) pos;
memmove(&bss->phyparamset.fhparamset, pFH,
sizeof(struct ieeetypes_fhparamset));
lbs_deb_scan("got FH IE\n");
break;
case MFIE_TYPE_DS_SET:
pDS = (struct ieeetypes_dsparamset *) pos;
bss->channel = pDS->currentchan;
memcpy(&bss->phyparamset.dsparamset, pDS,
sizeof(struct ieeetypes_dsparamset));
lbs_deb_scan("got DS IE, channel %d\n", bss->channel);
break;
case MFIE_TYPE_CF_SET:
pCF = (struct ieeetypes_cfparamset *) pos;
memcpy(&bss->ssparamset.cfparamset, pCF,
sizeof(struct ieeetypes_cfparamset));
lbs_deb_scan("got CF IE\n");
break;
case MFIE_TYPE_IBSS_SET:
pibss = (struct ieeetypes_ibssparamset *) pos;
bss->atimwindow = le16_to_cpu(pibss->atimwindow);
memmove(&bss->ssparamset.ibssparamset, pibss,
sizeof(struct ieeetypes_ibssparamset));
lbs_deb_scan("got IBSS IE\n");
break;
case MFIE_TYPE_COUNTRY:
pcountryinfo = (struct ieeetypes_countryinfoset *) pos;
lbs_deb_scan("got COUNTRY IE\n");
if (pcountryinfo->len < sizeof(pcountryinfo->countrycode)
|| pcountryinfo->len > 254) {
lbs_deb_scan("process_bss: 11D- Err CountryInfo len %d, min %zd, max 254\n",
pcountryinfo->len, sizeof(pcountryinfo->countrycode));
ret = -1;
goto done;
}
memcpy(&bss->countryinfo, pcountryinfo, pcountryinfo->len + 2);
lbs_deb_hex(LBS_DEB_SCAN, "process_bss: 11d countryinfo",
(uint8_t *) pcountryinfo,
(int) (pcountryinfo->len + 2));
break;
case MFIE_TYPE_RATES_EX:
/* only process extended supported rate if data rate is
* already found. Data rate IE should come before
* extended supported rate IE
*/
lbs_deb_scan("got RATESEX IE\n");
if (!got_basic_rates) {
lbs_deb_scan("... but ignoring it\n");
break;
}
n_ex_rates = elem->len;
if (n_basic_rates + n_ex_rates > MAX_RATES)
n_ex_rates = MAX_RATES - n_basic_rates;
p = bss->rates + n_basic_rates;
memcpy(p, elem->data, n_ex_rates);
break;
case MFIE_TYPE_GENERIC:
if (elem->len >= 4 &&
elem->data[0] == 0x00 && elem->data[1] == 0x50 &&
elem->data[2] == 0xf2 && elem->data[3] == 0x01) {
bss->wpa_ie_len = min(elem->len + 2, MAX_WPA_IE_LEN);
memcpy(bss->wpa_ie, elem, bss->wpa_ie_len);
lbs_deb_scan("got WPA IE\n");
lbs_deb_hex(LBS_DEB_SCAN, "WPA IE", bss->wpa_ie, elem->len);
} else if (elem->len >= MARVELL_MESH_IE_LENGTH &&
elem->data[0] == 0x00 && elem->data[1] == 0x50 &&
elem->data[2] == 0x43 && elem->data[3] == 0x04) {
lbs_deb_scan("got mesh IE\n");
bss->mesh = 1;
} else {
lbs_deb_scan("got generic IE: %02x:%02x:%02x:%02x, len %d\n",
elem->data[0], elem->data[1],
elem->data[2], elem->data[3],
elem->len);
}
break;
case MFIE_TYPE_RSN:
lbs_deb_scan("got RSN IE\n");
bss->rsn_ie_len = min(elem->len + 2, MAX_WPA_IE_LEN);
memcpy(bss->rsn_ie, elem, bss->rsn_ie_len);
lbs_deb_hex(LBS_DEB_SCAN, "process_bss: RSN_IE",
bss->rsn_ie, elem->len);
break;
default:
lbs_deb_scan("got IE 0x%04x, len %d\n",
elem->id, elem->len);
break;
}
pos += elem->len + 2;
}
/* Timestamp */
bss->last_scanned = jiffies;
lbs_unset_basic_rate_flags(bss->rates, sizeof(bss->rates));
ret = 0;
done:
lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
return ret;
} | 1 | [
"CWE-119"
] | linux-2.6 | 48735d8d8bd701b1e0cd3d49c21e5e385ddcb077 | 41,974,862,988,677,174,000,000,000,000,000,000,000 | 221 | libertas: fix buffer overrun
If somebody sends an invalid beacon/probe response, that can trash the
whole BSS descriptor. The descriptor is, luckily, large enough so that
it cannot scribble past the end of it; it's well above 400 bytes long.
Signed-off-by: Johannes Berg <[email protected]>
Cc: [email protected] [2.6.24-2.6.27, bug present in some form since driver was added (2.6.22)]
Signed-off-by: John W. Linville <[email protected]> |
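Concretely, the dangerous copies are the unclamped memcpy()s into fixed-size members of bss, the SSID one being the clearest. A sketch of the clamped form (32 is the 802.11 SSID maximum; the country-info and rates elements in this function want the same treatment):
case MFIE_TYPE_SSID:
	/* Never trust elem->len further than the destination buffer. */
	bss->ssid_len = min_t(int, 32, elem->len);
	memcpy(bss->ssid, elem->data, bss->ssid_len);
	break;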
static int tvaudio_get_ctrl(struct CHIPSTATE *chip,
struct v4l2_control *ctrl)
{
struct CHIPDESC *desc = chip->desc;
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
ctrl->value=chip->muted;
return 0;
case V4L2_CID_AUDIO_VOLUME:
if (!(desc->flags & CHIP_HAS_VOLUME))
break;
ctrl->value = max(chip->left,chip->right);
return 0;
case V4L2_CID_AUDIO_BALANCE:
{
int volume;
if (!(desc->flags & CHIP_HAS_VOLUME))
break;
volume = max(chip->left,chip->right);
if (volume)
ctrl->value=(32768*min(chip->left,chip->right))/volume;
else
ctrl->value=32768;
return 0;
}
case V4L2_CID_AUDIO_BASS:
if (desc->flags & CHIP_HAS_BASSTREBLE)
break;
ctrl->value = chip->bass;
return 0;
case V4L2_CID_AUDIO_TREBLE:
if (desc->flags & CHIP_HAS_BASSTREBLE)
return -EINVAL;
ctrl->value = chip->treble;
return 0;
}
return -EINVAL;
} | 1 | [
"CWE-399"
] | linux-2.6 | 01a1a3cc1e3fbe718bd06a2a5d4d1a2d0fb4d7d9 | 306,448,464,959,738,200,000,000,000,000,000,000,000 | 39 | V4L/DVB (9624): CVE-2008-5033: fix OOPS on tvaudio when controlling bass/treble
This bug was supposed to be fixed by 5ba2f67afb02c5302b2898949ed6fc3b3d37dcf1,
where a call to NULL happens.
Not all tvaudio chips allow controlling bass/treble, so the driver
has a table with a flag to indicate whether the chip supports it.
Unfortunately, the handling of this logic was broken for a very long
time (probably since the first module version). Due to that, an OOPS
was generated for devices that don't support bass/treble.
This was the resulting OOPS message before the patch, with debug messages
enabled:
tvaudio' 1-005b: VIDIOC_S_CTRL
BUG: unable to handle kernel NULL pointer dereference at 00000000
IP: [<00000000>]
*pde = 22fda067 *pte = 00000000
Oops: 0000 [#1] SMP
Modules linked in: snd_hda_intel snd_seq_dummy snd_seq_oss snd_seq_midi_event snd_seq snd_seq_device
snd_pcm_oss snd_mixer_oss snd_pcm snd_timer snd_hwdep snd soundcore tuner_simple tuner_types tea5767 tuner
tvaudio bttv bridgebnep rfcomm l2cap bluetooth it87 hwmon_vid hwmon fuse sunrpc ipt_REJECT
nf_conntrack_ipv4 iptable_filter ip_tables ip6t_REJECT xt_tcpudp nf_conntrack_ipv6 xt_state nf_conntrack
ip6table_filter ip6_tables x_tables ipv6 dm_mirrordm_multipath dm_mod configfs videodev v4l1_compat
ir_common 8139cp compat_ioctl32 v4l2_common 8139too videobuf_dma_sg videobuf_core mii btcx_risc tveeprom
i915 button snd_page_alloc serio_raw drm pcspkr i2c_algo_bit i2c_i801 i2c_core iTCO_wdt
iTCO_vendor_support sr_mod cdrom sg ata_generic pata_acpi ata_piix libata sd_mod scsi_mod ext3 jbdmbcache
uhci_hcd ohci_hcd ehci_hcd [last unloaded: soundcore]
Pid: 15413, comm: qv4l2 Not tainted (2.6.25.14-108.fc9.i686 #1)
EIP: 0060:[<00000000>] EFLAGS: 00210246 CPU: 0
EIP is at 0x0
EAX: 00008000 EBX: ebd21600 ECX: e2fd9ec4 EDX: 00200046
ESI: f8c0f0c4 EDI: f8c0f0c4 EBP: e2fd9d50 ESP: e2fd9d2c
DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
Process qv4l2 (pid: 15413, ti=e2fd9000 task=ebe44000 task.ti=e2fd9000)
Stack: f8c0c6ae e2ff2a00 00000d00 e2fd9ec4 ebc4e000 e2fd9d5c f8c0c448 00000000
f899c12a e2fd9d5c f899c154 e2fd9d68 e2fd9d80 c0560185 e2fd9d88 f8f3e1d8
f8f3e1dc ebc4e034 f8f3e18c e2fd9ec4 00000000 e2fd9d90 f899c286 c008561c
Call Trace:
[<f8c0c6ae>] ? chip_command+0x266/0x4b6 [tvaudio]
[<f8c0c448>] ? chip_command+0x0/0x4b6 [tvaudio]
[<f899c12a>] ? i2c_cmd+0x0/0x2f [i2c_core]
[<f899c154>] ? i2c_cmd+0x2a/0x2f [i2c_core]
[<c0560185>] ? device_for_each_child+0x21/0x49
[<f899c286>] ? i2c_clients_command+0x1c/0x1e [i2c_core]
[<f8f283d8>] ? bttv_call_i2c_clients+0x14/0x16 [bttv]
[<f8f23601>] ? bttv_s_ctrl+0x1bc/0x313 [bttv]
[<f8f23445>] ? bttv_s_ctrl+0x0/0x313 [bttv]
[<f8b6096d>] ? __video_do_ioctl+0x1f84/0x3726 [videodev]
[<c05abb4e>] ? sock_aio_write+0x100/0x10d
[<c041b23e>] ? kmap_atomic_prot+0x1dd/0x1df
[<c043a0c9>] ? enqueue_hrtimer+0xc2/0xcd
[<c04f4fa4>] ? copy_from_user+0x39/0x121
[<f8b622b9>] ? __video_ioctl2+0x1aa/0x24a [videodev]
[<c04054fd>] ? do_notify_resume+0x768/0x795
[<c043c0f7>] ? getnstimeofday+0x34/0xd1
[<c0437b77>] ? autoremove_wake_function+0x0/0x33
[<f8b62368>] ? video_ioctl2+0xf/0x13 [videodev]
[<c048c6f0>] ? vfs_ioctl+0x50/0x69
[<c048c942>] ? do_vfs_ioctl+0x239/0x24c
[<c048c995>] ? sys_ioctl+0x40/0x5b
[<c0405bf2>] ? syscall_call+0x7/0xb
[<c0620000>] ? cpuid4_cache_sysfs_exit+0x3d/0x69
=======================
Code: Bad EIP value.
EIP: [<00000000>] 0x0 SS:ESP 0068:e2fd9d2c
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
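The bug itself is just an inverted flag test: tvaudio_get_ctrl() above bails out when the chip *has* bass/treble support and happily reads the registers when it does not. The corrected branches, as a sketch:
case V4L2_CID_AUDIO_BASS:
	if (!(desc->flags & CHIP_HAS_BASSTREBLE))
		break;			/* falls through to -EINVAL */
	ctrl->value = chip->bass;
	return 0;
case V4L2_CID_AUDIO_TREBLE:
	if (!(desc->flags & CHIP_HAS_BASSTREBLE))
		break;
	ctrl->value = chip->treble;
	return 0;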
static int chip_command(struct i2c_client *client,
unsigned int cmd, void *arg)
{
struct CHIPSTATE *chip = i2c_get_clientdata(client);
struct CHIPDESC *desc = chip->desc;
if (debug > 0) {
v4l_i2c_print_ioctl(chip->c, cmd);
printk("\n");
}
switch (cmd) {
case AUDC_SET_RADIO:
chip->radio = 1;
chip->watch_stereo = 0;
/* del_timer(&chip->wt); */
break;
/* --- v4l ioctls --- */
/* take care: bttv does userspace copying, we'll get a
kernel pointer here... */
case VIDIOC_QUERYCTRL:
{
struct v4l2_queryctrl *qc = arg;
switch (qc->id) {
case V4L2_CID_AUDIO_MUTE:
break;
case V4L2_CID_AUDIO_VOLUME:
case V4L2_CID_AUDIO_BALANCE:
if (!(desc->flags & CHIP_HAS_VOLUME))
return -EINVAL;
break;
case V4L2_CID_AUDIO_BASS:
case V4L2_CID_AUDIO_TREBLE:
if (desc->flags & CHIP_HAS_BASSTREBLE)
return -EINVAL;
break;
default:
return -EINVAL;
}
return v4l2_ctrl_query_fill_std(qc);
}
case VIDIOC_S_CTRL:
return tvaudio_set_ctrl(chip, arg);
case VIDIOC_G_CTRL:
return tvaudio_get_ctrl(chip, arg);
case VIDIOC_INT_G_AUDIO_ROUTING:
{
struct v4l2_routing *rt = arg;
rt->input = chip->input;
rt->output = 0;
break;
}
case VIDIOC_INT_S_AUDIO_ROUTING:
{
struct v4l2_routing *rt = arg;
if (!(desc->flags & CHIP_HAS_INPUTSEL) || rt->input >= 4)
return -EINVAL;
/* There are four inputs: tuner, radio, extern and intern. */
chip->input = rt->input;
if (chip->muted)
break;
chip_write_masked(chip, desc->inputreg,
desc->inputmap[chip->input], desc->inputmask);
break;
}
case VIDIOC_S_TUNER:
{
struct v4l2_tuner *vt = arg;
int mode = 0;
if (chip->radio)
break;
switch (vt->audmode) {
case V4L2_TUNER_MODE_MONO:
case V4L2_TUNER_MODE_STEREO:
case V4L2_TUNER_MODE_LANG1:
case V4L2_TUNER_MODE_LANG2:
mode = vt->audmode;
break;
case V4L2_TUNER_MODE_LANG1_LANG2:
mode = V4L2_TUNER_MODE_STEREO;
break;
default:
return -EINVAL;
}
chip->audmode = vt->audmode;
if (desc->setmode && mode) {
chip->watch_stereo = 0;
/* del_timer(&chip->wt); */
chip->mode = mode;
desc->setmode(chip, mode);
}
break;
}
case VIDIOC_G_TUNER:
{
struct v4l2_tuner *vt = arg;
int mode = V4L2_TUNER_MODE_MONO;
if (chip->radio)
break;
vt->audmode = chip->audmode;
vt->rxsubchans = 0;
vt->capability = V4L2_TUNER_CAP_STEREO |
V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
if (desc->getmode)
mode = desc->getmode(chip);
if (mode & V4L2_TUNER_MODE_MONO)
vt->rxsubchans |= V4L2_TUNER_SUB_MONO;
if (mode & V4L2_TUNER_MODE_STEREO)
vt->rxsubchans |= V4L2_TUNER_SUB_STEREO;
/* Note: for SAP it should be mono/lang2 or stereo/lang2.
When this module is converted fully to v4l2, then this
should change for those chips that can detect SAP. */
if (mode & V4L2_TUNER_MODE_LANG1)
vt->rxsubchans = V4L2_TUNER_SUB_LANG1 |
V4L2_TUNER_SUB_LANG2;
break;
}
case VIDIOC_S_STD:
chip->radio = 0;
break;
case VIDIOC_S_FREQUENCY:
chip->mode = 0; /* automatic */
/* For chips that provide getmode and setmode, and doesn't
automatically follows the stereo carrier, a kthread is
created to set the audio standard. In this case, when then
the video channel is changed, tvaudio starts on MONO mode.
After waiting for 2 seconds, the kernel thread is called,
to follow whatever audio standard is pointed by the
audio carrier.
*/
if (chip->thread) {
desc->setmode(chip,V4L2_TUNER_MODE_MONO);
if (chip->prevmode != V4L2_TUNER_MODE_MONO)
chip->prevmode = -1; /* reset previous mode */
mod_timer(&chip->wt, jiffies+msecs_to_jiffies(2000));
}
break;
case VIDIOC_G_CHIP_IDENT:
return v4l2_chip_ident_i2c_client(client, arg, V4L2_IDENT_TVAUDIO, 0);
}
return 0;
} | 1 | [
"CWE-399"
] | linux-2.6 | 01a1a3cc1e3fbe718bd06a2a5d4d1a2d0fb4d7d9 | 306,961,254,196,788,300,000,000,000,000,000,000,000 | 153 | V4L/DVB (9624): CVE-2008-5033: fix OOPS on tvaudio when controlling bass/treble
This bug was supposed to be fixed by 5ba2f67afb02c5302b2898949ed6fc3b3d37dcf1,
where a call to NULL happens.
Not all tvaudio chips allow controlling bass/treble, so the driver
has a table with a flag to indicate whether the chip supports it.
Unfortunately, the handling of this logic was broken for a very long
time (probably since the first module version). Due to that, an OOPS
was generated for devices that don't support bass/treble.
This was the resulting OOPS message before the patch, with debug messages
enabled:
tvaudio' 1-005b: VIDIOC_S_CTRL
BUG: unable to handle kernel NULL pointer dereference at 00000000
IP: [<00000000>]
*pde = 22fda067 *pte = 00000000
Oops: 0000 [#1] SMP
Modules linked in: snd_hda_intel snd_seq_dummy snd_seq_oss snd_seq_midi_event snd_seq snd_seq_device
snd_pcm_oss snd_mixer_oss snd_pcm snd_timer snd_hwdep snd soundcore tuner_simple tuner_types tea5767 tuner
tvaudio bttv bridgebnep rfcomm l2cap bluetooth it87 hwmon_vid hwmon fuse sunrpc ipt_REJECT
nf_conntrack_ipv4 iptable_filter ip_tables ip6t_REJECT xt_tcpudp nf_conntrack_ipv6 xt_state nf_conntrack
ip6table_filter ip6_tables x_tables ipv6 dm_mirrordm_multipath dm_mod configfs videodev v4l1_compat
ir_common 8139cp compat_ioctl32 v4l2_common 8139too videobuf_dma_sg videobuf_core mii btcx_risc tveeprom
i915 button snd_page_alloc serio_raw drm pcspkr i2c_algo_bit i2c_i801 i2c_core iTCO_wdt
iTCO_vendor_support sr_mod cdrom sg ata_generic pata_acpi ata_piix libata sd_mod scsi_mod ext3 jbdmbcache
uhci_hcd ohci_hcd ehci_hcd [last unloaded: soundcore]
Pid: 15413, comm: qv4l2 Not tainted (2.6.25.14-108.fc9.i686 #1)
EIP: 0060:[<00000000>] EFLAGS: 00210246 CPU: 0
EIP is at 0x0
EAX: 00008000 EBX: ebd21600 ECX: e2fd9ec4 EDX: 00200046
ESI: f8c0f0c4 EDI: f8c0f0c4 EBP: e2fd9d50 ESP: e2fd9d2c
DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
Process qv4l2 (pid: 15413, ti=e2fd9000 task=ebe44000 task.ti=e2fd9000)
Stack: f8c0c6ae e2ff2a00 00000d00 e2fd9ec4 ebc4e000 e2fd9d5c f8c0c448 00000000
f899c12a e2fd9d5c f899c154 e2fd9d68 e2fd9d80 c0560185 e2fd9d88 f8f3e1d8
f8f3e1dc ebc4e034 f8f3e18c e2fd9ec4 00000000 e2fd9d90 f899c286 c008561c
Call Trace:
[<f8c0c6ae>] ? chip_command+0x266/0x4b6 [tvaudio]
[<f8c0c448>] ? chip_command+0x0/0x4b6 [tvaudio]
[<f899c12a>] ? i2c_cmd+0x0/0x2f [i2c_core]
[<f899c154>] ? i2c_cmd+0x2a/0x2f [i2c_core]
[<c0560185>] ? device_for_each_child+0x21/0x49
[<f899c286>] ? i2c_clients_command+0x1c/0x1e [i2c_core]
[<f8f283d8>] ? bttv_call_i2c_clients+0x14/0x16 [bttv]
[<f8f23601>] ? bttv_s_ctrl+0x1bc/0x313 [bttv]
[<f8f23445>] ? bttv_s_ctrl+0x0/0x313 [bttv]
[<f8b6096d>] ? __video_do_ioctl+0x1f84/0x3726 [videodev]
[<c05abb4e>] ? sock_aio_write+0x100/0x10d
[<c041b23e>] ? kmap_atomic_prot+0x1dd/0x1df
[<c043a0c9>] ? enqueue_hrtimer+0xc2/0xcd
[<c04f4fa4>] ? copy_from_user+0x39/0x121
[<f8b622b9>] ? __video_ioctl2+0x1aa/0x24a [videodev]
[<c04054fd>] ? do_notify_resume+0x768/0x795
[<c043c0f7>] ? getnstimeofday+0x34/0xd1
[<c0437b77>] ? autoremove_wake_function+0x0/0x33
[<f8b62368>] ? video_ioctl2+0xf/0x13 [videodev]
[<c048c6f0>] ? vfs_ioctl+0x50/0x69
[<c048c942>] ? do_vfs_ioctl+0x239/0x24c
[<c048c995>] ? sys_ioctl+0x40/0x5b
[<c0405bf2>] ? syscall_call+0x7/0xb
[<c0620000>] ? cpuid4_cache_sysfs_exit+0x3d/0x69
=======================
Code: Bad EIP value.
EIP: [<00000000>] 0x0 SS:ESP 0068:e2fd9d2c
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
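The same inverted test sits in the VIDIOC_QUERYCTRL handler of chip_command() above, which is how userspace was told that bass/treble controls exist on chips that cannot do them. The corrected query branch, sketched:
case V4L2_CID_AUDIO_BASS:
case V4L2_CID_AUDIO_TREBLE:
	if (!(desc->flags & CHIP_HAS_BASSTREBLE))
		return -EINVAL;
	break;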
static int tvaudio_set_ctrl(struct CHIPSTATE *chip,
struct v4l2_control *ctrl)
{
struct CHIPDESC *desc = chip->desc;
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
if (ctrl->value < 0 || ctrl->value >= 2)
return -ERANGE;
chip->muted = ctrl->value;
if (chip->muted)
chip_write_masked(chip,desc->inputreg,desc->inputmute,desc->inputmask);
else
chip_write_masked(chip,desc->inputreg,
desc->inputmap[chip->input],desc->inputmask);
return 0;
case V4L2_CID_AUDIO_VOLUME:
{
int volume,balance;
if (!(desc->flags & CHIP_HAS_VOLUME))
break;
volume = max(chip->left,chip->right);
if (volume)
balance=(32768*min(chip->left,chip->right))/volume;
else
balance=32768;
volume=ctrl->value;
chip->left = (min(65536 - balance,32768) * volume) / 32768;
chip->right = (min(balance,volume *(__u16)32768)) / 32768;
chip_write(chip,desc->leftreg,desc->volfunc(chip->left));
chip_write(chip,desc->rightreg,desc->volfunc(chip->right));
return 0;
}
case V4L2_CID_AUDIO_BALANCE:
{
int volume, balance;
if (!(desc->flags & CHIP_HAS_VOLUME))
break;
volume = max(chip->left,chip->right);
balance = ctrl->value;
chip_write(chip,desc->leftreg,desc->volfunc(chip->left));
chip_write(chip,desc->rightreg,desc->volfunc(chip->right));
return 0;
}
case V4L2_CID_AUDIO_BASS:
if (desc->flags & CHIP_HAS_BASSTREBLE)
break;
chip->bass = ctrl->value;
chip_write(chip,desc->bassreg,desc->bassfunc(chip->bass));
return 0;
case V4L2_CID_AUDIO_TREBLE:
if (desc->flags & CHIP_HAS_BASSTREBLE)
return -EINVAL;
chip->treble = ctrl->value;
chip_write(chip,desc->treblereg,desc->treblefunc(chip->treble));
return 0;
}
return -EINVAL;
} | 1 | [
"CWE-399"
] | linux-2.6 | 01a1a3cc1e3fbe718bd06a2a5d4d1a2d0fb4d7d9 | 189,088,603,958,465,900,000,000,000,000,000,000,000 | 70 | V4L/DVB (9624): CVE-2008-5033: fix OOPS on tvaudio when controlling bass/treble
This bug was supposed to be fixed by 5ba2f67afb02c5302b2898949ed6fc3b3d37dcf1,
where a call to NULL happens.
Not all tvaudio chips allow controlling bass/treble, so the driver
has a table with a flag to indicate whether the chip supports it.
Unfortunately, the handling of this logic was broken for a very long
time (probably since the first module version). Due to that, an OOPS
was generated for devices that don't support bass/treble.
This was the resulting OOPS message before the patch, with debug messages
enabled:
tvaudio' 1-005b: VIDIOC_S_CTRL
BUG: unable to handle kernel NULL pointer dereference at 00000000
IP: [<00000000>]
*pde = 22fda067 *pte = 00000000
Oops: 0000 [#1] SMP
Modules linked in: snd_hda_intel snd_seq_dummy snd_seq_oss snd_seq_midi_event snd_seq snd_seq_device
snd_pcm_oss snd_mixer_oss snd_pcm snd_timer snd_hwdep snd soundcore tuner_simple tuner_types tea5767 tuner
tvaudio bttv bridgebnep rfcomm l2cap bluetooth it87 hwmon_vid hwmon fuse sunrpc ipt_REJECT
nf_conntrack_ipv4 iptable_filter ip_tables ip6t_REJECT xt_tcpudp nf_conntrack_ipv6 xt_state nf_conntrack
ip6table_filter ip6_tables x_tables ipv6 dm_mirrordm_multipath dm_mod configfs videodev v4l1_compat
ir_common 8139cp compat_ioctl32 v4l2_common 8139too videobuf_dma_sg videobuf_core mii btcx_risc tveeprom
i915 button snd_page_alloc serio_raw drm pcspkr i2c_algo_bit i2c_i801 i2c_core iTCO_wdt
iTCO_vendor_support sr_mod cdrom sg ata_generic pata_acpi ata_piix libata sd_mod scsi_mod ext3 jbdmbcache
uhci_hcd ohci_hcd ehci_hcd [last unloaded: soundcore]
Pid: 15413, comm: qv4l2 Not tainted (2.6.25.14-108.fc9.i686 #1)
EIP: 0060:[<00000000>] EFLAGS: 00210246 CPU: 0
EIP is at 0x0
EAX: 00008000 EBX: ebd21600 ECX: e2fd9ec4 EDX: 00200046
ESI: f8c0f0c4 EDI: f8c0f0c4 EBP: e2fd9d50 ESP: e2fd9d2c
DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
Process qv4l2 (pid: 15413, ti=e2fd9000 task=ebe44000 task.ti=e2fd9000)
Stack: f8c0c6ae e2ff2a00 00000d00 e2fd9ec4 ebc4e000 e2fd9d5c f8c0c448 00000000
f899c12a e2fd9d5c f899c154 e2fd9d68 e2fd9d80 c0560185 e2fd9d88 f8f3e1d8
f8f3e1dc ebc4e034 f8f3e18c e2fd9ec4 00000000 e2fd9d90 f899c286 c008561c
Call Trace:
[<f8c0c6ae>] ? chip_command+0x266/0x4b6 [tvaudio]
[<f8c0c448>] ? chip_command+0x0/0x4b6 [tvaudio]
[<f899c12a>] ? i2c_cmd+0x0/0x2f [i2c_core]
[<f899c154>] ? i2c_cmd+0x2a/0x2f [i2c_core]
[<c0560185>] ? device_for_each_child+0x21/0x49
[<f899c286>] ? i2c_clients_command+0x1c/0x1e [i2c_core]
[<f8f283d8>] ? bttv_call_i2c_clients+0x14/0x16 [bttv]
[<f8f23601>] ? bttv_s_ctrl+0x1bc/0x313 [bttv]
[<f8f23445>] ? bttv_s_ctrl+0x0/0x313 [bttv]
[<f8b6096d>] ? __video_do_ioctl+0x1f84/0x3726 [videodev]
[<c05abb4e>] ? sock_aio_write+0x100/0x10d
[<c041b23e>] ? kmap_atomic_prot+0x1dd/0x1df
[<c043a0c9>] ? enqueue_hrtimer+0xc2/0xcd
[<c04f4fa4>] ? copy_from_user+0x39/0x121
[<f8b622b9>] ? __video_ioctl2+0x1aa/0x24a [videodev]
[<c04054fd>] ? do_notify_resume+0x768/0x795
[<c043c0f7>] ? getnstimeofday+0x34/0xd1
[<c0437b77>] ? autoremove_wake_function+0x0/0x33
[<f8b62368>] ? video_ioctl2+0xf/0x13 [videodev]
[<c048c6f0>] ? vfs_ioctl+0x50/0x69
[<c048c942>] ? do_vfs_ioctl+0x239/0x24c
[<c048c995>] ? sys_ioctl+0x40/0x5b
[<c0405bf2>] ? syscall_call+0x7/0xb
[<c0620000>] ? cpuid4_cache_sysfs_exit+0x3d/0x69
=======================
Code: Bad EIP value.
EIP: [<00000000>] 0x0 SS:ESP 0068:e2fd9d2c
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
static int chip_write_masked(struct CHIPSTATE *chip, int subaddr, int val, int mask)
{
if (mask != 0) {
if (-1 == subaddr) {
val = (chip->shadow.bytes[1] & ~mask) | (val & mask);
} else {
val = (chip->shadow.bytes[subaddr+1] & ~mask) | (val & mask);
}
}
return chip_write(chip, subaddr, val);
} | 1 | [] | linux-2.6 | 494264379d186bf806613d27aafb7d88d42f4212 | 17,523,845,146,844,268,000,000,000,000,000,000,000 | 11 | V4L/DVB (9621): Avoid writing outside shadow.bytes[] array
There were no check about the limits of shadow.bytes array. This offers
a risk of writing values outside the limits, overriding other data
areas.
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
static int chip_write(struct CHIPSTATE *chip, int subaddr, int val)
{
unsigned char buffer[2];
if (-1 == subaddr) {
v4l_dbg(1, debug, chip->c, "%s: chip_write: 0x%x\n",
chip->c->name, val);
chip->shadow.bytes[1] = val;
buffer[0] = val;
if (1 != i2c_master_send(chip->c,buffer,1)) {
v4l_warn(chip->c, "%s: I/O error (write 0x%x)\n",
chip->c->name, val);
return -1;
}
} else {
v4l_dbg(1, debug, chip->c, "%s: chip_write: reg%d=0x%x\n",
chip->c->name, subaddr, val);
chip->shadow.bytes[subaddr+1] = val;
buffer[0] = subaddr;
buffer[1] = val;
if (2 != i2c_master_send(chip->c,buffer,2)) {
v4l_warn(chip->c, "%s: I/O error (write reg%d=0x%x)\n",
chip->c->name, subaddr, val);
return -1;
}
}
return 0;
} | 1 | [] | linux-2.6 | 494264379d186bf806613d27aafb7d88d42f4212 | 101,476,711,595,109,130,000,000,000,000,000,000,000 | 28 | V4L/DVB (9621): Avoid writing outside shadow.bytes[] array
There were no check about the limits of shadow.bytes array. This offers
a risk of writing values outside the limits, overriding other data
areas.
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
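The missing check belongs in the else branch of chip_write() above, before the shadow array is touched. A sketch of the guard, with ARRAY_SIZE over chip->shadow.bytes standing for whatever the driver actually allocates:
if (subaddr + 1 >= ARRAY_SIZE(chip->shadow.bytes)) {
	/* Refuse to scribble past the shadow register array. */
	v4l_info(chip->c, "tried to access non-existent register %d\n",
		 subaddr);
	return -EINVAL;
}
chip->shadow.bytes[subaddr + 1] = val;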
static int chip_cmd(struct CHIPSTATE *chip, char *name, audiocmd *cmd)
{
int i;
if (0 == cmd->count)
return 0;
/* update our shadow register set; print bytes if (debug > 0) */
v4l_dbg(1, debug, chip->c, "%s: chip_cmd(%s): reg=%d, data:",
chip->c->name, name,cmd->bytes[0]);
for (i = 1; i < cmd->count; i++) {
if (debug)
printk(" 0x%x",cmd->bytes[i]);
chip->shadow.bytes[i+cmd->bytes[0]] = cmd->bytes[i];
}
if (debug)
printk("\n");
/* send data to the chip */
if (cmd->count != i2c_master_send(chip->c,cmd->bytes,cmd->count)) {
v4l_warn(chip->c, "%s: I/O error (%s)\n", chip->c->name, name);
return -1;
}
return 0;
} | 1 | [] | linux-2.6 | 494264379d186bf806613d27aafb7d88d42f4212 | 185,698,574,459,664,020,000,000,000,000,000,000,000 | 25 | V4L/DVB (9621): Avoid writing outside shadow.bytes[] array
There was no check on the limits of the shadow.bytes[] array. This
creates a risk of writing values outside those limits, overwriting other
data areas.
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
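The three tvaudio records above (chip_write_masked, chip_write, chip_cmd) all index chip->shadow.bytes[] with caller-supplied register values and no range check. A minimal sketch of the kind of guard the fix calls for follows; it assumes shadow.bytes is a fixed-size array so ARRAY_SIZE() applies, and the wrapper name is invented for illustration — this is not the upstream patch itself.
/* Hedged sketch: reject subaddr values that would index outside
 * chip->shadow.bytes[] before delegating to chip_write().
 * subaddr == -1 is the "default register" case, which only touches
 * shadow byte 1 and is always in range. */
static int chip_write_bounded(struct CHIPSTATE *chip, int subaddr, int val)
{
	if (subaddr != -1 &&
	    (subaddr < 0 || subaddr + 1 >= ARRAY_SIZE(chip->shadow.bytes))) {
		v4l_warn(chip->c, "%s: reg %d out of range\n",
			 chip->c->name, subaddr);
		return -EINVAL;
	}
	return chip_write(chip, subaddr, val);
}
/* chip_cmd() would need the analogous guard on its copy loop, e.g.
 * rejecting commands where cmd->bytes[0] + cmd->count exceeds the
 * shadow array size. */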
static void audit_inotify_unregister(struct list_head *in_list)
{
struct audit_parent *p, *n;
list_for_each_entry_safe(p, n, in_list, ilist) {
list_del(&p->ilist);
inotify_rm_watch(audit_ih, &p->wdata);
/* the put matching the get in audit_do_del_rule() */
put_inotify_watch(&p->wdata);
}
} | 1 | [
"CWE-362"
] | linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 232,812,928,812,541,570,000,000,000,000,000,000,000 | 11 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
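A compact sketch of the "grab it and kill it" sequence the message describes, written against the inotify primitives visible in these records. pin_sb()/unpin_sb() are hypothetical stand-ins for the ->s_count/->s_umount superblock pinning; the function name and error handling are invented for illustration. Note that the watch reference keeps the watch and the inode it holds allocated, but not the superblock — which is exactly why the pin is needed.
/* Hedged sketch of the race-free removal recipe from the commit
 * message. pin_sb()/unpin_sb() are hypothetical placeholders for the
 * superblock pinning described above; pin_sb() is assumed to fail once
 * umount is past the point of no return. */
static int remove_watch_vs_umount(struct inotify_handle *ih, u32 wd)
{
	struct inotify_watch *watch;
	struct super_block *sb;
	struct inode *inode;

	mutex_lock(&ih->mutex);
	watch = idr_find(&ih->idr, wd);
	if (!watch) {
		mutex_unlock(&ih->mutex);
		return -EINVAL;
	}
	get_inotify_watch(watch);	/* keep watch (and its inode) allocated */
	inode = watch->inode;
	sb = inode->i_sb;
	mutex_unlock(&ih->mutex);	/* can't touch ->s_umount under it */

	if (!pin_sb(sb)) {		/* umount won; nothing left to kill */
		put_inotify_watch(watch);
		return -EINVAL;
	}
	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);
	/* Same wd, same pointer, same superblock: either our original
	 * watch or a newcomer we are equally entitled to kill. */
	if (idr_find(&ih->idr, wd) == watch && watch->inode->i_sb == sb)
		inotify_remove_watch_locked(ih, watch);
	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	put_inotify_watch(watch);
	unpin_sb(sb);
	return 0;
}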
static void untag_chunk(struct audit_chunk *chunk, struct node *p)
{
struct audit_chunk *new;
struct audit_tree *owner;
int size = chunk->count - 1;
int i, j;
mutex_lock(&chunk->watch.inode->inotify_mutex);
if (chunk->dead) {
mutex_unlock(&chunk->watch.inode->inotify_mutex);
return;
}
owner = p->owner;
if (!size) {
chunk->dead = 1;
spin_lock(&hash_lock);
list_del_init(&chunk->trees);
if (owner->root == chunk)
owner->root = NULL;
list_del_init(&p->list);
list_del_rcu(&chunk->hash);
spin_unlock(&hash_lock);
inotify_evict_watch(&chunk->watch);
mutex_unlock(&chunk->watch.inode->inotify_mutex);
put_inotify_watch(&chunk->watch);
return;
}
new = alloc_chunk(size);
if (!new)
goto Fallback;
if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
free_chunk(new);
goto Fallback;
}
chunk->dead = 1;
spin_lock(&hash_lock);
list_replace_init(&chunk->trees, &new->trees);
if (owner->root == chunk) {
list_del_init(&owner->same_root);
owner->root = NULL;
}
for (i = j = 0; i < size; i++, j++) {
struct audit_tree *s;
if (&chunk->owners[j] == p) {
list_del_init(&p->list);
i--;
continue;
}
s = chunk->owners[j].owner;
new->owners[i].owner = s;
new->owners[i].index = chunk->owners[j].index - j + i;
if (!s) /* result of earlier fallback */
continue;
get_tree(s);
list_replace_init(&chunk->owners[i].list, &new->owners[j].list);
}
list_replace_rcu(&chunk->hash, &new->hash);
list_for_each_entry(owner, &new->trees, same_root)
owner->root = new;
spin_unlock(&hash_lock);
inotify_evict_watch(&chunk->watch);
mutex_unlock(&chunk->watch.inode->inotify_mutex);
put_inotify_watch(&chunk->watch);
return;
Fallback:
// do the best we can
spin_lock(&hash_lock);
if (owner->root == chunk) {
list_del_init(&owner->same_root);
owner->root = NULL;
}
list_del_init(&p->list);
p->owner = NULL;
put_tree(owner);
spin_unlock(&hash_lock);
mutex_unlock(&chunk->watch.inode->inotify_mutex);
} | 1 | [
"CWE-362"
] | linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 50,917,876,832,060,690,000,000,000,000,000,000,000 | 84 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static void trim_marked(struct audit_tree *tree)
{
struct list_head *p, *q;
spin_lock(&hash_lock);
if (tree->goner) {
spin_unlock(&hash_lock);
return;
}
/* reorder */
for (p = tree->chunks.next; p != &tree->chunks; p = q) {
struct node *node = list_entry(p, struct node, list);
q = p->next;
if (node->index & (1U<<31)) {
list_del_init(p);
list_add(p, &tree->chunks);
}
}
while (!list_empty(&tree->chunks)) {
struct node *node;
struct audit_chunk *chunk;
node = list_entry(tree->chunks.next, struct node, list);
/* have we run out of marked? */
if (!(node->index & (1U<<31)))
break;
chunk = find_chunk(node);
get_inotify_watch(&chunk->watch);
spin_unlock(&hash_lock);
untag_chunk(chunk, node);
put_inotify_watch(&chunk->watch);
spin_lock(&hash_lock);
}
if (!tree->root && !tree->goner) {
tree->goner = 1;
spin_unlock(&hash_lock);
mutex_lock(&audit_filter_mutex);
kill_rules(tree);
list_del_init(&tree->list);
mutex_unlock(&audit_filter_mutex);
prune_one(tree);
} else {
spin_unlock(&hash_lock);
}
} | 1 | [
"CWE-362"
] | linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 312,013,971,667,038,060,000,000,000,000,000,000,000 | 49 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static inline void free_chunk(struct audit_chunk *chunk)
{
call_rcu(&chunk->head, __free_chunk);
} | 1 | [
"CWE-362"
] | linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 232,660,666,733,245,280,000,000,000,000,000,000,000 | 4 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
void inotify_destroy(struct inotify_handle *ih)
{
/*
* Destroy all of the watches for this handle. Unfortunately, not very
* pretty. We cannot do a simple iteration over the list, because we
* do not know the inode until we iterate to the watch. But we need to
* hold inode->inotify_mutex before ih->mutex. The following works.
*/
while (1) {
struct inotify_watch *watch;
struct list_head *watches;
struct inode *inode;
mutex_lock(&ih->mutex);
watches = &ih->watches;
if (list_empty(watches)) {
mutex_unlock(&ih->mutex);
break;
}
watch = list_first_entry(watches, struct inotify_watch, h_list);
get_inotify_watch(watch);
mutex_unlock(&ih->mutex);
inode = watch->inode;
mutex_lock(&inode->inotify_mutex);
mutex_lock(&ih->mutex);
/* make sure we didn't race with another list removal */
if (likely(idr_find(&ih->idr, watch->wd))) {
remove_watch_no_event(watch, ih);
put_inotify_watch(watch);
}
mutex_unlock(&ih->mutex);
mutex_unlock(&inode->inotify_mutex);
put_inotify_watch(watch);
}
/* free this handle: the put matching the get in inotify_init() */
put_inotify_handle(ih);
} | 1 | [
"CWE-362"
] | linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 97,538,541,219,067,190,000,000,000,000,000,000,000 | 41 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
struct list_head *list = chunk_hash(inode);
struct audit_chunk *p;
list_for_each_entry_rcu(p, list, hash) {
if (p->watch.inode == inode) {
get_inotify_watch(&p->watch);
return p;
}
}
return NULL;
} | 1 | [
"CWE-362"
] | linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 305,548,466,206,123,730,000,000,000,000,000,000,000 | 13 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
{
struct inotify_watch *watch;
struct inode *inode;
mutex_lock(&ih->mutex);
watch = idr_find(&ih->idr, wd);
if (unlikely(!watch)) {
mutex_unlock(&ih->mutex);
return -EINVAL;
}
get_inotify_watch(watch);
inode = watch->inode;
mutex_unlock(&ih->mutex);
mutex_lock(&inode->inotify_mutex);
mutex_lock(&ih->mutex);
/* make sure that we did not race */
if (likely(idr_find(&ih->idr, wd) == watch))
inotify_remove_watch_locked(ih, watch);
mutex_unlock(&ih->mutex);
mutex_unlock(&inode->inotify_mutex);
put_inotify_watch(watch);
return 0;
} | 1 | [
"CWE-362"
] | linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 74,634,527,766,613,130,000,000,000,000,000,000,000 | 28 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
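The record above is inotify_rm_wd(), the function the message singles out for the wd-reuse race. On the superblock side, a hedged sketch of the fallback pinning the message outlines (bump ->s_count, wait on ->s_umount, check ->s_root) might look as follows; it glosses over the ->s_active fast path, assumes sb_lock and drop_super() are usable from this context, and is illustration only, not the helper the commit actually added.
/* Hedged sketch of the fallback superblock pin from the commit
 * message: bump ->s_count so the struct can't be freed, wait out any
 * in-progress umount via ->s_umount, then check ->s_root to see who
 * got there first. */
static struct super_block *pin_sb_fallback(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	spin_lock(&sb_lock);
	sb->s_count++;
	spin_unlock(&sb_lock);
	down_read(&sb->s_umount);	/* sleeps until umount, if any, is done */
	if (likely(sb->s_root))
		return sb;		/* we won: release later with drop_super(sb) */
	drop_super(sb);			/* umount got there first */
	return NULL;
}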
static inline int audit_del_rule(struct audit_entry *entry,
struct list_head *list)
{
struct audit_entry *e;
struct audit_field *inode_f = entry->rule.inode_f;
struct audit_watch *watch, *tmp_watch = entry->rule.watch;
struct audit_tree *tree = entry->rule.tree;
LIST_HEAD(inotify_list);
int h, ret = 0;
#ifdef CONFIG_AUDITSYSCALL
int dont_count = 0;
/* If either of these, don't count towards total */
if (entry->rule.listnr == AUDIT_FILTER_USER ||
entry->rule.listnr == AUDIT_FILTER_TYPE)
dont_count = 1;
#endif
if (inode_f) {
h = audit_hash_ino(inode_f->val);
list = &audit_inode_hash[h];
}
mutex_lock(&audit_filter_mutex);
e = audit_find_rule(entry, list);
if (!e) {
mutex_unlock(&audit_filter_mutex);
ret = -ENOENT;
goto out;
}
watch = e->rule.watch;
if (watch) {
struct audit_parent *parent = watch->parent;
list_del(&e->rule.rlist);
if (list_empty(&watch->rules)) {
audit_remove_watch(watch);
if (list_empty(&parent->watches)) {
/* Put parent on the inotify un-registration
* list. Grab a reference before releasing
* audit_filter_mutex, to be released in
* audit_inotify_unregister(). */
list_add(&parent->ilist, &inotify_list);
get_inotify_watch(&parent->wdata);
}
}
}
if (e->rule.tree)
audit_remove_tree_rule(&e->rule);
list_del_rcu(&e->list);
call_rcu(&e->rcu, audit_free_rule_rcu);
#ifdef CONFIG_AUDITSYSCALL
if (!dont_count)
audit_n_rules--;
if (!audit_match_signal(entry))
audit_signals--;
#endif
mutex_unlock(&audit_filter_mutex);
if (!list_empty(&inotify_list))
audit_inotify_unregister(&inotify_list);
out:
if (tmp_watch)
audit_put_watch(tmp_watch); /* match initial get */
if (tree)
audit_put_tree(tree); /* that's the temporary one */
return ret;
} | 1 | [
"CWE-362"
] | linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 337,034,589,167,416,450,000,000,000,000,000,000,000 | 77 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static void destroy_watch(struct inotify_watch *watch)
{
struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
free_chunk(chunk);
} | 1 | [
"CWE-362"
] | linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 126,976,300,743,929,480,000,000,000,000,000,000,000 | 5 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static struct audit_chunk *alloc_chunk(int count)
{
struct audit_chunk *chunk;
size_t size;
int i;
size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
chunk = kzalloc(size, GFP_KERNEL);
if (!chunk)
return NULL;
INIT_LIST_HEAD(&chunk->hash);
INIT_LIST_HEAD(&chunk->trees);
chunk->count = count;
for (i = 0; i < count; i++) {
INIT_LIST_HEAD(&chunk->owners[i].list);
chunk->owners[i].index = i;
}
inotify_init_watch(&chunk->watch);
return chunk;
} | 1 | [
"CWE-362"
] | linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 256,389,276,391,929,980,000,000,000,000,000,000,000 | 21 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
void audit_put_chunk(struct audit_chunk *chunk)
{
put_inotify_watch(&chunk->watch);
} | 1 | [
"CWE-362"
] | linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 9,632,752,360,592,218,000,000,000,000,000,000,000 | 4 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static struct audit_chunk *find_chunk(struct node *p)
{
int index = p->index & ~(1U<<31);
p -= index;
return container_of(p, struct audit_chunk, owners[0]);
} | 1 | [
"CWE-362"
] | linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 151,315,558,575,830,370,000,000,000,000,000,000,000 | 6 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static void prune_one(struct audit_tree *victim)
{
spin_lock(&hash_lock);
while (!list_empty(&victim->chunks)) {
struct node *p;
struct audit_chunk *chunk;
p = list_entry(victim->chunks.next, struct node, list);
chunk = find_chunk(p);
get_inotify_watch(&chunk->watch);
spin_unlock(&hash_lock);
untag_chunk(chunk, p);
put_inotify_watch(&chunk->watch);
spin_lock(&hash_lock);
}
spin_unlock(&hash_lock);
put_tree(victim);
} | 1 | [
"CWE-362"
] | linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 41,116,006,577,601,036,000,000,000,000,000,000,000 | 20 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static void __free_chunk(struct rcu_head *rcu)
{
struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
int i;
for (i = 0; i < chunk->count; i++) {
if (chunk->owners[i].owner)
put_tree(chunk->owners[i].owner);
}
kfree(chunk);
} | 1 | [
"CWE-362"
] | linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 321,003,103,073,488,850,000,000,000,000,000,000,000 | 11 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
struct sock *sk = sock->sk;
struct sock *other = NULL;
struct sockaddr_un *sunaddr=msg->msg_name;
int err,size;
struct sk_buff *skb;
int sent=0;
struct scm_cookie tmp_scm;
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
err = scm_send(sock, msg, siocb->scm);
if (err < 0)
return err;
err = -EOPNOTSUPP;
if (msg->msg_flags&MSG_OOB)
goto out_err;
if (msg->msg_namelen) {
err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
goto out_err;
} else {
sunaddr = NULL;
err = -ENOTCONN;
other = unix_peer(sk);
if (!other)
goto out_err;
}
if (sk->sk_shutdown & SEND_SHUTDOWN)
goto pipe_err;
while(sent < len)
{
/*
* Optimisation for the fact that under 0.01% of X
* messages typically need breaking up.
*/
size = len-sent;
/* Keep two messages in the pipe so it schedules better */
if (size > ((sk->sk_sndbuf >> 1) - 64))
size = (sk->sk_sndbuf >> 1) - 64;
if (size > SKB_MAX_ALLOC)
size = SKB_MAX_ALLOC;
/*
* Grab a buffer
*/
skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err);
if (skb==NULL)
goto out_err;
/*
* If you pass two values to the sock_alloc_send_skb
* it tries to grab the large buffer with GFP_NOFS
* (which can fail easily), and if it fails grab the
* fallback size buffer which is under a page and will
* succeed. [Alan]
*/
size = min_t(int, size, skb_tailroom(skb));
memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
if (siocb->scm->fp) {
err = unix_attach_fds(siocb->scm, skb);
if (err) {
kfree_skb(skb);
goto out_err;
}
}
if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
kfree_skb(skb);
goto out_err;
}
unix_state_lock(other);
if (sock_flag(other, SOCK_DEAD) ||
(other->sk_shutdown & RCV_SHUTDOWN))
goto pipe_err_free;
skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_unlock(other);
other->sk_data_ready(other, size);
sent+=size;
}
scm_destroy(siocb->scm);
siocb->scm = NULL;
return sent;
pipe_err_free:
unix_state_unlock(other);
kfree_skb(skb);
pipe_err:
if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL))
send_sig(SIGPIPE,current,0);
err = -EPIPE;
out_err:
scm_destroy(siocb->scm);
siocb->scm = NULL;
return sent ? : err;
} | 1 | [
"CWE-399"
] | linux-2.6 | 5f23b734963ec7eaa3ebcd9050da0c9b7d143dd3 | 311,028,048,896,877,500,000,000,000,000,000,000,000 | 113 | net: Fix soft lockups/OOM issues w/ unix garbage collector
This is an implementation of David Miller's suggested fix in:
https://bugzilla.redhat.com/show_bug.cgi?id=470201
It has been updated to use wait_event() instead of
wait_event_interruptible().
Paraphrasing the description from the above report, it makes sendmsg()
block while UNIX garbage collection is in progress. This avoids a
situation where child processes continue to queue new FDs over a
AF_UNIX socket to a parent which is in the exit path and running
garbage collection on these FDs. This contention can result in soft
lockups and oom-killing of unrelated processes.
Signed-off-by: dann frazier <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
void unix_gc(void)
{
static bool gc_in_progress = false;
struct unix_sock *u;
struct unix_sock *next;
struct sk_buff_head hitlist;
struct list_head cursor;
LIST_HEAD(not_cycle_list);
spin_lock(&unix_gc_lock);
/* Avoid a recursive GC. */
if (gc_in_progress)
goto out;
gc_in_progress = true;
/*
* First, select candidates for garbage collection. Only
* in-flight sockets are considered, and from those only ones
* which don't have any external reference.
*
* Holding unix_gc_lock will protect these candidates from
* being detached, and hence from gaining an external
* reference. Since there are no possible receivers, all
* buffers currently on the candidates' queues stay there
* during the garbage collection.
*
* We also know that no new candidate can be added onto the
* receive queues. Other, non candidate sockets _can_ be
* added to queue, so we must make sure only to touch
* candidates.
*/
list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
long total_refs;
long inflight_refs;
total_refs = file_count(u->sk.sk_socket->file);
inflight_refs = atomic_long_read(&u->inflight);
BUG_ON(inflight_refs < 1);
BUG_ON(total_refs < inflight_refs);
if (total_refs == inflight_refs) {
list_move_tail(&u->link, &gc_candidates);
u->gc_candidate = 1;
u->gc_maybe_cycle = 1;
}
}
/*
* Now remove all internal in-flight reference to children of
* the candidates.
*/
list_for_each_entry(u, &gc_candidates, link)
scan_children(&u->sk, dec_inflight, NULL);
/*
* Restore the references for children of all candidates,
* which have remaining references. Do this recursively, so
* only those remain, which form cyclic references.
*
* Use a "cursor" link, to make the list traversal safe, even
* though elements might be moved about.
*/
list_add(&cursor, &gc_candidates);
while (cursor.next != &gc_candidates) {
u = list_entry(cursor.next, struct unix_sock, link);
/* Move cursor to after the current position. */
list_move(&cursor, &u->link);
if (atomic_long_read(&u->inflight) > 0) {
list_move_tail(&u->link, ¬_cycle_list);
u->gc_maybe_cycle = 0;
scan_children(&u->sk, inc_inflight_move_tail, NULL);
}
}
list_del(&cursor);
/*
* not_cycle_list contains those sockets which do not make up a
* cycle. Restore these to the inflight list.
*/
while (!list_empty(¬_cycle_list)) {
u = list_entry(not_cycle_list.next, struct unix_sock, link);
u->gc_candidate = 0;
list_move_tail(&u->link, &gc_inflight_list);
}
/*
* Now gc_candidates contains only garbage. Restore original
* inflight counters for these as well, and remove the skbuffs
* which are creating the cycle(s).
*/
skb_queue_head_init(&hitlist);
list_for_each_entry(u, &gc_candidates, link)
scan_children(&u->sk, inc_inflight, &hitlist);
spin_unlock(&unix_gc_lock);
/* Here we are. Hitlist is filled. Die. */
__skb_queue_purge(&hitlist);
spin_lock(&unix_gc_lock);
/* All candidates should have been detached by now. */
BUG_ON(!list_empty(&gc_candidates));
gc_in_progress = false;
out:
spin_unlock(&unix_gc_lock);
} | 1 | [
"CWE-399"
] | linux-2.6 | 5f23b734963ec7eaa3ebcd9050da0c9b7d143dd3 | 169,061,583,414,475,820,000,000,000,000,000,000,000 | 112 | net: Fix soft lockups/OOM issues w/ unix garbage collector
This is an implementation of David Miller's suggested fix in:
https://bugzilla.redhat.com/show_bug.cgi?id=470201
It has been updated to use wait_event() instead of
wait_event_interruptible().
Paraphrasing the description from the above report, it makes sendmsg()
block while UNIX garbage collection is in progress. This avoids a
situation where child processes continue to queue new FDs over a
AF_UNIX socket to a parent which is in the exit path and running
garbage collection on these FDs. This contention can result in soft
lockups and oom-killing of unrelated processes.
Signed-off-by: dann frazier <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
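The unix_gc() record below keeps gc_in_progress as a function-local static. A minimal sketch of the blocking the message describes, assuming that flag is hoisted to file scope alongside a waitqueue:
/* Hedged sketch of the fix shape described above: sendmsg paths call
 * wait_for_unix_gc() before queueing new skbs, and unix_gc() wakes
 * them once a pass completes. Assumes gc_in_progress moves from
 * function scope in unix_gc() to file scope. */
static bool gc_in_progress;
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

void wait_for_unix_gc(void)
{
	wait_event(unix_gc_wait, gc_in_progress == false);
}
/* In unix_gc(), after the hitlist is purged and under unix_gc_lock:
 *	gc_in_progress = false;
 *	wake_up(&unix_gc_wait);
 * and both unix_dgram_sendmsg() and unix_stream_sendmsg() would invoke
 * wait_for_unix_gc() on entry. */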
static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct unix_sock *u = unix_sk(sk);
struct sockaddr_un *sunaddr=msg->msg_name;
struct sock *other = NULL;
int namelen = 0; /* fake GCC */
int err;
unsigned hash;
struct sk_buff *skb;
long timeo;
struct scm_cookie tmp_scm;
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
err = scm_send(sock, msg, siocb->scm);
if (err < 0)
return err;
err = -EOPNOTSUPP;
if (msg->msg_flags&MSG_OOB)
goto out;
if (msg->msg_namelen) {
err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
if (err < 0)
goto out;
namelen = err;
} else {
sunaddr = NULL;
err = -ENOTCONN;
other = unix_peer_get(sk);
if (!other)
goto out;
}
if (test_bit(SOCK_PASSCRED, &sock->flags)
&& !u->addr && (err = unix_autobind(sock)) != 0)
goto out;
err = -EMSGSIZE;
if (len > sk->sk_sndbuf - 32)
goto out;
skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
if (skb==NULL)
goto out;
memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
if (siocb->scm->fp) {
err = unix_attach_fds(siocb->scm, skb);
if (err)
goto out_free;
}
unix_get_secdata(siocb->scm, skb);
skb_reset_transport_header(skb);
err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
if (err)
goto out_free;
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
restart:
if (!other) {
err = -ECONNRESET;
if (sunaddr == NULL)
goto out_free;
other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
hash, &err);
if (other==NULL)
goto out_free;
}
unix_state_lock(other);
err = -EPERM;
if (!unix_may_send(sk, other))
goto out_unlock;
if (sock_flag(other, SOCK_DEAD)) {
/*
* Check with 1003.1g - what should
* datagram error
*/
unix_state_unlock(other);
sock_put(other);
err = 0;
unix_state_lock(sk);
if (unix_peer(sk) == other) {
unix_peer(sk)=NULL;
unix_state_unlock(sk);
unix_dgram_disconnected(sk, other);
sock_put(other);
err = -ECONNREFUSED;
} else {
unix_state_unlock(sk);
}
other = NULL;
if (err)
goto out_free;
goto restart;
}
err = -EPIPE;
if (other->sk_shutdown & RCV_SHUTDOWN)
goto out_unlock;
if (sk->sk_type != SOCK_SEQPACKET) {
err = security_unix_may_send(sk->sk_socket, other->sk_socket);
if (err)
goto out_unlock;
}
if (unix_peer(other) != sk && unix_recvq_full(other)) {
if (!timeo) {
err = -EAGAIN;
goto out_unlock;
}
timeo = unix_wait_for_peer(other, timeo);
err = sock_intr_errno(timeo);
if (signal_pending(current))
goto out_free;
goto restart;
}
skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_unlock(other);
other->sk_data_ready(other, len);
sock_put(other);
scm_destroy(siocb->scm);
return len;
out_unlock:
unix_state_unlock(other);
out_free:
kfree_skb(skb);
out:
if (other)
sock_put(other);
scm_destroy(siocb->scm);
return err;
} | 1 | [
"CWE-399"
] | linux-2.6 | 5f23b734963ec7eaa3ebcd9050da0c9b7d143dd3 | 10,764,665,587,468,731,000,000,000,000,000,000,000 | 152 | net: Fix soft lockups/OOM issues w/ unix garbage collector
This is an implementation of David Miller's suggested fix in:
https://bugzilla.redhat.com/show_bug.cgi?id=470201
It has been updated to use wait_event() instead of
wait_event_interruptible().
Paraphrasing the description from the above report, it makes sendmsg()
block while UNIX garbage collection is in progress. This avoids a
situation where child processes continue to queue new FDs over an
AF_UNIX socket to a parent that is in the exit path and running
garbage collection on these FDs. This contention can result in soft
lockups and oom-killing of unrelated processes.
Signed-off-by: dann frazier <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int svc_listen(struct socket *sock,int backlog)
{
DEFINE_WAIT(wait);
struct sock *sk = sock->sk;
struct atm_vcc *vcc = ATM_SD(sock);
int error;
pr_debug("svc_listen %p\n",vcc);
lock_sock(sk);
/* let server handle listen on unbound sockets */
if (test_bit(ATM_VF_SESSION,&vcc->flags)) {
error = -EINVAL;
goto out;
}
vcc_insert_socket(sk);
set_bit(ATM_VF_WAITING, &vcc->flags);
prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local);
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
schedule();
prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
}
finish_wait(sk->sk_sleep, &wait);
if (!sigd) {
error = -EUNATCH;
goto out;
}
set_bit(ATM_VF_LISTEN,&vcc->flags);
sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
error = -sk->sk_err;
out:
release_sock(sk);
return error;
} | 1 | [
"CWE-399"
] | linux-2.6 | 17b24b3c97498935a2ef9777370b1151dfed3f6f | 319,036,396,244,354,800,000,000,000,000,000,000,000 | 34 | ATM: CVE-2008-5079: duplicate listen() on socket corrupts the vcc table
As reported by Hugo Dias, it is possible to cause a local denial
of service attack by calling the svc_listen function twice on the same
socket and then reading /proc/net/atm/*vc
Signed-off-by: Chas Williams <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
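
A minimal sketch of the guard the message implies, placed after the session check in svc_listen(); the ATM_VF_LISTEN test and the errno choice are assumptions about the fix, not the verbatim patch.

	/* Refuse a second listen() on an already-listening vcc, so the
	 * socket is not inserted into the vcc table twice. */
	if (test_bit(ATM_VF_LISTEN, &vcc->flags)) {
		error = -EADDRINUSE;	/* errno choice assumed */
		goto out;
	}
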
static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_hdr *hdr, fmode_t mode)
{
if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
return -EFAULT;
if (blk_verify_command(&q->cmd_filter, rq->cmd, mode & FMODE_WRITE))
return -EPERM;
/*
* fill in request structure
*/
rq->cmd_len = hdr->cmd_len;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->timeout = msecs_to_jiffies(hdr->timeout);
if (!rq->timeout)
rq->timeout = q->sg_timeout;
if (!rq->timeout)
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
return 0;
} | 1 | [
"CWE-399"
] | linux-2.6 | f2f1fa78a155524b849edf359e42a3001ea652c0 | 259,037,141,837,650,080,000,000,000,000,000,000,000 | 22 | Enforce a minimum SG_IO timeout
There's no point in having too short SG_IO timeouts, since if the
command does end up timing out, we'll end up through the reset sequence
that is several seconds long in order to abort the command that timed
out.
As a result, shorter timeouts than a few seconds simply do not make
sense, as the recovery would be longer than the timeout itself.
Add a BLK_MIN_SG_TIMEOUT to match the existing BLK_DEFAULT_SG_TIMEOUT.
Suggested-by: Alan Cox <[email protected]>
Acked-by: Tejun Heo <[email protected]>
Acked-by: Jens Axboe <[email protected]>
Cc: Jeff Garzik <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
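
A minimal sketch of the clamp the message describes, applied at the end of blk_fill_sghdr_rq(); the constant's value is an assumption, only the clamping pattern matters.

#define BLK_MIN_SG_TIMEOUT	(7 * HZ)	/* assumed value */

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	/* Never allow a timeout shorter than the recovery it would trigger. */
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;
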
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_v4 *hdr, struct bsg_device *bd,
fmode_t has_write_perm)
{
if (hdr->request_len > BLK_MAX_CDB) {
rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
if (!rq->cmd)
return -ENOMEM;
}
if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
hdr->request_len))
return -EFAULT;
if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
if (blk_verify_command(&q->cmd_filter, rq->cmd, has_write_perm))
return -EPERM;
} else if (!capable(CAP_SYS_RAWIO))
return -EPERM;
/*
* fill in request structure
*/
rq->cmd_len = hdr->request_len;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->timeout = (hdr->timeout * HZ) / 1000;
if (!rq->timeout)
rq->timeout = q->sg_timeout;
if (!rq->timeout)
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
return 0;
} | 1 | [
"CWE-399"
] | linux-2.6 | f2f1fa78a155524b849edf359e42a3001ea652c0 | 178,586,599,095,835,240,000,000,000,000,000,000,000 | 34 | Enforce a minimum SG_IO timeout
There's no point in having too short SG_IO timeouts, since if the
command does end up timing out, we'll end up through the reset sequence
that is several seconds long in order to abort the command that timed
out.
As a result, shorter timeouts than a few seconds simply do not make
sense, as the recovery would be longer than the timeout itself.
Add a BLK_MIN_SG_TIMEOUT to match the existing BLK_DEFAULT_SG_TIMEOUT.
Suggested-by: Alan Cox <[email protected]>
Acked-by: Tejun Heo <[email protected]>
Acked-by: Jens Axboe <[email protected]>
Cc: Jeff Garzik <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static int ibwdt_set_heartbeat(int t)
{
int i;
if ((t < 0) || (t > 30))
return -EINVAL;
for (i = 0x0F; i > -1; i--)
if (wd_times[i] > t)
break;
wd_margin = i;
return 0;
} | 1 | [] | linux-2.6 | 7c2500f17d65092d93345f3996cf82ebca17e9ff | 304,592,494,063,468,760,000,000,000,000,000,000,000 | 13 | [WATCHDOG] ib700wdt.c - fix buffer_underflow bug
This fixes Bug 11399:
if ibwdt_set_heartbeat(int t) is called with value 30 then
the check "if ((t < 0) || (t > 30))" in ibwdt_set_heartbeat
is not going to fail because t == 30, but in the loop, the
check wd_times[i] > t is never going to be true because
none of the wd_times are greater than the value of t (i.e. 30).
So we are exiting the loop with i == -1 and therefore setting
wd_margin to -1, which is wrong.
Reported-by: Zvonimir Rakamaric <[email protected]>
Signed-off-by: Wim Van Sebroeck <[email protected]> |
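
A minimal sketch of the off-by-one repair the report implies: making the comparison non-strict lets t == 30 match a table entry of 30 instead of running the index down to -1. That the comparison change is the whole fix is an assumption.

	/* '>=' keeps i in range when t equals the largest table entry. */
	for (i = 0x0F; i > -1; i--)
		if (wd_times[i] >= t)
			break;

	wd_margin = i;
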
static void dispatch_packet(AvahiServer *s, AvahiDnsPacket *p, const AvahiAddress *src_address, uint16_t port, const AvahiAddress *dst_address, AvahiIfIndex iface, int ttl) {
AvahiInterface *i;
int from_local_iface = 0;
assert(s);
assert(p);
assert(src_address);
assert(dst_address);
assert(iface > 0);
assert(src_address->proto == dst_address->proto);
if (!(i = avahi_interface_monitor_get_interface(s->monitor, iface, src_address->proto)) ||
!i->announcing) {
avahi_log_warn("Received packet from invalid interface.");
return;
}
if (avahi_address_is_ipv4_in_ipv6(src_address))
/* This is an IPv4 address encapsulated in IPv6, so let's ignore it. */
return;
if (originates_from_local_legacy_unicast_socket(s, src_address, port))
/* This originates from our local reflector, so let's ignore it */
return;
/* We don't want to reflect local traffic, so we check if this packet is generated locally. */
if (s->config.enable_reflector)
from_local_iface = originates_from_local_iface(s, iface, src_address, port);
if (avahi_dns_packet_check_valid_multicast(p) < 0) {
avahi_log_warn("Received invalid packet.");
return;
}
if (avahi_dns_packet_is_query(p)) {
int legacy_unicast = 0;
if (avahi_dns_packet_get_field(p, AVAHI_DNS_FIELD_ARCOUNT) != 0) {
avahi_log_warn("Invalid query packet.");
return;
}
if (port != AVAHI_MDNS_PORT) {
/* Legacy Unicast */
if ((avahi_dns_packet_get_field(p, AVAHI_DNS_FIELD_ANCOUNT) != 0 ||
avahi_dns_packet_get_field(p, AVAHI_DNS_FIELD_NSCOUNT) != 0)) {
avahi_log_warn("Invalid legacy unicast query packet.");
return;
}
legacy_unicast = 1;
}
if (legacy_unicast)
reflect_legacy_unicast_query_packet(s, p, i, src_address, port);
handle_query_packet(s, p, i, src_address, port, legacy_unicast, from_local_iface);
} else {
char t[AVAHI_ADDRESS_STR_MAX];
if (port != AVAHI_MDNS_PORT) {
avahi_log_warn("Received response from host %s with invalid source port %u on interface '%s.%i'", avahi_address_snprint(t, sizeof(t), src_address), port, i->hardware->name, i->protocol);
return;
}
if (ttl != 255 && s->config.check_response_ttl) {
avahi_log_warn("Received response from host %s with invalid TTL %u on interface '%s.%i'.", avahi_address_snprint(t, sizeof(t), src_address), ttl, i->hardware->name, i->protocol);
return;
}
if (!is_mdns_mcast_address(dst_address) &&
!avahi_interface_address_on_link(i, src_address)) {
avahi_log_warn("Received non-local response from host %s on interface '%s.%i'.", avahi_address_snprint(t, sizeof(t), src_address), i->hardware->name, i->protocol);
return;
}
if (avahi_dns_packet_get_field(p, AVAHI_DNS_FIELD_QDCOUNT) != 0 ||
avahi_dns_packet_get_field(p, AVAHI_DNS_FIELD_ANCOUNT) == 0 ||
avahi_dns_packet_get_field(p, AVAHI_DNS_FIELD_NSCOUNT) != 0) {
avahi_log_warn("Invalid response packet from host %s.", avahi_address_snprint(t, sizeof(t), src_address));
return;
}
handle_response_packet(s, p, i, src_address, from_local_iface);
}
} | 1 | [
"CWE-399"
] | avahi | 3093047f1aa36bed8a37fa79004bf0ee287929f4 | 202,231,450,142,774,530,000,000,000,000,000,000,000 | 90 | Don't get confused by UDP packets with a source port that is zero
This is a fix for rhbz 475394.
Problem identified by Hugo Dias. |
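
A minimal sketch of the check the title implies, early in dispatch_packet(); the placement and log text are assumptions.

	/* A UDP source port of zero is never valid for mDNS traffic
	 * and cannot be replied to; drop such packets up front. */
	if (port == 0) {
		avahi_log_warn("Received packet with invalid source port 0.");
		return;
	}
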
while(1) {
/* Add the Unix Domain Sockets to the list of read
* descriptors.
* rgerhards 2005-08-01: we must now check if there are
* any local sockets to listen to at all. If the -o option
* is given without -a, we do not need to listen at all..
*/
maxfds = 0;
FD_ZERO (&readfds);
/* Add the UDP listen sockets to the list of read descriptors.
*/
if(udpLstnSocks != NULL) {
for (i = 0; i < *udpLstnSocks; i++) {
if (udpLstnSocks[i+1] != -1) {
if(Debug)
net.debugListenInfo(udpLstnSocks[i+1], "UDP");
FD_SET(udpLstnSocks[i+1], &readfds);
if(udpLstnSocks[i+1]>maxfds) maxfds=udpLstnSocks[i+1];
}
}
}
if(Debug) {
dbgprintf("--------imUDP calling select, active file descriptors (max %d): ", maxfds);
for (nfds = 0; nfds <= maxfds; ++nfds)
if ( FD_ISSET(nfds, &readfds) )
dbgprintf("%d ", nfds);
dbgprintf("\n");
}
/* wait for io to become ready */
nfds = select(maxfds+1, (fd_set *) &readfds, NULL, NULL, NULL);
if(udpLstnSocks != NULL) {
for (i = 0; nfds && i < *udpLstnSocks; i++) {
if (FD_ISSET(udpLstnSocks[i+1], &readfds)) {
socklen = sizeof(frominet);
l = recvfrom(udpLstnSocks[i+1], (char*) pRcvBuf, MAXLINE - 1, 0,
(struct sockaddr *)&frominet, &socklen);
if (l > 0) {
if(net.cvthname(&frominet, fromHost, fromHostFQDN, fromHostIP) == RS_RET_OK) {
dbgprintf("Message from inetd socket: #%d, host: %s\n",
udpLstnSocks[i+1], fromHost);
/* Here we check if a host is permitted to send us
* syslog messages. If it isn't, we do not further
* process the message but log a warning (if we are
* configured to do this).
* rgerhards, 2005-09-26
*/
if(net.isAllowedSender((uchar*) "UDP",
(struct sockaddr *)&frominet, (char*)fromHostFQDN)) {
parseAndSubmitMessage(fromHost, fromHostIP, pRcvBuf, l,
MSG_PARSE_HOSTNAME, NOFLAG, eFLOWCTL_NO_DELAY);
} else {
dbgprintf("%s is not an allowed sender\n", (char*)fromHostFQDN);
if(glbl.GetOption_DisallowWarning) {
errmsg.LogError(0, NO_ERRCODE, "UDP message from disallowed sender %s discarded",
(char*)fromHost);
}
}
}
} else if (l < 0 && errno != EINTR && errno != EAGAIN) {
char errStr[1024];
rs_strerror_r(errno, errStr, sizeof(errStr));
dbgprintf("INET socket error: %d = %s.\n", errno, errStr);
errmsg.LogError(errno, NO_ERRCODE, "recvfrom inet");
/* should be harmless */
sleep(1);
}
--nfds; /* indicate we have processed one */
}
}
}
} | 1 | [] | rsyslog | afdccceefa30306cf720a27efd5a29bcc5a916c9 | 173,727,960,605,522,330,000,000,000,000,000,000,000 | 74 | security fix: imudp emitted a message when a non-permitted sender...
...tried to send a message to it. This behaviour is operator-configurable.
If enabled, a message was emitted each time. That way an attacker could
effectively fill the disk via this facility. The message is now
emitted only once per minute (this is currently a hard-coded limit;
if someone comes up with a good reason to make it configurable, we
will probably do that). |
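
A minimal sketch of the once-per-minute limit, wrapping the LogError() call in the receive loop above; the static lastWarnTime state and the 60-second window are assumptions matching the message, not verbatim code.

	static time_t lastWarnTime = 0;	/* assumed rate-limit state */
	time_t now = time(NULL);

	if (glbl.GetOption_DisallowWarning && now - lastWarnTime >= 60) {
		lastWarnTime = now;
		errmsg.LogError(0, NO_ERRCODE,
			"UDP message from disallowed sender %s discarded",
			(char*)fromHost);
	}
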
void __qdisc_run(struct net_device *dev)
{
do {
if (!qdisc_restart(dev))
break;
} while (!netif_queue_stopped(dev));
clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
} | 1 | [
"CWE-399"
] | linux-2.6 | 2ba2506ca7ca62c56edaa334b0fe61eb5eab6ab0 | 288,784,763,751,991,200,000,000,000,000,000,000,000 | 9 | [NET]: Add preemption point in qdisc_run
The qdisc_run loop is currently unbounded and runs entirely in a
softirq. This is bad as it may create an unbounded softirq run.
This patch fixes this by calling need_resched and breaking out if
necessary.
It also adds a break-out if the jiffies value changes, since that would
indicate we've been transmitting for too long, which starves other
softirqs.
Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
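
A minimal sketch of the bounded loop the message describes: stop when the queue stops, when another task needs the CPU, or when a full jiffy has elapsed, then requeue. netif_schedule() as the requeue helper is an assumption.

void __qdisc_run(struct net_device *dev)
{
	unsigned long start_time = jiffies;

	while (qdisc_restart(dev)) {
		if (netif_queue_stopped(dev))
			break;

		/* Postpone if another task needs the CPU or if we have
		 * been transmitting for a full jiffy already. */
		if (need_resched() || jiffies != start_time) {
			netif_schedule(dev);	/* assumed requeue helper */
			break;
		}
	}

	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
}
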
sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
__u16 len;
__u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
/* Make sure that the FORWARD_TSN chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
len = ntohs(chunk->chunk_hdr->length);
len -= sizeof(struct sctp_chunkhdr);
skb_pull(chunk->skb, len);
tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __func__, tsn);
/* The TSN is too high--silently discard the chunk and count on it
* getting retransmitted later.
*/
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto gen_shutdown;
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
/* Go a head and force a SACK, since we are shutting down. */
gen_shutdown:
/* Implementor's Guide.
*
* While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
* respond to each received packet containing one or more DATA chunk(s)
* with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
return SCTP_DISPOSITION_CONSUME;
} | 1 | [
"CWE-119"
] | linux-2.6 | 9fcb95a105758b81ef0131cd18e2db5149f13e95 | 44,099,590,711,358,880,000,000,000,000,000,000,000 | 58 | sctp: Avoid memory overflow while FWD-TSN chunk is received with bad stream ID
If a FWD-TSN chunk is received with a bad stream ID, SCTP will not do the
validity check; this may cause a memory overflow when overwriting the TSN of
the stream ID.
The FORWARD-TSN chunk is like this:
FORWARD-TSN chunk
Type = 192
Flags = 0
Length = 172
NewTSN = 99
Stream = 10000
StreamSequence = 0xFFFF
This patch fixes this problem by discarding the chunk if the stream ID is
not less than MIS.
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
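
A minimal sketch of the validation the message describes, walking the skip list in the FORWARD-TSN chunk before SCTP_CMD_REPORT_FWDTSN is queued; the loop-bound expression is an assumption, the MIS comparison is the stated fix.

	if (len > sizeof(struct sctp_fwdtsn_hdr)) {
		struct sctp_fwdtsn_skip *skip;

		/* Silently discard the chunk if any stream ID is not
		 * less than MIS (maximum inbound streams). */
		for (skip = fwdtsn_hdr->skip;
		     (void *)skip < (void *)fwdtsn_hdr + len; skip++) {
			if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
				goto discard_noforce;
		}
	}

In the _fast variant above, the corresponding jump target would presumably be gen_shutdown rather than discard_noforce.
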
sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
__u16 len;
__u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
/* Make sure that the FORWARD_TSN chunk has valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
len = ntohs(chunk->chunk_hdr->length);
len -= sizeof(struct sctp_chunkhdr);
skb_pull(chunk->skb, len);
tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __func__, tsn);
/* The TSN is too high--silently discard the chunk and count on it
* getting retransmitted later.
*/
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto discard_noforce;
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
/* Count this as receiving DATA. */
if (asoc->autoclose) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
}
/* FIXME: For now send a SACK, but DATA processing may
* send another.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
return SCTP_DISPOSITION_CONSUME;
discard_noforce:
return SCTP_DISPOSITION_DISCARD;
} | 1 | [
"CWE-119"
] | linux-2.6 | 9fcb95a105758b81ef0131cd18e2db5149f13e95 | 110,954,194,245,684,120,000,000,000,000,000,000,000 | 58 | sctp: Avoid memory overflow while FWD-TSN chunk is received with bad stream ID
If a FWD-TSN chunk is received with a bad stream ID, SCTP will not do the
validity check; this may cause a memory overflow when overwriting the TSN of
the stream ID.
The FORWARD-TSN chunk is like this:
FORWARD-TSN chunk
Type = 192
Flags = 0
Length = 172
NewTSN = 99
Stream = 10000
StreamSequence = 0xFFFF
This patch fixes this problem by discarding the chunk if the stream ID is
not less than MIS.
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static struct task_struct *copy_process(unsigned long clone_flags,
unsigned long stack_start,
struct pt_regs *regs,
unsigned long stack_size,
int __user *child_tidptr,
struct pid *pid,
int trace)
{
int retval;
struct task_struct *p;
int cgroup_callbacks_done = 0;
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
return ERR_PTR(-EINVAL);
/*
* Thread groups must share signals as well, and detached threads
* can only be started up within the thread group.
*/
if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
return ERR_PTR(-EINVAL);
/*
* Shared signal handlers imply shared VM. By way of the above,
* thread groups also imply shared VM. Blocking this case allows
* for various simplifications in other code.
*/
if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
return ERR_PTR(-EINVAL);
retval = security_task_create(clone_flags);
if (retval)
goto fork_out;
retval = -ENOMEM;
p = dup_task_struct(current);
if (!p)
goto fork_out;
rt_mutex_init_task(p);
#ifdef CONFIG_PROVE_LOCKING
DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
retval = -EAGAIN;
if (atomic_read(&p->real_cred->user->processes) >=
p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
p->real_cred->user != INIT_USER)
goto bad_fork_free;
}
retval = copy_creds(p, clone_flags);
if (retval < 0)
goto bad_fork_free;
/*
* If multiple threads are within copy_process(), then this check
* triggers too late. This doesn't hurt, the check is only there
* to stop root fork bombs.
*/
retval = -EAGAIN;
if (nr_threads >= max_threads)
goto bad_fork_cleanup_count;
if (!try_module_get(task_thread_info(p)->exec_domain->module))
goto bad_fork_cleanup_count;
if (p->binfmt && !try_module_get(p->binfmt->module))
goto bad_fork_cleanup_put_domain;
p->did_exec = 0;
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
copy_flags(clone_flags, p);
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
#ifdef CONFIG_PREEMPT_RCU
p->rcu_read_lock_nesting = 0;
p->rcu_flipctr_idx = 0;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
p->vfork_done = NULL;
spin_lock_init(&p->alloc_lock);
clear_tsk_thread_flag(p, TIF_SIGPENDING);
init_sigpending(&p->pending);
p->utime = cputime_zero;
p->stime = cputime_zero;
p->gtime = cputime_zero;
p->utimescaled = cputime_zero;
p->stimescaled = cputime_zero;
p->prev_utime = cputime_zero;
p->prev_stime = cputime_zero;
p->default_timer_slack_ns = current->timer_slack_ns;
#ifdef CONFIG_DETECT_SOFTLOCKUP
p->last_switch_count = 0;
p->last_switch_timestamp = 0;
#endif
task_io_accounting_init(&p->ioac);
acct_clear_integrals(p);
posix_cpu_timers_init(p);
p->lock_depth = -1; /* -1 = no lock */
do_posix_clock_monotonic_gettime(&p->start_time);
p->real_start_time = p->start_time;
monotonic_to_bootbased(&p->real_start_time);
p->io_context = NULL;
p->audit_context = NULL;
cgroup_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_dup(p->mempolicy);
if (IS_ERR(p->mempolicy)) {
retval = PTR_ERR(p->mempolicy);
p->mempolicy = NULL;
goto bad_fork_cleanup_cgroup;
}
mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
p->hardirqs_enabled = 1;
#else
p->hardirqs_enabled = 0;
#endif
p->hardirq_enable_ip = 0;
p->hardirq_enable_event = 0;
p->hardirq_disable_ip = _THIS_IP_;
p->hardirq_disable_event = 0;
p->softirqs_enabled = 1;
p->softirq_enable_ip = _THIS_IP_;
p->softirq_enable_event = 0;
p->softirq_disable_ip = 0;
p->softirq_disable_event = 0;
p->hardirq_context = 0;
p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
p->lockdep_depth = 0; /* no locks held yet */
p->curr_chain_key = 0;
p->lockdep_recursion = 0;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
p->blocked_on = NULL; /* not blocked yet */
#endif
if (unlikely(current->ptrace))
ptrace_fork(p, clone_flags);
/* Perform scheduler related setup. Assign this task to a CPU. */
sched_fork(p, clone_flags);
if ((retval = audit_alloc(p)))
goto bad_fork_cleanup_policy;
/* copy all the process information */
if ((retval = copy_semundo(clone_flags, p)))
goto bad_fork_cleanup_audit;
if ((retval = copy_files(clone_flags, p)))
goto bad_fork_cleanup_semundo;
if ((retval = copy_fs(clone_flags, p)))
goto bad_fork_cleanup_files;
if ((retval = copy_sighand(clone_flags, p)))
goto bad_fork_cleanup_fs;
if ((retval = copy_signal(clone_flags, p)))
goto bad_fork_cleanup_sighand;
if ((retval = copy_mm(clone_flags, p)))
goto bad_fork_cleanup_signal;
if ((retval = copy_namespaces(clone_flags, p)))
goto bad_fork_cleanup_mm;
if ((retval = copy_io(clone_flags, p)))
goto bad_fork_cleanup_namespaces;
retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
if (retval)
goto bad_fork_cleanup_io;
if (pid != &init_struct_pid) {
retval = -ENOMEM;
pid = alloc_pid(p->nsproxy->pid_ns);
if (!pid)
goto bad_fork_cleanup_io;
if (clone_flags & CLONE_NEWPID) {
retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
if (retval < 0)
goto bad_fork_free_pid;
}
}
ftrace_graph_init_task(p);
p->pid = pid_nr(pid);
p->tgid = p->pid;
if (clone_flags & CLONE_THREAD)
p->tgid = current->tgid;
if (current->nsproxy != p->nsproxy) {
retval = ns_cgroup_clone(p, pid);
if (retval)
goto bad_fork_free_graph;
}
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
/*
* Clear TID on mm_release()?
*/
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_FUTEX
p->robust_list = NULL;
#ifdef CONFIG_COMPAT
p->compat_robust_list = NULL;
#endif
INIT_LIST_HEAD(&p->pi_state_list);
p->pi_state_cache = NULL;
#endif
/*
* sigaltstack should be cleared when sharing the same VM
*/
if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
p->sas_ss_sp = p->sas_ss_size = 0;
/*
* Syscall tracing should be turned off in the child regardless
* of CLONE_PTRACE.
*/
clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
clear_all_latency_tracing(p);
/* Our parent execution domain becomes current domain
These must match for thread signalling to apply */
p->parent_exec_id = p->self_exec_id;
/* ok, now we should be set up.. */
p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
p->pdeath_signal = 0;
p->exit_state = 0;
/*
* Ok, make it visible to the rest of the system.
* We dont wake it up yet.
*/
p->group_leader = p;
INIT_LIST_HEAD(&p->thread_group);
/* Now that the task is set up, run cgroup callbacks if
* necessary. We need to run them before the task is visible
* on the tasklist. */
cgroup_fork_callbacks(p);
cgroup_callbacks_done = 1;
/* Need tasklist lock for parent etc handling! */
write_lock_irq(&tasklist_lock);
/*
* The task hasn't been attached yet, so its cpus_allowed mask will
* not be changed, nor will its assigned CPU.
*
* The cpus_allowed mask of the parent may have changed after it was
* copied first time - so re-copy it here, then check the child's CPU
* to ensure it is on a valid CPU (and if not, just force it back to
* parent's CPU). This avoids alot of nasty races.
*/
p->cpus_allowed = current->cpus_allowed;
p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
!cpu_online(task_cpu(p))))
set_task_cpu(p, smp_processor_id());
/* CLONE_PARENT re-uses the old parent */
if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
p->real_parent = current->real_parent;
else
p->real_parent = current;
spin_lock(¤t->sighand->siglock);
/*
* Process group and session signals need to be delivered to just the
* parent before the fork or both the parent and the child after the
* fork. Restart if a signal comes in before we add the new process to
* it's process group.
* A fatal signal pending means that current will exit, so the new
* thread can't slip out of an OOM kill (or normal SIGKILL).
*/
recalc_sigpending();
if (signal_pending(current)) {
spin_unlock(¤t->sighand->siglock);
write_unlock_irq(&tasklist_lock);
retval = -ERESTARTNOINTR;
goto bad_fork_free_graph;
}
if (clone_flags & CLONE_THREAD) {
p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
}
if (likely(p->pid)) {
list_add_tail(&p->sibling, &p->real_parent->children);
tracehook_finish_clone(p, clone_flags, trace);
if (thread_group_leader(p)) {
if (clone_flags & CLONE_NEWPID)
p->nsproxy->pid_ns->child_reaper = p;
p->signal->leader_pid = pid;
tty_kref_put(p->signal->tty);
p->signal->tty = tty_kref_get(current->signal->tty);
set_task_pgrp(p, task_pgrp_nr(current));
set_task_session(p, task_session_nr(current));
attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
attach_pid(p, PIDTYPE_SID, task_session(current));
list_add_tail_rcu(&p->tasks, &init_task.tasks);
__get_cpu_var(process_counts)++;
}
attach_pid(p, PIDTYPE_PID, pid);
nr_threads++;
}
total_forks++;
spin_unlock(¤t->sighand->siglock);
write_unlock_irq(&tasklist_lock);
proc_fork_connector(p);
cgroup_post_fork(p);
return p;
bad_fork_free_graph:
ftrace_graph_exit_task(p);
bad_fork_free_pid:
if (pid != &init_struct_pid)
free_pid(pid);
bad_fork_cleanup_io:
put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
exit_task_namespaces(p);
bad_fork_cleanup_mm:
if (p->mm)
mmput(p->mm);
bad_fork_cleanup_signal:
cleanup_signal(p);
bad_fork_cleanup_sighand:
__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
exit_fs(p); /* blocking */
bad_fork_cleanup_files:
exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
exit_sem(p);
bad_fork_cleanup_audit:
audit_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
cgroup_exit(p, cgroup_callbacks_done);
delayacct_tsk_free(p);
if (p->binfmt)
module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);
put_cred(p->real_cred);
put_cred(p->cred);
bad_fork_free:
free_task(p);
fork_out:
return ERR_PTR(retval);
} | 1 | [
"CWE-264"
] | linux-2.6 | 2d5516cbb9daf7d0e342a2e3b0fc6f8c39a81205 | 181,459,718,677,728,530,000,000,000,000,000,000,000 | 377 | copy_process: fix CLONE_PARENT && parent_exec_id interaction
CLONE_PARENT can fool the ->self_exec_id/parent_exec_id logic. If we
re-use the old parent, we must also re-use ->parent_exec_id to make
sure exit_notify() sees the right ->xxx_exec_id's when the CLONE_PARENT'ed
task exits.
Also, move the "p->parent_exec_id = p->self_exec_id" assignment down, to
place the two different cases together.
Signed-off-by: Oleg Nesterov <[email protected]>
Cc: Roland McGrath <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: David Howells <[email protected]>
Cc: Serge E. Hallyn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
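
A minimal sketch of the described change: take parent_exec_id from the same place the parent itself comes from, so the two cases sit together. This mirrors the message's description of the fix.

	/* CLONE_PARENT re-uses the old parent; re-use its exec id too. */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}
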
_AFmoduleinst _af_ms_adpcm_init_decompress (_Track *track, AFvirtualfile *fh,
bool seekok, bool headerless, AFframecount *chunkframes)
{
_AFmoduleinst ret = _AFnewmodinst(&ms_adpcm_decompress);
ms_adpcm_data *d;
AUpvlist pv;
long l;
void *v;
assert(af_ftell(fh) == track->fpos_first_frame);
d = (ms_adpcm_data *) _af_malloc(sizeof (ms_adpcm_data));
d->track = track;
d->fh = fh;
d->track->frames2ignore = 0;
d->track->fpos_next_frame = d->track->fpos_first_frame;
pv = d->track->f.compressionParams;
if (_af_pv_getlong(pv, _AF_MS_ADPCM_NUM_COEFFICIENTS, &l))
d->numCoefficients = l;
else
_af_error(AF_BAD_CODEC_CONFIG, "number of coefficients not set");
if (_af_pv_getptr(pv, _AF_MS_ADPCM_COEFFICIENTS, &v))
memcpy(d->coefficients, v, sizeof (int16_t) * 256 * 2);
else
_af_error(AF_BAD_CODEC_CONFIG, "coefficient array not set");
if (_af_pv_getlong(pv, _AF_SAMPLES_PER_BLOCK, &l))
d->samplesPerBlock = l;
else
_af_error(AF_BAD_CODEC_CONFIG, "samples per block not set");
if (_af_pv_getlong(pv, _AF_BLOCK_SIZE, &l))
d->blockAlign = l;
else
_af_error(AF_BAD_CODEC_CONFIG, "block size not set");
*chunkframes = d->samplesPerBlock / d->track->f.channelCount;
ret.modspec = d;
return ret;
} | 1 | [
"CWE-119"
] | audiofile | e8cf0095b3f319739f9aa1ab5a1aa52b76be8cdd | 139,382,574,856,551,510,000,000,000,000,000,000,000 | 45 | Fix decoding of multi-channel ADPCM audio files. |
static void ms_adpcm_run_pull (_AFmoduleinst *module)
{
ms_adpcm_data *d = (ms_adpcm_data *) module->modspec;
AFframecount frames2read = module->outc->nframes;
AFframecount nframes = 0;
int i, framesPerBlock, blockCount;
ssize_t blocksRead, bytesDecoded;
framesPerBlock = d->samplesPerBlock / d->track->f.channelCount;
assert(module->outc->nframes % framesPerBlock == 0);
blockCount = module->outc->nframes / framesPerBlock;
/* Read the compressed frames. */
blocksRead = af_fread(module->inc->buf, d->blockAlign, blockCount, d->fh);
/* Decompress into module->outc. */
for (i=0; i<blockCount; i++)
{
bytesDecoded = ms_adpcm_decode_block(d,
(uint8_t *) module->inc->buf + i * d->blockAlign,
(int16_t *) module->outc->buf + i * d->samplesPerBlock);
nframes += framesPerBlock;
}
d->track->nextfframe += nframes;
if (blocksRead > 0)
d->track->fpos_next_frame += blocksRead * d->blockAlign;
assert(af_ftell(d->fh) == d->track->fpos_next_frame);
/*
If we got EOF from read, then we return the actual amount read.
Complain only if there should have been more frames in the file.
*/
if (d->track->totalfframes != -1 && nframes != frames2read)
{
/* Report error if we haven't already */
if (d->track->filemodhappy)
{
_af_error(AF_BAD_READ,
"file missing data -- read %d frames, should be %d",
d->track->nextfframe,
d->track->totalfframes);
d->track->filemodhappy = AF_FALSE;
}
}
module->outc->nframes = nframes;
} | 1 | [
"CWE-119"
] | audiofile | e8cf0095b3f319739f9aa1ab5a1aa52b76be8cdd | 23,357,047,633,015,260,000,000,000,000,000,000,000 | 53 | Fix decoding of multi-channel ADPCM audio files. |
static void ms_adpcm_reset2 (_AFmoduleinst *i)
{
ms_adpcm_data *d = (ms_adpcm_data *) i->modspec;
int framesPerBlock;
framesPerBlock = d->samplesPerBlock / d->track->f.channelCount;
d->track->fpos_next_frame = d->track->fpos_first_frame +
d->blockAlign * (d->track->nextfframe / framesPerBlock);
d->track->frames2ignore += d->framesToIgnore;
assert(d->track->nextfframe % framesPerBlock == 0);
} | 1 | [
"CWE-119"
] | audiofile | e8cf0095b3f319739f9aa1ab5a1aa52b76be8cdd | 196,070,143,317,425,100,000,000,000,000,000,000,000 | 13 | Fix decoding of multi-channel ADPCM audio files. |
static void ima_adpcm_reset2 (_AFmoduleinst *i)
{
ima_adpcm_data *d = (ima_adpcm_data *) i->modspec;
int framesPerBlock;
framesPerBlock = d->samplesPerBlock / d->track->f.channelCount;
d->track->fpos_next_frame = d->track->fpos_first_frame +
d->blockAlign * (d->track->nextfframe / framesPerBlock);
d->track->frames2ignore += d->framesToIgnore;
assert(d->track->nextfframe % framesPerBlock == 0);
} | 1 | [
"CWE-119"
] | audiofile | e8cf0095b3f319739f9aa1ab5a1aa52b76be8cdd | 276,876,709,294,592,270,000,000,000,000,000,000,000 | 13 | Fix decoding of multi-channel ADPCM audio files. |
void _af_adpcm_decoder (uint8_t *indata, int16_t *outdata, int len,
struct adpcm_state *state)
{
uint8_t *inp; /* Input buffer pointer */
int16_t *outp; /* output buffer pointer */
int sign; /* Current adpcm sign bit */
int delta; /* Current adpcm output value */
int step; /* Stepsize */
int valpred; /* Predicted value */
int vpdiff; /* Current change to valpred */
int index; /* Current step change index */
int inputbuffer; /* place to keep next 4-bit value */
int bufferstep; /* toggle between inputbuffer/input */
outp = outdata;
inp = indata;
valpred = state->valprev;
index = state->index;
step = stepsizeTable[index];
bufferstep = 0;
for ( ; len > 0 ; len-- ) {
/* Step 1 - get the delta value */
if ( bufferstep ) {
delta = (inputbuffer >> 4) & 0xf;
} else {
inputbuffer = *inp++;
delta = inputbuffer & 0xf;
}
bufferstep = !bufferstep;
/* Step 2 - Find new index value (for later) */
index += indexTable[delta];
if ( index < 0 ) index = 0;
if ( index > 88 ) index = 88;
/* Step 3 - Separate sign and magnitude */
sign = delta & 8;
delta = delta & 7;
/* Step 4 - Compute difference and new predicted value */
/*
** Computes 'vpdiff = (delta+0.5)*step/4', but see comment
** in adpcm_coder.
*/
vpdiff = step >> 3;
if ( delta & 4 ) vpdiff += step;
if ( delta & 2 ) vpdiff += step>>1;
if ( delta & 1 ) vpdiff += step>>2;
if ( sign )
valpred -= vpdiff;
else
valpred += vpdiff;
/* Step 5 - clamp output value */
if ( valpred > 32767 )
valpred = 32767;
else if ( valpred < -32768 )
valpred = -32768;
/* Step 6 - Update step value */
step = stepsizeTable[index];
/* Step 7 - Output value */
*outp++ = valpred;
}
state->valprev = valpred;
state->index = index;
} | 1 | [
"CWE-119"
] | audiofile | e8cf0095b3f319739f9aa1ab5a1aa52b76be8cdd | 182,662,500,351,655,050,000,000,000,000,000,000,000 | 74 | Fix decoding of multi-channel ADPCM audio files. |
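
The decoder above approximates vpdiff = (delta + 0.5) * step / 4 with shift-and-add steps. A small self-contained check (an illustrative harness, not part of the library) makes the approximation concrete:

#include <stdio.h>

int main(void)
{
	int delta = 5, step = 307;	/* arbitrary example values */
	int mag = delta & 7;		/* magnitude, as in the decoder */

	int vpdiff = step >> 3;		/* shift-based approximation */
	if (mag & 4) vpdiff += step;
	if (mag & 2) vpdiff += step >> 1;
	if (mag & 1) vpdiff += step >> 2;

	double exact = (mag + 0.5) * step / 4.0;
	printf("approx=%d exact=%.2f\n", vpdiff, exact);	/* 421 vs ~422.1 */
	return 0;
}
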
static int ima_adpcm_decode_block (ima_adpcm_data *ima, uint8_t *encoded,
int16_t *decoded)
{
int outputLength;
struct adpcm_state state;
outputLength = ima->samplesPerBlock * sizeof (int16_t) *
ima->track->f.channelCount;
state.valprev = (encoded[1]<<8) | encoded[0];
if (encoded[1] & 0x80)
state.valprev -= 0x10000;
state.index = encoded[2];
*decoded++ = state.valprev;
encoded += 4;
_af_adpcm_decoder(encoded, decoded, ima->samplesPerBlock - 1, &state);
return outputLength;
} | 1 | [
"CWE-119"
] | audiofile | e8cf0095b3f319739f9aa1ab5a1aa52b76be8cdd | 283,886,510,527,453,620,000,000,000,000,000,000,000 | 24 | Fix decoding of multi-channel ADPCM audio files. |
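
The commit title says the fix concerns multi-channel files, while ima_adpcm_decode_block() above parses only a single 4-byte block header, which matches a mono layout. A sketch of what multi-channel IMA ADPCM blocks typically require, per the common WAVE DVI/IMA layout (this layout, and the names channelCount, encoded, and states[], are assumptions, not the verbatim patch):

	/* One 4-byte header per channel: 16-bit predictor, step index,
	 * one reserved byte. */
	int c;
	for (c = 0; c < channelCount; c++)
	{
		struct adpcm_state *state = &states[c];

		state->valprev = (encoded[1] << 8) | encoded[0];
		if (encoded[1] & 0x80)
			state->valprev -= 0x10000;
		state->index = encoded[2];	/* encoded[3] is padding */
		encoded += 4;
	}
	/* The sample data that follows is interleaved in 4-byte
	 * (8-sample) groups per channel and must be de-interleaved
	 * while decoding. */
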
_AFmoduleinst _af_ima_adpcm_init_decompress (_Track *track, AFvirtualfile *fh,
bool seekok, bool headerless, AFframecount *chunkframes)
{
_AFmoduleinst ret = _AFnewmodinst(&ima_adpcm_decompress);
ima_adpcm_data *d;
AUpvlist pv;
long l;
assert(af_ftell(fh) == track->fpos_first_frame);
d = (ima_adpcm_data *) _af_malloc(sizeof (ima_adpcm_data));
d->track = track;
d->fh = fh;
d->track->frames2ignore = 0;
d->track->fpos_next_frame = d->track->fpos_first_frame;
pv = d->track->f.compressionParams;
if (_af_pv_getlong(pv, _AF_SAMPLES_PER_BLOCK, &l))
d->samplesPerBlock = l;
else
_af_error(AF_BAD_CODEC_CONFIG, "samples per block not set");
if (_af_pv_getlong(pv, _AF_BLOCK_SIZE, &l))
d->blockAlign = l;
else
_af_error(AF_BAD_CODEC_CONFIG, "block size not set");
*chunkframes = d->samplesPerBlock / d->track->f.channelCount;
ret.modspec = d;
return ret;
} | 1 | [
"CWE-119"
] | audiofile | e8cf0095b3f319739f9aa1ab5a1aa52b76be8cdd | 158,407,911,301,499,650,000,000,000,000,000,000,000 | 35 | Fix decoding of multi-channel ADPCM audio files. |
static void ima_adpcm_reset1 (_AFmoduleinst *i)
{
ima_adpcm_data *d = (ima_adpcm_data *) i->modspec;
AFframecount nextTrackFrame;
int framesPerBlock;
framesPerBlock = d->samplesPerBlock / d->track->f.channelCount;
nextTrackFrame = d->track->nextfframe;
d->track->nextfframe = (nextTrackFrame / framesPerBlock) *
framesPerBlock;
d->framesToIgnore = nextTrackFrame - d->track->nextfframe;
/* postroll = frames2ignore */
} | 1 | [
"CWE-119"
] | audiofile | e8cf0095b3f319739f9aa1ab5a1aa52b76be8cdd | 10,089,811,291,569,344,000,000,000,000,000,000,000 | 15 | Fix decoding of multi-channel ADPCM audio files. |
void _af_adpcm_coder (int16_t *indata, uint8_t *outdata, int len,
struct adpcm_state *state)
{
int16_t *inp; /* Input buffer pointer */
uint8_t *outp; /* Output buffer pointer */
int val; /* Current input sample value */
int sign; /* Current adpcm sign bit */
int delta; /* Current adpcm output value */
int diff; /* Difference between val and valprev */
int step; /* Stepsize */
int valpred; /* Predicted output value */
int vpdiff; /* Current change to valpred */
int index; /* Current step change index */
int outputbuffer; /* place to keep previous 4-bit value */
int bufferstep; /* toggle between outputbuffer/output */
outp = outdata;
inp = indata;
valpred = state->valprev;
index = state->index;
step = stepsizeTable[index];
bufferstep = 1;
for ( ; len > 0 ; len-- ) {
val = *inp++;
/* Step 1 - compute difference with previous value */
diff = val - valpred;
sign = (diff < 0) ? 8 : 0;
if ( sign ) diff = (-diff);
/* Step 2 - Divide and clamp */
/* Note:
** This code *approximately* computes:
** delta = diff*4/step;
** vpdiff = (delta+0.5)*step/4;
** but in shift step bits are dropped. The net result of this is
** that even if you have fast mul/div hardware you cannot put it to
** good use since the fixup would be too expensive.
*/
delta = 0;
vpdiff = (step >> 3);
if ( diff >= step ) {
delta = 4;
diff -= step;
vpdiff += step;
}
step >>= 1;
if ( diff >= step ) {
delta |= 2;
diff -= step;
vpdiff += step;
}
step >>= 1;
if ( diff >= step ) {
delta |= 1;
vpdiff += step;
}
/* Step 3 - Update previous value */
if ( sign )
valpred -= vpdiff;
else
valpred += vpdiff;
/* Step 4 - Clamp previous value to 16 bits */
if ( valpred > 32767 )
valpred = 32767;
else if ( valpred < -32768 )
valpred = -32768;
/* Step 5 - Assemble value, update index and step values */
delta |= sign;
index += indexTable[delta];
if ( index < 0 ) index = 0;
if ( index > 88 ) index = 88;
step = stepsizeTable[index];
/* Step 6 - Output value */
if ( bufferstep ) {
outputbuffer = delta & 0x0f;
} else {
*outp++ = ((delta << 4) & 0xf0) | outputbuffer;
}
bufferstep = !bufferstep;
}
/* Output last step, if needed */
if ( !bufferstep )
*outp++ = outputbuffer;
state->valprev = valpred;
state->index = index;
} | 1 | [
"CWE-119"
] | audiofile | e8cf0095b3f319739f9aa1ab5a1aa52b76be8cdd | 151,725,545,189,119,400,000,000,000,000,000,000,000 | 98 | Fix decoding of multi-channel ADPCM audio files. |
static void ima_adpcm_run_pull (_AFmoduleinst *module)
{
ima_adpcm_data *d = (ima_adpcm_data *) module->modspec;
AFframecount frames2read = module->outc->nframes;
AFframecount nframes = 0;
int i, framesPerBlock, blockCount;
ssize_t blocksRead, bytesDecoded;
framesPerBlock = d->samplesPerBlock / d->track->f.channelCount;
assert(module->outc->nframes % framesPerBlock == 0);
blockCount = module->outc->nframes / framesPerBlock;
/* Read the compressed frames. */
blocksRead = af_fread(module->inc->buf, d->blockAlign, blockCount, d->fh);
/* This condition would indicate that the file is bad. */
if (blocksRead < 0)
{
if (d->track->filemodhappy)
{
_af_error(AF_BAD_READ, "file missing data");
d->track->filemodhappy = AF_FALSE;
}
}
if (blocksRead < blockCount)
blockCount = blocksRead;
/* Decompress into module->outc. */
for (i=0; i<blockCount; i++)
{
bytesDecoded = ima_adpcm_decode_block(d,
(uint8_t *) module->inc->buf + i * d->blockAlign,
(int16_t *) module->outc->buf + i * d->samplesPerBlock);
nframes += framesPerBlock;
}
d->track->nextfframe += nframes;
if (blocksRead > 0)
d->track->fpos_next_frame += blocksRead * d->blockAlign;
assert(af_ftell(d->fh) == d->track->fpos_next_frame);
/*
If we got EOF from read, then we return the actual amount read.
Complain only if there should have been more frames in the file.
*/
if (d->track->totalfframes != -1 && nframes != frames2read)
{
/* Report error if we haven't already */
if (d->track->filemodhappy)
{
_af_error(AF_BAD_READ,
"file missing data -- read %d frames, should be %d",
d->track->nextfframe,
d->track->totalfframes);
d->track->filemodhappy = AF_FALSE;
}
}
module->outc->nframes = nframes;
} | 1 | [
"CWE-119"
] | audiofile | e8cf0095b3f319739f9aa1ab5a1aa52b76be8cdd | 135,392,062,269,304,940,000,000,000,000,000,000,000 | 66 | Fix decoding of multi-channel ADPCM audio files. |