// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
* Antoine Tenart <[email protected]>
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/usb/chipidea.h>
#include <linux/usb/hcd.h>
#include <linux/usb/ulpi.h>
#include "ci.h"
struct ci_hdrc_usb2_priv {
struct platform_device *ci_pdev;
struct clk *clk;
};
static const struct ci_hdrc_platform_data ci_default_pdata = {
.capoffset = DEF_CAPOFFSET,
.flags = CI_HDRC_DISABLE_STREAMING,
};
static const struct ci_hdrc_platform_data ci_zynq_pdata = {
.capoffset = DEF_CAPOFFSET,
.flags = CI_HDRC_PHY_VBUS_CONTROL,
};
static const struct ci_hdrc_platform_data ci_zevio_pdata = {
.capoffset = DEF_CAPOFFSET,
.flags = CI_HDRC_REGS_SHARED | CI_HDRC_FORCE_FULLSPEED,
};
static const struct of_device_id ci_hdrc_usb2_of_match[] = {
{ .compatible = "chipidea,usb2" },
{ .compatible = "xlnx,zynq-usb-2.20a", .data = &ci_zynq_pdata },
{ .compatible = "lsi,zevio-usb", .data = &ci_zevio_pdata },
{ }
};
MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match);
static int ci_hdrc_usb2_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ci_hdrc_usb2_priv *priv;
struct ci_hdrc_platform_data *ci_pdata = dev_get_platdata(dev);
const struct ci_hdrc_platform_data *data;
int ret;
if (!ci_pdata) {
ci_pdata = devm_kmalloc(dev, sizeof(*ci_pdata), GFP_KERNEL);
if (!ci_pdata)
return -ENOMEM;
*ci_pdata = ci_default_pdata; /* struct copy */
}
data = device_get_match_data(&pdev->dev);
if (data)
/* struct copy */
*ci_pdata = *data;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(dev, "failed to enable the clock: %d\n", ret);
return ret;
}
ci_pdata->name = dev_name(dev);
priv->ci_pdev = ci_hdrc_add_device(dev, pdev->resource,
pdev->num_resources, ci_pdata);
if (IS_ERR(priv->ci_pdev)) {
ret = PTR_ERR(priv->ci_pdev);
if (ret != -EPROBE_DEFER)
dev_err(dev,
"failed to register ci_hdrc platform device: %d\n",
ret);
goto clk_err;
}
platform_set_drvdata(pdev, priv);
pm_runtime_no_callbacks(dev);
pm_runtime_enable(dev);
return 0;
clk_err:
clk_disable_unprepare(priv->clk);
return ret;
}
static void ci_hdrc_usb2_remove(struct platform_device *pdev)
{
struct ci_hdrc_usb2_priv *priv = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
ci_hdrc_remove_device(priv->ci_pdev);
clk_disable_unprepare(priv->clk);
}
static struct platform_driver ci_hdrc_usb2_driver = {
.probe = ci_hdrc_usb2_probe,
.remove = ci_hdrc_usb2_remove,
.driver = {
.name = "chipidea-usb2",
.of_match_table = ci_hdrc_usb2_of_match,
},
};
module_platform_driver(ci_hdrc_usb2_driver);
MODULE_DESCRIPTION("ChipIdea HDRC USB2 binding for ci13xxx");
MODULE_AUTHOR("Antoine Tenart <[email protected]>");
MODULE_LICENSE("GPL");
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 NVIDIA Corporation
*/
#ifndef TEGRA_GR2D_H
#define TEGRA_GR2D_H
#define GR2D_UA_BASE_ADDR 0x1a
#define GR2D_VA_BASE_ADDR 0x1b
#define GR2D_PAT_BASE_ADDR 0x26
#define GR2D_DSTA_BASE_ADDR 0x2b
#define GR2D_DSTB_BASE_ADDR 0x2c
#define GR2D_DSTC_BASE_ADDR 0x2d
#define GR2D_SRCA_BASE_ADDR 0x31
#define GR2D_SRCB_BASE_ADDR 0x32
#define GR2D_PATBASE_ADDR 0x47
#define GR2D_SRC_BASE_ADDR_SB 0x48
#define GR2D_DSTA_BASE_ADDR_SB 0x49
#define GR2D_DSTB_BASE_ADDR_SB 0x4a
#define GR2D_UA_BASE_ADDR_SB 0x4b
#define GR2D_VA_BASE_ADDR_SB 0x4c
#define GR2D_NUM_REGS 0x4d
#endif
|
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright 2022 Broadcom Ltd.
*/
/dts-v1/;
#include "bcm63146.dtsi"
/ {
model = "Broadcom BCM963146 Reference Board";
compatible = "brcm,bcm963146", "brcm,bcm63146", "brcm,bcmbca";
aliases {
serial0 = &uart0;
};
chosen {
stdout-path = "serial0:115200n8";
};
memory@0 {
device_type = "memory";
reg = <0x0 0x0 0x0 0x08000000>;
};
};
&uart0 {
status = "okay";
};
&hsspi {
status = "okay";
};
&nand_controller {
brcm,wp-not-connected;
status = "okay";
};
&nandcs {
nand-on-flash-bbt;
brcm,nand-ecc-use-strap;
};
|
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/ctx_sk_msg.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("sk_msg")
__description("valid access family in SK_MSG")
__success
__naked void access_family_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_family]); \
exit; \
" :
: __imm_const(sk_msg_md_family, offsetof(struct sk_msg_md, family))
: __clobber_all);
}
SEC("sk_msg")
__description("valid access remote_ip4 in SK_MSG")
__success
__naked void remote_ip4_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip4]); \
exit; \
" :
: __imm_const(sk_msg_md_remote_ip4, offsetof(struct sk_msg_md, remote_ip4))
: __clobber_all);
}
SEC("sk_msg")
__description("valid access local_ip4 in SK_MSG")
__success
__naked void local_ip4_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_local_ip4]); \
exit; \
" :
: __imm_const(sk_msg_md_local_ip4, offsetof(struct sk_msg_md, local_ip4))
: __clobber_all);
}
SEC("sk_msg")
__description("valid access remote_port in SK_MSG")
__success
__naked void remote_port_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_remote_port]); \
exit; \
" :
: __imm_const(sk_msg_md_remote_port, offsetof(struct sk_msg_md, remote_port))
: __clobber_all);
}
SEC("sk_msg")
__description("valid access local_port in SK_MSG")
__success
__naked void local_port_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_local_port]); \
exit; \
" :
: __imm_const(sk_msg_md_local_port, offsetof(struct sk_msg_md, local_port))
: __clobber_all);
}
SEC("sk_skb")
__description("valid access remote_ip6 in SK_MSG")
__success
__naked void remote_ip6_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_0]); \
r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_1]); \
r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_2]); \
r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_3]); \
exit; \
" :
: __imm_const(sk_msg_md_remote_ip6_0, offsetof(struct sk_msg_md, remote_ip6[0])),
__imm_const(sk_msg_md_remote_ip6_1, offsetof(struct sk_msg_md, remote_ip6[1])),
__imm_const(sk_msg_md_remote_ip6_2, offsetof(struct sk_msg_md, remote_ip6[2])),
__imm_const(sk_msg_md_remote_ip6_3, offsetof(struct sk_msg_md, remote_ip6[3]))
: __clobber_all);
}
SEC("sk_skb")
__description("valid access local_ip6 in SK_MSG")
__success
__naked void local_ip6_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_0]); \
r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_1]); \
r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_2]); \
r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_3]); \
exit; \
" :
: __imm_const(sk_msg_md_local_ip6_0, offsetof(struct sk_msg_md, local_ip6[0])),
__imm_const(sk_msg_md_local_ip6_1, offsetof(struct sk_msg_md, local_ip6[1])),
__imm_const(sk_msg_md_local_ip6_2, offsetof(struct sk_msg_md, local_ip6[2])),
__imm_const(sk_msg_md_local_ip6_3, offsetof(struct sk_msg_md, local_ip6[3]))
: __clobber_all);
}
SEC("sk_msg")
__description("valid access size in SK_MSG")
__success
__naked void access_size_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_size]); \
exit; \
" :
: __imm_const(sk_msg_md_size, offsetof(struct sk_msg_md, size))
: __clobber_all);
}
SEC("sk_msg")
__description("invalid 64B read of size in SK_MSG")
__failure __msg("invalid bpf_context access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void of_size_in_sk_msg(void)
{
asm volatile (" \
r2 = *(u64*)(r1 + %[sk_msg_md_size]); \
exit; \
" :
: __imm_const(sk_msg_md_size, offsetof(struct sk_msg_md, size))
: __clobber_all);
}
SEC("sk_msg")
__description("invalid read past end of SK_MSG")
__failure __msg("invalid bpf_context access")
__naked void past_end_of_sk_msg(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__imm_0]); \
exit; \
" :
: __imm_const(__imm_0, offsetof(struct sk_msg_md, size) + 4)
: __clobber_all);
}
SEC("sk_msg")
__description("invalid read offset in SK_MSG")
__failure __msg("invalid bpf_context access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void read_offset_in_sk_msg(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__imm_0]); \
exit; \
" :
: __imm_const(__imm_0, offsetof(struct sk_msg_md, family) + 1)
: __clobber_all);
}
SEC("sk_msg")
__description("direct packet read for SK_MSG")
__success
__naked void packet_read_for_sk_msg(void)
{
asm volatile (" \
r2 = *(u64*)(r1 + %[sk_msg_md_data]); \
r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)),
__imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end))
: __clobber_all);
}
SEC("sk_msg")
__description("direct packet write for SK_MSG")
__success
__naked void packet_write_for_sk_msg(void)
{
asm volatile (" \
r2 = *(u64*)(r1 + %[sk_msg_md_data]); \
r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
*(u8*)(r2 + 0) = r2; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)),
__imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end))
: __clobber_all);
}
SEC("sk_msg")
__description("overlapping checks for direct packet access SK_MSG")
__success
__naked void direct_packet_access_sk_msg(void)
{
asm volatile (" \
r2 = *(u64*)(r1 + %[sk_msg_md_data]); \
r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r1 = r2; \
r1 += 6; \
if r1 > r3 goto l0_%=; \
r0 = *(u16*)(r2 + 6); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)),
__imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
|
/* Copyright (C) 2005 - 2008 Jeff Dike <jdike@{linux.intel,addtoit}.com> */
/* Much of this ripped from drivers/char/hw_random.c, see there for other
* copyright.
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*/
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <init.h>
#include <irq_kern.h>
#include <os.h>
/*
* core module information
*/
#define RNG_MODULE_NAME "hw_random"
/* Changed at init time, in the non-modular case, and at module load
* time, in the module case. Presumably, the module subsystem
* protects against a module being loaded twice at the same time.
*/
static int random_fd = -1;
static struct hwrng hwrng;
static DECLARE_COMPLETION(have_data);
static int rng_dev_read(struct hwrng *rng, void *buf, size_t max, bool block)
{
int ret;
for (;;) {
ret = os_read_file(random_fd, buf, max);
if (block && ret == -EAGAIN) {
add_sigio_fd(random_fd);
ret = wait_for_completion_killable(&have_data);
ignore_sigio_fd(random_fd);
deactivate_fd(random_fd, RANDOM_IRQ);
if (ret < 0)
break;
} else {
break;
}
}
return ret != -EAGAIN ? ret : 0;
}
static irqreturn_t random_interrupt(int irq, void *data)
{
complete(&have_data);
return IRQ_HANDLED;
}
/*
* rng_init - initialize RNG module
*/
static int __init rng_init (void)
{
int err;
err = os_open_file("/dev/random", of_read(OPENFLAGS()), 0);
if (err < 0)
goto out;
random_fd = err;
err = um_request_irq(RANDOM_IRQ, random_fd, IRQ_READ, random_interrupt,
0, "random", NULL);
if (err < 0)
goto err_out_cleanup_hw;
sigio_broken(random_fd);
hwrng.name = RNG_MODULE_NAME;
hwrng.read = rng_dev_read;
err = hwrng_register(&hwrng);
if (err) {
pr_err(RNG_MODULE_NAME " registering failed (%d)\n", err);
goto err_out_cleanup_hw;
}
out:
return err;
err_out_cleanup_hw:
os_close_file(random_fd);
random_fd = -1;
goto out;
}
/*
* rng_cleanup - shutdown RNG module
*/
static void cleanup(void)
{
free_irq_by_fd(random_fd);
os_close_file(random_fd);
}
static void __exit rng_cleanup(void)
{
hwrng_unregister(&hwrng);
os_close_file(random_fd);
}
module_init (rng_init);
module_exit (rng_cleanup);
__uml_exitcall(cleanup);
MODULE_DESCRIPTION("UML Host Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");
|
/* SPDX-License-Identifier: GPL-2.0
* Copyright 2021 NXP
*/
#ifndef _NET_DSA_TAG_MV88E6XXX_H
#define _NET_DSA_TAG_MV88E6XXX_H
#include <linux/if_vlan.h>
#define MV88E6XXX_VID_STANDALONE 0
#define MV88E6XXX_VID_BRIDGED (VLAN_N_VID - 1)
#endif
|
// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
/*
* libfdt - Flat Device Tree manipulation
* Copyright (C) 2006 David Gibson, IBM Corporation.
*/
#include "libfdt_env.h"
#include <fdt.h>
#include <libfdt.h>
#include "libfdt_internal.h"
static int fdt_nodename_eq_(const void *fdt, int offset,
const char *s, int len)
{
int olen;
const char *p = fdt_get_name(fdt, offset, &olen);
if (!p || olen < len)
/* short match */
return 0;
if (memcmp(p, s, len) != 0)
return 0;
if (p[len] == '\0')
return 1;
else if (!memchr(s, '@', len) && (p[len] == '@'))
return 1;
else
return 0;
}
const char *fdt_get_string(const void *fdt, int stroffset, int *lenp)
{
int32_t totalsize;
uint32_t absoffset;
size_t len;
int err;
const char *s, *n;
if (can_assume(VALID_INPUT)) {
s = (const char *)fdt + fdt_off_dt_strings(fdt) + stroffset;
if (lenp)
*lenp = strlen(s);
return s;
}
totalsize = fdt_ro_probe_(fdt);
err = totalsize;
if (totalsize < 0)
goto fail;
err = -FDT_ERR_BADOFFSET;
absoffset = stroffset + fdt_off_dt_strings(fdt);
if (absoffset >= (unsigned)totalsize)
goto fail;
len = totalsize - absoffset;
if (fdt_magic(fdt) == FDT_MAGIC) {
if (stroffset < 0)
goto fail;
if (can_assume(LATEST) || fdt_version(fdt) >= 17) {
if ((unsigned)stroffset >= fdt_size_dt_strings(fdt))
goto fail;
if ((fdt_size_dt_strings(fdt) - stroffset) < len)
len = fdt_size_dt_strings(fdt) - stroffset;
}
} else if (fdt_magic(fdt) == FDT_SW_MAGIC) {
unsigned int sw_stroffset = -stroffset;
if ((stroffset >= 0) ||
(sw_stroffset > fdt_size_dt_strings(fdt)))
goto fail;
if (sw_stroffset < len)
len = sw_stroffset;
} else {
err = -FDT_ERR_INTERNAL;
goto fail;
}
s = (const char *)fdt + absoffset;
n = memchr(s, '\0', len);
if (!n) {
/* missing terminating NULL */
err = -FDT_ERR_TRUNCATED;
goto fail;
}
if (lenp)
*lenp = n - s;
return s;
fail:
if (lenp)
*lenp = err;
return NULL;
}
const char *fdt_string(const void *fdt, int stroffset)
{
return fdt_get_string(fdt, stroffset, NULL);
}
static int fdt_string_eq_(const void *fdt, int stroffset,
const char *s, int len)
{
int slen;
const char *p = fdt_get_string(fdt, stroffset, &slen);
return p && (slen == len) && (memcmp(p, s, len) == 0);
}
int fdt_find_max_phandle(const void *fdt, uint32_t *phandle)
{
uint32_t max = 0;
int offset = -1;
while (true) {
uint32_t value;
offset = fdt_next_node(fdt, offset, NULL);
if (offset < 0) {
if (offset == -FDT_ERR_NOTFOUND)
break;
return offset;
}
value = fdt_get_phandle(fdt, offset);
if (value > max)
max = value;
}
if (phandle)
*phandle = max;
return 0;
}
int fdt_generate_phandle(const void *fdt, uint32_t *phandle)
{
uint32_t max;
int err;
err = fdt_find_max_phandle(fdt, &max);
if (err < 0)
return err;
if (max == FDT_MAX_PHANDLE)
return -FDT_ERR_NOPHANDLES;
if (phandle)
*phandle = max + 1;
return 0;
}
static const struct fdt_reserve_entry *fdt_mem_rsv(const void *fdt, int n)
{
unsigned int offset = n * sizeof(struct fdt_reserve_entry);
unsigned int absoffset = fdt_off_mem_rsvmap(fdt) + offset;
if (!can_assume(VALID_INPUT)) {
if (absoffset < fdt_off_mem_rsvmap(fdt))
return NULL;
if (absoffset > fdt_totalsize(fdt) -
sizeof(struct fdt_reserve_entry))
return NULL;
}
return fdt_mem_rsv_(fdt, n);
}
int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size)
{
const struct fdt_reserve_entry *re;
FDT_RO_PROBE(fdt);
re = fdt_mem_rsv(fdt, n);
if (!can_assume(VALID_INPUT) && !re)
return -FDT_ERR_BADOFFSET;
*address = fdt64_ld_(&re->address);
*size = fdt64_ld_(&re->size);
return 0;
}
int fdt_num_mem_rsv(const void *fdt)
{
int i;
const struct fdt_reserve_entry *re;
for (i = 0; (re = fdt_mem_rsv(fdt, i)) != NULL; i++) {
if (fdt64_ld_(&re->size) == 0)
return i;
}
return -FDT_ERR_TRUNCATED;
}
static int nextprop_(const void *fdt, int offset)
{
uint32_t tag;
int nextoffset;
do {
tag = fdt_next_tag(fdt, offset, &nextoffset);
switch (tag) {
case FDT_END:
if (nextoffset >= 0)
return -FDT_ERR_BADSTRUCTURE;
else
return nextoffset;
case FDT_PROP:
return offset;
}
offset = nextoffset;
} while (tag == FDT_NOP);
return -FDT_ERR_NOTFOUND;
}
int fdt_subnode_offset_namelen(const void *fdt, int offset,
const char *name, int namelen)
{
int depth;
FDT_RO_PROBE(fdt);
for (depth = 0;
(offset >= 0) && (depth >= 0);
offset = fdt_next_node(fdt, offset, &depth))
if ((depth == 1)
&& fdt_nodename_eq_(fdt, offset, name, namelen))
return offset;
if (depth < 0)
return -FDT_ERR_NOTFOUND;
return offset; /* error */
}
int fdt_subnode_offset(const void *fdt, int parentoffset,
const char *name)
{
return fdt_subnode_offset_namelen(fdt, parentoffset, name, strlen(name));
}
int fdt_path_offset_namelen(const void *fdt, const char *path, int namelen)
{
const char *end = path + namelen;
const char *p = path;
int offset = 0;
FDT_RO_PROBE(fdt);
if (!can_assume(VALID_INPUT) && namelen <= 0)
return -FDT_ERR_BADPATH;
/* see if we have an alias */
if (*path != '/') {
const char *q = memchr(path, '/', end - p);
if (!q)
q = end;
p = fdt_get_alias_namelen(fdt, p, q - p);
if (!p)
return -FDT_ERR_BADPATH;
offset = fdt_path_offset(fdt, p);
p = q;
}
while (p < end) {
const char *q;
while (*p == '/') {
p++;
if (p == end)
return offset;
}
q = memchr(p, '/', end - p);
if (! q)
q = end;
offset = fdt_subnode_offset_namelen(fdt, offset, p, q-p);
if (offset < 0)
return offset;
p = q;
}
return offset;
}
int fdt_path_offset(const void *fdt, const char *path)
{
return fdt_path_offset_namelen(fdt, path, strlen(path));
}
const char *fdt_get_name(const void *fdt, int nodeoffset, int *len)
{
const struct fdt_node_header *nh = fdt_offset_ptr_(fdt, nodeoffset);
const char *nameptr;
int err;
if (((err = fdt_ro_probe_(fdt)) < 0)
|| ((err = fdt_check_node_offset_(fdt, nodeoffset)) < 0))
goto fail;
nameptr = nh->name;
if (!can_assume(LATEST) && fdt_version(fdt) < 0x10) {
/*
* For old FDT versions, match the naming conventions of V16:
* give only the leaf name (after all /). The actual tree
* contents are loosely checked.
*/
const char *leaf;
leaf = strrchr(nameptr, '/');
if (leaf == NULL) {
err = -FDT_ERR_BADSTRUCTURE;
goto fail;
}
nameptr = leaf+1;
}
if (len)
*len = strlen(nameptr);
return nameptr;
fail:
if (len)
*len = err;
return NULL;
}
int fdt_first_property_offset(const void *fdt, int nodeoffset)
{
int offset;
if ((offset = fdt_check_node_offset_(fdt, nodeoffset)) < 0)
return offset;
return nextprop_(fdt, offset);
}
int fdt_next_property_offset(const void *fdt, int offset)
{
if ((offset = fdt_check_prop_offset_(fdt, offset)) < 0)
return offset;
return nextprop_(fdt, offset);
}
static const struct fdt_property *fdt_get_property_by_offset_(const void *fdt,
int offset,
int *lenp)
{
int err;
const struct fdt_property *prop;
if (!can_assume(VALID_INPUT) &&
(err = fdt_check_prop_offset_(fdt, offset)) < 0) {
if (lenp)
*lenp = err;
return NULL;
}
prop = fdt_offset_ptr_(fdt, offset);
if (lenp)
*lenp = fdt32_ld_(&prop->len);
return prop;
}
const struct fdt_property *fdt_get_property_by_offset(const void *fdt,
int offset,
int *lenp)
{
/* Prior to version 16, properties may need realignment
* and this API does not work. fdt_getprop_*() will, however. */
if (!can_assume(LATEST) && fdt_version(fdt) < 0x10) {
if (lenp)
*lenp = -FDT_ERR_BADVERSION;
return NULL;
}
return fdt_get_property_by_offset_(fdt, offset, lenp);
}
static const struct fdt_property *fdt_get_property_namelen_(const void *fdt,
int offset,
const char *name,
int namelen,
int *lenp,
int *poffset)
{
for (offset = fdt_first_property_offset(fdt, offset);
(offset >= 0);
(offset = fdt_next_property_offset(fdt, offset))) {
const struct fdt_property *prop;
prop = fdt_get_property_by_offset_(fdt, offset, lenp);
if (!can_assume(LIBFDT_FLAWLESS) && !prop) {
offset = -FDT_ERR_INTERNAL;
break;
}
if (fdt_string_eq_(fdt, fdt32_ld_(&prop->nameoff),
name, namelen)) {
if (poffset)
*poffset = offset;
return prop;
}
}
if (lenp)
*lenp = offset;
return NULL;
}
const struct fdt_property *fdt_get_property_namelen(const void *fdt,
int offset,
const char *name,
int namelen, int *lenp)
{
/* Prior to version 16, properties may need realignment
* and this API does not work. fdt_getprop_*() will, however. */
if (!can_assume(LATEST) && fdt_version(fdt) < 0x10) {
if (lenp)
*lenp = -FDT_ERR_BADVERSION;
return NULL;
}
return fdt_get_property_namelen_(fdt, offset, name, namelen, lenp,
NULL);
}
const struct fdt_property *fdt_get_property(const void *fdt,
int nodeoffset,
const char *name, int *lenp)
{
return fdt_get_property_namelen(fdt, nodeoffset, name,
strlen(name), lenp);
}
const void *fdt_getprop_namelen(const void *fdt, int nodeoffset,
const char *name, int namelen, int *lenp)
{
int poffset;
const struct fdt_property *prop;
prop = fdt_get_property_namelen_(fdt, nodeoffset, name, namelen, lenp,
&poffset);
if (!prop)
return NULL;
/* Handle realignment */
if (!can_assume(LATEST) && fdt_version(fdt) < 0x10 &&
(poffset + sizeof(*prop)) % 8 && fdt32_ld_(&prop->len) >= 8)
return prop->data + 4;
return prop->data;
}
const void *fdt_getprop_by_offset(const void *fdt, int offset,
const char **namep, int *lenp)
{
const struct fdt_property *prop;
prop = fdt_get_property_by_offset_(fdt, offset, lenp);
if (!prop)
return NULL;
if (namep) {
const char *name;
int namelen;
if (!can_assume(VALID_INPUT)) {
name = fdt_get_string(fdt, fdt32_ld_(&prop->nameoff),
&namelen);
*namep = name;
if (!name) {
if (lenp)
*lenp = namelen;
return NULL;
}
} else {
*namep = fdt_string(fdt, fdt32_ld_(&prop->nameoff));
}
}
/* Handle realignment */
if (!can_assume(LATEST) && fdt_version(fdt) < 0x10 &&
(offset + sizeof(*prop)) % 8 && fdt32_ld_(&prop->len) >= 8)
return prop->data + 4;
return prop->data;
}
const void *fdt_getprop(const void *fdt, int nodeoffset,
const char *name, int *lenp)
{
return fdt_getprop_namelen(fdt, nodeoffset, name, strlen(name), lenp);
}
uint32_t fdt_get_phandle(const void *fdt, int nodeoffset)
{
const fdt32_t *php;
int len;
/* FIXME: This is a bit sub-optimal, since we potentially scan
* over all the properties twice. */
php = fdt_getprop(fdt, nodeoffset, "phandle", &len);
if (!php || (len != sizeof(*php))) {
php = fdt_getprop(fdt, nodeoffset, "linux,phandle", &len);
if (!php || (len != sizeof(*php)))
return 0;
}
return fdt32_ld_(php);
}
static const void *fdt_path_getprop_namelen(const void *fdt, const char *path,
const char *propname, int propnamelen,
int *lenp)
{
int offset = fdt_path_offset(fdt, path);
if (offset < 0)
return NULL;
return fdt_getprop_namelen(fdt, offset, propname, propnamelen, lenp);
}
const char *fdt_get_alias_namelen(const void *fdt,
const char *name, int namelen)
{
int len;
const char *alias;
alias = fdt_path_getprop_namelen(fdt, "/aliases", name, namelen, &len);
if (!can_assume(VALID_DTB) &&
!(alias && len > 0 && alias[len - 1] == '\0' && *alias == '/'))
return NULL;
return alias;
}
const char *fdt_get_alias(const void *fdt, const char *name)
{
return fdt_get_alias_namelen(fdt, name, strlen(name));
}
const char *fdt_get_symbol_namelen(const void *fdt,
const char *name, int namelen)
{
return fdt_path_getprop_namelen(fdt, "/__symbols__", name, namelen, NULL);
}
const char *fdt_get_symbol(const void *fdt, const char *name)
{
return fdt_get_symbol_namelen(fdt, name, strlen(name));
}
int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen)
{
int pdepth = 0, p = 0;
int offset, depth, namelen;
const char *name;
FDT_RO_PROBE(fdt);
if (buflen < 2)
return -FDT_ERR_NOSPACE;
for (offset = 0, depth = 0;
(offset >= 0) && (offset <= nodeoffset);
offset = fdt_next_node(fdt, offset, &depth)) {
while (pdepth > depth) {
do {
p--;
} while (buf[p-1] != '/');
pdepth--;
}
if (pdepth >= depth) {
name = fdt_get_name(fdt, offset, &namelen);
if (!name)
return namelen;
if ((p + namelen + 1) <= buflen) {
memcpy(buf + p, name, namelen);
p += namelen;
buf[p++] = '/';
pdepth++;
}
}
if (offset == nodeoffset) {
if (pdepth < (depth + 1))
return -FDT_ERR_NOSPACE;
if (p > 1) /* special case so that root path is "/", not "" */
p--;
buf[p] = '\0';
return 0;
}
}
if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0))
return -FDT_ERR_BADOFFSET;
else if (offset == -FDT_ERR_BADOFFSET)
return -FDT_ERR_BADSTRUCTURE;
return offset; /* error from fdt_next_node() */
}
int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset,
int supernodedepth, int *nodedepth)
{
int offset, depth;
int supernodeoffset = -FDT_ERR_INTERNAL;
FDT_RO_PROBE(fdt);
if (supernodedepth < 0)
return -FDT_ERR_NOTFOUND;
for (offset = 0, depth = 0;
(offset >= 0) && (offset <= nodeoffset);
offset = fdt_next_node(fdt, offset, &depth)) {
if (depth == supernodedepth)
supernodeoffset = offset;
if (offset == nodeoffset) {
if (nodedepth)
*nodedepth = depth;
if (supernodedepth > depth)
return -FDT_ERR_NOTFOUND;
else
return supernodeoffset;
}
}
if (!can_assume(VALID_INPUT)) {
if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0))
return -FDT_ERR_BADOFFSET;
else if (offset == -FDT_ERR_BADOFFSET)
return -FDT_ERR_BADSTRUCTURE;
}
return offset; /* error from fdt_next_node() */
}
int fdt_node_depth(const void *fdt, int nodeoffset)
{
int nodedepth;
int err;
err = fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, &nodedepth);
if (err)
return (can_assume(LIBFDT_FLAWLESS) || err < 0) ? err :
-FDT_ERR_INTERNAL;
return nodedepth;
}
int fdt_parent_offset(const void *fdt, int nodeoffset)
{
int nodedepth = fdt_node_depth(fdt, nodeoffset);
if (nodedepth < 0)
return nodedepth;
return fdt_supernode_atdepth_offset(fdt, nodeoffset,
nodedepth - 1, NULL);
}
int fdt_node_offset_by_prop_value(const void *fdt, int startoffset,
const char *propname,
const void *propval, int proplen)
{
int offset;
const void *val;
int len;
FDT_RO_PROBE(fdt);
/* FIXME: The algorithm here is pretty horrible: we scan each
* property of a node in fdt_getprop(), then if that didn't
* find what we want, we scan over them again making our way
* to the next node. Still it's the easiest to implement
* approach; performance can come later. */
for (offset = fdt_next_node(fdt, startoffset, NULL);
offset >= 0;
offset = fdt_next_node(fdt, offset, NULL)) {
val = fdt_getprop(fdt, offset, propname, &len);
if (val && (len == proplen)
&& (memcmp(val, propval, len) == 0))
return offset;
}
return offset; /* error from fdt_next_node() */
}
int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle)
{
int offset;
if ((phandle == 0) || (phandle == ~0U))
return -FDT_ERR_BADPHANDLE;
FDT_RO_PROBE(fdt);
/* FIXME: The algorithm here is pretty horrible: we
* potentially scan each property of a node in
* fdt_get_phandle(), then if that didn't find what
* we want, we scan over them again making our way to the next
* node. Still it's the easiest to implement approach;
* performance can come later. */
for (offset = fdt_next_node(fdt, -1, NULL);
offset >= 0;
offset = fdt_next_node(fdt, offset, NULL)) {
if (fdt_get_phandle(fdt, offset) == phandle)
return offset;
}
return offset; /* error from fdt_next_node() */
}
int fdt_stringlist_contains(const char *strlist, int listlen, const char *str)
{
int len = strlen(str);
const char *p;
while (listlen >= len) {
if (memcmp(str, strlist, len+1) == 0)
return 1;
p = memchr(strlist, '\0', listlen);
if (!p)
return 0; /* malformed strlist.. */
listlen -= (p-strlist) + 1;
strlist = p + 1;
}
return 0;
}
int fdt_stringlist_count(const void *fdt, int nodeoffset, const char *property)
{
const char *list, *end;
int length, count = 0;
list = fdt_getprop(fdt, nodeoffset, property, &length);
if (!list)
return length;
end = list + length;
while (list < end) {
length = strnlen(list, end - list) + 1;
/* Abort if the last string isn't properly NUL-terminated. */
if (list + length > end)
return -FDT_ERR_BADVALUE;
list += length;
count++;
}
return count;
}
int fdt_stringlist_search(const void *fdt, int nodeoffset, const char *property,
const char *string)
{
int length, len, idx = 0;
const char *list, *end;
list = fdt_getprop(fdt, nodeoffset, property, &length);
if (!list)
return length;
len = strlen(string) + 1;
end = list + length;
while (list < end) {
length = strnlen(list, end - list) + 1;
/* Abort if the last string isn't properly NUL-terminated. */
if (list + length > end)
return -FDT_ERR_BADVALUE;
if (length == len && memcmp(list, string, length) == 0)
return idx;
list += length;
idx++;
}
return -FDT_ERR_NOTFOUND;
}
const char *fdt_stringlist_get(const void *fdt, int nodeoffset,
const char *property, int idx,
int *lenp)
{
const char *list, *end;
int length;
list = fdt_getprop(fdt, nodeoffset, property, &length);
if (!list) {
if (lenp)
*lenp = length;
return NULL;
}
end = list + length;
while (list < end) {
length = strnlen(list, end - list) + 1;
/* Abort if the last string isn't properly NUL-terminated. */
if (list + length > end) {
if (lenp)
*lenp = -FDT_ERR_BADVALUE;
return NULL;
}
if (idx == 0) {
if (lenp)
*lenp = length - 1;
return list;
}
list += length;
idx--;
}
if (lenp)
*lenp = -FDT_ERR_NOTFOUND;
return NULL;
}
int fdt_node_check_compatible(const void *fdt, int nodeoffset,
const char *compatible)
{
const void *prop;
int len;
prop = fdt_getprop(fdt, nodeoffset, "compatible", &len);
if (!prop)
return len;
return !fdt_stringlist_contains(prop, len, compatible);
}
int fdt_node_offset_by_compatible(const void *fdt, int startoffset,
const char *compatible)
{
int offset, err;
FDT_RO_PROBE(fdt);
/* FIXME: The algorithm here is pretty horrible: we scan each
* property of a node in fdt_node_check_compatible(), then if
* that didn't find what we want, we scan over them again
* making our way to the next node. Still it's the easiest to
* implement approach; performance can come later. */
for (offset = fdt_next_node(fdt, startoffset, NULL);
offset >= 0;
offset = fdt_next_node(fdt, offset, NULL)) {
err = fdt_node_check_compatible(fdt, offset, compatible);
if ((err < 0) && (err != -FDT_ERR_NOTFOUND))
return err;
else if (err == 0)
return offset;
}
return offset; /* error from fdt_next_node() */
}
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Coda multi-standard codec IP - H.264 helper functions
*
* Copyright (C) 2012 Vista Silicon S.L.
* Javier Martin, <[email protected]>
* Xavier Duret
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/videodev2.h>
#include "coda.h"
static const u8 coda_filler_size[8] = { 0, 7, 14, 13, 12, 11, 10, 9 };
static const u8 *coda_find_nal_header(const u8 *buf, const u8 *end)
{
u32 val = 0xffffffff;
do {
val = val << 8 | *buf++;
if (buf >= end)
return NULL;
} while (val != 0x00000001);
return buf;
}
int coda_sps_parse_profile(struct coda_ctx *ctx, struct vb2_buffer *vb)
{
const u8 *buf = vb2_plane_vaddr(vb, 0);
const u8 *end = buf + vb2_get_plane_payload(vb, 0);
/* Find SPS header */
do {
buf = coda_find_nal_header(buf, end);
if (!buf)
return -EINVAL;
} while ((*buf++ & 0x1f) != 0x7);
ctx->params.h264_profile_idc = buf[0];
ctx->params.h264_level_idc = buf[2];
return 0;
}
int coda_h264_filler_nal(int size, char *p)
{
if (size < 6)
return -EINVAL;
p[0] = 0x00;
p[1] = 0x00;
p[2] = 0x00;
p[3] = 0x01;
p[4] = 0x0c;
memset(p + 5, 0xff, size - 6);
/* Add rbsp stop bit and trailing at the end */
p[size - 1] = 0x80;
return 0;
}
int coda_h264_padding(int size, char *p)
{
int nal_size;
int diff;
diff = size - (size & ~0x7);
if (diff == 0)
return 0;
nal_size = coda_filler_size[diff];
coda_h264_filler_nal(nal_size, p);
return nal_size;
}
int coda_h264_profile(int profile_idc)
{
switch (profile_idc) {
case 66: return V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
case 77: return V4L2_MPEG_VIDEO_H264_PROFILE_MAIN;
case 88: return V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED;
case 100: return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH;
default: return -EINVAL;
}
}
int coda_h264_level(int level_idc)
{
switch (level_idc) {
case 10: return V4L2_MPEG_VIDEO_H264_LEVEL_1_0;
case 9: return V4L2_MPEG_VIDEO_H264_LEVEL_1B;
case 11: return V4L2_MPEG_VIDEO_H264_LEVEL_1_1;
case 12: return V4L2_MPEG_VIDEO_H264_LEVEL_1_2;
case 13: return V4L2_MPEG_VIDEO_H264_LEVEL_1_3;
case 20: return V4L2_MPEG_VIDEO_H264_LEVEL_2_0;
case 21: return V4L2_MPEG_VIDEO_H264_LEVEL_2_1;
case 22: return V4L2_MPEG_VIDEO_H264_LEVEL_2_2;
case 30: return V4L2_MPEG_VIDEO_H264_LEVEL_3_0;
case 31: return V4L2_MPEG_VIDEO_H264_LEVEL_3_1;
case 32: return V4L2_MPEG_VIDEO_H264_LEVEL_3_2;
case 40: return V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
case 41: return V4L2_MPEG_VIDEO_H264_LEVEL_4_1;
case 42: return V4L2_MPEG_VIDEO_H264_LEVEL_4_2;
case 50: return V4L2_MPEG_VIDEO_H264_LEVEL_5_0;
case 51: return V4L2_MPEG_VIDEO_H264_LEVEL_5_1;
default: return -EINVAL;
}
}
struct rbsp {
char *buf;
int size;
int pos;
};
static inline int rbsp_read_bit(struct rbsp *rbsp)
{
int shift = 7 - (rbsp->pos % 8);
int ofs = rbsp->pos++ / 8;
if (ofs >= rbsp->size)
return -EINVAL;
return (rbsp->buf[ofs] >> shift) & 1;
}
static inline int rbsp_write_bit(struct rbsp *rbsp, int bit)
{
int shift = 7 - (rbsp->pos % 8);
int ofs = rbsp->pos++ / 8;
if (ofs >= rbsp->size)
return -EINVAL;
rbsp->buf[ofs] &= ~(1 << shift);
rbsp->buf[ofs] |= bit << shift;
return 0;
}
static inline int rbsp_read_bits(struct rbsp *rbsp, int num, int *val)
{
int i, ret;
int tmp = 0;
if (num > 32)
return -EINVAL;
for (i = 0; i < num; i++) {
ret = rbsp_read_bit(rbsp);
if (ret < 0)
return ret;
tmp |= ret << (num - i - 1);
}
if (val)
*val = tmp;
return 0;
}
static int rbsp_write_bits(struct rbsp *rbsp, int num, int value)
{
int ret;
while (num--) {
ret = rbsp_write_bit(rbsp, (value >> num) & 1);
if (ret)
return ret;
}
return 0;
}
static int rbsp_read_uev(struct rbsp *rbsp, unsigned int *val)
{
int leading_zero_bits = 0;
unsigned int tmp = 0;
int ret;
while ((ret = rbsp_read_bit(rbsp)) == 0)
leading_zero_bits++;
if (ret < 0)
return ret;
if (leading_zero_bits > 0) {
ret = rbsp_read_bits(rbsp, leading_zero_bits, &tmp);
if (ret)
return ret;
}
if (val)
*val = (1 << leading_zero_bits) - 1 + tmp;
return 0;
}
static int rbsp_write_uev(struct rbsp *rbsp, unsigned int value)
{
int i;
int ret;
int tmp = value + 1;
int leading_zero_bits = fls(tmp) - 1;
for (i = 0; i < leading_zero_bits; i++) {
ret = rbsp_write_bit(rbsp, 0);
if (ret)
return ret;
}
return rbsp_write_bits(rbsp, leading_zero_bits + 1, tmp);
}
static int rbsp_read_sev(struct rbsp *rbsp, int *val)
{
unsigned int tmp;
int ret;
ret = rbsp_read_uev(rbsp, &tmp);
if (ret)
return ret;
if (val) {
if (tmp & 1)
*val = (tmp + 1) / 2;
else
*val = -(tmp / 2);
}
return 0;
}
/**
* coda_h264_sps_fixup - fixes frame cropping values in h.264 SPS
* @ctx: encoder context
* @width: visible width
* @height: visible height
* @buf: buffer containing h.264 SPS RBSP, starting with NAL header
* @size: modified RBSP size return value
* @max_size: available size in buf
*
* Rewrites the frame cropping values in an h.264 SPS RBSP correctly for the
* given visible width and height.
*/
int coda_h264_sps_fixup(struct coda_ctx *ctx, int width, int height, char *buf,
int *size, int max_size)
{
int profile_idc;
unsigned int pic_order_cnt_type;
int pic_width_in_mbs_minus1, pic_height_in_map_units_minus1;
int frame_mbs_only_flag, frame_cropping_flag;
int vui_parameters_present_flag;
unsigned int crop_right, crop_bottom;
struct rbsp sps;
int pos;
int ret;
if (*size < 8 || *size >= max_size)
return -EINVAL;
sps.buf = buf + 5; /* Skip NAL header */
sps.size = *size - 5;
profile_idc = sps.buf[0];
/* Skip constraint_set[0-5]_flag, reserved_zero_2bits */
/* Skip level_idc */
sps.pos = 24;
/* seq_parameter_set_id */
ret = rbsp_read_uev(&sps, NULL);
if (ret)
return ret;
if (profile_idc == 100 || profile_idc == 110 || profile_idc == 122 ||
profile_idc == 244 || profile_idc == 44 || profile_idc == 83 ||
profile_idc == 86 || profile_idc == 118 || profile_idc == 128 ||
profile_idc == 138 || profile_idc == 139 || profile_idc == 134 ||
profile_idc == 135) {
dev_err(ctx->fh.vdev->dev_parent,
"%s: Handling profile_idc %d not implemented\n",
__func__, profile_idc);
return -EINVAL;
}
/* log2_max_frame_num_minus4 */
ret = rbsp_read_uev(&sps, NULL);
if (ret)
return ret;
ret = rbsp_read_uev(&sps, &pic_order_cnt_type);
if (ret)
return ret;
if (pic_order_cnt_type == 0) {
/* log2_max_pic_order_cnt_lsb_minus4 */
ret = rbsp_read_uev(&sps, NULL);
if (ret)
return ret;
} else if (pic_order_cnt_type == 1) {
unsigned int i, num_ref_frames_in_pic_order_cnt_cycle;
/* delta_pic_order_always_zero_flag */
ret = rbsp_read_bit(&sps);
if (ret < 0)
return ret;
/* offset_for_non_ref_pic */
ret = rbsp_read_sev(&sps, NULL);
if (ret)
return ret;
/* offset_for_top_to_bottom_field */
ret = rbsp_read_sev(&sps, NULL);
if (ret)
return ret;
ret = rbsp_read_uev(&sps,
&num_ref_frames_in_pic_order_cnt_cycle);
if (ret)
return ret;
for (i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; i++) {
/* offset_for_ref_frame */
ret = rbsp_read_sev(&sps, NULL);
if (ret)
return ret;
}
}
/* max_num_ref_frames */
ret = rbsp_read_uev(&sps, NULL);
if (ret)
return ret;
/* gaps_in_frame_num_value_allowed_flag */
ret = rbsp_read_bit(&sps);
if (ret < 0)
return ret;
ret = rbsp_read_uev(&sps, &pic_width_in_mbs_minus1);
if (ret)
return ret;
ret = rbsp_read_uev(&sps, &pic_height_in_map_units_minus1);
if (ret)
return ret;
frame_mbs_only_flag = ret = rbsp_read_bit(&sps);
if (ret < 0)
return ret;
if (!frame_mbs_only_flag) {
/* mb_adaptive_frame_field_flag */
ret = rbsp_read_bit(&sps);
if (ret < 0)
return ret;
}
/* direct_8x8_inference_flag */
ret = rbsp_read_bit(&sps);
if (ret < 0)
return ret;
/* Mark position of the frame cropping flag */
pos = sps.pos;
frame_cropping_flag = ret = rbsp_read_bit(&sps);
if (ret < 0)
return ret;
if (frame_cropping_flag) {
unsigned int crop_left, crop_top;
ret = rbsp_read_uev(&sps, &crop_left);
if (ret)
return ret;
ret = rbsp_read_uev(&sps, &crop_right);
if (ret)
return ret;
ret = rbsp_read_uev(&sps, &crop_top);
if (ret)
return ret;
ret = rbsp_read_uev(&sps, &crop_bottom);
if (ret)
return ret;
}
vui_parameters_present_flag = ret = rbsp_read_bit(&sps);
if (ret < 0)
return ret;
if (vui_parameters_present_flag) {
dev_err(ctx->fh.vdev->dev_parent,
"%s: Handling vui_parameters not implemented\n",
__func__);
return -EINVAL;
}
crop_right = round_up(width, 16) - width;
crop_bottom = round_up(height, 16) - height;
crop_right /= 2;
if (frame_mbs_only_flag)
crop_bottom /= 2;
else
crop_bottom /= 4;
sps.size = max_size - 5;
sps.pos = pos;
frame_cropping_flag = 1;
ret = rbsp_write_bit(&sps, frame_cropping_flag);
if (ret)
return ret;
ret = rbsp_write_uev(&sps, 0); /* crop_left */
if (ret)
return ret;
ret = rbsp_write_uev(&sps, crop_right);
if (ret)
return ret;
ret = rbsp_write_uev(&sps, 0); /* crop_top */
if (ret)
return ret;
ret = rbsp_write_uev(&sps, crop_bottom);
if (ret)
return ret;
ret = rbsp_write_bit(&sps, 0); /* vui_parameters_present_flag */
if (ret)
return ret;
ret = rbsp_write_bit(&sps, 1);
if (ret)
return ret;
*size = 5 + DIV_ROUND_UP(sps.pos, 8);
return 0;
}
|
// SPDX-License-Identifier: GPL-2.0
#include <linux/if_link.h>
#include <test_progs.h>
#define IFINDEX_LO 1
void serial_test_xdp_info(void)
{
__u32 len = sizeof(struct bpf_prog_info), duration = 0, prog_id;
const char *file = "./xdp_dummy.bpf.o";
LIBBPF_OPTS(bpf_xdp_query_opts, opts);
struct bpf_prog_info info = {};
struct bpf_object *obj;
int err, prog_fd;
/* Get prog_id for XDP_ATTACHED_NONE mode */
err = bpf_xdp_query_id(IFINDEX_LO, 0, &prog_id);
if (CHECK(err, "get_xdp_none", "errno=%d\n", errno))
return;
if (CHECK(prog_id, "prog_id_none", "unexpected prog_id=%u\n", prog_id))
return;
err = bpf_xdp_query_id(IFINDEX_LO, XDP_FLAGS_SKB_MODE, &prog_id);
if (CHECK(err, "get_xdp_none_skb", "errno=%d\n", errno))
return;
if (CHECK(prog_id, "prog_id_none_skb", "unexpected prog_id=%u\n",
prog_id))
return;
/* Setup prog */
err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
if (CHECK(err, "get_prog_info", "errno=%d\n", errno))
goto out_close;
err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL);
if (CHECK(err, "set_xdp_skb", "errno=%d\n", errno))
goto out_close;
/* Get prog_id for single prog mode */
err = bpf_xdp_query_id(IFINDEX_LO, 0, &prog_id);
if (CHECK(err, "get_xdp", "errno=%d\n", errno))
goto out;
if (CHECK(prog_id != info.id, "prog_id", "prog_id not available\n"))
goto out;
err = bpf_xdp_query_id(IFINDEX_LO, XDP_FLAGS_SKB_MODE, &prog_id);
if (CHECK(err, "get_xdp_skb", "errno=%d\n", errno))
goto out;
if (CHECK(prog_id != info.id, "prog_id_skb", "prog_id not available\n"))
goto out;
err = bpf_xdp_query_id(IFINDEX_LO, XDP_FLAGS_DRV_MODE, &prog_id);
if (CHECK(err, "get_xdp_drv", "errno=%d\n", errno))
goto out;
if (CHECK(prog_id, "prog_id_drv", "unexpected prog_id=%u\n", prog_id))
goto out;
/* Check xdp features supported by lo device */
opts.feature_flags = ~0;
err = bpf_xdp_query(IFINDEX_LO, XDP_FLAGS_DRV_MODE, &opts);
if (!ASSERT_OK(err, "bpf_xdp_query"))
goto out;
ASSERT_EQ(opts.feature_flags, 0, "opts.feature_flags");
out:
bpf_xdp_detach(IFINDEX_LO, 0, NULL);
out_close:
bpf_object__close(obj);
}
|
// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
/*
* Copyright 2020-2021 TQ-Systems GmbH
*/
/dts-v1/;
#include "imx8mn-tqma8mqnl.dtsi"
#include "mba8mx.dtsi"
/ {
model = "TQ-Systems GmbH i.MX8MN TQMa8MxNL on MBa8Mx";
compatible = "tq,imx8mn-tqma8mqnl-mba8mx", "tq,imx8mn-tqma8mqnl", "fsl,imx8mn";
chassis-type = "embedded";
aliases {
eeprom0 = &eeprom3;
mmc0 = &usdhc3;
mmc1 = &usdhc2;
mmc2 = &usdhc1;
rtc0 = &pcf85063;
rtc1 = &snvs_rtc;
};
reg_usdhc2_vmmc: regulator-vmmc {
compatible = "regulator-fixed";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>;
regulator-name = "VSD_3V3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
enable-active-high;
startup-delay-us = <100>;
off-on-delay-us = <12000>;
};
};
/* Located on TQMa8MxML-ADAP */
&gpio2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0hub_sel>;
sel_usb_hub_hog: sel-usb-hub-hog {
gpio-hog;
gpios = <1 GPIO_ACTIVE_HIGH>;
output-high;
};
};
&i2c1 {
expander2: gpio@27 {
compatible = "nxp,pca9555";
reg = <0x27>;
gpio-controller;
#gpio-cells = <2>;
vcc-supply = <&reg_vcc_3v3>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_expander2>;
interrupt-parent = <&gpio1>;
interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
interrupt-controller;
#interrupt-cells = <2>;
};
};
&mipi_dsi {
samsung,burst-clock-frequency = <891000000>;
samsung,esc-clock-frequency = <20000000>;
};
&sai3 {
assigned-clocks = <&clk IMX8MN_CLK_SAI3>;
assigned-clock-parents = <&clk IMX8MN_AUDIO_PLL1_OUT>;
clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3", "pll8k", "pll11k";
clocks = <&clk IMX8MN_CLK_SAI3_IPG>, <&clk IMX8MN_CLK_DUMMY>,
<&clk IMX8MN_CLK_SAI3_ROOT>, <&clk IMX8MN_CLK_DUMMY>,
<&clk IMX8MN_CLK_DUMMY>, <&clk IMX8MN_AUDIO_PLL1_OUT>,
<&clk IMX8MN_AUDIO_PLL2_OUT>;
};
&tlv320aic3x04 {
clock-names = "mclk";
clocks = <&clk IMX8MN_CLK_SAI3_ROOT>;
};
&usbotg1 {
dr_mode = "host";
disable-over-current;
power-active-high;
status = "okay";
};
&iomuxc {
pinctrl_ecspi1: ecspi1grp {
fsl,pins = <MX8MN_IOMUXC_ECSPI1_SCLK_ECSPI1_SCLK 0x00000146>,
<MX8MN_IOMUXC_ECSPI1_MOSI_ECSPI1_MOSI 0x00000146>,
<MX8MN_IOMUXC_ECSPI1_MISO_ECSPI1_MISO 0x00000146>,
<MX8MN_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x00000146>;
};
pinctrl_ecspi2: ecspi2grp {
fsl,pins = <MX8MN_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x00000146>,
<MX8MN_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x00000146>,
<MX8MN_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x00000146>,
<MX8MN_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x00000146>;
};
pinctrl_expander2: expander2grp {
fsl,pins = <MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x94>;
};
pinctrl_fec1: fec1grp {
fsl,pins = <MX8MN_IOMUXC_ENET_MDC_ENET1_MDC 0x40000002>,
<MX8MN_IOMUXC_ENET_MDIO_ENET1_MDIO 0x40000002>,
<MX8MN_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x14>,
<MX8MN_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x14>,
<MX8MN_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x14>,
<MX8MN_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x14>,
<MX8MN_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x90>,
<MX8MN_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x90>,
<MX8MN_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x90>,
<MX8MN_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x90>,
<MX8MN_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x14>,
<MX8MN_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x90>,
<MX8MN_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x90>,
<MX8MN_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x14>;
};
pinctrl_gpiobutton: gpiobuttongrp {
fsl,pins = <MX8MN_IOMUXC_GPIO1_IO05_GPIO1_IO5 0x84>,
<MX8MN_IOMUXC_GPIO1_IO07_GPIO1_IO7 0x84>,
<MX8MN_IOMUXC_SD1_CLK_GPIO2_IO0 0x84>;
};
pinctrl_gpioled: gpioledgrp {
fsl,pins = <MX8MN_IOMUXC_GPIO1_IO00_GPIO1_IO0 0x84>,
<MX8MN_IOMUXC_NAND_DQS_GPIO3_IO14 0x84>;
};
pinctrl_i2c2: i2c2grp {
fsl,pins = <MX8MN_IOMUXC_I2C2_SCL_I2C2_SCL 0x400001C4>,
<MX8MN_IOMUXC_I2C2_SDA_I2C2_SDA 0x400001C4>;
};
pinctrl_i2c2_gpio: i2c2gpiogrp {
fsl,pins = <MX8MN_IOMUXC_I2C2_SCL_GPIO5_IO16 0x400001C4>,
<MX8MN_IOMUXC_I2C2_SDA_GPIO5_IO17 0x400001C4>;
};
pinctrl_i2c3: i2c3grp {
fsl,pins = <MX8MN_IOMUXC_I2C3_SCL_I2C3_SCL 0x400001C4>,
<MX8MN_IOMUXC_I2C3_SDA_I2C3_SDA 0x400001C4>;
};
pinctrl_i2c3_gpio: i2c3gpiogrp {
fsl,pins = <MX8MN_IOMUXC_I2C3_SCL_GPIO5_IO18 0x400001C4>,
<MX8MN_IOMUXC_I2C3_SDA_GPIO5_IO19 0x400001C4>;
};
pinctrl_pwm3: pwm3grp {
fsl,pins = <MX8MN_IOMUXC_GPIO1_IO14_PWM3_OUT 0x14>;
};
pinctrl_pwm4: pwm4grp {
fsl,pins = <MX8MN_IOMUXC_GPIO1_IO15_PWM4_OUT 0x14>;
};
pinctrl_sai3: sai3grp {
fsl,pins = <MX8MN_IOMUXC_SAI3_MCLK_SAI3_MCLK 0x94>,
<MX8MN_IOMUXC_SAI3_RXC_SAI3_RX_BCLK 0x94>,
<MX8MN_IOMUXC_SAI3_RXFS_SAI3_RX_SYNC 0x94>,
<MX8MN_IOMUXC_SAI3_RXD_SAI3_RX_DATA0 0x94>,
<MX8MN_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0x94>,
<MX8MN_IOMUXC_SAI3_TXD_SAI3_TX_DATA0 0x94>,
<MX8MN_IOMUXC_SAI3_TXC_SAI3_TX_BCLK 0x94>;
};
pinctrl_uart1: uart1grp {
fsl,pins = <MX8MN_IOMUXC_UART1_RXD_UART1_DCE_RX 0x16>,
<MX8MN_IOMUXC_UART1_TXD_UART1_DCE_TX 0x16>;
};
pinctrl_uart2: uart2grp {
fsl,pins = <MX8MN_IOMUXC_UART2_RXD_UART2_DCE_RX 0x16>,
<MX8MN_IOMUXC_UART2_TXD_UART2_DCE_TX 0x16>;
};
pinctrl_uart3: uart3grp {
fsl,pins = <MX8MN_IOMUXC_UART3_RXD_UART3_DCE_RX 0x16>,
<MX8MN_IOMUXC_UART3_TXD_UART3_DCE_TX 0x16>;
};
pinctrl_uart4: uart4grp {
fsl,pins = <MX8MN_IOMUXC_UART4_RXD_UART4_DCE_RX 0x16>,
<MX8MN_IOMUXC_UART4_TXD_UART4_DCE_TX 0x16>;
};
pinctrl_usb0hub_sel: usb0hub-selgrp {
/* SEL_USB_HUB_B */
fsl,pins = <MX8MN_IOMUXC_SD1_CMD_GPIO2_IO1 0x84>;
};
pinctrl_usbotg: usbotggrp {
fsl,pins = <MX8MN_IOMUXC_GPIO1_IO12_USB1_OTG_PWR 0x84>,
<MX8MN_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x84>;
};
pinctrl_usdhc2: usdhc2grp {
fsl,pins = <MX8MN_IOMUXC_SD2_CLK_USDHC2_CLK 0x1d4>,
<MX8MN_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4>,
<MX8MN_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4>,
<MX8MN_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4>,
<MX8MN_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4>,
<MX8MN_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4>,
<MX8MN_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x84>;
};
pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
fsl,pins = <MX8MN_IOMUXC_SD2_CLK_USDHC2_CLK 0x1d4>,
<MX8MN_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4>,
<MX8MN_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4>,
<MX8MN_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4>,
<MX8MN_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4>,
<MX8MN_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4>,
<MX8MN_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x84>;
};
pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
fsl,pins = <MX8MN_IOMUXC_SD2_CLK_USDHC2_CLK 0x1d4>,
<MX8MN_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4>,
<MX8MN_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4>,
<MX8MN_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4>,
<MX8MN_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4>,
<MX8MN_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4>,
<MX8MN_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x84>;
};
pinctrl_usdhc2_gpio: usdhc2-gpiogrp {
fsl,pins = <MX8MN_IOMUXC_SD2_CD_B_GPIO2_IO12 0x84>;
};
};
|
// SPDX-License-Identifier: GPL-2.0
/*
* r8a7778 Core CPG Clocks
*
* Copyright (C) 2014 Ulrich Hecht
*/
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/soc/renesas/rcar-rst.h>
/* PLL multipliers per bits 11, 12, and 18 of MODEMR */
static const struct {
unsigned long plla_mult;
unsigned long pllb_mult;
} r8a7778_rates[] __initconst = {
[0] = { 21, 21 },
[1] = { 24, 24 },
[2] = { 28, 28 },
[3] = { 32, 32 },
[5] = { 24, 21 },
[6] = { 28, 21 },
[7] = { 32, 24 },
};
/* Clock dividers per bits 1 and 2 of MODEMR */
static const struct {
const char *name;
unsigned int div[4];
} r8a7778_divs[6] __initconst = {
{ "b", { 12, 12, 16, 18 } },
{ "out", { 12, 12, 16, 18 } },
{ "p", { 16, 12, 16, 12 } },
{ "s", { 4, 3, 4, 3 } },
{ "s1", { 8, 6, 8, 6 } },
};
static u32 cpg_mode_rates __initdata;
static u32 cpg_mode_divs __initdata;
static struct clk * __init
r8a7778_cpg_register_clock(struct device_node *np, const char *name)
{
if (!strcmp(name, "plla")) {
return clk_register_fixed_factor(NULL, "plla",
of_clk_get_parent_name(np, 0), 0,
r8a7778_rates[cpg_mode_rates].plla_mult, 1);
} else if (!strcmp(name, "pllb")) {
return clk_register_fixed_factor(NULL, "pllb",
of_clk_get_parent_name(np, 0), 0,
r8a7778_rates[cpg_mode_rates].pllb_mult, 1);
} else {
unsigned int i;
for (i = 0; i < ARRAY_SIZE(r8a7778_divs); i++) {
if (!strcmp(name, r8a7778_divs[i].name)) {
return clk_register_fixed_factor(NULL,
r8a7778_divs[i].name,
"plla", 0, 1,
r8a7778_divs[i].div[cpg_mode_divs]);
}
}
}
return ERR_PTR(-EINVAL);
}
static void __init r8a7778_cpg_clocks_init(struct device_node *np)
{
struct clk_onecell_data *data;
struct clk **clks;
unsigned int i;
int num_clks;
u32 mode;
if (rcar_rst_read_mode_pins(&mode))
return;
BUG_ON(!(mode & BIT(19)));
cpg_mode_rates = (!!(mode & BIT(18)) << 2) |
(!!(mode & BIT(12)) << 1) |
(!!(mode & BIT(11)));
cpg_mode_divs = (!!(mode & BIT(2)) << 1) |
(!!(mode & BIT(1)));
num_clks = of_property_count_strings(np, "clock-output-names");
if (num_clks < 0) {
pr_err("%s: failed to count clocks\n", __func__);
return;
}
data = kzalloc(sizeof(*data), GFP_KERNEL);
clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
if (data == NULL || clks == NULL) {
/* We're leaking memory on purpose, there's no point in cleaning
* up as the system won't boot anyway.
*/
return;
}
data->clks = clks;
data->clk_num = num_clks;
for (i = 0; i < num_clks; ++i) {
const char *name;
struct clk *clk;
of_property_read_string_index(np, "clock-output-names", i,
&name);
clk = r8a7778_cpg_register_clock(np, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
else
data->clks[i] = clk;
}
of_clk_add_provider(np, of_clk_src_onecell_get, data);
cpg_mstp_add_clk_domain(np);
}
CLK_OF_DECLARE(r8a7778_cpg_clks, "renesas,r8a7778-cpg-clocks",
r8a7778_cpg_clocks_init);
|
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
} data = {};
struct core_reloc_flavors {
int a;
int b;
int c;
};
/* local flavor with reversed layout */
struct core_reloc_flavors___reversed {
int c;
int b;
int a;
};
/* local flavor with nested/overlapping layout */
struct core_reloc_flavors___weird {
struct {
int b;
};
/* a and c overlap in local flavor, but this should still work
* correctly with target original flavor
*/
union {
int a;
int c;
};
};
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
SEC("raw_tracepoint/sys_enter")
int test_core_flavors(void *ctx)
{
struct core_reloc_flavors *in_orig = (void *)&data.in;
struct core_reloc_flavors___reversed *in_rev = (void *)&data.in;
struct core_reloc_flavors___weird *in_weird = (void *)&data.in;
struct core_reloc_flavors *out = (void *)&data.out;
/* read a using weird layout */
if (CORE_READ(&out->a, &in_weird->a))
return 1;
/* read b using reversed layout */
if (CORE_READ(&out->b, &in_rev->b))
return 1;
/* read c using original layout */
if (CORE_READ(&out->c, &in_orig->c))
return 1;
return 0;
}
|
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <bpf/bpf_endian.h>
#include "sock_destroy_prog.skel.h"
#include "sock_destroy_prog_fail.skel.h"
#include "network_helpers.h"
#define TEST_NS "sock_destroy_netns"
static void start_iter_sockets(struct bpf_program *prog)
{
struct bpf_link *link;
char buf[50] = {};
int iter_fd, len;
link = bpf_program__attach_iter(prog, NULL);
if (!ASSERT_OK_PTR(link, "attach_iter"))
return;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
ASSERT_GE(len, 0, "read");
close(iter_fd);
free_link:
bpf_link__destroy(link);
}
static void test_tcp_client(struct sock_destroy_prog *skel)
{
int serv = -1, clien = -1, accept_serv = -1, n;
serv = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
if (!ASSERT_GE(serv, 0, "start_server"))
goto cleanup;
clien = connect_to_fd(serv, 0);
if (!ASSERT_GE(clien, 0, "connect_to_fd"))
goto cleanup;
accept_serv = accept(serv, NULL, NULL);
if (!ASSERT_GE(accept_serv, 0, "serv accept"))
goto cleanup;
n = send(clien, "t", 1, 0);
if (!ASSERT_EQ(n, 1, "client send"))
goto cleanup;
/* Run iterator program that destroys connected client sockets. */
start_iter_sockets(skel->progs.iter_tcp6_client);
n = send(clien, "t", 1, 0);
if (!ASSERT_LT(n, 0, "client_send on destroyed socket"))
goto cleanup;
ASSERT_EQ(errno, ECONNABORTED, "error code on destroyed socket");
cleanup:
if (clien != -1)
close(clien);
if (accept_serv != -1)
close(accept_serv);
if (serv != -1)
close(serv);
}
static void test_tcp_server(struct sock_destroy_prog *skel)
{
int serv = -1, clien = -1, accept_serv = -1, n, serv_port;
serv = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
if (!ASSERT_GE(serv, 0, "start_server"))
goto cleanup;
serv_port = get_socket_local_port(serv);
if (!ASSERT_GE(serv_port, 0, "get_sock_local_port"))
goto cleanup;
skel->bss->serv_port = (__be16) serv_port;
clien = connect_to_fd(serv, 0);
if (!ASSERT_GE(clien, 0, "connect_to_fd"))
goto cleanup;
accept_serv = accept(serv, NULL, NULL);
if (!ASSERT_GE(accept_serv, 0, "serv accept"))
goto cleanup;
n = send(clien, "t", 1, 0);
if (!ASSERT_EQ(n, 1, "client send"))
goto cleanup;
/* Run iterator program that destroys server sockets. */
start_iter_sockets(skel->progs.iter_tcp6_server);
n = send(clien, "t", 1, 0);
if (!ASSERT_LT(n, 0, "client_send on destroyed socket"))
goto cleanup;
ASSERT_EQ(errno, ECONNRESET, "error code on destroyed socket");
cleanup:
if (clien != -1)
close(clien);
if (accept_serv != -1)
close(accept_serv);
if (serv != -1)
close(serv);
}
static void test_udp_client(struct sock_destroy_prog *skel)
{
int serv = -1, clien = -1, n = 0;
serv = start_server(AF_INET6, SOCK_DGRAM, NULL, 0, 0);
if (!ASSERT_GE(serv, 0, "start_server"))
goto cleanup;
clien = connect_to_fd(serv, 0);
if (!ASSERT_GE(clien, 0, "connect_to_fd"))
goto cleanup;
n = send(clien, "t", 1, 0);
if (!ASSERT_EQ(n, 1, "client send"))
goto cleanup;
/* Run iterator program that destroys sockets. */
start_iter_sockets(skel->progs.iter_udp6_client);
n = send(clien, "t", 1, 0);
if (!ASSERT_LT(n, 0, "client_send on destroyed socket"))
goto cleanup;
/* UDP sockets have an overriding error code after they are disconnected,
* so we don't check for ECONNABORTED error code.
*/
cleanup:
if (clien != -1)
close(clien);
if (serv != -1)
close(serv);
}
static void test_udp_server(struct sock_destroy_prog *skel)
{
int *listen_fds = NULL, n, i, serv_port;
unsigned int num_listens = 5;
char buf[1];
/* Start reuseport servers. */
listen_fds = start_reuseport_server(AF_INET6, SOCK_DGRAM,
"::1", 0, 0, num_listens);
if (!ASSERT_OK_PTR(listen_fds, "start_reuseport_server"))
goto cleanup;
serv_port = get_socket_local_port(listen_fds[0]);
if (!ASSERT_GE(serv_port, 0, "get_sock_local_port"))
goto cleanup;
skel->bss->serv_port = (__be16) serv_port;
/* Run iterator program that destroys server sockets. */
start_iter_sockets(skel->progs.iter_udp6_server);
for (i = 0; i < num_listens; ++i) {
n = read(listen_fds[i], buf, sizeof(buf));
if (!ASSERT_EQ(n, -1, "read") ||
!ASSERT_EQ(errno, ECONNABORTED, "error code on destroyed socket"))
break;
}
ASSERT_EQ(i, num_listens, "server socket");
cleanup:
free_fds(listen_fds, num_listens);
}
void test_sock_destroy(void)
{
struct sock_destroy_prog *skel;
struct nstoken *nstoken = NULL;
int cgroup_fd;
skel = sock_destroy_prog__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
cgroup_fd = test__join_cgroup("/sock_destroy");
if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
goto cleanup;
skel->links.sock_connect = bpf_program__attach_cgroup(
skel->progs.sock_connect, cgroup_fd);
if (!ASSERT_OK_PTR(skel->links.sock_connect, "prog_attach"))
goto cleanup;
SYS(cleanup, "ip netns add %s", TEST_NS);
SYS(cleanup, "ip -net %s link set dev lo up", TEST_NS);
nstoken = open_netns(TEST_NS);
if (!ASSERT_OK_PTR(nstoken, "open_netns"))
goto cleanup;
if (test__start_subtest("tcp_client"))
test_tcp_client(skel);
if (test__start_subtest("tcp_server"))
test_tcp_server(skel);
if (test__start_subtest("udp_client"))
test_udp_client(skel);
if (test__start_subtest("udp_server"))
test_udp_server(skel);
RUN_TESTS(sock_destroy_prog_fail);
cleanup:
if (nstoken)
close_netns(nstoken);
SYS_NOFAIL("ip netns del " TEST_NS);
if (cgroup_fd >= 0)
close(cgroup_fd);
sock_destroy_prog__destroy(skel);
}
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Core pinctrl/GPIO driver for Intel GPIO controllers
*
* Copyright (C) 2015, Intel Corporation
* Authors: Mathias Nyman <[email protected]>
* Mika Westerberg <[email protected]>
*/
#ifndef PINCTRL_INTEL_H
#define PINCTRL_INTEL_H
#include <linux/array_size.h>
#include <linux/bits.h>
#include <linux/compiler_types.h>
#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/spinlock_types.h>
struct platform_device;
struct device;
/**
* struct intel_pingroup - Description about group of pins
* @grp: Generic data of the pin group (name and pins)
 * @mode: Native mode in which the group's @pins are muxed out. Used if @modes is %NULL.
* @modes: If not %NULL this will hold mode for each pin in @pins
*/
struct intel_pingroup {
struct pingroup grp;
unsigned short mode;
const unsigned int *modes;
};
/**
* struct intel_function - Description about a function
* @func: Generic data of the pin function (name and groups of pins)
*/
struct intel_function {
struct pinfunction func;
};
#define INTEL_PINCTRL_MAX_GPP_SIZE 32
/**
* struct intel_padgroup - Hardware pad group information
* @reg_num: GPI_IS register number
* @base: Starting pin of this group
* @size: Size of this group (maximum is %INTEL_PINCTRL_MAX_GPP_SIZE).
* @gpio_base: Starting GPIO base of this group
* @padown_num: PAD_OWN register number (assigned by the core driver)
*
* If pad groups of a community are not the same size, use this structure
* to specify them.
*/
struct intel_padgroup {
unsigned int reg_num;
unsigned int base;
unsigned int size;
int gpio_base;
unsigned int padown_num;
};
/**
* enum - Special treatment for GPIO base in pad group
*
* @INTEL_GPIO_BASE_ZERO: force GPIO base to be 0
* @INTEL_GPIO_BASE_NOMAP: no GPIO mapping should be created
* @INTEL_GPIO_BASE_MATCH: matches with starting pin number
*/
enum {
INTEL_GPIO_BASE_ZERO = -2,
INTEL_GPIO_BASE_NOMAP = -1,
INTEL_GPIO_BASE_MATCH = 0,
};
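/*
 * Illustrative sketch (not part of the original header): how a client driver
 * might describe variable-size pad groups with struct intel_padgroup. The
 * "foo_community0_gpps" name, the register numbers and the sizes are
 * assumptions made up for this example only.
 *
 *	static const struct intel_padgroup foo_community0_gpps[] = {
 *		{ .reg_num = 0, .base = 0,  .size = 32, .gpio_base = INTEL_GPIO_BASE_MATCH },
 *		{ .reg_num = 1, .base = 32, .size = 8,  .gpio_base = INTEL_GPIO_BASE_NOMAP },
 *	};
 *
 * @padown_num is left out on purpose since the core driver assigns it.
 */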
/**
* struct intel_community - Intel pin community description
* @barno: MMIO BAR number where registers for this community reside
* @padown_offset: Register offset of PAD_OWN register from @regs. If %0
* then there is no support for owner.
* @padcfglock_offset: Register offset of PADCFGLOCK from @regs. If %0 then
* locking is not supported.
* @hostown_offset: Register offset of HOSTSW_OWN from @regs. If %0 then it
* is assumed that the host owns the pin (rather than
* ACPI).
* @is_offset: Register offset of GPI_IS from @regs.
* @ie_offset: Register offset of GPI_IE from @regs.
* @features: Additional features supported by the hardware
* @pin_base: Starting pin of pins in this community
* @npins: Number of pins in this community
* @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK,
* HOSTSW_OWN, GPI_IS, GPI_IE. Used when @gpps is %NULL.
* @gpp_num_padown_regs: Number of pad registers each pad group consumes at
* minimum. Used when @gpps is %NULL.
* @gpps: Pad groups if the controller has variable size pad groups
* @ngpps: Number of pad groups in this community
* @pad_map: Optional non-linear mapping of the pads
* @nirqs: Optional total number of IRQs this community can generate
* @acpi_space_id: Optional address space ID for ACPI OpRegion handler
* @regs: Community specific common registers (reserved for core driver)
* @pad_regs: Community specific pad registers (reserved for core driver)
*
 * In older Intel GPIO host controllers this driver supports, each pad group
 * is of equal size (except the last one). In that case the driver can just
 * fill in the @gpp_size and @gpp_num_padown_regs fields and let the core
 * driver handle the rest.
*
* In newer Intel GPIO host controllers each pad group is of variable size,
* so the client driver can pass custom @gpps and @ngpps instead.
*/
struct intel_community {
unsigned int barno;
unsigned int padown_offset;
unsigned int padcfglock_offset;
unsigned int hostown_offset;
unsigned int is_offset;
unsigned int ie_offset;
unsigned int features;
unsigned int pin_base;
size_t npins;
unsigned int gpp_size;
unsigned int gpp_num_padown_regs;
const struct intel_padgroup *gpps;
size_t ngpps;
const unsigned int *pad_map;
unsigned short nirqs;
unsigned short acpi_space_id;
/* Reserved for the core driver */
void __iomem *regs;
void __iomem *pad_regs;
};
/* Additional features supported by the hardware */
#define PINCTRL_FEATURE_DEBOUNCE BIT(0)
#define PINCTRL_FEATURE_1K_PD BIT(1)
#define PINCTRL_FEATURE_GPIO_HW_INFO BIT(2)
#define PINCTRL_FEATURE_PWM BIT(3)
#define PINCTRL_FEATURE_BLINK BIT(4)
#define PINCTRL_FEATURE_EXP BIT(5)
#define __INTEL_COMMUNITY(b, s, e, g, n, gs, gn, soc) \
{ \
.barno = (b), \
.padown_offset = soc ## _PAD_OWN, \
.padcfglock_offset = soc ## _PADCFGLOCK, \
.hostown_offset = soc ## _HOSTSW_OWN, \
.is_offset = soc ## _GPI_IS, \
.ie_offset = soc ## _GPI_IE, \
.gpp_size = (gs), \
.gpp_num_padown_regs = (gn), \
.pin_base = (s), \
.npins = ((e) - (s) + 1), \
.gpps = (g), \
.ngpps = (n), \
}
#define INTEL_COMMUNITY_GPPS(b, s, e, g, soc) \
__INTEL_COMMUNITY(b, s, e, g, ARRAY_SIZE(g), 0, 0, soc)
#define INTEL_COMMUNITY_SIZE(b, s, e, gs, gn, soc) \
__INTEL_COMMUNITY(b, s, e, NULL, 0, gs, gn, soc)
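/*
 * Illustrative sketch (assumption, not from the original header): declaring a
 * community with the helper macros above. The "FOO" register offsets, the BAR
 * number and the pin range are hypothetical values chosen only to show the
 * macro arguments.
 *
 *	#define FOO_PAD_OWN	0x020
 *	#define FOO_PADCFGLOCK	0x060
 *	#define FOO_HOSTSW_OWN	0x080
 *	#define FOO_GPI_IS	0x100
 *	#define FOO_GPI_IE	0x110
 *
 *	static const struct intel_community foo_communities[] = {
 *		INTEL_COMMUNITY_SIZE(0, 0, 46, 24, 4, FOO),
 *	};
 *
 * INTEL_COMMUNITY_GPPS() would be used instead when passing a custom
 * intel_padgroup table such as the one sketched earlier.
 */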
/**
* PIN_GROUP - Declare a pin group
* @n: Name of the group
 * @p: An array of pins this group consists of
* @m: Mode which the pins are put when this group is active. Can be either
* a single integer or an array of integers in which case mode is per
* pin.
*/
#define PIN_GROUP(n, p, m) \
{ \
.grp = PINCTRL_PINGROUP((n), (p), ARRAY_SIZE((p))), \
.mode = __builtin_choose_expr(__builtin_constant_p((m)), (m), 0), \
.modes = __builtin_choose_expr(__builtin_constant_p((m)), NULL, (m)), \
}
#define PIN_GROUP_GPIO(n, p, m) \
PIN_GROUP(n, p, m), \
PIN_GROUP(n "_gpio", p, 0)
#define FUNCTION(n, g) \
{ \
.func = PINCTRL_PINFUNCTION((n), (g), ARRAY_SIZE(g)), \
}
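/*
 * Illustrative sketch (assumption): defining one pin group and one function
 * with PIN_GROUP() and FUNCTION(). The pin numbers and the "foo_spi0" names
 * are made up for this example.
 *
 *	static const unsigned int foo_spi0_pins[] = { 20, 21, 22, 23 };
 *
 *	static const struct intel_pingroup foo_groups[] = {
 *		PIN_GROUP("spi0_grp", foo_spi0_pins, 1),
 *	};
 *
 *	static const char * const foo_spi0_groups[] = { "spi0_grp" };
 *
 *	static const struct intel_function foo_functions[] = {
 *		FUNCTION("spi0", foo_spi0_groups),
 *	};
 *
 * Passing an array as the third PIN_GROUP() argument instead of a constant
 * would populate @modes (per-pin modes) rather than @mode.
 */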
/**
* struct intel_pinctrl_soc_data - Intel pin controller per-SoC configuration
 * @uid: ACPI _UID for the probe driver to use if needed
 * @pins: Array of pins this pinctrl controls
* @npins: Number of pins in the array
* @groups: Array of pin groups
* @ngroups: Number of groups in the array
* @functions: Array of functions
* @nfunctions: Number of functions in the array
* @communities: Array of communities this pinctrl handles
* @ncommunities: Number of communities in the array
*
 * The @communities array is used as a template by the core driver. It makes
 * a copy of all communities and fills in the rest of the information.
*/
struct intel_pinctrl_soc_data {
const char *uid;
const struct pinctrl_pin_desc *pins;
size_t npins;
const struct intel_pingroup *groups;
size_t ngroups;
const struct intel_function *functions;
size_t nfunctions;
const struct intel_community *communities;
size_t ncommunities;
};
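/*
 * Illustrative sketch (assumption): tying the tables above together into a
 * per-SoC description that a driver could hand to intel_pinctrl_probe().
 * Every "foo_*" symbol is hypothetical and only shown for orientation.
 *
 *	static const struct pinctrl_pin_desc foo_pins[] = {
 *		PINCTRL_PIN(0, "GPP_A_0"),
 *		PINCTRL_PIN(1, "GPP_A_1"),
 *	};
 *
 *	static const struct intel_pinctrl_soc_data foo_soc_data = {
 *		.pins = foo_pins,
 *		.npins = ARRAY_SIZE(foo_pins),
 *		.groups = foo_groups,
 *		.ngroups = ARRAY_SIZE(foo_groups),
 *		.functions = foo_functions,
 *		.nfunctions = ARRAY_SIZE(foo_functions),
 *		.communities = foo_communities,
 *		.ncommunities = ARRAY_SIZE(foo_communities),
 *	};
 */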
const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_device *pdev);
struct intel_pad_context;
struct intel_community_context;
/**
* struct intel_pinctrl_context - context to be saved during suspend-resume
* @pads: Opaque context per pad (driver dependent)
* @communities: Opaque context per community (driver dependent)
*/
struct intel_pinctrl_context {
struct intel_pad_context *pads;
struct intel_community_context *communities;
};
/**
* struct intel_pinctrl - Intel pinctrl private structure
* @dev: Pointer to the device structure
* @lock: Lock to serialize register access
* @pctldesc: Pin controller description
* @pctldev: Pointer to the pin controller device
* @chip: GPIO chip in this pin controller
* @soc: SoC/PCH specific pin configuration data
* @communities: All communities in this pin controller
* @ncommunities: Number of communities in this pin controller
* @context: Configuration saved over system sleep
* @irq: pinctrl/GPIO chip irq number
*/
struct intel_pinctrl {
struct device *dev;
raw_spinlock_t lock;
struct pinctrl_desc pctldesc;
struct pinctrl_dev *pctldev;
struct gpio_chip chip;
const struct intel_pinctrl_soc_data *soc;
struct intel_community *communities;
size_t ncommunities;
struct intel_pinctrl_context context;
int irq;
};
int intel_pinctrl_probe(struct platform_device *pdev,
const struct intel_pinctrl_soc_data *soc_data);
int intel_pinctrl_probe_by_hid(struct platform_device *pdev);
int intel_pinctrl_probe_by_uid(struct platform_device *pdev);
extern const struct dev_pm_ops intel_pinctrl_pm_ops;
const struct intel_community *intel_get_community(const struct intel_pinctrl *pctrl,
unsigned int pin);
int intel_get_groups_count(struct pinctrl_dev *pctldev);
const char *intel_get_group_name(struct pinctrl_dev *pctldev, unsigned int group);
int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
const unsigned int **pins, unsigned int *npins);
int intel_get_functions_count(struct pinctrl_dev *pctldev);
const char *intel_get_function_name(struct pinctrl_dev *pctldev, unsigned int function);
int intel_get_function_groups(struct pinctrl_dev *pctldev, unsigned int function,
const char * const **groups, unsigned int * const ngroups);
#endif /* PINCTRL_INTEL_H */
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* A scheduler that validates the behavior of direct dispatching with a default
* select_cpu implementation.
*
* Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
* Copyright (c) 2023 David Vernet <[email protected]>
* Copyright (c) 2023 Tejun Heo <[email protected]>
*/
#include <scx/common.bpf.h>
char _license[] SEC("license") = "GPL";
bool saw_local = false;
static bool task_is_test(const struct task_struct *p)
{
return !bpf_strncmp(p->comm, 9, "select_cpu");
}
void BPF_STRUCT_OPS(select_cpu_dfl_enqueue, struct task_struct *p,
u64 enq_flags)
{
const struct cpumask *idle_mask = scx_bpf_get_idle_cpumask();
if (task_is_test(p) &&
bpf_cpumask_test_cpu(scx_bpf_task_cpu(p), idle_mask)) {
saw_local = true;
}
scx_bpf_put_idle_cpumask(idle_mask);
scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
}
SEC(".struct_ops.link")
struct sched_ext_ops select_cpu_dfl_ops = {
.enqueue = (void *) select_cpu_dfl_enqueue,
.name = "select_cpu_dfl",
};
|
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#define LOOP_BOUND 0xf
#define MAX_ENTRIES 8
#define HALF_ENTRIES (MAX_ENTRIES >> 1)
_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");
enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
__u32 g_line = 0;
int page_size = 0; /* userspace should set it */
#define VERIFY_TYPE(type, func) ({ \
g_map_type = type; \
if (!func()) \
return 0; \
})
#define VERIFY(expr) ({ \
g_line = __LINE__; \
if (!(expr)) \
return 0; \
})
struct bpf_map {
enum bpf_map_type map_type;
__u32 key_size;
__u32 value_size;
__u32 max_entries;
__u32 id;
} __attribute__((preserve_access_index));
static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
__u32 value_size, __u32 max_entries)
{
VERIFY(map->map_type == g_map_type);
VERIFY(map->key_size == key_size);
VERIFY(map->value_size == value_size);
VERIFY(map->max_entries == max_entries);
VERIFY(map->id > 0);
return 1;
}
static inline int check_bpf_map_ptr(struct bpf_map *indirect,
struct bpf_map *direct)
{
VERIFY(indirect->map_type == direct->map_type);
VERIFY(indirect->key_size == direct->key_size);
VERIFY(indirect->value_size == direct->value_size);
VERIFY(indirect->max_entries == direct->max_entries);
VERIFY(indirect->id == direct->id);
return 1;
}
static inline int check(struct bpf_map *indirect, struct bpf_map *direct,
__u32 key_size, __u32 value_size, __u32 max_entries)
{
VERIFY(check_bpf_map_ptr(indirect, direct));
VERIFY(check_bpf_map_fields(indirect, key_size, value_size,
max_entries));
return 1;
}
static inline int check_default(struct bpf_map *indirect,
struct bpf_map *direct)
{
VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
MAX_ENTRIES));
return 1;
}
static __noinline int
check_default_noinline(struct bpf_map *indirect, struct bpf_map *direct)
{
VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
MAX_ENTRIES));
return 1;
}
typedef struct {
int counter;
} atomic_t;
struct bpf_htab {
struct bpf_map map;
atomic_t count;
__u32 n_buckets;
__u32 elem_size;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count */
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_hash SEC(".maps");
__s64 bpf_map_sum_elem_count(struct bpf_map *map) __ksym;
static inline int check_hash(void)
{
struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
struct bpf_map *map = (struct bpf_map *)&m_hash;
int i;
VERIFY(check_default_noinline(&hash->map, map));
VERIFY(hash->n_buckets == MAX_ENTRIES);
VERIFY(hash->elem_size == 64);
VERIFY(hash->count.counter == 0);
VERIFY(bpf_map_sum_elem_count(map) == 0);
for (i = 0; i < HALF_ENTRIES; ++i) {
const __u32 key = i;
const __u32 val = 1;
if (bpf_map_update_elem(hash, &key, &val, 0))
return 0;
}
VERIFY(hash->count.counter == HALF_ENTRIES);
VERIFY(bpf_map_sum_elem_count(map) == HALF_ENTRIES);
return 1;
}
struct bpf_array {
struct bpf_map map;
__u32 elem_size;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_array SEC(".maps");
static inline int check_array(void)
{
struct bpf_array *array = (struct bpf_array *)&m_array;
struct bpf_map *map = (struct bpf_map *)&m_array;
int i, n_lookups = 0, n_keys = 0;
VERIFY(check_default(&array->map, map));
VERIFY(array->elem_size == 8);
for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) {
const __u32 key = i;
__u32 *val = bpf_map_lookup_elem(array, &key);
++n_lookups;
if (val)
++n_keys;
}
VERIFY(n_lookups == MAX_ENTRIES);
VERIFY(n_keys == MAX_ENTRIES);
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_prog_array SEC(".maps");
static inline int check_prog_array(void)
{
struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array;
struct bpf_map *map = (struct bpf_map *)&m_prog_array;
VERIFY(check_default(&prog_array->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_perf_event_array SEC(".maps");
static inline int check_perf_event_array(void)
{
struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array;
struct bpf_map *map = (struct bpf_map *)&m_perf_event_array;
VERIFY(check_default(&perf_event_array->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_percpu_hash SEC(".maps");
static inline int check_percpu_hash(void)
{
struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash;
struct bpf_map *map = (struct bpf_map *)&m_percpu_hash;
VERIFY(check_default(&percpu_hash->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_percpu_array SEC(".maps");
static inline int check_percpu_array(void)
{
struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array;
struct bpf_map *map = (struct bpf_map *)&m_percpu_array;
VERIFY(check_default(&percpu_array->map, map));
return 1;
}
struct bpf_stack_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u64);
} m_stack_trace SEC(".maps");
static inline int check_stack_trace(void)
{
struct bpf_stack_map *stack_trace =
(struct bpf_stack_map *)&m_stack_trace;
struct bpf_map *map = (struct bpf_map *)&m_stack_trace;
VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64),
MAX_ENTRIES));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_cgroup_array SEC(".maps");
static inline int check_cgroup_array(void)
{
struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array;
struct bpf_map *map = (struct bpf_map *)&m_cgroup_array;
VERIFY(check_default(&cgroup_array->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_lru_hash SEC(".maps");
static inline int check_lru_hash(void)
{
struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash;
struct bpf_map *map = (struct bpf_map *)&m_lru_hash;
VERIFY(check_default(&lru_hash->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_lru_percpu_hash SEC(".maps");
static inline int check_lru_percpu_hash(void)
{
struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash;
struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash;
VERIFY(check_default(&lru_percpu_hash->map, map));
return 1;
}
struct lpm_trie {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct lpm_key {
struct bpf_lpm_trie_key_hdr trie_key;
__u32 data;
};
struct {
__uint(type, BPF_MAP_TYPE_LPM_TRIE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__uint(max_entries, MAX_ENTRIES);
__type(key, struct lpm_key);
__type(value, __u32);
} m_lpm_trie SEC(".maps");
static inline int check_lpm_trie(void)
{
struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie;
struct bpf_map *map = (struct bpf_map *)&m_lpm_trie;
VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32),
MAX_ENTRIES));
return 1;
}
#define INNER_MAX_ENTRIES 1234
struct inner_map {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, INNER_MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} inner_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
__array(values, struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, INNER_MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
});
} m_array_of_maps SEC(".maps") = {
.values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 },
};
static inline int check_array_of_maps(void)
{
struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps;
struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;
struct bpf_array *inner_map;
int key = 0;
VERIFY(check_default(&array_of_maps->map, map));
inner_map = bpf_map_lookup_elem(array_of_maps, &key);
VERIFY(inner_map != NULL);
VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
__array(values, struct inner_map);
} m_hash_of_maps SEC(".maps") = {
.values = {
[2] = &inner_map,
},
};
static inline int check_hash_of_maps(void)
{
struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps;
struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;
struct bpf_htab *inner_map;
int key = 2;
VERIFY(check_default(&hash_of_maps->map, map));
inner_map = bpf_map_lookup_elem(hash_of_maps, &key);
VERIFY(inner_map != NULL);
VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);
return 1;
}
struct bpf_dtab {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_DEVMAP);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_devmap SEC(".maps");
static inline int check_devmap(void)
{
struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap;
struct bpf_map *map = (struct bpf_map *)&m_devmap;
VERIFY(check_default(&devmap->map, map));
return 1;
}
struct bpf_stab {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_sockmap SEC(".maps");
static inline int check_sockmap(void)
{
struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap;
struct bpf_map *map = (struct bpf_map *)&m_sockmap;
VERIFY(check_default(&sockmap->map, map));
return 1;
}
struct bpf_cpu_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_CPUMAP);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_cpumap SEC(".maps");
static inline int check_cpumap(void)
{
struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap;
struct bpf_map *map = (struct bpf_map *)&m_cpumap;
VERIFY(check_default(&cpumap->map, map));
return 1;
}
struct xsk_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_XSKMAP);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_xskmap SEC(".maps");
static inline int check_xskmap(void)
{
struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap;
struct bpf_map *map = (struct bpf_map *)&m_xskmap;
VERIFY(check_default(&xskmap->map, map));
return 1;
}
struct bpf_shtab {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_SOCKHASH);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_sockhash SEC(".maps");
static inline int check_sockhash(void)
{
struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash;
struct bpf_map *map = (struct bpf_map *)&m_sockhash;
VERIFY(check_default(&sockhash->map, map));
return 1;
}
struct bpf_cgroup_storage_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
__type(value, __u32);
} m_cgroup_storage SEC(".maps");
static inline int check_cgroup_storage(void)
{
struct bpf_cgroup_storage_map *cgroup_storage =
(struct bpf_cgroup_storage_map *)&m_cgroup_storage;
struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage;
VERIFY(check(&cgroup_storage->map, map,
sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));
return 1;
}
struct reuseport_array {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_reuseport_sockarray SEC(".maps");
static inline int check_reuseport_sockarray(void)
{
struct reuseport_array *reuseport_sockarray =
(struct reuseport_array *)&m_reuseport_sockarray;
struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray;
VERIFY(check_default(&reuseport_sockarray->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
__type(value, __u32);
} m_percpu_cgroup_storage SEC(".maps");
static inline int check_percpu_cgroup_storage(void)
{
struct bpf_cgroup_storage_map *percpu_cgroup_storage =
(struct bpf_cgroup_storage_map *)&m_percpu_cgroup_storage;
struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage;
VERIFY(check(&percpu_cgroup_storage->map, map,
sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));
return 1;
}
struct bpf_queue_stack {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_QUEUE);
__uint(max_entries, MAX_ENTRIES);
__type(value, __u32);
} m_queue SEC(".maps");
static inline int check_queue(void)
{
struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue;
struct bpf_map *map = (struct bpf_map *)&m_queue;
VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_STACK);
__uint(max_entries, MAX_ENTRIES);
__type(value, __u32);
} m_stack SEC(".maps");
static inline int check_stack(void)
{
struct bpf_queue_stack *stack = (struct bpf_queue_stack *)&m_stack;
struct bpf_map *map = (struct bpf_map *)&m_stack;
VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES));
return 1;
}
struct bpf_local_storage_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, __u32);
__type(value, __u32);
} m_sk_storage SEC(".maps");
static inline int check_sk_storage(void)
{
struct bpf_local_storage_map *sk_storage =
(struct bpf_local_storage_map *)&m_sk_storage;
struct bpf_map *map = (struct bpf_map *)&m_sk_storage;
VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_devmap_hash SEC(".maps");
static inline int check_devmap_hash(void)
{
struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash;
struct bpf_map *map = (struct bpf_map *)&m_devmap_hash;
VERIFY(check_default(&devmap_hash->map, map));
return 1;
}
struct bpf_ringbuf_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
} m_ringbuf SEC(".maps");
static inline int check_ringbuf(void)
{
struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
struct bpf_map *map = (struct bpf_map *)&m_ringbuf;
VERIFY(check(&ringbuf->map, map, 0, 0, page_size));
return 1;
}
SEC("cgroup_skb/egress")
int cg_skb(void *ctx)
{
VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash);
VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array);
VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array);
VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array);
VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash);
VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, check_percpu_array);
VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace);
VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array);
VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash);
VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash);
VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie);
VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps);
VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps);
VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap);
VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap);
VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap);
VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap);
VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, check_sockhash);
VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage);
VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
check_reuseport_sockarray);
VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
check_percpu_cgroup_storage);
VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue);
VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack);
VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage);
VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash);
VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf);
return 1;
}
char _license[] SEC("license") = "GPL";
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Data Access Monitor Unit Tests
*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
*
* Author: SeongJae Park <[email protected]>
*/
#ifdef CONFIG_DAMON_KUNIT_TEST
#ifndef _DAMON_CORE_TEST_H
#define _DAMON_CORE_TEST_H
#include <kunit/test.h>
static void damon_test_regions(struct kunit *test)
{
struct damon_region *r;
struct damon_target *t;
r = damon_new_region(1, 2);
KUNIT_EXPECT_EQ(test, 1ul, r->ar.start);
KUNIT_EXPECT_EQ(test, 2ul, r->ar.end);
KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
t = damon_new_target();
KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));
damon_add_region(r, t);
KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t));
damon_destroy_region(r, t);
KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));
damon_free_target(t);
}
static unsigned int nr_damon_targets(struct damon_ctx *ctx)
{
struct damon_target *t;
unsigned int nr_targets = 0;
damon_for_each_target(t, ctx)
nr_targets++;
return nr_targets;
}
static void damon_test_target(struct kunit *test)
{
struct damon_ctx *c = damon_new_ctx();
struct damon_target *t;
t = damon_new_target();
KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));
damon_add_target(c, t);
KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c));
damon_destroy_target(t);
KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));
damon_destroy_ctx(c);
}
/*
* Test kdamond_reset_aggregated()
*
 * DAMON checks accesses to each region and aggregates this information as the
 * access frequency of each region. In detail, it increases '->nr_accesses' of
 * the regions for which an access was confirmed. 'kdamond_reset_aggregated()'
 * flushes the aggregated information ('->nr_accesses' of each region) to the
 * result buffer. As a result of the flushing, '->nr_accesses' of each region
 * is initialized to zero.
*/
static void damon_test_aggregate(struct kunit *test)
{
struct damon_ctx *ctx = damon_new_ctx();
unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} };
unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} };
unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} };
struct damon_target *t;
struct damon_region *r;
int it, ir;
for (it = 0; it < 3; it++) {
t = damon_new_target();
damon_add_target(ctx, t);
}
it = 0;
damon_for_each_target(t, ctx) {
for (ir = 0; ir < 3; ir++) {
r = damon_new_region(saddr[it][ir], eaddr[it][ir]);
r->nr_accesses = accesses[it][ir];
r->nr_accesses_bp = accesses[it][ir] * 10000;
damon_add_region(r, t);
}
it++;
}
kdamond_reset_aggregated(ctx);
it = 0;
damon_for_each_target(t, ctx) {
ir = 0;
/* '->nr_accesses' should be zeroed */
damon_for_each_region(r, t) {
KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
ir++;
}
/* regions should be preserved */
KUNIT_EXPECT_EQ(test, 3, ir);
it++;
}
/* targets also should be preserved */
KUNIT_EXPECT_EQ(test, 3, it);
damon_destroy_ctx(ctx);
}
static void damon_test_split_at(struct kunit *test)
{
struct damon_ctx *c = damon_new_ctx();
struct damon_target *t;
struct damon_region *r, *r_new;
t = damon_new_target();
r = damon_new_region(0, 100);
r->nr_accesses_bp = 420000;
r->nr_accesses = 42;
r->last_nr_accesses = 15;
damon_add_region(r, t);
damon_split_region_at(t, r, 25);
KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
KUNIT_EXPECT_EQ(test, r->ar.end, 25ul);
r_new = damon_next_region(r);
KUNIT_EXPECT_EQ(test, r_new->ar.start, 25ul);
KUNIT_EXPECT_EQ(test, r_new->ar.end, 100ul);
KUNIT_EXPECT_EQ(test, r->nr_accesses_bp, r_new->nr_accesses_bp);
KUNIT_EXPECT_EQ(test, r->nr_accesses, r_new->nr_accesses);
KUNIT_EXPECT_EQ(test, r->last_nr_accesses, r_new->last_nr_accesses);
damon_free_target(t);
damon_destroy_ctx(c);
}
static void damon_test_merge_two(struct kunit *test)
{
struct damon_target *t;
struct damon_region *r, *r2, *r3;
int i;
t = damon_new_target();
r = damon_new_region(0, 100);
r->nr_accesses = 10;
r->nr_accesses_bp = 100000;
damon_add_region(r, t);
r2 = damon_new_region(100, 300);
r2->nr_accesses = 20;
r2->nr_accesses_bp = 200000;
damon_add_region(r2, t);
damon_merge_two_regions(t, r, r2);
KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
KUNIT_EXPECT_EQ(test, r->ar.end, 300ul);
KUNIT_EXPECT_EQ(test, r->nr_accesses, 16u);
i = 0;
damon_for_each_region(r3, t) {
KUNIT_EXPECT_PTR_EQ(test, r, r3);
i++;
}
KUNIT_EXPECT_EQ(test, i, 1);
damon_free_target(t);
}
static struct damon_region *__nth_region_of(struct damon_target *t, int idx)
{
struct damon_region *r;
unsigned int i = 0;
damon_for_each_region(r, t) {
if (i++ == idx)
return r;
}
return NULL;
}
static void damon_test_merge_regions_of(struct kunit *test)
{
struct damon_target *t;
struct damon_region *r;
unsigned long sa[] = {0, 100, 114, 122, 130, 156, 170, 184};
unsigned long ea[] = {100, 112, 122, 130, 156, 170, 184, 230};
unsigned int nrs[] = {0, 0, 10, 10, 20, 30, 1, 2};
unsigned long saddrs[] = {0, 114, 130, 156, 170};
unsigned long eaddrs[] = {112, 130, 156, 170, 230};
int i;
t = damon_new_target();
for (i = 0; i < ARRAY_SIZE(sa); i++) {
r = damon_new_region(sa[i], ea[i]);
r->nr_accesses = nrs[i];
r->nr_accesses_bp = nrs[i] * 10000;
damon_add_region(r, t);
}
damon_merge_regions_of(t, 9, 9999);
	/* expected: 0-112, 114-130, 130-156, 156-170, 170-230 */
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
for (i = 0; i < 5; i++) {
r = __nth_region_of(t, i);
KUNIT_EXPECT_EQ(test, r->ar.start, saddrs[i]);
KUNIT_EXPECT_EQ(test, r->ar.end, eaddrs[i]);
}
damon_free_target(t);
}
static void damon_test_split_regions_of(struct kunit *test)
{
struct damon_ctx *c = damon_new_ctx();
struct damon_target *t;
struct damon_region *r;
t = damon_new_target();
r = damon_new_region(0, 22);
damon_add_region(r, t);
damon_split_regions_of(t, 2);
KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u);
damon_free_target(t);
t = damon_new_target();
r = damon_new_region(0, 220);
damon_add_region(r, t);
damon_split_regions_of(t, 4);
KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u);
damon_free_target(t);
damon_destroy_ctx(c);
}
static void damon_test_ops_registration(struct kunit *test)
{
struct damon_ctx *c = damon_new_ctx();
struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak;
bool need_cleanup = false;
/* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */
if (!damon_is_registered_ops(DAMON_OPS_VADDR)) {
bak.id = DAMON_OPS_VADDR;
KUNIT_EXPECT_EQ(test, damon_register_ops(&bak), 0);
need_cleanup = true;
}
/* DAMON_OPS_VADDR is ensured to be registered */
KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0);
/* Double-registration is prohibited */
KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
/* Unknown ops id cannot be registered */
KUNIT_EXPECT_EQ(test, damon_select_ops(c, NR_DAMON_OPS), -EINVAL);
	/* Registration should succeed after unregistration */
mutex_lock(&damon_ops_lock);
bak = damon_registered_ops[DAMON_OPS_VADDR];
damon_registered_ops[DAMON_OPS_VADDR] = (struct damon_operations){};
mutex_unlock(&damon_ops_lock);
ops.id = DAMON_OPS_VADDR;
KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), 0);
mutex_lock(&damon_ops_lock);
damon_registered_ops[DAMON_OPS_VADDR] = bak;
mutex_unlock(&damon_ops_lock);
/* Check double-registration failure again */
KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
damon_destroy_ctx(c);
if (need_cleanup) {
mutex_lock(&damon_ops_lock);
damon_registered_ops[DAMON_OPS_VADDR] =
(struct damon_operations){};
mutex_unlock(&damon_ops_lock);
}
}
static void damon_test_set_regions(struct kunit *test)
{
struct damon_target *t = damon_new_target();
struct damon_region *r1 = damon_new_region(4, 16);
struct damon_region *r2 = damon_new_region(24, 32);
struct damon_addr_range range = {.start = 8, .end = 28};
unsigned long expects[] = {8, 16, 16, 24, 24, 28};
int expect_idx = 0;
struct damon_region *r;
damon_add_region(r1, t);
damon_add_region(r2, t);
damon_set_regions(t, &range, 1);
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3);
damon_for_each_region(r, t) {
KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]);
KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]);
}
damon_destroy_target(t);
}
static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test)
{
struct damon_attrs attrs = {
.sample_interval = 10,
.aggr_interval = ((unsigned long)UINT_MAX + 1) * 10
};
/*
	 * In some cases, such as on 32-bit architectures where UINT_MAX is
	 * ULONG_MAX, attrs.aggr_interval becomes zero. Calling
	 * damon_nr_accesses_to_accesses_bp() in that case would cause a
	 * divide-by-zero. Such a case is prohibited in normal execution since
	 * the caution is documented in the comment for the function, and
	 * damon_update_monitoring_results() does the check. Skip the test in
	 * that case.
*/
if (!attrs.aggr_interval)
kunit_skip(test, "aggr_interval is zero.");
KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0);
}
static void damon_test_update_monitoring_result(struct kunit *test)
{
struct damon_attrs old_attrs = {
.sample_interval = 10, .aggr_interval = 1000,};
struct damon_attrs new_attrs;
struct damon_region *r = damon_new_region(3, 7);
r->nr_accesses = 15;
r->nr_accesses_bp = 150000;
r->age = 20;
new_attrs = (struct damon_attrs){
.sample_interval = 100, .aggr_interval = 10000,};
damon_update_monitoring_result(r, &old_attrs, &new_attrs);
KUNIT_EXPECT_EQ(test, r->nr_accesses, 15);
KUNIT_EXPECT_EQ(test, r->age, 2);
new_attrs = (struct damon_attrs){
.sample_interval = 1, .aggr_interval = 1000};
damon_update_monitoring_result(r, &old_attrs, &new_attrs);
KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
KUNIT_EXPECT_EQ(test, r->age, 2);
new_attrs = (struct damon_attrs){
.sample_interval = 1, .aggr_interval = 100};
damon_update_monitoring_result(r, &old_attrs, &new_attrs);
KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
KUNIT_EXPECT_EQ(test, r->age, 20);
damon_free_region(r);
}
static void damon_test_set_attrs(struct kunit *test)
{
struct damon_ctx *c = damon_new_ctx();
struct damon_attrs valid_attrs = {
.min_nr_regions = 10, .max_nr_regions = 1000,
.sample_interval = 5000, .aggr_interval = 100000,};
struct damon_attrs invalid_attrs;
KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0);
invalid_attrs = valid_attrs;
invalid_attrs.min_nr_regions = 1;
KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
invalid_attrs = valid_attrs;
invalid_attrs.max_nr_regions = 9;
KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
invalid_attrs = valid_attrs;
invalid_attrs.aggr_interval = 4999;
KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
damon_destroy_ctx(c);
}
static void damon_test_moving_sum(struct kunit *test)
{
unsigned int mvsum = 50000, nomvsum = 50000, len_window = 10;
unsigned int new_values[] = {10000, 0, 10000, 0, 0, 0, 10000, 0, 0, 0};
unsigned int expects[] = {55000, 50000, 55000, 50000, 45000, 40000,
45000, 40000, 35000, 30000};
int i;
for (i = 0; i < ARRAY_SIZE(new_values); i++) {
mvsum = damon_moving_sum(mvsum, nomvsum, len_window,
new_values[i]);
KUNIT_EXPECT_EQ(test, mvsum, expects[i]);
}
}
static void damos_test_new_filter(struct kunit *test)
{
struct damos_filter *filter;
filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON);
KUNIT_EXPECT_EQ(test, filter->matching, true);
KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list);
KUNIT_EXPECT_PTR_EQ(test, filter->list.next, &filter->list);
damos_destroy_filter(filter);
}
static void damos_test_filter_out(struct kunit *test)
{
struct damon_target *t;
struct damon_region *r, *r2;
struct damos_filter *f;
f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true);
f->addr_range = (struct damon_addr_range){
.start = DAMON_MIN_REGION * 2, .end = DAMON_MIN_REGION * 6};
t = damon_new_target();
r = damon_new_region(DAMON_MIN_REGION * 3, DAMON_MIN_REGION * 5);
damon_add_region(r, t);
/* region in the range */
KUNIT_EXPECT_TRUE(test, __damos_filter_out(NULL, t, r, f));
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
/* region before the range */
r->ar.start = DAMON_MIN_REGION * 1;
r->ar.end = DAMON_MIN_REGION * 2;
KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f));
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
/* region after the range */
r->ar.start = DAMON_MIN_REGION * 6;
r->ar.end = DAMON_MIN_REGION * 8;
KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f));
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
/* region started before the range */
r->ar.start = DAMON_MIN_REGION * 1;
r->ar.end = DAMON_MIN_REGION * 4;
KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f));
/* filter should have split the region */
KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 1);
KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 2);
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
r2 = damon_next_region(r);
KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 2);
KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 4);
damon_destroy_region(r2, t);
/* region started in the range */
r->ar.start = DAMON_MIN_REGION * 2;
r->ar.end = DAMON_MIN_REGION * 8;
KUNIT_EXPECT_TRUE(test, __damos_filter_out(NULL, t, r, f));
/* filter should have split the region */
KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 2);
KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 6);
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
r2 = damon_next_region(r);
KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 6);
KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 8);
damon_destroy_region(r2, t);
damon_free_target(t);
damos_free_filter(f);
}
static void damon_test_feed_loop_next_input(struct kunit *test)
{
unsigned long last_input = 900000, current_score = 200;
/*
* If current score is lower than the goal, which is always 10,000
	 * (see the comment on damon_feed_loop_next_input()), the next
* input should be higher than the last input.
*/
KUNIT_EXPECT_GT(test,
damon_feed_loop_next_input(last_input, current_score),
last_input);
/*
* If current score is higher than the goal, next input should be lower
* than the last input.
*/
current_score = 250000000;
KUNIT_EXPECT_LT(test,
damon_feed_loop_next_input(last_input, current_score),
last_input);
/*
* The next input depends on the distance between the current score and
* the goal
*/
KUNIT_EXPECT_GT(test,
damon_feed_loop_next_input(last_input, 200),
damon_feed_loop_next_input(last_input, 2000));
}
static struct kunit_case damon_test_cases[] = {
KUNIT_CASE(damon_test_target),
KUNIT_CASE(damon_test_regions),
KUNIT_CASE(damon_test_aggregate),
KUNIT_CASE(damon_test_split_at),
KUNIT_CASE(damon_test_merge_two),
KUNIT_CASE(damon_test_merge_regions_of),
KUNIT_CASE(damon_test_split_regions_of),
KUNIT_CASE(damon_test_ops_registration),
KUNIT_CASE(damon_test_set_regions),
KUNIT_CASE(damon_test_nr_accesses_to_accesses_bp),
KUNIT_CASE(damon_test_update_monitoring_result),
KUNIT_CASE(damon_test_set_attrs),
KUNIT_CASE(damon_test_moving_sum),
KUNIT_CASE(damos_test_new_filter),
KUNIT_CASE(damos_test_filter_out),
KUNIT_CASE(damon_test_feed_loop_next_input),
{},
};
static struct kunit_suite damon_test_suite = {
.name = "damon",
.test_cases = damon_test_cases,
};
kunit_test_suite(damon_test_suite);
#endif /* _DAMON_CORE_TEST_H */
#endif /* CONFIG_DAMON_KUNIT_TEST */
|