Dataset columns:

| column | type | range / cardinality |
| --- | --- | --- |
| file_name | int64 | 0 – 72.3k |
| vulnerable_line_numbers | string | length 1 – 1.06k |
| dataset_type | string (categorical) | 1 distinct value |
| commit_hash | string | length 40 – 44 |
| unique_id | int64 | 0 – 271k |
| project | string (categorical) | 10 distinct values |
| target | int64 | 0 – 1 |
| repo_url | string (categorical) | 10 distinct values |
| date | string | length 25 |
| code | string | length 0 – 20.4M |
| CVE | string | length 13 – 43 |
| CWE | string (categorical) | 50 distinct values |
| commit_link | string | length 73 – 97 |
| severity | string (categorical) | 4 distinct values |
| __index_level_0__ | int64 | 0 – 124k |
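The rows below all follow this schema. As a quick orientation, here is a minimal sketch of how a dataset with these columns could be loaded and sliced with the Hugging Face `datasets` library. The Hub identifier is a placeholder (this dump does not show the dataset's actual id), and treating `target == 1` as the vulnerable class is an inference from the `CVE`/`CWE`/`severity` columns, not something stated in the dump.

```python
# Minimal sketch: load a dataset with the columns listed above and slice it.
# "org/vuln-commits" is a hypothetical Hub id; substitute the real one.
from collections import Counter

from datasets import load_dataset

ds = load_dataset("org/vuln-commits", split="train")  # hypothetical id

# Every row shown in this dump has target == 0 and null CVE/CWE/severity,
# so the complementary slice (target == 1) is presumably the labeled
# vulnerable samples.
flagged = ds.filter(lambda row: row["target"] == 1)

print(Counter(flagged["project"]))  # per-project counts, e.g. linux, Chrome
print(flagged[0]["CVE"], flagged[0]["CWE"], flagged[0]["severity"])
```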
---
file_name: 19,265 | vulnerable_line_numbers: null | dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | unique_id: 184,260
project: linux | target: 0 | repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
CVE: null | CWE: null | commit_link: null | severity: null
__index_level_0__: 92,607
code:

```c
/*
 *  FUJITSU Extended Socket Network Device driver
 *  Copyright (c) 2015-2016 FUJITSU LIMITED
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/* debugfs support for fjes driver */

#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>

#include "fjes.h"

static struct dentry *fjes_debug_root;

static const char * const ep_status_string[] = {
    "unshared",
    "shared",
    "waiting",
    "complete",
};

static int fjes_dbg_status_show(struct seq_file *m, void *v)
{
    struct fjes_adapter *adapter = m->private;
    struct fjes_hw *hw = &adapter->hw;
    int max_epid = hw->max_epid;
    int my_epid = hw->my_epid;
    int epidx;

    seq_puts(m, "EPID\tSTATUS SAME_ZONE CONNECTED\n");
    for (epidx = 0; epidx < max_epid; epidx++) {
        if (epidx == my_epid) {
            seq_printf(m, "ep%d\t%-16c %-16c %-16c\n",
                       epidx, '-', '-', '-');
        } else {
            seq_printf(m, "ep%d\t%-16s %-16c %-16c\n",
                       epidx,
                       ep_status_string[fjes_hw_get_partner_ep_status(hw, epidx)],
                       fjes_hw_epid_is_same_zone(hw, epidx) ? 'Y' : 'N',
                       fjes_hw_epid_is_shared(hw->hw_info.share, epidx) ? 'Y' : 'N');
        }
    }

    return 0;
}

static int fjes_dbg_status_open(struct inode *inode, struct file *file)
{
    return single_open(file, fjes_dbg_status_show, inode->i_private);
}

static const struct file_operations fjes_dbg_status_fops = {
    .owner   = THIS_MODULE,
    .open    = fjes_dbg_status_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = single_release,
};

void fjes_dbg_adapter_init(struct fjes_adapter *adapter)
{
    const char *name = dev_name(&adapter->plat_dev->dev);
    struct dentry *pfile;

    adapter->dbg_adapter = debugfs_create_dir(name, fjes_debug_root);
    if (!adapter->dbg_adapter) {
        dev_err(&adapter->plat_dev->dev,
                "debugfs entry for %s failed\n", name);
        return;
    }

    pfile = debugfs_create_file("status", 0444, adapter->dbg_adapter,
                                adapter, &fjes_dbg_status_fops);
    if (!pfile)
        dev_err(&adapter->plat_dev->dev,
                "debugfs status for %s failed\n", name);
}

void fjes_dbg_adapter_exit(struct fjes_adapter *adapter)
{
    debugfs_remove_recursive(adapter->dbg_adapter);
    adapter->dbg_adapter = NULL;
}

void fjes_dbg_init(void)
{
    fjes_debug_root = debugfs_create_dir(fjes_driver_name, NULL);
    if (!fjes_debug_root)
        pr_info("init of debugfs failed\n");
}

void fjes_dbg_exit(void)
{
    debugfs_remove_recursive(fjes_debug_root);
    fjes_debug_root = NULL;
}

#endif /* CONFIG_DEBUG_FS */
```
---
file_name: 38,373 | vulnerable_line_numbers: null | dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b | unique_id: 38,373
project: Chrome | target: 0 | repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
CVE: null | CWE: null | commit_link: null | severity: null
__index_level_0__: 35,236
code:

```cpp
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/blink/renderer/platform/animation/compositor_transform_operations.h"

#include "ui/gfx/transform.h"

namespace blink {

const cc::TransformOperations&
CompositorTransformOperations::AsCcTransformOperations() const {
  return transform_operations_;
}

cc::TransformOperations
CompositorTransformOperations::ReleaseCcTransformOperations() {
  return std::move(transform_operations_);
}

bool CompositorTransformOperations::CanBlendWith(
    const blink::CompositorTransformOperations& other) const {
  return transform_operations_.CanBlendWith(other.transform_operations_);
}

void CompositorTransformOperations::AppendTranslate(double x,
                                                    double y,
                                                    double z) {
  transform_operations_.AppendTranslate(x, y, z);
}

void CompositorTransformOperations::AppendRotate(double x,
                                                 double y,
                                                 double z,
                                                 double degrees) {
  transform_operations_.AppendRotate(x, y, z, degrees);
}

void CompositorTransformOperations::AppendScale(double x, double y, double z) {
  transform_operations_.AppendScale(x, y, z);
}

void CompositorTransformOperations::AppendSkew(double x, double y) {
  transform_operations_.AppendSkew(x, y);
}

void CompositorTransformOperations::AppendPerspective(double depth) {
  transform_operations_.AppendPerspective(depth);
}

void CompositorTransformOperations::AppendMatrix(const SkMatrix44& matrix) {
  gfx::Transform transform(gfx::Transform::kSkipInitialization);
  transform.matrix() = matrix;
  transform_operations_.AppendMatrix(transform);
}

void CompositorTransformOperations::AppendIdentity() {
  transform_operations_.AppendIdentity();
}

bool CompositorTransformOperations::IsIdentity() const {
  return transform_operations_.IsIdentity();
}

}  // namespace blink
```
---
file_name: 10,452 | vulnerable_line_numbers: null | dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | unique_id: 175,447
project: linux | target: 0 | repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
CVE: null | CWE: null | commit_link: null | severity: null
__index_level_0__: 83,794
code:

```c
/*
 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright 2008 Juergen Beisert, [email protected]
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 */

#include <linux/module.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/of.h>
#include <asm/mach/irq.h>
#include <asm/exception.h>

#include "common.h"
#include "hardware.h"
#include "irq-common.h"

#define AVIC_INTCNTL       0x00 /* int control reg */
#define AVIC_NIMASK        0x04 /* int mask reg */
#define AVIC_INTENNUM      0x08 /* int enable number reg */
#define AVIC_INTDISNUM     0x0C /* int disable number reg */
#define AVIC_INTENABLEH    0x10 /* int enable reg high */
#define AVIC_INTENABLEL    0x14 /* int enable reg low */
#define AVIC_INTTYPEH      0x18 /* int type reg high */
#define AVIC_INTTYPEL      0x1C /* int type reg low */
#define AVIC_NIPRIORITY(x) (0x20 + 4 * (7 - (x))) /* int priority */
#define AVIC_NIVECSR       0x40 /* norm int vector/status */
#define AVIC_FIVECSR       0x44 /* fast int vector/status */
#define AVIC_INTSRCH       0x48 /* int source reg high */
#define AVIC_INTSRCL       0x4C /* int source reg low */
#define AVIC_INTFRCH       0x50 /* int force reg high */
#define AVIC_INTFRCL       0x54 /* int force reg low */
#define AVIC_NIPNDH        0x58 /* norm int pending high */
#define AVIC_NIPNDL        0x5C /* norm int pending low */
#define AVIC_FIPNDH        0x60 /* fast int pending high */
#define AVIC_FIPNDL        0x64 /* fast int pending low */

#define AVIC_NUM_IRQS 64

static void __iomem *avic_base;
static struct irq_domain *domain;

#ifdef CONFIG_FIQ
static int avic_set_irq_fiq(unsigned int hwirq, unsigned int type)
{
    unsigned int irqt;

    if (hwirq >= AVIC_NUM_IRQS)
        return -EINVAL;

    if (hwirq < AVIC_NUM_IRQS / 2) {
        irqt = imx_readl(avic_base + AVIC_INTTYPEL) & ~(1 << hwirq);
        imx_writel(irqt | (!!type << hwirq), avic_base + AVIC_INTTYPEL);
    } else {
        hwirq -= AVIC_NUM_IRQS / 2;
        irqt = imx_readl(avic_base + AVIC_INTTYPEH) & ~(1 << hwirq);
        imx_writel(irqt | (!!type << hwirq), avic_base + AVIC_INTTYPEH);
    }

    return 0;
}
#endif /* CONFIG_FIQ */

static struct mxc_extra_irq avic_extra_irq = {
#ifdef CONFIG_FIQ
    .set_irq_fiq = avic_set_irq_fiq,
#endif
};

#ifdef CONFIG_PM
static u32 avic_saved_mask_reg[2];

static void avic_irq_suspend(struct irq_data *d)
{
    struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
    struct irq_chip_type *ct = gc->chip_types;
    int idx = d->hwirq >> 5;

    avic_saved_mask_reg[idx] = imx_readl(avic_base + ct->regs.mask);
    imx_writel(gc->wake_active, avic_base + ct->regs.mask);
}

static void avic_irq_resume(struct irq_data *d)
{
    struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
    struct irq_chip_type *ct = gc->chip_types;
    int idx = d->hwirq >> 5;

    imx_writel(avic_saved_mask_reg[idx], avic_base + ct->regs.mask);
}
#else
#define avic_irq_suspend NULL
#define avic_irq_resume NULL
#endif

static __init void avic_init_gc(int idx, unsigned int irq_start)
{
    struct irq_chip_generic *gc;
    struct irq_chip_type *ct;

    gc = irq_alloc_generic_chip("mxc-avic", 1, irq_start, avic_base,
                                handle_level_irq);
    gc->private = &avic_extra_irq;
    gc->wake_enabled = IRQ_MSK(32);

    ct = gc->chip_types;
    ct->chip.irq_mask = irq_gc_mask_clr_bit;
    ct->chip.irq_unmask = irq_gc_mask_set_bit;
    ct->chip.irq_ack = irq_gc_mask_clr_bit;
    ct->chip.irq_set_wake = irq_gc_set_wake;
    ct->chip.irq_suspend = avic_irq_suspend;
    ct->chip.irq_resume = avic_irq_resume;
    ct->regs.mask = !idx ? AVIC_INTENABLEL : AVIC_INTENABLEH;
    ct->regs.ack = ct->regs.mask;

    irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
}

static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
{
    u32 nivector;

    do {
        nivector = imx_readl(avic_base + AVIC_NIVECSR) >> 16;
        if (nivector == 0xffff)
            break;

        handle_domain_irq(domain, nivector, regs);
    } while (1);
}

/*
 * This function initializes the AVIC hardware and disables all the
 * interrupts. It registers the interrupt enable and disable functions
 * to the kernel for each interrupt source.
 */
void __init mxc_init_irq(void __iomem *irqbase)
{
    struct device_node *np;
    int irq_base;
    int i;

    avic_base = irqbase;

    /* put the AVIC into the reset value with
     * all interrupts disabled
     */
    imx_writel(0, avic_base + AVIC_INTCNTL);
    imx_writel(0x1f, avic_base + AVIC_NIMASK);

    /* disable all interrupts */
    imx_writel(0, avic_base + AVIC_INTENABLEH);
    imx_writel(0, avic_base + AVIC_INTENABLEL);

    /* all IRQ no FIQ */
    imx_writel(0, avic_base + AVIC_INTTYPEH);
    imx_writel(0, avic_base + AVIC_INTTYPEL);

    irq_base = irq_alloc_descs(-1, 0, AVIC_NUM_IRQS, numa_node_id());
    WARN_ON(irq_base < 0);

    np = of_find_compatible_node(NULL, NULL, "fsl,avic");
    domain = irq_domain_add_legacy(np, AVIC_NUM_IRQS, irq_base, 0,
                                   &irq_domain_simple_ops, NULL);
    WARN_ON(!domain);

    for (i = 0; i < AVIC_NUM_IRQS / 32; i++, irq_base += 32)
        avic_init_gc(i, irq_base);

    /* Set default priority value (0) for all IRQ's */
    for (i = 0; i < 8; i++)
        imx_writel(0, avic_base + AVIC_NIPRIORITY(i));

    set_handle_irq(avic_handle_irq);

#ifdef CONFIG_FIQ
    /* Initialize FIQ */
    init_FIQ(FIQ_START);
#endif

    printk(KERN_INFO "MXC IRQ initialized\n");
}
```
---
file_name: 40,000 | vulnerable_line_numbers: null | dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b | unique_id: 40,000
project: Chrome | target: 0 | repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
CVE: null | CWE: null | commit_link: null | severity: null
__index_level_0__: 36,863
code:

```cpp
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/blink/renderer/modules/locks/navigator_locks.h"

#include "third_party/blink/renderer/core/frame/navigator.h"
#include "third_party/blink/renderer/core/workers/worker_navigator.h"
#include "third_party/blink/renderer/modules/locks/lock_manager.h"
#include "third_party/blink/renderer/platform/bindings/script_state.h"
#include "third_party/blink/renderer/platform/bindings/trace_wrapper_member.h"
#include "third_party/blink/renderer/platform/supplementable.h"

namespace blink {
namespace {

template <typename T>
class NavigatorLocksImpl final : public GarbageCollected<NavigatorLocksImpl<T>>,
                                 public Supplement<T>,
                                 public TraceWrapperBase {
  USING_GARBAGE_COLLECTED_MIXIN(NavigatorLocksImpl);

 public:
  static const char kSupplementName[];

  static NavigatorLocksImpl& From(T& navigator) {
    NavigatorLocksImpl* supplement = static_cast<NavigatorLocksImpl*>(
        Supplement<T>::template From<NavigatorLocksImpl>(navigator));
    if (!supplement) {
      supplement = new NavigatorLocksImpl(navigator);
      Supplement<T>::ProvideTo(navigator, supplement);
    }
    return *supplement;
  }

  LockManager* GetLockManager(ExecutionContext* context) const {
    if (!lock_manager_ && context) {
      lock_manager_ = new LockManager(context);
    }
    return lock_manager_.Get();
  }

  virtual void Trace(blink::Visitor* visitor) {
    visitor->Trace(lock_manager_);
    Supplement<T>::Trace(visitor);
  }

  // Wrapper tracing is needed for callbacks. The reference chain is
  // NavigatorLocksImpl -> LockManager -> LockRequestImpl ->
  // V8LockGrantedCallback.
  void TraceWrappers(const ScriptWrappableVisitor* visitor) const override {
    visitor->TraceWrappers(lock_manager_);
  }

  const char* NameInHeapSnapshot() const override {
    return "NavigatorLocksImpl";
  }

 private:
  explicit NavigatorLocksImpl(T& navigator) : Supplement<T>(navigator) {}

  mutable TraceWrapperMember<LockManager> lock_manager_;
};

// static
template <typename T>
const char NavigatorLocksImpl<T>::kSupplementName[] = "NavigatorLocksImpl";

}  // namespace

LockManager* NavigatorLocks::locks(ScriptState* script_state,
                                   Navigator& navigator) {
  return NavigatorLocksImpl<Navigator>::From(navigator).GetLockManager(
      ExecutionContext::From(script_state));
}

LockManager* NavigatorLocks::locks(ScriptState* script_state,
                                   WorkerNavigator& navigator) {
  return NavigatorLocksImpl<WorkerNavigator>::From(navigator).GetLockManager(
      ExecutionContext::From(script_state));
}

}  // namespace blink
```
---
file_name: 53,532 | vulnerable_line_numbers: null | dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b | unique_id: 53,532
project: Chrome | target: 0 | repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
CVE: null | CWE: null | commit_link: null | severity: null
__index_level_0__: 50,395
code:

```cpp
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/cast/sender/fake_software_video_encoder.h"

#include <stddef.h>

#include "base/json/json_writer.h"
#include "base/values.h"
#include "media/base/video_frame.h"
#include "media/cast/common/rtp_time.h"
#include "media/cast/constants.h"

#ifndef OFFICIAL_BUILD

namespace media {
namespace cast {

FakeSoftwareVideoEncoder::FakeSoftwareVideoEncoder(
    const FrameSenderConfig& video_config)
    : video_config_(video_config),
      next_frame_is_key_(true),
      frame_id_(FrameId::first()),
      frame_size_(0) {
  DCHECK_GT(video_config_.max_frame_rate, 0);
}

FakeSoftwareVideoEncoder::~FakeSoftwareVideoEncoder() = default;

void FakeSoftwareVideoEncoder::Initialize() {}

void FakeSoftwareVideoEncoder::Encode(
    const scoped_refptr<media::VideoFrame>& video_frame,
    const base::TimeTicks& reference_time,
    SenderEncodedFrame* encoded_frame) {
  DCHECK(encoded_frame);

  if (video_frame->visible_rect().size() != last_frame_size_) {
    next_frame_is_key_ = true;
    last_frame_size_ = video_frame->visible_rect().size();
  }

  encoded_frame->frame_id = frame_id_++;
  if (next_frame_is_key_) {
    encoded_frame->dependency = EncodedFrame::KEY;
    encoded_frame->referenced_frame_id = encoded_frame->frame_id;
    next_frame_is_key_ = false;
  } else {
    encoded_frame->dependency = EncodedFrame::DEPENDENT;
    encoded_frame->referenced_frame_id = encoded_frame->frame_id - 1;
  }
  encoded_frame->rtp_timestamp =
      RtpTimeTicks::FromTimeDelta(video_frame->timestamp(), kVideoFrequency);
  encoded_frame->reference_time = reference_time;

  base::DictionaryValue values;
  values.SetBoolean("key", encoded_frame->dependency == EncodedFrame::KEY);
  values.SetInteger("ref", encoded_frame->referenced_frame_id.lower_32_bits());
  values.SetInteger("id", encoded_frame->frame_id.lower_32_bits());
  values.SetInteger("size", frame_size_);
  base::JSONWriter::Write(values, &encoded_frame->data);
  encoded_frame->data.resize(
      std::max<size_t>(encoded_frame->data.size(), frame_size_), ' ');

  if (encoded_frame->dependency == EncodedFrame::KEY) {
    encoded_frame->encoder_utilization = 1.0;
    encoded_frame->lossy_utilization = 6.0;
  } else {
    encoded_frame->encoder_utilization = 0.8;
    encoded_frame->lossy_utilization = 0.8;
  }
}

void FakeSoftwareVideoEncoder::UpdateRates(uint32_t new_bitrate) {
  frame_size_ = new_bitrate / video_config_.max_frame_rate / 8;
}

void FakeSoftwareVideoEncoder::GenerateKeyFrame() {
  next_frame_is_key_ = true;
}

}  // namespace cast
}  // namespace media

#endif
```
---
file_name: 2,053 | vulnerable_line_numbers: null | dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | unique_id: 167,048
project: linux | target: 0 | repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
CVE: null | CWE: null | commit_link: null | severity: null
__index_level_0__: 75,396
code:

```c
/*
 * Copyright (c) 2008 Patrick McHardy <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/tcp.h>

struct nft_exthdr {
    u8 type;
    u8 offset;
    u8 len;
    u8 op;
    enum nft_registers dreg:8;
    u8 flags;
};

static unsigned int optlen(const u8 *opt, unsigned int offset)
{
    /* Beware zero-length options: make finite progress */
    if (opt[offset] <= TCPOPT_NOP || opt[offset + 1] == 0)
        return 1;
    else
        return opt[offset + 1];
}

static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
                                 struct nft_regs *regs,
                                 const struct nft_pktinfo *pkt)
{
    struct nft_exthdr *priv = nft_expr_priv(expr);
    u32 *dest = &regs->data[priv->dreg];
    unsigned int offset = 0;
    int err;

    err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
    if (priv->flags & NFT_EXTHDR_F_PRESENT) {
        *dest = (err >= 0);
        return;
    } else if (err < 0) {
        goto err;
    }
    offset += priv->offset;

    dest[priv->len / NFT_REG32_SIZE] = 0;
    if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
        goto err;
    return;
err:
    regs->verdict.code = NFT_BREAK;
}

static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
                                struct nft_regs *regs,
                                const struct nft_pktinfo *pkt)
{
    u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE];
    struct nft_exthdr *priv = nft_expr_priv(expr);
    unsigned int i, optl, tcphdr_len, offset;
    u32 *dest = &regs->data[priv->dreg];
    struct tcphdr *tcph;
    u8 *opt;

    if (!pkt->tprot_set || pkt->tprot != IPPROTO_TCP)
        goto err;

    tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, sizeof(*tcph), buff);
    if (!tcph)
        goto err;

    tcphdr_len = __tcp_hdrlen(tcph);
    if (tcphdr_len < sizeof(*tcph))
        goto err;

    tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, tcphdr_len, buff);
    if (!tcph)
        goto err;

    opt = (u8 *)tcph;
    for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
        optl = optlen(opt, i);

        if (priv->type != opt[i])
            continue;

        if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
            goto err;

        offset = i + priv->offset;
        dest[priv->len / NFT_REG32_SIZE] = 0;
        memcpy(dest, opt + offset, priv->len);

        return;
    }

err:
    regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
    [NFTA_EXTHDR_DREG]   = { .type = NLA_U32 },
    [NFTA_EXTHDR_TYPE]   = { .type = NLA_U8 },
    [NFTA_EXTHDR_OFFSET] = { .type = NLA_U32 },
    [NFTA_EXTHDR_LEN]    = { .type = NLA_U32 },
    [NFTA_EXTHDR_FLAGS]  = { .type = NLA_U32 },
};

static int nft_exthdr_init(const struct nft_ctx *ctx,
                           const struct nft_expr *expr,
                           const struct nlattr * const tb[])
{
    struct nft_exthdr *priv = nft_expr_priv(expr);
    u32 offset, len, flags = 0, op = NFT_EXTHDR_OP_IPV6;
    int err;

    if (!tb[NFTA_EXTHDR_DREG] ||
        !tb[NFTA_EXTHDR_TYPE] ||
        !tb[NFTA_EXTHDR_OFFSET] ||
        !tb[NFTA_EXTHDR_LEN])
        return -EINVAL;

    err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset);
    if (err < 0)
        return err;

    err = nft_parse_u32_check(tb[NFTA_EXTHDR_LEN], U8_MAX, &len);
    if (err < 0)
        return err;

    if (tb[NFTA_EXTHDR_FLAGS]) {
        err = nft_parse_u32_check(tb[NFTA_EXTHDR_FLAGS], U8_MAX, &flags);
        if (err < 0)
            return err;

        if (flags & ~NFT_EXTHDR_F_PRESENT)
            return -EINVAL;
    }

    if (tb[NFTA_EXTHDR_OP]) {
        err = nft_parse_u32_check(tb[NFTA_EXTHDR_OP], U8_MAX, &op);
        if (err < 0)
            return err;
    }

    priv->type   = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
    priv->offset = offset;
    priv->len    = len;
    priv->dreg   = nft_parse_register(tb[NFTA_EXTHDR_DREG]);
    priv->flags  = flags;
    priv->op     = op;

    return nft_validate_register_store(ctx, priv->dreg, NULL,
                                       NFT_DATA_VALUE, priv->len);
}

static int nft_exthdr_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
    const struct nft_exthdr *priv = nft_expr_priv(expr);

    if (nft_dump_register(skb, NFTA_EXTHDR_DREG, priv->dreg))
        goto nla_put_failure;
    if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type))
        goto nla_put_failure;
    if (nla_put_be32(skb, NFTA_EXTHDR_OFFSET, htonl(priv->offset)))
        goto nla_put_failure;
    if (nla_put_be32(skb, NFTA_EXTHDR_LEN, htonl(priv->len)))
        goto nla_put_failure;
    if (nla_put_be32(skb, NFTA_EXTHDR_FLAGS, htonl(priv->flags)))
        goto nla_put_failure;
    if (nla_put_be32(skb, NFTA_EXTHDR_OP, htonl(priv->op)))
        goto nla_put_failure;
    return 0;

nla_put_failure:
    return -1;
}

static struct nft_expr_type nft_exthdr_type;
static const struct nft_expr_ops nft_exthdr_ipv6_ops = {
    .type = &nft_exthdr_type,
    .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
    .eval = nft_exthdr_ipv6_eval,
    .init = nft_exthdr_init,
    .dump = nft_exthdr_dump,
};

static const struct nft_expr_ops nft_exthdr_tcp_ops = {
    .type = &nft_exthdr_type,
    .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
    .eval = nft_exthdr_tcp_eval,
    .init = nft_exthdr_init,
    .dump = nft_exthdr_dump,
};

static const struct nft_expr_ops *
nft_exthdr_select_ops(const struct nft_ctx *ctx,
                      const struct nlattr * const tb[])
{
    u32 op;

    if (!tb[NFTA_EXTHDR_OP])
        return &nft_exthdr_ipv6_ops;

    op = ntohl(nla_get_u32(tb[NFTA_EXTHDR_OP]));
    switch (op) {
    case NFT_EXTHDR_OP_TCPOPT:
        return &nft_exthdr_tcp_ops;
    case NFT_EXTHDR_OP_IPV6:
        return &nft_exthdr_ipv6_ops;
    }

    return ERR_PTR(-EOPNOTSUPP);
}

static struct nft_expr_type nft_exthdr_type __read_mostly = {
    .name       = "exthdr",
    .select_ops = &nft_exthdr_select_ops,
    .policy     = nft_exthdr_policy,
    .maxattr    = NFTA_EXTHDR_MAX,
    .owner      = THIS_MODULE,
};

static int __init nft_exthdr_module_init(void)
{
    return nft_register_expr(&nft_exthdr_type);
}

static void __exit nft_exthdr_module_exit(void)
{
    nft_unregister_expr(&nft_exthdr_type);
}

module_init(nft_exthdr_module_init);
module_exit(nft_exthdr_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_ALIAS_NFT_EXPR("exthdr");
```
---
file_name: 4,250 | vulnerable_line_numbers: null | dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | unique_id: 169,245
project: linux | target: 0 | repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
CVE: null | CWE: null | commit_link: null | severity: null
__index_level_0__: 77,592
code:

```c
/*
 * Copyright (c) 1995-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef __XFS_FS_H__
#define __XFS_FS_H__

/*
 * SGI's XFS filesystem's major stuff (constants, structures)
 */

/*
 * Direct I/O attribute record used with XFS_IOC_DIOINFO
 * d_miniosz is the min xfer size, xfer size multiple and file seek offset
 * alignment.
 */
#ifndef HAVE_DIOATTR
struct dioattr {
    __u32 d_mem;     /* data buffer memory alignment */
    __u32 d_miniosz; /* min xfer size */
    __u32 d_maxiosz; /* max xfer size */
};
#endif

/*
 * Structure for XFS_IOC_GETBMAP.
 * On input, fill in bmv_offset and bmv_length of the first structure
 * to indicate the area of interest in the file, and bmv_entries with
 * the number of array elements given back. The first structure is
 * updated on return to give the offset and length for the next call.
 */
#ifndef HAVE_GETBMAP
struct getbmap {
    __s64 bmv_offset;  /* file offset of segment in blocks */
    __s64 bmv_block;   /* starting block (64-bit daddr_t) */
    __s64 bmv_length;  /* length of segment, blocks */
    __s32 bmv_count;   /* # of entries in array incl. 1st */
    __s32 bmv_entries; /* # of entries filled in (output) */
};
#endif

/*
 * Structure for XFS_IOC_GETBMAPX. Fields bmv_offset through bmv_entries
 * are used exactly as in the getbmap structure. The getbmapx structure
 * has additional bmv_iflags and bmv_oflags fields. The bmv_iflags field
 * is only used for the first structure. It contains input flags
 * specifying XFS_IOC_GETBMAPX actions. The bmv_oflags field is filled
 * in by the XFS_IOC_GETBMAPX command for each returned structure after
 * the first.
 */
#ifndef HAVE_GETBMAPX
struct getbmapx {
    __s64 bmv_offset;  /* file offset of segment in blocks */
    __s64 bmv_block;   /* starting block (64-bit daddr_t) */
    __s64 bmv_length;  /* length of segment, blocks */
    __s32 bmv_count;   /* # of entries in array incl. 1st */
    __s32 bmv_entries; /* # of entries filled in (output). */
    __s32 bmv_iflags;  /* input flags (1st structure) */
    __s32 bmv_oflags;  /* output flags (after 1st structure)*/
    __s32 bmv_unused1; /* future use */
    __s32 bmv_unused2; /* future use */
};
#endif

/* bmv_iflags values - set by XFS_IOC_GETBMAPX caller. */
#define BMV_IF_ATTRFORK      0x1  /* return attr fork rather than data */
#define BMV_IF_NO_DMAPI_READ 0x2  /* Do not generate DMAPI read event */
#define BMV_IF_PREALLOC      0x4  /* rtn status BMV_OF_PREALLOC if req */
#define BMV_IF_DELALLOC      0x8  /* rtn status BMV_OF_DELALLOC if req */
#define BMV_IF_NO_HOLES      0x10 /* Do not return holes */
#define BMV_IF_COWFORK       0x20 /* return CoW fork rather than data */
#define BMV_IF_VALID \
    (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC| \
     BMV_IF_DELALLOC|BMV_IF_NO_HOLES|BMV_IF_COWFORK)

/* bmv_oflags values - returned for each non-header segment */
#define BMV_OF_PREALLOC 0x1 /* segment = unwritten pre-allocation */
#define BMV_OF_DELALLOC 0x2 /* segment = delayed allocation */
#define BMV_OF_LAST     0x4 /* segment is the last in the file */
#define BMV_OF_SHARED   0x8 /* segment shared with another file */

/*
 * Structure for XFS_IOC_FSSETDM.
 * For use by backup and restore programs to set the XFS on-disk inode
 * fields di_dmevmask and di_dmstate. These must be set to exactly and
 * only values previously obtained via xfs_bulkstat! (Specifically the
 * xfs_bstat_t fields bs_dmevmask and bs_dmstate.)
 */
#ifndef HAVE_FSDMIDATA
struct fsdmidata {
    __u32 fsd_dmevmask; /* corresponds to di_dmevmask */
    __u16 fsd_padding;
    __u16 fsd_dmstate;  /* corresponds to di_dmstate */
};
#endif

/*
 * File segment locking set data type for 64 bit access.
 * Also used for all the RESV/FREE interfaces.
 */
typedef struct xfs_flock64 {
    __s16 l_type;
    __s16 l_whence;
    __s64 l_start;
    __s64 l_len;    /* len == 0 means until end of file */
    __s32 l_sysid;
    __u32 l_pid;
    __s32 l_pad[4]; /* reserve area */
} xfs_flock64_t;

/*
 * Output for XFS_IOC_FSGEOMETRY_V1
 */
typedef struct xfs_fsop_geom_v1 {
    __u32 blocksize;        /* filesystem (data) block size */
    __u32 rtextsize;        /* realtime extent size */
    __u32 agblocks;         /* fsblocks in an AG */
    __u32 agcount;          /* number of allocation groups */
    __u32 logblocks;        /* fsblocks in the log */
    __u32 sectsize;         /* (data) sector size, bytes */
    __u32 inodesize;        /* inode size in bytes */
    __u32 imaxpct;          /* max allowed inode space(%) */
    __u64 datablocks;       /* fsblocks in data subvolume */
    __u64 rtblocks;         /* fsblocks in realtime subvol */
    __u64 rtextents;        /* rt extents in realtime subvol*/
    __u64 logstart;         /* starting fsblock of the log */
    unsigned char uuid[16]; /* unique id of the filesystem */
    __u32 sunit;            /* stripe unit, fsblocks */
    __u32 swidth;           /* stripe width, fsblocks */
    __s32 version;          /* structure version */
    __u32 flags;            /* superblock version flags */
    __u32 logsectsize;      /* log sector size, bytes */
    __u32 rtsectsize;       /* realtime sector size, bytes */
    __u32 dirblocksize;     /* directory block size, bytes */
} xfs_fsop_geom_v1_t;

/*
 * Output for XFS_IOC_FSGEOMETRY
 */
typedef struct xfs_fsop_geom {
    __u32 blocksize;        /* filesystem (data) block size */
    __u32 rtextsize;        /* realtime extent size */
    __u32 agblocks;         /* fsblocks in an AG */
    __u32 agcount;          /* number of allocation groups */
    __u32 logblocks;        /* fsblocks in the log */
    __u32 sectsize;         /* (data) sector size, bytes */
    __u32 inodesize;        /* inode size in bytes */
    __u32 imaxpct;          /* max allowed inode space(%) */
    __u64 datablocks;       /* fsblocks in data subvolume */
    __u64 rtblocks;         /* fsblocks in realtime subvol */
    __u64 rtextents;        /* rt extents in realtime subvol*/
    __u64 logstart;         /* starting fsblock of the log */
    unsigned char uuid[16]; /* unique id of the filesystem */
    __u32 sunit;            /* stripe unit, fsblocks */
    __u32 swidth;           /* stripe width, fsblocks */
    __s32 version;          /* structure version */
    __u32 flags;            /* superblock version flags */
    __u32 logsectsize;      /* log sector size, bytes */
    __u32 rtsectsize;       /* realtime sector size, bytes */
    __u32 dirblocksize;     /* directory block size, bytes */
    __u32 logsunit;         /* log stripe unit, bytes */
} xfs_fsop_geom_t;

/* Output for XFS_FS_COUNTS */
typedef struct xfs_fsop_counts {
    __u64 freedata; /* free data section blocks */
    __u64 freertx;  /* free rt extents */
    __u64 freeino;  /* free inodes */
    __u64 allocino; /* total allocated inodes */
} xfs_fsop_counts_t;

/* Input/Output for XFS_GET_RESBLKS and XFS_SET_RESBLKS */
typedef struct xfs_fsop_resblks {
    __u64 resblks;
    __u64 resblks_avail;
} xfs_fsop_resblks_t;

#define XFS_FSOP_GEOM_VERSION 0

#define XFS_FSOP_GEOM_FLAGS_ATTR     0x0001   /* attributes in use */
#define XFS_FSOP_GEOM_FLAGS_NLINK    0x0002   /* 32-bit nlink values */
#define XFS_FSOP_GEOM_FLAGS_QUOTA    0x0004   /* quotas enabled */
#define XFS_FSOP_GEOM_FLAGS_IALIGN   0x0008   /* inode alignment */
#define XFS_FSOP_GEOM_FLAGS_DALIGN   0x0010   /* large data alignment */
#define XFS_FSOP_GEOM_FLAGS_SHARED   0x0020   /* read-only shared */
#define XFS_FSOP_GEOM_FLAGS_EXTFLG   0x0040   /* special extent flag */
#define XFS_FSOP_GEOM_FLAGS_DIRV2    0x0080   /* directory version 2 */
#define XFS_FSOP_GEOM_FLAGS_LOGV2    0x0100   /* log format version 2 */
#define XFS_FSOP_GEOM_FLAGS_SECTOR   0x0200   /* sector sizes >1BB */
#define XFS_FSOP_GEOM_FLAGS_ATTR2    0x0400   /* inline attributes rework */
#define XFS_FSOP_GEOM_FLAGS_PROJID32 0x0800   /* 32-bit project IDs */
#define XFS_FSOP_GEOM_FLAGS_DIRV2CI  0x1000   /* ASCII only CI names */
#define XFS_FSOP_GEOM_FLAGS_LAZYSB   0x4000   /* lazy superblock counters */
#define XFS_FSOP_GEOM_FLAGS_V5SB     0x8000   /* version 5 superblock */
#define XFS_FSOP_GEOM_FLAGS_FTYPE    0x10000  /* inode directory types */
#define XFS_FSOP_GEOM_FLAGS_FINOBT   0x20000  /* free inode btree */
#define XFS_FSOP_GEOM_FLAGS_SPINODES 0x40000  /* sparse inode chunks */
#define XFS_FSOP_GEOM_FLAGS_RMAPBT   0x80000  /* reverse mapping btree */
#define XFS_FSOP_GEOM_FLAGS_REFLINK  0x100000 /* files can share blocks */

/*
 * Minimum and maximum sizes need for growth checks.
 *
 * Block counts are in units of filesystem blocks, not basic blocks.
 */
#define XFS_MIN_AG_BLOCKS  64
#define XFS_MIN_LOG_BLOCKS 512ULL
#define XFS_MAX_LOG_BLOCKS (1024 * 1024ULL)
#define XFS_MIN_LOG_BYTES  (10 * 1024 * 1024ULL)

/* keep the maximum size under 2^31 by a small amount */
#define XFS_MAX_LOG_BYTES \
    ((2 * 1024 * 1024 * 1024ULL) - XFS_MIN_LOG_BYTES)

/* Used for sanity checks on superblock */
#define XFS_MAX_DBLOCKS(s) ((xfs_rfsblock_t)(s)->sb_agcount * (s)->sb_agblocks)
#define XFS_MIN_DBLOCKS(s) ((xfs_rfsblock_t)((s)->sb_agcount - 1) * \
                            (s)->sb_agblocks + XFS_MIN_AG_BLOCKS)

/*
 * Structures for XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG & XFS_IOC_FSGROWFSRT
 */
typedef struct xfs_growfs_data {
    __u64 newblocks; /* new data subvol size, fsblocks */
    __u32 imaxpct;   /* new inode space percentage limit */
} xfs_growfs_data_t;

typedef struct xfs_growfs_log {
    __u32 newblocks; /* new log size, fsblocks */
    __u32 isint;     /* 1 if new log is internal */
} xfs_growfs_log_t;

typedef struct xfs_growfs_rt {
    __u64 newblocks; /* new realtime size, fsblocks */
    __u32 extsize;   /* new realtime extent size, fsblocks */
} xfs_growfs_rt_t;

/*
 * Structures returned from ioctl XFS_IOC_FSBULKSTAT & XFS_IOC_FSBULKSTAT_SINGLE
 */
typedef struct xfs_bstime {
    time_t tv_sec; /* seconds */
    __s32 tv_nsec; /* and nanoseconds */
} xfs_bstime_t;

typedef struct xfs_bstat {
    __u64 bs_ino;            /* inode number */
    __u16 bs_mode;           /* type and mode */
    __u16 bs_nlink;          /* number of links */
    __u32 bs_uid;            /* user id */
    __u32 bs_gid;            /* group id */
    __u32 bs_rdev;           /* device value */
    __s32 bs_blksize;        /* block size */
    __s64 bs_size;           /* file size */
    xfs_bstime_t bs_atime;   /* access time */
    xfs_bstime_t bs_mtime;   /* modify time */
    xfs_bstime_t bs_ctime;   /* inode change time */
    int64_t bs_blocks;       /* number of blocks */
    __u32 bs_xflags;         /* extended flags */
    __s32 bs_extsize;        /* extent size */
    __s32 bs_extents;        /* number of extents */
    __u32 bs_gen;            /* generation count */
    __u16 bs_projid_lo;      /* lower part of project id */
#define bs_projid bs_projid_lo /* (previously just bs_projid) */
    __u16 bs_forkoff;        /* inode fork offset in bytes */
    __u16 bs_projid_hi;      /* higher part of project id */
    unsigned char bs_pad[6]; /* pad space, unused */
    __u32 bs_cowextsize;     /* cow extent size */
    __u32 bs_dmevmask;       /* DMIG event mask */
    __u16 bs_dmstate;        /* DMIG state info */
    __u16 bs_aextents;       /* attribute number of extents */
} xfs_bstat_t;

/*
 * Project quota id helpers (previously projid was 16bit only
 * and using two 16bit values to hold new 32bit projid was choosen
 * to retain compatibility with "old" filesystems).
 */
static inline __uint32_t
bstat_get_projid(struct xfs_bstat *bs)
{
    return (__uint32_t)bs->bs_projid_hi << 16 | bs->bs_projid_lo;
}

/*
 * The user-level BulkStat Request interface structure.
 */
typedef struct xfs_fsop_bulkreq {
    __u64 __user *lastip; /* last inode # pointer */
    __s32 icount;         /* count of entries in buffer */
    void __user *ubuffer; /* user buffer for inode desc. */
    __s32 __user *ocount; /* output count pointer */
} xfs_fsop_bulkreq_t;

/*
 * Structures returned from xfs_inumbers routine (XFS_IOC_FSINUMBERS).
 */
typedef struct xfs_inogrp {
    __u64 xi_startino;   /* starting inode number */
    __s32 xi_alloccount; /* # bits set in allocmask */
    __u64 xi_allocmask;  /* mask of allocated inodes */
} xfs_inogrp_t;

/*
 * Error injection.
 */
typedef struct xfs_error_injection {
    __s32 fd;
    __s32 errtag;
} xfs_error_injection_t;

/*
 * Speculative preallocation trimming.
 */
#define XFS_EOFBLOCKS_VERSION 1
struct xfs_fs_eofblocks {
    __u32 eof_version;
    __u32 eof_flags;
    uid_t eof_uid;
    gid_t eof_gid;
    prid_t eof_prid;
    __u32 pad32;
    __u64 eof_min_file_size;
    __u64 pad64[12];
};

/* eof_flags values */
#define XFS_EOF_FLAGS_SYNC        (1 << 0) /* sync/wait mode scan */
#define XFS_EOF_FLAGS_UID         (1 << 1) /* filter by uid */
#define XFS_EOF_FLAGS_GID         (1 << 2) /* filter by gid */
#define XFS_EOF_FLAGS_PRID        (1 << 3) /* filter by project id */
#define XFS_EOF_FLAGS_MINFILESIZE (1 << 4) /* filter by min file size */
#define XFS_EOF_FLAGS_UNION       (1 << 5) /* union filter algorithm;
                                            * kernel only, not included in
                                            * valid mask */
#define XFS_EOF_FLAGS_VALID \
    (XFS_EOF_FLAGS_SYNC | \
     XFS_EOF_FLAGS_UID | \
     XFS_EOF_FLAGS_GID | \
     XFS_EOF_FLAGS_PRID | \
     XFS_EOF_FLAGS_MINFILESIZE)

/*
 * The user-level Handle Request interface structure.
 */
typedef struct xfs_fsop_handlereq {
    __u32 fd;               /* fd for FD_TO_HANDLE */
    void __user *path;      /* user pathname */
    __u32 oflags;           /* open flags */
    void __user *ihandle;   /* user supplied handle */
    __u32 ihandlen;         /* user supplied length */
    void __user *ohandle;   /* user buffer for handle */
    __u32 __user *ohandlen; /* user buffer length */
} xfs_fsop_handlereq_t;

/*
 * Compound structures for passing args through Handle Request interfaces
 * xfs_fssetdm_by_handle, xfs_attrlist_by_handle, xfs_attrmulti_by_handle
 * - ioctls: XFS_IOC_FSSETDM_BY_HANDLE, XFS_IOC_ATTRLIST_BY_HANDLE, and
 *   XFS_IOC_ATTRMULTI_BY_HANDLE
 */
typedef struct xfs_fsop_setdm_handlereq {
    struct xfs_fsop_handlereq hreq; /* handle information */
    struct fsdmidata __user *data;  /* DMAPI data */
} xfs_fsop_setdm_handlereq_t;

typedef struct xfs_attrlist_cursor {
    __u32 opaque[4];
} xfs_attrlist_cursor_t;

typedef struct xfs_fsop_attrlist_handlereq {
    struct xfs_fsop_handlereq hreq; /* handle interface structure */
    struct xfs_attrlist_cursor pos; /* opaque cookie, list offset */
    __u32 flags;                    /* which namespace to use */
    __u32 buflen;                   /* length of buffer supplied */
    void __user *buffer;            /* returned names */
} xfs_fsop_attrlist_handlereq_t;

typedef struct xfs_attr_multiop {
    __u32 am_opcode;
#define ATTR_OP_GET    1 /* return the indicated attr's value */
#define ATTR_OP_SET    2 /* set/create the indicated attr/value pair */
#define ATTR_OP_REMOVE 3 /* remove the indicated attr */
    __s32 am_error;
    void __user *am_attrname;
    void __user *am_attrvalue;
    __u32 am_length;
    __u32 am_flags;
} xfs_attr_multiop_t;

typedef struct xfs_fsop_attrmulti_handlereq {
    struct xfs_fsop_handlereq hreq;      /* handle interface structure */
    __u32 opcount;                       /* count of following multiop */
    struct xfs_attr_multiop __user *ops; /* attr_multi data */
} xfs_fsop_attrmulti_handlereq_t;

/*
 * per machine unique filesystem identifier types.
 */
typedef struct { __u32 val[2]; } xfs_fsid_t; /* file system id type */

typedef struct xfs_fid {
    __u16 fid_len; /* length of remainder */
    __u16 fid_pad;
    __u32 fid_gen; /* generation number */
    __u64 fid_ino; /* 64 bits inode number */
} xfs_fid_t;

typedef struct xfs_handle {
    union {
        __s64 align;         /* force alignment of ha_fid */
        xfs_fsid_t _ha_fsid; /* unique file system identifier */
    } ha_u;
    xfs_fid_t ha_fid;        /* file system specific file ID */
} xfs_handle_t;
#define ha_fsid ha_u._ha_fsid

#define XFS_HSIZE(handle) (((char *) &(handle).ha_fid.fid_pad \
                           - (char *) &(handle)) \
                           + (handle).ha_fid.fid_len)

/*
 * Structure passed to XFS_IOC_SWAPEXT
 */
typedef struct xfs_swapext {
    __int64_t sx_version;  /* version */
#define XFS_SX_VERSION 0
    __int64_t sx_fdtarget; /* fd of target file */
    __int64_t sx_fdtmp;    /* fd of tmp file */
    xfs_off_t sx_offset;   /* offset into file */
    xfs_off_t sx_length;   /* leng from offset */
    char sx_pad[16];       /* pad space, unused */
    xfs_bstat_t sx_stat;   /* stat of target b4 copy */
} xfs_swapext_t;

/*
 * Flags for going down operation
 */
#define XFS_FSOP_GOING_FLAGS_DEFAULT    0x0 /* going down */
#define XFS_FSOP_GOING_FLAGS_LOGFLUSH   0x1 /* flush log but not data */
#define XFS_FSOP_GOING_FLAGS_NOLOGFLUSH 0x2 /* don't flush log nor data */

/*
 * ioctl limits
 */
#ifdef XATTR_LIST_MAX
#  define XFS_XATTR_LIST_MAX XATTR_LIST_MAX
#else
#  define XFS_XATTR_LIST_MAX 65536
#endif

/*
 * ioctl commands that are used by Linux filesystems
 */
#define XFS_IOC_GETXFLAGS  FS_IOC_GETFLAGS
#define XFS_IOC_SETXFLAGS  FS_IOC_SETFLAGS
#define XFS_IOC_GETVERSION FS_IOC_GETVERSION

/*
 * ioctl commands that replace IRIX fcntl()'s
 * For 'documentation' purposed more than anything else,
 * the "cmd #" field reflects the IRIX fcntl number.
 */
#define XFS_IOC_ALLOCSP        _IOW ('X', 10, struct xfs_flock64)
#define XFS_IOC_FREESP         _IOW ('X', 11, struct xfs_flock64)
#define XFS_IOC_DIOINFO        _IOR ('X', 30, struct dioattr)
#define XFS_IOC_FSGETXATTR     FS_IOC_FSGETXATTR
#define XFS_IOC_FSSETXATTR     FS_IOC_FSSETXATTR
#define XFS_IOC_ALLOCSP64      _IOW ('X', 36, struct xfs_flock64)
#define XFS_IOC_FREESP64       _IOW ('X', 37, struct xfs_flock64)
#define XFS_IOC_GETBMAP        _IOWR('X', 38, struct getbmap)
#define XFS_IOC_FSSETDM        _IOW ('X', 39, struct fsdmidata)
#define XFS_IOC_RESVSP         _IOW ('X', 40, struct xfs_flock64)
#define XFS_IOC_UNRESVSP       _IOW ('X', 41, struct xfs_flock64)
#define XFS_IOC_RESVSP64       _IOW ('X', 42, struct xfs_flock64)
#define XFS_IOC_UNRESVSP64     _IOW ('X', 43, struct xfs_flock64)
#define XFS_IOC_GETBMAPA       _IOWR('X', 44, struct getbmap)
#define XFS_IOC_FSGETXATTRA    _IOR ('X', 45, struct fsxattr)
/*      XFS_IOC_SETBIOSIZE ---- deprecated 46      */
/*      XFS_IOC_GETBIOSIZE ---- deprecated 47      */
#define XFS_IOC_GETBMAPX       _IOWR('X', 56, struct getbmap)
#define XFS_IOC_ZERO_RANGE     _IOW ('X', 57, struct xfs_flock64)
#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_fs_eofblocks)

/*
 * ioctl commands that replace IRIX syssgi()'s
 */
#define XFS_IOC_FSGEOMETRY_V1       _IOR ('X', 100, struct xfs_fsop_geom_v1)
#define XFS_IOC_FSBULKSTAT          _IOWR('X', 101, struct xfs_fsop_bulkreq)
#define XFS_IOC_FSBULKSTAT_SINGLE   _IOWR('X', 102, struct xfs_fsop_bulkreq)
#define XFS_IOC_FSINUMBERS          _IOWR('X', 103, struct xfs_fsop_bulkreq)
#define XFS_IOC_PATH_TO_FSHANDLE    _IOWR('X', 104, struct xfs_fsop_handlereq)
#define XFS_IOC_PATH_TO_HANDLE      _IOWR('X', 105, struct xfs_fsop_handlereq)
#define XFS_IOC_FD_TO_HANDLE        _IOWR('X', 106, struct xfs_fsop_handlereq)
#define XFS_IOC_OPEN_BY_HANDLE      _IOWR('X', 107, struct xfs_fsop_handlereq)
#define XFS_IOC_READLINK_BY_HANDLE  _IOWR('X', 108, struct xfs_fsop_handlereq)
#define XFS_IOC_SWAPEXT             _IOWR('X', 109, struct xfs_swapext)
#define XFS_IOC_FSGROWFSDATA        _IOW ('X', 110, struct xfs_growfs_data)
#define XFS_IOC_FSGROWFSLOG         _IOW ('X', 111, struct xfs_growfs_log)
#define XFS_IOC_FSGROWFSRT          _IOW ('X', 112, struct xfs_growfs_rt)
#define XFS_IOC_FSCOUNTS            _IOR ('X', 113, struct xfs_fsop_counts)
#define XFS_IOC_SET_RESBLKS         _IOWR('X', 114, struct xfs_fsop_resblks)
#define XFS_IOC_GET_RESBLKS         _IOR ('X', 115, struct xfs_fsop_resblks)
#define XFS_IOC_ERROR_INJECTION     _IOW ('X', 116, struct xfs_error_injection)
#define XFS_IOC_ERROR_CLEARALL      _IOW ('X', 117, struct xfs_error_injection)
/*      XFS_IOC_ATTRCTL_BY_HANDLE -- deprecated 118      */
#define XFS_IOC_FREEZE              _IOWR('X', 119, int) /* aka FIFREEZE */
#define XFS_IOC_THAW                _IOWR('X', 120, int) /* aka FITHAW */
#define XFS_IOC_FSSETDM_BY_HANDLE   _IOW ('X', 121, struct xfs_fsop_setdm_handlereq)
#define XFS_IOC_ATTRLIST_BY_HANDLE  _IOW ('X', 122, struct xfs_fsop_attrlist_handlereq)
#define XFS_IOC_ATTRMULTI_BY_HANDLE _IOW ('X', 123, struct xfs_fsop_attrmulti_handlereq)
#define XFS_IOC_FSGEOMETRY          _IOR ('X', 124, struct xfs_fsop_geom)
#define XFS_IOC_GOINGDOWN           _IOR ('X', 125, __uint32_t)
/*      XFS_IOC_GETFSUUID ---------- deprecated 140      */

#ifndef HAVE_BBMACROS
/*
 * Block I/O parameterization. A basic block (BB) is the lowest size of
 * filesystem allocation, and must equal 512. Length units given to bio
 * routines are in BB's.
 */
#define BBSHIFT      9
#define BBSIZE       (1<<BBSHIFT)
#define BBMASK       (BBSIZE-1)
#define BTOBB(bytes) (((__u64)(bytes) + BBSIZE - 1) >> BBSHIFT)
#define BTOBBT(bytes) ((__u64)(bytes) >> BBSHIFT)
#define BBTOB(bbs)   ((bbs) << BBSHIFT)
#endif

#endif /* __XFS_FS_H__ */
```
---
file_name: 6,031 | vulnerable_line_numbers: null | dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b | unique_id: 6,031
project: Chrome | target: 0 | repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
CVE: null | CWE: null | commit_link: null | severity: null
__index_level_0__: 2,894
code:

```cpp
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CHROMEOS_COMPONENTS_FAKE_ERROR_TOLERANT_BLE_ADVERTISEMENT_H_
#define CHROMEOS_COMPONENTS_FAKE_ERROR_TOLERANT_BLE_ADVERTISEMENT_H_

#include "base/callback.h"
#include "base/macros.h"
#include "chromeos/components/tether/error_tolerant_ble_advertisement.h"

namespace chromeos {

namespace tether {

// Test double for ErrorTolerantBleAdvertisement.
class FakeErrorTolerantBleAdvertisement : public ErrorTolerantBleAdvertisement {
 public:
  FakeErrorTolerantBleAdvertisement(const std::string& device_id);
  FakeErrorTolerantBleAdvertisement(
      const std::string& device_id,
      const base::Callback<void(FakeErrorTolerantBleAdvertisement*)>&
          deletion_callback);
  ~FakeErrorTolerantBleAdvertisement() override;

  void InvokeStopCallback();

  // ErrorTolerantBleAdvertisement:
  void Stop(const base::Closure& callback) override;
  bool HasBeenStopped() override;

 private:
  const base::Callback<void(FakeErrorTolerantBleAdvertisement*)>
      deletion_callback_;
  base::Closure stop_callback_;

  DISALLOW_COPY_AND_ASSIGN(FakeErrorTolerantBleAdvertisement);
};

}  // namespace tether

}  // namespace chromeos

#endif  // CHROMEOS_COMPONENTS_FAKE_ERROR_TOLERANT_BLE_ADVERTISEMENT_H_
```
---
file_name: 7,319 | vulnerable_line_numbers: null | dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | unique_id: 172,314
project: linux | target: 0 | repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
CVE: null | CWE: null | commit_link: null | severity: null
__index_level_0__: 80,661
code:

```c
/*
 * platform_msic.h: MSIC platform data header file
 *
 * (C) Copyright 2013 Intel Corporation
 * Author: Sathyanarayanan Kuppuswamy <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#ifndef _PLATFORM_MSIC_H_
#define _PLATFORM_MSIC_H_

extern struct intel_msic_platform_data msic_pdata;

void *msic_generic_platform_data(void *info, enum intel_msic_block block);

#endif
```
---
file_name: 38,966 | vulnerable_line_numbers: null | dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b | unique_id: 38,966
project: Chrome | target: 0 | repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
CVE: null | CWE: null | commit_link: null | severity: null
__index_level_0__: 35,829
code:

```cpp
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_TESTING_INTERNALS_WEB_AUDIO_H_
#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_TESTING_INTERNALS_WEB_AUDIO_H_

#include "third_party/blink/renderer/platform/wtf/allocator.h"

namespace blink {

class Internals;

class InternalsWebAudio {
  STATIC_ONLY(InternalsWebAudio);

 public:
  static unsigned audioHandlerCount(Internals&);
};

}  // namespace blink

#endif  // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_TESTING_INTERNALS_WEB_AUDIO_H_
```
---
file_name: 26,782 | vulnerable_line_numbers: null | dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | unique_id: 191,777
project: linux | target: 0 | repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
CVE: null | CWE: null | commit_link: null | severity: null
__index_level_0__: 100,124
code:

```c
/*
 * Qualcomm SCM driver
 *
 * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>

#include "qcom_scm.h"

#define SCM_HAS_CORE_CLK  BIT(0)
#define SCM_HAS_IFACE_CLK BIT(1)
#define SCM_HAS_BUS_CLK   BIT(2)

struct qcom_scm {
    struct device *dev;
    struct clk *core_clk;
    struct clk *iface_clk;
    struct clk *bus_clk;
    struct reset_controller_dev reset;
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
    int ret;

    ret = clk_prepare_enable(__scm->core_clk);
    if (ret)
        goto bail;

    ret = clk_prepare_enable(__scm->iface_clk);
    if (ret)
        goto disable_core;

    ret = clk_prepare_enable(__scm->bus_clk);
    if (ret)
        goto disable_iface;

    return 0;

disable_iface:
    clk_disable_unprepare(__scm->iface_clk);
disable_core:
    clk_disable_unprepare(__scm->core_clk);
bail:
    return ret;
}

static void qcom_scm_clk_disable(void)
{
    clk_disable_unprepare(__scm->core_clk);
    clk_disable_unprepare(__scm->iface_clk);
    clk_disable_unprepare(__scm->bus_clk);
}

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range would be removed from the cpu present mask.
 */
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
    return __qcom_scm_set_cold_boot_addr(entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
    return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags - Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
    __qcom_scm_cpu_power_down(flags);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);

/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
    int ret = qcom_scm_clk_enable();

    if (ret)
        return ret;

    ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
                                       QCOM_SCM_CMD_HDCP);

    qcom_scm_clk_disable();

    return ret > 0 ? true : false;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
    int ret = qcom_scm_clk_enable();

    if (ret)
        return ret;

    ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
    qcom_scm_clk_disable();
    return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *                            available for the given peripherial
 * @peripheral: peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
    int ret;

    ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
                                       QCOM_SCM_PAS_IS_SUPPORTED_CMD);
    if (ret <= 0)
        return false;

    return __qcom_scm_pas_supported(__scm->dev, peripheral);
}
EXPORT_SYMBOL(qcom_scm_pas_supported);

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *                             state machine for a given peripheral, using the
 *                             metadata
 * @peripheral: peripheral id
 * @metadata:   pointer to memory containing ELF header, program header table
 *              and optional blob of data used for authenticating the metadata
 *              and the rest of the firmware
 * @size:       size of the metadata
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
{
    dma_addr_t mdata_phys;
    void *mdata_buf;
    int ret;

    /*
     * During the scm call memory protection will be enabled for the meta
     * data blob, so make sure it's physically contiguous, 4K aligned and
     * non-cachable to avoid XPU violations.
     */
    mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
                                   GFP_KERNEL);
    if (!mdata_buf) {
        dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
        return -ENOMEM;
    }
    memcpy(mdata_buf, metadata, size);

    ret = qcom_scm_clk_enable();
    if (ret)
        goto free_metadata;

    ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys);

    qcom_scm_clk_disable();

free_metadata:
    dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);

    return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *                            for firmware loading
 * @peripheral: peripheral id
 * @addr:       start address of memory area to prepare
 * @size:       size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
    int ret;

    ret = qcom_scm_clk_enable();
    if (ret)
        return ret;

    ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
    qcom_scm_clk_disable();

    return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *                                 and reset the remote processor
 * @peripheral: peripheral id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
    int ret;

    ret = qcom_scm_clk_enable();
    if (ret)
        return ret;

    ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
    qcom_scm_clk_disable();

    return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
    int ret;

    ret = qcom_scm_clk_enable();
    if (ret)
        return ret;

    ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
    qcom_scm_clk_disable();

    return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_shutdown);

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
                                     unsigned long idx)
{
    if (idx != 0)
        return -EINVAL;

    return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
                                       unsigned long idx)
{
    if (idx != 0)
        return -EINVAL;

    return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
    .assert = qcom_scm_pas_reset_assert,
    .deassert = qcom_scm_pas_reset_deassert,
};

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
    return !!__scm;
}
EXPORT_SYMBOL(qcom_scm_is_available);

int qcom_scm_set_remote_state(u32 state, u32 id)
{
    return __qcom_scm_set_remote_state(__scm->dev, state, id);
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);

static int qcom_scm_probe(struct platform_device *pdev)
{
    struct qcom_scm *scm;
    unsigned long clks;
    int ret;

    scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
    if (!scm)
        return -ENOMEM;

    clks = (unsigned long)of_device_get_match_data(&pdev->dev);
    if (clks & SCM_HAS_CORE_CLK) {
        scm->core_clk = devm_clk_get(&pdev->dev, "core");
        if (IS_ERR(scm->core_clk)) {
            if (PTR_ERR(scm->core_clk) != -EPROBE_DEFER)
                dev_err(&pdev->dev,
                        "failed to acquire core clk\n");
            return PTR_ERR(scm->core_clk);
        }
    }

    if (clks & SCM_HAS_IFACE_CLK) {
        scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
        if (IS_ERR(scm->iface_clk)) {
            if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER)
                dev_err(&pdev->dev,
                        "failed to acquire iface clk\n");
            return PTR_ERR(scm->iface_clk);
        }
    }

    if (clks & SCM_HAS_BUS_CLK) {
        scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
        if (IS_ERR(scm->bus_clk)) {
            if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER)
                dev_err(&pdev->dev,
                        "failed to acquire bus clk\n");
            return PTR_ERR(scm->bus_clk);
        }
    }

    scm->reset.ops = &qcom_scm_pas_reset_ops;
    scm->reset.nr_resets = 1;
    scm->reset.of_node = pdev->dev.of_node;
    ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
    if (ret)
        return ret;

    /* vote for max clk rate for highest performance */
    ret = clk_set_rate(scm->core_clk, INT_MAX);
    if (ret)
        return ret;

    __scm = scm;
    __scm->dev = &pdev->dev;

    __qcom_scm_init();

    return 0;
}

static const struct of_device_id qcom_scm_dt_match[] = {
    { .compatible = "qcom,scm-apq8064",
      /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
    },
    { .compatible = "qcom,scm-msm8660",
      .data = (void *) SCM_HAS_CORE_CLK,
    },
    { .compatible = "qcom,scm-msm8960",
      .data = (void *) SCM_HAS_CORE_CLK,
    },
    { .compatible = "qcom,scm-msm8996",
      .data = NULL, /* no clocks */
    },
    { .compatible = "qcom,scm",
      .data = (void *)(SCM_HAS_CORE_CLK
                       | SCM_HAS_IFACE_CLK
                       | SCM_HAS_BUS_CLK),
    },
    {}
};

static struct platform_driver qcom_scm_driver = {
    .driver = {
        .name = "qcom_scm",
        .of_match_table = qcom_scm_dt_match,
    },
    .probe = qcom_scm_probe,
};

static int __init qcom_scm_init(void)
{
    struct device_node *np, *fw_np;
    int ret;

    fw_np = of_find_node_by_name(NULL, "firmware");

    if (!fw_np)
        return -ENODEV;

    np = of_find_matching_node(fw_np, qcom_scm_dt_match);

    if (!np) {
        of_node_put(fw_np);
        return -ENODEV;
    }

    of_node_put(np);

    ret = of_platform_populate(fw_np, qcom_scm_dt_match, NULL, NULL);

    of_node_put(fw_np);

    if (ret)
        return ret;

    return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);
```
---
file_name: 18,547 | vulnerable_line_numbers: null | dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b | unique_id: 18,547
project: Chrome | target: 0 | repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
code (cut off at the end of this excerpt):

```cpp
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/subresource_filter/content/browser/verified_ruleset_dealer.h"

#include <memory>
#include <vector>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/files/file.h"
#include "base/test/test_simple_task_runner.h"
#include "components/subresource_filter/core/common/memory_mapped_ruleset.h"
#include "components/subresource_filter/core/common/test_ruleset_creator.h"
#include "content/public/test/test_browser_thread_bundle.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace subresource_filter {

namespace {

// TODO(pkalinnikov): Consider putting this to a test_support for this test
// file and SubresourceFilterRulesetDealerTest.
class TestRulesets {
 public:
  TestRulesets() = default;

  void CreateRulesets(bool many_rules = false) {
    if (many_rules) {
      ASSERT_NO_FATAL_FAILURE(
          test_ruleset_creator_.CreateRulesetToDisallowURLsWithManySuffixes(
              kTestRulesetSuffix1, kNumberOfRulesInBigRuleset,
              &test_ruleset_pair_1_));
      ASSERT_NO_FATAL_FAILURE(
          test_ruleset_creator_.CreateRulesetToDisallowURLsWithManySuffixes(
              kTestRulesetSuffix2, kNumberOfRulesInBigRuleset,
              &test_ruleset_pair_2_));
    } else {
      ASSERT_NO_FATAL_FAILURE(
          test_ruleset_creator_.CreateRulesetToDisallowURLsWithPathSuffix(
              kTestRulesetSuffix1, &test_ruleset_pair_1_));
      ASSERT_NO_FATAL_FAILURE(
          test_ruleset_creator_.CreateRulesetToDisallowURLsWithPathSuffix(
              kTestRulesetSuffix2, &test_ruleset_pair_2_));
    }
  }

  const testing::TestRuleset& indexed_1() const {
    return test_ruleset_pair_1_.indexed;
  }

  const testing::TestRuleset& indexed_2() const {
    return test_ruleset_pair_2_.indexed;
  }

 private:
  static constexpr const char kTestRulesetSuffix1[] = "foo";
  static constexpr const char kTestRulesetSuffix2[] = "bar";
  static constexpr int kNumberOfRulesInBigRuleset = 500;

  testing::TestRulesetCreator test_ruleset_creator_;
  testing::TestRulesetPair test_ruleset_pair_1_;
  testing::TestRulesetPair test_ruleset_pair_2_;

  DISALLOW_COPY_AND_ASSIGN(TestRulesets);
};

constexpr const char TestRulesets::kTestRulesetSuffix1[];
constexpr const char TestRulesets::kTestRulesetSuffix2[];
constexpr int TestRulesets::kNumberOfRulesInBigRuleset;

std::vector<uint8_t> ReadRulesetContents(const MemoryMappedRuleset* ruleset) {
  return std::vector<uint8_t>(ruleset->data(),
                              ruleset->data() + ruleset->length());
}

std::vector<uint8_t> ReadFileContent(base::File* file) {
  DCHECK(file);
  DCHECK(file->IsValid());

  const int64_t file_length = file->GetLength();
  DCHECK_LE(0, file_length);

  std::vector<uint8_t> file_content(static_cast<size_t>(file_length), 0);
  const int read_res =
      file->Read(0, reinterpret_cast<char*>(&(file_content[0])),
                 static_cast<int>(file_length));
  DCHECK_EQ(read_res, file_length);

  return file_content;
}

}  // namespace

// Tests for VerifiedRulesetDealer. -------------------------------------------
//
// Note that VerifiedRulesetDealer uses RulesetDealer very directly to provide
// MemoryMappedRulesets. Many aspects of its work, e.g., lifetime of a
// MemoryMappedRuleset, its lazy creation, etc., are covered with tests to
// RulesetDealer, therefore these aspects are not tested here.

class SubresourceFilterVerifiedRulesetDealerTest : public ::testing::Test {
 public:
  SubresourceFilterVerifiedRulesetDealerTest() = default;

 protected:
  void SetUp() override {
    rulesets_.CreateRulesets(true /* many_rules */);
    ruleset_dealer_.reset(new VerifiedRulesetDealer);
  }

  const TestRulesets& rulesets() const { return rulesets_; }
  VerifiedRulesetDealer* ruleset_dealer() { return ruleset_dealer_.get(); }

  bool has_cached_ruleset() const {
    return ruleset_dealer_->has_cached_ruleset();
  }

 private:
  TestRulesets rulesets_;
  std::unique_ptr<VerifiedRulesetDealer> ruleset_dealer_;

  DISALLOW_COPY_AND_ASSIGN(SubresourceFilterVerifiedRulesetDealerTest);
};

TEST_F(SubresourceFilterVerifiedRulesetDealerTest,
       RulesetIsMemoryMappedAndVerifiedLazily) {
  ruleset_dealer()->SetRulesetFile(
      testing::TestRuleset::Open(rulesets().indexed_1()));

  EXPECT_TRUE(ruleset_dealer()->IsRulesetFileAvailable());
  EXPECT_FALSE(has_cached_ruleset());
  EXPECT_EQ(RulesetVerificationStatus::NOT_VERIFIED,
            ruleset_dealer()->status());

  scoped_refptr<const MemoryMappedRuleset> ref_to_ruleset =
      ruleset_dealer()->GetRuleset();

  EXPECT_TRUE(ruleset_dealer()->IsRulesetFileAvailable());
  EXPECT_TRUE(ref_to_ruleset);
  EXPECT_TRUE(has_cached_ruleset());
  EXPECT_EQ(RulesetVerificationStatus::INTACT, ruleset_dealer()->status());
}

TEST_F(SubresourceFilterVerifiedRulesetDealerTest,
       CorruptedRulesetIsNeitherProvidedNorCached) {
  testing::TestRuleset::CorruptByTruncating(rulesets().indexed_1(), 123);

  ruleset_dealer()->SetRulesetFile(
      testing::TestRuleset::Open(rulesets().indexed_1()));

  EXPECT_TRUE(ruleset_dealer()->IsRulesetFileAvailable());
  EXPECT_FALSE(has_cached_ruleset());
  EXPECT_EQ(RulesetVerificationStatus::NOT_VERIFIED,
            ruleset_dealer()->status());

  scoped_refptr<const MemoryMappedRuleset> ref_to_ruleset =
      ruleset_dealer()->GetRuleset();

  EXPECT_TRUE(ruleset_dealer()->IsRulesetFileAvailable());
  EXPECT_FALSE(ref_to_ruleset);
  EXPECT_FALSE(has_cached_ruleset());
  EXPECT_EQ(RulesetVerificationStatus::CORRUPT, ruleset_dealer()->status());
}

TEST_F(SubresourceFilterVerifiedRulesetDealerTest,
       TruncatingFileMakesRulesetInvalid) {
  testing::TestRuleset::CorruptByTruncating(rulesets().indexed_1(), 4096);

  ruleset_dealer()->SetRulesetFile(
      testing::TestRuleset::Open(rulesets().indexed_1()));
  scoped_refptr<const MemoryMappedRuleset> ref_to_ruleset =
      ruleset_dealer()->GetRuleset();

  EXPECT_TRUE(ruleset_dealer()->IsRulesetFileAvailable());
  EXPECT_FALSE(ref_to_ruleset);
  EXPECT_FALSE(has_cached_ruleset());
  EXPECT_EQ(RulesetVerificationStatus::CORRUPT, ruleset_dealer()->status());
}

TEST_F(SubresourceFilterVerifiedRulesetDealerTest,
       FillingRangeMakesRulesetInvalid) {
  testing::TestRuleset::CorruptByFilling(rulesets().indexed_1(),
                                         2501 /* from */, 4000 /* to */,
                                         255 /* fill_with */);

  ruleset_dealer()->SetRulesetFile(
      testing::TestRuleset::Open(rulesets().indexed_1()));
  scoped_refptr<const MemoryMappedRuleset> ref_to_ruleset =
      ruleset_dealer()->GetRuleset();

  EXPECT_TRUE(ruleset_dealer()->IsRulesetFileAvailable());
  EXPECT_FALSE(ref_to_ruleset);
  EXPECT_FALSE(has_cached_ruleset());
  EXPECT_EQ(RulesetVerificationStatus::CORRUPT, ruleset_dealer()->status());
}

TEST_F(SubresourceFilterVerifiedRulesetDealerTest,
       RulesetIsVerifiedAfterUpdate) {
  testing::TestRuleset::CorruptByTruncating(rulesets().indexed_1(), 123);

  ruleset_dealer()->SetRulesetFile(
      testing::TestRuleset::Open(rulesets().indexed_1()));

  EXPECT_TRUE(ruleset_dealer()->IsRulesetFileAvailable());
  EXPECT_FALSE(has_cached_ruleset());
  EXPECT_EQ(RulesetVerificationStatus::NOT_VERIFIED,
            ruleset_dealer()->status());

  scoped_refptr<const MemoryMappedRuleset> ref_to_ruleset =
      ruleset_dealer()->GetRuleset();

  EXPECT_TRUE(ruleset_dealer()->IsRulesetFileAvailable());
  EXPECT_FALSE(ref_to_ruleset);
  EXPECT_FALSE(has_cached_ruleset());
  EXPECT_EQ(RulesetVerificationStatus::CORRUPT, ruleset_dealer()->status());

  ruleset_dealer()->SetRulesetFile(
      testing::TestRuleset::Open(rulesets().indexed_2()));

  EXPECT_EQ(RulesetVerificationStatus::NOT_VERIFIED,
            ruleset_dealer()->status());

  ref_to_ruleset = ruleset_dealer()->GetRuleset();

  EXPECT_TRUE(ruleset_dealer()->IsRulesetFileAvailable());
  EXPECT_TRUE(ref_to_ruleset);
  EXPECT_TRUE(has_cached_ruleset());
  EXPECT_EQ(RulesetVerificationStatus::INTACT, ruleset_dealer()->status());
}

TEST_F(SubresourceFilterVerifiedRulesetDealerTest,
       OpenAndSetRulesetFileReturnsCorrectFileOnSuccess) {
  base::File file =
      ruleset_dealer()->OpenAndSetRulesetFile(rulesets().indexed_1().path);

  // Check the required file is opened.
  ASSERT_TRUE(file.IsValid());
  EXPECT_EQ(rulesets().indexed_1().contents, ReadFileContent(&file));

  // Check |OpenAndSetRulesetFile| forwards call to |SetRulesetFile| on
  // success.
  EXPECT_TRUE(ruleset_dealer()->IsRulesetFileAvailable());
  EXPECT_FALSE(has_cached_ruleset());
  EXPECT_EQ(RulesetVerificationStatus::NOT_VERIFIED,
            ruleset_dealer()->status());
}

TEST_F(SubresourceFilterVerifiedRulesetDealerTest,
       OpenAndSetRulesetFileReturnsNullFileOnFailure) {
  base::File file = ruleset_dealer()->OpenAndSetRulesetFile(
      base::FilePath::FromUTF8Unsafe("non_existent_file"));

  EXPECT_FALSE(file.IsValid());
  EXPECT_FALSE(ruleset_dealer()->IsRulesetFileAvailable());
}

// Tests for VerifiedRulesetDealer::Handle. -----------------------------------

namespace {

class TestVerifiedRulesetDealerClient {
 public:
  TestVerifiedRulesetDealerClient() = default;

  base::Callback<void(VerifiedRulesetDealer*)> GetCallback() {
    return base::Bind(&TestVerifiedRulesetDealerClient::Callback,
                      base::Unretained(this));
  }

  void ExpectRulesetState(bool expected_availability,
                          RulesetVerificationStatus expected_status =
                              RulesetVerificationStatus::NOT_VERIFIED,
                          bool expected_cached = false) const {
    ASSERT_EQ(1, invocation_counter_);
    EXPECT_EQ(expected_availability, is_ruleset_file_available_);
    EXPECT_EQ(expected_cached, has_cached_ruleset_);
    EXPECT_EQ(expected_status, status_);
  }

  void ExpectRulesetContents(const std::vector<uint8_t>& expected_contents,
                             bool expected_cached = false) const {
    ExpectRulesetState(true, RulesetVerificationStatus::INTACT,
                       expected_cached);
    EXPECT_TRUE(ruleset_is_created_);
    EXPECT_EQ(expected_contents, contents_);
  }

 private:
  void Callback(VerifiedRulesetDealer* dealer) {
    ++invocation_counter_;
    ASSERT_TRUE(dealer);

    is_ruleset_file_available_ = dealer->IsRulesetFileAvailable();
    has_cached_ruleset_ = dealer->has_cached_ruleset();
    status_ = dealer->status();

    auto ruleset = dealer->GetRuleset();
    ruleset_is_created_ = !!ruleset;
    if (ruleset_is_created_)
      contents_ = ReadRulesetContents(ruleset.get());
  }

  bool is_ruleset_file_available_ = false;
  bool has_cached_ruleset_ = false;
  RulesetVerificationStatus status_ = RulesetVerificationStatus::NOT_VERIFIED;
  bool ruleset_is_created_ = false;
  std::vector<uint8_t> contents_;

  int invocation_counter_ = 0;

  DISALLOW_COPY_AND_ASSIGN(TestVerifiedRulesetDealerClient);
};

}  // namespace

class SubresourceFilterVerifiedRulesetDealerHandleTest
    : public ::testing::Test {
 public:
  SubresourceFilterVerifiedRulesetDealerHandleTest() = default;

 protected:
  void SetUp() override {
    rulesets_.CreateRulesets(false /* many_rules */);
    task_runner_ = new base::TestSimpleTaskRunner;
  }

  const
```
TestRulesets& rulesets() const { return rulesets_; } base::TestSimpleTaskRunner* task_runner() { return task_runner_.get(); } private: TestRulesets rulesets_; scoped_refptr<base::TestSimpleTaskRunner> task_runner_; content::TestBrowserThreadBundle thread_bundle_; DISALLOW_COPY_AND_ASSIGN(SubresourceFilterVerifiedRulesetDealerHandleTest); }; TEST_F(SubresourceFilterVerifiedRulesetDealerHandleTest, RulesetIsMappedLazily) { TestVerifiedRulesetDealerClient before_set_ruleset; TestVerifiedRulesetDealerClient after_set_ruleset; TestVerifiedRulesetDealerClient after_warm_up; std::unique_ptr<VerifiedRulesetDealer::Handle> dealer_handle( new VerifiedRulesetDealer::Handle(task_runner())); dealer_handle->GetDealerAsync(before_set_ruleset.GetCallback()); dealer_handle->TryOpenAndSetRulesetFile(rulesets().indexed_1().path, base::DoNothing()); dealer_handle->GetDealerAsync(after_set_ruleset.GetCallback()); dealer_handle->GetDealerAsync(after_warm_up.GetCallback()); dealer_handle.reset(nullptr); task_runner()->RunUntilIdle(); before_set_ruleset.ExpectRulesetState(false); after_set_ruleset.ExpectRulesetState(true); after_warm_up.ExpectRulesetContents(rulesets().indexed_1().contents); } TEST_F(SubresourceFilterVerifiedRulesetDealerHandleTest, RulesetFileIsUpdated) { TestVerifiedRulesetDealerClient after_set_ruleset_1; TestVerifiedRulesetDealerClient read_ruleset_1; TestVerifiedRulesetDealerClient after_set_ruleset_2; TestVerifiedRulesetDealerClient read_ruleset_2; std::unique_ptr<VerifiedRulesetDealer::Handle> dealer_handle( new VerifiedRulesetDealer::Handle(task_runner())); dealer_handle->TryOpenAndSetRulesetFile(rulesets().indexed_1().path, base::DoNothing()); dealer_handle->GetDealerAsync(after_set_ruleset_1.GetCallback()); dealer_handle->GetDealerAsync(read_ruleset_1.GetCallback()); dealer_handle->TryOpenAndSetRulesetFile(rulesets().indexed_2().path, base::DoNothing()); dealer_handle->GetDealerAsync(after_set_ruleset_2.GetCallback()); dealer_handle->GetDealerAsync(read_ruleset_2.GetCallback()); dealer_handle.reset(nullptr); task_runner()->RunUntilIdle(); after_set_ruleset_1.ExpectRulesetState(true); read_ruleset_1.ExpectRulesetContents(rulesets().indexed_1().contents); after_set_ruleset_2.ExpectRulesetState(true); read_ruleset_2.ExpectRulesetContents(rulesets().indexed_2().contents); } TEST_F(SubresourceFilterVerifiedRulesetDealerHandleTest, InvalidFileDoesNotReplaceTheValidOne) { TestVerifiedRulesetDealerClient after_set_ruleset_1; TestVerifiedRulesetDealerClient read_ruleset_1; TestVerifiedRulesetDealerClient after_set_ruleset_2; TestVerifiedRulesetDealerClient read_ruleset_2; auto dealer_handle = std::make_unique<VerifiedRulesetDealer::Handle>(task_runner()); dealer_handle->TryOpenAndSetRulesetFile(rulesets().indexed_1().path, base::DoNothing()); dealer_handle->GetDealerAsync(after_set_ruleset_1.GetCallback()); dealer_handle->GetDealerAsync(read_ruleset_1.GetCallback()); dealer_handle->TryOpenAndSetRulesetFile( base::FilePath::FromUTF8Unsafe("non_existent_file"), base::BindOnce([](base::File file) { EXPECT_FALSE(file.IsValid()); })); dealer_handle->GetDealerAsync(after_set_ruleset_2.GetCallback()); dealer_handle->GetDealerAsync(read_ruleset_2.GetCallback()); dealer_handle.reset(nullptr); task_runner()->RunUntilIdle(); after_set_ruleset_1.ExpectRulesetState(true); read_ruleset_1.ExpectRulesetContents(rulesets().indexed_1().contents); after_set_ruleset_2.ExpectRulesetState(true, RulesetVerificationStatus::INTACT); read_ruleset_2.ExpectRulesetContents(rulesets().indexed_1().contents); } // Tests for 
VerifiedRuleset::Handle. ------------------------------------------ namespace { class TestVerifiedRulesetClient { public: TestVerifiedRulesetClient() = default; base::Callback<void(VerifiedRuleset*)> GetCallback() { return base::Bind(&TestVerifiedRulesetClient::Callback, base::Unretained(this)); } void ExpectNoRuleset() const { ASSERT_EQ(1, invocation_counter_); EXPECT_FALSE(has_ruleset_); } void ExpectRulesetContents( const std::vector<uint8_t> expected_contents) const { ASSERT_EQ(1, invocation_counter_); EXPECT_EQ(expected_contents, contents_); } private: void Callback(VerifiedRuleset* ruleset) { ++invocation_counter_; ASSERT_TRUE(ruleset); has_ruleset_ = !!ruleset->Get(); if (has_ruleset_) contents_ = ReadRulesetContents(ruleset->Get()); } bool has_ruleset_ = false; std::vector<uint8_t> contents_; int invocation_counter_ = 0; DISALLOW_COPY_AND_ASSIGN(TestVerifiedRulesetClient); }; } // namespace class SubresourceFilterVerifiedRulesetHandleTest : public ::testing::Test { public: SubresourceFilterVerifiedRulesetHandleTest() = default; protected: void SetUp() override { rulesets_.CreateRulesets(true /* many_rules */); task_runner_ = new base::TestSimpleTaskRunner; dealer_handle_.reset(new VerifiedRulesetDealer::Handle(task_runner_)); } void TearDown() override { dealer_handle_.reset(nullptr); task_runner_->RunUntilIdle(); } const TestRulesets& rulesets() const { return rulesets_; } base::TestSimpleTaskRunner* task_runner() { return task_runner_.get(); } VerifiedRulesetDealer::Handle* dealer_handle() { return dealer_handle_.get(); } std::unique_ptr<VerifiedRuleset::Handle> CreateRulesetHandle() { return std::unique_ptr<VerifiedRuleset::Handle>( new VerifiedRuleset::Handle(dealer_handle())); } private: TestRulesets rulesets_; scoped_refptr<base::TestSimpleTaskRunner> task_runner_; std::unique_ptr<VerifiedRulesetDealer::Handle> dealer_handle_; content::TestBrowserThreadBundle thread_bundle_; DISALLOW_COPY_AND_ASSIGN(SubresourceFilterVerifiedRulesetHandleTest); }; TEST_F(SubresourceFilterVerifiedRulesetHandleTest, RulesetHandleKeepsRulesetMemoryMappedAndVerified) { TestVerifiedRulesetDealerClient created_handle; TestVerifiedRulesetClient read_ruleset; TestVerifiedRulesetDealerClient deleted_handle; dealer_handle()->TryOpenAndSetRulesetFile( rulesets().indexed_1().path, base::BindOnce([](base::File file) { EXPECT_TRUE(file.IsValid()); })); auto ruleset_handle = CreateRulesetHandle(); dealer_handle()->GetDealerAsync(created_handle.GetCallback()); ruleset_handle->GetRulesetAsync(read_ruleset.GetCallback()); ruleset_handle.reset(nullptr); dealer_handle()->GetDealerAsync(deleted_handle.GetCallback()); task_runner()->RunUntilIdle(); created_handle.ExpectRulesetContents(rulesets().indexed_1().contents, true); read_ruleset.ExpectRulesetContents(rulesets().indexed_1().contents); deleted_handle.ExpectRulesetState(true, RulesetVerificationStatus::INTACT); } TEST_F(SubresourceFilterVerifiedRulesetHandleTest, RulesetUnmappedOnlyAfterLastHandleIsDeleted) { TestVerifiedRulesetDealerClient created_handles; TestVerifiedRulesetClient read_ruleset_from_handle_1; TestVerifiedRulesetClient read_ruleset_from_handle_2; TestVerifiedRulesetDealerClient deleted_handle_1; TestVerifiedRulesetClient read_ruleset_again_from_handle_2; TestVerifiedRulesetDealerClient deleted_both_handles; dealer_handle()->TryOpenAndSetRulesetFile( rulesets().indexed_1().path, base::BindOnce([](base::File file) { EXPECT_TRUE(file.IsValid()); })); auto ruleset_handle_1 = CreateRulesetHandle(); auto ruleset_handle_2 = CreateRulesetHandle(); 
dealer_handle()->GetDealerAsync(created_handles.GetCallback()); ruleset_handle_1->GetRulesetAsync(read_ruleset_from_handle_1.GetCallback()); ruleset_handle_2->GetRulesetAsync(read_ruleset_from_handle_2.GetCallback()); ruleset_handle_1.reset(nullptr); dealer_handle()->GetDealerAsync(deleted_handle_1.GetCallback()); ruleset_handle_2->GetRulesetAsync( read_ruleset_again_from_handle_2.GetCallback()); ruleset_handle_2.reset(nullptr); dealer_handle()->GetDealerAsync(deleted_both_handles.GetCallback()); task_runner()->RunUntilIdle(); created_handles.ExpectRulesetContents(rulesets().indexed_1().contents, true); read_ruleset_from_handle_1.ExpectRulesetContents( rulesets().indexed_1().contents); read_ruleset_from_handle_2.ExpectRulesetContents( rulesets().indexed_1().contents); deleted_handle_1.ExpectRulesetContents(rulesets().indexed_1().contents, true); read_ruleset_again_from_handle_2.ExpectRulesetContents( rulesets().indexed_1().contents); deleted_both_handles.ExpectRulesetState(true, RulesetVerificationStatus::INTACT); } TEST_F(SubresourceFilterVerifiedRulesetHandleTest, OldRulesetRemainsMappedAfterUpdateUntilHandleIsDeleted) { TestVerifiedRulesetDealerClient created_handle_1; TestVerifiedRulesetClient read_from_handle_1; TestVerifiedRulesetDealerClient created_handle_2_after_update; TestVerifiedRulesetClient read_from_handle_2; TestVerifiedRulesetClient read_again_from_handle_1; TestVerifiedRulesetClient read_from_handle_1_after_update; TestVerifiedRulesetClient read_from_handle_2_after_update; TestVerifiedRulesetDealerClient deleted_all_handles; dealer_handle()->TryOpenAndSetRulesetFile( rulesets().indexed_1().path, base::BindOnce([](base::File file) { EXPECT_TRUE(file.IsValid()); })); auto ruleset_handle_1 = CreateRulesetHandle(); dealer_handle()->GetDealerAsync(created_handle_1.GetCallback()); ruleset_handle_1->GetRulesetAsync(read_from_handle_1.GetCallback()); dealer_handle()->TryOpenAndSetRulesetFile(rulesets().indexed_2().path, base::DoNothing()); auto ruleset_handle_2 = CreateRulesetHandle(); dealer_handle()->GetDealerAsync(created_handle_2_after_update.GetCallback()); ruleset_handle_2->GetRulesetAsync(read_from_handle_2.GetCallback()); ruleset_handle_1->GetRulesetAsync(read_again_from_handle_1.GetCallback()); ruleset_handle_1 = CreateRulesetHandle(); ruleset_handle_1->GetRulesetAsync( read_from_handle_1_after_update.GetCallback()); ruleset_handle_2->GetRulesetAsync( read_from_handle_2_after_update.GetCallback()); ruleset_handle_1.reset(nullptr); ruleset_handle_2.reset(nullptr); dealer_handle()->GetDealerAsync(deleted_all_handles.GetCallback()); task_runner()->RunUntilIdle(); created_handle_1.ExpectRulesetContents(rulesets().indexed_1().contents, true); read_from_handle_1.ExpectRulesetContents(rulesets().indexed_1().contents); created_handle_2_after_update.ExpectRulesetContents( rulesets().indexed_2().contents, true); read_from_handle_2.ExpectRulesetContents(rulesets().indexed_2().contents); read_again_from_handle_1.ExpectRulesetContents( rulesets().indexed_1().contents); read_from_handle_1_after_update.ExpectRulesetContents( rulesets().indexed_2().contents); read_from_handle_2_after_update.ExpectRulesetContents( rulesets().indexed_2().contents); deleted_all_handles.ExpectRulesetState(true, RulesetVerificationStatus::INTACT); } TEST_F(SubresourceFilterVerifiedRulesetHandleTest, CorruptRulesetIsNotHandedOut) { TestVerifiedRulesetDealerClient created_handle; TestVerifiedRulesetClient read_ruleset; TestVerifiedRulesetDealerClient deleted_handle; 
testing::TestRuleset::CorruptByTruncating(rulesets().indexed_1(), 4096); dealer_handle()->TryOpenAndSetRulesetFile( rulesets().indexed_1().path, base::BindOnce([](base::File file) { EXPECT_TRUE(file.IsValid()); })); auto ruleset_handle = CreateRulesetHandle(); dealer_handle()->GetDealerAsync(created_handle.GetCallback()); ruleset_handle->GetRulesetAsync(read_ruleset.GetCallback()); ruleset_handle.reset(nullptr); dealer_handle()->GetDealerAsync(deleted_handle.GetCallback()); task_runner()->RunUntilIdle(); created_handle.ExpectRulesetState(true, RulesetVerificationStatus::CORRUPT); read_ruleset.ExpectNoRuleset(); deleted_handle.ExpectRulesetState(true, RulesetVerificationStatus::CORRUPT); } } // namespace subresource_filter
null
null
null
null
15,410
4,898
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
169,893
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Support for Digigram Lola PCI-e boards * * Copyright (c) 2011 Takashi Iwai <[email protected]> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/io.h> #include <sound/core.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/tlv.h> #include "lola.h" static int lola_init_pin(struct lola *chip, struct lola_pin *pin, int dir, int nid) { unsigned int val; int err; pin->nid = nid; err = lola_read_param(chip, nid, LOLA_PAR_AUDIO_WIDGET_CAP, &val); if (err < 0) { dev_err(chip->card->dev, "Can't read wcaps for 0x%x\n", nid); return err; } val &= 0x00f00fff; /* test TYPE and bits 0..11 */ if (val == 0x00400200) /* Type = 4, Digital = 1 */ pin->is_analog = false; else if (val == 0x0040000a && dir == CAPT) /* Dig=0, InAmp/ovrd */ pin->is_analog = true; else if (val == 0x0040000c && dir == PLAY) /* Dig=0, OutAmp/ovrd */ pin->is_analog = true; else { dev_err(chip->card->dev, "Invalid wcaps 0x%x for 0x%x\n", val, nid); return -EINVAL; } /* analog parameters only following, so continue in case of Digital pin */ if (!pin->is_analog) return 0; if (dir == PLAY) err = lola_read_param(chip, nid, LOLA_PAR_AMP_OUT_CAP, &val); else err = lola_read_param(chip, nid, LOLA_PAR_AMP_IN_CAP, &val); if (err < 0) { dev_err(chip->card->dev, "Can't read AMP-caps for 0x%x\n", nid); return err; } pin->amp_mute = LOLA_AMP_MUTE_CAPABLE(val); pin->amp_step_size = LOLA_AMP_STEP_SIZE(val); pin->amp_num_steps = LOLA_AMP_NUM_STEPS(val); if (pin->amp_num_steps) { /* zero as mute state */ pin->amp_num_steps++; pin->amp_step_size++; } pin->amp_offset = LOLA_AMP_OFFSET(val); err = lola_codec_read(chip, nid, LOLA_VERB_GET_MAX_LEVEL, 0, 0, &val, NULL); if (err < 0) { dev_err(chip->card->dev, "Can't get MAX_LEVEL 0x%x\n", nid); return err; } pin->max_level = val & 0x3ff; /* 10 bits */ pin->config_default_reg = 0; pin->fixed_gain_list_len = 0; pin->cur_gain_step = 0; return 0; } int lola_init_pins(struct lola *chip, int dir, int *nidp) { int i, err, nid; nid = *nidp; for (i = 0; i < chip->pin[dir].num_pins; i++, nid++) { err = lola_init_pin(chip, &chip->pin[dir].pins[i], dir, nid); if (err < 0) return err; if (chip->pin[dir].pins[i].is_analog) chip->pin[dir].num_analog_pins++; } *nidp = nid; return 0; } void lola_free_mixer(struct lola *chip) { vfree(chip->mixer.array_saved); } int lola_init_mixer_widget(struct lola *chip, int nid) { unsigned int val; int err; err = lola_read_param(chip, nid, LOLA_PAR_AUDIO_WIDGET_CAP, &val); if (err < 0) { dev_err(chip->card->dev, "Can't read wcaps for 0x%x\n", nid); return err; } if ((val & 0xfff00000) != 0x02f00000) { /* test SubType and Type */ dev_dbg(chip->card->dev, "No valid mixer widget\n"); return 0; } chip->mixer.nid = nid; chip->mixer.caps = val; chip->mixer.array = (struct lola_mixer_array __iomem *) (chip->bar[BAR1].remap_addr + 
LOLA_BAR1_SOURCE_GAIN_ENABLE); /* reserve memory to copy mixer data for sleep mode transitions */ chip->mixer.array_saved = vmalloc(sizeof(struct lola_mixer_array)); /* mixer matrix sources are physical input data and play streams */ chip->mixer.src_stream_outs = chip->pcm[PLAY].num_streams; chip->mixer.src_phys_ins = chip->pin[CAPT].num_pins; /* mixer matrix destinations are record streams and physical output */ chip->mixer.dest_stream_ins = chip->pcm[CAPT].num_streams; chip->mixer.dest_phys_outs = chip->pin[PLAY].num_pins; /* mixer matrix may have unused areas between PhysIn and * Play or Record and PhysOut zones */ chip->mixer.src_stream_out_ofs = chip->mixer.src_phys_ins + LOLA_MIXER_SRC_INPUT_PLAY_SEPARATION(val); chip->mixer.dest_phys_out_ofs = chip->mixer.dest_stream_ins + LOLA_MIXER_DEST_REC_OUTPUT_SEPARATION(val); /* example : MixerMatrix of LoLa881 (LoLa16161 uses unused zones) * +-+ 0-------8------16-------8------16 * | | | | | | | * |s| | INPUT | | INPUT | | * | |->| -> |unused | -> |unused | * |r| |CAPTURE| | OUTPUT| | * | | | MIX | | MIX | | * |c| 8-------------------------------- * | | | | | | | * | | | | | | | * |g| |unused |unused |unused |unused | * | | | | | | | * |a| | | | | | * | | 16------------------------------- * |i| | | | | | * | | | PLAYBK| | PLAYBK| | * |n|->| -> |unused | -> |unused | * | | |CAPTURE| | OUTPUT| | * | | | MIX | | MIX | | * |a| 8-------------------------------- * |r| | | | | | * |r| | | | | | * |a| |unused |unused |unused |unused | * |y| | | | | | * | | | | | | | * +++ 16--|---------------|------------ * +---V---------------V-----------+ * | dest_mix_gain_enable array | * +-------------------------------+ */ /* example : MixerMatrix of LoLa280 * +-+ 0-------8-2 * | | | | | * |s| | INPUT | | INPUT * |r|->| -> | | -> * |c| |CAPTURE| | <- OUTPUT * | | | MIX | | MIX * |g| 8---------- * |a| | | | * |i| | PLAYBK| | PLAYBACK * |n|->| -> | | -> * | | |CAPTURE| | <- OUTPUT * |a| | MIX | | MIX * |r| 8---|----|- * |r| +---V----V-------------------+ * |a| | dest_mix_gain_enable array | * |y| +----------------------------+ */ if (chip->mixer.src_stream_out_ofs > MAX_AUDIO_INOUT_COUNT || chip->mixer.dest_phys_out_ofs > MAX_STREAM_IN_COUNT) { dev_err(chip->card->dev, "Invalid mixer widget size\n"); return -EINVAL; } chip->mixer.src_mask = ((1U << chip->mixer.src_phys_ins) - 1) | (((1U << chip->mixer.src_stream_outs) - 1) << chip->mixer.src_stream_out_ofs); chip->mixer.dest_mask = ((1U << chip->mixer.dest_stream_ins) - 1) | (((1U << chip->mixer.dest_phys_outs) - 1) << chip->mixer.dest_phys_out_ofs); dev_dbg(chip->card->dev, "Mixer src_mask=%x, dest_mask=%x\n", chip->mixer.src_mask, chip->mixer.dest_mask); return 0; } static int lola_mixer_set_src_gain(struct lola *chip, unsigned int id, unsigned short gain, bool on) { unsigned int oldval, val; if (!(chip->mixer.src_mask & (1 << id))) return -EINVAL; oldval = val = readl(&chip->mixer.array->src_gain_enable); if (on) val |= (1 << id); else val &= ~(1 << id); /* test if values unchanged */ if ((val == oldval) && (gain == readw(&chip->mixer.array->src_gain[id]))) return 0; dev_dbg(chip->card->dev, "lola_mixer_set_src_gain (id=%d, gain=%d) enable=%x\n", id, gain, val); writew(gain, &chip->mixer.array->src_gain[id]); writel(val, &chip->mixer.array->src_gain_enable); lola_codec_flush(chip); /* inform micro-controller about the new source gain */ return lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_SOURCE_GAIN, id, 0); } #if 0 /* not used */ static int lola_mixer_set_src_gains(struct lola *chip, unsigned int 
mask, unsigned short *gains) { int i; if ((chip->mixer.src_mask & mask) != mask) return -EINVAL; for (i = 0; i < LOLA_MIXER_DIM; i++) { if (mask & (1 << i)) { writew(*gains, &chip->mixer.array->src_gain[i]); gains++; } } writel(mask, &chip->mixer.array->src_gain_enable); lola_codec_flush(chip); if (chip->mixer.caps & LOLA_PEAK_METER_CAN_AGC_MASK) { /* update for all srcs at once */ return lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_SOURCE_GAIN, 0x80, 0); } /* update manually */ for (i = 0; i < LOLA_MIXER_DIM; i++) { if (mask & (1 << i)) { lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_SOURCE_GAIN, i, 0); } } return 0; } #endif /* not used */ static int lola_mixer_set_mapping_gain(struct lola *chip, unsigned int src, unsigned int dest, unsigned short gain, bool on) { unsigned int val; if (!(chip->mixer.src_mask & (1 << src)) || !(chip->mixer.dest_mask & (1 << dest))) return -EINVAL; if (on) writew(gain, &chip->mixer.array->dest_mix_gain[dest][src]); val = readl(&chip->mixer.array->dest_mix_gain_enable[dest]); if (on) val |= (1 << src); else val &= ~(1 << src); writel(val, &chip->mixer.array->dest_mix_gain_enable[dest]); lola_codec_flush(chip); return lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_MIX_GAIN, src, dest); } #if 0 /* not used */ static int lola_mixer_set_dest_gains(struct lola *chip, unsigned int id, unsigned int mask, unsigned short *gains) { int i; if (!(chip->mixer.dest_mask & (1 << id)) || (chip->mixer.src_mask & mask) != mask) return -EINVAL; for (i = 0; i < LOLA_MIXER_DIM; i++) { if (mask & (1 << i)) { writew(*gains, &chip->mixer.array->dest_mix_gain[id][i]); gains++; } } writel(mask, &chip->mixer.array->dest_mix_gain_enable[id]); lola_codec_flush(chip); /* update for all dests at once */ return lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_DESTINATION_GAIN, id, 0); } #endif /* not used */ /* */ static int set_analog_volume(struct lola *chip, int dir, unsigned int idx, unsigned int val, bool external_call); int lola_setup_all_analog_gains(struct lola *chip, int dir, bool mute) { struct lola_pin *pin; int idx, max_idx; pin = chip->pin[dir].pins; max_idx = chip->pin[dir].num_pins; for (idx = 0; idx < max_idx; idx++) { if (pin[idx].is_analog) { unsigned int val = mute ? 
0 : pin[idx].cur_gain_step; /* set volume and do not save the value */ set_analog_volume(chip, dir, idx, val, false); } } return lola_codec_flush(chip); } void lola_save_mixer(struct lola *chip) { /* mute analog output */ if (chip->mixer.array_saved) { /* store contents of mixer array */ memcpy_fromio(chip->mixer.array_saved, chip->mixer.array, sizeof(*chip->mixer.array)); } lola_setup_all_analog_gains(chip, PLAY, true); /* output mute */ } void lola_restore_mixer(struct lola *chip) { int i; /*lola_reset_setups(chip);*/ if (chip->mixer.array_saved) { /* restore contents of mixer array */ memcpy_toio(chip->mixer.array, chip->mixer.array_saved, sizeof(*chip->mixer.array)); /* inform micro-controller about all restored values * and ignore return values */ for (i = 0; i < chip->mixer.src_phys_ins; i++) lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_SOURCE_GAIN, i, 0); for (i = 0; i < chip->mixer.src_stream_outs; i++) lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_SOURCE_GAIN, chip->mixer.src_stream_out_ofs + i, 0); for (i = 0; i < chip->mixer.dest_stream_ins; i++) lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_DESTINATION_GAIN, i, 0); for (i = 0; i < chip->mixer.dest_phys_outs; i++) lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_DESTINATION_GAIN, chip->mixer.dest_phys_out_ofs + i, 0); lola_codec_flush(chip); } } /* */ static int set_analog_volume(struct lola *chip, int dir, unsigned int idx, unsigned int val, bool external_call) { struct lola_pin *pin; int err; if (idx >= chip->pin[dir].num_pins) return -EINVAL; pin = &chip->pin[dir].pins[idx]; if (!pin->is_analog || pin->amp_num_steps <= val) return -EINVAL; if (external_call && pin->cur_gain_step == val) return 0; if (external_call) lola_codec_flush(chip); dev_dbg(chip->card->dev, "set_analog_volume (dir=%d idx=%d, volume=%d)\n", dir, idx, val); err = lola_codec_write(chip, pin->nid, LOLA_VERB_SET_AMP_GAIN_MUTE, val, 0); if (err < 0) return err; if (external_call) pin->cur_gain_step = val; return 0; } int lola_set_src_config(struct lola *chip, unsigned int src_mask, bool update) { int ret = 0; int success = 0; int n, err; /* SRC can be activated and the dwInputSRCMask is valid? */ if ((chip->input_src_caps_mask & src_mask) != src_mask) return -EINVAL; /* handle all even Inputs - SRC is a stereo setting !!! 
*/ for (n = 0; n < chip->pin[CAPT].num_pins; n += 2) { unsigned int mask = 3U << n; /* handle the stereo case */ unsigned int new_src, src_state; if (!(chip->input_src_caps_mask & mask)) continue; /* if one IO needs SRC, both stereo IO will get SRC */ new_src = (src_mask & mask) != 0; if (update) { src_state = (chip->input_src_mask & mask) != 0; if (src_state == new_src) continue; /* nothing to change for this IO */ } err = lola_codec_write(chip, chip->pcm[CAPT].streams[n].nid, LOLA_VERB_SET_SRC, new_src, 0); if (!err) success++; else ret = err; } if (success) ret = lola_codec_flush(chip); if (!ret) chip->input_src_mask = src_mask; return ret; } /* */ static int init_mixer_values(struct lola *chip) { int i; /* all sample rate converters on */ lola_set_src_config(chip, (1 << chip->pin[CAPT].num_pins) - 1, false); /* clear all mixer matrix settings */ memset_io(chip->mixer.array, 0, sizeof(*chip->mixer.array)); /* inform firmware about all updated matrix columns - capture part */ for (i = 0; i < chip->mixer.dest_stream_ins; i++) lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_DESTINATION_GAIN, i, 0); /* inform firmware about all updated matrix columns - output part */ for (i = 0; i < chip->mixer.dest_phys_outs; i++) lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_DESTINATION_GAIN, chip->mixer.dest_phys_out_ofs + i, 0); /* set all digital input source (master) gains to 0dB */ for (i = 0; i < chip->mixer.src_phys_ins; i++) lola_mixer_set_src_gain(chip, i, 336, true); /* 0dB */ /* set all digital playback source (master) gains to 0dB */ for (i = 0; i < chip->mixer.src_stream_outs; i++) lola_mixer_set_src_gain(chip, i + chip->mixer.src_stream_out_ofs, 336, true); /* 0dB */ /* set gain value 0dB diagonally in matrix - part INPUT -> CAPTURE */ for (i = 0; i < chip->mixer.dest_stream_ins; i++) { int src = i % chip->mixer.src_phys_ins; lola_mixer_set_mapping_gain(chip, src, i, 336, true); } /* set gain value 0dB diagonally in matrix , part PLAYBACK -> OUTPUT * (LoLa280 : playback channel 0,2,4,6 linked to output channel 0) * (LoLa280 : playback channel 1,3,5,7 linked to output channel 1) */ for (i = 0; i < chip->mixer.src_stream_outs; i++) { int src = chip->mixer.src_stream_out_ofs + i; int dst = chip->mixer.dest_phys_out_ofs + i % chip->mixer.dest_phys_outs; lola_mixer_set_mapping_gain(chip, src, dst, 336, true); } return 0; } /* * analog mixer control element */ static int lola_analog_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct lola *chip = snd_kcontrol_chip(kcontrol); int dir = kcontrol->private_value; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = chip->pin[dir].num_pins; uinfo->value.integer.min = 0; uinfo->value.integer.max = chip->pin[dir].pins[0].amp_num_steps; return 0; } static int lola_analog_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct lola *chip = snd_kcontrol_chip(kcontrol); int dir = kcontrol->private_value; int i; for (i = 0; i < chip->pin[dir].num_pins; i++) ucontrol->value.integer.value[i] = chip->pin[dir].pins[i].cur_gain_step; return 0; } static int lola_analog_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct lola *chip = snd_kcontrol_chip(kcontrol); int dir = kcontrol->private_value; int i, err; for (i = 0; i < chip->pin[dir].num_pins; i++) { err = set_analog_volume(chip, dir, i, ucontrol->value.integer.value[i], true); if (err < 0) return err; } return 0; } static int lola_analog_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag, unsigned int 
size, unsigned int __user *tlv) { struct lola *chip = snd_kcontrol_chip(kcontrol); int dir = kcontrol->private_value; unsigned int val1, val2; struct lola_pin *pin; if (size < 4 * sizeof(unsigned int)) return -ENOMEM; pin = &chip->pin[dir].pins[0]; val2 = pin->amp_step_size * 25; val1 = -1 * (int)pin->amp_offset * (int)val2; #ifdef TLV_DB_SCALE_MUTE val2 |= TLV_DB_SCALE_MUTE; #endif if (put_user(SNDRV_CTL_TLVT_DB_SCALE, tlv)) return -EFAULT; if (put_user(2 * sizeof(unsigned int), tlv + 1)) return -EFAULT; if (put_user(val1, tlv + 2)) return -EFAULT; if (put_user(val2, tlv + 3)) return -EFAULT; return 0; } static struct snd_kcontrol_new lola_analog_mixer = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK), .info = lola_analog_vol_info, .get = lola_analog_vol_get, .put = lola_analog_vol_put, .tlv.c = lola_analog_vol_tlv, }; static int create_analog_mixer(struct lola *chip, int dir, char *name) { if (!chip->pin[dir].num_pins) return 0; /* no analog volumes on digital only adapters */ if (chip->pin[dir].num_pins != chip->pin[dir].num_analog_pins) return 0; lola_analog_mixer.name = name; lola_analog_mixer.private_value = dir; return snd_ctl_add(chip->card, snd_ctl_new1(&lola_analog_mixer, chip)); } /* * Hardware sample rate converter on digital input */ static int lola_input_src_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct lola *chip = snd_kcontrol_chip(kcontrol); uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; uinfo->count = chip->pin[CAPT].num_pins; uinfo->value.integer.min = 0; uinfo->value.integer.max = 1; return 0; } static int lola_input_src_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct lola *chip = snd_kcontrol_chip(kcontrol); int i; for (i = 0; i < chip->pin[CAPT].num_pins; i++) ucontrol->value.integer.value[i] = !!(chip->input_src_mask & (1 << i)); return 0; } static int lola_input_src_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct lola *chip = snd_kcontrol_chip(kcontrol); int i; unsigned int mask; mask = 0; for (i = 0; i < chip->pin[CAPT].num_pins; i++) if (ucontrol->value.integer.value[i]) mask |= 1 << i; return lola_set_src_config(chip, mask, true); } static struct snd_kcontrol_new lola_input_src_mixer = { .name = "Digital SRC Capture Switch", .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .info = lola_input_src_info, .get = lola_input_src_get, .put = lola_input_src_put, }; /* * Lola16161 or Lola881 can have Hardware sample rate converters * on its digital input pins */ static int create_input_src_mixer(struct lola *chip) { if (!chip->input_src_caps_mask) return 0; return snd_ctl_add(chip->card, snd_ctl_new1(&lola_input_src_mixer, chip)); } /* * src gain mixer */ static int lola_src_gain_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { unsigned int count = (kcontrol->private_value >> 8) & 0xff; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = count; uinfo->value.integer.min = 0; uinfo->value.integer.max = 409; return 0; } static int lola_src_gain_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct lola *chip = snd_kcontrol_chip(kcontrol); unsigned int ofs = kcontrol->private_value & 0xff; unsigned int count = (kcontrol->private_value >> 8) & 0xff; unsigned int mask, i; mask = readl(&chip->mixer.array->src_gain_enable); for (i = 0; i < count; i++) { unsigned int idx = ofs + i; unsigned short val; if (!(chip->mixer.src_mask & (1 << idx))) 
return -EINVAL; if (mask & (1 << idx)) val = readw(&chip->mixer.array->src_gain[idx]) + 1; else val = 0; ucontrol->value.integer.value[i] = val; } return 0; } static int lola_src_gain_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct lola *chip = snd_kcontrol_chip(kcontrol); unsigned int ofs = kcontrol->private_value & 0xff; unsigned int count = (kcontrol->private_value >> 8) & 0xff; int i, err; for (i = 0; i < count; i++) { unsigned int idx = ofs + i; unsigned short val = ucontrol->value.integer.value[i]; if (val) val--; err = lola_mixer_set_src_gain(chip, idx, val, !!val); if (err < 0) return err; } return 0; } /* raw value: 0 = -84dB, 336 = 0dB, 408=18dB, incremented 1 for mute */ static const DECLARE_TLV_DB_SCALE(lola_src_gain_tlv, -8425, 25, 1); static struct snd_kcontrol_new lola_src_gain_mixer = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .info = lola_src_gain_info, .get = lola_src_gain_get, .put = lola_src_gain_put, .tlv.p = lola_src_gain_tlv, }; static int create_src_gain_mixer(struct lola *chip, int num, int ofs, char *name) { lola_src_gain_mixer.name = name; lola_src_gain_mixer.private_value = ofs + (num << 8); return snd_ctl_add(chip->card, snd_ctl_new1(&lola_src_gain_mixer, chip)); } #if 0 /* not used */ /* * destination gain (matrix-like) mixer */ static int lola_dest_gain_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { unsigned int src_num = (kcontrol->private_value >> 8) & 0xff; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = src_num; uinfo->value.integer.min = 0; uinfo->value.integer.max = 433; return 0; } static int lola_dest_gain_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct lola *chip = snd_kcontrol_chip(kcontrol); unsigned int src_ofs = kcontrol->private_value & 0xff; unsigned int src_num = (kcontrol->private_value >> 8) & 0xff; unsigned int dst_ofs = (kcontrol->private_value >> 16) & 0xff; unsigned int dst, mask, i; dst = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id) + dst_ofs; mask = readl(&chip->mixer.array->dest_mix_gain_enable[dst]); for (i = 0; i < src_num; i++) { unsigned int src = src_ofs + i; unsigned short val; if (!(chip->mixer.src_mask & (1 << src))) return -EINVAL; if (mask & (1 << dst)) val = readw(&chip->mixer.array->dest_mix_gain[dst][src]) + 1; else val = 0; ucontrol->value.integer.value[i] = val; } return 0; } static int lola_dest_gain_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct lola *chip = snd_kcontrol_chip(kcontrol); unsigned int src_ofs = kcontrol->private_value & 0xff; unsigned int src_num = (kcontrol->private_value >> 8) & 0xff; unsigned int dst_ofs = (kcontrol->private_value >> 16) & 0xff; unsigned int dst, mask; unsigned short gains[MAX_STREAM_COUNT]; int i, num; mask = 0; num = 0; for (i = 0; i < src_num; i++) { unsigned short val = ucontrol->value.integer.value[i]; if (val) { gains[num++] = val - 1; mask |= 1 << i; } } mask <<= src_ofs; dst = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id) + dst_ofs; return lola_mixer_set_dest_gains(chip, dst, mask, gains); } static const DECLARE_TLV_DB_SCALE(lola_dest_gain_tlv, -8425, 25, 1); static struct snd_kcontrol_new lola_dest_gain_mixer = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .info = lola_dest_gain_info, .get = lola_dest_gain_get, .put = lola_dest_gain_put, .tlv.p = lola_dest_gain_tlv, }; static int 
create_dest_gain_mixer(struct lola *chip, int src_num, int src_ofs, int num, int ofs, char *name) { lola_dest_gain_mixer.count = num; lola_dest_gain_mixer.name = name; lola_dest_gain_mixer.private_value = src_ofs + (src_num << 8) + (ofs << 16) + (num << 24); return snd_ctl_add(chip->card, snd_ctl_new1(&lola_dest_gain_mixer, chip)); } #endif /* not used */ /* */ int lola_create_mixer(struct lola *chip) { int err; err = create_analog_mixer(chip, PLAY, "Analog Playback Volume"); if (err < 0) return err; err = create_analog_mixer(chip, CAPT, "Analog Capture Volume"); if (err < 0) return err; err = create_input_src_mixer(chip); if (err < 0) return err; err = create_src_gain_mixer(chip, chip->mixer.src_phys_ins, 0, "Digital Capture Volume"); if (err < 0) return err; err = create_src_gain_mixer(chip, chip->mixer.src_stream_outs, chip->mixer.src_stream_out_ofs, "Digital Playback Volume"); if (err < 0) return err; #if 0 /* FIXME: buggy mixer matrix handling */ err = create_dest_gain_mixer(chip, chip->mixer.src_phys_ins, 0, chip->mixer.dest_stream_ins, 0, "Line Capture Volume"); if (err < 0) return err; err = create_dest_gain_mixer(chip, chip->mixer.src_stream_outs, chip->mixer.src_stream_out_ofs, chip->mixer.dest_stream_ins, 0, "Stream-Loopback Capture Volume"); if (err < 0) return err; err = create_dest_gain_mixer(chip, chip->mixer.src_phys_ins, 0, chip->mixer.dest_phys_outs, chip->mixer.dest_phys_out_ofs, "Line-Loopback Playback Volume"); if (err < 0) return err; err = create_dest_gain_mixer(chip, chip->mixer.src_stream_outs, chip->mixer.src_stream_out_ofs, chip->mixer.dest_phys_outs, chip->mixer.dest_phys_out_ofs, "Stream Playback Volume"); if (err < 0) return err; #endif /* FIXME */ return init_mixer_values(chip); }
null
null
null
null
78,240
600
null
train_val
31e986bc171719c9e6d40d0c2cb1501796a69e6c
259,555
php-src
0
https://github.com/php/php-src
2016-10-24 10:37:20+01:00
/* +----------------------------------------------------------------------+ | PHP Version 7 | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | [email protected] so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Authors: Gustavo Lopes <[email protected]> | +----------------------------------------------------------------------+ */ #ifndef TIMEZONE_METHODS_H #define TIMEZONE_METHODS_H #include <php.h> PHP_METHOD(IntlTimeZone, __construct); PHP_FUNCTION(intltz_create_time_zone); PHP_FUNCTION(intltz_from_date_time_zone); PHP_FUNCTION(intltz_create_default); PHP_FUNCTION(intltz_get_id); PHP_FUNCTION(intltz_get_gmt); PHP_FUNCTION(intltz_get_unknown); PHP_FUNCTION(intltz_create_enumeration); PHP_FUNCTION(intltz_count_equivalent_ids); PHP_FUNCTION(intltz_create_time_zone_id_enumeration); PHP_FUNCTION(intltz_get_canonical_id); PHP_FUNCTION(intltz_get_region); PHP_FUNCTION(intltz_get_tz_data_version); PHP_FUNCTION(intltz_get_equivalent_id); PHP_FUNCTION(intltz_use_daylight_time); PHP_FUNCTION(intltz_get_offset); PHP_FUNCTION(intltz_get_raw_offset); PHP_FUNCTION(intltz_has_same_rules); PHP_FUNCTION(intltz_get_display_name); PHP_FUNCTION(intltz_get_dst_savings); PHP_FUNCTION(intltz_to_date_time_zone); PHP_FUNCTION(intltz_get_error_code); PHP_FUNCTION(intltz_get_error_message); #if U_ICU_VERSION_MAJOR_NUM >= 52 PHP_FUNCTION(intltz_get_windows_id); PHP_FUNCTION(intltz_get_id_for_windows_id); #endif #endif /* #ifndef TIMEZONE_METHODS_H */
null
null
null
null
119,476
36,498
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
201,493
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * HID driver for Aureal Cy se W-01RN USB_V3.1 devices * * Copyright (c) 2010 Franco Catrin <[email protected]> * Copyright (c) 2010 Ben Cropley <[email protected]> * * Based on HID sunplus driver by * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <[email protected]> * Copyright (c) 2005 Michael Haboustak <[email protected]> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2008 Jiri Slaby */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" static __u8 *aureal_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) { dev_info(&hdev->dev, "fixing Aureal Cy se W-01RN USB_V3.1 report descriptor.\n"); rdesc[53] = 0x65; } return rdesc; } static const struct hid_device_id aureal_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, { } }; MODULE_DEVICE_TABLE(hid, aureal_devices); static struct hid_driver aureal_driver = { .name = "aureal", .id_table = aureal_devices, .report_fixup = aureal_report_fixup, }; module_hid_driver(aureal_driver); MODULE_LICENSE("GPL");
null
null
null
null
109,840
53,730
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
53,730
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "base/android/base_jni_onload.h" #include "base/android/jni_android.h" // This is called by the VM when the shared library is first loaded. JNI_EXPORT jint JNI_OnLoad(JavaVM* vm, void* reserved) { base::android::InitVM(vm); if (!base::android::OnJNIOnLoadInit()) return -1; return JNI_VERSION_1_4; }
null
null
null
null
50,593
3,830
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
168,825
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Server-side types for NFSv4. * * Copyright (c) 2002 The Regents of the University of Michigan. * All rights reserved. * * Kendrick Smith <[email protected]> * Andy Adamson <[email protected]> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #ifndef _LINUX_NFSD_XDR4_H #define _LINUX_NFSD_XDR4_H #include "state.h" #include "nfsd.h" #define NFSD4_MAX_TAGLEN 128 #define XDR_LEN(n) (((n) + 3) & ~3) #define CURRENT_STATE_ID_FLAG (1<<0) #define SAVED_STATE_ID_FLAG (1<<1) #define SET_STATE_ID(c, f) ((c)->sid_flags |= (f)) #define HAS_STATE_ID(c, f) ((c)->sid_flags & (f)) #define CLEAR_STATE_ID(c, f) ((c)->sid_flags &= ~(f)) struct nfsd4_compound_state { struct svc_fh current_fh; struct svc_fh save_fh; struct nfs4_stateowner *replay_owner; struct nfs4_client *clp; /* For sessions DRC */ struct nfsd4_session *session; struct nfsd4_slot *slot; int data_offset; bool spo_must_allowed; size_t iovlen; u32 minorversion; __be32 status; stateid_t current_stateid; stateid_t save_stateid; /* to indicate current and saved state id presents */ u32 sid_flags; }; static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs) { return cs->slot != NULL; } struct nfsd4_change_info { u32 atomic; bool change_supported; u32 before_ctime_sec; u32 before_ctime_nsec; u64 before_change; u32 after_ctime_sec; u32 after_ctime_nsec; u64 after_change; }; struct nfsd4_access { u32 ac_req_access; /* request */ u32 ac_supported; /* response */ u32 ac_resp_access; /* response */ }; struct nfsd4_close { u32 cl_seqid; /* request */ stateid_t cl_stateid; /* request+response */ }; struct nfsd4_commit { u64 co_offset; /* request */ u32 co_count; /* request */ nfs4_verifier co_verf; /* response */ }; struct nfsd4_create { u32 cr_namelen; /* request */ char * cr_name; /* request */ u32 cr_type; /* request */ union { /* request */ struct { u32 datalen; char *data; } link; /* NF4LNK */ struct { u32 specdata1; u32 specdata2; } dev; /* NF4BLK, NF4CHR */ } u; u32 cr_bmval[3]; /* request */ struct iattr cr_iattr; /* request */ struct nfsd4_change_info cr_cinfo; /* response */ struct nfs4_acl *cr_acl; struct xdr_netobj cr_label; }; #define cr_datalen u.link.datalen #define cr_data 
#define cr_data		u.link.data
#define cr_specdata1	u.dev.specdata1
#define cr_specdata2	u.dev.specdata2

struct nfsd4_delegreturn {
	stateid_t	dr_stateid;
};

struct nfsd4_getattr {
	u32		ga_bmval[3];        /* request */
	struct svc_fh	*ga_fhp;            /* response */
};

struct nfsd4_link {
	u32		li_namelen;         /* request */
	char *		li_name;            /* request */
	struct nfsd4_change_info  li_cinfo; /* response */
};

struct nfsd4_lock_denied {
	clientid_t	ld_clientid;
	struct xdr_netobj	ld_owner;
	u64		ld_start;
	u64		ld_length;
	u32		ld_type;
};

struct nfsd4_lock {
	/* request */
	u32		lk_type;
	u32		lk_reclaim;         /* boolean */
	u64		lk_offset;
	u64		lk_length;
	u32		lk_is_new;
	union {
		struct {
			u32		open_seqid;
			stateid_t	open_stateid;
			u32		lock_seqid;
			clientid_t	clientid;
			struct xdr_netobj owner;
		} new;
		struct {
			stateid_t	lock_stateid;
			u32		lock_seqid;
		} old;
	} v;

	/* response */
	union {
		struct {
			stateid_t	stateid;
		} ok;
		struct nfsd4_lock_denied	denied;
	} u;
};
#define lk_new_open_seqid	v.new.open_seqid
#define lk_new_open_stateid	v.new.open_stateid
#define lk_new_lock_seqid	v.new.lock_seqid
#define lk_new_clientid		v.new.clientid
#define lk_new_owner		v.new.owner
#define lk_old_lock_stateid	v.old.lock_stateid
#define lk_old_lock_seqid	v.old.lock_seqid

#define lk_resp_stateid		u.ok.stateid
#define lk_denied		u.denied

struct nfsd4_lockt {
	u32			lt_type;
	clientid_t		lt_clientid;
	struct xdr_netobj	lt_owner;
	u64			lt_offset;
	u64			lt_length;
	struct nfsd4_lock_denied lt_denied;
};

struct nfsd4_locku {
	u32		lu_type;
	u32		lu_seqid;
	stateid_t	lu_stateid;
	u64		lu_offset;
	u64		lu_length;
};

struct nfsd4_lookup {
	u32		lo_len;             /* request */
	char *		lo_name;            /* request */
};

struct nfsd4_putfh {
	u32		pf_fhlen;           /* request */
	char		*pf_fhval;          /* request */
};

struct nfsd4_open {
	u32		op_claim_type;      /* request */
	struct xdr_netobj op_fname;	    /* request - everything but CLAIM_PREV */
	u32		op_delegate_type;   /* request - CLAIM_PREV only */
	stateid_t	op_delegate_stateid; /* request - response */
	u32		op_why_no_deleg;    /* response - DELEG_NONE_EXT only */
	u32		op_create;	    /* request */
	u32		op_createmode;      /* request */
	u32		op_bmval[3];        /* request */
	struct iattr	op_iattr;           /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
	nfs4_verifier	op_verf __attribute__((aligned(32))); /* EXCLUSIVE4 */
	clientid_t	op_clientid;        /* request */
	struct xdr_netobj op_owner;         /* request */
	u32		op_seqid;           /* request */
	u32		op_share_access;    /* request */
	u32		op_share_deny;      /* request */
	u32		op_deleg_want;      /* request */
	stateid_t	op_stateid;         /* response */
	__be32		op_xdr_error;       /* see nfsd4_open_omfg() */
	u32		op_recall;          /* recall */
	struct nfsd4_change_info op_cinfo;  /* response */
	u32		op_rflags;          /* response */
	bool		op_truncate;        /* used during processing */
	bool		op_created;         /* used during processing */
	struct nfs4_openowner *op_openowner; /* used during processing */
	struct nfs4_file *op_file;          /* used during processing */
	struct nfs4_ol_stateid *op_stp;	    /* used during processing */
	struct nfs4_clnt_odstate *op_odstate; /* used during processing */
	struct nfs4_acl *op_acl;
	struct xdr_netobj op_label;
};

struct nfsd4_open_confirm {
	stateid_t	oc_req_stateid		/* request */;
	u32		oc_seqid		/* request */;
	stateid_t	oc_resp_stateid		/* response */;
};

struct nfsd4_open_downgrade {
	stateid_t	od_stateid;
	u32		od_seqid;
	u32		od_share_access;	/* request */
	u32		od_deleg_want;		/* request */
	u32		od_share_deny;		/* request */
};

struct nfsd4_read {
	stateid_t	rd_stateid;         /* request */
	u64		rd_offset;          /* request */
	u32		rd_length;          /* request */
	int		rd_vlen;
	struct file	*rd_filp;
	bool		rd_tmp_file;

	struct svc_rqst *rd_rqstp;          /* response */
	struct svc_fh	*rd_fhp;            /* response */
};

struct nfsd4_readdir {
	u64		rd_cookie;          /* request */
	nfs4_verifier	rd_verf;            /* request */
	u32		rd_dircount;        /* request */
	u32		rd_maxcount;        /* request */
	u32		rd_bmval[3];        /* request */
	struct svc_rqst *rd_rqstp;          /* response */
	struct svc_fh	*rd_fhp;            /* response */

	struct readdir_cd	common;
	struct xdr_stream	*xdr;
	int			cookie_offset;
};

struct nfsd4_release_lockowner {
	clientid_t        rl_clientid;
	struct xdr_netobj rl_owner;
};

struct nfsd4_readlink {
	struct svc_rqst *rl_rqstp;          /* request */
	struct svc_fh	*rl_fhp;            /* request */
};

struct nfsd4_remove {
	u32		rm_namelen;         /* request */
	char *		rm_name;            /* request */
	struct nfsd4_change_info  rm_cinfo; /* response */
};

struct nfsd4_rename {
	u32		rn_snamelen;        /* request */
	char *		rn_sname;           /* request */
	u32		rn_tnamelen;        /* request */
	char *		rn_tname;           /* request */
	struct nfsd4_change_info  rn_sinfo; /* response */
	struct nfsd4_change_info  rn_tinfo; /* response */
};

struct nfsd4_secinfo {
	u32 si_namelen;				/* request */
	char *si_name;				/* request */
	struct svc_export *si_exp;		/* response */
};

struct nfsd4_secinfo_no_name {
	u32 sin_style;				/* request */
	struct svc_export *sin_exp;		/* response */
};

struct nfsd4_setattr {
	stateid_t	sa_stateid;         /* request */
	u32		sa_bmval[3];        /* request */
	struct iattr	sa_iattr;           /* request */
	struct nfs4_acl	*sa_acl;
	struct xdr_netobj	sa_label;
};

struct nfsd4_setclientid {
	nfs4_verifier	se_verf;            /* request */
	struct xdr_netobj se_name;
	u32		se_callback_prog;   /* request */
	u32		se_callback_netid_len;  /* request */
	char		*se_callback_netid_val; /* request */
	u32		se_callback_addr_len;   /* request */
	char		*se_callback_addr_val;  /* request */
	u32		se_callback_ident;  /* request */
	clientid_t	se_clientid;        /* response */
	nfs4_verifier	se_confirm;         /* response */
};

struct nfsd4_setclientid_confirm {
	clientid_t	sc_clientid;
	nfs4_verifier	sc_confirm;
};

struct nfsd4_saved_compoundargs {
	__be32 *p;
	__be32 *end;
	int pagelen;
	struct page **pagelist;
};

struct nfsd4_test_stateid_id {
	__be32			ts_id_status;
	stateid_t		ts_id_stateid;
	struct list_head	ts_id_list;
};

struct nfsd4_test_stateid {
	u32		ts_num_ids;
	struct list_head ts_stateid_list;
};

struct nfsd4_free_stateid {
	stateid_t	fr_stateid;         /* request */
};

/* also used for NVERIFY */
struct nfsd4_verify {
	u32		ve_bmval[3];        /* request */
	u32		ve_attrlen;         /* request */
	char *		ve_attrval;         /* request */
};

struct nfsd4_write {
	stateid_t	wr_stateid;         /* request */
	u64		wr_offset;          /* request */
	u32		wr_stable_how;      /* request */
	u32		wr_buflen;          /* request */
	struct kvec	wr_head;
	struct page **	wr_pagelist;        /* request */

	u32		wr_bytes_written;   /* response */
	u32		wr_how_written;     /* response */
	nfs4_verifier	wr_verifier;        /* response */
};

struct nfsd4_exchange_id {
	nfs4_verifier	verifier;
	struct xdr_netobj clname;
	u32		flags;
	clientid_t	clientid;
	u32		seqid;
	int		spa_how;
	u32		spo_must_enforce[3];
	u32		spo_must_allow[3];
};

struct nfsd4_sequence {
	struct nfs4_sessionid	sessionid;		/* request/response */
	u32			seqid;			/* request/response */
	u32			slotid;			/* request/response */
	u32			maxslots;		/* request/response */
	u32			cachethis;		/* request */
#if 0
	u32			target_maxslots;	/* response */
#endif /* not yet */
	u32			status_flags;		/* response */
};

struct nfsd4_destroy_session {
	struct nfs4_sessionid	sessionid;
};

struct nfsd4_destroy_clientid {
	clientid_t clientid;
};

struct nfsd4_reclaim_complete {
	u32 rca_one_fs;
};

struct nfsd4_deviceid {
	u64			fsid_idx;
	u32			generation;
	u32			pad;
};

struct nfsd4_layout_seg {
	u32			iomode;
	u64			offset;
	u64			length;
};

struct nfsd4_getdeviceinfo {
	struct nfsd4_deviceid	gd_devid;	/* request */
	u32			gd_layout_type;	/* request */
	u32			gd_maxcount;	/* request */
	u32			gd_notify_types;/* request - response */
	void			*gd_device;	/* response */
};

struct nfsd4_layoutget {
	u64			lg_minlength;	/* request */
	u32			lg_signal;	/* request */
	u32			lg_layout_type;	/* request */
	u32			lg_maxcount;	/* request */
	stateid_t		lg_sid;		/* request/response */
	struct nfsd4_layout_seg	lg_seg;		/* request/response */
	void			*lg_content;	/* response */
};

struct nfsd4_layoutcommit {
	stateid_t		lc_sid;		/* request */
	struct nfsd4_layout_seg	lc_seg;		/* request */
	u32			lc_reclaim;	/* request */
	u32			lc_newoffset;	/* request */
	u64			lc_last_wr;	/* request */
	struct timespec		lc_mtime;	/* request */
	u32			lc_layout_type;	/* request */
	u32			lc_up_len;	/* layout length */
	void			*lc_up_layout;	/* decoded by callback */
	u32			lc_size_chg;	/* boolean for response */
	u64			lc_newsize;	/* response */
};

struct nfsd4_layoutreturn {
	u32			lr_return_type;	/* request */
	u32			lr_layout_type;	/* request */
	struct nfsd4_layout_seg	lr_seg;		/* request */
	u32			lr_reclaim;	/* request */
	u32			lrf_body_len;	/* request */
	void			*lrf_body;	/* request */
	stateid_t		lr_sid;		/* request/response */
	u32			lrs_present;	/* response */
};

struct nfsd4_fallocate {
	/* request */
	stateid_t	falloc_stateid;
	loff_t		falloc_offset;
	u64		falloc_length;
};

struct nfsd4_clone {
	/* request */
	stateid_t	cl_src_stateid;
	stateid_t	cl_dst_stateid;
	u64		cl_src_pos;
	u64		cl_dst_pos;
	u64		cl_count;
};

struct nfsd42_write_res {
	u64			wr_bytes_written;
	u32			wr_stable_how;
	nfs4_verifier		wr_verifier;
};

struct nfsd4_copy {
	/* request */
	stateid_t	cp_src_stateid;
	stateid_t	cp_dst_stateid;
	u64		cp_src_pos;
	u64		cp_dst_pos;
	u64		cp_count;

	/* both */
	bool		cp_consecutive;
	bool		cp_synchronous;

	/* response */
	struct nfsd42_write_res	cp_res;
};

struct nfsd4_seek {
	/* request */
	stateid_t	seek_stateid;
	loff_t		seek_offset;
	u32		seek_whence;

	/* response */
	u32		seek_eof;
	loff_t		seek_pos;
};

struct nfsd4_op {
	int					opnum;
	__be32					status;
	union {
		struct nfsd4_access		access;
		struct nfsd4_close		close;
		struct nfsd4_commit		commit;
		struct nfsd4_create		create;
		struct nfsd4_delegreturn	delegreturn;
		struct nfsd4_getattr		getattr;
		struct svc_fh *			getfh;
		struct nfsd4_link		link;
		struct nfsd4_lock		lock;
		struct nfsd4_lockt		lockt;
		struct nfsd4_locku		locku;
		struct nfsd4_lookup		lookup;
		struct nfsd4_verify		nverify;
		struct nfsd4_open		open;
		struct nfsd4_open_confirm	open_confirm;
		struct nfsd4_open_downgrade	open_downgrade;
		struct nfsd4_putfh		putfh;
		struct nfsd4_read		read;
		struct nfsd4_readdir		readdir;
		struct nfsd4_readlink		readlink;
		struct nfsd4_remove		remove;
		struct nfsd4_rename		rename;
		clientid_t			renew;
		struct nfsd4_secinfo		secinfo;
		struct nfsd4_setattr		setattr;
		struct nfsd4_setclientid	setclientid;
		struct nfsd4_setclientid_confirm setclientid_confirm;
		struct nfsd4_verify		verify;
		struct nfsd4_write		write;
		struct nfsd4_release_lockowner	release_lockowner;

		/* NFSv4.1 */
		struct nfsd4_exchange_id	exchange_id;
		struct nfsd4_backchannel_ctl	backchannel_ctl;
		struct nfsd4_bind_conn_to_session bind_conn_to_session;
		struct nfsd4_create_session	create_session;
		struct nfsd4_destroy_session	destroy_session;
		struct nfsd4_sequence		sequence;
		struct nfsd4_reclaim_complete	reclaim_complete;
		struct nfsd4_test_stateid	test_stateid;
		struct nfsd4_free_stateid	free_stateid;
		struct nfsd4_getdeviceinfo	getdeviceinfo;
		struct nfsd4_layoutget		layoutget;
		struct nfsd4_layoutcommit	layoutcommit;
		struct nfsd4_layoutreturn	layoutreturn;

		/* NFSv4.2 */
		struct nfsd4_fallocate		allocate;
		struct nfsd4_fallocate		deallocate;
		struct nfsd4_clone		clone;
		struct nfsd4_copy		copy;
		struct nfsd4_seek		seek;
	} u;
	struct nfs4_replay *			replay;
};

bool nfsd4_cache_this_op(struct nfsd4_op *);

/*
 * Memory needed just for the duration of processing one compound:
 */
struct svcxdr_tmpbuf {
	struct svcxdr_tmpbuf *next;
	char buf[];
};

struct nfsd4_compoundargs {
	/* scratch variables for XDR decode */
	__be32 *			p;
	__be32 *			end;
	struct page **			pagelist;
	int				pagelen;
	__be32				tmp[8];
	__be32 *			tmpp;
	struct svcxdr_tmpbuf		*to_free;

	struct svc_rqst			*rqstp;

	u32				taglen;
	char *				tag;
	u32				minorversion;
	u32				opcnt;
	struct nfsd4_op			*ops;
	struct nfsd4_op			iops[8];
	int				cachetype;
};

struct nfsd4_compoundres {
	/* scratch variables for XDR encode */
	struct xdr_stream		xdr;

	struct svc_rqst *		rqstp;

	u32				taglen;
	char *				tag;
	u32				opcnt;
	__be32 *			tagp; /* tag, opcount encode location */
	struct nfsd4_compound_state	cstate;
};

static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp)
{
	struct nfsd4_compoundargs *args = resp->rqstp->rq_argp;
	return resp->opcnt == 1 && args->ops[0].opnum == OP_SEQUENCE;
}

static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
{
	return !(resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS)
		|| nfsd4_is_solo_sequence(resp);
}

static inline bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfsd4_compoundargs *argp = rqstp->rq_argp;

	return argp->opcnt == resp->opcnt;
}

int nfsd4_max_reply(struct svc_rqst *rqstp, struct nfsd4_op *op);
void warn_on_nonidempotent_op(struct nfsd4_op *op);

#define NFS4_SVC_XDRSIZE	sizeof(struct nfsd4_compoundargs)

static inline void
set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
{
	BUG_ON(!fhp->fh_pre_saved);
	cinfo->atomic = (u32)fhp->fh_post_saved;
	cinfo->change_supported = IS_I_VERSION(d_inode(fhp->fh_dentry));

	cinfo->before_change = fhp->fh_pre_change;
	cinfo->after_change = fhp->fh_post_change;
	cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec;
	cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec;
	cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec;
	cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec;
}


bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp);
int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
int nfs4svc_decode_compoundargs(struct svc_rqst *, __be32 *,
		struct nfsd4_compoundargs *);
int nfs4svc_encode_compoundres(struct svc_rqst *, __be32 *,
		struct nfsd4_compoundres *);
__be32 nfsd4_check_resp_size(struct nfsd4_compoundres *, u32);
void nfsd4_encode_operation(struct nfsd4_compoundres *, struct nfsd4_op *);
void nfsd4_encode_replay(struct xdr_stream *xdr, struct nfsd4_op *op);
__be32 nfsd4_encode_fattr_to_buf(__be32 **p, int words,
		struct svc_fh *fhp, struct svc_export *exp,
		struct dentry *dentry,
		u32 *bmval, struct svc_rqst *, int ignore_crossmnt);
extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *,
		struct nfsd4_setclientid *setclid);
extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *,
		struct nfsd4_setclientid_confirm *setclientid_confirm);
extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *, struct nfsd4_exchange_id *);
extern __be32 nfsd4_backchannel_ctl(struct svc_rqst *,
		struct nfsd4_compound_state *, struct nfsd4_backchannel_ctl *);
extern __be32 nfsd4_bind_conn_to_session(struct svc_rqst *,
		struct nfsd4_compound_state *,
		struct nfsd4_bind_conn_to_session *);
extern __be32 nfsd4_create_session(struct svc_rqst *,
		struct nfsd4_compound_state *,
		struct nfsd4_create_session *);
extern __be32 nfsd4_sequence(struct svc_rqst *,
		struct nfsd4_compound_state *,
		struct nfsd4_sequence *);
extern void nfsd4_sequence_done(struct nfsd4_compoundres *resp);
extern __be32 nfsd4_destroy_session(struct svc_rqst *,
		struct nfsd4_compound_state *,
		struct nfsd4_destroy_session *);
extern __be32 nfsd4_destroy_clientid(struct svc_rqst *,
		struct nfsd4_compound_state *,
		struct nfsd4_destroy_clientid *);
__be32 nfsd4_reclaim_complete(struct svc_rqst *,
		struct nfsd4_compound_state *,
		struct nfsd4_reclaim_complete *);
extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *,
		struct nfsd4_open *open, struct nfsd_net *nn);
extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp,
		struct svc_fh *current_fh, struct nfsd4_open *open);
extern void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate);
extern void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
		struct nfsd4_open *open);
extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *, struct nfsd4_open_confirm *oc);
extern __be32 nfsd4_close(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *,
		struct nfsd4_close *close);
extern __be32 nfsd4_open_downgrade(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *,
		struct nfsd4_open_downgrade *od);
extern __be32 nfsd4_lock(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *,
		struct nfsd4_lock *lock);
extern __be32 nfsd4_lockt(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *,
		struct nfsd4_lockt *lockt);
extern __be32 nfsd4_locku(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *,
		struct nfsd4_locku *locku);
extern __be32 nfsd4_release_lockowner(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *,
		struct nfsd4_release_lockowner *rlockowner);
extern int nfsd4_release_compoundargs(void *rq, __be32 *p, void *resp);
extern __be32 nfsd4_delegreturn(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *, struct nfsd4_delegreturn *dr);
extern __be32 nfsd4_renew(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *, clientid_t *clid);
extern __be32 nfsd4_test_stateid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *,
		struct nfsd4_test_stateid *test_stateid);
extern __be32 nfsd4_free_stateid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *,
		struct nfsd4_free_stateid *free_stateid);
extern void nfsd4_bump_seqid(struct nfsd4_compound_state *, __be32 nfserr);

#endif

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */
null
null
null
null
77,172
21,215
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
186,210
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/*
 * Marvell Wireless LAN device driver: AP specific command handling
 *
 * Copyright (C) 2012-2014, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License"). You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
 * this warranty disclaimer.
 */

#include "main.h"
#include "11ac.h"
#include "11n.h"

/* This function parses security related parameters from cfg80211_ap_settings
 * and sets into FW understandable bss_config structure.
 */
int mwifiex_set_secure_params(struct mwifiex_private *priv,
			      struct mwifiex_uap_bss_param *bss_config,
			      struct cfg80211_ap_settings *params)
{
	int i;
	struct mwifiex_wep_key wep_key;

	if (!params->privacy) {
		bss_config->protocol = PROTOCOL_NO_SECURITY;
		bss_config->key_mgmt = KEY_MGMT_NONE;
		bss_config->wpa_cfg.length = 0;
		priv->sec_info.wep_enabled = 0;
		priv->sec_info.wpa_enabled = 0;
		priv->sec_info.wpa2_enabled = 0;

		return 0;
	}

	switch (params->auth_type) {
	case NL80211_AUTHTYPE_OPEN_SYSTEM:
		bss_config->auth_mode = WLAN_AUTH_OPEN;
		break;
	case NL80211_AUTHTYPE_SHARED_KEY:
		bss_config->auth_mode = WLAN_AUTH_SHARED_KEY;
		break;
	case NL80211_AUTHTYPE_NETWORK_EAP:
		bss_config->auth_mode = WLAN_AUTH_LEAP;
		break;
	default:
		bss_config->auth_mode = MWIFIEX_AUTH_MODE_AUTO;
		break;
	}

	bss_config->key_mgmt_operation |= KEY_MGMT_ON_HOST;

	for (i = 0; i < params->crypto.n_akm_suites; i++) {
		switch (params->crypto.akm_suites[i]) {
		case WLAN_AKM_SUITE_8021X:
			if (params->crypto.wpa_versions &
			    NL80211_WPA_VERSION_1) {
				bss_config->protocol = PROTOCOL_WPA;
				bss_config->key_mgmt = KEY_MGMT_EAP;
			}
			if (params->crypto.wpa_versions &
			    NL80211_WPA_VERSION_2) {
				bss_config->protocol |= PROTOCOL_WPA2;
				bss_config->key_mgmt = KEY_MGMT_EAP;
			}
			break;
		case WLAN_AKM_SUITE_PSK:
			if (params->crypto.wpa_versions &
			    NL80211_WPA_VERSION_1) {
				bss_config->protocol = PROTOCOL_WPA;
				bss_config->key_mgmt = KEY_MGMT_PSK;
			}
			if (params->crypto.wpa_versions &
			    NL80211_WPA_VERSION_2) {
				bss_config->protocol |= PROTOCOL_WPA2;
				bss_config->key_mgmt = KEY_MGMT_PSK;
			}
			break;
		default:
			break;
		}
	}

	for (i = 0; i < params->crypto.n_ciphers_pairwise; i++) {
		switch (params->crypto.ciphers_pairwise[i]) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
				bss_config->wpa_cfg.pairwise_cipher_wpa |=
								CIPHER_TKIP;
			if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
				bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
								CIPHER_TKIP;
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
				bss_config->wpa_cfg.pairwise_cipher_wpa |=
							CIPHER_AES_CCMP;
			if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
				bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
							CIPHER_AES_CCMP;
		default:
			break;
		}
	}

	switch (params->crypto.cipher_group) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		if (priv->sec_info.wep_enabled) {
			bss_config->protocol = PROTOCOL_STATIC_WEP;
			bss_config->key_mgmt = KEY_MGMT_NONE;
			bss_config->wpa_cfg.length = 0;

			for (i = 0; i < NUM_WEP_KEYS; i++) {
				wep_key = priv->wep_key[i];
				bss_config->wep_cfg[i].key_index = i;

				if (priv->wep_key_curr_index == i)
					bss_config->wep_cfg[i].is_default = 1;
				else
					bss_config->wep_cfg[i].is_default = 0;

				bss_config->wep_cfg[i].length =
							wep_key.key_length;
				memcpy(&bss_config->wep_cfg[i].key,
				       &wep_key.key_material,
				       wep_key.key_length);
			}
		}
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		bss_config->wpa_cfg.group_cipher = CIPHER_TKIP;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		bss_config->wpa_cfg.group_cipher = CIPHER_AES_CCMP;
		break;
	default:
		break;
	}

	return 0;
}

/* This function updates 11n related parameters from IE and sets them into
 * bss_config structure.
 */
void mwifiex_set_ht_params(struct mwifiex_private *priv,
			   struct mwifiex_uap_bss_param *bss_cfg,
			   struct cfg80211_ap_settings *params)
{
	const u8 *ht_ie;
	u16 cap_info;

	if (!ISSUPP_11NENABLED(priv->adapter->fw_cap_info))
		return;

	ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, params->beacon.tail,
				 params->beacon.tail_len);
	if (ht_ie) {
		memcpy(&bss_cfg->ht_cap, ht_ie + 2,
		       sizeof(struct ieee80211_ht_cap));
		cap_info = le16_to_cpu(bss_cfg->ht_cap.cap_info);
		memset(&bss_cfg->ht_cap.mcs, 0,
		       priv->adapter->number_of_antenna);
		switch (GET_RXSTBC(cap_info)) {
		case MWIFIEX_RX_STBC1:
			/* HT_CAP 1X1 mode */
			bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
			break;
		case MWIFIEX_RX_STBC12:	/* fall through */
		case MWIFIEX_RX_STBC123:
			/* HT_CAP 2X2 mode */
			bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
			bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
			break;
		default:
			mwifiex_dbg(priv->adapter, WARN,
				    "Unsupported RX-STBC, default to 2x2\n");
			bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
			bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
			break;
		}
		priv->ap_11n_enabled = 1;
	} else {
		memset(&bss_cfg->ht_cap, 0, sizeof(struct ieee80211_ht_cap));
		bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP);
		bss_cfg->ht_cap.ampdu_params_info = MWIFIEX_DEF_AMPDU;
	}

	return;
}

/* This function updates 11ac related parameters from IE
 * and sets them into bss_config structure.
 */
void mwifiex_set_vht_params(struct mwifiex_private *priv,
			    struct mwifiex_uap_bss_param *bss_cfg,
			    struct cfg80211_ap_settings *params)
{
	const u8 *vht_ie;

	vht_ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, params->beacon.tail,
				  params->beacon.tail_len);
	if (vht_ie) {
		memcpy(&bss_cfg->vht_cap, vht_ie + 2,
		       sizeof(struct ieee80211_vht_cap));
		priv->ap_11ac_enabled = 1;
	} else {
		priv->ap_11ac_enabled = 0;
	}

	return;
}

/* This function updates 11ac related parameters from IE
 * and sets them into bss_config structure.
 */
void mwifiex_set_tpc_params(struct mwifiex_private *priv,
			    struct mwifiex_uap_bss_param *bss_cfg,
			    struct cfg80211_ap_settings *params)
{
	const u8 *tpc_ie;

	tpc_ie = cfg80211_find_ie(WLAN_EID_TPC_REQUEST, params->beacon.tail,
				  params->beacon.tail_len);
	if (tpc_ie)
		bss_cfg->power_constraint = *(tpc_ie + 2);
	else
		bss_cfg->power_constraint = 0;
}

/* Enable VHT only when cfg80211_ap_settings has VHT IE.
 * Otherwise disable VHT.
 */
void mwifiex_set_vht_width(struct mwifiex_private *priv,
			   enum nl80211_chan_width width,
			   bool ap_11ac_enable)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_11ac_vht_cfg vht_cfg;

	vht_cfg.band_config = VHT_CFG_5GHZ;
	vht_cfg.cap_info = adapter->hw_dot_11ac_dev_cap;

	if (!ap_11ac_enable) {
		vht_cfg.mcs_tx_set = DISABLE_VHT_MCS_SET;
		vht_cfg.mcs_rx_set = DISABLE_VHT_MCS_SET;
	} else {
		vht_cfg.mcs_tx_set = DEFAULT_VHT_MCS_SET;
		vht_cfg.mcs_rx_set = DEFAULT_VHT_MCS_SET;
	}

	vht_cfg.misc_config = VHT_CAP_UAP_ONLY;

	if (ap_11ac_enable && width >= NL80211_CHAN_WIDTH_80)
		vht_cfg.misc_config |= VHT_BW_80_160_80P80;

	mwifiex_send_cmd(priv, HostCmd_CMD_11AC_CFG,
			 HostCmd_ACT_GEN_SET, 0, &vht_cfg, true);

	return;
}

/* This function finds supported rates IE from beacon parameter and sets
 * these rates into bss_config structure.
 */
void
mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
		      struct cfg80211_ap_settings *params)
{
	struct ieee_types_header *rate_ie;
	int var_offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
	const u8 *var_pos = params->beacon.head + var_offset;
	int len = params->beacon.head_len - var_offset;
	u8 rate_len = 0;

	rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
	if (rate_ie) {
		memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
		rate_len = rate_ie->len;
	}

	rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
					   params->beacon.tail,
					   params->beacon.tail_len);
	if (rate_ie)
		memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);

	return;
}

/* This function initializes some of mwifiex_uap_bss_param variables.
 * This helps FW in ignoring invalid values. These values may or may not
 * be get updated to valid ones at later stage.
 */
void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config)
{
	config->bcast_ssid_ctl = 0x7F;
	config->radio_ctl = 0x7F;
	config->dtim_period = 0x7F;
	config->beacon_period = 0x7FFF;
	config->auth_mode = 0x7F;
	config->rts_threshold = 0x7FFF;
	config->frag_threshold = 0x7FFF;
	config->retry_limit = 0x7F;
	config->qos_info = 0xFF;
}

/* This function parses BSS related parameters from structure
 * and prepares TLVs specific to WPA/WPA2 security.
 * These TLVs are appended to command buffer.
 */
static void
mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
{
	struct host_cmd_tlv_pwk_cipher *pwk_cipher;
	struct host_cmd_tlv_gwk_cipher *gwk_cipher;
	struct host_cmd_tlv_passphrase *passphrase;
	struct host_cmd_tlv_akmp *tlv_akmp;
	struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
	u16 cmd_size = *param_size;
	u8 *tlv = *tlv_buf;

	tlv_akmp = (struct host_cmd_tlv_akmp *)tlv;
	tlv_akmp->header.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
	tlv_akmp->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
					sizeof(struct mwifiex_ie_types_header));
	tlv_akmp->key_mgmt_operation = cpu_to_le16(bss_cfg->key_mgmt_operation);
	tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
	cmd_size += sizeof(struct host_cmd_tlv_akmp);
	tlv += sizeof(struct host_cmd_tlv_akmp);

	if (bss_cfg->wpa_cfg.pairwise_cipher_wpa & VALID_CIPHER_BITMAP) {
		pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
		pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
		pwk_cipher->header.len =
			cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
				    sizeof(struct mwifiex_ie_types_header));
		pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
		pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa;
		cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
		tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
	}

	if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 & VALID_CIPHER_BITMAP) {
		pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
		pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
		pwk_cipher->header.len =
			cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
				    sizeof(struct mwifiex_ie_types_header));
		pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
		pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
		cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
		tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
	}

	if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
		gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
		gwk_cipher->header.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
		gwk_cipher->header.len =
			cpu_to_le16(sizeof(struct host_cmd_tlv_gwk_cipher) -
				    sizeof(struct mwifiex_ie_types_header));
		gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
		cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
		tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
	}

	if (bss_cfg->wpa_cfg.length) {
		passphrase = (struct host_cmd_tlv_passphrase *)tlv;
		passphrase->header.type =
				cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
		passphrase->header.len = cpu_to_le16(bss_cfg->wpa_cfg.length);
		memcpy(passphrase->passphrase, bss_cfg->wpa_cfg.passphrase,
		       bss_cfg->wpa_cfg.length);
		cmd_size += sizeof(struct mwifiex_ie_types_header) +
			    bss_cfg->wpa_cfg.length;
		tlv += sizeof(struct mwifiex_ie_types_header) +
		       bss_cfg->wpa_cfg.length;
	}

	*param_size = cmd_size;
	*tlv_buf = tlv;

	return;
}

/* This function parses WMM related parameters from cfg80211_ap_settings
 * structure and updates bss_config structure.
 */
void
mwifiex_set_wmm_params(struct mwifiex_private *priv,
		       struct mwifiex_uap_bss_param *bss_cfg,
		       struct cfg80211_ap_settings *params)
{
	const u8 *vendor_ie;
	const u8 *wmm_ie;
	u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02};

	vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
					    WLAN_OUI_TYPE_MICROSOFT_WMM,
					    params->beacon.tail,
					    params->beacon.tail_len);
	if (vendor_ie) {
		wmm_ie = vendor_ie;
		memcpy(&bss_cfg->wmm_info, wmm_ie +
		       sizeof(struct ieee_types_header), *(wmm_ie + 1));
		priv->wmm_enabled = 1;
	} else {
		memset(&bss_cfg->wmm_info, 0, sizeof(bss_cfg->wmm_info));
		memcpy(&bss_cfg->wmm_info.oui, wmm_oui, sizeof(wmm_oui));
		bss_cfg->wmm_info.subtype = MWIFIEX_WMM_SUBTYPE;
		bss_cfg->wmm_info.version = MWIFIEX_WMM_VERSION;
		priv->wmm_enabled = 0;
	}

	bss_cfg->qos_info = 0x00;
	return;
}

/* This function parses BSS related parameters from structure
 * and prepares TLVs specific to WEP encryption.
 * These TLVs are appended to command buffer.
 */
static void
mwifiex_uap_bss_wep(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
{
	struct host_cmd_tlv_wep_key *wep_key;
	u16 cmd_size = *param_size;
	int i;
	u8 *tlv = *tlv_buf;
	struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;

	for (i = 0; i < NUM_WEP_KEYS; i++) {
		if (bss_cfg->wep_cfg[i].length &&
		    (bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP40 ||
		     bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP104)) {
			wep_key = (struct host_cmd_tlv_wep_key *)tlv;
			wep_key->header.type =
				cpu_to_le16(TLV_TYPE_UAP_WEP_KEY);
			wep_key->header.len =
				cpu_to_le16(bss_cfg->wep_cfg[i].length + 2);
			wep_key->key_index = bss_cfg->wep_cfg[i].key_index;
			wep_key->is_default = bss_cfg->wep_cfg[i].is_default;
			memcpy(wep_key->key, bss_cfg->wep_cfg[i].key,
			       bss_cfg->wep_cfg[i].length);
			cmd_size += sizeof(struct mwifiex_ie_types_header) + 2 +
				    bss_cfg->wep_cfg[i].length;
			tlv += sizeof(struct mwifiex_ie_types_header) + 2 +
			       bss_cfg->wep_cfg[i].length;
		}
	}

	*param_size = cmd_size;
	*tlv_buf = tlv;

	return;
}

/* This function parses BSS related parameters from structure
 * and prepares TLVs. These TLVs are appended to command buffer.
 */
static int
mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
{
	struct host_cmd_tlv_dtim_period *dtim_period;
	struct host_cmd_tlv_beacon_period *beacon_period;
	struct host_cmd_tlv_ssid *ssid;
	struct host_cmd_tlv_bcast_ssid *bcast_ssid;
	struct host_cmd_tlv_channel_band *chan_band;
	struct host_cmd_tlv_frag_threshold *frag_threshold;
	struct host_cmd_tlv_rts_threshold *rts_threshold;
	struct host_cmd_tlv_retry_limit *retry_limit;
	struct host_cmd_tlv_encrypt_protocol *encrypt_protocol;
	struct host_cmd_tlv_auth_type *auth_type;
	struct host_cmd_tlv_rates *tlv_rates;
	struct host_cmd_tlv_ageout_timer *ao_timer, *ps_ao_timer;
	struct host_cmd_tlv_power_constraint *pwr_ct;
	struct mwifiex_ie_types_htcap *htcap;
	struct mwifiex_ie_types_wmmcap *wmm_cap;
	struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
	int i;
	u16 cmd_size = *param_size;

	if (bss_cfg->ssid.ssid_len) {
		ssid = (struct host_cmd_tlv_ssid *)tlv;
		ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
		ssid->header.len = cpu_to_le16((u16)bss_cfg->ssid.ssid_len);
		memcpy(ssid->ssid, bss_cfg->ssid.ssid, bss_cfg->ssid.ssid_len);
		cmd_size += sizeof(struct mwifiex_ie_types_header) +
			    bss_cfg->ssid.ssid_len;
		tlv += sizeof(struct mwifiex_ie_types_header) +
		       bss_cfg->ssid.ssid_len;

		bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
		bcast_ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
		bcast_ssid->header.len =
				cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
		bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
		cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
		tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
	}
	if (bss_cfg->rates[0]) {
		tlv_rates = (struct host_cmd_tlv_rates *)tlv;
		tlv_rates->header.type = cpu_to_le16(TLV_TYPE_UAP_RATES);

		for (i = 0; i < MWIFIEX_SUPPORTED_RATES && bss_cfg->rates[i];
		     i++)
			tlv_rates->rates[i] = bss_cfg->rates[i];

		tlv_rates->header.len = cpu_to_le16(i);
		cmd_size += sizeof(struct host_cmd_tlv_rates) + i;
		tlv += sizeof(struct host_cmd_tlv_rates) + i;
	}
	if (bss_cfg->channel &&
	    (((bss_cfg->band_cfg & BIT(0)) == BAND_CONFIG_BG &&
	      bss_cfg->channel <= MAX_CHANNEL_BAND_BG) ||
	     ((bss_cfg->band_cfg & BIT(0)) == BAND_CONFIG_A &&
	      bss_cfg->channel <= MAX_CHANNEL_BAND_A))) {
		chan_band = (struct host_cmd_tlv_channel_band *)tlv;
		chan_band->header.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
		chan_band->header.len =
			cpu_to_le16(sizeof(struct host_cmd_tlv_channel_band) -
				    sizeof(struct mwifiex_ie_types_header));
		chan_band->band_config = bss_cfg->band_cfg;
		chan_band->channel = bss_cfg->channel;
		cmd_size += sizeof(struct host_cmd_tlv_channel_band);
		tlv += sizeof(struct host_cmd_tlv_channel_band);
	}
	if (bss_cfg->beacon_period >= MIN_BEACON_PERIOD &&
	    bss_cfg->beacon_period <= MAX_BEACON_PERIOD) {
		beacon_period = (struct host_cmd_tlv_beacon_period *)tlv;
		beacon_period->header.type =
					cpu_to_le16(TLV_TYPE_UAP_BEACON_PERIOD);
		beacon_period->header.len =
			cpu_to_le16(sizeof(struct host_cmd_tlv_beacon_period) -
				    sizeof(struct mwifiex_ie_types_header));
		beacon_period->period = cpu_to_le16(bss_cfg->beacon_period);
		cmd_size += sizeof(struct host_cmd_tlv_beacon_period);
		tlv += sizeof(struct host_cmd_tlv_beacon_period);
	}
	if (bss_cfg->dtim_period >= MIN_DTIM_PERIOD &&
	    bss_cfg->dtim_period <= MAX_DTIM_PERIOD) {
		dtim_period = (struct host_cmd_tlv_dtim_period *)tlv;
		dtim_period->header.type =
				cpu_to_le16(TLV_TYPE_UAP_DTIM_PERIOD);
		dtim_period->header.len =
			cpu_to_le16(sizeof(struct host_cmd_tlv_dtim_period) -
				    sizeof(struct mwifiex_ie_types_header));
		dtim_period->period = bss_cfg->dtim_period;
		cmd_size += sizeof(struct host_cmd_tlv_dtim_period);
		tlv += sizeof(struct host_cmd_tlv_dtim_period);
	}
	if (bss_cfg->rts_threshold <= MWIFIEX_RTS_MAX_VALUE) {
		rts_threshold = (struct host_cmd_tlv_rts_threshold *)tlv;
		rts_threshold->header.type =
				cpu_to_le16(TLV_TYPE_UAP_RTS_THRESHOLD);
		rts_threshold->header.len =
			cpu_to_le16(sizeof(struct host_cmd_tlv_rts_threshold) -
				    sizeof(struct mwifiex_ie_types_header));
		rts_threshold->rts_thr = cpu_to_le16(bss_cfg->rts_threshold);
		cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
		tlv += sizeof(struct host_cmd_tlv_frag_threshold);
	}
	if ((bss_cfg->frag_threshold >= MWIFIEX_FRAG_MIN_VALUE) &&
	    (bss_cfg->frag_threshold <= MWIFIEX_FRAG_MAX_VALUE)) {
		frag_threshold = (struct host_cmd_tlv_frag_threshold *)tlv;
		frag_threshold->header.type =
				cpu_to_le16(TLV_TYPE_UAP_FRAG_THRESHOLD);
		frag_threshold->header.len =
			cpu_to_le16(sizeof(struct host_cmd_tlv_frag_threshold) -
				    sizeof(struct mwifiex_ie_types_header));
		frag_threshold->frag_thr = cpu_to_le16(bss_cfg->frag_threshold);
		cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
		tlv += sizeof(struct host_cmd_tlv_frag_threshold);
	}
	if (bss_cfg->retry_limit <= MWIFIEX_RETRY_LIMIT) {
		retry_limit = (struct host_cmd_tlv_retry_limit *)tlv;
		retry_limit->header.type =
				cpu_to_le16(TLV_TYPE_UAP_RETRY_LIMIT);
		retry_limit->header.len =
			cpu_to_le16(sizeof(struct host_cmd_tlv_retry_limit) -
				    sizeof(struct mwifiex_ie_types_header));
		retry_limit->limit = (u8)bss_cfg->retry_limit;
		cmd_size += sizeof(struct host_cmd_tlv_retry_limit);
		tlv += sizeof(struct host_cmd_tlv_retry_limit);
	}
	if ((bss_cfg->protocol & PROTOCOL_WPA) ||
	    (bss_cfg->protocol & PROTOCOL_WPA2) ||
	    (bss_cfg->protocol & PROTOCOL_EAP))
		mwifiex_uap_bss_wpa(&tlv, cmd_buf, &cmd_size);
	else
		mwifiex_uap_bss_wep(&tlv, cmd_buf, &cmd_size);

	if ((bss_cfg->auth_mode <= WLAN_AUTH_SHARED_KEY) ||
	    (bss_cfg->auth_mode == MWIFIEX_AUTH_MODE_AUTO)) {
		auth_type = (struct host_cmd_tlv_auth_type *)tlv;
		auth_type->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
		auth_type->header.len =
			cpu_to_le16(sizeof(struct host_cmd_tlv_auth_type) -
				    sizeof(struct mwifiex_ie_types_header));
		auth_type->auth_type = (u8)bss_cfg->auth_mode;
		cmd_size += sizeof(struct host_cmd_tlv_auth_type);
		tlv += sizeof(struct host_cmd_tlv_auth_type);
	}
	if (bss_cfg->protocol) {
		encrypt_protocol = (struct host_cmd_tlv_encrypt_protocol *)tlv;
		encrypt_protocol->header.type =
				cpu_to_le16(TLV_TYPE_UAP_ENCRY_PROTOCOL);
		encrypt_protocol->header.len =
			cpu_to_le16(sizeof(struct host_cmd_tlv_encrypt_protocol) -
				    sizeof(struct mwifiex_ie_types_header));
		encrypt_protocol->proto = cpu_to_le16(bss_cfg->protocol);
		cmd_size += sizeof(struct host_cmd_tlv_encrypt_protocol);
		tlv += sizeof(struct host_cmd_tlv_encrypt_protocol);
	}

	if (bss_cfg->ht_cap.cap_info) {
		htcap = (struct mwifiex_ie_types_htcap *)tlv;
		htcap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
		htcap->header.len = cpu_to_le16(sizeof(struct ieee80211_ht_cap));
		htcap->ht_cap.cap_info = bss_cfg->ht_cap.cap_info;
		htcap->ht_cap.ampdu_params_info =
					bss_cfg->ht_cap.ampdu_params_info;
		memcpy(&htcap->ht_cap.mcs, &bss_cfg->ht_cap.mcs,
		       sizeof(struct ieee80211_mcs_info));
		htcap->ht_cap.extended_ht_cap_info =
					bss_cfg->ht_cap.extended_ht_cap_info;
		htcap->ht_cap.tx_BF_cap_info = bss_cfg->ht_cap.tx_BF_cap_info;
		htcap->ht_cap.antenna_selection_info =
					bss_cfg->ht_cap.antenna_selection_info;
		cmd_size += sizeof(struct mwifiex_ie_types_htcap);
		tlv += sizeof(struct mwifiex_ie_types_htcap);
	}

	if (bss_cfg->wmm_info.qos_info != 0xFF) {
		wmm_cap = (struct mwifiex_ie_types_wmmcap *)tlv;
		wmm_cap->header.type = cpu_to_le16(WLAN_EID_VENDOR_SPECIFIC);
		wmm_cap->header.len = cpu_to_le16(sizeof(wmm_cap->wmm_info));
		memcpy(&wmm_cap->wmm_info, &bss_cfg->wmm_info,
		       sizeof(wmm_cap->wmm_info));
		cmd_size += sizeof(struct mwifiex_ie_types_wmmcap);
		tlv += sizeof(struct mwifiex_ie_types_wmmcap);
	}

	if (bss_cfg->sta_ao_timer) {
		ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
		ao_timer->header.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
		ao_timer->header.len = cpu_to_le16(sizeof(*ao_timer) -
					sizeof(struct mwifiex_ie_types_header));
		ao_timer->sta_ao_timer = cpu_to_le32(bss_cfg->sta_ao_timer);
		cmd_size += sizeof(*ao_timer);
		tlv += sizeof(*ao_timer);
	}

	if (bss_cfg->power_constraint) {
		pwr_ct = (void *)tlv;
		pwr_ct->header.type = cpu_to_le16(TLV_TYPE_PWR_CONSTRAINT);
		pwr_ct->header.len = cpu_to_le16(sizeof(u8));
		pwr_ct->constraint = bss_cfg->power_constraint;
		cmd_size += sizeof(*pwr_ct);
		tlv += sizeof(*pwr_ct);
	}

	if (bss_cfg->ps_sta_ao_timer) {
		ps_ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
		ps_ao_timer->header.type =
				cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER);
		ps_ao_timer->header.len = cpu_to_le16(sizeof(*ps_ao_timer) -
				sizeof(struct mwifiex_ie_types_header));
		ps_ao_timer->sta_ao_timer =
					cpu_to_le32(bss_cfg->ps_sta_ao_timer);
		cmd_size += sizeof(*ps_ao_timer);
		tlv += sizeof(*ps_ao_timer);
	}

	*param_size = cmd_size;

	return 0;
}

/* This function parses custom IEs from IE list and prepares command buffer */
static int mwifiex_uap_custom_ie_prepare(u8 *tlv, void *cmd_buf, u16 *ie_size)
{
	struct mwifiex_ie_list *ap_ie = cmd_buf;
	struct mwifiex_ie_types_header *tlv_ie = (void *)tlv;

	if (!ap_ie || !ap_ie->len)
		return -1;

	*ie_size += le16_to_cpu(ap_ie->len) +
			sizeof(struct mwifiex_ie_types_header);

	tlv_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE);
	tlv_ie->len = ap_ie->len;
	tlv += sizeof(struct mwifiex_ie_types_header);

	memcpy(tlv, ap_ie->ie_list, le16_to_cpu(ap_ie->len));

	return 0;
}

/* Parse AP config structure and prepare TLV based command structure
 * to be sent to FW for uAP configuration
 */
static int
mwifiex_cmd_uap_sys_config(struct host_cmd_ds_command *cmd, u16 cmd_action,
			   u32 type, void *cmd_buf)
{
	u8 *tlv;
	u16 cmd_size, param_size, ie_size;
	struct host_cmd_ds_sys_config *sys_cfg;

	cmd->command = cpu_to_le16(HostCmd_CMD_UAP_SYS_CONFIG);
	cmd_size = (u16)(sizeof(struct host_cmd_ds_sys_config) + S_DS_GEN);
	sys_cfg = (struct host_cmd_ds_sys_config *)&cmd->params.uap_sys_config;
	sys_cfg->action = cpu_to_le16(cmd_action);
	tlv = sys_cfg->tlv;

	switch (type) {
	case UAP_BSS_PARAMS_I:
		param_size = cmd_size;
		if (mwifiex_uap_bss_param_prepare(tlv, cmd_buf, &param_size))
			return -1;
		cmd->size = cpu_to_le16(param_size);
		break;
	case UAP_CUSTOM_IE_I:
		ie_size = cmd_size;
		if (mwifiex_uap_custom_ie_prepare(tlv, cmd_buf, &ie_size))
			return -1;
		cmd->size = cpu_to_le16(ie_size);
		break;
	default:
		return -1;
	}

	return 0;
}

/* This function prepares AP specific deauth command with mac supplied in
 * function parameter.
 */
static int mwifiex_cmd_uap_sta_deauth(struct mwifiex_private *priv,
				      struct host_cmd_ds_command *cmd,
				      u8 *mac)
{
	struct host_cmd_ds_sta_deauth *sta_deauth = &cmd->params.sta_deauth;

	cmd->command = cpu_to_le16(HostCmd_CMD_UAP_STA_DEAUTH);
	memcpy(sta_deauth->mac, mac, ETH_ALEN);
	sta_deauth->reason = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING);

	cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_sta_deauth) +
				S_DS_GEN);
	return 0;
}

/* This function prepares the AP specific commands before sending them
 * to the firmware.
 * This is a generic function which calls specific command preparation
 * routines based upon the command number.
 */
int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
			    u16 cmd_action, u32 type,
			    void *data_buf, void *cmd_buf)
{
	struct host_cmd_ds_command *cmd = cmd_buf;

	switch (cmd_no) {
	case HostCmd_CMD_UAP_SYS_CONFIG:
		if (mwifiex_cmd_uap_sys_config(cmd, cmd_action, type, data_buf))
			return -1;
		break;
	case HostCmd_CMD_UAP_BSS_START:
	case HostCmd_CMD_UAP_BSS_STOP:
	case HOST_CMD_APCMD_SYS_RESET:
	case HOST_CMD_APCMD_STA_LIST:
		cmd->command = cpu_to_le16(cmd_no);
		cmd->size = cpu_to_le16(S_DS_GEN);
		break;
	case HostCmd_CMD_UAP_STA_DEAUTH:
		if (mwifiex_cmd_uap_sta_deauth(priv, cmd, data_buf))
			return -1;
		break;
	case HostCmd_CMD_CHAN_REPORT_REQUEST:
		if (mwifiex_cmd_issue_chan_report_request(priv, cmd_buf,
							  data_buf))
			return -1;
		break;
	default:
		mwifiex_dbg(priv->adapter, ERROR,
			    "PREP_CMD: unknown cmd %#x\n", cmd_no);
		return -1;
	}

	return 0;
}

void mwifiex_uap_set_channel(struct mwifiex_private *priv,
			     struct mwifiex_uap_bss_param *bss_cfg,
			     struct cfg80211_chan_def chandef)
{
	u8 config_bands = 0, old_bands = priv->adapter->config_bands;

	priv->bss_chandef = chandef;

	bss_cfg->channel = ieee80211_frequency_to_channel(
						chandef.chan->center_freq);

	/* Set appropriate bands */
	if (chandef.chan->band == NL80211_BAND_2GHZ) {
		bss_cfg->band_cfg = BAND_CONFIG_BG;
		config_bands = BAND_B | BAND_G;

		if (chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
			config_bands |= BAND_GN;
	} else {
		bss_cfg->band_cfg = BAND_CONFIG_A;
		config_bands = BAND_A;

		if (chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
			config_bands |= BAND_AN;

		if (chandef.width > NL80211_CHAN_WIDTH_40)
			config_bands |= BAND_AAC;
	}

	switch (chandef.width) {
	case NL80211_CHAN_WIDTH_5:
	case NL80211_CHAN_WIDTH_10:
	case NL80211_CHAN_WIDTH_20_NOHT:
	case NL80211_CHAN_WIDTH_20:
		break;
	case NL80211_CHAN_WIDTH_40:
		if (chandef.center_freq1 < chandef.chan->center_freq)
			bss_cfg->band_cfg |= MWIFIEX_SEC_CHAN_BELOW;
		else
			bss_cfg->band_cfg |= MWIFIEX_SEC_CHAN_ABOVE;
		break;
	case NL80211_CHAN_WIDTH_80:
	case NL80211_CHAN_WIDTH_80P80:
	case NL80211_CHAN_WIDTH_160:
		bss_cfg->band_cfg |=
		    mwifiex_get_sec_chan_offset(bss_cfg->channel) << 4;
		break;
	default:
		mwifiex_dbg(priv->adapter, WARN,
			    "Unknown channel width: %d\n", chandef.width);
		break;
	}

	priv->adapter->config_bands = config_bands;

	if (old_bands != config_bands) {
		mwifiex_send_domain_info_cmd_fw(priv->adapter->wiphy);
		mwifiex_dnld_txpwr_table(priv);
	}
}

int mwifiex_config_start_uap(struct mwifiex_private *priv,
			     struct mwifiex_uap_bss_param *bss_cfg)
{
	enum state_11d_t state_11d;

	if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
			     HostCmd_ACT_GEN_SET,
			     UAP_BSS_PARAMS_I, bss_cfg, true)) {
		mwifiex_dbg(priv->adapter, ERROR,
			    "Failed to set AP configuration\n");
		return -1;
	}

	/* Send cmd to FW to enable 11D function */
	state_11d = ENABLE_11D;
	if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
			     HostCmd_ACT_GEN_SET, DOT11D_I,
			     &state_11d, true)) {
		mwifiex_dbg(priv->adapter, ERROR,
			    "11D: failed to enable 11D\n");
		return -1;
	}

	if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
			     HostCmd_ACT_GEN_SET, 0, NULL, true)) {
		mwifiex_dbg(priv->adapter, ERROR,
			    "Failed to start the BSS\n");
		return -1;
	}

	if (priv->sec_info.wep_enabled)
		priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
	else
		priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;

	if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
			     HostCmd_ACT_GEN_SET, 0,
			     &priv->curr_pkt_filter, true))
		return -1;

	return 0;
}
null
null
null
null
94,557
23,434
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
23,434
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CONTENT_BROWSER_INDEXED_DB_INDEXED_DB_DATABASE_ERROR_H_
#define CONTENT_BROWSER_INDEXED_DB_INDEXED_DB_DATABASE_ERROR_H_

#include <stdint.h>

#include "base/strings/string16.h"
#include "base/strings/utf_string_conversions.h"
#include "content/common/content_export.h"

namespace content {

class CONTENT_EXPORT IndexedDBDatabaseError {
 public:
  IndexedDBDatabaseError();
  explicit IndexedDBDatabaseError(uint16_t code);
  IndexedDBDatabaseError(uint16_t code, const char* message);
  IndexedDBDatabaseError(uint16_t code, const base::string16& message);
  ~IndexedDBDatabaseError();

  IndexedDBDatabaseError& operator=(const IndexedDBDatabaseError& rhs);

  uint16_t code() const { return code_; }
  const base::string16& message() const { return message_; }

 private:
  uint16_t code_ = 0;
  base::string16 message_;
};

}  // namespace content

#endif  // CONTENT_BROWSER_INDEXED_DB_INDEXED_DB_DATABASE_ERROR_H_
null
null
null
null
20,297
62,777
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
62,777
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/ui/views/chrome_views_delegate.h"

#include "base/environment.h"
#include "base/nix/xdg_util.h"
#include "chrome/browser/ui/views/native_widget_factory.h"
#include "chrome/common/channel_info.h"
#include "chrome/grit/chrome_unscaled_resources.h"
#include "components/version_info/channel.h"
#include "ui/base/resource/resource_bundle.h"
#include "ui/views/linux_ui/linux_ui.h"

namespace {

bool IsDesktopEnvironmentUnity() {
  std::unique_ptr<base::Environment> env(base::Environment::Create());
  base::nix::DesktopEnvironment desktop_env =
      base::nix::GetDesktopEnvironment(env.get());
  return desktop_env == base::nix::DESKTOP_ENVIRONMENT_UNITY;
}

int GetWindowIconResourceId() {
#if defined(GOOGLE_CHROME_BUILD)
  switch (chrome::GetChannel()) {
    case version_info::Channel::DEV:
      return IDR_PRODUCT_LOGO_128_DEV;
    case version_info::Channel::BETA:
      return IDR_PRODUCT_LOGO_128_BETA;
    default:
      break;
  }
#endif
  return IDR_PRODUCT_LOGO_128;
}

}  // namespace

views::NativeWidget* ChromeViewsDelegate::CreateNativeWidget(
    views::Widget::InitParams* params,
    views::internal::NativeWidgetDelegate* delegate) {
  NativeWidgetType native_widget_type =
      (params->parent &&
       params->type != views::Widget::InitParams::TYPE_MENU &&
       params->type != views::Widget::InitParams::TYPE_TOOLTIP)
          ? NativeWidgetType::NATIVE_WIDGET_AURA
          : NativeWidgetType::DESKTOP_NATIVE_WIDGET_AURA;
  return ::CreateNativeWidget(native_widget_type, params, delegate);
}

gfx::ImageSkia* ChromeViewsDelegate::GetDefaultWindowIcon() const {
  ui::ResourceBundle& rb = ui::ResourceBundle::GetSharedInstance();
  return rb.GetImageSkiaNamed(GetWindowIconResourceId());
}

bool ChromeViewsDelegate::WindowManagerProvidesTitleBar(bool maximized) {
  // On Ubuntu Unity, the system always provides a title bar for
  // maximized windows.
  //
  // TODO(thomasanderson,crbug.com/784010): Consider using the
  // _UNITY_SHELL wm hint when support for Ubuntu Trusty is dropped.
  if (!maximized)
    return false;
  static bool is_desktop_environment_unity = IsDesktopEnvironmentUnity();
  return is_desktop_environment_unity;
}
null
null
null
null
59,640
71,388
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
71,388
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef SERVICES_SERVICE_MANAGER_EMBEDDER_SWITCHES_H_
#define SERVICES_SERVICE_MANAGER_EMBEDDER_SWITCHES_H_

#include "build/build_config.h"
#include "services/service_manager/embedder/service_manager_embedder_switches_export.h"

namespace service_manager {
namespace switches {

#if defined(OS_WIN)
SERVICE_MANAGER_EMBEDDER_SWITCHES_EXPORT
extern const char kDefaultServicePrefetchArgument[];
#endif  // defined(OS_WIN)

SERVICE_MANAGER_EMBEDDER_SWITCHES_EXPORT
extern const char kDisableInProcessStackTraces[];

SERVICE_MANAGER_EMBEDDER_SWITCHES_EXPORT
extern const char kEnableLogging[];

SERVICE_MANAGER_EMBEDDER_SWITCHES_EXPORT
extern const char kProcessType[];

SERVICE_MANAGER_EMBEDDER_SWITCHES_EXPORT
extern const char kProcessTypeServiceManager[];

SERVICE_MANAGER_EMBEDDER_SWITCHES_EXPORT
extern const char kProcessTypeService[];

SERVICE_MANAGER_EMBEDDER_SWITCHES_EXPORT
extern const char kSharedFiles[];

}  // namespace switches
}  // namespace service_manager

#endif  // SERVICES_SERVICE_MANAGER_EMBEDDER_SWITCHES_H_
null
null
null
null
68,251
9,759
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
9,759
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chromecast/base/component/component.h"

#include <memory>

#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "base/threading/thread_task_runner_handle.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace chromecast {

class ComponentTest : public ::testing::Test {
 protected:
  ComponentTest() : message_loop_(new base::MessageLoop()) {}

  const std::unique_ptr<base::MessageLoop> message_loop_;
};

using ComponentDeathTest = ComponentTest;

class ComponentB;
class ComponentC;

class ComponentA : public Component<ComponentA> {
 public:
  void MakeSelfDependency() {
    a_.reset(new Component<ComponentA>::Dependency(GetRef(), this));
  }

  void MakeCircularDependency(const Component<ComponentB>::WeakRef& b) {
    b_.reset(new Component<ComponentB>::Dependency(b, this));
  }

  void MakeTransitiveCircularDependency(
      const Component<ComponentC>::WeakRef& c) {
    c_.reset(new Component<ComponentC>::Dependency(c, this));
  }

  void OnEnable() override {
    if (!fail_enable_) {
      enabled_ = true;
      Test();
    }
    base::ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE, base::BindOnce(&ComponentA::OnEnableComplete,
                                  base::Unretained(this), !fail_enable_));
  }

  void OnDisable() override {
    if (enabled_)
      Test();
    enabled_ = false;
    base::ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE, base::BindOnce(&ComponentA::OnDisableComplete,
                                  base::Unretained(this)));
  }

  void Test() {
    EXPECT_TRUE(enabled_);
    EXPECT_FALSE(fail_enable_);
  }

  bool enabled() const { return enabled_; }
  void FailEnable() { fail_enable_ = true; }

 private:
  bool enabled_ = false;
  bool fail_enable_ = false;
  std::unique_ptr<Component<ComponentA>::Dependency> a_;
  std::unique_ptr<Component<ComponentB>::Dependency> b_;
  std::unique_ptr<Component<ComponentC>::Dependency> c_;
};

class ComponentB : public Component<ComponentB> {
 public:
  explicit ComponentB(const ComponentA::WeakRef& a) : a_(a, this) {}

  void OnEnable() override {
    if (!fail_enable_) {
      enabled_ = true;
      Test();
    }
    base::ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE, base::BindOnce(&ComponentB::OnEnableComplete,
                                  base::Unretained(this), !fail_enable_));
  }

  void OnDisable() override {
    if (enabled_)
      Test();
    enabled_ = false;
    base::ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE, base::BindOnce(&ComponentB::OnDisableComplete,
                                  base::Unretained(this)));
  }

  void Test() {
    EXPECT_TRUE(enabled_);
    EXPECT_FALSE(fail_enable_);
    a_->Test();
  }

  bool enabled() const { return enabled_; }
  void FailEnable() { fail_enable_ = true; }

 private:
  bool enabled_ = false;
  bool fail_enable_ = false;
  ComponentA::Dependency a_;
};

class ComponentC : public Component<ComponentC> {
 public:
  explicit ComponentC(const ComponentB::WeakRef& b) : b_(b, this) {}

  void OnEnable() override {
    if (!fail_enable_) {
      enabled_ = true;
      Test();
    }
    base::ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE, base::BindOnce(&ComponentC::OnEnableComplete,
                                  base::Unretained(this), !fail_enable_));
  }

  void OnDisable() override {
    if (enabled_)
      Test();
    enabled_ = false;
    base::ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE, base::BindOnce(&ComponentC::OnDisableComplete,
                                  base::Unretained(this)));
  }

  void Test() {
    EXPECT_TRUE(enabled_);
    EXPECT_FALSE(fail_enable_);
    b_->Test();
  }

  bool enabled() const { return enabled_; }
  void FailEnable() { fail_enable_ = true; }

 private:
  bool enabled_ = false;
  bool fail_enable_ = false;
  ComponentB::Dependency b_;
};

#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
TEST_F(ComponentDeathTest, SelfDependency) {
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  ComponentA a;
  EXPECT_DEATH(a.MakeSelfDependency(), "Circular dependency");
}

TEST_F(ComponentDeathTest, CircularDependency) {
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  ComponentA a;
  ComponentB b(a.GetRef());
  EXPECT_DEATH(a.MakeCircularDependency(b.GetRef()), "Circular dependency");
}

TEST_F(ComponentDeathTest, TransitiveCircularDependency) {
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  ComponentA a;
  ComponentB b(a.GetRef());
  ComponentC c(b.GetRef());
  EXPECT_DEATH(a.MakeTransitiveCircularDependency(c.GetRef()),
               "Circular dependency");
}
#endif  // (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) &&
        // GTEST_HAS_DEATH_TEST

TEST_F(ComponentTest, SimpleEnable) {
  std::unique_ptr<ComponentA> a(new ComponentA());
  a->Enable();
  base::RunLoop().RunUntilIdle();
  EXPECT_TRUE(a->enabled());
  a.release()->Destroy();
}

TEST_F(ComponentTest, TransitiveEnable) {
  std::unique_ptr<ComponentA> a(new ComponentA());
  std::unique_ptr<ComponentB> b(new ComponentB(a->GetRef()));
  std::unique_ptr<ComponentC> c(new ComponentC(b->GetRef()));
  c->Enable();
  base::RunLoop().RunUntilIdle();
  EXPECT_TRUE(a->enabled());
  EXPECT_TRUE(b->enabled());
  EXPECT_TRUE(c->enabled());
  a.release()->Destroy();
  b.release()->Destroy();
  c.release()->Destroy();
}

TEST_F(ComponentTest, FailEnable) {
  std::unique_ptr<ComponentA> a(new ComponentA());
  a->FailEnable();
  a->Enable();
  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(a->enabled());
  a.release()->Destroy();
}

TEST_F(ComponentTest, TransitiveFailEnable) {
  std::unique_ptr<ComponentA> a(new ComponentA());
  std::unique_ptr<ComponentB> b(new ComponentB(a->GetRef()));
  std::unique_ptr<ComponentC> c(new ComponentC(b->GetRef()));
  a->FailEnable();
  c->Enable();
  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(a->enabled());
  EXPECT_FALSE(b->enabled());
  EXPECT_FALSE(c->enabled());
  a.release()->Destroy();
  b.release()->Destroy();
  c.release()->Destroy();
}

TEST_F(ComponentTest, DisableWhileEnabling) {
  std::unique_ptr<ComponentA> a(new ComponentA());
  a->Enable();
  a->Disable();
  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(a->enabled());
  a.release()->Destroy();
}

TEST_F(ComponentTest, EnableTwice) {
  std::unique_ptr<ComponentA> a(new ComponentA());
  a->Enable();
  a->Enable();
  base::RunLoop().RunUntilIdle();
  EXPECT_TRUE(a->enabled());
  a.release()->Destroy();
}

TEST_F(ComponentTest, DisableTwice) {
  std::unique_ptr<ComponentA> a(new ComponentA());
  a->Enable();
  base::RunLoop().RunUntilIdle();
  EXPECT_TRUE(a->enabled());
  a->Disable();
  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(a->enabled());
  a->Disable();
  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(a->enabled());
  a.release()->Destroy();
}

TEST_F(ComponentTest, DisableAfterFailedEnable) {
  std::unique_ptr<ComponentA> a(new ComponentA());
  a->FailEnable();
  a->Enable();
  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(a->enabled());
  a->Disable();
  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(a->enabled());
  a.release()->Destroy();
}

TEST_F(ComponentTest, DisableAfterNeverEnabled) {
  std::unique_ptr<ComponentA> a(new ComponentA());
  a->Disable();
  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(a->enabled());
  a.release()->Destroy();
}

TEST_F(ComponentTest, DisableDependencyWhileEnabling) {
  std::unique_ptr<ComponentA> a(new ComponentA());
  std::unique_ptr<ComponentB> b(new ComponentB(a->GetRef()));
  std::unique_ptr<ComponentC> c(new ComponentC(b->GetRef()));
  b->Enable();
  base::RunLoop().RunUntilIdle();
  c->Enable();
  a->Disable();
  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(a->enabled());
  EXPECT_FALSE(b->enabled());
  EXPECT_FALSE(c->enabled());
  a.release()->Destroy();
  b.release()->Destroy();
  c.release()->Destroy();
}

TEST_F(ComponentTest, EnableDisableEnable) {
  std::unique_ptr<ComponentA> a(new ComponentA());
  a->Enable();
  a->Disable();
  a->Enable();
  base::RunLoop().RunUntilIdle();
  EXPECT_TRUE(a->enabled());
  a.release()->Destroy();
}

TEST_F(ComponentTest, DisableEnableDisable) {
  std::unique_ptr<ComponentA> a(new ComponentA());
  a->Enable();
  base::RunLoop().RunUntilIdle();
  EXPECT_TRUE(a->enabled());
  a->Disable();
  a->Enable();
  a->Disable();
  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(a->enabled());
  a.release()->Destroy();
}

TEST_F(ComponentTest, TransitiveEnableDisableEnable) {
  std::unique_ptr<ComponentA> a(new ComponentA());
  std::unique_ptr<ComponentB> b(new ComponentB(a->GetRef()));
  std::unique_ptr<ComponentC> c(new ComponentC(b->GetRef()));
  a->Enable();
  base::RunLoop().RunUntilIdle();
  c->Enable();
  a->Disable();
  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(a->enabled());
  EXPECT_FALSE(b->enabled());
  EXPECT_FALSE(c->enabled());
  c->Enable();
  base::RunLoop().RunUntilIdle();
  EXPECT_TRUE(a->enabled());
  EXPECT_TRUE(b->enabled());
  EXPECT_TRUE(c->enabled());
  a.release()->Destroy();
  b.release()->Destroy();
  c.release()->Destroy();
}

TEST_F(ComponentTest, WeakRefs) {
  std::unique_ptr<ComponentA> a(new ComponentA());
  ComponentA::WeakRef weak = a->GetRef();
  EXPECT_FALSE(weak.Try());
  a->Enable();
  EXPECT_FALSE(weak.Try());
  base::RunLoop().RunUntilIdle();
  EXPECT_TRUE(weak.Try());
  weak.Try()->Test();
  a->Disable();
  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(weak.Try());
  a.release()->Destroy();
}

TEST_F(ComponentTest, WeakRefsKeepEnabled) {
  std::unique_ptr<ComponentA> a(new ComponentA());
  ComponentA::WeakRef weak = a->GetRef();
  EXPECT_FALSE(weak.Try());
  a->Enable();
  EXPECT_FALSE(weak.Try());
  base::RunLoop().RunUntilIdle();
  {
    auto held_ref = weak.Try();
    EXPECT_TRUE(held_ref);
    held_ref->Test();
    a->Disable();
    base::RunLoop().RunUntilIdle();
    // The held ref keeps |a| enabled until it goes out of scope.
    EXPECT_TRUE(a->enabled());
  }
  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(a->enabled());
  EXPECT_FALSE(weak.Try());
  a.release()->Destroy();
}

}  // namespace chromecast
null
null
null
null
6,622
35,215
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
35,215
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/blink/renderer/core/exported/worker_shadow_page.h"

#include "third_party/blink/public/mojom/page/page_visibility_state.mojom-blink.h"
#include "third_party/blink/public/platform/platform.h"
#include "third_party/blink/public/web/web_settings.h"
#include "third_party/blink/renderer/core/exported/web_view_impl.h"
#include "third_party/blink/renderer/core/frame/csp/content_security_policy.h"
#include "third_party/blink/renderer/core/frame/settings.h"
#include "third_party/blink/renderer/core/loader/frame_load_request.h"
#include "third_party/blink/renderer/platform/loader/fetch/substitute_data.h"

namespace blink {

WorkerShadowPage::WorkerShadowPage(Client* client)
    : client_(client),
      web_view_(WebViewImpl::Create(nullptr,
                                    mojom::PageVisibilityState::kVisible,
                                    nullptr)),
      main_frame_(WebLocalFrameImpl::CreateMainFrame(web_view_,
                                                     this,
                                                     nullptr,
                                                     nullptr,
                                                     g_empty_atom,
                                                     WebSandboxFlags::kNone)) {
  DCHECK(IsMainThread());

  // TODO(http://crbug.com/363843): This needs to find a better way to
  // not create graphics layers.
  web_view_->GetSettings()->SetAcceleratedCompositingEnabled(false);

  // TODO(lunalu): Service worker and shared worker count feature usage on the
  // blink side use counter. Once the blink side use counter is removed
  // (crbug.com/811948), remove this instant from Settings.
  main_frame_->GetFrame()->GetSettings()->SetIsShadowPage(true);

  main_frame_->SetDevToolsAgentImpl(
      WebDevToolsAgentImpl::CreateForWorker(main_frame_, client_));
}

WorkerShadowPage::~WorkerShadowPage() {
  DCHECK(IsMainThread());
  // Detach the client before closing the view to avoid getting called back.
  main_frame_->SetClient(nullptr);
  web_view_->Close();
  main_frame_->Close();
}

void WorkerShadowPage::Initialize(const KURL& script_url) {
  DCHECK(IsMainThread());
  AdvanceState(State::kInitializing);

  // Construct substitute data source. We only need it to have same origin as
  // the worker so the loading checks work correctly.
  CString content("");
  scoped_refptr<SharedBuffer> buffer(
      SharedBuffer::Create(content.data(), content.length()));
  main_frame_->GetFrame()->Loader().Load(FrameLoadRequest(
      nullptr, ResourceRequest(script_url), SubstituteData(buffer)));
}

void WorkerShadowPage::SetContentSecurityPolicyAndReferrerPolicy(
    ContentSecurityPolicy* content_security_policy,
    String referrer_policy) {
  DCHECK(IsMainThread());
  content_security_policy->SetOverrideURLForSelf(GetDocument()->Url());
  GetDocument()->InitContentSecurityPolicy(content_security_policy);
  if (!referrer_policy.IsNull())
    GetDocument()->ParseAndSetReferrerPolicy(referrer_policy);
}

void WorkerShadowPage::DidFinishDocumentLoad() {
  DCHECK(IsMainThread());
  AdvanceState(State::kInitialized);
  client_->OnShadowPageInitialized();
}

std::unique_ptr<WebApplicationCacheHost>
WorkerShadowPage::CreateApplicationCacheHost(
    WebApplicationCacheHostClient* appcache_host_client) {
  DCHECK(IsMainThread());
  return client_->CreateApplicationCacheHost(appcache_host_client);
}

std::unique_ptr<blink::WebURLLoaderFactory>
WorkerShadowPage::CreateURLLoaderFactory() {
  DCHECK(IsMainThread());
  return Platform::Current()->CreateDefaultURLLoaderFactory();
}

base::UnguessableToken WorkerShadowPage::GetDevToolsFrameToken() {
  // TODO(dgozman): instrumentation token will have to be passed directly to
  // DevTools once we stop using a frame for workers. Currently, we rely on
  // the frame's instrumentation token to match the worker.
  return client_->GetDevToolsWorkerToken();
}

bool WorkerShadowPage::WasInitialized() const {
  return state_ == State::kInitialized;
}

void WorkerShadowPage::AdvanceState(State new_state) {
  switch (new_state) {
    case State::kUninitialized:
      NOTREACHED();
      return;
    case State::kInitializing:
      DCHECK_EQ(State::kUninitialized, state_);
      state_ = new_state;
      return;
    case State::kInitialized:
      DCHECK_EQ(State::kInitializing, state_);
      state_ = new_state;
      return;
  }
}

void WorkerShadowPage::BindDevToolsAgent(
    mojom::blink::DevToolsAgentAssociatedRequest request) {
  main_frame_->DevToolsAgentImpl()->BindRequest(std::move(request));
}

}  // namespace blink
null
null
null
null
32,078
34,822
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
34,822
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/blink/renderer/core/html/forms/file_input_type.h"

#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/renderer/core/clipboard/data_object.h"
#include "third_party/blink/renderer/core/dom/document.h"
#include "third_party/blink/renderer/core/fileapi/file_list.h"
#include "third_party/blink/renderer/core/html/forms/html_input_element.h"
#include "third_party/blink/renderer/core/html_names.h"
#include "third_party/blink/renderer/core/page/drag_data.h"
#include "third_party/blink/renderer/platform/wtf/date_math.h"

namespace blink {

TEST(FileInputTypeTest, createFileList) {
  Vector<FileChooserFileInfo> files;

  // Native file.
  files.push_back(
      FileChooserFileInfo("/native/path/native-file", "display-name"));

  // Non-native file.
  KURL url("filesystem:http://example.com/isolated/hash/non-native-file");
  FileMetadata metadata;
  metadata.length = 64;
  metadata.modification_time = 1.0 * kMsPerDay + 3;
  files.push_back(FileChooserFileInfo(url, metadata));

  FileList* list = FileInputType::CreateFileList(files, false);
  ASSERT_TRUE(list);
  ASSERT_EQ(2u, list->length());

  EXPECT_EQ("/native/path/native-file", list->item(0)->GetPath());
  EXPECT_EQ("display-name", list->item(0)->name());
  EXPECT_TRUE(list->item(0)->FileSystemURL().IsEmpty());

  EXPECT_TRUE(list->item(1)->GetPath().IsEmpty());
  EXPECT_EQ("non-native-file", list->item(1)->name());
  EXPECT_EQ(url, list->item(1)->FileSystemURL());
  EXPECT_EQ(64u, list->item(1)->size());
  EXPECT_EQ(1.0 * kMsPerDay + 3, list->item(1)->lastModified());
}

TEST(FileInputTypeTest, ignoreDroppedNonNativeFiles) {
  Document* document = Document::CreateForTest();
  auto* input = HTMLInputElement::Create(*document, CreateElementFlags());
  InputType* file_input = FileInputType::Create(*input);

  DataObject* native_file_raw_drag_data = DataObject::Create();
  const DragData native_file_drag_data(native_file_raw_drag_data, IntPoint(),
                                       IntPoint(), kDragOperationCopy);
  native_file_drag_data.PlatformData()->Add(File::Create("/native/path"));
  native_file_drag_data.PlatformData()->SetFilesystemId("fileSystemId");
  file_input->ReceiveDroppedFiles(&native_file_drag_data);
  EXPECT_EQ("fileSystemId", file_input->DroppedFileSystemId());
  ASSERT_EQ(1u, file_input->Files()->length());
  EXPECT_EQ(String("/native/path"), file_input->Files()->item(0)->GetPath());

  DataObject* non_native_file_raw_drag_data = DataObject::Create();
  const DragData non_native_file_drag_data(non_native_file_raw_drag_data,
                                           IntPoint(), IntPoint(),
                                           kDragOperationCopy);
  FileMetadata metadata;
  metadata.length = 1234;
  const KURL url(
      "filesystem:http://example.com/isolated/hash/non-native-file");
  non_native_file_drag_data.PlatformData()->Add(
      File::CreateForFileSystemFile(url, metadata, File::kIsUserVisible));
  non_native_file_drag_data.PlatformData()->SetFilesystemId("fileSystemId");
  file_input->ReceiveDroppedFiles(&non_native_file_drag_data);
  // Dropping non-native files should not change the existing files.
  EXPECT_EQ("fileSystemId", file_input->DroppedFileSystemId());
  ASSERT_EQ(1u, file_input->Files()->length());
  EXPECT_EQ(String("/native/path"), file_input->Files()->item(0)->GetPath());
}

TEST(FileInputTypeTest, setFilesFromPaths) {
  Document* document = Document::CreateForTest();
  auto* input = HTMLInputElement::Create(*document, CreateElementFlags());
  InputType* file_input = FileInputType::Create(*input);
  Vector<String> paths;
  paths.push_back("/native/path");
  paths.push_back("/native/path2");
  file_input->SetFilesFromPaths(paths);
  ASSERT_EQ(1u, file_input->Files()->length());
  EXPECT_EQ(String("/native/path"), file_input->Files()->item(0)->GetPath());

  // Try to upload multiple files without multipleAttr
  paths.clear();
  paths.push_back("/native/path1");
  paths.push_back("/native/path2");
  file_input->SetFilesFromPaths(paths);
  ASSERT_EQ(1u, file_input->Files()->length());
  EXPECT_EQ(String("/native/path1"), file_input->Files()->item(0)->GetPath());

  // Try to upload multiple files with multipleAttr
  input->SetBooleanAttribute(HTMLNames::multipleAttr, true);
  paths.clear();
  paths.push_back("/native/real/path1");
  paths.push_back("/native/real/path2");
  file_input->SetFilesFromPaths(paths);
  ASSERT_EQ(2u, file_input->Files()->length());
  EXPECT_EQ(String("/native/real/path1"),
            file_input->Files()->item(0)->GetPath());
  EXPECT_EQ(String("/native/real/path2"),
            file_input->Files()->item(1)->GetPath());
}

}  // namespace blink
null
null
null
null
31,685
16,886
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
16,886
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/viz/service/display/draw_polygon.h"

#include <stddef.h>

#include <vector>

#include "base/memory/ptr_util.h"
#include "build/build_config.h"
#include "cc/base/math_util.h"
#include "components/viz/common/quads/draw_quad.h"

namespace {
// This threshold controls how "thick" a plane is. If a point's distance is
// <= |split_threshold|, then it is considered on the plane for the purpose of
// polygon splitting.
static const float split_threshold = 0.05f;

static const float normalized_threshold = 0.001f;

void PointInterpolate(const gfx::Point3F& from,
                      const gfx::Point3F& to,
                      double delta,
                      gfx::Point3F* out) {
  out->SetPoint(from.x() + (to.x() - from.x()) * delta,
                from.y() + (to.y() - from.y()) * delta,
                from.z() + (to.z() - from.z()) * delta);
}
}  // namespace

namespace viz {

DrawPolygon::DrawPolygon() = default;

DrawPolygon::DrawPolygon(const DrawQuad* original,
                         const std::vector<gfx::Point3F>& in_points,
                         const gfx::Vector3dF& normal,
                         int draw_order_index)
    : normal_(normal),
      order_index_(draw_order_index),
      original_ref_(original),
      is_split_(true) {
  for (size_t i = 0; i < in_points.size(); i++) {
    points_.push_back(in_points[i]);
  }
  // If life was fair, we could recalculate the normal from the given points
  // and assert it was roughly the same. This causes unhelpful breaks on
  // trivial slices of split polygons. Similarly, when splitting, it is
  // better to keep the normal that was constructed from the original.
}

// This takes the original DrawQuad that this polygon should be based on,
// a visible content rect to make the 4 corner points from, and a transformation
// to move it and its normal into screen space.
DrawPolygon::DrawPolygon(const DrawQuad* original_ref,
                         const gfx::RectF& visible_layer_rect,
                         const gfx::Transform& transform,
                         int draw_order_index)
    : normal_(0.0f, 0.0f, 1.0f),
      order_index_(draw_order_index),
      original_ref_(original_ref),
      is_split_(false) {
  gfx::Point3F points[6];
  int num_vertices_in_clipped_quad;
  gfx::QuadF send_quad(visible_layer_rect);

  // Doing this mapping here is very important, since we can't just transform
  // the points without clipping and not run into strange geometry issues when
  // crossing w = 0. At this point, in the constructor, we know that we're
  // working with a quad, so we can reuse the MathUtil::MapClippedQuad3d
  // function instead of writing a generic polygon version of it.
  cc::MathUtil::MapClippedQuad3d(transform, send_quad, points,
                                 &num_vertices_in_clipped_quad);
  for (int i = 0; i < num_vertices_in_clipped_quad; i++) {
    points_.push_back(points[i]);
  }
  transform.TransformVector(&normal_);
  ConstructNormal();
}

DrawPolygon::~DrawPolygon() = default;

std::unique_ptr<DrawPolygon> DrawPolygon::CreateCopy() {
  std::unique_ptr<DrawPolygon> new_polygon(new DrawPolygon());
  new_polygon->order_index_ = order_index_;
  new_polygon->original_ref_ = original_ref_;
  new_polygon->points_.reserve(points_.size());
  new_polygon->points_ = points_;
  new_polygon->normal_.set_x(normal_.x());
  new_polygon->normal_.set_y(normal_.y());
  new_polygon->normal_.set_z(normal_.z());
  return new_polygon;
}

//
// If this were to be more generally used and expected to be applicable
// replacing this with Newell's algorithm (or an improvement thereof)
// would be preferable, but usually this is coming in from a rectangle
// that has been transformed to screen space and clipped.
// Averaging a few near diagonal cross products is pretty good in that case.
//
void DrawPolygon::ConstructNormal() {
  gfx::Vector3dF new_normal(0.0f, 0.0f, 0.0f);
  int delta = points_.size() / 2;
  for (size_t i = 1; i + delta < points_.size(); i++) {
    new_normal +=
        CrossProduct(points_[i] - points_[0], points_[i + delta] - points_[0]);
  }
  float normal_magnitude = new_normal.Length();
  // Here we constrain the new normal to point in the same sense as the old one.
  // This allows us to handle winding-reversing transforms better.
  if (gfx::DotProduct(normal_, new_normal) < 0.0) {
    normal_magnitude *= -1.0;
  }
  if (normal_magnitude != 0 && normal_magnitude != 1) {
    new_normal.Scale(1.0f / normal_magnitude);
  }
  normal_ = new_normal;
}

#if defined(OS_WIN)
//
// Allows the unittest to invoke this for the more general constructor.
//
void DrawPolygon::RecomputeNormalForTesting() {
  ConstructNormal();
}
#endif

float DrawPolygon::SignedPointDistance(const gfx::Point3F& point) const {
  return gfx::DotProduct(point - points_[0], normal_);
}

// This function is separate from ApplyTransform because it is often unnecessary
// to transform the normal with the rest of the polygon.
// When drawing these polygons, it is necessary to move them back into layer
// space before sending them to OpenGL, which requires using ApplyTransform,
// but normal information is no longer needed after sorting.
void DrawPolygon::ApplyTransformToNormal(const gfx::Transform& transform) {
  // Now we use the inverse transpose of |transform| to transform the normal.
  gfx::Transform inverse_transform;
  bool inverted = transform.GetInverse(&inverse_transform);
  DCHECK(inverted);
  if (!inverted)
    return;
  inverse_transform.Transpose();

  gfx::Point3F new_normal(normal_.x(), normal_.y(), normal_.z());
  inverse_transform.TransformPoint(&new_normal);

  // Make sure our normal is still normalized.
  normal_ = gfx::Vector3dF(new_normal.x(), new_normal.y(), new_normal.z());
  float normal_magnitude = normal_.Length();
  if (normal_magnitude != 0 && normal_magnitude != 1) {
    normal_.Scale(1.0f / normal_magnitude);
  }
}

void DrawPolygon::ApplyTransform(const gfx::Transform& transform) {
  for (size_t i = 0; i < points_.size(); i++) {
    transform.TransformPoint(&points_[i]);
  }
}

// TransformToScreenSpace assumes we're moving a layer from its layer space
// into 3D screen space, which for sorting purposes requires the normal to
// be transformed along with the vertices.
void DrawPolygon::TransformToScreenSpace(const gfx::Transform& transform) {
  ApplyTransform(transform);
  transform.TransformVector(&normal_);
  ConstructNormal();
}

// In the case of TransformToLayerSpace, we assume that we are giving the
// inverse transformation back to the polygon to move it back into layer space
// but we can ignore the costly process of applying the inverse to the normal
// since we know the normal will just reset to its original state.
void DrawPolygon::TransformToLayerSpace(
    const gfx::Transform& inverse_transform) {
  ApplyTransform(inverse_transform);
  normal_ = gfx::Vector3dF(0.0f, 0.0f, -1.0f);
}

// Split |polygon| based upon |this|, leaving the results in |front| and |back|.
// If |polygon| is not split by |this|, then move it to either |front| or |back|
// depending on its orientation relative to |this|. Sets |is_coplanar| to true
// if |polygon| is actually coplanar with |this| (in which case whether it is
// front facing or back facing is determined by the dot products of normals, and
// document order).
void DrawPolygon::SplitPolygon(std::unique_ptr<DrawPolygon> polygon,
                               std::unique_ptr<DrawPolygon>* front,
                               std::unique_ptr<DrawPolygon>* back,
                               bool* is_coplanar) const {
  DCHECK_GE(normalized_threshold, std::abs(normal_.LengthSquared() - 1.0f));

  const size_t num_points = polygon->points_.size();
  const auto next = [num_points](size_t i) { return (i + 1) % num_points; };
  const auto prev = [num_points](size_t i) {
    return (i + num_points - 1) % num_points;
  };

  std::vector<float> vertex_distance;
  size_t pos_count = 0;
  size_t neg_count = 0;

  // Compute plane distances for each vertex of polygon.
  vertex_distance.resize(num_points);
  for (size_t i = 0; i < num_points; i++) {
    vertex_distance[i] = SignedPointDistance(polygon->points_[i]);
    if (vertex_distance[i] < -split_threshold) {
      ++neg_count;
    } else if (vertex_distance[i] > split_threshold) {
      ++pos_count;
    } else {
      vertex_distance[i] = 0.0;
    }
  }

  // Handle non-splitting cases.
  if (!pos_count && !neg_count) {
    double dot = gfx::DotProduct(normal_, polygon->normal_);
    if ((dot >= 0.0f && polygon->order_index_ >= order_index_) ||
        (dot <= 0.0f && polygon->order_index_ <= order_index_)) {
      *back = std::move(polygon);
    } else {
      *front = std::move(polygon);
    }
    *is_coplanar = true;
    return;
  }

  *is_coplanar = false;
  if (!neg_count) {
    *front = std::move(polygon);
    return;
  } else if (!pos_count) {
    *back = std::move(polygon);
    return;
  }

  // Handle splitting case.
  size_t front_begin;
  size_t back_begin;
  size_t pre_front_begin;
  size_t pre_back_begin;

  // Find the first vertex that is part of the front split polygon.
  front_begin = std::find_if(vertex_distance.begin(), vertex_distance.end(),
                             [](float val) { return val > 0.0; }) -
                vertex_distance.begin();
  while (vertex_distance[pre_front_begin = prev(front_begin)] > 0.0)
    front_begin = pre_front_begin;

  // Find the first vertex that is part of the back split polygon.
  back_begin = std::find_if(vertex_distance.begin(), vertex_distance.end(),
                            [](float val) { return val < 0.0; }) -
               vertex_distance.begin();
  while (vertex_distance[pre_back_begin = prev(back_begin)] < 0.0)
    back_begin = pre_back_begin;

  DCHECK(vertex_distance[front_begin] > 0.0);
  DCHECK(vertex_distance[pre_front_begin] <= 0.0);
  DCHECK(vertex_distance[back_begin] < 0.0);
  DCHECK(vertex_distance[pre_back_begin] >= 0.0);

  gfx::Point3F pre_pos_intersection;
  gfx::Point3F pre_neg_intersection;

  // Compute the intersection points. N.B.: If the "pre" vertex is on
  // the thick plane, then the intersection will be at the same point, because
  // we set vertex_distance to 0 in this case.
  PointInterpolate(
      polygon->points_[pre_front_begin], polygon->points_[front_begin],
      -vertex_distance[pre_front_begin] /
          gfx::DotProduct(normal_, polygon->points_[front_begin] -
                                       polygon->points_[pre_front_begin]),
      &pre_pos_intersection);
  PointInterpolate(
      polygon->points_[pre_back_begin], polygon->points_[back_begin],
      -vertex_distance[pre_back_begin] /
          gfx::DotProduct(normal_, polygon->points_[back_begin] -
                                       polygon->points_[pre_back_begin]),
      &pre_neg_intersection);

  // Build the front and back polygons.
  std::vector<gfx::Point3F> out_points;
  out_points.push_back(pre_pos_intersection);
  for (size_t index = front_begin; index != back_begin; index = next(index)) {
    out_points.push_back(polygon->points_[index]);
  }
  if (out_points.back() != pre_neg_intersection) {
    out_points.push_back(pre_neg_intersection);
  }

  *front = std::make_unique<DrawPolygon>(polygon->original_ref_, out_points,
                                         polygon->normal_,
                                         polygon->order_index_);

  out_points.clear();
  out_points.push_back(pre_neg_intersection);
  for (size_t index = back_begin; index != front_begin; index = next(index)) {
    out_points.push_back(polygon->points_[index]);
  }
  if (out_points.back() != pre_pos_intersection) {
    out_points.push_back(pre_pos_intersection);
  }

  *back = std::make_unique<DrawPolygon>(polygon->original_ref_, out_points,
                                        polygon->normal_,
                                        polygon->order_index_);

  DCHECK_GE((*front)->points().size(), 3u);
  DCHECK_GE((*back)->points().size(), 3u);
}

// This algorithm takes the first vertex in the polygon and uses that as a
// pivot point to fan out and create quads from the rest of the vertices.
// |offset| starts off as the second vertex, and then |op1| and |op2| indicate
// offset+1 and offset+2 respectively.
// After the first quad is created, the first vertex in the next quad is the
// same as all the rest, the pivot point. The second vertex in the next quad is
// the old |op2|, the last vertex added to the previous quad. This continues
// until all points are exhausted.
// The special case here is where there are only 3 points remaining, in which
// case we use the same values for vertex 3 and 4 to make a degenerate quad
// that represents a triangle.
void DrawPolygon::ToQuads2D(std::vector<gfx::QuadF>* quads) const {
  if (points_.size() <= 2)
    return;

  gfx::PointF first(points_[0].x(), points_[0].y());
  size_t offset = 1;
  while (offset < points_.size() - 1) {
    size_t op1 = offset + 1;
    size_t op2 = offset + 2;
    if (op2 >= points_.size()) {
      // It's going to be a degenerate triangle.
      op2 = op1;
    }
    quads->push_back(gfx::QuadF(
        first, gfx::PointF(points_[offset].x(), points_[offset].y()),
        gfx::PointF(points_[op1].x(), points_[op1].y()),
        gfx::PointF(points_[op2].x(), points_[op2].y())));
    offset = op2;
  }
}

}  // namespace viz
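SplitPolygon() above classifies every vertex against a "thick" plane before deciding whether to split at all. Here is a stripped-down sketch of just that classification step, with toy vector types and the same 0.05 threshold as the file above; everything else about the sketch is invented for illustration.

#include <cassert>

struct Vec3 { float x, y, z; };

float Dot(const Vec3& a, const Vec3& b) {
  return a.x * b.x + a.y * b.y + a.z * b.z;
}
Vec3 Sub(const Vec3& a, const Vec3& b) {
  return {a.x - b.x, a.y - b.y, a.z - b.z};
}

enum class Side { kFront, kBack, kOnPlane };

// |origin| is any point on the plane; |normal| must be unit length.
// Points within |split_threshold| of the plane count as on it, which is what
// keeps nearly-coplanar polygons from being split into slivers.
Side Classify(const Vec3& p, const Vec3& origin, const Vec3& normal,
              float split_threshold = 0.05f) {
  float d = Dot(Sub(p, origin), normal);
  if (d > split_threshold) return Side::kFront;
  if (d < -split_threshold) return Side::kBack;
  return Side::kOnPlane;
}

int main() {
  Vec3 origin{0, 0, 0}, normal{0, 0, 1};
  assert(Classify({0, 0, 1}, origin, normal) == Side::kFront);
  assert(Classify({0, 0, -1}, origin, normal) == Side::kBack);
  assert(Classify({0, 0, 0.01f}, origin, normal) == Side::kOnPlane);
  return 0;
}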
null
null
null
null
13,749
31,144
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
31,144
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/blink/renderer/core/frame/remote_frame.h"

#include "third_party/blink/public/platform/web_layer.h"
#include "third_party/blink/renderer/bindings/core/v8/window_proxy.h"
#include "third_party/blink/renderer/bindings/core/v8/window_proxy_manager.h"
#include "third_party/blink/renderer/core/frame/local_frame.h"
#include "third_party/blink/renderer/core/frame/remote_dom_window.h"
#include "third_party/blink/renderer/core/frame/remote_frame_client.h"
#include "third_party/blink/renderer/core/frame/remote_frame_view.h"
#include "third_party/blink/renderer/core/html/html_frame_owner_element.h"
#include "third_party/blink/renderer/core/layout/layout_embedded_content.h"
#include "third_party/blink/renderer/core/loader/frame_load_request.h"
#include "third_party/blink/renderer/core/loader/frame_loader.h"
#include "third_party/blink/renderer/core/page/page.h"
#include "third_party/blink/renderer/core/paint/paint_layer.h"
#include "third_party/blink/renderer/platform/graphics/graphics_layer.h"
#include "third_party/blink/renderer/platform/loader/fetch/resource_request.h"
#include "third_party/blink/renderer/platform/loader/fetch/resource_timing_info.h"
#include "third_party/blink/renderer/platform/plugins/plugin_script_forbidden_scope.h"
#include "third_party/blink/renderer/platform/weborigin/security_policy.h"

namespace blink {

inline RemoteFrame::RemoteFrame(RemoteFrameClient* client,
                                Page& page,
                                FrameOwner* owner)
    : Frame(client, page, owner, RemoteWindowProxyManager::Create(*this)),
      security_context_(RemoteSecurityContext::Create()) {
  dom_window_ = RemoteDOMWindow::Create(*this);
  UpdateInertIfPossible();
}

RemoteFrame* RemoteFrame::Create(RemoteFrameClient* client,
                                 Page& page,
                                 FrameOwner* owner) {
  RemoteFrame* frame = new RemoteFrame(client, page, owner);
  PageScheduler* page_scheduler = page.GetPageScheduler();
  if (frame->IsMainFrame() && page_scheduler)
    page_scheduler->SetIsMainFrameLocal(false);
  return frame;
}

RemoteFrame::~RemoteFrame() {
  DCHECK(!view_);
}

void RemoteFrame::Trace(blink::Visitor* visitor) {
  visitor->Trace(view_);
  visitor->Trace(security_context_);
  Frame::Trace(visitor);
}

void RemoteFrame::Navigate(Document& origin_document,
                           const KURL& url,
                           bool replace_current_item,
                           UserGestureStatus user_gesture_status) {
  FrameLoadRequest frame_request(&origin_document, ResourceRequest(url));
  frame_request.SetReplacesCurrentItem(replace_current_item);
  frame_request.GetResourceRequest().SetHasUserGesture(
      user_gesture_status == UserGestureStatus::kActive);
  frame_request.GetResourceRequest().SetFrameType(
      IsMainFrame() ? network::mojom::RequestContextFrameType::kTopLevel
                    : network::mojom::RequestContextFrameType::kNested);
  Navigate(frame_request);
}

void RemoteFrame::Navigate(const FrameLoadRequest& passed_request) {
  FrameLoadRequest frame_request(passed_request);

  // The process where this frame actually lives won't have sufficient
  // information to determine correct referrer and upgrade the url, since it
  // won't have access to the originDocument. Do it now.
  FrameLoader::SetReferrerForFrameRequest(frame_request);
  FrameLoader::UpgradeInsecureRequest(frame_request.GetResourceRequest(),
                                      frame_request.OriginDocument());

  Client()->Navigate(frame_request.GetResourceRequest(),
                     frame_request.ReplacesCurrentItem());
}

void RemoteFrame::Reload(FrameLoadType frame_load_type,
                         ClientRedirectPolicy client_redirect_policy) {
  Client()->Reload(frame_load_type, client_redirect_policy);
}

void RemoteFrame::Detach(FrameDetachType type) {
  lifecycle_.AdvanceTo(FrameLifecycle::kDetaching);
  PluginScriptForbiddenScope forbid_plugin_destructor_scripting;
  DetachChildren();
  if (!Client())
    return;

  // Clean up the frame's view if needed. A remote frame only has a view if
  // the parent is a local frame.
  if (view_)
    view_->Dispose();
  GetWindowProxyManager()->ClearForClose();
  SetView(nullptr);
  // ... the RemoteDOMWindow will need to be informed of detachment,
  // as otherwise it will keep a strong reference back to this RemoteFrame.
  // That combined with wrappers (owned and kept alive by RemoteFrame) keeping
  // persistent strong references to RemoteDOMWindow will prevent the GCing
  // of all these objects. Break the cycle by notifying of detachment.
  ToRemoteDOMWindow(dom_window_)->FrameDetached();
  if (web_layer_)
    SetWebLayer(nullptr);
  Frame::Detach(type);
}

bool RemoteFrame::PrepareForCommit() {
  DetachChildren();
  return !!GetPage();
}

void RemoteFrame::CheckCompleted() {
  // Notify the client so that the corresponding LocalFrame can do the check.
  Client()->CheckCompleted();
}

RemoteSecurityContext* RemoteFrame::GetSecurityContext() const {
  return security_context_.Get();
}

bool RemoteFrame::ShouldClose() {
  // TODO(nasko): Implement running the beforeunload handler in the actual
  // LocalFrame running in a different process and getting back a real result.
  return true;
}

void RemoteFrame::DidFreeze() {
  DCHECK(RuntimeEnabledFeatures::PageLifecycleEnabled());
  // TODO(fmeawad): Add support for remote frames.
}

void RemoteFrame::DidResume() {
  DCHECK(RuntimeEnabledFeatures::PageLifecycleEnabled());
  // TODO(fmeawad): Add support for remote frames.
}

void RemoteFrame::SetIsInert(bool inert) {
  if (inert != is_inert_)
    Client()->SetIsInert(inert);
  is_inert_ = inert;
}

void RemoteFrame::SetView(RemoteFrameView* view) {
  // Oilpan: as RemoteFrameView performs no finalization actions,
  // no explicit Dispose() of it needed here. (cf. LocalFrameView::Dispose().)
  view_ = view;
}

void RemoteFrame::CreateView() {
  // If the RemoteFrame does not have a LocalFrame parent, there's no need to
  // create a EmbeddedContentView for it.
  if (!DeprecatedLocalOwner())
    return;

  DCHECK(!DeprecatedLocalOwner()->OwnedEmbeddedContentView());

  SetView(RemoteFrameView::Create(this));

  if (OwnerLayoutObject())
    DeprecatedLocalOwner()->SetEmbeddedContentView(view_);
}

RemoteFrameClient* RemoteFrame::Client() const {
  return static_cast<RemoteFrameClient*>(Frame::Client());
}

void RemoteFrame::SetWebLayer(WebLayer* web_layer) {
  if (web_layer_)
    GraphicsLayer::UnregisterContentsLayer(web_layer_);
  web_layer_ = web_layer;
  if (web_layer_)
    GraphicsLayer::RegisterContentsLayer(web_layer_);

  DCHECK(Owner());
  ToHTMLFrameOwnerElement(Owner())->SetNeedsCompositingUpdate();
}

void RemoteFrame::AdvanceFocus(WebFocusType type, LocalFrame* source) {
  Client()->AdvanceFocus(type, source);
}

void RemoteFrame::DetachChildren() {
  using FrameVector = HeapVector<Member<Frame>>;
  FrameVector children_to_detach;
  children_to_detach.ReserveCapacity(Tree().ChildCount());
  for (Frame* child = Tree().FirstChild(); child;
       child = child->Tree().NextSibling())
    children_to_detach.push_back(child);
  for (const auto& child : children_to_detach)
    child->Detach(FrameDetachType::kRemove);
}

}  // namespace blink
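DetachChildren() above copies the child list before detaching anything, so that tree mutation during Detach() cannot invalidate the iteration. A minimal sketch of that snapshot-then-detach idiom, using placeholder types rather than Blink's HeapVector and Frame tree, is:

#include <cstdio>
#include <vector>

struct Node {
  std::vector<Node*> children;
  void Detach() { std::puts("child detached"); }
};

void DetachChildren(Node& parent) {
  // Snapshot the children first; detaching may mutate the tree.
  std::vector<Node*> to_detach(parent.children);
  for (Node* child : to_detach)
    child->Detach();
  parent.children.clear();
}

int main() {
  Node a, b, parent;
  parent.children = {&a, &b};
  DetachChildren(parent);  // prints "child detached" twice
  return 0;
}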
null
null
null
null
28,007
20,311
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
185,306
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Micrel KS8695 (Centaur) Ethernet. * * Copyright 2008 Simtec Electronics * Daniel Silverstone <[email protected]> * Vincent Sanders <[email protected]> */ #ifndef KS8695NET_H #define KS8695NET_H /* Receive descriptor flags */ #define RDES_OWN (1 << 31) /* Ownership */ #define RDES_FS (1 << 30) /* First Descriptor */ #define RDES_LS (1 << 29) /* Last Descriptor */ #define RDES_IPE (1 << 28) /* IP Checksum error */ #define RDES_TCPE (1 << 27) /* TCP Checksum error */ #define RDES_UDPE (1 << 26) /* UDP Checksum error */ #define RDES_ES (1 << 25) /* Error summary */ #define RDES_MF (1 << 24) /* Multicast Frame */ #define RDES_RE (1 << 19) /* MII Error reported */ #define RDES_TL (1 << 18) /* Frame too Long */ #define RDES_RF (1 << 17) /* Runt Frame */ #define RDES_CE (1 << 16) /* CRC error */ #define RDES_FT (1 << 15) /* Frame Type */ #define RDES_FLEN (0x7ff) /* Frame Length */ #define RDES_RER (1 << 25) /* Receive End of Ring */ #define RDES_RBS (0x7ff) /* Receive Buffer Size */ /* Transmit descriptor flags */ #define TDES_OWN (1 << 31) /* Ownership */ #define TDES_IC (1 << 31) /* Interrupt on Completion */ #define TDES_FS (1 << 30) /* First Segment */ #define TDES_LS (1 << 29) /* Last Segment */ #define TDES_IPCKG (1 << 28) /* IP Checksum generate */ #define TDES_TCPCKG (1 << 27) /* TCP Checksum generate */ #define TDES_UDPCKG (1 << 26) /* UDP Checksum generate */ #define TDES_TER (1 << 25) /* Transmit End of Ring */ #define TDES_TBS (0x7ff) /* Transmit Buffer Size */ /* * Network controller register offsets */ #define KS8695_DTXC (0x00) /* DMA Transmit Control */ #define KS8695_DRXC (0x04) /* DMA Receive Control */ #define KS8695_DTSC (0x08) /* DMA Transmit Start Command */ #define KS8695_DRSC (0x0c) /* DMA Receive Start Command */ #define KS8695_TDLB (0x10) /* Transmit Descriptor List * Base Address */ #define KS8695_RDLB (0x14) /* Receive Descriptor List * Base Address */ #define KS8695_MAL (0x18) /* MAC Station Address Low */ #define KS8695_MAH (0x1c) /* MAC Station Address High */ #define KS8695_AAL_(n) (0x80 + ((n)*8)) /* MAC Additional * Station Address * (0..15) Low */ #define KS8695_AAH_(n) (0x84 + ((n)*8)) /* MAC Additional * Station Address * (0..15) High */ /* DMA Transmit Control Register */ #define DTXC_TRST (1 << 31) /* Soft Reset */ #define DTXC_TBS (0x3f << 24) /* Transmit Burst Size */ #define DTXC_TUCG (1 << 18) /* Transmit UDP * Checksum Generate */ #define DTXC_TTCG (1 << 17) /* Transmit TCP * Checksum Generate */ #define DTXC_TICG (1 << 16) /* Transmit IP * Checksum Generate */ #define DTXC_TFCE (1 << 9) /* Transmit Flow * Control Enable */ #define DTXC_TLB (1 << 8) /* Loopback mode */ #define DTXC_TEP (1 << 2) /* Transmit Enable Padding */ #define DTXC_TAC (1 << 1) /* Transmit Add CRC */ #define DTXC_TE (1 << 0) /* TX Enable */ /* DMA Receive Control Register */ #define DRXC_RBS (0x3f << 24) /* Receive Burst Size */ #define DRXC_RUCC (1 << 18) /* Receive UDP Checksum check */ #define DRXC_RTCG (1 << 17) /* Receive TCP Checksum check */ #define DRXC_RICG (1 << 16) /* Receive IP Checksum check */ #define DRXC_RFCE (1 << 9) /* Receive Flow Control * Enable */ #define DRXC_RB (1 << 6) /* Receive Broadcast */ #define DRXC_RM (1 << 5) /* Receive Multicast */ #define DRXC_RU (1 << 4) /* Receive Unicast */ #define DRXC_RERR (1 << 3) /* Receive Error Frame */ #define DRXC_RA (1 << 2) /* Receive All */ #define DRXC_RE (1 << 0) /* RX Enable */ /* Additional Station Address High */ #define AAH_E (1 << 31) /* Address Enabled */ #endif /* KS8695NET_H */
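The receive-descriptor macros in the header above are meant to be tested and masked against a single 32-bit status word. A small illustration of that usage (flag values copied from the header; the status word and the checks themselves are invented for the sketch):

#include <cassert>
#include <cstdint>

constexpr uint32_t RDES_OWN  = 1u << 31;  // descriptor owned by hardware
constexpr uint32_t RDES_FS   = 1u << 30;  // first descriptor of a frame
constexpr uint32_t RDES_LS   = 1u << 29;  // last descriptor of a frame
constexpr uint32_t RDES_ES   = 1u << 25;  // error summary
constexpr uint32_t RDES_FLEN = 0x7ff;     // frame-length field mask

int main() {
  // A hypothetical completed descriptor: one 64-byte frame, no errors.
  uint32_t status = RDES_FS | RDES_LS | 64;
  assert(!(status & RDES_OWN));      // OWN clear: CPU owns the descriptor
  assert((status & RDES_ES) == 0);   // no error summary reported
  assert((status & RDES_FLEN) == 64);  // low bits carry the frame length
  return 0;
}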
null
null
null
null
93,653
31,088
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
196,083
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Freescale imx6ul pinctrl driver * * Author: Anson Huang <[email protected]> * Copyright (C) 2015 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> #include "pinctrl-imx.h" enum imx6ul_pads { MX6UL_PAD_RESERVE0 = 0, MX6UL_PAD_RESERVE1 = 1, MX6UL_PAD_RESERVE2 = 2, MX6UL_PAD_RESERVE3 = 3, MX6UL_PAD_RESERVE4 = 4, MX6UL_PAD_RESERVE5 = 5, MX6UL_PAD_RESERVE6 = 6, MX6UL_PAD_RESERVE7 = 7, MX6UL_PAD_RESERVE8 = 8, MX6UL_PAD_RESERVE9 = 9, MX6UL_PAD_RESERVE10 = 10, MX6UL_PAD_SNVS_TAMPER4 = 11, MX6UL_PAD_RESERVE12 = 12, MX6UL_PAD_RESERVE13 = 13, MX6UL_PAD_RESERVE14 = 14, MX6UL_PAD_RESERVE15 = 15, MX6UL_PAD_RESERVE16 = 16, MX6UL_PAD_JTAG_MOD = 17, MX6UL_PAD_JTAG_TMS = 18, MX6UL_PAD_JTAG_TDO = 19, MX6UL_PAD_JTAG_TDI = 20, MX6UL_PAD_JTAG_TCK = 21, MX6UL_PAD_JTAG_TRST_B = 22, MX6UL_PAD_GPIO1_IO00 = 23, MX6UL_PAD_GPIO1_IO01 = 24, MX6UL_PAD_GPIO1_IO02 = 25, MX6UL_PAD_GPIO1_IO03 = 26, MX6UL_PAD_GPIO1_IO04 = 27, MX6UL_PAD_GPIO1_IO05 = 28, MX6UL_PAD_GPIO1_IO06 = 29, MX6UL_PAD_GPIO1_IO07 = 30, MX6UL_PAD_GPIO1_IO08 = 31, MX6UL_PAD_GPIO1_IO09 = 32, MX6UL_PAD_UART1_TX_DATA = 33, MX6UL_PAD_UART1_RX_DATA = 34, MX6UL_PAD_UART1_CTS_B = 35, MX6UL_PAD_UART1_RTS_B = 36, MX6UL_PAD_UART2_TX_DATA = 37, MX6UL_PAD_UART2_RX_DATA = 38, MX6UL_PAD_UART2_CTS_B = 39, MX6UL_PAD_UART2_RTS_B = 40, MX6UL_PAD_UART3_TX_DATA = 41, MX6UL_PAD_UART3_RX_DATA = 42, MX6UL_PAD_UART3_CTS_B = 43, MX6UL_PAD_UART3_RTS_B = 44, MX6UL_PAD_UART4_TX_DATA = 45, MX6UL_PAD_UART4_RX_DATA = 46, MX6UL_PAD_UART5_TX_DATA = 47, MX6UL_PAD_UART5_RX_DATA = 48, MX6UL_PAD_ENET1_RX_DATA0 = 49, MX6UL_PAD_ENET1_RX_DATA1 = 50, MX6UL_PAD_ENET1_RX_EN = 51, MX6UL_PAD_ENET1_TX_DATA0 = 52, MX6UL_PAD_ENET1_TX_DATA1 = 53, MX6UL_PAD_ENET1_TX_EN = 54, MX6UL_PAD_ENET1_TX_CLK = 55, MX6UL_PAD_ENET1_RX_ER = 56, MX6UL_PAD_ENET2_RX_DATA0 = 57, MX6UL_PAD_ENET2_RX_DATA1 = 58, MX6UL_PAD_ENET2_RX_EN = 59, MX6UL_PAD_ENET2_TX_DATA0 = 60, MX6UL_PAD_ENET2_TX_DATA1 = 61, MX6UL_PAD_ENET2_TX_EN = 62, MX6UL_PAD_ENET2_TX_CLK = 63, MX6UL_PAD_ENET2_RX_ER = 64, MX6UL_PAD_LCD_CLK = 65, MX6UL_PAD_LCD_ENABLE = 66, MX6UL_PAD_LCD_HSYNC = 67, MX6UL_PAD_LCD_VSYNC = 68, MX6UL_PAD_LCD_RESET = 69, MX6UL_PAD_LCD_DATA00 = 70, MX6UL_PAD_LCD_DATA01 = 71, MX6UL_PAD_LCD_DATA02 = 72, MX6UL_PAD_LCD_DATA03 = 73, MX6UL_PAD_LCD_DATA04 = 74, MX6UL_PAD_LCD_DATA05 = 75, MX6UL_PAD_LCD_DATA06 = 76, MX6UL_PAD_LCD_DATA07 = 77, MX6UL_PAD_LCD_DATA08 = 78, MX6UL_PAD_LCD_DATA09 = 79, MX6UL_PAD_LCD_DATA10 = 80, MX6UL_PAD_LCD_DATA11 = 81, MX6UL_PAD_LCD_DATA12 = 82, MX6UL_PAD_LCD_DATA13 = 83, MX6UL_PAD_LCD_DATA14 = 84, MX6UL_PAD_LCD_DATA15 = 85, MX6UL_PAD_LCD_DATA16 = 86, MX6UL_PAD_LCD_DATA17 = 87, MX6UL_PAD_LCD_DATA18 = 88, MX6UL_PAD_LCD_DATA19 = 89, MX6UL_PAD_LCD_DATA20 = 90, MX6UL_PAD_LCD_DATA21 = 91, MX6UL_PAD_LCD_DATA22 = 92, MX6UL_PAD_LCD_DATA23 = 93, MX6UL_PAD_NAND_RE_B = 94, MX6UL_PAD_NAND_WE_B = 95, MX6UL_PAD_NAND_DATA00 = 96, MX6UL_PAD_NAND_DATA01 = 97, MX6UL_PAD_NAND_DATA02 = 98, MX6UL_PAD_NAND_DATA03 = 99, MX6UL_PAD_NAND_DATA04 = 100, MX6UL_PAD_NAND_DATA05 = 101, MX6UL_PAD_NAND_DATA06 = 102, MX6UL_PAD_NAND_DATA07 = 103, MX6UL_PAD_NAND_ALE = 104, MX6UL_PAD_NAND_WP_B = 105, MX6UL_PAD_NAND_READY_B = 106, MX6UL_PAD_NAND_CE0_B = 107, MX6UL_PAD_NAND_CE1_B = 108, MX6UL_PAD_NAND_CLE = 109, MX6UL_PAD_NAND_DQS = 
110, MX6UL_PAD_SD1_CMD = 111, MX6UL_PAD_SD1_CLK = 112, MX6UL_PAD_SD1_DATA0 = 113, MX6UL_PAD_SD1_DATA1 = 114, MX6UL_PAD_SD1_DATA2 = 115, MX6UL_PAD_SD1_DATA3 = 116, MX6UL_PAD_CSI_MCLK = 117, MX6UL_PAD_CSI_PIXCLK = 118, MX6UL_PAD_CSI_VSYNC = 119, MX6UL_PAD_CSI_HSYNC = 120, MX6UL_PAD_CSI_DATA00 = 121, MX6UL_PAD_CSI_DATA01 = 122, MX6UL_PAD_CSI_DATA02 = 123, MX6UL_PAD_CSI_DATA03 = 124, MX6UL_PAD_CSI_DATA04 = 125, MX6UL_PAD_CSI_DATA05 = 126, MX6UL_PAD_CSI_DATA06 = 127, MX6UL_PAD_CSI_DATA07 = 128, }; /* Pad names for the pinmux subsystem */ static const struct pinctrl_pin_desc imx6ul_pinctrl_pads[] = { IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE0), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE1), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE2), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE3), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE4), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE5), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE6), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE7), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE8), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE9), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE10), IMX_PINCTRL_PIN(MX6UL_PAD_SNVS_TAMPER4), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE12), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE13), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE14), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE15), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE16), IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_MOD), IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TMS), IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TDO), IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TDI), IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TCK), IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TRST_B), IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO00), IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO01), IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO02), IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO03), IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO04), IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO05), IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO06), IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO07), IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO08), IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO09), IMX_PINCTRL_PIN(MX6UL_PAD_UART1_TX_DATA), IMX_PINCTRL_PIN(MX6UL_PAD_UART1_RX_DATA), IMX_PINCTRL_PIN(MX6UL_PAD_UART1_CTS_B), IMX_PINCTRL_PIN(MX6UL_PAD_UART1_RTS_B), IMX_PINCTRL_PIN(MX6UL_PAD_UART2_TX_DATA), IMX_PINCTRL_PIN(MX6UL_PAD_UART2_RX_DATA), IMX_PINCTRL_PIN(MX6UL_PAD_UART2_CTS_B), IMX_PINCTRL_PIN(MX6UL_PAD_UART2_RTS_B), IMX_PINCTRL_PIN(MX6UL_PAD_UART3_TX_DATA), IMX_PINCTRL_PIN(MX6UL_PAD_UART3_RX_DATA), IMX_PINCTRL_PIN(MX6UL_PAD_UART3_CTS_B), IMX_PINCTRL_PIN(MX6UL_PAD_UART3_RTS_B), IMX_PINCTRL_PIN(MX6UL_PAD_UART4_TX_DATA), IMX_PINCTRL_PIN(MX6UL_PAD_UART4_RX_DATA), IMX_PINCTRL_PIN(MX6UL_PAD_UART5_TX_DATA), IMX_PINCTRL_PIN(MX6UL_PAD_UART5_RX_DATA), IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_DATA0), IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_DATA1), IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_EN), IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_DATA0), IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_DATA1), IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_EN), IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_CLK), IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_ER), IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_DATA0), IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_DATA1), IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_EN), IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_DATA0), IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_DATA1), IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_EN), IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_CLK), IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_ER), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_CLK), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_ENABLE), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_HSYNC), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_VSYNC), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_RESET), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA00), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA01), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA02), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA03), 
IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA04), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA05), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA06), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA07), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA08), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA09), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA10), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA11), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA12), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA13), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA14), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA15), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA16), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA17), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA18), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA19), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA20), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA21), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA22), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA23), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_RE_B), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_WE_B), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA00), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA01), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA02), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA03), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA04), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA05), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA06), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA07), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_ALE), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_WP_B), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_READY_B), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_CE0_B), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_CE1_B), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_CLE), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DQS), IMX_PINCTRL_PIN(MX6UL_PAD_SD1_CMD), IMX_PINCTRL_PIN(MX6UL_PAD_SD1_CLK), IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA0), IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA1), IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA2), IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA3), IMX_PINCTRL_PIN(MX6UL_PAD_CSI_MCLK), IMX_PINCTRL_PIN(MX6UL_PAD_CSI_PIXCLK), IMX_PINCTRL_PIN(MX6UL_PAD_CSI_VSYNC), IMX_PINCTRL_PIN(MX6UL_PAD_CSI_HSYNC), IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA00), IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA01), IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA02), IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA03), IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA04), IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA05), IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA06), IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA07), }; static struct imx_pinctrl_soc_info imx6ul_pinctrl_info = { .pins = imx6ul_pinctrl_pads, .npins = ARRAY_SIZE(imx6ul_pinctrl_pads), .gpr_compatible = "fsl,imx6ul-iomuxc-gpr", }; static struct of_device_id imx6ul_pinctrl_of_match[] = { { .compatible = "fsl,imx6ul-iomuxc", }, { /* sentinel */ } }; static int imx6ul_pinctrl_probe(struct platform_device *pdev) { return imx_pinctrl_probe(pdev, &imx6ul_pinctrl_info); } static struct platform_driver imx6ul_pinctrl_driver = { .driver = { .name = "imx6ul-pinctrl", .of_match_table = of_match_ptr(imx6ul_pinctrl_of_match), }, .probe = imx6ul_pinctrl_probe, }; static int __init imx6ul_pinctrl_init(void) { return platform_driver_register(&imx6ul_pinctrl_driver); } arch_initcall(imx6ul_pinctrl_init);
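The pad table above depends on the enum values and the descriptor array staying index-aligned. A toy version of the same table pattern, with made-up pad names and a stand-in for IMX_PINCTRL_PIN, makes the invariant explicit:

#include <cassert>
#include <cstddef>

enum Pad { PAD_JTAG_MOD = 0, PAD_JTAG_TMS = 1, PAD_GPIO1_IO00 = 2 };

struct PinDesc {
  unsigned number;
  const char* name;
};

// Like IMX_PINCTRL_PIN: records the enum value and its stringified name.
#define PIN(p) { p, #p }

static const PinDesc kPads[] = {
    PIN(PAD_JTAG_MOD),
    PIN(PAD_JTAG_TMS),
    PIN(PAD_GPIO1_IO00),
};

int main() {
  // The table index and the enum value must stay in lockstep.
  for (size_t i = 0; i < sizeof(kPads) / sizeof(kPads[0]); ++i)
    assert(kPads[i].number == i);
  return 0;
}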
null
null
null
null
104,430
46,178
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
46,178
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef ASH_WM_TABLET_MODE_TABLET_MODE_CONTROLLER_H_
#define ASH_WM_TABLET_MODE_TABLET_MODE_CONTROLLER_H_

#include <memory>

#include "ash/ash_export.h"
#include "ash/display/window_tree_host_manager.h"
#include "ash/public/interfaces/tablet_mode.mojom.h"
#include "ash/session/session_observer.h"
#include "ash/shell_observer.h"
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/observer_list.h"
#include "base/optional.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "chromeos/accelerometer/accelerometer_reader.h"
#include "chromeos/accelerometer/accelerometer_types.h"
#include "chromeos/dbus/power_manager_client.h"
#include "mojo/public/cpp/bindings/binding.h"
#include "mojo/public/cpp/bindings/interface_ptr_set.h"
#include "ui/gfx/geometry/vector3d_f.h"

namespace aura {
class Window;
}

namespace base {
class TickClock;
}

namespace gfx {
class Vector3dF;
}

namespace views {
class Widget;
}

namespace ash {

class ScopedDisableInternalMouseAndKeyboard;
class TabletModeControllerTest;
class TabletModeObserver;
class TabletModeWindowManager;
class TabletModeWindowManagerTest;

// TabletModeController listens to accelerometer events and automatically
// enters and exits tablet mode when the lid is opened beyond the triggering
// angle and rotates the display to match the device when in tablet mode.
class ASH_EXPORT TabletModeController
    : public chromeos::AccelerometerReader::Observer,
      public chromeos::PowerManagerClient::Observer,
      public mojom::TabletModeController,
      public ShellObserver,
      public WindowTreeHostManager::Observer,
      public SessionObserver {
 public:
  // Used for keeping track if the user wants the machine to behave as a
  // clamshell/tablet regardless of hardware orientation.
  // TODO(oshima): Move this to common place.
  enum class UiMode {
    NONE = 0,
    CLAMSHELL,
    TABLETMODE,
  };

  // Public so it can be used by unit tests.
  constexpr static char kLidAngleHistogramName[] = "Ash.TouchView.LidAngle";

  TabletModeController();
  ~TabletModeController() override;

  // True if it is possible to enter tablet mode in the current
  // configuration. If this returns false, it should never be the case that
  // tablet mode becomes enabled.
  bool CanEnterTabletMode();

  // TODO(jonross): Merge this with EnterTabletMode. Currently these are
  // separate for several reasons: there is no internal display when running
  // unittests; the event blocker prevents keyboard input when running ChromeOS
  // on linux. http://crbug.com/362881
  // Turn the always tablet mode window manager on or off.
  void EnableTabletModeWindowManager(bool should_enable);

  // Test if the TabletModeWindowManager is enabled or not.
  bool IsTabletModeWindowManagerEnabled() const;

  // Add a special window to the TabletModeWindowManager for tracking. This is
  // only required for special windows which are handled by other window
  // managers like the |MultiUserWindowManager|.
  // If the tablet mode is not enabled no action will be performed.
  void AddWindow(aura::Window* window);

  // Binds the mojom::TabletModeController interface request to this object.
  void BindRequest(mojom::TabletModeControllerRequest request);

  void AddObserver(TabletModeObserver* observer);
  void RemoveObserver(TabletModeObserver* observer);

  // Checks if we should auto hide title bars for the |widget| in tablet mode.
  bool ShouldAutoHideTitlebars(views::Widget* widget);

  // Flushes the mojo message pipe to chrome.
  void FlushForTesting();

  // If |record_lid_angle_timer_| is running, invokes its task and returns true.
  // Otherwise, returns false.
  bool TriggerRecordLidAngleTimerForTesting() WARN_UNUSED_RESULT;

  // ShellObserver:
  void OnShellInitialized() override;

  // WindowTreeHostManager::Observer:
  void OnDisplayConfigurationChanged() override;

  // SessionObserver:
  void OnChromeTerminating() override;

  // chromeos::AccelerometerReader::Observer:
  void OnAccelerometerUpdated(
      scoped_refptr<const chromeos::AccelerometerUpdate> update) override;

  // chromeos::PowerManagerClient::Observer:
  void LidEventReceived(chromeos::PowerManagerClient::LidState state,
                        const base::TimeTicks& time) override;
  void TabletModeEventReceived(chromeos::PowerManagerClient::TabletMode mode,
                               const base::TimeTicks& time) override;
  void SuspendImminent(power_manager::SuspendImminent::Reason reason) override;
  void SuspendDone(const base::TimeDelta& sleep_duration) override;

 private:
  friend class TabletModeControllerTest;
  friend class TabletModeWindowManagerTest;
  friend class MultiUserWindowManagerChromeOSTest;
  friend class VirtualKeyboardControllerTest;

  // Used for recording metrics for intervals of time spent in
  // and out of TabletMode.
  enum TabletModeIntervalType {
    TABLET_MODE_INTERVAL_INACTIVE,
    TABLET_MODE_INTERVAL_ACTIVE
  };

  // Set the TickClock. This is only to be used by tests that need to
  // artificially and deterministically control the current time.
  // This does not take the ownership of the tick_clock. |tick_clock| must
  // outlive the TabletModeController instance.
  void SetTickClockForTest(const base::TickClock* tick_clock);

  // Detect hinge rotation from base and lid accelerometers and automatically
  // start / stop tablet mode.
  void HandleHingeRotation(
      scoped_refptr<const chromeos::AccelerometerUpdate> update);

  void OnGetSwitchStates(
      base::Optional<chromeos::PowerManagerClient::SwitchStates> result);

  // Returns true if unstable lid angle can be used. The lid angle that falls in
  // the unstable zone ([0, 20) and (340, 360] degrees) is considered unstable
  // due to the potential erroneous accelerometer readings. Immediately using
  // the unstable angle to trigger tablet mode is error-prone. So we wait for
  // a certain range of time before using unstable angle.
  bool CanUseUnstableLidAngle() const;

  // Enables TabletModeWindowManager, and determines the current state of
  // rotation lock.
  void EnterTabletMode();

  // Removes TabletModeWindowManager and resets the display rotation if there
  // is no rotation lock.
  void LeaveTabletMode();

  // Record UMA stats tracking TabletMode usage. If |type| is
  // TABLET_MODE_INTERVAL_INACTIVE, then record that TabletMode has been
  // inactive from |tablet_mode_usage_interval_start_time_| until now.
  // Similarly, record that TabletMode has been active if |type| is
  // TABLET_MODE_INTERVAL_ACTIVE.
  void RecordTabletModeUsageInterval(TabletModeIntervalType type);

  // Reports an UMA histogram containing the value of |lid_angle_|.
  // Called periodically by |record_lid_angle_timer_|.
  void RecordLidAngle();

  // Returns TABLET_MODE_INTERVAL_ACTIVE if TabletMode is currently active,
  // otherwise returns TABLET_MODE_INTERNAL_INACTIVE.
  TabletModeIntervalType CurrentTabletModeIntervalType();

  // mojom::TabletModeController:
  void SetClient(mojom::TabletModeClientPtr client) override;

  // Checks whether we want to allow entering and exiting tablet mode. This
  // returns false if the user set a flag for the software to behave in a
  // certain way regardless of configuration.
  bool AllowEnterExitTabletMode() const;

  // The maximized window manager (if enabled).
  std::unique_ptr<TabletModeWindowManager> tablet_mode_window_manager_;

  // A helper class which when instantiated will block native events from the
  // internal keyboard and touchpad.
  std::unique_ptr<ScopedDisableInternalMouseAndKeyboard> event_blocker_;

  // Whether we have ever seen accelerometer data.
  bool have_seen_accelerometer_data_ = false;

  // Whether both accelerometers are available.
  bool can_detect_lid_angle_ = false;

  // Tracks time spent in (and out of) tablet mode.
  base::Time tablet_mode_usage_interval_start_time_;
  base::TimeDelta total_tablet_mode_time_;
  base::TimeDelta total_non_tablet_mode_time_;

  // Tracks the first time the lid angle was unstable. This is used to suppress
  // erroneous accelerometer readings as the lid is nearly opened or closed but
  // the accelerometer reports readings that make the lid to appear near fully
  // open. (e.g. After closing the lid, the correct angle reading is 0. But the
  // accelerometer may report 359.5 degrees which triggers the tablet mode by
  // mistake.)
  base::TimeTicks first_unstable_lid_angle_time_;

  // Source for the current time in base::TimeTicks.
  const base::TickClock* tick_clock_;

  // Set when tablet mode switch is on. This is used to force tablet mode.
  bool tablet_mode_switch_is_on_ = false;

  // Tracks when the lid is closed. Used to prevent entering tablet mode.
  bool lid_is_closed_ = false;

  // Last computed lid angle.
  double lid_angle_ = 0.0f;

  // Tracks smoothed accelerometer data over time. This is done when the hinge
  // is approaching vertical to remove abrupt acceleration that can lead to
  // incorrect calculations of hinge angles.
  gfx::Vector3dF base_smoothed_;
  gfx::Vector3dF lid_smoothed_;

  // Binding for the TabletModeController interface.
  mojo::Binding<mojom::TabletModeController> binding_;

  // Client interface (e.g. in chrome).
  mojom::TabletModeClientPtr client_;

  // Tracks whether a flag is used to force ui mode.
  UiMode force_ui_mode_ = UiMode::NONE;

  // Calls RecordLidAngle() periodically.
  base::RepeatingTimer record_lid_angle_timer_;

  ScopedSessionObserver scoped_session_observer_;

  base::ObserverList<TabletModeObserver> tablet_mode_observers_;

  base::WeakPtrFactory<TabletModeController> weak_factory_;

  DISALLOW_COPY_AND_ASSIGN(TabletModeController);
};

}  // namespace ash

#endif  // ASH_WM_TABLET_MODE_TABLET_MODE_CONTROLLER_H_
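CanUseUnstableLidAngle() above treats lid angles in [0, 20) and (340, 360] degrees as unstable accelerometer readings. A tiny stand-alone version of just that zone check (the time-based debounce the real class also applies is omitted, and the function name is invented for the sketch) might be:

#include <cassert>

// Angles near fully closed or fully open wrap around and are unreliable.
bool IsLidAngleStable(double degrees) {
  return degrees >= 20.0 && degrees <= 340.0;
}

int main() {
  assert(!IsLidAngleStable(0.5));    // nearly closed: unstable zone
  assert(IsLidAngleStable(180.0));   // flat open: stable
  assert(!IsLidAngleStable(359.5));  // wrapped-around reading: unstable zone
  return 0;
}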
null
null
null
null
43,041
17,207
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
17,207
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/viz/common/quads/shared_bitmap.h"

#include <stddef.h>
#include <stdint.h>

#include "base/logging.h"
#include "base/memory/shared_memory_handle.h"
#include "base/numerics/safe_math.h"
#include "base/rand_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "components/viz/common/resources/resource_format_utils.h"

namespace viz {

SharedBitmap::SharedBitmap(uint8_t* pixels,
                           const SharedBitmapId& id,
                           uint32_t sequence_number)
    : pixels_(pixels), id_(id), sequence_number_(sequence_number) {}

SharedBitmap::~SharedBitmap() {}

// static
SharedBitmapId SharedBitmap::GenerateId() {
  SharedBitmapId id;
  // Needs cryptographically-secure random numbers.
  base::RandBytes(id.name, sizeof(id.name));
  return id;
}

base::trace_event::MemoryAllocatorDumpGuid GetSharedBitmapGUIDForTracing(
    const SharedBitmapId& bitmap_id) {
  auto bitmap_id_hex = base::HexEncode(bitmap_id.name, sizeof(bitmap_id.name));
  return base::trace_event::MemoryAllocatorDumpGuid(
      base::StringPrintf("sharedbitmap-x-process/%s", bitmap_id_hex.c_str()));
}

}  // namespace viz
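GetSharedBitmapGUIDForTracing() above builds a dump name by hex-encoding the raw bitmap id into a fixed "sharedbitmap-x-process/..." prefix. A self-contained sketch of the same naming scheme, with a hand-rolled encoder standing in for base::HexEncode, is:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <string>

// Uppercase hex encoding, matching base::HexEncode's output style.
std::string HexEncode(const uint8_t* bytes, size_t len) {
  static const char kDigits[] = "0123456789ABCDEF";
  std::string out;
  out.reserve(len * 2);
  for (size_t i = 0; i < len; ++i) {
    out.push_back(kDigits[bytes[i] >> 4]);
    out.push_back(kDigits[bytes[i] & 0xf]);
  }
  return out;
}

int main() {
  uint8_t id[4] = {0xde, 0xad, 0xbe, 0xef};  // toy 4-byte id
  std::string guid = "sharedbitmap-x-process/" + HexEncode(id, sizeof(id));
  std::printf("%s\n", guid.c_str());  // sharedbitmap-x-process/DEADBEEF
  return 0;
}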
null
null
null
null
14,070
26,043
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
191,038
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (c) 2010 Red Hat Inc. * Author : Dave Airlie <[email protected]> * * Licensed under GPLv2 * * ATPX support for both Intel/ATI */ #include <linux/vga_switcheroo.h> #include <linux/slab.h> #include <linux/acpi.h> #include <linux/pci.h> #include <linux/delay.h> #include "amd_acpi.h" struct amdgpu_atpx_functions { bool px_params; bool power_cntl; bool disp_mux_cntl; bool i2c_mux_cntl; bool switch_start; bool switch_end; bool disp_connectors_mapping; bool disp_detetion_ports; }; struct amdgpu_atpx { acpi_handle handle; struct amdgpu_atpx_functions functions; bool is_hybrid; bool dgpu_req_power_for_displays; }; static struct amdgpu_atpx_priv { bool atpx_detected; bool bridge_pm_usable; /* handle for device - and atpx */ acpi_handle dhandle; acpi_handle other_handle; struct amdgpu_atpx atpx; } amdgpu_atpx_priv; struct atpx_verify_interface { u16 size; /* structure size in bytes (includes size field) */ u16 version; /* version */ u32 function_bits; /* supported functions bit vector */ } __packed; struct atpx_px_params { u16 size; /* structure size in bytes (includes size field) */ u32 valid_flags; /* which flags are valid */ u32 flags; /* flags */ } __packed; struct atpx_power_control { u16 size; u8 dgpu_state; } __packed; struct atpx_mux { u16 size; u16 mux; } __packed; bool amdgpu_has_atpx(void) { return amdgpu_atpx_priv.atpx_detected; } bool amdgpu_has_atpx_dgpu_power_cntl(void) { return amdgpu_atpx_priv.atpx.functions.power_cntl; } bool amdgpu_is_atpx_hybrid(void) { return amdgpu_atpx_priv.atpx.is_hybrid; } bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays; } /** * amdgpu_atpx_call - call an ATPX method * * @handle: acpi handle * @function: the ATPX function to execute * @params: ATPX function params * * Executes the requested ATPX function (all asics). * Returns a pointer to the acpi output buffer. */ static union acpi_object *amdgpu_atpx_call(acpi_handle handle, int function, struct acpi_buffer *params) { acpi_status status; union acpi_object atpx_arg_elements[2]; struct acpi_object_list atpx_arg; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; atpx_arg.count = 2; atpx_arg.pointer = &atpx_arg_elements[0]; atpx_arg_elements[0].type = ACPI_TYPE_INTEGER; atpx_arg_elements[0].integer.value = function; if (params) { atpx_arg_elements[1].type = ACPI_TYPE_BUFFER; atpx_arg_elements[1].buffer.length = params->length; atpx_arg_elements[1].buffer.pointer = params->pointer; } else { /* We need a second fake parameter */ atpx_arg_elements[1].type = ACPI_TYPE_INTEGER; atpx_arg_elements[1].integer.value = 0; } status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer); /* Fail only if calling the method fails and ATPX is supported */ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { printk("failed to evaluate ATPX got %s\n", acpi_format_exception(status)); kfree(buffer.pointer); return NULL; } return buffer.pointer; } /** * amdgpu_atpx_parse_functions - parse supported functions * * @f: supported functions struct * @mask: supported functions mask from ATPX * * Use the supported functions mask from ATPX function * ATPX_FUNCTION_VERIFY_INTERFACE to determine what functions * are supported (all asics). 
*/ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mask) { f->px_params = mask & ATPX_GET_PX_PARAMETERS_SUPPORTED; f->power_cntl = mask & ATPX_POWER_CONTROL_SUPPORTED; f->disp_mux_cntl = mask & ATPX_DISPLAY_MUX_CONTROL_SUPPORTED; f->i2c_mux_cntl = mask & ATPX_I2C_MUX_CONTROL_SUPPORTED; f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED; f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED; f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED; f->disp_detetion_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED; } /** * amdgpu_atpx_validate_functions - validate ATPX functions * * @atpx: amdgpu atpx struct * * Validate that required functions are enabled (all asics). * returns 0 on success, error on failure. */ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) { u32 valid_bits = 0; if (atpx->functions.px_params) { union acpi_object *info; struct atpx_px_params output; size_t size; info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL); if (!info) return -EIO; memset(&output, 0, sizeof(output)); size = *(u16 *) info->buffer.pointer; if (size < 10) { printk("ATPX buffer is too small: %zu\n", size); kfree(info); return -EINVAL; } size = min(sizeof(output), size); memcpy(&output, info->buffer.pointer, size); valid_bits = output.flags & output.valid_flags; kfree(info); } /* if separate mux flag is set, mux controls are required */ if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) { atpx->functions.i2c_mux_cntl = true; atpx->functions.disp_mux_cntl = true; } /* if any outputs are muxed, mux controls are required */ if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED | ATPX_TV_SIGNAL_MUXED | ATPX_DFP_SIGNAL_MUXED)) atpx->functions.disp_mux_cntl = true; /* some bioses set these bits rather than flagging power_cntl as supported */ if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED | ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED)) atpx->functions.power_cntl = true; atpx->is_hybrid = false; if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { printk("ATPX Hybrid Graphics\n"); /* * Disable legacy PM methods only when pcie port PM is usable, * otherwise the device might fail to power off or power on. */ atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable; atpx->is_hybrid = true; } atpx->dgpu_req_power_for_displays = false; if (valid_bits & ATPX_DGPU_REQ_POWER_FOR_DISPLAYS) atpx->dgpu_req_power_for_displays = true; return 0; } /** * amdgpu_atpx_verify_interface - verify ATPX * * @atpx: amdgpu atpx struct * * Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function * to initialize ATPX and determine what features are supported * (all asics). * returns 0 on success, error on failure. */ static int amdgpu_atpx_verify_interface(struct amdgpu_atpx *atpx) { union acpi_object *info; struct atpx_verify_interface output; size_t size; int err = 0; info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL); if (!info) return -EIO; memset(&output, 0, sizeof(output)); size = *(u16 *) info->buffer.pointer; if (size < 8) { printk("ATPX buffer is too small: %zu\n", size); err = -EINVAL; goto out; } size = min(sizeof(output), size); memcpy(&output, info->buffer.pointer, size); /* TODO: check version? 
*/ printk("ATPX version %u, functions 0x%08x\n", output.version, output.function_bits); amdgpu_atpx_parse_functions(&atpx->functions, output.function_bits); out: kfree(info); return err; } /** * amdgpu_atpx_set_discrete_state - power up/down discrete GPU * * @atpx: atpx info struct * @state: discrete GPU state (0 = power down, 1 = power up) * * Execute the ATPX_FUNCTION_POWER_CONTROL ATPX function to * power down/up the discrete GPU (all asics). * Returns 0 on success, error on failure. */ static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state) { struct acpi_buffer params; union acpi_object *info; struct atpx_power_control input; if (atpx->functions.power_cntl) { input.size = 3; input.dgpu_state = state; params.length = input.size; params.pointer = &input; info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_POWER_CONTROL, &params); if (!info) return -EIO; kfree(info); /* 200ms delay is required after off */ if (state == 0) msleep(200); } return 0; } /** * amdgpu_atpx_switch_disp_mux - switch display mux * * @atpx: atpx info struct * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) * * Execute the ATPX_FUNCTION_DISPLAY_MUX_CONTROL ATPX function to * switch the display mux between the discrete GPU and integrated GPU * (all asics). * Returns 0 on success, error on failure. */ static int amdgpu_atpx_switch_disp_mux(struct amdgpu_atpx *atpx, u16 mux_id) { struct acpi_buffer params; union acpi_object *info; struct atpx_mux input; if (atpx->functions.disp_mux_cntl) { input.size = 4; input.mux = mux_id; params.length = input.size; params.pointer = &input; info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_DISPLAY_MUX_CONTROL, &params); if (!info) return -EIO; kfree(info); } return 0; } /** * amdgpu_atpx_switch_i2c_mux - switch i2c/hpd mux * * @atpx: atpx info struct * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) * * Execute the ATPX_FUNCTION_I2C_MUX_CONTROL ATPX function to * switch the i2c/hpd mux between the discrete GPU and integrated GPU * (all asics). * Returns 0 on success, error on failure. */ static int amdgpu_atpx_switch_i2c_mux(struct amdgpu_atpx *atpx, u16 mux_id) { struct acpi_buffer params; union acpi_object *info; struct atpx_mux input; if (atpx->functions.i2c_mux_cntl) { input.size = 4; input.mux = mux_id; params.length = input.size; params.pointer = &input; info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_I2C_MUX_CONTROL, &params); if (!info) return -EIO; kfree(info); } return 0; } /** * amdgpu_atpx_switch_start - notify the sbios of a GPU switch * * @atpx: atpx info struct * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) * * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION ATPX * function to notify the sbios that a switch between the discrete GPU and * integrated GPU has begun (all asics). * Returns 0 on success, error on failure. 
*/ static int amdgpu_atpx_switch_start(struct amdgpu_atpx *atpx, u16 mux_id) { struct acpi_buffer params; union acpi_object *info; struct atpx_mux input; if (atpx->functions.switch_start) { input.size = 4; input.mux = mux_id; params.length = input.size; params.pointer = &input; info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION, &params); if (!info) return -EIO; kfree(info); } return 0; } /** * amdgpu_atpx_switch_end - notify the sbios of a GPU switch * * @atpx: atpx info struct * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) * * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION ATPX * function to notify the sbios that a switch between the discrete GPU and * integrated GPU has ended (all asics). * Returns 0 on success, error on failure. */ static int amdgpu_atpx_switch_end(struct amdgpu_atpx *atpx, u16 mux_id) { struct acpi_buffer params; union acpi_object *info; struct atpx_mux input; if (atpx->functions.switch_end) { input.size = 4; input.mux = mux_id; params.length = input.size; params.pointer = &input; info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION, &params); if (!info) return -EIO; kfree(info); } return 0; } /** * amdgpu_atpx_switchto - switch to the requested GPU * * @id: GPU to switch to * * Execute the necessary ATPX functions to switch between the discrete GPU and * integrated GPU (all asics). * Returns 0 on success, error on failure. */ static int amdgpu_atpx_switchto(enum vga_switcheroo_client_id id) { u16 gpu_id; if (id == VGA_SWITCHEROO_IGD) gpu_id = ATPX_INTEGRATED_GPU; else gpu_id = ATPX_DISCRETE_GPU; amdgpu_atpx_switch_start(&amdgpu_atpx_priv.atpx, gpu_id); amdgpu_atpx_switch_disp_mux(&amdgpu_atpx_priv.atpx, gpu_id); amdgpu_atpx_switch_i2c_mux(&amdgpu_atpx_priv.atpx, gpu_id); amdgpu_atpx_switch_end(&amdgpu_atpx_priv.atpx, gpu_id); return 0; } /** * amdgpu_atpx_power_state - power down/up the requested GPU * * @id: GPU to power down/up * @state: requested power state (0 = off, 1 = on) * * Execute the necessary ATPX function to power down/up the discrete GPU * (all asics). * Returns 0 on success, error on failure. */ static int amdgpu_atpx_power_state(enum vga_switcheroo_client_id id, enum vga_switcheroo_state state) { /* on w500 ACPI can't change intel gpu state */ if (id == VGA_SWITCHEROO_IGD) return 0; amdgpu_atpx_set_discrete_state(&amdgpu_atpx_priv.atpx, state); return 0; } /** * amdgpu_atpx_pci_probe_handle - look up the ATPX handle * * @pdev: pci device * * Look up the ATPX handles (all asics). * Returns true if the handles are found, false if not. */ static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev) { acpi_handle dhandle, atpx_handle; acpi_status status; dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); if (ACPI_FAILURE(status)) { amdgpu_atpx_priv.other_handle = dhandle; return false; } amdgpu_atpx_priv.dhandle = dhandle; amdgpu_atpx_priv.atpx.handle = atpx_handle; return true; } /** * amdgpu_atpx_init - verify the ATPX interface * * Verify the ATPX interface (all asics). * Returns 0 on success, error on failure. 
*/ static int amdgpu_atpx_init(void) { int r; /* set up the ATPX handle */ r = amdgpu_atpx_verify_interface(&amdgpu_atpx_priv.atpx); if (r) return r; /* validate the atpx setup */ r = amdgpu_atpx_validate(&amdgpu_atpx_priv.atpx); if (r) return r; return 0; } /** * amdgpu_atpx_get_client_id - get the client id * * @pdev: pci device * * look up whether we are the integrated or discrete GPU (all asics). * Returns the client id. */ static int amdgpu_atpx_get_client_id(struct pci_dev *pdev) { if (amdgpu_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev)) return VGA_SWITCHEROO_IGD; else return VGA_SWITCHEROO_DIS; } static const struct vga_switcheroo_handler amdgpu_atpx_handler = { .switchto = amdgpu_atpx_switchto, .power_state = amdgpu_atpx_power_state, .get_client_id = amdgpu_atpx_get_client_id, }; /** * amdgpu_atpx_detect - detect whether we have PX * * Check if we have a PX system (all asics). * Returns true if we have a PX system, false if not. */ static bool amdgpu_atpx_detect(void) { char acpi_method_name[255] = { 0 }; struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; struct pci_dev *pdev = NULL; bool has_atpx = false; int vga_count = 0; bool d3_supported = false; struct pci_dev *parent_pdev; while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { vga_count++; has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); parent_pdev = pci_upstream_bridge(pdev); d3_supported |= parent_pdev && parent_pdev->bridge_d3; } while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { vga_count++; has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); parent_pdev = pci_upstream_bridge(pdev); d3_supported |= parent_pdev && parent_pdev->bridge_d3; } if (has_atpx && vga_count == 2) { acpi_get_name(amdgpu_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer); pr_info("vga_switcheroo: detected switching method %s handle\n", acpi_method_name); amdgpu_atpx_priv.atpx_detected = true; amdgpu_atpx_priv.bridge_pm_usable = d3_supported; amdgpu_atpx_init(); return true; } return false; } /** * amdgpu_register_atpx_handler - register with vga_switcheroo * * Register the PX callbacks with vga_switcheroo (all asics). */ void amdgpu_register_atpx_handler(void) { bool r; enum vga_switcheroo_handler_flags_t handler_flags = 0; /* detect if we have any ATPX + 2 VGA in the system */ r = amdgpu_atpx_detect(); if (!r) return; vga_switcheroo_register_handler(&amdgpu_atpx_handler, handler_flags); } /** * amdgpu_unregister_atpx_handler - unregister with vga_switcheroo * * Unregister the PX callbacks with vga_switcheroo (all asics). */ void amdgpu_unregister_atpx_handler(void) { vga_switcheroo_unregister_handler(); }
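Every ATPX entry point above funnels through amdgpu_atpx_call(), whose definition falls outside this excerpt. The sketch below shows the shape such an ACPI control-method wrapper plausibly takes, reconstructed from the call sites above; the function name and the exact argument marshalling are assumptions, not the verbatim implementation.

static union acpi_object *atpx_call_sketch(acpi_handle handle, int function,
					   struct acpi_buffer *params)
{
	acpi_status status;
	union acpi_object atpx_arg_elements[2];
	struct acpi_object_list atpx_arg;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };

	atpx_arg.count = 2;
	atpx_arg.pointer = &atpx_arg_elements[0];

	/* First argument selects which ATPX_FUNCTION_* to run. */
	atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
	atpx_arg_elements[0].integer.value = function;

	if (params) {
		/* Second argument carries the packed input struct
		 * (e.g. struct atpx_power_control or struct atpx_mux). */
		atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
		atpx_arg_elements[1].buffer.length = params->length;
		atpx_arg_elements[1].buffer.pointer = params->pointer;
	} else {
		/* Functions without input still need a placeholder arg. */
		atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
		atpx_arg_elements[1].integer.value = 0;
	}

	status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
	if (ACPI_FAILURE(status)) {
		pr_err("failed to evaluate ATPX: %s\n",
		       acpi_format_exception(status));
		return NULL;
	}

	/* Callers above kfree() the returned object once they are done. */
	return buffer.pointer;
}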
null
null
null
null
99,385
20,827
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
185,822
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/********************************************************************** * Author: Cavium, Inc. * * Contact: [email protected] * Please include "LiquidIO" in the subject. * * Copyright (c) 2003-2016 Cavium, Inc. * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more details. ***********************************************************************/ /*! \file octeon_droq.h * \brief Implementation of Octeon Output queues. "Output" is with * respect to the Octeon device on the NIC. From this driver's point of * view they are ingress queues. */ #ifndef __OCTEON_DROQ_H__ #define __OCTEON_DROQ_H__ /* Default number of packets that will be processed in one iteration. */ #define MAX_PACKET_BUDGET 0xFFFFFFFF /** Octeon descriptor format. * The descriptor ring is made of descriptors which have two 64-bit values: * -# Physical (bus) address of the data buffer. * -# Physical (bus) address of an octeon_droq_info structure. * The Octeon device DMAs incoming packets and their information to the * addresses given by these descriptor fields. */ struct octeon_droq_desc { /** The buffer pointer */ u64 buffer_ptr; /** The Info pointer */ u64 info_ptr; }; #define OCT_DROQ_DESC_SIZE (sizeof(struct octeon_droq_desc)) /** Information about a packet DMA'ed by Octeon. * The format of the information available at the Info Pointer after Octeon * has posted a packet. Not all descriptors have valid information. Only * the Info field of the first descriptor for a packet has information * about the packet. */ struct octeon_droq_info { /** The Output Receive Header. */ union octeon_rh rh; /** The length of the packet. */ u64 length; }; #define OCT_DROQ_INFO_SIZE (sizeof(struct octeon_droq_info)) struct octeon_skb_page_info { /* DMA address for the page */ dma_addr_t dma; /* Page for the rx dma */ struct page *page; /** Offset into the page */ unsigned int page_offset; }; /** Pointer to a data buffer. * The driver keeps a pointer to the data buffer that it made available to * the Octeon device. Since the descriptor ring keeps physical (bus) * addresses, this field is required for the driver to keep track of * the virtual address pointers. */ struct octeon_recv_buffer { /** Packet buffer, including metadata. */ void *buffer; /** Data in the packet buffer. */ u8 *data; /** pg_info */ struct octeon_skb_page_info pg_info; }; #define OCT_DROQ_RECVBUF_SIZE (sizeof(struct octeon_recv_buffer)) /** Output Queue statistics. Each output queue maintains its own instance * of these counters. */ struct oct_droq_stats { /** Number of packets received in this queue. */ u64 pkts_received; /** Bytes received by this queue. */ u64 bytes_received; /** Packets dropped due to no dispatch function. */ u64 dropped_nodispatch; /** Packets dropped due to no memory available. */ u64 dropped_nomem; /** Packets dropped due to a large number of pkts to process. */ u64 dropped_toomany; /** Number of packets sent to the stack from this queue. */ u64 rx_pkts_received; /** Number of bytes sent to the stack from this queue. */ u64 rx_bytes_received; /** Num of packets dropped due to receive path failures. 
*/ u64 rx_dropped; u64 rx_vxlan; /** Num of failures of recv_buffer_alloc() */ u64 rx_alloc_failure; }; #define POLL_EVENT_INTR_ARRIVED 1 #define POLL_EVENT_PROCESS_PKTS 2 #define POLL_EVENT_PENDING_PKTS 3 #define POLL_EVENT_ENABLE_INTR 4 /* The maximum number of buffers that can be dispatched from the * output/dma queue. Set to 64 assuming 1K buffers in the DROQ and the fact * that the max packet size from the DROQ is 64K. */ #define MAX_RECV_BUFS 64 /** Receive Packet format used when dispatching output queue packets * with non-raw opcodes. * The received packet will be sent to the upper layers using this * structure, which is passed as a parameter to the dispatch function. */ struct octeon_recv_pkt { /** Number of buffers in this received packet */ u16 buffer_count; /** Id of the device that is sending the packet up */ u16 octeon_id; /** Length of data in the packet buffer */ u32 length; /** The receive header */ union octeon_rh rh; /** Pointer to the OS-specific packet buffer */ void *buffer_ptr[MAX_RECV_BUFS]; /** Size of the buffers pointed to by the ptrs in buffer_ptr */ u32 buffer_size[MAX_RECV_BUFS]; }; #define OCT_RECV_PKT_SIZE (sizeof(struct octeon_recv_pkt)) /** The first parameter of a dispatch function. * For a raw mode opcode, the driver dispatches with the device * pointer in this structure. * For a non-raw mode opcode, the driver dispatches the recv_pkt * created to contain the buffers with data received from Octeon. * --------------------- * | *recv_pkt ----|--- * |-------------------| | * | 0 or more bytes | | * | reserved by driver| | * |-------------------|<-/ * | octeon_recv_pkt | * | | * |___________________| */ struct octeon_recv_info { void *rsvd; struct octeon_recv_pkt *recv_pkt; }; #define OCT_RECV_INFO_SIZE (sizeof(struct octeon_recv_info)) /** Allocate a recv_info structure. The recv_pkt pointer in the recv_info * structure is filled in before this call returns. * @param extra_bytes - extra bytes to be allocated at the end of the recv info * structure. * @return - pointer to a newly allocated recv_info structure. */ static inline struct octeon_recv_info *octeon_alloc_recv_info(int extra_bytes) { struct octeon_recv_info *recv_info; u8 *buf; buf = kmalloc(OCT_RECV_PKT_SIZE + OCT_RECV_INFO_SIZE + extra_bytes, GFP_ATOMIC); if (!buf) return NULL; recv_info = (struct octeon_recv_info *)buf; recv_info->recv_pkt = (struct octeon_recv_pkt *)(buf + OCT_RECV_INFO_SIZE); recv_info->rsvd = NULL; if (extra_bytes) recv_info->rsvd = buf + OCT_RECV_INFO_SIZE + OCT_RECV_PKT_SIZE; return recv_info; } /** Free a recv_info structure. * @param recv_info - Pointer to the receive_info to be freed */ static inline void octeon_free_recv_info(struct octeon_recv_info *recv_info) { kfree(recv_info); } typedef int (*octeon_dispatch_fn_t)(struct octeon_recv_info *, void *); /** Used by the NIC module to register a packet handler and to get device * information for each octeon device. */ struct octeon_droq_ops { /** This registered function will be called by the driver with * the octeon id, a pointer to the buffer from the droq and the length * of data in the buffer. The receive header gives the port * number to the caller. Function pointer is set by the caller. */ void (*fptr)(u32, void *, u32, union octeon_rh *, void *, void *); void *farg; /* This function will be called by the driver for all NAPI-related * events; it takes a single opaque argument. 
*/ void (*napi_fn)(void *); u32 poll_mode; /** Flag indicating if the DROQ handler should drop packets that * it cannot handle in one iteration. Set by the caller. */ u32 drop_on_max; }; /** The Descriptor Ring Output Queue structure. * This structure has all the information required to implement an * Octeon DROQ. */ struct octeon_droq { /** A spinlock to protect access to this ring. */ spinlock_t lock; u32 q_no; u32 pkt_count; struct octeon_droq_ops ops; struct octeon_device *oct_dev; /** The 8B aligned descriptor ring starts at this address. */ struct octeon_droq_desc *desc_ring; /** Index in the ring where the driver should read the next packet */ u32 read_idx; /** Index in the ring where Octeon will write the next packet */ u32 write_idx; /** Index in the ring where the driver will refill the descriptor's * buffer */ u32 refill_idx; /** Packets pending to be processed */ atomic_t pkts_pending; /** Number of descriptors in this ring. */ u32 max_count; /** The number of descriptors pending refill. */ u32 refill_count; u32 pkts_per_intr; u32 refill_threshold; /** The max number of descriptors in the DROQ without a buffer. * This field is used to keep track of the empty space threshold. If the * refill_count reaches this value, the DROQ cannot accept a max-sized * (64K) packet. */ u32 max_empty_descs; /** The 8B aligned info ptrs begin from this address. */ struct octeon_droq_info *info_list; /** The receive buffer list. This list has the virtual addresses of the * buffers. */ struct octeon_recv_buffer *recv_buf_list; /** The size of each buffer pointed to by the buffer pointer. */ u32 buffer_size; /** Pointer to the mapped packet credit register. * The host writes the number of info/buffer ptrs available to this * register. */ void __iomem *pkts_credit_reg; /** Pointer to the mapped packet sent register. * Octeon writes the number of packets DMA'ed to host memory * in this register. */ void __iomem *pkts_sent_reg; struct list_head dispatch_list; /** Statistics for this DROQ. */ struct oct_droq_stats stats; /** DMA mapped address of the DROQ descriptor ring. */ size_t desc_ring_dma; /** The info ptr list is allocated at this virtual address. */ void *info_base_addr; /** DMA mapped address of the info list */ dma_addr_t info_list_dma; /** Allocated size of the info list. */ u32 info_alloc_size; /** application context */ void *app_ctx; struct napi_struct napi; u32 cpu_id; struct call_single_data csd; }; #define OCT_DROQ_SIZE (sizeof(struct octeon_droq)) /** * Allocates space for the descriptor ring for the droq and sets the * base addr, num desc, etc. in Octeon registers. * * @param oct_dev - pointer to the octeon device structure * @param q_no - droq no. ranges from 0 - 3. * @param app_ctx - pointer to application context * @return Success: 0 Failure: 1 */ int octeon_init_droq(struct octeon_device *oct_dev, u32 q_no, u32 num_descs, u32 desc_size, void *app_ctx); /** * Frees the space for the descriptor ring for the droq. * * @param oct_dev - pointer to the octeon device structure * @param q_no - droq no. ranges from 0 - 3. * @return: Success: 0 Failure: 1 */ int octeon_delete_droq(struct octeon_device *oct_dev, u32 q_no); /** Register a change in droq operations. The ops field has a pointer to a * function which will be called by the DROQ handler for all packets arriving * on the output queues given by q_no, irrespective of the type of packet. * The ops field also has a flag which, if set, tells the DROQ handler to * drop packets if it receives more than what it can process in one * invocation of the handler. 
* @param oct - octeon device * @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1) * @param ops - the droq_ops settings for this queue * @return - 0 on success, -ENODEV or -EINVAL on error. */ int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no, struct octeon_droq_ops *ops); /** Resets the function pointer and flag settings made by * octeon_register_droq_ops(). After this routine is called, the DROQ handler * will look up the dispatch function for each arriving packet on the output * queue given by q_no. * @param oct - octeon device * @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1) * @return - 0 on success, -ENODEV or -EINVAL on error. */ int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no); /** Register a dispatch function for an opcode/subcode. The driver will call * this dispatch function when it receives a packet with the given * opcode/subcode in its output queues, along with the user-specified * argument. * @param oct - the octeon device to register with. * @param opcode - the opcode for which the dispatch will be registered. * @param subcode - the subcode for which the dispatch will be registered. * @param fn - the dispatch function. * @param fn_arg - user-specified argument that will be passed to the * dispatch function by the driver. * @return Success: 0; Failure: 1 */ int octeon_register_dispatch_fn(struct octeon_device *oct, u16 opcode, u16 subcode, octeon_dispatch_fn_t fn, void *fn_arg); void octeon_droq_print_stats(void); u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq); int octeon_create_droq(struct octeon_device *oct, u32 q_no, u32 num_descs, u32 desc_size, void *app_ctx); int octeon_droq_process_packets(struct octeon_device *oct, struct octeon_droq *droq, u32 budget); int octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd, u32 arg); #endif /* __OCTEON_DROQ_H__ */
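To make the registration API above concrete, here is a hedged usage sketch (not from the driver): a minimal consumer matching the octeon_dispatch_fn_t typedef, registered for a hypothetical opcode/subcode pair. Only the type shapes come from this header; the opcode values and the assumption that the dispatch function owns and frees the recv_info are illustrative.

/* Minimal octeon_dispatch_fn_t consumer (illustrative only). */
static int example_dispatch(struct octeon_recv_info *recv_info, void *arg)
{
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	u16 i;

	/* Walk the up-to-MAX_RECV_BUFS buffers that make up this packet. */
	for (i = 0; i < recv_pkt->buffer_count; i++)
		pr_debug("oct%u buf %u: %u bytes\n", recv_pkt->octeon_id,
			 i, recv_pkt->buffer_size[i]);

	/* Assumption: the dispatch owner releases the recv_info. */
	octeon_free_recv_info(recv_info);
	return 0;
}

/* Registration from device-init code; 0x12/0x01 are made-up values. */
static int example_register(struct octeon_device *oct)
{
	return octeon_register_dispatch_fn(oct, 0x12, 0x01,
					   example_dispatch, oct);
}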
null
null
null
null
94,169
2,012
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
155,069
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/* * AAC encoder * Copyright (C) 2008 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * AAC encoder */ /*********************************** * TODOs: * add sane pulse detection ***********************************/ #include "libavutil/libm.h" #include "libavutil/thread.h" #include "libavutil/float_dsp.h" #include "libavutil/opt.h" #include "avcodec.h" #include "put_bits.h" #include "internal.h" #include "mpeg4audio.h" #include "kbdwin.h" #include "sinewin.h" #include "aac.h" #include "aactab.h" #include "aacenc.h" #include "aacenctab.h" #include "aacenc_utils.h" #include "psymodel.h" static AVOnce aac_table_init = AV_ONCE_INIT; static void put_pce(PutBitContext *pb, AVCodecContext *avctx) { int i, j; AACEncContext *s = avctx->priv_data; AACPCEInfo *pce = &s->pce; const int bitexact = avctx->flags & AV_CODEC_FLAG_BITEXACT; const char *aux_data = bitexact ? "Lavc" : LIBAVCODEC_IDENT; put_bits(pb, 4, 0); put_bits(pb, 2, avctx->profile); put_bits(pb, 4, s->samplerate_index); put_bits(pb, 4, pce->num_ele[0]); /* Front */ put_bits(pb, 4, pce->num_ele[1]); /* Side */ put_bits(pb, 4, pce->num_ele[2]); /* Back */ put_bits(pb, 2, pce->num_ele[3]); /* LFE */ put_bits(pb, 3, 0); /* Assoc data */ put_bits(pb, 4, 0); /* CCs */ put_bits(pb, 1, 0); /* Stereo mixdown */ put_bits(pb, 1, 0); /* Mono mixdown */ put_bits(pb, 1, 0); /* Something else */ for (i = 0; i < 4; i++) { for (j = 0; j < pce->num_ele[i]; j++) { if (i < 3) put_bits(pb, 1, pce->pairing[i][j]); put_bits(pb, 4, pce->index[i][j]); } } avpriv_align_put_bits(pb); put_bits(pb, 8, strlen(aux_data)); avpriv_put_string(pb, aux_data, 0); } /** * Make AAC audio config object. * @see 1.6.2.1 "Syntax - AudioSpecificConfig" */ static int put_audio_specific_config(AVCodecContext *avctx) { PutBitContext pb; AACEncContext *s = avctx->priv_data; int channels = (!s->needs_pce)*(s->channels - (s->channels == 8 ? 
1 : 0)); const int max_size = 32; avctx->extradata = av_mallocz(max_size); if (!avctx->extradata) return AVERROR(ENOMEM); init_put_bits(&pb, avctx->extradata, max_size); put_bits(&pb, 5, s->profile+1); //profile put_bits(&pb, 4, s->samplerate_index); //sample rate index put_bits(&pb, 4, channels); //GASpecificConfig put_bits(&pb, 1, 0); //frame length - 1024 samples put_bits(&pb, 1, 0); //does not depend on core coder put_bits(&pb, 1, 0); //is not extension if (s->needs_pce) put_pce(&pb, avctx); //Explicitly Mark SBR absent put_bits(&pb, 11, 0x2b7); //sync extension put_bits(&pb, 5, AOT_SBR); put_bits(&pb, 1, 0); flush_put_bits(&pb); avctx->extradata_size = put_bits_count(&pb) >> 3; return 0; } void ff_quantize_band_cost_cache_init(struct AACEncContext *s) { ++s->quantize_band_cost_cache_generation; if (s->quantize_band_cost_cache_generation == 0) { memset(s->quantize_band_cost_cache, 0, sizeof(s->quantize_band_cost_cache)); s->quantize_band_cost_cache_generation = 1; } } #define WINDOW_FUNC(type) \ static void apply_ ##type ##_window(AVFloatDSPContext *fdsp, \ SingleChannelElement *sce, \ const float *audio) WINDOW_FUNC(only_long) { const float *lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024; const float *pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024; float *out = sce->ret_buf; fdsp->vector_fmul (out, audio, lwindow, 1024); fdsp->vector_fmul_reverse(out + 1024, audio + 1024, pwindow, 1024); } WINDOW_FUNC(long_start) { const float *lwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024; const float *swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128; float *out = sce->ret_buf; fdsp->vector_fmul(out, audio, lwindow, 1024); memcpy(out + 1024, audio + 1024, sizeof(out[0]) * 448); fdsp->vector_fmul_reverse(out + 1024 + 448, audio + 1024 + 448, swindow, 128); memset(out + 1024 + 576, 0, sizeof(out[0]) * 448); } WINDOW_FUNC(long_stop) { const float *lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024; const float *swindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128; float *out = sce->ret_buf; memset(out, 0, sizeof(out[0]) * 448); fdsp->vector_fmul(out + 448, audio + 448, swindow, 128); memcpy(out + 576, audio + 576, sizeof(out[0]) * 448); fdsp->vector_fmul_reverse(out + 1024, audio + 1024, lwindow, 1024); } WINDOW_FUNC(eight_short) { const float *swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128; const float *pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128; const float *in = audio + 448; float *out = sce->ret_buf; int w; for (w = 0; w < 8; w++) { fdsp->vector_fmul (out, in, w ? 
pwindow : swindow, 128); out += 128; in += 128; fdsp->vector_fmul_reverse(out, in, swindow, 128); out += 128; } } static void (*const apply_window[4])(AVFloatDSPContext *fdsp, SingleChannelElement *sce, const float *audio) = { [ONLY_LONG_SEQUENCE] = apply_only_long_window, [LONG_START_SEQUENCE] = apply_long_start_window, [EIGHT_SHORT_SEQUENCE] = apply_eight_short_window, [LONG_STOP_SEQUENCE] = apply_long_stop_window }; static void apply_window_and_mdct(AACEncContext *s, SingleChannelElement *sce, float *audio) { int i; const float *output = sce->ret_buf; apply_window[sce->ics.window_sequence[0]](s->fdsp, sce, audio); if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) s->mdct1024.mdct_calc(&s->mdct1024, sce->coeffs, output); else for (i = 0; i < 1024; i += 128) s->mdct128.mdct_calc(&s->mdct128, &sce->coeffs[i], output + i*2); memcpy(audio, audio + 1024, sizeof(audio[0]) * 1024); memcpy(sce->pcoeffs, sce->coeffs, sizeof(sce->pcoeffs)); } /** * Encode ics_info element. * @see Table 4.6 (syntax of ics_info) */ static void put_ics_info(AACEncContext *s, IndividualChannelStream *info) { int w; put_bits(&s->pb, 1, 0); // ics_reserved bit put_bits(&s->pb, 2, info->window_sequence[0]); put_bits(&s->pb, 1, info->use_kb_window[0]); if (info->window_sequence[0] != EIGHT_SHORT_SEQUENCE) { put_bits(&s->pb, 6, info->max_sfb); put_bits(&s->pb, 1, !!info->predictor_present); } else { put_bits(&s->pb, 4, info->max_sfb); for (w = 1; w < 8; w++) put_bits(&s->pb, 1, !info->group_len[w]); } } /** * Encode MS data. * @see 4.6.8.1 "Joint Coding - M/S Stereo" */ static void encode_ms_info(PutBitContext *pb, ChannelElement *cpe) { int i, w; put_bits(pb, 2, cpe->ms_mode); if (cpe->ms_mode == 1) for (w = 0; w < cpe->ch[0].ics.num_windows; w += cpe->ch[0].ics.group_len[w]) for (i = 0; i < cpe->ch[0].ics.max_sfb; i++) put_bits(pb, 1, cpe->ms_mask[w*16 + i]); } /** * Produce integer coefficients from scalefactors provided by the model. */ static void adjust_frame_information(ChannelElement *cpe, int chans) { int i, w, w2, g, ch; int maxsfb, cmaxsfb; for (ch = 0; ch < chans; ch++) { IndividualChannelStream *ics = &cpe->ch[ch].ics; maxsfb = 0; cpe->ch[ch].pulse.num_pulse = 0; for (w = 0; w < ics->num_windows; w += ics->group_len[w]) { for (w2 = 0; w2 < ics->group_len[w]; w2++) { for (cmaxsfb = ics->num_swb; cmaxsfb > 0 && cpe->ch[ch].zeroes[w*16+cmaxsfb-1]; cmaxsfb--) ; maxsfb = FFMAX(maxsfb, cmaxsfb); } } ics->max_sfb = maxsfb; //adjust zero bands for window groups for (w = 0; w < ics->num_windows; w += ics->group_len[w]) { for (g = 0; g < ics->max_sfb; g++) { i = 1; for (w2 = w; w2 < w + ics->group_len[w]; w2++) { if (!cpe->ch[ch].zeroes[w2*16 + g]) { i = 0; break; } } cpe->ch[ch].zeroes[w*16 + g] = i; } } } if (chans > 1 && cpe->common_window) { IndividualChannelStream *ics0 = &cpe->ch[0].ics; IndividualChannelStream *ics1 = &cpe->ch[1].ics; int msc = 0; ics0->max_sfb = FFMAX(ics0->max_sfb, ics1->max_sfb); ics1->max_sfb = ics0->max_sfb; for (w = 0; w < ics0->num_windows*16; w += 16) for (i = 0; i < ics0->max_sfb; i++) if (cpe->ms_mask[w+i]) msc++; if (msc == 0 || ics0->max_sfb == 0) cpe->ms_mode = 0; else cpe->ms_mode = msc < ics0->max_sfb * ics0->num_windows ? 
1 : 2; } } static void apply_intensity_stereo(ChannelElement *cpe) { int w, w2, g, i; IndividualChannelStream *ics = &cpe->ch[0].ics; if (!cpe->common_window) return; for (w = 0; w < ics->num_windows; w += ics->group_len[w]) { for (w2 = 0; w2 < ics->group_len[w]; w2++) { int start = (w+w2) * 128; for (g = 0; g < ics->num_swb; g++) { int p = -1 + 2 * (cpe->ch[1].band_type[w*16+g] - 14); float scale = cpe->ch[0].is_ener[w*16+g]; if (!cpe->is_mask[w*16 + g]) { start += ics->swb_sizes[g]; continue; } if (cpe->ms_mask[w*16 + g]) p *= -1; for (i = 0; i < ics->swb_sizes[g]; i++) { float sum = (cpe->ch[0].coeffs[start+i] + p*cpe->ch[1].coeffs[start+i])*scale; cpe->ch[0].coeffs[start+i] = sum; cpe->ch[1].coeffs[start+i] = 0.0f; } start += ics->swb_sizes[g]; } } } } static void apply_mid_side_stereo(ChannelElement *cpe) { int w, w2, g, i; IndividualChannelStream *ics = &cpe->ch[0].ics; if (!cpe->common_window) return; for (w = 0; w < ics->num_windows; w += ics->group_len[w]) { for (w2 = 0; w2 < ics->group_len[w]; w2++) { int start = (w+w2) * 128; for (g = 0; g < ics->num_swb; g++) { /* ms_mask can be used for other purposes in PNS and I/S, * so must not apply M/S if any band uses either, even if * ms_mask is set. */ if (!cpe->ms_mask[w*16 + g] || cpe->is_mask[w*16 + g] || cpe->ch[0].band_type[w*16 + g] >= NOISE_BT || cpe->ch[1].band_type[w*16 + g] >= NOISE_BT) { start += ics->swb_sizes[g]; continue; } for (i = 0; i < ics->swb_sizes[g]; i++) { float L = (cpe->ch[0].coeffs[start+i] + cpe->ch[1].coeffs[start+i]) * 0.5f; float R = L - cpe->ch[1].coeffs[start+i]; cpe->ch[0].coeffs[start+i] = L; cpe->ch[1].coeffs[start+i] = R; } start += ics->swb_sizes[g]; } } } } /** * Encode scalefactor band coding type. */ static void encode_band_info(AACEncContext *s, SingleChannelElement *sce) { int w; if (s->coder->set_special_band_scalefactors) s->coder->set_special_band_scalefactors(s, sce); for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) s->coder->encode_window_bands_info(s, sce, w, sce->ics.group_len[w], s->lambda); } /** * Encode scalefactors. */ static void encode_scale_factors(AVCodecContext *avctx, AACEncContext *s, SingleChannelElement *sce) { int diff, off_sf = sce->sf_idx[0], off_pns = sce->sf_idx[0] - NOISE_OFFSET; int off_is = 0, noise_flag = 1; int i, w; for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { for (i = 0; i < sce->ics.max_sfb; i++) { if (!sce->zeroes[w*16 + i]) { if (sce->band_type[w*16 + i] == NOISE_BT) { diff = sce->sf_idx[w*16 + i] - off_pns; off_pns = sce->sf_idx[w*16 + i]; if (noise_flag-- > 0) { put_bits(&s->pb, NOISE_PRE_BITS, diff + NOISE_PRE); continue; } } else if (sce->band_type[w*16 + i] == INTENSITY_BT || sce->band_type[w*16 + i] == INTENSITY_BT2) { diff = sce->sf_idx[w*16 + i] - off_is; off_is = sce->sf_idx[w*16 + i]; } else { diff = sce->sf_idx[w*16 + i] - off_sf; off_sf = sce->sf_idx[w*16 + i]; } diff += SCALE_DIFF_ZERO; av_assert0(diff >= 0 && diff <= 120); put_bits(&s->pb, ff_aac_scalefactor_bits[diff], ff_aac_scalefactor_code[diff]); } } } } /** * Encode pulse data. */ static void encode_pulses(AACEncContext *s, Pulse *pulse) { int i; put_bits(&s->pb, 1, !!pulse->num_pulse); if (!pulse->num_pulse) return; put_bits(&s->pb, 2, pulse->num_pulse - 1); put_bits(&s->pb, 6, pulse->start); for (i = 0; i < pulse->num_pulse; i++) { put_bits(&s->pb, 5, pulse->pos[i]); put_bits(&s->pb, 4, pulse->amp[i]); } } /** * Encode spectral coefficients processed by psychoacoustic model. 
*/ static void encode_spectral_coeffs(AACEncContext *s, SingleChannelElement *sce) { int start, i, w, w2; for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { start = 0; for (i = 0; i < sce->ics.max_sfb; i++) { if (sce->zeroes[w*16 + i]) { start += sce->ics.swb_sizes[i]; continue; } for (w2 = w; w2 < w + sce->ics.group_len[w]; w2++) { s->coder->quantize_and_encode_band(s, &s->pb, &sce->coeffs[start + w2*128], NULL, sce->ics.swb_sizes[i], sce->sf_idx[w*16 + i], sce->band_type[w*16 + i], s->lambda, sce->ics.window_clipping[w]); } start += sce->ics.swb_sizes[i]; } } } /** * Downscale spectral coefficients for near-clipping windows to avoid artifacts */ static void avoid_clipping(AACEncContext *s, SingleChannelElement *sce) { int start, i, j, w; if (sce->ics.clip_avoidance_factor < 1.0f) { for (w = 0; w < sce->ics.num_windows; w++) { start = 0; for (i = 0; i < sce->ics.max_sfb; i++) { float *swb_coeffs = &sce->coeffs[start + w*128]; for (j = 0; j < sce->ics.swb_sizes[i]; j++) swb_coeffs[j] *= sce->ics.clip_avoidance_factor; start += sce->ics.swb_sizes[i]; } } } } /** * Encode one channel of audio data. */ static int encode_individual_channel(AVCodecContext *avctx, AACEncContext *s, SingleChannelElement *sce, int common_window) { put_bits(&s->pb, 8, sce->sf_idx[0]); if (!common_window) { put_ics_info(s, &sce->ics); if (s->coder->encode_main_pred) s->coder->encode_main_pred(s, sce); if (s->coder->encode_ltp_info) s->coder->encode_ltp_info(s, sce, 0); } encode_band_info(s, sce); encode_scale_factors(avctx, s, sce); encode_pulses(s, &sce->pulse); put_bits(&s->pb, 1, !!sce->tns.present); if (s->coder->encode_tns_info) s->coder->encode_tns_info(s, sce); put_bits(&s->pb, 1, 0); //ssr encode_spectral_coeffs(s, sce); return 0; } /** * Write some auxiliary information about the created AAC file. */ static void put_bitstream_info(AACEncContext *s, const char *name) { int i, namelen, padbits; namelen = strlen(name) + 2; put_bits(&s->pb, 3, TYPE_FIL); put_bits(&s->pb, 4, FFMIN(namelen, 15)); if (namelen >= 15) put_bits(&s->pb, 8, namelen - 14); put_bits(&s->pb, 4, 0); //extension type - filler padbits = -put_bits_count(&s->pb) & 7; avpriv_align_put_bits(&s->pb); for (i = 0; i < namelen - 2; i++) put_bits(&s->pb, 8, name[i]); put_bits(&s->pb, 12 - padbits, 0); } /* * Copy input samples. * Channels are reordered from libavcodec's default order to AAC order. */ static void copy_input_samples(AACEncContext *s, const AVFrame *frame) { int ch; int end = 2048 + (frame ? 
frame->nb_samples : 0); const uint8_t *channel_map = s->reorder_map; /* copy and remap input samples */ for (ch = 0; ch < s->channels; ch++) { /* copy last 1024 samples of previous frame to the start of the current frame */ memcpy(&s->planar_samples[ch][1024], &s->planar_samples[ch][2048], 1024 * sizeof(s->planar_samples[0][0])); /* copy new samples and zero any remaining samples */ if (frame) { memcpy(&s->planar_samples[ch][2048], frame->extended_data[channel_map[ch]], frame->nb_samples * sizeof(s->planar_samples[0][0])); } memset(&s->planar_samples[ch][end], 0, (3072 - end) * sizeof(s->planar_samples[0][0])); } } static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) { AACEncContext *s = avctx->priv_data; float **samples = s->planar_samples, *samples2, *la, *overlap; ChannelElement *cpe; SingleChannelElement *sce; IndividualChannelStream *ics; int i, its, ch, w, chans, tag, start_ch, ret, frame_bits; int target_bits, rate_bits, too_many_bits, too_few_bits; int ms_mode = 0, is_mode = 0, tns_mode = 0, pred_mode = 0; int chan_el_counter[4]; FFPsyWindowInfo windows[AAC_MAX_CHANNELS]; /* add current frame to queue */ if (frame) { if ((ret = ff_af_queue_add(&s->afq, frame)) < 0) return ret; } else { if (!s->afq.remaining_samples || (!s->afq.frame_alloc && !s->afq.frame_count)) return 0; } copy_input_samples(s, frame); if (s->psypp) ff_psy_preprocess(s->psypp, s->planar_samples, s->channels); if (!avctx->frame_number) return 0; start_ch = 0; for (i = 0; i < s->chan_map[0]; i++) { FFPsyWindowInfo* wi = windows + start_ch; tag = s->chan_map[i+1]; chans = tag == TYPE_CPE ? 2 : 1; cpe = &s->cpe[i]; for (ch = 0; ch < chans; ch++) { int k; float clip_avoidance_factor; sce = &cpe->ch[ch]; ics = &sce->ics; s->cur_channel = start_ch + ch; overlap = &samples[s->cur_channel][0]; samples2 = overlap + 1024; la = samples2 + (448+64); if (!frame) la = NULL; if (tag == TYPE_LFE) { wi[ch].window_type[0] = wi[ch].window_type[1] = ONLY_LONG_SEQUENCE; wi[ch].window_shape = 0; wi[ch].num_windows = 1; wi[ch].grouping[0] = 1; wi[ch].clipping[0] = 0; /* Only the lowest 12 coefficients are used in a LFE channel. * The expression below results in only the bottom 8 coefficients * being used for 11.025kHz to 16kHz sample rates. */ ics->num_swb = s->samplerate_index >= 8 ? 1 : 3; } else { wi[ch] = s->psy.model->window(&s->psy, samples2, la, s->cur_channel, ics->window_sequence[0]); } ics->window_sequence[1] = ics->window_sequence[0]; ics->window_sequence[0] = wi[ch].window_type[0]; ics->use_kb_window[1] = ics->use_kb_window[0]; ics->use_kb_window[0] = wi[ch].window_shape; ics->num_windows = wi[ch].num_windows; ics->swb_sizes = s->psy.bands [ics->num_windows == 8]; ics->num_swb = tag == TYPE_LFE ? ics->num_swb : s->psy.num_bands[ics->num_windows == 8]; ics->max_sfb = FFMIN(ics->max_sfb, ics->num_swb); ics->swb_offset = wi[ch].window_type[0] == EIGHT_SHORT_SEQUENCE ? ff_swb_offset_128 [s->samplerate_index]: ff_swb_offset_1024[s->samplerate_index]; ics->tns_max_bands = wi[ch].window_type[0] == EIGHT_SHORT_SEQUENCE ? 
ff_tns_max_bands_128 [s->samplerate_index]: ff_tns_max_bands_1024[s->samplerate_index]; for (w = 0; w < ics->num_windows; w++) ics->group_len[w] = wi[ch].grouping[w]; /* Calculate input sample maximums and evaluate clipping risk */ clip_avoidance_factor = 0.0f; for (w = 0; w < ics->num_windows; w++) { const float *wbuf = overlap + w * 128; const int wlen = 2048 / ics->num_windows; float max = 0; int j; /* mdct input is 2 * output */ for (j = 0; j < wlen; j++) max = FFMAX(max, fabsf(wbuf[j])); wi[ch].clipping[w] = max; } for (w = 0; w < ics->num_windows; w++) { if (wi[ch].clipping[w] > CLIP_AVOIDANCE_FACTOR) { ics->window_clipping[w] = 1; clip_avoidance_factor = FFMAX(clip_avoidance_factor, wi[ch].clipping[w]); } else { ics->window_clipping[w] = 0; } } if (clip_avoidance_factor > CLIP_AVOIDANCE_FACTOR) { ics->clip_avoidance_factor = CLIP_AVOIDANCE_FACTOR / clip_avoidance_factor; } else { ics->clip_avoidance_factor = 1.0f; } apply_window_and_mdct(s, sce, overlap); if (s->options.ltp && s->coder->update_ltp) { s->coder->update_ltp(s, sce); apply_window[sce->ics.window_sequence[0]](s->fdsp, sce, &sce->ltp_state[0]); s->mdct1024.mdct_calc(&s->mdct1024, sce->lcoeffs, sce->ret_buf); } for (k = 0; k < 1024; k++) { if (!(fabs(cpe->ch[ch].coeffs[k]) < 1E16)) { // Ensure headroom for energy calculation av_log(avctx, AV_LOG_ERROR, "Input contains (near) NaN/+-Inf\n"); return AVERROR(EINVAL); } } avoid_clipping(s, sce); } start_ch += chans; } if ((ret = ff_alloc_packet2(avctx, avpkt, 8192 * s->channels, 0)) < 0) return ret; frame_bits = its = 0; do { init_put_bits(&s->pb, avpkt->data, avpkt->size); if ((avctx->frame_number & 0xFF)==1 && !(avctx->flags & AV_CODEC_FLAG_BITEXACT)) put_bitstream_info(s, LIBAVCODEC_IDENT); start_ch = 0; target_bits = 0; memset(chan_el_counter, 0, sizeof(chan_el_counter)); for (i = 0; i < s->chan_map[0]; i++) { FFPsyWindowInfo* wi = windows + start_ch; const float *coeffs[2]; tag = s->chan_map[i+1]; chans = tag == TYPE_CPE ? 2 : 1; cpe = &s->cpe[i]; cpe->common_window = 0; memset(cpe->is_mask, 0, sizeof(cpe->is_mask)); memset(cpe->ms_mask, 0, sizeof(cpe->ms_mask)); put_bits(&s->pb, 3, tag); put_bits(&s->pb, 4, chan_el_counter[tag]++); for (ch = 0; ch < chans; ch++) { sce = &cpe->ch[ch]; coeffs[ch] = sce->coeffs; sce->ics.predictor_present = 0; sce->ics.ltp.present = 0; memset(sce->ics.ltp.used, 0, sizeof(sce->ics.ltp.used)); memset(sce->ics.prediction_used, 0, sizeof(sce->ics.prediction_used)); memset(&sce->tns, 0, sizeof(TemporalNoiseShaping)); for (w = 0; w < 128; w++) if (sce->band_type[w] > RESERVED_BT) sce->band_type[w] = 0; } s->psy.bitres.alloc = -1; s->psy.bitres.bits = s->last_frame_pb_count / s->channels; s->psy.model->analyze(&s->psy, start_ch, coeffs, wi); if (s->psy.bitres.alloc > 0) { /* Lambda unused here on purpose, we need to take psy's unscaled allocation */ target_bits += s->psy.bitres.alloc * (s->lambda / (avctx->global_quality ? 
avctx->global_quality : 120)); s->psy.bitres.alloc /= chans; } s->cur_type = tag; for (ch = 0; ch < chans; ch++) { s->cur_channel = start_ch + ch; if (s->options.pns && s->coder->mark_pns) s->coder->mark_pns(s, avctx, &cpe->ch[ch]); s->coder->search_for_quantizers(avctx, s, &cpe->ch[ch], s->lambda); } if (chans > 1 && wi[0].window_type[0] == wi[1].window_type[0] && wi[0].window_shape == wi[1].window_shape) { cpe->common_window = 1; for (w = 0; w < wi[0].num_windows; w++) { if (wi[0].grouping[w] != wi[1].grouping[w]) { cpe->common_window = 0; break; } } } for (ch = 0; ch < chans; ch++) { /* TNS and PNS */ sce = &cpe->ch[ch]; s->cur_channel = start_ch + ch; if (s->options.tns && s->coder->search_for_tns) s->coder->search_for_tns(s, sce); if (s->options.tns && s->coder->apply_tns_filt) s->coder->apply_tns_filt(s, sce); if (sce->tns.present) tns_mode = 1; if (s->options.pns && s->coder->search_for_pns) s->coder->search_for_pns(s, avctx, sce); } s->cur_channel = start_ch; if (s->options.intensity_stereo) { /* Intensity Stereo */ if (s->coder->search_for_is) s->coder->search_for_is(s, avctx, cpe); if (cpe->is_mode) is_mode = 1; apply_intensity_stereo(cpe); } if (s->options.pred) { /* Prediction */ for (ch = 0; ch < chans; ch++) { sce = &cpe->ch[ch]; s->cur_channel = start_ch + ch; if (s->options.pred && s->coder->search_for_pred) s->coder->search_for_pred(s, sce); if (cpe->ch[ch].ics.predictor_present) pred_mode = 1; } if (s->coder->adjust_common_pred) s->coder->adjust_common_pred(s, cpe); for (ch = 0; ch < chans; ch++) { sce = &cpe->ch[ch]; s->cur_channel = start_ch + ch; if (s->options.pred && s->coder->apply_main_pred) s->coder->apply_main_pred(s, sce); } s->cur_channel = start_ch; } if (s->options.mid_side) { /* Mid/Side stereo */ if (s->options.mid_side == -1 && s->coder->search_for_ms) s->coder->search_for_ms(s, cpe); else if (cpe->common_window) memset(cpe->ms_mask, 1, sizeof(cpe->ms_mask)); apply_mid_side_stereo(cpe); } adjust_frame_information(cpe, chans); if (s->options.ltp) { /* LTP */ for (ch = 0; ch < chans; ch++) { sce = &cpe->ch[ch]; s->cur_channel = start_ch + ch; if (s->coder->search_for_ltp) s->coder->search_for_ltp(s, sce, cpe->common_window); if (sce->ics.ltp.present) pred_mode = 1; } s->cur_channel = start_ch; if (s->coder->adjust_common_ltp) s->coder->adjust_common_ltp(s, cpe); } if (chans == 2) { put_bits(&s->pb, 1, cpe->common_window); if (cpe->common_window) { put_ics_info(s, &cpe->ch[0].ics); if (s->coder->encode_main_pred) s->coder->encode_main_pred(s, &cpe->ch[0]); if (s->coder->encode_ltp_info) s->coder->encode_ltp_info(s, &cpe->ch[0], 1); encode_ms_info(&s->pb, cpe); if (cpe->ms_mode) ms_mode = 1; } } for (ch = 0; ch < chans; ch++) { s->cur_channel = start_ch + ch; encode_individual_channel(avctx, s, &cpe->ch[ch], cpe->common_window); } start_ch += chans; } if (avctx->flags & AV_CODEC_FLAG_QSCALE) { /* When using a constant Q-scale, don't mess with lambda */ break; } /* rate control stuff * allow between the nominal bitrate, and what psy's bit reservoir says to target * but drift towards the nominal bitrate always */ frame_bits = put_bits_count(&s->pb); rate_bits = avctx->bit_rate * 1024 / avctx->sample_rate; rate_bits = FFMIN(rate_bits, 6144 * s->channels - 3); too_many_bits = FFMAX(target_bits, rate_bits); too_many_bits = FFMIN(too_many_bits, 6144 * s->channels - 3); too_few_bits = FFMIN(FFMAX(rate_bits - rate_bits/4, target_bits), too_many_bits); /* When using ABR, be strict (but only for increasing) */ too_few_bits = too_few_bits - too_few_bits/8; too_many_bits 
= too_many_bits + too_many_bits/2; if ( its == 0 /* for steady-state Q-scale tracking */ || (its < 5 && (frame_bits < too_few_bits || frame_bits > too_many_bits)) || frame_bits >= 6144 * s->channels - 3 ) { float ratio = ((float)rate_bits) / frame_bits; if (frame_bits >= too_few_bits && frame_bits <= too_many_bits) { /* * This path is for steady-state Q-scale tracking * When frame bits fall within the stable range, we still need to adjust * lambda to maintain it like so in a stable fashion (large jumps in lambda * create artifacts and should be avoided), but slowly */ ratio = sqrtf(sqrtf(ratio)); ratio = av_clipf(ratio, 0.9f, 1.1f); } else { /* Not so fast though */ ratio = sqrtf(ratio); } s->lambda = FFMIN(s->lambda * ratio, 65536.f); /* Keep iterating if we must reduce and lambda is in the sky */ if (ratio > 0.9f && ratio < 1.1f) { break; } else { if (is_mode || ms_mode || tns_mode || pred_mode) { for (i = 0; i < s->chan_map[0]; i++) { // Must restore coeffs chans = tag == TYPE_CPE ? 2 : 1; cpe = &s->cpe[i]; for (ch = 0; ch < chans; ch++) memcpy(cpe->ch[ch].coeffs, cpe->ch[ch].pcoeffs, sizeof(cpe->ch[ch].coeffs)); } } its++; } } else { break; } } while (1); if (s->options.ltp && s->coder->ltp_insert_new_frame) s->coder->ltp_insert_new_frame(s); put_bits(&s->pb, 3, TYPE_END); flush_put_bits(&s->pb); s->last_frame_pb_count = put_bits_count(&s->pb); s->lambda_sum += s->lambda; s->lambda_count++; ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts, &avpkt->duration); avpkt->size = put_bits_count(&s->pb) >> 3; *got_packet_ptr = 1; return 0; } static av_cold int aac_encode_end(AVCodecContext *avctx) { AACEncContext *s = avctx->priv_data; av_log(avctx, AV_LOG_INFO, "Qavg: %.3f\n", s->lambda_sum / s->lambda_count); ff_mdct_end(&s->mdct1024); ff_mdct_end(&s->mdct128); ff_psy_end(&s->psy); ff_lpc_end(&s->lpc); if (s->psypp) ff_psy_preprocess_end(s->psypp); av_freep(&s->buffer.samples); av_freep(&s->cpe); av_freep(&s->fdsp); ff_af_queue_close(&s->afq); return 0; } static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s) { int ret = 0; s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT); if (!s->fdsp) return AVERROR(ENOMEM); // window init ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024); ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128); ff_init_ff_sine_windows(10); ff_init_ff_sine_windows(7); if ((ret = ff_mdct_init(&s->mdct1024, 11, 0, 32768.0)) < 0) return ret; if ((ret = ff_mdct_init(&s->mdct128, 8, 0, 32768.0)) < 0) return ret; return 0; } static av_cold int alloc_buffers(AVCodecContext *avctx, AACEncContext *s) { int ch; FF_ALLOCZ_ARRAY_OR_GOTO(avctx, s->buffer.samples, s->channels, 3 * 1024 * sizeof(s->buffer.samples[0]), alloc_fail); FF_ALLOCZ_ARRAY_OR_GOTO(avctx, s->cpe, s->chan_map[0], sizeof(ChannelElement), alloc_fail); for(ch = 0; ch < s->channels; ch++) s->planar_samples[ch] = s->buffer.samples + 3 * 1024 * ch; return 0; alloc_fail: return AVERROR(ENOMEM); } static av_cold void aac_encode_init_tables(void) { ff_aac_tableinit(); } static av_cold int aac_encode_init(AVCodecContext *avctx) { AACEncContext *s = avctx->priv_data; int i, ret = 0; const uint8_t *sizes[2]; uint8_t grouping[AAC_MAX_CHANNELS]; int lengths[2]; /* Constants */ s->last_frame_pb_count = 0; avctx->frame_size = 1024; avctx->initial_padding = 1024; s->lambda = avctx->global_quality > 0 ? 
avctx->global_quality : 120; /* Channel map and unspecified bitrate guessing */ s->channels = avctx->channels; s->needs_pce = 1; for (i = 0; i < FF_ARRAY_ELEMS(aac_normal_chan_layouts); i++) { if (avctx->channel_layout == aac_normal_chan_layouts[i]) { s->needs_pce = s->options.pce; break; } } if (s->needs_pce) { for (i = 0; i < FF_ARRAY_ELEMS(aac_pce_configs); i++) if (avctx->channel_layout == aac_pce_configs[i].layout) break; ERROR_IF(i == FF_ARRAY_ELEMS(aac_pce_configs), "Unsupported channel layout\n"); av_log(avctx, AV_LOG_INFO, "Using a PCE to encode channel layout\n"); s->pce = aac_pce_configs[i]; s->reorder_map = s->pce.reorder_map; s->chan_map = s->pce.config_map; } else { s->reorder_map = aac_chan_maps[s->channels - 1]; s->chan_map = aac_chan_configs[s->channels - 1]; } if (!avctx->bit_rate) { for (i = 1; i <= s->chan_map[0]; i++) { avctx->bit_rate += s->chan_map[i] == TYPE_CPE ? 128000 : /* Pair */ s->chan_map[i] == TYPE_LFE ? 16000 : /* LFE */ 69000; /* SCE */ } } /* Samplerate */ for (i = 0; i < 16; i++) if (avctx->sample_rate == avpriv_mpeg4audio_sample_rates[i]) break; s->samplerate_index = i; ERROR_IF(s->samplerate_index == 16 || s->samplerate_index >= ff_aac_swb_size_1024_len || s->samplerate_index >= ff_aac_swb_size_128_len, "Unsupported sample rate %d\n", avctx->sample_rate); /* Bitrate limiting */ WARN_IF(1024.0 * avctx->bit_rate / avctx->sample_rate > 6144 * s->channels, "Too many bits %f > %d per frame requested, clamping to max\n", 1024.0 * avctx->bit_rate / avctx->sample_rate, 6144 * s->channels); avctx->bit_rate = (int64_t)FFMIN(6144 * s->channels / 1024.0 * avctx->sample_rate, avctx->bit_rate); /* Profile and option setting */ avctx->profile = avctx->profile == FF_PROFILE_UNKNOWN ? FF_PROFILE_AAC_LOW : avctx->profile; for (i = 0; i < FF_ARRAY_ELEMS(aacenc_profiles); i++) if (avctx->profile == aacenc_profiles[i]) break; if (avctx->profile == FF_PROFILE_MPEG2_AAC_LOW) { avctx->profile = FF_PROFILE_AAC_LOW; ERROR_IF(s->options.pred, "Main prediction unavailable in the \"mpeg2_aac_low\" profile\n"); ERROR_IF(s->options.ltp, "LTP prediction unavailable in the \"mpeg2_aac_low\" profile\n"); WARN_IF(s->options.pns, "PNS unavailable in the \"mpeg2_aac_low\" profile, turning off\n"); s->options.pns = 0; } else if (avctx->profile == FF_PROFILE_AAC_LTP) { s->options.ltp = 1; ERROR_IF(s->options.pred, "Main prediction unavailable in the \"aac_ltp\" profile\n"); } else if (avctx->profile == FF_PROFILE_AAC_MAIN) { s->options.pred = 1; ERROR_IF(s->options.ltp, "LTP prediction unavailable in the \"aac_main\" profile\n"); } else if (s->options.ltp) { avctx->profile = FF_PROFILE_AAC_LTP; WARN_IF(1, "Changing profile to \"aac_ltp\"\n"); ERROR_IF(s->options.pred, "Main prediction unavailable in the \"aac_ltp\" profile\n"); } else if (s->options.pred) { avctx->profile = FF_PROFILE_AAC_MAIN; WARN_IF(1, "Changing profile to \"aac_main\"\n"); ERROR_IF(s->options.ltp, "LTP prediction unavailable in the \"aac_main\" profile\n"); } s->profile = avctx->profile; /* Coder limitations */ s->coder = &ff_aac_coders[s->options.coder]; if (s->options.coder == AAC_CODER_ANMR) { ERROR_IF(avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL, "The ANMR coder is considered experimental, add -strict -2 to enable!\n"); s->options.intensity_stereo = 0; s->options.pns = 0; } ERROR_IF(s->options.ltp && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL, "The LTP profile requires experimental compliance, add -strict -2 to enable!\n"); /* M/S introduces horrible artifacts with multichannel files, 
this is temporary */ if (s->channels > 3) s->options.mid_side = 0; if ((ret = dsp_init(avctx, s)) < 0) goto fail; if ((ret = alloc_buffers(avctx, s)) < 0) goto fail; if ((ret = put_audio_specific_config(avctx))) goto fail; sizes[0] = ff_aac_swb_size_1024[s->samplerate_index]; sizes[1] = ff_aac_swb_size_128[s->samplerate_index]; lengths[0] = ff_aac_num_swb_1024[s->samplerate_index]; lengths[1] = ff_aac_num_swb_128[s->samplerate_index]; for (i = 0; i < s->chan_map[0]; i++) grouping[i] = s->chan_map[i + 1] == TYPE_CPE; if ((ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths, s->chan_map[0], grouping)) < 0) goto fail; s->psypp = ff_psy_preprocess_init(avctx); ff_lpc_init(&s->lpc, 2*avctx->frame_size, TNS_MAX_ORDER, FF_LPC_TYPE_LEVINSON); s->random_state = 0x1f2e3d4c; s->abs_pow34 = abs_pow34_v; s->quant_bands = quantize_bands; if (ARCH_X86) ff_aac_dsp_init_x86(s); if (HAVE_MIPSDSP) ff_aac_coder_init_mips(s); if ((ret = ff_thread_once(&aac_table_init, &aac_encode_init_tables)) != 0) return AVERROR_UNKNOWN; ff_af_queue_init(avctx, &s->afq); return 0; fail: aac_encode_end(avctx); return ret; } #define AACENC_FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM static const AVOption aacenc_options[] = { {"aac_coder", "Coding algorithm", offsetof(AACEncContext, options.coder), AV_OPT_TYPE_INT, {.i64 = AAC_CODER_FAST}, 0, AAC_CODER_NB-1, AACENC_FLAGS, "coder"}, {"anmr", "ANMR method", 0, AV_OPT_TYPE_CONST, {.i64 = AAC_CODER_ANMR}, INT_MIN, INT_MAX, AACENC_FLAGS, "coder"}, {"twoloop", "Two loop searching method", 0, AV_OPT_TYPE_CONST, {.i64 = AAC_CODER_TWOLOOP}, INT_MIN, INT_MAX, AACENC_FLAGS, "coder"}, {"fast", "Default fast search", 0, AV_OPT_TYPE_CONST, {.i64 = AAC_CODER_FAST}, INT_MIN, INT_MAX, AACENC_FLAGS, "coder"}, {"aac_ms", "Force M/S stereo coding", offsetof(AACEncContext, options.mid_side), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, AACENC_FLAGS}, {"aac_is", "Intensity stereo coding", offsetof(AACEncContext, options.intensity_stereo), AV_OPT_TYPE_BOOL, {.i64 = 1}, -1, 1, AACENC_FLAGS}, {"aac_pns", "Perceptual noise substitution", offsetof(AACEncContext, options.pns), AV_OPT_TYPE_BOOL, {.i64 = 1}, -1, 1, AACENC_FLAGS}, {"aac_tns", "Temporal noise shaping", offsetof(AACEncContext, options.tns), AV_OPT_TYPE_BOOL, {.i64 = 1}, -1, 1, AACENC_FLAGS}, {"aac_ltp", "Long term prediction", offsetof(AACEncContext, options.ltp), AV_OPT_TYPE_BOOL, {.i64 = 0}, -1, 1, AACENC_FLAGS}, {"aac_pred", "AAC-Main prediction", offsetof(AACEncContext, options.pred), AV_OPT_TYPE_BOOL, {.i64 = 0}, -1, 1, AACENC_FLAGS}, {"aac_pce", "Forces the use of PCEs", offsetof(AACEncContext, options.pce), AV_OPT_TYPE_BOOL, {.i64 = 0}, -1, 1, AACENC_FLAGS}, {NULL} }; static const AVClass aacenc_class = { .class_name = "AAC encoder", .item_name = av_default_item_name, .option = aacenc_options, .version = LIBAVUTIL_VERSION_INT, }; static const AVCodecDefault aac_encode_defaults[] = { { "b", "0" }, { NULL } }; AVCodec ff_aac_encoder = { .name = "aac", .long_name = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"), .type = AVMEDIA_TYPE_AUDIO, .id = AV_CODEC_ID_AAC, .priv_data_size = sizeof(AACEncContext), .init = aac_encode_init, .encode2 = aac_encode_frame, .close = aac_encode_end, .defaults = aac_encode_defaults, .supported_samplerates = mpeg4audio_sample_rates, .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE, .capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY, .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE }, .priv_class = &aacenc_class, };
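For orientation, this is how the encoder above is reached from the public libavcodec side: a minimal sketch, assuming FFmpeg 4.x-era API, that opens it in the constant-quality mode which the AV_CODEC_FLAG_QSCALE branch of aac_encode_frame() short-circuits on. The parameter values are illustrative and most error handling is trimmed.

#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>

static AVCodecContext *open_aac_constant_q(void)
{
    AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
    AVCodecContext *ctx;

    if (!codec)
        return NULL;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return NULL;

    ctx->sample_rate    = 44100;              /* must be an MPEG-4 rate */
    ctx->channels       = 2;
    ctx->channel_layout = AV_CH_LAYOUT_STEREO;
    ctx->sample_fmt     = AV_SAMPLE_FMT_FLTP; /* the only format this encoder takes */

    /* aac_encode_init() seeds lambda from global_quality, and the
     * AV_CODEC_FLAG_QSCALE branch in aac_encode_frame() then skips the
     * rate-control lambda adaptation entirely. */
    ctx->flags         |= AV_CODEC_FLAG_QSCALE;
    ctx->global_quality = 2 * FF_QP2LAMBDA;

    if (avcodec_open2(ctx, codec, NULL) < 0) {
        avcodec_free_context(&ctx);
        return NULL;
    }
    return ctx;
}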
null
null
null
null
71,124
11,788
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
11,788
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/omnibox/browser/tailored_word_break_iterator.h" namespace { constexpr base::char16 kUnderscore = '_'; } // namespace using base::i18n::BreakIterator; TailoredWordBreakIterator::TailoredWordBreakIterator( const base::StringPiece16& str, BreakIterator::BreakType break_type) : BreakIterator(str, break_type), prev_(0), pos_(0) { DCHECK_EQ(BreakIterator::BREAK_WORD, break_type); } TailoredWordBreakIterator::~TailoredWordBreakIterator() {} bool TailoredWordBreakIterator::Advance() { if (HasUnderscoreWord()) { AdvanceInUnderscoreWord(); return true; } if (!BreakIterator::Advance()) return false; prev_ = 0; pos_ = 0; underscore_word_.clear(); if (!IsWord()) return true; base::StringPiece16 word = BreakIterator::GetStringPiece(); if (word.find(kUnderscore) != base::StringPiece16::npos) { underscore_word_ = word; AdvanceInUnderscoreWord(); } return true; } bool TailoredWordBreakIterator::IsWord() const { if (HasUnderscoreWord()) { base::StringPiece16 word = GetStringPiece(); if (!word.empty()) return word[0] != kUnderscore; } return BreakIterator::IsWord(); } base::StringPiece16 TailoredWordBreakIterator::GetStringPiece() const { if (!underscore_word_.empty()) return underscore_word_.substr(prev_, pos_ - prev_); return BreakIterator::GetStringPiece(); } base::string16 TailoredWordBreakIterator::GetString() const { return GetStringPiece().as_string(); } size_t TailoredWordBreakIterator::prev() const { return BreakIterator::prev() + prev_; } size_t TailoredWordBreakIterator::pos() const { return BreakIterator::pos() + pos_; } bool TailoredWordBreakIterator::HasUnderscoreWord() const { return pos_ != underscore_word_.size(); } void TailoredWordBreakIterator::AdvanceInUnderscoreWord() { std::size_t next_pos = underscore_word_.find(kUnderscore, pos_); prev_ = pos_; if (next_pos == base::StringPiece16::npos) { pos_ = underscore_word_.size(); return; } // If an underscore is found at the current position, index moves to next // char. if (pos_ == next_pos) pos_ += 1; else pos_ = next_pos; }
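To make the Advance()/AdvanceInUnderscoreWord() contract above concrete: each underscore is emitted as its own single-character non-word token and the runs between underscores become words, so "foo_bar" yields "foo", "_", "bar". Below is a standalone rendering of that same splitting rule in plain C, purely for illustration; the real class stays layered on base::i18n::BreakIterator.

#include <stdio.h>
#include <string.h>

/* Mirror of AdvanceInUnderscoreWord()/IsWord() over a C string. */
static void split_underscore_word(const char *w)
{
    size_t prev = 0, pos = 0, len = strlen(w);

    while (pos != len) {
        const char *next = strchr(w + pos, '_');

        prev = pos;
        if (!next)
            pos = len;                /* no more underscores: rest is one word */
        else if ((size_t)(next - w) == pos)
            pos += 1;                 /* underscore at cursor: emit it alone */
        else
            pos = (size_t)(next - w); /* emit the run before the underscore */

        printf("token: '%.*s' (is_word=%d)\n",
               (int)(pos - prev), w + prev, w[prev] != '_');
    }
}

/* split_underscore_word("foo_bar") prints foo, _, bar in order. */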
null
null
null
null
8,651
23,226
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
188,221
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Marvell MMC/SD/SDIO driver * * Authors: Maen Suleiman, Nicolas Pitre * Copyright (C) 2008-2009 Marvell Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/mbus.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include <linux/irq.h> #include <linux/clk.h> #include <linux/of_irq.h> #include <linux/mmc/host.h> #include <linux/mmc/slot-gpio.h> #include <asm/sizes.h> #include <asm/unaligned.h> #include "mvsdio.h" #define DRIVER_NAME "mvsdio" static int maxfreq; static int nodma; struct mvsd_host { void __iomem *base; struct mmc_request *mrq; spinlock_t lock; unsigned int xfer_mode; unsigned int intr_en; unsigned int ctrl; unsigned int pio_size; void *pio_ptr; unsigned int sg_frags; unsigned int ns_per_clk; unsigned int clock; unsigned int base_clock; struct timer_list timer; struct mmc_host *mmc; struct device *dev; struct clk *clk; }; #define mvsd_write(offs, val) writel(val, iobase + (offs)) #define mvsd_read(offs) readl(iobase + (offs)) static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data) { void __iomem *iobase = host->base; unsigned int tmout; int tmout_index; /* * Hardware weirdness. The FIFO_EMPTY bit of the HW_STATE * register sometimes takes a while to get set when some * "unusual" data block sizes are used (such as with the SWITCH * command), even though the XFER_DONE interrupt was raised. * If another data transfer starts before this bit recovers * (which it eventually does by itself), the new transfer * simply fails with a timeout. */ if (!(mvsd_read(MVSD_HW_STATE) & (1 << 13))) { unsigned long t = jiffies + HZ; unsigned int hw_state, count = 0; do { hw_state = mvsd_read(MVSD_HW_STATE); if (time_after(jiffies, t)) { dev_warn(host->dev, "FIFO_EMPTY bit missing\n"); break; } count++; } while (!(hw_state & (1 << 13))); dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit " "(hw=0x%04x, count=%d, jiffies=%ld)\n", hw_state, count, jiffies - (t - HZ)); } /* If timeout=0 then the maximum timeout index is used. */ tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk); tmout += data->timeout_clks; tmout_index = fls(tmout - 1) - 12; if (tmout_index < 0) tmout_index = 0; if (tmout_index > MVSD_HOST_CTRL_TMOUT_MAX) tmout_index = MVSD_HOST_CTRL_TMOUT_MAX; dev_dbg(host->dev, "data %s at 0x%08x: blocks=%d blksz=%d tmout=%u (%d)\n", (data->flags & MMC_DATA_READ) ? "read" : "write", (u32)sg_virt(data->sg), data->blocks, data->blksz, tmout, tmout_index); host->ctrl &= ~MVSD_HOST_CTRL_TMOUT_MASK; host->ctrl |= MVSD_HOST_CTRL_TMOUT(tmout_index); mvsd_write(MVSD_HOST_CTRL, host->ctrl); mvsd_write(MVSD_BLK_COUNT, data->blocks); mvsd_write(MVSD_BLK_SIZE, data->blksz); if (nodma || (data->blksz | data->sg->offset) & 3 || ((!(data->flags & MMC_DATA_READ) && data->sg->offset & 0x3f))) { /* * We cannot do DMA on a buffer whose offset or size * is not aligned on a 4-byte boundary. * * It also appears the host-to-card DMA can corrupt * data when the buffer is not aligned on a 64-byte * boundary. 
*/ host->pio_size = data->blocks * data->blksz; host->pio_ptr = sg_virt(data->sg); if (!nodma) dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n", host->pio_ptr, host->pio_size); return 1; } else { dma_addr_t phys_addr; int dma_dir = (data->flags & MMC_DATA_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; host->sg_frags = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, dma_dir); phys_addr = sg_dma_address(data->sg); mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff); mvsd_write(MVSD_SYS_ADDR_HI, (u32)phys_addr >> 16); return 0; } } static void mvsd_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct mvsd_host *host = mmc_priv(mmc); void __iomem *iobase = host->base; struct mmc_command *cmd = mrq->cmd; u32 cmdreg = 0, xfer = 0, intr = 0; unsigned long flags; BUG_ON(host->mrq != NULL); host->mrq = mrq; dev_dbg(host->dev, "cmd %d (hw state 0x%04x)\n", cmd->opcode, mvsd_read(MVSD_HW_STATE)); cmdreg = MVSD_CMD_INDEX(cmd->opcode); if (cmd->flags & MMC_RSP_BUSY) cmdreg |= MVSD_CMD_RSP_48BUSY; else if (cmd->flags & MMC_RSP_136) cmdreg |= MVSD_CMD_RSP_136; else if (cmd->flags & MMC_RSP_PRESENT) cmdreg |= MVSD_CMD_RSP_48; else cmdreg |= MVSD_CMD_RSP_NONE; if (cmd->flags & MMC_RSP_CRC) cmdreg |= MVSD_CMD_CHECK_CMDCRC; if (cmd->flags & MMC_RSP_OPCODE) cmdreg |= MVSD_CMD_INDX_CHECK; if (cmd->flags & MMC_RSP_PRESENT) { cmdreg |= MVSD_UNEXPECTED_RESP; intr |= MVSD_NOR_UNEXP_RSP; } if (mrq->data) { struct mmc_data *data = mrq->data; int pio; cmdreg |= MVSD_CMD_DATA_PRESENT | MVSD_CMD_CHECK_DATACRC16; xfer |= MVSD_XFER_MODE_HW_WR_DATA_EN; if (data->flags & MMC_DATA_READ) xfer |= MVSD_XFER_MODE_TO_HOST; pio = mvsd_setup_data(host, data); if (pio) { xfer |= MVSD_XFER_MODE_PIO; /* PIO section of mvsd_irq has comments on those bits */ if (data->flags & MMC_DATA_WRITE) intr |= MVSD_NOR_TX_AVAIL; else if (host->pio_size > 32) intr |= MVSD_NOR_RX_FIFO_8W; else intr |= MVSD_NOR_RX_READY; } if (data->stop) { struct mmc_command *stop = data->stop; u32 cmd12reg = 0; mvsd_write(MVSD_AUTOCMD12_ARG_LOW, stop->arg & 0xffff); mvsd_write(MVSD_AUTOCMD12_ARG_HI, stop->arg >> 16); if (stop->flags & MMC_RSP_BUSY) cmd12reg |= MVSD_AUTOCMD12_BUSY; if (stop->flags & MMC_RSP_OPCODE) cmd12reg |= MVSD_AUTOCMD12_INDX_CHECK; cmd12reg |= MVSD_AUTOCMD12_INDEX(stop->opcode); mvsd_write(MVSD_AUTOCMD12_CMD, cmd12reg); xfer |= MVSD_XFER_MODE_AUTO_CMD12; intr |= MVSD_NOR_AUTOCMD12_DONE; } else { intr |= MVSD_NOR_XFER_DONE; } } else { intr |= MVSD_NOR_CMD_DONE; } mvsd_write(MVSD_ARG_LOW, cmd->arg & 0xffff); mvsd_write(MVSD_ARG_HI, cmd->arg >> 16); spin_lock_irqsave(&host->lock, flags); host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN; host->xfer_mode |= xfer; mvsd_write(MVSD_XFER_MODE, host->xfer_mode); mvsd_write(MVSD_NOR_INTR_STATUS, ~MVSD_NOR_CARD_INT); mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff); mvsd_write(MVSD_CMD, cmdreg); host->intr_en &= MVSD_NOR_CARD_INT; host->intr_en |= intr | MVSD_NOR_ERROR; mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); mvsd_write(MVSD_ERR_INTR_EN, 0xffff); mod_timer(&host->timer, jiffies + 5 * HZ); spin_unlock_irqrestore(&host->lock, flags); } static u32 mvsd_finish_cmd(struct mvsd_host *host, struct mmc_command *cmd, u32 err_status) { void __iomem *iobase = host->base; if (cmd->flags & MMC_RSP_136) { unsigned int response[8], i; for (i = 0; i < 8; i++) response[i] = mvsd_read(MVSD_RSP(i)); cmd->resp[0] = ((response[0] & 0x03ff) << 22) | ((response[1] & 0xffff) << 6) | ((response[2] & 0xfc00) >> 10); cmd->resp[1] = ((response[2] & 0x03ff) << 22) | ((response[3] & 0xffff) << 6) | ((response[4] 
& 0xfc00) >> 10); cmd->resp[2] = ((response[4] & 0x03ff) << 22) | ((response[5] & 0xffff) << 6) | ((response[6] & 0xfc00) >> 10); cmd->resp[3] = ((response[6] & 0x03ff) << 22) | ((response[7] & 0x3fff) << 8); } else if (cmd->flags & MMC_RSP_PRESENT) { unsigned int response[3], i; for (i = 0; i < 3; i++) response[i] = mvsd_read(MVSD_RSP(i)); cmd->resp[0] = ((response[2] & 0x003f) << (8 - 8)) | ((response[1] & 0xffff) << (14 - 8)) | ((response[0] & 0x03ff) << (30 - 8)); cmd->resp[1] = ((response[0] & 0xfc00) >> 10); cmd->resp[2] = 0; cmd->resp[3] = 0; } if (err_status & MVSD_ERR_CMD_TIMEOUT) { cmd->error = -ETIMEDOUT; } else if (err_status & (MVSD_ERR_CMD_CRC | MVSD_ERR_CMD_ENDBIT | MVSD_ERR_CMD_INDEX | MVSD_ERR_CMD_STARTBIT)) { cmd->error = -EILSEQ; } err_status &= ~(MVSD_ERR_CMD_TIMEOUT | MVSD_ERR_CMD_CRC | MVSD_ERR_CMD_ENDBIT | MVSD_ERR_CMD_INDEX | MVSD_ERR_CMD_STARTBIT); return err_status; } static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data, u32 err_status) { void __iomem *iobase = host->base; if (host->pio_ptr) { host->pio_ptr = NULL; host->pio_size = 0; } else { dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags, (data->flags & MMC_DATA_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } if (err_status & MVSD_ERR_DATA_TIMEOUT) data->error = -ETIMEDOUT; else if (err_status & (MVSD_ERR_DATA_CRC | MVSD_ERR_DATA_ENDBIT)) data->error = -EILSEQ; else if (err_status & MVSD_ERR_XFER_SIZE) data->error = -EBADE; err_status &= ~(MVSD_ERR_DATA_TIMEOUT | MVSD_ERR_DATA_CRC | MVSD_ERR_DATA_ENDBIT | MVSD_ERR_XFER_SIZE); dev_dbg(host->dev, "data done: blocks_left=%d, bytes_left=%d\n", mvsd_read(MVSD_CURR_BLK_LEFT), mvsd_read(MVSD_CURR_BYTE_LEFT)); data->bytes_xfered = (data->blocks - mvsd_read(MVSD_CURR_BLK_LEFT)) * data->blksz; /* We can't be sure about the last block when errors are detected */ if (data->bytes_xfered && data->error) data->bytes_xfered -= data->blksz; /* Handle Auto cmd 12 response */ if (data->stop) { unsigned int response[3], i; for (i = 0; i < 3; i++) response[i] = mvsd_read(MVSD_AUTO_RSP(i)); data->stop->resp[0] = ((response[2] & 0x003f) << (8 - 8)) | ((response[1] & 0xffff) << (14 - 8)) | ((response[0] & 0x03ff) << (30 - 8)); data->stop->resp[1] = ((response[0] & 0xfc00) >> 10); data->stop->resp[2] = 0; data->stop->resp[3] = 0; if (err_status & MVSD_ERR_AUTOCMD12) { u32 err_cmd12 = mvsd_read(MVSD_AUTOCMD12_ERR_STATUS); dev_dbg(host->dev, "c12err 0x%04x\n", err_cmd12); if (err_cmd12 & MVSD_AUTOCMD12_ERR_NOTEXE) data->stop->error = -ENOEXEC; else if (err_cmd12 & MVSD_AUTOCMD12_ERR_TIMEOUT) data->stop->error = -ETIMEDOUT; else if (err_cmd12) data->stop->error = -EILSEQ; err_status &= ~MVSD_ERR_AUTOCMD12; } } return err_status; } static irqreturn_t mvsd_irq(int irq, void *dev) { struct mvsd_host *host = dev; void __iomem *iobase = host->base; u32 intr_status, intr_done_mask; int irq_handled = 0; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); dev_dbg(host->dev, "intr 0x%04x intr_en 0x%04x hw_state 0x%04x\n", intr_status, mvsd_read(MVSD_NOR_INTR_EN), mvsd_read(MVSD_HW_STATE)); /* * It looks like, SDIO IP can issue one late, spurious irq * although all irqs should be disabled. To work around this, * bail out early, if we didn't expect any irqs to occur. 
*/ if (!mvsd_read(MVSD_NOR_INTR_EN) && !mvsd_read(MVSD_ERR_INTR_EN)) { dev_dbg(host->dev, "spurious irq detected intr 0x%04x intr_en 0x%04x erri 0x%04x erri_en 0x%04x\n", mvsd_read(MVSD_NOR_INTR_STATUS), mvsd_read(MVSD_NOR_INTR_EN), mvsd_read(MVSD_ERR_INTR_STATUS), mvsd_read(MVSD_ERR_INTR_EN)); return IRQ_HANDLED; } spin_lock(&host->lock); /* PIO handling, if needed. Messy business... */ if (host->pio_size && (intr_status & host->intr_en & (MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W))) { u16 *p = host->pio_ptr; int s = host->pio_size; while (s >= 32 && (intr_status & MVSD_NOR_RX_FIFO_8W)) { readsw(iobase + MVSD_FIFO, p, 16); p += 16; s -= 32; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } /* * Normally we'd use < 32 here, but the RX_FIFO_8W bit * doesn't appear to assert when there is exactly 32 bytes * (8 words) left to fetch in a transfer. */ if (s <= 32) { while (s >= 4 && (intr_status & MVSD_NOR_RX_READY)) { put_unaligned(mvsd_read(MVSD_FIFO), p++); put_unaligned(mvsd_read(MVSD_FIFO), p++); s -= 4; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } if (s && s < 4 && (intr_status & MVSD_NOR_RX_READY)) { u16 val[2] = {0, 0}; val[0] = mvsd_read(MVSD_FIFO); val[1] = mvsd_read(MVSD_FIFO); memcpy(p, ((void *)&val) + 4 - s, s); s = 0; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } if (s == 0) { host->intr_en &= ~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W); mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); } else if (host->intr_en & MVSD_NOR_RX_FIFO_8W) { host->intr_en &= ~MVSD_NOR_RX_FIFO_8W; host->intr_en |= MVSD_NOR_RX_READY; mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); } } dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n", s, intr_status, mvsd_read(MVSD_HW_STATE)); host->pio_ptr = p; host->pio_size = s; irq_handled = 1; } else if (host->pio_size && (intr_status & host->intr_en & (MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W))) { u16 *p = host->pio_ptr; int s = host->pio_size; /* * The TX_FIFO_8W bit is unreliable. When set, bursting * 16 halfwords all at once in the FIFO drops data. Actually * TX_AVAIL does go off after only one word is pushed even if * TX_FIFO_8W remains set. 
*/ while (s >= 4 && (intr_status & MVSD_NOR_TX_AVAIL)) { mvsd_write(MVSD_FIFO, get_unaligned(p++)); mvsd_write(MVSD_FIFO, get_unaligned(p++)); s -= 4; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } if (s < 4) { if (s && (intr_status & MVSD_NOR_TX_AVAIL)) { u16 val[2] = {0, 0}; memcpy(((void *)&val) + 4 - s, p, s); mvsd_write(MVSD_FIFO, val[0]); mvsd_write(MVSD_FIFO, val[1]); s = 0; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } if (s == 0) { host->intr_en &= ~(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W); mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); } } dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n", s, intr_status, mvsd_read(MVSD_HW_STATE)); host->pio_ptr = p; host->pio_size = s; irq_handled = 1; } mvsd_write(MVSD_NOR_INTR_STATUS, intr_status); intr_done_mask = MVSD_NOR_CARD_INT | MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W | MVSD_NOR_TX_FIFO_8W; if (intr_status & host->intr_en & ~intr_done_mask) { struct mmc_request *mrq = host->mrq; struct mmc_command *cmd = mrq->cmd; u32 err_status = 0; del_timer(&host->timer); host->mrq = NULL; host->intr_en &= MVSD_NOR_CARD_INT; mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); mvsd_write(MVSD_ERR_INTR_EN, 0); spin_unlock(&host->lock); if (intr_status & MVSD_NOR_UNEXP_RSP) { cmd->error = -EPROTO; } else if (intr_status & MVSD_NOR_ERROR) { err_status = mvsd_read(MVSD_ERR_INTR_STATUS); dev_dbg(host->dev, "err 0x%04x\n", err_status); } err_status = mvsd_finish_cmd(host, cmd, err_status); if (mrq->data) err_status = mvsd_finish_data(host, mrq->data, err_status); if (err_status) { dev_err(host->dev, "unhandled error status %#04x\n", err_status); cmd->error = -ENOMSG; } mmc_request_done(host->mmc, mrq); irq_handled = 1; } else spin_unlock(&host->lock); if (intr_status & MVSD_NOR_CARD_INT) { mmc_signal_sdio_irq(host->mmc); irq_handled = 1; } if (irq_handled) return IRQ_HANDLED; dev_err(host->dev, "unhandled interrupt status=0x%04x en=0x%04x pio=%d\n", intr_status, host->intr_en, host->pio_size); return IRQ_NONE; } static void mvsd_timeout_timer(unsigned long data) { struct mvsd_host *host = (struct mvsd_host *)data; void __iomem *iobase = host->base; struct mmc_request *mrq; unsigned long flags; spin_lock_irqsave(&host->lock, flags); mrq = host->mrq; if (mrq) { dev_err(host->dev, "Timeout waiting for hardware interrupt.\n"); dev_err(host->dev, "hw_state=0x%04x, intr_status=0x%04x intr_en=0x%04x\n", mvsd_read(MVSD_HW_STATE), mvsd_read(MVSD_NOR_INTR_STATUS), mvsd_read(MVSD_NOR_INTR_EN)); host->mrq = NULL; mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW); host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN; mvsd_write(MVSD_XFER_MODE, host->xfer_mode); host->intr_en &= MVSD_NOR_CARD_INT; mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); mvsd_write(MVSD_ERR_INTR_EN, 0); mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff); mrq->cmd->error = -ETIMEDOUT; mvsd_finish_cmd(host, mrq->cmd, 0); if (mrq->data) { mrq->data->error = -ETIMEDOUT; mvsd_finish_data(host, mrq->data, 0); } } spin_unlock_irqrestore(&host->lock, flags); if (mrq) mmc_request_done(host->mmc, mrq); } static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct mvsd_host *host = mmc_priv(mmc); void __iomem *iobase = host->base; unsigned long flags; spin_lock_irqsave(&host->lock, flags); if (enable) { host->xfer_mode |= MVSD_XFER_MODE_INT_CHK_EN; host->intr_en |= MVSD_NOR_CARD_INT; } else { host->xfer_mode &= ~MVSD_XFER_MODE_INT_CHK_EN; host->intr_en &= ~MVSD_NOR_CARD_INT; } mvsd_write(MVSD_XFER_MODE, host->xfer_mode); mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); spin_unlock_irqrestore(&host->lock, flags); } 
static void mvsd_power_up(struct mvsd_host *host) { void __iomem *iobase = host->base; dev_dbg(host->dev, "power up\n"); mvsd_write(MVSD_NOR_INTR_EN, 0); mvsd_write(MVSD_ERR_INTR_EN, 0); mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW); mvsd_write(MVSD_XFER_MODE, 0); mvsd_write(MVSD_NOR_STATUS_EN, 0xffff); mvsd_write(MVSD_ERR_STATUS_EN, 0xffff); mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff); mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff); } static void mvsd_power_down(struct mvsd_host *host) { void __iomem *iobase = host->base; dev_dbg(host->dev, "power down\n"); mvsd_write(MVSD_NOR_INTR_EN, 0); mvsd_write(MVSD_ERR_INTR_EN, 0); mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW); mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK); mvsd_write(MVSD_NOR_STATUS_EN, 0); mvsd_write(MVSD_ERR_STATUS_EN, 0); mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff); mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff); } static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct mvsd_host *host = mmc_priv(mmc); void __iomem *iobase = host->base; u32 ctrl_reg = 0; if (ios->power_mode == MMC_POWER_UP) mvsd_power_up(host); if (ios->clock == 0) { mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK); mvsd_write(MVSD_CLK_DIV, MVSD_BASE_DIV_MAX); host->clock = 0; dev_dbg(host->dev, "clock off\n"); } else if (ios->clock != host->clock) { u32 m = DIV_ROUND_UP(host->base_clock, ios->clock) - 1; if (m > MVSD_BASE_DIV_MAX) m = MVSD_BASE_DIV_MAX; mvsd_write(MVSD_CLK_DIV, m); host->clock = ios->clock; host->ns_per_clk = 1000000000 / (host->base_clock / (m+1)); dev_dbg(host->dev, "clock=%d (%d), div=0x%04x\n", ios->clock, host->base_clock / (m+1), m); } /* default transfer mode */ ctrl_reg |= MVSD_HOST_CTRL_BIG_ENDIAN; ctrl_reg &= ~MVSD_HOST_CTRL_LSB_FIRST; /* default to maximum timeout */ ctrl_reg |= MVSD_HOST_CTRL_TMOUT_MASK; ctrl_reg |= MVSD_HOST_CTRL_TMOUT_EN; if (ios->bus_mode == MMC_BUSMODE_PUSHPULL) ctrl_reg |= MVSD_HOST_CTRL_PUSH_PULL_EN; if (ios->bus_width == MMC_BUS_WIDTH_4) ctrl_reg |= MVSD_HOST_CTRL_DATA_WIDTH_4_BITS; /* * The HI_SPEED_EN bit is causing trouble with many (but not all) * high speed SD, SDHC and SDIO cards. Not enabling that bit * makes all cards work. So let's just ignore that bit for now * and revisit this issue if problems for not enabling this bit * are ever reported. */ #if 0 if (ios->timing == MMC_TIMING_MMC_HS || ios->timing == MMC_TIMING_SD_HS) ctrl_reg |= MVSD_HOST_CTRL_HI_SPEED_EN; #endif host->ctrl = ctrl_reg; mvsd_write(MVSD_HOST_CTRL, ctrl_reg); dev_dbg(host->dev, "ctrl 0x%04x: %s %s %s\n", ctrl_reg, (ctrl_reg & MVSD_HOST_CTRL_PUSH_PULL_EN) ? "push-pull" : "open-drain", (ctrl_reg & MVSD_HOST_CTRL_DATA_WIDTH_4_BITS) ? "4bit-width" : "1bit-width", (ctrl_reg & MVSD_HOST_CTRL_HI_SPEED_EN) ? 
"high-speed" : ""); if (ios->power_mode == MMC_POWER_OFF) mvsd_power_down(host); } static const struct mmc_host_ops mvsd_ops = { .request = mvsd_request, .get_ro = mmc_gpio_get_ro, .set_ios = mvsd_set_ios, .enable_sdio_irq = mvsd_enable_sdio_irq, }; static void mv_conf_mbus_windows(struct mvsd_host *host, const struct mbus_dram_target_info *dram) { void __iomem *iobase = host->base; int i; for (i = 0; i < 4; i++) { writel(0, iobase + MVSD_WINDOW_CTRL(i)); writel(0, iobase + MVSD_WINDOW_BASE(i)); } for (i = 0; i < dram->num_cs; i++) { const struct mbus_dram_window *cs = dram->cs + i; writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) | (dram->mbus_dram_target_id << 4) | 1, iobase + MVSD_WINDOW_CTRL(i)); writel(cs->base, iobase + MVSD_WINDOW_BASE(i)); } } static int mvsd_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct mmc_host *mmc = NULL; struct mvsd_host *host = NULL; const struct mbus_dram_target_info *dram; struct resource *r; int ret, irq; if (!np) { dev_err(&pdev->dev, "no DT node\n"); return -ENODEV; } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!r || irq < 0) return -ENXIO; mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto out; } host = mmc_priv(mmc); host->mmc = mmc; host->dev = &pdev->dev; /* * Some non-DT platforms do not pass a clock, and the clock * frequency is passed through platform_data. On DT platforms, * a clock must always be passed, even if there is no gatable * clock associated to the SDIO interface (it can simply be a * fixed rate clock). */ host->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(host->clk)) { dev_err(&pdev->dev, "no clock associated\n"); ret = -EINVAL; goto out; } clk_prepare_enable(host->clk); mmc->ops = &mvsd_ops; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->f_min = DIV_ROUND_UP(host->base_clock, MVSD_BASE_DIV_MAX); mmc->f_max = MVSD_CLOCKRATE_MAX; mmc->max_blk_size = 2048; mmc->max_blk_count = 65535; mmc->max_segs = 1; mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; host->base_clock = clk_get_rate(host->clk) / 2; ret = mmc_of_parse(mmc); if (ret < 0) goto out; if (maxfreq) mmc->f_max = maxfreq; spin_lock_init(&host->lock); host->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(host->base)) { ret = PTR_ERR(host->base); goto out; } /* (Re-)program MBUS remapping windows if we are asked to. 
*/ dram = mv_mbus_dram_info(); if (dram) mv_conf_mbus_windows(host, dram); mvsd_power_down(host); ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host); if (ret) { dev_err(&pdev->dev, "cannot assign irq %d\n", irq); goto out; } setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host); platform_set_drvdata(pdev, mmc); ret = mmc_add_host(mmc); if (ret) goto out; if (!(mmc->caps & MMC_CAP_NEEDS_POLL)) dev_dbg(&pdev->dev, "using GPIO for card detection\n"); else dev_dbg(&pdev->dev, "lacking card detect (fall back to polling)\n"); return 0; out: if (mmc) { if (!IS_ERR(host->clk)) clk_disable_unprepare(host->clk); mmc_free_host(mmc); } return ret; } static int mvsd_remove(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); struct mvsd_host *host = mmc_priv(mmc); mmc_remove_host(mmc); del_timer_sync(&host->timer); mvsd_power_down(host); if (!IS_ERR(host->clk)) clk_disable_unprepare(host->clk); mmc_free_host(mmc); return 0; } static const struct of_device_id mvsdio_dt_ids[] = { { .compatible = "marvell,orion-sdio" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mvsdio_dt_ids); static struct platform_driver mvsd_driver = { .probe = mvsd_probe, .remove = mvsd_remove, .driver = { .name = DRIVER_NAME, .of_match_table = mvsdio_dt_ids, }, }; module_platform_driver(mvsd_driver); /* maximum card clock frequency (default 50MHz) */ module_param(maxfreq, int, 0); /* force PIO transfers all the time */ module_param(nodma, int, 0); MODULE_AUTHOR("Maen Suleiman, Nicolas Pitre"); MODULE_DESCRIPTION("Marvell MMC,SD,SDIO Host Controller driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:mvsdio");
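A note on the clock setup above: mvsd_set_ios() picks the smallest divisor m such that base_clock / (m + 1) does not exceed the requested rate. The sketch below restates that arithmetic as a standalone user-space program; the 0xff ceiling (standing in for MVSD_BASE_DIV_MAX) and the 166 MHz source clock, halved as in probe, are assumptions for illustration only.

#include <stdio.h>

#define BASE_DIV_MAX 0xff /* assumed value of MVSD_BASE_DIV_MAX */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Pick the divisor 'm' so that base_clock / (m + 1) <= target_hz. */
static unsigned int mvsd_pick_div(unsigned int base_clock, unsigned int target_hz)
{
	unsigned int m = DIV_ROUND_UP(base_clock, target_hz) - 1;

	if (m > BASE_DIV_MAX)
		m = BASE_DIV_MAX;
	return m;
}

int main(void)
{
	unsigned int base = 166000000 / 2; /* hypothetical 166 MHz TCLK, halved */
	unsigned int targets[] = { 25000000, 50000000, 400000 };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int m = mvsd_pick_div(base, targets[i]);
		printf("target %9u Hz -> div 0x%02x -> actual %u Hz, %u ns/clk\n",
		       targets[i], m, base / (m + 1),
		       1000000000U / (base / (m + 1)));
	}
	return 0;
}

For a 400 kHz identification clock this yields divisor 207 and an actual rate just under the target, which is the safe direction for SD timing.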
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 96,568

file_name: 10,920
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
unique_id: 175,915
project: linux
target: 0
repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
/* spinlock.h: 32-bit Sparc spinlock support. * * Copyright (C) 1997 David S. Miller ([email protected]) */ #ifndef __SPARC_SPINLOCK_H #define __SPARC_SPINLOCK_H #ifndef __ASSEMBLY__ #include <asm/psr.h> #include <asm/barrier.h> #include <asm/processor.h> /* for cpu_relax */ #define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { smp_cond_load_acquire(&lock->lock, !VAL); } static inline void arch_spin_lock(arch_spinlock_t *lock) { __asm__ __volatile__( "\n1:\n\t" "ldstub [%0], %%g2\n\t" "orcc %%g2, 0x0, %%g0\n\t" "bne,a 2f\n\t" " ldub [%0], %%g2\n\t" ".subsection 2\n" "2:\n\t" "orcc %%g2, 0x0, %%g0\n\t" "bne,a 2b\n\t" " ldub [%0], %%g2\n\t" "b,a 1b\n\t" ".previous\n" : /* no outputs */ : "r" (lock) : "g2", "memory", "cc"); } static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned int result; __asm__ __volatile__("ldstub [%1], %0" : "=r" (result) : "r" (lock) : "memory"); return (result == 0); } static inline void arch_spin_unlock(arch_spinlock_t *lock) { __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); } /* Read-write spinlocks, allowing multiple readers * but only one writer. * * NOTE! it is quite common to have readers in interrupts * but no interrupt writers. For those circumstances we * can "mix" irq-safe locks - any writer needs to get a * irq-safe write-lock, but readers can get non-irqsafe * read-locks. * * XXX This might create some problems with my dual spinlock * XXX scheme, deadlocks etc. -DaveM * * Sort of like atomic_t's on Sparc, but even more clever. * * ------------------------------------ * | 24-bit counter | wlock | arch_rwlock_t * ------------------------------------ * 31 8 7 0 * * wlock signifies the one writer is in or somebody is updating * counter. For a writer, if he successfully acquires the wlock, * but counter is non-zero, he has to release the lock and wait, * till both counter and wlock are zero. * * Unfortunately this scheme limits us to ~16,000,000 cpus. 
*/ static inline void __arch_read_lock(arch_rwlock_t *rw) { register arch_rwlock_t *lp asm("g1"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" "call ___rw_read_enter\n\t" " ldstub [%%g1 + 3], %%g2\n" : /* no outputs */ : "r" (lp) : "g2", "g4", "memory", "cc"); } #define arch_read_lock(lock) \ do { unsigned long flags; \ local_irq_save(flags); \ __arch_read_lock(lock); \ local_irq_restore(flags); \ } while(0) static inline void __arch_read_unlock(arch_rwlock_t *rw) { register arch_rwlock_t *lp asm("g1"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" "call ___rw_read_exit\n\t" " ldstub [%%g1 + 3], %%g2\n" : /* no outputs */ : "r" (lp) : "g2", "g4", "memory", "cc"); } #define arch_read_unlock(lock) \ do { unsigned long flags; \ local_irq_save(flags); \ __arch_read_unlock(lock); \ local_irq_restore(flags); \ } while(0) static inline void arch_write_lock(arch_rwlock_t *rw) { register arch_rwlock_t *lp asm("g1"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" "call ___rw_write_enter\n\t" " ldstub [%%g1 + 3], %%g2\n" : /* no outputs */ : "r" (lp) : "g2", "g4", "memory", "cc"); *(volatile __u32 *)&lp->lock = ~0U; } static inline void arch_write_unlock(arch_rwlock_t *lock) { __asm__ __volatile__( " st %%g0, [%0]" : /* no outputs */ : "r" (lock) : "memory"); } static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned int val; __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&rw->lock) : "memory"); if (val == 0) { val = rw->lock & ~0xff; if (val) ((volatile u8*)&rw->lock)[3] = 0; else *(volatile u32*)&rw->lock = ~0U; } return (val == 0); } static inline int __arch_read_trylock(arch_rwlock_t *rw) { register arch_rwlock_t *lp asm("g1"); register int res asm("o0"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" "call ___rw_read_try\n\t" " ldstub [%%g1 + 3], %%g2\n" : "=r" (res) : "r" (lp) : "g2", "g4", "memory", "cc"); return res; } #define arch_read_trylock(lock) \ ({ unsigned long flags; \ int res; \ local_irq_save(flags); \ res = __arch_read_trylock(lock); \ local_irq_restore(flags); \ res; \ }) #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) #define arch_read_lock_flags(rw, flags) arch_read_lock(rw) #define arch_write_lock_flags(rw, flags) arch_write_lock(rw) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() #define arch_write_relax(lock) cpu_relax() #define arch_read_can_lock(rw) (!((rw)->lock & 0xff)) #define arch_write_can_lock(rw) (!(rw)->lock) #endif /* !(__ASSEMBLY__) */ #endif /* __SPARC_SPINLOCK_H */
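The write-lock path above is easier to follow with the byte layout in mind: ldstub [lock + 3] test-and-sets the low-order wlock byte (offset 3 on big-endian SPARC), and the upper 24 bits count readers. Below is a user-space emulation of arch_write_trylock() using C11 atomics in place of ldstub; it is a sketch of the ordering rule only, not a substitute for the kernel's assembly.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef _Atomic uint32_t rwlock_word;

/*
 * Try to take the write lock: grab the wlock byte first (as ldstub
 * [lock + 3] does above), then fail if any reader still holds the
 * upper 24-bit counter.
 */
static bool write_trylock(rwlock_word *lock)
{
	/* Emulated ldstub on the low byte: set it, remember old value. */
	uint32_t old = atomic_fetch_or(lock, 0xffu);

	if (old & 0xffu)
		return false;	/* wlock already held */

	if (atomic_load(lock) & ~0xffu) {
		/* readers present: release the wlock byte, caller retries */
		atomic_fetch_and(lock, ~(uint32_t)0xffu);
		return false;
	}

	atomic_store(lock, ~0u);	/* write-locked marker, as in the kernel */
	return true;
}

static void write_unlock(rwlock_word *lock)
{
	atomic_store(lock, 0);
}

int main(void)
{
	rwlock_word lock = 0;
	bool ok = write_trylock(&lock);

	if (ok)
		write_unlock(&lock);
	return ok ? 0 : 1;
}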
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 84,262

file_name: 20,178
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
unique_id: 185,173
project: linux
target: 0
repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. * Copyright (c) 2006, 2007 Cisco Systems. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef MLX4_FW_H #define MLX4_FW_H #include "mlx4.h" #include "icm.h" struct mlx4_mod_stat_cfg { u8 log_pg_sz; u8 log_pg_sz_m; }; struct mlx4_port_cap { u8 link_state; u8 supported_port_types; u8 suggested_type; u8 default_sense; u8 log_max_macs; u8 log_max_vlans; int ib_mtu; int max_port_width; int max_vl; int max_tc_eth; int max_gids; int max_pkeys; u64 def_mac; u16 eth_mtu; int trans_type; int vendor_oui; u16 wavelength; u64 trans_code; u8 dmfs_optimized_state; }; struct mlx4_dev_cap { int max_srq_sz; int max_qp_sz; int reserved_qps; int max_qps; int reserved_srqs; int max_srqs; int max_cq_sz; int reserved_cqs; int max_cqs; int max_mpts; int reserved_eqs; int max_eqs; int num_sys_eqs; int reserved_mtts; int reserved_mrws; int max_requester_per_qp; int max_responder_per_qp; int max_rdma_global; int local_ca_ack_delay; int num_ports; u32 max_msg_sz; u16 stat_rate_support; int fs_log_max_ucast_qp_range_size; int fs_max_num_qp_per_entry; u64 flags; u64 flags2; int reserved_uars; int uar_size; int min_page_sz; int bf_reg_size; int bf_regs_per_page; int max_sq_sg; int max_sq_desc_sz; int max_rq_sg; int max_rq_desc_sz; int max_qp_per_mcg; int reserved_mgms; int max_mcgs; int reserved_pds; int max_pds; int reserved_xrcds; int max_xrcds; int qpc_entry_sz; int rdmarc_entry_sz; int altc_entry_sz; int aux_entry_sz; int srq_entry_sz; int cqc_entry_sz; int eqc_entry_sz; int dmpt_entry_sz; int cmpt_entry_sz; int mtt_entry_sz; int resize_srq; u32 bmme_flags; u32 reserved_lkey; u64 max_icm_sz; int max_gso_sz; int max_rss_tbl_sz; u32 max_counters; u32 dmfs_high_rate_qpn_base; u32 dmfs_high_rate_qpn_range; struct mlx4_rate_limit_caps rl_caps; struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; }; struct mlx4_func_cap { u8 num_ports; u8 flags; u32 pf_context_behaviour; int qp_quota; int cq_quota; int srq_quota; int mpt_quota; int mtt_quota; int max_eq; int reserved_eq; int mcg_quota; u32 qp0_qkey; u32 qp0_tunnel_qpn; u32 qp0_proxy_qpn; u32 qp1_tunnel_qpn; u32 qp1_proxy_qpn; u32 reserved_lkey; u8 physical_port; u8 flags0; u8 flags1; u64 
phys_port_id; u32 extra_flags; }; struct mlx4_func { int bus; int device; int function; int physical_function; int rsvd_eqs; int max_eq; int rsvd_uars; }; struct mlx4_adapter { char board_id[MLX4_BOARD_ID_LEN]; u8 inta_pin; }; struct mlx4_init_hca_param { u64 qpc_base; u64 rdmarc_base; u64 auxc_base; u64 altc_base; u64 srqc_base; u64 cqc_base; u64 eqc_base; u64 mc_base; u64 dmpt_base; u64 cmpt_base; u64 mtt_base; u64 global_caps; u16 log_mc_entry_sz; u16 log_mc_hash_sz; u16 hca_core_clock; /* Internal Clock Frequency (in MHz) */ u8 log_num_qps; u8 log_num_srqs; u8 log_num_cqs; u8 log_num_eqs; u16 num_sys_eqs; u8 log_rd_per_qp; u8 log_mc_table_sz; u8 log_mpt_sz; u8 log_uar_sz; u8 mw_enabled; /* Enable memory windows */ u8 uar_page_sz; /* log pg sz in 4k chunks */ u8 steering_mode; /* for QUERY_HCA */ u8 dmfs_high_steer_mode; /* for QUERY_HCA */ u64 dev_cap_enabled; u16 cqe_size; /* For use only when CQE stride feature enabled */ u16 eqe_size; /* For use only when EQE stride feature enabled */ u8 rss_ip_frags; u8 phv_check_en; /* for QUERY_HCA */ }; struct mlx4_init_ib_param { int port_width; int vl_cap; int mtu_cap; u16 gid_cap; u16 pkey_cap; int set_guid0; u64 guid0; int set_node_guid; u64 node_guid; int set_si_guid; u64 si_guid; }; struct mlx4_set_ib_param { int set_si_guid; int reset_qkey_viol; u64 si_guid; u32 cap_mask; }; void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap); int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap); int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap); int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, struct mlx4_func_cap *func_cap); int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd); int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave); int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm); int mlx4_UNMAP_FA(struct mlx4_dev *dev); int mlx4_RUN_FW(struct mlx4_dev *dev); int mlx4_QUERY_FW(struct mlx4_dev *dev); int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter); int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param); int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param); int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic); int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt); int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages); int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev); int mlx4_NOP(struct mlx4_dev *dev); int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg); void mlx4_opreq_action(struct work_struct *work); #endif /* MLX4_FW_H */
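The header above only declares the firmware-command wrappers, so as orientation here is a hypothetical bring-up sketch showing how a caller might chain them. The ordering, the zero-initialized parameter block, and the omission of the MAP_FA/RUN_FW and ICM steps are simplifications for illustration; the driver's real initialization path is considerably more involved and lives elsewhere.

/*
 * Annotator's sketch (not part of the driver): assumes it is compiled
 * inside the mlx4 driver with this header included.
 */
static int example_hca_bringup(struct mlx4_dev *dev)
{
	struct mlx4_dev_cap dev_cap;
	struct mlx4_init_hca_param init_hca = { 0 };
	int err;

	err = mlx4_QUERY_FW(dev);	/* read firmware information */
	if (err)
		return err;

	/* MAP_FA/RUN_FW and ICM sizing would happen here in a real flow */

	err = mlx4_QUERY_DEV_CAP(dev, &dev_cap);	/* discover device limits */
	if (err)
		return err;
	mlx4_dev_cap_dump(dev, &dev_cap);

	/* translate dev_cap limits into the INIT_HCA profile (omitted) */

	return mlx4_INIT_HCA(dev, &init_hca);
}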
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 93,520

file_name: 33,450
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
unique_id: 198,445
project: linux
target: 0
repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
/* * caam descriptor construction helper functions * * Copyright 2008-2012 Freescale Semiconductor, Inc. */ #include "desc.h" #include "regs.h" #define IMMEDIATE (1 << 23) #define CAAM_CMD_SZ sizeof(u32) #define CAAM_PTR_SZ sizeof(dma_addr_t) #define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE) #define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3) #ifdef DEBUG #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ &__func__[sizeof("append")]); } while (0) #else #define PRINT_POS #endif #define SET_OK_NO_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \ LDST_SRCDST_WORD_DECOCTRL | \ (LDOFF_CHG_SHARE_OK_NO_PROP << \ LDST_OFFSET_SHIFT)) #define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \ LDST_SRCDST_WORD_DECOCTRL | \ (LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) #define ENABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \ LDST_SRCDST_WORD_DECOCTRL | \ (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) extern bool caam_little_end; static inline int desc_len(u32 * const desc) { return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK; } static inline int desc_bytes(void * const desc) { return desc_len(desc) * CAAM_CMD_SZ; } static inline u32 *desc_end(u32 * const desc) { return desc + desc_len(desc); } static inline void *sh_desc_pdb(u32 * const desc) { return desc + 1; } static inline void init_desc(u32 * const desc, u32 options) { *desc = cpu_to_caam32((options | HDR_ONE) + 1); } static inline void init_sh_desc(u32 * const desc, u32 options) { PRINT_POS; init_desc(desc, CMD_SHARED_DESC_HDR | options); } static inline void init_sh_desc_pdb(u32 * const desc, u32 options, size_t pdb_bytes) { u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; init_sh_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT) + pdb_len) | options); } static inline void init_job_desc(u32 * const desc, u32 options) { init_desc(desc, CMD_DESC_HDR | options); } static inline void init_job_desc_pdb(u32 * const desc, u32 options, size_t pdb_bytes) { u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options); } static inline void append_ptr(u32 * const desc, dma_addr_t ptr) { dma_addr_t *offset = (dma_addr_t *)desc_end(desc); *offset = cpu_to_caam_dma(ptr); (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + CAAM_PTR_SZ / CAAM_CMD_SZ); } static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr, int len, u32 options) { PRINT_POS; init_job_desc(desc, HDR_SHARED | options | (len << HDR_START_IDX_SHIFT)); append_ptr(desc, ptr); } static inline void append_data(u32 * const desc, void *data, int len) { u32 *offset = desc_end(desc); if (len) /* avoid sparse warning: memcpy with byte count of 0 */ memcpy(offset, data, len); (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ); } static inline void append_cmd(u32 * const desc, u32 command) { u32 *cmd = desc_end(desc); *cmd = cpu_to_caam32(command); (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + 1); } #define append_u32 append_cmd static inline void append_u64(u32 * const desc, u64 data) { u32 *offset = desc_end(desc); /* Only 32-bit alignment is guaranteed in descriptor buffer */ if (caam_little_end) { *offset = cpu_to_caam32(lower_32_bits(data)); *(++offset) = cpu_to_caam32(upper_32_bits(data)); } else { *offset = cpu_to_caam32(upper_32_bits(data)); *(++offset) = cpu_to_caam32(lower_32_bits(data)); } (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + 2); } /* Write command without affecting header, and return pointer to next word */ 
static inline u32 *write_cmd(u32 * const desc, u32 command) { *desc = cpu_to_caam32(command); return desc + 1; } static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len, u32 command) { append_cmd(desc, command | len); append_ptr(desc, ptr); } /* Write length after pointer, rather than inside command */ static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr, unsigned int len, u32 command) { append_cmd(desc, command); if (!(command & (SQIN_RTO | SQIN_PRE))) append_ptr(desc, ptr); append_cmd(desc, len); } static inline void append_cmd_data(u32 * const desc, void *data, int len, u32 command) { append_cmd(desc, command | IMMEDIATE | len); append_data(desc, data, len); } #define APPEND_CMD_RET(cmd, op) \ static inline u32 *append_##cmd(u32 * const desc, u32 options) \ { \ u32 *cmd = desc_end(desc); \ PRINT_POS; \ append_cmd(desc, CMD_##op | options); \ return cmd; \ } APPEND_CMD_RET(jump, JUMP) APPEND_CMD_RET(move, MOVE) static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd) { *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) | (desc_len(desc) - (jump_cmd - desc))); } static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd) { u32 val = caam32_to_cpu(*move_cmd); val &= ~MOVE_OFFSET_MASK; val |= (desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) & MOVE_OFFSET_MASK; *move_cmd = cpu_to_caam32(val); } #define APPEND_CMD(cmd, op) \ static inline void append_##cmd(u32 * const desc, u32 options) \ { \ PRINT_POS; \ append_cmd(desc, CMD_##op | options); \ } APPEND_CMD(operation, OPERATION) #define APPEND_CMD_LEN(cmd, op) \ static inline void append_##cmd(u32 * const desc, unsigned int len, \ u32 options) \ { \ PRINT_POS; \ append_cmd(desc, CMD_##op | len | options); \ } APPEND_CMD_LEN(seq_load, SEQ_LOAD) APPEND_CMD_LEN(seq_store, SEQ_STORE) APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD) APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE) #define APPEND_CMD_PTR(cmd, op) \ static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \ unsigned int len, u32 options) \ { \ PRINT_POS; \ append_cmd_ptr(desc, ptr, len, CMD_##op | options); \ } APPEND_CMD_PTR(key, KEY) APPEND_CMD_PTR(load, LOAD) APPEND_CMD_PTR(fifo_load, FIFO_LOAD) APPEND_CMD_PTR(fifo_store, FIFO_STORE) static inline void append_store(u32 * const desc, dma_addr_t ptr, unsigned int len, u32 options) { u32 cmd_src; cmd_src = options & LDST_SRCDST_MASK; append_cmd(desc, CMD_STORE | options | len); /* The following options do not require pointer */ if (!(cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED || cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB || cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB_WE || cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED_WE)) append_ptr(desc, ptr); } #define APPEND_SEQ_PTR_INTLEN(cmd, op) \ static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \ dma_addr_t ptr, \ unsigned int len, \ u32 options) \ { \ PRINT_POS; \ if (options & (SQIN_RTO | SQIN_PRE)) \ append_cmd(desc, CMD_SEQ_##op##_PTR | len | options); \ else \ append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \ } APPEND_SEQ_PTR_INTLEN(in, IN) APPEND_SEQ_PTR_INTLEN(out, OUT) #define APPEND_CMD_PTR_TO_IMM(cmd, op) \ static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \ unsigned int len, u32 options) \ { \ PRINT_POS; \ append_cmd_data(desc, data, len, CMD_##op | options); \ } APPEND_CMD_PTR_TO_IMM(load, LOAD); APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD); #define APPEND_CMD_PTR_EXTLEN(cmd, op) \ static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \ unsigned int len, 
u32 options) \ { \ PRINT_POS; \ append_cmd_ptr_extlen(desc, ptr, len, CMD_##op | SQIN_EXT | options); \ } APPEND_CMD_PTR_EXTLEN(seq_in_ptr, SEQ_IN_PTR) APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR) /* * Determine whether to store length internally or externally depending on * the size of its type */ #define APPEND_CMD_PTR_LEN(cmd, op, type) \ static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \ type len, u32 options) \ { \ PRINT_POS; \ if (sizeof(type) > sizeof(u16)) \ append_##cmd##_extlen(desc, ptr, len, options); \ else \ append_##cmd##_intlen(desc, ptr, len, options); \ } APPEND_CMD_PTR_LEN(seq_in_ptr, SEQ_IN_PTR, u32) APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32) /* * 2nd variant for commands whose specified immediate length differs * from length of immediate data provided, e.g., split keys */ #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \ static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \ unsigned int data_len, \ unsigned int len, u32 options) \ { \ PRINT_POS; \ append_cmd(desc, CMD_##op | IMMEDIATE | len | options); \ append_data(desc, data, data_len); \ } APPEND_CMD_PTR_TO_IMM2(key, KEY); #define APPEND_CMD_RAW_IMM(cmd, op, type) \ static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \ u32 options) \ { \ PRINT_POS; \ append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \ append_cmd(desc, immediate); \ } APPEND_CMD_RAW_IMM(load, LOAD, u32); /* * ee - endianness * size - size of immediate type in bytes */ #define APPEND_CMD_RAW_IMM2(cmd, op, ee, size) \ static inline void append_##cmd##_imm_##ee##size(u32 *desc, \ u##size immediate, \ u32 options) \ { \ __##ee##size data = cpu_to_##ee##size(immediate); \ PRINT_POS; \ append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(data)); \ append_data(desc, &data, sizeof(data)); \ } APPEND_CMD_RAW_IMM2(load, LOAD, be, 32); /* * Append math command. Only the last part of destination and source need to * be specified */ #define APPEND_MATH(op, desc, dest, src_0, src_1, len) \ append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \ MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32)len); #define append_math_add(desc, dest, src0, src1, len) \ APPEND_MATH(ADD, desc, dest, src0, src1, len) #define append_math_sub(desc, dest, src0, src1, len) \ APPEND_MATH(SUB, desc, dest, src0, src1, len) #define append_math_add_c(desc, dest, src0, src1, len) \ APPEND_MATH(ADDC, desc, dest, src0, src1, len) #define append_math_sub_b(desc, dest, src0, src1, len) \ APPEND_MATH(SUBB, desc, dest, src0, src1, len) #define append_math_and(desc, dest, src0, src1, len) \ APPEND_MATH(AND, desc, dest, src0, src1, len) #define append_math_or(desc, dest, src0, src1, len) \ APPEND_MATH(OR, desc, dest, src0, src1, len) #define append_math_xor(desc, dest, src0, src1, len) \ APPEND_MATH(XOR, desc, dest, src0, src1, len) #define append_math_lshift(desc, dest, src0, src1, len) \ APPEND_MATH(LSHIFT, desc, dest, src0, src1, len) #define append_math_rshift(desc, dest, src0, src1, len) \ APPEND_MATH(RSHIFT, desc, dest, src0, src1, len) #define append_math_ldshift(desc, dest, src0, src1, len) \ APPEND_MATH(SHLD, desc, dest, src0, src1, len) /* Exactly one source is IMM. 
Data is passed in as u32 value */ #define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \ do { \ APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \ append_cmd(desc, data); \ } while (0) #define append_math_add_imm_u32(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data) #define append_math_sub_imm_u32(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u32(SUB, desc, dest, src0, src1, data) #define append_math_add_c_imm_u32(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u32(ADDC, desc, dest, src0, src1, data) #define append_math_sub_b_imm_u32(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u32(SUBB, desc, dest, src0, src1, data) #define append_math_and_imm_u32(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u32(AND, desc, dest, src0, src1, data) #define append_math_or_imm_u32(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u32(OR, desc, dest, src0, src1, data) #define append_math_xor_imm_u32(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u32(XOR, desc, dest, src0, src1, data) #define append_math_lshift_imm_u32(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data) #define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data) /* Exactly one source is IMM. Data is passed in as u64 value */ #define APPEND_MATH_IMM_u64(op, desc, dest, src_0, src_1, data) \ do { \ u32 upper = (data >> 16) >> 16; \ APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ * 2 | \ (upper ? 0 : MATH_IFB)); \ if (upper) \ append_u64(desc, data); \ else \ append_u32(desc, lower_32_bits(data)); \ } while (0) #define append_math_add_imm_u64(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u64(ADD, desc, dest, src0, src1, data) #define append_math_sub_imm_u64(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u64(SUB, desc, dest, src0, src1, data) #define append_math_add_c_imm_u64(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u64(ADDC, desc, dest, src0, src1, data) #define append_math_sub_b_imm_u64(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u64(SUBB, desc, dest, src0, src1, data) #define append_math_and_imm_u64(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u64(AND, desc, dest, src0, src1, data) #define append_math_or_imm_u64(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u64(OR, desc, dest, src0, src1, data) #define append_math_xor_imm_u64(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u64(XOR, desc, dest, src0, src1, data) #define append_math_lshift_imm_u64(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data) #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data) /** * struct alginfo - Container for algorithm details * @algtype: algorithm selector; for valid values, see documentation of the * functions where it is used. * @keylen: length of the provided algorithm key, in bytes * @keylen_pad: padded length of the provided algorithm key, in bytes * @key: address where algorithm key resides; virtual address if key_inline * is true, dma (bus) address if key_inline is false. * @key_inline: true - key can be inlined in the descriptor; false - key is * referenced by the descriptor */ struct alginfo { u32 algtype; unsigned int keylen; unsigned int keylen_pad; union { dma_addr_t key_dma; void *key_virt; }; bool key_inline; }; /** * desc_inline_query() - Provide indications on which data items can be inlined * and which shall be referenced in a shared descriptor. 
* @sd_base_len: Shared descriptor base length - bytes consumed by the commands, * excluding the data items to be inlined (or corresponding * pointer if an item is not inlined). Each cnstr_* function that * generates descriptors should have a define mentioning * corresponding length. * @jd_len: Maximum length of the job descriptor(s) that will be used * together with the shared descriptor. * @data_len: Array of lengths of the data items trying to be inlined * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0 * otherwise. * @count: Number of data items (size of @data_len array); must be <= 32 * * Return: 0 if data can be inlined / referenced, negative value if not. If 0, * check @inl_mask for details. */ static inline int desc_inline_query(unsigned int sd_base_len, unsigned int jd_len, unsigned int *data_len, u32 *inl_mask, unsigned int count) { int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len); unsigned int i; *inl_mask = 0; for (i = 0; (i < count) && (rem_bytes > 0); i++) { if (rem_bytes - (int)(data_len[i] + (count - i - 1) * CAAM_PTR_SZ) >= 0) { rem_bytes -= data_len[i]; *inl_mask |= (1 << i); } else { rem_bytes -= CAAM_PTR_SZ; } } return (rem_bytes >= 0) ? 0 : -1; }
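Since desc_inline_query() is the least obvious helper in this header, the following standalone program mirrors its greedy accounting so the behavior can be checked outside the kernel. The descriptor ceiling (MAX_CAAM_DESCSIZE taken as 64) and the 8-byte pointer size are assumed values for the example.

#include <stdio.h>
#include <stdint.h>

#define CAAM_CMD_SZ		4
#define CAAM_PTR_SZ		8	/* assuming 64-bit dma_addr_t */
#define MAX_CAAM_DESCSIZE	64	/* assumed; gives a 256-byte descriptor */
#define CAAM_DESC_BYTES_MAX	(CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)

/* Same greedy algorithm as desc_inline_query() above. */
static int inline_query(unsigned int sd_base_len, unsigned int jd_len,
			const unsigned int *data_len, uint32_t *inl_mask,
			unsigned int count)
{
	int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
	unsigned int i;

	*inl_mask = 0;
	for (i = 0; (i < count) && (rem_bytes > 0); i++) {
		/* inline item i only if the remaining items can still fit
		 * even when they all fall back to pointers */
		if (rem_bytes - (int)(data_len[i] +
				      (count - i - 1) * CAAM_PTR_SZ) >= 0) {
			rem_bytes -= data_len[i];
			*inl_mask |= 1u << i;
		} else {
			rem_bytes -= CAAM_PTR_SZ;
		}
	}
	return rem_bytes >= 0 ? 0 : -1;
}

int main(void)
{
	unsigned int keys[2] = { 64, 128 };	/* e.g. split MAC key + cipher key */
	uint32_t mask;

	if (inline_query(20, 48, keys, &mask, 2) == 0)
		printf("inline mask: 0x%x\n", mask);	/* bit i set => item i inlined */
	return 0;
}

With these sample lengths the first key is inlined and the second is referenced, so the program prints "inline mask: 0x1".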
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 106,792

file_name: 24,979
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
unique_id: 189,974
project: linux
target: 0
repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include "nv50.h" #include "outpdp.h" #include <subdev/timer.h> static inline u32 gm200_sor_soff(struct nvkm_output_dp *outp) { return (ffs(outp->base.info.or) - 1) * 0x800; } static inline u32 gm200_sor_loff(struct nvkm_output_dp *outp) { return gm200_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80; } void gm200_sor_magic(struct nvkm_output *outp) { struct nvkm_device *device = outp->disp->engine.subdev.device; const u32 soff = outp->or * 0x100; const u32 data = outp->or + 1; if (outp->info.sorconf.link & 1) nvkm_mask(device, 0x612308 + soff, 0x0000001f, 0x00000000 | data); if (outp->info.sorconf.link & 2) nvkm_mask(device, 0x612388 + soff, 0x0000001f, 0x00000010 | data); } static inline u32 gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane) { return lane * 0x08; } static int gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr) { struct nvkm_device *device = outp->base.disp->engine.subdev.device; const u32 soff = gm200_sor_soff(outp); const u32 loff = gm200_sor_loff(outp); u32 mask = 0, i; for (i = 0; i < nr; i++) mask |= 1 << (gm200_sor_dp_lane_map(device, i) >> 3); nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask); nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000); nvkm_msec(device, 2000, if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000)) break; ); return 0; } static int gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc) { struct nvkm_device *device = outp->base.disp->engine.subdev.device; struct nvkm_bios *bios = device->bios; const u32 shift = gm200_sor_dp_lane_map(device, ln); const u32 loff = gm200_sor_loff(outp); u32 addr, data[4]; u8 ver, hdr, cnt, len; struct nvbios_dpout info; struct nvbios_dpcfg ocfg; addr = nvbios_dpout_match(bios, outp->base.info.hasht, outp->base.info.hashm, &ver, &hdr, &cnt, &len, &info); if (!addr) return -ENODEV; addr = nvbios_dpcfg_match(bios, addr, pc, vs, pe, &ver, &hdr, &cnt, &len, &ocfg); if (!addr) return -EINVAL; ocfg.tx_pu &= 0x0f; data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift); data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift); data[2] = nvkm_rd32(device, 0x61c130 + loff); if ((data[2] & 0x00000f00) < (ocfg.tx_pu << 8) || ln == 0) data[2] = (data[2] & ~0x00000f00) | (ocfg.tx_pu << 8); nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift)); nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift)); nvkm_wr32(device, 
0x61c130 + loff, data[2]); data[3] = nvkm_rd32(device, 0x61c13c + loff) & ~(0x000000ff << shift); nvkm_wr32(device, 0x61c13c + loff, data[3] | (ocfg.pc << shift)); return 0; } static const struct nvkm_output_dp_func gm200_sor_dp_func = { .pattern = gm107_sor_dp_pattern, .lnk_pwr = gm200_sor_dp_lnk_pwr, .lnk_ctl = gf119_sor_dp_lnk_ctl, .drv_ctl = gm200_sor_dp_drv_ctl, .vcpi = gf119_sor_dp_vcpi, }; int gm200_sor_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE, struct nvkm_output **poutp) { return nvkm_output_dp_new_(&gm200_sor_dp_func, disp, index, dcbE, poutp); }
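The register offsets used throughout the functions above are derived purely from the DCB output mask and the link number; this small sketch tabulates the mapping. The 0x61c130 base is just the lane-power register from gm200_sor_dp_lnk_pwr(), used here as a representative example.

#include <stdio.h>

/* Mirror of gm200_sor_soff()/gm200_sor_loff() above. */
static unsigned int sor_soff(unsigned int or_mask)
{
	return (__builtin_ffs((int)or_mask) - 1) * 0x800;
}

static unsigned int sor_loff(unsigned int or_mask, unsigned int lnk)
{
	return sor_soff(or_mask) + !(lnk & 1) * 0x80;
}

int main(void)
{
	for (unsigned int msk = 1; msk <= 4; msk <<= 1)
		for (unsigned int lnk = 1; lnk <= 2; lnk++)
			printf("or=%u link=%u -> 0x61c130 + 0x%04x\n",
			       msk, lnk, sor_loff(msk, lnk));
	return 0;
}

Each SOR thus occupies a 0x800-byte register window, with the second link of a pair shifted by 0x80 within it.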
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 98,321

file_name: 28,948
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
unique_id: 193,943
project: linux
target: 0
repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
/* * lm92 - Hardware monitoring driver * Copyright (C) 2005-2008 Jean Delvare <[email protected]> * * Based on the lm90 driver, with some ideas taken from the lm_sensors * lm92 driver as well. * * The LM92 is a sensor chip made by National Semiconductor. It reports * its own temperature with a 0.0625 deg resolution and a 0.33 deg * accuracy. Complete datasheet can be obtained from National's website * at: * http://www.national.com/pf/LM/LM92.html * * This driver also supports the MAX6635 sensor chip made by Maxim. * This chip is compatible with the LM92, but has a lesser accuracy * (1.0 deg). Complete datasheet can be obtained from Maxim's website * at: * http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3074 * * Since the LM92 was the first chipset supported by this driver, most * comments will refer to this chipset, but are actually general and * concern all supported chipsets, unless mentioned otherwise. * * Support could easily be added for the National Semiconductor LM76 * and Maxim MAX6633 and MAX6634 chips, which are mostly compatible * with the LM92. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/jiffies.h> /* * The LM92 and MAX6635 have 2 two-state pins for address selection, * resulting in 4 possible addresses. */ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, I2C_CLIENT_END }; /* The LM92 registers */ #define LM92_REG_CONFIG 0x01 /* 8-bit, RW */ #define LM92_REG_TEMP 0x00 /* 16-bit, RO */ #define LM92_REG_TEMP_HYST 0x02 /* 16-bit, RW */ #define LM92_REG_TEMP_CRIT 0x03 /* 16-bit, RW */ #define LM92_REG_TEMP_LOW 0x04 /* 16-bit, RW */ #define LM92_REG_TEMP_HIGH 0x05 /* 16-bit, RW */ #define LM92_REG_MAN_ID 0x07 /* 16-bit, RO, LM92 only */ /* * The LM92 uses signed 13-bit values with LSB = 0.0625 degree Celsius, * left-justified in 16-bit registers. No rounding is done, with such * a resolution it's just not worth it. Note that the MAX6635 doesn't * make use of the 4 lower bits for limits (i.e. effective resolution * for limits is 1 degree Celsius). 
*/ static inline int TEMP_FROM_REG(s16 reg) { return reg / 8 * 625 / 10; } static inline s16 TEMP_TO_REG(long val) { val = clamp_val(val, -60000, 160000); return val * 10 / 625 * 8; } /* Alarm flags are stored in the 3 LSB of the temperature register */ static inline u8 ALARMS_FROM_REG(s16 reg) { return reg & 0x0007; } enum temp_index { t_input, t_crit, t_min, t_max, t_hyst, t_num_regs }; static const u8 regs[t_num_regs] = { [t_input] = LM92_REG_TEMP, [t_crit] = LM92_REG_TEMP_CRIT, [t_min] = LM92_REG_TEMP_LOW, [t_max] = LM92_REG_TEMP_HIGH, [t_hyst] = LM92_REG_TEMP_HYST, }; /* Client data (each client gets its own) */ struct lm92_data { struct i2c_client *client; struct mutex update_lock; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ /* registers values */ s16 temp[t_num_regs]; /* index with enum temp_index */ }; /* * Sysfs attributes and callback functions */ static struct lm92_data *lm92_update_device(struct device *dev) { struct lm92_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; int i; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { dev_dbg(&client->dev, "Updating lm92 data\n"); for (i = 0; i < t_num_regs; i++) { data->temp[i] = i2c_smbus_read_word_swapped(client, regs[i]); } data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } static ssize_t show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct lm92_data *data = lm92_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index])); } static ssize_t set_temp(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct lm92_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; int nr = attr->index; long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp[nr] = TEMP_TO_REG(val); i2c_smbus_write_word_swapped(client, regs[nr], data->temp[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_hyst(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct lm92_data *data = lm92_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index]) - TEMP_FROM_REG(data->temp[t_hyst])); } static ssize_t temp1_min_hyst_show(struct device *dev, struct device_attribute *attr, char *buf) { struct lm92_data *data = lm92_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[t_min]) + TEMP_FROM_REG(data->temp[t_hyst])); } static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct lm92_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; val = clamp_val(val, -120000, 220000); mutex_lock(&data->update_lock); data->temp[t_hyst] = TEMP_TO_REG(TEMP_FROM_REG(data->temp[attr->index]) - val); i2c_smbus_write_word_swapped(client, LM92_REG_TEMP_HYST, data->temp[t_hyst]); mutex_unlock(&data->update_lock); return count; } static ssize_t alarms_show(struct device *dev, struct device_attribute *attr, char *buf) { struct 
lm92_data *data = lm92_update_device(dev); return sprintf(buf, "%d\n", ALARMS_FROM_REG(data->temp[t_input])); } static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) { int bitnr = to_sensor_dev_attr(attr)->index; struct lm92_data *data = lm92_update_device(dev); return sprintf(buf, "%d\n", (data->temp[t_input] >> bitnr) & 1); } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, t_input); static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp, set_temp, t_crit); static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_hyst, set_temp_hyst, t_crit); static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp, set_temp, t_min); static DEVICE_ATTR_RO(temp1_min_hyst); static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp, set_temp, t_max); static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_temp_hyst, NULL, t_max); static DEVICE_ATTR_RO(alarms); static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1); /* * Detection and registration */ static void lm92_init_client(struct i2c_client *client) { u8 config; /* Start the conversions if needed */ config = i2c_smbus_read_byte_data(client, LM92_REG_CONFIG); if (config & 0x01) i2c_smbus_write_byte_data(client, LM92_REG_CONFIG, config & 0xFE); } /* * The MAX6635 has no identification register, so we have to use tricks * to identify it reliably. This is somewhat slow. * Note that we do NOT rely on the 2 MSB of the configuration register * always reading 0, as suggested by the datasheet, because it was once * reported not to be true. */ static int max6635_check(struct i2c_client *client) { u16 temp_low, temp_high, temp_hyst, temp_crit; u8 conf; int i; /* * No manufacturer ID register, so a read from this address will * always return the last read value. */ temp_low = i2c_smbus_read_word_data(client, LM92_REG_TEMP_LOW); if (i2c_smbus_read_word_data(client, LM92_REG_MAN_ID) != temp_low) return 0; temp_high = i2c_smbus_read_word_data(client, LM92_REG_TEMP_HIGH); if (i2c_smbus_read_word_data(client, LM92_REG_MAN_ID) != temp_high) return 0; /* Limits are stored as integer values (signed, 9-bit). */ if ((temp_low & 0x7f00) || (temp_high & 0x7f00)) return 0; temp_hyst = i2c_smbus_read_word_data(client, LM92_REG_TEMP_HYST); temp_crit = i2c_smbus_read_word_data(client, LM92_REG_TEMP_CRIT); if ((temp_hyst & 0x7f00) || (temp_crit & 0x7f00)) return 0; /* * Registers addresses were found to cycle over 16-byte boundaries. * We don't test all registers with all offsets so as to save some * reads and time, but this should still be sufficient to dismiss * non-MAX6635 chips. 
*/ conf = i2c_smbus_read_byte_data(client, LM92_REG_CONFIG); for (i = 16; i < 96; i *= 2) { if (temp_hyst != i2c_smbus_read_word_data(client, LM92_REG_TEMP_HYST + i - 16) || temp_crit != i2c_smbus_read_word_data(client, LM92_REG_TEMP_CRIT + i) || temp_low != i2c_smbus_read_word_data(client, LM92_REG_TEMP_LOW + i + 16) || temp_high != i2c_smbus_read_word_data(client, LM92_REG_TEMP_HIGH + i + 32) || conf != i2c_smbus_read_byte_data(client, LM92_REG_CONFIG + i)) return 0; } return 1; } static struct attribute *lm92_attrs[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr, &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, &dev_attr_temp1_min_hyst.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, &dev_attr_alarms.attr, &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, NULL }; ATTRIBUTE_GROUPS(lm92); /* Return 0 if detection is successful, -ENODEV otherwise */ static int lm92_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; u8 config; u16 man_id; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) return -ENODEV; config = i2c_smbus_read_byte_data(new_client, LM92_REG_CONFIG); man_id = i2c_smbus_read_word_data(new_client, LM92_REG_MAN_ID); if ((config & 0xe0) == 0x00 && man_id == 0x0180) pr_info("lm92: Found National Semiconductor LM92 chip\n"); else if (max6635_check(new_client)) pr_info("lm92: Found Maxim MAX6635 chip\n"); else return -ENODEV; strlcpy(info->type, "lm92", I2C_NAME_SIZE); return 0; } static int lm92_probe(struct i2c_client *new_client, const struct i2c_device_id *id) { struct device *hwmon_dev; struct lm92_data *data; data = devm_kzalloc(&new_client->dev, sizeof(struct lm92_data), GFP_KERNEL); if (!data) return -ENOMEM; data->client = new_client; mutex_init(&data->update_lock); /* Initialize the chipset */ lm92_init_client(new_client); hwmon_dev = devm_hwmon_device_register_with_groups(&new_client->dev, new_client->name, data, lm92_groups); return PTR_ERR_OR_ZERO(hwmon_dev); } /* * Module and driver stuff */ static const struct i2c_device_id lm92_id[] = { { "lm92", 0 }, /* max6635 could be added here */ { } }; MODULE_DEVICE_TABLE(i2c, lm92_id); static struct i2c_driver lm92_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "lm92", }, .probe = lm92_probe, .id_table = lm92_id, .detect = lm92_detect, .address_list = normal_i2c, }; module_i2c_driver(lm92_driver); MODULE_AUTHOR("Jean Delvare <[email protected]>"); MODULE_DESCRIPTION("LM92/MAX6635 driver"); MODULE_LICENSE("GPL");
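The fixed-point conversions at the top of this driver are worth a worked example: the register holds a signed 13-bit value, left-justified by 3 bits, with an LSB of 0.0625 degrees Celsius (hence the /8 and the *625/10). The program below restates TEMP_FROM_REG()/TEMP_TO_REG() in user space and round-trips a few sample temperatures; the clamp bounds are copied from the driver.

#include <stdio.h>
#include <stdint.h>

static long clamp_val(long v, long lo, long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Same fixed-point conversions as TEMP_FROM_REG()/TEMP_TO_REG() above. */
static int temp_from_reg(int16_t reg)
{
	return reg / 8 * 625 / 10;	/* millidegrees Celsius */
}

static int16_t temp_to_reg(long val)
{
	val = clamp_val(val, -60000, 160000);
	return (int16_t)(val * 10 / 625 * 8);
}

int main(void)
{
	long samples[] = { 25000, -10000, 125 };	/* millidegrees */

	for (int i = 0; i < 3; i++) {
		int16_t reg = temp_to_reg(samples[i]);
		printf("%6ld mC -> reg 0x%04x -> %6d mC\n",
		       samples[i], (uint16_t)reg, temp_from_reg(reg));
	}
	return 0;
}

25000 m°C round-trips exactly (register 0x0c80); values that are not a multiple of the 62.5 m°C step, such as 125, are truncated toward zero rather than rounded, matching the driver's stated policy.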
null
null
null
null
102,290
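The alarm attributes in the lm92 record above all funnel through the same bit test: show_alarm() shifts the cached input-temperature word right by the attribute's index and masks off one bit (index 0 = min, 1 = max, 2 = crit, matching the SENSOR_DEVICE_ATTR declarations). A minimal stand-alone sketch of that decoding, using a made-up raw value:

#include <stdio.h>

/* Mirrors the driver's show_alarm(): one status bit per alarm. */
static int alarm_bit(unsigned int temp_word, int bitnr)
{
	return (temp_word >> bitnr) & 1;
}

int main(void)
{
	unsigned int raw = 0x05;	/* example only: bits 0 and 2 set */

	printf("min_alarm=%d max_alarm=%d crit_alarm=%d\n",
	       alarm_bit(raw, 0), alarm_bit(raw, 1), alarm_bit(raw, 2));
	return 0;
}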
27,846
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
192,841
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 only, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ /* * Copyright (c) 2014, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Seagate, Inc. * * lnet/lnet/net_fault.c * * Lustre network fault simulation * * Author: [email protected] */ #define DEBUG_SUBSYSTEM S_LNET #include "../../include/linux/lnet/lib-lnet.h" #include "../../include/linux/lnet/lnetctl.h" #define LNET_MSG_MASK (LNET_PUT_BIT | LNET_ACK_BIT | \ LNET_GET_BIT | LNET_REPLY_BIT) struct lnet_drop_rule { /** link chain on the_lnet.ln_drop_rules */ struct list_head dr_link; /** attributes of this rule */ struct lnet_fault_attr dr_attr; /** lock to protect \a dr_drop_at and \a dr_stat */ spinlock_t dr_lock; /** * the message sequence to drop, which means message is dropped when * dr_stat.drs_count == dr_drop_at */ unsigned long dr_drop_at; /** * seconds to drop the next message, it's exclusive with dr_drop_at */ unsigned long dr_drop_time; /** baseline to caculate dr_drop_time */ unsigned long dr_time_base; /** statistic of dropped messages */ struct lnet_fault_stat dr_stat; }; static bool lnet_fault_nid_match(lnet_nid_t nid, lnet_nid_t msg_nid) { if (nid == msg_nid || nid == LNET_NID_ANY) return true; if (LNET_NIDNET(nid) != LNET_NIDNET(msg_nid)) return false; /* 255.255.255.255@net is wildcard for all addresses in a network */ return LNET_NIDADDR(nid) == LNET_NIDADDR(LNET_NID_ANY); } static bool lnet_fault_attr_match(struct lnet_fault_attr *attr, lnet_nid_t src, lnet_nid_t dst, unsigned int type, unsigned int portal) { if (!lnet_fault_nid_match(attr->fa_src, src) || !lnet_fault_nid_match(attr->fa_dst, dst)) return false; if (!(attr->fa_msg_mask & (1 << type))) return false; /** * NB: ACK and REPLY have no portal, but they should have been * rejected by message mask */ if (attr->fa_ptl_mask && /* has portal filter */ !(attr->fa_ptl_mask & (1ULL << portal))) return false; return true; } static int lnet_fault_attr_validate(struct lnet_fault_attr *attr) { if (!attr->fa_msg_mask) attr->fa_msg_mask = LNET_MSG_MASK; /* all message types */ if (!attr->fa_ptl_mask) /* no portal filter */ return 0; /* NB: only PUT and GET can be filtered if portal filter has been set */ attr->fa_msg_mask &= LNET_GET_BIT | LNET_PUT_BIT; if (!attr->fa_msg_mask) { CDEBUG(D_NET, "can't find valid message type bits %x\n", attr->fa_msg_mask); return -EINVAL; } return 0; } static void lnet_fault_stat_inc(struct lnet_fault_stat *stat, unsigned int type) { /* NB: fs_counter is NOT updated by this function */ switch (type) { case LNET_MSG_PUT: stat->fs_put++; return; case LNET_MSG_ACK: stat->fs_ack++; return; case LNET_MSG_GET: stat->fs_get++; return; case LNET_MSG_REPLY: stat->fs_reply++; return; } } /** * LNet message drop simulation */ /** * Add a new drop rule to LNet * There is no 
check for duplicated drop rule, all rules will be checked for * incoming message. */ static int lnet_drop_rule_add(struct lnet_fault_attr *attr) { struct lnet_drop_rule *rule; if (attr->u.drop.da_rate & attr->u.drop.da_interval) { CDEBUG(D_NET, "please provide either drop rate or drop interval, but not both at the same time %d/%d\n", attr->u.drop.da_rate, attr->u.drop.da_interval); return -EINVAL; } if (lnet_fault_attr_validate(attr)) return -EINVAL; CFS_ALLOC_PTR(rule); if (!rule) return -ENOMEM; spin_lock_init(&rule->dr_lock); rule->dr_attr = *attr; if (attr->u.drop.da_interval) { rule->dr_time_base = cfs_time_shift(attr->u.drop.da_interval); rule->dr_drop_time = cfs_time_shift(cfs_rand() % attr->u.drop.da_interval); } else { rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate; } lnet_net_lock(LNET_LOCK_EX); list_add(&rule->dr_link, &the_lnet.ln_drop_rules); lnet_net_unlock(LNET_LOCK_EX); CDEBUG(D_NET, "Added drop rule: src %s, dst %s, rate %d, interval %d\n", libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_src), attr->u.drop.da_rate, attr->u.drop.da_interval); return 0; } /** * Remove matched drop rules from lnet, all rules that can match \a src and * \a dst will be removed. * If \a src is zero, then all rules have \a dst as destination will be remove * If \a dst is zero, then all rules have \a src as source will be removed * If both of them are zero, all rules will be removed */ static int lnet_drop_rule_del(lnet_nid_t src, lnet_nid_t dst) { struct lnet_drop_rule *rule; struct lnet_drop_rule *tmp; struct list_head zombies; int n = 0; INIT_LIST_HEAD(&zombies); lnet_net_lock(LNET_LOCK_EX); list_for_each_entry_safe(rule, tmp, &the_lnet.ln_drop_rules, dr_link) { if (rule->dr_attr.fa_src != src && src) continue; if (rule->dr_attr.fa_dst != dst && dst) continue; list_move(&rule->dr_link, &zombies); } lnet_net_unlock(LNET_LOCK_EX); list_for_each_entry_safe(rule, tmp, &zombies, dr_link) { CDEBUG(D_NET, "Remove drop rule: src %s->dst: %s (1/%d, %d)\n", libcfs_nid2str(rule->dr_attr.fa_src), libcfs_nid2str(rule->dr_attr.fa_dst), rule->dr_attr.u.drop.da_rate, rule->dr_attr.u.drop.da_interval); list_del(&rule->dr_link); CFS_FREE_PTR(rule); n++; } return n; } /** * List drop rule at position of \a pos */ static int lnet_drop_rule_list(int pos, struct lnet_fault_attr *attr, struct lnet_fault_stat *stat) { struct lnet_drop_rule *rule; int cpt; int i = 0; int rc = -ENOENT; cpt = lnet_net_lock_current(); list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) { if (i++ < pos) continue; spin_lock(&rule->dr_lock); *attr = rule->dr_attr; *stat = rule->dr_stat; spin_unlock(&rule->dr_lock); rc = 0; break; } lnet_net_unlock(cpt); return rc; } /** * reset counters for all drop rules */ static void lnet_drop_rule_reset(void) { struct lnet_drop_rule *rule; int cpt; cpt = lnet_net_lock_current(); list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) { struct lnet_fault_attr *attr = &rule->dr_attr; spin_lock(&rule->dr_lock); memset(&rule->dr_stat, 0, sizeof(rule->dr_stat)); if (attr->u.drop.da_rate) { rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate; } else { rule->dr_drop_time = cfs_time_shift(cfs_rand() % attr->u.drop.da_interval); rule->dr_time_base = cfs_time_shift(attr->u.drop.da_interval); } spin_unlock(&rule->dr_lock); } lnet_net_unlock(cpt); } /** * check source/destination NID, portal, message type and drop rate, * decide whether should drop this message or not */ static bool drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src, lnet_nid_t dst, unsigned int type, unsigned int 
portal) { struct lnet_fault_attr *attr = &rule->dr_attr; bool drop; if (!lnet_fault_attr_match(attr, src, dst, type, portal)) return false; /* match this rule, check drop rate now */ spin_lock(&rule->dr_lock); if (rule->dr_drop_time) { /* time based drop */ unsigned long now = cfs_time_current(); rule->dr_stat.fs_count++; drop = cfs_time_aftereq(now, rule->dr_drop_time); if (drop) { if (cfs_time_after(now, rule->dr_time_base)) rule->dr_time_base = now; rule->dr_drop_time = rule->dr_time_base + cfs_time_seconds(cfs_rand() % attr->u.drop.da_interval); rule->dr_time_base += cfs_time_seconds(attr->u.drop.da_interval); CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lu\n", libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_dst), rule->dr_drop_time); } } else { /* rate based drop */ drop = rule->dr_stat.fs_count++ == rule->dr_drop_at; if (!do_div(rule->dr_stat.fs_count, attr->u.drop.da_rate)) { rule->dr_drop_at = rule->dr_stat.fs_count + cfs_rand() % attr->u.drop.da_rate; CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n", libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_dst), rule->dr_drop_at); } } if (drop) { /* drop this message, update counters */ lnet_fault_stat_inc(&rule->dr_stat, type); rule->dr_stat.u.drop.ds_dropped++; } spin_unlock(&rule->dr_lock); return drop; } /** * Check if message from \a src to \a dst can match any existed drop rule */ bool lnet_drop_rule_match(struct lnet_hdr *hdr) { struct lnet_drop_rule *rule; lnet_nid_t src = le64_to_cpu(hdr->src_nid); lnet_nid_t dst = le64_to_cpu(hdr->dest_nid); unsigned int typ = le32_to_cpu(hdr->type); unsigned int ptl = -1; bool drop = false; int cpt; /** * NB: if Portal is specified, then only PUT and GET will be * filtered by drop rule */ if (typ == LNET_MSG_PUT) ptl = le32_to_cpu(hdr->msg.put.ptl_index); else if (typ == LNET_MSG_GET) ptl = le32_to_cpu(hdr->msg.get.ptl_index); cpt = lnet_net_lock_current(); list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) { drop = drop_rule_match(rule, src, dst, typ, ptl); if (drop) break; } lnet_net_unlock(cpt); return drop; } /** * LNet Delay Simulation */ /** timestamp (second) to send delayed message */ #define msg_delay_send msg_ev.hdr_data struct lnet_delay_rule { /** link chain on the_lnet.ln_delay_rules */ struct list_head dl_link; /** link chain on delay_dd.dd_sched_rules */ struct list_head dl_sched_link; /** attributes of this rule */ struct lnet_fault_attr dl_attr; /** lock to protect \a below members */ spinlock_t dl_lock; /** refcount of delay rule */ atomic_t dl_refcount; /** * the message sequence to delay, which means message is delayed when * dl_stat.fs_count == dl_delay_at */ unsigned long dl_delay_at; /** * seconds to delay the next message, it's exclusive with dl_delay_at */ unsigned long dl_delay_time; /** baseline to caculate dl_delay_time */ unsigned long dl_time_base; /** jiffies to send the next delayed message */ unsigned long dl_msg_send; /** delayed message list */ struct list_head dl_msg_list; /** statistic of delayed messages */ struct lnet_fault_stat dl_stat; /** timer to wakeup delay_daemon */ struct timer_list dl_timer; }; struct delay_daemon_data { /** serialise rule add/remove */ struct mutex dd_mutex; /** protect rules on \a dd_sched_rules */ spinlock_t dd_lock; /** scheduled delay rules (by timer) */ struct list_head dd_sched_rules; /** daemon thread sleeps at here */ wait_queue_head_t dd_waitq; /** controller (lctl command) wait at here */ wait_queue_head_t dd_ctl_waitq; /** daemon is running */ unsigned int dd_running; /** daemon stopped */ 
unsigned int dd_stopped; }; static struct delay_daemon_data delay_dd; static unsigned long round_timeout(unsigned long timeout) { return cfs_time_seconds((unsigned int) cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1); } static void delay_rule_decref(struct lnet_delay_rule *rule) { if (atomic_dec_and_test(&rule->dl_refcount)) { LASSERT(list_empty(&rule->dl_sched_link)); LASSERT(list_empty(&rule->dl_msg_list)); LASSERT(list_empty(&rule->dl_link)); CFS_FREE_PTR(rule); } } /** * check source/destination NID, portal, message type and delay rate, * decide whether should delay this message or not */ static bool delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src, lnet_nid_t dst, unsigned int type, unsigned int portal, struct lnet_msg *msg) { struct lnet_fault_attr *attr = &rule->dl_attr; bool delay; if (!lnet_fault_attr_match(attr, src, dst, type, portal)) return false; /* match this rule, check delay rate now */ spin_lock(&rule->dl_lock); if (rule->dl_delay_time) { /* time based delay */ unsigned long now = cfs_time_current(); rule->dl_stat.fs_count++; delay = cfs_time_aftereq(now, rule->dl_delay_time); if (delay) { if (cfs_time_after(now, rule->dl_time_base)) rule->dl_time_base = now; rule->dl_delay_time = rule->dl_time_base + cfs_time_seconds(cfs_rand() % attr->u.delay.la_interval); rule->dl_time_base += cfs_time_seconds(attr->u.delay.la_interval); CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lu\n", libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_dst), rule->dl_delay_time); } } else { /* rate based delay */ delay = rule->dl_stat.fs_count++ == rule->dl_delay_at; /* generate the next random rate sequence */ if (!do_div(rule->dl_stat.fs_count, attr->u.delay.la_rate)) { rule->dl_delay_at = rule->dl_stat.fs_count + cfs_rand() % attr->u.delay.la_rate; CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n", libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_dst), rule->dl_delay_at); } } if (!delay) { spin_unlock(&rule->dl_lock); return false; } /* delay this message, update counters */ lnet_fault_stat_inc(&rule->dl_stat, type); rule->dl_stat.u.delay.ls_delayed++; list_add_tail(&msg->msg_list, &rule->dl_msg_list); msg->msg_delay_send = round_timeout( cfs_time_shift(attr->u.delay.la_latency)); if (rule->dl_msg_send == -1) { rule->dl_msg_send = msg->msg_delay_send; mod_timer(&rule->dl_timer, rule->dl_msg_send); } spin_unlock(&rule->dl_lock); return true; } /** * check if \a msg can match any Delay Rule, receiving of this message * will be delayed if there is a match. 
*/ bool lnet_delay_rule_match_locked(struct lnet_hdr *hdr, struct lnet_msg *msg) { struct lnet_delay_rule *rule; lnet_nid_t src = le64_to_cpu(hdr->src_nid); lnet_nid_t dst = le64_to_cpu(hdr->dest_nid); unsigned int typ = le32_to_cpu(hdr->type); unsigned int ptl = -1; /* NB: called with hold of lnet_net_lock */ /** * NB: if Portal is specified, then only PUT and GET will be * filtered by delay rule */ if (typ == LNET_MSG_PUT) ptl = le32_to_cpu(hdr->msg.put.ptl_index); else if (typ == LNET_MSG_GET) ptl = le32_to_cpu(hdr->msg.get.ptl_index); list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) { if (delay_rule_match(rule, src, dst, typ, ptl, msg)) return true; } return false; } /** check out delayed messages for send */ static void delayed_msg_check(struct lnet_delay_rule *rule, bool all, struct list_head *msg_list) { struct lnet_msg *msg; struct lnet_msg *tmp; unsigned long now = cfs_time_current(); if (!all && rule->dl_msg_send > now) return; spin_lock(&rule->dl_lock); list_for_each_entry_safe(msg, tmp, &rule->dl_msg_list, msg_list) { if (!all && msg->msg_delay_send > now) break; msg->msg_delay_send = 0; list_move_tail(&msg->msg_list, msg_list); } if (list_empty(&rule->dl_msg_list)) { del_timer(&rule->dl_timer); rule->dl_msg_send = -1; } else if (!list_empty(msg_list)) { /* * dequeued some timedout messages, update timer for the * next delayed message on rule */ msg = list_entry(rule->dl_msg_list.next, struct lnet_msg, msg_list); rule->dl_msg_send = msg->msg_delay_send; mod_timer(&rule->dl_timer, rule->dl_msg_send); } spin_unlock(&rule->dl_lock); } static void delayed_msg_process(struct list_head *msg_list, bool drop) { struct lnet_msg *msg; while (!list_empty(msg_list)) { struct lnet_ni *ni; int cpt; int rc; msg = list_entry(msg_list->next, struct lnet_msg, msg_list); LASSERT(msg->msg_rxpeer); ni = msg->msg_rxpeer->lp_ni; cpt = msg->msg_rx_cpt; list_del_init(&msg->msg_list); if (drop) { rc = -ECANCELED; } else if (!msg->msg_routing) { rc = lnet_parse_local(ni, msg); if (!rc) continue; } else { lnet_net_lock(cpt); rc = lnet_parse_forward_locked(ni, msg); lnet_net_unlock(cpt); switch (rc) { case LNET_CREDIT_OK: lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, msg->msg_len, msg->msg_len); case LNET_CREDIT_WAIT: continue; default: /* failures */ break; } } lnet_drop_message(ni, cpt, msg->msg_private, msg->msg_len); lnet_finalize(ni, msg, rc); } } /** * Process delayed messages for scheduled rules * This function can either be called by delay_rule_daemon, or by lnet_finalise */ void lnet_delay_rule_check(void) { struct lnet_delay_rule *rule; struct list_head msgs; INIT_LIST_HEAD(&msgs); while (1) { if (list_empty(&delay_dd.dd_sched_rules)) break; spin_lock_bh(&delay_dd.dd_lock); if (list_empty(&delay_dd.dd_sched_rules)) { spin_unlock_bh(&delay_dd.dd_lock); break; } rule = list_entry(delay_dd.dd_sched_rules.next, struct lnet_delay_rule, dl_sched_link); list_del_init(&rule->dl_sched_link); spin_unlock_bh(&delay_dd.dd_lock); delayed_msg_check(rule, false, &msgs); delay_rule_decref(rule); /* -1 for delay_dd.dd_sched_rules */ } if (!list_empty(&msgs)) delayed_msg_process(&msgs, false); } /** daemon thread to handle delayed messages */ static int lnet_delay_rule_daemon(void *arg) { delay_dd.dd_running = 1; wake_up(&delay_dd.dd_ctl_waitq); while (delay_dd.dd_running) { wait_event_interruptible(delay_dd.dd_waitq, !delay_dd.dd_running || !list_empty(&delay_dd.dd_sched_rules)); lnet_delay_rule_check(); } /* in case more rules have been enqueued after my last check */ lnet_delay_rule_check(); 
delay_dd.dd_stopped = 1; wake_up(&delay_dd.dd_ctl_waitq); return 0; } static void delay_timer_cb(unsigned long arg) { struct lnet_delay_rule *rule = (struct lnet_delay_rule *)arg; spin_lock_bh(&delay_dd.dd_lock); if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) { atomic_inc(&rule->dl_refcount); list_add_tail(&rule->dl_sched_link, &delay_dd.dd_sched_rules); wake_up(&delay_dd.dd_waitq); } spin_unlock_bh(&delay_dd.dd_lock); } /** * Add a new delay rule to LNet * There is no check for duplicated delay rule, all rules will be checked for * incoming message. */ int lnet_delay_rule_add(struct lnet_fault_attr *attr) { struct lnet_delay_rule *rule; int rc = 0; if (attr->u.delay.la_rate & attr->u.delay.la_interval) { CDEBUG(D_NET, "please provide either delay rate or delay interval, but not both at the same time %d/%d\n", attr->u.delay.la_rate, attr->u.delay.la_interval); return -EINVAL; } if (!attr->u.delay.la_latency) { CDEBUG(D_NET, "delay latency cannot be zero\n"); return -EINVAL; } if (lnet_fault_attr_validate(attr)) return -EINVAL; CFS_ALLOC_PTR(rule); if (!rule) return -ENOMEM; mutex_lock(&delay_dd.dd_mutex); if (!delay_dd.dd_running) { struct task_struct *task; /** * NB: although LND threads will process delayed message * in lnet_finalize, but there is no guarantee that LND * threads will be waken up if no other message needs to * be handled. * Only one daemon thread, performance is not the concern * of this simualation module. */ task = kthread_run(lnet_delay_rule_daemon, NULL, "lnet_dd"); if (IS_ERR(task)) { rc = PTR_ERR(task); goto failed; } wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running); } setup_timer(&rule->dl_timer, delay_timer_cb, (unsigned long)rule); spin_lock_init(&rule->dl_lock); INIT_LIST_HEAD(&rule->dl_msg_list); INIT_LIST_HEAD(&rule->dl_sched_link); rule->dl_attr = *attr; if (attr->u.delay.la_interval) { rule->dl_time_base = cfs_time_shift(attr->u.delay.la_interval); rule->dl_delay_time = cfs_time_shift(cfs_rand() % attr->u.delay.la_interval); } else { rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate; } rule->dl_msg_send = -1; lnet_net_lock(LNET_LOCK_EX); atomic_set(&rule->dl_refcount, 1); list_add(&rule->dl_link, &the_lnet.ln_delay_rules); lnet_net_unlock(LNET_LOCK_EX); CDEBUG(D_NET, "Added delay rule: src %s, dst %s, rate %d\n", libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_src), attr->u.delay.la_rate); mutex_unlock(&delay_dd.dd_mutex); return 0; failed: mutex_unlock(&delay_dd.dd_mutex); CFS_FREE_PTR(rule); return rc; } /** * Remove matched Delay Rules from lnet, if \a shutdown is true or both \a src * and \a dst are zero, all rules will be removed, otherwise only matched rules * will be removed. * If \a src is zero, then all rules have \a dst as destination will be remove * If \a dst is zero, then all rules have \a src as source will be removed * * When a delay rule is removed, all delayed messages of this rule will be * processed immediately. 
*/ int lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown) { struct lnet_delay_rule *rule; struct lnet_delay_rule *tmp; struct list_head rule_list; struct list_head msg_list; int n = 0; bool cleanup; INIT_LIST_HEAD(&rule_list); INIT_LIST_HEAD(&msg_list); if (shutdown) { src = 0; dst = 0; } mutex_lock(&delay_dd.dd_mutex); lnet_net_lock(LNET_LOCK_EX); list_for_each_entry_safe(rule, tmp, &the_lnet.ln_delay_rules, dl_link) { if (rule->dl_attr.fa_src != src && src) continue; if (rule->dl_attr.fa_dst != dst && dst) continue; CDEBUG(D_NET, "Remove delay rule: src %s->dst: %s (1/%d, %d)\n", libcfs_nid2str(rule->dl_attr.fa_src), libcfs_nid2str(rule->dl_attr.fa_dst), rule->dl_attr.u.delay.la_rate, rule->dl_attr.u.delay.la_interval); /* refcount is taken over by rule_list */ list_move(&rule->dl_link, &rule_list); } /* check if we need to shutdown delay_daemon */ cleanup = list_empty(&the_lnet.ln_delay_rules) && !list_empty(&rule_list); lnet_net_unlock(LNET_LOCK_EX); list_for_each_entry_safe(rule, tmp, &rule_list, dl_link) { list_del_init(&rule->dl_link); del_timer_sync(&rule->dl_timer); delayed_msg_check(rule, true, &msg_list); delay_rule_decref(rule); /* -1 for the_lnet.ln_delay_rules */ n++; } if (cleanup) { /* no more delay rule, shutdown delay_daemon */ LASSERT(delay_dd.dd_running); delay_dd.dd_running = 0; wake_up(&delay_dd.dd_waitq); while (!delay_dd.dd_stopped) wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_stopped); } mutex_unlock(&delay_dd.dd_mutex); if (!list_empty(&msg_list)) delayed_msg_process(&msg_list, shutdown); return n; } /** * List Delay Rule at position of \a pos */ int lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr, struct lnet_fault_stat *stat) { struct lnet_delay_rule *rule; int cpt; int i = 0; int rc = -ENOENT; cpt = lnet_net_lock_current(); list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) { if (i++ < pos) continue; spin_lock(&rule->dl_lock); *attr = rule->dl_attr; *stat = rule->dl_stat; spin_unlock(&rule->dl_lock); rc = 0; break; } lnet_net_unlock(cpt); return rc; } /** * reset counters for all Delay Rules */ void lnet_delay_rule_reset(void) { struct lnet_delay_rule *rule; int cpt; cpt = lnet_net_lock_current(); list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) { struct lnet_fault_attr *attr = &rule->dl_attr; spin_lock(&rule->dl_lock); memset(&rule->dl_stat, 0, sizeof(rule->dl_stat)); if (attr->u.delay.la_rate) { rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate; } else { rule->dl_delay_time = cfs_time_shift(cfs_rand() % attr->u.delay.la_interval); rule->dl_time_base = cfs_time_shift(attr->u.delay.la_interval); } spin_unlock(&rule->dl_lock); } lnet_net_unlock(cpt); } int lnet_fault_ctl(int opc, struct libcfs_ioctl_data *data) { struct lnet_fault_attr *attr; struct lnet_fault_stat *stat; attr = (struct lnet_fault_attr *)data->ioc_inlbuf1; switch (opc) { default: return -EINVAL; case LNET_CTL_DROP_ADD: if (!attr) return -EINVAL; return lnet_drop_rule_add(attr); case LNET_CTL_DROP_DEL: if (!attr) return -EINVAL; data->ioc_count = lnet_drop_rule_del(attr->fa_src, attr->fa_dst); return 0; case LNET_CTL_DROP_RESET: lnet_drop_rule_reset(); return 0; case LNET_CTL_DROP_LIST: stat = (struct lnet_fault_stat *)data->ioc_inlbuf2; if (!attr || !stat) return -EINVAL; return lnet_drop_rule_list(data->ioc_count, attr, stat); case LNET_CTL_DELAY_ADD: if (!attr) return -EINVAL; return lnet_delay_rule_add(attr); case LNET_CTL_DELAY_DEL: if (!attr) return -EINVAL; data->ioc_count = lnet_delay_rule_del(attr->fa_src, attr->fa_dst, false); 
return 0; case LNET_CTL_DELAY_RESET: lnet_delay_rule_reset(); return 0; case LNET_CTL_DELAY_LIST: stat = (struct lnet_fault_stat *)data->ioc_inlbuf2; if (!attr || !stat) return -EINVAL; return lnet_delay_rule_list(data->ioc_count, attr, stat); } } int lnet_fault_init(void) { BUILD_BUG_ON(LNET_PUT_BIT != 1 << LNET_MSG_PUT); BUILD_BUG_ON(LNET_ACK_BIT != 1 << LNET_MSG_ACK); BUILD_BUG_ON(LNET_GET_BIT != 1 << LNET_MSG_GET); BUILD_BUG_ON(LNET_REPLY_BIT != 1 << LNET_MSG_REPLY); mutex_init(&delay_dd.dd_mutex); spin_lock_init(&delay_dd.dd_lock); init_waitqueue_head(&delay_dd.dd_waitq); init_waitqueue_head(&delay_dd.dd_ctl_waitq); INIT_LIST_HEAD(&delay_dd.dd_sched_rules); return 0; } void lnet_fault_fini(void) { lnet_drop_rule_del(0, 0); lnet_delay_rule_del(0, 0, true); LASSERT(list_empty(&the_lnet.ln_drop_rules)); LASSERT(list_empty(&the_lnet.ln_delay_rules)); LASSERT(list_empty(&delay_dd.dd_sched_rules)); }
null
null
null
null
101,188
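A stand-alone sketch of the rate-based selection that drop_rule_match() implements in the record above: within every window of da_rate messages, exactly one randomly chosen message is dropped. Plain modulo stands in for the kernel's do_div(), cfs_rand() is replaced by rand(), and all names are illustrative:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const unsigned long rate = 8;		/* drop 1 of every 8 messages */
	unsigned long count = 0;
	unsigned long drop_at = rand() % rate;	/* victim in the first window */

	for (int i = 0; i < 24; i++) {
		int drop = (count == drop_at);

		count++;
		if (count % rate == 0)		/* window done: pick next victim */
			drop_at = count + rand() % rate;
		printf("msg %2d: %s\n", i, drop ? "DROP" : "pass");
	}
	return 0;
}

The time-based variant in the same function follows the analogous scheme with a random offset inside each da_interval-second window.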
2,026
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
167,021
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2004 Netfilter Core Team <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/jiffies.h> #include <linux/timer.h> #include <linux/netfilter.h> #include <net/netfilter/nf_conntrack_l4proto.h> static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ; static bool nf_generic_should_process(u8 proto) { switch (proto) { #ifdef CONFIG_NF_CT_PROTO_SCTP_MODULE case IPPROTO_SCTP: return false; #endif #ifdef CONFIG_NF_CT_PROTO_DCCP_MODULE case IPPROTO_DCCP: return false; #endif #ifdef CONFIG_NF_CT_PROTO_GRE_MODULE case IPPROTO_GRE: return false; #endif #ifdef CONFIG_NF_CT_PROTO_UDPLITE_MODULE case IPPROTO_UDPLITE: return false; #endif default: return true; } } static inline struct nf_generic_net *generic_pernet(struct net *net) { return &net->ct.nf_ct_proto.generic; } static bool generic_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, struct net *net, struct nf_conntrack_tuple *tuple) { tuple->src.u.all = 0; tuple->dst.u.all = 0; return true; } static bool generic_invert_tuple(struct nf_conntrack_tuple *tuple, const struct nf_conntrack_tuple *orig) { tuple->src.u.all = 0; tuple->dst.u.all = 0; return true; } /* Print out the per-protocol part of the tuple. */ static void generic_print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple) { } static unsigned int *generic_get_timeouts(struct net *net) { return &(generic_pernet(net)->timeout); } /* Returns verdict for packet, or -1 for invalid. */ static int generic_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, u_int8_t pf, unsigned int hooknum, unsigned int *timeout) { nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); return NF_ACCEPT; } /* Called when a new connection for this protocol found. */ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, unsigned int *timeouts) { bool ret; ret = nf_generic_should_process(nf_ct_protonum(ct)); if (!ret) pr_warn_once("conntrack: generic helper won't handle protocol %d. Please consider loading the specific helper module.\n", nf_ct_protonum(ct)); return ret; } #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_cttimeout.h> static int generic_timeout_nlattr_to_obj(struct nlattr *tb[], struct net *net, void *data) { unsigned int *timeout = data; struct nf_generic_net *gn = generic_pernet(net); if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT]) *timeout = ntohl(nla_get_be32(tb[CTA_TIMEOUT_GENERIC_TIMEOUT])) * HZ; else { /* Set default generic timeout. 
*/ *timeout = gn->timeout; } return 0; } static int generic_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeout = data; if (nla_put_be32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ))) goto nla_put_failure; return 0; nla_put_failure: return -ENOSPC; } static const struct nla_policy generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = { [CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 }, }; #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ #ifdef CONFIG_SYSCTL static struct ctl_table generic_sysctl_table[] = { { .procname = "nf_conntrack_generic_timeout", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { } }; #endif /* CONFIG_SYSCTL */ static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn, struct nf_generic_net *gn) { #ifdef CONFIG_SYSCTL pn->ctl_table = kmemdup(generic_sysctl_table, sizeof(generic_sysctl_table), GFP_KERNEL); if (!pn->ctl_table) return -ENOMEM; pn->ctl_table[0].data = &gn->timeout; #endif return 0; } static int generic_init_net(struct net *net, u_int16_t proto) { struct nf_generic_net *gn = generic_pernet(net); struct nf_proto_net *pn = &gn->pn; gn->timeout = nf_ct_generic_timeout; return generic_kmemdup_sysctl_table(pn, gn); } static struct nf_proto_net *generic_get_net_proto(struct net *net) { return &net->ct.nf_ct_proto.generic.pn; } struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly = { .l3proto = PF_UNSPEC, .l4proto = 255, .name = "unknown", .pkt_to_tuple = generic_pkt_to_tuple, .invert_tuple = generic_invert_tuple, .print_tuple = generic_print_tuple, .packet = generic_packet, .get_timeouts = generic_get_timeouts, .new = generic_new, #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) .ctnl_timeout = { .nlattr_to_obj = generic_timeout_nlattr_to_obj, .obj_to_nlattr = generic_timeout_obj_to_nlattr, .nlattr_max = CTA_TIMEOUT_GENERIC_MAX, .obj_size = sizeof(unsigned int), .nla_policy = generic_timeout_nla_policy, }, #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ .init_net = generic_init_net, .get_net_proto = generic_get_net_proto, };
null
null
null
null
75,369
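generic_timeout_nlattr_to_obj() in the record above converts a big-endian seconds value carried in the netlink attribute into jiffies before storing it. A user-space illustration of just that conversion, with HZ assumed to be 100 purely for the example (real kernels vary):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define HZ 100	/* assumption for this sketch */

int main(void)
{
	uint32_t wire = htonl(600);		 /* 600 seconds on the wire */
	unsigned int timeout = ntohl(wire) * HZ; /* stored as jiffies */

	printf("%u jiffies (%u s)\n", timeout, timeout / HZ);
	return 0;
}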
13,528
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
13,528
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/metrics/machine_id_provider.h"

#include <windows.h>
#include <stdint.h>
#include <winioctl.h>

#include <algorithm>
#include <vector>

#include "base/base_paths.h"
#include "base/files/file_path.h"
#include "base/path_service.h"
#include "base/threading/thread_restrictions.h"
#include "base/win/scoped_handle.h"

namespace metrics {

// static
bool MachineIdProvider::HasId() {
  return true;
}

// On Windows, the machine id is based on the serial number of the drive
// Chrome is running from.
// static
std::string MachineIdProvider::GetMachineId() {
  base::AssertBlockingAllowed();

  // Use the program's path to get the drive used for the machine id. This
  // means that whenever the underlying drive changes, it's considered a new
  // machine. This is fine as we do not support migrating Chrome installs to
  // new drives.
  base::FilePath executable_path;
  if (!PathService::Get(base::FILE_EXE, &executable_path)) {
    NOTREACHED();
    return std::string();
  }

  std::vector<base::FilePath::StringType> path_components;
  executable_path.GetComponents(&path_components);
  if (path_components.empty()) {
    NOTREACHED();
    return std::string();
  }
  base::FilePath::StringType drive_name = L"\\\\.\\" + path_components[0];

  base::win::ScopedHandle drive_handle(
      CreateFile(drive_name.c_str(), 0, FILE_SHARE_READ | FILE_SHARE_WRITE,
                 NULL, OPEN_EXISTING, 0, NULL));

  STORAGE_PROPERTY_QUERY query = {};
  query.PropertyId = StorageDeviceProperty;
  query.QueryType = PropertyStandardQuery;

  // Perform an initial query to get the number of bytes being returned.
  DWORD bytes_returned;
  STORAGE_DESCRIPTOR_HEADER header = {};
  BOOL status = DeviceIoControl(
      drive_handle.Get(), IOCTL_STORAGE_QUERY_PROPERTY, &query,
      sizeof(STORAGE_PROPERTY_QUERY), &header,
      sizeof(STORAGE_DESCRIPTOR_HEADER), &bytes_returned, NULL);

  if (!status)
    return std::string();

  // Query for the actual serial number.
  std::vector<int8_t> output_buf(header.Size);
  status = DeviceIoControl(drive_handle.Get(), IOCTL_STORAGE_QUERY_PROPERTY,
                           &query, sizeof(STORAGE_PROPERTY_QUERY),
                           &output_buf[0], output_buf.size(), &bytes_returned,
                           NULL);

  if (!status)
    return std::string();

  const STORAGE_DEVICE_DESCRIPTOR* device_descriptor =
      reinterpret_cast<STORAGE_DEVICE_DESCRIPTOR*>(&output_buf[0]);

  // The serial number is stored in the |output_buf| as a null-terminated
  // string starting at the specified offset.
  const DWORD offset = device_descriptor->SerialNumberOffset;
  if (offset >= output_buf.size())
    return std::string();

  // Make sure that the null-terminator exists.
  const std::vector<int8_t>::iterator serial_number_begin =
      output_buf.begin() + offset;
  const std::vector<int8_t>::iterator null_location =
      std::find(serial_number_begin, output_buf.end(), '\0');
  if (null_location == output_buf.end())
    return std::string();

  const char* serial_number =
      reinterpret_cast<const char*>(&output_buf[offset]);

  return std::string(serial_number);
}

}  // namespace metrics
null
null
null
null
10,391
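The serial-number handling in GetMachineId() above is careful in two ways: the descriptor-supplied offset is range-checked against the buffer, and a terminating NUL must be found inside the buffer before the bytes are treated as a string. A hedged C sketch of the same pattern (names and buffer contents are invented for the example):

#include <stdio.h>
#include <string.h>

static const char *extract_serial(const char *buf, size_t len, size_t offset)
{
	if (offset >= len)
		return NULL;				/* offset out of range */
	if (!memchr(buf + offset, '\0', len - offset))
		return NULL;				/* no terminator inside buffer */
	return buf + offset;
}

int main(void)
{
	char buf[16] = { 0 };

	strcpy(buf + 4, "SN1234");			/* serial at offset 4 */
	const char *sn = extract_serial(buf, sizeof(buf), 4);
	printf("serial: %s\n", sn ? sn : "(invalid)");
	return 0;
}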
28,494
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
193,489
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/****************************************************************************** * * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * ******************************************************************************/ #define _RTW_IOCTL_SET_C_ #include <osdep_service.h> #include <drv_types.h> #include <rtw_ioctl_set.h> #include <hal_intf.h> extern void indicate_wx_scan_complete_event(struct adapter *padapter); u8 rtw_do_join(struct adapter *padapter) { struct list_head *plist, *phead; u8 *pibss = NULL; struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); struct __queue *queue = &(pmlmepriv->scanned_queue); u8 ret = _SUCCESS; spin_lock_bh(&(pmlmepriv->scanned_queue.lock)); phead = get_list_head(queue); plist = phead->next; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("\n rtw_do_join: phead = %p; plist = %p\n\n\n", phead, plist)); pmlmepriv->cur_network.join_res = -2; set_fwstate(pmlmepriv, _FW_UNDER_LINKING); pmlmepriv->pscanned = plist; pmlmepriv->to_join = true; if (list_empty(&queue->queue)) { spin_unlock_bh(&(pmlmepriv->scanned_queue.lock)); _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); /* when set_ssid/set_bssid for rtw_do_join(), but scanning queue is empty */ /* we try to issue sitesurvey firstly */ if (!pmlmepriv->LinkDetectInfo.bBusyTraffic || pmlmepriv->to_roaming > 0) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("rtw_do_join(): site survey if scanned_queue is empty\n.")); /* submit site_survey_cmd */ ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0); if (_SUCCESS != ret) { pmlmepriv->to_join = false; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("rtw_do_join(): site survey return error\n.")); } } else { pmlmepriv->to_join = false; ret = _FAIL; } goto exit; } else { int select_ret; spin_unlock_bh(&(pmlmepriv->scanned_queue.lock)); select_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv); if (select_ret == _SUCCESS) { pmlmepriv->to_join = false; mod_timer(&pmlmepriv->assoc_timer, jiffies + msecs_to_jiffies(MAX_JOIN_TIMEOUT)); } else { if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) { /* submit createbss_cmd to change to a ADHOC_MASTER */ /* pmlmepriv->lock has been acquired by caller... 
*/ struct wlan_bssid_ex *pdev_network = &(padapter->registrypriv.dev_network); pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE; pibss = padapter->registrypriv.dev_network.MacAddress; memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid)); rtw_update_registrypriv_dev_network(padapter); rtw_generate_random_ibss(pibss); if (rtw_createbss_cmd(padapter) != _SUCCESS) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("***Error =>do_goin: rtw_createbss_cmd status FAIL***\n ")); ret = false; goto exit; } pmlmepriv->to_join = false; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("***Error => rtw_select_and_join_from_scanned_queue FAIL under STA_Mode***\n ")); } else { /* can't associate ; reset under-linking */ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); /* when set_ssid/set_bssid for rtw_do_join(), but there are no desired bss in scanning queue */ /* we try to issue sitesurvey firstly */ if (!pmlmepriv->LinkDetectInfo.bBusyTraffic || pmlmepriv->to_roaming > 0) { ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0); if (_SUCCESS != ret) { pmlmepriv->to_join = false; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("do_join(): site survey return error\n.")); } } else { ret = _FAIL; pmlmepriv->to_join = false; } } } } exit: return ret; } u8 rtw_set_802_11_bssid(struct adapter *padapter, u8 *bssid) { u8 status = _SUCCESS; u32 cur_time = 0; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; DBG_88E_LEVEL(_drv_info_, "set bssid:%pM\n", bssid); if ((bssid[0] == 0x00 && bssid[1] == 0x00 && bssid[2] == 0x00 && bssid[3] == 0x00 && bssid[4] == 0x00 && bssid[5] == 0x00) || (bssid[0] == 0xFF && bssid[1] == 0xFF && bssid[2] == 0xFF && bssid[3] == 0xFF && bssid[4] == 0xFF && bssid[5] == 0xFF)) { status = _FAIL; goto exit; } spin_lock_bh(&pmlmepriv->lock); DBG_88E("Set BSSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv)); if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) goto handle_tkip_countermeasure; else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) goto release_mlme_lock; if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE)) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_bssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n")); if (!memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid, ETH_ALEN)) { if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == false) goto release_mlme_lock;/* it means driver is in WIFI_ADHOC_MASTER_STATE, we needn't create bss again. */ } else { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("Set BSSID not the same bssid\n")); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_bssid =%pM\n", (bssid))); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("cur_bssid =%pM\n", (pmlmepriv->cur_network.network.MacAddress))); rtw_disassoc_cmd(padapter, 0, true); if (check_fwstate(pmlmepriv, _FW_LINKED) == true) rtw_indicate_disconnect(padapter); rtw_free_assoc_resources(padapter); if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) { _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE); set_fwstate(pmlmepriv, WIFI_ADHOC_STATE); } } } handle_tkip_countermeasure: /* should we add something here...? 
*/ if (padapter->securitypriv.btkip_countermeasure) { cur_time = jiffies; if ((cur_time - padapter->securitypriv.btkip_countermeasure_time) > 60 * HZ) { padapter->securitypriv.btkip_countermeasure = false; padapter->securitypriv.btkip_countermeasure_time = 0; } else { status = _FAIL; goto release_mlme_lock; } } memcpy(&pmlmepriv->assoc_bssid, bssid, ETH_ALEN); pmlmepriv->assoc_by_bssid = true; if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) pmlmepriv->to_join = true; else status = rtw_do_join(padapter); release_mlme_lock: spin_unlock_bh(&pmlmepriv->lock); exit: RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("rtw_set_802_11_bssid: status=%d\n", status)); return status; } u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid) { u8 status = _SUCCESS; u32 cur_time = 0; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct wlan_network *pnetwork = &pmlmepriv->cur_network; DBG_88E_LEVEL(_drv_info_, "set ssid [%s] fw_state=0x%08x\n", ssid->Ssid, get_fwstate(pmlmepriv)); if (!padapter->hw_init_completed) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("set_ssid: hw_init_completed == false =>exit!!!\n")); status = _FAIL; goto exit; } spin_lock_bh(&pmlmepriv->lock); DBG_88E("Set SSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv)); if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) goto handle_tkip_countermeasure; else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) goto release_mlme_lock; if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE)) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_ssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n")); if ((pmlmepriv->assoc_ssid.SsidLength == ssid->SsidLength) && (!memcmp(&pmlmepriv->assoc_ssid.Ssid, ssid->Ssid, ssid->SsidLength))) { if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == false)) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("Set SSID is the same ssid, fw_state = 0x%08x\n", get_fwstate(pmlmepriv))); if (!rtw_is_same_ibss(padapter, pnetwork)) { /* if in WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE, create bss or rejoin again */ rtw_disassoc_cmd(padapter, 0, true); if (check_fwstate(pmlmepriv, _FW_LINKED) == true) rtw_indicate_disconnect(padapter); rtw_free_assoc_resources(padapter); if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) { _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE); set_fwstate(pmlmepriv, WIFI_ADHOC_STATE); } } else { goto release_mlme_lock;/* it means driver is in WIFI_ADHOC_MASTER_STATE, we needn't create bss again. 
*/ } } else { rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_JOINBSS, 1); } } else { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("Set SSID not the same ssid\n")); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_ssid =[%s] len = 0x%x\n", ssid->Ssid, (unsigned int)ssid->SsidLength)); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("assoc_ssid =[%s] len = 0x%x\n", pmlmepriv->assoc_ssid.Ssid, (unsigned int)pmlmepriv->assoc_ssid.SsidLength)); rtw_disassoc_cmd(padapter, 0, true); if (check_fwstate(pmlmepriv, _FW_LINKED) == true) rtw_indicate_disconnect(padapter); rtw_free_assoc_resources(padapter); if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) { _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE); set_fwstate(pmlmepriv, WIFI_ADHOC_STATE); } } } handle_tkip_countermeasure: if (padapter->securitypriv.btkip_countermeasure) { cur_time = jiffies; if ((cur_time - padapter->securitypriv.btkip_countermeasure_time) > 60 * HZ) { padapter->securitypriv.btkip_countermeasure = false; padapter->securitypriv.btkip_countermeasure_time = 0; } else { status = _FAIL; goto release_mlme_lock; } } memcpy(&pmlmepriv->assoc_ssid, ssid, sizeof(struct ndis_802_11_ssid)); pmlmepriv->assoc_by_bssid = false; if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) pmlmepriv->to_join = true; else status = rtw_do_join(padapter); release_mlme_lock: spin_unlock_bh(&pmlmepriv->lock); exit: RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("-rtw_set_802_11_ssid: status =%d\n", status)); return status; } u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter, enum ndis_802_11_network_infra networktype) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct wlan_network *cur_network = &pmlmepriv->cur_network; enum ndis_802_11_network_infra *pold_state = &(cur_network->network.InfrastructureMode); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_notice_, ("+rtw_set_802_11_infrastructure_mode: old =%d new =%d fw_state = 0x%08x\n", *pold_state, networktype, get_fwstate(pmlmepriv))); if (*pold_state != networktype) { spin_lock_bh(&pmlmepriv->lock); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, (" change mode!")); /* DBG_88E("change mode, old_mode =%d, new_mode =%d, fw_state = 0x%x\n", *pold_state, networktype, get_fwstate(pmlmepriv)); */ if (*pold_state == Ndis802_11APMode) { /* change to other mode from Ndis802_11APMode */ cur_network->join_res = -1; #ifdef CONFIG_88EU_AP_MODE stop_ap_mode(padapter); #endif } if ((check_fwstate(pmlmepriv, _FW_LINKED)) || (*pold_state == Ndis802_11IBSS)) rtw_disassoc_cmd(padapter, 0, true); if ((check_fwstate(pmlmepriv, _FW_LINKED)) || (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) rtw_free_assoc_resources(padapter); if ((*pold_state == Ndis802_11Infrastructure) || (*pold_state == Ndis802_11IBSS)) { if (check_fwstate(pmlmepriv, _FW_LINKED) == true) rtw_indicate_disconnect(padapter); /* will clr Linked_state; before this function, we must have checked whether issue dis-assoc_cmd or not */ } *pold_state = networktype; _clr_fwstate_(pmlmepriv, ~WIFI_NULL_STATE); switch (networktype) { case Ndis802_11IBSS: set_fwstate(pmlmepriv, WIFI_ADHOC_STATE); break; case Ndis802_11Infrastructure: set_fwstate(pmlmepriv, WIFI_STATION_STATE); break; case Ndis802_11APMode: set_fwstate(pmlmepriv, WIFI_AP_STATE); #ifdef CONFIG_88EU_AP_MODE start_ap_mode(padapter); #endif break; case Ndis802_11AutoUnknown: case Ndis802_11InfrastructureMax: break; } spin_unlock_bh(&pmlmepriv->lock); } return true; } u8 rtw_set_802_11_disassociate(struct adapter *padapter) { struct mlme_priv 
*pmlmepriv = &padapter->mlmepriv; spin_lock_bh(&pmlmepriv->lock); if (check_fwstate(pmlmepriv, _FW_LINKED)) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_disassociate: rtw_indicate_disconnect\n")); rtw_disassoc_cmd(padapter, 0, true); rtw_indicate_disconnect(padapter); rtw_free_assoc_resources(padapter); rtw_pwr_wakeup(padapter); } spin_unlock_bh(&pmlmepriv->lock); return true; } u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_ssid *pssid, int ssid_max_num) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; u8 res = true; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("+rtw_set_802_11_bssid_list_scan(), fw_state =%x\n", get_fwstate(pmlmepriv))); if (padapter == NULL) { res = false; goto exit; } if (!padapter->hw_init_completed) { res = false; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n === rtw_set_802_11_bssid_list_scan:hw_init_completed == false ===\n")); goto exit; } if ((check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING)) || (pmlmepriv->LinkDetectInfo.bBusyTraffic)) { /* Scan or linking is in progress, do nothing. */ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("rtw_set_802_11_bssid_list_scan fail since fw_state = %x\n", get_fwstate(pmlmepriv))); res = true; if (check_fwstate(pmlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)) == true) RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n###_FW_UNDER_SURVEY|_FW_UNDER_LINKING\n\n")); else RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n###pmlmepriv->sitesurveyctrl.traffic_busy == true\n\n")); } else { if (rtw_is_scan_deny(padapter)) { DBG_88E(FUNC_ADPT_FMT": scan deny\n", FUNC_ADPT_ARG(padapter)); indicate_wx_scan_complete_event(padapter); return _SUCCESS; } spin_lock_bh(&pmlmepriv->lock); res = rtw_sitesurvey_cmd(padapter, pssid, ssid_max_num, NULL, 0); spin_unlock_bh(&pmlmepriv->lock); } exit: return res; } u8 rtw_set_802_11_authentication_mode(struct adapter *padapter, enum ndis_802_11_auth_mode authmode) { struct security_priv *psecuritypriv = &padapter->securitypriv; int res; u8 ret; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_802_11_auth.mode(): mode =%x\n", authmode)); psecuritypriv->ndisauthtype = authmode; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("rtw_set_802_11_authentication_mode:psecuritypriv->ndisauthtype=%d", psecuritypriv->ndisauthtype)); if (psecuritypriv->ndisauthtype > 3) psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X; res = rtw_set_auth(padapter, psecuritypriv); if (res == _SUCCESS) ret = true; else ret = false; return ret; } u8 rtw_set_802_11_add_wep(struct adapter *padapter, struct ndis_802_11_wep *wep) { int keyid, res; struct security_priv *psecuritypriv = &(padapter->securitypriv); u8 ret = _SUCCESS; keyid = wep->KeyIndex & 0x3fffffff; if (keyid >= 4) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("MgntActrtw_set_802_11_add_wep:keyid>4 =>fail\n")); ret = false; goto exit; } switch (wep->KeyLength) { case 5: psecuritypriv->dot11PrivacyAlgrthm = _WEP40_; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_add_wep:wep->KeyLength = 5\n")); break; case 13: psecuritypriv->dot11PrivacyAlgrthm = _WEP104_; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_add_wep:wep->KeyLength = 13\n")); break; default: psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_add_wep:wep->KeyLength!= 5 or 13\n")); break; } RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, 
("rtw_set_802_11_add_wep:before memcpy, wep->KeyLength = 0x%x wep->KeyIndex = 0x%x keyid =%x\n", wep->KeyLength, wep->KeyIndex, keyid)); memcpy(&(psecuritypriv->dot11DefKey[keyid].skey[0]), &(wep->KeyMaterial), wep->KeyLength); psecuritypriv->dot11DefKeylen[keyid] = wep->KeyLength; psecuritypriv->dot11PrivacyKeyIndex = keyid; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("rtw_set_802_11_add_wep:security key material : %x %x %x %x %x %x %x %x %x %x %x %x %x\n", psecuritypriv->dot11DefKey[keyid].skey[0], psecuritypriv->dot11DefKey[keyid].skey[1], psecuritypriv->dot11DefKey[keyid].skey[2], psecuritypriv->dot11DefKey[keyid].skey[3], psecuritypriv->dot11DefKey[keyid].skey[4], psecuritypriv->dot11DefKey[keyid].skey[5], psecuritypriv->dot11DefKey[keyid].skey[6], psecuritypriv->dot11DefKey[keyid].skey[7], psecuritypriv->dot11DefKey[keyid].skey[8], psecuritypriv->dot11DefKey[keyid].skey[9], psecuritypriv->dot11DefKey[keyid].skey[10], psecuritypriv->dot11DefKey[keyid].skey[11], psecuritypriv->dot11DefKey[keyid].skey[12])); res = rtw_set_key(padapter, psecuritypriv, keyid, 1); if (res == _FAIL) ret = false; exit: return ret; } /* * rtw_get_cur_max_rate - * @adapter: pointer to struct adapter structure * * Return 0 or 100Kbps */ u16 rtw_get_cur_max_rate(struct adapter *adapter) { int i = 0; u8 *p; u16 rate = 0, max_rate = 0; struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); struct registry_priv *pregistrypriv = &adapter->registrypriv; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network; u8 rf_type = 0; u8 bw_40MHz = 0, short_GI_20 = 0, short_GI_40 = 0; u32 ht_ielen = 0; if ((!check_fwstate(pmlmepriv, _FW_LINKED)) && (!check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) return 0; if (pmlmeext->cur_wireless_mode & (WIRELESS_11_24N|WIRELESS_11_5N)) { p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength-12); if (p && ht_ielen > 0) { /* cur_bwmod is updated by beacon, pmlmeinfo is updated by association response */ bw_40MHz = (pmlmeext->cur_bwmode && (HT_INFO_HT_PARAM_REC_TRANS_CHNL_WIDTH & pmlmeinfo->HT_info.infos[0])) ? 1 : 0; short_GI_20 = (le16_to_cpu(pmlmeinfo->HT_caps.cap_info) & IEEE80211_HT_CAP_SGI_20) ? 1 : 0; short_GI_40 = (le16_to_cpu(pmlmeinfo->HT_caps.cap_info) & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; rtw_hal_get_hwreg(adapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type)); max_rate = rtw_mcs_rate( rf_type, bw_40MHz & (pregistrypriv->cbw40_enable), short_GI_20, short_GI_40, pmlmeinfo->HT_caps.mcs.rx_mask ); } } else { while ((pcur_bss->SupportedRates[i] != 0) && (pcur_bss->SupportedRates[i] != 0xFF)) { rate = pcur_bss->SupportedRates[i]&0x7F; if (rate > max_rate) max_rate = rate; i++; } max_rate = max_rate*10/2; } return max_rate; } /* * rtw_set_country - * @adapter: pointer to struct adapter structure * @country_code: string of country code * * Return _SUCCESS or _FAIL */ int rtw_set_country(struct adapter *adapter, const char *country_code) { int i; int channel_plan = RT_CHANNEL_DOMAIN_WORLD_WIDE_5G; DBG_88E("%s country_code:%s\n", __func__, country_code); for (i = 0; i < ARRAY_SIZE(channel_table); i++) { if (0 == strcmp(channel_table[i].name, country_code)) { channel_plan = channel_table[i].channel_plan; break; } } if (i == ARRAY_SIZE(channel_table)) DBG_88E("%s unknown country_code:%s\n", __func__, country_code); return rtw_set_chplan_cmd(adapter, channel_plan, 1); }
null
null
null
null
101,836
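rtw_set_802_11_bssid() in the record above opens with a byte-by-byte rejection of the all-zeros and broadcast BSSIDs before touching any driver state. The same check, written compactly with memcmp() in a stand-alone sketch:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static int bssid_is_valid(const unsigned char *bssid)
{
	static const unsigned char zero[ETH_ALEN]  = { 0 };
	static const unsigned char bcast[ETH_ALEN] = { 0xFF, 0xFF, 0xFF,
						       0xFF, 0xFF, 0xFF };

	return memcmp(bssid, zero, ETH_ALEN) &&
	       memcmp(bssid, bcast, ETH_ALEN);
}

int main(void)
{
	unsigned char good[ETH_ALEN] = { 0x00, 0x1A, 0x2B, 0x3C, 0x4D, 0x5E };
	unsigned char bad[ETH_ALEN]  = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

	printf("good: %d, broadcast: %d\n",
	       bssid_is_valid(good), bssid_is_valid(bad));
	return 0;
}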
3,268
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
156,325
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/* * RTP packetization for H.261 video (RFC 4587) * Copyright (c) 2014 Thomas Volkert <[email protected]> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avformat.h" #include "rtpenc.h" #define RTP_H261_HEADER_SIZE 4 static const uint8_t *find_resync_marker_reverse(const uint8_t *av_restrict start, const uint8_t *av_restrict end) { const uint8_t *p = end - 1; start += 1; /* Make sure we never return the original start. */ for (; p > start; p--) { if (p[0] == 0 && p[1] == 1) return p; } return end; } void ff_rtp_send_h261(AVFormatContext *ctx, const uint8_t *frame_buf, int frame_size) { int cur_frame_size; int last_packet_of_frame; RTPMuxContext *rtp_ctx = ctx->priv_data; /* use the default 90 KHz time stamp */ rtp_ctx->timestamp = rtp_ctx->cur_timestamp; /* continue as long as not all frame data is processed */ while (frame_size > 0) { /* * encode the H.261 payload header according to section 4.1 of RFC 4587: * (uses 4 bytes between RTP header and H.261 stream per packet) * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |SBIT |EBIT |I|V| GOBN | MBAP | QUANT | HMVD | VMVD | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Start bit position (SBIT): 3 bits * End bit position (EBIT): 3 bits * INTRA-frame encoded data (I): 1 bit * Motion Vector flag (V): 1 bit * GOB number (GOBN): 4 bits * Macroblock address predictor (MBAP): 5 bits * Quantizer (QUANT): 5 bits * Horizontal motion vector data (HMVD): 5 bits * Vertical motion vector data (VMVD): 5 bits */ rtp_ctx->buf[0] = 1; /* sbit=0, ebit=0, i=0, v=1 */ rtp_ctx->buf[1] = 0; /* gobn=0, mbap=0 */ rtp_ctx->buf[2] = 0; /* quant=0, hmvd=5 */ rtp_ctx->buf[3] = 0; /* vmvd=0 */ if (frame_size < 2 || frame_buf[0] != 0 || frame_buf[1] != 1) { /* A full, correct fix for this would be to make the H.261 encoder * support inserting extra GOB headers (triggered by setting e.g. * "-ps 1"), and including information about macroblock boundaries * (such as for h263_rfc2190). 
*/ av_log(ctx, AV_LOG_WARNING, "RTP/H.261 packet not cut at a GOB boundary, not signaled correctly\n"); } cur_frame_size = FFMIN(rtp_ctx->max_payload_size - RTP_H261_HEADER_SIZE, frame_size); /* look for a better place to split the frame into packets */ if (cur_frame_size < frame_size) { const uint8_t *packet_end = find_resync_marker_reverse(frame_buf, frame_buf + cur_frame_size); cur_frame_size = packet_end - frame_buf; } /* calculate the "marker" bit for the RTP header */ last_packet_of_frame = cur_frame_size == frame_size; /* complete and send RTP packet */ memcpy(&rtp_ctx->buf[RTP_H261_HEADER_SIZE], frame_buf, cur_frame_size); ff_rtp_send_data(ctx, rtp_ctx->buf, RTP_H261_HEADER_SIZE + cur_frame_size, last_packet_of_frame); frame_buf += cur_frame_size; frame_size -= cur_frame_size; } }
null
null
null
null
72,380
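find_resync_marker_reverse() from the FFmpeg record above picks a better packet split point by scanning backwards for the two-byte 0x00 0x01 GOB start code, never returning the original start pointer. A self-contained copy of that search; note that end must point strictly inside a larger frame (as it does in the muxer) so the p[1] read stays in bounds:

#include <stdio.h>
#include <stdint.h>

static const uint8_t *find_marker_reverse(const uint8_t *start,
					  const uint8_t *end)
{
	const uint8_t *p = end - 1;

	start += 1;			/* never split at the very start */
	for (; p > start; p--)
		if (p[0] == 0 && p[1] == 1)
			return p;
	return end;			/* no better split point found */
}

int main(void)
{
	uint8_t frame[10] = { 0x00, 0x01, 0xAA, 0xBB, 0x00,
			      0x01, 0xCC, 0xDD, 0xEE, 0xFF };
	const uint8_t *split = find_marker_reverse(frame, frame + 8);

	printf("split at offset %ld\n", (long)(split - frame));
	return 0;
}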
40,047
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
205,042
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/*
 * scsicam.h - SCSI CAM support functions, use for HDIO_GETGEO, etc.
 *
 * Copyright 1993, 1994 Drew Eckhardt
 *	Visionary Computing
 *	(Unix and Linux consulting and custom programming)
 *	[email protected]
 *	+1 (303) 786-7975
 *
 * For more information, please consult the SCSI-CAM draft.
 */

#ifndef SCSICAM_H
#define SCSICAM_H

extern int scsicam_bios_param(struct block_device *bdev, sector_t capacity,
			      int *ip);
extern int scsi_partsize(unsigned char *buf, unsigned long capacity,
			 unsigned int *cyls, unsigned int *hds,
			 unsigned int *secs);
extern unsigned char *scsi_bios_ptable(struct block_device *bdev);

#endif /* def SCSICAM_H */
null
null
null
null
113,389
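The scsi_partsize() declaration in the header above returns a cylinders/heads/sectors triple for a given capacity. As a toy illustration of the underlying relationship (not the actual partition-table heuristics): once heads-per-cylinder and sectors-per-track are chosen, the cylinder count follows by division. The 255/63 pair is the common large-disk BIOS convention, assumed here only for the example:

#include <stdio.h>

int main(void)
{
	unsigned long capacity = 40 * 1024 * 2048;	/* 40 GiB in 512-byte sectors */
	unsigned int hds = 255, secs = 63;
	unsigned int cyls = capacity / (hds * secs);

	printf("C/H/S = %u/%u/%u\n", cyls, hds, secs);
	return 0;
}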
72,275
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
72,275
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "services/network/public/cpp/cors/cors_error_status.h"

#include "net/base/net_errors.h"

namespace network {

// Note: |cors_error| is initialized to kMaxValue to keep the value inside
// the valid enum value range. The value is meaningless and should be
// overridden immediately by IPC deserialization code.
CORSErrorStatus::CORSErrorStatus()
    : CORSErrorStatus(network::mojom::CORSError::kMaxValue) {}

CORSErrorStatus::CORSErrorStatus(const CORSErrorStatus& status) = default;

CORSErrorStatus::CORSErrorStatus(network::mojom::CORSError error)
    : cors_error(error) {}

CORSErrorStatus::CORSErrorStatus(
    network::mojom::CORSError error,
    scoped_refptr<net::HttpResponseHeaders> headers)
    : CORSErrorStatus(error) {
  related_response_headers = headers;
}

CORSErrorStatus::~CORSErrorStatus() = default;

bool CORSErrorStatus::operator==(const CORSErrorStatus& rhs) const {
  return cors_error == rhs.cors_error &&
         related_response_headers == rhs.related_response_headers;
}

}  // namespace network
null
null
null
null
69,138
37,419
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
202,414
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/*
 * Copyright (C) 2014 Free Electrons
 *
 * License Terms: GNU General Public License v2
 * Author: Boris BREZILLON <[email protected]>
 *
 * Allwinner A31 APB0 clock gates driver
 *
 */

#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define SUN6I_APB0_GATES_MAX_SIZE 32

struct gates_data {
	DECLARE_BITMAP(mask, SUN6I_APB0_GATES_MAX_SIZE);
};

static const struct gates_data sun6i_a31_apb0_gates __initconst = {
	.mask = {0x7F},
};

static const struct gates_data sun8i_a23_apb0_gates __initconst = {
	.mask = {0x5D},
};

static const struct of_device_id sun6i_a31_apb0_gates_clk_dt_ids[] = {
	{ .compatible = "allwinner,sun6i-a31-apb0-gates-clk",
	  .data = &sun6i_a31_apb0_gates },
	{ .compatible = "allwinner,sun8i-a23-apb0-gates-clk",
	  .data = &sun8i_a23_apb0_gates },
	{ /* sentinel */ }
};

static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct clk_onecell_data *clk_data;
	const struct of_device_id *device;
	const struct gates_data *data;
	const char *clk_parent;
	const char *clk_name;
	struct resource *r;
	void __iomem *reg;
	int ngates;
	int i;
	int j = 0;

	if (!np)
		return -ENODEV;

	device = of_match_device(sun6i_a31_apb0_gates_clk_dt_ids, &pdev->dev);
	if (!device)
		return -ENODEV;
	data = device->data;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	reg = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	clk_parent = of_clk_get_parent_name(np, 0);
	if (!clk_parent)
		return -EINVAL;

	clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
				GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;

	/* Worst-case size approximation and memory allocation */
	ngates = find_last_bit(data->mask, SUN6I_APB0_GATES_MAX_SIZE);
	clk_data->clks = devm_kcalloc(&pdev->dev, (ngates + 1),
				      sizeof(struct clk *), GFP_KERNEL);
	if (!clk_data->clks)
		return -ENOMEM;

	for_each_set_bit(i, data->mask, SUN6I_APB0_GATES_MAX_SIZE) {
		of_property_read_string_index(np, "clock-output-names",
					      j, &clk_name);

		clk_data->clks[i] = clk_register_gate(&pdev->dev, clk_name,
						      clk_parent, 0, reg, i,
						      0, NULL);
		WARN_ON(IS_ERR(clk_data->clks[i]));

		j++;
	}

	clk_data->clk_num = ngates + 1;

	return of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
}

static struct platform_driver sun6i_a31_apb0_gates_clk_driver = {
	.driver = {
		.name = "sun6i-a31-apb0-gates-clk",
		.of_match_table = sun6i_a31_apb0_gates_clk_dt_ids,
	},
	.probe = sun6i_a31_apb0_gates_clk_probe,
};
builtin_platform_driver(sun6i_a31_apb0_gates_clk_driver);
null
null
null
null
110,761
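In the probe routine above, gate clocks are registered only for bits set in the per-SoC mask (0x7F for the A31 variant, 0x5D for the A23), so the gate's hardware bit position i and its position j within "clock-output-names" can diverge. A freestanding sketch of that walk in plain C, without the kernel's for_each_set_bit helper (the printout format is mine):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint32_t mask = 0x5D; /* sun8i-a23: bits 0, 2, 3, 4, 6 */
    int j = 0;                  /* position into "clock-output-names" */

    for (int i = 0; i < 32; i++) {
        if (!(mask & (1u << i)))
            continue; /* gap in the register: no gate at this bit */
        printf("gate bit %d -> clock-output-names[%d]\n", i, j);
        j++;
    }
    return 0;
}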
415
3,4,5,6,7,16
train_val
9a5467bf7b6e9e02ec9c3da4e23747c05faeaac6
165,410
linux
1
https://github.com/torvalds/linux
2013-02-19 20:27:03+08:00
static int crypto_report_one(struct crypto_alg *alg,
			     struct crypto_user_alg *ualg, struct sk_buff *skb)
{
	memcpy(&ualg->cru_name, &alg->cra_name, sizeof(ualg->cru_name));
	memcpy(&ualg->cru_driver_name, &alg->cra_driver_name,
	       sizeof(ualg->cru_driver_name));
	memcpy(&ualg->cru_module_name, module_name(alg->cra_module),
	       CRYPTO_MAX_ALG_NAME);

	ualg->cru_flags = alg->cra_flags;
	ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);

	if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
		goto nla_put_failure;
	if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
		struct crypto_report_larval rl;

		snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval");
		if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
			    sizeof(struct crypto_report_larval), &rl))
			goto nla_put_failure;
		goto out;
	}

	if (alg->cra_type && alg->cra_type->report) {
		if (alg->cra_type->report(skb, alg))
			goto nla_put_failure;

		goto out;
	}

	switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
	case CRYPTO_ALG_TYPE_CIPHER:
		if (crypto_report_cipher(skb, alg))
			goto nla_put_failure;

		break;
	case CRYPTO_ALG_TYPE_COMPRESS:
		if (crypto_report_comp(skb, alg))
			goto nla_put_failure;

		break;
	}

out:
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
CVE-2013-2548
CWE-310
https://github.com/torvalds/linux/commit/9a5467bf7b6e9e02ec9c3da4e23747c05faeaac6
Low
3,304
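This is the one flagged-vulnerable record in this stretch: per the CVE/CWE fields and the linked commit, crypto_report_one() leaked kernel memory because memcpy() copies the full fixed-size name buffers, including whatever uninitialized bytes sit past the NUL terminator, into a netlink reply. As I read the linked fix, the memcpy calls were replaced with strncpy(), which stops at the NUL and zero-pads the rest of the destination. A userspace illustration of the difference (buffer name and size are hypothetical stand-ins for the crypto_user_alg fields):

#include <stdio.h>
#include <string.h>

#define ALG_NAME_LEN 16

static void hexdump(const char *tag, const unsigned char *p, size_t n)
{
    printf("%-9s", tag);
    for (size_t i = 0; i < n; i++)
        printf(" %02x", p[i]);
    printf("\n");
}

int main(void)
{
    /* Source buffer: a short NUL-terminated name followed by stale bytes,
     * standing in for uninitialized memory after cra_name's terminator. */
    char src[ALG_NAME_LEN];
    memset(src, 0xAA, sizeof(src));  /* the "secret" residue */
    strcpy(src, "cbc(aes)");         /* name + NUL, bytes 9..15 untouched */

    unsigned char reply_memcpy[ALG_NAME_LEN];
    unsigned char reply_strncpy[ALG_NAME_LEN];

    /* Buggy pattern: copies all 16 bytes, stale 0xAA residue included. */
    memcpy(reply_memcpy, src, sizeof(reply_memcpy));
    /* Fixed pattern: copies up to the NUL, then zero-pads the remainder. */
    strncpy((char *)reply_strncpy, src, sizeof(reply_strncpy));

    hexdump("memcpy:", reply_memcpy, sizeof(reply_memcpy));
    hexdump("strncpy:", reply_strncpy, sizeof(reply_strncpy));
    return 0;
}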
41,320
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
41,320
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "tools/gn/err.h"
#include "tools/gn/functions.h"
#include "tools/gn/parse_tree.h"
#include "tools/gn/scope.h"

namespace functions {

const char kSetDefaults[] = "set_defaults";
const char kSetDefaults_HelpShort[] =
    "set_defaults: Set default values for a target type.";
const char kSetDefaults_Help[] =
    R"(set_defaults: Set default values for a target type.

  set_defaults(<target_type_name>) { <values...> }

  Sets the default values for a given target type. Whenever
  target_type_name is seen in the future, the values specified in
  set_default's block will be copied into the current scope.

  When the target type is used, the variable copying is very strict. If a
  variable with that name is already in scope, the build will fail with an
  error.

  set_defaults can be used for built-in target types ("executable",
  "shared_library", etc.) and custom ones defined via the "template"
  command. It can be called more than once and the most recent call in any
  scope will apply, but there is no way to refer to the previous defaults
  and modify them (each call to set_defaults must supply a complete list of
  all defaults it wants). If you want to share defaults, store them in a
  separate variable.

Example

  set_defaults("static_library") {
    configs = [ "//tools/mything:settings" ]
  }

  static_library("mylib") {
    # The configs will be auto-populated as above. You can remove it if
    # you don't want the default for a particular default:
    configs -= [ "//tools/mything:settings" ]
  }
)";

Value RunSetDefaults(Scope* scope,
                     const FunctionCallNode* function,
                     const std::vector<Value>& args,
                     BlockNode* block,
                     Err* err) {
  if (!EnsureSingleStringArg(function, args, err))
    return Value();
  const std::string& target_type(args[0].string_value());

  if (!block) {
    FillNeedsBlockError(function, err);
    return Value();
  }

  // Run the block for the rule invocation.
  Scope block_scope(scope);
  block->Execute(&block_scope, err);
  if (err->has_error())
    return Value();

  // Now copy the values set on the scope we made into the free-floating one
  // (with no containing scope) used to hold the target defaults.
  Scope* dest = scope->MakeTargetDefaults(target_type);
  block_scope.NonRecursiveMergeTo(dest, Scope::MergeOptions(), function,
                                  "<SHOULD NOT FAIL>", err);
  return Value();
}

}  // namespace functions
null
null
null
null
38,183
29,137
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
194,132
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/*
 * Driver for the Analog Devices digital potentiometers (I2C bus)
 *
 * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/i2c.h>
#include <linux/module.h>

#include "ad525x_dpot.h"

/* I2C bus functions */
static int write_d8(void *client, u8 val)
{
	return i2c_smbus_write_byte(client, val);
}

static int write_r8d8(void *client, u8 reg, u8 val)
{
	return i2c_smbus_write_byte_data(client, reg, val);
}

static int write_r8d16(void *client, u8 reg, u16 val)
{
	return i2c_smbus_write_word_data(client, reg, val);
}

static int read_d8(void *client)
{
	return i2c_smbus_read_byte(client);
}

static int read_r8d8(void *client, u8 reg)
{
	return i2c_smbus_read_byte_data(client, reg);
}

static int read_r8d16(void *client, u8 reg)
{
	return i2c_smbus_read_word_data(client, reg);
}

static const struct ad_dpot_bus_ops bops = {
	.read_d8 = read_d8,
	.read_r8d8 = read_r8d8,
	.read_r8d16 = read_r8d16,
	.write_d8 = write_d8,
	.write_r8d8 = write_r8d8,
	.write_r8d16 = write_r8d16,
};

static int ad_dpot_i2c_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
{
	struct ad_dpot_bus_data bdata = {
		.client = client,
		.bops = &bops,
	};

	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_SMBUS_WORD_DATA)) {
		dev_err(&client->dev, "SMBUS Word Data not Supported\n");
		return -EIO;
	}

	return ad_dpot_probe(&client->dev, &bdata, id->driver_data, id->name);
}

static int ad_dpot_i2c_remove(struct i2c_client *client)
{
	return ad_dpot_remove(&client->dev);
}

static const struct i2c_device_id ad_dpot_id[] = {
	{"ad5258", AD5258_ID},
	{"ad5259", AD5259_ID},
	{"ad5251", AD5251_ID},
	{"ad5252", AD5252_ID},
	{"ad5253", AD5253_ID},
	{"ad5254", AD5254_ID},
	{"ad5255", AD5255_ID},
	{"ad5241", AD5241_ID},
	{"ad5242", AD5242_ID},
	{"ad5243", AD5243_ID},
	{"ad5245", AD5245_ID},
	{"ad5246", AD5246_ID},
	{"ad5247", AD5247_ID},
	{"ad5248", AD5248_ID},
	{"ad5280", AD5280_ID},
	{"ad5282", AD5282_ID},
	{"adn2860", ADN2860_ID},
	{"ad5273", AD5273_ID},
	{"ad5161", AD5161_ID},
	{"ad5171", AD5171_ID},
	{"ad5170", AD5170_ID},
	{"ad5172", AD5172_ID},
	{"ad5173", AD5173_ID},
	{"ad5272", AD5272_ID},
	{"ad5274", AD5274_ID},
	{}
};
MODULE_DEVICE_TABLE(i2c, ad_dpot_id);

static struct i2c_driver ad_dpot_i2c_driver = {
	.driver = {
		.name = "ad_dpot",
	},
	.probe = ad_dpot_i2c_probe,
	.remove = ad_dpot_i2c_remove,
	.id_table = ad_dpot_id,
};

module_i2c_driver(ad_dpot_i2c_driver);

MODULE_AUTHOR("Michael Hennerich <[email protected]>");
MODULE_DESCRIPTION("digital potentiometer I2C bus driver");
MODULE_LICENSE("GPL");
null
null
null
null
102,479
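The record above is only the I2C transport half of the ad525x driver: every register access is funneled through the ad_dpot_bus_ops table of function pointers, so the same core logic can also ride on a SPI back end. A toy sketch of that dispatch pattern (the struct, register number, and names here are simplified stand-ins, not the driver's real types):

#include <stdio.h>

/* Transport-neutral operations table, in the spirit of ad_dpot_bus_ops. */
struct bus_ops {
    int (*write_r8d8)(void *bus, unsigned char reg, unsigned char val);
};

/* One concrete transport; a SPI variant would fill the same slots. */
static int i2c_write_r8d8(void *bus, unsigned char reg, unsigned char val)
{
    printf("i2c[%s]: reg 0x%02x <- 0x%02x\n", (const char *)bus, reg, val);
    return 0;
}

static const struct bus_ops i2c_ops = { .write_r8d8 = i2c_write_r8d8 };

/* Core code only ever sees the ops table plus an opaque bus handle. */
static int dpot_set_wiper(const struct bus_ops *ops, void *bus,
                          unsigned char pos)
{
    return ops->write_r8d8(bus, 0x00, pos); /* 0x00: illustrative register */
}

int main(void)
{
    char fake_bus[] = "adapter0";
    return dpot_set_wiper(&i2c_ops, fake_bus, 128);
}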
70,298
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
70,298
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "device/usb/usb_descriptors.h"

#include <stdint.h>

#include <memory>

#include "base/bind.h"
#include "base/strings/utf_string_conversions.h"
#include "device/usb/mock_usb_device_handle.h"
#include "testing/gtest/include/gtest/gtest.h"

using testing::_;

namespace device {

namespace {

ACTION_P2(InvokeCallback, data, length) {
  size_t transferred_length = std::min(length, arg6->size());
  memcpy(arg6->front(), data, transferred_length);
  std::move(arg8).Run(UsbTransferStatus::COMPLETED, arg6, transferred_length);
}

void ExpectStringDescriptors(
    std::unique_ptr<std::map<uint8_t, base::string16>> string_map) {
  EXPECT_EQ(3u, string_map->size());
  EXPECT_EQ(base::ASCIIToUTF16("String 1"), (*string_map)[1]);
  EXPECT_EQ(base::ASCIIToUTF16("String 2"), (*string_map)[2]);
  EXPECT_EQ(base::ASCIIToUTF16("String 3"), (*string_map)[3]);
}

const uint8_t kDeviceDescriptor[] = {0x12, 0x01, 0x10, 0x03, 0xFF, 0xFF,
                                     0xFF, 0x09, 0x34, 0x12, 0x78, 0x56,
                                     0x00, 0x01, 0x01, 0x02, 0x03, 0x02};

const uint8_t kConfig1Descriptor[] = {
    // Config 1
    0x09, 0x02, 0x38, 0x00, 0x02, 0x01, 0x01, 0x01, 0x10,
    // Interface Association (0 + 1)
    0x08, 0x0B, 0x00, 0x02, 0xFF, 0xFF, 0xFF, 0x00,
    // Interface 0
    0x09, 0x04, 0x00, 0x00, 0x03, 0x12, 0x34, 0x56, 0x02,
    // Endpoint 1 IN
    0x07, 0x05, 0x81, 0x02, 0x00, 0x02, 0x00,
    // Endpoint 2 IN
    0x07, 0x05, 0x82, 0x03, 0x00, 0x02, 0x04,
    // Endpoint 3 OUT
    0x07, 0x05, 0x03, 0x13, 0x00, 0x02, 0x04,
    // Interface 1
    0x09, 0x04, 0x01, 0x00, 0x00, 0x78, 0x9A, 0xAB, 0x03,
};

const uint8_t kConfig2Descriptor[] = {
    // Config 2
    0x09, 0x02, 0x29, 0x00, 0x01, 0x02, 0x04, 0x03, 0x20,
    // Interface 0
    0x09, 0x04, 0x00, 0x00, 0x00, 0xCD, 0xEF, 0x01, 0x04,
    // Interface 0 (alternate 1)
    0x09, 0x04, 0x00, 0x01, 0x02, 0xCD, 0xEF, 0x01, 0x05,
    // Endpoint 1 IN
    0x07, 0x05, 0x81, 0x01, 0x00, 0x04, 0x08,
    // Endpoint 2 OUT
    0x07, 0x05, 0x02, 0x11, 0x00, 0x04, 0x08,
};

void ExpectConfig1Descriptor(const UsbConfigDescriptor& config) {
  // Config 1
  EXPECT_EQ(1, config.configuration_value);
  EXPECT_FALSE(config.self_powered);
  EXPECT_FALSE(config.remote_wakeup);
  EXPECT_EQ(16, config.maximum_power);
  ASSERT_EQ(2u, config.interfaces.size());
  EXPECT_EQ(8u, config.extra_data.size());

  // Interface 0
  EXPECT_EQ(0, config.interfaces[0].interface_number);
  EXPECT_EQ(0, config.interfaces[0].alternate_setting);
  EXPECT_EQ(0x12, config.interfaces[0].interface_class);
  EXPECT_EQ(0x34, config.interfaces[0].interface_subclass);
  EXPECT_EQ(0x56, config.interfaces[0].interface_protocol);
  ASSERT_EQ(3u, config.interfaces[0].endpoints.size());
  EXPECT_EQ(0u, config.interfaces[0].extra_data.size());
  EXPECT_EQ(0, config.interfaces[0].first_interface);

  // Endpoint 1 IN
  EXPECT_EQ(0x81, config.interfaces[0].endpoints[0].address);
  EXPECT_EQ(UsbTransferDirection::INBOUND,
            config.interfaces[0].endpoints[0].direction);
  EXPECT_EQ(512, config.interfaces[0].endpoints[0].maximum_packet_size);
  EXPECT_EQ(USB_SYNCHRONIZATION_NONE,
            config.interfaces[0].endpoints[0].synchronization_type);
  EXPECT_EQ(UsbTransferType::BULK,
            config.interfaces[0].endpoints[0].transfer_type);
  EXPECT_EQ(USB_USAGE_RESERVED, config.interfaces[0].endpoints[0].usage_type);
  EXPECT_EQ(0, config.interfaces[0].endpoints[0].polling_interval);
  EXPECT_EQ(0u, config.interfaces[0].endpoints[0].extra_data.size());

  // Endpoint 2 IN
  EXPECT_EQ(0x82, config.interfaces[0].endpoints[1].address);
  EXPECT_EQ(UsbTransferDirection::INBOUND,
            config.interfaces[0].endpoints[1].direction);
  EXPECT_EQ(512, config.interfaces[0].endpoints[1].maximum_packet_size);
  EXPECT_EQ(USB_SYNCHRONIZATION_NONE,
            config.interfaces[0].endpoints[1].synchronization_type);
  EXPECT_EQ(UsbTransferType::INTERRUPT,
            config.interfaces[0].endpoints[1].transfer_type);
  EXPECT_EQ(USB_USAGE_PERIODIC, config.interfaces[0].endpoints[1].usage_type);
  EXPECT_EQ(4, config.interfaces[0].endpoints[1].polling_interval);
  EXPECT_EQ(0u, config.interfaces[0].endpoints[1].extra_data.size());

  // Endpoint 3 OUT
  EXPECT_EQ(0x03, config.interfaces[0].endpoints[2].address);
  EXPECT_EQ(UsbTransferDirection::OUTBOUND,
            config.interfaces[0].endpoints[2].direction);
  EXPECT_EQ(512, config.interfaces[0].endpoints[2].maximum_packet_size);
  EXPECT_EQ(USB_SYNCHRONIZATION_NONE,
            config.interfaces[0].endpoints[2].synchronization_type);
  EXPECT_EQ(UsbTransferType::INTERRUPT,
            config.interfaces[0].endpoints[2].transfer_type);
  EXPECT_EQ(USB_USAGE_NOTIFICATION,
            config.interfaces[0].endpoints[2].usage_type);
  EXPECT_EQ(4, config.interfaces[0].endpoints[2].polling_interval);
  EXPECT_EQ(0u, config.interfaces[0].endpoints[2].extra_data.size());

  // Interface 1
  EXPECT_EQ(1, config.interfaces[1].interface_number);
  EXPECT_EQ(0, config.interfaces[1].alternate_setting);
  EXPECT_EQ(0x78, config.interfaces[1].interface_class);
  EXPECT_EQ(0x9A, config.interfaces[1].interface_subclass);
  EXPECT_EQ(0xAB, config.interfaces[1].interface_protocol);
  ASSERT_EQ(0u, config.interfaces[1].endpoints.size());
  EXPECT_EQ(0u, config.interfaces[1].extra_data.size());
  EXPECT_EQ(0, config.interfaces[1].first_interface);
}

void ExpectConfig2Descriptor(const UsbConfigDescriptor& config) {
  // Config 2
  EXPECT_EQ(2, config.configuration_value);
  EXPECT_TRUE(config.self_powered);
  EXPECT_FALSE(config.remote_wakeup);
  EXPECT_EQ(32, config.maximum_power);
  ASSERT_EQ(2u, config.interfaces.size());
  EXPECT_EQ(0u, config.extra_data.size());

  // Interface 0
  EXPECT_EQ(0, config.interfaces[0].interface_number);
  EXPECT_EQ(0, config.interfaces[0].alternate_setting);
  EXPECT_EQ(0xCD, config.interfaces[0].interface_class);
  EXPECT_EQ(0xEF, config.interfaces[0].interface_subclass);
  EXPECT_EQ(0x01, config.interfaces[0].interface_protocol);
  ASSERT_EQ(0u, config.interfaces[0].endpoints.size());
  EXPECT_EQ(0u, config.interfaces[0].extra_data.size());
  EXPECT_EQ(0, config.interfaces[0].first_interface);

  // Interface 0 (alternate 1)
  EXPECT_EQ(0, config.interfaces[1].interface_number);
  EXPECT_EQ(1, config.interfaces[1].alternate_setting);
  EXPECT_EQ(0xCD, config.interfaces[1].interface_class);
  EXPECT_EQ(0xEF, config.interfaces[1].interface_subclass);
  EXPECT_EQ(0x01, config.interfaces[1].interface_protocol);
  ASSERT_EQ(2u, config.interfaces[1].endpoints.size());
  EXPECT_EQ(0u, config.interfaces[1].extra_data.size());
  EXPECT_EQ(0, config.interfaces[1].first_interface);

  // Endpoint 1 IN
  EXPECT_EQ(0x81, config.interfaces[1].endpoints[0].address);
  EXPECT_EQ(UsbTransferDirection::INBOUND,
            config.interfaces[1].endpoints[0].direction);
  EXPECT_EQ(1024, config.interfaces[1].endpoints[0].maximum_packet_size);
  EXPECT_EQ(USB_SYNCHRONIZATION_NONE,
            config.interfaces[1].endpoints[0].synchronization_type);
  EXPECT_EQ(UsbTransferType::ISOCHRONOUS,
            config.interfaces[1].endpoints[0].transfer_type);
  EXPECT_EQ(USB_USAGE_DATA, config.interfaces[1].endpoints[0].usage_type);
  EXPECT_EQ(8, config.interfaces[1].endpoints[0].polling_interval);
  EXPECT_EQ(0u, config.interfaces[1].endpoints[0].extra_data.size());

  // Endpoint 2 OUT
  EXPECT_EQ(0x02, config.interfaces[1].endpoints[1].address);
  EXPECT_EQ(UsbTransferDirection::OUTBOUND,
            config.interfaces[1].endpoints[1].direction);
  EXPECT_EQ(1024, config.interfaces[1].endpoints[1].maximum_packet_size);
  EXPECT_EQ(USB_SYNCHRONIZATION_NONE,
            config.interfaces[1].endpoints[1].synchronization_type);
  EXPECT_EQ(UsbTransferType::ISOCHRONOUS,
            config.interfaces[1].endpoints[1].transfer_type);
  EXPECT_EQ(USB_USAGE_FEEDBACK, config.interfaces[1].endpoints[1].usage_type);
  EXPECT_EQ(8, config.interfaces[1].endpoints[1].polling_interval);
  EXPECT_EQ(0u, config.interfaces[1].endpoints[1].extra_data.size());
}

void ExpectDeviceDescriptor(const UsbDeviceDescriptor& descriptor) {
  // Device
  EXPECT_EQ(0x0310, descriptor.usb_version);
  EXPECT_EQ(0xFF, descriptor.device_class);
  EXPECT_EQ(0xFF, descriptor.device_subclass);
  EXPECT_EQ(0xFF, descriptor.device_protocol);
  EXPECT_EQ(0x1234, descriptor.vendor_id);
  EXPECT_EQ(0x5678, descriptor.product_id);
  EXPECT_EQ(0x0100, descriptor.device_version);
  ASSERT_EQ(2u, descriptor.configurations.size());
  ExpectConfig1Descriptor(descriptor.configurations[0]);
  ExpectConfig2Descriptor(descriptor.configurations[1]);
}

void OnReadDescriptors(std::unique_ptr<UsbDeviceDescriptor> descriptor) {
  ASSERT_TRUE(descriptor);
  ExpectDeviceDescriptor(*descriptor);
}

class UsbDescriptorsTest : public ::testing::Test {};

TEST_F(UsbDescriptorsTest, ParseDescriptor) {
  std::vector<uint8_t> buffer;
  buffer.insert(buffer.end(), kDeviceDescriptor,
                kDeviceDescriptor + sizeof(kDeviceDescriptor));
  buffer.insert(buffer.end(), kConfig1Descriptor,
                kConfig1Descriptor + sizeof(kConfig1Descriptor));
  buffer.insert(buffer.end(), kConfig2Descriptor,
                kConfig2Descriptor + sizeof(kConfig2Descriptor));

  UsbDeviceDescriptor descriptor;
  ASSERT_TRUE(descriptor.Parse(buffer));
  ExpectDeviceDescriptor(descriptor);
}

TEST_F(UsbDescriptorsTest, ReadDescriptors) {
  scoped_refptr<MockUsbDeviceHandle> device_handle(
      new MockUsbDeviceHandle(nullptr));
  EXPECT_CALL(*device_handle,
              ControlTransferInternal(UsbTransferDirection::INBOUND,
                                      UsbControlTransferType::STANDARD,
                                      UsbControlTransferRecipient::DEVICE,
                                      0x06, 0x0100, 0x0000, _, _, _))
      .WillOnce(InvokeCallback(kDeviceDescriptor, sizeof(kDeviceDescriptor)));
  EXPECT_CALL(*device_handle,
              ControlTransferInternal(UsbTransferDirection::INBOUND,
                                      UsbControlTransferType::STANDARD,
                                      UsbControlTransferRecipient::DEVICE,
                                      0x06, 0x0200, 0x0000, _, _, _))
      .Times(2)
      .WillRepeatedly(
          InvokeCallback(kConfig1Descriptor, sizeof(kConfig1Descriptor)));
  EXPECT_CALL(*device_handle,
              ControlTransferInternal(UsbTransferDirection::INBOUND,
                                      UsbControlTransferType::STANDARD,
                                      UsbControlTransferRecipient::DEVICE,
                                      0x06, 0x0201, 0x0000, _, _, _))
      .Times(2)
      .WillRepeatedly(
          InvokeCallback(kConfig2Descriptor, sizeof(kConfig2Descriptor)));

  ReadUsbDescriptors(device_handle, base::BindOnce(&OnReadDescriptors));
}

TEST_F(UsbDescriptorsTest, NoInterfaceAssociations) {
  UsbConfigDescriptor config(1, false, false, 0);
  config.interfaces.emplace_back(0, 0, 255, 255, 255);
  config.interfaces.emplace_back(0, 1, 255, 255, 255);
  config.interfaces.emplace_back(1, 0, 255, 255, 255);

  config.AssignFirstInterfaceNumbers();
  EXPECT_EQ(0, config.interfaces[0].first_interface);
  EXPECT_EQ(0, config.interfaces[1].first_interface);
  EXPECT_EQ(1, config.interfaces[2].first_interface);
}

TEST_F(UsbDescriptorsTest, InterfaceAssociations) {
  // Links interfaces 0 and 1 into a single function.
  static const uint8_t kIAD1[] = {0x08, 0x0b, 0x00, 0x02,
                                  0xff, 0xff, 0xff, 0x00};
  // Only references a single interface, 2.
  static const uint8_t kIAD2[] = {0x08, 0x0b, 0x02, 0x01,
                                  0xff, 0xff, 0xff, 0x00};
  // Malformed. References interface 3 but bInterfaceCount is 0.
  static const uint8_t kIAD3[] = {0x08, 0x0b, 0x03, 0x00,
                                  0xff, 0xff, 0xff, 0x00};
  // Links interfaces 4 and 5 into a single function.
  static const uint8_t kIAD4[] = {0x08, 0x0b, 0x04, 0x02,
                                  0xff, 0xff, 0xff, 0x00};

  UsbConfigDescriptor config(1, false, false, 0);
  config.extra_data.assign(kIAD1, kIAD1 + sizeof(kIAD1));
  config.extra_data.insert(config.extra_data.end(), kIAD2,
                           kIAD2 + sizeof(kIAD2));
  config.interfaces.emplace_back(0, 0, 255, 255, 255);
  config.interfaces.emplace_back(1, 0, 255, 255, 255);
  UsbInterfaceDescriptor iface1a(1, 1, 255, 255, 255);
  iface1a.extra_data.assign(kIAD3, kIAD3 + sizeof(kIAD3));
  config.interfaces.push_back(std::move(iface1a));
  config.interfaces.emplace_back(2, 0, 255, 255, 255);
  config.interfaces.emplace_back(3, 0, 255, 255, 255);
  UsbInterfaceDescriptor iface4(4, 0, 255, 255, 255);
  iface4.extra_data.assign(kIAD4, kIAD4 + sizeof(kIAD4));
  config.interfaces.push_back(std::move(iface4));
  config.interfaces.emplace_back(5, 0, 255, 255, 255);

  config.AssignFirstInterfaceNumbers();

  // Interfaces 0 and 1 (plus 1's alternate) are a single function.
  EXPECT_EQ(0, config.interfaces[0].interface_number);
  EXPECT_EQ(0, config.interfaces[0].first_interface);
  EXPECT_EQ(1, config.interfaces[1].interface_number);
  EXPECT_EQ(0, config.interfaces[1].first_interface);
  EXPECT_EQ(1, config.interfaces[2].interface_number);
  EXPECT_EQ(0, config.interfaces[2].first_interface);

  // Interfaces 2 and 3 are their own functions.
  EXPECT_EQ(2, config.interfaces[3].interface_number);
  EXPECT_EQ(2, config.interfaces[3].first_interface);
  EXPECT_EQ(3, config.interfaces[4].interface_number);
  EXPECT_EQ(3, config.interfaces[4].first_interface);

  // Interfaces 4 and 5 are a single function.
  EXPECT_EQ(4, config.interfaces[5].interface_number);
  EXPECT_EQ(4, config.interfaces[5].first_interface);
  EXPECT_EQ(5, config.interfaces[6].interface_number);
  EXPECT_EQ(4, config.interfaces[6].first_interface);
}

TEST_F(UsbDescriptorsTest, CorruptInterfaceAssociations) {
  {
    // Descriptor is too short.
    static const uint8_t kIAD[] = {0x01};
    UsbConfigDescriptor config(1, false, false, 0);
    config.extra_data.assign(kIAD, kIAD + sizeof(kIAD));
    config.AssignFirstInterfaceNumbers();
  }
  {
    // Descriptor is too long.
    static const uint8_t kIAD[] = {0x09, 0x0b, 0x00, 0x00,
                                   0x00, 0x00, 0x00, 0x00};
    UsbConfigDescriptor config(1, false, false, 0);
    config.extra_data.assign(kIAD, kIAD + sizeof(kIAD));
    config.AssignFirstInterfaceNumbers();
  }
  {
    // References an undefined interface.
    static const uint8_t kIAD[] = {0x08, 0x0b, 0x07, 0x00,
                                   0xff, 0xff, 0xff, 0x00};
    UsbConfigDescriptor config(1, false, false, 0);
    config.interfaces.emplace_back(0, 0, 255, 255, 255);
    config.extra_data.assign(kIAD, kIAD + sizeof(kIAD));
    config.AssignFirstInterfaceNumbers();

    EXPECT_EQ(0, config.interfaces[0].interface_number);
    EXPECT_EQ(0, config.interfaces[0].first_interface);
  }
}

TEST_F(UsbDescriptorsTest, StringDescriptor) {
  static const uint8_t kBuffer[] = {0x1a, 0x03, 'H', 0, 'e', 0, 'l', 0,
                                    'l',  0,   'o', 0, ' ', 0, 'w', 0,
                                    'o',  0,   'r', 0, 'l', 0, 'd', 0,
                                    '!',  0};
  base::string16 string;
  ASSERT_TRUE(ParseUsbStringDescriptor(
      std::vector<uint8_t>(kBuffer, kBuffer + sizeof(kBuffer)), &string));
  EXPECT_EQ(base::ASCIIToUTF16("Hello world!"), string);
}

TEST_F(UsbDescriptorsTest, ShortStringDescriptorHeader) {
  // The buffer is just too darn short.
  static const uint8_t kBuffer[] = {0x01};
  base::string16 string;
  ASSERT_FALSE(ParseUsbStringDescriptor(
      std::vector<uint8_t>(kBuffer, kBuffer + sizeof(kBuffer)), &string));
}

TEST_F(UsbDescriptorsTest, ShortStringDescriptor) {
  // The buffer is just too darn short.
  static const uint8_t kBuffer[] = {0x01, 0x03};
  base::string16 string;
  ASSERT_FALSE(ParseUsbStringDescriptor(
      std::vector<uint8_t>(kBuffer, kBuffer + sizeof(kBuffer)), &string));
}

TEST_F(UsbDescriptorsTest, OddLengthStringDescriptor) {
  // There's an extra byte at the end of the string.
  static const uint8_t kBuffer[] = {0x0d, 0x03, 'H', 0, 'e', 0,
                                    'l',  0,   'l', 0, 'o', 0, '!'};
  base::string16 string;
  ASSERT_TRUE(ParseUsbStringDescriptor(
      std::vector<uint8_t>(kBuffer, kBuffer + sizeof(kBuffer)), &string));
  EXPECT_EQ(base::ASCIIToUTF16("Hello"), string);
}

TEST_F(UsbDescriptorsTest, EmptyStringDescriptor) {
  // The string is empty.
  static const uint8_t kBuffer[] = {0x02, 0x03};
  base::string16 string;
  ASSERT_TRUE(ParseUsbStringDescriptor(
      std::vector<uint8_t>(kBuffer, kBuffer + sizeof(kBuffer)), &string));
  EXPECT_EQ(base::string16(), string);
}

TEST_F(UsbDescriptorsTest, OneByteStringDescriptor) {
  // The string is only one byte.
  static const uint8_t kBuffer[] = {0x03, 0x03, '?'};
  base::string16 string;
  ASSERT_TRUE(ParseUsbStringDescriptor(
      std::vector<uint8_t>(kBuffer, kBuffer + sizeof(kBuffer)), &string));
  EXPECT_EQ(base::string16(), string);
}

TEST_F(UsbDescriptorsTest, ReadStringDescriptors) {
  std::unique_ptr<std::map<uint8_t, base::string16>> string_map(
      new std::map<uint8_t, base::string16>());
  (*string_map)[1] = base::string16();
  (*string_map)[2] = base::string16();
  (*string_map)[3] = base::string16();

  scoped_refptr<MockUsbDeviceHandle> device_handle(
      new MockUsbDeviceHandle(nullptr));
  static const uint8_t kStringDescriptor0[] = {0x04, 0x03, 0x21, 0x43};
  EXPECT_CALL(*device_handle,
              ControlTransferInternal(UsbTransferDirection::INBOUND,
                                      UsbControlTransferType::STANDARD,
                                      UsbControlTransferRecipient::DEVICE,
                                      0x06, 0x0300, 0x0000, _, _, _))
      .WillOnce(InvokeCallback(kStringDescriptor0, sizeof(kStringDescriptor0)));
  static const uint8_t kStringDescriptor1[] = {
      0x12, 0x03, 'S', 0, 't', 0, 'r', 0, 'i', 0, 'n', 0, 'g', 0, ' ', 0,
      '1',  0};
  EXPECT_CALL(*device_handle,
              ControlTransferInternal(UsbTransferDirection::INBOUND,
                                      UsbControlTransferType::STANDARD,
                                      UsbControlTransferRecipient::DEVICE,
                                      0x06, 0x0301, 0x4321, _, _, _))
      .WillOnce(InvokeCallback(kStringDescriptor1, sizeof(kStringDescriptor1)));
  static const uint8_t kStringDescriptor2[] = {
      0x12, 0x03, 'S', 0, 't', 0, 'r', 0, 'i', 0, 'n', 0, 'g', 0, ' ', 0,
      '2',  0};
  EXPECT_CALL(*device_handle,
              ControlTransferInternal(UsbTransferDirection::INBOUND,
                                      UsbControlTransferType::STANDARD,
                                      UsbControlTransferRecipient::DEVICE,
                                      0x06, 0x0302, 0x4321, _, _, _))
      .WillOnce(InvokeCallback(kStringDescriptor2, sizeof(kStringDescriptor2)));
  static const uint8_t kStringDescriptor3[] = {
      0x12, 0x03, 'S', 0, 't', 0, 'r', 0, 'i', 0, 'n', 0, 'g', 0, ' ', 0,
      '3',  0};
  EXPECT_CALL(*device_handle,
              ControlTransferInternal(UsbTransferDirection::INBOUND,
                                      UsbControlTransferType::STANDARD,
                                      UsbControlTransferRecipient::DEVICE,
                                      0x06, 0x0303, 0x4321, _, _, _))
      .WillOnce(InvokeCallback(kStringDescriptor3, sizeof(kStringDescriptor3)));

  ReadUsbStringDescriptors(device_handle, std::move(string_map),
                           base::BindOnce(&ExpectStringDescriptors));
}

}  // namespace

}  // namespace device
null
null
null
null
67,161
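Several of the tests in this record pin down the USB string-descriptor layout: byte 0 is the total length, byte 1 must be 0x03, the payload is UTF-16LE, and a dangling odd byte is silently dropped. A freestanding ASCII-only sketch of that header check (the real ParseUsbStringDescriptor returns a base::string16 and handles full UTF-16; this simplification is mine):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Parse an ASCII-only USB string descriptor: [bLength][0x03][UTF-16LE...].
 * Returns the number of characters written, or -1 on a malformed header. */
static int parse_string_descriptor(const uint8_t *buf, size_t len,
                                   char *out, size_t out_len)
{
    if (len < 2 || buf[0] < 2 || buf[1] != 0x03)
        return -1;                       /* too short or wrong type */
    size_t total = buf[0] < len ? buf[0] : len;
    size_t n = 0;
    /* step by 2: UTF-16LE code units; an odd trailing byte is ignored */
    for (size_t i = 2; i + 1 < total && n + 1 < out_len; i += 2)
        out[n++] = (char)buf[i];
    out[n] = '\0';
    return (int)n;
}

int main(void)
{
    const uint8_t desc[] = { 0x0d, 0x03, 'H', 0, 'e', 0, 'l', 0,
                             'l', 0, 'o', 0, '!' };  /* odd extra byte */
    char text[16];
    if (parse_string_descriptor(desc, sizeof(desc), text, sizeof(text)) >= 0)
        printf("\"%s\"\n", text);        /* prints "Hello" */
    return 0;
}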
17,013
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
17,013
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "services/viz/../../components/viz/service/display_embedder/compositor_overlay_candidate_validator_win.h"

#include "components/viz/service/display/overlay_processor.h"

namespace viz {

CompositorOverlayCandidateValidatorWin::
    CompositorOverlayCandidateValidatorWin() {}

CompositorOverlayCandidateValidatorWin::
    ~CompositorOverlayCandidateValidatorWin() {}

void CompositorOverlayCandidateValidatorWin::GetStrategies(
    OverlayProcessor::StrategyList* strategies) {}

void CompositorOverlayCandidateValidatorWin::CheckOverlaySupport(
    cc::OverlayCandidateList* candidates) {
  NOTIMPLEMENTED();
}

bool CompositorOverlayCandidateValidatorWin::AllowCALayerOverlays() {
  return false;
}

bool CompositorOverlayCandidateValidatorWin::AllowDCLayerOverlays() {
  return true;
}

void CompositorOverlayCandidateValidatorWin::SetSoftwareMirrorMode(
    bool enabled) {
  // Software mirroring isn't supported on Windows.
  NOTIMPLEMENTED();
}

}  // namespace viz
null
null
null
null
13,876
1,954
null
train_val
1b0d3845b454eaaac0b2064c78926ca4d739a080
264,522
qemu
0
https://github.com/bonzini/qemu
2016-10-18 11:40:27+01:00
/*
 * Allwinner A10 timer device emulation
 *
 * Copyright (C) 2013 Li Guang
 * Written by Li Guang <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "sysemu/sysemu.h"
#include "hw/timer/allwinner-a10-pit.h"
#include "qemu/log.h"

static void a10_pit_update_irq(AwA10PITState *s)
{
    int i;

    for (i = 0; i < AW_A10_PIT_TIMER_NR; i++) {
        qemu_set_irq(s->irq[i], !!(s->irq_status & s->irq_enable & (1 << i)));
    }
}

static uint64_t a10_pit_read(void *opaque, hwaddr offset, unsigned size)
{
    AwA10PITState *s = AW_A10_PIT(opaque);
    uint8_t index;

    switch (offset) {
    case AW_A10_PIT_TIMER_IRQ_EN:
        return s->irq_enable;
    case AW_A10_PIT_TIMER_IRQ_ST:
        return s->irq_status;
    case AW_A10_PIT_TIMER_BASE ... AW_A10_PIT_TIMER_BASE_END:
        index = offset & 0xf0;
        index >>= 4;
        index -= 1;
        switch (offset & 0x0f) {
        case AW_A10_PIT_TIMER_CONTROL:
            return s->control[index];
        case AW_A10_PIT_TIMER_INTERVAL:
            return s->interval[index];
        case AW_A10_PIT_TIMER_COUNT:
            s->count[index] = ptimer_get_count(s->timer[index]);
            return s->count[index];
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Bad offset 0x%x\n", __func__, (int)offset);
            break;
        }
    case AW_A10_PIT_WDOG_CONTROL:
        break;
    case AW_A10_PIT_WDOG_MODE:
        break;
    case AW_A10_PIT_COUNT_LO:
        return s->count_lo;
    case AW_A10_PIT_COUNT_HI:
        return s->count_hi;
    case AW_A10_PIT_COUNT_CTL:
        return s->count_ctl;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Bad offset 0x%x\n", __func__, (int)offset);
        break;
    }

    return 0;
}

static void a10_pit_set_freq(AwA10PITState *s, int index)
{
    uint32_t prescaler, source, source_freq;

    prescaler = 1 << extract32(s->control[index], 4, 3);
    source = extract32(s->control[index], 2, 2);
    source_freq = s->clk_freq[source];

    if (source_freq) {
        ptimer_set_freq(s->timer[index], source_freq / prescaler);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid clock source %u\n",
                      __func__, source);
    }
}

static void a10_pit_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    AwA10PITState *s = AW_A10_PIT(opaque);
    uint8_t index;

    switch (offset) {
    case AW_A10_PIT_TIMER_IRQ_EN:
        s->irq_enable = value;
        a10_pit_update_irq(s);
        break;
    case AW_A10_PIT_TIMER_IRQ_ST:
        s->irq_status &= ~value;
        a10_pit_update_irq(s);
        break;
    case AW_A10_PIT_TIMER_BASE ... AW_A10_PIT_TIMER_BASE_END:
        index = offset & 0xf0;
        index >>= 4;
        index -= 1;
        switch (offset & 0x0f) {
        case AW_A10_PIT_TIMER_CONTROL:
            s->control[index] = value;
            a10_pit_set_freq(s, index);
            if (s->control[index] & AW_A10_PIT_TIMER_RELOAD) {
                ptimer_set_count(s->timer[index], s->interval[index]);
            }
            if (s->control[index] & AW_A10_PIT_TIMER_EN) {
                int oneshot = 0;
                if (s->control[index] & AW_A10_PIT_TIMER_MODE) {
                    oneshot = 1;
                }
                ptimer_run(s->timer[index], oneshot);
            } else {
                ptimer_stop(s->timer[index]);
            }
            break;
        case AW_A10_PIT_TIMER_INTERVAL:
            s->interval[index] = value;
            ptimer_set_limit(s->timer[index], s->interval[index], 1);
            break;
        case AW_A10_PIT_TIMER_COUNT:
            s->count[index] = value;
            break;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Bad offset 0x%x\n", __func__, (int)offset);
        }
        break;
    case AW_A10_PIT_WDOG_CONTROL:
        s->watch_dog_control = value;
        break;
    case AW_A10_PIT_WDOG_MODE:
        s->watch_dog_mode = value;
        break;
    case AW_A10_PIT_COUNT_LO:
        s->count_lo = value;
        break;
    case AW_A10_PIT_COUNT_HI:
        s->count_hi = value;
        break;
    case AW_A10_PIT_COUNT_CTL:
        s->count_ctl = value;
        if (s->count_ctl & AW_A10_PIT_COUNT_RL_EN) {
            uint64_t tmp_count = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

            s->count_lo = tmp_count;
            s->count_hi = tmp_count >> 32;
            s->count_ctl &= ~AW_A10_PIT_COUNT_RL_EN;
        }
        if (s->count_ctl & AW_A10_PIT_COUNT_CLR_EN) {
            s->count_lo = 0;
            s->count_hi = 0;
            s->count_ctl &= ~AW_A10_PIT_COUNT_CLR_EN;
        }
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Bad offset 0x%x\n", __func__, (int)offset);
        break;
    }
}

static const MemoryRegionOps a10_pit_ops = {
    .read = a10_pit_read,
    .write = a10_pit_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static Property a10_pit_properties[] = {
    DEFINE_PROP_UINT32("clk0-freq", AwA10PITState, clk_freq[0], 0),
    DEFINE_PROP_UINT32("clk1-freq", AwA10PITState, clk_freq[1], 0),
    DEFINE_PROP_UINT32("clk2-freq", AwA10PITState, clk_freq[2], 0),
    DEFINE_PROP_UINT32("clk3-freq", AwA10PITState, clk_freq[3], 0),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_a10_pit = {
    .name = "a10.pit",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(irq_enable, AwA10PITState),
        VMSTATE_UINT32(irq_status, AwA10PITState),
        VMSTATE_UINT32_ARRAY(control, AwA10PITState, AW_A10_PIT_TIMER_NR),
        VMSTATE_UINT32_ARRAY(interval, AwA10PITState, AW_A10_PIT_TIMER_NR),
        VMSTATE_UINT32_ARRAY(count, AwA10PITState, AW_A10_PIT_TIMER_NR),
        VMSTATE_UINT32(watch_dog_mode, AwA10PITState),
        VMSTATE_UINT32(watch_dog_control, AwA10PITState),
        VMSTATE_UINT32(count_lo, AwA10PITState),
        VMSTATE_UINT32(count_hi, AwA10PITState),
        VMSTATE_UINT32(count_ctl, AwA10PITState),
        VMSTATE_PTIMER_ARRAY(timer, AwA10PITState, AW_A10_PIT_TIMER_NR),
        VMSTATE_END_OF_LIST()
    }
};

static void a10_pit_reset(DeviceState *dev)
{
    AwA10PITState *s = AW_A10_PIT(dev);
    uint8_t i;

    s->irq_enable = 0;
    s->irq_status = 0;
    a10_pit_update_irq(s);

    for (i = 0; i < 6; i++) {
        s->control[i] = AW_A10_PIT_DEFAULT_CLOCK;
        s->interval[i] = 0;
        s->count[i] = 0;
        ptimer_stop(s->timer[i]);
        a10_pit_set_freq(s, i);
    }
    s->watch_dog_mode = 0;
    s->watch_dog_control = 0;
    s->count_lo = 0;
    s->count_hi = 0;
    s->count_ctl = 0;
}

static void a10_pit_timer_cb(void *opaque)
{
    AwA10TimerContext *tc = opaque;
    AwA10PITState *s = tc->container;
    uint8_t i = tc->index;

    if (s->control[i] & AW_A10_PIT_TIMER_EN) {
        s->irq_status |= 1 << i;
        if (s->control[i] & AW_A10_PIT_TIMER_MODE) {
            ptimer_stop(s->timer[i]);
            s->control[i] &= ~AW_A10_PIT_TIMER_EN;
        }
        a10_pit_update_irq(s);
    }
}

static void a10_pit_init(Object *obj)
{
    AwA10PITState *s = AW_A10_PIT(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    QEMUBH *bh[AW_A10_PIT_TIMER_NR];
    uint8_t i;

    for (i = 0; i < AW_A10_PIT_TIMER_NR; i++) {
        sysbus_init_irq(sbd, &s->irq[i]);
    }
    memory_region_init_io(&s->iomem, OBJECT(s), &a10_pit_ops, s,
                          TYPE_AW_A10_PIT, 0x400);
    sysbus_init_mmio(sbd, &s->iomem);

    for (i = 0; i < AW_A10_PIT_TIMER_NR; i++) {
        AwA10TimerContext *tc = &s->timer_context[i];

        tc->container = s;
        tc->index = i;
        bh[i] = qemu_bh_new(a10_pit_timer_cb, tc);
        s->timer[i] = ptimer_init(bh[i], PTIMER_POLICY_DEFAULT);
    }
}

static void a10_pit_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = a10_pit_reset;
    dc->props = a10_pit_properties;
    dc->desc = "allwinner a10 timer";
    dc->vmsd = &vmstate_a10_pit;
}

static const TypeInfo a10_pit_info = {
    .name = TYPE_AW_A10_PIT,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AwA10PITState),
    .instance_init = a10_pit_init,
    .class_init = a10_pit_class_init,
};

static void a10_register_types(void)
{
    type_register_static(&a10_pit_info);
}

type_init(a10_register_types);
null
null
null
null
122,646
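Both MMIO handlers in the record above decode the timer index straight from the register offset: the per-timer blocks are 0x10 bytes apart starting one block above the base, so bits [7:4] minus one pick the timer and bits [3:0] pick the register within it. A quick sketch of that decode (the sample offsets are illustrative; the real constants come from allwinner-a10-pit.h):

#include <stdio.h>

int main(void)
{
    /* Timer n's block sits at 0x10 * (n + 1), as in the handlers above. */
    const unsigned offsets[] = { 0x10, 0x14, 0x18, 0x24, 0x68 };

    for (unsigned k = 0; k < sizeof(offsets) / sizeof(offsets[0]); k++) {
        unsigned offset = offsets[k];
        unsigned index = ((offset & 0xf0) >> 4) - 1; /* which timer */
        unsigned reg   = offset & 0x0f;              /* which register */
        printf("offset 0x%02x -> timer %u, reg 0x%02x\n", offset, index, reg);
    }
    return 0;
}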
36,484
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
201,479
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/*
 * Roccat Isku driver for Linux
 *
 * Copyright (c) 2011 Stefan Achatz <[email protected]>
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

/*
 * Roccat Isku is a gamer keyboard with macro keys that can be configured in
 * 5 profiles.
 */

#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hid-roccat.h>
#include "hid-ids.h"
#include "hid-roccat-common.h"
#include "hid-roccat-isku.h"

static struct class *isku_class;

static void isku_profile_activated(struct isku_device *isku, uint new_profile)
{
	isku->actual_profile = new_profile;
}

static int isku_receive(struct usb_device *usb_dev, uint command,
		void *buf, uint size)
{
	return roccat_common2_receive(usb_dev, command, buf, size);
}

static int isku_get_actual_profile(struct usb_device *usb_dev)
{
	struct isku_actual_profile buf;
	int retval;

	retval = isku_receive(usb_dev, ISKU_COMMAND_ACTUAL_PROFILE,
			&buf, sizeof(struct isku_actual_profile));
	return retval ? retval : buf.actual_profile;
}

static int isku_set_actual_profile(struct usb_device *usb_dev, int new_profile)
{
	struct isku_actual_profile buf;

	buf.command = ISKU_COMMAND_ACTUAL_PROFILE;
	buf.size = sizeof(struct isku_actual_profile);
	buf.actual_profile = new_profile;
	return roccat_common2_send_with_status(usb_dev,
			ISKU_COMMAND_ACTUAL_PROFILE, &buf,
			sizeof(struct isku_actual_profile));
}

static ssize_t isku_sysfs_show_actual_profile(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct isku_device *isku =
			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
	return snprintf(buf, PAGE_SIZE, "%d\n", isku->actual_profile);
}

static ssize_t isku_sysfs_set_actual_profile(struct device *dev,
		struct device_attribute *attr, char const *buf, size_t size)
{
	struct isku_device *isku;
	struct usb_device *usb_dev;
	unsigned long profile;
	int retval;
	struct isku_roccat_report roccat_report;

	dev = dev->parent->parent;
	isku = hid_get_drvdata(dev_get_drvdata(dev));
	usb_dev = interface_to_usbdev(to_usb_interface(dev));

	retval = kstrtoul(buf, 10, &profile);
	if (retval)
		return retval;

	if (profile > 4)
		return -EINVAL;

	mutex_lock(&isku->isku_lock);

	retval = isku_set_actual_profile(usb_dev, profile);
	if (retval) {
		mutex_unlock(&isku->isku_lock);
		return retval;
	}

	isku_profile_activated(isku, profile);

	roccat_report.event = ISKU_REPORT_BUTTON_EVENT_PROFILE;
	roccat_report.data1 = profile + 1;
	roccat_report.data2 = 0;
	roccat_report.profile = profile + 1;
	roccat_report_event(isku->chrdev_minor,
			(uint8_t const *)&roccat_report);

	mutex_unlock(&isku->isku_lock);

	return size;
}
static DEVICE_ATTR(actual_profile, 0660, isku_sysfs_show_actual_profile,
		   isku_sysfs_set_actual_profile);

static struct attribute *isku_attrs[] = {
	&dev_attr_actual_profile.attr,
	NULL,
};

static ssize_t isku_sysfs_read(struct file *fp, struct kobject *kobj,
		char *buf, loff_t off, size_t count,
		size_t real_size, uint command)
{
	struct device *dev = kobj_to_dev(kobj)->parent->parent;
	struct isku_device *isku = hid_get_drvdata(dev_get_drvdata(dev));
	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
	int retval;

	if (off >= real_size)
		return 0;

	if (off != 0 || count > real_size)
		return -EINVAL;

	mutex_lock(&isku->isku_lock);
	retval = isku_receive(usb_dev, command, buf, count);
	mutex_unlock(&isku->isku_lock);

	return retval ? retval : count;
}

static ssize_t isku_sysfs_write(struct file *fp, struct kobject *kobj,
		void const *buf, loff_t off, size_t count,
		size_t real_size, uint command)
{
	struct device *dev = kobj_to_dev(kobj)->parent->parent;
	struct isku_device *isku = hid_get_drvdata(dev_get_drvdata(dev));
	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
	int retval;

	if (off != 0 || count > real_size)
		return -EINVAL;

	mutex_lock(&isku->isku_lock);
	retval = roccat_common2_send_with_status(usb_dev, command,
			(void *)buf, count);
	mutex_unlock(&isku->isku_lock);

	return retval ? retval : count;
}

#define ISKU_SYSFS_W(thingy, THINGY) \
static ssize_t isku_sysfs_write_ ## thingy(struct file *fp, struct kobject *kobj, \
		struct bin_attribute *attr, char *buf, \
		loff_t off, size_t count) \
{ \
	return isku_sysfs_write(fp, kobj, buf, off, count, \
			ISKU_SIZE_ ## THINGY, ISKU_COMMAND_ ## THINGY); \
}

#define ISKU_SYSFS_R(thingy, THINGY) \
static ssize_t isku_sysfs_read_ ## thingy(struct file *fp, struct kobject *kobj, \
		struct bin_attribute *attr, char *buf, \
		loff_t off, size_t count) \
{ \
	return isku_sysfs_read(fp, kobj, buf, off, count, \
			ISKU_SIZE_ ## THINGY, ISKU_COMMAND_ ## THINGY); \
}

#define ISKU_SYSFS_RW(thingy, THINGY) \
ISKU_SYSFS_R(thingy, THINGY) \
ISKU_SYSFS_W(thingy, THINGY)

#define ISKU_BIN_ATTR_RW(thingy, THINGY) \
ISKU_SYSFS_RW(thingy, THINGY); \
static struct bin_attribute bin_attr_##thingy = { \
	.attr = { .name = #thingy, .mode = 0660 }, \
	.size = ISKU_SIZE_ ## THINGY, \
	.read = isku_sysfs_read_ ## thingy, \
	.write = isku_sysfs_write_ ## thingy \
}

#define ISKU_BIN_ATTR_R(thingy, THINGY) \
ISKU_SYSFS_R(thingy, THINGY); \
static struct bin_attribute bin_attr_##thingy = { \
	.attr = { .name = #thingy, .mode = 0440 }, \
	.size = ISKU_SIZE_ ## THINGY, \
	.read = isku_sysfs_read_ ## thingy, \
}

#define ISKU_BIN_ATTR_W(thingy, THINGY) \
ISKU_SYSFS_W(thingy, THINGY); \
static struct bin_attribute bin_attr_##thingy = { \
	.attr = { .name = #thingy, .mode = 0220 }, \
	.size = ISKU_SIZE_ ## THINGY, \
	.write = isku_sysfs_write_ ## thingy \
}

ISKU_BIN_ATTR_RW(macro, MACRO);
ISKU_BIN_ATTR_RW(keys_function, KEYS_FUNCTION);
ISKU_BIN_ATTR_RW(keys_easyzone, KEYS_EASYZONE);
ISKU_BIN_ATTR_RW(keys_media, KEYS_MEDIA);
ISKU_BIN_ATTR_RW(keys_thumbster, KEYS_THUMBSTER);
ISKU_BIN_ATTR_RW(keys_macro, KEYS_MACRO);
ISKU_BIN_ATTR_RW(keys_capslock, KEYS_CAPSLOCK);
ISKU_BIN_ATTR_RW(light, LIGHT);
ISKU_BIN_ATTR_RW(key_mask, KEY_MASK);
ISKU_BIN_ATTR_RW(last_set, LAST_SET);
ISKU_BIN_ATTR_W(talk, TALK);
ISKU_BIN_ATTR_W(talkfx, TALKFX);
ISKU_BIN_ATTR_W(control, CONTROL);
ISKU_BIN_ATTR_W(reset, RESET);
ISKU_BIN_ATTR_R(info, INFO);

static struct bin_attribute *isku_bin_attributes[] = {
	&bin_attr_macro,
	&bin_attr_keys_function,
	&bin_attr_keys_easyzone,
	&bin_attr_keys_media,
	&bin_attr_keys_thumbster,
	&bin_attr_keys_macro,
	&bin_attr_keys_capslock,
	&bin_attr_light,
	&bin_attr_key_mask,
	&bin_attr_last_set,
	&bin_attr_talk,
	&bin_attr_talkfx,
	&bin_attr_control,
	&bin_attr_reset,
	&bin_attr_info,
	NULL,
};

static const struct attribute_group isku_group = {
	.attrs = isku_attrs,
	.bin_attrs = isku_bin_attributes,
};

static const struct attribute_group *isku_groups[] = {
	&isku_group,
	NULL,
};

static int isku_init_isku_device_struct(struct usb_device *usb_dev,
		struct isku_device *isku)
{
	int retval;

	mutex_init(&isku->isku_lock);

	retval = isku_get_actual_profile(usb_dev);
	if (retval < 0)
		return retval;
	isku_profile_activated(isku, retval);

	return 0;
}

static int isku_init_specials(struct hid_device *hdev)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct isku_device *isku;
	int retval;

	if (intf->cur_altsetting->desc.bInterfaceProtocol
			!= ISKU_USB_INTERFACE_PROTOCOL) {
		hid_set_drvdata(hdev, NULL);
		return 0;
	}

	isku = kzalloc(sizeof(*isku), GFP_KERNEL);
	if (!isku) {
		hid_err(hdev, "can't alloc device descriptor\n");
		return -ENOMEM;
	}
	hid_set_drvdata(hdev, isku);

	retval = isku_init_isku_device_struct(usb_dev, isku);
	if (retval) {
		hid_err(hdev, "couldn't init struct isku_device\n");
		goto exit_free;
	}

	retval = roccat_connect(isku_class, hdev,
			sizeof(struct isku_roccat_report));
	if (retval < 0) {
		hid_err(hdev, "couldn't init char dev\n");
	} else {
		isku->chrdev_minor = retval;
		isku->roccat_claimed = 1;
	}

	return 0;
exit_free:
	kfree(isku);
	return retval;
}

static void isku_remove_specials(struct hid_device *hdev)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	struct isku_device *isku;

	if (intf->cur_altsetting->desc.bInterfaceProtocol
			!= ISKU_USB_INTERFACE_PROTOCOL)
		return;

	isku = hid_get_drvdata(hdev);
	if (isku->roccat_claimed)
		roccat_disconnect(isku->chrdev_minor);
	kfree(isku);
}

static int isku_probe(struct hid_device *hdev,
		const struct hid_device_id *id)
{
	int retval;

	retval = hid_parse(hdev);
	if (retval) {
		hid_err(hdev, "parse failed\n");
		goto exit;
	}

	retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (retval) {
		hid_err(hdev, "hw start failed\n");
		goto exit;
	}

	retval = isku_init_specials(hdev);
	if (retval) {
		hid_err(hdev, "couldn't install keyboard\n");
		goto exit_stop;
	}

	return 0;

exit_stop:
	hid_hw_stop(hdev);
exit:
	return retval;
}

static void isku_remove(struct hid_device *hdev)
{
	isku_remove_specials(hdev);
	hid_hw_stop(hdev);
}

static void isku_keep_values_up_to_date(struct isku_device *isku,
		u8 const *data)
{
	struct isku_report_button const *button_report;

	switch (data[0]) {
	case ISKU_REPORT_NUMBER_BUTTON:
		button_report = (struct isku_report_button const *)data;
		switch (button_report->event) {
		case ISKU_REPORT_BUTTON_EVENT_PROFILE:
			isku_profile_activated(isku, button_report->data1 - 1);
			break;
		}
		break;
	}
}

static void isku_report_to_chrdev(struct isku_device const *isku,
		u8 const *data)
{
	struct isku_roccat_report roccat_report;
	struct isku_report_button const *button_report;

	if (data[0] != ISKU_REPORT_NUMBER_BUTTON)
		return;

	button_report = (struct isku_report_button const *)data;

	roccat_report.event = button_report->event;
	roccat_report.data1 = button_report->data1;
	roccat_report.data2 = button_report->data2;
	roccat_report.profile = isku->actual_profile + 1;
	roccat_report_event(isku->chrdev_minor,
			(uint8_t const *)&roccat_report);
}

static int isku_raw_event(struct hid_device *hdev,
		struct hid_report *report, u8 *data, int size)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	struct isku_device *isku = hid_get_drvdata(hdev);

	if (intf->cur_altsetting->desc.bInterfaceProtocol
			!= ISKU_USB_INTERFACE_PROTOCOL)
		return 0;

	if (isku == NULL)
		return 0;

	isku_keep_values_up_to_date(isku, data);

	if (isku->roccat_claimed)
		isku_report_to_chrdev(isku, data);

	return 0;
}

static const struct hid_device_id isku_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKUFX) },
	{ }
};

MODULE_DEVICE_TABLE(hid, isku_devices);

static struct hid_driver isku_driver = {
	.name = "isku",
	.id_table = isku_devices,
	.probe = isku_probe,
	.remove = isku_remove,
	.raw_event = isku_raw_event
};

static int __init isku_init(void)
{
	int retval;

	isku_class = class_create(THIS_MODULE, "isku");
	if (IS_ERR(isku_class))
		return PTR_ERR(isku_class);
	isku_class->dev_groups = isku_groups;

	retval = hid_register_driver(&isku_driver);
	if (retval)
		class_destroy(isku_class);
	return retval;
}

static void __exit isku_exit(void)
{
	hid_unregister_driver(&isku_driver);
	class_destroy(isku_class);
}

module_init(isku_init);
module_exit(isku_exit);

MODULE_AUTHOR("Stefan Achatz");
MODULE_DESCRIPTION("USB Roccat Isku/FX driver");
MODULE_LICENSE("GPL v2");
null
null
null
null
109,826
5,605
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
170,600
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * pxa-ssp.c -- ALSA Soc Audio Layer * * Copyright 2005,2008 Wolfson Microelectronics PLC. * Author: Liam Girdwood * Mark Brown <[email protected]> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * TODO: * o Test network mode for > 16bit sample size */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/pxa2xx_ssp.h> #include <linux/of.h> #include <linux/dmaengine.h> #include <asm/irq.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/initval.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/pxa2xx-lib.h> #include <sound/dmaengine_pcm.h> #include "../../arm/pxa2xx-pcm.h" #include "pxa-ssp.h" /* * SSP audio private data */ struct ssp_priv { struct ssp_device *ssp; unsigned int sysclk; int dai_fmt; #ifdef CONFIG_PM uint32_t cr0; uint32_t cr1; uint32_t to; uint32_t psp; #endif }; static void dump_registers(struct ssp_device *ssp) { dev_dbg(&ssp->pdev->dev, "SSCR0 0x%08x SSCR1 0x%08x SSTO 0x%08x\n", pxa_ssp_read_reg(ssp, SSCR0), pxa_ssp_read_reg(ssp, SSCR1), pxa_ssp_read_reg(ssp, SSTO)); dev_dbg(&ssp->pdev->dev, "SSPSP 0x%08x SSSR 0x%08x SSACD 0x%08x\n", pxa_ssp_read_reg(ssp, SSPSP), pxa_ssp_read_reg(ssp, SSSR), pxa_ssp_read_reg(ssp, SSACD)); } static void pxa_ssp_enable(struct ssp_device *ssp) { uint32_t sscr0; sscr0 = __raw_readl(ssp->mmio_base + SSCR0) | SSCR0_SSE; __raw_writel(sscr0, ssp->mmio_base + SSCR0); } static void pxa_ssp_disable(struct ssp_device *ssp) { uint32_t sscr0; sscr0 = __raw_readl(ssp->mmio_base + SSCR0) & ~SSCR0_SSE; __raw_writel(sscr0, ssp->mmio_base + SSCR0); } static void pxa_ssp_set_dma_params(struct ssp_device *ssp, int width4, int out, struct snd_dmaengine_dai_dma_data *dma) { dma->addr_width = width4 ? DMA_SLAVE_BUSWIDTH_4_BYTES : DMA_SLAVE_BUSWIDTH_2_BYTES; dma->maxburst = 16; dma->addr = ssp->phys_base + SSDR; } static int pxa_ssp_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; struct snd_dmaengine_dai_dma_data *dma; int ret = 0; if (!cpu_dai->active) { clk_prepare_enable(ssp->clk); pxa_ssp_disable(ssp); } dma = kzalloc(sizeof(struct snd_dmaengine_dai_dma_data), GFP_KERNEL); if (!dma) return -ENOMEM; dma->filter_data = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? 
&ssp->drcmr_tx : &ssp->drcmr_rx; snd_soc_dai_set_dma_data(cpu_dai, substream, dma); return ret; } static void pxa_ssp_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; if (!cpu_dai->active) { pxa_ssp_disable(ssp); clk_disable_unprepare(ssp->clk); } kfree(snd_soc_dai_get_dma_data(cpu_dai, substream)); snd_soc_dai_set_dma_data(cpu_dai, substream, NULL); } #ifdef CONFIG_PM static int pxa_ssp_suspend(struct snd_soc_dai *cpu_dai) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; if (!cpu_dai->active) clk_prepare_enable(ssp->clk); priv->cr0 = __raw_readl(ssp->mmio_base + SSCR0); priv->cr1 = __raw_readl(ssp->mmio_base + SSCR1); priv->to = __raw_readl(ssp->mmio_base + SSTO); priv->psp = __raw_readl(ssp->mmio_base + SSPSP); pxa_ssp_disable(ssp); clk_disable_unprepare(ssp->clk); return 0; } static int pxa_ssp_resume(struct snd_soc_dai *cpu_dai) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; uint32_t sssr = SSSR_ROR | SSSR_TUR | SSSR_BCE; clk_prepare_enable(ssp->clk); __raw_writel(sssr, ssp->mmio_base + SSSR); __raw_writel(priv->cr0 & ~SSCR0_SSE, ssp->mmio_base + SSCR0); __raw_writel(priv->cr1, ssp->mmio_base + SSCR1); __raw_writel(priv->to, ssp->mmio_base + SSTO); __raw_writel(priv->psp, ssp->mmio_base + SSPSP); if (cpu_dai->active) pxa_ssp_enable(ssp); else clk_disable_unprepare(ssp->clk); return 0; } #else #define pxa_ssp_suspend NULL #define pxa_ssp_resume NULL #endif /** * ssp_set_clkdiv - set SSP clock divider * @div: serial clock rate divider */ static void pxa_ssp_set_scr(struct ssp_device *ssp, u32 div) { u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0); if (ssp->type == PXA25x_SSP) { sscr0 &= ~0x0000ff00; sscr0 |= ((div - 2)/2) << 8; /* 2..512 */ } else { sscr0 &= ~0x000fff00; sscr0 |= (div - 1) << 8; /* 1..4096 */ } pxa_ssp_write_reg(ssp, SSCR0, sscr0); } /** * pxa_ssp_get_clkdiv - get SSP clock divider */ static u32 pxa_ssp_get_scr(struct ssp_device *ssp) { u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0); u32 div; if (ssp->type == PXA25x_SSP) div = ((sscr0 >> 8) & 0xff) * 2 + 2; else div = ((sscr0 >> 8) & 0xfff) + 1; return div; } /* * Set the SSP ports SYSCLK. */ static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai, int clk_id, unsigned int freq, int dir) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; int val; u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~(SSCR0_ECS | SSCR0_NCS | SSCR0_MOD | SSCR0_ACS); dev_dbg(&ssp->pdev->dev, "pxa_ssp_set_dai_sysclk id: %d, clk_id %d, freq %u\n", cpu_dai->id, clk_id, freq); switch (clk_id) { case PXA_SSP_CLK_NET_PLL: sscr0 |= SSCR0_MOD; break; case PXA_SSP_CLK_PLL: /* Internal PLL is fixed */ if (ssp->type == PXA25x_SSP) priv->sysclk = 1843200; else priv->sysclk = 13000000; break; case PXA_SSP_CLK_EXT: priv->sysclk = freq; sscr0 |= SSCR0_ECS; break; case PXA_SSP_CLK_NET: priv->sysclk = freq; sscr0 |= SSCR0_NCS | SSCR0_MOD; break; case PXA_SSP_CLK_AUDIO: priv->sysclk = 0; pxa_ssp_set_scr(ssp, 1); sscr0 |= SSCR0_ACS; break; default: return -ENODEV; } /* The SSP clock must be disabled when changing SSP clock mode * on PXA2xx. On PXA3xx it must be enabled when doing so. 
*/ if (ssp->type != PXA3xx_SSP) clk_disable_unprepare(ssp->clk); val = pxa_ssp_read_reg(ssp, SSCR0) | sscr0; pxa_ssp_write_reg(ssp, SSCR0, val); if (ssp->type != PXA3xx_SSP) clk_prepare_enable(ssp->clk); return 0; } /* * Set the SSP clock dividers. */ static int pxa_ssp_set_dai_clkdiv(struct snd_soc_dai *cpu_dai, int div_id, int div) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; int val; switch (div_id) { case PXA_SSP_AUDIO_DIV_ACDS: val = (pxa_ssp_read_reg(ssp, SSACD) & ~0x7) | SSACD_ACDS(div); pxa_ssp_write_reg(ssp, SSACD, val); break; case PXA_SSP_AUDIO_DIV_SCDB: val = pxa_ssp_read_reg(ssp, SSACD); val &= ~SSACD_SCDB; if (ssp->type == PXA3xx_SSP) val &= ~SSACD_SCDX8; switch (div) { case PXA_SSP_CLK_SCDB_1: val |= SSACD_SCDB; break; case PXA_SSP_CLK_SCDB_4: break; case PXA_SSP_CLK_SCDB_8: if (ssp->type == PXA3xx_SSP) val |= SSACD_SCDX8; else return -EINVAL; break; default: return -EINVAL; } pxa_ssp_write_reg(ssp, SSACD, val); break; case PXA_SSP_DIV_SCR: pxa_ssp_set_scr(ssp, div); break; default: return -ENODEV; } return 0; } /* * Configure the PLL frequency pxa27x and (afaik - pxa320 only) */ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id, int source, unsigned int freq_in, unsigned int freq_out) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; u32 ssacd = pxa_ssp_read_reg(ssp, SSACD) & ~0x70; if (ssp->type == PXA3xx_SSP) pxa_ssp_write_reg(ssp, SSACDD, 0); switch (freq_out) { case 5622000: break; case 11345000: ssacd |= (0x1 << 4); break; case 12235000: ssacd |= (0x2 << 4); break; case 14857000: ssacd |= (0x3 << 4); break; case 32842000: ssacd |= (0x4 << 4); break; case 48000000: ssacd |= (0x5 << 4); break; case 0: /* Disable */ break; default: /* PXA3xx has a clock ditherer which can be used to generate * a wider range of frequencies - calculate a value for it. */ if (ssp->type == PXA3xx_SSP) { u32 val; u64 tmp = 19968; tmp *= 1000000; do_div(tmp, freq_out); val = tmp; val = (val << 16) | 64; pxa_ssp_write_reg(ssp, SSACDD, val); ssacd |= (0x6 << 4); dev_dbg(&ssp->pdev->dev, "Using SSACDD %x to supply %uHz\n", val, freq_out); break; } return -EINVAL; } pxa_ssp_write_reg(ssp, SSACD, ssacd); return 0; } /* * Set the active slots in TDM/Network mode */ static int pxa_ssp_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; u32 sscr0; sscr0 = pxa_ssp_read_reg(ssp, SSCR0); sscr0 &= ~(SSCR0_MOD | SSCR0_SlotsPerFrm(8) | SSCR0_EDSS | SSCR0_DSS); /* set slot width */ if (slot_width > 16) sscr0 |= SSCR0_EDSS | SSCR0_DataSize(slot_width - 16); else sscr0 |= SSCR0_DataSize(slot_width); if (slots > 1) { /* enable network mode */ sscr0 |= SSCR0_MOD; /* set number of active slots */ sscr0 |= SSCR0_SlotsPerFrm(slots); /* set active slot mask */ pxa_ssp_write_reg(ssp, SSTSA, tx_mask); pxa_ssp_write_reg(ssp, SSRSA, rx_mask); } pxa_ssp_write_reg(ssp, SSCR0, sscr0); return 0; } /* * Tristate the SSP DAI lines */ static int pxa_ssp_set_dai_tristate(struct snd_soc_dai *cpu_dai, int tristate) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; u32 sscr1; sscr1 = pxa_ssp_read_reg(ssp, SSCR1); if (tristate) sscr1 &= ~SSCR1_TTE; else sscr1 |= SSCR1_TTE; pxa_ssp_write_reg(ssp, SSCR1, sscr1); return 0; } /* * Set up the SSP DAI format. 
* The SSP Port must be inactive before calling this function as the * physical interface format is changed. */ static int pxa_ssp_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; u32 sscr0, sscr1, sspsp, scfr; /* check if we need to change anything at all */ if (priv->dai_fmt == fmt) return 0; /* we can only change the settings if the port is not in use */ if (pxa_ssp_read_reg(ssp, SSCR0) & SSCR0_SSE) { dev_err(&ssp->pdev->dev, "can't change hardware dai format: stream is in use"); return -EINVAL; } /* reset port settings */ sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~(SSCR0_ECS | SSCR0_NCS | SSCR0_MOD | SSCR0_ACS); sscr1 = SSCR1_RxTresh(8) | SSCR1_TxTresh(7); sspsp = 0; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: sscr1 |= SSCR1_SCLKDIR | SSCR1_SFRMDIR | SSCR1_SCFR; break; case SND_SOC_DAIFMT_CBM_CFS: sscr1 |= SSCR1_SCLKDIR | SSCR1_SCFR; break; case SND_SOC_DAIFMT_CBS_CFS: break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: sspsp |= SSPSP_SFRMP; break; case SND_SOC_DAIFMT_NB_IF: break; case SND_SOC_DAIFMT_IB_IF: sspsp |= SSPSP_SCMODE(2); break; case SND_SOC_DAIFMT_IB_NF: sspsp |= SSPSP_SCMODE(2) | SSPSP_SFRMP; break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: sscr0 |= SSCR0_PSP; sscr1 |= SSCR1_RWOT | SSCR1_TRAIL; /* See hw_params() */ break; case SND_SOC_DAIFMT_DSP_A: sspsp |= SSPSP_FSRT; case SND_SOC_DAIFMT_DSP_B: sscr0 |= SSCR0_MOD | SSCR0_PSP; sscr1 |= SSCR1_TRAIL | SSCR1_RWOT; break; default: return -EINVAL; } pxa_ssp_write_reg(ssp, SSCR0, sscr0); pxa_ssp_write_reg(ssp, SSCR1, sscr1); pxa_ssp_write_reg(ssp, SSPSP, sspsp); switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: case SND_SOC_DAIFMT_CBM_CFS: scfr = pxa_ssp_read_reg(ssp, SSCR1) | SSCR1_SCFR; pxa_ssp_write_reg(ssp, SSCR1, scfr); while (pxa_ssp_read_reg(ssp, SSSR) & SSSR_BSY) cpu_relax(); break; } dump_registers(ssp); /* Since we are configuring the timings for the format by hand * we have to defer some things until hw_params() where we * know parameters like the sample size. */ priv->dai_fmt = fmt; return 0; } /* * Set the SSP audio DMA parameters and sample size. * Can be called multiple times by oss emulation. */ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *cpu_dai) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; int chn = params_channels(params); u32 sscr0; u32 sspsp; int width = snd_pcm_format_physical_width(params_format(params)); int ttsa = pxa_ssp_read_reg(ssp, SSTSA) & 0xf; struct snd_dmaengine_dai_dma_data *dma_data; dma_data = snd_soc_dai_get_dma_data(cpu_dai, substream); /* Network mode with one active slot (ttsa == 1) can be used * to force 16-bit frame width on the wire (for S16_LE), even * with two channels. Use 16-bit DMA transfers for this case. 
*/ pxa_ssp_set_dma_params(ssp, ((chn == 2) && (ttsa != 1)) || (width == 32), substream->stream == SNDRV_PCM_STREAM_PLAYBACK, dma_data); /* we can only change the settings if the port is not in use */ if (pxa_ssp_read_reg(ssp, SSCR0) & SSCR0_SSE) return 0; /* clear selected SSP bits */ sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~(SSCR0_DSS | SSCR0_EDSS); /* bit size */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: if (ssp->type == PXA3xx_SSP) sscr0 |= SSCR0_FPCKE; sscr0 |= SSCR0_DataSize(16); break; case SNDRV_PCM_FORMAT_S24_LE: sscr0 |= (SSCR0_EDSS | SSCR0_DataSize(8)); break; case SNDRV_PCM_FORMAT_S32_LE: sscr0 |= (SSCR0_EDSS | SSCR0_DataSize(16)); break; } pxa_ssp_write_reg(ssp, SSCR0, sscr0); switch (priv->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: sspsp = pxa_ssp_read_reg(ssp, SSPSP); if ((pxa_ssp_get_scr(ssp) == 4) && (width == 16)) { /* This is a special case where the bitclk is 64fs * and we're not dealing with 2*32 bits of audio * samples. * * The SSP values used for that are all found out by * trying and failing a lot; some of the registers * needed for that mode are only available on PXA3xx. */ if (ssp->type != PXA3xx_SSP) return -EINVAL; sspsp |= SSPSP_SFRMWDTH(width * 2); sspsp |= SSPSP_SFRMDLY(width * 4); sspsp |= SSPSP_EDMYSTOP(3); sspsp |= SSPSP_DMYSTOP(3); sspsp |= SSPSP_DMYSTRT(1); } else { /* The frame width is the width the LRCLK is * asserted for; the delay is expressed in * half cycle units. We need the extra cycle * because the data starts clocking out one BCLK * after LRCLK changes polarity. */ sspsp |= SSPSP_SFRMWDTH(width + 1); sspsp |= SSPSP_SFRMDLY((width + 1) * 2); sspsp |= SSPSP_DMYSTRT(1); } pxa_ssp_write_reg(ssp, SSPSP, sspsp); break; default: break; } /* When we use a network mode, we always require TDM slots * - complain loudly and fail if they've not been set up yet. 
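 * (ttsa holds the slot mask read back from SSTSA above, so a value of
 * zero means pxa_ssp_set_dai_tdm_slot() was never called for this port.)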
*/ if ((sscr0 & SSCR0_MOD) && !ttsa) { dev_err(&ssp->pdev->dev, "No TDM timeslot configured\n"); return -EINVAL; } dump_registers(ssp); return 0; } static void pxa_ssp_set_running_bit(struct snd_pcm_substream *substream, struct ssp_device *ssp, int value) { uint32_t sscr0 = pxa_ssp_read_reg(ssp, SSCR0); uint32_t sscr1 = pxa_ssp_read_reg(ssp, SSCR1); uint32_t sspsp = pxa_ssp_read_reg(ssp, SSPSP); uint32_t sssr = pxa_ssp_read_reg(ssp, SSSR); if (value && (sscr0 & SSCR0_SSE)) pxa_ssp_write_reg(ssp, SSCR0, sscr0 & ~SSCR0_SSE); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { if (value) sscr1 |= SSCR1_TSRE; else sscr1 &= ~SSCR1_TSRE; } else { if (value) sscr1 |= SSCR1_RSRE; else sscr1 &= ~SSCR1_RSRE; } pxa_ssp_write_reg(ssp, SSCR1, sscr1); if (value) { pxa_ssp_write_reg(ssp, SSSR, sssr); pxa_ssp_write_reg(ssp, SSPSP, sspsp); pxa_ssp_write_reg(ssp, SSCR0, sscr0 | SSCR0_SSE); } } static int pxa_ssp_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *cpu_dai) { int ret = 0; struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; int val; switch (cmd) { case SNDRV_PCM_TRIGGER_RESUME: pxa_ssp_enable(ssp); break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: pxa_ssp_set_running_bit(substream, ssp, 1); val = pxa_ssp_read_reg(ssp, SSSR); pxa_ssp_write_reg(ssp, SSSR, val); break; case SNDRV_PCM_TRIGGER_START: pxa_ssp_set_running_bit(substream, ssp, 1); break; case SNDRV_PCM_TRIGGER_STOP: pxa_ssp_set_running_bit(substream, ssp, 0); break; case SNDRV_PCM_TRIGGER_SUSPEND: pxa_ssp_disable(ssp); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: pxa_ssp_set_running_bit(substream, ssp, 0); break; default: ret = -EINVAL; } dump_registers(ssp); return ret; } static int pxa_ssp_probe(struct snd_soc_dai *dai) { struct device *dev = dai->dev; struct ssp_priv *priv; int ret; priv = kzalloc(sizeof(struct ssp_priv), GFP_KERNEL); if (!priv) return -ENOMEM; if (dev->of_node) { struct device_node *ssp_handle; ssp_handle = of_parse_phandle(dev->of_node, "port", 0); if (!ssp_handle) { dev_err(dev, "unable to get 'port' phandle\n"); ret = -ENODEV; goto err_priv; } priv->ssp = pxa_ssp_request_of(ssp_handle, "SoC audio"); if (priv->ssp == NULL) { ret = -ENODEV; goto err_priv; } } else { priv->ssp = pxa_ssp_request(dai->id + 1, "SoC audio"); if (priv->ssp == NULL) { ret = -ENODEV; goto err_priv; } } priv->dai_fmt = (unsigned int) -1; snd_soc_dai_set_drvdata(dai, priv); return 0; err_priv: kfree(priv); return ret; } static int pxa_ssp_remove(struct snd_soc_dai *dai) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(dai); pxa_ssp_free(priv->ssp); kfree(priv); return 0; } #define PXA_SSP_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | \ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000) #define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE) static const struct snd_soc_dai_ops pxa_ssp_dai_ops = { .startup = pxa_ssp_startup, .shutdown = pxa_ssp_shutdown, .trigger = pxa_ssp_trigger, .hw_params = pxa_ssp_hw_params, .set_sysclk = pxa_ssp_set_dai_sysclk, .set_clkdiv = pxa_ssp_set_dai_clkdiv, .set_pll = pxa_ssp_set_dai_pll, .set_fmt = pxa_ssp_set_dai_fmt, .set_tdm_slot = pxa_ssp_set_dai_tdm_slot, .set_tristate = pxa_ssp_set_dai_tristate, }; static struct snd_soc_dai_driver pxa_ssp_dai = { .probe = pxa_ssp_probe, .remove = pxa_ssp_remove, .suspend = pxa_ssp_suspend, .resume = pxa_ssp_resume, .playback = { .channels_min = 1, 
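/* the 8-channel maximum below presumably mirrors the SSCR0_SlotsPerFrm(8) TDM slot limit used above */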
.channels_max = 8, .rates = PXA_SSP_RATES, .formats = PXA_SSP_FORMATS, }, .capture = { .channels_min = 1, .channels_max = 8, .rates = PXA_SSP_RATES, .formats = PXA_SSP_FORMATS, }, .ops = &pxa_ssp_dai_ops, }; static const struct snd_soc_component_driver pxa_ssp_component = { .name = "pxa-ssp", }; #ifdef CONFIG_OF static const struct of_device_id pxa_ssp_of_ids[] = { { .compatible = "mrvl,pxa-ssp-dai" }, {} }; MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids); #endif static int asoc_ssp_probe(struct platform_device *pdev) { return devm_snd_soc_register_component(&pdev->dev, &pxa_ssp_component, &pxa_ssp_dai, 1); } static struct platform_driver asoc_ssp_driver = { .driver = { .name = "pxa-ssp-dai", .of_match_table = of_match_ptr(pxa_ssp_of_ids), }, .probe = asoc_ssp_probe, }; module_platform_driver(asoc_ssp_driver); /* Module information */ MODULE_AUTHOR("Mark Brown <[email protected]>"); MODULE_DESCRIPTION("PXA SSP/PCM SoC Interface"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pxa-ssp-dai");
null
null
null
null
78,947
12,905
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
12,905
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_SPELLCHECK_COMMON_SPELLCHECK_STRUCT_TRAITS_H #define COMPONENTS_SPELLCHECK_COMMON_SPELLCHECK_STRUCT_TRAITS_H #include "components/spellcheck/common/spellcheck.mojom.h" #include "components/spellcheck/common/spellcheck_result.h" namespace mojo { template <> struct EnumTraits<spellcheck::mojom::Decoration, SpellCheckResult::Decoration> { static spellcheck::mojom::Decoration ToMojom(SpellCheckResult::Decoration); static bool FromMojom(spellcheck::mojom::Decoration, SpellCheckResult::Decoration*); }; template <> struct StructTraits<spellcheck::mojom::SpellCheckResultDataView, SpellCheckResult> { static SpellCheckResult::Decoration decoration( const SpellCheckResult& result) { return result.decoration; } static int32_t location(const SpellCheckResult& result) { return result.location; } static int32_t length(const SpellCheckResult& result) { return result.length; } static const std::vector<base::string16>& replacements( const SpellCheckResult& result) { return result.replacements; } static bool Read(spellcheck::mojom::SpellCheckResultDataView, SpellCheckResult*); }; } // namespace mojo #endif // COMPONENTS_SPELLCHECK_COMMON_SPELLCHECK_STRUCT_TRAITS_H
null
null
null
null
9,768
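// A minimal sketch of what the .cc side of the traits declared in the record
// above typically looks like. The real implementation file is not part of
// this record, so the DataView method names (ReadDecoration,
// ReadReplacements, location(), length()) are assumptions based on standard
// mojo codegen conventions, not confirmed from source.
#include "components/spellcheck/common/spellcheck_struct_traits.h"

namespace mojo {

// static
bool StructTraits<spellcheck::mojom::SpellCheckResultDataView,
                  SpellCheckResult>::Read(
    spellcheck::mojom::SpellCheckResultDataView data,
    SpellCheckResult* out) {
  // Enum values arrive from a (possibly hostile) renderer, so a failed
  // conversion must reject the whole message.
  if (!data.ReadDecoration(&out->decoration))
    return false;
  out->location = data.location();
  out->length = data.length();
  // Deserializes the vector of string16 replacements; returns false on
  // malformed data rather than crashing.
  return data.ReadReplacements(&out->replacements);
}

}  // namespace mojo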
46,119
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
46,119
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ash/wm/video_detector.h" #include <memory> #include "ash/session/session_controller.h" #include "ash/shell.h" #include "ash/test/ash_test_base.h" #include "ash/wm/window_state.h" #include "ash/wm/wm_event.h" #include "base/compiler_specific.h" #include "base/containers/circular_deque.h" #include "base/time/time.h" #include "third_party/skia/include/core/SkColor.h" #include "ui/aura/client/aura_constants.h" #include "ui/aura/client/window_types.h" #include "ui/aura/test/test_windows.h" #include "ui/aura/window.h" #include "ui/aura/window_event_dispatcher.h" #include "ui/gfx/geometry/rect.h" namespace ash { // Implementation that just records video state changes. class TestObserver : public VideoDetector::Observer { public: TestObserver() = default; bool empty() const { return states_.empty(); } void reset() { states_.clear(); } // Pops and returns the earliest-received state. VideoDetector::State PopState() { CHECK(!states_.empty()); VideoDetector::State first_state = states_.front(); states_.pop_front(); return first_state; } // VideoDetector::Observer implementation. void OnVideoStateChanged(VideoDetector::State state) override { states_.push_back(state); } private: // States in the order they were received. base::circular_deque<VideoDetector::State> states_; DISALLOW_COPY_AND_ASSIGN(TestObserver); }; class VideoDetectorTest : public AshTestBase { public: VideoDetectorTest() : next_window_id_(1000) {} ~VideoDetectorTest() override = default; void SetUp() override { AshTestBase::SetUp(); observer_.reset(new TestObserver); detector_ = Shell::Get()->video_detector(); detector_->AddObserver(observer_.get()); } void TearDown() override { detector_->RemoveObserver(observer_.get()); AshTestBase::TearDown(); } protected: // Creates and returns a new window with |bounds|. std::unique_ptr<aura::Window> CreateTestWindow(const gfx::Rect& bounds) { return std::unique_ptr<aura::Window>( CreateTestWindowInShell(SK_ColorRED, next_window_id_++, bounds)); } VideoDetector* detector_; // not owned std::unique_ptr<TestObserver> observer_; // Next ID to be assigned by CreateTestWindow(). int next_window_id_; private: DISALLOW_COPY_AND_ASSIGN(VideoDetectorTest); }; // Verify that the video detector can distinguish fullscreen and windowed video // activity. TEST_F(VideoDetectorTest, ReportFullscreen) { UpdateDisplay("1024x768,1024x768"); std::unique_ptr<aura::Window> window = CreateTestWindow(gfx::Rect(0, 0, 1024, 768)); wm::WindowState* window_state = wm::GetWindowState(window.get()); const wm::WMEvent toggle_fullscreen_event(wm::WM_EVENT_TOGGLE_FULLSCREEN); window_state->OnWMEvent(&toggle_fullscreen_event); ASSERT_TRUE(window_state->IsFullscreen()); window->Focus(); detector_->OnVideoActivityStarted(); EXPECT_EQ(VideoDetector::State::PLAYING_FULLSCREEN, observer_->PopState()); EXPECT_TRUE(observer_->empty()); // Make the window non-fullscreen. observer_->reset(); window_state->OnWMEvent(&toggle_fullscreen_event); ASSERT_FALSE(window_state->IsFullscreen()); EXPECT_EQ(VideoDetector::State::PLAYING_WINDOWED, observer_->PopState()); EXPECT_TRUE(observer_->empty()); // Open a second, fullscreen window. Fullscreen video should still be reported // due to the second window being fullscreen. This avoids situations where // non-fullscreen video could be reported when multiple videos are playing in // fullscreen and non-fullscreen windows. 
observer_->reset(); std::unique_ptr<aura::Window> other_window = CreateTestWindow(gfx::Rect(1024, 0, 1024, 768)); wm::WindowState* other_window_state = wm::GetWindowState(other_window.get()); other_window_state->OnWMEvent(&toggle_fullscreen_event); ASSERT_TRUE(other_window_state->IsFullscreen()); EXPECT_EQ(VideoDetector::State::PLAYING_FULLSCREEN, observer_->PopState()); EXPECT_TRUE(observer_->empty()); // Make the second window non-fullscreen and check that the observer is // immediately notified about windowed video. observer_->reset(); other_window_state->OnWMEvent(&toggle_fullscreen_event); ASSERT_FALSE(other_window_state->IsFullscreen()); EXPECT_EQ(VideoDetector::State::PLAYING_WINDOWED, observer_->PopState()); EXPECT_TRUE(observer_->empty()); } } // namespace ash
null
null
null
null
42,982
43,766
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
43,766
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef BASE_CONTAINERS_SPAN_H_ #define BASE_CONTAINERS_SPAN_H_ #include <stddef.h> #include <algorithm> #include <array> #include <iterator> #include <type_traits> #include <utility> #include "base/logging.h" namespace base { template <typename T> class span; namespace internal { template <typename T> struct IsSpanImpl : std::false_type {}; template <typename T> struct IsSpanImpl<span<T>> : std::true_type {}; template <typename T> using IsSpan = IsSpanImpl<std::decay_t<T>>; template <typename T> struct IsStdArrayImpl : std::false_type {}; template <typename T, size_t N> struct IsStdArrayImpl<std::array<T, N>> : std::true_type {}; template <typename T> using IsStdArray = IsStdArrayImpl<std::decay_t<T>>; template <typename From, typename To> using IsLegalSpanConversion = std::is_convertible<From*, To*>; template <typename Container, typename T> using ContainerHasConvertibleData = IsLegalSpanConversion< std::remove_pointer_t<decltype(std::declval<Container>().data())>, T>; template <typename Container> using ContainerHasIntegralSize = std::is_integral<decltype(std::declval<Container>().size())>; template <typename From, typename To> using EnableIfLegalSpanConversion = std::enable_if_t<IsLegalSpanConversion<From, To>::value>; // SFINAE check if Container can be converted to a span<T>. Note that the // implementation details of this check differ slightly from the requirements in // the working group proposal: in particular, the proposal also requires that // the container conversion constructor participate in overload resolution only // if two additional conditions are true: // // 1. Container implements operator[]. // 2. Container::value_type matches remove_const_t<element_type>. // // The requirements are relaxed slightly here: in particular, not requiring (2) // means that an immutable span can be easily constructed from a mutable // container. template <typename Container, typename T> using EnableIfSpanCompatibleContainer = std::enable_if_t<!internal::IsSpan<Container>::value && !internal::IsStdArray<Container>::value && ContainerHasConvertibleData<Container, T>::value && ContainerHasIntegralSize<Container>::value>; template <typename Container, typename T> using EnableIfConstSpanCompatibleContainer = std::enable_if_t<std::is_const<T>::value && !internal::IsSpan<Container>::value && !internal::IsStdArray<Container>::value && ContainerHasConvertibleData<Container, T>::value && ContainerHasIntegralSize<Container>::value>; } // namespace internal // A span is a value type that represents an array of elements of type T. Since // it only consists of a pointer to memory with an associated size, it is very // light-weight. It is cheap to construct, copy, move and use spans, so that // users are encouraged to use it as a pass-by-value parameter. A span does not // own the underlying memory, so care must be taken to ensure that a span does // not outlive the backing store. // // span is somewhat analogous to StringPiece, but with arbitrary element types, // allowing mutation if T is non-const. // // span is implicitly convertible from C++ arrays, as well as most [1] // container-like types that provide a data() and size() method (such as // std::vector<T>). A mutable span<T> can also be implicitly converted to an // immutable span<const T>. 
// // Consider using a span for functions that take a data pointer and size // parameter: it allows the function to still act on an array-like type, while // allowing the caller code to be a bit more concise. // // For read-only data access pass a span<const T>: the caller can supply either // a span<const T> or a span<T>, while the callee will have a read-only view. // For read-write access a mutable span<T> is required. // // Without span: // Read-Only: // // std::string HexEncode(const uint8_t* data, size_t size); // std::vector<uint8_t> data_buffer = GenerateData(); // std::string r = HexEncode(data_buffer.data(), data_buffer.size()); // // Mutable: // // ssize_t SafeSNPrintf(char* buf, size_t N, const char* fmt, Args...); // char str_buffer[100]; // SafeSNPrintf(str_buffer, sizeof(str_buffer), "Pi ~= %lf", 3.14); // // With span: // Read-Only: // // std::string HexEncode(base::span<const uint8_t> data); // std::vector<uint8_t> data_buffer = GenerateData(); // std::string r = HexEncode(data_buffer); // // Mutable: // // ssize_t SafeSNPrintf(base::span<char>, const char* fmt, Args...); // char str_buffer[100]; // SafeSNPrintf(str_buffer, "Pi ~= %lf", 3.14); // // Spans with "const" and pointers // ------------------------------- // // Const and pointers can get confusing. Here are vectors of pointers and their // corresponding spans (you can always make the span "more const" too): // // const std::vector<int*> => base::span<int* const> // std::vector<const int*> => base::span<const int*> // const std::vector<const int*> => base::span<const int* const> // // Differences from the working group proposal // ------------------------------------------- // // https://wg21.link/P0122 is the latest working group proposal, Chromium // currently implements R6. The biggest difference is span does not support a // static extent template parameter. Other differences are documented in // subsections below. // // Differences from [views.constants]: // - no dynamic_extent constant // // Differences from [span.objectrep]: // - no as_bytes() // - no as_writeable_bytes() // // Differences in constants and types: // - no element_type type alias // - no index_type type alias // - no different_type type alias // - no extent constant // // Differences from [span.cons]: // - no constructor from a pointer range // - no constructor from std::array // // Differences from [span.sub]: // - no templated first() // - no templated last() // - no templated subspan() // - using size_t instead of ptrdiff_t for indexing // // Differences from [span.obs]: // - no size_bytes() // - using size_t instead of ptrdiff_t to represent size() // // Differences from [span.elem]: // - no operator ()() // - using size_t instead of ptrdiff_t for indexing // [span], class template span template <typename T> class span { public: using value_type = std::remove_cv_t<T>; using pointer = T*; using reference = T&; using iterator = T*; using const_iterator = const T*; using reverse_iterator = std::reverse_iterator<iterator>; using const_reverse_iterator = std::reverse_iterator<const_iterator>; // [span.cons], span constructors, copy, assignment, and destructor constexpr span() noexcept : data_(nullptr), size_(0) {} constexpr span(T* data, size_t size) noexcept : data_(data), size_(size) {} // TODO(dcheng): Implement construction from a |begin| and |end| pointer. template <size_t N> constexpr span(T (&array)[N]) noexcept : span(array, N) {} // TODO(dcheng): Implement construction from std::array. 
// Conversion from a container that provides |T* data()| and |integral_type // size()|. template <typename Container, typename = internal::EnableIfSpanCompatibleContainer<Container, T>> constexpr span(Container& container) : span(container.data(), container.size()) {} template < typename Container, typename = internal::EnableIfConstSpanCompatibleContainer<Container, T>> span(const Container& container) : span(container.data(), container.size()) {} constexpr span(const span& other) noexcept = default; // Conversions from spans of compatible types: this allows a span<T> to be // seamlessly used as a span<const T>, but not the other way around. template <typename U, typename = internal::EnableIfLegalSpanConversion<U, T>> constexpr span(const span<U>& other) : span(other.data(), other.size()) {} constexpr span& operator=(const span& other) noexcept = default; ~span() noexcept = default; // [span.sub], span subviews constexpr span first(size_t count) const { CHECK(count <= size_); return span(data_, count); } constexpr span last(size_t count) const { CHECK(count <= size_); return span(data_ + (size_ - count), count); } constexpr span subspan(size_t pos, size_t count = -1) const { constexpr auto npos = static_cast<size_t>(-1); CHECK(pos <= size_); CHECK(count == npos || count <= size_ - pos); return span(data_ + pos, count == npos ? size_ - pos : count); } // [span.obs], span observers constexpr size_t size() const noexcept { return size_; } constexpr bool empty() const noexcept { return size_ == 0; } // [span.elem], span element access constexpr T& operator[](size_t index) const noexcept { CHECK(index < size_); return data_[index]; } constexpr T* data() const noexcept { return data_; } // [span.iter], span iterator support constexpr iterator begin() const noexcept { return data_; } constexpr iterator end() const noexcept { return data_ + size_; } constexpr const_iterator cbegin() const noexcept { return begin(); } constexpr const_iterator cend() const noexcept { return end(); } constexpr reverse_iterator rbegin() const noexcept { return reverse_iterator(end()); } constexpr reverse_iterator rend() const noexcept { return reverse_iterator(begin()); } constexpr const_reverse_iterator crbegin() const noexcept { return const_reverse_iterator(cend()); } constexpr const_reverse_iterator crend() const noexcept { return const_reverse_iterator(cbegin()); } private: T* data_; size_t size_; }; // [span.comparison], span comparison operators // Relational operators. Equality is a element-wise comparison. template <typename T> constexpr bool operator==(span<T> lhs, span<T> rhs) noexcept { return std::equal(lhs.cbegin(), lhs.cend(), rhs.cbegin(), rhs.cend()); } template <typename T> constexpr bool operator!=(span<T> lhs, span<T> rhs) noexcept { return !(lhs == rhs); } template <typename T> constexpr bool operator<(span<T> lhs, span<T> rhs) noexcept { return std::lexicographical_compare(lhs.cbegin(), lhs.cend(), rhs.cbegin(), rhs.cend()); } template <typename T> constexpr bool operator<=(span<T> lhs, span<T> rhs) noexcept { return !(rhs < lhs); } template <typename T> constexpr bool operator>(span<T> lhs, span<T> rhs) noexcept { return rhs < lhs; } template <typename T> constexpr bool operator>=(span<T> lhs, span<T> rhs) noexcept { return !(lhs < rhs); } // Type-deducing helpers for constructing a span. 
template <typename T> constexpr span<T> make_span(T* data, size_t size) noexcept { return span<T>(data, size); } template <typename T, size_t N> constexpr span<T> make_span(T (&array)[N]) noexcept { return span<T>(array); } template <typename Container, typename T = typename Container::value_type, typename = internal::EnableIfSpanCompatibleContainer<Container, T>> constexpr span<T> make_span(Container& container) { return span<T>(container); } template < typename Container, typename T = std::add_const_t<typename Container::value_type>, typename = internal::EnableIfConstSpanCompatibleContainer<Container, T>> constexpr span<T> make_span(const Container& container) { return span<T>(container); } } // namespace base #endif // BASE_CONTAINERS_SPAN_H_
null
null
null
null
40,629
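// A small usage sketch for the span header in the record above, using only
// APIs visible in that record (compiled against base/containers/span.h; the
// function names here are illustrative):
#include <vector>
#include "base/containers/span.h"

namespace {

// Read-only access: callers may pass a C array, a std::vector, or another
// span; nothing is copied.
int Sum(base::span<const int> values) {
  int total = 0;
  for (int v : values)
    total += v;
  return total;
}

void Demo() {
  int arr[] = {1, 2, 3, 4};
  std::vector<int> vec = {5, 6, 7, 8};

  Sum(arr);                            // implicit span from a C array
  Sum(vec);                            // from any container with data()/size()
  Sum(base::make_span(vec).first(2));  // subview: first two elements

  base::span<int> mutable_view(vec);
  base::span<const int> const_view = mutable_view;  // const-narrowing is implicit
  Sum(const_view.subspan(1));          // elements 1..3; subspan() CHECKs bounds
}

}  // namespace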
40,246
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
205,241
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* include/linux/dm9000.h * * Copyright (c) 2004 Simtec Electronics * Ben Dooks <[email protected]> * * Header file for dm9000 platform data * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #ifndef __DM9000_PLATFORM_DATA #define __DM9000_PLATFORM_DATA __FILE__ #include <linux/if_ether.h> /* IO control flags */ #define DM9000_PLATF_8BITONLY (0x0001) #define DM9000_PLATF_16BITONLY (0x0002) #define DM9000_PLATF_32BITONLY (0x0004) #define DM9000_PLATF_EXT_PHY (0x0008) #define DM9000_PLATF_NO_EEPROM (0x0010) #define DM9000_PLATF_SIMPLE_PHY (0x0020) /* Use NSR to find LinkStatus */ /* platform data for platform device structure's platform_data field */ struct dm9000_plat_data { unsigned int flags; unsigned char dev_addr[ETH_ALEN]; /* allow replacement IO routines */ void (*inblk)(void __iomem *reg, void *data, int len); void (*outblk)(void __iomem *reg, void *data, int len); void (*dumpblk)(void __iomem *reg, int len); }; #endif /* __DM9000_PLATFORM_DATA */
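/* Typical use: board-support code fills a dm9000_plat_data with a bus-width
 * flag and the MAC address, then hangs it off the platform_device's
 * dev.platform_data; the inblk/outblk/dumpblk hooks exist so boards with
 * unusual bus wiring can substitute their own I/O accessors. */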
null
null
null
null
113,588
41,347
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
206,342
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* thread_info.h: common low-level thread information accessors * * Copyright (C) 2002 David Howells ([email protected]) * - Incorporating suggestions made by Linus Torvalds */ #ifndef _LINUX_THREAD_INFO_H #define _LINUX_THREAD_INFO_H #include <linux/types.h> #include <linux/bug.h> #include <linux/restart_block.h> #ifdef CONFIG_THREAD_INFO_IN_TASK /* * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels, * including <asm/current.h> can cause a circular dependency on some platforms. */ #include <asm/current.h> #define current_thread_info() ((struct thread_info *)current) #endif #include <linux/bitops.h> #include <asm/thread_info.h> #ifdef __KERNEL__ #ifdef CONFIG_DEBUG_STACK_USAGE # define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \ __GFP_ZERO) #else # define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK) #endif /* * flag set/clear/test wrappers * - pass TIF_xxxx constants to these functions */ static inline void set_ti_thread_flag(struct thread_info *ti, int flag) { set_bit(flag, (unsigned long *)&ti->flags); } static inline void clear_ti_thread_flag(struct thread_info *ti, int flag) { clear_bit(flag, (unsigned long *)&ti->flags); } static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag) { return test_and_set_bit(flag, (unsigned long *)&ti->flags); } static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag) { return test_and_clear_bit(flag, (unsigned long *)&ti->flags); } static inline int test_ti_thread_flag(struct thread_info *ti, int flag) { return test_bit(flag, (unsigned long *)&ti->flags); } #define set_thread_flag(flag) \ set_ti_thread_flag(current_thread_info(), flag) #define clear_thread_flag(flag) \ clear_ti_thread_flag(current_thread_info(), flag) #define test_and_set_thread_flag(flag) \ test_and_set_ti_thread_flag(current_thread_info(), flag) #define test_and_clear_thread_flag(flag) \ test_and_clear_ti_thread_flag(current_thread_info(), flag) #define test_thread_flag(flag) \ test_ti_thread_flag(current_thread_info(), flag) #define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES static inline int arch_within_stack_frames(const void * const stack, const void * const stackend, const void *obj, unsigned long len) { return 0; } #endif #ifdef CONFIG_HARDENED_USERCOPY extern void __check_object_size(const void *ptr, unsigned long n, bool to_user); static __always_inline void check_object_size(const void *ptr, unsigned long n, bool to_user) { if (!__builtin_constant_p(n)) __check_object_size(ptr, n, to_user); } #else static inline void check_object_size(const void *ptr, unsigned long n, bool to_user) { } #endif /* CONFIG_HARDENED_USERCOPY */ #endif /* __KERNEL__ */ #endif /* _LINUX_THREAD_INFO_H */
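/* Note on check_object_size() above: when the copy length is a compile-time
 * constant, __builtin_constant_p(n) lets the hardened-usercopy check compile
 * away entirely, so only runtime-sized copies pay for the call into
 * __check_object_size(). */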
null
null
null
null
114,689
13,196
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
178,191
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef _ALPHA_UNISTD_H #define _ALPHA_UNISTD_H #include <uapi/asm/unistd.h> #define NR_SYSCALLS 514 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_FADVISE64 #define __ARCH_WANT_SYS_GETPGRP #define __ARCH_WANT_SYS_OLD_GETRLIMIT #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE #endif /* _ALPHA_UNISTD_H */
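/* The __ARCH_WANT_* selections above tell the generic syscall machinery which
 * legacy entry points (old readdir, stat64, the fork/vfork/clone wrappers,
 * and so on) alpha still exposes. */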
null
null
null
null
86,538
28,470
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
28,470
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/* Copyright (c) 2016, Alliance for Open Media. All rights reserved. */ /* */ /* This source code is subject to the terms of the BSD 2 Clause License and */ /* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License */ /* was not distributed with this source code in the LICENSE file, you can */ /* obtain it at www.aomedia.org/license/software. If the Alliance for Open */ /* Media Patent License 1.0 was not distributed with this source code in the */ /* PATENTS file, you can obtain it at www.aomedia.org/license/patent. */ /* This file automatically generated by configure. Do not edit! */ #ifndef AOM_CONFIG_H #define AOM_CONFIG_H #define RESTRICT #define INLINE inline #define ARCH_ARM 0 #define ARCH_MIPS 0 #define ARCH_X86 0 #define ARCH_X86_64 0 #define HAVE_NEON 0 #define HAVE_NEON_ASM 0 #define HAVE_MIPS32 0 #define HAVE_DSPR2 0 #define HAVE_MSA 0 #define HAVE_MIPS64 0 #define HAVE_MMX 0 #define HAVE_SSE 0 #define HAVE_SSE2 0 #define HAVE_SSE3 0 #define HAVE_SSSE3 0 #define HAVE_SSE4_1 0 #define HAVE_AVX 0 #define HAVE_AVX2 0 #define HAVE_AOM_PORTS 1 #define HAVE_FEXCEPT 1 #define HAVE_PTHREAD_H 1 #define HAVE_UNISTD_H 1 #define HAVE_WXWIDGETS 0 #define CONFIG_DEPENDENCY_TRACKING 1 #define CONFIG_EXTERNAL_BUILD 1 #define CONFIG_INSTALL_DOCS 0 #define CONFIG_INSTALL_BINS 1 #define CONFIG_INSTALL_LIBS 1 #define CONFIG_INSTALL_SRCS 0 #define CONFIG_DEBUG 0 #define CONFIG_GPROF 0 #define CONFIG_GCOV 0 #define CONFIG_RVCT 0 #define CONFIG_GCC 1 #define CONFIG_MSVS 0 #define CONFIG_PIC 0 #define CONFIG_BIG_ENDIAN 0 #define CONFIG_CODEC_SRCS 0 #define CONFIG_DEBUG_LIBS 0 #define CONFIG_RUNTIME_CPU_DETECT 0 #define CONFIG_POSTPROC 1 #define CONFIG_MULTITHREAD 1 #define CONFIG_INTERNAL_STATS 0 #define CONFIG_AV1_ENCODER 0 #define CONFIG_AV1_DECODER 1 #define CONFIG_AV1 1 #define CONFIG_STATIC_MSVCRT 0 #define CONFIG_SPATIAL_RESAMPLING 1 #define CONFIG_REALTIME_ONLY 1 #define CONFIG_SHARED 0 #define CONFIG_STATIC 1 #define CONFIG_SMALL 0 #define CONFIG_POSTPROC_VISUALIZER 0 #define CONFIG_OS_SUPPORT 1 #define CONFIG_UNIT_TESTS 1 #define CONFIG_WEBM_IO 1 #define CONFIG_LIBYUV 1 #define CONFIG_ACCOUNTING 0 #define CONFIG_INSPECTION 0 #define CONFIG_DECODE_PERF_TESTS 0 #define CONFIG_ENCODE_PERF_TESTS 0 #define CONFIG_COEFFICIENT_RANGE_CHECKING 0 #define CONFIG_LOWBITDEPTH 1 #define CONFIG_HIGHBITDEPTH 0 #define CONFIG_EXPERIMENTAL 0 #define CONFIG_SIZE_LIMIT 1 #define CONFIG_FP_MB_STATS 0 #define CONFIG_CDEF 1 #define CONFIG_CDEF_SINGLEPASS 0 #define CONFIG_VAR_TX 1 #define CONFIG_RECT_TX 1 #define CONFIG_RECT_TX_EXT 0 #define CONFIG_TPL_MV 0 #define CONFIG_DUAL_FILTER 1 #define CONFIG_CONVOLVE_ROUND 1 #define CONFIG_COMPOUND_ROUND 0 #define CONFIG_EXT_TX 1 #define CONFIG_DPCM_INTRA 0 #define CONFIG_TX64X64 0 #define CONFIG_EXT_INTRA 1 #define CONFIG_INTRA_INTERP 0 #define CONFIG_FILTER_INTRA 0 #define CONFIG_INTRA_EDGE 0 #define CONFIG_INTRABC 0 #define CONFIG_EXT_INTER 1 #define CONFIG_INTERINTRA 1 #define CONFIG_WEDGE 1 #define CONFIG_COMPOUND_SEGMENT 1 #define CONFIG_EXT_REFS 1 #define CONFIG_SPEED_REFS 0 #define CONFIG_GF_GROUPS 0 #define CONFIG_GLOBAL_MOTION 1 #define CONFIG_NEW_QUANT 0 #define CONFIG_SUPERTX 0 #define CONFIG_ANS 0 #define CONFIG_LOOP_RESTORATION 0 #define CONFIG_EXT_PARTITION 0 #define CONFIG_EXT_PARTITION_TYPES 0 #define CONFIG_UNPOISON_PARTITION_CTX 0 #define CONFIG_EXT_TILE 0 #define CONFIG_MOTION_VAR 1 #define CONFIG_NCOBMC 0 #define CONFIG_WARPED_MOTION 1 #define CONFIG_Q_ADAPT_PROBS 0 #define CONFIG_BITSTREAM_DEBUG 0 #define CONFIG_INTER_STATS_ONLY 0 
#define CONFIG_PALETTE_DELTA_ENCODING 0 #define CONFIG_RAWBITS 0 #define CONFIG_PVQ 0 #define CONFIG_CFL 0 #define CONFIG_XIPHRC 0 #define CONFIG_DCT_ONLY 0 #define CONFIG_DAALA_DCT4 0 #define CONFIG_DAALA_DCT8 0 #define CONFIG_DAALA_DCT16 0 #define CONFIG_DAALA_DCT32 0 #define CONFIG_DAALA_DCT64 0 #define CONFIG_CB4X4 1 #define CONFIG_CHROMA_2X2 0 #define CONFIG_CHROMA_SUB8X8 1 #define CONFIG_FRAME_SIZE 0 #define CONFIG_DELTA_Q 1 #define CONFIG_EXT_DELTA_Q 1 #define CONFIG_ADAPT_SCAN 0 #define CONFIG_FILTER_7BIT 1 #define CONFIG_PARALLEL_DEBLOCKING 1 #define CONFIG_LOOPFILTERING_ACROSS_TILES 1 #define CONFIG_TEMPMV_SIGNALING 1 #define CONFIG_RD_DEBUG 0 #define CONFIG_REFERENCE_BUFFER 1 #define CONFIG_COEF_INTERLEAVE 0 #define CONFIG_ENTROPY_STATS 0 #define CONFIG_MASKED_TX 0 #define CONFIG_DEPENDENT_HORZTILES 0 #define CONFIG_DIST_8X8 1 #define CONFIG_TRIPRED 0 #define CONFIG_PALETTE_THROUGHPUT 1 #define CONFIG_REF_ADAPT 0 #define CONFIG_LV_MAP 0 #define CONFIG_TXK_SEL 0 #define CONFIG_MV_COMPRESS 1 #define CONFIG_SEGMENT_ZEROMV 0 #define CONFIG_FRAME_SUPERRES 0 #define CONFIG_NEW_MULTISYMBOL 0 #define CONFIG_COMPOUND_SINGLEREF 0 #define CONFIG_AOM_QM 1 #define CONFIG_ONE_SIDED_COMPOUND 1 #define CONFIG_EXT_COMP_REFS 1 #define CONFIG_SMOOTH_HV 1 #define CONFIG_VAR_REFS 0 #define CONFIG_RECT_INTRA_PRED 1 #define CONFIG_LGT 0 #define CONFIG_SBL_SYMBOL 0 #define CONFIG_NCOBMC_ADAPT_WEIGHT 0 #define CONFIG_BGSPRITE 0 #define CONFIG_VAR_TX_NO_TX_MODE 0 #define CONFIG_MRC_TX 0 #define CONFIG_LPF_DIRECT 0 #define CONFIG_LOOPFILTER_LEVEL 0 #define CONFIG_NO_FRAME_CONTEXT_SIGNALING 0 #define CONFIG_TXMG 0 #define CONFIG_HASH_ME 0 #define CONFIG_COLORSPACE_HEADERS 0 #define CONFIG_MFMV 0 #define CONFIG_JNT_COMP 0 #define CONFIG_ANALYZER 0 #define DECODE_WIDTH_LIMIT 16384 #define DECODE_HEIGHT_LIMIT 16384 #endif /* AOM_CONFIG_H */
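/* With CONFIG_SIZE_LIMIT set to 1 above, the decoder rejects streams whose
 * dimensions exceed the DECODE_WIDTH_LIMIT x DECODE_HEIGHT_LIMIT bounds
 * (16384x16384 here). */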
null
null
null
null
25,333
37,746
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
37,746
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/blink/renderer/platform/heap/sparse_heap_bitmap.h"

#include "third_party/blink/renderer/platform/heap/heap.h"

namespace blink {

// Return the subtree/bitmap that covers the
// [address, address + size) range. Null if there is none.
SparseHeapBitmap* SparseHeapBitmap::HasRange(Address address, size_t size) {
  DCHECK(!(reinterpret_cast<uintptr_t>(address) & kPointerAlignmentMask));
  SparseHeapBitmap* bitmap = this;
  while (bitmap) {
    // Interval starts after, |right_| handles.
    if (address > bitmap->end()) {
      bitmap = bitmap->right_.get();
      continue;
    }
    // Interval starts within, |bitmap| is included in the resulting range.
    if (address >= bitmap->Base())
      break;
    Address right = address + size - 1;
    // Interval starts before, but intersects with |bitmap|'s range.
    if (right >= bitmap->Base())
      break;
    // Interval is entirely before, for |left_| to handle.
    bitmap = bitmap->left_.get();
  }
  return bitmap;
}

bool SparseHeapBitmap::IsSet(Address address) {
  DCHECK(!(reinterpret_cast<uintptr_t>(address) & kPointerAlignmentMask));
  SparseHeapBitmap* bitmap = this;
  while (bitmap) {
    if (address > bitmap->end()) {
      bitmap = bitmap->right_.get();
      continue;
    }
    if (address >= bitmap->Base()) {
      if (bitmap->bitmap_) {
        return bitmap->bitmap_->test((address - bitmap->Base()) >>
                                     kPointerAlignmentInBits);
      }
      DCHECK(address == bitmap->Base());
      DCHECK_EQ(bitmap->size(), 1u);
      return true;
    }
    bitmap = bitmap->left_.get();
  }
  return false;
}

void SparseHeapBitmap::Add(Address address) {
  DCHECK(!(reinterpret_cast<uintptr_t>(address) & kPointerAlignmentMask));
  // |address| is beyond the maximum that this SparseHeapBitmap node can
  // encompass.
  if (address >= MaxEnd()) {
    if (!right_) {
      right_ = SparseHeapBitmap::Create(address);
      return;
    }
    right_->Add(address);
    return;
  }
  // Same on the other side.
  if (address < MinStart()) {
    if (!left_) {
      left_ = SparseHeapBitmap::Create(address);
      return;
    }
    left_->Add(address);
    return;
  }
  if (address == Base())
    return;
  // |address| can be encompassed by |this| by expanding its size.
  if (address > Base()) {
    if (!bitmap_)
      CreateBitmap();
    bitmap_->set((address - Base()) >> kPointerAlignmentInBits);
    return;
  }
  // Use |address| as the new base for this interval.
  Address old_base = SwapBase(address);
  CreateBitmap();
  bitmap_->set((old_base - address) >> kPointerAlignmentInBits);
}

void SparseHeapBitmap::CreateBitmap() {
  DCHECK(!bitmap_ && size() == 1);
  bitmap_ = std::make_unique<std::bitset<kBitmapChunkSize>>();
  size_ = kBitmapChunkRange;
  bitmap_->set(0);
}

size_t SparseHeapBitmap::IntervalCount() const {
  size_t count = 1;
  if (left_)
    count += left_->IntervalCount();
  if (right_)
    count += right_->IntervalCount();
  return count;
}

}  // namespace blink
null
null
null
null
34,609
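// A minimal sketch of the bit-indexing scheme used by SparseHeapBitmap in the
// record above, assuming 8-byte pointer alignment (kPointerAlignmentInBits ==
// 3); the constants and struct here are illustrative stand-ins, not the real
// Blink types:
#include <bitset>
#include <cstdint>

namespace {

constexpr uintptr_t kAlignBits = 3;  // 8-byte-aligned addresses
constexpr size_t kChunkBits = 4096;  // stand-in for kBitmapChunkSize

// Mirrors the (address - Base()) >> kPointerAlignmentInBits indexing from
// Add()/IsSet(): each set bit marks one aligned address relative to the
// chunk base. Callers must keep |address| within the chunk's range, just as
// the real tree hands each address to the node whose interval covers it.
struct Chunk {
  uintptr_t base;
  std::bitset<kChunkBits> bits;

  void Set(uintptr_t address) { bits.set((address - base) >> kAlignBits); }
  bool IsSet(uintptr_t address) const {
    return bits.test((address - base) >> kAlignBits);
  }
};

}  // namespace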
66,499
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
66,499
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/search/one_google_bar/one_google_bar_data.h" OneGoogleBarData::OneGoogleBarData() = default; OneGoogleBarData::OneGoogleBarData(const OneGoogleBarData&) = default; OneGoogleBarData::OneGoogleBarData(OneGoogleBarData&&) = default; OneGoogleBarData::~OneGoogleBarData() = default; OneGoogleBarData& OneGoogleBarData::operator=(const OneGoogleBarData&) = default; OneGoogleBarData& OneGoogleBarData::operator=(OneGoogleBarData&&) = default; bool operator==(const OneGoogleBarData& lhs, const OneGoogleBarData& rhs) { return lhs.bar_html == rhs.bar_html && lhs.in_head_script == rhs.in_head_script && lhs.in_head_style == rhs.in_head_style && lhs.after_bar_script == rhs.after_bar_script && lhs.end_of_body_html == rhs.end_of_body_html && lhs.end_of_body_script == rhs.end_of_body_script; } bool operator!=(const OneGoogleBarData& lhs, const OneGoogleBarData& rhs) { return !(lhs == rhs); }
null
null
null
null
63,362
69,218
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
69,218
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef LIBRARIES_NACL_IO_FS_FACTORY_H_ #define LIBRARIES_NACL_IO_FS_FACTORY_H_ #include <errno.h> #include "nacl_io/error.h" #include "nacl_io/filesystem.h" #include "sdk_util/scoped_ref.h" namespace nacl_io { class FsFactory { public: virtual ~FsFactory() {} virtual Error CreateFilesystem(const FsInitArgs& args, ScopedFilesystem* out_fs) = 0; }; } // namespace nacl_io #endif // LIBRARIES_NACL_IO_FS_FACTORY_H_
null
null
null
null
66,081
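// A minimal sketch of a concrete factory against the FsFactory interface in
// the record above. No real filesystem type is wired in, so this stub simply
// reports ENOSYS; returning an errno value matches the <errno.h> include in
// the interface header, though the exact Error representation is an
// assumption here.
#include <errno.h>

#include "nacl_io/fs_factory.h"

class NullFsFactory : public nacl_io::FsFactory {
 public:
  nacl_io::Error CreateFilesystem(const nacl_io::FsInitArgs& args,
                                  nacl_io::ScopedFilesystem* out_fs) override {
    (void)args;
    (void)out_fs;
    return ENOSYS;  // no filesystem implemented in this sketch
  }
};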
57,313
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
57,313
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/chromeos/arc/auth/arc_auth_context.h" #include <utility> #include "base/strings/stringprintf.h" #include "base/strings/utf_string_conversions.h" #include "chrome/browser/chromeos/arc/arc_support_host.h" #include "chrome/browser/profiles/profile.h" #include "chrome/browser/signin/profile_oauth2_token_service_factory.h" #include "chrome/browser/signin/signin_manager_factory.h" #include "chrome/browser/signin/signin_ui_util.h" #include "chrome/browser/ui/app_list/arc/arc_app_utils.h" #include "components/signin/core/browser/profile_oauth2_token_service.h" #include "components/signin/core/browser/signin_manager_base.h" #include "content/public/common/url_constants.h" #include "google_apis/gaia/gaia_auth_fetcher.h" #include "google_apis/gaia/gaia_constants.h" namespace arc { namespace { constexpr int kMaxRetryAttempts = 3; constexpr base::TimeDelta kRefreshTokenTimeout = base::TimeDelta::FromSeconds(10); constexpr net::BackoffEntry::Policy kRetryBackoffPolicy = { // Number of initial errors (in sequence) to ignore before applying // exponential back-off rules. 0, // Initial delay for exponential back-off in ms. 5000, // Factor by which the waiting time will be multiplied. 2.0, // Fuzzing percentage. ex: 10% will spread requests randomly // between 90%-100% of the calculated time. 0.0, // 0% // Maximum amount of time we are willing to delay our request in ms. 1000 * 15, // 15 seconds. // Time to keep an entry from being discarded even when it // has no significant state, -1 to never discard. -1, // Don't use initial delay unless the last request was an error. false, }; } // namespace ArcAuthContext::ArcAuthContext(Profile* profile) : profile_(profile), retry_backoff_(&kRetryBackoffPolicy) { // Get token service and account ID to fetch auth tokens. 
token_service_ = ProfileOAuth2TokenServiceFactory::GetForProfile(profile); const SigninManagerBase* const signin_manager = SigninManagerFactory::GetForProfile(profile); CHECK(token_service_ && signin_manager); account_id_ = signin_manager->GetAuthenticatedAccountId(); full_account_id_ = base::UTF16ToUTF8( signin_ui_util::GetAuthenticatedUsername(signin_manager)); } ArcAuthContext::~ArcAuthContext() { token_service_->RemoveObserver(this); } void ArcAuthContext::Prepare(const PrepareCallback& callback) { if (context_prepared_) { callback.Run(profile_->GetRequestContext()); return; } callback_ = callback; token_service_->RemoveObserver(this); refresh_token_timeout_.Stop(); ResetFetchers(); retry_backoff_.Reset(); if (!token_service_->RefreshTokenIsAvailable(account_id_)) { token_service_->AddObserver(this); refresh_token_timeout_.Start(FROM_HERE, kRefreshTokenTimeout, this, &ArcAuthContext::OnRefreshTokenTimeout); return; } StartFetchers(); } void ArcAuthContext::OnRefreshTokenAvailable(const std::string& account_id) { if (account_id != account_id_) return; OnRefreshTokensLoaded(); } void ArcAuthContext::OnRefreshTokensLoaded() { token_service_->RemoveObserver(this); refresh_token_timeout_.Stop(); StartFetchers(); } void ArcAuthContext::OnRefreshTokenTimeout() { LOG(WARNING) << "Failed to wait for refresh token."; token_service_->RemoveObserver(this); std::move(callback_).Run(nullptr); } void ArcAuthContext::StartFetchers() { DCHECK(!refresh_token_timeout_.IsRunning()); ResetFetchers(); if (skip_merge_session_for_testing_) { OnMergeSessionSuccess(""); return; } ubertoken_fetcher_.reset(new UbertokenFetcher(token_service_, this, GaiaConstants::kChromeOSSource, profile_->GetRequestContext())); ubertoken_fetcher_->StartFetchingToken(account_id_); } void ArcAuthContext::ResetFetchers() { merger_fetcher_.reset(); ubertoken_fetcher_.reset(); retry_timeout_.Stop(); } void ArcAuthContext::OnFetcherError(const GoogleServiceAuthError& error) { ResetFetchers(); DCHECK(error.state() != GoogleServiceAuthError::NONE); if (error.IsTransientError()) { retry_backoff_.InformOfRequest(false); if (retry_backoff_.failure_count() <= kMaxRetryAttempts) { LOG(WARNING) << "Found transient error. Retry attempt " << retry_backoff_.failure_count() << "."; refresh_token_timeout_.Start(FROM_HERE, retry_backoff_.GetTimeUntilRelease(), this, &ArcAuthContext::StartFetchers); return; } LOG(WARNING) << "Too many transient errors. Stop retrying."; } std::move(callback_).Run(nullptr); } void ArcAuthContext::OnUbertokenSuccess(const std::string& token) { ResetFetchers(); merger_fetcher_.reset(new GaiaAuthFetcher( this, GaiaConstants::kChromeOSSource, profile_->GetRequestContext())); merger_fetcher_->StartMergeSession(token, std::string()); } void ArcAuthContext::OnUbertokenFailure(const GoogleServiceAuthError& error) { LOG(WARNING) << "Failed to get ubertoken " << error.ToString() << "."; OnFetcherError(error); } void ArcAuthContext::OnMergeSessionSuccess(const std::string& data) { VLOG_IF(1, retry_backoff_.failure_count()) << "Auth context was successfully prepared after retry."; context_prepared_ = true; ResetFetchers(); std::move(callback_).Run(profile_->GetRequestContext()); } void ArcAuthContext::OnMergeSessionFailure( const GoogleServiceAuthError& error) { LOG(WARNING) << "Failed to merge gaia session " << error.ToString() << "."; OnFetcherError(error); } } // namespace arc
null
null
null
null
54,176
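// A quick standalone check of the retry schedule that kRetryBackoffPolicy in
// the record above produces (plain arithmetic, not the net::BackoffEntry
// API): a 5 s initial delay, 2.0 multiply factor, no fuzz, and a 15 s cap
// give roughly 5 s, 10 s, 15 s across the kMaxRetryAttempts = 3 retries.
#include <algorithm>
#include <cstdio>

int main() {
  double delay_ms = 5000.0;            // initial delay_ms from the policy
  const double kFactor = 2.0;          // multiply_factor
  const double kMaxDelayMs = 15000.0;  // maximum_backoff_ms (15 seconds)
  for (int attempt = 1; attempt <= 3; ++attempt) {
    std::printf("retry %d after %.0f ms\n", attempt,
                std::min(delay_ms, kMaxDelayMs));
    delay_ms *= kFactor;  // 5000 -> 10000 -> 20000 (capped at 15000)
  }
  return 0;
}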
16,739
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
181,734
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * SH7786 PCI-Express controller definitions. * * Copyright (C) 2008, 2009 Renesas Technology Corp. * All rights reserved. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #ifndef __PCI_SH7786_H #define __PCI_SH7786_H /* PCIe bus-0(x4) on SH7786 */ // Rev1.171 #define SH4A_PCIE_SPW_BASE 0xFE000000 /* spw config address for controller 0 */ #define SH4A_PCIE_SPW_BASE1 0xFE200000 /* spw config address for controller 1 (Rev1.14)*/ #define SH4A_PCIE_SPW_BASE2 0xFCC00000 /* spw config address for controller 2 (Rev1.171)*/ #define SH4A_PCIE_SPW_BASE_LEN 0x00080000 #define SH4A_PCI_CNFG_BASE 0xFE040000 /* pci config address for controller 0 */ #define SH4A_PCI_CNFG_BASE1 0xFE240000 /* pci config address for controller 1 (Rev1.14)*/ #define SH4A_PCI_CNFG_BASE2 0xFCC40000 /* pci config address for controller 2 (Rev1.171)*/ #define SH4A_PCI_CNFG_BASE_LEN 0x00040000 #define SH4A_PCIPIO_ADDR_OFFSET 0x000001c0 /* offset to pci config_address */ #define SH4A_PCIPIO_DATA_OFFSET 0x00000220 /* offset to pci config_data */ /* * for PEX8111(Max Payload Size=128B,PCIIO_SIZE=64K), * for other(Max Payload Size=4096B,PCIIO_SIZE=8M) */ /* PCI0: PCI memory target transfer 32-bit address translation value(Rev1.11T)*/ #define SH4A_PCIBMSTR_TRANSLATION 0x20000000 /* SPVCR0 */ #define SH4A_PCIEVCR0 (0x000000) /* R - 0x0000 0000 32 */ #define BITS_TOP_MB (24) #define MASK_TOP_MB (0xff<<BITS_TOP_MB) #define BITS_BOT_MB (16) #define MASK_BOT_MB (0xff<<BITS_BOT_MB) #define BITS_VC_ID (0) #define MASK_VC_ID (0xffff<<BITS_VC_ID) /* SPVCR1 */ #define SH4A_PCIEVCR1 (0x000004) /* R - 0x0000 0000 32*/ #define BITS_BADOPC (5) /* 5 BADOPC 0 R/W */ #define MASK_BADOPC (1<<BITS_BADOPC) #define BITS_BADDEST (4) /*4 BADDEST 0 R/W */ #define MASK_BADDEST (1<<BITS_BADDEST) #define BITS_UNSOLRESP (3) /* 3 UNSOLRESP 0 R/W */ #define MASK_UNSOLRESP (1<<BITS_UNSOLRESP) #define BITS_ERRSNT (1) /* 1 ERRSNT 0 */ #define MASK_ERRSNT (1<<BITS_ERRSNT) #define BITS_ERRRCV (0) /* 0 ERRRCV 0 */ #define MASK_ERRRCV (1<<BITS_ERRRCV) /* PCIEENBLR */ #define SH4A_PCIEENBLR (0x000008) /* R/W - 0x0000 0001 32 */ /* PCIEECR */ #define SH4A_PCIEECR (0x00000C) /* R/W - 0x0000 0000 32 */ #define BITS_ENBL (0) /* 0 ENBL 0 R/W */ #define MASK_ENBL (1<<BITS_ENBL) /* PCIEPAR */ #define SH4A_PCIEPAR (0x000010) /* R/W - 0x0000 0000 32 */ #define BITS_BN (24) #define MASK_BN (0xff<<BITS_BN) #define BITS_DN (19) #define MASK_DN (0x1f<<BITS_DN) #define BITS_FN (16) #define MASK_FN (0x7<<BITS_FN) #define BITS_EREGNO (8) #define MASK_EREGNO (0xff<<BITS_EREGNO) #define BITS_REGNO (2) #define MASK_REGNO (0x3f<<BITS_REGNO) /* PCIEPCTLR */ #define SH4A_PCIEPCTLR (0x000018) /* R/W - 0x0000 0000 32 */ #define BITS_CCIE (31) /* 31 CCIE */ #define MASK_CCIE (1<<BITS_CCIE) #define BITS_TYPE (8) #define MASK_TYPE (1<<BITS_TYPE) #define BITS_C_VC (0) #define MASK_C_VC (1<<BITS_C_VC) /* PCIEPDR */ #define SH4A_PCIEPDR (0x000020) /* R/W - 0x0000 0000 32 */ #define BITS_PDR (0) #define MASK_PDR (0xffffffff<<BITS_PDR) /* PCIEMSGALR */ #define SH4A_PCIEMSGALR (0x000030) /* R/W - 0x0000 0000 32 */ #define BITS_MSGADRL (0) #define MASK_MSGADRL (0xffffffff<<BITS_MSGADRL) /* PCIEMSGAHR */ #define SH4A_PCIEMSGAHR (0x000034) /* R/W - 0x0000 0000 32 */ #define BITS_MSGADRH (0) #define MASK_MSGADRH (0xffffffff<<BITS_MSGADRH) /* PCIEMSGCTLR */ #define SH4A_PCIEMSGCTLR (0x000038) /* R/W - 0x0000 0000 32 */ #define BITS_MSGIE (31) #define MASK_MSGIE (1<<BITS_MSGIE) 
#define BITS_MROUTE (16) #define MASK_MROUTE (0x7<<BITS_MROUTE) #define BITS_MCODE (8) #define MASK_MCODE (0xff<<BITS_MCODE) #define BITS_M_VC (0) #define MASK_M_VC (1<<BITS_M_VC) /* PCIEMSG */ #define SH4A_PCIEMSG (0x000040) /* W - - 32 */ #define BITS_MDATA (0) #define MASK_MDATA (0xffffffff<<BITS_MDATA) /* PCIEUNLOCKCR */ #define SH4A_PCIEUNLOCKCR (0x000048) /* R/W - 0x0000 0000 32 */ /* PCIEIDR */ #define SH4A_PCIEIDR (0x000060) /* R/W - 0x0101 1101 32 */ /* PCIEDBGCTLR */ #define SH4A_PCIEDBGCTLR (0x000100) /* R/W - 0x0000 0000 32 */ /* PCIEINTXR */ #define SH4A_PCIEINTXR (0x004000) /* R/W - 0x0000 0000 32 */ /* PCIERMSGR */ #define SH4A_PCIERMSGR (0x004010) /* R/W - 0x0000 0000 32 */ /* PCIERSTR */ #define SH4A_PCIERSTR(x) (0x008000 + ((x) * 0x4)) /* R/W - 0x0000 0000 32 */ /* PCIESRSTR */ #define SH4A_PCIESRSTR (0x008040) /* R/W - 0x0000 0000 32 */ /* PCIEPHYCTLR */ #define SH4A_PCIEPHYCTLR (0x010000) /* R/W - 0x0000 0000 32 */ #define BITS_CKE (0) #define MASK_CKE (1<<BITS_CKE) /* PCIERMSGIER */ #define SH4A_PCIERMSGIER (0x004040) /* R/W - 0x0000 0000 32 */ /* PCIEPHYADRR */ #define SH4A_PCIEPHYADRR (0x010004) /* R/W - 0x0000 0000 32 */ #define BITS_ACK (24) // Rev1.171 #define MASK_ACK (1<<BITS_ACK) // Rev1.171 #define BITS_CMD (16) // Rev1.171 #define MASK_CMD (0x03<<BITS_CMD) // Rev1.171 #define BITS_LANE (8) #define MASK_LANE (0x0f<<BITS_LANE) #define BITS_ADR (0) #define MASK_ADR (0xff<<BITS_ADR) /* PCIEPHYDINR */ // Rev1.171 start. #define SH4A_PCIEPHYDINR (0x010008) /* R/W - 0x0000 0000 32 */ /* PCIEPHYDOUTR */ #define SH4A_PCIEPHYDOUTR (0x01000C) /* R/W - 0x0000 0000 32 */ /* PCIEPHYSR */ #define SH4A_PCIEPHYSR (0x010010) /* R/W - 0x0000 0000 32 */ // Rev1.171 end. /* PCIEPHYDATAR */ #define SH4A_PCIEPHYDATAR (0x00008) /* R/W - 0xxxxx xxxx 32 */ #define BITS_DATA (0) #define MASK_DATA (0xffffffff<<BITS_DATA) /* PCIETCTLR */ #define SH4A_PCIETCTLR (0x020000) /* R/W R/W 0x0000 0000 32 */ #define BITS_CFINT (0) #define MASK_CFINT (1<<BITS_CFINT) /* PCIETSTR */ #define SH4A_PCIETSTR (0x020004) /* R 0x0000 0000 32 */ /* PCIEINTR */ #define SH4A_PCIEINTR (0x020008) /* R/W R/W 0x0000 0000 32 */ #define BITS_INT_RX_ERP (31) #define MASK_INT_RX_ERP (1<<BITS_INT_RX_ERP) #define BITS_INT_RX_VCX_Posted (30) #define MASK_INT_RX_VCX_Posted (1<<BITS_INT_RX_VCX_Posted) #define BITS_INT_RX_VCX_NonPosted (29) #define MASK_INT_RX_VCX_NonPosted (1<<BITS_INT_RX_VCX_NonPosted) #define BITS_INT_RX_VCX_CPL (28) #define MASK_INT_RX_VCX_CPL (1<<BITS_INT_RX_VCX_CPL) #define BITS_INT_TX_VCX_Posted (26) #define MASK_INT_TX_VCX_Posted (1<<BITS_INT_TX_VCX_Posted) #define BITS_INT_TX_VCX_NonPosted (25) #define MASK_INT_TX_VCX_NonPosted (1<<BITS_INT_TX_VCX_NonPosted) #define BITS_INT_TX_VCX_CPL (24) #define MASK_INT_TX_VCX_CPL (1<<BITS_INT_TX_VCX_CPL) #define BITS_INT_RX_VC0_Posted (22) #define MASK_INT_RX_VC0_Posted (1<<BITS_INT_RX_VC0_Posted) #define BITS_INT_RX_VC0_NonPosted (21) #define MASK_INT_RX_VC0_NonPosted (1<<BITS_INT_RX_VC0_NonPosted) #define BITS_INT_RX_VC0_CPL (20) #define MASK_INT_RX_VC0_CPL (1<<BITS_INT_RX_VC0_CPL) #define BITS_INT_TX_VC0_Posted (18) #define MASK_INT_TX_VC0_Posted (1<<BITS_INT_TX_VC0_Posted) #define BITS_INT_TX_VC0_NonPosted (17) #define MASK_INT_TX_VC0_NonPosted (1<<BITS_INT_TX_VC0_NonPosted) #define BITS_INT_TX_VC0_CPL (16) #define MASK_INT_TX_VC0_CPL (1<<BITS_INT_TX_VC0_CPL) #define BITS_INT_RX_CTRL (15) #define MASK_INT_RX_CTRL (1<<BITS_INT_RX_CTRL) #define BITS_INT_TX_CTRL (14) #define MASK_INT_TX_CTRL (1<<BITS_INT_TX_CTRL) #define BITS_INTTL (11) #define MASK_INTTL 
(1<<BITS_INTTL) #define BITS_INTDL (10) #define MASK_INTDL (1<<BITS_INTDL) #define BITS_INTMAC (9) #define MASK_INTMAC (1<<BITS_INTMAC) #define BITS_INTPM (8) #define MASK_INTPM (1<<BITS_INTPM) /* PCIEINTER */ #define SH4A_PCIEINTER (0x02000C) /* R/W R/W 0x0000 0000 32 */ #define BITS_INT_RX_ERP (31) #define MASK_INT_RX_ERP (1<<BITS_INT_RX_ERP) #define BITS_INT_RX_VCX_Posted (30) #define MASK_INT_RX_VCX_Posted (1<<BITS_INT_RX_VCX_Posted) #define BITS_INT_RX_VCX_NonPosted (29) #define MASK_INT_RX_VCX_NonPosted (1<<BITS_INT_RX_VCX_NonPosted) #define BITS_INT_RX_VCX_CPL (28) #define MASK_INT_RX_VCX_CPL (1<<BITS_INT_RX_VCX_CPL) #define BITS_INT_TX_VCX_Posted (26) #define MASK_INT_TX_VCX_Posted (1<<BITS_INT_TX_VCX_Posted) #define BITS_INT_TX_VCX_NonPosted (25) #define MASK_INT_TX_VCX_NonPosted (1<<BITS_INT_TX_VCX_NonPosted) #define BITS_INT_TX_VCX_CPL (24) #define MASK_INT_TX_VCX_CPL (1<<BITS_INT_TX_VCX_CPL) #define BITS_INT_RX_VC0_Posted (22) #define MASK_INT_RX_VC0_Posted (1<<BITS_INT_RX_VC0_Posted) #define BITS_INT_RX_VC0_NonPosted (21) #define MASK_INT_RX_VC0_NonPosted (1<<BITS_INT_RX_VC0_NonPosted) #define BITS_INT_RX_VC0_CPL (20) #define MASK_INT_RX_VC0_CPL (1<<BITS_INT_RX_VC0_CPL) #define BITS_INT_TX_VC0_Posted (18) #define MASK_INT_TX_VC0_Posted (1<<BITS_INT_TX_VC0_Posted) #define BITS_INT_TX_VC0_NonPosted (17) #define MASK_INT_TX_VC0_NonPosted (1<<BITS_INT_TX_VC0_NonPosted) #define BITS_INT_TX_VC0_CPL (16) #define MASK_INT_TX_VC0_CPL (1<<BITS_INT_TX_VC0_CPL) #define BITS_INT_RX_CTRL (15) #define MASK_INT_RX_CTRL (1<<BITS_INT_RX_CTRL) #define BITS_INT_TX_CTRL (14) #define MASK_INT_TX_CTRL (1<<BITS_INT_TX_CTRL) #define BITS_INTTL (11) #define MASK_INTTL (1<<BITS_INTTL) #define BITS_INTDL (10) #define MASK_INTDL (1<<BITS_INTDL) #define BITS_INTMAC (9) #define MASK_INTMAC (1<<BITS_INTMAC) #define BITS_INTPM (8) #define MASK_INTPM (1<<BITS_INTPM) /* PCIEEH0R */ #define SH4A_PCIEEHR(x) (0x020010 + ((x) * 0x4)) /* R - 0x0000 0000 32 */ /* PCIEAIR */ #define SH4A_PCIEAIR (SH4A_PCIE_BASE + 0x020010) /* R/W R/W 0xxxxx xxxx 32 */ /* PCIECIR */ #define SH4A_PCIECIR (SH4A_PCIE_BASE) /* R/W R/W 0xxxxx xxxx 32 */ /* PCIEERRFR */ // Rev1.18 #define SH4A_PCIEERRFR (0x020020) /* R/W R/W 0xxxxx xxxx 32 */ // Rev1.18 /* PCIEERRFER */ #define SH4A_PCIEERRFER (0x020024) /* R/W R/W 0x0000 0000 32 */ /* PCIEERRFR2 */ #define SH4A_PCIEERRFR2 (0x020028) /* R/W R/W 0x0000 0000 32 */ /* PCIEMSIR */ #define SH4A_PCIEMSIR (0x020040) /* R/W - 0x0000 0000 32 */ /* PCIEMSIFR */ #define SH4A_PCIEMSIFR (0x020044) /* R/W R/W 0x0000 0000 32 */ /* PCIEPWRCTLR */ #define SH4A_PCIEPWRCTLR (0x020100) /* R/W - 0x0000 0000 32 */ /* PCIEPCCTLR */ #define SH4A_PCIEPCCTLR (0x020180) /* R/W - 0x0000 0000 32 */ // Rev1.18 /* PCIELAR0 */ #define SH4A_PCIELAR0 (0x020200) /* R/W R/W 0x0000 0000 32 */ #define BITS_LARn (20) #define MASK_LARn (0xfff<<BITS_LARn) #define SH4A_PCIE_020204 (0x020204) /* R/W R/W 0x0000 0000 32 */ /* PCIELAMR0 */ #define SH4A_PCIELAMR0 (0x020208) /* R/W R/W 0x0000 0000 32 */ #define BITS_LAMRn (20) #define MASK_LAMRn (0x1ff<<BITS_LAMRn) #define BITS_LAREn (0) #define MASK_LAREn (0x1<<BITS_LAREn) /* PCIECSCR0 */ #define SH4A_PCIECSCR0 (0x020210) /* R/W R/W 0x0000 0000 32 */ #define BITS_RANGE (2) #define MASK_RANGE (0x7<<BITS_RANGE) #define BITS_SNPMD (0) #define MASK_SNPMD (0x3<<BITS_SNPMD) /* PCIECSAR0 */ #define SH4A_PCIECSAR0 (0x020214) /* R/W R/W 0x0000 0000 32 */ #define BITS_CSADR (0) #define MASK_CSADR (0xffffffff<<BITS_CSADR) /* PCIESTCTLR0 */ #define SH4A_PCIESTCTLR0 (0x020218) /* R/W R/W 0x0000 0000 
32 */ #define BITS_SHPRI (8) #define MASK_SHPRI (0x0f<<BITS_SHPRI) #define SH4A_PCIE_020224 (0x020224) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIELAR1 (0x020220) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIELAMR1 (0x020228) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIECSCR1 (0x020230) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIECSAR1 (0x020234) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIESTCTLR1 (0x020238) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIELAR2 (0x020240) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIE_020244 (0x020244) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIELAMR2 (0x020248) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIECSCR2 (0x020250) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIECSAR2 (0x020254) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIESTCTLR2 (0x020258) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIELAR3 (0x020260) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIE_020264 (0x020264) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIELAMR3 (0x020268) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIECSCR3 (0x020270) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIECSAR3 (0x020274) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIESTCTLR3 (0x020278) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIELAR4 (0x020280) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIE_020284 (0x020284) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIELAMR4 (0x020288) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIECSCR4 (0x020290) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIECSAR4 (0x020294) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIESTCTLR4 (0x020298) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIELAR5 (0x0202A0) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIE_0202A4 (0x0202A4) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIELAMR5 (0x0202A8) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIECSCR5 (0x0202B0) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIECSAR5 (0x0202B4) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIESTCTLR5 (0x0202B8) /* R/W R/W 0x0000 0000 32 */ /* PCIEPARL */ #define SH4A_PCIEPARL(x) (0x020400 + ((x) * 0x20)) /* R/W R/W 0x0000 0000 32 */ #define BITS_PAL (18) #define MASK_PAL (0x3fff<<BITS_PAL) /* PCIEPARH */ #define SH4A_PCIEPARH(x) (0x020404 + ((x) * 0x20)) /* R/W R/W 0x0000 0000 32 */ #define BITS_PAH (0) #define MASK_PAH (0xffffffff<<BITS_PAH) /* PCIEPAMR */ #define SH4A_PCIEPAMR(x) (0x020408 + ((x) * 0x20)) /* R/W R/W 0x0000 0000 32 */ #define BITS_PAM (18) #define MASK_PAM (0x3fff<<BITS_PAM) /* PCIEPTCTLR */ #define SH4A_PCIEPTCTLR(x) (0x02040C + ((x) * 0x20)) #define BITS_PARE (31) #define MASK_PARE (0x1<<BITS_PARE) #define BITS_TC (20) #define MASK_TC (0x7<<BITS_TC) #define BITS_T_VC (16) #define MASK_T_VC (0x1<<BITS_T_VC) #define BITS_LOCK (12) #define MASK_LOCK (0x1<<BITS_LOCK) #define BITS_SPC (8) #define MASK_SPC (0x1<<BITS_SPC) #define SH4A_PCIEDMAOR (0x021000) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMSAR0 (0x021100) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMSAHR0 (0x021104) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMDAR0 (0x021108) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMDAHR0 (0x02110C) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMBCNTR0 (0x021110) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMSBCNTR0 (0x021114) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMSTRR0 (0x021118) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMCCAR0 (0x02111C) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMCCR0 (0x021120) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMCC2R0 (0x021124) /* R/W R/W 0x0000 0000 - */ #define SH4A_PCIEDMCCCR0 (0x021128) /* R/W R/W 0x0000 0000 
32 */ #define SH4A_PCIEDMCHSR0 (0x02112C) /* R/W - 0x0000 0000 32 */ #define SH4A_PCIEDMSAR1 (0x021140) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMSAHR1 (0x021144) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMDAR1 (0x021148) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMDAHR1 (0x02114C) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMBCNTR1 (0x021150) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMSBCNTR1 (0x021154) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMSTRR1 (0x021158) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMCCAR1 (0x02115C) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMCCR1 (0x021160) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMCC2R1 (0x021164) /* R/W R/W 0x0000 0000 - */ #define SH4A_PCIEDMCCCR1 (0x021168) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMCHSR1 (0x02116C) /* R/W - 0x0000 0000 32 */ #define SH4A_PCIEDMSAR2 (0x021180) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMSAHR2 (0x021184) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMDAR2 (0x021188) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMDAHR2 (0x02118C) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMBCNTR2 (0x021190) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMSBCNTR2 (0x021194) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMSTRR2 (0x021198) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMCCAR2 (0x02119C) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMCCR2 (0x0211A0) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMCC2R2 (0x0211A4) /* R/W R/W 0x0000 0000 - */ #define SH4A_PCIEDMCCCR2 (0x0211A8) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMSAR3 (0x0211C0) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMSAHR3 (0x0211C4) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMDAR3 (0x0211C8) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMDAHR3 (0x0211CC) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMBCNTR3 (0x0211D0) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMSBCNTR3 (0x0211D4) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMSTRR3 (0x0211D8) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMCCAR3 (0x0211DC) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMCCR3 (0x0211E0) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMCC2R3 (0x0211E4) /* R/W R/W 0x0000 0000 - */ #define SH4A_PCIEDMCCCR3 (0x0211E8) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEDMCHSR3 (0x0211EC) /* R/W R/W 0x0000 0000 32 */ #define SH4A_PCIEPCICONF0 (0x040000) /* R R - 8/16/32 */ #define SH4A_PCIEPCICONF1 (0x040004) /* R/W R/W 0x0008 0000 8/16/32 */ #define SH4A_PCIEPCICONF2 (0x040008) /* R/W R/W 0xFF00 0000 8/16/32 */ #define SH4A_PCIEPCICONF3 (0x04000C) /* R/W R/W 0x0000 0000 8/16/32 */ #define SH4A_PCIEPCICONF4 (0x040010) /* - R/W - 8/16/32 */ #define SH4A_PCIEPCICONF5 (0x040014) /* - R/W - 8/16/32 */ #define SH4A_PCIEPCICONF6 (0x040018) /* - R/W - 8/16/32 */ #define SH4A_PCIEPCICONF7 (0x04001C) /* - R/W - 8/16/32 */ #define SH4A_PCIEPCICONF8 (0x040020) /* - R/W - 8/16/32 */ #define SH4A_PCIEPCICONF9 (0x040024) /* - R/W - 8/16/32 */ #define SH4A_PCIEPCICONF10 (0x040028) /* R/W R/W 0x0000 0000 8/16/32 */ #define SH4A_PCIEPCICONF11 (0x04002C) /* R/W R/W 0x0000 0000 8/16/32 */ #define SH4A_PCIEPCICONF12 (0x040030) /* R/W R/W 0x0000 0000 8/16/32 */ #define SH4A_PCIEPCICONF13 (0x040034) /* R/W R/W 0x0000 0040 8/16/32 */ #define SH4A_PCIEPCICONF14 (0x040038) /* R/W R/W 0x0000 0000 8/16/32 */ #define SH4A_PCIEPCICONF15 (0x04003C) /* R/W R/W 0x0000 00FF 8/16/32 */ #define SH4A_PCIEPMCAP0 (0x040040) /* R/W R 0x0003 5001 8/16/32 */ #define SH4A_PCIEPMCAP1 (0x040044) /* R/W R/W 0x0000 0000 8/16/32 */ #define 
SH4A_PCIEMSICAP0 (0x040050) /* R/W R/W 0x0180 7005 8/16/32 */ #define SH4A_PCIEMSICAP1 (0x040054) /* R/W R/W 0x0000 0000 8/16/32 */ #define SH4A_PCIEMSICAP2 (0x040058) /* R/W R/W 0x0000 0000 8/16/32 */ #define SH4A_PCIEMSICAP3 (0x04005C) /* R/W R/W 0x0000 0000 8/16/32 */ #define SH4A_PCIEMSICAP4 (0x040060) /* R/W R/W 0x0000 0000 8/16/32 */ #define SH4A_PCIEMSICAP5 (0x040064) /* R/W R/W 0x0000 0000 8/16/32 */ #define SH4A_PCIEEXPCAP0 (0x040070) /* R/W R/W 0x0001 0010 8/16/32 */ #define SH4A_PCIEEXPCAP1 (0x040074) /* R/W R 0x0000 0005 8/16/32 */ #define SH4A_PCIEEXPCAP2 (0x040078) /* R/W R/W 0x0000 0801 8/16/32 */ #define SH4A_PCIEEXPCAP3 (0x04007C) /* R/W R 0x0003 F421 8/16/32 */ #define SH4A_PCIEEXPCAP4 (0x040080) /* R/W R/W 0x0041 0000 8/16/32 */ #define SH4A_PCIEEXPCAP5 (0x040084) /* R/W R/W 0x0000 0000 8/16/32 */ #define SH4A_PCIEEXPCAP6 (0x040088) /* R/W R/W 0x0000 03C0 8/16/32 */ #define SH4A_PCIEEXPCAP7 (0x04008C) /* R/W R/W 0x0000 0000 8/16/32 */ #define SH4A_PCIEEXPCAP8 (0x040090) /* R/W R/W 0x0000 0000 8/16/32 */ #define SH4A_PCIEVCCAP0 (0x040100) /* R/W R 0x1B01 0002 8/16/32 */ #define SH4A_PCIEVCCAP1 (0x040104) /* R R 0x0000 0001 8/16/32 */ #define SH4A_PCIEVCCAP2 (0x040108) /* R R 0x0000 0000 8/16/32 */ #define SH4A_PCIEVCCAP3 (0x04010C) /* R R/W 0x0000 0000 8/16/32 */ #define SH4A_PCIEVCCAP4 (0x040110) /* R/W R/W 0x0000 0000 8/16/32 */ #define SH4A_PCIEVCCAP5 (0x040114) /* R/W R/W 0x8000 00FF 8/16/32 */ #define SH4A_PCIEVCCAP6 (0x040118) /* R/W R 0x0002 0000 8/16/32 */ #define SH4A_PCIEVCCAP7 (0x04011C) /* R/W R/W 0x0000 0000 8/16/32 */ #define SH4A_PCIEVCCAP8 (0x040120) /* R/W R/W 0x0000 0000 8/16/32 */ #define SH4A_PCIEVCCAP9 (0x040124) /* R/W R 0x0002 0000 8/16/32 */ #define SH4A_PCIENUMCAP0 (0x0001B0) /* RW R 0x0001 0003 8/16/32 */ #define SH4A_PCIENUMCAP1 (0x0001B4) /* R R 0x0000 0000 8/16/32 */ #define SH4A_PCIENUMCAP2 (0x0001B8) /* R R 0x0000 0000 8/16/32 */ #define SH4A_PCIEIDSETR0 (0x041000) /* R/W R 0x0000 FFFF 16/32 */ #define SH4A_PCIEIDSETR1 (0x041004) /* R/W R 0xFF00 0000 16/32 */ #define SH4A_PCIEBAR0SETR (0x041008) /* R/W R 0x0000 0000 16/32 */ #define SH4A_PCIEBAR1SETR (0x04100C) /* R/W R 0x0000 0000 16/32 */ #define SH4A_PCIEBAR2SETR (0x041010) /* R/W R 0x0000 0000 16/32 */ #define SH4A_PCIEBAR3SETR (0x041014) /* R/W R 0x0000 0000 16/32 */ #define SH4A_PCIEBAR4SETR (0x041018) /* R/W R 0x0000 0000 16/32 */ #define SH4A_PCIEBAR5SETR (0x04101C) /* R/W R 0x0000 0000 16/32 */ #define SH4A_PCIECISSETR (0x041020) /* R/W R 0x0000 0000 16/32 */ #define SH4A_PCIEIDSETR2 (0x041024) /* R/W R 0x0000 0000 16/32 */ #define SH4A_PCIEEROMSETR (0x041028) /* R/W R 0x0000 0000 16/32 */ #define SH4A_PCIEDSERSETR0 (0x04102C) /* R/W R 0x0000 0000 16/32 */ #define SH4A_PCIEDSERSETR1 (0x041030) /* R/W R 0x0000 0000 16/32 */ #define SH4A_PCIECTLR (0x041040) /* R/W R 0x0000 0000 16/32 */ #define SH4A_PCIETLSR (0x041044) /* R/W1C R 0x0000 0000 16/32 */ #define SH4A_PCIETLCTLR (0x041048) /* R/W R 0x0000 0000 16/32 */ #define SH4A_PCIEDLSR (0x04104C) /* R/W1C R 0x4003 0000 16/32 */ #define SH4A_PCIEDLCTLR (0x041050) /* R R 0x0000 0000 16/32 */ #define SH4A_PCIEMACSR (0x041054) /* R/W1C R 0x0041 0000 16/32 */ #define SH4A_PCIEMACCTLR (0x041058) /* R/W R 0x0000 0000 16/32 */ #define PCIEMACCTLR_SCR_DIS (1 << 27) /* scramble disable */ #define SH4A_PCIEPMSTR (0x04105C) /* R/W1C R 0x0000 0000 16/32 */ #define SH4A_PCIEPMCTLR (0x041060) /* R/W R 0x0000 0000 16/32 */ #define SH4A_PCIETLINTENR (0x041064) /* R/W R 0x0000 0000 16/32 */ #define SH4A_PCIEDLINTENR (0x041068) /* R/W R 0x0000 0000 
16/32 */
#define PCIEDLINTENR_DLL_ACT_ENABLE (1 << 31)	/* DL active irq */
#define SH4A_PCIEMACINTENR	(0x04106C)	/* R/W R 0x0000 0000 16/32 */
#define SH4A_PCIEPMINTENR	(0x041070)	/* R/W R 0x0000 0000 16/32 */
#define SH4A_PCIETXDCTLR	(0x044000)	/* R/W - H'00000000_00000000 32/64 */
#define SH4A_PCIETXCTLR		(0x044020)	/* R/W - H'00000000_00000000 32/64 */
#define SH4A_PCIETXSR		(0x044028)	/* R - H'00000000_00000000 32/64 */
#define SH4A_PCIETXVC0DCTLR	(0x044100)	/* R/W - H'00000000_00000000 32/64 */
#define SH4A_PCIETXVC0SR	(0x044108)	/* R/W - H'00888000_00000000 32/64 */
#define SH4A_PCIEVC0PDTXR	(0x044110)	/* W - H'00000000_00000000 32/64 */
#define SH4A_PCIEVC0PHTXR	(0x044118)	/* W - H'00000000_00000000 32/64 */
#define SH4A_PCIEVC0NPDTXR	(0x044120)	/* W - H'00000000_00000000 32/64 */
#define SH4A_PCIEVC0NPHTXR	(0x044128)	/* W - H'00000000_00000000 32/64 */
#define SH4A_PCIEVC0CDTXR	(0x044130)	/* W - H'00000000_00000000 32/64 */
#define SH4A_PCIEVC0CHTXR	(0x044138)	/* W - H'00000000_00000000 32/64 */
#define SH4A_PCIETXVCXDCTLR	(0x044200)	/* R/W - H'00000000_00000000 32/64 */
#define SH4A_PCIETXVCXSR	(0x044208)	/* R/W - H'00000000_00000000 32/64 */
#define SH4A_PCIEVCXPDTXR	(0x044210)	/* W - H'00000000_00000000 32/64 */
#define SH4A_PCIEVCXPHTXR	(0x044218)	/* W - H'00000000_00000000 32/64 */
#define SH4A_PCIEVCXNPDTXR	(0x044220)	/* W - H'00000000_00000000 32/64 */
#define SH4A_PCIEVCXNPHTXR	(0x044228)	/* W - H'00000000_00000000 32/64 */
#define SH4A_PCIEVCXCDTXR	(0x044230)	/* W - H'00000000_00000000 32/64 */
#define SH4A_PCIEVCXCHTXR	(0x044238)	/* W - H'00000000_00000000 32/64 */
#define SH4A_PCIERDCTLR		(0x046000)	/* RW - H'00000000_00000000 32/64 */
#define SH4A_PCIEERPCTLR	(0x046008)	/* RW - H'00000000_00000000 32/64 */
#define SH4A_PCIEERPHR		(0x046010)	/* R - H'00000000_00000000 32/64 */
#define SH4A_PCIEERPERR		(0x046018)	/* R - H'00000000_00000000 32/64 */
#define SH4A_PCIERXVC0DCTLR	(0x046100)	/* RW - H'00000000_00000000 32/64 */
#define SH4A_PCIERXVC0SR	(0x046108)	/* RW - H'00000000_00000000 32/64 */
#define SH4A_PCIEVC0PDRXR	(0x046140)	/* R - H'00000000_00000000 32/64 */
#define SH4A_PCIEVC0PHRXR	(0x046148)	/* R - H'00000000_00000000 32/64 */
#define SH4A_PCIEVC0PERR	(0x046150)	/* R - H'00000000_00000000 32/64 */
#define SH4A_PCIEVC0NPDRXR	(0x046158)	/* R - H'00000000_00000000 32/64 */
#define SH4A_PCIEVC0NPHRXR	(0x046160)	/* R - H'00000000_00000000 32/64 */
#define SH4A_PCIEVC0NPERR	(0x046168)	/* R - H'00000000_00000000 32/64 */
#define SH4A_PCIEVC0CDRXR	(0x046170)	/* R - H'00000000_00000000 32/64 */
#define SH4A_PCIEVC0CHRXR	(0x046178)	/* R - H'00000000_00000000 32/64 */
#define SH4A_PCIEVC0CERR	(0x046180)	/* R - H'00000000_00000000 32/64 */
#define SH4A_PCIERXVCXDCTLR	(0x046200)	/* RW - H'00000000_00000000 32/64 */
#define SH4A_PCIERXVCXSR	(0x046208)	/* RW - H'00000000_00000000 32/64 */
#define SH4A_PCIEVCXPDRXR	(0x046240)	/* R - H'00000000_00000000 32/64 */
#define SH4A_PCIEVCXPHRXR	(0x046248)	/* R H'00000000_00000000 32/64 */
#define SH4A_PCIEVCXPERR	(0x046250)	/* R H'00000000_00000000 32/64 */
#define SH4A_PCIEVCXNPDRXR	(0x046258)	/* R H'00000000_00000000 32/64 */
#define SH4A_PCIEVCXNPHRXR	(0x046260)	/* R H'00000000_00000000 32/64 */
#define SH4A_PCIEVCXNPERR	(0x046268)	/* R H'00000000_00000000 32/64 */
#define SH4A_PCIEVCXCDRXR	(0x046270)	/* R H'00000000_00000000 32/64 */
#define SH4A_PCIEVCXCHRXR	(0x046278)	/* R H'00000000_00000000 32/64 */
#define SH4A_PCIEVCXCERR	(0x046280)	/* R H'00000000_00000000 32/64 */

/* SSI Register Definition for MSI WORK AROUND --hamada */
#define SH4A_PCI_SSI_BASE	0xFFE00000	/* spw config address */
#define SH4A_PCI_SSI_BASE_LEN	0x00100000	/* 1MB */
#define SH4A_SSICR0		(0x000000)
#define SH4A_SSICR1		(0x010000)
#define SH4A_SSICR2		(0x020000)
#define SH4A_SSICR3		(0x030000)

#define PCI_REG(x)	((x) + 0x40000)

static inline void
pci_write_reg(struct pci_channel *chan, unsigned long val, unsigned long reg)
{
	__raw_writel(val, chan->reg_base + reg);
}

static inline unsigned long
pci_read_reg(struct pci_channel *chan, unsigned long reg)
{
	return __raw_readl(chan->reg_base + reg);
}

#endif /* __PCI_SH7786_H */
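/*
 * Illustrative sketch (not part of the original header): programming one
 * outbound address-translation window with the PCIEPARL/PCIEPAMR/PCIEPTCTLR
 * registers and the pci_write_reg() helper above. The window index, base
 * address, and size are made-up values; the 256 KiB mask granularity is
 * inferred from BITS_PAM, and leaving SPC clear to select memory space is
 * an assumption based on the bit name only.
 */
static inline void sh7786_pcie_window_sketch(struct pci_channel *chan)
{
	unsigned long base = 0xfd000000UL;	/* hypothetical CPU-side base */
	unsigned long size = 0x01000000UL;	/* hypothetical 16 MiB window */

	pci_write_reg(chan, base & MASK_PAL, SH4A_PCIEPARL(0));
	pci_write_reg(chan, 0, SH4A_PCIEPARH(0));	/* upper address bits */
	pci_write_reg(chan, ((size >> BITS_PAM) - 1) << BITS_PAM,
		      SH4A_PCIEPAMR(0));		/* size mask (assumed) */
	pci_write_reg(chan, MASK_PARE, SH4A_PCIEPTCTLR(0));	/* enable */
}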
/* * segment.c - NILFS segment constructor. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Written by Ryusuke Konishi. * */ #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include <linux/bitops.h> #include <linux/bio.h> #include <linux/completion.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/freezer.h> #include <linux/kthread.h> #include <linux/crc32.h> #include <linux/pagevec.h> #include <linux/slab.h> #include <linux/sched/signal.h> #include "nilfs.h" #include "btnode.h" #include "page.h" #include "segment.h" #include "sufile.h" #include "cpfile.h" #include "ifile.h" #include "segbuf.h" /* * Segment constructor */ #define SC_N_INODEVEC 16 /* Size of locally allocated inode vector */ #define SC_MAX_SEGDELTA 64 /* * Upper limit of the number of segments * appended in collection retry loop */ /* Construction mode */ enum { SC_LSEG_SR = 1, /* Make a logical segment having a super root */ SC_LSEG_DSYNC, /* * Flush data blocks of a given file and make * a logical segment without a super root. */ SC_FLUSH_FILE, /* * Flush data files, leads to segment writes without * creating a checkpoint. */ SC_FLUSH_DAT, /* * Flush DAT file. This also creates segments * without a checkpoint. */ }; /* Stage numbers of dirty block collection */ enum { NILFS_ST_INIT = 0, NILFS_ST_GC, /* Collecting dirty blocks for GC */ NILFS_ST_FILE, NILFS_ST_IFILE, NILFS_ST_CPFILE, NILFS_ST_SUFILE, NILFS_ST_DAT, NILFS_ST_SR, /* Super root */ NILFS_ST_DSYNC, /* Data sync blocks */ NILFS_ST_DONE, }; #define CREATE_TRACE_POINTS #include <trace/events/nilfs2.h> /* * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), nilfs_sc_cstage_get() are * wrapper functions of stage count (nilfs_sc_info->sc_stage.scnt). Users of * the variable must use them because transition of stage count must involve * trace events (trace_nilfs2_collection_stage_transition). * * nilfs_sc_cstage_get() isn't required for the above purpose because it doesn't * produce tracepoint events. It is provided just for making the intention * clear. 
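 *
 * For example, advancing the collection stage from NILFS_ST_FILE to
 * NILFS_ST_IFILE must be written as nilfs_sc_cstage_inc(sci) rather than
 * sci->sc_stage.scnt++, so that the transition is reported through
 * trace_nilfs2_collection_stage_transition().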
*/ static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci) { sci->sc_stage.scnt++; trace_nilfs2_collection_stage_transition(sci); } static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt) { sci->sc_stage.scnt = next_scnt; trace_nilfs2_collection_stage_transition(sci); } static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci) { return sci->sc_stage.scnt; } /* State flags of collection */ #define NILFS_CF_NODE 0x0001 /* Collecting node blocks */ #define NILFS_CF_IFILE_STARTED 0x0002 /* IFILE stage has started */ #define NILFS_CF_SUFREED 0x0004 /* segment usages has been freed */ #define NILFS_CF_HISTORY_MASK (NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED) /* Operations depending on the construction mode and file type */ struct nilfs_sc_operations { int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *, struct inode *); int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *, struct inode *); int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *, struct inode *); void (*write_data_binfo)(struct nilfs_sc_info *, struct nilfs_segsum_pointer *, union nilfs_binfo *); void (*write_node_binfo)(struct nilfs_sc_info *, struct nilfs_segsum_pointer *, union nilfs_binfo *); }; /* * Other definitions */ static void nilfs_segctor_start_timer(struct nilfs_sc_info *); static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int); static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *); static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int); #define nilfs_cnt32_gt(a, b) \ (typecheck(__u32, a) && typecheck(__u32, b) && \ ((__s32)(b) - (__s32)(a) < 0)) #define nilfs_cnt32_ge(a, b) \ (typecheck(__u32, a) && typecheck(__u32, b) && \ ((__s32)(a) - (__s32)(b) >= 0)) #define nilfs_cnt32_lt(a, b) nilfs_cnt32_gt(b, a) #define nilfs_cnt32_le(a, b) nilfs_cnt32_ge(b, a) static int nilfs_prepare_segment_lock(struct super_block *sb, struct nilfs_transaction_info *ti) { struct nilfs_transaction_info *cur_ti = current->journal_info; void *save = NULL; if (cur_ti) { if (cur_ti->ti_magic == NILFS_TI_MAGIC) return ++cur_ti->ti_count; /* * If journal_info field is occupied by other FS, * it is saved and will be restored on * nilfs_transaction_commit(). */ nilfs_msg(sb, KERN_WARNING, "journal info from a different FS"); save = current->journal_info; } if (!ti) { ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS); if (!ti) return -ENOMEM; ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC; } else { ti->ti_flags = 0; } ti->ti_count = 0; ti->ti_save = save; ti->ti_magic = NILFS_TI_MAGIC; current->journal_info = ti; return 0; } /** * nilfs_transaction_begin - start indivisible file operations. * @sb: super block * @ti: nilfs_transaction_info * @vacancy_check: flags for vacancy rate checks * * nilfs_transaction_begin() acquires a reader/writer semaphore, called * the segment semaphore, to make a segment construction and write tasks * exclusive. The function is used with nilfs_transaction_commit() in pairs. * The region enclosed by these two functions can be nested. To avoid a * deadlock, the semaphore is only acquired or released in the outermost call. * * This function allocates a nilfs_transaction_info struct to keep context * information on it. It is initialized and hooked onto the current task in * the outermost call. If a pre-allocated struct is given to @ti, it is used * instead; otherwise a new struct is assigned from a slab. 
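 *
 * An illustrative call sequence, with vacancy checking enabled, is:
 *
 *	struct nilfs_transaction_info ti;
 *
 *	ret = nilfs_transaction_begin(sb, &ti, 1);
 *	if (!ret) {
 *		... modify blocks ...
 *		ret = nilfs_transaction_commit(sb);
 *	}
 *
 * with nilfs_transaction_abort() used instead of the commit when the
 * enclosed operations fail.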
* * When @vacancy_check flag is set, this function will check the amount of * free space, and will wait for the GC to reclaim disk space if low capacity. * * Return Value: On success, 0 is returned. On error, one of the following * negative error code is returned. * * %-ENOMEM - Insufficient memory available. * * %-ENOSPC - No space left on device */ int nilfs_transaction_begin(struct super_block *sb, struct nilfs_transaction_info *ti, int vacancy_check) { struct the_nilfs *nilfs; int ret = nilfs_prepare_segment_lock(sb, ti); struct nilfs_transaction_info *trace_ti; if (unlikely(ret < 0)) return ret; if (ret > 0) { trace_ti = current->journal_info; trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count, trace_ti->ti_flags, TRACE_NILFS2_TRANSACTION_BEGIN); return 0; } sb_start_intwrite(sb); nilfs = sb->s_fs_info; down_read(&nilfs->ns_segctor_sem); if (vacancy_check && nilfs_near_disk_full(nilfs)) { up_read(&nilfs->ns_segctor_sem); ret = -ENOSPC; goto failed; } trace_ti = current->journal_info; trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count, trace_ti->ti_flags, TRACE_NILFS2_TRANSACTION_BEGIN); return 0; failed: ti = current->journal_info; current->journal_info = ti->ti_save; if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC) kmem_cache_free(nilfs_transaction_cachep, ti); sb_end_intwrite(sb); return ret; } /** * nilfs_transaction_commit - commit indivisible file operations. * @sb: super block * * nilfs_transaction_commit() releases the read semaphore which is * acquired by nilfs_transaction_begin(). This is only performed * in outermost call of this function. If a commit flag is set, * nilfs_transaction_commit() sets a timer to start the segment * constructor. If a sync flag is set, it starts construction * directly. */ int nilfs_transaction_commit(struct super_block *sb) { struct nilfs_transaction_info *ti = current->journal_info; struct the_nilfs *nilfs = sb->s_fs_info; int err = 0; BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC); ti->ti_flags |= NILFS_TI_COMMIT; if (ti->ti_count > 0) { ti->ti_count--; trace_nilfs2_transaction_transition(sb, ti, ti->ti_count, ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT); return 0; } if (nilfs->ns_writer) { struct nilfs_sc_info *sci = nilfs->ns_writer; if (ti->ti_flags & NILFS_TI_COMMIT) nilfs_segctor_start_timer(sci); if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark) nilfs_segctor_do_flush(sci, 0); } up_read(&nilfs->ns_segctor_sem); trace_nilfs2_transaction_transition(sb, ti, ti->ti_count, ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT); current->journal_info = ti->ti_save; if (ti->ti_flags & NILFS_TI_SYNC) err = nilfs_construct_segment(sb); if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC) kmem_cache_free(nilfs_transaction_cachep, ti); sb_end_intwrite(sb); return err; } void nilfs_transaction_abort(struct super_block *sb) { struct nilfs_transaction_info *ti = current->journal_info; struct the_nilfs *nilfs = sb->s_fs_info; BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC); if (ti->ti_count > 0) { ti->ti_count--; trace_nilfs2_transaction_transition(sb, ti, ti->ti_count, ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT); return; } up_read(&nilfs->ns_segctor_sem); trace_nilfs2_transaction_transition(sb, ti, ti->ti_count, ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT); current->journal_info = ti->ti_save; if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC) kmem_cache_free(nilfs_transaction_cachep, ti); sb_end_intwrite(sb); } void nilfs_relax_pressure_in_lock(struct super_block *sb) { struct the_nilfs *nilfs = sb->s_fs_info; struct 
nilfs_sc_info *sci = nilfs->ns_writer; if (!sci || !sci->sc_flush_request) return; set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags); up_read(&nilfs->ns_segctor_sem); down_write(&nilfs->ns_segctor_sem); if (sci->sc_flush_request && test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) { struct nilfs_transaction_info *ti = current->journal_info; ti->ti_flags |= NILFS_TI_WRITER; nilfs_segctor_do_immediate_flush(sci); ti->ti_flags &= ~NILFS_TI_WRITER; } downgrade_write(&nilfs->ns_segctor_sem); } static void nilfs_transaction_lock(struct super_block *sb, struct nilfs_transaction_info *ti, int gcflag) { struct nilfs_transaction_info *cur_ti = current->journal_info; struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_sc_info *sci = nilfs->ns_writer; WARN_ON(cur_ti); ti->ti_flags = NILFS_TI_WRITER; ti->ti_count = 0; ti->ti_save = cur_ti; ti->ti_magic = NILFS_TI_MAGIC; current->journal_info = ti; for (;;) { trace_nilfs2_transaction_transition(sb, ti, ti->ti_count, ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK); down_write(&nilfs->ns_segctor_sem); if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) break; nilfs_segctor_do_immediate_flush(sci); up_write(&nilfs->ns_segctor_sem); cond_resched(); } if (gcflag) ti->ti_flags |= NILFS_TI_GC; trace_nilfs2_transaction_transition(sb, ti, ti->ti_count, ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK); } static void nilfs_transaction_unlock(struct super_block *sb) { struct nilfs_transaction_info *ti = current->journal_info; struct the_nilfs *nilfs = sb->s_fs_info; BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC); BUG_ON(ti->ti_count > 0); up_write(&nilfs->ns_segctor_sem); current->journal_info = ti->ti_save; trace_nilfs2_transaction_transition(sb, ti, ti->ti_count, ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK); } static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci, struct nilfs_segsum_pointer *ssp, unsigned int bytes) { struct nilfs_segment_buffer *segbuf = sci->sc_curseg; unsigned int blocksize = sci->sc_super->s_blocksize; void *p; if (unlikely(ssp->offset + bytes > blocksize)) { ssp->offset = 0; BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh, &segbuf->sb_segsum_buffers)); ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh); } p = ssp->bh->b_data + ssp->offset; ssp->offset += bytes; return p; } /** * nilfs_segctor_reset_segment_buffer - reset the current segment buffer * @sci: nilfs_sc_info */ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci) { struct nilfs_segment_buffer *segbuf = sci->sc_curseg; struct buffer_head *sumbh; unsigned int sumbytes; unsigned int flags = 0; int err; if (nilfs_doing_gc()) flags = NILFS_SS_GC; err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno); if (unlikely(err)) return err; sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers); sumbytes = segbuf->sb_sum.sumbytes; sci->sc_finfo_ptr.bh = sumbh; sci->sc_finfo_ptr.offset = sumbytes; sci->sc_binfo_ptr.bh = sumbh; sci->sc_binfo_ptr.offset = sumbytes; sci->sc_blk_cnt = sci->sc_datablk_cnt = 0; return 0; } static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci) { sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks; if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs)) return -E2BIG; /* * The current segment is filled up * (internal code) */ sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg); return nilfs_segctor_reset_segment_buffer(sci); } static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci) { struct nilfs_segment_buffer *segbuf = sci->sc_curseg; int err; if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) { err 
= nilfs_segctor_feed_segment(sci); if (err) return err; segbuf = sci->sc_curseg; } err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root); if (likely(!err)) segbuf->sb_sum.flags |= NILFS_SS_SR; return err; } /* * Functions for making segment summary and payloads */ static int nilfs_segctor_segsum_block_required( struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp, unsigned int binfo_size) { unsigned int blocksize = sci->sc_super->s_blocksize; /* Size of finfo and binfo is enough small against blocksize */ return ssp->offset + binfo_size + (!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) > blocksize; } static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci, struct inode *inode) { sci->sc_curseg->sb_sum.nfinfo++; sci->sc_binfo_ptr = sci->sc_finfo_ptr; nilfs_segctor_map_segsum_entry( sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo)); if (NILFS_I(inode)->i_root && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); /* skip finfo */ } static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci, struct inode *inode) { struct nilfs_finfo *finfo; struct nilfs_inode_info *ii; struct nilfs_segment_buffer *segbuf; __u64 cno; if (sci->sc_blk_cnt == 0) return; ii = NILFS_I(inode); if (test_bit(NILFS_I_GCINODE, &ii->i_state)) cno = ii->i_cno; else if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) cno = 0; else cno = sci->sc_cno; finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr, sizeof(*finfo)); finfo->fi_ino = cpu_to_le64(inode->i_ino); finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt); finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt); finfo->fi_cno = cpu_to_le64(cno); segbuf = sci->sc_curseg; segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset + sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1); sci->sc_finfo_ptr = sci->sc_binfo_ptr; sci->sc_blk_cnt = sci->sc_datablk_cnt = 0; } static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci, struct buffer_head *bh, struct inode *inode, unsigned int binfo_size) { struct nilfs_segment_buffer *segbuf; int required, err = 0; retry: segbuf = sci->sc_curseg; required = nilfs_segctor_segsum_block_required( sci, &sci->sc_binfo_ptr, binfo_size); if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) { nilfs_segctor_end_finfo(sci, inode); err = nilfs_segctor_feed_segment(sci); if (err) return err; goto retry; } if (unlikely(required)) { err = nilfs_segbuf_extend_segsum(segbuf); if (unlikely(err)) goto failed; } if (sci->sc_blk_cnt == 0) nilfs_segctor_begin_finfo(sci, inode); nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size); /* Substitution to vblocknr is delayed until update_blocknr() */ nilfs_segbuf_add_file_buffer(segbuf, bh); sci->sc_blk_cnt++; failed: return err; } /* * Callback functions that enumerate, mark, and collect dirty blocks */ static int nilfs_collect_file_data(struct nilfs_sc_info *sci, struct buffer_head *bh, struct inode *inode) { int err; err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh); if (err < 0) return err; err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(struct nilfs_binfo_v)); if (!err) sci->sc_datablk_cnt++; return err; } static int nilfs_collect_file_node(struct nilfs_sc_info *sci, struct buffer_head *bh, struct inode *inode) { return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh); } static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci, struct buffer_head *bh, struct inode *inode) { WARN_ON(!buffer_dirty(bh)); return nilfs_segctor_add_file_block(sci, bh, inode, 
sizeof(__le64)); } static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci, struct nilfs_segsum_pointer *ssp, union nilfs_binfo *binfo) { struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry( sci, ssp, sizeof(*binfo_v)); *binfo_v = binfo->bi_v; } static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci, struct nilfs_segsum_pointer *ssp, union nilfs_binfo *binfo) { __le64 *vblocknr = nilfs_segctor_map_segsum_entry( sci, ssp, sizeof(*vblocknr)); *vblocknr = binfo->bi_v.bi_vblocknr; } static const struct nilfs_sc_operations nilfs_sc_file_ops = { .collect_data = nilfs_collect_file_data, .collect_node = nilfs_collect_file_node, .collect_bmap = nilfs_collect_file_bmap, .write_data_binfo = nilfs_write_file_data_binfo, .write_node_binfo = nilfs_write_file_node_binfo, }; static int nilfs_collect_dat_data(struct nilfs_sc_info *sci, struct buffer_head *bh, struct inode *inode) { int err; err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh); if (err < 0) return err; err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64)); if (!err) sci->sc_datablk_cnt++; return err; } static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci, struct buffer_head *bh, struct inode *inode) { WARN_ON(!buffer_dirty(bh)); return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(struct nilfs_binfo_dat)); } static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci, struct nilfs_segsum_pointer *ssp, union nilfs_binfo *binfo) { __le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*blkoff)); *blkoff = binfo->bi_dat.bi_blkoff; } static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci, struct nilfs_segsum_pointer *ssp, union nilfs_binfo *binfo) { struct nilfs_binfo_dat *binfo_dat = nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat)); *binfo_dat = binfo->bi_dat; } static const struct nilfs_sc_operations nilfs_sc_dat_ops = { .collect_data = nilfs_collect_dat_data, .collect_node = nilfs_collect_file_node, .collect_bmap = nilfs_collect_dat_bmap, .write_data_binfo = nilfs_write_dat_data_binfo, .write_node_binfo = nilfs_write_dat_node_binfo, }; static const struct nilfs_sc_operations nilfs_sc_dsync_ops = { .collect_data = nilfs_collect_file_data, .collect_node = NULL, .collect_bmap = NULL, .write_data_binfo = nilfs_write_file_data_binfo, .write_node_binfo = NULL, }; static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode, struct list_head *listp, size_t nlimit, loff_t start, loff_t end) { struct address_space *mapping = inode->i_mapping; struct pagevec pvec; pgoff_t index = 0, last = ULONG_MAX; size_t ndirties = 0; int i; if (unlikely(start != 0 || end != LLONG_MAX)) { /* * A valid range is given for sync-ing data pages. The * range is rounded to per-page; extra dirty buffers * may be included if blocksize < pagesize. 
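 * (Illustrative arithmetic: with 1 KiB blocks on 4 KiB pages, up to
 * three of the four buffers on a boundary page may lie outside the
 * requested byte range and still be collected.)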
*/ index = start >> PAGE_SHIFT; last = end >> PAGE_SHIFT; } pagevec_init(&pvec, 0); repeat: if (unlikely(index > last) || !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY, min_t(pgoff_t, last - index, PAGEVEC_SIZE - 1) + 1)) return ndirties; for (i = 0; i < pagevec_count(&pvec); i++) { struct buffer_head *bh, *head; struct page *page = pvec.pages[i]; if (unlikely(page->index > last)) break; lock_page(page); if (!page_has_buffers(page)) create_empty_buffers(page, i_blocksize(inode), 0); unlock_page(page); bh = head = page_buffers(page); do { if (!buffer_dirty(bh) || buffer_async_write(bh)) continue; get_bh(bh); list_add_tail(&bh->b_assoc_buffers, listp); ndirties++; if (unlikely(ndirties >= nlimit)) { pagevec_release(&pvec); cond_resched(); return ndirties; } } while (bh = bh->b_this_page, bh != head); } pagevec_release(&pvec); cond_resched(); goto repeat; } static void nilfs_lookup_dirty_node_buffers(struct inode *inode, struct list_head *listp) { struct nilfs_inode_info *ii = NILFS_I(inode); struct address_space *mapping = &ii->i_btnode_cache; struct pagevec pvec; struct buffer_head *bh, *head; unsigned int i; pgoff_t index = 0; pagevec_init(&pvec, 0); while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE)) { for (i = 0; i < pagevec_count(&pvec); i++) { bh = head = page_buffers(pvec.pages[i]); do { if (buffer_dirty(bh) && !buffer_async_write(bh)) { get_bh(bh); list_add_tail(&bh->b_assoc_buffers, listp); } bh = bh->b_this_page; } while (bh != head); } pagevec_release(&pvec); cond_resched(); } } static void nilfs_dispose_list(struct the_nilfs *nilfs, struct list_head *head, int force) { struct nilfs_inode_info *ii, *n; struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii; unsigned int nv = 0; while (!list_empty(head)) { spin_lock(&nilfs->ns_inode_lock); list_for_each_entry_safe(ii, n, head, i_dirty) { list_del_init(&ii->i_dirty); if (force) { if (unlikely(ii->i_bh)) { brelse(ii->i_bh); ii->i_bh = NULL; } } else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) { set_bit(NILFS_I_QUEUED, &ii->i_state); list_add_tail(&ii->i_dirty, &nilfs->ns_dirty_files); continue; } ivec[nv++] = ii; if (nv == SC_N_INODEVEC) break; } spin_unlock(&nilfs->ns_inode_lock); for (pii = ivec; nv > 0; pii++, nv--) iput(&(*pii)->vfs_inode); } } static void nilfs_iput_work_func(struct work_struct *work) { struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info, sc_iput_work); struct the_nilfs *nilfs = sci->sc_super->s_fs_info; nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0); } static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs, struct nilfs_root *root) { int ret = 0; if (nilfs_mdt_fetch_dirty(root->ifile)) ret++; if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile)) ret++; if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile)) ret++; if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat)) ret++; return ret; } static int nilfs_segctor_clean(struct nilfs_sc_info *sci) { return list_empty(&sci->sc_dirty_files) && !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) && sci->sc_nfreesegs == 0 && (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes)); } static int nilfs_segctor_confirm(struct nilfs_sc_info *sci) { struct the_nilfs *nilfs = sci->sc_super->s_fs_info; int ret = 0; if (nilfs_test_metadata_dirty(nilfs, sci->sc_root)) set_bit(NILFS_SC_DIRTY, &sci->sc_flags); spin_lock(&nilfs->ns_inode_lock); if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci)) ret++; spin_unlock(&nilfs->ns_inode_lock); return ret; } static void nilfs_segctor_clear_metadata_dirty(struct 
nilfs_sc_info *sci) { struct the_nilfs *nilfs = sci->sc_super->s_fs_info; nilfs_mdt_clear_dirty(sci->sc_root->ifile); nilfs_mdt_clear_dirty(nilfs->ns_cpfile); nilfs_mdt_clear_dirty(nilfs->ns_sufile); nilfs_mdt_clear_dirty(nilfs->ns_dat); } static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci) { struct the_nilfs *nilfs = sci->sc_super->s_fs_info; struct buffer_head *bh_cp; struct nilfs_checkpoint *raw_cp; int err; /* XXX: this interface will be changed */ err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1, &raw_cp, &bh_cp); if (likely(!err)) { /* * The following code is duplicated with cpfile. But, it is * needed to collect the checkpoint even if it was not newly * created. */ mark_buffer_dirty(bh_cp); nilfs_mdt_mark_dirty(nilfs->ns_cpfile); nilfs_cpfile_put_checkpoint( nilfs->ns_cpfile, nilfs->ns_cno, bh_cp); } else WARN_ON(err == -EINVAL || err == -ENOENT); return err; } static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci) { struct the_nilfs *nilfs = sci->sc_super->s_fs_info; struct buffer_head *bh_cp; struct nilfs_checkpoint *raw_cp; int err; err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0, &raw_cp, &bh_cp); if (unlikely(err)) { WARN_ON(err == -EINVAL || err == -ENOENT); goto failed_ibh; } raw_cp->cp_snapshot_list.ssl_next = 0; raw_cp->cp_snapshot_list.ssl_prev = 0; raw_cp->cp_inodes_count = cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count)); raw_cp->cp_blocks_count = cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count)); raw_cp->cp_nblk_inc = cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc); raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime); raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno); if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) nilfs_checkpoint_clear_minor(raw_cp); else nilfs_checkpoint_set_minor(raw_cp); nilfs_write_inode_common(sci->sc_root->ifile, &raw_cp->cp_ifile_inode, 1); nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp); return 0; failed_ibh: return err; } static void nilfs_fill_in_file_bmap(struct inode *ifile, struct nilfs_inode_info *ii) { struct buffer_head *ibh; struct nilfs_inode *raw_inode; if (test_bit(NILFS_I_BMAP, &ii->i_state)) { ibh = ii->i_bh; BUG_ON(!ibh); raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino, ibh); nilfs_bmap_write(ii->i_bmap, raw_inode); nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh); } } static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci) { struct nilfs_inode_info *ii; list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) { nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii); set_bit(NILFS_I_COLLECTED, &ii->i_state); } } static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci, struct the_nilfs *nilfs) { struct buffer_head *bh_sr; struct nilfs_super_root *raw_sr; unsigned int isz, srsz; bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root; raw_sr = (struct nilfs_super_root *)bh_sr->b_data; isz = nilfs->ns_inode_size; srsz = NILFS_SR_BYTES(isz); raw_sr->sr_bytes = cpu_to_le16(srsz); raw_sr->sr_nongc_ctime = cpu_to_le64(nilfs_doing_gc() ? 
nilfs->ns_nongc_ctime : sci->sc_seg_ctime); raw_sr->sr_flags = 0; nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr + NILFS_SR_DAT_OFFSET(isz), 1); nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr + NILFS_SR_CPFILE_OFFSET(isz), 1); nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr + NILFS_SR_SUFILE_OFFSET(isz), 1); memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz); } static void nilfs_redirty_inodes(struct list_head *head) { struct nilfs_inode_info *ii; list_for_each_entry(ii, head, i_dirty) { if (test_bit(NILFS_I_COLLECTED, &ii->i_state)) clear_bit(NILFS_I_COLLECTED, &ii->i_state); } } static void nilfs_drop_collected_inodes(struct list_head *head) { struct nilfs_inode_info *ii; list_for_each_entry(ii, head, i_dirty) { if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state)) continue; clear_bit(NILFS_I_INODE_SYNC, &ii->i_state); set_bit(NILFS_I_UPDATED, &ii->i_state); } } static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci, struct inode *inode, struct list_head *listp, int (*collect)(struct nilfs_sc_info *, struct buffer_head *, struct inode *)) { struct buffer_head *bh, *n; int err = 0; if (collect) { list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) { list_del_init(&bh->b_assoc_buffers); err = collect(sci, bh, inode); brelse(bh); if (unlikely(err)) goto dispose_buffers; } return 0; } dispose_buffers: while (!list_empty(listp)) { bh = list_first_entry(listp, struct buffer_head, b_assoc_buffers); list_del_init(&bh->b_assoc_buffers); brelse(bh); } return err; } static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci) { /* Remaining number of blocks within segment buffer */ return sci->sc_segbuf_nblocks - (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks); } static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci, struct inode *inode, const struct nilfs_sc_operations *sc_ops) { LIST_HEAD(data_buffers); LIST_HEAD(node_buffers); int err; if (!(sci->sc_stage.flags & NILFS_CF_NODE)) { size_t n, rest = nilfs_segctor_buffer_rest(sci); n = nilfs_lookup_dirty_data_buffers( inode, &data_buffers, rest + 1, 0, LLONG_MAX); if (n > rest) { err = nilfs_segctor_apply_buffers( sci, inode, &data_buffers, sc_ops->collect_data); BUG_ON(!err); /* always receive -E2BIG or true error */ goto break_or_fail; } } nilfs_lookup_dirty_node_buffers(inode, &node_buffers); if (!(sci->sc_stage.flags & NILFS_CF_NODE)) { err = nilfs_segctor_apply_buffers( sci, inode, &data_buffers, sc_ops->collect_data); if (unlikely(err)) { /* dispose node list */ nilfs_segctor_apply_buffers( sci, inode, &node_buffers, NULL); goto break_or_fail; } sci->sc_stage.flags |= NILFS_CF_NODE; } /* Collect node */ err = nilfs_segctor_apply_buffers( sci, inode, &node_buffers, sc_ops->collect_node); if (unlikely(err)) goto break_or_fail; nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers); err = nilfs_segctor_apply_buffers( sci, inode, &node_buffers, sc_ops->collect_bmap); if (unlikely(err)) goto break_or_fail; nilfs_segctor_end_finfo(sci, inode); sci->sc_stage.flags &= ~NILFS_CF_NODE; break_or_fail: return err; } static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci, struct inode *inode) { LIST_HEAD(data_buffers); size_t n, rest = nilfs_segctor_buffer_rest(sci); int err; n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1, sci->sc_dsync_start, sci->sc_dsync_end); err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers, nilfs_collect_file_data); if (!err) { nilfs_segctor_end_finfo(sci, inode); BUG_ON(n > rest); /* always 
receive -E2BIG or true error if n > rest */ } return err; } static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) { struct the_nilfs *nilfs = sci->sc_super->s_fs_info; struct list_head *head; struct nilfs_inode_info *ii; size_t ndone; int err = 0; switch (nilfs_sc_cstage_get(sci)) { case NILFS_ST_INIT: /* Pre-processes */ sci->sc_stage.flags = 0; if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) { sci->sc_nblk_inc = 0; sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN; if (mode == SC_LSEG_DSYNC) { nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC); goto dsync_mode; } } sci->sc_stage.dirty_file_ptr = NULL; sci->sc_stage.gc_inode_ptr = NULL; if (mode == SC_FLUSH_DAT) { nilfs_sc_cstage_set(sci, NILFS_ST_DAT); goto dat_stage; } nilfs_sc_cstage_inc(sci); /* Fall through */ case NILFS_ST_GC: if (nilfs_doing_gc()) { head = &sci->sc_gc_inodes; ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr, head, i_dirty); list_for_each_entry_continue(ii, head, i_dirty) { err = nilfs_segctor_scan_file( sci, &ii->vfs_inode, &nilfs_sc_file_ops); if (unlikely(err)) { sci->sc_stage.gc_inode_ptr = list_entry( ii->i_dirty.prev, struct nilfs_inode_info, i_dirty); goto break_or_fail; } set_bit(NILFS_I_COLLECTED, &ii->i_state); } sci->sc_stage.gc_inode_ptr = NULL; } nilfs_sc_cstage_inc(sci); /* Fall through */ case NILFS_ST_FILE: head = &sci->sc_dirty_files; ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head, i_dirty); list_for_each_entry_continue(ii, head, i_dirty) { clear_bit(NILFS_I_DIRTY, &ii->i_state); err = nilfs_segctor_scan_file(sci, &ii->vfs_inode, &nilfs_sc_file_ops); if (unlikely(err)) { sci->sc_stage.dirty_file_ptr = list_entry(ii->i_dirty.prev, struct nilfs_inode_info, i_dirty); goto break_or_fail; } /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */ /* XXX: required ? 
*/ } sci->sc_stage.dirty_file_ptr = NULL; if (mode == SC_FLUSH_FILE) { nilfs_sc_cstage_set(sci, NILFS_ST_DONE); return 0; } nilfs_sc_cstage_inc(sci); sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED; /* Fall through */ case NILFS_ST_IFILE: err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile, &nilfs_sc_file_ops); if (unlikely(err)) break; nilfs_sc_cstage_inc(sci); /* Creating a checkpoint */ err = nilfs_segctor_create_checkpoint(sci); if (unlikely(err)) break; /* Fall through */ case NILFS_ST_CPFILE: err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile, &nilfs_sc_file_ops); if (unlikely(err)) break; nilfs_sc_cstage_inc(sci); /* Fall through */ case NILFS_ST_SUFILE: err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs, sci->sc_nfreesegs, &ndone); if (unlikely(err)) { nilfs_sufile_cancel_freev(nilfs->ns_sufile, sci->sc_freesegs, ndone, NULL); break; } sci->sc_stage.flags |= NILFS_CF_SUFREED; err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile, &nilfs_sc_file_ops); if (unlikely(err)) break; nilfs_sc_cstage_inc(sci); /* Fall through */ case NILFS_ST_DAT: dat_stage: err = nilfs_segctor_scan_file(sci, nilfs->ns_dat, &nilfs_sc_dat_ops); if (unlikely(err)) break; if (mode == SC_FLUSH_DAT) { nilfs_sc_cstage_set(sci, NILFS_ST_DONE); return 0; } nilfs_sc_cstage_inc(sci); /* Fall through */ case NILFS_ST_SR: if (mode == SC_LSEG_SR) { /* Appending a super root */ err = nilfs_segctor_add_super_root(sci); if (unlikely(err)) break; } /* End of a logical segment */ sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND; nilfs_sc_cstage_set(sci, NILFS_ST_DONE); return 0; case NILFS_ST_DSYNC: dsync_mode: sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT; ii = sci->sc_dsync_inode; if (!test_bit(NILFS_I_BUSY, &ii->i_state)) break; err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode); if (unlikely(err)) break; sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND; nilfs_sc_cstage_set(sci, NILFS_ST_DONE); return 0; case NILFS_ST_DONE: return 0; default: BUG(); } break_or_fail: return err; } /** * nilfs_segctor_begin_construction - setup segment buffer to make a new log * @sci: nilfs_sc_info * @nilfs: nilfs object */ static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci, struct the_nilfs *nilfs) { struct nilfs_segment_buffer *segbuf, *prev; __u64 nextnum; int err, alloc = 0; segbuf = nilfs_segbuf_new(sci->sc_super); if (unlikely(!segbuf)) return -ENOMEM; if (list_empty(&sci->sc_write_logs)) { nilfs_segbuf_map(segbuf, nilfs->ns_segnum, nilfs->ns_pseg_offset, nilfs); if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) { nilfs_shift_to_next_segment(nilfs); nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs); } segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq; nextnum = nilfs->ns_nextnum; if (nilfs->ns_segnum == nilfs->ns_nextnum) /* Start from the head of a new full segment */ alloc++; } else { /* Continue logs */ prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs); nilfs_segbuf_map_cont(segbuf, prev); segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq; nextnum = prev->sb_nextnum; if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) { nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs); segbuf->sb_sum.seg_seq++; alloc++; } } err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum); if (err) goto failed; if (alloc) { err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum); if (err) goto failed; } nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs); BUG_ON(!list_empty(&sci->sc_segbufs)); list_add_tail(&segbuf->sb_list, &sci->sc_segbufs); sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks; return 0; failed: 
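	/* error path: undo the nilfs_segbuf_new() allocation made above */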
nilfs_segbuf_free(segbuf); return err; } static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci, struct the_nilfs *nilfs, int nadd) { struct nilfs_segment_buffer *segbuf, *prev; struct inode *sufile = nilfs->ns_sufile; __u64 nextnextnum; LIST_HEAD(list); int err, ret, i; prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs); /* * Since the segment specified with nextnum might be allocated during * the previous construction, the buffer including its segusage may * not be dirty. The following call ensures that the buffer is dirty * and will pin the buffer on memory until the sufile is written. */ err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum); if (unlikely(err)) return err; for (i = 0; i < nadd; i++) { /* extend segment info */ err = -ENOMEM; segbuf = nilfs_segbuf_new(sci->sc_super); if (unlikely(!segbuf)) goto failed; /* map this buffer to region of segment on-disk */ nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs); sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks; /* allocate the next next full segment */ err = nilfs_sufile_alloc(sufile, &nextnextnum); if (unlikely(err)) goto failed_segbuf; segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1; nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs); list_add_tail(&segbuf->sb_list, &list); prev = segbuf; } list_splice_tail(&list, &sci->sc_segbufs); return 0; failed_segbuf: nilfs_segbuf_free(segbuf); failed: list_for_each_entry(segbuf, &list, sb_list) { ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum); WARN_ON(ret); /* never fails */ } nilfs_destroy_logs(&list); return err; } static void nilfs_free_incomplete_logs(struct list_head *logs, struct the_nilfs *nilfs) { struct nilfs_segment_buffer *segbuf, *prev; struct inode *sufile = nilfs->ns_sufile; int ret; segbuf = NILFS_FIRST_SEGBUF(logs); if (nilfs->ns_nextnum != segbuf->sb_nextnum) { ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum); WARN_ON(ret); /* never fails */ } if (atomic_read(&segbuf->sb_err)) { /* Case 1: The first segment failed */ if (segbuf->sb_pseg_start != segbuf->sb_fseg_start) /* * Case 1a: Partial segment appended into an existing * segment */ nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start, segbuf->sb_fseg_end); else /* Case 1b: New full segment */ set_nilfs_discontinued(nilfs); } prev = segbuf; list_for_each_entry_continue(segbuf, logs, sb_list) { if (prev->sb_nextnum != segbuf->sb_nextnum) { ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum); WARN_ON(ret); /* never fails */ } if (atomic_read(&segbuf->sb_err) && segbuf->sb_segnum != nilfs->ns_nextnum) /* Case 2: extended segment (!= next) failed */ nilfs_sufile_set_error(sufile, segbuf->sb_segnum); prev = segbuf; } } static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci, struct inode *sufile) { struct nilfs_segment_buffer *segbuf; unsigned long live_blocks; int ret; list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { live_blocks = segbuf->sb_sum.nblocks + (segbuf->sb_pseg_start - segbuf->sb_fseg_start); ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum, live_blocks, sci->sc_seg_ctime); WARN_ON(ret); /* always succeed because the segusage is dirty */ } } static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile) { struct nilfs_segment_buffer *segbuf; int ret; segbuf = NILFS_FIRST_SEGBUF(logs); ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum, segbuf->sb_pseg_start - segbuf->sb_fseg_start, 0); WARN_ON(ret); /* always succeed because the segusage is dirty */ list_for_each_entry_continue(segbuf, logs, sb_list) { ret = 
nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum, 0, 0); WARN_ON(ret); /* always succeed */ } } static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci, struct nilfs_segment_buffer *last, struct inode *sufile) { struct nilfs_segment_buffer *segbuf = last; int ret; list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) { sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks; ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum); WARN_ON(ret); } nilfs_truncate_logs(&sci->sc_segbufs, last); } static int nilfs_segctor_collect(struct nilfs_sc_info *sci, struct the_nilfs *nilfs, int mode) { struct nilfs_cstage prev_stage = sci->sc_stage; int err, nadd = 1; /* Collection retry loop */ for (;;) { sci->sc_nblk_this_inc = 0; sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs); err = nilfs_segctor_reset_segment_buffer(sci); if (unlikely(err)) goto failed; err = nilfs_segctor_collect_blocks(sci, mode); sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks; if (!err) break; if (unlikely(err != -E2BIG)) goto failed; /* The current segment is filled up */ if (mode != SC_LSEG_SR || nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE) break; nilfs_clear_logs(&sci->sc_segbufs); if (sci->sc_stage.flags & NILFS_CF_SUFREED) { err = nilfs_sufile_cancel_freev(nilfs->ns_sufile, sci->sc_freesegs, sci->sc_nfreesegs, NULL); WARN_ON(err); /* do not happen */ sci->sc_stage.flags &= ~NILFS_CF_SUFREED; } err = nilfs_segctor_extend_segments(sci, nilfs, nadd); if (unlikely(err)) return err; nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA); sci->sc_stage = prev_stage; } nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile); return 0; failed: return err; } static void nilfs_list_replace_buffer(struct buffer_head *old_bh, struct buffer_head *new_bh) { BUG_ON(!list_empty(&new_bh->b_assoc_buffers)); list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers); /* The caller must release old_bh */ } static int nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci, struct nilfs_segment_buffer *segbuf, int mode) { struct inode *inode = NULL; sector_t blocknr; unsigned long nfinfo = segbuf->sb_sum.nfinfo; unsigned long nblocks = 0, ndatablk = 0; const struct nilfs_sc_operations *sc_op = NULL; struct nilfs_segsum_pointer ssp; struct nilfs_finfo *finfo = NULL; union nilfs_binfo binfo; struct buffer_head *bh, *bh_org; ino_t ino = 0; int err = 0; if (!nfinfo) goto out; blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk; ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers); ssp.offset = sizeof(struct nilfs_segment_summary); list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { if (bh == segbuf->sb_super_root) break; if (!finfo) { finfo = nilfs_segctor_map_segsum_entry( sci, &ssp, sizeof(*finfo)); ino = le64_to_cpu(finfo->fi_ino); nblocks = le32_to_cpu(finfo->fi_nblocks); ndatablk = le32_to_cpu(finfo->fi_ndatablk); inode = bh->b_page->mapping->host; if (mode == SC_LSEG_DSYNC) sc_op = &nilfs_sc_dsync_ops; else if (ino == NILFS_DAT_INO) sc_op = &nilfs_sc_dat_ops; else /* file blocks */ sc_op = &nilfs_sc_file_ops; } bh_org = bh; get_bh(bh_org); err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr, &binfo); if (bh != bh_org) nilfs_list_replace_buffer(bh_org, bh); brelse(bh_org); if (unlikely(err)) goto failed_bmap; if (ndatablk > 0) sc_op->write_data_binfo(sci, &ssp, &binfo); else sc_op->write_node_binfo(sci, &ssp, &binfo); blocknr++; if (--nblocks == 0) { finfo = NULL; if (--nfinfo == 0) break; } else if (ndatablk > 0) ndatablk--; } out: return 0; 
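	/* reached only when nilfs_bmap_assign() fails in the loop above */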
failed_bmap: return err; } static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode) { struct nilfs_segment_buffer *segbuf; int err; list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode); if (unlikely(err)) return err; nilfs_segbuf_fill_in_segsum(segbuf); } return 0; } static void nilfs_begin_page_io(struct page *page) { if (!page || PageWriteback(page)) /* * For split b-tree node pages, this function may be called * twice. We ignore the 2nd or later calls by this check. */ return; lock_page(page); clear_page_dirty_for_io(page); set_page_writeback(page); unlock_page(page); } static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci) { struct nilfs_segment_buffer *segbuf; struct page *bd_page = NULL, *fs_page = NULL; list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { struct buffer_head *bh; list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) { if (bh->b_page != bd_page) { if (bd_page) { lock_page(bd_page); clear_page_dirty_for_io(bd_page); set_page_writeback(bd_page); unlock_page(bd_page); } bd_page = bh->b_page; } } list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { set_buffer_async_write(bh); if (bh == segbuf->sb_super_root) { if (bh->b_page != bd_page) { lock_page(bd_page); clear_page_dirty_for_io(bd_page); set_page_writeback(bd_page); unlock_page(bd_page); bd_page = bh->b_page; } break; } if (bh->b_page != fs_page) { nilfs_begin_page_io(fs_page); fs_page = bh->b_page; } } } if (bd_page) { lock_page(bd_page); clear_page_dirty_for_io(bd_page); set_page_writeback(bd_page); unlock_page(bd_page); } nilfs_begin_page_io(fs_page); } static int nilfs_segctor_write(struct nilfs_sc_info *sci, struct the_nilfs *nilfs) { int ret; ret = nilfs_write_logs(&sci->sc_segbufs, nilfs); list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs); return ret; } static void nilfs_end_page_io(struct page *page, int err) { if (!page) return; if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) { /* * For b-tree node pages, this function may be called twice * or more because they might be split in a segment. */ if (PageDirty(page)) { /* * For pages holding split b-tree node buffers, dirty * flag on the buffers may be cleared discretely. * In that case, the page is once redirtied for * remaining buffers, and it must be cancelled if * all the buffers get cleaned later. 
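 * nilfs_page_buffers_clean() below performs that final check before
 * the dirty flag is cancelled.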
*/ lock_page(page); if (nilfs_page_buffers_clean(page)) __nilfs_clear_page_dirty(page); unlock_page(page); } return; } if (!err) { if (!nilfs_page_buffers_clean(page)) __set_page_dirty_nobuffers(page); ClearPageError(page); } else { __set_page_dirty_nobuffers(page); SetPageError(page); } end_page_writeback(page); } static void nilfs_abort_logs(struct list_head *logs, int err) { struct nilfs_segment_buffer *segbuf; struct page *bd_page = NULL, *fs_page = NULL; struct buffer_head *bh; if (list_empty(logs)) return; list_for_each_entry(segbuf, logs, sb_list) { list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) { if (bh->b_page != bd_page) { if (bd_page) end_page_writeback(bd_page); bd_page = bh->b_page; } } list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { clear_buffer_async_write(bh); if (bh == segbuf->sb_super_root) { if (bh->b_page != bd_page) { end_page_writeback(bd_page); bd_page = bh->b_page; } break; } if (bh->b_page != fs_page) { nilfs_end_page_io(fs_page, err); fs_page = bh->b_page; } } } if (bd_page) end_page_writeback(bd_page); nilfs_end_page_io(fs_page, err); } static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci, struct the_nilfs *nilfs, int err) { LIST_HEAD(logs); int ret; list_splice_tail_init(&sci->sc_write_logs, &logs); ret = nilfs_wait_on_logs(&logs); nilfs_abort_logs(&logs, ret ? : err); list_splice_tail_init(&sci->sc_segbufs, &logs); nilfs_cancel_segusage(&logs, nilfs->ns_sufile); nilfs_free_incomplete_logs(&logs, nilfs); if (sci->sc_stage.flags & NILFS_CF_SUFREED) { ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile, sci->sc_freesegs, sci->sc_nfreesegs, NULL); WARN_ON(ret); /* do not happen */ } nilfs_destroy_logs(&logs); } static void nilfs_set_next_segment(struct the_nilfs *nilfs, struct nilfs_segment_buffer *segbuf) { nilfs->ns_segnum = segbuf->sb_segnum; nilfs->ns_nextnum = segbuf->sb_nextnum; nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start + segbuf->sb_sum.nblocks; nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq; nilfs->ns_ctime = segbuf->sb_sum.ctime; } static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) { struct nilfs_segment_buffer *segbuf; struct page *bd_page = NULL, *fs_page = NULL; struct the_nilfs *nilfs = sci->sc_super->s_fs_info; int update_sr = false; list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) { struct buffer_head *bh; list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) { set_buffer_uptodate(bh); clear_buffer_dirty(bh); if (bh->b_page != bd_page) { if (bd_page) end_page_writeback(bd_page); bd_page = bh->b_page; } } /* * We assume that the buffers which belong to the same page * continue over the buffer list. * Under this assumption, the last BHs of pages is * identifiable by the discontinuity of bh->b_page * (page != fs_page). * * For B-tree node blocks, however, this assumption is not * guaranteed. The cleanup code of B-tree node pages needs * special care. 
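 * (nilfs_end_page_io() compensates for this by re-examining b-tree
 * node pages rather than relying on page-boundary detection alone.)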
*/ list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { const unsigned long set_bits = BIT(BH_Uptodate); const unsigned long clear_bits = (BIT(BH_Dirty) | BIT(BH_Async_Write) | BIT(BH_Delay) | BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Redirected)); set_mask_bits(&bh->b_state, clear_bits, set_bits); if (bh == segbuf->sb_super_root) { if (bh->b_page != bd_page) { end_page_writeback(bd_page); bd_page = bh->b_page; } update_sr = true; break; } if (bh->b_page != fs_page) { nilfs_end_page_io(fs_page, 0); fs_page = bh->b_page; } } if (!nilfs_segbuf_simplex(segbuf)) { if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) { set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags); sci->sc_lseg_stime = jiffies; } if (segbuf->sb_sum.flags & NILFS_SS_LOGEND) clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags); } } /* * Since pages may continue over multiple segment buffers, * end of the last page must be checked outside of the loop. */ if (bd_page) end_page_writeback(bd_page); nilfs_end_page_io(fs_page, 0); nilfs_drop_collected_inodes(&sci->sc_dirty_files); if (nilfs_doing_gc()) nilfs_drop_collected_inodes(&sci->sc_gc_inodes); else nilfs->ns_nongc_ctime = sci->sc_seg_ctime; sci->sc_nblk_inc += sci->sc_nblk_this_inc; segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs); nilfs_set_next_segment(nilfs, segbuf); if (update_sr) { nilfs->ns_flushed_device = 0; nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start, segbuf->sb_sum.seg_seq, nilfs->ns_cno++); clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); clear_bit(NILFS_SC_DIRTY, &sci->sc_flags); set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags); nilfs_segctor_clear_metadata_dirty(sci); } else clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags); } static int nilfs_segctor_wait(struct nilfs_sc_info *sci) { int ret; ret = nilfs_wait_on_logs(&sci->sc_write_logs); if (!ret) { nilfs_segctor_complete_write(sci); nilfs_destroy_logs(&sci->sc_write_logs); } return ret; } static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci, struct the_nilfs *nilfs) { struct nilfs_inode_info *ii, *n; struct inode *ifile = sci->sc_root->ifile; spin_lock(&nilfs->ns_inode_lock); retry: list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) { if (!ii->i_bh) { struct buffer_head *ibh; int err; spin_unlock(&nilfs->ns_inode_lock); err = nilfs_ifile_get_inode_block( ifile, ii->vfs_inode.i_ino, &ibh); if (unlikely(err)) { nilfs_msg(sci->sc_super, KERN_WARNING, "log writer: error %d getting inode block (ino=%lu)", err, ii->vfs_inode.i_ino); return err; } mark_buffer_dirty(ibh); nilfs_mdt_mark_dirty(ifile); spin_lock(&nilfs->ns_inode_lock); if (likely(!ii->i_bh)) ii->i_bh = ibh; else brelse(ibh); goto retry; } clear_bit(NILFS_I_QUEUED, &ii->i_state); set_bit(NILFS_I_BUSY, &ii->i_state); list_move_tail(&ii->i_dirty, &sci->sc_dirty_files); } spin_unlock(&nilfs->ns_inode_lock); return 0; } static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, struct the_nilfs *nilfs) { struct nilfs_inode_info *ii, *n; int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE); int defer_iput = false; spin_lock(&nilfs->ns_inode_lock); list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) { if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) || test_bit(NILFS_I_DIRTY, &ii->i_state)) continue; clear_bit(NILFS_I_BUSY, &ii->i_state); brelse(ii->i_bh); ii->i_bh = NULL; list_del_init(&ii->i_dirty); if (!ii->vfs_inode.i_nlink || during_mount) { /* * Defer calling iput() to avoid deadlocks if * i_nlink == 0 or mount is not yet finished. 
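 * Such inodes are parked on sc_iput_queue and released later from
 * the sc_iput_work work item.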
*/ list_add_tail(&ii->i_dirty, &sci->sc_iput_queue); defer_iput = true; } else { spin_unlock(&nilfs->ns_inode_lock); iput(&ii->vfs_inode); spin_lock(&nilfs->ns_inode_lock); } } spin_unlock(&nilfs->ns_inode_lock); if (defer_iput) schedule_work(&sci->sc_iput_work); } /* * Main procedure of segment constructor */ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode) { struct the_nilfs *nilfs = sci->sc_super->s_fs_info; int err; nilfs_sc_cstage_set(sci, NILFS_ST_INIT); sci->sc_cno = nilfs->ns_cno; err = nilfs_segctor_collect_dirty_files(sci, nilfs); if (unlikely(err)) goto out; if (nilfs_test_metadata_dirty(nilfs, sci->sc_root)) set_bit(NILFS_SC_DIRTY, &sci->sc_flags); if (nilfs_segctor_clean(sci)) goto out; do { sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK; err = nilfs_segctor_begin_construction(sci, nilfs); if (unlikely(err)) goto out; /* Update time stamp */ sci->sc_seg_ctime = get_seconds(); err = nilfs_segctor_collect(sci, nilfs, mode); if (unlikely(err)) goto failed; /* Avoid empty segment */ if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE && nilfs_segbuf_empty(sci->sc_curseg)) { nilfs_segctor_abort_construction(sci, nilfs, 1); goto out; } err = nilfs_segctor_assign(sci, mode); if (unlikely(err)) goto failed; if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED) nilfs_segctor_fill_in_file_bmap(sci); if (mode == SC_LSEG_SR && nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) { err = nilfs_segctor_fill_in_checkpoint(sci); if (unlikely(err)) goto failed_to_write; nilfs_segctor_fill_in_super_root(sci, nilfs); } nilfs_segctor_update_segusage(sci, nilfs->ns_sufile); /* Write partial segments */ nilfs_segctor_prepare_write(sci); nilfs_add_checksums_on_logs(&sci->sc_segbufs, nilfs->ns_crc_seed); err = nilfs_segctor_write(sci, nilfs); if (unlikely(err)) goto failed_to_write; if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE || nilfs->ns_blocksize_bits != PAGE_SHIFT) { /* * At this point, we avoid double buffering * for blocksize < pagesize because page dirty * flag is turned off during write and dirty * buffers are not properly collected for * pages crossing over segments. */ err = nilfs_segctor_wait(sci); if (err) goto failed_to_write; } } while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE); out: nilfs_segctor_drop_written_files(sci, nilfs); return err; failed_to_write: if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED) nilfs_redirty_inodes(&sci->sc_dirty_files); failed: if (nilfs_doing_gc()) nilfs_redirty_inodes(&sci->sc_gc_inodes); nilfs_segctor_abort_construction(sci, nilfs, err); goto out; } /** * nilfs_segctor_start_timer - set timer of background write * @sci: nilfs_sc_info * * If the timer has already been set, it ignores the new request. * This function MUST be called within a section locking the segment * semaphore. */ static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci) { spin_lock(&sci->sc_state_lock); if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) { sci->sc_timer.expires = jiffies + sci->sc_interval; add_timer(&sci->sc_timer); sci->sc_state |= NILFS_SEGCTOR_COMMIT; } spin_unlock(&sci->sc_state_lock); } static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn) { spin_lock(&sci->sc_state_lock); if (!(sci->sc_flush_request & BIT(bn))) { unsigned long prev_req = sci->sc_flush_request; sci->sc_flush_request |= BIT(bn); if (!prev_req) wake_up(&sci->sc_wait_daemon); } spin_unlock(&sci->sc_state_lock); } /** * nilfs_flush_segment - trigger a segment construction for resource control * @sb: super block * @ino: inode number of the file to be flushed out. 
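 *
 * A flush request is recorded for the flush bit derived from @ino:
 * metadata file inodes use their own inode number as the bit position,
 * while all regular data files share bit 0.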
 */
void nilfs_flush_segment(struct super_block *sb, ino_t ino)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (!sci || nilfs_doing_construction())
		return;
	nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
					/* assign bit 0 to data files */
}

struct nilfs_segctor_wait_request {
	wait_queue_t	wq;
	__u32		seq;
	int		err;
	atomic_t	done;
};

static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
{
	struct nilfs_segctor_wait_request wait_req;
	int err = 0;

	spin_lock(&sci->sc_state_lock);
	init_wait(&wait_req.wq);
	wait_req.err = 0;
	atomic_set(&wait_req.done, 0);
	wait_req.seq = ++sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);

	init_waitqueue_entry(&wait_req.wq, current);
	add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
	set_current_state(TASK_INTERRUPTIBLE);
	wake_up(&sci->sc_wait_daemon);

	for (;;) {
		if (atomic_read(&wait_req.done)) {
			err = wait_req.err;
			break;
		}
		if (!signal_pending(current)) {
			schedule();
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(&sci->sc_wait_request, &wait_req.wq);
	return err;
}

static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
{
	struct nilfs_segctor_wait_request *wrq, *n;
	unsigned long flags;

	spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
				 wq.task_list) {
		if (!atomic_read(&wrq->done) &&
		    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
			wrq->err = err;
			atomic_set(&wrq->done, 1);
		}
		if (atomic_read(&wrq->done)) {
			wrq->wq.func(&wrq->wq,
				     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
				     0, NULL);
		}
	}
	spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
}

/**
 * nilfs_construct_segment - construct a logical segment
 * @sb: super block
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error code is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_segment(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info *ti;
	int err;

	if (!sci)
		return -EROFS;

	/* A call inside transactions causes a deadlock. */
	BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);

	err = nilfs_segctor_sync(sci);
	return err;
}

/**
 * nilfs_construct_dsync_segment - construct a data-only logical segment
 * @sb: super block
 * @inode: inode whose data blocks should be written out
 * @start: start byte offset
 * @end: end byte offset (inclusive)
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error code is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
*/ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode, loff_t start, loff_t end) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_sc_info *sci = nilfs->ns_writer; struct nilfs_inode_info *ii; struct nilfs_transaction_info ti; int err = 0; if (!sci) return -EROFS; nilfs_transaction_lock(sb, &ti, 0); ii = NILFS_I(inode); if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) || nilfs_test_opt(nilfs, STRICT_ORDER) || test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) || nilfs_discontinued(nilfs)) { nilfs_transaction_unlock(sb); err = nilfs_segctor_sync(sci); return err; } spin_lock(&nilfs->ns_inode_lock); if (!test_bit(NILFS_I_QUEUED, &ii->i_state) && !test_bit(NILFS_I_BUSY, &ii->i_state)) { spin_unlock(&nilfs->ns_inode_lock); nilfs_transaction_unlock(sb); return 0; } spin_unlock(&nilfs->ns_inode_lock); sci->sc_dsync_inode = ii; sci->sc_dsync_start = start; sci->sc_dsync_end = end; err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC); if (!err) nilfs->ns_flushed_device = 0; nilfs_transaction_unlock(sb); return err; } #define FLUSH_FILE_BIT (0x1) /* data file only */ #define FLUSH_DAT_BIT BIT(NILFS_DAT_INO) /* DAT only */ /** * nilfs_segctor_accept - record accepted sequence count of log-write requests * @sci: segment constructor object */ static void nilfs_segctor_accept(struct nilfs_sc_info *sci) { spin_lock(&sci->sc_state_lock); sci->sc_seq_accepted = sci->sc_seq_request; spin_unlock(&sci->sc_state_lock); del_timer_sync(&sci->sc_timer); } /** * nilfs_segctor_notify - notify the result of request to caller threads * @sci: segment constructor object * @mode: mode of log forming * @err: error code to be notified */ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err) { /* Clear requests (even when the construction failed) */ spin_lock(&sci->sc_state_lock); if (mode == SC_LSEG_SR) { sci->sc_state &= ~NILFS_SEGCTOR_COMMIT; sci->sc_seq_done = sci->sc_seq_accepted; nilfs_segctor_wakeup(sci, err); sci->sc_flush_request = 0; } else { if (mode == SC_FLUSH_FILE) sci->sc_flush_request &= ~FLUSH_FILE_BIT; else if (mode == SC_FLUSH_DAT) sci->sc_flush_request &= ~FLUSH_DAT_BIT; /* re-enable timer if checkpoint creation was not done */ if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && time_before(jiffies, sci->sc_timer.expires)) add_timer(&sci->sc_timer); } spin_unlock(&sci->sc_state_lock); } /** * nilfs_segctor_construct - form logs and write them to disk * @sci: segment constructor object * @mode: mode of log forming */ static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode) { struct the_nilfs *nilfs = sci->sc_super->s_fs_info; struct nilfs_super_block **sbp; int err = 0; nilfs_segctor_accept(sci); if (nilfs_discontinued(nilfs)) mode = SC_LSEG_SR; if (!nilfs_segctor_confirm(sci)) err = nilfs_segctor_do_construct(sci, mode); if (likely(!err)) { if (mode != SC_FLUSH_DAT) atomic_set(&nilfs->ns_ndirtyblks, 0); if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) && nilfs_discontinued(nilfs)) { down_write(&nilfs->ns_sem); err = -EIO; sbp = nilfs_prepare_super(sci->sc_super, nilfs_sb_will_flip(nilfs)); if (likely(sbp)) { nilfs_set_log_cursor(sbp[0], nilfs); err = nilfs_commit_super(sci->sc_super, NILFS_SB_COMMIT); } up_write(&nilfs->ns_sem); } } nilfs_segctor_notify(sci, mode, err); return err; } static void nilfs_construction_timeout(unsigned long data) { struct task_struct *p = (struct task_struct *)data; wake_up_process(p); } static void nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head) { struct nilfs_inode_info *ii, *n; 
list_for_each_entry_safe(ii, n, head, i_dirty) { if (!test_bit(NILFS_I_UPDATED, &ii->i_state)) continue; list_del_init(&ii->i_dirty); truncate_inode_pages(&ii->vfs_inode.i_data, 0); nilfs_btnode_cache_clear(&ii->i_btnode_cache); iput(&ii->vfs_inode); } } int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv, void **kbufs) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_sc_info *sci = nilfs->ns_writer; struct nilfs_transaction_info ti; int err; if (unlikely(!sci)) return -EROFS; nilfs_transaction_lock(sb, &ti, 1); err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat); if (unlikely(err)) goto out_unlock; err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs); if (unlikely(err)) { nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat); goto out_unlock; } sci->sc_freesegs = kbufs[4]; sci->sc_nfreesegs = argv[4].v_nmembs; list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes); for (;;) { err = nilfs_segctor_construct(sci, SC_LSEG_SR); nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes); if (likely(!err)) break; nilfs_msg(sb, KERN_WARNING, "error %d cleaning segments", err); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(sci->sc_interval); } if (nilfs_test_opt(nilfs, DISCARD)) { int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs, sci->sc_nfreesegs); if (ret) { nilfs_msg(sb, KERN_WARNING, "error %d on discard request, turning discards off for the device", ret); nilfs_clear_opt(nilfs, DISCARD); } } out_unlock: sci->sc_freesegs = NULL; sci->sc_nfreesegs = 0; nilfs_mdt_clear_shadow_map(nilfs->ns_dat); nilfs_transaction_unlock(sb); return err; } static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode) { struct nilfs_transaction_info ti; nilfs_transaction_lock(sci->sc_super, &ti, 0); nilfs_segctor_construct(sci, mode); /* * Unclosed segment should be retried. We do this using sc_timer. * Timeout of sc_timer will invoke complete construction which leads * to close the current logical segment. */ if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) nilfs_segctor_start_timer(sci); nilfs_transaction_unlock(sci->sc_super); } static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci) { int mode = 0; spin_lock(&sci->sc_state_lock); mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ? SC_FLUSH_DAT : SC_FLUSH_FILE; spin_unlock(&sci->sc_state_lock); if (mode) { nilfs_segctor_do_construct(sci, mode); spin_lock(&sci->sc_state_lock); sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ? ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT; spin_unlock(&sci->sc_state_lock); } clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags); } static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci) { if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) || time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) { if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT)) return SC_FLUSH_FILE; else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT)) return SC_FLUSH_DAT; } return SC_LSEG_SR; } /** * nilfs_segctor_thread - main loop of the segment constructor thread. * @arg: pointer to a struct nilfs_sc_info. * * nilfs_segctor_thread() initializes a timer and serves as a daemon * to execute segment constructions. */ static int nilfs_segctor_thread(void *arg) { struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg; struct the_nilfs *nilfs = sci->sc_super->s_fs_info; int timeout = 0; sci->sc_timer.data = (unsigned long)current; sci->sc_timer.function = nilfs_construction_timeout; /* start sync. 
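 * Publish the current task as sc_task and wake sc_wait_task so that
 * nilfs_segctor_start_thread() can return.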
*/ sci->sc_task = current; wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */ nilfs_msg(sci->sc_super, KERN_INFO, "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds", sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ); spin_lock(&sci->sc_state_lock); loop: for (;;) { int mode; if (sci->sc_state & NILFS_SEGCTOR_QUIT) goto end_thread; if (timeout || sci->sc_seq_request != sci->sc_seq_done) mode = SC_LSEG_SR; else if (sci->sc_flush_request) mode = nilfs_segctor_flush_mode(sci); else break; spin_unlock(&sci->sc_state_lock); nilfs_segctor_thread_construct(sci, mode); spin_lock(&sci->sc_state_lock); timeout = 0; } if (freezing(current)) { spin_unlock(&sci->sc_state_lock); try_to_freeze(); spin_lock(&sci->sc_state_lock); } else { DEFINE_WAIT(wait); int should_sleep = 1; prepare_to_wait(&sci->sc_wait_daemon, &wait, TASK_INTERRUPTIBLE); if (sci->sc_seq_request != sci->sc_seq_done) should_sleep = 0; else if (sci->sc_flush_request) should_sleep = 0; else if (sci->sc_state & NILFS_SEGCTOR_COMMIT) should_sleep = time_before(jiffies, sci->sc_timer.expires); if (should_sleep) { spin_unlock(&sci->sc_state_lock); schedule(); spin_lock(&sci->sc_state_lock); } finish_wait(&sci->sc_wait_daemon, &wait); timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && time_after_eq(jiffies, sci->sc_timer.expires)); if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs)) set_nilfs_discontinued(nilfs); } goto loop; end_thread: spin_unlock(&sci->sc_state_lock); /* end sync. */ sci->sc_task = NULL; wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */ return 0; } static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci) { struct task_struct *t; t = kthread_run(nilfs_segctor_thread, sci, "segctord"); if (IS_ERR(t)) { int err = PTR_ERR(t); nilfs_msg(sci->sc_super, KERN_ERR, "error %d creating segctord thread", err); return err; } wait_event(sci->sc_wait_task, sci->sc_task != NULL); return 0; } static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci) __acquires(&sci->sc_state_lock) __releases(&sci->sc_state_lock) { sci->sc_state |= NILFS_SEGCTOR_QUIT; while (sci->sc_task) { wake_up(&sci->sc_wait_daemon); spin_unlock(&sci->sc_state_lock); wait_event(sci->sc_wait_task, sci->sc_task == NULL); spin_lock(&sci->sc_state_lock); } } /* * Setup & clean-up functions */ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb, struct nilfs_root *root) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_sc_info *sci; sci = kzalloc(sizeof(*sci), GFP_KERNEL); if (!sci) return NULL; sci->sc_super = sb; nilfs_get_root(root); sci->sc_root = root; init_waitqueue_head(&sci->sc_wait_request); init_waitqueue_head(&sci->sc_wait_daemon); init_waitqueue_head(&sci->sc_wait_task); spin_lock_init(&sci->sc_state_lock); INIT_LIST_HEAD(&sci->sc_dirty_files); INIT_LIST_HEAD(&sci->sc_segbufs); INIT_LIST_HEAD(&sci->sc_write_logs); INIT_LIST_HEAD(&sci->sc_gc_inodes); INIT_LIST_HEAD(&sci->sc_iput_queue); INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func); init_timer(&sci->sc_timer); sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT; sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ; sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK; if (nilfs->ns_interval) sci->sc_interval = HZ * nilfs->ns_interval; if (nilfs->ns_watermark) sci->sc_watermark = nilfs->ns_watermark; return sci; } static void nilfs_segctor_write_out(struct nilfs_sc_info *sci) { int ret, retrycount = NILFS_SC_CLEANUP_RETRY; /* * The segctord thread was stopped and its timer was removed. * But some tasks remain. 
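 * Flush them out here, retrying the final construction up to
 * NILFS_SC_CLEANUP_RETRY times if it keeps failing.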
*/ do { struct nilfs_transaction_info ti; nilfs_transaction_lock(sci->sc_super, &ti, 0); ret = nilfs_segctor_construct(sci, SC_LSEG_SR); nilfs_transaction_unlock(sci->sc_super); flush_work(&sci->sc_iput_work); } while (ret && retrycount-- > 0); } /** * nilfs_segctor_destroy - destroy the segment constructor. * @sci: nilfs_sc_info * * nilfs_segctor_destroy() kills the segctord thread and frees * the nilfs_sc_info struct. * Caller must hold the segment semaphore. */ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci) { struct the_nilfs *nilfs = sci->sc_super->s_fs_info; int flag; up_write(&nilfs->ns_segctor_sem); spin_lock(&sci->sc_state_lock); nilfs_segctor_kill_thread(sci); flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request || sci->sc_seq_request != sci->sc_seq_done); spin_unlock(&sci->sc_state_lock); if (flush_work(&sci->sc_iput_work)) flag = true; if (flag || !nilfs_segctor_confirm(sci)) nilfs_segctor_write_out(sci); if (!list_empty(&sci->sc_dirty_files)) { nilfs_msg(sci->sc_super, KERN_WARNING, "disposed unprocessed dirty file(s) when stopping log writer"); nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1); } if (!list_empty(&sci->sc_iput_queue)) { nilfs_msg(sci->sc_super, KERN_WARNING, "disposed unprocessed inode(s) in iput queue when stopping log writer"); nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1); } WARN_ON(!list_empty(&sci->sc_segbufs)); WARN_ON(!list_empty(&sci->sc_write_logs)); nilfs_put_root(sci->sc_root); down_write(&nilfs->ns_segctor_sem); del_timer_sync(&sci->sc_timer); kfree(sci); } /** * nilfs_attach_log_writer - attach log writer * @sb: super block instance * @root: root object of the current filesystem tree * * This allocates a log writer object, initializes it, and starts the * log writer. * * Return Value: On success, 0 is returned. On error, one of the following * negative error code is returned. * * %-ENOMEM - Insufficient memory available. */ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root) { struct the_nilfs *nilfs = sb->s_fs_info; int err; if (nilfs->ns_writer) { /* * This happens if the filesystem was remounted * read/write after nilfs_error degenerated it into a * read-only mount. */ nilfs_detach_log_writer(sb); } nilfs->ns_writer = nilfs_segctor_new(sb, root); if (!nilfs->ns_writer) return -ENOMEM; err = nilfs_segctor_start_thread(nilfs->ns_writer); if (err) { kfree(nilfs->ns_writer); nilfs->ns_writer = NULL; } return err; } /** * nilfs_detach_log_writer - destroy log writer * @sb: super block instance * * This kills log writer daemon, frees the log writer object, and * destroys list of dirty files. */ void nilfs_detach_log_writer(struct super_block *sb) { struct the_nilfs *nilfs = sb->s_fs_info; LIST_HEAD(garbage_list); down_write(&nilfs->ns_segctor_sem); if (nilfs->ns_writer) { nilfs_segctor_destroy(nilfs->ns_writer); nilfs->ns_writer = NULL; } /* Force to free the list of dirty files */ spin_lock(&nilfs->ns_inode_lock); if (!list_empty(&nilfs->ns_dirty_files)) { list_splice_init(&nilfs->ns_dirty_files, &garbage_list); nilfs_msg(sb, KERN_WARNING, "disposed unprocessed dirty file(s) when detaching log writer"); } spin_unlock(&nilfs->ns_inode_lock); up_write(&nilfs->ns_segctor_sem); nilfs_dispose_list(nilfs, &garbage_list, 1); }
null
null
null
null
76,888
3,233
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
156,290
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/* * Binary text demuxer * eXtended BINary text (XBIN) demuxer * Artworx Data Format demuxer * iCEDraw File demuxer * Copyright (c) 2010 Peter Ross <[email protected]> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Binary text demuxer * eXtended BINary text (XBIN) demuxer * Artworx Data Format demuxer * iCEDraw File demuxer */ #include "libavutil/intreadwrite.h" #include "libavutil/opt.h" #include "libavutil/parseutils.h" #include "avformat.h" #include "internal.h" #include "sauce.h" #include "libavcodec/bintext.h" typedef struct { const AVClass *class; int chars_per_frame; /**< characters to send decoder per frame; set by private options as characters per second, and then converted to characters per frame at runtime */ int width, height; /**< video size (WxH pixels) (private option) */ AVRational framerate; /**< frames per second (private option) */ uint64_t fsize; /**< file size less metadata buffer */ } BinDemuxContext; static AVStream * init_stream(AVFormatContext *s) { BinDemuxContext *bin = s->priv_data; AVStream *st = avformat_new_stream(s, NULL); if (!st) return NULL; st->codecpar->codec_tag = 0; st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; if (!bin->width) { st->codecpar->width = (80<<3); st->codecpar->height = (25<<4); } avpriv_set_pts_info(st, 60, bin->framerate.den, bin->framerate.num); /* simulate tty display speed */ bin->chars_per_frame = av_clip(av_q2d(st->time_base) * bin->chars_per_frame, 1, INT_MAX); return st; } #if CONFIG_BINTEXT_DEMUXER | CONFIG_ADF_DEMUXER | CONFIG_IDF_DEMUXER /** * Given filesize and width, calculate height (assume font_height of 16) */ static void calculate_height(AVCodecParameters *par, uint64_t fsize) { par->height = (fsize / ((par->width>>3)*2)) << 4; } #endif #if CONFIG_BINTEXT_DEMUXER static const uint8_t next_magic[]={ 0x1A, 0x1B, '[', '0', ';', '3', '0', ';', '4', '0', 'm', 'N', 'E', 'X', 'T', 0x00 }; static int next_tag_read(AVFormatContext *avctx, uint64_t *fsize) { AVIOContext *pb = avctx->pb; char buf[36]; int len; uint64_t start_pos = avio_size(pb) - 256; avio_seek(pb, start_pos, SEEK_SET); if (avio_read(pb, buf, sizeof(next_magic)) != sizeof(next_magic)) return -1; if (memcmp(buf, next_magic, sizeof(next_magic))) return -1; if (avio_r8(pb) != 0x01) return -1; *fsize -= 256; #define GET_EFI2_META(name,size) \ len = avio_r8(pb); \ if (len < 1 || len > size) \ return -1; \ if (avio_read(pb, buf, size) == size && *buf) { \ buf[len] = 0; \ av_dict_set(&avctx->metadata, name, buf, 0); \ } GET_EFI2_META("filename", 12) GET_EFI2_META("author", 20) GET_EFI2_META("publisher", 20) GET_EFI2_META("title", 35) return 0; } static void predict_width(AVCodecParameters *par, uint64_t fsize, int got_width) { /** attempt to guess width */ if (!got_width) par->width = fsize > 4000 ? 
(160<<3) : (80<<3); } static int bin_probe(AVProbeData *p) { const uint8_t *d = p->buf; int magic = 0, sauce = 0; int invisible = 0; int i; if (p->buf_size > 256) magic = !memcmp(d + p->buf_size - 256, next_magic, sizeof(next_magic)); if (p->buf_size > 128) sauce = !memcmp(d + p->buf_size - 128, "SAUCE00", 7); if (magic) return AVPROBE_SCORE_EXTENSION + 1; if (av_match_ext(p->filename, "bin")) { AVCodecParameters par; int got_width = 0; par.width = par.height = 0; if (sauce) return AVPROBE_SCORE_EXTENSION + 1; predict_width(&par, p->buf_size, got_width); if (par.width <= 0) return 0; calculate_height(&par, p->buf_size); if (par.height <= 0) return 0; for (i = 0; i < p->buf_size - 256; i+=2) { if ((d[i+1] & 15) == (d[i+1] >> 4) && d[i] && d[i] != 0xFF && d[i] != ' ') { invisible ++; } } if (par.width * par.height * 2 / (8*16) == p->buf_size) return AVPROBE_SCORE_MAX / 2; return 0; } if (sauce) return 1; return 0; } static int bintext_read_header(AVFormatContext *s) { BinDemuxContext *bin = s->priv_data; AVIOContext *pb = s->pb; AVStream *st = init_stream(s); if (!st) return AVERROR(ENOMEM); st->codecpar->codec_id = AV_CODEC_ID_BINTEXT; if (ff_alloc_extradata(st->codecpar, 2)) return AVERROR(ENOMEM); st->codecpar->extradata[0] = 16; st->codecpar->extradata[1] = 0; if (pb->seekable & AVIO_SEEKABLE_NORMAL) { int got_width = 0; bin->fsize = avio_size(pb); if (ff_sauce_read(s, &bin->fsize, &got_width, 0) < 0) next_tag_read(s, &bin->fsize); if (!bin->width) { predict_width(st->codecpar, bin->fsize, got_width); calculate_height(st->codecpar, bin->fsize); } avio_seek(pb, 0, SEEK_SET); } return 0; } #endif /* CONFIG_BINTEXT_DEMUXER */ #if CONFIG_XBIN_DEMUXER static int xbin_probe(AVProbeData *p) { const uint8_t *d = p->buf; if (AV_RL32(d) == MKTAG('X','B','I','N') && d[4] == 0x1A && AV_RL16(d+5) > 0 && AV_RL16(d+5) <= 160 && d[9] > 0 && d[9] <= 32) return AVPROBE_SCORE_MAX; return 0; } static int xbin_read_header(AVFormatContext *s) { BinDemuxContext *bin = s->priv_data; AVIOContext *pb = s->pb; char fontheight, flags; AVStream *st = init_stream(s); if (!st) return AVERROR(ENOMEM); avio_skip(pb, 5); st->codecpar->width = avio_rl16(pb)<<3; st->codecpar->height = avio_rl16(pb); fontheight = avio_r8(pb); st->codecpar->height *= fontheight; flags = avio_r8(pb); st->codecpar->extradata_size = 2; if ((flags & BINTEXT_PALETTE)) st->codecpar->extradata_size += 48; if ((flags & BINTEXT_FONT)) st->codecpar->extradata_size += fontheight * (flags & 0x10 ? 512 : 256); st->codecpar->codec_id = flags & 4 ? 
AV_CODEC_ID_XBIN : AV_CODEC_ID_BINTEXT; if (ff_alloc_extradata(st->codecpar, st->codecpar->extradata_size)) return AVERROR(ENOMEM); st->codecpar->extradata[0] = fontheight; st->codecpar->extradata[1] = flags; if (avio_read(pb, st->codecpar->extradata + 2, st->codecpar->extradata_size - 2) < 0) return AVERROR(EIO); if (pb->seekable & AVIO_SEEKABLE_NORMAL) { bin->fsize = avio_size(pb) - 9 - st->codecpar->extradata_size; ff_sauce_read(s, &bin->fsize, NULL, 0); avio_seek(pb, 9 + st->codecpar->extradata_size, SEEK_SET); } return 0; } #endif /* CONFIG_XBIN_DEMUXER */ #if CONFIG_ADF_DEMUXER static int adf_read_header(AVFormatContext *s) { BinDemuxContext *bin = s->priv_data; AVIOContext *pb = s->pb; AVStream *st; if (avio_r8(pb) != 1) return AVERROR_INVALIDDATA; st = init_stream(s); if (!st) return AVERROR(ENOMEM); st->codecpar->codec_id = AV_CODEC_ID_BINTEXT; if (ff_alloc_extradata(st->codecpar, 2 + 48 + 4096)) return AVERROR(ENOMEM); st->codecpar->extradata[0] = 16; st->codecpar->extradata[1] = BINTEXT_PALETTE|BINTEXT_FONT; if (avio_read(pb, st->codecpar->extradata + 2, 24) < 0) return AVERROR(EIO); avio_skip(pb, 144); if (avio_read(pb, st->codecpar->extradata + 2 + 24, 24) < 0) return AVERROR(EIO); if (avio_read(pb, st->codecpar->extradata + 2 + 48, 4096) < 0) return AVERROR(EIO); if (pb->seekable & AVIO_SEEKABLE_NORMAL) { int got_width = 0; bin->fsize = avio_size(pb) - 1 - 192 - 4096; st->codecpar->width = 80<<3; ff_sauce_read(s, &bin->fsize, &got_width, 0); if (!bin->width) calculate_height(st->codecpar, bin->fsize); avio_seek(pb, 1 + 192 + 4096, SEEK_SET); } return 0; } #endif /* CONFIG_ADF_DEMUXER */ #if CONFIG_IDF_DEMUXER static const uint8_t idf_magic[] = { 0x04, 0x31, 0x2e, 0x34, 0x00, 0x00, 0x00, 0x00, 0x4f, 0x00, 0x15, 0x00 }; static int idf_probe(AVProbeData *p) { if (p->buf_size < sizeof(idf_magic)) return 0; if (!memcmp(p->buf, idf_magic, sizeof(idf_magic))) return AVPROBE_SCORE_MAX; return 0; } static int idf_read_header(AVFormatContext *s) { BinDemuxContext *bin = s->priv_data; AVIOContext *pb = s->pb; AVStream *st; int got_width = 0; if (!(pb->seekable & AVIO_SEEKABLE_NORMAL)) return AVERROR(EIO); st = init_stream(s); if (!st) return AVERROR(ENOMEM); st->codecpar->codec_id = AV_CODEC_ID_IDF; if (ff_alloc_extradata(st->codecpar, 2 + 48 + 4096)) return AVERROR(ENOMEM); st->codecpar->extradata[0] = 16; st->codecpar->extradata[1] = BINTEXT_PALETTE|BINTEXT_FONT; avio_seek(pb, avio_size(pb) - 4096 - 48, SEEK_SET); if (avio_read(pb, st->codecpar->extradata + 2 + 48, 4096) < 0) return AVERROR(EIO); if (avio_read(pb, st->codecpar->extradata + 2, 48) < 0) return AVERROR(EIO); bin->fsize = avio_size(pb) - 12 - 4096 - 48; ff_sauce_read(s, &bin->fsize, &got_width, 0); if (!bin->width) calculate_height(st->codecpar, bin->fsize); avio_seek(pb, 12, SEEK_SET); return 0; } #endif /* CONFIG_IDF_DEMUXER */ static int read_packet(AVFormatContext *s, AVPacket *pkt) { BinDemuxContext *bin = s->priv_data; if (bin->fsize > 0) { if (av_get_packet(s->pb, pkt, bin->fsize) < 0) return AVERROR(EIO); bin->fsize = -1; /* done */ } else if (!bin->fsize) { if (avio_feof(s->pb)) return AVERROR(EIO); if (av_get_packet(s->pb, pkt, bin->chars_per_frame) < 0) return AVERROR(EIO); } else { return AVERROR(EIO); } pkt->flags |= AV_PKT_FLAG_KEY; return 0; } #define OFFSET(x) offsetof(BinDemuxContext, x) static const AVOption options[] = { { "linespeed", "set simulated line speed (bytes per second)", OFFSET(chars_per_frame), AV_OPT_TYPE_INT, {.i64 = 6000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM}, { "video_size", "set 
video size, such as 640x480 or hd720.", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM }, { "framerate", "set framerate (frames per second)", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM }, { NULL }, }; #define CLASS(name) \ (const AVClass[1]){{ \ .class_name = name, \ .item_name = av_default_item_name, \ .option = options, \ .version = LIBAVUTIL_VERSION_INT, \ }} #if CONFIG_BINTEXT_DEMUXER AVInputFormat ff_bintext_demuxer = { .name = "bin", .long_name = NULL_IF_CONFIG_SMALL("Binary text"), .priv_data_size = sizeof(BinDemuxContext), .read_probe = bin_probe, .read_header = bintext_read_header, .read_packet = read_packet, .priv_class = CLASS("Binary text demuxer"), }; #endif #if CONFIG_XBIN_DEMUXER AVInputFormat ff_xbin_demuxer = { .name = "xbin", .long_name = NULL_IF_CONFIG_SMALL("eXtended BINary text (XBIN)"), .priv_data_size = sizeof(BinDemuxContext), .read_probe = xbin_probe, .read_header = xbin_read_header, .read_packet = read_packet, .priv_class = CLASS("eXtended BINary text (XBIN) demuxer"), }; #endif #if CONFIG_ADF_DEMUXER AVInputFormat ff_adf_demuxer = { .name = "adf", .long_name = NULL_IF_CONFIG_SMALL("Artworx Data Format"), .priv_data_size = sizeof(BinDemuxContext), .read_header = adf_read_header, .read_packet = read_packet, .extensions = "adf", .priv_class = CLASS("Artworx Data Format demuxer"), }; #endif #if CONFIG_IDF_DEMUXER AVInputFormat ff_idf_demuxer = { .name = "idf", .long_name = NULL_IF_CONFIG_SMALL("iCE Draw File"), .priv_data_size = sizeof(BinDemuxContext), .read_probe = idf_probe, .read_header = idf_read_header, .read_packet = read_packet, .extensions = "idf", .priv_class = CLASS("iCE Draw File demuxer"), }; #endif
null
null
null
null
72,345
51,730
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
51,730
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "media/mojo/services/mojo_jpeg_decode_accelerator_service.h" #include <stdint.h> #include <memory> #include <utility> #include "base/logging.h" #include "base/memory/ptr_util.h" #include "base/memory/shared_memory.h" #include "base/threading/thread_task_runner_handle.h" #include "base/trace_event/trace_event.h" #include "media/base/bind_to_current_loop.h" #include "mojo/public/cpp/bindings/strong_binding.h" #include "mojo/public/cpp/system/platform_handle.h" #include "ui/gfx/geometry/size.h" namespace { void DecodeFinished(std::unique_ptr<base::SharedMemory> shm) { // Do nothing. Because VideoFrame is backed by |shm|, the purpose of this // function is to just keep reference of |shm| to make sure it lives until // decode finishes. } bool VerifyDecodeParams(const gfx::Size& coded_size, mojo::ScopedSharedBufferHandle* output_handle, uint32_t output_buffer_size) { const int kJpegMaxDimension = UINT16_MAX; if (coded_size.IsEmpty() || coded_size.width() > kJpegMaxDimension || coded_size.height() > kJpegMaxDimension) { LOG(ERROR) << "invalid coded_size " << coded_size.ToString(); return false; } if (!output_handle->is_valid()) { LOG(ERROR) << "invalid output_handle"; return false; } uint32_t allocation_size = media::VideoFrame::AllocationSize(media::PIXEL_FORMAT_I420, coded_size); if (output_buffer_size < allocation_size) { DLOG(ERROR) << "output_buffer_size is too small: " << output_buffer_size << ". It needs: " << allocation_size; return false; } return true; } } // namespace namespace media { // static void MojoJpegDecodeAcceleratorService::Create( mojom::JpegDecodeAcceleratorRequest request) { auto* jpeg_decoder = new MojoJpegDecodeAcceleratorService(); mojo::MakeStrongBinding(base::WrapUnique(jpeg_decoder), std::move(request)); } MojoJpegDecodeAcceleratorService::MojoJpegDecodeAcceleratorService() : accelerator_factory_functions_( GpuJpegDecodeAcceleratorFactory::GetAcceleratorFactories()) {} MojoJpegDecodeAcceleratorService::~MojoJpegDecodeAcceleratorService() { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); } void MojoJpegDecodeAcceleratorService::VideoFrameReady( int32_t bitstream_buffer_id) { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); NotifyDecodeStatus(bitstream_buffer_id, ::media::JpegDecodeAccelerator::Error::NO_ERRORS); } void MojoJpegDecodeAcceleratorService::NotifyError( int32_t bitstream_buffer_id, ::media::JpegDecodeAccelerator::Error error) { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); NotifyDecodeStatus(bitstream_buffer_id, error); } void MojoJpegDecodeAcceleratorService::Initialize(InitializeCallback callback) { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); // When adding non-chromeos platforms, VideoCaptureGpuJpegDecoder::Initialize // needs to be updated. 
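  // Try each registered factory in turn and keep the first accelerator
  // that initializes successfully.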
std::unique_ptr<::media::JpegDecodeAccelerator> accelerator; for (const auto& create_jda_function : accelerator_factory_functions_) { std::unique_ptr<::media::JpegDecodeAccelerator> tmp_accelerator = create_jda_function.Run(base::ThreadTaskRunnerHandle::Get()); if (tmp_accelerator && tmp_accelerator->Initialize(this)) { accelerator = std::move(tmp_accelerator); break; } } if (!accelerator) { DLOG(ERROR) << "JPEG accelerator initialization failed"; std::move(callback).Run(false); return; } accelerator_ = std::move(accelerator); std::move(callback).Run(true); } void MojoJpegDecodeAcceleratorService::Decode( const BitstreamBuffer& input_buffer, const gfx::Size& coded_size, mojo::ScopedSharedBufferHandle output_handle, uint32_t output_buffer_size, DecodeCallback callback) { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); TRACE_EVENT0("jpeg", "MojoJpegDecodeAcceleratorService::Decode"); DCHECK_EQ(decode_cb_map_.count(input_buffer.id()), 0u); decode_cb_map_[input_buffer.id()] = std::move(callback); if (!VerifyDecodeParams(coded_size, &output_handle, output_buffer_size)) { NotifyDecodeStatus(input_buffer.id(), ::media::JpegDecodeAccelerator::Error::INVALID_ARGUMENT); return; } base::SharedMemoryHandle memory_handle; MojoResult result = mojo::UnwrapSharedMemoryHandle( std::move(output_handle), &memory_handle, nullptr, nullptr); DCHECK_EQ(MOJO_RESULT_OK, result); std::unique_ptr<base::SharedMemory> output_shm( new base::SharedMemory(memory_handle, false)); if (!output_shm->Map(output_buffer_size)) { LOG(ERROR) << "Could not map output shared memory for input buffer id " << input_buffer.id(); NotifyDecodeStatus(input_buffer.id(), ::media::JpegDecodeAccelerator::Error::PLATFORM_FAILURE); return; } uint8_t* shm_memory = static_cast<uint8_t*>(output_shm->memory()); scoped_refptr<VideoFrame> frame = VideoFrame::WrapExternalSharedMemory( PIXEL_FORMAT_I420, // format coded_size, // coded_size gfx::Rect(coded_size), // visible_rect coded_size, // natural_size shm_memory, // data output_buffer_size, // data_size memory_handle, // handle 0, // data_offset base::TimeDelta()); // timestamp if (!frame.get()) { LOG(ERROR) << "Could not create VideoFrame for input buffer id " << input_buffer.id(); NotifyDecodeStatus(input_buffer.id(), ::media::JpegDecodeAccelerator::Error::PLATFORM_FAILURE); return; } frame->AddDestructionObserver( base::Bind(DecodeFinished, base::Passed(&output_shm))); DCHECK(accelerator_); accelerator_->Decode(input_buffer, frame); } void MojoJpegDecodeAcceleratorService::DecodeWithFD( int32_t buffer_id, mojo::ScopedHandle input_handle, uint32_t input_buffer_size, int32_t coded_size_width, int32_t coded_size_height, mojo::ScopedHandle output_handle, uint32_t output_buffer_size, DecodeWithFDCallback callback) { #if defined(OS_CHROMEOS) DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); base::PlatformFile input_fd; base::PlatformFile output_fd; MojoResult result; result = mojo::UnwrapPlatformFile(std::move(input_handle), &input_fd); if (result != MOJO_RESULT_OK) { std::move(callback).Run( buffer_id, ::media::JpegDecodeAccelerator::Error::PLATFORM_FAILURE); return; } result = mojo::UnwrapPlatformFile(std::move(output_handle), &output_fd); if (result != MOJO_RESULT_OK) { std::move(callback).Run( buffer_id, ::media::JpegDecodeAccelerator::Error::PLATFORM_FAILURE); return; } base::UnguessableToken guid = base::UnguessableToken::Create(); base::SharedMemoryHandle input_shm_handle( base::FileDescriptor(input_fd, true), 0u, guid); base::SharedMemoryHandle output_shm_handle( base::FileDescriptor(output_fd, 
true), 0u, guid); media::BitstreamBuffer in_buffer(buffer_id, input_shm_handle, input_buffer_size); gfx::Size coded_size(coded_size_width, coded_size_height); mojo::ScopedSharedBufferHandle output_scoped_handle = mojo::WrapSharedMemoryHandle( output_shm_handle, output_buffer_size, mojo::UnwrappedSharedMemoryHandleProtection::kReadWrite); Decode(in_buffer, coded_size, std::move(output_scoped_handle), output_buffer_size, std::move(callback)); #else NOTREACHED(); #endif } void MojoJpegDecodeAcceleratorService::Uninitialize() { // TODO(c.padhi): see http://crbug.com/699255. NOTIMPLEMENTED(); } void MojoJpegDecodeAcceleratorService::NotifyDecodeStatus( int32_t bitstream_buffer_id, ::media::JpegDecodeAccelerator::Error error) { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); auto iter = decode_cb_map_.find(bitstream_buffer_id); DCHECK(iter != decode_cb_map_.end()); DecodeCallback decode_cb = std::move(iter->second); decode_cb_map_.erase(iter); std::move(decode_cb).Run(bitstream_buffer_id, error); } } // namespace media
null
null
null
null
48,593
7,747
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
7,747
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "net/url_request/url_request.h" #include <stddef.h> #include <stdint.h> #include <memory> #include "base/run_loop.h" #include "base/test/fuzzed_data_provider.h" #include "net/base/request_priority.h" #include "net/socket/fuzzed_socket_factory.h" #include "net/traffic_annotation/network_traffic_annotation_test_helper.h" #include "net/url_request/url_request.h" #include "net/url_request/url_request_context.h" #include "net/url_request/url_request_test_util.h" #include "url/gurl.h" // Integration fuzzer for URLRequest's handling of HTTP requests. Can follow // redirects, both on the same server (using a new socket or the old one) and // across servers. // TODO(mmenke): Add support for testing HTTPS, auth, proxies, uploading, // cancelation, deferring reads / redirects, using preconnected sockets, SPDY, // QUIC, DNS failures (they all currently resolve to localhost), IPv6 DNS // results, URLs with IPs instead of hostnames (v4 and v6), etc. extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { base::FuzzedDataProvider data_provider(data, size); net::TestURLRequestContext url_request_context(true); net::FuzzedSocketFactory fuzzed_socket_factory(&data_provider); url_request_context.set_client_socket_factory(&fuzzed_socket_factory); url_request_context.Init(); net::TestDelegate delegate; std::unique_ptr<net::URLRequest> url_request( url_request_context.CreateRequest(GURL("http://foo/"), net::DEFAULT_PRIORITY, &delegate, TRAFFIC_ANNOTATION_FOR_TESTS)); url_request->Start(); // TestDelegate quits the message loop on completion. base::RunLoop().Run(); return 0; }
null
null
null
null
4,610
42,835
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
207,830
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/*
 * linux/sound/soc-dai.h -- ALSA SoC Layer
 *
 * Copyright:	2005-2008 Wolfson Microelectronics. PLC.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Digital Audio Interface (DAI) API.
 */

#ifndef __LINUX_SND_SOC_DAI_H
#define __LINUX_SND_SOC_DAI_H


#include <linux/list.h>
#include <sound/asoc.h>

struct snd_pcm_substream;
struct snd_soc_dapm_widget;
struct snd_compr_stream;

/*
 * DAI hardware audio formats.
 *
 * Describes the physical PCM data formatting and clocking. Add new formats
 * to the end.
 */
#define SND_SOC_DAIFMT_I2S		SND_SOC_DAI_FORMAT_I2S
#define SND_SOC_DAIFMT_RIGHT_J		SND_SOC_DAI_FORMAT_RIGHT_J
#define SND_SOC_DAIFMT_LEFT_J		SND_SOC_DAI_FORMAT_LEFT_J
#define SND_SOC_DAIFMT_DSP_A		SND_SOC_DAI_FORMAT_DSP_A
#define SND_SOC_DAIFMT_DSP_B		SND_SOC_DAI_FORMAT_DSP_B
#define SND_SOC_DAIFMT_AC97		SND_SOC_DAI_FORMAT_AC97
#define SND_SOC_DAIFMT_PDM		SND_SOC_DAI_FORMAT_PDM

/* left and right justified also known as MSB and LSB respectively */
#define SND_SOC_DAIFMT_MSB		SND_SOC_DAIFMT_LEFT_J
#define SND_SOC_DAIFMT_LSB		SND_SOC_DAIFMT_RIGHT_J

/*
 * DAI Clock gating.
 *
 * DAI bit clocks can be gated (disabled) when the DAI is not
 * sending or receiving PCM data in a frame. This can be used to save power.
 */
#define SND_SOC_DAIFMT_CONT		(1 << 4) /* continuous clock */
#define SND_SOC_DAIFMT_GATED		(0 << 4) /* clock is gated */

/*
 * DAI hardware signal polarity.
 *
 * Specifies whether the DAI can also support inverted clocks for the specified
 * format.
 *
 * BCLK:
 * - "normal" polarity means signal is available at rising edge of BCLK
 * - "inverted" polarity means signal is available at falling edge of BCLK
 *
 * FSYNC "normal" polarity depends on the frame format:
 * - I2S: frame consists of left then right channel data. Left channel starts
 *      with falling FSYNC edge, right channel starts with rising FSYNC edge.
 * - Left/Right Justified: frame consists of left then right channel data.
 *      Left channel starts with rising FSYNC edge, right channel starts with
 *      falling FSYNC edge.
 * - DSP A/B: Frame starts with rising FSYNC edge.
 * - AC97: Frame starts with rising FSYNC edge.
 *
 * "Negative" FSYNC polarity is the one opposite of "normal" polarity.
 */
#define SND_SOC_DAIFMT_NB_NF		(0 << 8) /* normal bit clock + frame */
#define SND_SOC_DAIFMT_NB_IF		(2 << 8) /* normal BCLK + inv FRM */
#define SND_SOC_DAIFMT_IB_NF		(3 << 8) /* invert BCLK + nor FRM */
#define SND_SOC_DAIFMT_IB_IF		(4 << 8) /* invert BCLK + FRM */

/*
 * DAI hardware clock masters.
 *
 * This is wrt the codec, the inverse is true for the interface
 * i.e. if the codec is clk and FRM master then the interface is
 * clk and frame slave.
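 * For example, SND_SOC_DAIFMT_CBM_CFM below makes the codec bit clock and
 * frame master, which means the CPU side of the link acts as bit clock and
 * frame slave.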
*/ #define SND_SOC_DAIFMT_CBM_CFM (1 << 12) /* codec clk & FRM master */ #define SND_SOC_DAIFMT_CBS_CFM (2 << 12) /* codec clk slave & FRM master */ #define SND_SOC_DAIFMT_CBM_CFS (3 << 12) /* codec clk master & frame slave */ #define SND_SOC_DAIFMT_CBS_CFS (4 << 12) /* codec clk & FRM slave */ #define SND_SOC_DAIFMT_FORMAT_MASK 0x000f #define SND_SOC_DAIFMT_CLOCK_MASK 0x00f0 #define SND_SOC_DAIFMT_INV_MASK 0x0f00 #define SND_SOC_DAIFMT_MASTER_MASK 0xf000 /* * Master Clock Directions */ #define SND_SOC_CLOCK_IN 0 #define SND_SOC_CLOCK_OUT 1 #define SND_SOC_STD_AC97_FMTS (SNDRV_PCM_FMTBIT_S8 |\ SNDRV_PCM_FMTBIT_S16_LE |\ SNDRV_PCM_FMTBIT_S16_BE |\ SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S20_3BE |\ SNDRV_PCM_FMTBIT_S24_3LE |\ SNDRV_PCM_FMTBIT_S24_3BE |\ SNDRV_PCM_FMTBIT_S32_LE |\ SNDRV_PCM_FMTBIT_S32_BE) struct snd_soc_dai_driver; struct snd_soc_dai; struct snd_ac97_bus_ops; /* Digital Audio Interface clocking API.*/ int snd_soc_dai_set_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir); int snd_soc_dai_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div); int snd_soc_dai_set_pll(struct snd_soc_dai *dai, int pll_id, int source, unsigned int freq_in, unsigned int freq_out); int snd_soc_dai_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio); /* Digital Audio interface formatting */ int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt); int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width); int snd_soc_dai_set_channel_map(struct snd_soc_dai *dai, unsigned int tx_num, unsigned int *tx_slot, unsigned int rx_num, unsigned int *rx_slot); int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate); /* Digital Audio Interface mute */ int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute, int direction); int snd_soc_dai_is_dummy(struct snd_soc_dai *dai); struct snd_soc_dai_ops { /* * DAI clocking configuration, all optional. * Called by soc_card drivers, normally in their hw_params. */ int (*set_sysclk)(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir); int (*set_pll)(struct snd_soc_dai *dai, int pll_id, int source, unsigned int freq_in, unsigned int freq_out); int (*set_clkdiv)(struct snd_soc_dai *dai, int div_id, int div); int (*set_bclk_ratio)(struct snd_soc_dai *dai, unsigned int ratio); /* * DAI format configuration * Called by soc_card drivers, normally in their hw_params. */ int (*set_fmt)(struct snd_soc_dai *dai, unsigned int fmt); int (*xlate_tdm_slot_mask)(unsigned int slots, unsigned int *tx_mask, unsigned int *rx_mask); int (*set_tdm_slot)(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width); int (*set_channel_map)(struct snd_soc_dai *dai, unsigned int tx_num, unsigned int *tx_slot, unsigned int rx_num, unsigned int *rx_slot); int (*set_tristate)(struct snd_soc_dai *dai, int tristate); /* * DAI digital mute - optional. * Called by soc-core to minimise any pops. */ int (*digital_mute)(struct snd_soc_dai *dai, int mute); int (*mute_stream)(struct snd_soc_dai *dai, int mute, int stream); /* * ALSA PCM audio operations - all optional. * Called by soc-core during audio PCM operations. 
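 * These follow the usual ALSA PCM lifecycle: startup/shutdown bracket the
 * stream, hw_params/hw_free configure it, and prepare/trigger run it.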
 */
	int (*startup)(struct snd_pcm_substream *,
		struct snd_soc_dai *);
	void (*shutdown)(struct snd_pcm_substream *,
		struct snd_soc_dai *);
	int (*hw_params)(struct snd_pcm_substream *,
		struct snd_pcm_hw_params *, struct snd_soc_dai *);
	int (*hw_free)(struct snd_pcm_substream *,
		struct snd_soc_dai *);
	int (*prepare)(struct snd_pcm_substream *,
		struct snd_soc_dai *);
	/*
	 * NOTE: Commands passed to the trigger function are not necessarily
	 * compatible with the current state of the dai. For example this
	 * sequence of commands is possible: START STOP STOP.
	 * So do not unconditionally use refcounting functions in the trigger
	 * function, e.g. clk_enable/disable.
	 */
	int (*trigger)(struct snd_pcm_substream *, int,
		struct snd_soc_dai *);
	int (*bespoke_trigger)(struct snd_pcm_substream *, int,
		struct snd_soc_dai *);
	/*
	 * For hardware based FIFO caused delay reporting.
	 * Optional.
	 */
	snd_pcm_sframes_t (*delay)(struct snd_pcm_substream *,
		struct snd_soc_dai *);
};

struct snd_soc_cdai_ops {
	/*
	 * for compress ops
	 */
	int (*startup)(struct snd_compr_stream *,
			struct snd_soc_dai *);
	int (*shutdown)(struct snd_compr_stream *,
			struct snd_soc_dai *);
	int (*set_params)(struct snd_compr_stream *,
			struct snd_compr_params *, struct snd_soc_dai *);
	int (*get_params)(struct snd_compr_stream *,
			struct snd_codec *, struct snd_soc_dai *);
	int (*set_metadata)(struct snd_compr_stream *,
			struct snd_compr_metadata *, struct snd_soc_dai *);
	int (*get_metadata)(struct snd_compr_stream *,
			struct snd_compr_metadata *, struct snd_soc_dai *);
	int (*trigger)(struct snd_compr_stream *, int,
			struct snd_soc_dai *);
	int (*pointer)(struct snd_compr_stream *,
			struct snd_compr_tstamp *, struct snd_soc_dai *);
	int (*ack)(struct snd_compr_stream *, size_t,
			struct snd_soc_dai *);
};

/*
 * Digital Audio Interface Driver.
 *
 * Describes the Digital Audio Interface in terms of its ALSA, DAI and AC97
 * operations and capabilities. Codec and platform drivers will register this
 * structure for every DAI they have.
 *
 * This structure covers the clocking, formatting and ALSA operations for each
 * interface.
 */
struct snd_soc_dai_driver {
	/* DAI description */
	const char *name;
	unsigned int id;
	unsigned int base;
	struct snd_soc_dobj dobj;

	/* DAI driver callbacks */
	int (*probe)(struct snd_soc_dai *dai);
	int (*remove)(struct snd_soc_dai *dai);
	int (*suspend)(struct snd_soc_dai *dai);
	int (*resume)(struct snd_soc_dai *dai);
	/* compress dai */
	int (*compress_new)(struct snd_soc_pcm_runtime *rtd, int num);
	/* Optional Callback used at pcm creation */
	int (*pcm_new)(struct snd_soc_pcm_runtime *rtd,
		       struct snd_soc_dai *dai);
	/* DAI is also used for the control bus */
	bool bus_control;

	/* ops */
	const struct snd_soc_dai_ops *ops;
	const struct snd_soc_cdai_ops *cops;

	/* DAI capabilities */
	struct snd_soc_pcm_stream capture;
	struct snd_soc_pcm_stream playback;
	unsigned int symmetric_rates:1;
	unsigned int symmetric_channels:1;
	unsigned int symmetric_samplebits:1;

	/* probe ordering - for components with runtime dependencies */
	int probe_order;
	int remove_order;
};

/*
 * Digital Audio Interface runtime data.
 *
 * Holds runtime data for a DAI.
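 * This includes per-stream activity flags, DMA data pointers, and the
 * rate/channel/sample-width values recorded while symmetry is enforced.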
*/ struct snd_soc_dai { const char *name; int id; struct device *dev; /* driver ops */ struct snd_soc_dai_driver *driver; /* DAI runtime info */ unsigned int capture_active:1; /* stream is in use */ unsigned int playback_active:1; /* stream is in use */ unsigned int symmetric_rates:1; unsigned int symmetric_channels:1; unsigned int symmetric_samplebits:1; unsigned int probed:1; unsigned int active; struct snd_soc_dapm_widget *playback_widget; struct snd_soc_dapm_widget *capture_widget; /* DAI DMA data */ void *playback_dma_data; void *capture_dma_data; /* Symmetry data - only valid if symmetry is being enforced */ unsigned int rate; unsigned int channels; unsigned int sample_bits; /* parent platform/codec */ struct snd_soc_codec *codec; struct snd_soc_component *component; /* CODEC TDM slot masks and params (for fixup) */ unsigned int tx_mask; unsigned int rx_mask; struct list_head list; }; static inline void *snd_soc_dai_get_dma_data(const struct snd_soc_dai *dai, const struct snd_pcm_substream *ss) { return (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) ? dai->playback_dma_data : dai->capture_dma_data; } static inline void snd_soc_dai_set_dma_data(struct snd_soc_dai *dai, const struct snd_pcm_substream *ss, void *data) { if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) dai->playback_dma_data = data; else dai->capture_dma_data = data; } static inline void snd_soc_dai_init_dma_data(struct snd_soc_dai *dai, void *playback, void *capture) { dai->playback_dma_data = playback; dai->capture_dma_data = capture; } static inline void snd_soc_dai_set_drvdata(struct snd_soc_dai *dai, void *data) { dev_set_drvdata(dai->dev, data); } static inline void *snd_soc_dai_get_drvdata(struct snd_soc_dai *dai) { return dev_get_drvdata(dai->dev); } #endif
null
null
null
null
116,177
28,952
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
28,952
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef GOOGLE_CACHEINVALIDATION_DEPS_CALLBACK_H_
#define GOOGLE_CACHEINVALIDATION_DEPS_CALLBACK_H_

#include <memory>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"

#define INVALIDATION_CALLBACK1_TYPE(Arg1) ::base::Callback<void(Arg1)>

// Below is a collection of types and functions that adapt base::Callback's
// pass-by-value semantics to the pointer-based callback system that
// cacheinvalidation needs.

namespace invalidation {

typedef ::base::Closure Closure;

template <class T>
bool IsCallbackRepeatable(const T* callback) {
  // The default cacheinvalidation Callbacks may be self-deleting. We don't
  // support this behavior, so we always return true to indicate that the
  // cacheinvalidation implementation should delete our Callbacks.
  return true;
}

namespace internal {

// Identity<T>::type is a typedef of T. Useful for preventing the
// compiler from inferring the type of an argument in templates.
template <typename T>
struct Identity {
  typedef T type;
};

}  // namespace internal

// The cacheinvalidation callback system expects to take the callback by
// pointer and handle the ownership semantics itself. Adapting the
// Chromium Callback system requires returning a dynamically allocated
// copy of the result of Bind().

inline Closure* NewPermanentCallback(void (*fn)()) {
  return new ::base::Closure(::base::Bind(fn));
}

template <class T1, class T2>
Closure* NewPermanentCallback(
    T1* object, void (T2::*method)()) {
  return new ::base::Closure(::base::Bind(method, base::Unretained(object)));
}

template <class T1, class T2, typename Arg1>
::base::Callback<void(Arg1)>* NewPermanentCallback(
    T1* object, void (T2::*method)(Arg1)) {
  return new ::base::Callback<void(Arg1)>(
      ::base::Bind(method, base::Unretained(object)));
}

template <class T1, class T2, typename Arg1>
Closure* NewPermanentCallback(
    T1* object, void (T2::*method)(Arg1),
    typename internal::Identity<Arg1>::type arg1) {
  return new ::base::Closure(::base::Bind(method, base::Unretained(object),
                                          arg1));
}

template <typename Arg1, typename Arg2>
Closure* NewPermanentCallback(
    void (*fn)(Arg1, Arg2),
    typename internal::Identity<Arg1>::type arg1,
    typename internal::Identity<Arg2>::type arg2) {
  return new ::base::Closure(::base::Bind(fn, arg1, arg2));
}

template <class T1, class T2, typename Arg1, typename Arg2>
Closure* NewPermanentCallback(
    T1* object, void (T2::*method)(Arg1, Arg2),
    typename internal::Identity<Arg1>::type arg1,
    typename internal::Identity<Arg2>::type arg2) {
  return new ::base::Closure(::base::Bind(method, base::Unretained(object),
                                          arg1, arg2));
}

template <class T1, class T2, typename Arg1, typename Arg2>
::base::Callback<void(Arg2)>* NewPermanentCallback(
    T1* object, void (T2::*method)(Arg1, Arg2),
    typename internal::Identity<Arg1>::type arg1) {
  return new ::base::Callback<void(Arg2)>(
      ::base::Bind(method, base::Unretained(object), arg1));
}

template <class T1, class T2, typename Arg1, typename Arg2, typename Arg3>
Closure* NewPermanentCallback(
    T1* object,
    void (T2::*method)(Arg1, Arg2, Arg3),
    typename internal::Identity<Arg1>::type arg1,
    typename internal::Identity<Arg2>::type arg2,
    typename internal::Identity<Arg3>::type arg3) {
  return new ::base::Closure(::base::Bind(method, base::Unretained(object),
                                          arg1, arg2, arg3));
}

template <class T1, class T2, typename Arg1, typename Arg2, typename Arg3,
          typename Arg4>
Closure*
NewPermanentCallback( T1* object, void (T2::*method)(Arg1, Arg2, Arg3, Arg4), typename internal::Identity<Arg1>::type arg1, typename internal::Identity<Arg2>::type arg2, typename internal::Identity<Arg3>::type arg3, typename internal::Identity<Arg4>::type arg4) { return new ::base::Closure(::base::Bind(method, base::Unretained(object), arg1, arg2, arg3, arg4)); } // Creates a Closure that runs |callback| on |arg|. The returned Closure owns // |callback|. template <typename ArgType> Closure* NewPermanentCallback( INVALIDATION_CALLBACK1_TYPE(ArgType)* callback, typename internal::Identity<ArgType>::type arg) { std::unique_ptr<::base::Callback<void(ArgType)>> deleter(callback); return new ::base::Closure(::base::Bind(*callback, arg)); } } // namespace invalidation #endif // GOOGLE_CACHEINVALIDATION_DEPS_CALLBACK_H_
null
null
null
null
25,815
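The callback.h record above adapts base::Callback's pass-by-value semantics to cacheinvalidation's owned-pointer model by heap-allocating the bound closure. Below is a minimal standalone sketch of that adapter shape, with std::function standing in for base::Closure and a lambda in place of base::Bind; both stand-ins are assumptions for illustration, not Chromium API.

#include <functional>
#include <memory>

// Hypothetical stand-in for base::Closure, so the adapter pattern can be
// tried outside Chromium.
using Closure = std::function<void()>;

// Mirrors NewPermanentCallback(T1*, void (T2::*)()): the caller receives a
// heap-allocated closure and owns it, matching cacheinvalidation's
// pointer-based ownership model. |object| must outlive the closure, just as
// with base::Unretained.
template <class T1, class T2>
Closure* NewPermanentCallback(T1* object, void (T2::*method)()) {
  return new Closure([object, method] { (object->*method)(); });
}

struct Listener {
  int calls = 0;
  void OnEvent() { ++calls; }
};

int main() {
  Listener listener;
  std::unique_ptr<Closure> cb(
      NewPermanentCallback(&listener, &Listener::OnEvent));
  (*cb)();  // invokes listener.OnEvent()
  return listener.calls == 1 ? 0 : 1;
}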
38,193
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
38,193
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/blink/public/platform/web_surface_layer_bridge.h"

#include <memory>

#include "third_party/blink/renderer/platform/graphics/surface_layer_bridge.h"

namespace blink {

std::unique_ptr<WebSurfaceLayerBridge> WebSurfaceLayerBridge::Create(
    WebLayerTreeView* layer_tree_view,
    WebSurfaceLayerBridgeObserver* observer) {
  return std::make_unique<SurfaceLayerBridge>(layer_tree_view, observer);
}

WebSurfaceLayerBridge::~WebSurfaceLayerBridge() = default;

}  // namespace blink
null
null
null
null
35,056
904
1,2,3
train_val
be0726d33cb8f411945884664924bed3cb8c70ee
165,899
linux
1
https://github.com/torvalds/linux
2016-02-22 11:56:38-05:00
ext2_xattr_put_super(struct super_block *sb)
{
	mb_cache_shrink(sb->s_bdev);
}
CVE-2015-8952
CWE-19
https://github.com/torvalds/linux/commit/be0726d33cb8f411945884664924bed3cb8c70ee
Low
3,793
603
null
train_val
a6802e21d824e786d1e2a8440cf749a6e1a8d95f
160,731
ImageMagick
0
https://github.com/ImageMagick/ImageMagick
2017-07-18 18:28:29-04:00
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF DDDD % % F D D % % FFF D D % % F D D % % F DDDD % % % % % % Retrieve An Image Via a File Descriptor. % % % % Software Design % % Cristy % % Bill Radcliffe % % March 2000 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/constitute.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d F D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadFDImage retrieves an image via a file descriptor, decodes the image, % and returns it. It allocates the memory necessary for the new Image % structure and returns a pointer to the new image. % % The format of the ReadFDImage method is: % % Image *ReadFDImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadFDImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; ImageInfo *read_info; /* Open image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); read_info=CloneImageInfo(image_info); read_info->file=fdopen(StringToLong(image_info->filename),"rb"); if ((read_info->file == (FILE *) NULL) || (IsGeometry(image_info->filename) == MagickFalse)) { read_info=DestroyImageInfo(read_info); ThrowFileException(exception,BlobError,"UnableToOpenBlob", image_info->filename); return((Image *) NULL); } *read_info->magick='\0'; image=ReadImage(read_info,exception); (void) fclose(read_info->file); read_info=DestroyImageInfo(read_info); if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),CoderError, "NoDataReturned","`%s'",image_info->filename); return((Image *) NULL); } return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r F D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterFDImage() adds attributes for the FD image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterFDImage method is: % % size_t RegisterFDImage(void) % */ ModuleExport size_t RegisterFDImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("FD","FD","Read image from a file descriptor"); entry->decoder=(DecodeImageHandler *) ReadFDImage; entry->flags|=CoderStealthFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r F D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterFDImage() removes format registrations made by the FD module from % the list of supported formats. % % The format of the UnregisterFDImage method is: % % UnregisterFDImage(void) % */ ModuleExport void UnregisterFDImage(void) { (void) UnregisterMagickInfo("FD"); }
null
null
null
null
73,024
41,094
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
206,089
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef _LINUX_COMPACTION_H #define _LINUX_COMPACTION_H /* * Determines how hard direct compaction should try to succeed. * Lower value means higher priority, analogically to reclaim priority. */ enum compact_priority { COMPACT_PRIO_SYNC_FULL, MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_FULL, COMPACT_PRIO_SYNC_LIGHT, MIN_COMPACT_COSTLY_PRIORITY = COMPACT_PRIO_SYNC_LIGHT, DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT, COMPACT_PRIO_ASYNC, INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC }; /* Return values for compact_zone() and try_to_compact_pages() */ /* When adding new states, please adjust include/trace/events/compaction.h */ enum compact_result { /* For more detailed tracepoint output - internal to compaction */ COMPACT_NOT_SUITABLE_ZONE, /* * compaction didn't start as it was not possible or direct reclaim * was more suitable */ COMPACT_SKIPPED, /* compaction didn't start as it was deferred due to past failures */ COMPACT_DEFERRED, /* compaction not active last round */ COMPACT_INACTIVE = COMPACT_DEFERRED, /* For more detailed tracepoint output - internal to compaction */ COMPACT_NO_SUITABLE_PAGE, /* compaction should continue to another pageblock */ COMPACT_CONTINUE, /* * The full zone was compacted scanned but wasn't successfull to compact * suitable pages. */ COMPACT_COMPLETE, /* * direct compaction has scanned part of the zone but wasn't successfull * to compact suitable pages. */ COMPACT_PARTIAL_SKIPPED, /* compaction terminated prematurely due to lock contentions */ COMPACT_CONTENDED, /* * direct compaction terminated after concluding that the allocation * should now succeed */ COMPACT_SUCCESS, }; struct alloc_context; /* in mm/internal.h */ /* * Number of free order-0 pages that should be available above given watermark * to make sure compaction has reasonable chance of not running out of free * pages that it needs to isolate as migration target during its work. */ static inline unsigned long compact_gap(unsigned int order) { /* * Although all the isolations for migration are temporary, compaction * free scanner may have up to 1 << order pages on its list and then * try to split an (order - 1) free page. At that point, a gap of * 1 << order might not be enough, so it's safer to require twice that * amount. Note that the number of pages on the list is also * effectively limited by COMPACT_CLUSTER_MAX, as that's the maximum * that the migrate scanner can have isolated on migrate list, and free * scanner is only invoked when the number of isolated free pages is * lower than that. But it's not worth to complicate the formula here * as a bigger gap for higher orders than strictly necessary can also * improve chances of compaction success. 
*/ return 2UL << order; } #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; extern int sysctl_compaction_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos); extern int sysctl_extfrag_threshold; extern int sysctl_extfrag_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos); extern int sysctl_compact_unevictable_allowed; extern int fragmentation_index(struct zone *zone, unsigned int order); extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio); extern void reset_isolation_suitable(pg_data_t *pgdat); extern enum compact_result compaction_suitable(struct zone *zone, int order, unsigned int alloc_flags, int classzone_idx); extern void defer_compaction(struct zone *zone, int order); extern bool compaction_deferred(struct zone *zone, int order); extern void compaction_defer_reset(struct zone *zone, int order, bool alloc_success); extern bool compaction_restarting(struct zone *zone, int order); /* Compaction has made some progress and retrying makes sense */ static inline bool compaction_made_progress(enum compact_result result) { /* * Even though this might sound confusing this in fact tells us * that the compaction successfully isolated and migrated some * pageblocks. */ if (result == COMPACT_SUCCESS) return true; return false; } /* Compaction has failed and it doesn't make much sense to keep retrying. */ static inline bool compaction_failed(enum compact_result result) { /* All zones were scanned completely and still not result. */ if (result == COMPACT_COMPLETE) return true; return false; } /* * Compaction has backed off for some reason. It might be throttling or * lock contention. Retrying is still worthwhile. */ static inline bool compaction_withdrawn(enum compact_result result) { /* * Compaction backed off due to watermark checks for order-0 * so the regular reclaim has to try harder and reclaim something. */ if (result == COMPACT_SKIPPED) return true; /* * If compaction is deferred for high-order allocations, it is * because sync compaction recently failed. If this is the case * and the caller requested a THP allocation, we do not want * to heavily disrupt the system, so we fail the allocation * instead of entering direct reclaim. */ if (result == COMPACT_DEFERRED) return true; /* * If compaction in async mode encounters contention or blocks higher * priority task we back off early rather than cause stalls. */ if (result == COMPACT_CONTENDED) return true; /* * Page scanners have met but we haven't scanned full zones so this * is a back off in fact. 
*/ if (result == COMPACT_PARTIAL_SKIPPED) return true; return false; } bool compaction_zonelist_suitable(struct alloc_context *ac, int order, int alloc_flags); extern int kcompactd_run(int nid); extern void kcompactd_stop(int nid); extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx); #else static inline void reset_isolation_suitable(pg_data_t *pgdat) { } static inline enum compact_result compaction_suitable(struct zone *zone, int order, int alloc_flags, int classzone_idx) { return COMPACT_SKIPPED; } static inline void defer_compaction(struct zone *zone, int order) { } static inline bool compaction_deferred(struct zone *zone, int order) { return true; } static inline bool compaction_made_progress(enum compact_result result) { return false; } static inline bool compaction_failed(enum compact_result result) { return false; } static inline bool compaction_withdrawn(enum compact_result result) { return true; } static inline int kcompactd_run(int nid) { return 0; } static inline void kcompactd_stop(int nid) { } static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) { } #endif /* CONFIG_COMPACTION */ #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) struct node; extern int compaction_register_node(struct node *node); extern void compaction_unregister_node(struct node *node); #else static inline int compaction_register_node(struct node *node) { return 0; } static inline void compaction_unregister_node(struct node *node) { } #endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */ #endif /* _LINUX_COMPACTION_H */
null
null
null
null
114,436
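compact_gap() in the compaction.h record above reduces to a single shift: twice the 1 << order pages the free scanner may hold. A self-contained check of that arithmetic (the order range is arbitrary, chosen for illustration):

#include <cstdio>

// Same arithmetic as compact_gap() in the record: 2 * (1 << order),
// written as one shift.
static unsigned long compact_gap(unsigned int order) {
  return 2UL << order;
}

int main() {
  for (unsigned int order = 0; order <= 4; ++order)
    std::printf("order %u -> gap of %lu order-0 pages\n",
                order, compact_gap(order));
  // Prints 2, 4, 8, 16, 32.
  return 0;
}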
30,222
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
195,217
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Driver for the Atmel USBA high speed USB device controller * * Copyright (C) 2005-2007 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef __LINUX_USB_GADGET_USBA_UDC_H__ #define __LINUX_USB_GADGET_USBA_UDC_H__ /* USB register offsets */ #define USBA_CTRL 0x0000 #define USBA_FNUM 0x0004 #define USBA_INT_ENB 0x0010 #define USBA_INT_STA 0x0014 #define USBA_INT_CLR 0x0018 #define USBA_EPT_RST 0x001c #define USBA_TST 0x00e0 /* USB endpoint register offsets */ #define USBA_EPT_CFG 0x0000 #define USBA_EPT_CTL_ENB 0x0004 #define USBA_EPT_CTL_DIS 0x0008 #define USBA_EPT_CTL 0x000c #define USBA_EPT_SET_STA 0x0014 #define USBA_EPT_CLR_STA 0x0018 #define USBA_EPT_STA 0x001c /* USB DMA register offsets */ #define USBA_DMA_NXT_DSC 0x0000 #define USBA_DMA_ADDRESS 0x0004 #define USBA_DMA_CONTROL 0x0008 #define USBA_DMA_STATUS 0x000c /* Bitfields in CTRL */ #define USBA_DEV_ADDR_OFFSET 0 #define USBA_DEV_ADDR_SIZE 7 #define USBA_FADDR_EN (1 << 7) #define USBA_EN_USBA (1 << 8) #define USBA_DETACH (1 << 9) #define USBA_REMOTE_WAKE_UP (1 << 10) #define USBA_PULLD_DIS (1 << 11) #if defined(CONFIG_AVR32) #define USBA_ENABLE_MASK USBA_EN_USBA #define USBA_DISABLE_MASK 0 #elif defined(CONFIG_ARCH_AT91) #define USBA_ENABLE_MASK (USBA_EN_USBA | USBA_PULLD_DIS) #define USBA_DISABLE_MASK USBA_DETACH #endif /* CONFIG_ARCH_AT91 */ /* Bitfields in FNUM */ #define USBA_MICRO_FRAME_NUM_OFFSET 0 #define USBA_MICRO_FRAME_NUM_SIZE 3 #define USBA_FRAME_NUMBER_OFFSET 3 #define USBA_FRAME_NUMBER_SIZE 11 #define USBA_FRAME_NUM_ERROR (1 << 31) /* Bitfields in INT_ENB/INT_STA/INT_CLR */ #define USBA_HIGH_SPEED (1 << 0) #define USBA_DET_SUSPEND (1 << 1) #define USBA_MICRO_SOF (1 << 2) #define USBA_SOF (1 << 3) #define USBA_END_OF_RESET (1 << 4) #define USBA_WAKE_UP (1 << 5) #define USBA_END_OF_RESUME (1 << 6) #define USBA_UPSTREAM_RESUME (1 << 7) #define USBA_EPT_INT_OFFSET 8 #define USBA_EPT_INT_SIZE 16 #define USBA_DMA_INT_OFFSET 24 #define USBA_DMA_INT_SIZE 8 /* Bitfields in EPT_RST */ #define USBA_RST_OFFSET 0 #define USBA_RST_SIZE 16 /* Bitfields in USBA_TST */ #define USBA_SPEED_CFG_OFFSET 0 #define USBA_SPEED_CFG_SIZE 2 #define USBA_TST_J_MODE (1 << 2) #define USBA_TST_K_MODE (1 << 3) #define USBA_TST_PKT_MODE (1 << 4) #define USBA_OPMODE2 (1 << 5) /* Bitfields in EPT_CFG */ #define USBA_EPT_SIZE_OFFSET 0 #define USBA_EPT_SIZE_SIZE 3 #define USBA_EPT_DIR_IN (1 << 3) #define USBA_EPT_TYPE_OFFSET 4 #define USBA_EPT_TYPE_SIZE 2 #define USBA_BK_NUMBER_OFFSET 6 #define USBA_BK_NUMBER_SIZE 2 #define USBA_NB_TRANS_OFFSET 8 #define USBA_NB_TRANS_SIZE 2 #define USBA_EPT_MAPPED (1 << 31) /* Bitfields in EPT_CTL/EPT_CTL_ENB/EPT_CTL_DIS */ #define USBA_EPT_ENABLE (1 << 0) #define USBA_AUTO_VALID (1 << 1) #define USBA_INTDIS_DMA (1 << 3) #define USBA_NYET_DIS (1 << 4) #define USBA_DATAX_RX (1 << 6) #define USBA_MDATA_RX (1 << 7) /* Bits 8-15 and 31 enable interrupts for respective bits in EPT_STA */ #define USBA_BUSY_BANK_IE (1 << 18) /* Bitfields in EPT_SET_STA/EPT_CLR_STA/EPT_STA */ #define USBA_FORCE_STALL (1 << 5) #define USBA_TOGGLE_CLR (1 << 6) #define USBA_TOGGLE_SEQ_OFFSET 6 #define USBA_TOGGLE_SEQ_SIZE 2 #define USBA_ERR_OVFLW (1 << 8) #define USBA_RX_BK_RDY (1 << 9) #define USBA_KILL_BANK (1 << 9) #define USBA_TX_COMPLETE (1 << 10) #define USBA_TX_PK_RDY (1 << 11) #define USBA_ISO_ERR_TRANS (1 << 11) #define USBA_RX_SETUP (1 << 12) #define USBA_ISO_ERR_FLOW (1 
<< 12) #define USBA_STALL_SENT (1 << 13) #define USBA_ISO_ERR_CRC (1 << 13) #define USBA_ISO_ERR_NBTRANS (1 << 13) #define USBA_NAK_IN (1 << 14) #define USBA_ISO_ERR_FLUSH (1 << 14) #define USBA_NAK_OUT (1 << 15) #define USBA_CURRENT_BANK_OFFSET 16 #define USBA_CURRENT_BANK_SIZE 2 #define USBA_BUSY_BANKS_OFFSET 18 #define USBA_BUSY_BANKS_SIZE 2 #define USBA_BYTE_COUNT_OFFSET 20 #define USBA_BYTE_COUNT_SIZE 11 #define USBA_SHORT_PACKET (1 << 31) /* Bitfields in DMA_CONTROL */ #define USBA_DMA_CH_EN (1 << 0) #define USBA_DMA_LINK (1 << 1) #define USBA_DMA_END_TR_EN (1 << 2) #define USBA_DMA_END_BUF_EN (1 << 3) #define USBA_DMA_END_TR_IE (1 << 4) #define USBA_DMA_END_BUF_IE (1 << 5) #define USBA_DMA_DESC_LOAD_IE (1 << 6) #define USBA_DMA_BURST_LOCK (1 << 7) #define USBA_DMA_BUF_LEN_OFFSET 16 #define USBA_DMA_BUF_LEN_SIZE 16 /* Bitfields in DMA_STATUS */ #define USBA_DMA_CH_ACTIVE (1 << 1) #define USBA_DMA_END_TR_ST (1 << 4) #define USBA_DMA_END_BUF_ST (1 << 5) #define USBA_DMA_DESC_LOAD_ST (1 << 6) /* Constants for SPEED_CFG */ #define USBA_SPEED_CFG_NORMAL 0 #define USBA_SPEED_CFG_FORCE_HIGH 2 #define USBA_SPEED_CFG_FORCE_FULL 3 /* Constants for EPT_SIZE */ #define USBA_EPT_SIZE_8 0 #define USBA_EPT_SIZE_16 1 #define USBA_EPT_SIZE_32 2 #define USBA_EPT_SIZE_64 3 #define USBA_EPT_SIZE_128 4 #define USBA_EPT_SIZE_256 5 #define USBA_EPT_SIZE_512 6 #define USBA_EPT_SIZE_1024 7 /* Constants for EPT_TYPE */ #define USBA_EPT_TYPE_CONTROL 0 #define USBA_EPT_TYPE_ISO 1 #define USBA_EPT_TYPE_BULK 2 #define USBA_EPT_TYPE_INT 3 /* Constants for BK_NUMBER */ #define USBA_BK_NUMBER_ZERO 0 #define USBA_BK_NUMBER_ONE 1 #define USBA_BK_NUMBER_DOUBLE 2 #define USBA_BK_NUMBER_TRIPLE 3 /* Bit manipulation macros */ #define USBA_BF(name, value) \ (((value) & ((1 << USBA_##name##_SIZE) - 1)) \ << USBA_##name##_OFFSET) #define USBA_BFEXT(name, value) \ (((value) >> USBA_##name##_OFFSET) \ & ((1 << USBA_##name##_SIZE) - 1)) #define USBA_BFINS(name, value, old) \ (((old) & ~(((1 << USBA_##name##_SIZE) - 1) \ << USBA_##name##_OFFSET)) \ | USBA_BF(name, value)) /* Register access macros */ #ifdef CONFIG_AVR32 #define usba_io_readl __raw_readl #define usba_io_writel __raw_writel #define usba_io_writew __raw_writew #else #define usba_io_readl readl_relaxed #define usba_io_writel writel_relaxed #define usba_io_writew writew_relaxed #endif #define usba_readl(udc, reg) \ usba_io_readl((udc)->regs + USBA_##reg) #define usba_writel(udc, reg, value) \ usba_io_writel((value), (udc)->regs + USBA_##reg) #define usba_ep_readl(ep, reg) \ usba_io_readl((ep)->ep_regs + USBA_EPT_##reg) #define usba_ep_writel(ep, reg, value) \ usba_io_writel((value), (ep)->ep_regs + USBA_EPT_##reg) #define usba_dma_readl(ep, reg) \ usba_io_readl((ep)->dma_regs + USBA_DMA_##reg) #define usba_dma_writel(ep, reg, value) \ usba_io_writel((value), (ep)->dma_regs + USBA_DMA_##reg) /* Calculate base address for a given endpoint or DMA controller */ #define USBA_EPT_BASE(x) (0x100 + (x) * 0x20) #define USBA_DMA_BASE(x) (0x300 + (x) * 0x10) #define USBA_FIFO_BASE(x) ((x) << 16) /* Synth parameters */ #define USBA_NR_DMAS 7 #define EP0_FIFO_SIZE 64 #define EP0_EPT_SIZE USBA_EPT_SIZE_64 #define EP0_NR_BANKS 1 #define FIFO_IOMEM_ID 0 #define CTRL_IOMEM_ID 1 #define DBG_ERR 0x0001 /* report all error returns */ #define DBG_HW 0x0002 /* debug hardware initialization */ #define DBG_GADGET 0x0004 /* calls to/from gadget driver */ #define DBG_INT 0x0008 /* interrupts */ #define DBG_BUS 0x0010 /* report changes in bus state */ #define DBG_QUEUE 0x0020 /* debug request 
queue processing */ #define DBG_FIFO 0x0040 /* debug FIFO contents */ #define DBG_DMA 0x0080 /* debug DMA handling */ #define DBG_REQ 0x0100 /* print out queued request length */ #define DBG_ALL 0xffff #define DBG_NONE 0x0000 #define DEBUG_LEVEL (DBG_ERR) #define DBG(level, fmt, ...) \ do { \ if ((level) & DEBUG_LEVEL) \ pr_debug("udc: " fmt, ## __VA_ARGS__); \ } while (0) enum usba_ctrl_state { WAIT_FOR_SETUP, DATA_STAGE_IN, DATA_STAGE_OUT, STATUS_STAGE_IN, STATUS_STAGE_OUT, STATUS_STAGE_ADDR, STATUS_STAGE_TEST, }; /* EP_STATE_IDLE, EP_STATE_SETUP, EP_STATE_IN_DATA, EP_STATE_OUT_DATA, EP_STATE_SET_ADDR_STATUS, EP_STATE_RX_STATUS, EP_STATE_TX_STATUS, EP_STATE_HALT, */ struct usba_dma_desc { dma_addr_t next; dma_addr_t addr; u32 ctrl; }; struct usba_fifo_cfg { u8 hw_ep_num; u16 fifo_size; u8 nr_banks; }; struct usba_ep { int state; void __iomem *ep_regs; void __iomem *dma_regs; void __iomem *fifo; char name[8]; struct usb_ep ep; struct usba_udc *udc; struct list_head queue; u16 fifo_size; u8 nr_banks; u8 index; unsigned int can_dma:1; unsigned int can_isoc:1; unsigned int is_isoc:1; unsigned int is_in:1; unsigned long ept_cfg; #ifdef CONFIG_USB_GADGET_DEBUG_FS u32 last_dma_status; struct dentry *debugfs_dir; struct dentry *debugfs_queue; struct dentry *debugfs_dma_status; struct dentry *debugfs_state; #endif }; struct usba_request { struct usb_request req; struct list_head queue; u32 ctrl; unsigned int submitted:1; unsigned int last_transaction:1; unsigned int using_dma:1; unsigned int mapped:1; }; struct usba_udc_errata { void (*toggle_bias)(struct usba_udc *udc, int is_on); void (*pulse_bias)(struct usba_udc *udc); }; struct usba_udc { /* Protect hw registers from concurrent modifications */ spinlock_t lock; /* Mutex to prevent concurrent start or stop */ struct mutex vbus_mutex; void __iomem *regs; void __iomem *fifo; struct usb_gadget gadget; struct usb_gadget_driver *driver; struct platform_device *pdev; const struct usba_udc_errata *errata; int irq; int vbus_pin; int vbus_pin_inverted; int num_ep; int configured_ep; struct usba_fifo_cfg *fifo_cfg; struct clk *pclk; struct clk *hclk; struct usba_ep *usba_ep; bool bias_pulse_needed; bool clocked; u16 devstatus; u16 test_mode; int vbus_prev; u32 int_enb_cache; #ifdef CONFIG_USB_GADGET_DEBUG_FS struct dentry *debugfs_root; struct dentry *debugfs_regs; #endif struct regmap *pmc; }; static inline struct usba_ep *to_usba_ep(struct usb_ep *ep) { return container_of(ep, struct usba_ep, ep); } static inline struct usba_request *to_usba_req(struct usb_request *req) { return container_of(req, struct usba_request, req); } static inline struct usba_udc *to_usba_udc(struct usb_gadget *gadget) { return container_of(gadget, struct usba_udc, gadget); } #define ep_is_control(ep) ((ep)->index == 0) #define ep_is_idle(ep) ((ep)->state == EP_STATE_IDLE) #endif /* __LINUX_USB_GADGET_USBA_UDC_H */
null
null
null
null
103,564
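The USBA_BF/USBA_BFEXT/USBA_BFINS macros in the usba_udc.h record implement a generic offset/size bit-field scheme via token pasting. A standalone sketch of the same pattern follows, using a made-up DEV_ADDR field rather than the driver's actual register layout:

#include <cstdint>
#include <cstdio>

// Offset/size pair in the style of USBA_DEV_ADDR_OFFSET/SIZE (values are
// illustrative).
#define DEV_ADDR_OFFSET 0
#define DEV_ADDR_SIZE   7

// Same shape as USBA_BF / USBA_BFEXT / USBA_BFINS from the record: build a
// field, extract a field, and replace a field inside a register word.
#define BF(name, value) \
  (((value) & ((1u << name##_SIZE) - 1)) << name##_OFFSET)
#define BFEXT(name, reg) \
  (((reg) >> name##_OFFSET) & ((1u << name##_SIZE) - 1))
#define BFINS(name, value, old) \
  (((old) & ~(((1u << name##_SIZE) - 1) << name##_OFFSET)) | BF(name, value))

int main() {
  uint32_t ctrl = 0;
  ctrl = BFINS(DEV_ADDR, 0x2a, ctrl);                 // insert address 42
  std::printf("addr = %u\n", BFEXT(DEV_ADDR, ctrl));  // prints 42
  return 0;
}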
538
null
train_val
a6802e21d824e786d1e2a8440cf749a6e1a8d95f
160,666
ImageMagick
0
https://github.com/ImageMagick/ImageMagick
2017-07-18 18:28:29-04:00
// This may look like C code, but it is really -*- C++ -*-
//
// Copyright Bob Friesenhahn, 1999, 2000, 2001, 2002, 2004
// Copyright Dirk Lemstra 2014-2015
//
// Implementation of Blob
//

#define MAGICKCORE_IMPLEMENTATION  1
#define MAGICK_PLUSPLUS_IMPLEMENTATION 1

#include "Magick++/Include.h"
#include "Magick++/BlobRef.h"
#include "Magick++/Exception.h"
#include "Magick++/Thread.h"

#include <string.h>

Magick::BlobRef::BlobRef(const void* data_,const size_t length_)
  : allocator(Magick::Blob::NewAllocator),
    length(length_),
    data((void*) NULL),
    _mutexLock(),
    _refCount(1)
{
  if (data_ != (const void*) NULL)
    {
      data=new unsigned char[length_];
      memcpy(data,data_,length_);
    }
}

Magick::BlobRef::~BlobRef(void)
{
  if (allocator == Magick::Blob::NewAllocator)
    {
      delete[] static_cast<unsigned char*>(data);
      data=(void *) NULL;
    }
  else if (allocator == Magick::Blob::MallocAllocator)
    data=(void *) RelinquishMagickMemory(data);
}

size_t Magick::BlobRef::decrease()
{
  size_t
    count;

  _mutexLock.lock();
  if (_refCount == 0)
    {
      _mutexLock.unlock();
      throwExceptionExplicit(MagickCore::OptionError,
        "Invalid call to decrease");
      return(0);
    }
  count=--_refCount;
  _mutexLock.unlock();
  return(count);
}

void Magick::BlobRef::increase()
{
  _mutexLock.lock();
  _refCount++;
  _mutexLock.unlock();
}
null
null
null
null
72,959
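BlobRef in the record above guards a plain counter with an explicit lock and reports an error on underflow. A compilable sketch of that scheme, with std::mutex in place of Magick++'s MutexLock; the silent underflow handling is a simplification of the library's OptionError path, not its actual behavior:

#include <cstddef>
#include <mutex>

// Mirrors BlobRef's increase()/decrease() pair; std::lock_guard replaces the
// explicit lock()/unlock() calls around the counter.
class RefCount {
 public:
  void increase() {
    std::lock_guard<std::mutex> lock(mutex_);
    ++count_;
  }

  // Returns the remaining count; the owner destroys the shared state when
  // this reaches zero, as Blob does with BlobRef::decrease().
  std::size_t decrease() {
    std::lock_guard<std::mutex> lock(mutex_);
    if (count_ == 0)
      return 0;  // underflow: the record raises an OptionError here instead
    return --count_;
  }

 private:
  std::mutex mutex_;
  std::size_t count_ = 1;  // construction hands the first reference to the creator
};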
457
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
153,514
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/*
 * Work around the class() function in AIX math.h clashing with
 * identifiers named "class".
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef COMPAT_AIX_MATH_H
#define COMPAT_AIX_MATH_H

#define class class_in_math_h_causes_problems

#include_next <math.h>

#undef class

#endif /* COMPAT_AIX_MATH_H */
null
null
null
null
69,569
11,982
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
11,982
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef COMPONENTS_RAPPOR_RAPPOR_RECORDER_IMPL_H_
#define COMPONENTS_RAPPOR_RAPPOR_RECORDER_IMPL_H_

#include "base/threading/thread_checker.h"
#include "components/rappor/public/interfaces/rappor_recorder.mojom.h"

class GURL;

namespace rappor {

class RapporServiceImpl;

// Records aggregate, privacy-preserving samples from the renderers.
// See https://www.chromium.org/developers/design-documents/rappor
class RapporRecorderImpl : public mojom::RapporRecorder {
 public:
  explicit RapporRecorderImpl(RapporServiceImpl* rappor_service);
  ~RapporRecorderImpl() override;

  static void Create(RapporServiceImpl* rappor_service,
                     mojom::RapporRecorderRequest request);

 private:
  // rappor::mojom::RapporRecorder:
  void RecordRappor(const std::string& metric,
                    const std::string& sample) override;
  void RecordRapporURL(const std::string& metric, const GURL& sample) override;

  RapporServiceImpl* rappor_service_;
  base::ThreadChecker thread_checker_;

  DISALLOW_COPY_AND_ASSIGN(RapporRecorderImpl);
};

}  // namespace rappor

#endif  // COMPONENTS_RAPPOR_RAPPOR_RECORDER_IMPL_H_
null
null
null
null
8,845
3,688
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
3,688
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef IOS_WEB_WEB_STATE_CONTEXT_MENU_CONSTANTS_H_
#define IOS_WEB_WEB_STATE_CONTEXT_MENU_CONSTANTS_H_

#import <Foundation/Foundation.h>

// Contains keys present in dictionary returned by __gCrWeb.getElementFromPoint
// and __gCrWeb.findElementAtPoint JS APIs.

namespace web {

// Required in findElementAtPoint response. (Not used by getElementFromPoint.)
// Represents a unique string request ID that is passed through directly from a
// call to findElementAtPoint to the response dictionary. The request ID should
// be used to correlate a response with a previous call to findElementAtPoint.
extern NSString* const kContextMenuElementRequestId;

// Optional key. Represents element's href attribute if present or parent's href
// if element is an image.
extern NSString* const kContextMenuElementHyperlink;

// Optional key. Represents element's src attribute if present (<img> element
// only).
extern NSString* const kContextMenuElementSource;

// Optional key. Represents element's title attribute if present (<img> element
// only).
extern NSString* const kContextMenuElementTitle;

// Optional key. Represents referrer policy to use for navigations away from the
// current page. Key is present if |kContextMenuElementError| is |NO_ERROR|.
extern NSString* const kContextMenuElementReferrerPolicy;

// Optional key. Represents element's innerText attribute if present (<a>
// elements with href only).
extern NSString* const kContextMenuElementInnerText;

}  // namespace web

#endif  // IOS_WEB_WEB_STATE_CONTEXT_MENU_CONSTANTS_H_
null
null
null
null
551
24,851
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
189,846
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright 2013 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs <[email protected]> * Roy Spliet <[email protected]> */ #include "ram.h" struct ramxlat { int id; u8 enc; }; static inline int ramxlat(const struct ramxlat *xlat, int id) { while (xlat->id >= 0) { if (xlat->id == id) return xlat->enc; xlat++; } return -EINVAL; } static const struct ramxlat ramgddr3_cl_lo[] = { { 5, 5 }, { 7, 7 }, { 8, 0 }, { 9, 1 }, { 10, 2 }, { 11, 3 }, { 12, 8 }, /* the below are mentioned in some, but not all, gddr3 docs */ { 13, 9 }, { 14, 6 }, /* XXX: Per Samsung docs, are these used? They overlap with Qimonda */ /* { 4, 4 }, { 5, 5 }, { 6, 6 }, { 12, 8 }, { 13, 9 }, { 14, 10 }, * { 15, 11 }, */ { -1 } }; static const struct ramxlat ramgddr3_cl_hi[] = { { 10, 2 }, { 11, 3 }, { 12, 4 }, { 13, 5 }, { 14, 6 }, { 15, 7 }, { 16, 0 }, { 17, 1 }, { -1 } }; static const struct ramxlat ramgddr3_wr_lo[] = { { 5, 2 }, { 7, 4 }, { 8, 5 }, { 9, 6 }, { 10, 7 }, { 11, 0 }, { 13 , 1 }, /* the below are mentioned in some, but not all, gddr3 docs */ { 4, 0 }, { 6, 3 }, { 12, 1 }, { -1 } }; int nvkm_gddr3_calc(struct nvkm_ram *ram) { int CL, WR, CWL, DLL = 0, ODT = 0, RON, hi; switch (ram->next->bios.timing_ver) { case 0x10: CWL = ram->next->bios.timing_10_CWL; CL = ram->next->bios.timing_10_CL; WR = ram->next->bios.timing_10_WR; DLL = !ram->next->bios.ramcfg_DLLoff; ODT = ram->next->bios.timing_10_ODT; RON = ram->next->bios.ramcfg_RON; break; case 0x20: CWL = (ram->next->bios.timing[1] & 0x00000f80) >> 7; CL = (ram->next->bios.timing[1] & 0x0000001f) >> 0; WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16; /* XXX: Get these values from the VBIOS instead */ DLL = !(ram->mr[1] & 0x1); RON = !(ram->mr[1] & 0x300) >> 8; break; default: return -ENOSYS; } if (ram->next->bios.timing_ver == 0x20 || ram->next->bios.ramcfg_timing == 0xff) { ODT = (ram->mr[1] & 0xc) >> 2; } hi = ram->mr[2] & 0x1; CL = ramxlat(hi ? ramgddr3_cl_hi : ramgddr3_cl_lo, CL); WR = ramxlat(ramgddr3_wr_lo, WR); if (CL < 0 || CWL < 1 || CWL > 7 || WR < 0) return -EINVAL; ram->mr[0] &= ~0xf74; ram->mr[0] |= (CWL & 0x07) << 9; ram->mr[0] |= (CL & 0x07) << 4; ram->mr[0] |= (CL & 0x08) >> 1; ram->mr[1] &= ~0x3fc; ram->mr[1] |= (ODT & 0x03) << 2; ram->mr[1] |= (RON & 0x03) << 8; ram->mr[1] |= (WR & 0x03) << 4; ram->mr[1] |= (WR & 0x04) << 5; ram->mr[1] |= !DLL << 6; return 0; }
null
null
null
null
98,193
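ramxlat() in the nouveau record above walks a sentinel-terminated id/encoding table, returning -EINVAL when the id is absent. The same lookup in isolation, with a shortened CAS-latency table for demonstration:

#include <cerrno>

struct ramxlat {
  int id;
  unsigned char enc;
};

// Same sentinel-terminated linear search as ramxlat() in the record.
static int xlat(const ramxlat* table, int id) {
  for (; table->id >= 0; ++table)
    if (table->id == id)
      return table->enc;
  return -EINVAL;
}

// Abbreviated version of ramgddr3_cl_lo; the {-1, 0} entry ends the scan.
static const ramxlat cl_lo[] = {
  {5, 5}, {7, 7}, {8, 0}, {9, 1}, {-1, 0},
};

int main() {
  // A present id maps to its encoding; an absent one yields -EINVAL.
  return (xlat(cl_lo, 9) == 1 && xlat(cl_lo, 6) == -EINVAL) ? 0 : 1;
}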
4,060
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
4,060
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef IOS_CHROME_BROWSER_TABS_TAB_MODEL_LIST_H_
#define IOS_CHROME_BROWSER_TABS_TAB_MODEL_LIST_H_

#import <Foundation/Foundation.h>

#include "base/macros.h"

@class TabModel;
class TabModelListObserver;

namespace ios {
class ChromeBrowserState;
}

// A class containing static functions to help maintain a 1:N relationship
// between an ios::ChromeBrowserState and multiple TabModels.
class TabModelList {
 public:
  // Adds |observer| to the list of observers.
  static void AddObserver(TabModelListObserver* observer);

  // Removes |observer| from the list of observers.
  static void RemoveObserver(TabModelListObserver* observer);

  // Registers |tab_model| as associated to |browser_state|. The object will
  // be retained until |UnregisterTabModelFromChromeBrowserState| is called.
  // It is an error if |tab_model| is already registered as associated to
  // |browser_state|.
  static void RegisterTabModelWithChromeBrowserState(
      ios::ChromeBrowserState* browser_state,
      TabModel* tab_model);

  // Unregisters the association between |tab_model| and |browser_state|.
  // It is an error if no such association exists.
  static void UnregisterTabModelFromChromeBrowserState(
      ios::ChromeBrowserState* browser_state,
      TabModel* tab_model);

  // Returns the list of all TabModels associated with |browser_state|.
  static NSArray<TabModel*>* GetTabModelsForChromeBrowserState(
      ios::ChromeBrowserState* browser_state);

  // Returns the last active TabModel associated with |browser_state|.
  static TabModel* GetLastActiveTabModelForChromeBrowserState(
      ios::ChromeBrowserState* browser_state);

  // Returns true if an incognito session is currently active (i.e. at least
  // one incognito tab is open).
  static bool IsOffTheRecordSessionActive();

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(TabModelList);
};

#endif  // IOS_CHROME_BROWSER_TABS_TAB_MODEL_LIST_H_
null
null
null
null
923
982
null
train_val
c536b6be1a72aefd632d5530106a67c516cb9f4b
257,369
openssl
0
https://github.com/openssl/openssl
2016-09-22 23:12:38+01:00
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <stdlib.h>
#include <string.h>
#include <openssl/opensslconf.h>
#include <openssl/ripemd.h>

/*
 * DO EXAMINE COMMENTS IN crypto/md5/md5_locl.h & crypto/md5/md5_dgst.c
 * FOR EXPLANATIONS ON FOLLOWING "CODE."
 * <[email protected]>
 */
#ifdef RMD160_ASM
# if defined(__i386) || defined(__i386__) || defined(_M_IX86)
#  define ripemd160_block_data_order ripemd160_block_asm_data_order
# endif
#endif

void ripemd160_block_data_order(RIPEMD160_CTX *c, const void *p, size_t num);

#define DATA_ORDER_IS_LITTLE_ENDIAN

#define HASH_LONG               RIPEMD160_LONG
#define HASH_CTX                RIPEMD160_CTX
#define HASH_CBLOCK             RIPEMD160_CBLOCK
#define HASH_UPDATE             RIPEMD160_Update
#define HASH_TRANSFORM          RIPEMD160_Transform
#define HASH_FINAL              RIPEMD160_Final
#define HASH_MAKE_STRING(c,s)   do {    \
        unsigned long ll;               \
        ll=(c)->A; (void)HOST_l2c(ll,(s));      \
        ll=(c)->B; (void)HOST_l2c(ll,(s));      \
        ll=(c)->C; (void)HOST_l2c(ll,(s));      \
        ll=(c)->D; (void)HOST_l2c(ll,(s));      \
        ll=(c)->E; (void)HOST_l2c(ll,(s));      \
        } while (0)
#define HASH_BLOCK_DATA_ORDER   ripemd160_block_data_order

#include "internal/md32_common.h"

/*
 * Transformed F2 and F4 are courtesy of Wei Dai <[email protected]>
 */
#define F1(x,y,z)       ((x) ^ (y) ^ (z))
#define F2(x,y,z)       ((((y) ^ (z)) & (x)) ^ (z))
#define F3(x,y,z)       (((~(y)) | (x)) ^ (z))
#define F4(x,y,z)       ((((x) ^ (y)) & (z)) ^ (y))
#define F5(x,y,z)       (((~(z)) | (y)) ^ (x))

#define RIPEMD160_A     0x67452301L
#define RIPEMD160_B     0xEFCDAB89L
#define RIPEMD160_C     0x98BADCFEL
#define RIPEMD160_D     0x10325476L
#define RIPEMD160_E     0xC3D2E1F0L

#include "rmdconst.h"

#define RIP1(a,b,c,d,e,w,s) { \
        a+=F1(b,c,d)+X(w); \
        a=ROTATE(a,s)+e; \
        c=ROTATE(c,10); }

#define RIP2(a,b,c,d,e,w,s,K) { \
        a+=F2(b,c,d)+X(w)+K; \
        a=ROTATE(a,s)+e; \
        c=ROTATE(c,10); }

#define RIP3(a,b,c,d,e,w,s,K) { \
        a+=F3(b,c,d)+X(w)+K; \
        a=ROTATE(a,s)+e; \
        c=ROTATE(c,10); }

#define RIP4(a,b,c,d,e,w,s,K) { \
        a+=F4(b,c,d)+X(w)+K; \
        a=ROTATE(a,s)+e; \
        c=ROTATE(c,10); }

#define RIP5(a,b,c,d,e,w,s,K) { \
        a+=F5(b,c,d)+X(w)+K; \
        a=ROTATE(a,s)+e; \
        c=ROTATE(c,10); }
null
null
null
null
118,814
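Every RIP* round macro in the rmd_dgst.c record above is built from a boolean mixing function plus ROTATE. A standalone rotl32 and F1 with a couple of sanity checks; shift counts are assumed to be in 1..31, as in the RIPEMD-160 schedule:

#include <cassert>
#include <cstdint>

// The ROTATE used by the RIP* round macros, written out explicitly.
static uint32_t rotl32(uint32_t x, unsigned s) {
  return (x << s) | (x >> (32 - s));  // valid for s in 1..31
}

// F1 from the record: a plain XOR of the three input words.
static uint32_t F1(uint32_t x, uint32_t y, uint32_t z) { return x ^ y ^ z; }

int main() {
  assert(rotl32(0x80000000u, 1) == 1u);  // the top bit wraps to bit 0
  assert(F1(0xffu, 0x0fu, 0xf0u) == 0u); // 0xff ^ 0x0f ^ 0xf0 == 0
  return 0;
}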
47,571
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
47,571
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CC_RESOURCES_CROSS_THREAD_SHARED_BITMAP_H_
#define CC_RESOURCES_CROSS_THREAD_SHARED_BITMAP_H_

#include <memory>

#include "base/memory/ref_counted.h"
#include "base/memory/shared_memory.h"
#include "cc/cc_export.h"
#include "components/viz/common/quads/shared_bitmap.h"
#include "components/viz/common/resources/resource_format.h"
#include "ui/gfx/geometry/size.h"

namespace cc {

// This class holds ownership of a base::SharedMemory segment for use as a
// composited resource, and is refcounted in order to share ownership with the
// LayerTreeHost, via TextureLayer, which needs access to the base::SharedMemory
// from the compositor thread.
// Because all the fields exposed are const, they can be used from any thread
// without conflict, as they only read existing states.
class CC_EXPORT CrossThreadSharedBitmap
    : public base::RefCountedThreadSafe<CrossThreadSharedBitmap> {
 public:
  CrossThreadSharedBitmap(const viz::SharedBitmapId& id,
                          std::unique_ptr<base::SharedMemory> memory,
                          const gfx::Size& size,
                          viz::ResourceFormat format);

  const viz::SharedBitmapId& id() const { return id_; }
  const base::SharedMemory* shared_memory() const { return memory_.get(); }
  const gfx::Size& size() const { return size_; }
  viz::ResourceFormat format() const { return format_; }

 private:
  friend base::RefCountedThreadSafe<CrossThreadSharedBitmap>;

  ~CrossThreadSharedBitmap();

  const viz::SharedBitmapId id_;
  const std::unique_ptr<const base::SharedMemory> memory_;
  const gfx::Size size_;
  const viz::ResourceFormat format_;
};

}  // namespace cc

#endif  // CC_RESOURCES_CROSS_THREAD_SHARED_BITMAP_H_
null
null
null
null
44,434
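CrossThreadSharedBitmap is safe to share across threads because every exposed field is fixed at construction. A Chromium-free sketch of the same idea, with std::shared_ptr standing in for RefCountedThreadSafe and a std::vector for the base::SharedMemory segment; both substitutions are assumptions, not the viz API:

#include <cstddef>
#include <memory>
#include <vector>

// An immutable, refcounted record that any thread may read concurrently:
// every accessor only reads state that was set once in the constructor.
class SharedBitmapRecord {
 public:
  SharedBitmapRecord(std::size_t width, std::size_t height)
      : width_(width), height_(height), pixels_(width * height) {}

  std::size_t width() const { return width_; }
  std::size_t height() const { return height_; }
  const std::vector<unsigned char>& pixels() const { return pixels_; }

 private:
  const std::size_t width_, height_;
  const std::vector<unsigned char> pixels_;
};

// shared_ptr's control block provides the thread-safe refcount that
// RefCountedThreadSafe supplies in the record above.
std::shared_ptr<const SharedBitmapRecord> MakeRecord(std::size_t w,
                                                     std::size_t h) {
  return std::make_shared<const SharedBitmapRecord>(w, h);
}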
41,375
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
41,375
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef TOOLS_GN_SUBSTITUTION_TYPE_H_ #define TOOLS_GN_SUBSTITUTION_TYPE_H_ #include <vector> class Err; class ParseNode; // Keep kSubstitutionNames, kSubstitutionNinjaNames and the // IsValid*Substitution functions in sync if you change anything here. enum SubstitutionType { SUBSTITUTION_LITERAL = 0, // The index of the first pattern. To loop overal all patterns, go from here // until NUM_TYPES. SUBSTITUTION_FIRST_PATTERN, // These map to Ninja's {in} and {out} variables. SUBSTITUTION_SOURCE = SUBSTITUTION_FIRST_PATTERN, // {{source}} SUBSTITUTION_OUTPUT, // {{output}} // Valid for all compiler tools. SUBSTITUTION_SOURCE_NAME_PART, // {{source_name_part}} SUBSTITUTION_SOURCE_FILE_PART, // {{source_file_part}} SUBSTITUTION_SOURCE_DIR, // {{source_dir}} SUBSTITUTION_SOURCE_ROOT_RELATIVE_DIR, // {{root_relative_dir}} SUBSTITUTION_SOURCE_GEN_DIR, // {{source_gen_dir}} SUBSTITUTION_SOURCE_OUT_DIR, // {{source_out_dir}} SUBSTITUTION_SOURCE_TARGET_RELATIVE, // {{source_target_relative}} // Valid for all compiler and linker tools. These depend on the target and // do not vary on a per-file basis. SUBSTITUTION_LABEL, // {{label}} SUBSTITUTION_LABEL_NAME, // {{label_name}} SUBSTITUTION_ROOT_GEN_DIR, // {{root_gen_dir}} SUBSTITUTION_ROOT_OUT_DIR, // {{root_out_dir}} SUBSTITUTION_TARGET_GEN_DIR, // {{target_gen_dir}} SUBSTITUTION_TARGET_OUT_DIR, // {{target_out_dir}} SUBSTITUTION_TARGET_OUTPUT_NAME, // {{target_output_name}} // Valid for compiler tools. SUBSTITUTION_ASMFLAGS, // {{asmflags}} SUBSTITUTION_CFLAGS, // {{cflags}} SUBSTITUTION_CFLAGS_C, // {{cflags_c}} SUBSTITUTION_CFLAGS_CC, // {{cflags_cc}} SUBSTITUTION_CFLAGS_OBJC, // {{cflags_objc}} SUBSTITUTION_CFLAGS_OBJCC, // {{cflags_objcc}} SUBSTITUTION_DEFINES, // {{defines}} SUBSTITUTION_INCLUDE_DIRS, // {{include_dirs}} // Valid for linker tools. SUBSTITUTION_LINKER_INPUTS, // {{inputs}} SUBSTITUTION_LINKER_INPUTS_NEWLINE, // {{inputs_newline}} SUBSTITUTION_LDFLAGS, // {{ldflags}} SUBSTITUTION_LIBS, // {{libs}} SUBSTITUTION_OUTPUT_DIR, // {{output_dir}} SUBSTITUTION_OUTPUT_EXTENSION, // {{output_extension}} SUBSTITUTION_SOLIBS, // {{solibs}} // Valid for alink only. SUBSTITUTION_ARFLAGS, // {{arflags}} // Valid for bundle_data targets. SUBSTITUTION_BUNDLE_ROOT_DIR, // {{bundle_root_dir}} SUBSTITUTION_BUNDLE_CONTENTS_DIR, // {{bundle_contents_dir}} SUBSTITUTION_BUNDLE_RESOURCES_DIR, // {{bundle_resources_dir}} SUBSTITUTION_BUNDLE_EXECUTABLE_DIR, // {{bundle_executable_dir}} SUBSTITUTION_BUNDLE_PLUGINS_DIR, // {{bundle_plugins_dir}} // Valid for compile_xcassets tool. SUBSTITUTION_BUNDLE_PRODUCT_TYPE, // {{bundle_product_type}} SUBSTITUTION_BUNDLE_PARTIAL_INFO_PLIST, // {{bundle_partial_info_plist}} // Used only for the args of actions. SUBSTITUTION_RSP_FILE_NAME, // {{response_file_name}} SUBSTITUTION_NUM_TYPES // Must be last. }; // An array of size SUBSTITUTION_NUM_TYPES that lists the names of the // substitution patterns, including the curly braces. So, for example, // kSubstitutionNames[SUBSTITUTION_SOURCE] == "{{source}}". extern const char* kSubstitutionNames[SUBSTITUTION_NUM_TYPES]; // Ninja variables corresponding to each substitution. These do not include // the dollar sign. extern const char* kSubstitutionNinjaNames[SUBSTITUTION_NUM_TYPES]; // A wrapper around an array if flags indicating whether a given substitution // type is required in some context. 
By convention, the LITERAL type bit is // not set. struct SubstitutionBits { SubstitutionBits(); // Merges any bits set in the given "other" to this one. This object will // then be the union of all bits in the two lists. void MergeFrom(const SubstitutionBits& other); // Converts the substitution type bitfield (with a true set for each required // item) to a vector of the types listed. Does not include LITERAL. void FillVector(std::vector<SubstitutionType>* vect) const; bool used[SUBSTITUTION_NUM_TYPES]; }; // Returns true if the given substitution pattern references the output // directory. This is used to check strings that begin with a substitution to // verify that they produce a file in the output directory. bool SubstitutionIsInOutputDir(SubstitutionType type); // Returns true if the given substitution pattern references the bundle // directory. This is used to check strings that begin with a substitution to // verify that they produce a file in the bundle directory. bool SubstitutionIsInBundleDir(SubstitutionType type); // Returns true if the given substitution is valid for the named purpose. bool IsValidBundleDataSubstitution(SubstitutionType type); bool IsValidSourceSubstitution(SubstitutionType type); bool IsValidScriptArgsSubstitution(SubstitutionType type); // Both compiler and linker tools. bool IsValidToolSubstitution(SubstitutionType type); bool IsValidCompilerSubstitution(SubstitutionType type); bool IsValidCompilerOutputsSubstitution(SubstitutionType type); bool IsValidLinkerSubstitution(SubstitutionType type); bool IsValidLinkerOutputsSubstitution(SubstitutionType type); bool IsValidALinkSubstitution(SubstitutionType type); bool IsValidCopySubstitution(SubstitutionType type); bool IsValidCompileXCassetsSubstitution(SubstitutionType type); // Validates that each substitution type in the vector passes the given // is_valid_subst predicate. Returns true on success. On failure, fills in the // error object with an appropriate message and returns false. bool EnsureValidSubstitutions( const std::vector<SubstitutionType>& types, bool (*is_valid_subst)(SubstitutionType), const ParseNode* origin, Err* err); #endif // TOOLS_GN_SUBSTITUTION_TYPE_H_
null
null
null
null
38,238
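SubstitutionBits in the substitution_type.h record above is a bool-per-enum set with a union-style merge and a vector conversion that skips the LITERAL slot. The same shape in miniature, with a made-up three-value enum standing in for the full substitution list:

#include <vector>

// Illustrative enum; the record's real list has dozens of entries.
enum SubstType { SUBST_LITERAL = 0, SUBST_SOURCE, SUBST_OUTPUT, SUBST_NUM_TYPES };

struct SubstBits {
  bool used[SUBST_NUM_TYPES] = {};

  // Union of two sets, as in SubstitutionBits::MergeFrom.
  void MergeFrom(const SubstBits& other) {
    for (int i = 0; i < SUBST_NUM_TYPES; ++i)
      used[i] = used[i] || other.used[i];
  }

  // Converts set bits to a vector of types, skipping LITERAL by convention.
  void FillVector(std::vector<SubstType>* vect) const {
    for (int i = SUBST_LITERAL + 1; i < SUBST_NUM_TYPES; ++i)
      if (used[i])
        vect->push_back(static_cast<SubstType>(i));
  }
};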
26,628
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
26,628
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/* -*- Mode: C; c-basic-offset:8 ; indent-tabs-mode:t -*- */ /* * Linux usbfs backend for libusb * Copyright (C) 2007-2009 Daniel Drake <[email protected]> * Copyright (c) 2001 Johannes Erdfelt <[email protected]> * Copyright (c) 2012-2013 Nathan Hjelm <[email protected]> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "config.h" #include <assert.h> #include <ctype.h> #include <dirent.h> #include <errno.h> #include <fcntl.h> #include <poll.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/ioctl.h> #include <sys/stat.h> #include <sys/types.h> #include <sys/utsname.h> #include <sys/socket.h> #include <unistd.h> extern "C" { #include "libusb.h" #include "libusbi.h" #include "linux_usbfs.h" } #include "device/udev_linux/udev.h" /* udev context */ static struct udev *udev_ctx = NULL; static int udev_monitor_fd = -1; static int udev_control_pipe[2] = {-1, -1}; static struct udev_monitor *udev_monitor = NULL; static pthread_t linux_event_thread; static void udev_hotplug_event(struct udev_device* udev_dev); static void *linux_udev_event_thread_main(void *arg); int linux_udev_start_event_monitor(void) { int r; assert(udev_ctx == NULL); udev_ctx = device::udev_new(); if (!udev_ctx) { usbi_err(NULL, "could not create udev context"); return LIBUSB_ERROR_OTHER; } udev_monitor = device::udev_monitor_new_from_netlink(udev_ctx, "udev"); if (!udev_monitor) { usbi_err(NULL, "could not initialize udev monitor"); goto err_free_ctx; } r = device::udev_monitor_filter_add_match_subsystem_devtype(udev_monitor, "usb", 0); if (r) { usbi_err(NULL, "could not initialize udev monitor filter for \"usb\" subsystem"); goto err_free_monitor; } if (device::udev_monitor_enable_receiving(udev_monitor)) { usbi_err(NULL, "failed to enable the udev monitor"); goto err_free_monitor; } udev_monitor_fd = device::udev_monitor_get_fd(udev_monitor); /* Some older versions of udev are not non-blocking by default, * so make sure this is set */ r = fcntl(udev_monitor_fd, F_GETFL); if (r == -1) { usbi_err(NULL, "getting udev monitor fd flags (%d)", errno); goto err_free_monitor; } r = fcntl(udev_monitor_fd, F_SETFL, r | O_NONBLOCK); if (r) { usbi_err(NULL, "setting udev monitor fd flags (%d)", errno); goto err_free_monitor; } r = usbi_pipe(udev_control_pipe); if (r) { usbi_err(NULL, "could not create udev control pipe"); goto err_free_monitor; } r = pthread_create(&linux_event_thread, NULL, linux_udev_event_thread_main, NULL); if (r) { usbi_err(NULL, "creating hotplug event thread (%d)", r); goto err_close_pipe; } return LIBUSB_SUCCESS; err_close_pipe: close(udev_control_pipe[0]); close(udev_control_pipe[1]); err_free_monitor: device::udev_monitor_unref(udev_monitor); udev_monitor = NULL; udev_monitor_fd = -1; err_free_ctx: device::udev_unref(udev_ctx); udev_ctx = NULL; return LIBUSB_ERROR_OTHER; } int 
linux_udev_stop_event_monitor(void) { char dummy = 1; int r; assert(udev_ctx != NULL); assert(udev_monitor != NULL); assert(udev_monitor_fd != -1); /* Write some dummy data to the control pipe and * wait for the thread to exit */ r = usbi_write(udev_control_pipe[1], &dummy, sizeof(dummy)); if (r <= 0) { usbi_warn(NULL, "udev control pipe signal failed"); } pthread_join(linux_event_thread, NULL); /* Release the udev monitor */ device::udev_monitor_unref(udev_monitor); udev_monitor = NULL; udev_monitor_fd = -1; /* Clean up the udev context */ device::udev_unref(udev_ctx); udev_ctx = NULL; /* close and reset control pipe */ close(udev_control_pipe[0]); close(udev_control_pipe[1]); udev_control_pipe[0] = -1; udev_control_pipe[1] = -1; return LIBUSB_SUCCESS; } static void *linux_udev_event_thread_main(void *arg) { char dummy; int r; struct udev_device* udev_dev; struct pollfd fds[] = { {.fd = udev_control_pipe[0], .events = POLLIN}, {.fd = udev_monitor_fd, .events = POLLIN}, }; usbi_dbg("udev event thread entering."); while (poll(fds, 2, -1) >= 0) { if (fds[0].revents & POLLIN) { /* activity on control pipe, read the byte and exit */ r = usbi_read(udev_control_pipe[0], &dummy, sizeof(dummy)); if (r <= 0) { usbi_warn(NULL, "udev control pipe read failed"); } break; } if (fds[1].revents & POLLIN) { usbi_mutex_static_lock(&linux_hotplug_lock); udev_dev = device::udev_monitor_receive_device(udev_monitor); if (udev_dev) udev_hotplug_event(udev_dev); usbi_mutex_static_unlock(&linux_hotplug_lock); } } usbi_dbg("udev event thread exiting"); return NULL; } static int udev_device_info(struct libusb_context *ctx, int detached, struct udev_device *udev_dev, uint8_t *busnum, uint8_t *devaddr, const char **sys_name) { const char *dev_node; dev_node = device::udev_device_get_devnode(udev_dev); if (!dev_node) { return LIBUSB_ERROR_OTHER; } *sys_name = device::udev_device_get_sysname(udev_dev); if (!*sys_name) { return LIBUSB_ERROR_OTHER; } return linux_get_device_address(ctx, detached, busnum, devaddr, dev_node, *sys_name); } static void udev_hotplug_event(struct udev_device* udev_dev) { const char* udev_action; const char* sys_name = NULL; uint8_t busnum = 0, devaddr = 0; int detached; int r; do { udev_action = device::udev_device_get_action(udev_dev); if (!udev_action) { break; } detached = !strncmp(udev_action, "remove", 6); r = udev_device_info(NULL, detached, udev_dev, &busnum, &devaddr, &sys_name); if (LIBUSB_SUCCESS != r) { break; } usbi_dbg("udev hotplug event. 
action: %s.", udev_action); if (strncmp(udev_action, "add", 3) == 0) { linux_hotplug_enumerate(busnum, devaddr, sys_name); } else if (detached) { linux_device_disconnected(busnum, devaddr, sys_name); } else { usbi_err(NULL, "ignoring udev action %s", udev_action); } } while (0); device::udev_device_unref(udev_dev); } int linux_udev_scan_devices(struct libusb_context *ctx) { struct udev_enumerate *enumerator; struct udev_list_entry *devices, *entry; struct udev_device *udev_dev; const char *sys_name; int r; assert(udev_ctx != NULL); enumerator = device::udev_enumerate_new(udev_ctx); if (NULL == enumerator) { usbi_err(ctx, "error creating udev enumerator"); return LIBUSB_ERROR_OTHER; } device::udev_enumerate_add_match_subsystem(enumerator, "usb"); device::udev_enumerate_scan_devices(enumerator); devices = device::udev_enumerate_get_list_entry(enumerator); udev_list_entry_foreach(entry, devices) { const char *path = device::udev_list_entry_get_name(entry); uint8_t busnum = 0, devaddr = 0; udev_dev = device::udev_device_new_from_syspath(udev_ctx, path); r = udev_device_info(ctx, 0, udev_dev, &busnum, &devaddr, &sys_name); if (r) { device::udev_device_unref(udev_dev); continue; } linux_enumerate_device(ctx, busnum, devaddr, sys_name); device::udev_device_unref(udev_dev); } device::udev_enumerate_unref(enumerator); return LIBUSB_SUCCESS; } void linux_udev_hotplug_poll(void) { struct udev_device* udev_dev; usbi_mutex_static_lock(&linux_hotplug_lock); do { udev_dev = device::udev_monitor_receive_device(udev_monitor); if (udev_dev) { usbi_dbg("Handling hotplug event from hotplug_poll"); udev_hotplug_event(udev_dev); } } while (udev_dev); usbi_mutex_static_unlock(&linux_hotplug_lock); }
null
null
null
null
23,491
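The linux_udev.cc record above multiplexes a control pipe and the udev monitor fd with poll(). Below is a minimal single-threaded version using libudev directly instead of the device:: wrappers, assuming libudev headers are available; the control pipe is dropped, so this loop only ends when poll() fails:

#include <poll.h>
#include <cstdio>
#include <libudev.h>

int main() {
  // Same setup sequence as linux_udev_start_event_monitor in the record,
  // with error handling trimmed to keep the shape visible.
  udev* ctx = udev_new();
  udev_monitor* mon = udev_monitor_new_from_netlink(ctx, "udev");
  udev_monitor_filter_add_match_subsystem_devtype(mon, "usb", nullptr);
  udev_monitor_enable_receiving(mon);

  pollfd fd = {udev_monitor_get_fd(mon), POLLIN, 0};
  while (poll(&fd, 1, -1) > 0) {
    if (udev_device* dev = udev_monitor_receive_device(mon)) {
      const char* action = udev_device_get_action(dev);
      std::printf("action: %s\n", action ? action : "(none)");
      udev_device_unref(dev);
    }
  }

  udev_monitor_unref(mon);
  udev_unref(ctx);
  return 0;
}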
1,104
null
train_val
c536b6be1a72aefd632d5530106a67c516cb9f4b
257,491
openssl
0
https://github.com/openssl/openssl
2016-09-22 23:12:38+01:00
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include "apps.h"
#include <openssl/bio.h>
#include <openssl/err.h>
#include <openssl/evp.h>
#include <openssl/objects.h>
#include <openssl/x509.h>
#include <openssl/rand.h>
#include <openssl/pem.h>
#ifndef OPENSSL_NO_COMP
# include <openssl/comp.h>
#endif
#include <ctype.h>

#undef SIZE
#undef BSIZE
#define SIZE    (512)
#define BSIZE   (8*1024)

static int set_hex(char *in, unsigned char *out, int size);
static void show_ciphers(const OBJ_NAME *name, void *bio_);

typedef enum OPTION_choice {
    OPT_ERR = -1, OPT_EOF = 0, OPT_HELP, OPT_LIST,
    OPT_E, OPT_IN, OPT_OUT, OPT_PASS, OPT_ENGINE, OPT_D, OPT_P, OPT_V,
    OPT_NOPAD, OPT_SALT, OPT_NOSALT, OPT_DEBUG, OPT_UPPER_P, OPT_UPPER_A,
    OPT_A, OPT_Z, OPT_BUFSIZE, OPT_K, OPT_KFILE, OPT_UPPER_K, OPT_NONE,
    OPT_UPPER_S, OPT_IV, OPT_MD, OPT_CIPHER
} OPTION_CHOICE;

OPTIONS enc_options[] = {
    {"help", OPT_HELP, '-', "Display this summary"},
    {"ciphers", OPT_LIST, '-', "List ciphers"},
    {"in", OPT_IN, '<', "Input file"},
    {"out", OPT_OUT, '>', "Output file"},
    {"pass", OPT_PASS, 's', "Passphrase source"},
    {"e", OPT_E, '-', "Encrypt"},
    {"d", OPT_D, '-', "Decrypt"},
    {"p", OPT_P, '-', "Print the iv/key"},
    {"P", OPT_UPPER_P, '-', "Print the iv/key and exit"},
    {"v", OPT_V, '-', "Verbose output"},
    {"nopad", OPT_NOPAD, '-', "Disable standard block padding"},
    {"salt", OPT_SALT, '-', "Use salt in the KDF (default)"},
    {"nosalt", OPT_NOSALT, '-', "Do not use salt in the KDF"},
    {"debug", OPT_DEBUG, '-', "Print debug info"},
    {"a", OPT_A, '-', "Base64 encode/decode, depending on encryption flag"},
    {"base64", OPT_A, '-', "Same as option -a"},
    {"A", OPT_UPPER_A, '-',
     "Used with -[base64|a] to specify base64 buffer as a single line"},
    {"bufsize", OPT_BUFSIZE, 's', "Buffer size"},
    {"k", OPT_K, 's', "Passphrase"},
    {"kfile", OPT_KFILE, '<', "Read passphrase from file"},
    {"K", OPT_UPPER_K, 's', "Raw key, in hex"},
    {"S", OPT_UPPER_S, 's', "Salt, in hex"},
    {"iv", OPT_IV, 's', "IV in hex"},
    {"md", OPT_MD, 's', "Use specified digest to create a key from the passphrase"},
    {"none", OPT_NONE, '-', "Don't encrypt"},
    {"", OPT_CIPHER, '-', "Any supported cipher"},
#ifdef ZLIB
    {"z", OPT_Z, '-', "Use zlib as the 'encryption'"},
#endif
#ifndef OPENSSL_NO_ENGINE
    {"engine", OPT_ENGINE, 's', "Use engine, possibly a hardware device"},
#endif
    {NULL}
};

int enc_main(int argc, char **argv)
{
    static char buf[128];
    static const char magic[] = "Salted__";
    BIO *in = NULL, *out = NULL, *b64 = NULL, *benc = NULL, *rbio =
        NULL, *wbio = NULL;
    EVP_CIPHER_CTX *ctx = NULL;
    const EVP_CIPHER *cipher = NULL, *c;
    const EVP_MD *dgst = NULL;
    char *hkey = NULL, *hiv = NULL, *hsalt = NULL, *p;
    char *infile = NULL, *outfile = NULL, *prog;
    char *str = NULL, *passarg = NULL, *pass = NULL, *strbuf = NULL;
    char mbuf[sizeof magic - 1];
    OPTION_CHOICE o;
    int bsize = BSIZE, verbose = 0, debug = 0, olb64 = 0, nosalt = 0;
    int enc = 1, printkey = 0, i, k;
    int base64 = 0, informat = FORMAT_BINARY, outformat = FORMAT_BINARY;
    int ret = 1, inl, nopad = 0;
    unsigned char key[EVP_MAX_KEY_LENGTH], iv[EVP_MAX_IV_LENGTH];
    unsigned char *buff = NULL, salt[PKCS5_SALT_LEN];
    long n;
#ifdef ZLIB
    int do_zlib = 0;
    BIO *bzl = NULL;
#endif

    /* first check the program name */
    prog = opt_progname(argv[0]);
    if (strcmp(prog, "base64") == 0)
        base64 = 1;
#ifdef ZLIB
    else if (strcmp(prog, "zlib") == 0)
        do_zlib = 1;
#endif
    else {
        cipher = EVP_get_cipherbyname(prog);
        if (cipher == NULL && strcmp(prog, "enc") != 0) {
            BIO_printf(bio_err, "%s is not a known cipher\n", prog);
            goto end;
        }
    }

    prog = opt_init(argc, argv, enc_options);
    while ((o = opt_next()) != OPT_EOF) {
        switch (o) {
        case OPT_EOF:
        case OPT_ERR:
 opthelp:
            BIO_printf(bio_err, "%s: Use -help for summary.\n", prog);
            goto end;
        case OPT_HELP:
            opt_help(enc_options);
            ret = 0;
            goto end;
        case OPT_LIST:
            BIO_printf(bio_err, "Supported ciphers:\n");
            OBJ_NAME_do_all_sorted(OBJ_NAME_TYPE_CIPHER_METH,
                                   show_ciphers, bio_err);
            BIO_printf(bio_err, "\n");
            goto end;
        case OPT_E:
            enc = 1;
            break;
        case OPT_IN:
            infile = opt_arg();
            break;
        case OPT_OUT:
            outfile = opt_arg();
            break;
        case OPT_PASS:
            passarg = opt_arg();
            break;
        case OPT_ENGINE:
            (void)setup_engine(opt_arg(), 0);
            break;
        case OPT_D:
            enc = 0;
            break;
        case OPT_P:
            printkey = 1;
            break;
        case OPT_V:
            verbose = 1;
            break;
        case OPT_NOPAD:
            nopad = 1;
            break;
        case OPT_SALT:
            nosalt = 0;
            break;
        case OPT_NOSALT:
            nosalt = 1;
            break;
        case OPT_DEBUG:
            debug = 1;
            break;
        case OPT_UPPER_P:
            printkey = 2;
            break;
        case OPT_UPPER_A:
            olb64 = 1;
            break;
        case OPT_A:
            base64 = 1;
            break;
        case OPT_Z:
#ifdef ZLIB
            do_zlib = 1;
#endif
            break;
        case OPT_BUFSIZE:
            p = opt_arg();
            i = (int)strlen(p) - 1;
            k = i >= 1 && p[i] == 'k';
            if (k)
                p[i] = '\0';
            if (!opt_long(opt_arg(), &n)
                    || n < 0
                    || (k && n >= LONG_MAX / 1024))
                goto opthelp;
            if (k)
                n *= 1024;
            bsize = (int)n;
            break;
        case OPT_K:
            str = opt_arg();
            break;
        case OPT_KFILE:
            in = bio_open_default(opt_arg(), 'r', FORMAT_TEXT);
            if (in == NULL)
                goto opthelp;
            i = BIO_gets(in, buf, sizeof buf);
            BIO_free(in);
            in = NULL;
            if (i <= 0) {
                BIO_printf(bio_err,
                           "%s Can't read key from %s\n", prog, opt_arg());
                goto opthelp;
            }
            while (--i > 0 && (buf[i] == '\r' || buf[i] == '\n'))
                buf[i] = '\0';
            if (i <= 0) {
                BIO_printf(bio_err, "%s: zero length password\n", prog);
                goto opthelp;
            }
            str = buf;
            break;
        case OPT_UPPER_K:
            hkey = opt_arg();
            break;
        case OPT_UPPER_S:
            hsalt = opt_arg();
            break;
        case OPT_IV:
            hiv = opt_arg();
            break;
        case OPT_MD:
            if (!opt_md(opt_arg(), &dgst))
                goto opthelp;
            break;
        case OPT_CIPHER:
            if (!opt_cipher(opt_unknown(), &c))
                goto opthelp;
            cipher = c;
            break;
        case OPT_NONE:
            cipher = NULL;
            break;
        }
    }

    if (cipher && EVP_CIPHER_flags(cipher) & EVP_CIPH_FLAG_AEAD_CIPHER) {
        BIO_printf(bio_err, "%s: AEAD ciphers not supported\n", prog);
        goto end;
    }

    if (cipher && (EVP_CIPHER_mode(cipher) == EVP_CIPH_XTS_MODE)) {
        BIO_printf(bio_err, "%s XTS ciphers not supported\n", prog);
        goto end;
    }

    if (dgst == NULL)
        dgst = EVP_sha256();

    /* It must be large enough for a base64 encoded line */
    if (base64 && bsize < 80)
        bsize = 80;
    if (verbose)
        BIO_printf(bio_err, "bufsize=%d\n", bsize);

#ifdef ZLIB
    if (!do_zlib)
#endif
        if (base64) {
            if (enc)
                outformat = FORMAT_BASE64;
            else
                informat = FORMAT_BASE64;
        }

    strbuf = app_malloc(SIZE, "strbuf");
    buff = app_malloc(EVP_ENCODE_LENGTH(bsize), "evp buffer");

    if (infile == NULL) {
        unbuffer(stdin);
        in = dup_bio_in(informat);
    } else
        in = bio_open_default(infile, 'r', informat);
    if (in == NULL)
        goto end;

    if (!str && passarg) {
        if (!app_passwd(passarg, NULL, &pass, NULL)) {
            BIO_printf(bio_err, "Error getting password\n");
            goto end;
        }
        str = pass;
    }

    if ((str == NULL) && (cipher != NULL) && (hkey == NULL)) {
        if (1) {
#ifndef OPENSSL_NO_UI
            for (;;) {
                char prompt[200];

                BIO_snprintf(prompt, sizeof prompt, "enter %s %s password:",
                             OBJ_nid2ln(EVP_CIPHER_nid(cipher)),
                             (enc) ? "encryption" : "decryption");
"encryption" : "decryption"); strbuf[0] = '\0'; i = EVP_read_pw_string((char *)strbuf, SIZE, prompt, enc); if (i == 0) { if (strbuf[0] == '\0') { ret = 1; goto end; } str = strbuf; break; } if (i < 0) { BIO_printf(bio_err, "bad password read\n"); goto end; } } } else { #endif BIO_printf(bio_err, "password required\n"); goto end; } } out = bio_open_default(outfile, 'w', outformat); if (out == NULL) goto end; if (debug) { BIO_set_callback(in, BIO_debug_callback); BIO_set_callback(out, BIO_debug_callback); BIO_set_callback_arg(in, (char *)bio_err); BIO_set_callback_arg(out, (char *)bio_err); } rbio = in; wbio = out; #ifdef ZLIB if (do_zlib) { if ((bzl = BIO_new(BIO_f_zlib())) == NULL) goto end; if (debug) { BIO_set_callback(bzl, BIO_debug_callback); BIO_set_callback_arg(bzl, (char *)bio_err); } if (enc) wbio = BIO_push(bzl, wbio); else rbio = BIO_push(bzl, rbio); } #endif if (base64) { if ((b64 = BIO_new(BIO_f_base64())) == NULL) goto end; if (debug) { BIO_set_callback(b64, BIO_debug_callback); BIO_set_callback_arg(b64, (char *)bio_err); } if (olb64) BIO_set_flags(b64, BIO_FLAGS_BASE64_NO_NL); if (enc) wbio = BIO_push(b64, wbio); else rbio = BIO_push(b64, rbio); } if (cipher != NULL) { /* * Note that str is NULL if a key was passed on the command line, so * we get no salt in that case. Is this a bug? */ if (str != NULL) { /* * Salt handling: if encrypting generate a salt and write to * output BIO. If decrypting read salt from input BIO. */ unsigned char *sptr; size_t str_len = strlen(str); if (nosalt) sptr = NULL; else { if (enc) { if (hsalt) { if (!set_hex(hsalt, salt, sizeof salt)) { BIO_printf(bio_err, "invalid hex salt value\n"); goto end; } } else if (RAND_bytes(salt, sizeof salt) <= 0) goto end; /* * If -P option then don't bother writing */ if ((printkey != 2) && (BIO_write(wbio, magic, sizeof magic - 1) != sizeof magic - 1 || BIO_write(wbio, (char *)salt, sizeof salt) != sizeof salt)) { BIO_printf(bio_err, "error writing output file\n"); goto end; } } else if (BIO_read(rbio, mbuf, sizeof mbuf) != sizeof mbuf || BIO_read(rbio, (unsigned char *)salt, sizeof salt) != sizeof salt) { BIO_printf(bio_err, "error reading input file\n"); goto end; } else if (memcmp(mbuf, magic, sizeof magic - 1)) { BIO_printf(bio_err, "bad magic number\n"); goto end; } sptr = salt; } if (!EVP_BytesToKey(cipher, dgst, sptr, (unsigned char *)str, str_len, 1, key, iv)) { BIO_printf(bio_err, "EVP_BytesToKey failed\n"); goto end; } /* * zero the complete buffer or the string passed from the command * line bug picked up by Larry J. Hughes Jr. <[email protected]> */ if (str == strbuf) OPENSSL_cleanse(str, SIZE); else OPENSSL_cleanse(str, str_len); } if (hiv != NULL) { int siz = EVP_CIPHER_iv_length(cipher); if (siz == 0) { BIO_printf(bio_err, "warning: iv not use by this cipher\n"); } else if (!set_hex(hiv, iv, sizeof iv)) { BIO_printf(bio_err, "invalid hex iv value\n"); goto end; } } if ((hiv == NULL) && (str == NULL) && EVP_CIPHER_iv_length(cipher) != 0) { /* * No IV was explicitly set and no IV was generated during * EVP_BytesToKey. Hence the IV is undefined, making correct * decryption impossible. */ BIO_printf(bio_err, "iv undefined\n"); goto end; } if ((hkey != NULL) && !set_hex(hkey, key, EVP_CIPHER_key_length(cipher))) { BIO_printf(bio_err, "invalid hex key value\n"); goto end; } if ((benc = BIO_new(BIO_f_cipher())) == NULL) goto end; /* * Since we may be changing parameters work on the encryption context * rather than calling BIO_set_cipher(). 
        BIO_get_cipher_ctx(benc, &ctx);

        if (!EVP_CipherInit_ex(ctx, cipher, NULL, NULL, NULL, enc)) {
            BIO_printf(bio_err, "Error setting cipher %s\n",
                       EVP_CIPHER_name(cipher));
            ERR_print_errors(bio_err);
            goto end;
        }

        if (nopad)
            EVP_CIPHER_CTX_set_padding(ctx, 0);

        if (!EVP_CipherInit_ex(ctx, NULL, NULL, key, iv, enc)) {
            BIO_printf(bio_err, "Error setting cipher %s\n",
                       EVP_CIPHER_name(cipher));
            ERR_print_errors(bio_err);
            goto end;
        }

        if (debug) {
            BIO_set_callback(benc, BIO_debug_callback);
            BIO_set_callback_arg(benc, (char *)bio_err);
        }

        if (printkey) {
            if (!nosalt) {
                printf("salt=");
                for (i = 0; i < (int)sizeof(salt); i++)
                    printf("%02X", salt[i]);
                printf("\n");
            }
            if (EVP_CIPHER_key_length(cipher) > 0) {
                printf("key=");
                for (i = 0; i < EVP_CIPHER_key_length(cipher); i++)
                    printf("%02X", key[i]);
                printf("\n");
            }
            if (EVP_CIPHER_iv_length(cipher) > 0) {
                printf("iv =");
                for (i = 0; i < EVP_CIPHER_iv_length(cipher); i++)
                    printf("%02X", iv[i]);
                printf("\n");
            }
            if (printkey == 2) {
                ret = 0;
                goto end;
            }
        }
    }

    /* Only encrypt/decrypt as we write the file */
    if (benc != NULL)
        wbio = BIO_push(benc, wbio);

    for (;;) {
        inl = BIO_read(rbio, (char *)buff, bsize);
        if (inl <= 0)
            break;
        if (BIO_write(wbio, (char *)buff, inl) != inl) {
            BIO_printf(bio_err, "error writing output file\n");
            goto end;
        }
    }
    if (!BIO_flush(wbio)) {
        BIO_printf(bio_err, "bad decrypt\n");
        goto end;
    }

    ret = 0;
    if (verbose) {
        BIO_printf(bio_err, "bytes read   :%8"PRIu64"\n", BIO_number_read(in));
        BIO_printf(bio_err, "bytes written:%8"PRIu64"\n", BIO_number_written(out));
    }
 end:
    ERR_print_errors(bio_err);
    OPENSSL_free(strbuf);
    OPENSSL_free(buff);
    BIO_free(in);
    BIO_free_all(out);
    BIO_free(benc);
    BIO_free(b64);
#ifdef ZLIB
    BIO_free(bzl);
#endif
    OPENSSL_free(pass);
    return (ret);
}

static void show_ciphers(const OBJ_NAME *name, void *bio_)
{
    BIO *bio = bio_;
    static int n;

    if (!islower((unsigned char)*name->name))
        return;

    BIO_printf(bio, "-%-25s", name->name);
    if (++n == 3) {
        BIO_printf(bio, "\n");
        n = 0;
    } else
        BIO_printf(bio, " ");
}

static int set_hex(char *in, unsigned char *out, int size)
{
    int i, n;
    unsigned char j;

    n = strlen(in);
    if (n > (size * 2)) {
        BIO_printf(bio_err, "hex string is too long\n");
        return (0);
    }
    memset(out, 0, size);
    for (i = 0; i < n; i++) {
        j = (unsigned char)*in;
        *(in++) = '\0';
        if (j == 0)
            break;
        if (!isxdigit(j)) {
            BIO_printf(bio_err, "non-hex digit\n");
            return (0);
        }
        j = (unsigned char)OPENSSL_hexchar2int(j);
        if (i & 1)
            out[i / 2] |= j;
        else
            out[i / 2] = (j << 4);
    }
    return (1);
}
null
null
null
null
118,936
71,184
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
71,184
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "services/proxy_resolver/proxy_resolver_factory_impl.h"

#include <string>
#include <utility>

#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "mojo/public/cpp/bindings/strong_binding.h"
#include "net/base/net_errors.h"
#include "net/proxy_resolution/mojo_proxy_resolver_v8_tracing_bindings.h"
#include "net/proxy_resolution/proxy_resolver_factory.h"
#include "net/proxy_resolution/proxy_resolver_v8_tracing.h"
#include "services/proxy_resolver/proxy_resolver_impl.h"

namespace proxy_resolver {

class ProxyResolverFactoryImpl::Job {
 public:
  Job(ProxyResolverFactoryImpl* parent,
      const scoped_refptr<net::PacFileData>& pac_script,
      net::ProxyResolverV8TracingFactory* proxy_resolver_factory,
      mojo::InterfaceRequest<mojom::ProxyResolver> request,
      mojom::ProxyResolverFactoryRequestClientPtr client,
      std::unique_ptr<service_manager::ServiceContextRef> service_ref);
  ~Job();

 private:
  // Mojo error handler.
  void OnConnectionError();

  void OnProxyResolverCreated(int error);

  ProxyResolverFactoryImpl* const parent_;
  std::unique_ptr<net::ProxyResolverV8Tracing> proxy_resolver_impl_;
  mojo::InterfaceRequest<mojom::ProxyResolver> proxy_request_;
  net::ProxyResolverV8TracingFactory* factory_;
  std::unique_ptr<net::ProxyResolverFactory::Request> request_;
  mojom::ProxyResolverFactoryRequestClientPtr client_ptr_;
  std::unique_ptr<service_manager::ServiceContextRef> service_ref_;

  DISALLOW_COPY_AND_ASSIGN(Job);
};

ProxyResolverFactoryImpl::Job::Job(
    ProxyResolverFactoryImpl* factory,
    const scoped_refptr<net::PacFileData>& pac_script,
    net::ProxyResolverV8TracingFactory* proxy_resolver_factory,
    mojo::InterfaceRequest<mojom::ProxyResolver> request,
    mojom::ProxyResolverFactoryRequestClientPtr client,
    std::unique_ptr<service_manager::ServiceContextRef> service_ref)
    : parent_(factory),
      proxy_request_(std::move(request)),
      factory_(proxy_resolver_factory),
      client_ptr_(std::move(client)),
      service_ref_(std::move(service_ref)) {
  client_ptr_.set_connection_error_handler(
      base::Bind(&ProxyResolverFactoryImpl::Job::OnConnectionError,
                 base::Unretained(this)));
  factory_->CreateProxyResolverV8Tracing(
      pac_script,
      std::make_unique<net::MojoProxyResolverV8TracingBindings<
          mojom::ProxyResolverFactoryRequestClient>>(client_ptr_.get()),
      &proxy_resolver_impl_,
      base::Bind(&ProxyResolverFactoryImpl::Job::OnProxyResolverCreated,
                 base::Unretained(this)),
      &request_);
}

ProxyResolverFactoryImpl::Job::~Job() = default;

void ProxyResolverFactoryImpl::Job::OnConnectionError() {
  client_ptr_->ReportResult(net::ERR_PAC_SCRIPT_TERMINATED);
  parent_->RemoveJob(this);
}

void ProxyResolverFactoryImpl::Job::OnProxyResolverCreated(int error) {
  if (error == net::OK) {
    mojo::MakeStrongBinding(
        std::make_unique<ProxyResolverImpl>(std::move(proxy_resolver_impl_),
                                            std::move(service_ref_)),
        std::move(proxy_request_));
  }
  client_ptr_->ReportResult(error);
  parent_->RemoveJob(this);
}

ProxyResolverFactoryImpl::ProxyResolverFactoryImpl()
    : ProxyResolverFactoryImpl(net::ProxyResolverV8TracingFactory::Create()) {}

void ProxyResolverFactoryImpl::BindRequest(
    proxy_resolver::mojom::ProxyResolverFactoryRequest request,
    service_manager::ServiceContextRefFactory* ref_factory) {
  if (binding_set_.empty()) {
    DCHECK(!service_ref_);
    service_ref_ = ref_factory->CreateRef();
  }
  DCHECK(service_ref_.get());
  binding_set_.AddBinding(this, std::move(request));
}

ProxyResolverFactoryImpl::ProxyResolverFactoryImpl(
    std::unique_ptr<net::ProxyResolverV8TracingFactory> proxy_resolver_factory)
    : proxy_resolver_impl_factory_(std::move(proxy_resolver_factory)) {
  binding_set_.set_connection_error_handler(base::Bind(
      &ProxyResolverFactoryImpl::OnConnectionError, base::Unretained(this)));
}

ProxyResolverFactoryImpl::~ProxyResolverFactoryImpl() {}

void ProxyResolverFactoryImpl::CreateResolver(
    const std::string& pac_script,
    mojo::InterfaceRequest<mojom::ProxyResolver> request,
    mojom::ProxyResolverFactoryRequestClientPtr client) {
  DCHECK(service_ref_);

  // The Job will call RemoveJob on |this| when either the create request
  // finishes or |request| or |client| encounters a connection error.
  std::unique_ptr<Job> job = std::make_unique<Job>(
      this, net::PacFileData::FromUTF8(pac_script),
      proxy_resolver_impl_factory_.get(), std::move(request),
      std::move(client), service_ref_->Clone());
  Job* job_ptr = job.get();
  jobs_[job_ptr] = std::move(job);
}

void ProxyResolverFactoryImpl::RemoveJob(Job* job) {
  size_t erased_count = jobs_.erase(job);
  DCHECK_EQ(1U, erased_count);
}

void ProxyResolverFactoryImpl::OnConnectionError() {
  DCHECK(service_ref_);
  if (binding_set_.empty())
    service_ref_.reset();
}

}  // namespace proxy_resolver
null
null
null
null
68,047
9,282
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
174,277
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/*
 *  linux/arch/arm/mach-mmp/aspenite.c
 *
 *  Support for the Marvell PXA168-based Aspenite and Zylonite2
 *  Development Platform.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/gpio.h>
#include <linux/gpio-pxa.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/smc91x.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
#include <linux/interrupt.h>
#include <linux/platform_data/mv_usb.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <video/pxa168fb.h>
#include <linux/input.h>
#include <linux/platform_data/keypad-pxa27x.h>

#include "addr-map.h"
#include "mfp-pxa168.h"
#include "pxa168.h"
#include "irqs.h"
#include "common.h"

static unsigned long common_pin_config[] __initdata = {
	/* Data Flash Interface */
	GPIO0_DFI_D15,
	GPIO1_DFI_D14,
	GPIO2_DFI_D13,
	GPIO3_DFI_D12,
	GPIO4_DFI_D11,
	GPIO5_DFI_D10,
	GPIO6_DFI_D9,
	GPIO7_DFI_D8,
	GPIO8_DFI_D7,
	GPIO9_DFI_D6,
	GPIO10_DFI_D5,
	GPIO11_DFI_D4,
	GPIO12_DFI_D3,
	GPIO13_DFI_D2,
	GPIO14_DFI_D1,
	GPIO15_DFI_D0,

	/* Static Memory Controller */
	GPIO18_SMC_nCS0,
	GPIO34_SMC_nCS1,
	GPIO23_SMC_nLUA,
	GPIO25_SMC_nLLA,
	GPIO28_SMC_RDY,
	GPIO29_SMC_SCLK,
	GPIO35_SMC_BE1,
	GPIO36_SMC_BE2,
	GPIO27_GPIO,	/* Ethernet IRQ */

	/* UART1 */
	GPIO107_UART1_RXD,
	GPIO108_UART1_TXD,

	/* SSP1 */
	GPIO113_I2S_MCLK,
	GPIO114_I2S_FRM,
	GPIO115_I2S_BCLK,
	GPIO116_I2S_RXD,
	GPIO117_I2S_TXD,

	/* LCD */
	GPIO56_LCD_FCLK_RD,
	GPIO57_LCD_LCLK_A0,
	GPIO58_LCD_PCLK_WR,
	GPIO59_LCD_DENA_BIAS,
	GPIO60_LCD_DD0,
	GPIO61_LCD_DD1,
	GPIO62_LCD_DD2,
	GPIO63_LCD_DD3,
	GPIO64_LCD_DD4,
	GPIO65_LCD_DD5,
	GPIO66_LCD_DD6,
	GPIO67_LCD_DD7,
	GPIO68_LCD_DD8,
	GPIO69_LCD_DD9,
	GPIO70_LCD_DD10,
	GPIO71_LCD_DD11,
	GPIO72_LCD_DD12,
	GPIO73_LCD_DD13,
	GPIO74_LCD_DD14,
	GPIO75_LCD_DD15,
	GPIO76_LCD_DD16,
	GPIO77_LCD_DD17,
	GPIO78_LCD_DD18,
	GPIO79_LCD_DD19,
	GPIO80_LCD_DD20,
	GPIO81_LCD_DD21,
	GPIO82_LCD_DD22,
	GPIO83_LCD_DD23,

	/* Keypad */
	GPIO109_KP_MKIN1,
	GPIO110_KP_MKIN0,
	GPIO111_KP_MKOUT7,
	GPIO112_KP_MKOUT6,
	GPIO121_KP_MKIN4,
};

static struct pxa_gpio_platform_data pxa168_gpio_pdata = {
	.irq_base	= MMP_GPIO_TO_IRQ(0),
};

static struct smc91x_platdata smc91x_info = {
	.flags	= SMC91X_USE_16BIT | SMC91X_NOWAIT,
};

static struct resource smc91x_resources[] = {
	[0] = {
		.start	= SMC_CS1_PHYS_BASE + 0x300,
		.end	= SMC_CS1_PHYS_BASE + 0xfffff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= MMP_GPIO_TO_IRQ(27),
		.end	= MMP_GPIO_TO_IRQ(27),
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
	}
};

static struct platform_device smc91x_device = {
	.name		= "smc91x",
	.id		= 0,
	.dev		= {
		.platform_data = &smc91x_info,
	},
	.num_resources	= ARRAY_SIZE(smc91x_resources),
	.resource	= smc91x_resources,
};

static struct mtd_partition aspenite_nand_partitions[] = {
	{
		.name		= "bootloader",
		.offset		= 0,
		.size		= SZ_1M,
		.mask_flags	= MTD_WRITEABLE,
	}, {
		.name		= "reserved",
		.offset		= MTDPART_OFS_APPEND,
		.size		= SZ_128K,
		.mask_flags	= MTD_WRITEABLE,
	}, {
		.name		= "reserved",
		.offset		= MTDPART_OFS_APPEND,
		.size		= SZ_8M,
		.mask_flags	= MTD_WRITEABLE,
	}, {
		.name		= "kernel",
		.offset		= MTDPART_OFS_APPEND,
		.size		= (SZ_2M + SZ_1M),
		.mask_flags	= 0,
	}, {
		.name		= "filesystem",
		.offset		= MTDPART_OFS_APPEND,
		.size		= SZ_32M + SZ_16M,
		.mask_flags	= 0,
	}
};

static struct pxa3xx_nand_platform_data aspenite_nand_info = {
	.enable_arbiter	= 1,
	.num_cs		= 1,
	.parts[0]	= aspenite_nand_partitions,
	.nr_parts[0]	= ARRAY_SIZE(aspenite_nand_partitions),
};
static struct i2c_board_info aspenite_i2c_info[] __initdata = {
	{ I2C_BOARD_INFO("wm8753", 0x1b), },
};

static struct fb_videomode video_modes[] = {
	[0] = {
		.pixclock	= 30120,
		.refresh	= 60,
		.xres		= 800,
		.yres		= 480,
		.hsync_len	= 1,
		.left_margin	= 215,
		.right_margin	= 40,
		.vsync_len	= 1,
		.upper_margin	= 34,
		.lower_margin	= 10,
		.sync		= FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT,
	},
};

struct pxa168fb_mach_info aspenite_lcd_info = {
	.id			= "Graphic Frame",
	.modes			= video_modes,
	.num_modes		= ARRAY_SIZE(video_modes),
	.pix_fmt		= PIX_FMT_RGB565,
	.io_pin_allocation_mode	= PIN_MODE_DUMB_24,
	.dumb_mode		= DUMB_MODE_RGB888,
	.active			= 1,
	.panel_rbswap		= 0,
	.invert_pixclock	= 0,
};

static const unsigned int aspenite_matrix_key_map[] = {
	KEY(0, 6, KEY_UP),	/* SW 4 */
	KEY(0, 7, KEY_DOWN),	/* SW 5 */
	KEY(1, 6, KEY_LEFT),	/* SW 6 */
	KEY(1, 7, KEY_RIGHT),	/* SW 7 */
	KEY(4, 6, KEY_ENTER),	/* SW 8 */
	KEY(4, 7, KEY_ESC),	/* SW 9 */
};

static struct matrix_keymap_data aspenite_matrix_keymap_data = {
	.keymap			= aspenite_matrix_key_map,
	.keymap_size		= ARRAY_SIZE(aspenite_matrix_key_map),
};

static struct pxa27x_keypad_platform_data aspenite_keypad_info __initdata = {
	.matrix_key_rows	= 5,
	.matrix_key_cols	= 8,
	.matrix_keymap_data	= &aspenite_matrix_keymap_data,
	.debounce_interval	= 30,
};

#if IS_ENABLED(CONFIG_USB_EHCI_MV)
static struct mv_usb_platform_data pxa168_sph_pdata = {
	.mode		= MV_USB_MODE_HOST,
	.phy_init	= pxa_usb_phy_init,
	.phy_deinit	= pxa_usb_phy_deinit,
	.set_vbus	= NULL,
};
#endif

static void __init common_init(void)
{
	mfp_config(ARRAY_AND_SIZE(common_pin_config));

	/* on-chip devices */
	pxa168_add_uart(1);
	pxa168_add_twsi(1, NULL, ARRAY_AND_SIZE(aspenite_i2c_info));
	pxa168_add_ssp(1);
	pxa168_add_nand(&aspenite_nand_info);
	pxa168_add_fb(&aspenite_lcd_info);
	pxa168_add_keypad(&aspenite_keypad_info);
	platform_device_add_data(&pxa168_device_gpio, &pxa168_gpio_pdata,
				 sizeof(struct pxa_gpio_platform_data));
	platform_device_register(&pxa168_device_gpio);

	/* off-chip devices */
	platform_device_register(&smc91x_device);

#if IS_ENABLED(CONFIG_USB_EHCI_MV)
	pxa168_add_usb_host(&pxa168_sph_pdata);
#endif
}

MACHINE_START(ASPENITE, "PXA168-based Aspenite Development Platform")
	.map_io		= mmp_map_io,
	.nr_irqs	= MMP_NR_IRQS,
	.init_irq	= pxa168_init_irq,
	.init_time	= pxa168_timer_init,
	.init_machine	= common_init,
	.restart	= pxa168_restart,
MACHINE_END

MACHINE_START(ZYLONITE2, "PXA168-based Zylonite2 Development Platform")
	.map_io		= mmp_map_io,
	.nr_irqs	= MMP_NR_IRQS,
	.init_irq	= pxa168_init_irq,
	.init_time	= pxa168_timer_init,
	.init_machine	= common_init,
	.restart	= pxa168_restart,
MACHINE_END
null
null
null
null
82,624
41,750
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
206,745
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/*
 * Analog Devices ADP5589/ADP5585 I/O Expander and QWERTY Keypad Controller
 *
 * Copyright 2010-2011 Analog Devices Inc.
 *
 * Licensed under the GPL-2.
 */

#ifndef _ADP5589_H
#define _ADP5589_H

/*
 * ADP5589 specific GPI and Keymap defines
 */

#define ADP5589_KEYMAPSIZE	88

#define ADP5589_GPI_PIN_ROW0	97
#define ADP5589_GPI_PIN_ROW1	98
#define ADP5589_GPI_PIN_ROW2	99
#define ADP5589_GPI_PIN_ROW3	100
#define ADP5589_GPI_PIN_ROW4	101
#define ADP5589_GPI_PIN_ROW5	102
#define ADP5589_GPI_PIN_ROW6	103
#define ADP5589_GPI_PIN_ROW7	104
#define ADP5589_GPI_PIN_COL0	105
#define ADP5589_GPI_PIN_COL1	106
#define ADP5589_GPI_PIN_COL2	107
#define ADP5589_GPI_PIN_COL3	108
#define ADP5589_GPI_PIN_COL4	109
#define ADP5589_GPI_PIN_COL5	110
#define ADP5589_GPI_PIN_COL6	111
#define ADP5589_GPI_PIN_COL7	112
#define ADP5589_GPI_PIN_COL8	113
#define ADP5589_GPI_PIN_COL9	114
#define ADP5589_GPI_PIN_COL10	115
#define GPI_LOGIC1		116
#define GPI_LOGIC2		117

#define ADP5589_GPI_PIN_ROW_BASE	ADP5589_GPI_PIN_ROW0
#define ADP5589_GPI_PIN_ROW_END		ADP5589_GPI_PIN_ROW7
#define ADP5589_GPI_PIN_COL_BASE	ADP5589_GPI_PIN_COL0
#define ADP5589_GPI_PIN_COL_END		ADP5589_GPI_PIN_COL10

#define ADP5589_GPI_PIN_BASE	ADP5589_GPI_PIN_ROW_BASE
#define ADP5589_GPI_PIN_END	ADP5589_GPI_PIN_COL_END

#define ADP5589_GPIMAPSIZE_MAX	(ADP5589_GPI_PIN_END - ADP5589_GPI_PIN_BASE + 1)

/*
 * ADP5585 specific GPI and Keymap defines
 */

#define ADP5585_KEYMAPSIZE	30

#define ADP5585_GPI_PIN_ROW0	37
#define ADP5585_GPI_PIN_ROW1	38
#define ADP5585_GPI_PIN_ROW2	39
#define ADP5585_GPI_PIN_ROW3	40
#define ADP5585_GPI_PIN_ROW4	41
#define ADP5585_GPI_PIN_ROW5	42
#define ADP5585_GPI_PIN_COL0	43
#define ADP5585_GPI_PIN_COL1	44
#define ADP5585_GPI_PIN_COL2	45
#define ADP5585_GPI_PIN_COL3	46
#define ADP5585_GPI_PIN_COL4	47
#define GPI_LOGIC		48

#define ADP5585_GPI_PIN_ROW_BASE	ADP5585_GPI_PIN_ROW0
#define ADP5585_GPI_PIN_ROW_END		ADP5585_GPI_PIN_ROW5
#define ADP5585_GPI_PIN_COL_BASE	ADP5585_GPI_PIN_COL0
#define ADP5585_GPI_PIN_COL_END		ADP5585_GPI_PIN_COL4

#define ADP5585_GPI_PIN_BASE	ADP5585_GPI_PIN_ROW_BASE
#define ADP5585_GPI_PIN_END	ADP5585_GPI_PIN_COL_END

#define ADP5585_GPIMAPSIZE_MAX	(ADP5585_GPI_PIN_END - ADP5585_GPI_PIN_BASE + 1)

struct adp5589_gpi_map {
	unsigned short pin;
	unsigned short sw_evt;
};

/* scan_cycle_time */
#define ADP5589_SCAN_CYCLE_10ms		0
#define ADP5589_SCAN_CYCLE_20ms		1
#define ADP5589_SCAN_CYCLE_30ms		2
#define ADP5589_SCAN_CYCLE_40ms		3

/* RESET_CFG */
#define RESET_PULSE_WIDTH_500us		0
#define RESET_PULSE_WIDTH_1ms		1
#define RESET_PULSE_WIDTH_2ms		2
#define RESET_PULSE_WIDTH_10ms		3

#define RESET_TRIG_TIME_0ms		(0 << 2)
#define RESET_TRIG_TIME_1000ms		(1 << 2)
#define RESET_TRIG_TIME_1500ms		(2 << 2)
#define RESET_TRIG_TIME_2000ms		(3 << 2)
#define RESET_TRIG_TIME_2500ms		(4 << 2)
#define RESET_TRIG_TIME_3000ms		(5 << 2)
#define RESET_TRIG_TIME_3500ms		(6 << 2)
#define RESET_TRIG_TIME_4000ms		(7 << 2)

#define RESET_PASSTHRU_EN		(1 << 5)
#define RESET1_POL_HIGH			(1 << 6)
#define RESET1_POL_LOW			(0 << 6)
#define RESET2_POL_HIGH			(1 << 7)
#define RESET2_POL_LOW			(0 << 7)

/* ADP5589 Mask Bits:
 * C C C C C C C C C C C | R R R R R R R R
 * 1 9 8 7 6 5 4 3 2 1 0 | 7 6 5 4 3 2 1 0
 * 0
 * ---------------- BIT ------------------
 * 1 1 1 1 1 1 1 1 1 0 0 | 0 0 0 0 0 0 0 0
 * 8 7 6 5 4 3 2 1 0 9 8 | 7 6 5 4 3 2 1 0
 */

#define ADP_ROW(x)	(1 << (x))
#define ADP_COL(x)	(1 << (x + 8))
#define ADP5589_ROW_MASK	0xFF
#define ADP5589_COL_MASK	0xFF
#define ADP5589_COL_SHIFT	8
#define ADP5589_MAX_ROW_NUM	7
#define ADP5589_MAX_COL_NUM	10

/* ADP5585 Mask Bits:
 * C C C C C | R R R R R R
 * 4 3 2 1 0 | 5 4 3 2 1 0
 *
 * ---- BIT -- -----------
 * 1 0 0 0 0 | 0 0 0 0 0 0
 * 0 9 8 7 6 | 5 4 3 2 1 0
 */

#define ADP5585_ROW_MASK	0x3F
#define ADP5585_COL_MASK	0x1F
#define ADP5585_ROW_SHIFT	0
#define ADP5585_COL_SHIFT	6
#define ADP5585_MAX_ROW_NUM	5
#define ADP5585_MAX_COL_NUM	4

#define ADP5585_ROW(x)	(1 << ((x) & ADP5585_ROW_MASK))
#define ADP5585_COL(x)	(1 << (((x) & ADP5585_COL_MASK) + ADP5585_COL_SHIFT))

/* Put one of these structures in i2c_board_info platform_data */

struct adp5589_kpad_platform_data {
	unsigned keypad_en_mask;	/* Keypad (Rows/Columns) enable mask */
	const unsigned short *keymap;	/* Pointer to keymap */
	unsigned short keymapsize;	/* Keymap size */
	bool repeat;			/* Enable key repeat */
	bool en_keylock;		/* Enable key lock feature (ADP5589 only)*/
	unsigned char unlock_key1;	/* Unlock Key 1 (ADP5589 only) */
	unsigned char unlock_key2;	/* Unlock Key 2 (ADP5589 only) */
	unsigned char unlock_timer;	/* Time in seconds [0..7] between the two
					   unlock keys 0=disable (ADP5589 only) */
	unsigned char scan_cycle_time;	/* Time between consecutive scan cycles */
	unsigned char reset_cfg;	/* Reset config */
	unsigned short reset1_key_1;	/* Reset Key 1 */
	unsigned short reset1_key_2;	/* Reset Key 2 */
	unsigned short reset1_key_3;	/* Reset Key 3 */
	unsigned short reset2_key_1;	/* Reset Key 1 */
	unsigned short reset2_key_2;	/* Reset Key 2 */
	unsigned debounce_dis_mask;	/* Disable debounce mask */
	unsigned pull_dis_mask;		/* Disable all pull resistors mask */
	unsigned pullup_en_100k;	/* Pull-Up 100k Enable Mask */
	unsigned pullup_en_300k;	/* Pull-Up 300k Enable Mask */
	unsigned pulldown_en_300k;	/* Pull-Down 300k Enable Mask */
	const struct adp5589_gpi_map *gpimap;
	unsigned short gpimapsize;
	const struct adp5589_gpio_platform_data *gpio_data;
};

struct i2c_client; /* forward declaration */

struct adp5589_gpio_platform_data {
	int gpio_start;		/* GPIO Chip base # */
	int (*setup)(struct i2c_client *client,
		     int gpio, unsigned ngpio,
		     void *context);
	int (*teardown)(struct i2c_client *client,
			int gpio, unsigned ngpio,
			void *context);
	void *context;
};

#endif
null
null
null
null
115,092
34,137
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
199,132
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/*
 *  ALSA PCM device for the
 *  ALSA interface to cobalt PCM capture streams
 *
 *  Copyright 2014-2015 Cisco Systems, Inc. and/or its affiliates.
 *  All rights reserved.
 *
 *  This program is free software; you may redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 *  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 *  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 *  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 *  BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 *  ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 *  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 *  SOFTWARE.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

#include <media/v4l2-device.h>

#include <sound/core.h>
#include <sound/pcm.h>

#include "cobalt-driver.h"
#include "cobalt-alsa.h"
#include "cobalt-alsa-pcm.h"

static unsigned int pcm_debug;
module_param(pcm_debug, int, 0644);
MODULE_PARM_DESC(pcm_debug, "enable debug messages for pcm");

#define dprintk(fmt, arg...) \
	do { \
		if (pcm_debug) \
			pr_info("cobalt-alsa-pcm %s: " fmt, __func__, ##arg); \
	} while (0)

static struct snd_pcm_hardware snd_cobalt_hdmi_capture = {
	.info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_MMAP_VALID,

	.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE,

	.rates = SNDRV_PCM_RATE_48000,

	.rate_min = 48000,
	.rate_max = 48000,
	.channels_min = 1,
	.channels_max = 8,
	.buffer_bytes_max = 4 * 240 * 8 * 4,	/* 5 ms of data */
	.period_bytes_min = 1920,	/* 1 sample = 8 * 4 bytes */
	.period_bytes_max = 240 * 8 * 4,	/* 5 ms of 8 channel data */
	.periods_min = 1,
	.periods_max = 4,
};

static struct snd_pcm_hardware snd_cobalt_playback = {
	.info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_MMAP_VALID,

	.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE,

	.rates = SNDRV_PCM_RATE_48000,

	.rate_min = 48000,
	.rate_max = 48000,
	.channels_min = 1,
	.channels_max = 8,
	.buffer_bytes_max = 4 * 240 * 8 * 4,	/* 5 ms of data */
	.period_bytes_min = 1920,	/* 1 sample = 8 * 4 bytes */
	.period_bytes_max = 240 * 8 * 4,	/* 5 ms of 8 channel data */
	.periods_min = 1,
	.periods_max = 4,
};

static void sample_cpy(u8 *dst, const u8 *src, u32 len, bool is_s32)
{
	static const unsigned map[8] = { 0, 1, 5, 4, 2, 3, 6, 7 };
	unsigned idx = 0;

	while (len >= (is_s32 ? 4 : 2)) {
		unsigned offset = map[idx] * 4;
		u32 val = src[offset + 1] + (src[offset + 2] << 8) +
			  (src[offset + 3] << 16);

		if (is_s32) {
			*dst++ = 0;
			*dst++ = val & 0xff;
		}
		*dst++ = (val >> 8) & 0xff;
		*dst++ = (val >> 16) & 0xff;
		len -= is_s32 ? 4 : 2;
		idx++;
	}
}

static void cobalt_alsa_announce_pcm_data(struct snd_cobalt_card *cobsc,
					  u8 *pcm_data,
					  size_t skip,
					  size_t samples)
{
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	unsigned long flags;
	unsigned int oldptr;
	unsigned int stride;
	int length = samples;
	int period_elapsed = 0;
	bool is_s32;

	dprintk("cobalt alsa announce ptr=%p data=%p num_bytes=%zd\n", cobsc,
		pcm_data, samples);

	substream = cobsc->capture_pcm_substream;
	if (substream == NULL) {
		dprintk("substream was NULL\n");
		return;
	}

	runtime = substream->runtime;
	if (runtime == NULL) {
		dprintk("runtime was NULL\n");
		return;
	}
	is_s32 = runtime->format == SNDRV_PCM_FORMAT_S32_LE;

	stride = runtime->frame_bits >> 3;
	if (stride == 0) {
		dprintk("stride is zero\n");
		return;
	}

	if (length == 0) {
		dprintk("%s: length was zero\n", __func__);
		return;
	}

	if (runtime->dma_area == NULL) {
		dprintk("dma area was NULL - ignoring\n");
		return;
	}

	oldptr = cobsc->hwptr_done_capture;
	if (oldptr + length >= runtime->buffer_size) {
		unsigned int cnt = runtime->buffer_size - oldptr;
		unsigned i;

		for (i = 0; i < cnt; i++)
			sample_cpy(runtime->dma_area + (oldptr + i) * stride,
				   pcm_data + i * skip,
				   stride, is_s32);
		for (i = cnt; i < length; i++)
			sample_cpy(runtime->dma_area + (i - cnt) * stride,
				   pcm_data + i * skip, stride, is_s32);
	} else {
		unsigned i;

		for (i = 0; i < length; i++)
			sample_cpy(runtime->dma_area + (oldptr + i) * stride,
				   pcm_data + i * skip,
				   stride, is_s32);
	}
	snd_pcm_stream_lock_irqsave(substream, flags);

	cobsc->hwptr_done_capture += length;
	if (cobsc->hwptr_done_capture >= runtime->buffer_size)
		cobsc->hwptr_done_capture -= runtime->buffer_size;

	cobsc->capture_transfer_done += length;
	if (cobsc->capture_transfer_done >= runtime->period_size) {
		cobsc->capture_transfer_done -= runtime->period_size;
		period_elapsed = 1;
	}

	snd_pcm_stream_unlock_irqrestore(substream, flags);

	if (period_elapsed)
		snd_pcm_period_elapsed(substream);
}

static int alsa_fnc(struct vb2_buffer *vb, void *priv)
{
	struct cobalt_stream *s = priv;
	unsigned char *p = vb2_plane_vaddr(vb, 0);
	int i;

	if (pcm_debug) {
		pr_info("alsa: ");
		for (i = 0; i < 8 * 4; i++) {
			if (!(i & 3))
				pr_cont(" ");
			pr_cont("%02x", p[i]);
		}
		pr_cont("\n");
	}
	cobalt_alsa_announce_pcm_data(s->alsa,
			vb2_plane_vaddr(vb, 0),
			8 * 4,
			vb2_get_plane_payload(vb, 0) / (8 * 4));
	return 0;
}

static int snd_cobalt_pcm_capture_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_cobalt_card *cobsc = snd_pcm_substream_chip(substream);
	struct cobalt_stream *s = cobsc->s;

	runtime->hw = snd_cobalt_hdmi_capture;
	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
	cobsc->capture_pcm_substream = substream;
	runtime->private_data = s;
	cobsc->alsa_record_cnt++;
	if (cobsc->alsa_record_cnt == 1) {
		int rc;

		rc = vb2_thread_start(&s->q, alsa_fnc, s, s->vdev.name);
		if (rc) {
			cobsc->alsa_record_cnt--;
			return rc;
		}
	}
	return 0;
}

static int snd_cobalt_pcm_capture_close(struct snd_pcm_substream *substream)
{
	struct snd_cobalt_card *cobsc = snd_pcm_substream_chip(substream);
	struct cobalt_stream *s = cobsc->s;

	cobsc->alsa_record_cnt--;
	if (cobsc->alsa_record_cnt == 0)
		vb2_thread_stop(&s->q);
	return 0;
}

static int snd_cobalt_pcm_ioctl(struct snd_pcm_substream *substream,
				unsigned int cmd, void *arg)
{
	return snd_pcm_lib_ioctl(substream, cmd, arg);
}

static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
					size_t size)
{
	struct snd_pcm_runtime *runtime = subs->runtime;

	dprintk("Allocating vbuffer\n");
	if (runtime->dma_area) {
		if (runtime->dma_bytes > size)
			return 0;

		vfree(runtime->dma_area);
	}
	runtime->dma_area = vmalloc(size);
	if (!runtime->dma_area)
		return -ENOMEM;
	runtime->dma_bytes = size;

	return 0;
}

static int snd_cobalt_pcm_hw_params(struct snd_pcm_substream *substream,
				    struct snd_pcm_hw_params *params)
{
	dprintk("%s called\n", __func__);

	return snd_pcm_alloc_vmalloc_buffer(substream,
					    params_buffer_bytes(params));
}

static int snd_cobalt_pcm_hw_free(struct snd_pcm_substream *substream)
{
	if (substream->runtime->dma_area) {
		dprintk("freeing pcm capture region\n");
		vfree(substream->runtime->dma_area);
		substream->runtime->dma_area = NULL;
	}

	return 0;
}

static int snd_cobalt_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_cobalt_card *cobsc = snd_pcm_substream_chip(substream);

	cobsc->hwptr_done_capture = 0;
	cobsc->capture_transfer_done = 0;

	return 0;
}

static int snd_cobalt_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_STOP:
		return 0;
	default:
		return -EINVAL;
	}
	return 0;
}

static
snd_pcm_uframes_t snd_cobalt_pcm_pointer(struct snd_pcm_substream *substream)
{
	snd_pcm_uframes_t hwptr_done;
	struct snd_cobalt_card *cobsc = snd_pcm_substream_chip(substream);

	hwptr_done = cobsc->hwptr_done_capture;

	return hwptr_done;
}

static void pb_sample_cpy(u8 *dst, const u8 *src, u32 len, bool is_s32)
{
	static const unsigned map[8] = { 0, 1, 5, 4, 2, 3, 6, 7 };
	unsigned idx = 0;

	while (len >= (is_s32 ? 4 : 2)) {
		unsigned offset = map[idx] * 4;
		u8 *out = dst + offset;

		*out++ = 0;
		if (is_s32) {
			src++;
			*out++ = *src++;
		} else {
			*out++ = 0;
		}
		*out++ = *src++;
		*out = *src++;
		len -= is_s32 ? 4 : 2;
		idx++;
	}
}

static void cobalt_alsa_pb_pcm_data(struct snd_cobalt_card *cobsc,
				    u8 *pcm_data,
				    size_t skip,
				    size_t samples)
{
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	unsigned long flags;
	unsigned int pos;
	unsigned int stride;
	bool is_s32;
	unsigned i;

	dprintk("cobalt alsa pb ptr=%p data=%p samples=%zd\n", cobsc,
		pcm_data, samples);

	substream = cobsc->playback_pcm_substream;
	if (substream == NULL) {
		dprintk("substream was NULL\n");
		return;
	}

	runtime = substream->runtime;
	if (runtime == NULL) {
		dprintk("runtime was NULL\n");
		return;
	}
	is_s32 = runtime->format == SNDRV_PCM_FORMAT_S32_LE;

	stride = runtime->frame_bits >> 3;
	if (stride == 0) {
		dprintk("stride is zero\n");
		return;
	}

	if (samples == 0) {
		dprintk("%s: samples was zero\n", __func__);
		return;
	}

	if (runtime->dma_area == NULL) {
		dprintk("dma area was NULL - ignoring\n");
		return;
	}

	pos = cobsc->pb_pos % cobsc->pb_size;
	for (i = 0; i < cobsc->pb_count / (8 * 4); i++)
		pb_sample_cpy(pcm_data + i * skip,
			      runtime->dma_area + pos + i * stride,
			      stride, is_s32);
	snd_pcm_stream_lock_irqsave(substream, flags);

	cobsc->pb_pos += i * stride;

	snd_pcm_stream_unlock_irqrestore(substream, flags);
	if (cobsc->pb_pos % cobsc->pb_count == 0)
		snd_pcm_period_elapsed(substream);
}

static int alsa_pb_fnc(struct vb2_buffer *vb, void *priv)
{
	struct cobalt_stream *s = priv;

	if (s->alsa->alsa_pb_channel)
		cobalt_alsa_pb_pcm_data(s->alsa,
				vb2_plane_vaddr(vb, 0),
				8 * 4,
				vb2_get_plane_payload(vb, 0) / (8 * 4));
	return 0;
}

static int snd_cobalt_pcm_playback_open(struct snd_pcm_substream *substream)
{
	struct snd_cobalt_card *cobsc = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct cobalt_stream *s = cobsc->s;

	runtime->hw = snd_cobalt_playback;
	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
	cobsc->playback_pcm_substream = substream;
	runtime->private_data = s;
	cobsc->alsa_playback_cnt++;
	if (cobsc->alsa_playback_cnt == 1) {
		int rc;

		rc = vb2_thread_start(&s->q, alsa_pb_fnc, s, s->vdev.name);
		if (rc) {
			cobsc->alsa_playback_cnt--;
			return rc;
		}
	}

	return 0;
}

static int snd_cobalt_pcm_playback_close(struct snd_pcm_substream *substream)
{
	struct snd_cobalt_card *cobsc = snd_pcm_substream_chip(substream);
	struct cobalt_stream *s = cobsc->s;

	cobsc->alsa_playback_cnt--;
	if (cobsc->alsa_playback_cnt == 0)
		vb2_thread_stop(&s->q);
	return 0;
}

static int snd_cobalt_pcm_pb_prepare(struct snd_pcm_substream *substream)
{
	struct snd_cobalt_card *cobsc = snd_pcm_substream_chip(substream);

	cobsc->pb_size = snd_pcm_lib_buffer_bytes(substream);
	cobsc->pb_count = snd_pcm_lib_period_bytes(substream);
	cobsc->pb_pos = 0;

	return 0;
}

static int snd_cobalt_pcm_pb_trigger(struct snd_pcm_substream *substream,
				     int cmd)
{
	struct snd_cobalt_card *cobsc = snd_pcm_substream_chip(substream);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		if (cobsc->alsa_pb_channel)
			return -EBUSY;
		cobsc->alsa_pb_channel = true;
		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
		cobsc->alsa_pb_channel = false;
		return 0;
	default:
		return -EINVAL;
	}
}

static
snd_pcm_uframes_t snd_cobalt_pcm_pb_pointer(struct snd_pcm_substream *substream)
{
	struct snd_cobalt_card *cobsc = snd_pcm_substream_chip(substream);
	size_t ptr;

	ptr = cobsc->pb_pos;

	return bytes_to_frames(substream->runtime, ptr) %
	       substream->runtime->buffer_size;
}

static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
					     unsigned long offset)
{
	void *pageptr = subs->runtime->dma_area + offset;

	return vmalloc_to_page(pageptr);
}

static const struct snd_pcm_ops snd_cobalt_pcm_capture_ops = {
	.open		= snd_cobalt_pcm_capture_open,
	.close		= snd_cobalt_pcm_capture_close,
	.ioctl		= snd_cobalt_pcm_ioctl,
	.hw_params	= snd_cobalt_pcm_hw_params,
	.hw_free	= snd_cobalt_pcm_hw_free,
	.prepare	= snd_cobalt_pcm_prepare,
	.trigger	= snd_cobalt_pcm_trigger,
	.pointer	= snd_cobalt_pcm_pointer,
	.page		= snd_pcm_get_vmalloc_page,
};

static const struct snd_pcm_ops snd_cobalt_pcm_playback_ops = {
	.open		= snd_cobalt_pcm_playback_open,
	.close		= snd_cobalt_pcm_playback_close,
	.ioctl		= snd_cobalt_pcm_ioctl,
	.hw_params	= snd_cobalt_pcm_hw_params,
	.hw_free	= snd_cobalt_pcm_hw_free,
	.prepare	= snd_cobalt_pcm_pb_prepare,
	.trigger	= snd_cobalt_pcm_pb_trigger,
	.pointer	= snd_cobalt_pcm_pb_pointer,
	.page		= snd_pcm_get_vmalloc_page,
};

int snd_cobalt_pcm_create(struct snd_cobalt_card *cobsc)
{
	struct snd_pcm *sp;
	struct snd_card *sc = cobsc->sc;
	struct cobalt_stream *s = cobsc->s;
	struct cobalt *cobalt = s->cobalt;
	int ret;

	s->q.gfp_flags |= __GFP_ZERO;

	if (!s->is_output) {
		cobalt_s_bit_sysctrl(cobalt,
			COBALT_SYS_CTRL_AUDIO_IPP_RESETN_BIT(s->video_channel),
			0);
		mdelay(2);
		cobalt_s_bit_sysctrl(cobalt,
			COBALT_SYS_CTRL_AUDIO_IPP_RESETN_BIT(s->video_channel),
			1);
		mdelay(1);

		ret = snd_pcm_new(sc, "Cobalt PCM-In HDMI",
			0, /* PCM device 0, the only one for this card */
			0, /* 0 playback substreams */
			1, /* 1 capture substream */
			&sp);
		if (ret) {
			cobalt_err("snd_cobalt_pcm_create() failed for input with err %d\n",
				   ret);
			goto err_exit;
		}

		snd_pcm_set_ops(sp, SNDRV_PCM_STREAM_CAPTURE,
				&snd_cobalt_pcm_capture_ops);
		sp->info_flags = 0;
		sp->private_data = cobsc;
		strlcpy(sp->name, "cobalt", sizeof(sp->name));
	} else {
		cobalt_s_bit_sysctrl(cobalt,
			COBALT_SYS_CTRL_AUDIO_OPP_RESETN_BIT, 0);
		mdelay(2);
		cobalt_s_bit_sysctrl(cobalt,
			COBALT_SYS_CTRL_AUDIO_OPP_RESETN_BIT, 1);
		mdelay(1);

		ret = snd_pcm_new(sc, "Cobalt PCM-Out HDMI",
			0, /* PCM device 0, the only one for this card */
			1, /* 1 playback substream */
			0, /* 0 capture substreams */
			&sp);
		if (ret) {
			cobalt_err("snd_cobalt_pcm_create() failed for output with err %d\n",
				   ret);
			goto err_exit;
		}

		snd_pcm_set_ops(sp, SNDRV_PCM_STREAM_PLAYBACK,
				&snd_cobalt_pcm_playback_ops);
		sp->info_flags = 0;
		sp->private_data = cobsc;
		strlcpy(sp->name, "cobalt", sizeof(sp->name));
	}
	return 0;

err_exit:
	return ret;
}
null
null
null
null
107,479
58,380
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
58,380
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CHROME_BROWSER_DBUS_DBUS_THREAD_LINUX_H_
#define CHROME_BROWSER_DBUS_DBUS_THREAD_LINUX_H_

#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
#include "build/build_config.h"

// Many APIs in ::dbus are required to be called from the same thread
// (https://crbug.com/130984). Therefore, a SingleThreadedTaskRunner is
// maintained and accessible through GetDBusTaskRunner(), from which all calls
// to dbus on Linux have to be made.

#if defined(OS_CHROMEOS)
#error On ChromeOS, use DBusThreadManager instead.
#endif

namespace chrome {

scoped_refptr<base::SingleThreadTaskRunner> GetDBusTaskRunner();

}  // namespace chrome

#endif  // CHROME_BROWSER_DBUS_DBUS_THREAD_LINUX_H_
null
null
null
null
55,243
12,483
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
12,483
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef COMPONENTS_INFOBARS_CORE_INFOBAR_H_
#define COMPONENTS_INFOBARS_CORE_INFOBAR_H_

#include <memory>
#include <utility>

#include "base/macros.h"
#include "components/infobars/core/infobar_delegate.h"
#include "ui/gfx/animation/animation_delegate.h"
#include "ui/gfx/animation/slide_animation.h"
#include "ui/gfx/geometry/size.h"

namespace infobars {

class InfoBarContainer;
class InfoBarManager;

// InfoBar is a cross-platform base class for an infobar "view" (in the MVC
// sense), which owns a corresponding InfoBarDelegate "model".  Typically,
// a caller will call XYZInfoBarDelegate::Create() and pass in the
// InfoBarManager for the relevant tab.  This will create an
// XYZInfoBarDelegate, create a platform-specific subclass of InfoBar to own
// it, and then call InfoBarManager::AddInfoBar() to give it ownership of the
// infobar.
// During its life, the InfoBar may be shown and hidden as the owning tab is
// switched between the foreground and background.  Eventually, InfoBarManager
// will instruct the InfoBar to close itself.  At this point, the InfoBar will
// optionally animate closed; once it's no longer visible, it deletes itself,
// destroying the InfoBarDelegate in the process.
//
// Thus, InfoBarDelegate and InfoBar implementations can assume they share
// lifetimes, and not NULL-check each other; but if one needs to reach back
// into the owning InfoBarManager, it must check whether that's still possible.
class InfoBar : public gfx::AnimationDelegate {
 public:
  explicit InfoBar(std::unique_ptr<InfoBarDelegate> delegate);
  ~InfoBar() override;

  InfoBarManager* owner() { return owner_; }
  InfoBarDelegate* delegate() const { return delegate_.get(); }
  void set_container(InfoBarContainer* container) { container_ = container; }

  // Sets |owner_|.  This also sets the nav entry ID on |delegate_|.  This must
  // only be called once as there's no way to extract an infobar from its owner
  // without deleting it, for reparenting in another tab.
  void SetOwner(InfoBarManager* owner);

  // Makes the infobar visible.  If |animate| is true, the infobar is then
  // animated to full size.
  void Show(bool animate);

  // Makes the infobar hidden.  If |animate| is false, the infobar is
  // immediately removed from the container, and, if now unowned, deleted.  If
  // |animate| is true, the infobar is animated to zero size, ultimately
  // triggering a call to AnimationEnded().
  void Hide(bool animate);

  // Notifies the infobar that it is no longer owned and should delete itself
  // once it is invisible.
  void CloseSoon();

  // Forwards a close request to our owner.  This is a no-op if we're already
  // unowned.
  void RemoveSelf();

  // Changes the target height of the infobar.
  void SetTargetHeight(int height);

  const gfx::SlideAnimation& animation() const { return animation_; }
  int computed_height() const { return height_; }

 protected:
  // gfx::AnimationDelegate:
  void AnimationProgressed(const gfx::Animation* animation) override;

  const InfoBarContainer* container() const { return container_; }
  InfoBarContainer* container() { return container_; }
  gfx::SlideAnimation* animation() { return &animation_; }
  int target_height() const { return target_height_; }

  // Platforms may optionally override these if they need to do work during
  // processing of the given calls.
  virtual void PlatformSpecificSetOwner() {}
  virtual void PlatformSpecificShow(bool animate) {}
  virtual void PlatformSpecificHide(bool animate) {}
  virtual void PlatformSpecificOnCloseSoon() {}
  virtual void PlatformSpecificOnHeightRecalculated() {}

 private:
  // gfx::AnimationDelegate:
  void AnimationEnded(const gfx::Animation* animation) override;

  // Finds the new desired height, and if it differs from the current height,
  // calls PlatformSpecificOnHeightRecalculated().  Informs our container our
  // state has changed if either the height has changed or |force_notify| is
  // set.
  void RecalculateHeight(bool force_notify);

  // Checks whether the infobar is unowned and done with all animations.  If
  // so, notifies the container that it should remove this infobar, and
  // deletes itself.
  void MaybeDelete();

  InfoBarManager* owner_;
  std::unique_ptr<InfoBarDelegate> delegate_;
  InfoBarContainer* container_;
  gfx::SlideAnimation animation_;

  // The current and target heights.
  int height_;  // Includes both fill and bottom separator.
  int target_height_;

  DISALLOW_COPY_AND_ASSIGN(InfoBar);
};

}  // namespace infobars

#endif  // COMPONENTS_INFOBARS_CORE_INFOBAR_H_
null
null
null
null
9,346
67,088
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
67,088
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CHROME_BROWSER_NOTIFICATIONS_METRICS_MOCK_NOTIFICATION_METRICS_LOGGER_H_
#define CHROME_BROWSER_NOTIFICATIONS_METRICS_MOCK_NOTIFICATION_METRICS_LOGGER_H_

#include "base/macros.h"
#include "chrome/browser/notifications/metrics/notification_metrics_logger.h"
#include "components/keyed_service/core/keyed_service.h"
#include "content/public/browser/browser_context.h"
#include "testing/gmock/include/gmock/gmock.h"

class MockNotificationMetricsLogger : public NotificationMetricsLogger {
 public:
  ~MockNotificationMetricsLogger() override;

  // Factory function to be used with NotificationMetricsLoggerFactory's
  // SetTestingFactory method, overriding the default metrics logger.
  static std::unique_ptr<KeyedService> FactoryForTests(
      content::BrowserContext* browser_context);

  MOCK_METHOD0(LogPersistentNotificationClosedByUser, void());
  MOCK_METHOD0(LogPersistentNotificationClosedProgrammatically, void());
  MOCK_METHOD0(LogPersistentNotificationActionButtonClick, void());
  MOCK_METHOD0(LogPersistentNotificationClick, void());
  MOCK_METHOD0(LogPersistentNotificationClickWithoutPermission, void());
  MOCK_METHOD0(LogPersistentNotificationShown, void());

 private:
  MockNotificationMetricsLogger();
};

#endif  // CHROME_BROWSER_NOTIFICATIONS_METRICS_MOCK_NOTIFICATION_METRICS_LOGGER_H_
null
null
null
null
63,951
18,454
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
183,449
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef MVME147_H

/* $Id: mvme147.h,v 1.4 1997/01/19 23:07:10 davem Exp $
 *
 * Header file for the MVME147 built-in SCSI controller for Linux
 *
 * Written and (C) 1993, Hamish Macdonald, see mvme147.c for more info
 *
 */

#include <linux/types.h>

int mvme147_detect(struct scsi_host_template *);
int mvme147_release(struct Scsi_Host *);

#ifndef CMD_PER_LUN
#define CMD_PER_LUN 2
#endif

#ifndef CAN_QUEUE
#define CAN_QUEUE 16
#endif

#endif /* MVME147_H */
null
null
null
null
91,796
33,311
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
33,311
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/*
 * Copyright (C) 1999 Lars Knoll ([email protected])
 * (C) 1999 Antti Koivisto ([email protected])
 * (C) 2001 Dirk Mueller ([email protected])
 * Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 Apple Inc.
 * All rights reserved.
 * Copyright (C) 2008, 2009 Torch Mobile Inc. All rights reserved.
 * (http://www.torchmobile.com/)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public License
 * along with this library; see the file COPYING.LIB.  If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

#include "third_party/blink/renderer/core/dom/node_traversal.h"

#include "third_party/blink/renderer/core/dom/container_node.h"
#include "third_party/blink/renderer/core/dom/range.h"

namespace blink {

Node* NodeTraversal::PreviousIncludingPseudo(const Node& current,
                                             const Node* stay_within) {
  if (current == stay_within)
    return nullptr;
  if (Node* previous = current.PseudoAwarePreviousSibling()) {
    while (previous->PseudoAwareLastChild())
      previous = previous->PseudoAwareLastChild();
    return previous;
  }
  return current.parentNode();
}

Node* NodeTraversal::NextIncludingPseudo(const Node& current,
                                         const Node* stay_within) {
  if (Node* next = current.PseudoAwareFirstChild())
    return next;
  if (current == stay_within)
    return nullptr;
  if (Node* next = current.PseudoAwareNextSibling())
    return next;
  for (Node& parent : AncestorsOf(current)) {
    if (parent == stay_within)
      return nullptr;
    if (Node* next = parent.PseudoAwareNextSibling())
      return next;
  }
  return nullptr;
}

Node* NodeTraversal::NextIncludingPseudoSkippingChildren(
    const Node& current,
    const Node* stay_within) {
  if (current == stay_within)
    return nullptr;
  if (Node* next = current.PseudoAwareNextSibling())
    return next;
  for (Node& parent : AncestorsOf(current)) {
    if (parent == stay_within)
      return nullptr;
    if (Node* next = parent.PseudoAwareNextSibling())
      return next;
  }
  return nullptr;
}

Node* NodeTraversal::NextAncestorSibling(const Node& current) {
  DCHECK(!current.nextSibling());
  for (Node& parent : AncestorsOf(current)) {
    if (parent.nextSibling())
      return parent.nextSibling();
  }
  return nullptr;
}

Node* NodeTraversal::NextAncestorSibling(const Node& current,
                                         const Node* stay_within) {
  DCHECK(!current.nextSibling());
  DCHECK_NE(current, stay_within);
  for (Node& parent : AncestorsOf(current)) {
    if (parent == stay_within)
      return nullptr;
    if (parent.nextSibling())
      return parent.nextSibling();
  }
  return nullptr;
}

Node* NodeTraversal::LastWithin(const ContainerNode& current) {
  Node* descendant = current.lastChild();
  for (Node* child = descendant; child; child = child->lastChild())
    descendant = child;
  return descendant;
}

Node& NodeTraversal::LastWithinOrSelf(Node& current) {
  Node* last_descendant =
      current.IsContainerNode()
          ? NodeTraversal::LastWithin(ToContainerNode(current))
          : nullptr;
  return last_descendant ? *last_descendant : current;
}

Node* NodeTraversal::Previous(const Node& current, const Node* stay_within) {
  if (current == stay_within)
    return nullptr;
  if (current.previousSibling()) {
    Node* previous = current.previousSibling();
    while (Node* child = previous->lastChild())
      previous = child;
    return previous;
  }
  return current.parentNode();
}

Node* NodeTraversal::PreviousSkippingChildren(const Node& current,
                                              const Node* stay_within) {
  if (current == stay_within)
    return nullptr;
  if (current.previousSibling())
    return current.previousSibling();
  for (Node& parent : AncestorsOf(current)) {
    if (parent == stay_within)
      return nullptr;
    if (parent.previousSibling())
      return parent.previousSibling();
  }
  return nullptr;
}

Node* NodeTraversal::NextPostOrder(const Node& current,
                                   const Node* stay_within) {
  if (current == stay_within)
    return nullptr;
  if (!current.nextSibling())
    return current.parentNode();
  Node* next = current.nextSibling();
  while (Node* child = next->firstChild())
    next = child;
  return next;
}

Node* NodeTraversal::PreviousAncestorSiblingPostOrder(
    const Node& current,
    const Node* stay_within) {
  DCHECK(!current.previousSibling());
  for (Node& parent : NodeTraversal::AncestorsOf(current)) {
    if (parent == stay_within)
      return nullptr;
    if (parent.previousSibling())
      return parent.previousSibling();
  }
  return nullptr;
}

Node* NodeTraversal::PreviousPostOrder(const Node& current,
                                       const Node* stay_within) {
  if (Node* last_child = current.lastChild())
    return last_child;
  if (current == stay_within)
    return nullptr;
  if (current.previousSibling())
    return current.previousSibling();
  return PreviousAncestorSiblingPostOrder(current, stay_within);
}

Node* NodeTraversal::CommonAncestor(const Node& node_a, const Node& node_b) {
  return Range::commonAncestorContainer(&node_a, &node_b);
}

}  // namespace blink
null
null
null
null
30,174
65
20,21,22
train_val
a14fdb9746262549bbbb96abb87338bacd147e1b?w=1
259,020
php-src
1
https://github.com/php/php-src
null
int php_wddx_deserialize_ex(char *value, int vallen, zval *return_value)
{
	wddx_stack stack;
	XML_Parser parser;
	st_entry *ent;
	int retval;

	wddx_stack_init(&stack);
	parser = XML_ParserCreate("UTF-8");

	XML_SetUserData(parser, &stack);
	XML_SetElementHandler(parser, php_wddx_push_element, php_wddx_pop_element);
	XML_SetCharacterDataHandler(parser, php_wddx_process_data);

	XML_Parse(parser, value, vallen, 1);

	XML_ParserFree(parser);

	if (stack.top == 1) {
		wddx_stack_top(&stack, (void**)&ent);
		*return_value = *(ent->data);
		zval_copy_ctor(return_value);
		retval = SUCCESS;
	} else {
		retval = FAILURE;
	}

	wddx_stack_destroy(&stack);

	return retval;
}
CVE-2016-7132
CWE-476
https://github.com/php/php-src/commit/a14fdb9746262549bbbb96abb87338bacd147e1b?w=1
Low
4,223