file_name (string, length 3-137) | prefix (string, length 0-918k) | suffix (string, length 0-962k) | middle (string, length 0-812k)
---|---|---|---|
vue.config_20191129085714.js
|
'use strict'
const path = require('path')
const defaultSettings = require('./src/settings.js')
function
|
(dir) {
return path.join(__dirname, dir)
}
const name = defaultSettings.title || 'vue Element Admin' // page title
// If your port is set to 80,
// use administrator privileges to execute the command line.
// For example, Mac: sudo npm run
// You can change the port by the following method:
// port = 9527 npm run dev OR npm run dev --port = 9527
const port = process.env.port || process.env.npm_config_port || 5678 // dev port
// All configuration item explanations can be found at https://cli.vuejs.org/config/
module.exports = {
/**
* You will need to set publicPath if you plan to deploy your site under a sub path,
* for example GitHub Pages. If you plan to deploy your site to https://foo.github.io/bar/,
* then publicPath should be set to "/bar/".
* In most cases please use '/' !!!
* Detail: https://cli.vuejs.org/config/#publicpath
*/
publicPath: '/',
outputDir: 'dist',
assetsDir: 'static',
lintOnSave: process.env.NODE_ENV === 'development',
productionSourceMap: false,
devServer: {
port: port,
open: true,
overlay: {
warnings: false,
errors: true
},
proxy: {
// change xxx-api/login => mock/login
// detail: https://cli.vuejs.org/config/#devserver-proxy
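// Illustrative mapping (assuming, for example, VUE_APP_BASE_API = '/dev-api', a hypothetical value):
// a request to '/dev-api/login' is rewritten to '/login' and proxied to the target below.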
[process.env.VUE_APP_BASE_API]: {
/* target: `http://127.0.0.1:${port}/mock`, */
target: `http://192.168.2.44:8018/`,
changeOrigin: true,
pathRewrite: {
['^' + process.env.VUE_APP_BASE_API]: ''
}
}
}
//after: require('./mock/mock-server.js')
},
configureWebpack: {
// provide the app's title in webpack's name field, so that
// it can be accessed in index.html to inject the correct title.
name: name,
resolve: {
alias: {
'@': resolve('src')
}
}
},
chainWebpack(config) {
config.plugins.delete('preload') // TODO: need test
config.plugins.delete('prefetch') // TODO: need test
// set svg-sprite-loader
config.module
.rule('svg')
.exclude.add(resolve('src/icons'))
.end()
config.module
.rule('icons')
.test(/\.svg$/)
.include.add(resolve('src/icons'))
.end()
.use('svg-sprite-loader')
.loader('svg-sprite-loader')
.options({
symbolId: 'icon-[name]'
})
.end()
// set preserveWhitespace
config.module
.rule('vue')
.use('vue-loader')
.loader('vue-loader')
.tap(options => {
options.compilerOptions.preserveWhitespace = true
return options
})
.end()
config
// https://webpack.js.org/configuration/devtool/#development
.when(process.env.NODE_ENV === 'development',
config => config.devtool('cheap-source-map')
)
config
.when(process.env.NODE_ENV !== 'development',
config => {
config
.plugin('ScriptExtHtmlWebpackPlugin')
.after('html')
.use('script-ext-html-webpack-plugin', [{
// `runtime` must be the same as the runtimeChunk name. Default is `runtime`.
inline: /runtime\..*\.js$/
}])
.end()
config
.optimization.splitChunks({
chunks: 'all',
cacheGroups: {
libs: {
name: 'chunk-libs',
test: /[\\/]node_modules[\\/]/,
priority: 10,
chunks: 'initial' // only bundle third-party packages that are needed on initial load
},
elementUI: {
name: 'chunk-elementUI', // split elementUI into a single package
priority: 20, // the priority needs to be higher than that of libs and app, or elementUI will be packaged into the libs or app chunk
test: /[\\/]node_modules[\\/]_?element-ui(.*)/ // in order to adapt to cnpm
},
commons: {
name: 'chunk-commons',
test: resolve('src/components'), // can customize your rules
minChunks: 3, // minimum number of chunks that must share a module before it is split out
priority: 5,
reuseExistingChunk: true
}
}
})
config.optimization.runtimeChunk('single')
}
)
}
}
|
resolve
|
AWTTransmissionManagerCore.d.ts
|
/**
* AWTTransmissionManagerCore.ts
* @author Abhilash Panwar (abpanwar)
* @copyright Microsoft 2017
*/
import { AWTEventHandler, AWTLogConfiguration } from './DataModels';
import { AWTEventDataWithMetaData } from '../common/DataModels';
/**
* Class that manages the timers for when to send events. It also
* handles flush and flushAndTeardown. This class also allows setting
* new event handlers. The default event handler is the Inbound Queue Manager.
*/
export default class
|
{
private static _eventHandler;
private static _newEventsAllowed;
private static _currentProfile;
private static _timeout;
private static _currentBackoffCount;
private static _profiles;
private static _paused;
private static _timerCount;
private static _lastUploadNowCall;
private static _config;
/**
* Sets the event handler to be used by the transmission manager.
* The default event handler is the Inbound Queue Manager. This handler
* is used to batch and send events to Aria. If you intend to send events
* to Aria, please make sure your event handler forwards events to the Inbound
* Queue Manager. You can retrieve the Inbound Queue Manager by calling
* getEventsHandler before you set your handler.
* @param {object} eventsHandler - The new events handler to be used by the transmission
* manager.
*/
static setEventsHandler(eventsHandler: AWTEventHandler): void;
/**
* Gets the current event handler used by the transmission manager.
* @return {object} The event handler currently used by the transmission manager.
*/
static getEventsHandler(): AWTEventHandler;
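/*
 * Illustrative pattern (a sketch, not part of the original declaration): keep a
 * reference to the default Inbound Queue Manager before installing a custom handler,
 * so the custom handler can keep forwarding events to it.
 *
 *   const inboundQueueManager = AWTTransmissionManagerCore.getEventsHandler();
 *   AWTTransmissionManagerCore.setEventsHandler(myForwardingHandler); // myForwardingHandler is hypothetical
 */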
/**
* Try to schedule the timer after which events will be sent. If there are
* no events to be sent, or there is already a timer scheduled, or the
* http manager doesn't have any idle connections, this method is a no-op.
*/
static scheduleTimer(): void;
/**
* Initialize the transmission manager. After this method is called events are
* accepted for transmission.
* @param {object} config - The configuration passed during AWTLogManager initialize.
*/
static initialize(config: AWTLogConfiguration): void;
/**
* Set the transmit profile to be used. This will change the transmission timers
* based on the transmit profile.
* @param {string} profileName - The name of the transmit profile to be used.
*/
static setTransmitProfile(profileName: string): void;
/**
* Load custom transmission profiles. Each profile should have timers for
* high, normal and low priority. Each profile should make sure
* that each priority's timer is a multiple of the timer for the priority above it.
* Setting a timer value to -1 means that the events for that priority will
* not be sent. Note that once a priority has been set to not send, all priorities
* below it will also not be sent. The timers should be in the form of [low, normal, high],
* e.g. Custom: [30, 10, 5].
* This also removes any previously loaded custom profiles.
* @param {object} profiles - A dictionary containing the transmit profiles.
*/
static loadTransmitProfiles(profiles: {
[profileName: string]: number[];
}): void;
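/*
 * A minimal usage sketch (illustrative, not part of the original declaration),
 * assuming AWTLogManager has already been initialized:
 *
 *   AWTTransmissionManagerCore.loadTransmitProfiles({
 *     Custom: [30, 10, 5] // [low, normal, high]; -1 disables a priority
 *   });
 *   AWTTransmissionManagerCore.setTransmitProfile('Custom');
 */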
/**
* Pass the event to the event handler and try to schedule the timer.
* @param {object} event - The event to be sent.
*/
static sendEvent(event: AWTEventDataWithMetaData): void;
/**
* Sends events of all priorities in the current inbound queue.
* This method adds new inbound queues to which new events will be added.
* Note: If LogManager is paused, or flush is called again in less than 30 sec,
* then flush will be a no-op and the callback will not be called.
* @param {function} callback - The function to be called when flush is finished.
*/
static flush(callback: () => void): void;
/**
* Pauses transmission. It pauses the http manager and also clears timers.
*/
static pauseTransmission(): void;
/**
* Resumes transmission. It resumes the http manager and tries to schedule the timer.
*/
static resumeTransmision(): void;
/**
* Stops allowing new events to be added for transmission. It also batches all
* events currently in the queue and creates requests from them to be sent.
*/
static flushAndTeardown(): void;
/**
* Backs off transmission. This exponentially increases all the timers.
*/
static backOffTransmission(): void;
/**
* Clears the backoff for transmission.
*/
static clearBackOff(): void;
/**
* Resets the transmit profiles to the default profiles of Real Time, Near Real Time
* and Best Effort. This removes all the custom profiles that were loaded.
*/
private static _resetTransmitProfiles();
private static clearTimeout();
private static _batchAndSendEvents();
private static _initializeProfiles();
}
|
AWTTransmissionManagerCore
|
vstate.rs
|
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::io;
use std::result;
use std::sync::{Arc, Barrier};
use super::TimestampUs;
use arch;
#[cfg(target_arch = "aarch64")]
use arch::aarch64::gic::GICDevice;
#[cfg(target_arch = "x86_64")]
use cpuid::{c3, filter_cpuid, t2, VmSpec};
use default_syscalls;
#[cfg(target_arch = "x86_64")]
use kvm_bindings::{kvm_pit_config, KVM_PIT_SPEAKER_DUMMY};
use kvm_bindings::{kvm_userspace_memory_region, KVM_API_VERSION};
use kvm_ioctls::*;
use logger::{LogOption, Metric, LOGGER, METRICS};
use memory_model::{Address, GuestAddress, GuestMemory, GuestMemoryError};
use utils::eventfd::EventFd;
#[cfg(target_arch = "x86_64")]
use vmm_config::machine_config::CpuFeaturesTemplate;
use vmm_config::machine_config::VmConfig;
const KVM_MEM_LOG_DIRTY_PAGES: u32 = 0x1;
#[cfg(target_arch = "x86_64")]
const MAGIC_IOPORT_SIGNAL_GUEST_BOOT_COMPLETE: u64 = 0x03f0;
#[cfg(target_arch = "aarch64")]
const MAGIC_IOPORT_SIGNAL_GUEST_BOOT_COMPLETE: u64 = 0x40000000;
const MAGIC_VALUE_SIGNAL_GUEST_BOOT_COMPLETE: u8 = 123;
/// Errors associated with the wrappers over KVM ioctls.
#[derive(Debug)]
pub enum Error {
#[cfg(target_arch = "x86_64")]
/// A call to cpuid instruction failed.
CpuId(cpuid::Error),
/// Invalid guest memory configuration.
GuestMemory(GuestMemoryError),
/// Hyperthreading flag is not initialized.
HTNotInitialized,
/// The host kernel reports an invalid KVM API version.
KvmApiVersion(i32),
/// Cannot initialize the KVM context due to missing capabilities.
KvmCap(kvm_ioctls::Cap),
/// vCPU count is not initialized.
VcpuCountNotInitialized,
/// Cannot open the VM file descriptor.
VmFd(io::Error),
/// Cannot open the VCPU file descriptor.
VcpuFd(io::Error),
/// Cannot configure the microvm.
VmSetup(io::Error),
/// Cannot run the VCPUs.
VcpuRun(io::Error),
/// The call to KVM_SET_CPUID2 failed.
SetSupportedCpusFailed(io::Error),
/// The number of configured slots is bigger than the maximum reported by KVM.
NotEnoughMemorySlots,
#[cfg(target_arch = "x86_64")]
/// Cannot set the local interruption due to bad configuration.
LocalIntConfiguration(arch::x86_64::interrupts::Error),
/// Cannot set the memory regions.
SetUserMemoryRegion(io::Error),
#[cfg(target_arch = "x86_64")]
/// Error configuring the MSR registers
MSRSConfiguration(arch::x86_64::regs::Error),
#[cfg(target_arch = "aarch64")]
/// Error configuring the general purpose aarch64 registers.
REGSConfiguration(arch::aarch64::regs::Error),
#[cfg(target_arch = "x86_64")]
/// Error configuring the general purpose registers
REGSConfiguration(arch::x86_64::regs::Error),
#[cfg(target_arch = "x86_64")]
/// Error configuring the special registers
SREGSConfiguration(arch::x86_64::regs::Error),
#[cfg(target_arch = "x86_64")]
/// Error configuring the floating point related registers
FPUConfiguration(arch::x86_64::regs::Error),
/// Cannot configure the IRQ.
Irq(io::Error),
/// Cannot spawn a new vCPU thread.
VcpuSpawn(io::Error),
/// Unexpected KVM_RUN exit reason
VcpuUnhandledKvmExit,
#[cfg(target_arch = "aarch64")]
/// Error setting up the global interrupt controller.
SetupGIC(arch::aarch64::gic::Error),
#[cfg(target_arch = "aarch64")]
/// Error getting the Vcpu preferred target on Arm.
VcpuArmPreferredTarget(io::Error),
#[cfg(target_arch = "aarch64")]
/// Error doing Vcpu Init on Arm.
VcpuArmInit(io::Error),
}
pub type Result<T> = result::Result<T, Error>;
/// Describes a KVM context that gets attached to the microVM.
/// It gives access to the functionality of the KVM wrapper as
/// long as every required KVM capability is present on the host.
pub struct KvmContext {
kvm: Kvm,
max_memslots: usize,
}
impl KvmContext {
pub fn new() -> Result<Self> {
use kvm_ioctls::Cap::*;
let kvm = Kvm::new().expect("Error creating the Kvm object");
// Check that KVM has the correct version.
if kvm.get_api_version() != KVM_API_VERSION as i32 {
return Err(Error::KvmApiVersion(kvm.get_api_version()));
}
// A list of KVM capabilities we want to check.
#[cfg(target_arch = "x86_64")]
let capabilities = vec![Irqchip, Ioeventfd, Irqfd, UserMemory, SetTssAddr];
#[cfg(target_arch = "aarch64")]
let capabilities = vec![Irqchip, Ioeventfd, Irqfd, UserMemory, ArmPsci02];
// Check that all desired capabilities are supported.
match capabilities
.iter()
.find(|&capability| !kvm.check_extension(*capability))
{
None => {
let max_memslots = kvm.get_nr_memslots();
Ok(KvmContext { kvm, max_memslots })
}
Some(c) => Err(Error::KvmCap(*c)),
}
}
pub fn fd(&self) -> &Kvm {
&self.kvm
}
/// Get the maximum number of memory slots reported by this KVM context.
fn max_memslots(&self) -> usize {
self.max_memslots
}
}
/// A wrapper around creating and using a VM.
pub struct Vm {
fd: VmFd,
guest_mem: Option<GuestMemory>,
// X86 specific fields.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
supported_cpuid: CpuId,
// Arm specific fields.
// On aarch64 we need to keep around the fd obtained by creating the VGIC device.
#[cfg(target_arch = "aarch64")]
irqchip_handle: Option<Box<dyn GICDevice>>,
}
impl Vm {
/// Constructs a new `Vm` using the given `Kvm` instance.
pub fn new(kvm: &Kvm) -> Result<Self> {
// Create the fd used for interacting with KVM VM-specific functions.
let vm_fd = kvm.create_vm().map_err(Error::VmFd)?;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
let supported_cpuid = kvm
.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES)
.map_err(Error::VmFd)?;
Ok(Vm {
fd: vm_fd,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
supported_cpuid,
guest_mem: None,
#[cfg(target_arch = "aarch64")]
irqchip_handle: None,
})
}
/// Returns a ref to the supported `CpuId` for this Vm.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn supported_cpuid(&self) -> &CpuId {
&self.supported_cpuid
}
/// Initializes the guest memory.
pub fn memory_init(&mut self, guest_mem: GuestMemory, kvm_context: &KvmContext) -> Result<()> {
if guest_mem.num_regions() > kvm_context.max_memslots() {
return Err(Error::NotEnoughMemorySlots);
}
guest_mem
.with_regions(|index, guest_addr, size, host_addr| {
info!("Guest memory starts at {:x?}", host_addr);
let flags = if LOGGER.flags() & LogOption::LogDirtyPages as usize > 0 {
KVM_MEM_LOG_DIRTY_PAGES
} else {
0
};
let memory_region = kvm_userspace_memory_region {
slot: index as u32,
guest_phys_addr: guest_addr.raw_value() as u64,
memory_size: size as u64,
userspace_addr: host_addr as u64,
flags,
};
// Safe because we mapped the memory region and we made sure that the regions
// are not overlapping.
unsafe { self.fd.set_user_memory_region(memory_region) }
})
.map_err(Error::SetUserMemoryRegion)?;
self.guest_mem = Some(guest_mem);
#[cfg(target_arch = "x86_64")]
self.fd
.set_tss_address(arch::x86_64::layout::KVM_TSS_ADDRESS as usize)
.map_err(Error::VmSetup)?;
Ok(())
}
/// Creates the irq chip and an in-kernel device model for the PIT.
#[cfg(target_arch = "x86_64")]
pub fn setup_irqchip(&self) -> Result<()> {
self.fd.create_irq_chip().map_err(Error::VmSetup)?;
let mut pit_config = kvm_pit_config::default();
// We need to enable the emulation of a dummy speaker port stub so that writing to port 0x61
// (i.e. KVM_SPEAKER_BASE_ADDRESS) does not trigger an exit to user space.
pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
self.fd.create_pit2(pit_config).map_err(Error::VmSetup)
}
/// Creates the GIC (Global Interrupt Controller).
#[cfg(target_arch = "aarch64")]
pub fn setup_irqchip(&mut self, vcpu_count: u8) -> Result<()> {
self.irqchip_handle = Some(
arch::aarch64::gic::create_gic(&self.fd, vcpu_count.into()).map_err(Error::SetupGIC)?,
);
Ok(())
}
/// Gets a reference to the irqchip of the VM
#[cfg(target_arch = "aarch64")]
pub fn get_irqchip(&self) -> &Box<dyn GICDevice> {
&self.irqchip_handle.as_ref().unwrap()
}
/// Gets a reference to the guest memory owned by this VM.
///
/// Note that `GuestMemory` does not include any device memory that may have been added after
/// this VM was constructed.
pub fn memory(&self) -> Option<&GuestMemory> {
self.guest_mem.as_ref()
}
/// Gets a reference to the kvm file descriptor owned by this VM.
pub fn fd(&self) -> &VmFd {
&self.fd
}
}
/// A wrapper around creating and using a kvm-based VCPU.
pub struct Vcpu {
#[cfg(target_arch = "x86_64")]
cpuid: CpuId,
fd: VcpuFd,
id: u8,
#[cfg(target_arch = "x86_64")]
io_bus: devices::Bus,
mmio_bus: Option<devices::Bus>,
create_ts: TimestampUs,
#[cfg(target_arch = "aarch64")]
mpidr: u64,
}
impl Vcpu {
/// Constructs a new VCPU for `vm`.
///
/// # Arguments
///
/// * `id` - Represents the CPU number between [0, max vcpus).
/// * `vm_fd` - The kvm `VmFd` for the virtual machine this vcpu will get attached to.
/// * `cpuid` - The `CpuId` listing the supported capabilities of this vcpu.
/// * `io_bus` - The io-bus used to access port-io devices.
/// * `create_ts` - A timestamp used by the vcpu to calculate its lifetime.
#[cfg(target_arch = "x86_64")]
pub fn new_x86_64(
id: u8,
vm_fd: &VmFd,
cpuid: CpuId,
io_bus: devices::Bus,
create_ts: TimestampUs,
) -> Result<Self> {
let kvm_vcpu = vm_fd.create_vcpu(id).map_err(Error::VcpuFd)?;
// Initially the cpuid per vCPU is the one supported by this VM.
Ok(Vcpu {
cpuid,
fd: kvm_vcpu,
id,
io_bus,
mmio_bus: None,
create_ts,
})
}
/// Constructs a new VCPU for `vm`.
///
/// # Arguments
///
/// * `id` - Represents the CPU number between [0, max vcpus).
/// * `vm_fd` - The kvm `VmFd` for the virtual machine this vcpu will get attached to.
/// * `create_ts` - A timestamp used by the vcpu to calculate its lifetime.
#[cfg(target_arch = "aarch64")]
pub fn new_aarch64(id: u8, vm_fd: &VmFd, create_ts: TimestampUs) -> Result<Self> {
let kvm_vcpu = vm_fd.create_vcpu(id).map_err(Error::VcpuFd)?;
Ok(Vcpu {
fd: kvm_vcpu,
id,
mmio_bus: None,
create_ts,
mpidr: 0,
})
}
/// Gets the MPIDR register value.
#[cfg(target_arch = "aarch64")]
pub fn get_mpidr(&self) -> u64 {
self.mpidr
}
/// Sets a MMIO bus for this vcpu.
pub fn set_mmio_bus(&mut self, mmio_bus: devices::Bus) {
self.mmio_bus = Some(mmio_bus);
}
#[cfg(target_arch = "x86_64")]
/// Configures an x86_64-specific vcpu and should be called once per vcpu.
///
/// # Arguments
///
/// * `machine_config` - The machine configuration of this microvm needed for the CPUID configuration.
/// * `guest_mem` - The guest memory used by this microvm.
/// * `kernel_start_addr` - Offset from `guest_mem` at which the kernel starts.
pub fn configure_x86_64(
&mut self,
machine_config: &VmConfig,
guest_mem: &GuestMemory,
kernel_start_addr: GuestAddress,
) -> Result<()> {
let cpuid_vm_spec = VmSpec::new(
self.id,
machine_config
.vcpu_count
.ok_or(Error::VcpuCountNotInitialized)?,
machine_config.ht_enabled.ok_or(Error::HTNotInitialized)?,
)
.map_err(Error::CpuId)?;
filter_cpuid(&mut self.cpuid, &cpuid_vm_spec).map_err(|e| {
METRICS.vcpu.filter_cpuid.inc();
error!("Failure in configuring CPUID for vcpu {}: {:?}", self.id, e);
Error::CpuId(e)
})?;
if let Some(template) = machine_config.cpu_template {
match template {
CpuFeaturesTemplate::T2 => {
t2::set_cpuid_entries(&mut self.cpuid, &cpuid_vm_spec).map_err(Error::CpuId)?
}
CpuFeaturesTemplate::C3 => {
c3::set_cpuid_entries(&mut self.cpuid, &cpuid_vm_spec).map_err(Error::CpuId)?
}
}
}
self.fd
.set_cpuid2(&self.cpuid)
.map_err(Error::SetSupportedCpusFailed)?;
arch::x86_64::regs::setup_msrs(&self.fd).map_err(Error::MSRSConfiguration)?;
arch::x86_64::regs::setup_regs(&self.fd, kernel_start_addr.raw_value() as u64)
.map_err(Error::REGSConfiguration)?;
arch::x86_64::regs::setup_fpu(&self.fd).map_err(Error::FPUConfiguration)?;
arch::x86_64::regs::setup_sregs(guest_mem, &self.fd).map_err(Error::SREGSConfiguration)?;
arch::x86_64::interrupts::set_lint(&self.fd).map_err(Error::LocalIntConfiguration)?;
Ok(())
}
#[cfg(target_arch = "aarch64")]
/// Configures an aarch64 specific vcpu.
///
/// # Arguments
///
/// * `vm_fd` - The kvm `VmFd` for this microvm.
/// * `guest_mem` - The guest memory used by this microvm.
/// * `kernel_load_addr` - Offset from `guest_mem` at which the kernel is loaded.
pub fn configure_aarch64(
&mut self,
vm_fd: &VmFd,
guest_mem: &GuestMemory,
kernel_load_addr: GuestAddress,
) -> Result<()> {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
// This reads back the kernel's preferred target type.
vm_fd
.get_preferred_target(&mut kvi)
.map_err(Error::VcpuArmPreferredTarget)?;
// We already checked that the capability is supported.
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_PSCI_0_2;
// Non-boot cpus are powered off initially.
if self.id > 0 {
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_POWER_OFF;
}
self.fd.vcpu_init(&kvi).map_err(Error::VcpuArmInit)?;
arch::aarch64::regs::setup_regs(&self.fd, self.id, kernel_load_addr.raw_value(), guest_mem)
.map_err(Error::REGSConfiguration)?;
self.mpidr = arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::REGSConfiguration)?;
Ok(())
}
fn check_boot_complete_signal(&self, addr: u64, data: &[u8]) {
if addr == MAGIC_IOPORT_SIGNAL_GUEST_BOOT_COMPLETE
&& data[0] == MAGIC_VALUE_SIGNAL_GUEST_BOOT_COMPLETE
{
super::Vmm::log_boot_time(&self.create_ts);
}
}
fn run_emulation(&mut self) -> Result<()> {
match self.fd.run() {
Ok(run) => match run {
#[cfg(target_arch = "x86_64")]
VcpuExit::IoIn(addr, data) => {
self.io_bus.read(u64::from(addr), data);
METRICS.vcpu.exit_io_in.inc();
Ok(())
}
#[cfg(target_arch = "x86_64")]
VcpuExit::IoOut(addr, data) => {
self.check_boot_complete_signal(u64::from(addr), data);
self.io_bus.write(u64::from(addr), data);
METRICS.vcpu.exit_io_out.inc();
Ok(())
}
VcpuExit::MmioRead(addr, data) => {
if let Some(ref mmio_bus) = self.mmio_bus {
mmio_bus.read(addr, data);
METRICS.vcpu.exit_mmio_read.inc();
}
Ok(())
}
VcpuExit::MmioWrite(addr, data) => {
if let Some(ref mmio_bus) = self.mmio_bus {
#[cfg(target_arch = "aarch64")]
self.check_boot_complete_signal(addr, data);
mmio_bus.write(addr, data);
METRICS.vcpu.exit_mmio_write.inc();
}
Ok(())
}
VcpuExit::Hlt => {
info!("Received KVM_EXIT_HLT signal");
Err(Error::VcpuUnhandledKvmExit)
}
VcpuExit::Shutdown => {
info!("Received KVM_EXIT_SHUTDOWN signal");
Err(Error::VcpuUnhandledKvmExit)
}
// The KVM documentation specifies that the exits below are considered
// errors.
VcpuExit::FailEntry => {
METRICS.vcpu.failures.inc();
error!("Received KVM_EXIT_FAIL_ENTRY signal");
Err(Error::VcpuUnhandledKvmExit)
}
VcpuExit::InternalError => {
METRICS.vcpu.failures.inc();
error!("Received KVM_EXIT_INTERNAL_ERROR signal");
Err(Error::VcpuUnhandledKvmExit)
}
r => {
METRICS.vcpu.failures.inc();
// TODO: Are we sure we want to finish running a vcpu upon
// receiving a vm exit that is not necessarily an error?
error!("Unexpected exit reason on vcpu run: {:?}", r);
Err(Error::VcpuUnhandledKvmExit)
}
},
// The unwrap on raw_os_error can only fail if we have a logic
// error in our code, in which case it is better to panic.
Err(ref e) => {
match e.raw_os_error().unwrap() {
// Why do we check for these if we only return EINVAL?
libc::EAGAIN | libc::EINTR => Ok(()),
_ => {
METRICS.vcpu.failures.inc();
error!("Failure during vcpu run: {}", e);
Err(Error::VcpuUnhandledKvmExit)
}
}
}
}
}
/// Main loop of the vCPU thread.
///
/// Runs the vCPU in KVM context in a loop. Handles KVM_EXITs then goes back in.
/// Note that the state of the VCPU and associated VM must be setup first for this to do
/// anything useful.
pub fn run(
&mut self,
thread_barrier: Arc<Barrier>,
seccomp_level: u32,
vcpu_exit_evt: EventFd,
) {
// Load seccomp filters for this vCPU thread.
// Execution panics if filters cannot be loaded, use --seccomp-level=0 if skipping filters
// altogether is the desired behaviour.
if let Err(e) = default_syscalls::set_seccomp_level(seccomp_level) {
panic!(
"Failed to set the requested seccomp filters on vCPU {}: Error: {}",
self.id, e
);
}
thread_barrier.wait();
while self.run_emulation().is_ok() {}
// Nothing we need do for the success case.
if let Err(e) = vcpu_exit_evt.write(1) {
METRICS.vcpu.failures.inc();
error!("Failed signaling vcpu exit event: {}", e);
}
}
}
#[cfg(test)]
mod tests {
use std::fs::File;
use super::super::devices;
use super::*;
// Auxiliary function being used throughout the tests.
fn setup_vcpu() -> (Vm, Vcpu) {
let kvm = KvmContext::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let mut vm = Vm::new(kvm.fd()).expect("Cannot create new vm");
assert!(vm.memory_init(gm, &kvm).is_ok());
let vcpu;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
vm.setup_irqchip().unwrap();
vcpu = Vcpu::new_x86_64(
1,
vm.fd(),
vm.supported_cpuid().clone(),
devices::Bus::new(),
super::super::TimestampUs::default(),
)
.unwrap();
}
#[cfg(target_arch = "aarch64")]
{
vcpu = Vcpu::new_aarch64(1, vm.fd(), super::super::TimestampUs::default()).unwrap();
vm.setup_irqchip(1).expect("Cannot setup irqchip");
}
(vm, vcpu)
}
#[test]
fn test_set_mmio_bus() {
let (_, mut vcpu) = setup_vcpu();
assert!(vcpu.mmio_bus.is_none());
vcpu.set_mmio_bus(devices::Bus::new());
assert!(vcpu.mmio_bus.is_some());
}
#[test]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn test_get_supported_cpuid() {
let kvm = KvmContext::new().unwrap();
let vm = Vm::new(kvm.fd()).expect("Cannot create new vm");
let cpuid = kvm
.kvm
.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES)
.expect("Cannot get supported cpuid");
assert_eq!(vm.supported_cpuid().as_slice(), cpuid.as_slice());
}
#[test]
fn test_vm_memory_init() {
let mut kvm_context = KvmContext::new().unwrap();
let mut vm = Vm::new(kvm_context.fd()).expect("Cannot create new vm");
// Create valid memory region and test that the initialization is successful.
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
assert!(vm.memory_init(gm, &kvm_context).is_ok());
// Set the maximum number of memory slots to 1 in KvmContext to check the error
// path of memory_init. Create 2 non-overlapping memory slots.
kvm_context.max_memslots = 1;
let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x1000), (GuestAddress(0x1001), 0x2000)])
.unwrap();
assert!(vm.memory_init(gm, &kvm_context).is_err());
}
#[cfg(target_arch = "x86_64")]
#[test]
fn test_setup_irqchip() {
let kvm_context = KvmContext::new().unwrap();
let vm = Vm::new(kvm_context.fd()).expect("Cannot create new vm");
vm.setup_irqchip().expect("Cannot setup irqchip");
// Trying to set up two irqchips will result in an EEXIST error. At the moment
// there is no good way of testing the actual error because io::Error does not implement
// PartialEq.
assert!(vm.setup_irqchip().is_err());
let _vcpu = Vcpu::new_x86_64(
1,
vm.fd(),
vm.supported_cpuid().clone(),
devices::Bus::new(),
super::super::TimestampUs::default(),
)
.unwrap();
// Trying to set up the irqchip after KVM_CREATE_VCPU has been called will result in an error.
assert!(vm.setup_irqchip().is_err());
}
#[cfg(target_arch = "aarch64")]
#[test]
fn test_setup_irqchip() {
let kvm = KvmContext::new().unwrap();
let mut vm = Vm::new(kvm.fd()).expect("Cannot create new vm");
let vcpu_count = 1;
let _vcpu = Vcpu::new_aarch64(1, vm.fd(), super::super::TimestampUs::default()).unwrap();
vm.setup_irqchip(vcpu_count).expect("Cannot setup irqchip");
// Trying to set up two irqchips will result in an EEXIST error.
assert!(vm.setup_irqchip(vcpu_count).is_err());
}
#[cfg(target_arch = "x86_64")]
#[test]
fn test_configure_vcpu() {
let (vm, mut vcpu) = setup_vcpu();
let vm_config = VmConfig::default();
let vm_mem = vm.memory().unwrap();
assert!(vcpu
.configure_x86_64(&vm_config, vm_mem, GuestAddress(0))
.is_ok());
// Test configure while using the T2 template.
let mut vm_config = VmConfig::default();
vm_config.cpu_template = Some(CpuFeaturesTemplate::T2);
assert!(vcpu
.configure_x86_64(&vm_config, vm_mem, GuestAddress(0))
.is_ok());
// Test configure while using the C3 template.
let mut vm_config = VmConfig::default();
vm_config.cpu_template = Some(CpuFeaturesTemplate::C3);
assert!(vcpu
.configure_x86_64(&vm_config, vm_mem, GuestAddress(0))
.is_ok());
}
#[cfg(target_arch = "aarch64")]
#[test]
fn
|
() {
let kvm = KvmContext::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let mut vm = Vm::new(kvm.fd()).expect("new vm failed");
assert!(vm.memory_init(gm, &kvm).is_ok());
let vm_mem = vm.memory().unwrap();
// Try it for when vcpu id is 0.
let mut vcpu = Vcpu::new_aarch64(0, vm.fd(), super::super::TimestampUs::default()).unwrap();
let vm_config = VmConfig::default();
assert!(vcpu
.configure_aarch64(vm.fd(), vm_mem, GuestAddress(0))
.is_ok());
// Try it for when vcpu id is NOT 0.
let mut vcpu = Vcpu::new_aarch64(1, vm.fd(), super::super::TimestampUs::default()).unwrap();
assert!(vcpu
.configure_aarch64(vm.fd(), vm_mem, GuestAddress(0))
.is_ok());
}
#[test]
#[should_panic]
fn test_vcpu_run_failed() {
let (_, mut vcpu) = setup_vcpu();
// Setting an invalid seccomp level should panic.
vcpu.run(
Arc::new(Barrier::new(1)),
seccomp::SECCOMP_LEVEL_ADVANCED + 10,
EventFd::new(libc::EFD_NONBLOCK).unwrap(),
);
}
#[test]
fn test_kvm_context() {
use std::os::unix::fs::MetadataExt;
use std::os::unix::io::{AsRawFd, FromRawFd};
let c = KvmContext::new().unwrap();
assert!(c.max_memslots >= 32);
let kvm = Kvm::new().unwrap();
let f = unsafe { File::from_raw_fd(kvm.as_raw_fd()) };
let m1 = f.metadata().unwrap();
let m2 = File::open("/dev/kvm").unwrap().metadata().unwrap();
assert_eq!(m1.dev(), m2.dev());
assert_eq!(m1.ino(), m2.ino());
}
}
|
test_configure_vcpu
|
udidauth.go
|
package device
import (
"context"
"crypto/sha256"
"crypto/subtle"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/micromdm/micromdm/mdm"
)
type UDIDCertAuthStore interface {
SaveUDIDCertHash(udid, certHash []byte) error
GetUDIDCertHash(udid []byte) ([]byte, error)
}
func
|
(store UDIDCertAuthStore, logger log.Logger) mdm.Middleware {
return func(next mdm.Service) mdm.Service {
return &udidCertAuthMiddleware{
store: store,
next: next,
logger: logger,
}
}
}
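// Illustrative wiring (a sketch; the service variable below is hypothetical):
// the returned mdm.Middleware is applied to an existing mdm.Service, e.g.
//   svc = UDIDCertAuthMiddleware(store, logger)(svc)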
type udidCertAuthMiddleware struct {
store UDIDCertAuthStore
next mdm.Service
logger log.Logger
}
func hashCertRaw(c []byte) []byte {
retBytes := make([]byte, 32)
sum := sha256.Sum256(c)
copy(retBytes, sum[:])
return retBytes
}
func (mw *udidCertAuthMiddleware) validateUDIDCertAuth(udid, certHash []byte) (bool, error) {
dbCertHash, err := mw.store.GetUDIDCertHash(udid)
if err != nil && !isNotFound(err) {
return false, err
} else if err != nil && isNotFound(err) {
// TODO: we did not find any UDID at all. Assume (but log) that
// this device already existed/was enrolled and we need to store
// its UDID-cert association. At some later date, when most/all
// micromdm instances have stored udid-cert associations,
// this can be an outright failure.
level.Info(mw.logger).Log("msg", "device cert hash not found, saving anyway", "udid", string(udid))
if err := mw.store.SaveUDIDCertHash(udid, certHash); err != nil {
return false, err
}
return true, nil
}
if 1 != subtle.ConstantTimeCompare(certHash, dbCertHash) {
level.Info(mw.logger).Log("msg", "device cert hash mismatch", "udid", string(udid))
return false, nil
}
return true, nil
}
func (mw *udidCertAuthMiddleware) Acknowledge(ctx context.Context, req mdm.AcknowledgeEvent) ([]byte, error) {
devcert, err := mdm.DeviceCertificateFromContext(ctx)
if err != nil {
return nil, errors.Wrap(err, "error retrieving device certificate")
}
matched, err := mw.validateUDIDCertAuth([]byte(req.Response.UDID), hashCertRaw(devcert.Raw))
if err != nil {
return nil, err
}
if !matched {
return nil, errors.New("device certificate UDID mismatch")
}
return mw.next.Acknowledge(ctx, req)
}
func (mw *udidCertAuthMiddleware) Checkin(ctx context.Context, req mdm.CheckinEvent) error {
devcert, err := mdm.DeviceCertificateFromContext(ctx)
if err != nil {
return errors.Wrap(err, "error retrieving device certificate")
}
switch req.Command.MessageType {
case "Authenticate":
// unconditionally save the cert hash on Authenticate message
if err := mw.store.SaveUDIDCertHash([]byte(req.Command.UDID), hashCertRaw(devcert.Raw)); err != nil {
return err
}
return mw.next.Checkin(ctx, req)
case "TokenUpdate", "CheckOut":
matched, err := mw.validateUDIDCertAuth([]byte(req.Command.UDID), hashCertRaw(devcert.Raw))
if err != nil {
return err
}
if !matched {
return errors.New("device certificate UDID mismatch")
}
return mw.next.Checkin(ctx, req)
default:
return errors.Errorf("unknown checkin message type %s", req.Command.MessageType)
}
}
|
UDIDCertAuthMiddleware
|
utils.py
|
import pytest
import os
import sys
import json
from click.testing import CliRunner
from ...cli.main import cli
from ...core.project import Project
remotetest = pytest.mark.skipif('TEST_DSBFILE' not in os.environ,
reason="Environment variable 'TEST_DSBFILE' is required")
def get_test_project():
dsbfile = os.environ['TEST_DSBFILE']
return Project.from_file(dsbfile)
def invoke(*args):
dsbfile = os.environ['TEST_DSBFILE']
args = list(args)
args.extend(['--file', dsbfile])
runner = CliRunner()
return runner.invoke(cli, args, catch_exceptions=False, input=sys.stdin)
def check_all_true(salt_output, none_is_ok=False):
minions = []
for minion_output in salt_output.split('\n'):
minions.append(json.loads(minion_output))
for minion in minions:
minion_values = list(minion.values())[0]
for id_, value in minion_values.items():
|
def check_all_cmd_retcode0(salt_output):
minions = []
for minion_output in salt_output.split('\n'):
minions.append(json.loads(minion_output))
for minion in minions:
minion_output = list(minion.values())[0]
assert minion_output['retcode'] == 0, (minion_output)
|
if none_is_ok:
assert value['result'] is not False, (id_, value)
else:
assert value['result'] is True, (id_, value)
|
meee.py
|
## adapted from https://github.com/rail-berkeley/softlearning/blob/master/softlearning/algorithms/sac.py
import os
import math
import pickle
from collections import OrderedDict
from numbers import Number
from itertools import count
import gtimer as gt
import pdb
import numpy as np
import tensorflow as tf
from tensorflow.python.training import training_util
from softlearning.algorithms.rl_algorithm import RLAlgorithm
from softlearning.replay_pools.simple_replay_pool import WeightedReplayPool
from mbpo.models.constructor import construct_model, format_samples_for_training
from mbpo.models.fake_env import FakeEnv
from mbpo.utils.writer import Writer
from mbpo.utils.visualization import visualize_policy
from mbpo.utils.logging import Progress
import mbpo.utils.filesystem as filesystem
def td_target(reward, discount, next_value):
return reward + discount * next_value
class MEEE(RLAlgorithm):
""" Model-Ensemble Policy Optimization (MEEE)
"""
def __init__(
self,
training_environment,
evaluation_environment,
policy,
Qs,
pool,
static_fns,
plotter=None,
tf_summaries=False,
lr=3e-4,
reward_scale=1.0,
target_entropy='auto',
discount=0.99,
tau=5e-3,
target_update_interval=1,
action_prior='uniform',
reparameterize=False,
store_extra_policy_info=False,
deterministic=False,
model_train_freq=250,
num_networks=7,
num_elites=5,
model_retain_epochs=20,
rollout_batch_size=100e3,
real_ratio=0.1,
rollout_schedule=[20,100,1,1],
hidden_dim=200,
max_model_t=None,
**kwargs,
):
"""
Args:
env (`SoftlearningEnv`): Environment used for training.
policy: A policy function approximator.
initial_exploration_policy: ('Policy'): A policy that we use
for initial exploration which is not trained by the algorithm.
Qs: Q-function approximators. The min of these
approximators will be used. Usage of at least two Q-functions
improves performance by reducing overestimation bias.
pool (`PoolBase`): Replay pool to add gathered samples to.
plotter (`QFPolicyPlotter`): Plotter instance to be used for
visualizing Q-function during training.
lr (`float`): Learning rate used for the function approximators.
discount (`float`): Discount factor for Q-function updates.
tau (`float`): Soft value function target update weight.
target_update_interval ('int'): Frequency at which target network
updates occur in iterations.
reparameterize ('bool'): If True, we use a gradient estimator for
the policy derived using the reparameterization trick. We use
a likelihood ratio based estimator otherwise.
"""
super(MEEE, self).__init__(**kwargs)
obs_dim = np.prod(training_environment.observation_space.shape)
act_dim = np.prod(training_environment.action_space.shape)
self._model = construct_model(obs_dim=obs_dim, act_dim=act_dim, hidden_dim=hidden_dim, num_networks=num_networks, num_elites=num_elites)
self._static_fns = static_fns
self.fake_env = FakeEnv(self._model, self._static_fns)
self._rollout_schedule = rollout_schedule
self._max_model_t = max_model_t
# self._model_pool_size = model_pool_size
# print('[ MBPO ] Model pool size: {:.2E}'.format(self._model_pool_size))
# self._model_pool = WeightedReplayPool(pool._observation_space, pool._action_space, self._model_pool_size)
self._model_retain_epochs = model_retain_epochs
self._model_train_freq = model_train_freq
self._rollout_batch_size = int(rollout_batch_size)
self._deterministic = deterministic
self._real_ratio = real_ratio
self._log_dir = os.getcwd()
self._writer = Writer(self._log_dir)
self._training_environment = training_environment
self._evaluation_environment = evaluation_environment
self._policy = policy
self._Qs = Qs
self._Q_targets = tuple(tf.keras.models.clone_model(Q) for Q in Qs)
self._pool = pool
self._plotter = plotter
self._tf_summaries = tf_summaries
self._policy_lr = lr
self._Q_lr = lr
self._reward_scale = reward_scale
self._target_entropy = (
-np.prod(self._training_environment.action_space.shape)
if target_entropy == 'auto'
else target_entropy)
print('[ MEEE ] Target entropy: {}'.format(self._target_entropy))
self._discount = discount
self._tau = tau
self._target_update_interval = target_update_interval
self._action_prior = action_prior
self._reparameterize = reparameterize
self._store_extra_policy_info = store_extra_policy_info
observation_shape = self._training_environment.active_observation_shape
action_shape = self._training_environment.action_space.shape
assert len(observation_shape) == 1, observation_shape
self._observation_shape = observation_shape
assert len(action_shape) == 1, action_shape
self._action_shape = action_shape
self._build()
def _build(self):
|
def _train(self):
"""Return a generator that performs RL training.
Args:
env (`SoftlearningEnv`): Environment used for training.
policy (`Policy`): Policy used for training
initial_exploration_policy ('Policy'): Policy used for exploration
If None, then all exploration is done using policy
pool (`PoolBase`): Sample pool to add samples to
"""
training_environment = self._training_environment
evaluation_environment = self._evaluation_environment
policy = self._policy
pool = self._pool
model_metrics = {}
if not self._training_started:
self._init_training()
self._initial_exploration_hook(
training_environment, self._initial_exploration_policy, pool)
self.sampler.initialize(training_environment, policy, pool)
gt.reset_root()
gt.rename_root('RLAlgorithm')
gt.set_def_unique(False)
self._training_before_hook()
for self._epoch in gt.timed_for(range(self._epoch, self._n_epochs)):
self._epoch_before_hook()
gt.stamp('epoch_before_hook')
self._training_progress = Progress(self._epoch_length * self._n_train_repeat)
start_samples = self.sampler._total_samples
for i in count():
samples_now = self.sampler._total_samples
self._timestep = samples_now - start_samples
if (samples_now >= start_samples + self._epoch_length
and self.ready_to_train):
break
self._timestep_before_hook()
gt.stamp('timestep_before_hook')
if self._timestep % self._model_train_freq == 0 and self._real_ratio < 1.0:
self._training_progress.pause()
print('[ MEEE ] log_dir: {} | ratio: {}'.format(self._log_dir, self._real_ratio))
print('[ MEEE ] Training model at epoch {} | freq {} | timestep {} (total: {}) | epoch train steps: {} (total: {})'.format(
self._epoch, self._model_train_freq, self._timestep, self._total_timestep, self._train_steps_this_epoch, self._num_train_steps)
)
model_train_metrics = self._train_model(batch_size=256, max_epochs=None, holdout_ratio=0.2, max_t=self._max_model_t)
model_metrics.update(model_train_metrics)
gt.stamp('epoch_train_model')
self._set_rollout_length()
self._reallocate_model_pool()
model_rollout_metrics = self._rollout_model(rollout_batch_size=self._rollout_batch_size, deterministic=self._deterministic)
model_metrics.update(model_rollout_metrics)
gt.stamp('epoch_rollout_model')
# self._visualize_model(self._evaluation_environment, self._total_timestep)
self._training_progress.resume()
# No UCB exploration
#self._do_sampling(timestep=self._total_timestep)
self._do_sampling(timestep=self._total_timestep, disturb=True, fake_env=self.fake_env, Qs=self._Qs)
#print("**exploration**")
gt.stamp('sample')
if self.ready_to_train:
self._do_training_repeats(timestep=self._total_timestep)
gt.stamp('train')
self._timestep_after_hook()
gt.stamp('timestep_after_hook')
training_paths = self.sampler.get_last_n_paths(
math.ceil(self._epoch_length / self.sampler._max_path_length))
gt.stamp('training_paths')
evaluation_paths = self._evaluation_paths(
policy, evaluation_environment)
gt.stamp('evaluation_paths')
training_metrics = self._evaluate_rollouts(
training_paths, training_environment)
gt.stamp('training_metrics')
if evaluation_paths:
evaluation_metrics = self._evaluate_rollouts(
evaluation_paths, evaluation_environment)
gt.stamp('evaluation_metrics')
else:
evaluation_metrics = {}
self._epoch_after_hook(training_paths)
gt.stamp('epoch_after_hook')
sampler_diagnostics = self.sampler.get_diagnostics()
diagnostics = self.get_diagnostics(
iteration=self._total_timestep,
batch=self._evaluation_batch(),
training_paths=training_paths,
evaluation_paths=evaluation_paths)
time_diagnostics = gt.get_times().stamps.itrs
diagnostics.update(OrderedDict((
*(
(f'evaluation/{key}', evaluation_metrics[key])
for key in sorted(evaluation_metrics.keys())
),
*(
(f'training/{key}', training_metrics[key])
for key in sorted(training_metrics.keys())
),
*(
(f'times/{key}', time_diagnostics[key][-1])
for key in sorted(time_diagnostics.keys())
),
*(
(f'sampler/{key}', sampler_diagnostics[key])
for key in sorted(sampler_diagnostics.keys())
),
*(
(f'model/{key}', model_metrics[key])
for key in sorted(model_metrics.keys())
),
('epoch', self._epoch),
('timestep', self._timestep),
('timesteps_total', self._total_timestep),
('train-steps', self._num_train_steps),
)))
if self._eval_render_mode is not None and hasattr(
evaluation_environment, 'render_rollouts'):
training_environment.render_rollouts(evaluation_paths)
yield diagnostics
self.sampler.terminate()
self._training_after_hook()
self._training_progress.close()
yield {'done': True, **diagnostics}
def train(self, *args, **kwargs):
return self._train(*args, **kwargs)
def _log_policy(self):
save_path = os.path.join(self._log_dir, 'models')
filesystem.mkdir(save_path)
weights = self._policy.get_weights()
data = {'policy_weights': weights}
full_path = os.path.join(save_path, 'policy_{}.pkl'.format(self._total_timestep))
print('Saving policy to: {}'.format(full_path))
pickle.dump(data, open(full_path, 'wb'))
def _log_model(self):
save_path = os.path.join(self._log_dir, 'models')
filesystem.mkdir(save_path)
print('Saving model to: {}'.format(save_path))
self._model.save(save_path, self._total_timestep)
def _set_rollout_length(self):
min_epoch, max_epoch, min_length, max_length = self._rollout_schedule
if self._epoch <= min_epoch:
y = min_length
else:
dx = (self._epoch - min_epoch) / (max_epoch - min_epoch)
dx = min(dx, 1)
y = dx * (max_length - min_length) + min_length
self._rollout_length = int(y)
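# Worked example (illustrative): with a hypothetical rollout_schedule of [20, 100, 1, 15],
# at epoch 60, dx = (60 - 20) / (100 - 20) = 0.5, so the length is int(0.5 * (15 - 1) + 1) = 8.
# With the default [20, 100, 1, 1], min and max length are equal, so the length stays at 1.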
print('[ Model Length ] Epoch: {} (min: {}, max: {}) | Length: {} (min: {} , max: {})'.format(
self._epoch, min_epoch, max_epoch, self._rollout_length, min_length, max_length
))
def _reallocate_model_pool(self):
obs_space = self._pool._observation_space
act_space = self._pool._action_space
rollouts_per_epoch = self._rollout_batch_size * self._epoch_length / self._model_train_freq
model_steps_per_epoch = int(self._rollout_length * rollouts_per_epoch)
new_pool_size = self._model_retain_epochs * model_steps_per_epoch
if not hasattr(self, '_model_pool'):
print('[ MEEE ] Initializing new model pool with size {:.2e}'.format(
new_pool_size
))
self._model_pool = WeightedReplayPool(obs_space, act_space, new_pool_size)
elif self._model_pool._max_size != new_pool_size:
print('[ MEEE ] Updating model pool | {:.2e} --> {:.2e}'.format(
self._model_pool._max_size, new_pool_size
))
samples = self._model_pool.return_all_samples()
new_pool = WeightedReplayPool(obs_space, act_space, new_pool_size)
new_pool.add_samples(samples)
assert self._model_pool.size == new_pool.size
self._model_pool = new_pool
def _train_model(self, **kwargs):
env_samples = self._pool.return_all_samples()
train_inputs, train_outputs = format_samples_for_training(env_samples)
model_metrics = self._model.train(train_inputs, train_outputs, **kwargs)
return model_metrics
def _rollout_model(self, rollout_batch_size, **kwargs):
print('[ Model Rollout ] Starting | Epoch: {} | Rollout length: {} | Batch size: {}'.format(
self._epoch, self._rollout_length, rollout_batch_size
))
batch = self.sampler.random_batch(rollout_batch_size)
obs = batch['observations']
steps_added = []
for i in range(self._rollout_length):
act = self._policy.actions_np(obs)
next_obs, rew, term, info = self.fake_env.step(obs, act, **kwargs)
steps_added.append(len(obs))
samples = {'observations': obs, 'actions': act, 'next_observations': next_obs, 'rewards': rew, 'terminals': term, 'stds': info['dev'][:,None]}
self._model_pool.add_samples(samples)
nonterm_mask = ~term.squeeze(-1)
if nonterm_mask.sum() == 0:
print('[ Model Rollout ] Breaking early: {} | {} / {}'.format(i, nonterm_mask.sum(), nonterm_mask.shape))
break
obs = next_obs[nonterm_mask]
mean_rollout_length = sum(steps_added) / rollout_batch_size
rollout_stats = {'mean_rollout_length': mean_rollout_length}
print('[ Model Rollout ] Added: {:.1e} | Model pool: {:.1e} (max {:.1e}) | Length: {} | Train rep: {}'.format(
sum(steps_added), self._model_pool.size, self._model_pool._max_size, mean_rollout_length, self._n_train_repeat
))
return rollout_stats
def _visualize_model(self, env, timestep):
## save env state
state = env.unwrapped.state_vector()
qpos_dim = len(env.unwrapped.sim.data.qpos)
qpos = state[:qpos_dim]
qvel = state[qpos_dim:]
print('[ Visualization ] Starting | Epoch {} | Log dir: {}\n'.format(self._epoch, self._log_dir))
visualize_policy(env, self.fake_env, self._policy, self._writer, timestep)
print('[ Visualization ] Done')
## set env state
env.unwrapped.set_state(qpos, qvel)
def _training_batch(self, batch_size=None):
batch_size = batch_size or self.sampler._batch_size
env_batch_size = int(batch_size*self._real_ratio)
model_batch_size = batch_size - env_batch_size
## can sample from the env pool even if env_batch_size == 0
env_batch = self._pool.random_batch(env_batch_size)
if model_batch_size > 0:
model_batch = self._model_pool.random_batch(model_batch_size)
keys = env_batch.keys()
batch = {k: np.concatenate((env_batch[k], model_batch[k]), axis=0) for k in keys}
else:
## if real_ratio == 1.0, no model pool was ever allocated,
## so skip the model pool sampling
batch = env_batch
return batch
def _init_global_step(self):
self.global_step = training_util.get_or_create_global_step()
self._training_ops.update({
'increment_global_step': training_util._increment_global_step(1)
})
def _init_placeholders(self):
"""Create input placeholders for the SAC algorithm.
Creates `tf.placeholder`s for:
- observation
- next observation
- action
- reward
- terminals
- stds
"""
self._iteration_ph = tf.placeholder(
tf.int64, shape=None, name='iteration')
self._observations_ph = tf.placeholder(
tf.float32,
shape=(None, *self._observation_shape),
name='observation',
)
self._next_observations_ph = tf.placeholder(
tf.float32,
shape=(None, *self._observation_shape),
name='next_observation',
)
self._actions_ph = tf.placeholder(
tf.float32,
shape=(None, *self._action_shape),
name='actions',
)
self._rewards_ph = tf.placeholder(
tf.float32,
shape=(None, 1),
name='rewards',
)
self._stds_ph = tf.placeholder(
tf.float32,
shape=(None, 1),
name='stds',
)
self._terminals_ph = tf.placeholder(
tf.float32,
shape=(None, 1),
name='terminals',
)
if self._store_extra_policy_info:
self._log_pis_ph = tf.placeholder(
tf.float32,
shape=(None, 1),
name='log_pis',
)
self._raw_actions_ph = tf.placeholder(
tf.float32,
shape=(None, *self._action_shape),
name='raw_actions',
)
def _get_Q_target(self):
next_actions = self._policy.actions([self._next_observations_ph])
next_log_pis = self._policy.log_pis(
[self._next_observations_ph], next_actions)
next_Qs_values = tuple(
Q([self._next_observations_ph, next_actions])
for Q in self._Q_targets)
min_next_Q = tf.reduce_min(next_Qs_values, axis=0)
next_value = min_next_Q - self._alpha * next_log_pis
Q_target = td_target(
reward=self._reward_scale * self._rewards_ph,
discount=self._discount,
next_value=(1 - self._terminals_ph) * next_value)
return Q_target
def _init_critic_update(self):
"""Create minimization operation for critic Q-function.
Creates a `tf.optimizer.minimize` operation for updating
critic Q-function with gradient descent, and appends it to
`self._training_ops` attribute.
"""
Q_target = tf.stop_gradient(self._get_Q_target())
assert Q_target.shape.as_list() == [None, 1]
# weighted critic loss
temperature_critic = 5.0
weight_target_Q = tf.stop_gradient(tf.sigmoid(-self._stds_ph * temperature_critic))
Q_values = self._Q_values = tuple(
Q([self._observations_ph, self._actions_ph])
for Q in self._Qs)
Q_losses = self._Q_losses = tuple(
tf.losses.mean_squared_error(
labels=Q_target, predictions=Q_value, weights=weight_target_Q)
for Q_value in Q_values)
self._Q_optimizers = tuple(
tf.train.AdamOptimizer(
learning_rate=self._Q_lr,
name='{}_{}_optimizer'.format(Q._name, i)
) for i, Q in enumerate(self._Qs))
Q_training_ops = tuple(
tf.contrib.layers.optimize_loss(
Q_loss,
self.global_step,
learning_rate=self._Q_lr,
optimizer=Q_optimizer,
variables=Q.trainable_variables,
increment_global_step=False,
summaries=((
"loss", "gradients", "gradient_norm", "global_gradient_norm"
) if self._tf_summaries else ()))
for i, (Q, Q_loss, Q_optimizer)
in enumerate(zip(self._Qs, Q_losses, self._Q_optimizers)))
self._training_ops.update({'Q': tf.group(Q_training_ops)})
def _init_actor_update(self):
"""Create minimization operations for policy and entropy.
Creates a `tf.optimizer.minimize` operations for updating
policy and entropy with gradient descent, and adds them to
`self._training_ops` attribute.
"""
actions = self._policy.actions([self._observations_ph])
log_pis = self._policy.log_pis([self._observations_ph], actions)
assert log_pis.shape.as_list() == [None, 1]
log_alpha = self._log_alpha = tf.get_variable(
'log_alpha',
dtype=tf.float32,
initializer=0.0)
alpha = tf.exp(log_alpha)
if isinstance(self._target_entropy, Number):
alpha_loss = -tf.reduce_mean(
log_alpha * tf.stop_gradient(log_pis + self._target_entropy))
self._alpha_optimizer = tf.train.AdamOptimizer(
self._policy_lr, name='alpha_optimizer')
self._alpha_train_op = self._alpha_optimizer.minimize(
loss=alpha_loss, var_list=[log_alpha])
self._training_ops.update({
'temperature_alpha': self._alpha_train_op
})
self._alpha = alpha
if self._action_prior == 'normal':
policy_prior = tf.contrib.distributions.MultivariateNormalDiag(
loc=tf.zeros(self._action_shape),
scale_diag=tf.ones(self._action_shape))
policy_prior_log_probs = policy_prior.log_prob(actions)
elif self._action_prior == 'uniform':
policy_prior_log_probs = 0.0
Q_log_targets = tuple(
Q([self._observations_ph, actions])
for Q in self._Qs)
min_Q_log_target = tf.reduce_min(Q_log_targets, axis=0)
# weighted actor loss
temperature_act = 5.0
weight_actor_Q = tf.stop_gradient(tf.sigmoid(-self._stds_ph * temperature_act) + 0.5)
if self._reparameterize:
policy_kl_losses = (
alpha * log_pis
- min_Q_log_target
- policy_prior_log_probs) * weight_actor_Q
else:
raise NotImplementedError
assert policy_kl_losses.shape.as_list() == [None, 1]
policy_loss = tf.reduce_mean(policy_kl_losses)
self._policy_optimizer = tf.train.AdamOptimizer(
learning_rate=self._policy_lr,
name="policy_optimizer")
policy_train_op = tf.contrib.layers.optimize_loss(
policy_loss,
self.global_step,
learning_rate=self._policy_lr,
optimizer=self._policy_optimizer,
variables=self._policy.trainable_variables,
increment_global_step=False,
summaries=(
"loss", "gradients", "gradient_norm", "global_gradient_norm"
) if self._tf_summaries else ())
self._training_ops.update({'policy_train_op': policy_train_op})
def _init_training(self):
self._update_target(tau=1.0)
def _update_target(self, tau=None):
tau = tau or self._tau
for Q, Q_target in zip(self._Qs, self._Q_targets):
source_params = Q.get_weights()
target_params = Q_target.get_weights()
Q_target.set_weights([
tau * source + (1.0 - tau) * target
for source, target in zip(source_params, target_params)
])
def _do_training(self, iteration, batch):
"""Runs the operations for updating training and target ops."""
self._training_progress.update()
self._training_progress.set_description()
feed_dict = self._get_feed_dict(iteration, batch)
self._session.run(self._training_ops, feed_dict)
if iteration % self._target_update_interval == 0:
# Run target ops here.
self._update_target()
def _get_feed_dict(self, iteration, batch):
"""Construct TensorFlow feed_dict from sample batch."""
feed_dict = {
self._observations_ph: batch['observations'],
self._actions_ph: batch['actions'],
self._next_observations_ph: batch['next_observations'],
self._rewards_ph: batch['rewards'],
self._terminals_ph: batch['terminals'],
self._stds_ph: batch['stds'],
}
if self._store_extra_policy_info:
feed_dict[self._log_pis_ph] = batch['log_pis']
feed_dict[self._raw_actions_ph] = batch['raw_actions']
if iteration is not None:
feed_dict[self._iteration_ph] = iteration
return feed_dict
def get_diagnostics(self,
iteration,
batch,
training_paths,
evaluation_paths):
"""Return diagnostic information as ordered dictionary.
Records mean and standard deviation of Q-function and state
value function, and TD-loss (mean squared Bellman error)
for the sample batch.
Also calls the `draw` method of the plotter, if a plotter is defined.
"""
feed_dict = self._get_feed_dict(iteration, batch)
(Q_values, Q_losses, alpha, global_step) = self._session.run(
(self._Q_values,
self._Q_losses,
self._alpha,
self.global_step),
feed_dict)
diagnostics = OrderedDict({
'Q-avg': np.mean(Q_values),
'Q-std': np.std(Q_values),
'Q_loss': np.mean(Q_losses),
'alpha': alpha,
})
policy_diagnostics = self._policy.get_diagnostics(
batch['observations'])
diagnostics.update({
f'policy/{key}': value
for key, value in policy_diagnostics.items()
})
if self._plotter:
self._plotter.draw()
return diagnostics
@property
def tf_saveables(self):
saveables = {
'_policy_optimizer': self._policy_optimizer,
**{
f'Q_optimizer_{i}': optimizer
for i, optimizer in enumerate(self._Q_optimizers)
},
'_log_alpha': self._log_alpha,
}
if hasattr(self, '_alpha_optimizer'):
saveables['_alpha_optimizer'] = self._alpha_optimizer
return saveables
|
self._training_ops = {}
self._init_global_step()
self._init_placeholders()
self._init_actor_update()
self._init_critic_update()
|
tests.rs
|
use std::prelude::v1::*;
use std::hash::*;
fn hash<T: Hash>(x: &T) -> u64 {
use std::collections::hash_map::RandomState;
let mut hasher = <RandomState as BuildHasher>::Hasher::new();
x.hash(&mut hasher);
hasher.finish()
}
//#[cfg(feature = "bigint")]
use num_rational::BigRational;
use num_rational::{Ratio, Rational, Rational64};
use core::f64;
use core::i32;
use core::str::FromStr;
use integer::Integer;
use traits::{FromPrimitive, One, Pow, Signed, Zero};
pub const _0: Rational = Ratio { numer: 0, denom: 1 };
pub const _1: Rational = Ratio { numer: 1, denom: 1 };
pub const _2: Rational = Ratio { numer: 2, denom: 1 };
pub const _NEG2: Rational = Ratio {
numer: -2,
denom: 1,
};
pub const _1_2: Rational = Ratio { numer: 1, denom: 2 };
pub const _3_2: Rational = Ratio { numer: 3, denom: 2 };
pub const _NEG1_2: Rational = Ratio {
numer: -1,
denom: 2,
};
pub const _1_NEG2: Rational = Ratio {
numer: 1,
denom: -2,
};
pub const _NEG1_NEG2: Rational = Ratio {
numer: -1,
denom: -2,
};
pub const _1_3: Rational = Ratio { numer: 1, denom: 3 };
pub const _NEG1_3: Rational = Ratio {
numer: -1,
denom: 3,
};
pub const _2_3: Rational = Ratio { numer: 2, denom: 3 };
pub const _NEG2_3: Rational = Ratio {
numer: -2,
denom: 3,
};
//#[cfg(feature = "bigint")]
fn to_big(n: Rational) -> BigRational {
Ratio::new(
FromPrimitive::from_isize(n.numer).unwrap(),
FromPrimitive::from_isize(n.denom).unwrap(),
)
}
//#[cfg(not(feature = "bigint"))]
//pub fn to_big(n: Rational) -> Rational {
// Ratio::new(
// FromPrimitive::from_isize(n.numer).unwrap(),
// FromPrimitive::from_isize(n.denom).unwrap(),
// )
//}
//#[test]
pub fn test_test_constants() {
// check our constants are what Ratio::new etc. would make.
assert_eq!(_0, Zero::zero());
assert_eq!(_1, One::one());
assert_eq!(_2, Ratio::from_integer(2));
assert_eq!(_1_2, Ratio::new(1, 2));
assert_eq!(_3_2, Ratio::new(3, 2));
assert_eq!(_NEG1_2, Ratio::new(-1, 2));
assert_eq!(_2, From::from(2));
}
//#[test]
pub fn test_new_reduce() {
let one22 = Ratio::new(2, 2);
assert_eq!(one22, One::one());
}
#[test]
#[should_panic]
fn test_new_zero() {
let _a = Ratio::new(1, 0);
}
//#[test]
pub fn test_approximate_float() {
assert_eq!(Ratio::from_f32(0.5f32), Some(Ratio::new(1i64, 2)));
assert_eq!(Ratio::from_f64(0.5f64), Some(Ratio::new(1i32, 2)));
assert_eq!(Ratio::from_f32(5f32), Some(Ratio::new(5i64, 1)));
assert_eq!(Ratio::from_f64(5f64), Some(Ratio::new(5i32, 1)));
assert_eq!(Ratio::from_f32(29.97f32), Some(Ratio::new(2997i64, 100)));
assert_eq!(Ratio::from_f32(-29.97f32), Some(Ratio::new(-2997i64, 100)));
assert_eq!(Ratio::<i8>::from_f32(63.5f32), Some(Ratio::new(127i8, 2)));
assert_eq!(Ratio::<i8>::from_f32(126.5f32), Some(Ratio::new(126i8, 1)));
assert_eq!(Ratio::<i8>::from_f32(127.0f32), Some(Ratio::new(127i8, 1)));
assert_eq!(Ratio::<i8>::from_f32(127.5f32), None);
assert_eq!(Ratio::<i8>::from_f32(-63.5f32), Some(Ratio::new(-127i8, 2)));
assert_eq!(
Ratio::<i8>::from_f32(-126.5f32),
Some(Ratio::new(-126i8, 1))
);
assert_eq!(
Ratio::<i8>::from_f32(-127.0f32),
Some(Ratio::new(-127i8, 1))
);
assert_eq!(Ratio::<i8>::from_f32(-127.5f32), None);
assert_eq!(Ratio::<u8>::from_f32(-127f32), None);
assert_eq!(Ratio::<u8>::from_f32(127f32), Some(Ratio::new(127u8, 1)));
assert_eq!(Ratio::<u8>::from_f32(127.5f32), Some(Ratio::new(255u8, 2)));
assert_eq!(Ratio::<u8>::from_f32(256f32), None);
assert_eq!(Ratio::<i64>::from_f64(-10e200), None);
assert_eq!(Ratio::<i64>::from_f64(10e200), None);
assert_eq!(Ratio::<i64>::from_f64(f64::INFINITY), None);
assert_eq!(Ratio::<i64>::from_f64(f64::NEG_INFINITY), None);
assert_eq!(Ratio::<i64>::from_f64(f64::NAN), None);
assert_eq!(
Ratio::<i64>::from_f64(f64::EPSILON),
Some(Ratio::new(1, 4503599627370496))
);
assert_eq!(Ratio::<i64>::from_f64(0.0), Some(Ratio::new(0, 1)));
assert_eq!(Ratio::<i64>::from_f64(-0.0), Some(Ratio::new(0, 1)));
}
//#[test]
pub fn test_cmp() {
assert!(_0 == _0 && _1 == _1);
assert!(_0 != _1 && _1 != _0);
assert!(_0 < _1 && !(_1 < _0));
assert!(_1 > _0 && !(_0 > _1));
assert!(_0 <= _0 && _1 <= _1);
assert!(_0 <= _1 && !(_1 <= _0));
assert!(_0 >= _0 && _1 >= _1);
assert!(_1 >= _0 && !(_0 >= _1));
}
//#[test]
pub fn test_cmp_overflow() {
use core::cmp::Ordering;
// issue #7 example:
let big = Ratio::new(128u8, 1);
let small = big.recip();
assert!(big > small);
// try a few that are closer together
// (some matching numer, some matching denom, some neither)
let ratios = [
Ratio::new(125_i8, 127_i8),
Ratio::new(63_i8, 64_i8),
Ratio::new(124_i8, 125_i8),
Ratio::new(125_i8, 126_i8),
Ratio::new(126_i8, 127_i8),
Ratio::new(127_i8, 126_i8),
];
fn check_cmp(a: Ratio<i8>, b: Ratio<i8>, ord: Ordering) {
//#[cfg(feature = "std")]
println!("comparing {} and {}", a, b);
assert_eq!(a.cmp(&b), ord);
assert_eq!(b.cmp(&a), ord.reverse());
}
for (i, &a) in ratios.iter().enumerate() {
check_cmp(a, a, Ordering::Equal);
check_cmp(-a, a, Ordering::Less);
for &b in &ratios[i + 1..] {
check_cmp(a, b, Ordering::Less);
check_cmp(-a, -b, Ordering::Greater);
check_cmp(a.recip(), b.recip(), Ordering::Greater);
check_cmp(-a.recip(), -b.recip(), Ordering::Less);
}
}
}
//#[test]
pub fn test_to_integer() {
assert_eq!(_0.to_integer(), 0);
assert_eq!(_1.to_integer(), 1);
assert_eq!(_2.to_integer(), 2);
assert_eq!(_1_2.to_integer(), 0);
assert_eq!(_3_2.to_integer(), 1);
assert_eq!(_NEG1_2.to_integer(), 0);
}
//#[test]
pub fn test_numer() {
assert_eq!(_0.numer(), &0);
assert_eq!(_1.numer(), &1);
assert_eq!(_2.numer(), &2);
assert_eq!(_1_2.numer(), &1);
assert_eq!(_3_2.numer(), &3);
assert_eq!(_NEG1_2.numer(), &(-1));
}
//#[test]
pub fn test_denom() {
assert_eq!(_0.denom(), &1);
assert_eq!(_1.denom(), &1);
assert_eq!(_2.denom(), &1);
assert_eq!(_1_2.denom(), &2);
assert_eq!(_3_2.denom(), &2);
assert_eq!(_NEG1_2.denom(), &2);
}
//#[test]
pub fn test_is_integer() {
assert!(_0.is_integer());
assert!(_1.is_integer());
assert!(_2.is_integer());
assert!(!_1_2.is_integer());
assert!(!_3_2.is_integer());
assert!(!_NEG1_2.is_integer());
}
//#[test]
//#[cfg(feature = "std")]
pub fn test_show() {
assert_eq!(format!("{}", _2), "2".to_string());
assert_eq!(format!("{}", _1_2), "1/2".to_string());
assert_eq!(format!("{}", _0), "0".to_string());
assert_eq!(format!("{}", Ratio::from_integer(-2)), "-2".to_string());
}
pub mod arith {
use num_rational::{Ratio, Rational};
use super::{to_big, _0, _1, _1_2, _2, _3_2, _NEG1_2};
use traits::{CheckedAdd, CheckedDiv, CheckedMul, CheckedSub};
//#[test]
pub fn test_add() {
fn test(a: Rational, b: Rational, c: Rational) {
assert_eq!(a + b, c);
assert_eq!(
{
let mut x = a;
x += b;
x
},
c
);
assert_eq!(to_big(a) + to_big(b), to_big(c));
assert_eq!(a.checked_add(&b), Some(c));
assert_eq!(to_big(a).checked_add(&to_big(b)), Some(to_big(c)));
}
fn test_assign(a: Rational, b: isize, c: Rational) {
assert_eq!(a + b, c);
assert_eq!(
{
let mut x = a;
x += b;
x
},
c
);
}
test(_1, _1_2, _3_2);
test(_1, _1, _2);
test(_1_2, _3_2, _2);
test(_1_2, _NEG1_2, _0);
test_assign(_1_2, 1, _3_2);
}
//#[test]
pub fn test_sub() {
fn test(a: Rational, b: Rational, c: Rational) {
assert_eq!(a - b, c);
assert_eq!(
{
let mut x = a;
x -= b;
x
},
c
);
assert_eq!(to_big(a) - to_big(b), to_big(c));
assert_eq!(a.checked_sub(&b), Some(c));
assert_eq!(to_big(a).checked_sub(&to_big(b)), Some(to_big(c)));
}
fn test_assign(a: Rational, b: isize, c: Rational) {
assert_eq!(a - b, c);
assert_eq!(
{
let mut x = a;
x -= b;
x
},
c
);
}
test(_1, _1_2, _1_2);
test(_3_2, _1_2, _1);
test(_1, _NEG1_2, _3_2);
test_assign(_1_2, 1, _NEG1_2);
}
//#[test]
pub fn test_mul() {
fn test(a: Rational, b: Rational, c: Rational) {
assert_eq!(a * b, c);
assert_eq!(
{
let mut x = a;
x *= b;
x
},
c
);
assert_eq!(to_big(a) * to_big(b), to_big(c));
assert_eq!(a.checked_mul(&b), Some(c));
assert_eq!(to_big(a).checked_mul(&to_big(b)), Some(to_big(c)));
}
fn test_assign(a: Rational, b: isize, c: Rational) {
assert_eq!(a * b, c);
assert_eq!(
{
let mut x = a;
x *= b;
x
},
c
);
}
test(_1, _1_2, _1_2);
test(_1_2, _3_2, Ratio::new(3, 4));
test(_1_2, _NEG1_2, Ratio::new(-1, 4));
test_assign(_1_2, 2, _1);
}
//#[test]
pub fn test_div() {
fn test(a: Rational, b: Rational, c: Rational) {
assert_eq!(a / b, c);
assert_eq!(
{
let mut x = a;
x /= b;
x
},
c
);
assert_eq!(to_big(a) / to_big(b), to_big(c));
assert_eq!(a.checked_div(&b), Some(c));
assert_eq!(to_big(a).checked_div(&to_big(b)), Some(to_big(c)));
}
fn test_assign(a: Rational, b: isize, c: Rational) {
assert_eq!(a / b, c);
assert_eq!(
{
let mut x = a;
x /= b;
x
},
c
);
}
test(_1, _1_2, _2);
test(_3_2, _1_2, _1 + _2);
test(_1, _NEG1_2, _NEG1_2 + _NEG1_2 + _NEG1_2 + _NEG1_2);
test_assign(_1, 2, _1_2);
}
//#[test]
pub fn test_rem() {
fn test(a: Rational, b: Rational, c: Rational) {
assert_eq!(a % b, c);
assert_eq!(
{
let mut x = a;
x %= b;
x
},
c
);
assert_eq!(to_big(a) % to_big(b), to_big(c))
}
fn test_assign(a: Rational, b: isize, c: Rational) {
assert_eq!(a % b, c);
assert_eq!(
{
let mut x = a;
x %= b;
x
},
c
);
}
test(_3_2, _1, _1_2);
test(_2, _NEG1_2, _0);
test(_1_2, _2, _1_2);
test_assign(_3_2, 1, _1_2);
}
//#[test]
pub fn test_neg() {
fn test(a: Rational, b: Rational) {
assert_eq!(-a, b);
assert_eq!(-to_big(a), to_big(b))
}
test(_0, _0);
test(_1_2, _NEG1_2);
test(-_1, _1);
}
//#[test]
pub fn test_zero() {
assert_eq!(_0 + _0, _0);
assert_eq!(_0 * _0, _0);
assert_eq!(_0 * _1, _0);
assert_eq!(_0 / _NEG1_2, _0);
assert_eq!(_0 - _0, _0);
}
//#[test]
//#[should_panic]
pub fn test_div_0() {
let _a = _1 / _0;
}
//#[test]
pub fn test_checked_failures() {
let big = Ratio::new(128u8, 1);
let small = Ratio::new(1, 128u8);
assert_eq!(big.checked_add(&big), None);
assert_eq!(small.checked_sub(&big), None);
assert_eq!(big.checked_mul(&big), None);
assert_eq!(small.checked_div(&big), None);
assert_eq!(_1.checked_div(&_0), None);
}
}
//#[test]
pub fn test_round() {
assert_eq!(_1_3.ceil(), _1);
assert_eq!(_1_3.floor(), _0);
assert_eq!(_1_3.round(), _0);
assert_eq!(_1_3.trunc(), _0);
assert_eq!(_NEG1_3.ceil(), _0);
assert_eq!(_NEG1_3.floor(), -_1);
assert_eq!(_NEG1_3.round(), _0);
assert_eq!(_NEG1_3.trunc(), _0);
assert_eq!(_2_3.ceil(), _1);
assert_eq!(_2_3.floor(), _0);
assert_eq!(_2_3.round(), _1);
assert_eq!(_2_3.trunc(), _0);
assert_eq!(_NEG2_3.ceil(), _0);
assert_eq!(_NEG2_3.floor(), -_1);
assert_eq!(_NEG2_3.round(), -_1);
assert_eq!(_NEG2_3.trunc(), _0);
assert_eq!(_1_2.ceil(), _1);
assert_eq!(_1_2.floor(), _0);
assert_eq!(_1_2.round(), _1);
assert_eq!(_1_2.trunc(), _0);
assert_eq!(_NEG1_2.ceil(), _0);
assert_eq!(_NEG1_2.floor(), -_1);
assert_eq!(_NEG1_2.round(), -_1);
assert_eq!(_NEG1_2.trunc(), _0);
assert_eq!(_1.ceil(), _1);
assert_eq!(_1.floor(), _1);
assert_eq!(_1.round(), _1);
assert_eq!(_1.trunc(), _1);
// Overflow checks
let _neg1 = Ratio::from_integer(-1);
let _large_rat1 = Ratio::new(i32::MAX, i32::MAX - 1);
let _large_rat2 = Ratio::new(i32::MAX - 1, i32::MAX);
let _large_rat3 = Ratio::new(i32::MIN + 2, i32::MIN + 1);
let _large_rat4 = Ratio::new(i32::MIN + 1, i32::MIN + 2);
let _large_rat5 = Ratio::new(i32::MIN + 2, i32::MAX);
let _large_rat6 = Ratio::new(i32::MAX, i32::MIN + 2);
let _large_rat7 = Ratio::new(1, i32::MIN + 1);
let _large_rat8 = Ratio::new(1, i32::MAX);
assert_eq!(_large_rat1.round(), One::one());
assert_eq!(_large_rat2.round(), One::one());
assert_eq!(_large_rat3.round(), One::one());
assert_eq!(_large_rat4.round(), One::one());
assert_eq!(_large_rat5.round(), _neg1);
assert_eq!(_large_rat6.round(), _neg1);
assert_eq!(_large_rat7.round(), Zero::zero());
assert_eq!(_large_rat8.round(), Zero::zero());
}
//#[test]
pub fn test_fract() {
assert_eq!(_1.fract(), _0);
assert_eq!(_NEG1_2.fract(), _NEG1_2);
assert_eq!(_1_2.fract(), _1_2);
assert_eq!(_3_2.fract(), _1_2);
}
//#[test]
pub fn test_recip() {
assert_eq!(_1 * _1.recip(), _1);
assert_eq!(_2 * _2.recip(), _1);
assert_eq!(_1_2 * _1_2.recip(), _1);
assert_eq!(_3_2 * _3_2.recip(), _1);
assert_eq!(_NEG1_2 * _NEG1_2.recip(), _1);
assert_eq!(_3_2.recip(), _2_3);
assert_eq!(_NEG1_2.recip(), _NEG2);
assert_eq!(_NEG1_2.recip().denom(), &1);
}
#[test]
#[should_panic(expected = "== 0")]
fn test_recip_fail() {
let _a = Ratio::new(0, 1).recip();
}
//#[test]
pub fn test_pow() {
fn test(r: Rational, e: i32, expected: Rational) {
assert_eq!(r.pow(e), expected);
assert_eq!(Pow::pow(r, e), expected);
assert_eq!(Pow::pow(r, &e), expected);
assert_eq!(Pow::pow(&r, e), expected);
assert_eq!(Pow::pow(&r, &e), expected);
}
test(_1_2, 2, Ratio::new(1, 4));
test(_1_2, -2, Ratio::new(4, 1));
test(_1, 1, _1);
test(_1, i32::MAX, _1);
test(_1, i32::MIN, _1);
test(_NEG1_2, 2, _1_2.pow(2i32));
test(_NEG1_2, 3, -_1_2.pow(3i32));
test(_3_2, 0, _1);
test(_3_2, -1, _3_2.recip());
test(_3_2, 3, Ratio::new(27, 8));
}
//#[test]
//#[cfg(feature = "std")]
pub fn test_to_from_str()
|
//#[test]
pub fn test_from_str_fail() {
fn test(s: &str) {
let rational: Result<Rational, _> = FromStr::from_str(s);
assert!(rational.is_err());
}
let xs = ["0 /1", "abc", "", "1/", "--1/2", "3/2/1", "1/0"];
for &s in xs.iter() {
test(s);
}
}
//#[cfg(feature = "bigint")]
//#[test]
pub fn test_from_float() {
use traits::float::FloatCore;
fn test<T: FloatCore>(given: T, (numer, denom): (&str, &str)) {
let ratio: BigRational = Ratio::from_float(given).unwrap();
assert_eq!(
ratio,
Ratio::new(
FromStr::from_str(numer).unwrap(),
FromStr::from_str(denom).unwrap()
)
);
}
// f32
test(3.14159265359f32, ("13176795", "4194304"));
test(2f32.powf(100.), ("1267650600228229401496703205376", "1"));
test(-2f32.powf(100.), ("-1267650600228229401496703205376", "1"));
test(
1.0 / 2f32.powf(100.),
("1", "1267650600228229401496703205376"),
);
test(684729.48391f32, ("1369459", "2"));
test(-8573.5918555f32, ("-4389679", "512"));
// f64
test(3.14159265359f64, ("3537118876014453", "1125899906842624"));
test(2f64.powf(100.), ("1267650600228229401496703205376", "1"));
test(-2f64.powf(100.), ("-1267650600228229401496703205376", "1"));
test(684729.48391f64, ("367611342500051", "536870912"));
test(-8573.5918555f64, ("-4713381968463931", "549755813888"));
test(
1.0 / 2f64.powf(100.),
("1", "1267650600228229401496703205376"),
);
}
//#[cfg(feature = "bigint")]
//#[test]
pub fn test_from_float_fail() {
use core::{f32};
assert_eq!(Ratio::from_float(f32::NAN), None);
assert_eq!(Ratio::from_float(f32::INFINITY), None);
assert_eq!(Ratio::from_float(f32::NEG_INFINITY), None);
assert_eq!(Ratio::from_float(f64::NAN), None);
assert_eq!(Ratio::from_float(f64::INFINITY), None);
assert_eq!(Ratio::from_float(f64::NEG_INFINITY), None);
}
//#[test]
pub fn test_signed() {
assert_eq!(_NEG1_2.abs(), _1_2);
assert_eq!(_3_2.abs_sub(&_1_2), _1);
assert_eq!(_1_2.abs_sub(&_3_2), Zero::zero());
assert_eq!(_1_2.signum(), One::one());
assert_eq!(_NEG1_2.signum(), -<Ratio<isize>>::one());
assert_eq!(_0.signum(), Zero::zero());
assert!(_NEG1_2.is_negative());
assert!(_1_NEG2.is_negative());
assert!(!_NEG1_2.is_positive());
assert!(!_1_NEG2.is_positive());
assert!(_1_2.is_positive());
assert!(_NEG1_NEG2.is_positive());
assert!(!_1_2.is_negative());
assert!(!_NEG1_NEG2.is_negative());
assert!(!_0.is_positive());
assert!(!_0.is_negative());
}
//#[test]
//#[cfg(feature = "std")]
pub fn test_hash() {
assert!(hash(&_0) != hash(&_1));
assert!(hash(&_0) != hash(&_3_2));
// a == b -> hash(a) == hash(b)
let a = Rational::new_raw(4, 2);
let b = Rational::new_raw(6, 3);
assert_eq!(a, b);
assert_eq!(hash(&a), hash(&b));
let a = Rational::new_raw(123456789, 1000);
let b = Rational::new_raw(123456789 * 5, 5000);
assert_eq!(a, b);
assert_eq!(hash(&a), hash(&b));
}
//#[test]
pub fn test_into_pair() {
assert_eq!((0, 1), _0.into());
assert_eq!((-2, 1), _NEG2.into());
assert_eq!((1, -2), _1_NEG2.into());
}
//#[test]
pub fn test_from_pair() {
assert_eq!(_0, Ratio::from((0, 1)));
assert_eq!(_1, Ratio::from((1, 1)));
assert_eq!(_NEG2, Ratio::from((-2, 1)));
assert_eq!(_1_NEG2, Ratio::from((1, -2)));
}
//#[test]
pub fn ratio_iter_sum() {
    // generic function to ensure `.sum()` can be called on any Iterator
    // with Item = Ratio<impl Integer> or Ratio<&impl Integer>
fn iter_sums<T: Integer + Clone>(slice: &[Ratio<T>]) -> [Ratio<T>; 3] {
let mut manual_sum = Ratio::new(T::zero(), T::one());
for ratio in slice {
manual_sum = manual_sum + ratio;
}
[manual_sum, slice.iter().sum(), slice.iter().cloned().sum()]
}
// collect into array so test works on no_std
let mut nums = [Ratio::new(0, 1); 1000];
for (i, r) in (0..1000).map(|n| Ratio::new(n, 500)).enumerate() {
nums[i] = r;
}
let sums = iter_sums(&nums[..]);
assert_eq!(sums[0], sums[1]);
assert_eq!(sums[0], sums[2]);
}
//#[test]
pub fn ratio_iter_product() {
    // generic function to ensure `.product()` can be called on any Iterator
    // with Item = Ratio<impl Integer> or Ratio<&impl Integer>
fn iter_products<T: Integer + Clone>(slice: &[Ratio<T>]) -> [Ratio<T>; 3] {
let mut manual_prod = Ratio::new(T::one(), T::one());
for ratio in slice {
manual_prod = manual_prod * ratio;
}
[
manual_prod,
slice.iter().product(),
slice.iter().cloned().product(),
]
}
// collect into array so test works on no_std
let mut nums = [Ratio::new(0, 1); 1000];
for (i, r) in (0..1000).map(|n| Ratio::new(n, 500)).enumerate() {
nums[i] = r;
}
let products = iter_products(&nums[..]);
assert_eq!(products[0], products[1]);
assert_eq!(products[0], products[2]);
}
//#[test]
pub fn test_num_zero() {
let zero = Rational64::zero();
assert!(zero.is_zero());
let mut r = Rational64::new(123, 456);
assert!(!r.is_zero());
assert_eq!(&r + &zero, r);
r.set_zero();
assert!(r.is_zero());
}
//#[test]
pub fn test_num_one() {
let one = Rational64::one();
assert!(one.is_one());
let mut r = Rational64::new(123, 456);
assert!(!r.is_one());
assert_eq!(&r * &one, r);
r.set_one();
assert!(r.is_one());
}
|
{
//use std::string::{String, ToString};
fn test(r: Rational, s: String) {
assert_eq!(FromStr::from_str(&s), Ok(r));
assert_eq!(r.to_string(), s);
}
test(_1, "1".to_string());
test(_0, "0".to_string());
test(_1_2, "1/2".to_string());
test(_3_2, "3/2".to_string());
test(_2, "2".to_string());
test(_NEG1_2, "-1/2".to_string());
}
|
modifiers.rs
|
/// Modifiers which can be injected by the application logic to change the state
use crate::api;
/// `StatusModifier`s are used to modify the status
pub trait StatusModifier: Send + Sync {
/// Called after all registered sensors are read
fn modify(&self, status: &mut api::Status);
}
/// This modifier updates the opening state based on the
/// people now present sensor.
pub struct StateFromPeopleNowPresent;
impl StatusModifier for StateFromPeopleNowPresent {
fn modify(&self, status: &mut api::Status) {
// Update state depending on number of people present
let people_now_present: Option<u64> = status.sensors.as_ref()
.map(|sensors| sensors.people_now_present[0].value);
if let Some(count) = people_now_present {
status.state.open = Some(count > 0);
if count == 1 {
|
}
}
}
|
status.state.message = Some(format!("{} person here right now", count));
} else if count > 1 {
status.state.message = Some(format!("{} people here right now", count));
}
|
eth_client.rs
|
// External uses
use serde::Deserialize;
// Local uses
use crate::envy_load;
/// Configuration for the Ethereum gateways.
#[derive(Debug, Deserialize, Clone, PartialEq)]
pub struct
|
{
/// Numeric identifier of the L1 network (e.g. `9` for localhost).
pub chain_id: u8,
    /// Factor by which to increase the gas price provided by the network.
    /// Normally it's 1, meaning the network-provided price is used (and limited by the gas adjuster in the eth sender).
    /// However, it can be increased to speed up transaction mining time.
pub gas_price_factor: f64,
/// Address of the Ethereum node API.
pub web3_url: Vec<String>,
}
impl ETHClientConfig {
pub fn from_env() -> Self {
envy_load!("eth_client", "ETH_CLIENT_")
}
    /// Get the first web3 URL; useful for direct web3 clients that don't need a multiplexer.
pub fn web3_url(&self) -> String {
self.web3_url
.first()
.cloned()
.expect("Should be at least one")
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::configs::test_utils::set_env;
fn expected_config() -> ETHClientConfig {
ETHClientConfig {
chain_id: 33,
gas_price_factor: 1.0f64,
web3_url: vec!["http://127.0.0.1:4444".into()],
}
}
#[test]
fn from_env() {
let config = r#"
ETH_CLIENT_CHAIN_ID="33"
ETH_CLIENT_GAS_PRICE_FACTOR="1"
ETH_CLIENT_WEB3_URL="http://127.0.0.1:4444"
"#;
set_env(config);
let actual = ETHClientConfig::from_env();
assert_eq!(actual, expected_config());
assert_eq!(actual.web3_url(), "http://127.0.0.1:4444");
}
}
|
ETHClientConfig
|
mongo.go
|
package mongo
import (
"fmt"
"os"
"os/exec"
"strings"
)
// Dump provides dump execution arguments.
type Dump struct {
Host string
Username string
Password string
Name string
Opts string
DumpName string
}
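// getHostPort splits a "host:port" string into its parts, defaulting the
// port to 27017 when none is given. Illustrative examples (not from the
// original source): getHostPort("db.example.com:27018") returns
// ("db.example.com", "27018"); getHostPort("db.example.com") returns
// ("db.example.com", "27017").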
func getHostPort(h string) (string, string) {
data := strings.Split(h, ":")
host := data[0]
port := "27017"
if len(data) > 1 {
port = data[1]
}
return host, port
}
// Exec builds and runs the mongodump command with the configured connection settings and flags.
func (d Dump) Exec() error {
envs := os.Environ()
	// Print the version number of the command line tools.
cmd := exec.Command("mongodump", "--version")
cmd.Env = envs
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
trace(cmd)
if err := cmd.Run(); err != nil {
return err
}
flags := []string{"mongodump"}
host, port := getHostPort(d.Host)
if host != "" {
flags = append(flags, "-h", host)
}
if port != "" {
flags = append(flags, "--port", port)
}
if d.Username != "" {
flags = append(flags, "-u", d.Username)
}
if d.Password != "" {
flags = append(flags, "-p", d.Password)
}
|
flags = append(flags, "-d", d.Name)
}
// Compresses the output. If mongodump outputs to the dump directory, the new feature compresses the individual files. The files have the suffix .gz.
flags = append(flags, "--gzip")
flags = append(flags, "--archive="+d.DumpName)
if d.Opts != "" {
flags = append(flags, d.Opts)
}
cmd = exec.Command("bash", "-c", strings.Join(flags, " "))
cmd.Env = envs
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
trace(cmd)
return cmd.Run()
}
// trace prints the command to stdout.
func trace(cmd *exec.Cmd) {
fmt.Printf("$ %s\n", strings.Join(cmd.Args, " "))
}
// NewEngine creates a new Dump configured with the given connection settings.
func NewEngine(host, username, password, name, dumpName, opts string) (*Dump, error) {
return &Dump{
Host: host,
Username: username,
Password: password,
Name: name,
Opts: opts,
DumpName: dumpName,
}, nil
}
|
if d.Name != "" {
|
setAtSpec.ts
|
import {computeKeyPath, Obj, setAt} from "../../../src"
describe("setAt", (): void => {
it("sets the value on the object at the specified key path", (): void => {
const object = {polygons: {rectangles: {squares: {}}}} as Obj
const keyPath = computeKeyPath("polygons", "rectangles", "squares", "magicSquare")
const value = true
setAt(object, keyPath, value)
expect(object).toEqual({polygons: {rectangles: {squares: {magicSquare: true}}}})
})
|
const keyPath = computeKeyPath("polygons", 2, "squares", "magicSquare")
const value = true
const options = {parents: {}}
setAt(object, keyPath, value, options)
expect(object).toEqual({polygons: {2: {squares: {magicSquare: true}}}})
})
it("can create the path if necessary, with the 'parents' options", (): void => {
const object = [] as unknown[] as Obj
const keyPath = computeKeyPath(3, 2, 0)
const value = true
const options = {parents: []}
setAt(object, keyPath, value, options)
expect(object).toEqual([undefined, undefined, undefined, [undefined, undefined, [true]]] as Obj)
})
})
|
it("can create the path if necessary, with the 'parents' options", (): void => {
const object = {} as Obj
|
index.ts
|
import * as Mongoose from 'mongoose';
import { todoListModel } from '../routes/todolist/model';
import { IDatabase } from './interface';
import { userModel } from '../routes/user/model';
export function
|
() : IDatabase
{
Mongoose.connect("mongodb://localhost:27017/todolists");
return({
todoListModel,
userModel
});
};
|
start
|
io_small.py
|
#!/usr/bin/python
"""
(C) Copyright 2020-2022 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import time
from ior_test_base import IorTestBase
from general_utils import human_to_bytes
class
|
(IorTestBase):
# pylint: disable=too-many-ancestors,too-few-public-methods
"""Test class for testing aggregation with small I/O.
Test class Description:
        Run IOR (< 4k transfer size) with the -k option to verify that the
        data is first written to SCM and, after aggregation, moved to the
        NVMe SSDs.
:avocado: recursive
"""
def test_aggregation_io_small(self):
"""Jira ID: DAOS-3750.
Test Description:
            Purpose of this test is to run ior with a < 4k transfer size
            and verify the data is initially written into SCM and later
            moved to the NVMe SSDs.
:avocado: tags=all,full_regression,hw,large,aggregate,daosio
:avocado: tags=aggregateiosmall
"""
# Create pool and container
self.update_ior_cmd_with_pool()
# Since the transfer size is 1K, the objects will be inserted
# into SCM
scm_index = 0
ssd_index = 1
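        # Total bytes IOR will write: per-process block size x number of client processes.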
block_size = human_to_bytes(self.params.get("block_size", "/run/ior/*"))
num_processes = self.params.get("np", "/run/ior/client_processes/*")
total_ior = block_size * num_processes
pool_info = self.pool.get_pool_daos_space()
initial_scm_free_space = pool_info["s_free"][scm_index]
initial_ssd_free_space = pool_info["s_free"][ssd_index]
self.log.info(
"Initial SCM Free Space = {}".format(initial_scm_free_space))
self.log.info(
"Initial SSD Free Space = {}".format(initial_ssd_free_space))
# Disable the aggregation
self.log.info("Disabling the aggregation")
self.pool.set_property("reclaim", "disabled")
# Run ior
self.run_ior_with_pool()
pool_info = self.pool.get_pool_daos_space()
scm_free_space_after_ior = pool_info["s_free"][scm_index]
ssd_free_space_after_ior = pool_info["s_free"][ssd_index]
self.log.info(
"SCM Free Space after ior = {}".format(scm_free_space_after_ior))
self.log.info(
"SSD Free Space after ior = {}".format(ssd_free_space_after_ior))
self.log.info(
"Comparing if scm space after ior - {} is less than initial free "
"space - {}".format(
scm_free_space_after_ior, initial_scm_free_space))
self.assertLessEqual(
scm_free_space_after_ior, (initial_scm_free_space - total_ior),
"SCM free space after IOR > the initial SCM free space")
self.log.info("Checking that nothing has been moved to SSD")
self.assertEqual(
ssd_free_space_after_ior, initial_ssd_free_space,
"Detected data moved to SSD after running IOR")
# Enable the aggregation
self.log.info("Enabling the aggregation")
self.pool.set_property("reclaim", "time")
# wait 90 seconds for files to get old enough for aggregation +
# 90 seconds for aggregation to start and finish
wait_time = 180
self.log.info("Waiting for {} seconds".format(wait_time))
time.sleep(wait_time)
pool_info = self.pool.get_pool_daos_space()
scm_free_space_after_aggregate = pool_info["s_free"][scm_index]
ssd_free_space_after_aggregate = pool_info["s_free"][ssd_index]
self.log.info("Checking the data is moved to SSD after aggregation")
self.log.info(
"{} == {}".format(
(initial_ssd_free_space - total_ior),
ssd_free_space_after_aggregate))
self.assertEqual(
(initial_ssd_free_space - total_ior),
ssd_free_space_after_aggregate,
"No data detected in SSD after aggregation")
self.log.info("Checking the SCM space is reclaimed")
self.log.info(
"{} > {}".format(
scm_free_space_after_aggregate, scm_free_space_after_ior))
self.assertGreater(
scm_free_space_after_aggregate, scm_free_space_after_ior,
"SCM space has not been reclaimed")
|
DaosAggregationIOSmall
|
compose.go
|
/*
Copyright 2020 Docker Compose CLI authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package compose
import (
"context"
"fmt"
"os"
"os/signal"
"path/filepath"
"strings"
"syscall"
"github.com/compose-spec/compose-go/cli"
"github.com/compose-spec/compose-go/types"
dockercli "github.com/docker/cli/cli"
"github.com/docker/cli/cli-plugins/manager"
"github.com/docker/compose/v2/cmd/formatter"
"github.com/morikuni/aec"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/docker/compose/v2/pkg/api"
"github.com/docker/compose/v2/pkg/compose"
)
// Command defines a compose CLI command as a func with args
type Command func(context.Context, []string) error
// CobraCommand defines a cobra command function
type CobraCommand func(context.Context, *cobra.Command, []string) error
// AdaptCmd adapts a CobraCommand func to the cobra library
func AdaptCmd(fn CobraCommand) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
contextString := fmt.Sprintf("%s", ctx)
if !strings.HasSuffix(contextString, ".WithCancel") { // need to handle cancel
cancellableCtx, cancel := context.WithCancel(cmd.Context())
ctx = cancellableCtx
s := make(chan os.Signal, 1)
signal.Notify(s, syscall.SIGTERM, syscall.SIGINT)
go func() {
<-s
cancel()
}()
}
err := fn(ctx, cmd, args)
var composeErr compose.Error
if api.IsErrCanceled(err) || errors.Is(ctx.Err(), context.Canceled) {
err = dockercli.StatusError{
StatusCode: 130,
Status: compose.CanceledStatus,
}
}
if errors.As(err, &composeErr) {
err = dockercli.StatusError{
StatusCode: composeErr.GetMetricsFailureCategory().ExitCode,
Status: err.Error(),
}
}
return err
}
}
// Adapt adapts a Command func to the cobra library
func Adapt(fn Command) func(cmd *cobra.Command, args []string) error {
return AdaptCmd(func(ctx context.Context, cmd *cobra.Command, args []string) error {
return fn(ctx, args)
})
}
// Warning is a global warning to be displayed to the user on command failure
var Warning string
type projectOptions struct {
ProjectName string
Profiles []string
ConfigPaths []string
WorkDir string
ProjectDir string
EnvFile string
Compatibility bool
}
// ProjectFunc runs a compose operation against a loaded types.Project
type ProjectFunc func(ctx context.Context, project *types.Project) error
// ProjectServicesFunc runs a compose operation against a loaded types.Project and a selection of its services
type ProjectServicesFunc func(ctx context.Context, project *types.Project, services []string) error
// WithProject creates a cobra run command from a ProjectFunc based on configured project options and selected services
func (o *projectOptions) WithProject(fn ProjectFunc) func(cmd *cobra.Command, args []string) error {
return o.WithServices(func(ctx context.Context, project *types.Project, services []string) error {
return fn(ctx, project)
})
}
// WithServices creates a cobra run command from a ProjectServicesFunc based on configured project options and selected services
func (o *projectOptions) WithServices(fn ProjectServicesFunc) func(cmd *cobra.Command, args []string) error {
return Adapt(func(ctx context.Context, args []string) error {
project, err := o.toProject(args, cli.WithResolvedPaths(true))
if err != nil {
return err
}
if o.EnvFile != "" {
var services types.Services
for _, s := range project.Services {
|
}
if s.Labels == nil {
s.Labels = make(map[string]string)
}
s.Labels[api.EnvironmentFileLabel] = ef
services = append(services, s)
}
}
project.Services = services
}
return fn(ctx, project, args)
})
}
func (o *projectOptions) addProjectFlags(f *pflag.FlagSet) {
f.StringArrayVar(&o.Profiles, "profile", []string{}, "Specify a profile to enable")
f.StringVarP(&o.ProjectName, "project-name", "p", "", "Project name")
f.StringArrayVarP(&o.ConfigPaths, "file", "f", []string{}, "Compose configuration files")
f.StringVar(&o.EnvFile, "env-file", "", "Specify an alternate environment file.")
f.StringVar(&o.ProjectDir, "project-directory", "", "Specify an alternate working directory\n(default: the path of the Compose file)")
f.StringVar(&o.WorkDir, "workdir", "", "DEPRECATED! USE --project-directory INSTEAD.\nSpecify an alternate working directory\n(default: the path of the Compose file)")
f.BoolVar(&o.Compatibility, "compatibility", false, "Run compose in backward compatibility mode")
_ = f.MarkHidden("workdir")
}
func (o *projectOptions) toProjectName() (string, error) {
if o.ProjectName != "" {
return o.ProjectName, nil
}
project, err := o.toProject(nil)
if err != nil {
return "", err
}
return project.Name, nil
}
func (o *projectOptions) toProject(services []string, po ...cli.ProjectOptionsFn) (*types.Project, error) {
options, err := o.toProjectOptions(po...)
if err != nil {
return nil, compose.WrapComposeError(err)
}
project, err := cli.ProjectFromOptions(options)
if err != nil {
return nil, compose.WrapComposeError(err)
}
if len(services) > 0 {
s, err := project.GetServices(services...)
if err != nil {
return nil, err
}
o.Profiles = append(o.Profiles, s.GetProfiles()...)
}
if profiles, ok := options.Environment["COMPOSE_PROFILES"]; ok {
o.Profiles = append(o.Profiles, strings.Split(profiles, ",")...)
}
project.ApplyProfiles(o.Profiles)
project.WithoutUnnecessaryResources()
err = project.ForServices(services)
return project, err
}
func (o *projectOptions) toProjectOptions(po ...cli.ProjectOptionsFn) (*cli.ProjectOptions, error) {
return cli.NewProjectOptions(o.ConfigPaths,
append(po,
cli.WithEnvFile(o.EnvFile),
cli.WithDotEnv,
cli.WithOsEnv,
cli.WithWorkingDirectory(o.ProjectDir),
cli.WithConfigFileEnv,
cli.WithDefaultConfigPath,
cli.WithName(o.ProjectName))...)
}
const pluginName = "compose"
// RunningAsStandalone detects when running as a standalone program
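// For example (illustrative): invoked as `docker-compose up`, os.Args[1] is "up",
// so this returns true; invoked as a CLI plugin via `docker compose up`,
// os.Args[1] is "compose", so it returns false.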
func RunningAsStandalone() bool {
return len(os.Args) < 2 || os.Args[1] != manager.MetadataSubcommandName && os.Args[1] != pluginName
}
// RootCommand returns the compose command with its child commands
func RootCommand(backend api.Service) *cobra.Command {
opts := projectOptions{}
var (
ansi string
noAnsi bool
verbose bool
)
command := &cobra.Command{
Short: "Docker Compose",
Use: pluginName,
TraverseChildren: true,
		// By default (no Run/RunE on the parent command), cobra reacts to a mistyped subcommand by printing the parent's help and exiting with 0!
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return cmd.Help()
}
_ = cmd.Help()
return dockercli.StatusError{
StatusCode: compose.CommandSyntaxFailure.ExitCode,
Status: fmt.Sprintf("unknown docker command: %q", "compose "+args[0]),
}
},
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
parent := cmd.Root()
if parent != nil {
parentPrerun := parent.PersistentPreRunE
if parentPrerun != nil {
err := parentPrerun(cmd, args)
if err != nil {
return err
}
}
}
if noAnsi {
if ansi != "auto" {
return errors.New(`cannot specify DEPRECATED "--no-ansi" and "--ansi". Please use only "--ansi"`)
}
ansi = "never"
fmt.Fprint(os.Stderr, aec.Apply("option '--no-ansi' is DEPRECATED ! Please use '--ansi' instead.\n", aec.RedF))
}
if verbose {
logrus.SetLevel(logrus.TraceLevel)
}
formatter.SetANSIMode(ansi)
if opts.WorkDir != "" {
if opts.ProjectDir != "" {
return errors.New(`cannot specify DEPRECATED "--workdir" and "--project-directory". Please use only "--project-directory" instead`)
}
opts.ProjectDir = opts.WorkDir
fmt.Fprint(os.Stderr, aec.Apply("option '--workdir' is DEPRECATED at root level! Please use '--project-directory' instead.\n", aec.RedF))
}
if opts.Compatibility || os.Getenv("COMPOSE_COMPATIBILITY") == "true" {
compose.Separator = "_"
}
return nil
},
}
command.AddCommand(
upCommand(&opts, backend),
downCommand(&opts, backend),
startCommand(&opts, backend),
restartCommand(&opts, backend),
stopCommand(&opts, backend),
psCommand(&opts, backend),
listCommand(backend),
logsCommand(&opts, backend),
convertCommand(&opts, backend),
killCommand(&opts, backend),
runCommand(&opts, backend),
removeCommand(&opts, backend),
execCommand(&opts, backend),
pauseCommand(&opts, backend),
unpauseCommand(&opts, backend),
topCommand(&opts, backend),
eventsCommand(&opts, backend),
portCommand(&opts, backend),
imagesCommand(&opts, backend),
versionCommand(),
buildCommand(&opts, backend),
pushCommand(&opts, backend),
pullCommand(&opts, backend),
createCommand(&opts, backend),
copyCommand(&opts, backend),
)
command.Flags().SetInterspersed(false)
opts.addProjectFlags(command.Flags())
command.Flags().StringVar(&ansi, "ansi", "auto", `Control when to print ANSI control characters ("never"|"always"|"auto")`)
command.Flags().BoolVar(&noAnsi, "no-ansi", false, `Do not print ANSI control characters (DEPRECATED)`)
command.Flags().MarkHidden("no-ansi") //nolint:errcheck
command.Flags().BoolVar(&verbose, "verbose", false, "Show more output")
command.Flags().MarkHidden("verbose") //nolint:errcheck
return command
}
|
ef := o.EnvFile
if ef != "" {
if !filepath.IsAbs(ef) {
ef = filepath.Join(project.WorkingDir, o.EnvFile)
|
__init__.py
|
from .modeling import *
from .tokenizer import *
|
||
renameInputField.ts
|
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import 'vs/css!./rename';
import {TPromise} from 'vs/base/common/winjs.base';
import strings = require('vs/base/common/strings');
import errors = require('vs/base/common/errors');
import lifecycle = require('vs/base/common/lifecycle');
import EditorCommon = require('vs/editor/common/editorCommon');
import EditorBrowser = require('vs/editor/browser/editorBrowser');
import {Range} from 'vs/editor/common/core/range';
class
|
implements EditorBrowser.IContentWidget, lifecycle.IDisposable {
private _editor: EditorBrowser.ICodeEditor;
private _position: EditorCommon.IPosition;
private _domNode: HTMLElement;
private _inputField: HTMLInputElement;
private _visible: boolean;
// Editor.IContentWidget.allowEditorOverflow
public allowEditorOverflow = true;
constructor(editor: EditorBrowser.ICodeEditor) {
this._editor = editor;
this._editor.addContentWidget(this);
}
public dispose(): void {
this._editor.removeContentWidget(this);
}
public getId(): string {
return '__renameInputWidget';
}
public getDomNode(): HTMLElement {
if (!this._domNode) {
this._inputField = document.createElement('input');
this._inputField.className = 'rename-input';
this._domNode = document.createElement('div');
this._domNode.style.height = `${this._editor.getConfiguration().lineHeight}px`;
this._domNode.className = 'monaco-editor rename-box';
this._domNode.appendChild(this._inputField);
}
return this._domNode;
}
public getPosition(): EditorBrowser.IContentWidgetPosition {
return this._visible
? { position: this._position, preference: [EditorBrowser.ContentWidgetPositionPreference.BELOW, EditorBrowser.ContentWidgetPositionPreference.ABOVE] }
: null;
}
private _currentAcceptInput: () => void = null;
private _currentCancelInput: () => void = null;
public acceptInput(): void {
if (this._currentAcceptInput) {
this._currentAcceptInput();
}
}
public cancelInput(): void {
if (this._currentCancelInput) {
this._currentCancelInput();
}
}
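	// getInput shows the rename box at the given range, pre-filled with `value`, and
	// resolves with the new name, or rejects with a canceled error if the input is
	// dismissed, left unchanged, or left empty. The desired selection is stashed in
	// the `selectionStart`/`selectionEnd` attributes so _show() can restore it after focusing.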
public getInput(where: EditorCommon.IRange, value: string, selectionStart: number, selectionEnd: number): TPromise<string> {
this._position = { lineNumber: where.startLineNumber, column: where.startColumn };
this._inputField.value = value;
this._inputField.setAttribute('selectionStart', selectionStart.toString());
this._inputField.setAttribute('selectionEnd', selectionEnd.toString());
this._inputField.size = Math.max((where.endColumn - where.startColumn) * 1.1, 20);
var disposeOnDone: lifecycle.IDisposable[] = [],
always: Function;
always = () => {
lifecycle.disposeAll(disposeOnDone);
this._hide();
};
return new TPromise<string>((c, e) => {
this._currentCancelInput = () => {
this._currentAcceptInput = null;
this._currentCancelInput = null;
e(errors.canceled());
return true;
};
this._currentAcceptInput = () => {
if (this._inputField.value.trim().length === 0 || this._inputField.value === value) {
// empty or whitespace only or not changed
this._currentCancelInput();
return;
}
this._currentAcceptInput = null;
this._currentCancelInput = null;
c(this._inputField.value);
}
var onCursorChanged = () => {
if (!Range.containsPosition(where, this._editor.getPosition())) {
this._currentCancelInput();
}
};
disposeOnDone.push(this._editor.addListener2(EditorCommon.EventType.CursorSelectionChanged, onCursorChanged));
disposeOnDone.push(this._editor.addListener2(EditorCommon.EventType.EditorBlur, this._currentCancelInput));
this._show();
}, this._currentCancelInput).then(value => {
always();
return value;
}, err => {
always();
return TPromise.wrapError(err);
});
}
private _show(): void{
this._visible = true;
this._editor.layoutContentWidget(this);
setTimeout(() => {
this._inputField.focus();
this._inputField.setSelectionRange(
parseInt(this._inputField.getAttribute('selectionStart')),
parseInt(this._inputField.getAttribute('selectionEnd')));
}, 25);
}
private _hide(): void {
this._visible = false;
this._editor.layoutContentWidget(this);
}
}
export = RenameInputField;
|
RenameInputField
|
number_representation.rs
|
use syntax::{ast, ast::Radix, AstToken};
use crate::{AssistContext, AssistId, AssistKind, Assists, GroupLabel};
const MIN_NUMBER_OF_DIGITS_TO_FORMAT: usize = 5;
// Assist: reformat_number_literal
//
// Adds or removes separators from integer literal.
//
// ```
// const _: i32 = 1012345$0;
// ```
// ->
// ```
// const _: i32 = 1_012_345;
// ```
pub(crate) fn reformat_number_literal(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
let literal = ctx.find_node_at_offset::<ast::Literal>()?;
let literal = match literal.kind() {
ast::LiteralKind::IntNumber(it) => it,
_ => return None,
};
let text = literal.text();
if text.contains('_') {
return remove_separators(acc, literal);
}
let (prefix, value, suffix) = literal.split_into_parts();
if value.len() < MIN_NUMBER_OF_DIGITS_TO_FORMAT {
return None;
}
let radix = literal.radix();
let mut converted = prefix.to_string();
converted.push_str(&add_group_separators(value, group_size(radix)));
converted.push_str(suffix);
let group_id = GroupLabel("Reformat number literal".into());
let label = format!("Convert {} to {}", literal, converted);
let range = literal.syntax().text_range();
acc.add_group(
&group_id,
AssistId("reformat_number_literal", AssistKind::RefactorInline),
label,
range,
|builder| builder.replace(range, converted),
)
}
fn remove_separators(acc: &mut Assists, literal: ast::IntNumber) -> Option<()> {
let group_id = GroupLabel("Reformat number literal".into());
let range = literal.syntax().text_range();
acc.add_group(
&group_id,
AssistId("reformat_number_literal", AssistKind::RefactorInline),
"Remove digit separators",
range,
|builder| builder.replace(range, literal.text().replace('_', "")),
)
}
const fn group_size(r: Radix) -> usize {
match r {
Radix::Binary => 4,
Radix::Octal => 3,
Radix::Decimal => 3,
Radix::Hexadecimal => 4,
}
}
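/// Inserts `_` separators every `group_size` digits, counting from the least
/// significant digit; any pre-existing separators are stripped first
/// (e.g. `add_group_separators("1234567", 3)` yields `"1_234_567"`).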
fn add_group_separators(s: &str, group_size: usize) -> String {
let mut chars = Vec::new();
for (i, ch) in s.chars().filter(|&ch| ch != '_').rev().enumerate() {
if i > 0 && i % group_size == 0 {
chars.push('_');
}
chars.push(ch);
}
chars.into_iter().rev().collect()
}
#[cfg(test)]
mod tests {
use crate::tests::{check_assist_by_label, check_assist_not_applicable, check_assist_target};
use super::*;
#[test]
fn group_separators() {
let cases = vec![
("", 4, ""),
("1", 4, "1"),
("12", 4, "12"),
("123", 4, "123"),
("1234", 4, "1234"),
("12345", 4, "1_2345"),
("123456", 4, "12_3456"),
("1234567", 4, "123_4567"),
("12345678", 4, "1234_5678"),
("123456789", 4, "1_2345_6789"),
("1234567890", 4, "12_3456_7890"),
("1_2_3_4_5_6_7_8_9_0_", 4, "12_3456_7890"),
("1234567890", 3, "1_234_567_890"),
("1234567890", 2, "12_34_56_78_90"),
("1234567890", 1, "1_2_3_4_5_6_7_8_9_0"),
];
for case in cases {
let (input, group_size, expected) = case;
assert_eq!(add_group_separators(input, group_size), expected)
}
}
#[test]
fn good_targets() {
let cases = vec![
("const _: i32 = 0b11111$0", "0b11111"),
|
("const _: i32 = 10000i32$0;", "10000i32"),
("const _: i32 = 0b_10_0i32$0;", "0b_10_0i32"),
];
for case in cases {
check_assist_target(reformat_number_literal, case.0, case.1);
}
}
#[test]
fn bad_targets() {
let cases = vec![
"const _: i32 = 0b111$0",
"const _: i32 = 0b1111$0",
"const _: i32 = 0o77$0;",
"const _: i32 = 0o777$0;",
"const _: i32 = 10$0;",
"const _: i32 = 999$0;",
"const _: i32 = 0xFF$0;",
"const _: i32 = 0xFFFF$0;",
];
for case in cases {
check_assist_not_applicable(reformat_number_literal, case);
}
}
#[test]
fn labels() {
let cases = vec![
("const _: i32 = 10000$0", "const _: i32 = 10_000", "Convert 10000 to 10_000"),
(
"const _: i32 = 0xFF0000$0;",
"const _: i32 = 0xFF_0000;",
"Convert 0xFF0000 to 0xFF_0000",
),
(
"const _: i32 = 0b11111111$0;",
"const _: i32 = 0b1111_1111;",
"Convert 0b11111111 to 0b1111_1111",
),
(
"const _: i32 = 0o377211$0;",
"const _: i32 = 0o377_211;",
"Convert 0o377211 to 0o377_211",
),
(
"const _: i32 = 10000i32$0;",
"const _: i32 = 10_000i32;",
"Convert 10000i32 to 10_000i32",
),
("const _: i32 = 1_0_0_0_i32$0;", "const _: i32 = 1000i32;", "Remove digit separators"),
];
for case in cases {
let (before, after, label) = case;
check_assist_by_label(reformat_number_literal, before, after, label);
}
}
}
|
("const _: i32 = 0o77777$0;", "0o77777"),
("const _: i32 = 10000$0;", "10000"),
("const _: i32 = 0xFFFFF$0;", "0xFFFFF"),
|
api.go
|
package client
import (
"time"
"github.com/gmlewis/alpaca-trade-api-go/alpaca"
"github.com/gmlewis/alpaca-trade-api-go/polygon"
)
// Alpaca v2
func (c *Client) GetAccount() (*alpaca.Account, error) {
return c.AClient.GetAccount()
}
func (c *Client) GetAccountConfigurations() (*alpaca.AccountConfigurations, error) {
return c.AClient.GetAccountConfigurations()
}
func (c *Client) UpdateAccountConfigurations(newConfigs alpaca.AccountConfigurationsRequest) (*alpaca.AccountConfigurations, error) {
return c.AClient.UpdateAccountConfigurations(newConfigs)
}
func (c *Client) GetAccountActivities(activityType *string, opts *alpaca.AccountActivitiesRequest) ([]alpaca.AccountActvity, error) {
|
}
func (c *Client) ListPositions() ([]alpaca.Position, error) {
return c.AClient.ListPositions()
}
func (c *Client) GetPosition(symbol string) (*alpaca.Position, error) {
return c.AClient.GetPosition(symbol)
}
func (c *Client) GetAggregates(symbol, timespan, from, to string) (*alpaca.Aggregates, error) {
return c.AClient.GetAggregates(symbol, timespan, from, to)
}
func (c *Client) GetLastQuote(symbol string) (*alpaca.LastQuoteResponse, error) {
return c.AClient.GetLastQuote(symbol)
}
func (c *Client) GetLastTrade(symbol string) (*alpaca.LastTradeResponse, error) {
return c.AClient.GetLastTrade(symbol)
}
func (c *Client) CloseAllPositions() error {
return c.AClient.CloseAllPositions()
}
func (c *Client) ClosePosition(symbol string) error {
return c.AClient.ClosePosition(symbol)
}
func (c *Client) GetClock() (*alpaca.Clock, error) {
return c.AClient.GetClock()
}
func (c *Client) GetCalendar(start, end *string) ([]alpaca.CalendarDay, error) {
return c.AClient.GetCalendar(start, end)
}
func (c *Client) ListOrders(opts alpaca.ListOrdersOptions) ([]alpaca.Order, error) {
return c.AClient.ListOrders(opts)
}
func (c *Client) PlaceOrder(req alpaca.PlaceOrderRequest) (*alpaca.Order, error) {
return c.AClient.PlaceOrder(req)
}
func (c *Client) GetOrder(orderID string) (*alpaca.Order, error) {
return c.AClient.GetOrder(orderID)
}
func (c *Client) ReplaceOrder(orderID string, req alpaca.ReplaceOrderRequest) (*alpaca.Order, error) {
return c.AClient.ReplaceOrder(orderID, req)
}
func (c *Client) CancelOrder(orderID string) error {
return c.AClient.CancelOrder(orderID)
}
func (c *Client) CancelAllOrders() error {
return c.AClient.CancelAllOrders()
}
func (c *Client) ListAssets(status *string) ([]alpaca.Asset, error) {
return c.AClient.ListAssets(status)
}
func (c *Client) GetAsset(symbol string) (*alpaca.Asset, error) {
return c.AClient.GetAsset(symbol)
}
func (c *Client) ListBars(symbols []string, opts alpaca.ListBarParams) (map[string][]alpaca.Bar, error) {
return c.AClient.ListBars(symbols, opts)
}
func (c *Client) GetSymbolBars(symbol string, opts alpaca.ListBarParams) ([]alpaca.Bar, error) {
return c.AClient.GetSymbolBars(symbol, opts)
}
// Polygon v2
func (c *Client) GetHistoricAggregatesV2(
symbol string,
multiplier int,
resolution polygon.AggType,
from, to *time.Time,
unadjusted *bool) (*polygon.HistoricAggregatesV2, error) {
return c.PClient.GetHistoricAggregatesV2(symbol,
multiplier,
resolution,
from, to,
unadjusted)
}
func (c *Client) GetHistoricTradesV2(ticker string, date string, opts *polygon.HistoricTicksV2Params) (*polygon.HistoricTradesV2, error) {
return c.PClient.GetHistoricTradesV2(ticker, date, opts)
}
func (c *Client) GetHistoricQuotesV2(ticker string, date string, opts *polygon.HistoricTicksV2Params) (*polygon.HistoricQuotesV2, error) {
return c.PClient.GetHistoricQuotesV2(ticker, date, opts)
}
func (c *Client) GetStockExchanges() ([]polygon.StockExchange, error) {
return c.PClient.GetStockExchanges()
}
func (c *Client) GetPreviousClose(symbol string) (*polygon.PreviousCloseV2, error) {
return c.PClient.GetPreviousClose(symbol)
}
|
return c.AClient.GetAccountActivities(activityType, opts)
}
func (c *Client) GetPortfolioHistory(period *string, timeframe *alpaca.RangeFreq, dateEnd *time.Time, extendedHours bool) (*alpaca.PortfolioHistory, error) {
return c.AClient.GetPortfolioHistory(period, timeframe, dateEnd, extendedHours)
|
set_test.go
|
package set
import (
"bytes"
"errors"
"testing"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
k8serr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
kubetesting "k8s.io/client-go/testing"
"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
fakeroclient "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake"
options "github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/options/fake"
)
func TestSetCmdUsage(t *testing.T) {
tf, o := options.NewFakeArgoRolloutsOptions()
defer tf.Cleanup()
cmd := NewCmdSet(o)
cmd.PersistentPreRunE = o.PersistentPreRunE
cmd.SetArgs([]string{})
err := cmd.Execute()
assert.Error(t, err)
stdout := o.Out.(*bytes.Buffer).String()
stderr := o.ErrOut.(*bytes.Buffer).String()
assert.Empty(t, stdout)
assert.Contains(t, stderr, "Usage:")
assert.Contains(t, stderr, "set COMMAND")
}
func TestSetImageCmdUsage(t *testing.T) {
tf, o := options.NewFakeArgoRolloutsOptions()
defer tf.Cleanup()
cmd := NewCmdSetImage(o)
cmd.PersistentPreRunE = o.PersistentPreRunE
for _, args := range [][]string{
{},
{"guestbook"},
{"guestbook", "forgot-equals-sign"},
{"guestbook", "too=many=equals=signs"},
} {
cmd.SetArgs(args)
err := cmd.Execute()
assert.Error(t, err)
stdout := o.Out.(*bytes.Buffer).String()
stderr := o.ErrOut.(*bytes.Buffer).String()
assert.Empty(t, stdout)
assert.Contains(t, stderr, "Usage:")
assert.Contains(t, stderr, "image ROLLOUT")
}
}
func TestSetImageCmd(t *testing.T) {
ro := v1alpha1.Rollout{
ObjectMeta: metav1.ObjectMeta{
Name: "guestbook",
Namespace: metav1.NamespaceDefault,
},
Spec: v1alpha1.RolloutSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
InitContainers: []corev1.Container{
{
Name: "guestbook",
Image: "argoproj/rollouts-demo:blue",
},
},
Containers: []corev1.Container{
{
Name: "foo",
Image: "alpine:3.8",
},
{
Name: "guestbook",
Image: "argoproj/rollouts-demo:blue",
},
{
Name: "bar",
Image: "alpine:3.8",
},
},
EphemeralContainers: []corev1.EphemeralContainer{
{
EphemeralContainerCommon: corev1.EphemeralContainerCommon{
Name: "guestbook",
Image: "argoproj/rollouts-demo:blue",
},
},
},
},
},
},
}
tf, o := options.NewFakeArgoRolloutsOptions(&ro)
defer tf.Cleanup()
cmd := NewCmdSetImage(o)
cmd.PersistentPreRunE = o.PersistentPreRunE
cmd.SetArgs([]string{"guestbook", "guestbook=argoproj/rollouts-demo:NEWIMAGE"})
err := cmd.Execute()
assert.Nil(t, err)
modifiedRo, err := o.RolloutsClientset().ArgoprojV1alpha1().Rollouts(metav1.NamespaceDefault).Get(ro.Name, metav1.GetOptions{})
assert.NoError(t, err)
assert.Equal(t, "argoproj/rollouts-demo:NEWIMAGE", modifiedRo.Spec.Template.Spec.Containers[1].Image)
assert.Equal(t, "alpine:3.8", modifiedRo.Spec.Template.Spec.Containers[0].Image)
assert.Equal(t, "alpine:3.8", modifiedRo.Spec.Template.Spec.Containers[2].Image)
assert.Equal(t, "argoproj/rollouts-demo:NEWIMAGE", modifiedRo.Spec.Template.Spec.InitContainers[0].Image)
assert.Equal(t, "argoproj/rollouts-demo:NEWIMAGE", modifiedRo.Spec.Template.Spec.EphemeralContainers[0].Image)
stdout := o.Out.(*bytes.Buffer).String()
stderr := o.ErrOut.(*bytes.Buffer).String()
assert.Equal(t, stdout, "rollout \"guestbook\" image updated\n")
assert.Empty(t, stderr)
}
func TestSetImageCmdStar(t *testing.T) {
ro := v1alpha1.Rollout{
ObjectMeta: metav1.ObjectMeta{
Name: "guestbook",
Namespace: metav1.NamespaceDefault,
},
Spec: v1alpha1.RolloutSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
InitContainers: []corev1.Container{
{
Name: "guestbook",
Image: "argoproj/rollouts-demo:blue",
},
},
Containers: []corev1.Container{
{
Name: "foo",
Image: "alpine:3.8",
},
{
Name: "guestbook",
Image: "argoproj/rollouts-demo:blue",
},
{
Name: "bar",
Image: "alpine:3.8",
},
},
EphemeralContainers: []corev1.EphemeralContainer{
{
EphemeralContainerCommon: corev1.EphemeralContainerCommon{
Name: "guestbook",
Image: "argoproj/rollouts-demo:blue",
},
},
},
},
},
},
}
tf, o := options.NewFakeArgoRolloutsOptions(&ro)
defer tf.Cleanup()
cmd := NewCmdSetImage(o)
cmd.PersistentPreRunE = o.PersistentPreRunE
cmd.SetArgs([]string{"guestbook", "*=argoproj/rollouts-demo:NEWIMAGE"})
err := cmd.Execute()
assert.Nil(t, err)
modifiedRo, err := o.RolloutsClientset().ArgoprojV1alpha1().Rollouts(metav1.NamespaceDefault).Get(ro.Name, metav1.GetOptions{})
assert.NoError(t, err)
assert.Equal(t, "argoproj/rollouts-demo:NEWIMAGE", modifiedRo.Spec.Template.Spec.Containers[1].Image)
assert.Equal(t, "argoproj/rollouts-demo:NEWIMAGE", modifiedRo.Spec.Template.Spec.Containers[0].Image)
assert.Equal(t, "argoproj/rollouts-demo:NEWIMAGE", modifiedRo.Spec.Template.Spec.Containers[2].Image)
assert.Equal(t, "argoproj/rollouts-demo:NEWIMAGE", modifiedRo.Spec.Template.Spec.InitContainers[0].Image)
assert.Equal(t, "argoproj/rollouts-demo:NEWIMAGE", modifiedRo.Spec.Template.Spec.EphemeralContainers[0].Image)
stdout := o.Out.(*bytes.Buffer).String()
stderr := o.ErrOut.(*bytes.Buffer).String()
assert.Equal(t, stdout, "rollout \"guestbook\" image updated\n")
assert.Empty(t, stderr)
}
func TestSetImageCmdRolloutNotFound(t *testing.T) {
tf, o := options.NewFakeArgoRolloutsOptions()
defer tf.Cleanup()
cmd := NewCmdSetImage(o)
cmd.PersistentPreRunE = o.PersistentPreRunE
cmd.SetArgs([]string{"does-not-exist", "guestbook=argoproj/rollouts-demo:yellow"})
err := cmd.Execute()
assert.Error(t, err)
stdout := o.Out.(*bytes.Buffer).String()
stderr := o.ErrOut.(*bytes.Buffer).String()
assert.Empty(t, stdout)
assert.Equal(t, "Error: rollouts.argoproj.io \"does-not-exist\" not found\n", stderr)
}
func TestSetImageCmdContainerNotFound(t *testing.T) {
ro := v1alpha1.Rollout{
ObjectMeta: metav1.ObjectMeta{
Name: "guestbook",
Namespace: metav1.NamespaceDefault,
},
Spec: v1alpha1.RolloutSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "guestbook",
Image: "argoproj/rollouts-demo:blue",
},
},
},
},
},
}
tf, o := options.NewFakeArgoRolloutsOptions(&ro)
defer tf.Cleanup()
cmd := NewCmdSetImage(o)
cmd.PersistentPreRunE = o.PersistentPreRunE
cmd.SetArgs([]string{"guestbook", "typo=argoproj/rollouts-demo:yellow"})
err := cmd.Execute()
assert.Error(t, err)
stdout := o.Out.(*bytes.Buffer).String()
stderr := o.ErrOut.(*bytes.Buffer).String()
assert.Empty(t, stdout)
assert.Equal(t, "Error: unable to find container named \"typo\"\n", stderr)
}
func TestSetImageConflict(t *testing.T)
|
{
ro := v1alpha1.Rollout{
ObjectMeta: metav1.ObjectMeta{
Name: "guestbook",
Namespace: metav1.NamespaceDefault,
},
Spec: v1alpha1.RolloutSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "foo",
Image: "alpine:3.8",
},
{
Name: "guestbook",
Image: "argoproj/rollouts-demo:blue",
},
},
},
},
},
}
tf, o := options.NewFakeArgoRolloutsOptions(&ro)
defer tf.Cleanup()
updateCalls := 0
fakeClient := o.RolloutsClient.(*fakeroclient.Clientset)
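	// Make the first update fail with a conflict so the command's retry-on-conflict
	// path is exercised; subsequent updates succeed.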
fakeClient.PrependReactor("update", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if updateCalls > 0 {
return true, &ro, nil
}
updateCalls++
return true, nil, k8serr.NewConflict(schema.GroupResource{}, "guestbook", errors.New("intentional-error"))
})
cmd := NewCmdSetImage(o)
cmd.PersistentPreRunE = o.PersistentPreRunE
cmd.SetArgs([]string{"guestbook", "guestbook=argoproj/rollouts-demo:yellow"})
err := cmd.Execute()
assert.Nil(t, err)
stdout := o.Out.(*bytes.Buffer).String()
stderr := o.ErrOut.(*bytes.Buffer).String()
assert.Equal(t, stdout, "rollout \"guestbook\" image updated\n")
assert.Empty(t, stderr)
assert.True(t, updateCalls > 0)
}
|
|
counters.js
|
import React from 'react'
import SEO from "../../components/seo"
import Layout from '../../containers/layout/layout'
|
import Footer from '../../containers/layout/footer/footer-one'
import PageHeader from '../../components/pageheader'
import CTA from '../../containers/global/cta-area/section-one'
import SectionOne from '../../containers/elements/counters/section-one'
import SectionTwo from '../../containers/elements/counters/section-two'
import SectionThree from '../../containers/elements/counters/section-three'
const ButtonPage = ({ pageContext, location }) => {
return (
<Layout location={location}>
<SEO title="Counters" />
<Header/>
<PageHeader
pageContext={pageContext}
location={location}
title="Counters"
/>
<main className="site-wrapper-reveal">
<SectionOne/>
<SectionTwo/>
<SectionThree/>
<CTA/>
</main>
<Footer/>
</Layout>
)
}
export default ButtonPage
|
import Header from '../../containers/layout/header/header-one'
|
wsgi.py
|
"""WSGI File that enables Apache/GUnicorn to run Django"""
# pylint: disable=C0103
import os
import sys
from django.core.wsgi import get_wsgi_application
sys.path.insert(0, os.path.abspath(os.path.join(os.path.abspath(os.pardir), os.pardir)))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__)))))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'termsandconditions_demo.settings')
|
application = get_wsgi_application()
|
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
|
manifest.go
|
// Onix Config Manager - Dbman
// Copyright (c) 2018-Present by www.gatblau.org
// Licensed under the
// Contributors to this project, hereby assign copyright in this code to the project,
// to be licensed under the same terms as the rest of the code.
package plugin
import (
"bytes"
"encoding/json"
"github.com/gatblau/onix/oxlib/oxc"
"gopkg.in/yaml.v3"
"strings"
"time"
)
// Manifest a database manifest containing the metadata required by DbMan
// to execute commands and queries
type Manifest struct {
// the database release version
DbVersion string `json:"dbVersion"`
// the release description
Description string `json:"description,omitempty"`
// the path to where the command scripts are (if not specified use the root of the release)
CommandsPath string `json:"commandsPath,omitempty"`
// the path to where the query scripts are (if not specified use the root of the release)
QueriesPath string `json:"queriesPath,omitempty"`
// the database provider to use
DbProvider string `json:"dbProvider"`
// the list of commands available to execute
Commands []Command `json:"commands"`
// the list of commands required to create the database in the first place
Create Action `json:"create"`
// the list of commands required to deploy the database objects on an empty database
Deploy Action `json:"deploy"`
// the list of commands required to upgrade an existing database
Upgrade Upgrade `json:"upgrade"`
// the list of queries available to execute
Queries []Query `json:"queries"`
}
// Action a database action containing either other sub-actions or commands
type Action struct {
// the description for the command
Description string `json:"description"`
// the list of actions that comprise the command
Actions []string `json:"actions,omitempty"`
// the list of sub commands that comprise this command (if any)
Commands []string `json:"commands,omitempty"`
}
// Command a set of scripts that must be executed within the same database connection
type Command struct {
// the command identifiable name
Name string `json:"name"`
// the description for the action
Description string `json:"description"`
// whether to run this action within a database transaction
Transactional bool `json:"transactional"`
// whether to connect to the database as an Admin to execute this action
AsAdmin bool `json:"asAdmin"`
// whether to connect to the database being managed or simply connect to the server with no specific database
UseDb bool `json:"useDb"`
// the list of database scripts that will be executed as part of this action
Scripts []Script `json:"scripts"`
}
// NewCommand creates a new command from a serialised json string
func NewCommand(jsonString string) (*Command, error) {
c := &Command{}
err := json.Unmarshal([]byte(jsonString), c)
return c, err
}
func (c *Command) ToString() string {
bytes, e := json.Marshal(c)
if e != nil {
return ""
}
return string(bytes)
}
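// Illustrative usage sketch, not part of the original DbMan source: it shows a
// Command being reconstructed from a hypothetical serialised JSON string and
// re-serialised with ToString. The command name and fields are placeholders.
func exampleCommandRoundTrip() (*Command, string, error) {
	cmd, err := NewCommand(`{"name":"create-tables","description":"creates the schema","transactional":true}`)
	if err != nil {
		return nil, "", err
	}
	// ToString returns the re-serialised JSON, or "" if marshalling failed
	return cmd, cmd.ToString(), nil
}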
// a database script and zero or more merge variables
type Script struct {
// the script identifiable name
Name string `json:"name"`
// the script file name in the git repository
File string `json:"file"`
// a list of variables to be merged with the script prior to execution
Vars []Var `json:"vars"`
// the content of the script file
// note: it is internal and automatically populated at runtime from the git repository
Content string `json:"content,omitempty"`
}
func (c *Script) All() map[string]interface{} {
m := map[string]interface{}{}
m["name"] = c.Name
m["file"] = c.File
m["content"] = c.Content
return m
}
// a merge variable for a script
type Var struct {
	// the name of the merge variable used as a placeholder for merging within the script
Name string `json:"name"`
Description string `json:"description,omitempty" yaml:"description,omitempty"`
// the name of the variable to be merged from DbMan's current configuration set
// note: not used if omitted
FromConf string `json:"fromConf,omitempty" yaml:"fromConf,omitempty"`
// the value of the variable, if it is to be merged directly
// note: not used if omitted
FromValue string `json:"fromValue,omitempty" yaml:"fromValue,omitempty"`
// the name of the variable to be merged from the run context
// available values are dbVersion, appVersion, description
// note: this is primarily intended for updating the version tracking table
// not used if omitted
FromContext string `json:"fromContext,omitempty" yaml:"fromContext,omitempty"`
// the name of the input parameter
	// allows query parameters to be passed via the command line or query string
FromInput string `json:"fromInput,omitempty" yaml:"fromInput,omitempty"`
}
func NewVersion(jsonString string) (*Version, error) {
v := &Version{}
err := json.Unmarshal([]byte(jsonString), v)
return v, err
}
// carries version information
type Version struct {
// the application version
AppVersion string `json:"appVersion"`
// the database version
DbVersion string `json:"dbVersion"`
	// the release description
	Description string `json:"description"`
// the name of the query to retrieve the version history
Source string `json:"source"`
// the time the version was released
Time time.Time `json:"time"`
}
func (v *Version) ToString() string {
bytes, e := json.Marshal(v)
if e != nil {
panic(e)
}
return string(bytes)
}
// Query a database query
type Query struct {
// the identifiable name for the query
Name string `json:"name"`
// the description for the query
Description string `json:"description,omitempty" yaml:"description,omitempty"`
// the name of the script file to be executed by the query
File string `json:"file,omitempty" yaml:"file,omitempty"`
// a list of variables to merge with the query
Vars []Var `json:"vars,omitempty" yaml:"vars,omitempty"`
// the content of the script file
// note: it is internal and automatically populated at runtime from the git repository
Content string `json:"content,omitempty" yaml:"content,omitempty"`
}
// NewQuery creates a new query from a serialised json string
func NewQuery(jsonString string) (*Query, error)
|
func (q *Query) ToString() string {
bytes, e := json.Marshal(q)
if e != nil {
return ""
}
return string(bytes)
}
// the commands to run at different stages in an upgrade
type Upgrade struct {
Description string `json:"description"`
Prepare string `json:"prepare"`
Alter string `json:"alter"`
Deploy string `json:"deploy"`
}
// json returns a JSON bytes reader for the Manifest
func (m *Manifest) json() (*bytes.Reader, error) {
jsonBytes, err := m.bytes()
if err != nil {
return nil, err
}
return bytes.NewReader(*jsonBytes), err
}
// bytes returns a []byte representation of the Manifest
func (m *Manifest) bytes() (*[]byte, error) {
b, err := oxc.ToJson(m)
return &b, err
}
// Decode unmarshals a Manifest from raw content, such as an HTTP response body
func (m *Manifest) Decode(content []byte) (*Manifest, error) {
result := new(Manifest)
err := json.NewDecoder(bytes.NewReader(content)).Decode(result)
return result, err
}
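// Illustrative sketch, not part of the original source: decoding a manifest
// from raw JSON content, for example the body of an HTTP response.
func exampleDecodeManifest(content []byte) (*Manifest, error) {
	return new(Manifest).Decode(content)
}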
func (m *Manifest) getCommand(cmdName string) *Command {
for _, cmd := range m.Commands {
if cmdName == cmd.Name {
return &cmd
}
}
return nil
}
func (m *Manifest) findCommands(action *Action) ([]Command, error) {
var commands []Command
for _, cmdName := range action.Commands {
for _, cmd := range m.Commands {
if cmd.Name == cmdName {
commands = append(commands, cmd)
}
}
}
return commands, nil
}
// GetQuery find the query by name
func (m *Manifest) GetQuery(queryName string) *Query {
for _, query := range m.Queries {
if query.Name == queryName {
return &query
}
}
return nil
}
// GetQueriesInfo returns a string containing query information in the manifest
func (m *Manifest) GetQueriesInfo(format string, verbose bool) string {
// make a copy
queries := make([]Query, len(m.Queries))
if verbose {
queries = m.Queries
} else {
// clean unneeded info
for ix, query := range m.Queries {
queries[ix] = query
queries[ix].Content = ""
queries[ix].File = ""
queries[ix].Vars = nil
}
}
switch strings.ToLower(format) {
case "json":
{
bytes, err := json.Marshal(queries)
if err != nil {
return "!!! I cannot convert result into JSON\n"
}
return string(bytes)
}
default:
{
bytes, err := yaml.Marshal(queries)
if err != nil {
return "!!! I cannot convert result into YAML\n"
}
return string(bytes)
}
}
}
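// Illustrative sketch, not part of the original source: looking up a single
// query by name and producing a JSON summary of all queries without the
// verbose file/content/variable detail. The query name is a placeholder.
func exampleQueryInfo(m *Manifest) (*Query, string) {
	q := m.GetQuery("db-version") // nil if the manifest declares no such query
	summary := m.GetQueriesInfo("json", false)
	return q, summary
}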
func (m *Manifest) GetCommands(commandNames []string) []Command {
result := make([]Command, 0)
for _, cmdName := range commandNames {
for _, command := range m.Commands {
if command.Name == cmdName {
result = append(result, command)
}
}
}
return result
}
|
{
q := &Query{}
err := json.Unmarshal([]byte(jsonString), q)
return q, err
}
|
api_client.go
|
// Code generated by smithy-go-codegen DO NOT EDIT.
package mediaconnect
import (
"context"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/retry"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
smithy "github.com/aws/smithy-go"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
"net/http"
"time"
)
const ServiceID = "MediaConnect"
const ServiceAPIVersion = "2018-11-14"
// Client provides the API client to make operations call for AWS MediaConnect.
type Client struct {
options Options
}
// New returns an initialized Client based on the functional options. Provide
// additional functional options to further configure the behavior of the client,
// such as changing the client's endpoint or adding custom middleware behavior.
func New(options Options, optFns ...func(*Options)) *Client {
options = options.Copy()
resolveDefaultLogger(&options)
resolveRetryer(&options)
resolveHTTPClient(&options)
resolveHTTPSignerV4(&options)
resolveDefaultEndpointConfiguration(&options)
for _, fn := range optFns {
fn(&options)
}
client := &Client{
options: options,
}
return client
}
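// Illustrative sketch, not part of the generated code: constructing a client
// from a base Options value plus a functional option override. The region and
// log mode below are placeholders.
func exampleNewWithOptions() *Client {
	return New(Options{Region: "us-east-1"}, func(o *Options) {
		// applied after the default resolvers above have run
		o.ClientLogMode = aws.LogRetries
	})
}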
type Options struct {
// Set of options to modify how an operation is invoked. These apply to all
// operations invoked for this client. Use functional options on operation call to
// modify this list for per operation behavior.
APIOptions []func(*middleware.Stack) error
// Configures the events that will be sent to the configured logger.
ClientLogMode aws.ClientLogMode
// The credentials object to use when signing requests.
Credentials aws.CredentialsProvider
// The endpoint options to be used when attempting to resolve an endpoint.
EndpointOptions EndpointResolverOptions
// The service endpoint resolver.
EndpointResolver EndpointResolver
// Signature Version 4 (SigV4) Signer
HTTPSignerV4 HTTPSignerV4
// The logger writer interface to write logging messages to.
Logger logging.Logger
// The region to send requests to. (Required)
Region string
// Retryer guides how HTTP requests should be retried in case of recoverable
// failures. When nil the API client will use a default retryer.
Retryer aws.Retryer
// The HTTP client to invoke API calls with. Defaults to client's default HTTP
// implementation if nil.
HTTPClient HTTPClient
}
// WithAPIOptions returns a functional option for setting the Client's APIOptions
// option.
func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
return func(o *Options) {
o.APIOptions = append(o.APIOptions, optFns...)
}
}
type HTTPClient interface {
Do(*http.Request) (*http.Response, error)
}
// Copy creates a clone where the APIOptions list is deep copied.
func (o Options) Copy() Options {
to := o
to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
copy(to.APIOptions, o.APIOptions)
return to
}
func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) {
ctx = middleware.ClearStackValues(ctx)
stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
options := c.options.Copy()
for _, fn := range optFns {
fn(&options)
}
for _, fn := range stackFns {
if err := fn(stack, options); err != nil {
return nil, metadata, err
}
}
for _, fn := range options.APIOptions {
if err := fn(stack); err != nil {
return nil, metadata, err
}
}
handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
result, metadata, err = handler.Handle(ctx, params)
if err != nil {
err = &smithy.OperationError{
ServiceID: ServiceID,
OperationName: opID,
Err: err,
}
}
return result, metadata, err
}
func resolveDefaultLogger(o *Options) {
if o.Logger != nil {
return
}
o.Logger = logging.Nop{}
}
func
|
(stack *middleware.Stack, o Options) error {
return middleware.AddSetLoggerMiddleware(stack, o.Logger)
}
// NewFromConfig returns a new client from the provided config.
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
opts := Options{
Region: cfg.Region,
HTTPClient: cfg.HTTPClient,
Credentials: cfg.Credentials,
APIOptions: cfg.APIOptions,
Logger: cfg.Logger,
ClientLogMode: cfg.ClientLogMode,
}
resolveAWSRetryerProvider(cfg, &opts)
resolveAWSEndpointResolver(cfg, &opts)
return New(opts, optFns...)
}
func resolveHTTPClient(o *Options) {
if o.HTTPClient != nil {
return
}
o.HTTPClient = awshttp.NewBuildableClient()
}
func resolveRetryer(o *Options) {
if o.Retryer != nil {
return
}
o.Retryer = retry.NewStandard()
}
func resolveAWSRetryerProvider(cfg aws.Config, o *Options) {
if cfg.Retryer == nil {
return
}
o.Retryer = cfg.Retryer()
}
func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
if cfg.EndpointResolver == nil {
return
}
o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, NewDefaultEndpointResolver())
}
func addClientUserAgent(stack *middleware.Stack) error {
return awsmiddleware.AddRequestUserAgentMiddleware(stack)
}
func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error {
mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{
CredentialsProvider: o.Credentials,
Signer: o.HTTPSignerV4,
LogSigning: o.ClientLogMode.IsSigning(),
})
return stack.Finalize.Add(mw, middleware.After)
}
type HTTPSignerV4 interface {
SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error
}
func resolveHTTPSignerV4(o *Options) {
if o.HTTPSignerV4 != nil {
return
}
o.HTTPSignerV4 = newDefaultV4Signer(*o)
}
func newDefaultV4Signer(o Options) *v4.Signer {
return v4.NewSigner(func(so *v4.SignerOptions) {
so.Logger = o.Logger
so.LogSigning = o.ClientLogMode.IsSigning()
})
}
func addRetryMiddlewares(stack *middleware.Stack, o Options) error {
mo := retry.AddRetryMiddlewaresOptions{
Retryer: o.Retryer,
LogRetryAttempts: o.ClientLogMode.IsRetries(),
}
return retry.AddRetryMiddlewares(stack, mo)
}
func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
return awsmiddleware.AddRequestIDRetrieverMiddleware(stack)
}
func addResponseErrorMiddleware(stack *middleware.Stack) error {
return awshttp.AddResponseErrorMiddleware(stack)
}
func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
LogRequest: o.ClientLogMode.IsRequest(),
LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(),
LogResponse: o.ClientLogMode.IsResponse(),
LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(),
}, middleware.After)
}
|
addSetLoggerMiddleware
|
regular_platform.rs
|
use crate::compiler::Compiler;
use crate::config::PlatformConfiguration;
use crate::overlay::Overlayer;
use crate::platform;
use crate::project::Project;
use crate::toolchain::ToolchainConfig;
use crate::Build;
use crate::BuildArgs;
use crate::Device;
use crate::Platform;
use crate::Result;
use dinghy_build::build_env::set_all_env;
use std::fmt::{Debug, Display, Formatter};
use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use std::sync::Arc;
use anyhow::Context;
pub struct RegularPlatform {
compiler: Arc<Compiler>,
pub configuration: PlatformConfiguration,
pub id: String,
pub toolchain: ToolchainConfig,
}
impl Debug for RegularPlatform {
fn fmt(&self, fmt: &mut Formatter) -> ::std::fmt::Result {
write!(fmt, "{}", self.id)
}
}
impl RegularPlatform {
pub fn new<P: AsRef<Path>>(
compiler: &Arc<Compiler>,
configuration: PlatformConfiguration,
id: String,
rustc_triple: String,
toolchain_path: P,
) -> Result<Box<dyn Platform>> {
if let Some(prefix) = configuration.deb_multiarch.clone() {
return Ok(Box::new(RegularPlatform {
compiler: compiler.clone(),
configuration,
id,
toolchain: ToolchainConfig {
bin_dir: "/usr/bin".into(),
rustc_triple,
root: "/".into(),
sysroot: "/".into(),
cc: "gcc".to_string(),
binutils_prefix: prefix.clone(),
cc_prefix: prefix.clone(),
},
}));
}
let toolchain_path = toolchain_path.as_ref();
let toolchain_bin_path = toolchain_path.join("bin");
let mut bin: Option<PathBuf> = None;
let mut prefix: Option<String> = None;
for file in toolchain_bin_path.read_dir().with_context(|| {
format!(
"Couldn't find toolchain directory {}",
toolchain_path.display()
)
})? {
let file = file?;
if file.file_name().to_string_lossy().ends_with("-gcc")
|| file.file_name().to_string_lossy().ends_with("-gcc.exe")
{
bin = Some(toolchain_bin_path);
prefix = Some(
file.file_name()
.to_string_lossy()
.replace(".exe", "")
.replace("-gcc", ""),
);
break;
}
}
let bin_dir = bin.ok_or_else(|| anyhow!("no bin/*-gcc found in toolchain"))?;
let tc_triple = prefix
.ok_or_else(|| anyhow!("no gcc in toolchain"))?
.to_string();
let sysroot = find_sysroot(&toolchain_path)?;
let toolchain = ToolchainConfig {
bin_dir,
rustc_triple,
root: toolchain_path.into(),
sysroot,
cc: "gcc".to_string(),
binutils_prefix: tc_triple.clone(),
cc_prefix: tc_triple,
};
Self::new_with_tc(compiler.clone(), configuration, id, toolchain)
}
pub fn new_with_tc(
compiler: Arc<Compiler>,
configuration: PlatformConfiguration,
id: String,
toolchain: ToolchainConfig,
) -> Result<Box<dyn Platform>> {
Ok(Box::new(RegularPlatform {
compiler,
configuration,
id,
toolchain,
}))
}
}
impl Display for RegularPlatform {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::result::Result<(), ::std::fmt::Error>
|
}
impl Platform for RegularPlatform {
fn build(&self, project: &Project, build_args: &BuildArgs) -> Result<Build> {
// Cleanup environment
set_all_env(&[("LIBRARY_PATH", ""), ("LD_LIBRARY_PATH", "")]);
// Set custom env variables specific to the platform
set_all_env(&self.configuration.env());
Overlayer::overlay(&self.configuration, self, project, &self.toolchain.sysroot)?;
self.toolchain
.setup_cc(&self.id, &self.toolchain.cc_executable(&self.toolchain.cc))?;
if Path::new(&self.toolchain.binutils_executable("ar")).exists() {
self.toolchain
.setup_tool("AR", &self.toolchain.binutils_executable("ar"))?;
}
if Path::new(&self.toolchain.binutils_executable("as")).exists() {
self.toolchain
.setup_tool("AS", &self.toolchain.binutils_executable("as"))?;
}
if Path::new(&self.toolchain.binutils_executable("c++")).exists() {
self.toolchain
.setup_tool("CXX", &self.toolchain.cc_executable("c++"))?;
}
if Path::new(&self.toolchain.cc_executable("cpp")).exists() {
self.toolchain
.setup_tool("CPP", &self.toolchain.cc_executable("cpp"))?;
}
if Path::new(&self.toolchain.binutils_executable("gfortran")).exists() {
self.toolchain
.setup_tool("FC", &self.toolchain.binutils_executable("gfortran"))?;
}
trace!("Setup linker...");
let mut linker_cmd = self.toolchain.cc_executable(&*self.toolchain.cc);
linker_cmd.push_str(" ");
if build_args.verbose {
linker_cmd.push_str("-Wl,--verbose -v")
}
linker_cmd.push_str(&format!(" --sysroot {}", self.toolchain.sysroot.display()));
for forced_overlay in &build_args.forced_overlays {
linker_cmd.push_str(" -l");
linker_cmd.push_str(&forced_overlay);
// TODO Add -L
}
self.toolchain.setup_linker(&self.id, &linker_cmd)?;
trace!("Setup pkg-config");
self.toolchain.setup_pkg_config()?;
trace!("Setup sysroot...");
self.toolchain.setup_sysroot();
trace!("Setup shims...");
self.toolchain.shim_executables(&self.id)?;
trace!("Internally invoke cargo");
self.compiler.build(self.rustc_triple(), &build_args)
}
fn id(&self) -> String {
self.id.clone()
}
fn is_compatible_with(&self, device: &dyn Device) -> bool {
device.is_compatible_with_regular_platform(self)
}
fn rustc_triple(&self) -> Option<&str> {
Some(&self.toolchain.rustc_triple)
}
fn strip(&self, build: &Build) -> Result<()> {
for runnable in &build.runnables {
platform::strip_runnable(
runnable,
Command::new(self.toolchain.binutils_executable("strip")),
)?;
}
Ok(())
}
}
fn find_sysroot<P: AsRef<Path>>(toolchain_path: P) -> Result<PathBuf> {
let toolchain = toolchain_path.as_ref();
let immediate = toolchain.join("sysroot");
if immediate.is_dir() {
let sysroot = immediate
.to_str()
.ok_or_else(|| anyhow!("sysroot is not utf-8"))?;
return Ok(sysroot.into());
}
for subdir in toolchain.read_dir()? {
let subdir = subdir?;
let maybe = subdir.path().join("sysroot");
if maybe.is_dir() {
let sysroot = maybe
.to_str()
.ok_or_else(|| anyhow!("sysroot is not utf-8"))?;
return Ok(sysroot.into());
}
}
bail!("no sysroot found in toolchain {:?}", toolchain)
}
|
{
write!(f, "{:?}", self.toolchain.root)
}
|
lib.rs
|
//! FUSE kernel interface.
//!
//! Types and definitions used for communication between the kernel driver and the userspace
//! part of a FUSE filesystem. Since the kernel driver may be installed independently, the ABI
//! interface is versioned and capabilities are exchanged during the initialization (mounting)
//! of a filesystem.
//!
//! OSXFUSE (macOS): https://github.com/osxfuse/fuse/blob/master/include/fuse_kernel.h
//! - supports ABI 7.8 in OSXFUSE 2.x
//! - supports ABI 7.19 since OSXFUSE 3.0.0
//!
//! libfuse (Linux/BSD): https://github.com/libfuse/libfuse/blob/master/include/fuse_kernel.h
//! - supports ABI 7.8 since FUSE 2.6.0
//! - supports ABI 7.12 since FUSE 2.8.0
//! - supports ABI 7.18 since FUSE 2.9.0
//! - supports ABI 7.19 since FUSE 2.9.1
//! - supports ABI 7.26 since FUSE 3.0.0
//!
//! Items without a version annotation are valid with ABI 7.8 and later
#![warn(missing_debug_implementations, rust_2018_idioms)]
#![allow(missing_docs)]
use std::convert::TryFrom;
pub const FUSE_KERNEL_VERSION: u32 = 7;
#[cfg(not(feature = "abi-7-9"))]
pub const FUSE_KERNEL_MINOR_VERSION: u32 = 8;
#[cfg(all(feature = "abi-7-9", not(feature = "abi-7-10")))]
pub const FUSE_KERNEL_MINOR_VERSION: u32 = 9;
#[cfg(all(feature = "abi-7-10", not(feature = "abi-7-11")))]
pub const FUSE_KERNEL_MINOR_VERSION: u32 = 10;
#[cfg(all(feature = "abi-7-11", not(feature = "abi-7-12")))]
pub const FUSE_KERNEL_MINOR_VERSION: u32 = 11;
#[cfg(all(feature = "abi-7-12", not(feature = "abi-7-13")))]
pub const FUSE_KERNEL_MINOR_VERSION: u32 = 12;
#[cfg(all(feature = "abi-7-13", not(feature = "abi-7-14")))]
pub const FUSE_KERNEL_MINOR_VERSION: u32 = 13;
#[cfg(all(feature = "abi-7-14", not(feature = "abi-7-15")))]
pub const FUSE_KERNEL_MINOR_VERSION: u32 = 14;
#[cfg(all(feature = "abi-7-15", not(feature = "abi-7-16")))]
pub const FUSE_KERNEL_MINOR_VERSION: u32 = 15;
#[cfg(all(feature = "abi-7-16", not(feature = "abi-7-17")))]
pub const FUSE_KERNEL_MINOR_VERSION: u32 = 16;
#[cfg(all(feature = "abi-7-17", not(feature = "abi-7-18")))]
pub const FUSE_KERNEL_MINOR_VERSION: u32 = 17;
#[cfg(all(feature = "abi-7-18", not(feature = "abi-7-19")))]
pub const FUSE_KERNEL_MINOR_VERSION: u32 = 18;
#[cfg(feature = "abi-7-19")]
pub const FUSE_KERNEL_MINOR_VERSION: u32 = 19;
pub const FUSE_ROOT_ID: u64 = 1;
#[repr(C)]
#[derive(Debug)]
pub struct fuse_attr {
pub ino: u64,
pub size: u64,
pub blocks: u64,
pub atime: u64,
pub mtime: u64,
pub ctime: u64,
#[cfg(target_os = "macos")]
pub crtime: u64,
pub atimensec: u32,
pub mtimensec: u32,
pub ctimensec: u32,
#[cfg(target_os = "macos")]
pub crtimensec: u32,
pub mode: u32,
pub nlink: u32,
pub uid: u32,
pub gid: u32,
pub rdev: u32,
#[cfg(target_os = "macos")]
pub flags: u32, // see chflags(2)
#[cfg(feature = "abi-7-9")]
pub blksize: u32,
#[cfg(feature = "abi-7-9")]
pub padding: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_kstatfs {
pub blocks: u64, // Total blocks (in units of frsize)
pub bfree: u64, // Free blocks
pub bavail: u64, // Free blocks for unprivileged users
pub files: u64, // Total inodes
pub ffree: u64, // Free inodes
pub bsize: u32, // Filesystem block size
pub namelen: u32, // Maximum filename length
pub frsize: u32, // Fundamental file system block size
pub padding: u32,
pub spare: [u32; 6],
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_file_lock {
pub start: u64,
pub end: u64,
pub typ: u32,
pub pid: u32,
}
pub mod consts {
// Bitmasks for fuse_setattr_in.valid
pub const FATTR_MODE: u32 = 1 << 0;
pub const FATTR_UID: u32 = 1 << 1;
pub const FATTR_GID: u32 = 1 << 2;
pub const FATTR_SIZE: u32 = 1 << 3;
pub const FATTR_ATIME: u32 = 1 << 4;
pub const FATTR_MTIME: u32 = 1 << 5;
pub const FATTR_FH: u32 = 1 << 6;
#[cfg(feature = "abi-7-9")]
pub const FATTR_ATIME_NOW: u32 = 1 << 7;
#[cfg(feature = "abi-7-9")]
pub const FATTR_MTIME_NOW: u32 = 1 << 8;
#[cfg(feature = "abi-7-9")]
pub const FATTR_LOCKOWNER: u32 = 1 << 9;
#[cfg(target_os = "macos")]
pub const FATTR_CRTIME: u32 = 1 << 28;
#[cfg(target_os = "macos")]
pub const FATTR_CHGTIME: u32 = 1 << 29;
#[cfg(target_os = "macos")]
pub const FATTR_BKUPTIME: u32 = 1 << 30;
#[cfg(target_os = "macos")]
pub const FATTR_FLAGS: u32 = 1 << 31;
// Flags returned by the open request
pub const FOPEN_DIRECT_IO: u32 = 1 << 0; // bypass page cache for this open file
pub const FOPEN_KEEP_CACHE: u32 = 1 << 1; // don't invalidate the data cache on open
#[cfg(feature = "abi-7-10")]
pub const FOPEN_NONSEEKABLE: u32 = 1 << 2; // the file is not seekable
#[cfg(target_os = "macos")]
pub const FOPEN_PURGE_ATTR: u32 = 1 << 30;
#[cfg(target_os = "macos")]
pub const FOPEN_PURGE_UBC: u32 = 1 << 31;
// Init request/reply flags
pub const FUSE_ASYNC_READ: u32 = 1 << 0; // asynchronous read requests
pub const FUSE_POSIX_LOCKS: u32 = 1 << 1; // remote locking for POSIX file locks
#[cfg(feature = "abi-7-9")]
pub const FUSE_FILE_OPS: u32 = 1 << 2; // kernel sends file handle for fstat, etc...
#[cfg(feature = "abi-7-9")]
pub const FUSE_ATOMIC_O_TRUNC: u32 = 1 << 3; // handles the O_TRUNC open flag in the filesystem
#[cfg(feature = "abi-7-10")]
pub const FUSE_EXPORT_SUPPORT: u32 = 1 << 4; // filesystem handles lookups of "." and ".."
#[cfg(feature = "abi-7-9")]
pub const FUSE_BIG_WRITES: u32 = 1 << 5; // filesystem can handle write size larger than 4kB
#[cfg(feature = "abi-7-12")]
pub const FUSE_DONT_MASK: u32 = 1 << 6; // don't apply umask to file mode on create operations
#[cfg(all(feature = "abi-7-14", not(target_os = "macos")))]
pub const FUSE_SPLICE_WRITE: u32 = 1 << 7; // kernel supports splice write on the device
#[cfg(all(feature = "abi-7-14", not(target_os = "macos")))]
pub const FUSE_SPLICE_MOVE: u32 = 1 << 8; // kernel supports splice move on the device
#[cfg(not(target_os = "macos"))]
#[cfg(feature = "abi-7-14")]
pub const FUSE_SPLICE_READ: u32 = 1 << 9; // kernel supports splice read on the device
#[cfg(feature = "abi-7-17")]
pub const FUSE_FLOCK_LOCKS: u32 = 1 << 10; // remote locking for BSD style file locks
#[cfg(feature = "abi-7-18")]
pub const FUSE_HAS_IOCTL_DIR: u32 = 1 << 11; // kernel supports ioctl on directories
#[cfg(target_os = "macos")]
pub const FUSE_ALLOCATE: u32 = 1 << 27;
#[cfg(target_os = "macos")]
pub const FUSE_EXCHANGE_DATA: u32 = 1 << 28;
#[cfg(target_os = "macos")]
pub const FUSE_CASE_INSENSITIVE: u32 = 1 << 29;
#[cfg(target_os = "macos")]
pub const FUSE_VOL_RENAME: u32 = 1 << 30;
#[cfg(target_os = "macos")]
pub const FUSE_XTIMES: u32 = 1 << 31;
// CUSE init request/reply flags
#[cfg(feature = "abi-7-12")]
pub const CUSE_UNRESTRICTED_IOCTL: u32 = 1 << 0; // use unrestricted ioctl
// Release flags
pub const FUSE_RELEASE_FLUSH: u32 = 1 << 0;
#[cfg(feature = "abi-7-17")]
    pub const FUSE_RELEASE_FLOCK_UNLOCK: u32 = 1 << 1;
// Getattr flags
#[cfg(feature = "abi-7-9")]
pub const FUSE_GETATTR_FH: u32 = 1 << 0;
// Lock flags
#[cfg(feature = "abi-7-9")]
pub const FUSE_LK_FLOCK: u32 = 1 << 0;
// Write flags
#[cfg(feature = "abi-7-9")]
pub const FUSE_WRITE_CACHE: u32 = 1 << 0; // delayed write from page cache, file handle is guessed
#[cfg(feature = "abi-7-9")]
pub const FUSE_WRITE_LOCKOWNER: u32 = 1 << 1; // lock_owner field is valid
// Read flags
#[cfg(feature = "abi-7-9")]
pub const FUSE_READ_LOCKOWNER: u32 = 1 << 1;
// IOCTL flags
#[cfg(feature = "abi-7-11")]
pub const FUSE_IOCTL_COMPAT: u32 = 1 << 0; // 32bit compat ioctl on 64bit machine
#[cfg(feature = "abi-7-11")]
pub const FUSE_IOCTL_UNRESTRICTED: u32 = 1 << 1; // not restricted to well-formed ioctls, retry allowed
#[cfg(feature = "abi-7-11")]
pub const FUSE_IOCTL_RETRY: u32 = 1 << 2; // retry with new iovecs
#[cfg(feature = "abi-7-16")]
pub const FUSE_IOCTL_32BIT: u32 = 1 << 3; // 32bit ioctl
#[cfg(feature = "abi-7-18")]
pub const FUSE_IOCTL_DIR: u32 = 1 << 4; // is a directory
#[cfg(feature = "abi-7-11")]
pub const FUSE_IOCTL_MAX_IOV: u32 = 256; // maximum of in_iovecs + out_iovecs
// Poll flags
#[cfg(feature = "abi-7-9")]
    pub const FUSE_POLL_SCHEDULE_NOTIFY: u32 = 1 << 0; // request poll notify
// The read buffer is required to be at least 8k, but may be much larger
pub const FUSE_MIN_READ_BUFFER: usize = 8192;
}
/// Invalid opcode error.
#[derive(Debug)]
pub struct InvalidOpcodeError;
#[repr(C)]
#[derive(Debug)]
#[allow(non_camel_case_types)]
pub enum fuse_opcode {
FUSE_LOOKUP = 1,
FUSE_FORGET = 2, // no reply
FUSE_GETATTR = 3,
FUSE_SETATTR = 4,
FUSE_READLINK = 5,
FUSE_SYMLINK = 6,
FUSE_MKNOD = 8,
FUSE_MKDIR = 9,
FUSE_UNLINK = 10,
FUSE_RMDIR = 11,
FUSE_RENAME = 12,
FUSE_LINK = 13,
FUSE_OPEN = 14,
FUSE_READ = 15,
FUSE_WRITE = 16,
FUSE_STATFS = 17,
FUSE_RELEASE = 18,
FUSE_FSYNC = 20,
FUSE_SETXATTR = 21,
FUSE_GETXATTR = 22,
FUSE_LISTXATTR = 23,
FUSE_REMOVEXATTR = 24,
FUSE_FLUSH = 25,
FUSE_INIT = 26,
FUSE_OPENDIR = 27,
FUSE_READDIR = 28,
FUSE_RELEASEDIR = 29,
FUSE_FSYNCDIR = 30,
FUSE_GETLK = 31,
FUSE_SETLK = 32,
FUSE_SETLKW = 33,
FUSE_ACCESS = 34,
FUSE_CREATE = 35,
FUSE_INTERRUPT = 36,
FUSE_BMAP = 37,
FUSE_DESTROY = 38,
#[cfg(feature = "abi-7-11")]
FUSE_IOCTL = 39,
#[cfg(feature = "abi-7-11")]
FUSE_POLL = 40,
#[cfg(feature = "abi-7-15")]
FUSE_NOTIFY_REPLY = 41,
#[cfg(feature = "abi-7-16")]
FUSE_BATCH_FORGET = 42,
#[cfg(feature = "abi-7-19")]
FUSE_FALLOCATE = 43,
#[cfg(target_os = "macos")]
FUSE_SETVOLNAME = 61,
#[cfg(target_os = "macos")]
FUSE_GETXTIMES = 62,
#[cfg(target_os = "macos")]
FUSE_EXCHANGE = 63,
#[cfg(feature = "abi-7-12")]
CUSE_INIT = 4096,
}
impl TryFrom<u32> for fuse_opcode {
type Error = InvalidOpcodeError;
fn try_from(n: u32) -> Result<Self, Self::Error> {
match n {
1 => Ok(fuse_opcode::FUSE_LOOKUP),
2 => Ok(fuse_opcode::FUSE_FORGET),
3 => Ok(fuse_opcode::FUSE_GETATTR),
4 => Ok(fuse_opcode::FUSE_SETATTR),
5 => Ok(fuse_opcode::FUSE_READLINK),
6 => Ok(fuse_opcode::FUSE_SYMLINK),
8 => Ok(fuse_opcode::FUSE_MKNOD),
9 => Ok(fuse_opcode::FUSE_MKDIR),
10 => Ok(fuse_opcode::FUSE_UNLINK),
11 => Ok(fuse_opcode::FUSE_RMDIR),
12 => Ok(fuse_opcode::FUSE_RENAME),
13 => Ok(fuse_opcode::FUSE_LINK),
14 => Ok(fuse_opcode::FUSE_OPEN),
15 => Ok(fuse_opcode::FUSE_READ),
16 => Ok(fuse_opcode::FUSE_WRITE),
17 => Ok(fuse_opcode::FUSE_STATFS),
18 => Ok(fuse_opcode::FUSE_RELEASE),
20 => Ok(fuse_opcode::FUSE_FSYNC),
21 => Ok(fuse_opcode::FUSE_SETXATTR),
22 => Ok(fuse_opcode::FUSE_GETXATTR),
23 => Ok(fuse_opcode::FUSE_LISTXATTR),
24 => Ok(fuse_opcode::FUSE_REMOVEXATTR),
25 => Ok(fuse_opcode::FUSE_FLUSH),
26 => Ok(fuse_opcode::FUSE_INIT),
27 => Ok(fuse_opcode::FUSE_OPENDIR),
28 => Ok(fuse_opcode::FUSE_READDIR),
29 => Ok(fuse_opcode::FUSE_RELEASEDIR),
30 => Ok(fuse_opcode::FUSE_FSYNCDIR),
31 => Ok(fuse_opcode::FUSE_GETLK),
32 => Ok(fuse_opcode::FUSE_SETLK),
33 => Ok(fuse_opcode::FUSE_SETLKW),
34 => Ok(fuse_opcode::FUSE_ACCESS),
35 => Ok(fuse_opcode::FUSE_CREATE),
36 => Ok(fuse_opcode::FUSE_INTERRUPT),
37 => Ok(fuse_opcode::FUSE_BMAP),
38 => Ok(fuse_opcode::FUSE_DESTROY),
#[cfg(feature = "abi-7-11")]
39 => Ok(fuse_opcode::FUSE_IOCTL),
#[cfg(feature = "abi-7-11")]
40 => Ok(fuse_opcode::FUSE_POLL),
#[cfg(feature = "abi-7-15")]
41 => Ok(fuse_opcode::FUSE_NOTIFY_REPLY),
#[cfg(feature = "abi-7-16")]
42 => Ok(fuse_opcode::FUSE_BATCH_FORGET),
#[cfg(feature = "abi-7-19")]
43 => Ok(fuse_opcode::FUSE_FALLOCATE),
#[cfg(target_os = "macos")]
61 => Ok(fuse_opcode::FUSE_SETVOLNAME),
#[cfg(target_os = "macos")]
62 => Ok(fuse_opcode::FUSE_GETXTIMES),
#[cfg(target_os = "macos")]
63 => Ok(fuse_opcode::FUSE_EXCHANGE),
#[cfg(feature = "abi-7-12")]
4096 => Ok(fuse_opcode::CUSE_INIT),
_ => Err(InvalidOpcodeError),
}
}
}
/// Invalid notify code error.
#[cfg(feature = "abi-7-11")]
#[derive(Debug)]
pub struct InvalidNotifyCodeError;
#[cfg(feature = "abi-7-11")]
#[repr(C)]
#[derive(Debug)]
#[allow(non_camel_case_types)]
pub enum fuse_notify_code {
#[cfg(feature = "abi-7-11")]
FUSE_POLL = 1,
#[cfg(feature = "abi-7-12")]
FUSE_NOTIFY_INVAL_INODE = 2,
#[cfg(feature = "abi-7-12")]
FUSE_NOTIFY_INVAL_ENTRY = 3,
#[cfg(feature = "abi-7-15")]
FUSE_NOTIFY_STORE = 4,
#[cfg(feature = "abi-7-15")]
FUSE_NOTIFY_RETRIEVE = 5,
#[cfg(feature = "abi-7-18")]
FUSE_NOTIFY_DELETE = 6,
}
#[cfg(feature = "abi-7-11")]
impl TryFrom<u32> for fuse_notify_code {
type Error = InvalidNotifyCodeError;
fn try_from(n: u32) -> Result<Self, Self::Error> {
match n {
#[cfg(feature = "abi-7-11")]
1 => Ok(fuse_notify_code::FUSE_POLL),
#[cfg(feature = "abi-7-12")]
2 => Ok(fuse_notify_code::FUSE_NOTIFY_INVAL_INODE),
#[cfg(feature = "abi-7-12")]
3 => Ok(fuse_notify_code::FUSE_NOTIFY_INVAL_ENTRY),
#[cfg(feature = "abi-7-15")]
4 => Ok(fuse_notify_code::FUSE_NOTIFY_STORE),
#[cfg(feature = "abi-7-15")]
5 => Ok(fuse_notify_code::FUSE_NOTIFY_RETRIEVE),
#[cfg(feature = "abi-7-18")]
6 => Ok(fuse_notify_code::FUSE_NOTIFY_DELETE),
_ => Err(InvalidNotifyCodeError),
}
}
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_entry_out {
pub nodeid: u64,
pub generation: u64,
pub entry_valid: u64,
pub attr_valid: u64,
pub entry_valid_nsec: u32,
pub attr_valid_nsec: u32,
pub attr: fuse_attr,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_forget_in {
pub nlookup: u64,
}
#[cfg(feature = "abi-7-16")]
#[repr(C)]
#[derive(Debug)]
pub struct fuse_forget_one {
pub nodeid: u64,
pub nlookup: u64,
}
#[cfg(feature = "abi-7-16")]
#[repr(C)]
#[derive(Debug)]
pub struct fuse_batch_forget_in {
pub count: u32,
pub dummy: u32,
}
#[cfg(feature = "abi-7-9")]
#[repr(C)]
#[derive(Debug)]
pub struct fuse_getattr_in {
pub getattr_flags: u32,
pub dummy: u32,
pub fh: u64,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_attr_out {
pub attr_valid: u64,
pub attr_valid_nsec: u32,
pub dummy: u32,
pub attr: fuse_attr,
}
#[cfg(target_os = "macos")]
#[repr(C)]
#[derive(Debug)]
pub struct fuse_getxtimes_out {
pub bkuptime: u64,
pub crtime: u64,
pub bkuptimensec: u32,
pub crtimensec: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_mknod_in {
pub mode: u32,
pub rdev: u32,
#[cfg(feature = "abi-7-12")]
pub umask: u32,
#[cfg(feature = "abi-7-12")]
pub padding: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_mkdir_in {
pub mode: u32,
#[cfg(not(feature = "abi-7-12"))]
pub padding: u32,
#[cfg(feature = "abi-7-12")]
pub umask: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_rename_in {
pub newdir: u64,
}
#[cfg(target_os = "macos")]
#[repr(C)]
#[derive(Debug)]
pub struct fuse_exchange_in {
pub olddir: u64,
pub newdir: u64,
pub options: u64,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_link_in {
pub oldnodeid: u64,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_setattr_in {
pub valid: u32,
pub padding: u32,
pub fh: u64,
pub size: u64,
#[cfg(not(feature = "abi-7-9"))]
pub unused1: u64,
#[cfg(feature = "abi-7-9")]
pub lock_owner: u64,
pub atime: u64,
pub mtime: u64,
pub unused2: u64,
pub atimensec: u32,
pub mtimensec: u32,
pub unused3: u32,
pub mode: u32,
pub unused4: u32,
pub uid: u32,
pub gid: u32,
pub unused5: u32,
#[cfg(target_os = "macos")]
pub bkuptime: u64,
#[cfg(target_os = "macos")]
pub chgtime: u64,
#[cfg(target_os = "macos")]
pub crtime: u64,
#[cfg(target_os = "macos")]
pub bkuptimensec: u32,
#[cfg(target_os = "macos")]
pub chgtimensec: u32,
#[cfg(target_os = "macos")]
pub crtimensec: u32,
#[cfg(target_os = "macos")]
pub flags: u32, // see chflags(2)
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_open_in {
pub flags: u32,
pub unused: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_create_in {
pub flags: u32,
pub mode: u32,
#[cfg(feature = "abi-7-12")]
pub umask: u32,
#[cfg(feature = "abi-7-12")]
pub padding: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_open_out {
pub fh: u64,
pub open_flags: u32,
pub padding: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_release_in {
pub fh: u64,
pub flags: u32,
pub release_flags: u32,
pub lock_owner: u64,
}
#[repr(C)]
#[derive(Debug)]
pub struct
|
{
pub fh: u64,
pub unused: u32,
pub padding: u32,
pub lock_owner: u64,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_read_in {
pub fh: u64,
pub offset: u64,
pub size: u32,
#[cfg(feature = "abi-7-9")]
pub read_flags: u32,
#[cfg(feature = "abi-7-9")]
pub lock_owner: u64,
#[cfg(feature = "abi-7-9")]
pub flags: u32,
#[cfg(feature = "abi-7-9")]
pub padding: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_write_in {
pub fh: u64,
pub offset: u64,
pub size: u32,
pub write_flags: u32,
#[cfg(feature = "abi-7-9")]
pub lock_owner: u64,
#[cfg(feature = "abi-7-9")]
pub flags: u32,
#[cfg(feature = "abi-7-9")]
pub padding: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_write_out {
pub size: u32,
pub padding: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_statfs_out {
pub st: fuse_kstatfs,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_fsync_in {
pub fh: u64,
pub fsync_flags: u32,
pub padding: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_setxattr_in {
pub size: u32,
pub flags: u32,
#[cfg(target_os = "macos")]
pub position: u32,
#[cfg(target_os = "macos")]
pub padding: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_getxattr_in {
pub size: u32,
pub padding: u32,
#[cfg(target_os = "macos")]
pub position: u32,
#[cfg(target_os = "macos")]
pub padding2: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_getxattr_out {
pub size: u32,
pub padding: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_lk_in {
pub fh: u64,
pub owner: u64,
pub lk: fuse_file_lock,
#[cfg(feature = "abi-7-9")]
pub lk_flags: u32,
#[cfg(feature = "abi-7-9")]
pub padding: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_lk_out {
pub lk: fuse_file_lock,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_access_in {
pub mask: u32,
pub padding: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_init_in {
pub major: u32,
pub minor: u32,
pub max_readahead: u32,
pub flags: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_init_out {
pub major: u32,
pub minor: u32,
pub max_readahead: u32,
pub flags: u32,
#[cfg(not(feature = "abi-7-13"))]
pub unused: u32,
#[cfg(feature = "abi-7-13")]
pub max_background: u16,
#[cfg(feature = "abi-7-13")]
pub congestion_threshold: u16,
pub max_write: u32,
}
#[cfg(feature = "abi-7-12")]
#[repr(C)]
#[derive(Debug)]
pub struct cuse_init_in {
pub major: u32,
pub minor: u32,
pub unused: u32,
pub flags: u32,
}
#[cfg(feature = "abi-7-12")]
#[repr(C)]
#[derive(Debug)]
pub struct cuse_init_out {
pub major: u32,
pub minor: u32,
pub unused: u32,
pub flags: u32,
pub max_read: u32,
pub max_write: u32,
pub dev_major: u32, // chardev major
pub dev_minor: u32, // chardev minor
pub spare: [u32; 10],
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_interrupt_in {
pub unique: u64,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_bmap_in {
pub block: u64,
pub blocksize: u32,
pub padding: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_bmap_out {
pub block: u64,
}
#[cfg(feature = "abi-7-11")]
#[repr(C)]
#[derive(Debug)]
pub struct fuse_ioctl_in {
pub fh: u64,
pub flags: u32,
pub cmd: u32,
pub arg: u64,
pub in_size: u32,
pub out_size: u32,
}
#[cfg(feature = "abi-7-16")]
#[repr(C)]
#[derive(Debug)]
pub struct fuse_ioctl_iovec {
pub base: u64,
pub len: u64,
}
#[cfg(feature = "abi-7-11")]
#[repr(C)]
#[derive(Debug)]
pub struct fuse_ioctl_out {
pub result: i32,
pub flags: u32,
pub in_iovs: u32,
pub out_iovs: u32,
}
#[cfg(feature = "abi-7-11")]
#[repr(C)]
#[derive(Debug)]
pub struct fuse_poll_in {
pub fh: u64,
pub kh: u64,
pub flags: u32,
pub padding: u32,
}
#[cfg(feature = "abi-7-11")]
#[repr(C)]
#[derive(Debug)]
pub struct fuse_poll_out {
pub revents: u32,
pub padding: u32,
}
#[cfg(feature = "abi-7-11")]
#[repr(C)]
#[derive(Debug)]
pub struct fuse_notify_poll_wakeup_out {
pub kh: u64,
}
#[cfg(feature = "abi-7-19")]
#[repr(C)]
#[derive(Debug)]
pub struct fuse_fallocate_in {
fh: u64,
offset: u64,
length: u64,
mode: u32,
padding: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_in_header {
pub len: u32,
pub opcode: u32,
pub unique: u64,
pub nodeid: u64,
pub uid: u32,
pub gid: u32,
pub pid: u32,
pub padding: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_out_header {
pub len: u32,
pub error: i32,
pub unique: u64,
}
#[repr(C)]
#[derive(Debug)]
pub struct fuse_dirent {
pub ino: u64,
pub off: u64,
pub namelen: u32,
pub typ: u32,
// followed by name of namelen bytes
}
#[cfg(feature = "abi-7-12")]
#[repr(C)]
#[derive(Debug)]
pub struct fuse_notify_inval_inode_out {
pub ino: u64,
pub off: i64,
pub len: i64,
}
#[cfg(feature = "abi-7-12")]
#[repr(C)]
#[derive(Debug)]
pub struct fuse_notify_inval_entry_out {
pub parent: u64,
pub namelen: u32,
pub padding: u32,
}
#[cfg(feature = "abi-7-18")]
#[repr(C)]
#[derive(Debug)]
pub struct fuse_notify_delete_out {
parent: u64,
child: u64,
namelen: u32,
padding: u32,
}
#[cfg(feature = "abi-7-15")]
#[repr(C)]
#[derive(Debug)]
pub struct fuse_notify_store_out {
pub nodeid: u64,
pub offset: u64,
pub size: u32,
pub padding: u32,
}
#[cfg(feature = "abi-7-15")]
#[repr(C)]
#[derive(Debug)]
pub struct fuse_notify_retrieve_out {
pub notify_unique: u64,
pub nodeid: u64,
pub offset: u64,
pub size: u32,
pub padding: u32,
}
#[cfg(feature = "abi-7-15")]
#[repr(C)]
#[derive(Debug)]
pub struct fuse_notify_retrieve_in { // matches the size of fuse_write_in
pub dummy1: u64,
pub offset: u64,
pub size: u32,
pub dummy2: u32,
pub dummy3: u64,
pub dummy4: u64,
}
|
fuse_flush_in
|
syncer_test.go
|
package campaigns
import (
"container/heap"
"context"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/sourcegraph/sourcegraph/cmd/repo-updater/repos"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/campaigns"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
)
func TestNextSync(t *testing.T) {
clock := func() time.Time { return time.Date(2020, 01, 01, 01, 01, 01, 01, time.UTC) }
tests := []struct {
name string
h campaigns.ChangesetSyncData
want time.Time
}{
{
name: "No time passed",
h: campaigns.ChangesetSyncData{
UpdatedAt: clock(),
ExternalUpdatedAt: clock(),
},
want: clock().Add(minSyncDelay),
},
{
name: "Linear backoff",
h: campaigns.ChangesetSyncData{
UpdatedAt: clock(),
ExternalUpdatedAt: clock().Add(-1 * time.Hour),
},
want: clock().Add(1 * time.Hour),
},
{
name: "Use max of ExternalUpdateAt and LatestEvent",
h: campaigns.ChangesetSyncData{
UpdatedAt: clock(),
ExternalUpdatedAt: clock().Add(-2 * time.Hour),
LatestEvent: clock().Add(-1 * time.Hour),
},
want: clock().Add(1 * time.Hour),
},
{
name: "Diff max is capped",
h: campaigns.ChangesetSyncData{
UpdatedAt: clock(),
ExternalUpdatedAt: clock().Add(-2 * maxSyncDelay),
},
want: clock().Add(maxSyncDelay),
},
{
name: "Diff min is capped",
h: campaigns.ChangesetSyncData{
UpdatedAt: clock(),
ExternalUpdatedAt: clock().Add(-1 * minSyncDelay / 2),
},
want: clock().Add(minSyncDelay),
},
{
name: "Event arrives after sync",
h: campaigns.ChangesetSyncData{
UpdatedAt: clock(),
ExternalUpdatedAt: clock().Add(-1 * maxSyncDelay / 2),
LatestEvent: clock().Add(10 * time.Minute),
},
want: clock().Add(10 * time.Minute).Add(minSyncDelay),
},
{
name: "Never synced",
h: campaigns.ChangesetSyncData{},
want: clock(),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := NextSync(clock, tt.h)
if diff := cmp.Diff(got, tt.want); diff != "" {
t.Fatal(diff)
}
})
}
}
func TestChangesetPriorityQueue(t *testing.T) {
assertOrder := func(t *testing.T, q *changesetPriorityQueue, expected []int64) {
t.Helper()
ids := make([]int64, len(q.items))
for i := range ids {
ids[i] = q.items[i].changesetID
}
if diff := cmp.Diff(expected, ids); diff != "" {
t.Fatal(diff)
}
}
now := time.Now()
q := newChangesetPriorityQueue()
items := []scheduledSync{
{
changesetID: 1,
nextSync: now,
priority: priorityNormal,
},
{
changesetID: 2,
nextSync: now,
priority: priorityHigh,
},
{
changesetID: 3,
nextSync: now.Add(-1 * time.Minute),
priority: priorityNormal,
},
{
changesetID: 4,
nextSync: now.Add(-2 * time.Hour),
priority: priorityNormal,
},
{
changesetID: 5,
nextSync: now.Add(1 * time.Hour),
priority: priorityNormal,
},
}
for i := range items {
q.Upsert(items[i])
}
assertOrder(t, q, []int64{2, 4, 3, 1, 5})
// Set item to high priority
q.Upsert(scheduledSync{
changesetID: 4,
nextSync: now.Add(-2 * time.Hour),
priority: priorityHigh,
})
assertOrder(t, q, []int64{4, 2, 3, 1, 5})
// Can't reduce priority of existing item
q.Upsert(scheduledSync{
changesetID: 4,
nextSync: now.Add(-2 * time.Hour),
priority: priorityNormal,
})
if q.Len() != len(items) {
t.Fatalf("Expected %d, got %d", q.Len(), len(items))
}
assertOrder(t, q, []int64{4, 2, 3, 1, 5})
for i := 0; i < len(items); i++ {
peeked, ok := q.Peek()
if !ok {
t.Fatalf("Queue should not be empty")
}
item := heap.Pop(q).(scheduledSync)
if peeked.changesetID != item.changesetID {
t.Fatalf("Peeked and Popped item should have the same id")
}
}
// Len() should be zero after all items popped
if q.Len() != 0 {
t.Fatalf("Expected %d, got %d", q.Len(), 0)
}
}
func TestSyncerRun(t *testing.T) {
t.Run("Sync due", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
now := time.Now()
store := MockSyncStore{
listChangesetSyncData: func(ctx context.Context, opts ListChangesetSyncDataOpts) ([]campaigns.ChangesetSyncData, error) {
return []campaigns.ChangesetSyncData{
{
ChangesetID: 1,
UpdatedAt: now.Add(-2 * maxSyncDelay),
LatestEvent: now.Add(-2 * maxSyncDelay),
ExternalUpdatedAt: now.Add(-2 * maxSyncDelay),
},
}, nil
},
}
syncFunc := func(ctx context.Context, ids int64) error {
cancel()
return nil
}
syncer := &ChangesetSyncer{
SyncStore: store,
scheduleInterval: 10 * time.Minute,
syncFunc: syncFunc,
}
go syncer.Run(ctx)
select {
case <-ctx.Done():
case <-time.After(50 * time.Millisecond):
t.Fatal("Sync should have been triggered")
}
})
t.Run("Sync not due", func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
defer cancel()
now := time.Now()
store := MockSyncStore{
listChangesetSyncData: func(ctx context.Context, opts ListChangesetSyncDataOpts) ([]campaigns.ChangesetSyncData, error) {
return []campaigns.ChangesetSyncData{
{
ChangesetID: 1,
UpdatedAt: now,
LatestEvent: now,
ExternalUpdatedAt: now,
},
}, nil
},
}
var syncCalled bool
syncFunc := func(ctx context.Context, ids int64) error {
syncCalled = true
return nil
}
syncer := &ChangesetSyncer{
SyncStore: store,
scheduleInterval: 10 * time.Minute,
syncFunc: syncFunc,
}
syncer.Run(ctx)
if syncCalled {
t.Fatal("Sync should not have been triggered")
}
})
t.Run("Priority added", func(t *testing.T) {
// Empty schedule but then we add an item
ctx, cancel := context.WithCancel(context.Background())
store := MockSyncStore{
listChangesetSyncData: func(ctx context.Context, opts ListChangesetSyncDataOpts) ([]campaigns.ChangesetSyncData, error) {
return []campaigns.ChangesetSyncData{}, nil
},
}
syncFunc := func(ctx context.Context, ids int64) error {
cancel()
return nil
}
syncer := &ChangesetSyncer{
SyncStore: store,
scheduleInterval: 10 * time.Minute,
syncFunc: syncFunc,
priorityNotify: make(chan []int64, 1),
}
syncer.priorityNotify <- []int64{1}
go syncer.Run(ctx)
select {
case <-ctx.Done():
case <-time.After(50 * time.Millisecond):
t.Fatal("Sync not called")
}
})
}
func TestFilterSyncData(t *testing.T)
|
func TestSyncRegistry(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
now := time.Now()
repoStore := MockRepoStore{
listExternalServices: func(ctx context.Context, args repos.StoreListExternalServicesArgs) (services []*repos.ExternalService, err error) {
return []*repos.ExternalService{
{
ID: 1,
Kind: extsvc.KindGitHub,
DisplayName: "",
Config: "",
CreatedAt: time.Time{},
UpdatedAt: time.Time{},
},
}, nil
},
}
syncStore := MockSyncStore{
listChangesetSyncData: func(ctx context.Context, opts ListChangesetSyncDataOpts) (data []campaigns.ChangesetSyncData, err error) {
return []campaigns.ChangesetSyncData{
{
ChangesetID: 1,
UpdatedAt: now,
ExternalServiceIDs: []int64{1},
},
}, nil
},
}
r := NewSyncRegistry(ctx, syncStore, repoStore, nil, nil)
assertSyncerCount := func(want int) {
r.mu.Lock()
if len(r.syncers) != want {
t.Fatalf("Expected %d syncer, got %d", want, len(r.syncers))
}
r.mu.Unlock()
}
assertSyncerCount(1)
// Adding it again should have no effect
r.Add(1)
assertSyncerCount(1)
// Simulate a service being removed
r.HandleExternalServiceSync(api.ExternalService{
ID: 1,
Kind: extsvc.KindGitHub,
DeletedAt: &now,
})
assertSyncerCount(0)
// And added again
r.HandleExternalServiceSync(api.ExternalService{
ID: 1,
Kind: extsvc.KindGitHub,
DeletedAt: nil,
})
assertSyncerCount(1)
syncChan := make(chan int64, 1)
// In order to test that priority items are delivered we'll inject our own syncer
// with a custom sync func
syncer := &ChangesetSyncer{
SyncStore: syncStore,
ReposStore: repoStore,
HTTPFactory: nil,
externalServiceID: 1,
syncFunc: func(ctx context.Context, id int64) error {
syncChan <- id
return nil
},
priorityNotify: make(chan []int64, 1),
}
go syncer.Run(ctx)
// Set the syncer
r.mu.Lock()
r.syncers[1] = syncer
r.mu.Unlock()
// Send priority items
err := r.EnqueueChangesetSyncs(ctx, []int64{1, 2})
if err != nil {
t.Fatal(err)
}
select {
case id := <-syncChan:
if id != 1 {
t.Fatalf("Expected 1, got %d", id)
}
case <-time.After(1 * time.Second):
t.Fatal("Timed out waiting for sync")
}
}
type MockSyncStore struct {
listChangesetSyncData func(context.Context, ListChangesetSyncDataOpts) ([]campaigns.ChangesetSyncData, error)
getChangeset func(context.Context, GetChangesetOpts) (*campaigns.Changeset, error)
listChangesets func(context.Context, ListChangesetsOpts) ([]*campaigns.Changeset, int64, error)
updateChangesets func(context.Context, ...*campaigns.Changeset) error
upsertChangesetEvents func(context.Context, ...*campaigns.ChangesetEvent) error
transact func(context.Context) (*Store, error)
}
func (m MockSyncStore) ListChangesetSyncData(ctx context.Context, opts ListChangesetSyncDataOpts) ([]campaigns.ChangesetSyncData, error) {
return m.listChangesetSyncData(ctx, opts)
}
func (m MockSyncStore) GetChangeset(ctx context.Context, opts GetChangesetOpts) (*campaigns.Changeset, error) {
return m.getChangeset(ctx, opts)
}
func (m MockSyncStore) ListChangesets(ctx context.Context, opts ListChangesetsOpts) ([]*campaigns.Changeset, int64, error) {
return m.listChangesets(ctx, opts)
}
func (m MockSyncStore) UpdateChangesets(ctx context.Context, cs ...*campaigns.Changeset) error {
return m.updateChangesets(ctx, cs...)
}
func (m MockSyncStore) UpsertChangesetEvents(ctx context.Context, cs ...*campaigns.ChangesetEvent) error {
return m.upsertChangesetEvents(ctx, cs...)
}
func (m MockSyncStore) Transact(ctx context.Context) (*Store, error) {
return m.transact(ctx)
}
type MockRepoStore struct {
listExternalServices func(context.Context, repos.StoreListExternalServicesArgs) ([]*repos.ExternalService, error)
listRepos func(context.Context, repos.StoreListReposArgs) ([]*repos.Repo, error)
}
func (m MockRepoStore) UpsertExternalServices(ctx context.Context, svcs ...*repos.ExternalService) error {
panic("implement me")
}
func (m MockRepoStore) UpsertRepos(ctx context.Context, repos ...*repos.Repo) error {
panic("implement me")
}
func (m MockRepoStore) ListAllRepoNames(ctx context.Context) ([]api.RepoName, error) {
panic("implement me")
}
func (m MockRepoStore) ListExternalServices(ctx context.Context, args repos.StoreListExternalServicesArgs) ([]*repos.ExternalService, error) {
return m.listExternalServices(ctx, args)
}
func (m MockRepoStore) ListRepos(ctx context.Context, args repos.StoreListReposArgs) ([]*repos.Repo, error) {
return m.listRepos(ctx, args)
}
|
{
testCases := []struct {
name string
serviceID int64
data []campaigns.ChangesetSyncData
want []campaigns.ChangesetSyncData
}{
{
name: "Empty",
serviceID: 1,
data: []campaigns.ChangesetSyncData{},
want: []campaigns.ChangesetSyncData{},
},
{
name: "single item, should match",
serviceID: 1,
data: []campaigns.ChangesetSyncData{
{
ChangesetID: 1,
ExternalServiceIDs: []int64{1},
},
},
want: []campaigns.ChangesetSyncData{
{
ChangesetID: 1,
ExternalServiceIDs: []int64{1},
},
},
},
{
name: "single item, should not match",
serviceID: 1,
data: []campaigns.ChangesetSyncData{
{
ChangesetID: 1,
ExternalServiceIDs: []int64{2},
},
},
want: []campaigns.ChangesetSyncData{},
},
{
name: "multiple items, should match",
serviceID: 2,
data: []campaigns.ChangesetSyncData{
{
ChangesetID: 1,
ExternalServiceIDs: []int64{1, 2},
},
},
want: []campaigns.ChangesetSyncData{
{
ChangesetID: 1,
ExternalServiceIDs: []int64{1, 2},
},
},
},
{
name: "multiple items, should not match",
serviceID: 1,
data: []campaigns.ChangesetSyncData{
{
ChangesetID: 1,
ExternalServiceIDs: []int64{1, 2},
},
},
want: []campaigns.ChangesetSyncData{},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
data := filterSyncData(tc.serviceID, tc.data)
if diff := cmp.Diff(tc.want, data); diff != "" {
t.Fatal(diff)
}
})
}
}
|
knob-chart-demo.js
|
/*
Template Name: Heyqo - Admin & Dashboard Template
Author: Myra Studio
File: Knob Chart
|
$(function(){$('[data-plugin="knob"]').knob()});
|
*/
|
MediaCard.js
|
import React, { useEffect, useState } from 'react'
import { useSelector } from 'react-redux'
import { selectSong } from '../songSlice'
export default function
|
() {
const [radioStation, setRadioStation] = useState('not identified')
const song = useSelector(selectSong)
useEffect(() => {
setRadioStation(getRadioName(song.Link))
}, [])
const getRadioName = (link) => {
switch (link) {
case 'https://scdn.nrjaudio.fm/de/33003/mp3_128.mp3?origine=wlan&cdn_path=adswizz_lbs10&adws_out_b1':
return 'NRJ'
case 'http://mp3.topfm.c.nmdn.net/ps-topfm/livestream.mp3':
return 'TopFM'
case 'http://streams.radiobob.de/bob-national/mp3-128/streams.radiobob.de':
return 'Bob'
case 'http://streams.egofm.de/egoFM-hq':
return 'EgoFM'
case 'https://swr-edge-2035-fra-lg-cdn.cast.addradio.de/swr/swr3/live/mp3/128/stream.mp3':
return 'SWR3'
default:
return 'not identified'
}
}
return (
<>
<div>
{radioStation !== 'not identified'
? 'Radio ' + radioStation
: 'Media'
}
</div>
<div>
{song?.Title}
</div>
</>
)
}
|
MediaCard
|
tcp.rs
|
// Copyright 2021 Clivern. All rights reserved.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
use std::net::TcpListener;
// SingleThreadedTcp Type
pub struct SingleThreadedTcp {
address: String,
}
// SingleThreadedTcp Methods
impl SingleThreadedTcp {
pub fn new(address: &str) -> SingleThreadedTcp
|
// Set address
pub fn set_address(&mut self, address: &str) {
self.address = address.to_string();
}
// Get address
pub fn get_address(self) -> String {
self.address
}
// Listen to connections
pub fn listen(self) {
        let listener = TcpListener::bind(&self.address).unwrap();
        println!("Listening for TCP connections on {}...", self.address);
for stream in listener.incoming() {
let _stream = stream.unwrap();
println!("Connection established!");
}
}
}
|
{
SingleThreadedTcp {
address: address.to_string(),
}
}
|
MPIN192.go
|
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
/* MPIN API Functions */
package XXX
import "time"
import "github.com/milagro-crypto/amcl/version3/go/amcl"
//import "fmt"
const MFS int = int(MODBYTES)
const MGS int = int(MODBYTES)
const BAD_PARAMS int = -11
const INVALID_POINT int = -14
const WRONG_ORDER int = -18
const BAD_PIN int = -19
/* Configure your PIN here */
const MAXPIN int32 = 10000 /* PIN less than this */
const PBLEN int32 = 14 /* Number of bits in PIN */
const TS int = 10 /* 10 for 4 digit PIN, 14 for 6-digit PIN - 2^TS/TS approx = sqrt(MAXPIN) */
const TRAP int = 200 /* 200 for 4 digit PIN, 2000 for 6-digit PIN - approx 2*sqrt(MAXPIN) */
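/* mpin_hash serialises the FP8 pairing value c and the point U into a single buffer, hashes it with the selected SHA variant and returns the first AESKEY bytes as key material */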
func mpin_hash(sha int, c *FP8, U *ECP) []byte {
var w [MFS]byte
var t [10 * MFS]byte
var h []byte
c.geta().geta().GetA().ToBytes(w[:])
for i := 0; i < MFS; i++ {
t[i] = w[i]
}
c.geta().geta().GetB().ToBytes(w[:])
for i := MFS; i < 2*MFS; i++ {
t[i] = w[i-MFS]
}
c.geta().getb().GetA().ToBytes(w[:])
for i := 2 * MFS; i < 3*MFS; i++ {
t[i] = w[i-2*MFS]
}
c.geta().getb().GetB().ToBytes(w[:])
for i := 3 * MFS; i < 4*MFS; i++ {
t[i] = w[i-3*MFS]
}
c.getb().geta().GetA().ToBytes(w[:])
for i := 4 * MFS; i < 5*MFS; i++ {
t[i] = w[i-4*MFS]
}
c.getb().geta().GetB().ToBytes(w[:])
for i := 5 * MFS; i < 6*MFS; i++ {
t[i] = w[i-5*MFS]
}
c.getb().getb().GetA().ToBytes(w[:])
for i := 6 * MFS; i < 7*MFS; i++ {
t[i] = w[i-6*MFS]
}
c.getb().getb().GetB().ToBytes(w[:])
for i := 7 * MFS; i < 8*MFS; i++ {
t[i] = w[i-7*MFS]
}
U.GetX().ToBytes(w[:])
for i := 8 * MFS; i < 9*MFS; i++ {
t[i] = w[i-8*MFS]
}
U.GetY().ToBytes(w[:])
for i := 9 * MFS; i < 10*MFS; i++ {
t[i] = w[i-9*MFS]
}
if sha == amcl.SHA256 {
H := amcl.NewHASH256()
H.Process_array(t[:])
h = H.Hash()
}
if sha == amcl.SHA384 {
H := amcl.NewHASH384()
H.Process_array(t[:])
h = H.Hash()
}
if sha == amcl.SHA512 {
H := amcl.NewHASH512()
H.Process_array(t[:])
h = H.Hash()
}
if h == nil {
return nil
}
R := make([]byte, AESKEY)
for i := 0; i < AESKEY; i++ {
R[i] = h[i]
}
return R
}
/* Hash number (optional) and string to coordinate on curve */
func mhashit(sha int, n int32, ID []byte) []byte {
var R []byte
if sha == amcl.SHA256 {
H := amcl.NewHASH256()
if n != 0 {
H.Process_num(n)
}
H.Process_array(ID)
R = H.Hash()
}
if sha == amcl.SHA384 {
H := amcl.NewHASH384()
if n != 0 {
H.Process_num(n)
}
H.Process_array(ID)
R = H.Hash()
}
if sha == amcl.SHA512 {
H := amcl.NewHASH512()
if n != 0 {
H.Process_num(n)
}
H.Process_array(ID)
R = H.Hash()
}
if R == nil {
return nil
}
const RM int = int(MODBYTES)
var W [RM]byte
if sha >= RM {
for i := 0; i < RM; i++ {
W[i] = R[i]
}
} else {
for i := 0; i < sha; i++ {
W[i+RM-sha] = R[i]
}
for i := 0; i < RM-sha; i++ {
W[i] = 0
}
}
return W[:]
}
/* return time in one-day slots since epoch (Unix time / 86400 seconds) */
func Today() int {
now := time.Now()
return int(now.Unix()) / (60 * 1440)
}
/* these next two functions help to implement elligator squared - http://eprint.iacr.org/2014/043 */
/* maps a random u to a point on the curve */
func emap(u *BIG, cb int) *ECP {
var P *ECP
x := NewBIGcopy(u)
p := NewBIGints(Modulus)
x.Mod(p)
for true {
P = NewECPbigint(x, cb)
if !P.Is_infinity() {
break
}
x.inc(1)
x.norm()
}
return P
}
/* returns u derived from P. Random value in range 1 to return value should then be added to u */
func unmap(u *BIG, P *ECP) int {
s := P.GetS()
var R *ECP
r := 0
x := P.GetX()
u.copy(x)
for true {
u.dec(1)
u.norm()
r++
R = NewECPbigint(u, s)
if !R.Is_infinity() {
break
}
}
return r
}
func MPIN_HASH_ID(sha int, ID []byte) []byte {
return mhashit(sha, 0, ID)
}
/* these next two functions implement elligator squared - http://eprint.iacr.org/2014/043 */
/* Elliptic curve point E in format {0x04,x,y} is converted to form {0x0-,u,v} */
/* Note that u and v are indistinguishable from random strings */
func MPIN_ENCODING(rng *amcl.RAND, E []byte) int {
var T [MFS]byte
for i := 0; i < MFS; i++ {
T[i] = E[i+1]
}
u := FromBytes(T[:])
for i := 0; i < MFS; i++ {
T[i] = E[i+MFS+1]
}
v := FromBytes(T[:])
P := NewECPbigs(u, v)
if P.Is_infinity() {
return INVALID_POINT
}
p := NewBIGints(Modulus)
u = Randomnum(p, rng)
su := int(rng.GetByte())
su %= 2
W := emap(u, su)
P.Sub(W)
sv := P.GetS()
rn := unmap(v, P)
m := int(rng.GetByte())
m %= rn
v.inc(m + 1)
E[0] = byte(su + 2*sv)
u.ToBytes(T[:])
for i := 0; i < MFS; i++ {
E[i+1] = T[i]
}
v.ToBytes(T[:])
for i := 0; i < MFS; i++ {
E[i+MFS+1] = T[i]
}
return 0
}
func MPIN_DECODING(D []byte) int {
var T [MFS]byte
if (D[0] & 0x04) != 0 {
return INVALID_POINT
}
for i := 0; i < MFS; i++ {
T[i] = D[i+1]
}
u := FromBytes(T[:])
for i := 0; i < MFS; i++ {
T[i] = D[i+MFS+1]
}
v := FromBytes(T[:])
su := int(D[0] & 1)
sv := int((D[0] >> 1) & 1)
W := emap(u, su)
P := emap(v, sv)
P.Add(W)
u = P.GetX()
v = P.GetY()
D[0] = 0x04
u.ToBytes(T[:])
for i := 0; i < MFS; i++ {
D[i+1] = T[i]
}
v.ToBytes(T[:])
for i := 0; i < MFS; i++ {
D[i+MFS+1] = T[i]
}
return 0
}
/* R=R1+R2 in group G1 */
func MPIN_RECOMBINE_G1(R1 []byte, R2 []byte, R []byte) int {
P := ECP_fromBytes(R1)
Q := ECP_fromBytes(R2)
if P.Is_infinity() || Q.Is_infinity() {
return INVALID_POINT
}
P.Add(Q)
P.ToBytes(R[:], false)
return 0
}
/* W=W1+W2 in group G2 */
func MPIN_RECOMBINE_G2(W1 []byte, W2 []byte, W []byte) int {
P := ECP4_fromBytes(W1)
Q := ECP4_fromBytes(W2)
if P.Is_infinity() || Q.Is_infinity() {
return INVALID_POINT
}
P.Add(Q)
P.ToBytes(W)
return 0
}
/* create random secret S */
func MPIN_RANDOM_GENERATE(rng *amcl.RAND, S []byte) int {
r := NewBIGints(CURVE_Order)
s := Randomnum(r, rng)
s.ToBytes(S)
return 0
}
func MPIN_EXTRACT_PIN(sha int, CID []byte, pin int, TOKEN []byte) int {
return MPIN_EXTRACT_FACTOR(sha, CID, int32(pin)%MAXPIN, PBLEN, TOKEN)
}
/* Extract factor from TOKEN for identity CID */
func MPIN_EXTRACT_FACTOR(sha int, CID []byte, factor int32, facbits int32, TOKEN []byte) int {
P := ECP_fromBytes(TOKEN)
if P.Is_infinity() {
return INVALID_POINT
}
h := mhashit(sha, 0, CID)
R := ECP_mapit(h)
R = R.pinmul(factor, facbits)
P.Sub(R)
P.ToBytes(TOKEN, false)
return 0
}
/* Restore factor to TOKEN for identity CID */
func MPIN_RESTORE_FACTOR(sha int, CID []byte, factor int32, facbits int32, TOKEN []byte) int {
P := ECP_fromBytes(TOKEN)
if P.Is_infinity() {
return INVALID_POINT
}
h := mhashit(sha, 0, CID)
R := ECP_mapit(h)
R = R.pinmul(factor, facbits)
P.Add(R)
P.ToBytes(TOKEN, false)
return 0
}
/* Implement step 2 on client side of MPin protocol */
func MPIN_CLIENT_2(X []byte, Y []byte, SEC []byte) int {
r := NewBIGints(CURVE_Order)
P := ECP_fromBytes(SEC)
if P.Is_infinity() {
return INVALID_POINT
}
px := FromBytes(X)
py := FromBytes(Y)
px.add(py)
px.Mod(r)
P = G1mul(P, px)
P.neg()
P.ToBytes(SEC, false)
return 0
}
/* Implement step 1 on client side of MPin protocol */
func MPIN_CLIENT_1(sha int, date int, CLIENT_ID []byte, rng *amcl.RAND, X []byte, pin int, TOKEN []byte, SEC []byte, xID []byte, xCID []byte, PERMIT []byte) int {
r := NewBIGints(CURVE_Order)
var x *BIG
if rng != nil {
x = Randomnum(r, rng)
x.ToBytes(X)
} else {
x = FromBytes(X)
}
h := mhashit(sha, 0, CLIENT_ID)
P := ECP_mapit(h)
T := ECP_fromBytes(TOKEN)
if T.Is_infinity() {
return INVALID_POINT
}
W := P.pinmul(int32(pin)%MAXPIN, PBLEN)
T.Add(W)
if date != 0 {
W = ECP_fromBytes(PERMIT)
if W.Is_infinity() {
return INVALID_POINT
}
T.Add(W)
h = mhashit(sha, int32(date), h)
W = ECP_mapit(h)
if xID != nil {
P = G1mul(P, x)
P.ToBytes(xID, false)
W = G1mul(W, x)
P.Add(W)
} else {
P.Add(W)
P = G1mul(P, x)
}
if xCID != nil {
P.ToBytes(xCID, false)
}
} else {
if xID != nil {
P = G1mul(P, x)
P.ToBytes(xID, false)
}
}
T.ToBytes(SEC, false)
return 0
}
/* Extract Server Secret SST=S*Q where Q is fixed generator in G2 and S is master secret */
func MPIN_GET_SERVER_SECRET(S []byte, SST []byte) int {
Q := ECP4_generator()
s := FromBytes(S)
Q = G2mul(Q, s)
Q.ToBytes(SST)
return 0
}
/*
W=x*H(G);
if RNG == NULL then X is passed in
if RNG != NULL then X is passed out
if type=0 then W=x*G where G is a point on the curve, else W=x*M(G), where M(G) is the mapping of octet G to a point on the curve
*/
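/* For example, MPIN_GET_CLIENT_SECRET below calls this with typ=1, so the octet G (the hashed client ID) is first mapped to a point on the curve before the multiplication */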
func MPIN_GET_G1_MULTIPLE(rng *amcl.RAND, typ int, X []byte, G []byte, W []byte) int {
var x *BIG
r := NewBIGints(CURVE_Order)
if rng != nil {
x = Randomnum(r, rng)
x.ToBytes(X)
} else {
x = FromBytes(X)
}
var P *ECP
if typ == 0 {
P = ECP_fromBytes(G)
if P.Is_infinity() {
return INVALID_POINT
}
} else {
P = ECP_mapit(G)
}
G1mul(P, x).ToBytes(W, false)
return 0
}
/* Client secret CST=S*H(CID) where CID is client ID and S is master secret */
/* CID is hashed externally */
func MPIN_GET_CLIENT_SECRET(S []byte, CID []byte, CST []byte) int {
return MPIN_GET_G1_MULTIPLE(nil, 1, S, CID, CST)
}
/* Time Permit CTT=S*(date|H(CID)) where S is master secret */
func MPIN_GET_CLIENT_PERMIT(sha, date int, S []byte, CID []byte, CTT []byte) int {
h := mhashit(sha, int32(date), CID)
P := ECP_mapit(h)
s := FromBytes(S)
G1mul(P, s).ToBytes(CTT, false)
return 0
}
/* Outputs H(CID) and H(T|H(CID)) for time permits. If no time permits set HID=HTID */
func MPIN_SERVER_1(sha int, date int, CID []byte, HID []byte, HTID []byte) {
h := mhashit(sha, 0, CID)
P := ECP_mapit(h)
P.ToBytes(HID, false)
if date != 0 {
h = mhashit(sha, int32(date), h)
R := ECP_mapit(h)
P.Add(R)
P.ToBytes(HTID, false)
}
}
/* Implement step 2 of MPin protocol on server side */
func MPIN_SERVER_2(date int, HID []byte, HTID []byte, Y []byte, SST []byte, xID []byte, xCID []byte, mSEC []byte, E []byte, F []byte) int {
// q:=NewBIGints(Modulus)
Q := ECP4_generator()
sQ := ECP4_fromBytes(SST)
if sQ.Is_infinity() {
return INVALID_POINT
}
var R *ECP
if date != 0 {
R = ECP_fromBytes(xCID)
} else {
if xID == nil {
return BAD_PARAMS
}
R = ECP_fromBytes(xID)
}
if R.Is_infinity() {
return INVALID_POINT
}
y := FromBytes(Y)
var P *ECP
if date != 0 {
P = ECP_fromBytes(HTID)
} else {
if HID == nil {
return BAD_PARAMS
}
P = ECP_fromBytes(HID)
}
if P.Is_infinity() {
return INVALID_POINT
}
P = G1mul(P, y)
P.Add(R)
R = ECP_fromBytes(mSEC)
if R.Is_infinity() {
return INVALID_POINT
}
var g *FP24
g = Ate2(Q, R, sQ, P)
g = Fexp(g)
if !g.Isunity() {
if HID != nil && xID != nil && E != nil && F != nil {
g.ToBytes(E)
if date != 0 {
P = ECP_fromBytes(HID)
if P.Is_infinity() {
return INVALID_POINT
}
R = ECP_fromBytes(xID)
if R.Is_infinity() {
return INVALID_POINT
}
P = G1mul(P, y)
P.Add(R)
//P.Affine()
}
g = Ate(Q, P)
g = Fexp(g)
g.ToBytes(F)
}
return BAD_PIN
}
return 0
}
/* Pollard's kangaroo method used to return the PIN error */
func MPIN_KANGAROO(E []byte, F []byte) int {
ge := FP24_fromBytes(E)
gf := FP24_fromBytes(F)
var distance [TS]int
t := NewFP24copy(gf)
var table []*FP24
var i int
s := 1
for m := 0; m < TS; m++ {
distance[m] = s
table = append(table, NewFP24copy(t))
s *= 2
t.usqr()
}
t.one()
dn := 0
for j := 0; j < TRAP; j++ {
i = t.geta().geta().geta().GetA().lastbits(20) % TS
t.Mul(table[i])
dn += distance[i]
}
gf.Copy(t)
gf.conj()
steps := 0
dm := 0
res := 0
for dm-dn < int(MAXPIN) {
steps++
if steps > 4*TRAP {
break
}
i = ge.geta().geta().geta().GetA().lastbits(20) % TS
ge.Mul(table[i])
dm += distance[i]
if ge.Equals(t) {
res = dm - dn
break
}
if ge.Equals(gf) {
res = dn - dm
break
}
}
if steps > 4*TRAP || dm-dn >= int(MAXPIN) {
res = 0
} // Trap Failed - probable invalid token
return int(res)
}
/* Functions to support M-Pin Full */
func MPIN_PRECOMPUTE(TOKEN []byte, CID []byte, G1 []byte, G2 []byte) int {
var P, T *ECP
var g *FP24
T = ECP_fromBytes(TOKEN)
if T.Is_infinity() {
return INVALID_POINT
}
P = ECP_mapit(CID)
Q := ECP4_generator()
g = Ate(Q, T)
g = Fexp(g)
g.ToBytes(G1)
g = Ate(Q, P)
g = Fexp(g)
g.ToBytes(G2)
return 0
}
/* Hash the M-Pin transcript - new */
func MPIN_HASH_ALL(sha int, HID []byte, xID []byte, xCID []byte, SEC []byte, Y []byte, R []byte, W []byte) []byte {
tlen := 0
var T [10*int(MODBYTES) + 4]byte
for i := 0; i < len(HID); i++ {
T[i] = HID[i]
}
tlen += len(HID)
if xCID != nil {
for i := 0; i < len(xCID); i++ {
T[i+tlen] = xCID[i]
}
tlen += len(xCID)
} else {
for i := 0; i < len(xID); i++ {
T[i+tlen] = xID[i]
}
tlen += len(xID)
}
for i := 0; i < len(SEC); i++ {
T[i+tlen] = SEC[i]
}
tlen += len(SEC)
for i := 0; i < len(Y); i++ {
T[i+tlen] = Y[i]
}
tlen += len(Y)
for i := 0; i < len(R); i++ {
T[i+tlen] = R[i]
}
tlen += len(R)
for i := 0; i < len(W); i++ {
T[i+tlen] = W[i]
}
tlen += len(W)
return mhashit(sha, 0, T[:])
}
/* calculate common key on client side */
/* wCID = w.(A+AT) */
func MPIN_CLIENT_KEY(sha int, G1 []byte, G2 []byte, pin int, R []byte, X []byte, H []byte, wCID []byte, CK []byte) int {
g1 := FP24_fromBytes(G1)
g2 := FP24_fromBytes(G2)
z := FromBytes(R)
x := FromBytes(X)
h := FromBytes(H)
W := ECP_fromBytes(wCID)
if W.Is_infinity() {
return INVALID_POINT
}
W = G1mul(W, x)
r := NewBIGints(CURVE_Order)
z.add(h) //new
z.Mod(r)
g2.pinpow(pin, int(PBLEN))
g1.Mul(g2)
c := g1.Compow(z, r)
t := mpin_hash(sha, c, W)
for i := 0; i < AESKEY; i++ {
CK[i] = t[i]
}
return 0
}
/* calculate common key on server side */
/* Z=r.A - no time permits involved */
func MPIN_SERVER_KEY(sha int, Z []byte, SST []byte, W []byte, H []byte, HID []byte, xID []byte, xCID []byte, SK []byte) int {
sQ := ECP4_fromBytes(SST)
if sQ.Is_infinity() {
return INVALID_POINT
}
R := ECP_fromBytes(Z)
if R.Is_infinity() {
return INVALID_POINT
}
A := ECP_fromBytes(HID)
if A.Is_infinity() {
return INVALID_POINT
}
var U *ECP
if xCID != nil {
U = ECP_fromBytes(xCID)
} else {
U = ECP_fromBytes(xID)
}
if U.Is_infinity() {
return INVALID_POINT
}
w := FromBytes(W)
h := FromBytes(H)
A = G1mul(A, h) // new
R.Add(A)
U = G1mul(U, w)
g := Ate(sQ, R)
g = Fexp(g)
c := g.trace()
t := mpin_hash(sha, c, U)
for i := 0; i < AESKEY; i++ {
SK[i] = t[i]
}
return 0
}
/* return time since epoch */
func MPIN_GET_TIME() int {
now := time.Now()
return int(now.Unix())
}
/* Generate Y = H(epoch, xCID/xID) */
func MPIN_GET_Y(sha int, TimeValue int, xCID []byte, Y []byte) {
h := mhashit(sha, int32(TimeValue), xCID)
y := FromBytes(h)
q := NewBIGints(CURVE_Order)
y.Mod(q)
y.ToBytes(Y)
}
/* One pass MPIN Client */
func MPIN_CLIENT(sha int, date int, CLIENT_ID []byte, RNG *amcl.RAND, X []byte, pin int, TOKEN []byte, SEC []byte, xID []byte, xCID []byte, PERMIT []byte, TimeValue int, Y []byte) int {
rtn := 0
var pID []byte
if date == 0 {
pID = xID
} else {
pID = xCID
}
rtn = MPIN_CLIENT_1(sha, date, CLIENT_ID, RNG, X, pin, TOKEN, SEC, xID, xCID, PERMIT)
if rtn != 0 {
return rtn
}
MPIN_GET_Y(sha, TimeValue, pID, Y)
rtn = MPIN_CLIENT_2(X, Y, SEC)
if rtn != 0 {
return rtn
}
return 0
}
/* One pass MPIN Server */
func
|
(sha int, date int, HID []byte, HTID []byte, Y []byte, SST []byte, xID []byte, xCID []byte, SEC []byte, E []byte, F []byte, CID []byte, TimeValue int) int {
rtn := 0
var pID []byte
if date == 0 {
pID = xID
} else {
pID = xCID
}
MPIN_SERVER_1(sha, date, CID, HID, HTID)
MPIN_GET_Y(sha, TimeValue, pID, Y)
rtn = MPIN_SERVER_2(date, HID, HTID, Y, SST, xID, xCID, SEC, E, F)
if rtn != 0 {
return rtn
}
return 0
}
|
MPIN_SERVER
|
hymod.py
|
# -*- coding: utf-8 -*-
'''
Copyright (c) 2015 by Tobias Houska
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: Tobias Houska and Benjamin Manns
:paper: Houska, T., Kraft, P., Chamorro-Chavez, A. and Breuer, L.:
SPOTting Model Parameters Using a Ready-Made Python Package,
PLoS ONE, 10(12), e0145180, doi:10.1371/journal.pone.0145180, 2015.
'''
from numba import jit
def hymod(Precip, PET, cmax, bexp, alpha, Rs, Rq):
"""
See https://www.proc-iahs.net/368/180/2015/piahs-368-180-2015.pdf for a scientific paper:
Quan, Z.; Teng, J.; Sun, W.; Cheng, T. & Zhang, J. (2015): Evaluation of the HYMOD model
for rainfall–runoff simulation using the GLUE method. Remote Sensing and GIS for Hydrology
and Water Resources, 180 - 185, IAHS Publ. 368. DOI: 10.5194/piahs-368-180-2015.
    :param Precip: precipitation time series
    :param PET: potential evapotranspiration time series
    :param cmax: maximum storage capacity of the catchment
    :param bexp: degree of spatial variability of the soil moisture capacity
    :param alpha: factor distributing flow between the quick and slow reservoirs
    :param Rs: recession constant of the slow (groundwater) reservoir
    :param Rq: recession constant of the quick-flow reservoirs
:return: Dataset of water in hymod (has to be calculated in litres)
:rtype: list
"""
# HYMOD PROGRAM IS SIMPLE RAINFALL RUNOFF MODEL
x_loss = 0.0
# Initialize slow tank state
x_slow = 2.3503 / (Rs * 22.5)
x_slow = 0 # --> works ok if calibration data starts with low discharge
# Initialize state(s) of quick tank(s)
x_quick = [0,0,0]
t = 0
output = []
# START PROGRAMMING LOOP WITH DETERMINING RAINFALL - RUNOFF AMOUNTS
while t <= len(Precip)-1:
Pval = Precip[t]
PETval = PET[t]
# Compute excess precipitation and evaporation
ER1, ER2, x_loss = excess(x_loss, cmax, bexp, Pval, PETval)
# Calculate total effective rainfall
ET = ER1 + ER2
# Now partition ER between quick and slow flow reservoirs
UQ = alpha * ET
US = (1 - alpha) * ET
# Route slow flow component with single linear reservoir
x_slow, QS = linres(x_slow, US, Rs)
# Route quick flow component with linear reservoirs
inflow = UQ
for i in range(3):
# Linear reservoir
x_quick[i], outflow = linres(x_quick[i], inflow, Rq)
inflow = outflow
# Compute total flow for timestep
output.append(QS + outflow)
t = t+1
return output
@jit
|
return X**Y
@jit
def linres(x_slow,inflow,Rs):
# Linear reservoir
x_slow = (1 - Rs) * x_slow + (1 - Rs) * inflow
outflow = (Rs / (1 - Rs)) * x_slow
return x_slow,outflow
@jit
def excess(x_loss,cmax,bexp,Pval,PETval):
# this function calculates excess precipitation and evaporation
xn_prev = x_loss
ct_prev = cmax * (1 - power((1 - ((bexp + 1) * (xn_prev) / cmax)), (1 / (bexp + 1))))
# Calculate Effective rainfall 1
ER1 = max((Pval - cmax + ct_prev), 0.0)
Pval = Pval - ER1
dummy = min(((ct_prev + Pval) / cmax), 1)
xn = (cmax / (bexp + 1)) * (1 - power((1 - dummy), (bexp + 1)))
# Calculate Effective rainfall 2
ER2 = max(Pval - (xn - xn_prev), 0)
# Alternative approach
evap = (1 - (((cmax / (bexp + 1)) - xn) / (cmax / (bexp + 1)))) * PETval # actual ET is linearly related to the soil moisture state
xn = max(xn - evap, 0) # update state
return ER1,ER2,xn
|
def power(X, Y):
    X = abs(X)  # Needed to capture invalid overflow with negative values
|
bfb_admin_script.js
|
!function(t){var e={};function o(n){if(e[n])return e[n].exports;var i=e[n]={i:n,l:!1,exports:{}};return t[n].call(i.exports,i,i.exports,o),i.l=!0,i.exports}o.m=t,o.c=e,o.d=function(t,e,n){o.o(t,e)||Object.defineProperty(t,e,{enumerable:!0,get:n})},o.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},o.t=function(t,e){if(1&e&&(t=o(t)),8&e)return t;if(4&e&&"object"==typeof t&&t&&t.__esModule)return t;var n=Object.create(null);if(o.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:t}),2&e&&"string"!=typeof t)for(var i in t)o.d(n,i,function(e){return t[e]}.bind(null,i));return n},o.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return o.d(e,"a",e),e},o.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},o.p="",o(o.s=150)}({0:function(t,e){t.exports=jQuery},150:function(t,e,o){"use strict";(function(t){var e=o(151),n=document.createEvent("Event"),i=0;n.initEvent("et_fb_before_disabling_bfb",!0,!0);var a,r=t("#et_pb_layout").addClass("et-drag-disabled");function s(){a&&clearTimeout(a),a=setTimeout((function(){var e=t("#et_pb_layout"),o=t("#et-fb-app"),n=t(".et_pb_toggle_builder_wrapper.et_pb_builder_is_used"),i=t("#et_pb_toggle_builder"),a=t("#et_pb_fb_cta"),r=t(".et-fb-button-group--responsive-mode"),s=t(".et-fb-button-group--builder-mode"),_=t(".et-fb-page-settings-bar__column--right"),l=(n.outerWidth()||0)-(parseFloat(i.outerWidth()||0)+parseFloat(a.outerWidth()||0)+parseFloat(a.css("marginLeft")))+((r.length&&r.is(":visible")?r.outerWidth():0)+(s.length&&s.is(":visible")?s.outerWidth()+10:0)+(_.length?_.outerWidth():0))<=30;e.toggleClass("et_pb_layout--compact",l),o.toggleClass("et-fb-app--compact",l)}),50)}function _(e){t("#title").prop("required")&&t("#title").removeProp("required"),e.hasClass("disabled")?i<=20?(i++,setTimeout((function(){_(e)}),1e3)):t(".et-bfb-page-preloading").remove():e.trigger("click")}function l(e){void 0!==window.tinyMCE&&window.tinyMCE.get("content")&&!window.tinyMCE.get("content").isHidden()?window.tinyMCE.get("content").setContent(e,{format:"html"}):t("#content").val(e)}t(window).on("et_fb_init_app_after",(function(){r.removeClass("et-drag-disabled")})),t(window).on("load",(function(){setTimeout((function(){var e=t("#et_pb_toggle_builder"),o=t("#et_pb_fb_cta");t(".et_pb_toggle_builder_wrapper").css("opacity",""),e.addClass("et_pb_ready"),e.hasClass("et_pb_builder_is_used")&&o.addClass("et_pb_ready")}),250);var e=function(){t(this).find(".postbox").removeClass("first-visible"),t(this).is("#normal-sortables")&&t(this).find(".postbox:visible").first().addClass("first-visible")};t(".meta-box-sortables").sortable("option","update",e),t("#screen-options-wrap").on("change",".hide-postbox-tog",(function(){t(".meta-box-sortables").each(e)})),t(".handle-order-higher, .handle-order-lower").on("click",(function(){t(".meta-box-sortables").each(e)})),t(".meta-box-sortables").on("sortstart",(function(){t("body").addClass("et-bfb--metabox-dragged")})),t(".meta-box-sortables").on("sortstop",(function(){t("body").removeClass("et-bfb--metabox-dragged"),window.dispatchEvent(new CustomEvent("ETBFBMetaboxSortStopped",{}))}))})),t(window).on("et_fb_disabling_bfb_confirmed",(function(){var e=t("#et_pb_old_content"),o=t("#et_pb_use_builder"),n=t("#minor-publishing-actions #save-post").length>0?t("#minor-publishing-actions #save-post"):t("#publishing-action 
#publish");l(e.val()),e.val(""),o.val("off"),_(n)})),t(window).on("et_fb_init_app_after resize et_fb_toolbar_change",s),t(e.top_window).on("et-preview-animation-complete et-bfb-modal-snapped",s),t("#et_pb_toggle_builder").on("click",(function(e){e.preventDefault();var o=t(this),i=t("#et_pb_use_builder"),a=function(){var e;e=void 0!==window.tinyMCE&&window.tinyMCE.get("content")&&!window.tinyMCE.get("content").isHidden()?window.tinyMCE.get("content").getContent():t("#content").val();return e.trim()}(),r=t("#minor-publishing-actions #save-post").length>0?t("#minor-publishing-actions #save-post"):t("#publishing-action #publish"),s=t("#et_pb_old_content"),d=t("#titlediv #title").length>0?t("#titlediv #title").val():"";if(o.hasClass("et_pb_builder_is_used"))window.dispatchEvent(n);else if(i.val("on"),""!==a&&(s.val(a),a.indexOf("[et_pb_section")<0&&"skip"!==et_bfb_options.skip_default_content_adding&&(a='[et_pb_section][et_pb_row][et_pb_column type="'.concat(et_bfb_options.default_initial_column_type,'"][').concat(et_bfb_options.default_initial_text_module,"]").concat(a,"[/").concat(et_bfb_options.default_initial_text_module,"][/et_pb_column][/et_pb_row][/et_pb_section]")),l(a)),t("body").append('<div class="et-bfb-page-preloading"></div>'),""!==a||""!==d)_(r);else{var b=t("#post_ID").length>0?t("#post_ID").val():0;t.ajax({type:"POST",url:et_bfb_options.ajaxurl,data:{action:"et_builder_activate_bfb_auto_draft",et_enable_bfb_nonce:et_bfb_options.et_enable_bfb_nonce,et_post_id:b},complete:function(){_(r)}})}}))}).call(this,o(0))},151:function(t,e,o){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.top_window=e.is_iframe=void 0;var n=window;e.top_window=n;var i,a=!1;e.is_iframe=a;try{i=!!window.top.document&&window.top}catch(t){i=!1}i&&i.__Cypress__?window.parent===i?(e.top_window=n=window,e.is_iframe=a=!1):(e.top_window=n=window.parent,e.is_iframe=a=!0):i&&(e.top_window=n=i,e.is_iframe=a=i!==window.self)}});
|
||
index.tsx
|
/*!
* Copyright (c) Microsoft. All rights reserved.
* Licensed under the MIT license. See LICENSE file in the project.
*/
import cx from 'classnames'
import { memo, useState } from 'react'
import styles from './index.module.scss'
import type { StandardFC } from '~types/StandardFC'
import { Container } from 'react-bootstrap'
import { Service, ServiceAnswerInput, ServiceAnswer } from '@cbosuite/schema/dist/client-types'
import type { Contact } from '@cbosuite/schema/dist/client-types'
import { empty, noop } from '~utils/noop'
import { useFormFieldManager } from './FormFieldManager'
import { ContactList } from './ContactList'
import { FieldViewList } from './FieldViewList'
import { ActionRow } from './ActionRow'
import { ContactForm } from './ContactForm'
import { useContactSynchronization, useSubmitHandler } from './hooks'
import { ServiceHeader } from './ServiceHeader'
interface FormGeneratorProps {
service: Service
previewMode?: boolean
editMode?: boolean
record?: ServiceAnswer
onAddNewClient?: () => void
onQuickActions?: () => void
onSubmit?: (values: ServiceAnswerInput) => void
}
export const FormGenerator: StandardFC<FormGeneratorProps> = memo(function FormGenerator({
service,
previewMode = true,
editMode = false,
record,
onSubmit = noop,
onAddNewClient = noop,
	// Not nooped because its truthiness is used to conditionally render the quickActions button
onQuickActions
}) {
const [contacts, setContacts] = useState<Contact[]>(empty)
const [isSubmitEnabled, setSubmitEnabled] = useState(false)
const mgr = useFormFieldManager(service, record)
|
const handleSubmit = useSubmitHandler(mgr, contacts, onSubmit)
useContactSynchronization(mgr, record, editMode, setContacts)
const isContactFormShown = !editMode && service?.contactFormEnabled
return (
<div
className={cx({
[styles.previewFormWrapper]: !editMode
})}
>
<Container>
<ServiceHeader service={service} />
{isContactFormShown && (
<ContactForm
mgr={mgr}
previewMode={previewMode}
onAddNewClient={onAddNewClient}
onChange={setSubmitEnabled}
onContactsChange={setContacts}
/>
)}
<ContactList contacts={contacts} />
<FieldViewList
service={service}
mgr={mgr}
editMode={editMode}
previewMode={previewMode}
onChange={setSubmitEnabled}
/>
{!previewMode && (
<ActionRow
isSubmitEnabled={isSubmitEnabled}
onSubmit={handleSubmit}
onQuickActions={onQuickActions}
/>
)}
</Container>
</div>
)
})
| |
TweenLayerContextMenu.tsx
|
import React, { useEffect, ReactElement, useState } from 'react';
import MenuTweenAddWiggle from './MenuTweenAddWiggle';
interface TweenLayerContextMenuProps {
setTweenLayerContextMenu(TweenLayerContextMenu: any[] | null): void;
|
const TweenLayerContextMenu = (props: TweenLayerContextMenuProps): ReactElement => {
const { setTweenLayerContextMenu } = props;
const [addWiggle, setAddWiggle] = useState(undefined);
useEffect(() => {
if (addWiggle) {
setTweenLayerContextMenu([
addWiggle
]);
}
}, [addWiggle]);
return (
<>
<MenuTweenAddWiggle
setAddWiggle={setAddWiggle} />
</>
);
}
export default TweenLayerContextMenu;
|
}
|
bdma.rs
|
#![macro_use]
use core::sync::atomic::{fence, Ordering};
use core::task::Waker;
use embassy::interrupt::{Interrupt, InterruptExt};
use embassy::waitqueue::AtomicWaker;
use crate::_generated::BDMA_CHANNEL_COUNT;
use crate::dma::Request;
use crate::pac;
use crate::pac::bdma::vals;
use super::{Word, WordSize};
impl From<WordSize> for vals::Size {
fn from(raw: WordSize) -> Self {
match raw {
WordSize::OneByte => Self::BITS8,
WordSize::TwoBytes => Self::BITS16,
WordSize::FourBytes => Self::BITS32,
}
}
}
struct State {
ch_wakers: [AtomicWaker; BDMA_CHANNEL_COUNT],
}
impl State {
const fn new() -> Self {
const AW: AtomicWaker = AtomicWaker::new();
Self {
ch_wakers: [AW; BDMA_CHANNEL_COUNT],
}
}
}
static STATE: State = State::new();
/// safety: must be called only once
pub(crate) unsafe fn init() {
foreach_interrupt! {
($peri:ident, bdma, $block:ident, $signal_name:ident, $irq:ident) => {
crate::interrupt::$irq::steal().enable();
};
}
crate::_generated::init_bdma();
}
foreach_dma_channel! {
($channel_peri:ident, BDMA1, bdma, $channel_num:expr, $index:expr, $dmamux:tt) => {
// BDMA1 in H7 doesn't use DMAMUX, which breaks
};
($channel_peri:ident, $dma_peri:ident, bdma, $channel_num:expr, $index:expr, $dmamux:tt) => {
impl crate::dma::sealed::Channel for crate::peripherals::$channel_peri {
unsafe fn start_write<W: Word>(&mut self, _request: Request, buf: *const[W], reg_addr: *mut W) {
let (ptr, len) = super::slice_ptr_parts(buf);
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
#[cfg(any(bdma_v2, dmamux))]
_request,
vals::Dir::FROMMEMORY,
reg_addr as *const u32,
ptr as *mut u32,
len,
true,
vals::Size::from(W::bits()),
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_REGS,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_CH_NUM,
);
}
unsafe fn start_write_repeated<W: Word>(&mut self, _request: Request, repeated: W, count: usize, reg_addr: *mut W) {
let buf = [repeated];
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
#[cfg(any(bdma_v2, dmamux))]
_request,
vals::Dir::FROMMEMORY,
reg_addr as *const u32,
buf.as_ptr() as *mut u32,
count,
false,
vals::Size::from(W::bits()),
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_REGS,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_CH_NUM,
)
}
unsafe fn start_read<W: Word>(&mut self, _request: Request, reg_addr: *const W, buf: *mut [W]) {
let (ptr, len) = super::slice_ptr_parts_mut(buf);
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
#[cfg(any(bdma_v2, dmamux))]
_request,
vals::Dir::FROMPERIPHERAL,
reg_addr as *const u32,
ptr as *mut u32,
len,
true,
vals::Size::from(W::bits()),
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_REGS,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_CH_NUM,
);
}
fn request_stop(&mut self){
unsafe {low_level_api::request_stop(pac::$dma_peri, $channel_num);}
}
fn is_running(&self) -> bool {
unsafe {low_level_api::is_running(pac::$dma_peri, $channel_num)}
}
fn remaining_transfers(&mut self) -> u16 {
unsafe {low_level_api::get_remaining_transfers(pac::$dma_peri, $channel_num)}
}
fn set_waker(&mut self, waker: &Waker) {
unsafe { low_level_api::set_waker($index, waker) }
}
fn on_irq() {
unsafe {
low_level_api::on_irq_inner(pac::$dma_peri, $channel_num, $index);
}
}
}
|
impl crate::dma::Channel for crate::peripherals::$channel_peri {}
};
}
mod low_level_api {
use super::*;
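    /// Configure and enable a BDMA channel for a single transfer in the given direction.
    /// Safety: the caller must keep the peripheral register and memory buffer valid for the
    /// whole duration of the transfer.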
pub unsafe fn start_transfer(
dma: pac::bdma::Dma,
channel_number: u8,
#[cfg(any(bdma_v2, dmamux))] request: Request,
dir: vals::Dir,
peri_addr: *const u32,
mem_addr: *mut u32,
mem_len: usize,
incr_mem: bool,
data_size: vals::Size,
#[cfg(dmamux)] dmamux_regs: pac::dmamux::Dmamux,
#[cfg(dmamux)] dmamux_ch_num: u8,
) {
let ch = dma.ch(channel_number as _);
reset_status(dma, channel_number);
#[cfg(dmamux)]
super::super::dmamux::configure_dmamux(dmamux_regs, dmamux_ch_num, request);
#[cfg(bdma_v2)]
critical_section::with(|_| {
dma.cselr()
.modify(|w| w.set_cs(channel_number as _, request))
});
// "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::SeqCst);
ch.par().write_value(peri_addr as u32);
ch.mar().write_value(mem_addr as u32);
ch.ndtr().write(|w| w.set_ndt(mem_len as u16));
ch.cr().write(|w| {
w.set_psize(data_size);
w.set_msize(data_size);
if incr_mem {
w.set_minc(vals::Inc::ENABLED);
} else {
w.set_minc(vals::Inc::DISABLED);
}
w.set_dir(dir);
w.set_teie(true);
w.set_tcie(true);
w.set_en(true);
});
}
pub unsafe fn request_stop(dma: pac::bdma::Dma, channel_number: u8) {
reset_status(dma, channel_number);
let ch = dma.ch(channel_number as _);
// Disable the channel and interrupts with the default value.
ch.cr().write(|_| ());
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
}
pub unsafe fn is_running(dma: pac::bdma::Dma, ch: u8) -> bool {
let ch = dma.ch(ch as _);
ch.cr().read().en()
}
/// Gets the total remaining transfers for the channel
/// Note: this will be zero for transfers that completed without cancellation.
pub unsafe fn get_remaining_transfers(dma: pac::bdma::Dma, ch: u8) -> u16 {
// get a handle on the channel itself
let ch = dma.ch(ch as _);
// read the remaining transfer count. If this is zero, the transfer completed fully.
ch.ndtr().read().ndt()
}
/// Sets the waker for the specified DMA channel
pub unsafe fn set_waker(state_number: usize, waker: &Waker) {
STATE.ch_wakers[state_number].register(waker);
}
pub unsafe fn reset_status(dma: pac::bdma::Dma, channel_number: u8) {
dma.ifcr().write(|w| {
w.set_tcif(channel_number as _, true);
w.set_teif(channel_number as _, true);
});
}
/// Safety: Must be called with a matching set of parameters for a valid dma channel
pub unsafe fn on_irq_inner(dma: pac::bdma::Dma, channel_num: u8, index: u8) {
let channel_num = channel_num as usize;
let index = index as usize;
let isr = dma.isr().read();
let cr = dma.ch(channel_num).cr();
if isr.teif(channel_num) {
panic!(
"DMA: error on BDMA@{:08x} channel {}",
dma.0 as u32, channel_num
);
}
if isr.tcif(channel_num) && cr.read().tcie() {
cr.write(|_| ()); // Disable channel interrupts with the default value.
STATE.ch_wakers[index].wake();
}
}
}
| |
hooks_integration_test.go
|
package integration
import (
"fmt"
"io/ioutil"
"path/filepath"
"runtime"
"strings"
"testing"
"github.com/buildkite/agent/bootstrap/shell"
"github.com/buildkite/bintest"
)
func TestEnvironmentVariablesPassBetweenHooks(t *testing.T) {
t.Parallel()
tester, err := NewBootstrapTester()
if err != nil {
t.Fatal(err)
}
defer tester.Close()
if runtime.GOOS != "windows" {
var script = []string{
"#!/bin/bash",
"export LLAMAS_ROCK=absolutely",
}
if err := ioutil.WriteFile(filepath.Join(tester.HooksDir, "environment"),
[]byte(strings.Join(script, "\n")), 0700); err != nil {
t.Fatal(err)
}
} else {
var script = []string{
"@echo off",
"set LLAMAS_ROCK=absolutely",
}
if err := ioutil.WriteFile(filepath.Join(tester.HooksDir, "environment.bat"),
[]byte(strings.Join(script, "\r\n")), 0700); err != nil {
t.Fatal(err)
}
}
git := tester.MustMock(t, "git").PassthroughToLocalCommand().Before(func(i bintest.Invocation) error {
if err := bintest.ExpectEnv(t, i.Env, `MY_CUSTOM_ENV=1`, `LLAMAS_ROCK=absolutely`); err != nil {
return err
}
return nil
})
git.Expect().AtLeastOnce().WithAnyArguments()
tester.ExpectGlobalHook("command").Once().AndExitWith(0).AndCallFunc(func(c *bintest.Call) {
if err := bintest.ExpectEnv(t, c.Env, `MY_CUSTOM_ENV=1`, `LLAMAS_ROCK=absolutely`); err != nil {
fmt.Fprintf(c.Stderr, "%v\n", err)
c.Exit(1)
}
c.Exit(0)
})
tester.RunAndCheck(t, "MY_CUSTOM_ENV=1")
}
func TestDirectoryPassesBetweenHooks(t *testing.T)
|
func TestCheckingOutFiresCorrectHooks(t *testing.T) {
t.Parallel()
tester, err := NewBootstrapTester()
if err != nil {
t.Fatal(err)
}
defer tester.Close()
tester.ExpectGlobalHook("environment").Once()
tester.ExpectLocalHook("environment").NotCalled()
tester.ExpectGlobalHook("pre-checkout").Once()
tester.ExpectLocalHook("pre-checkout").NotCalled()
tester.ExpectGlobalHook("post-checkout").Once()
tester.ExpectLocalHook("post-checkout").Once()
tester.ExpectGlobalHook("pre-command").Once()
tester.ExpectLocalHook("pre-command").Once()
tester.ExpectGlobalHook("command").Once().AndExitWith(0).AndWriteToStdout("Success!\n")
tester.ExpectGlobalHook("post-command").Once()
tester.ExpectLocalHook("post-command").Once()
tester.ExpectGlobalHook("pre-artifact").NotCalled()
tester.ExpectLocalHook("pre-artifact").NotCalled()
tester.ExpectGlobalHook("post-artifact").NotCalled()
tester.ExpectLocalHook("post-artifact").NotCalled()
tester.ExpectGlobalHook("pre-exit").Once()
tester.ExpectLocalHook("pre-exit").Once()
tester.RunAndCheck(t)
}
func TestReplacingCheckoutHook(t *testing.T) {
t.Parallel()
tester, err := NewBootstrapTester()
if err != nil {
t.Fatal(err)
}
defer tester.Close()
// run a checkout in our checkout hook, otherwise we won't have local hooks to run
tester.ExpectGlobalHook("checkout").Once().AndCallFunc(func(c *bintest.Call) {
out, err := tester.Repo.Execute("clone", "-v", "--", tester.Repo.Path, c.GetEnv(`BUILDKITE_BUILD_CHECKOUT_PATH`))
fmt.Fprint(c.Stderr, out)
if err != nil {
c.Exit(1)
return
}
c.Exit(0)
})
tester.ExpectGlobalHook("pre-checkout").Once()
tester.ExpectGlobalHook("post-checkout").Once()
tester.ExpectLocalHook("post-checkout").Once()
tester.ExpectGlobalHook("pre-exit").Once()
tester.ExpectLocalHook("pre-exit").Once()
tester.RunAndCheck(t)
}
func TestReplacingGlobalCommandHook(t *testing.T) {
t.Parallel()
tester, err := NewBootstrapTester()
if err != nil {
t.Fatal(err)
}
defer tester.Close()
tester.ExpectGlobalHook("command").Once().AndExitWith(0)
tester.ExpectGlobalHook("environment").Once()
tester.ExpectGlobalHook("pre-checkout").Once()
tester.ExpectGlobalHook("post-checkout").Once()
tester.ExpectLocalHook("post-checkout").Once()
tester.ExpectGlobalHook("pre-command").Once()
tester.ExpectLocalHook("pre-command").Once()
tester.ExpectGlobalHook("post-command").Once()
tester.ExpectLocalHook("post-command").Once()
tester.ExpectGlobalHook("pre-exit").Once()
tester.ExpectLocalHook("pre-exit").Once()
tester.RunAndCheck(t)
}
func TestReplacingLocalCommandHook(t *testing.T) {
t.Parallel()
tester, err := NewBootstrapTester()
if err != nil {
t.Fatal(err)
}
defer tester.Close()
tester.ExpectLocalHook("command").Once().AndExitWith(0)
tester.ExpectGlobalHook("command").NotCalled()
tester.ExpectGlobalHook("environment").Once()
tester.ExpectGlobalHook("pre-checkout").Once()
tester.ExpectGlobalHook("post-checkout").Once()
tester.ExpectLocalHook("post-checkout").Once()
tester.ExpectGlobalHook("pre-command").Once()
tester.ExpectLocalHook("pre-command").Once()
tester.ExpectGlobalHook("post-command").Once()
tester.ExpectLocalHook("post-command").Once()
tester.ExpectGlobalHook("pre-exit").Once()
tester.ExpectLocalHook("pre-exit").Once()
tester.RunAndCheck(t)
}
func TestPreExitHooksFireAfterCommandFailures(t *testing.T) {
t.Parallel()
tester, err := NewBootstrapTester()
if err != nil {
t.Fatal(err)
}
defer tester.Close()
tester.ExpectGlobalHook("pre-exit").Once()
tester.ExpectLocalHook("pre-exit").Once()
if err = tester.Run(t, "BUILDKITE_COMMAND=false"); err == nil {
t.Fatal("Expected the bootstrap to fail")
}
tester.CheckMocks(t)
}
func TestPreExitHooksFireAfterHookFailures(t *testing.T) {
t.Parallel()
var testCases = []struct {
failingHook string
expectGlobalPreExit bool
expectLocalPreExit bool
expectCheckout bool
expectArtifacts bool
}{
{"environment", true, false, false, false},
{"pre-checkout", true, false, false, false},
{"post-checkout", true, true, true, true},
{"checkout", true, false, false, false},
{"pre-command", true, true, true, true},
{"command", true, true, true, true},
{"post-command", true, true, true, true},
{"pre-artifact", true, true, true, false},
{"post-artifact", true, true, true, true},
}
for _, tc := range testCases {
t.Run(tc.failingHook, func(t *testing.T) {
t.Parallel()
tester, err := NewBootstrapTester()
if err != nil {
t.Fatal(err)
}
defer tester.Close()
agent := tester.MustMock(t, "buildkite-agent")
tester.ExpectGlobalHook(tc.failingHook).
Once().
AndWriteToStderr("Blargh\n").
AndExitWith(1)
if tc.expectCheckout {
agent.
Expect("meta-data", "exists", "buildkite:git:commit").
Once().
AndExitWith(0)
}
if tc.expectGlobalPreExit {
tester.ExpectGlobalHook("pre-exit").Once()
} else {
tester.ExpectGlobalHook("pre-exit").NotCalled()
}
if tc.expectLocalPreExit {
tester.ExpectLocalHook("pre-exit").Once()
} else {
tester.ExpectGlobalHook("pre-exit").NotCalled()
}
if tc.expectArtifacts {
agent.
Expect("artifact", "upload", "test.txt").
AndExitWith(0)
}
if err = tester.Run(t, "BUILDKITE_ARTIFACT_PATHS=test.txt"); err == nil {
t.Fatal("Expected the bootstrap to fail")
}
tester.CheckMocks(t)
})
}
}
func TestNoLocalHooksCalledWhenConfigSet(t *testing.T) {
t.Parallel()
tester, err := NewBootstrapTester()
if err != nil {
t.Fatal(err)
}
defer tester.Close()
tester.Env = append(tester.Env, "BUILDKITE_NO_LOCAL_HOOKS=true")
tester.ExpectGlobalHook("pre-command").Once()
tester.ExpectLocalHook("pre-command").NotCalled()
if err = tester.Run(t, "BUILDKITE_COMMAND=true"); err == nil {
t.Fatal("Expected the bootstrap to fail due to local hook being called")
}
tester.CheckMocks(t)
}
func TestExitCodesPropagateOutFromGlobalHooks(t *testing.T) {
t.Parallel()
for _, hook := range []string{
"environment",
"pre-checkout",
"post-checkout",
"checkout",
"pre-command",
"command",
"post-command",
"pre-exit",
// "pre-artifact",
// "post-artifact",
} {
t.Run(hook, func(t *testing.T) {
tester, err := NewBootstrapTester()
if err != nil {
t.Fatal(err)
}
defer tester.Close()
tester.ExpectGlobalHook(hook).Once().AndExitWith(5)
err = tester.Run(t)
if err == nil {
t.Fatalf("Expected the bootstrap to fail because %s hook exits", hook)
}
exitCode := shell.GetExitCode(err)
if exitCode != 5 {
t.Fatalf("Expected an exit code of %d, got %d", 5, exitCode)
}
tester.CheckMocks(t)
})
}
}
|
{
t.Parallel()
tester, err := NewBootstrapTester()
if err != nil {
t.Fatal(err)
}
defer tester.Close()
if runtime.GOOS == "windows" {
t.Skip("Not implemented for windows yet")
}
var script = []string{
"#!/bin/bash",
"mkdir -p ./mysubdir",
"export MY_CUSTOM_SUBDIR=$(cd mysubdir; pwd)",
"cd ./mysubdir",
}
if err := ioutil.WriteFile(filepath.Join(tester.HooksDir, "pre-command"), []byte(strings.Join(script, "\n")), 0700); err != nil {
t.Fatal(err)
}
tester.ExpectGlobalHook("command").Once().AndExitWith(0).AndCallFunc(func(c *bintest.Call) {
if c.GetEnv("MY_CUSTOM_SUBDIR") != c.Dir {
fmt.Fprintf(c.Stderr, "Expected current dir to be %q, got %q\n", c.GetEnv("MY_CUSTOM_SUBDIR"), c.Dir)
c.Exit(1)
}
c.Exit(0)
})
tester.RunAndCheck(t, "MY_CUSTOM_ENV=1")
}
|
tag_bindings.list_tag_bindings.js
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
// limitations under the License.
'use strict';
function main(parent) {
// [START cloudresourcemanager_v3_generated_TagBindings_ListTagBindings_async]
/**
* TODO(developer): Uncomment these variables before running the sample.
*/
/**
* Required. The full resource name of a resource for which you want to list existing
* TagBindings.
* E.g. "//cloudresourcemanager.googleapis.com/projects/123"
*/
// const parent = 'abc123'
/**
* Optional. The maximum number of TagBindings to return in the response. The server
* allows a maximum of 300 TagBindings to return. If unspecified, the server
* will use 100 as the default.
*/
// const pageSize = 1234
/**
* Optional. A pagination token returned from a previous call to `ListTagBindings`
* that indicates where this listing should continue from.
*/
// const pageToken = 'abc123'
// Imports the Resourcemanager library
const {TagBindingsClient} = require('@google-cloud/resource-manager').v3;
// Instantiates a client
const resourcemanagerClient = new TagBindingsClient();
async function listTagBindings() {
// Construct request
const request = {
parent,
};
// Run request
const iterable = await resourcemanagerClient.listTagBindingsAsync(request);
for await (const response of iterable) {
console.log(response);
}
}
listTagBindings();
// [END cloudresourcemanager_v3_generated_TagBindings_ListTagBindings_async]
}
process.on('unhandledRejection', err => {
console.error(err.message);
process.exitCode = 1;
});
main(...process.argv.slice(2));
|
// See the License for the specific language governing permissions and
|
logger.py
|
# baleen.utils.logger
# Logging utility for Baleen
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Mon Sep 22 15:47:34 2014 -0400
#
# Copyright (C) 2014 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: logger.py [caaaaca] [email protected] $
"""
Logging utility for Baleen
"""
##########################################################################
## Imports
##########################################################################
import logging
import getpass
import warnings
import logging.config
from baleen.config import settings
from baleen.utils.timez import COMMON_DATETIME
##########################################################################
## Logging configuration
##########################################################################
configuration = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(name)s %(levelname)s [%(asctime)s] -- %(message)s',
'datefmt': COMMON_DATETIME,
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'level': 'WARNING',
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
'logfile': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': settings.logfile,
            'maxBytes': 536870912,  # 512 MB
'formatter': 'simple',
},
'mongolog': {
'level': 'INFO',
'class': 'baleen.utils.mongolog.MongoHandler',
}
},
'loggers': {
'baleen': {
'level': settings.loglevel,
'handlers': ['logfile'],
            'propagate': True,
},
'baleen.ingest': {
'level': 'INFO',
'handlers': ['logfile', 'mongolog'],
'propagate': False,
}
},
}
logging.config.dictConfigClass(configuration).configure()
if not settings.debug: logging.captureWarnings(True)
##########################################################################
## Logger utility
##########################################################################
class WrappedLogger(object):
"""
Wraps the Python logging module's logger object to ensure that all baleen
logging happens with the correct configuration as well as any extra
information that might be required by the log file (for example, the user
on the machine, hostname, IP address lookup, etc).
Subclasses must specify their logger as a class variable so all instances
have access to the same logging object.
"""
logger = None
def __init__(self, **kwargs):
self.raise_warnings = kwargs.pop('raise_warnings', settings.debug)
self.logger = kwargs.pop('logger', self.logger)
if not self.logger or not hasattr(self.logger, 'log'):
raise TypeError(
"Subclasses must specify a logger, not {}"
.format(type(self.logger))
)
self.extras = kwargs
def log(self, level, message, *args, **kwargs):
"""
This is the primary method to override to ensure logging with extra
options gets correctly specified.
"""
extra = self.extras.copy()
extra.update(kwargs.pop('extra', {}))
kwargs['extra'] = extra
self.logger.log(level, message, *args, **kwargs)
def debug(self, message, *args, **kwargs):
return self.log(logging.DEBUG, message, *args, **kwargs)
def info(self, message, *args, **kwargs):
return self.log(logging.INFO, message, *args, **kwargs)
def warning(self, message, *args, **kwargs):
"""
Specialized warnings system. If a warning subclass is passed into
        the keyword arguments and raise_warnings is True, the warning will
be passed to the warnings module.
"""
warncls = kwargs.pop('warning', None)
if warncls and self.raise_warnings:
warnings.warn(message, warncls)
return self.log(logging.WARNING, message, *args, **kwargs)
# Alias warn to warning
warn = warning
def error(self, message, *args, **kwargs):
return self.log(logging.ERROR, message, *args, **kwargs)
def critical(self, message, *args, **kwargs):
return self.log(logging.CRITICAL, message, *args, **kwargs)
##########################################################################
## The Ingestion Logger Class
##########################################################################
class IngestLogger(WrappedLogger):
"""
Performs logging for the baleen process with the log options above.
"""
logger = logging.getLogger('baleen.ingest')
def __init__(self, **kwargs):
self._user = kwargs.pop('user', None)
super(IngestLogger, self).__init__(**kwargs)
@property
def user(self):
if not self._user:
self._user = getpass.getuser()
return self._user
def log(self, level, message, *args, **kwargs):
"""
Provide current user as extra context to the logger
"""
extra = kwargs.pop('extra', {})
extra.update({
'user': self.user
})
kwargs['extra'] = extra
super(IngestLogger, self).log(level, message, *args, **kwargs)
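    # Example usage (hypothetical values):
    #   IngestLogger().info("Ingested %d feeds", feed_count)
    # logs through the 'baleen.ingest' logger with the current user attached as extra context.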
|
##########################################################################
## Logging Mixin
##########################################################################
class LoggingMixin(object):
"""
Mix in to classes that need their own logging object!
"""
@property
def logger(self):
"""
Instantiates and returns a IngestLogger instance
"""
if not hasattr(self, '_logger') or not self._logger:
self._logger = IngestLogger()
return self._logger
| |
protonet.py
|
import torch.nn as nn
import torch
class ProtoNetBig(nn.Module):
def __init__(self, x_dim=23433, hid_dim=[2000, 1000, 500, 250], z_dim=100):
super(ProtoNetBig, self).__init__()
self.linear0 = nn.Linear(x_dim, hid_dim[0])
self.bn1 = nn.BatchNorm1d(hid_dim[0])
self.linear1 = nn.Linear(hid_dim[0], hid_dim[1])
self.bn2 = nn.BatchNorm1d(hid_dim[1])
self.linear2 = nn.Linear(hid_dim[1] + hid_dim[0], hid_dim[2])
self.bn3 = nn.BatchNorm1d(hid_dim[2])
self.linear3 = nn.Linear(hid_dim[1] + hid_dim[0] + hid_dim[2], hid_dim[3])
self.bn4 = nn.BatchNorm1d(hid_dim[3])
self.linear4 = nn.Linear(hid_dim[1] + hid_dim[0] + hid_dim[2] + hid_dim[3], z_dim)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(inplace=True)
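    # The forward pass below is densely connected: each hidden layer receives the concatenation
    # of all previous hidden activations before the final linear projection to the z_dim embedding.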
def
|
(self, x):
out = self.dropout(self.bn1(self.relu(self.linear0(x))))
out1 = self.dropout(self.bn2(self.relu(self.linear1(out))))
out2 = torch.cat([out, out1], 1)
out3 = self.dropout(self.bn3(self.relu(self.linear2(out2))))
out4 = torch.cat([out, out1, out3], 1)
out5 = self.dropout(self.bn4(self.relu(self.linear3(out4))))
out6 = torch.cat([out, out1, out3, out5], 1)
out7 = self.linear4(out6)
return out7
|
forward
|
test-app.js
|
/*global describe, before, it */
'use strict';
var path = require('path');
var assert = require('yeoman-generator').assert;
var helpers = require('yeoman-generator').test;
var os = require('os');
describe('slamp:app', function () {
before(function (done) {
helpers.run(path.join(__dirname, '../generators/app'))
// .withOptions({ skipInstall: true })
.withPrompts({
slampdeskDir: 'slampdesk',
classesDir: 'classes'
})
.on('end', done);
});
it('save config', function () {
var configFile = '.yo-rc.json';
assert.file([
configFile
]);
assert.fileContent(configFile, /"slampdeskDir": "slampdesk"/);
assert.fileContent(configFile, /"classesDir": "classes"/);
});
});
describe('slamp:app empty prompt', function () {
before(function (done) {
helpers.run(path.join(__dirname, '../generators/app'))
// .withOptions({ skipInstall: true })
.withPrompts({
slampdeskDir: '',
|
})
.on('end', done);
});
it('save config', function () {
var configFile = '.yo-rc.json';
assert.file([
configFile
]);
assert.fileContent(configFile, /"slampdeskDir": "slampdesk"/);
assert.fileContent(configFile, /"classesDir": "classes"/);
});
});
|
classesDir: ''
|
test_parsers.py
|
import unittest
from pathlib import Path
import re
import tempfile
import d2vg
class ParserTest(unittest.TestCase):
def test_text_file(self):
with tempfile.TemporaryDirectory() as tempdir:
p = Path(tempdir) / "a.txt"
content = "1st line.\n2nd line.\n"
p.write_text(content)
read_content = d2vg.parsers.read_text_file(str(p))
self.assertEqual(read_content, content)
def test_html_file(self):
with tempfile.TemporaryDirectory() as tempdir:
p = Path(tempdir) / "a.html"
content = """<!DOCTYPE html>
<html>
<body>
<p>1st paragraph.</p>
<p>2nd paragraph.</p>
</body>
</html>"""
p.write_text(content)
read_content = d2vg.parsers.html_parse(str(p))
read_content = re.sub(r"\n+", r"\n", read_content).rstrip()
self.assertEqual(read_content, "html\n1st paragraph.\n2nd paragraph.")
def test_pdf_file(self):
from borb.pdf.canvas.layout.page_layout.multi_column_layout import (
SingleColumnLayout,
)
from borb.pdf.canvas.layout.text.paragraph import Paragraph
from borb.pdf.document import Document
from borb.pdf.page.page import Page
from borb.pdf.pdf import PDF
with tempfile.TemporaryDirectory() as tempdir:
p = Path(tempdir) / "a.pdf"
pdf = Document()
|
page = Page()
pdf.append_page(page)
layout = SingleColumnLayout(page)
layout.add(Paragraph("1st paragraph."))
layout.add(Paragraph("2nd paragraph."))
with open(p, "wb") as pdf_file_handle:
PDF.dumps(pdf_file_handle, pdf)
read_content = d2vg.parsers.pdf_parse(str(p))
read_content = re.sub(r"\n+", r"\n", read_content).rstrip()
self.assertEqual(read_content, "1st paragraph.\n2nd paragraph.")
# !! not working !! ref: https://stackoverflow.com/questions/58186869/how-to-fix-the-bug-modulenotfounderror-no-module-named-exceptions-when-impo
# def test_docx_file(self):
# from docx import Document
# with tempfile.TemporaryDirectory() as tempdir:
# p = Path(tempdir) / 'a.docx'
# document = Document()
# document.add_paragraph("1st paragraph.")
# document.add_paragraph("1st paragraph.")
# document.save(str(p))
# read_content = d2vg.parsers.docx_parse(str(p))
# read_content = re.sub(r'\n+', r'\n', read_content).rstrip()
# self.assertEqual(read_content, '1st paragraph.\n2nd paragraph.')
if __name__ == "__main__":
unittest.main()
| |
eval_bleu.py
|
import os
import sys
import subprocess
import hydra
from omegaconf import DictConfig
from hydra import slurm_utils
@hydra.main(config_path='/h/nng/conf/robust/config.yaml')
def gen_neighborhood_labels(cfg: DictConfig):
|
if __name__ == "__main__":
gen_neighborhood_labels()
|
base_path = '/h/nng/data'
model_data_path = os.path.join(base_path, cfg.data.task, cfg.eval.model.data)
eval_data_path = os.path.join(base_path, cfg.data.task, cfg.eval.data)
model_path = os.path.join('/h/nng/slurm', cfg.eval.model.date, slurm_utils.resolve_name(cfg.eval.model.name))
if not os.path.exists(os.path.join(model_path, 'checkpoint_best.pt')):
for f in sorted(os.listdir(model_path))[::-1]:
if os.path.exists(os.path.join(model_path, f, 'checkpoint_best.pt')):
model_path = os.path.join(model_path, f)
break
model_path = os.path.join(model_path, 'checkpoint_best.pt')
bin_path = os.path.join(model_data_path, cfg.data.fdset, cfg.data.bin, 'bin')
t_path = os.path.join(eval_data_path, cfg.data.tdset, 'orig', cfg.eval.split + '.bpe.' + cfg.data.src)
ref_path = os.path.join(eval_data_path, cfg.data.tdset, 'orig', cfg.eval.split + '.raw.' + cfg.data.tgt)
bpe_path = '/h/nng/programs/subword-nmt/subword_nmt'
if cfg.data.fdset == 'iwslt':
fair_sh = ['fairseq-generate', bin_path, \
'--path', model_path, \
'--beam', '10', \
'--remove-bpe', \
'--batch-size', '128', \
'--quiet']
fair_p = subprocess.Popen(fair_sh, stdout=subprocess.PIPE)
output, err = fair_p.communicate()
print(output)
else:
cat_sh = ['cat', t_path]
fair_sh = ['fairseq-interactive', bin_path, \
'--path', model_path, \
'-s', cfg.data.src, \
'-t', cfg.data.tgt, \
'--beam', '10', \
'--remove-bpe', \
'--buffer-size', '1024', \
'--max-tokens', '8000']
grep_sh = ['grep', '^H-']
cut_sh = ['cut', '-f', '3-']
detoken_sh = ['sacremoses', 'detokenize', '-l', cfg.data.tgt, '-q']
score_sh = ['sacrebleu', ref_path, '-l', cfg.data.src + '-' + cfg.data.tgt, '-w', '2']
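        # The subprocess chain below is equivalent to the shell pipeline:
        #   cat <src> | fairseq-interactive ... | grep '^H-' | cut -f 3- | sacremoses detokenize | sacrebleu <ref>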
cat_p = subprocess.Popen(cat_sh, stdout=subprocess.PIPE)
fair_p = subprocess.Popen(fair_sh, stdin=cat_p.stdout, stdout=subprocess.PIPE)
cat_p.stdout.close()
grep_p = subprocess.Popen(grep_sh, stdin=fair_p.stdout, stdout=subprocess.PIPE)
fair_p.stdout.close()
cut_p = subprocess.Popen(cut_sh, stdin=grep_p.stdout, stdout=subprocess.PIPE)
grep_p.stdout.close()
detoken_p = subprocess.Popen(detoken_sh, stdin=cut_p.stdout, stdout=subprocess.PIPE)
cut_p.stdout.close()
score_p = subprocess.Popen(score_sh, stdin=detoken_p.stdout, stdout=subprocess.PIPE)
detoken_p.stdout.close()
output, err = score_p.communicate()
print(output)
|
variables_2.js
|
['driver_5fversion_278',['driver_version',['../structsyn6288__info__s.html#a41b0bd442708b70d252c50b92c75265a',1,'syn6288_info_s']]]
];
|
var searchData=
[
['debug_5fprint_276',['debug_print',['../structsyn6288__handle__s.html#a75f9f50c23e87e9407f9ec4e34bb42f2',1,'syn6288_handle_s']]],
['delay_5fms_277',['delay_ms',['../structsyn6288__handle__s.html#a406c9433252b7366de417b7a60915c81',1,'syn6288_handle_s']]],
|
|
quotas_test.go
|
//go:build acceptance || networking || quotas
// +build acceptance networking quotas
package quotas
import (
"log"
"os"
"reflect"
"testing"
"github.com/nexclipper/gophercloud/acceptance/clients"
"github.com/nexclipper/gophercloud/acceptance/tools"
"github.com/nexclipper/gophercloud/openstack/networking/v2/extensions/quotas"
th "github.com/nexclipper/gophercloud/testhelper"
)
func TestQuotasGet(t *testing.T)
|
func TestQuotasUpdate(t *testing.T) {
clients.RequireAdmin(t)
client, err := clients.NewNetworkV2Client()
th.AssertNoErr(t, err)
originalQuotas, err := quotas.Get(client, os.Getenv("OS_PROJECT_NAME")).Extract()
th.AssertNoErr(t, err)
newQuotas, err := quotas.Update(client, os.Getenv("OS_PROJECT_NAME"), updateOpts).Extract()
th.AssertNoErr(t, err)
tools.PrintResource(t, newQuotas)
if reflect.DeepEqual(originalQuotas, newQuotas) {
log.Fatal("Original and New Networking Quotas are the same")
}
// Restore original quotas.
restoredQuotas, err := quotas.Update(client, os.Getenv("OS_PROJECT_NAME"), quotas.UpdateOpts{
FloatingIP: &originalQuotas.FloatingIP,
Network: &originalQuotas.Network,
Port: &originalQuotas.Port,
RBACPolicy: &originalQuotas.RBACPolicy,
Router: &originalQuotas.Router,
SecurityGroup: &originalQuotas.SecurityGroup,
SecurityGroupRule: &originalQuotas.SecurityGroupRule,
Subnet: &originalQuotas.Subnet,
SubnetPool: &originalQuotas.SubnetPool,
}).Extract()
th.AssertNoErr(t, err)
th.AssertDeepEquals(t, originalQuotas, restoredQuotas)
tools.PrintResource(t, restoredQuotas)
}
|
{
clients.RequireAdmin(t)
client, err := clients.NewNetworkV2Client()
th.AssertNoErr(t, err)
quotasInfo, err := quotas.Get(client, os.Getenv("OS_PROJECT_NAME")).Extract()
th.AssertNoErr(t, err)
tools.PrintResource(t, quotasInfo)
}
|
twitter.js
|
const { twitter } = require('../lib/scrape')
let handler = async (m, { conn, args, usedPrefix, command }) => {
if (!args[0]) throw `*This command downloads Twitter media from a link*\n\nexample:\n${usedPrefix + command} https://twitter.com/gofoodindonesia/status/1229369819511709697`
if (!args[0].match(/(https:\/\/.*twitter.com)/gi)) throw `*Invalid link! This command downloads Twitter media from a link*\n\nexample:\n${usedPrefix + command} https://twitter.com/gofoodindonesia/status/1229369819511709697`
twitter(args[0]).then(async res => {
let twit = JSON.stringify(res)
let json = JSON.parse(twit)
let pesan = json.data.map((v) => `Link: ${v.url}`).join('\n------------\n')
m.reply(pesan)
for (let { url } of json.data)
conn.sendFile(m.chat, url, 'ig' + (/mp4/i.test(url) ? '.mp4' : '.jpg'), `*© Araaa•BOT*`, m, false, { thumbnail: Buffer.alloc(0) })
})
}
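// The scrape helper above is expected (inferred from the mapping in this
// handler) to resolve to an object shaped like { data: [{ url: '...' }, ...] };
// each url is replied as text and then re-sent as a file (.mp4 for videos,
// .jpg otherwise).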
|
handler.help = ['twitter'].map(v => v + ' <url>')
handler.tags = ['downloader','premium']
handler.command = /^twitter$/i
handler.premium = true
handler.limit = true
module.exports = handler
| |
server.py
|
import time
import board
import neopixel
import threading
from flask import Flask
# Choose an open pin connected to the Data In of the NeoPixel strip, i.e. board.D18
# NeoPixels must be connected to D10, D12, D18 or D21 to work.
pixel_pin = board.D21
# The number of NeoPixels
num_pixels = 137
# The order of the pixel colors - RGB or GRB. Some NeoPixels have red and green reversed!
# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW.
ORDER = neopixel.GRB
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=1.0, auto_write=False,
pixel_order=ORDER)
app = Flask(__name__)
rgb=(255,255,255)
status = 0
enableRainbow = False
# I'm not entirely sure what to do with the ratio yet. Repeated brightness adjustments cause problems. Maybe max this until >=1 of the component values is 255?
rgbRatio=(255, 255, 255)
brightness = 1
def wheel(pos):
# Input a value 0 to 255 to get a color value.
# The colours are a transition r - g - b - back to r.
global brightness
if pos < 0 or pos > 255:
r = g = b = 0
elif pos < 85:
|
elif pos < 170:
pos -= 85
r = int(255 - pos*3)
g = 0
b = int(pos*3)
else:
pos -= 170
r = 0
g = int(pos*3)
b = int(255 - pos*3)
r, g, b = int(brightness * r), int(brightness * g), int(brightness * b)
return (r, g, b) if ORDER == neopixel.RGB or ORDER == neopixel.GRB else (r, g, b, 0)
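# Illustrative values with brightness == 1.0: wheel(85) -> (255, 0, 0) and
# wheel(170) -> (0, 0, 255); components are scaled down proportionally when
# brightness < 1.0.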
def rgb_to_hex(rgb):
return '#%02x%02x%02x' % rgb
def hex_to_rgb(value):
"""Return (red, green, blue) for the color given as #rrggbb."""
value = value.lstrip('#')
lv = len(value)
return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
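# Quick sanity check (illustrative only): these two helpers are inverses for
# 6-digit hex strings, e.g.
#   rgb_to_hex((255, 136, 0))  -> '#ff8800'
#   hex_to_rgb('#ff8800')      -> (255, 136, 0)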
def rainbow_cycle():
global enableRainbow
while enableRainbow:
for j in range(255):
# This is necessary because with longer strands this nested loop just takes foreverrrrrr, so breaking will force a re-eval. It's hacky, and could
# be done more cleanly probably. Consider refactoring in the future to move the thread object to be global, making it stoppable and then implementing
# more consistent checks instead of having random globals flying all over the place. Blame the wine.
if not enableRainbow:
break
for i in range(num_pixels):
pixel_index = (i * 256 // num_pixels) + j
pixels[i] = wheel(pixel_index & 255)
pixels.show()
off()
return
@app.route("/status")
def get_status():  # named get_status so it does not shadow the module-level `status` flag
global status
return str(status)
@app.route("/bright")
def bright():
global rgb
print(str(int(brightness*100)))
return str(int(brightness*100))
@app.route("/color")
def color():
global rgb
value = rgb_to_hex(rgb)
return str(value)
@app.route("/rainbow")
def rainbow():
global enableRainbow
global status
status = 1
global rgb
pixels.fill(rgb)
pixels.show()
if not enableRainbow:
enableRainbow=True
t = threading.Thread(target = rainbow_cycle)
t.start()
return "on"
# TODO: Test this actually works. Can this be condensed in to the other /bright route? Is it easier to just have one with no args and one with args?
# TODO: Handle case where brightness is 0.
# More Info on setBrightness() call: https://forums.adafruit.com/viewtopic.php?t=41143
@app.route("/setbright/<value>")
def setbright(value):
global rgb
global brightness
brightness = int(value) / 100
rgb = tuple(int(brightness * v) for v in rgbRatio)
return str(int(brightness*100))
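# Example for /setbright (assuming rgbRatio == (255, 127, 0)): a request to
# /setbright/50 sets brightness to 0.5 and rgb to (127, 63, 0), and the route
# returns "50".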
@app.route("/on")
def on():
global status
status = 1
global rgb
pixels.fill(rgb)
pixels.show()
return "on"
@app.route("/off")
def off():
global status
status = 0
global enableRainbow
enableRainbow=False
pixels.fill((0,0,0))
pixels.show()
return "off"
@app.route("/set/<values>")
def set(values):
global enableRainbow
enableRainbow=False
h = values
#h = values.replace("NA","0").replace("N","1")
global rgb
global rgbRatio
#rgb=hex_to_rgb(h)
rgb=tuple(int(h[i:i+2], 16) for i in (0, 2 ,4))
# Figure out which of these is the highest value, and how far it needs to scale to get to 255
rgbRatio = tuple(int(v*255/max(rgb)) for v in rgb)
pixels.fill(rgb)
pixels.show()
return "ok"
|
r = int(pos * 3)
g = int(255 - pos*3)
b = 0
|
fill_uint_test.go
|
package fill
import (
"errors"
"os"
"testing"
)
type TestUintFillEnv struct {
A uint `env:",default=1"`
B uint8 `env:",default=1"`
C uint16 `env:",default=1"`
D uint32 `env:",default=1"`
E uint64 `env:",default=1"`
F uint `env:",require"`
}
type TestUintFillDefault struct {
A uint `default:"1"`
B uint8 `default:"1"`
C uint16 `default:"1"`
D uint32 `default:"1"`
E uint64 `default:"xxx"`
}
type TestUintFillEmpty struct {
A uint
B uint8
C uint16
D uint32
E uint64
}
type TestUiIntFillNotEmpty struct {
A uint
B uint8
C uint16
D uint32
E uint64
}
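// TestUintFill exercises Fill over the structs above (behaviour inferred from
// the assertions in its body): fields tagged `env:",default=1"` fall back to 1
// when the variable is set but empty, the `env:",require"` field produces an
// "F require" error when its variable is missing, an unparsable default
// ("xxx") on a uint64 field produces an "invalid" error and leaves the field
// at 0, and untagged structs are left untouched.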
func TestUintFill(t *testing.T)
|
{
assert := assertWrap(t)
_ = os.Setenv("A", "")
_ = os.Setenv("B", "")
_ = os.Setenv("C", "")
_ = os.Setenv("D", "")
_ = os.Setenv("E", "")
//_ = os.Setenv("F", "")
{
test := TestUintFillEnv{}
err := Fill(&test, OptEnv)
assert("test.env", test.A, uint(1))
assert("test.env", test.B, uint8(1))
assert("test.env", test.C, uint16(1))
assert("test.env", test.D, uint32(1))
assert("test.env", test.E, uint64(1))
assert("test.err.require", err, errors.New("F require"))
}
{
test := TestUintFillDefault{}
err := Fill(&test, OptDefault)
assert("test.env", test.A, uint(1))
assert("test.env", test.B, uint8(1))
assert("test.env", test.C, uint16(1))
assert("test.env", test.D, uint32(1))
assert("test.env", test.E, uint64(0))
assert("test.err.invalid", err, errors.New("E invalid [xxx]"))
}
{
test := TestUintFillEmpty{}
err := Fill(&test)
assert("test.env", test.A, uint(0))
assert("test.env", test.B, uint8(0))
assert("test.env", test.C, uint16(0))
assert("test.env", test.D, uint32(0))
assert("test.env", test.E, uint64(0))
assert("test.err.nil", err, nil)
}
{
test := TestUiIntFillNotEmpty{
A: 2,
B: 2,
C: 2,
D: 2,
E: 2,
}
err := Fill(&test)
assert("test.env", test.A, uint(2))
assert("test.env", test.B, uint8(2))
assert("test.env", test.C, uint16(2))
assert("test.env", test.D, uint32(2))
assert("test.env", test.E, uint64(2))
assert("test.err.nil", err, nil)
}
}
|
|
persistence_test.rs
|
use talus;
use std::f64;
use std::path::PathBuf;
use std::collections::HashMap;
#[test]
fn continental_divide_test() {
let mut d = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
d.push("tests");
d.push("resources");
d.push("grays.txt");
let points = talus::LabeledPoint::points_from_file(d).ok().unwrap();
// I know that Grays, Torreys, and Grizzly are the first 3 points
let mut expected = HashMap::with_capacity(3);
expected.insert(0, f64::INFINITY);
expected.insert(1, 560.);
expected.insert(2, 827.);
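// (Interpretation, not asserted by the test: a maximum's persistence here is
// essentially its topographic prominence, and the highest of the three peaks
// never merges into anything else, hence the INFINITY entry for id 0.)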
let graph = talus::graph::build_knn(&points, 5).unwrap();
let complex = talus::morse::MorseSmaleComplex::from_graph(&graph).unwrap();
let lifetimes = complex.descending_complex.get_persistence();
lifetimes.iter()
.map(|(node, lifetime)| (graph.node_weight(*node).unwrap().id, lifetime))
.filter(|(id, _)| expected.contains_key(id))
.for_each(|(id, lifetime)| {
let expected_lifetime = expected.get(&id).unwrap();
println!("{}, {}, {}", id, lifetime, expected_lifetime);
if lifetime.is_infinite()
|
else {
// big error bars on this, due to the manual sampling of the points in grays.txt
assert!((lifetime - expected_lifetime).abs() < 150.);
}
});
}
|
{
assert!(expected_lifetime.is_infinite());
}
|
constrained_type_params.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc::ty::{self, Ty};
use rustc::ty::fold::{TypeFoldable, TypeVisitor};
use std::collections::HashSet;
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub enum Parameter {
Type(ty::ParamTy),
Region(ty::EarlyBoundRegion),
}
/// If `include_projections` is false, returns the list of parameters that are
/// constrained by `t` - i.e. the value of each parameter in the list is
/// uniquely determined by `t` (see RFC 447). If it is true, return the list
/// of parameters whose values are needed in order to constrain `ty` - these
/// differ, with the latter being a superset, in the presence of projections.
pub fn parameters_for<'tcx, T>(t: &T,
include_nonconstraining: bool)
-> Vec<Parameter>
where T: TypeFoldable<'tcx>
|
struct ParameterCollector {
parameters: Vec<Parameter>,
include_nonconstraining: bool
}
impl<'tcx> TypeVisitor<'tcx> for ParameterCollector {
fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
match t.sty {
ty::TyProjection(..) if !self.include_nonconstraining => {
// projections are not injective
return false;
}
ty::TyParam(ref d) => {
self.parameters.push(Parameter::Type(d.clone()));
}
_ => {}
}
t.super_visit_with(self)
}
fn visit_region(&mut self, r: ty::Region) -> bool {
match r {
ty::ReEarlyBound(data) => {
self.parameters.push(Parameter::Region(data));
}
_ => {}
}
false
}
}
pub fn identify_constrained_type_params<'tcx>(predicates: &[ty::Predicate<'tcx>],
impl_trait_ref: Option<ty::TraitRef<'tcx>>,
input_parameters: &mut HashSet<Parameter>)
{
let mut predicates = predicates.to_owned();
setup_constraining_predicates(&mut predicates, impl_trait_ref, input_parameters);
}
/// Order the predicates in `predicates` such that each parameter is
/// constrained before it is used, if that is possible, and add the
/// parameters so constrained to `input_parameters`. For example,
/// imagine the following impl:
///
/// impl<T: Debug, U: Iterator<Item=T>> Trait for U
///
/// The impl's predicates are collected from left to right. Ignoring
/// the implicit `Sized` bounds, these are
/// * T: Debug
/// * U: Iterator
/// * <U as Iterator>::Item = T -- a desugared ProjectionPredicate
///
/// When we, for example, try to go over the trait-reference
/// `IntoIter<u32> as Trait`, we substitute the impl parameters with fresh
/// variables and match them with the impl trait-ref, so we know that
/// `$U = IntoIter<u32>`.
///
/// However, in order to process the `$T: Debug` predicate, we must first
/// know the value of `$T` - which is only given by processing the
/// projection. As we occasionally want to process predicates in a single
/// pass, we want the projection to come first. In fact, as projections
/// can (acyclically) depend on one another - see RFC447 for details - we
/// need to topologically sort them.
///
/// We *do* have to be somewhat careful when projection targets contain
/// projections themselves, for example in
/// impl<S,U,V,W> Trait for U where
/// /* 0 */ S: Iterator<Item=U>,
/// /* - */ U: Iterator,
/// /* 1 */ <U as Iterator>::Item: ToOwned<Owned=(W,<V as Iterator>::Item)>
/// /* 2 */ W: Iterator<Item=V>
/// /* 3 */ V: Debug
/// we have to evaluate the projections in the order I wrote them:
/// `V: Debug` requires `V` to be evaluated. The only projection that
/// *determines* `V` is 2 (1 contains it, but *does not determine it*,
/// as it is only contained within a projection), but that requires `W`
/// which is determined by 1, which requires `U`, that is determined
/// by 0. I should probably pick a less tangled example, but I can't
/// think of any.
pub fn setup_constraining_predicates<'tcx>(predicates: &mut [ty::Predicate<'tcx>],
impl_trait_ref: Option<ty::TraitRef<'tcx>>,
input_parameters: &mut HashSet<Parameter>)
{
// The canonical way of doing the needed topological sort
// would be a DFS, but getting the graph and its ownership
// right is annoying, so I am using an in-place fixed-point iteration,
// which is `O(nt)` where `t` is the depth of type-parameter constraints,
// remembering that `t` should be less than 7 in practice.
//
// Basically, I iterate over all projections and swap every
// "ready" projection to the start of the list, such that
// all of the projections before `i` are topologically sorted
// and constrain all the parameters in `input_parameters`.
//
// In the example, `input_parameters` starts by containing `U` - which
// is constrained by the trait-ref - and so on the first pass we
// observe that `<U as Iterator>::Item = T` is a "ready" projection that
// constrains `T` and swap it to front. As it is the sole projection,
// no more swaps can take place afterwards, with the result being
// * <U as Iterator>::Item = T
// * T: Debug
// * U: Iterator
let mut i = 0;
let mut changed = true;
while changed {
changed = false;
for j in i..predicates.len() {
if let ty::Predicate::Projection(ref poly_projection) = predicates[j] {
// Note that we can skip binder here because the impl
// trait ref never contains any late-bound regions.
let projection = poly_projection.skip_binder();
// Special case: watch out for some kind of sneaky attempt
// to project out an associated type defined by this very
// trait.
let unbound_trait_ref = &projection.projection_ty.trait_ref;
if Some(unbound_trait_ref.clone()) == impl_trait_ref {
continue;
}
// A projection depends on its input types and determines its output
// type. For example, if we have
// `<<T as Bar>::Baz as Iterator>::Output = <U as Iterator>::Output`
// Then the projection only applies if `T` is known, but it still
// does not determine `U`.
let inputs = parameters_for(&projection.projection_ty.trait_ref, true);
let relies_only_on_inputs = inputs.iter().all(|p| input_parameters.contains(&p));
if !relies_only_on_inputs {
continue;
}
input_parameters.extend(parameters_for(&projection.ty, false));
} else {
continue;
}
// fancy control flow to bypass borrow checker
predicates.swap(i, j);
i += 1;
changed = true;
}
}
}
|
{
let mut collector = ParameterCollector {
parameters: vec![],
include_nonconstraining: include_nonconstraining
};
t.visit_with(&mut collector);
collector.parameters
}
|
lib.rs
|
//! Lowers the AST to the HIR.
//!
//! Since the AST and HIR are fairly similar, this is mostly a simple procedure,
//! much like a fold. Where lowering involves a bit more work things get more
//! interesting and there are some invariants you should know about. These mostly
//! concern spans and IDs.
//!
//! Spans are assigned to AST nodes during parsing and then are modified during
//! expansion to indicate the origin of a node and the process it went through
//! being expanded. IDs are assigned to AST nodes just before lowering.
//!
//! For the simpler lowering steps, IDs and spans should be preserved. Unlike
//! expansion we do not preserve the process of lowering in the spans, so spans
//! should not be modified here. When creating a new node (as opposed to
//! "folding" an existing one), create a new ID using `next_id()`.
//!
//! You must ensure that IDs are unique. That means that you should only use the
//! ID from an AST node in a single HIR node (you can assume that AST node-IDs
//! are unique). Every new node must have a unique ID. Avoid cloning HIR nodes.
//! If you do, you must then set the new node's ID to a fresh one.
//!
//! Spans are used for error messages and for tools to map semantics back to
//! source code. It is therefore not as important with spans as IDs to be strict
//! about use (you can't break the compiler by screwing up a span). Obviously, a
//! HIR node can only have a single span. But multiple nodes can have the same
//! span and spans don't need to be kept in order, etc. Where code is preserved
//! by lowering, it should have the same span as in the AST. Where HIR nodes are
//! new it is probably best to give a span for the whole AST node being lowered.
//! All nodes should have real spans; don't use dummy spans. Tools are likely to
//! get confused if the spans from leaf AST nodes occur in multiple places
//! in the HIR, especially for multiple identifiers.
#![feature(crate_visibility_modifier)]
#![feature(box_patterns)]
#![feature(iter_zip)]
#![feature(never_type)]
#![recursion_limit = "256"]
use rustc_ast::token::{self, Token};
use rustc_ast::tokenstream::{CanSynthesizeMissingTokens, TokenStream, TokenTree};
use rustc_ast::visit;
use rustc_ast::{self as ast, *};
use rustc_ast_pretty::pprust;
use rustc_data_structures::captures::Captures;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::Lrc;
use rustc_errors::{struct_span_err, Applicability};
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Namespace, PartialRes, PerNS, Res};
use rustc_hir::def_id::{DefId, DefPathHash, LocalDefId, CRATE_DEF_ID};
use rustc_hir::definitions::{DefKey, DefPathData, Definitions};
use rustc_hir::intravisit;
use rustc_hir::{ConstArg, GenericArg, InferKind, ParamName};
use rustc_index::vec::{Idx, IndexVec};
use rustc_query_system::ich::StableHashingContext;
use rustc_session::lint::builtin::BARE_TRAIT_OBJECTS;
use rustc_session::lint::{BuiltinLintDiagnostics, LintBuffer};
use rustc_session::utils::{FlattenNonterminals, NtToTokenstream};
use rustc_session::Session;
use rustc_span::edition::Edition;
use rustc_span::hygiene::ExpnId;
use rustc_span::source_map::{respan, DesugaringKind};
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{Span, DUMMY_SP};
use smallvec::SmallVec;
use tracing::{debug, trace};
macro_rules! arena_vec {
($this:expr; $($x:expr),*) => ({
let a = [$($x),*];
$this.arena.alloc_from_iter(std::array::IntoIter::new(a))
});
}
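// Usage note (descriptive only): `arena_vec![self; a, b, c]` collects the given
// expressions into a fixed-size array and allocates it into `self.arena`,
// yielding an arena-backed `&'hir [T]` slice; see e.g. the `PathSegment` uses
// further down in this file.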
mod asm;
mod block;
mod expr;
mod index;
mod item;
mod pat;
mod path;
rustc_hir::arena_types!(rustc_arena::declare_arena, 'tcx);
struct LoweringContext<'a, 'hir: 'a> {
/// Used to assign IDs to HIR nodes that do not directly correspond to AST nodes.
sess: &'a Session,
resolver: &'a mut dyn ResolverAstLowering,
/// HACK(Centril): there is a cyclic dependency between the parser and lowering
/// if we don't have this function pointer. To avoid that dependency so that
/// `rustc_middle` is independent of the parser, we use dynamic dispatch here.
nt_to_tokenstream: NtToTokenstream,
/// Used to allocate HIR nodes.
arena: &'hir Arena<'hir>,
/// The items being lowered are collected here.
owners: IndexVec<LocalDefId, Option<hir::OwnerInfo<'hir>>>,
/// Bodies inside the owner being lowered.
bodies: Vec<(hir::ItemLocalId, &'hir hir::Body<'hir>)>,
/// Attributes inside the owner being lowered.
attrs: SortedMap<hir::ItemLocalId, &'hir [Attribute]>,
generator_kind: Option<hir::GeneratorKind>,
/// When inside an `async` context, this is the `HirId` of the
/// `task_context` local bound to the resume argument of the generator.
task_context: Option<hir::HirId>,
/// Used to get the current `fn`'s def span to point to when using `await`
/// outside of an `async fn`.
current_item: Option<Span>,
catch_scope: Option<NodeId>,
loop_scope: Option<NodeId>,
is_in_loop_condition: bool,
is_in_trait_impl: bool,
is_in_dyn_type: bool,
/// What to do when we encounter an "anonymous lifetime
/// reference". The term "anonymous" is meant to encompass both
/// `'_` lifetimes as well as fully elided cases where nothing is
/// written at all (e.g., `&T` or `std::cell::Ref<T>`).
anonymous_lifetime_mode: AnonymousLifetimeMode,
/// Used to create lifetime definitions from in-band lifetime usages.
/// e.g., `fn foo(x: &'x u8) -> &'x u8` to `fn foo<'x>(x: &'x u8) -> &'x u8`
/// When a named lifetime is encountered in a function or impl header and
/// has not been defined
/// (i.e., it doesn't appear in the in_scope_lifetimes list), it is added
/// to this list. The results of this list are then added to the list of
/// lifetime definitions in the corresponding impl or function generics.
lifetimes_to_define: Vec<(Span, ParamName)>,
/// `true` if in-band lifetimes are being collected. This is used to
/// indicate whether or not we're in a place where new lifetimes will result
/// in in-band lifetime definitions, such as a function or an impl header,
/// including implicit lifetimes from `impl_header_lifetime_elision`.
is_collecting_in_band_lifetimes: bool,
/// Currently in-scope lifetimes defined in impl headers, fn headers, or HRTB.
/// When `is_collecting_in_band_lifetimes` is true, each lifetime is checked
/// against this list to see if it is already in-scope, or if a definition
/// needs to be created for it.
///
/// We always store a `normalize_to_macros_2_0()` version of the param-name in this
/// vector.
in_scope_lifetimes: Vec<ParamName>,
current_hir_id_owner: LocalDefId,
item_local_id_counter: hir::ItemLocalId,
node_id_to_hir_id: IndexVec<NodeId, Option<hir::HirId>>,
/// NodeIds that are lowered inside the current HIR owner.
local_node_ids: Vec<NodeId>,
allow_try_trait: Option<Lrc<[Symbol]>>,
allow_gen_future: Option<Lrc<[Symbol]>>,
}
pub trait ResolverAstLowering {
fn def_key(&mut self, id: DefId) -> DefKey;
fn def_span(&self, id: LocalDefId) -> Span;
fn item_generics_num_lifetimes(&self, def: DefId) -> usize;
fn legacy_const_generic_args(&mut self, expr: &Expr) -> Option<Vec<usize>>;
/// Obtains resolution for a `NodeId` with a single resolution.
fn get_partial_res(&self, id: NodeId) -> Option<PartialRes>;
/// Obtains per-namespace resolutions for `use` statement with the given `NodeId`.
fn get_import_res(&mut self, id: NodeId) -> PerNS<Option<Res<NodeId>>>;
/// Obtains resolution for a label with the given `NodeId`.
fn get_label_res(&mut self, id: NodeId) -> Option<NodeId>;
/// We must keep the set of definitions up to date as we add nodes that weren't in the AST.
/// This should only return `None` during testing.
fn definitions(&mut self) -> &mut Definitions;
fn create_stable_hashing_context(&self) -> StableHashingContext<'_>;
fn lint_buffer(&mut self) -> &mut LintBuffer;
fn next_node_id(&mut self) -> NodeId;
fn take_trait_map(&mut self, node: NodeId) -> Option<Vec<hir::TraitCandidate>>;
fn opt_local_def_id(&self, node: NodeId) -> Option<LocalDefId>;
fn local_def_id(&self, node: NodeId) -> LocalDefId;
fn def_path_hash(&self, def_id: DefId) -> DefPathHash;
fn create_def(
&mut self,
parent: LocalDefId,
node_id: ast::NodeId,
data: DefPathData,
expn_id: ExpnId,
span: Span,
) -> LocalDefId;
}
/// Context of `impl Trait` in code, which determines whether it is allowed in an HIR subtree,
/// and if so, what meaning it has.
#[derive(Debug)]
enum ImplTraitContext<'b, 'a> {
/// Treat `impl Trait` as shorthand for a new universal generic parameter.
/// Example: `fn foo(x: impl Debug)`, where `impl Debug` is conceptually
/// equivalent to a fresh universal parameter like `fn foo<T: Debug>(x: T)`.
///
/// Newly generated parameters should be inserted into the given `Vec`.
Universal(&'b mut Vec<hir::GenericParam<'a>>, LocalDefId),
/// Treat `impl Trait` as shorthand for a new opaque type.
/// Example: `fn foo() -> impl Debug`, where `impl Debug` is conceptually
/// equivalent to a new opaque type like `type T = impl Debug; fn foo() -> T`.
///
ReturnPositionOpaqueTy {
/// `DefId` for the parent function, used to look up necessary
/// information later.
fn_def_id: DefId,
/// Origin: Either OpaqueTyOrigin::FnReturn or OpaqueTyOrigin::AsyncFn,
origin: hir::OpaqueTyOrigin,
},
/// Impl trait in type aliases.
TypeAliasesOpaqueTy {
/// Set of lifetimes that this opaque type can capture, if it uses
/// them. This includes lifetimes bound since we entered this context.
/// For example:
///
/// ```
/// type A<'b> = impl for<'a> Trait<'a, Out = impl Sized + 'a>;
/// ```
///
/// Here the inner opaque type captures `'a` because it uses it. It doesn't
/// need to capture `'b` because it already inherits the lifetime
/// parameter from `A`.
// FIXME(impl_trait): but `required_region_bounds` will ICE later
// anyway.
capturable_lifetimes: &'b mut FxHashSet<hir::LifetimeName>,
},
/// `impl Trait` is not accepted in this position.
Disallowed(ImplTraitPosition),
}
/// Position in which `impl Trait` is disallowed.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum ImplTraitPosition {
/// Disallowed in `let` / `const` / `static` bindings.
Binding,
/// All other positions.
Other,
}
impl<'a> ImplTraitContext<'_, 'a> {
#[inline]
fn disallowed() -> Self {
ImplTraitContext::Disallowed(ImplTraitPosition::Other)
}
fn reborrow<'this>(&'this mut self) -> ImplTraitContext<'this, 'a> {
use self::ImplTraitContext::*;
match self {
Universal(params, parent) => Universal(params, *parent),
ReturnPositionOpaqueTy { fn_def_id, origin } => {
ReturnPositionOpaqueTy { fn_def_id: *fn_def_id, origin: *origin }
}
TypeAliasesOpaqueTy { capturable_lifetimes } => {
TypeAliasesOpaqueTy { capturable_lifetimes }
}
Disallowed(pos) => Disallowed(*pos),
}
}
}
pub fn lower_crate<'a, 'hir>(
sess: &'a Session,
krate: &'a Crate,
resolver: &'a mut dyn ResolverAstLowering,
nt_to_tokenstream: NtToTokenstream,
arena: &'hir Arena<'hir>,
) -> &'hir hir::Crate<'hir> {
let _prof_timer = sess.prof.verbose_generic_activity("hir_lowering");
let owners = IndexVec::from_fn_n(|_| None, resolver.definitions().def_index_count());
LoweringContext {
sess,
resolver,
nt_to_tokenstream,
arena,
owners,
bodies: Vec::new(),
attrs: SortedMap::new(),
catch_scope: None,
loop_scope: None,
is_in_loop_condition: false,
is_in_trait_impl: false,
is_in_dyn_type: false,
anonymous_lifetime_mode: AnonymousLifetimeMode::PassThrough,
current_hir_id_owner: CRATE_DEF_ID,
item_local_id_counter: hir::ItemLocalId::new(0),
node_id_to_hir_id: IndexVec::new(),
local_node_ids: Vec::new(),
generator_kind: None,
task_context: None,
current_item: None,
lifetimes_to_define: Vec::new(),
is_collecting_in_band_lifetimes: false,
in_scope_lifetimes: Vec::new(),
allow_try_trait: Some([sym::try_trait_v2][..].into()),
allow_gen_future: Some([sym::gen_future][..].into()),
}
.lower_crate(krate)
}
#[derive(Copy, Clone, PartialEq)]
enum ParamMode {
/// Any path in a type context.
Explicit,
/// Path in a type definition, where the anonymous lifetime `'_` is not allowed.
ExplicitNamed,
/// The `module::Type` in `module::Type::method` in an expression.
Optional,
}
enum ParenthesizedGenericArgs {
Ok,
Err,
}
/// What to do when we encounter an **anonymous** lifetime
/// reference. Anonymous lifetime references come in two flavors. You
/// have implicit, or fully elided, references to lifetimes, like the
/// one in `&T` or `Ref<T>`, and you have `'_` lifetimes, like `&'_ T`
/// or `Ref<'_, T>`. These often behave the same, but not always:
///
/// - certain usages of implicit references are deprecated, like
/// `Ref<T>`, and we sometimes just give hard errors in those cases
/// as well.
/// - for object bounds there is a difference: `Box<dyn Foo>` is not
/// the same as `Box<dyn Foo + '_>`.
///
/// We describe the effects of the various modes in terms of three cases:
///
/// - **Modern** -- includes all uses of `'_`, but also the lifetime arg
/// of a `&` (e.g., the missing lifetime in something like `&T`)
/// - **Dyn Bound** -- if you have something like `Box<dyn Foo>`,
/// there is an elided lifetime bound (`Box<dyn Foo + 'X>`). These
/// elided bounds follow special rules. Note that this only covers
/// cases where *nothing* is written; the `'_` in `Box<dyn Foo +
/// '_>` is a case of "modern" elision.
/// - **Deprecated** -- this covers cases like `Ref<T>`, where the lifetime
/// parameter to ref is completely elided. `Ref<'_, T>` would be the modern,
/// non-deprecated equivalent.
///
/// Currently, the handling of lifetime elision is somewhat spread out
/// between HIR lowering and -- as described below -- the
/// `resolve_lifetime` module. Often we "fallthrough" to that code by generating
/// an "elided" or "underscore" lifetime name. In the future, we probably want to move
/// everything into HIR lowering.
#[derive(Copy, Clone, Debug)]
enum AnonymousLifetimeMode {
/// For **Modern** cases, create a new anonymous region parameter
/// and reference that.
///
/// For **Dyn Bound** cases, pass responsibility to
/// `resolve_lifetime` code.
///
/// For **Deprecated** cases, report an error.
CreateParameter,
/// Give a hard error when either `&` or `'_` is written. Used to
/// rule out things like `where T: Foo<'_>`. Does not imply an
/// error on default object bounds (e.g., `Box<dyn Foo>`).
ReportError,
/// Pass responsibility to `resolve_lifetime` code for all cases.
PassThrough,
}
impl<'a, 'hir> LoweringContext<'a, 'hir> {
fn lower_crate(mut self, c: &Crate) -> &'hir hir::Crate<'hir> {
debug_assert_eq!(self.resolver.local_def_id(CRATE_NODE_ID), CRATE_DEF_ID);
visit::walk_crate(&mut item::ItemLowerer { lctx: &mut self }, c);
self.with_hir_id_owner(CRATE_NODE_ID, |lctx| {
let module = lctx.lower_mod(&c.items, c.span);
lctx.lower_attrs(hir::CRATE_HIR_ID, &c.attrs);
hir::OwnerNode::Crate(lctx.arena.alloc(module))
});
let hir_hash = self.compute_hir_hash();
let mut def_id_to_hir_id = IndexVec::default();
for (node_id, hir_id) in self.node_id_to_hir_id.into_iter_enumerated() {
if let Some(def_id) = self.resolver.opt_local_def_id(node_id) {
if def_id_to_hir_id.len() <= def_id.index() {
def_id_to_hir_id.resize(def_id.index() + 1, None);
}
def_id_to_hir_id[def_id] = hir_id;
}
}
self.resolver.definitions().init_def_id_to_hir_id_mapping(def_id_to_hir_id);
let krate = hir::Crate { owners: self.owners, hir_hash };
self.arena.alloc(krate)
}
/// Compute the hash for the HIR of the full crate.
/// This hash will then be part of the crate_hash which is stored in the metadata.
fn compute_hir_hash(&mut self) -> Fingerprint {
let definitions = self.resolver.definitions();
let mut hir_body_nodes: Vec<_> = self
.owners
.iter_enumerated()
.filter_map(|(def_id, info)| {
let info = info.as_ref()?;
let def_path_hash = definitions.def_path_hash(def_id);
Some((def_path_hash, info))
})
.collect();
hir_body_nodes.sort_unstable_by_key(|bn| bn.0);
let mut stable_hasher = StableHasher::new();
let mut hcx = self.resolver.create_stable_hashing_context();
hir_body_nodes.hash_stable(&mut hcx, &mut stable_hasher);
stable_hasher.finish()
}
fn with_hir_id_owner(
&mut self,
owner: NodeId,
f: impl FnOnce(&mut Self) -> hir::OwnerNode<'hir>,
) -> LocalDefId {
let def_id = self.resolver.local_def_id(owner);
let current_attrs = std::mem::take(&mut self.attrs);
let current_bodies = std::mem::take(&mut self.bodies);
let current_node_ids = std::mem::take(&mut self.local_node_ids);
let current_owner = std::mem::replace(&mut self.current_hir_id_owner, def_id);
let current_local_counter =
std::mem::replace(&mut self.item_local_id_counter, hir::ItemLocalId::new(1));
// Always allocate the first `HirId` for the owner itself.
let _old = self.node_id_to_hir_id.insert(owner, hir::HirId::make_owner(def_id));
debug_assert_eq!(_old, None);
self.local_node_ids.push(owner);
let item = f(self);
debug_assert_eq!(def_id, item.def_id());
let info = self.make_owner_info(item);
self.attrs = current_attrs;
self.bodies = current_bodies;
self.local_node_ids = current_node_ids;
self.current_hir_id_owner = current_owner;
self.item_local_id_counter = current_local_counter;
let _old = self.owners.insert(def_id, info);
debug_assert!(_old.is_none());
def_id
}
fn make_owner_info(&mut self, node: hir::OwnerNode<'hir>) -> hir::OwnerInfo<'hir> {
let attrs = std::mem::take(&mut self.attrs);
let mut bodies = std::mem::take(&mut self.bodies);
let local_node_ids = std::mem::take(&mut self.local_node_ids);
let trait_map = local_node_ids
.into_iter()
.filter_map(|node_id| {
let hir_id = self.node_id_to_hir_id[node_id]?;
let traits = self.resolver.take_trait_map(node_id)?;
Some((hir_id.local_id, traits.into_boxed_slice()))
})
.collect();
#[cfg(debug_assertions)]
for (id, attrs) in attrs.iter() {
// Verify that we do not store empty slices in the map.
if attrs.is_empty() {
panic!("Stored empty attributes for {:?}", id);
}
}
bodies.sort_by_key(|(k, _)| *k);
let bodies = SortedMap::from_presorted_elements(bodies);
let (hash_including_bodies, hash_without_bodies) = self.hash_owner(node, &bodies);
let (nodes, parenting) =
index::index_hir(self.sess, self.resolver.definitions(), node, &bodies);
let nodes = hir::OwnerNodes { hash_including_bodies, hash_without_bodies, nodes, bodies };
let attrs = {
let mut hcx = self.resolver.create_stable_hashing_context();
let mut stable_hasher = StableHasher::new();
attrs.hash_stable(&mut hcx, &mut stable_hasher);
let hash = stable_hasher.finish();
hir::AttributeMap { map: attrs, hash }
};
hir::OwnerInfo { nodes, parenting, attrs, trait_map }
}
/// Hash the HIR node twice, one deep and one shallow hash. This allows to differentiate
/// queries which depend on the full HIR tree and those which only depend on the item signature.
fn hash_owner(
&mut self,
node: hir::OwnerNode<'hir>,
bodies: &SortedMap<hir::ItemLocalId, &'hir hir::Body<'hir>>,
) -> (Fingerprint, Fingerprint) {
let mut hcx = self.resolver.create_stable_hashing_context();
let mut stable_hasher = StableHasher::new();
hcx.with_hir_bodies(true, node.def_id(), bodies, |hcx| {
node.hash_stable(hcx, &mut stable_hasher)
});
let hash_including_bodies = stable_hasher.finish();
let mut stable_hasher = StableHasher::new();
hcx.with_hir_bodies(false, node.def_id(), bodies, |hcx| {
node.hash_stable(hcx, &mut stable_hasher)
});
let hash_without_bodies = stable_hasher.finish();
(hash_including_bodies, hash_without_bodies)
}
/// This method allocates a new `HirId` for the given `NodeId` and stores it in
/// the `LoweringContext`'s `NodeId => HirId` map.
/// Take care not to call this method if the resulting `HirId` is then not
/// actually used in the HIR, as that would trigger an assertion in the
/// `HirIdValidator` later on, which makes sure that all `NodeId`s got mapped
/// properly. Calling the method twice with the same `NodeId` is fine though.
fn lower_node_id(&mut self, ast_node_id: NodeId) -> hir::HirId {
assert_ne!(ast_node_id, DUMMY_NODE_ID);
*self.node_id_to_hir_id.get_or_insert_with(ast_node_id, || {
// Generate a new `HirId`.
let owner = self.current_hir_id_owner;
let local_id = self.item_local_id_counter;
self.item_local_id_counter.increment_by(1);
self.local_node_ids.push(ast_node_id);
hir::HirId { owner, local_id }
})
}
fn next_id(&mut self) -> hir::HirId {
let node_id = self.resolver.next_node_id();
self.lower_node_id(node_id)
}
fn lower_res(&mut self, res: Res<NodeId>) -> Res {
res.map_id(|id| {
self.node_id_to_hir_id.get(id).copied().flatten().unwrap_or_else(|| {
panic!("expected `NodeId` to be lowered already for res {:#?}", res);
})
})
}
fn expect_full_res(&mut self, id: NodeId) -> Res<NodeId> {
self.resolver.get_partial_res(id).map_or(Res::Err, |pr| {
if pr.unresolved_segments() != 0 {
panic!("path not fully resolved: {:?}", pr);
}
pr.base_res()
})
}
fn expect_full_res_from_use(&mut self, id: NodeId) -> impl Iterator<Item = Res<NodeId>> {
self.resolver.get_import_res(id).present_items()
}
fn diagnostic(&self) -> &rustc_errors::Handler {
self.sess.diagnostic()
}
/// Reuses the span but adds information like the kind of the desugaring and features that are
/// allowed inside this span.
fn mark_span_with_reason(
&self,
reason: DesugaringKind,
span: Span,
allow_internal_unstable: Option<Lrc<[Symbol]>>,
) -> Span {
span.mark_with_reason(
allow_internal_unstable,
reason,
self.sess.edition(),
self.resolver.create_stable_hashing_context(),
)
}
fn with_anonymous_lifetime_mode<R>(
&mut self,
anonymous_lifetime_mode: AnonymousLifetimeMode,
op: impl FnOnce(&mut Self) -> R,
) -> R {
debug!(
"with_anonymous_lifetime_mode(anonymous_lifetime_mode={:?})",
anonymous_lifetime_mode,
);
let old_anonymous_lifetime_mode = self.anonymous_lifetime_mode;
self.anonymous_lifetime_mode = anonymous_lifetime_mode;
let result = op(self);
self.anonymous_lifetime_mode = old_anonymous_lifetime_mode;
debug!(
"with_anonymous_lifetime_mode: restoring anonymous_lifetime_mode={:?}",
old_anonymous_lifetime_mode
);
result
}
/// Intercept all spans entering HIR.
/// Mark a span as relative to the current owning item.
fn lower_span(&self, span: Span) -> Span {
if self.sess.opts.debugging_opts.incremental_relative_spans {
span.with_parent(Some(self.current_hir_id_owner))
} else {
// Do not make spans relative when not using incremental compilation.
span
}
}
fn lower_ident(&self, ident: Ident) -> Ident {
Ident::new(ident.name, self.lower_span(ident.span))
}
/// Creates a new `hir::GenericParam` for every new lifetime and
/// type parameter encountered while evaluating `f`. Definitions
/// are created with the parent provided. If no `parent_id` is
/// provided, no definitions will be returned.
///
/// Presuming that in-band lifetimes are enabled, then
/// `self.anonymous_lifetime_mode` will be updated to match the
/// parameter while `f` is running (and restored afterwards).
fn collect_in_band_defs<T>(
&mut self,
parent_def_id: LocalDefId,
anonymous_lifetime_mode: AnonymousLifetimeMode,
f: impl FnOnce(&mut Self) -> (Vec<hir::GenericParam<'hir>>, T),
) -> (Vec<hir::GenericParam<'hir>>, T) {
assert!(!self.is_collecting_in_band_lifetimes);
assert!(self.lifetimes_to_define.is_empty());
let old_anonymous_lifetime_mode = self.anonymous_lifetime_mode;
self.anonymous_lifetime_mode = anonymous_lifetime_mode;
self.is_collecting_in_band_lifetimes = true;
let (in_band_ty_params, res) = f(self);
self.is_collecting_in_band_lifetimes = false;
self.anonymous_lifetime_mode = old_anonymous_lifetime_mode;
let lifetimes_to_define = self.lifetimes_to_define.split_off(0);
let params = lifetimes_to_define
.into_iter()
.map(|(span, hir_name)| self.lifetime_to_generic_param(span, hir_name, parent_def_id))
.chain(in_band_ty_params.into_iter())
.collect();
(params, res)
}
/// Converts a lifetime into a new generic parameter.
fn lifetime_to_generic_param(
&mut self,
span: Span,
hir_name: ParamName,
parent_def_id: LocalDefId,
) -> hir::GenericParam<'hir> {
let node_id = self.resolver.next_node_id();
// Get the name we'll use to make the def-path. Note
// that collisions are ok here and this shouldn't
// really show up for the end-user.
let (str_name, kind) = match hir_name {
ParamName::Plain(ident) => (ident.name, hir::LifetimeParamKind::InBand),
ParamName::Fresh(_) => (kw::UnderscoreLifetime, hir::LifetimeParamKind::Elided),
ParamName::Error => (kw::UnderscoreLifetime, hir::LifetimeParamKind::Error),
};
// Add a definition for the in-band lifetime def.
self.resolver.create_def(
parent_def_id,
node_id,
DefPathData::LifetimeNs(str_name),
ExpnId::root(),
span.with_parent(None),
);
hir::GenericParam {
hir_id: self.lower_node_id(node_id),
name: hir_name,
bounds: &[],
span: self.lower_span(span),
pure_wrt_drop: false,
kind: hir::GenericParamKind::Lifetime { kind },
}
}
/// When there is a reference to some lifetime `'a`, and in-band
/// lifetimes are enabled, then we want to push that lifetime into
/// the vector of names to define later. In that case, it will get
/// added to the appropriate generics.
fn maybe_collect_in_band_lifetime(&mut self, ident: Ident) {
if !self.is_collecting_in_band_lifetimes {
return;
}
if !self.sess.features_untracked().in_band_lifetimes {
return;
}
if self.in_scope_lifetimes.contains(&ParamName::Plain(ident.normalize_to_macros_2_0())) {
return;
}
let hir_name = ParamName::Plain(ident);
if self.lifetimes_to_define.iter().any(|(_, lt_name)| {
lt_name.normalize_to_macros_2_0() == hir_name.normalize_to_macros_2_0()
}) {
return;
}
self.lifetimes_to_define.push((ident.span, hir_name));
}
/// When we have either an elided or `'_` lifetime in an impl
/// header, we convert it to an in-band lifetime.
fn collect_fresh_in_band_lifetime(&mut self, span: Span) -> ParamName {
assert!(self.is_collecting_in_band_lifetimes);
let index = self.lifetimes_to_define.len() + self.in_scope_lifetimes.len();
let hir_name = ParamName::Fresh(index);
self.lifetimes_to_define.push((span, hir_name));
hir_name
}
// Evaluates `f` with the lifetimes in `params` in-scope.
// This is used to track which lifetimes have already been defined, and
// which are new in-band lifetimes that need to have a definition created
// for them.
fn with_in_scope_lifetime_defs<T>(
&mut self,
params: &[GenericParam],
f: impl FnOnce(&mut Self) -> T,
) -> T {
let old_len = self.in_scope_lifetimes.len();
let lt_def_names = params.iter().filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
Some(ParamName::Plain(param.ident.normalize_to_macros_2_0()))
}
_ => None,
});
self.in_scope_lifetimes.extend(lt_def_names);
let res = f(self);
self.in_scope_lifetimes.truncate(old_len);
res
}
/// Appends in-band lifetime defs and argument-position `impl
/// Trait` defs to the existing set of generics.
///
/// Presuming that in-band lifetimes are enabled, then
/// `self.anonymous_lifetime_mode` will be updated to match the
/// parameter while `f` is running (and restored afterwards).
fn add_in_band_defs<T>(
&mut self,
generics: &Generics,
parent_def_id: LocalDefId,
anonymous_lifetime_mode: AnonymousLifetimeMode,
f: impl FnOnce(&mut Self, &mut Vec<hir::GenericParam<'hir>>) -> T,
) -> (hir::Generics<'hir>, T) {
let (in_band_defs, (mut lowered_generics, res)) =
self.with_in_scope_lifetime_defs(&generics.params, |this| {
this.collect_in_band_defs(parent_def_id, anonymous_lifetime_mode, |this| {
let mut params = Vec::new();
// Note: it is necessary to lower generics *before* calling `f`.
// When lowering `async fn`, there's a final step when lowering
// the return type that assumes that all in-scope lifetimes have
// already been added to either `in_scope_lifetimes` or
// `lifetimes_to_define`. If we swapped the order of these two,
// in-band-lifetimes introduced by generics or where-clauses
// wouldn't have been added yet.
let generics = this.lower_generics_mut(
generics,
ImplTraitContext::Universal(&mut params, this.current_hir_id_owner),
);
let res = f(this, &mut params);
(params, (generics, res))
})
});
lowered_generics.params.extend(in_band_defs);
let lowered_generics = lowered_generics.into_generics(self.arena);
(lowered_generics, res)
}
fn with_dyn_type_scope<T>(&mut self, in_scope: bool, f: impl FnOnce(&mut Self) -> T) -> T {
let was_in_dyn_type = self.is_in_dyn_type;
self.is_in_dyn_type = in_scope;
let result = f(self);
self.is_in_dyn_type = was_in_dyn_type;
result
}
fn with_new_scopes<T>(&mut self, f: impl FnOnce(&mut Self) -> T) -> T {
let was_in_loop_condition = self.is_in_loop_condition;
self.is_in_loop_condition = false;
let catch_scope = self.catch_scope.take();
let loop_scope = self.loop_scope.take();
let ret = f(self);
self.catch_scope = catch_scope;
self.loop_scope = loop_scope;
self.is_in_loop_condition = was_in_loop_condition;
ret
}
fn lower_attrs(&mut self, id: hir::HirId, attrs: &[Attribute]) -> Option<&'hir [Attribute]> {
if attrs.is_empty() {
None
} else {
debug_assert_eq!(id.owner, self.current_hir_id_owner);
let ret = self.arena.alloc_from_iter(attrs.iter().map(|a| self.lower_attr(a)));
debug_assert!(!ret.is_empty());
self.attrs.insert(id.local_id, ret);
Some(ret)
}
}
fn lower_attr(&self, attr: &Attribute) -> Attribute {
// Note that we explicitly do not walk the path. Since we don't really
// lower attributes (we use the AST version) there is nowhere to keep
// the `HirId`s. We don't actually need HIR version of attributes anyway.
// Tokens are also not needed after macro expansion and parsing.
let kind = match attr.kind {
AttrKind::Normal(ref item, _) => AttrKind::Normal(
AttrItem {
path: item.path.clone(),
args: self.lower_mac_args(&item.args),
tokens: None,
},
None,
),
AttrKind::DocComment(comment_kind, data) => AttrKind::DocComment(comment_kind, data),
};
Attribute { kind, id: attr.id, style: attr.style, span: self.lower_span(attr.span) }
}
fn alias_attrs(&mut self, id: hir::HirId, target_id: hir::HirId) {
debug_assert_eq!(id.owner, self.current_hir_id_owner);
debug_assert_eq!(target_id.owner, self.current_hir_id_owner);
if let Some(&a) = self.attrs.get(&target_id.local_id) {
debug_assert!(!a.is_empty());
self.attrs.insert(id.local_id, a);
}
}
fn lower_mac_args(&self, args: &MacArgs) -> MacArgs {
match *args {
MacArgs::Empty => MacArgs::Empty,
MacArgs::Delimited(dspan, delim, ref tokens) => {
// This is either a non-key-value attribute, or a `macro_rules!` body.
// We either do not have any nonterminals present (in the case of an attribute),
// or we have tokens available for all nonterminals in the case of a nested
// `macro_rules!`, e.g.:
//
// ```rust
// macro_rules! outer {
// ($e:expr) => {
// macro_rules! inner {
// () => { $e }
// }
// }
// }
// ```
//
// In both cases, we don't want to synthesize any tokens
MacArgs::Delimited(
dspan,
delim,
self.lower_token_stream(tokens.clone(), CanSynthesizeMissingTokens::No),
)
}
// This is an inert key-value attribute - it will never be visible to macros
// after it gets lowered to HIR. Therefore, we can synthesize tokens with fake
// spans to handle nonterminals in `#[doc]` (e.g. `#[doc = $e]`).
MacArgs::Eq(eq_span, ref token) => {
// In valid code the value is always representable as a single literal token.
fn unwrap_single_token(sess: &Session, tokens: TokenStream, span: Span) -> Token {
if tokens.len() != 1 {
sess.diagnostic()
.delay_span_bug(span, "multiple tokens in key-value attribute's value");
}
match tokens.into_trees().next() {
Some(TokenTree::Token(token)) => token,
Some(TokenTree::Delimited(_, delim, tokens)) => {
if delim != token::NoDelim {
sess.diagnostic().delay_span_bug(
span,
"unexpected delimiter in key-value attribute's value",
)
}
unwrap_single_token(sess, tokens, span)
}
None => Token::dummy(),
}
}
let tokens = FlattenNonterminals {
parse_sess: &self.sess.parse_sess,
synthesize_tokens: CanSynthesizeMissingTokens::Yes,
nt_to_tokenstream: self.nt_to_tokenstream,
}
.process_token(token.clone());
MacArgs::Eq(eq_span, unwrap_single_token(self.sess, tokens, token.span))
}
}
}
fn lower_token_stream(
&self,
tokens: TokenStream,
synthesize_tokens: CanSynthesizeMissingTokens,
) -> TokenStream {
FlattenNonterminals {
parse_sess: &self.sess.parse_sess,
synthesize_tokens,
nt_to_tokenstream: self.nt_to_tokenstream,
}
.process_token_stream(tokens)
}
/// Given an associated type constraint like one of these:
///
/// ```
/// T: Iterator<Item: Debug>
/// ^^^^^^^^^^^
/// T: Iterator<Item = Debug>
/// ^^^^^^^^^^^^
/// ```
///
/// returns a `hir::TypeBinding` representing `Item`.
fn lower_assoc_ty_constraint(
&mut self,
constraint: &AssocTyConstraint,
mut itctx: ImplTraitContext<'_, 'hir>,
) -> hir::TypeBinding<'hir> {
debug!("lower_assoc_ty_constraint(constraint={:?}, itctx={:?})", constraint, itctx);
// lower generic arguments of identifier in constraint
let gen_args = if let Some(ref gen_args) = constraint.gen_args {
let gen_args_ctor = match gen_args {
GenericArgs::AngleBracketed(ref data) => {
self.lower_angle_bracketed_parameter_data(
data,
ParamMode::Explicit,
itctx.reborrow(),
)
.0
}
GenericArgs::Parenthesized(ref data) => {
let mut err = self.sess.struct_span_err(
gen_args.span(),
"parenthesized generic arguments cannot be used in associated type constraints"
);
// FIXME: try to write a suggestion here
err.emit();
self.lower_angle_bracketed_parameter_data(
&data.as_angle_bracketed_args(),
ParamMode::Explicit,
itctx.reborrow(),
)
.0
}
};
gen_args_ctor.into_generic_args(self)
} else {
self.arena.alloc(hir::GenericArgs::none())
};
let kind = match constraint.kind {
AssocTyConstraintKind::Equality { ref ty } => {
hir::TypeBindingKind::Equality { ty: self.lower_ty(ty, itctx) }
}
AssocTyConstraintKind::Bound { ref bounds } => {
let mut capturable_lifetimes;
let mut parent_def_id = self.current_hir_id_owner;
// Piggy-back on the `impl Trait` context to figure out the correct behavior.
let (desugar_to_impl_trait, itctx) = match itctx {
// We are in the return position:
//
// fn foo() -> impl Iterator<Item: Debug>
//
// so desugar to
//
// fn foo() -> impl Iterator<Item = impl Debug>
ImplTraitContext::ReturnPositionOpaqueTy { .. }
| ImplTraitContext::TypeAliasesOpaqueTy { .. } => (true, itctx),
// We are in the argument position, but within a dyn type:
//
// fn foo(x: dyn Iterator<Item: Debug>)
//
// so desugar to
//
// fn foo(x: dyn Iterator<Item = impl Debug>)
ImplTraitContext::Universal(_, parent) if self.is_in_dyn_type => {
parent_def_id = parent;
(true, itctx)
}
// In `type Foo = dyn Iterator<Item: Debug>` we desugar to
// `type Foo = dyn Iterator<Item = impl Debug>` but we have to override the
// "impl trait context" to permit `impl Debug` in this position (it desugars
// then to an opaque type).
//
// FIXME: this is only needed until `impl Trait` is allowed in type aliases.
ImplTraitContext::Disallowed(_) if self.is_in_dyn_type => {
capturable_lifetimes = FxHashSet::default();
(
true,
ImplTraitContext::TypeAliasesOpaqueTy {
capturable_lifetimes: &mut capturable_lifetimes,
},
)
}
// We are in the parameter position, but not within a dyn type:
//
// fn foo(x: impl Iterator<Item: Debug>)
//
// so we leave it as is and this gets expanded in astconv to a bound like
// `<T as Iterator>::Item: Debug` where `T` is the type parameter for the
// `impl Iterator`.
_ => (false, itctx),
};
if desugar_to_impl_trait {
// Desugar `AssocTy: Bounds` into `AssocTy = impl Bounds`. We do this by
// constructing the HIR for `impl bounds...` and then lowering that.
let impl_trait_node_id = self.resolver.next_node_id();
self.resolver.create_def(
parent_def_id,
impl_trait_node_id,
DefPathData::ImplTrait,
ExpnId::root(),
constraint.span,
);
self.with_dyn_type_scope(false, |this| {
let node_id = this.resolver.next_node_id();
let ty = this.lower_ty(
&Ty {
id: node_id,
kind: TyKind::ImplTrait(impl_trait_node_id, bounds.clone()),
span: this.lower_span(constraint.span),
tokens: None,
},
itctx,
);
hir::TypeBindingKind::Equality { ty }
})
} else {
// Desugar `AssocTy: Bounds` into a type binding where the
// later desugars into a trait predicate.
let bounds = self.lower_param_bounds(bounds, itctx);
hir::TypeBindingKind::Constraint { bounds }
}
}
};
hir::TypeBinding {
hir_id: self.lower_node_id(constraint.id),
ident: self.lower_ident(constraint.ident),
gen_args,
kind,
span: self.lower_span(constraint.span),
}
}
fn lower_generic_arg(
&mut self,
arg: &ast::GenericArg,
itctx: ImplTraitContext<'_, 'hir>,
) -> hir::GenericArg<'hir> {
match arg {
ast::GenericArg::Lifetime(lt) => GenericArg::Lifetime(self.lower_lifetime(&lt)),
ast::GenericArg::Type(ty) => {
match ty.kind {
TyKind::Infer if self.sess.features_untracked().generic_arg_infer => {
return GenericArg::Infer(hir::InferArg {
hir_id: self.lower_node_id(ty.id),
span: self.lower_span(ty.span),
kind: InferKind::Type,
});
}
// We parse const arguments as path types as we cannot distinguish them during
// parsing. We try to resolve that ambiguity by attempting resolution in both the
// type and value namespaces. If we resolved the path in the value namespace, we
// transform it into a generic const argument.
TyKind::Path(ref qself, ref path) => {
if let Some(partial_res) = self.resolver.get_partial_res(ty.id) {
let res = partial_res.base_res();
if !res.matches_ns(Namespace::TypeNS) {
debug!(
"lower_generic_arg: Lowering type argument as const argument: {:?}",
ty,
);
// Construct an AnonConst where the expr is the "ty"'s path.
let parent_def_id = self.current_hir_id_owner;
let node_id = self.resolver.next_node_id();
// Add a definition for the in-band const def.
self.resolver.create_def(
parent_def_id,
node_id,
DefPathData::AnonConst,
ExpnId::root(),
ty.span,
);
let span = self.lower_span(ty.span);
let path_expr = Expr {
id: ty.id,
kind: ExprKind::Path(qself.clone(), path.clone()),
span,
attrs: AttrVec::new(),
tokens: None,
};
let ct = self.with_new_scopes(|this| hir::AnonConst {
hir_id: this.lower_node_id(node_id),
body: this.lower_const_body(path_expr.span, Some(&path_expr)),
});
return GenericArg::Const(ConstArg { value: ct, span });
}
}
}
_ => {}
}
GenericArg::Type(self.lower_ty_direct(&ty, itctx))
}
ast::GenericArg::Const(ct) => GenericArg::Const(ConstArg {
value: self.lower_anon_const(&ct),
span: self.lower_span(ct.value.span),
}),
}
}
fn lower_ty(&mut self, t: &Ty, itctx: ImplTraitContext<'_, 'hir>) -> &'hir hir::Ty<'hir> {
self.arena.alloc(self.lower_ty_direct(t, itctx))
}
fn lower_path_ty(
&mut self,
t: &Ty,
qself: &Option<QSelf>,
path: &Path,
param_mode: ParamMode,
itctx: ImplTraitContext<'_, 'hir>,
) -> hir::Ty<'hir> {
let id = self.lower_node_id(t.id);
let qpath = self.lower_qpath(t.id, qself, path, param_mode, itctx);
let ty = self.ty_path(id, t.span, qpath);
if let hir::TyKind::TraitObject(..) = ty.kind {
self.maybe_lint_bare_trait(t.span, t.id, qself.is_none() && path.is_global());
}
ty
}
fn ty(&mut self, span: Span, kind: hir::TyKind<'hir>) -> hir::Ty<'hir> {
hir::Ty { hir_id: self.next_id(), kind, span: self.lower_span(span) }
}
fn ty_tup(&mut self, span: Span, tys: &'hir [hir::Ty<'hir>]) -> hir::Ty<'hir> {
self.ty(span, hir::TyKind::Tup(tys))
}
fn lower_ty_direct(&mut self, t: &Ty, mut itctx: ImplTraitContext<'_, 'hir>) -> hir::Ty<'hir> {
let kind = match t.kind {
TyKind::Infer => hir::TyKind::Infer,
TyKind::Err => hir::TyKind::Err,
TyKind::Slice(ref ty) => hir::TyKind::Slice(self.lower_ty(ty, itctx)),
TyKind::Ptr(ref mt) => hir::TyKind::Ptr(self.lower_mt(mt, itctx)),
TyKind::Rptr(ref region, ref mt) => {
let span = self.sess.source_map().next_point(t.span.shrink_to_lo());
let lifetime = match *region {
Some(ref lt) => self.lower_lifetime(lt),
None => self.elided_ref_lifetime(span),
};
hir::TyKind::Rptr(lifetime, self.lower_mt(mt, itctx))
}
TyKind::BareFn(ref f) => self.with_in_scope_lifetime_defs(&f.generic_params, |this| {
this.with_anonymous_lifetime_mode(AnonymousLifetimeMode::PassThrough, |this| {
hir::TyKind::BareFn(this.arena.alloc(hir::BareFnTy {
generic_params: this.lower_generic_params(
&f.generic_params,
ImplTraitContext::disallowed(),
),
unsafety: this.lower_unsafety(f.unsafety),
abi: this.lower_extern(f.ext),
decl: this.lower_fn_decl(&f.decl, None, false, None),
param_names: this.lower_fn_params_to_names(&f.decl),
}))
})
}),
TyKind::Never => hir::TyKind::Never,
TyKind::Tup(ref tys) => {
hir::TyKind::Tup(self.arena.alloc_from_iter(
tys.iter().map(|ty| self.lower_ty_direct(ty, itctx.reborrow())),
))
}
TyKind::Paren(ref ty) => {
return self.lower_ty_direct(ty, itctx);
}
TyKind::Path(ref qself, ref path) => {
return self.lower_path_ty(t, qself, path, ParamMode::Explicit, itctx);
}
TyKind::ImplicitSelf => {
let res = self.expect_full_res(t.id);
let res = self.lower_res(res);
hir::TyKind::Path(hir::QPath::Resolved(
None,
self.arena.alloc(hir::Path {
res,
segments: arena_vec![self; hir::PathSegment::from_ident(
Ident::with_dummy_span(kw::SelfUpper)
)],
span: self.lower_span(t.span),
}),
))
}
TyKind::Array(ref ty, ref length) => {
hir::TyKind::Array(self.lower_ty(ty, itctx), self.lower_anon_const(length))
}
TyKind::Typeof(ref expr) => hir::TyKind::Typeof(self.lower_anon_const(expr)),
TyKind::TraitObject(ref bounds, kind) => {
let mut lifetime_bound = None;
let (bounds, lifetime_bound) = self.with_dyn_type_scope(true, |this| {
let bounds =
this.arena.alloc_from_iter(bounds.iter().filter_map(
|bound| match *bound {
GenericBound::Trait(
ref ty,
TraitBoundModifier::None | TraitBoundModifier::MaybeConst,
) => Some(this.lower_poly_trait_ref(ty, itctx.reborrow())),
// `~const ?Bound` will cause an error during AST validation
// anyways, so treat it like `?Bound` as compilation proceeds.
GenericBound::Trait(
_,
TraitBoundModifier::Maybe | TraitBoundModifier::MaybeConstMaybe,
) => None,
GenericBound::Outlives(ref lifetime) => {
if lifetime_bound.is_none() {
lifetime_bound = Some(this.lower_lifetime(lifetime));
}
None
}
},
));
let lifetime_bound =
lifetime_bound.unwrap_or_else(|| this.elided_dyn_bound(t.span));
(bounds, lifetime_bound)
});
if kind != TraitObjectSyntax::Dyn {
self.maybe_lint_bare_trait(t.span, t.id, false);
}
hir::TyKind::TraitObject(bounds, lifetime_bound, kind)
}
TyKind::ImplTrait(def_node_id, ref bounds) => {
let span = t.span;
match itctx {
ImplTraitContext::ReturnPositionOpaqueTy { fn_def_id, origin } => self
.lower_opaque_impl_trait(
span,
Some(fn_def_id),
origin,
def_node_id,
None,
|this| this.lower_param_bounds(bounds, itctx),
),
ImplTraitContext::TypeAliasesOpaqueTy { ref capturable_lifetimes } => {
// Reset capturable lifetimes, any nested impl trait
// types will inherit lifetimes from this opaque type,
// so don't need to capture them again.
let nested_itctx = ImplTraitContext::TypeAliasesOpaqueTy {
capturable_lifetimes: &mut FxHashSet::default(),
};
self.lower_opaque_impl_trait(
span,
None,
hir::OpaqueTyOrigin::TyAlias,
def_node_id,
Some(capturable_lifetimes),
|this| this.lower_param_bounds(bounds, nested_itctx),
)
}
ImplTraitContext::Universal(in_band_ty_params, parent_def_id) => {
// Add a definition for the in-band `Param`.
let def_id = self.resolver.local_def_id(def_node_id);
let hir_bounds = self.lower_param_bounds(
bounds,
ImplTraitContext::Universal(in_band_ty_params, parent_def_id),
);
// Set the name to `impl Bound1 + Bound2`.
let ident = Ident::from_str_and_span(&pprust::ty_to_string(t), span);
in_band_ty_params.push(hir::GenericParam {
hir_id: self.lower_node_id(def_node_id),
name: ParamName::Plain(self.lower_ident(ident)),
pure_wrt_drop: false,
bounds: hir_bounds,
span: self.lower_span(span),
kind: hir::GenericParamKind::Type {
default: None,
synthetic: Some(hir::SyntheticTyParamKind::ImplTrait),
},
});
hir::TyKind::Path(hir::QPath::Resolved(
None,
self.arena.alloc(hir::Path {
span: self.lower_span(span),
res: Res::Def(DefKind::TyParam, def_id.to_def_id()),
segments: arena_vec![self; hir::PathSegment::from_ident(self.lower_ident(ident))],
}),
))
}
ImplTraitContext::Disallowed(_) => {
let mut err = struct_span_err!(
self.sess,
t.span,
E0562,
"`impl Trait` not allowed outside of {}",
"function and method return types",
);
err.emit();
hir::TyKind::Err
}
}
}
TyKind::MacCall(_) => panic!("`TyKind::MacCall` should have been expanded by now"),
TyKind::CVarArgs => {
self.sess.delay_span_bug(
t.span,
"`TyKind::CVarArgs` should have been handled elsewhere",
);
hir::TyKind::Err
}
};
hir::Ty { kind, span: self.lower_span(t.span), hir_id: self.lower_node_id(t.id) }
}
fn lower_opaque_impl_trait(
&mut self,
span: Span,
fn_def_id: Option<DefId>,
origin: hir::OpaqueTyOrigin,
opaque_ty_node_id: NodeId,
capturable_lifetimes: Option<&FxHashSet<hir::LifetimeName>>,
lower_bounds: impl FnOnce(&mut Self) -> hir::GenericBounds<'hir>,
) -> hir::TyKind<'hir> {
debug!(
"lower_opaque_impl_trait(fn_def_id={:?}, opaque_ty_node_id={:?}, span={:?})",
fn_def_id, opaque_ty_node_id, span,
);
// Make sure we know that some funky desugaring has been going on here.
// This is a first: there is code in other places like for loop
// desugaring that explicitly states that we don't want to track that.
// Not tracking it makes lints in rustc and clippy very fragile, as
// frequently opened issues show.
let opaque_ty_span = self.mark_span_with_reason(DesugaringKind::OpaqueTy, span, None);
let opaque_ty_def_id = self.resolver.local_def_id(opaque_ty_node_id);
let mut collected_lifetimes = Vec::new();
self.with_hir_id_owner(opaque_ty_node_id, |lctx| {
let hir_bounds = lower_bounds(lctx);
collected_lifetimes = lifetimes_from_impl_trait_bounds(
opaque_ty_node_id,
&hir_bounds,
capturable_lifetimes,
);
let lifetime_defs =
lctx.arena.alloc_from_iter(collected_lifetimes.iter().map(|&(name, span)| {
let def_node_id = lctx.resolver.next_node_id();
let hir_id = lctx.lower_node_id(def_node_id);
lctx.resolver.create_def(
opaque_ty_def_id,
def_node_id,
DefPathData::LifetimeNs(name.ident().name),
ExpnId::root(),
span.with_parent(None),
);
let (name, kind) = match name {
hir::LifetimeName::Underscore => (
hir::ParamName::Plain(Ident::with_dummy_span(kw::UnderscoreLifetime)),
hir::LifetimeParamKind::Elided,
),
hir::LifetimeName::Param(param_name) => {
(param_name, hir::LifetimeParamKind::Explicit)
}
_ => panic!("expected `LifetimeName::Param` or `ParamName::Plain`"),
};
hir::GenericParam {
hir_id,
name,
span,
pure_wrt_drop: false,
bounds: &[],
kind: hir::GenericParamKind::Lifetime { kind },
}
}));
debug!("lower_opaque_impl_trait: lifetime_defs={:#?}", lifetime_defs);
let opaque_ty_item = hir::OpaqueTy {
generics: hir::Generics {
params: lifetime_defs,
where_clause: hir::WhereClause { predicates: &[], span: lctx.lower_span(span) },
span: lctx.lower_span(span),
},
bounds: hir_bounds,
impl_trait_fn: fn_def_id,
origin,
};
trace!("lower_opaque_impl_trait: {:#?}", opaque_ty_def_id);
lctx.generate_opaque_type(opaque_ty_def_id, opaque_ty_item, span, opaque_ty_span)
});
let lifetimes =
self.arena.alloc_from_iter(collected_lifetimes.into_iter().map(|(name, span)| {
hir::GenericArg::Lifetime(hir::Lifetime { hir_id: self.next_id(), span, name })
}));
debug!("lower_opaque_impl_trait: lifetimes={:#?}", lifetimes);
// `impl Trait` now just becomes `Foo<'a, 'b, ..>`.
hir::TyKind::OpaqueDef(hir::ItemId { def_id: opaque_ty_def_id }, lifetimes)
}
/// Registers a new opaque type with the proper `NodeId`s and
/// returns the lowered `OwnerNode` for the opaque type item.
fn generate_opaque_type(
&mut self,
opaque_ty_id: LocalDefId,
opaque_ty_item: hir::OpaqueTy<'hir>,
span: Span,
opaque_ty_span: Span,
) -> hir::OwnerNode<'hir> {
let opaque_ty_item_kind = hir::ItemKind::OpaqueTy(opaque_ty_item);
// Generate a `type Foo = impl Trait;` declaration.
trace!("registering opaque type with id {:#?}", opaque_ty_id);
let opaque_ty_item = hir::Item {
def_id: opaque_ty_id,
ident: Ident::empty(),
kind: opaque_ty_item_kind,
vis: respan(self.lower_span(span.shrink_to_lo()), hir::VisibilityKind::Inherited),
span: self.lower_span(opaque_ty_span),
};
hir::OwnerNode::Item(self.arena.alloc(opaque_ty_item))
}
fn lower_fn_params_to_names(&mut self, decl: &FnDecl) -> &'hir [Ident] {
// Skip the `...` (`CVarArgs`) trailing arguments from the AST,
// as they are not explicit in HIR/Ty function signatures.
// (instead, the `c_variadic` flag is set to `true`)
let mut inputs = &decl.inputs[..];
if decl.c_variadic() {
inputs = &inputs[..inputs.len() - 1];
}
self.arena.alloc_from_iter(inputs.iter().map(|param| match param.pat.kind {
PatKind::Ident(_, ident, _) => self.lower_ident(ident),
_ => Ident::new(kw::Empty, self.lower_span(param.pat.span)),
}))
}
// Lowers a function declaration.
//
// `decl`: the unlowered (AST) function declaration.
// `fn_def_id`: if `Some`, impl Trait arguments are lowered into generic parameters on the
// given DefId, otherwise impl Trait is disallowed. Must be `Some` if
// `make_ret_async` is also `Some`.
// `impl_trait_return_allow`: determines whether `impl Trait` can be used in return position.
// This guards against trait declarations and implementations where `impl Trait` is
// disallowed.
// `make_ret_async`: if `Some`, converts `-> T` into `-> impl Future<Output = T>` in the
// return type. This is used for `async fn` declarations. The `NodeId` is the ID of the
// return type `impl Trait` item.
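//
// Illustrative sketch (not from the original source): for an item like
// `async fn fetch(x: &u8) -> String`, this function is called with
// `make_ret_async = Some(ret_id)`, so the lowered declaration keeps `x: &u8`
// as an input but replaces the output with the opaque
// `impl Future<Output = String>` produced by `lower_async_fn_ret_ty`.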
fn lower_fn_decl(
&mut self,
decl: &FnDecl,
mut in_band_ty_params: Option<(DefId, &mut Vec<hir::GenericParam<'hir>>)>,
impl_trait_return_allow: bool,
make_ret_async: Option<NodeId>,
) -> &'hir hir::FnDecl<'hir> {
debug!(
"lower_fn_decl(\
fn_decl: {:?}, \
in_band_ty_params: {:?}, \
impl_trait_return_allow: {}, \
make_ret_async: {:?})",
decl, in_band_ty_params, impl_trait_return_allow, make_ret_async,
);
let lt_mode = if make_ret_async.is_some() {
// In `async fn`, argument-position elided lifetimes
// must be transformed into fresh generic parameters so that
// they can be applied to the opaque `impl Trait` return type.
AnonymousLifetimeMode::CreateParameter
} else {
self.anonymous_lifetime_mode
};
let c_variadic = decl.c_variadic();
// Remember how many lifetimes were already around so that we can
// only look at the lifetime parameters introduced by the arguments.
let inputs = self.with_anonymous_lifetime_mode(lt_mode, |this| {
// Skip the `...` (`CVarArgs`) trailing arguments from the AST,
// as they are not explicit in HIR/Ty function signatures.
// (instead, the `c_variadic` flag is set to `true`)
let mut inputs = &decl.inputs[..];
if c_variadic {
inputs = &inputs[..inputs.len() - 1];
}
this.arena.alloc_from_iter(inputs.iter().map(|param| {
if let Some((_, ibty)) = &mut in_band_ty_params {
this.lower_ty_direct(
¶m.ty,
ImplTraitContext::Universal(ibty, this.current_hir_id_owner),
)
} else {
this.lower_ty_direct(¶m.ty, ImplTraitContext::disallowed())
}
}))
});
let output = if let Some(ret_id) = make_ret_async {
self.lower_async_fn_ret_ty(
&decl.output,
in_band_ty_params.expect("`make_ret_async` but no `fn_def_id`").0,
ret_id,
)
} else {
match decl.output {
FnRetTy::Ty(ref ty) => {
let context = match in_band_ty_params {
Some((def_id, _)) if impl_trait_return_allow => {
ImplTraitContext::ReturnPositionOpaqueTy {
fn_def_id: def_id,
origin: hir::OpaqueTyOrigin::FnReturn,
}
}
_ => ImplTraitContext::disallowed(),
};
hir::FnRetTy::Return(self.lower_ty(ty, context))
}
FnRetTy::Default(span) => hir::FnRetTy::DefaultReturn(self.lower_span(span)),
}
};
self.arena.alloc(hir::FnDecl {
inputs,
output,
c_variadic,
implicit_self: decl.inputs.get(0).map_or(hir::ImplicitSelfKind::None, |arg| {
use BindingMode::{ByRef, ByValue};
let is_mutable_pat = matches!(
arg.pat.kind,
PatKind::Ident(ByValue(Mutability::Mut) | ByRef(Mutability::Mut), ..)
);
match arg.ty.kind {
TyKind::ImplicitSelf if is_mutable_pat => hir::ImplicitSelfKind::Mut,
TyKind::ImplicitSelf => hir::ImplicitSelfKind::Imm,
// Given we are only considering `ImplicitSelf` types, we needn't consider
// the case where we have a mutable pattern to a reference as that would
// no longer be an `ImplicitSelf`.
TyKind::Rptr(_, ref mt)
if mt.ty.kind.is_implicit_self() && mt.mutbl == ast::Mutability::Mut =>
{
hir::ImplicitSelfKind::MutRef
}
TyKind::Rptr(_, ref mt) if mt.ty.kind.is_implicit_self() => {
hir::ImplicitSelfKind::ImmRef
}
_ => hir::ImplicitSelfKind::None,
}
}),
})
}
// Transforms `-> T` for `async fn` into `-> OpaqueTy { .. }`
// combined with the following definition of `OpaqueTy`:
//
// type OpaqueTy<generics_from_parent_fn> = impl Future<Output = T>;
//
// `output`: unlowered output type (`T` in `-> T`)
// `fn_def_id`: `DefId` of the parent function (used to create the child `impl Trait` definition)
// `opaque_ty_node_id`: `NodeId` of the opaque `impl Trait` type that should be created
fn lower_async_fn_ret_ty(
&mut self,
output: &FnRetTy,
fn_def_id: DefId,
opaque_ty_node_id: NodeId,
) -> hir::FnRetTy<'hir> {
debug!(
"lower_async_fn_ret_ty(\
output={:?}, \
fn_def_id={:?}, \
opaque_ty_node_id={:?})",
output, fn_def_id, opaque_ty_node_id,
);
let span = output.span();
let opaque_ty_span = self.mark_span_with_reason(DesugaringKind::Async, span, None);
let opaque_ty_def_id = self.resolver.local_def_id(opaque_ty_node_id);
// When we create the opaque type for this async fn, it is going to have
// to capture all the lifetimes involved in the signature (including in the
// return type). This is done by introducing lifetime parameters for:
//
// - all the explicitly declared lifetimes from the impl and function itself;
// - all the elided lifetimes in the fn arguments;
// - all the elided lifetimes in the return type.
//
// So for example in this snippet:
//
// ```rust
// impl<'a> Foo<'a> {
// async fn bar<'b>(&self, x: &'b Vec<f64>, y: &str) -> &u32 {
// // ^ '0 ^ '1 ^ '2
// // elided lifetimes used below
// }
// }
// ```
//
// we would create an opaque type like:
//
// ```
// type Bar<'a, 'b, '0, '1, '2> = impl Future<Output = &'2 u32>;
// ```
//
// and we would then desugar `bar` to the equivalent of:
//
// ```rust
// impl<'a> Foo<'a> {
// fn bar<'b, '0, '1>(&'0 self, x: &'b Vec<f64>, y: &'1 str) -> Bar<'a, 'b, '0, '1, '_>
// }
// ```
//
// Note that the final parameter to `Bar` is `'_`, not `'2` --
// this is because the elided lifetimes from the return type
// should be figured out using the ordinary elision rules, and
// this desugaring achieves that.
//
// The variable `input_lifetimes_count` tracks the number of
// lifetime parameters to the opaque type *not counting* those
// lifetimes elided in the return type. This includes those
// that are explicitly declared (`in_scope_lifetimes`) and
// those elided lifetimes we found in the arguments (current
// content of `lifetimes_to_define`). Next, we will process
// the return type, which will cause `lifetimes_to_define` to
// grow.
let input_lifetimes_count = self.in_scope_lifetimes.len() + self.lifetimes_to_define.len();
let mut lifetime_params = Vec::new();
self.with_hir_id_owner(opaque_ty_node_id, |this| {
// We have to be careful to get elision right here. The
// idea is that we create a lifetime parameter for each
// lifetime in the return type. So, given a return type
// like `async fn foo(..) -> &[&u32]`, we lower to `impl
// Future<Output = &'1 [ &'2 u32 ]>`.
//
// Then, we will create `fn foo(..) -> Foo<'_, '_>`, and
// hence the elision takes place at the fn site.
let future_bound = this
.with_anonymous_lifetime_mode(AnonymousLifetimeMode::CreateParameter, |this| {
this.lower_async_fn_output_type_to_future_bound(output, fn_def_id, span)
});
debug!("lower_async_fn_ret_ty: future_bound={:#?}", future_bound);
// Calculate all the lifetimes that should be captured
// by the opaque type. This should include all in-scope
// lifetime parameters, including those defined in-band.
//
// Note: this must be done after lowering the output type,
// as the output type may introduce new in-band lifetimes.
lifetime_params = this
.in_scope_lifetimes
.iter()
.cloned()
.map(|name| (name.ident().span, name))
.chain(this.lifetimes_to_define.iter().cloned())
.collect();
debug!("lower_async_fn_ret_ty: in_scope_lifetimes={:#?}", this.in_scope_lifetimes);
debug!("lower_async_fn_ret_ty: lifetimes_to_define={:#?}", this.lifetimes_to_define);
debug!("lower_async_fn_ret_ty: lifetime_params={:#?}", lifetime_params);
let generic_params =
this.arena.alloc_from_iter(lifetime_params.iter().map(|(span, hir_name)| {
this.lifetime_to_generic_param(*span, *hir_name, opaque_ty_def_id)
}));
let opaque_ty_item = hir::OpaqueTy {
generics: hir::Generics {
params: generic_params,
where_clause: hir::WhereClause { predicates: &[], span: this.lower_span(span) },
span: this.lower_span(span),
},
bounds: arena_vec![this; future_bound],
impl_trait_fn: Some(fn_def_id),
origin: hir::OpaqueTyOrigin::AsyncFn,
};
trace!("exist ty from async fn def id: {:#?}", opaque_ty_def_id);
this.generate_opaque_type(opaque_ty_def_id, opaque_ty_item, span, opaque_ty_span)
});
// As documented above on the variable
// `input_lifetimes_count`, we need to create the lifetime
// arguments to our opaque type. Continuing with our example,
|
// Bar<'a, 'b, '0, '1, '_>
// ```
//
// For the "input" lifetime parameters, we wish to create
// references to the parameters themselves, including the
// "implicit" ones created from parameter types (`'a`, `'b`,
// '`0`, `'1`).
//
// For the "output" lifetime parameters, we just want to
// generate `'_`.
let mut generic_args = Vec::with_capacity(lifetime_params.len());
generic_args.extend(lifetime_params[..input_lifetimes_count].iter().map(
|&(span, hir_name)| {
// Input lifetime like `'a` or `'1`:
GenericArg::Lifetime(hir::Lifetime {
hir_id: self.next_id(),
span: self.lower_span(span),
name: hir::LifetimeName::Param(hir_name),
})
},
));
generic_args.extend(lifetime_params[input_lifetimes_count..].iter().map(|&(span, _)|
// Output lifetime like `'_`.
GenericArg::Lifetime(hir::Lifetime {
hir_id: self.next_id(),
span: self.lower_span(span),
name: hir::LifetimeName::Implicit,
})));
let generic_args = self.arena.alloc_from_iter(generic_args);
// Create the `Foo<...>` reference itself. Note that the `type
// Foo = impl Trait` is, internally, created as a child of the
// async fn, so the *type parameters* are inherited. It's
// only the lifetime parameters that we must supply.
let opaque_ty_ref =
hir::TyKind::OpaqueDef(hir::ItemId { def_id: opaque_ty_def_id }, generic_args);
let opaque_ty = self.ty(opaque_ty_span, opaque_ty_ref);
hir::FnRetTy::Return(self.arena.alloc(opaque_ty))
}
/// Transforms `-> T` into `Future<Output = T>`.
fn lower_async_fn_output_type_to_future_bound(
&mut self,
output: &FnRetTy,
fn_def_id: DefId,
span: Span,
) -> hir::GenericBound<'hir> {
// Compute the `T` in `Future<Output = T>` from the return type.
let output_ty = match output {
FnRetTy::Ty(ty) => {
// Not `OpaqueTyOrigin::AsyncFn`: that's only used for the
// `impl Future` opaque type that `async fn` implicitly
// generates.
let context = ImplTraitContext::ReturnPositionOpaqueTy {
fn_def_id,
origin: hir::OpaqueTyOrigin::FnReturn,
};
self.lower_ty(ty, context)
}
FnRetTy::Default(ret_ty_span) => self.arena.alloc(self.ty_tup(*ret_ty_span, &[])),
};
// "<Output = T>"
let future_args = self.arena.alloc(hir::GenericArgs {
args: &[],
bindings: arena_vec![self; self.output_ty_binding(span, output_ty)],
parenthesized: false,
span_ext: DUMMY_SP,
});
hir::GenericBound::LangItemTrait(
// ::std::future::Future<future_params>
hir::LangItem::Future,
self.lower_span(span),
self.next_id(),
future_args,
)
}
fn lower_param_bound(
&mut self,
tpb: &GenericBound,
itctx: ImplTraitContext<'_, 'hir>,
) -> hir::GenericBound<'hir> {
match tpb {
GenericBound::Trait(p, modifier) => hir::GenericBound::Trait(
self.lower_poly_trait_ref(p, itctx),
self.lower_trait_bound_modifier(*modifier),
),
GenericBound::Outlives(lifetime) => {
hir::GenericBound::Outlives(self.lower_lifetime(lifetime))
}
}
}
fn lower_lifetime(&mut self, l: &Lifetime) -> hir::Lifetime {
let span = self.lower_span(l.ident.span);
match l.ident {
ident if ident.name == kw::StaticLifetime => {
self.new_named_lifetime(l.id, span, hir::LifetimeName::Static)
}
ident if ident.name == kw::UnderscoreLifetime => match self.anonymous_lifetime_mode {
AnonymousLifetimeMode::CreateParameter => {
let fresh_name = self.collect_fresh_in_band_lifetime(span);
self.new_named_lifetime(l.id, span, hir::LifetimeName::Param(fresh_name))
}
AnonymousLifetimeMode::PassThrough => {
self.new_named_lifetime(l.id, span, hir::LifetimeName::Underscore)
}
AnonymousLifetimeMode::ReportError => self.new_error_lifetime(Some(l.id), span),
},
ident => {
self.maybe_collect_in_band_lifetime(ident);
let param_name = ParamName::Plain(self.lower_ident(ident));
self.new_named_lifetime(l.id, span, hir::LifetimeName::Param(param_name))
}
}
}
fn new_named_lifetime(
&mut self,
id: NodeId,
span: Span,
name: hir::LifetimeName,
) -> hir::Lifetime {
hir::Lifetime { hir_id: self.lower_node_id(id), span: self.lower_span(span), name }
}
fn lower_generic_params_mut<'s>(
&'s mut self,
params: &'s [GenericParam],
mut itctx: ImplTraitContext<'s, 'hir>,
) -> impl Iterator<Item = hir::GenericParam<'hir>> + Captures<'a> + Captures<'s> {
params.iter().map(move |param| self.lower_generic_param(param, itctx.reborrow()))
}
fn lower_generic_params(
&mut self,
params: &[GenericParam],
itctx: ImplTraitContext<'_, 'hir>,
) -> &'hir [hir::GenericParam<'hir>] {
self.arena.alloc_from_iter(self.lower_generic_params_mut(params, itctx))
}
fn lower_generic_param(
&mut self,
param: &GenericParam,
mut itctx: ImplTraitContext<'_, 'hir>,
) -> hir::GenericParam<'hir> {
let bounds: Vec<_> = self
.with_anonymous_lifetime_mode(AnonymousLifetimeMode::ReportError, |this| {
this.lower_param_bounds_mut(¶m.bounds, itctx.reborrow()).collect()
});
let (name, kind) = match param.kind {
GenericParamKind::Lifetime => {
let was_collecting_in_band = self.is_collecting_in_band_lifetimes;
self.is_collecting_in_band_lifetimes = false;
let lt = self
.with_anonymous_lifetime_mode(AnonymousLifetimeMode::ReportError, |this| {
this.lower_lifetime(&Lifetime { id: param.id, ident: param.ident })
});
let param_name = match lt.name {
hir::LifetimeName::Param(param_name) => param_name,
hir::LifetimeName::Implicit
| hir::LifetimeName::Underscore
| hir::LifetimeName::Static => hir::ParamName::Plain(lt.name.ident()),
hir::LifetimeName::ImplicitObjectLifetimeDefault => {
self.sess.diagnostic().span_bug(
param.ident.span,
"object-lifetime-default should not occur here",
);
}
hir::LifetimeName::Error => ParamName::Error,
};
let kind =
hir::GenericParamKind::Lifetime { kind: hir::LifetimeParamKind::Explicit };
self.is_collecting_in_band_lifetimes = was_collecting_in_band;
(param_name, kind)
}
GenericParamKind::Type { ref default, .. } => {
let kind = hir::GenericParamKind::Type {
default: default.as_ref().map(|x| {
self.lower_ty(x, ImplTraitContext::Disallowed(ImplTraitPosition::Other))
}),
synthetic: param
.attrs
.iter()
.filter(|attr| attr.has_name(sym::rustc_synthetic))
.map(|_| hir::SyntheticTyParamKind::FromAttr)
.next(),
};
(hir::ParamName::Plain(self.lower_ident(param.ident)), kind)
}
GenericParamKind::Const { ref ty, kw_span: _, ref default } => {
let ty = self
.with_anonymous_lifetime_mode(AnonymousLifetimeMode::ReportError, |this| {
this.lower_ty(&ty, ImplTraitContext::disallowed())
});
let default = default.as_ref().map(|def| self.lower_anon_const(def));
(
hir::ParamName::Plain(self.lower_ident(param.ident)),
hir::GenericParamKind::Const { ty, default },
)
}
};
let name = match name {
hir::ParamName::Plain(ident) => hir::ParamName::Plain(self.lower_ident(ident)),
name => name,
};
let hir_id = self.lower_node_id(param.id);
self.lower_attrs(hir_id, ¶m.attrs);
hir::GenericParam {
hir_id,
name,
span: self.lower_span(param.ident.span),
pure_wrt_drop: self.sess.contains_name(¶m.attrs, sym::may_dangle),
bounds: self.arena.alloc_from_iter(bounds),
kind,
}
}
fn lower_trait_ref(
&mut self,
p: &TraitRef,
itctx: ImplTraitContext<'_, 'hir>,
) -> hir::TraitRef<'hir> {
let path = match self.lower_qpath(p.ref_id, &None, &p.path, ParamMode::Explicit, itctx) {
hir::QPath::Resolved(None, path) => path,
qpath => panic!("lower_trait_ref: unexpected QPath `{:?}`", qpath),
};
hir::TraitRef { path, hir_ref_id: self.lower_node_id(p.ref_id) }
}
fn lower_poly_trait_ref(
&mut self,
p: &PolyTraitRef,
mut itctx: ImplTraitContext<'_, 'hir>,
) -> hir::PolyTraitRef<'hir> {
let bound_generic_params =
self.lower_generic_params(&p.bound_generic_params, itctx.reborrow());
let trait_ref = self.with_in_scope_lifetime_defs(&p.bound_generic_params, |this| {
// Any impl Trait types defined within this scope can capture
// lifetimes bound on this predicate.
let lt_def_names = p.bound_generic_params.iter().filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => Some(hir::LifetimeName::Param(
ParamName::Plain(param.ident.normalize_to_macros_2_0()),
)),
_ => None,
});
if let ImplTraitContext::TypeAliasesOpaqueTy { ref mut capturable_lifetimes, .. } =
itctx
{
capturable_lifetimes.extend(lt_def_names.clone());
}
let res = this.lower_trait_ref(&p.trait_ref, itctx.reborrow());
if let ImplTraitContext::TypeAliasesOpaqueTy { ref mut capturable_lifetimes, .. } =
itctx
{
for param in lt_def_names {
capturable_lifetimes.remove(¶m);
}
}
res
});
hir::PolyTraitRef { bound_generic_params, trait_ref, span: self.lower_span(p.span) }
}
fn lower_mt(&mut self, mt: &MutTy, itctx: ImplTraitContext<'_, 'hir>) -> hir::MutTy<'hir> {
hir::MutTy { ty: self.lower_ty(&mt.ty, itctx), mutbl: mt.mutbl }
}
fn lower_param_bounds(
&mut self,
bounds: &[GenericBound],
itctx: ImplTraitContext<'_, 'hir>,
) -> hir::GenericBounds<'hir> {
self.arena.alloc_from_iter(self.lower_param_bounds_mut(bounds, itctx))
}
fn lower_param_bounds_mut<'s>(
&'s mut self,
bounds: &'s [GenericBound],
mut itctx: ImplTraitContext<'s, 'hir>,
) -> impl Iterator<Item = hir::GenericBound<'hir>> + Captures<'s> + Captures<'a> {
bounds.iter().map(move |bound| self.lower_param_bound(bound, itctx.reborrow()))
}
/// Lowers a block directly to an expression, presuming that it
/// has no attributes and is not targeted by a `break`.
fn lower_block_expr(&mut self, b: &Block) -> hir::Expr<'hir> {
let block = self.lower_block(b, false);
self.expr_block(block, AttrVec::new())
}
fn lower_anon_const(&mut self, c: &AnonConst) -> hir::AnonConst {
self.with_new_scopes(|this| hir::AnonConst {
hir_id: this.lower_node_id(c.id),
body: this.lower_const_body(c.value.span, Some(&c.value)),
})
}
fn lower_unsafe_source(&mut self, u: UnsafeSource) -> hir::UnsafeSource {
match u {
CompilerGenerated => hir::UnsafeSource::CompilerGenerated,
UserProvided => hir::UnsafeSource::UserProvided,
}
}
fn lower_trait_bound_modifier(&mut self, f: TraitBoundModifier) -> hir::TraitBoundModifier {
match f {
TraitBoundModifier::None => hir::TraitBoundModifier::None,
TraitBoundModifier::MaybeConst => hir::TraitBoundModifier::MaybeConst,
// `MaybeConstMaybe` will cause an error during AST validation, but we need to pick a
// placeholder for compilation to proceed.
TraitBoundModifier::MaybeConstMaybe | TraitBoundModifier::Maybe => {
hir::TraitBoundModifier::Maybe
}
}
}
// Helper methods for building HIR.
fn stmt(&mut self, span: Span, kind: hir::StmtKind<'hir>) -> hir::Stmt<'hir> {
hir::Stmt { span: self.lower_span(span), kind, hir_id: self.next_id() }
}
fn stmt_expr(&mut self, span: Span, expr: hir::Expr<'hir>) -> hir::Stmt<'hir> {
self.stmt(span, hir::StmtKind::Expr(self.arena.alloc(expr)))
}
fn stmt_let_pat(
&mut self,
attrs: Option<&'hir [Attribute]>,
span: Span,
init: Option<&'hir hir::Expr<'hir>>,
pat: &'hir hir::Pat<'hir>,
source: hir::LocalSource,
) -> hir::Stmt<'hir> {
let hir_id = self.next_id();
if let Some(a) = attrs {
debug_assert!(!a.is_empty());
self.attrs.insert(hir_id.local_id, a);
}
let local = hir::Local { hir_id, init, pat, source, span: self.lower_span(span), ty: None };
self.stmt(span, hir::StmtKind::Local(self.arena.alloc(local)))
}
fn block_expr(&mut self, expr: &'hir hir::Expr<'hir>) -> &'hir hir::Block<'hir> {
self.block_all(expr.span, &[], Some(expr))
}
fn block_all(
&mut self,
span: Span,
stmts: &'hir [hir::Stmt<'hir>],
expr: Option<&'hir hir::Expr<'hir>>,
) -> &'hir hir::Block<'hir> {
let blk = hir::Block {
stmts,
expr,
hir_id: self.next_id(),
rules: hir::BlockCheckMode::DefaultBlock,
span: self.lower_span(span),
targeted_by_break: false,
};
self.arena.alloc(blk)
}
fn pat_cf_continue(&mut self, span: Span, pat: &'hir hir::Pat<'hir>) -> &'hir hir::Pat<'hir> {
let field = self.single_pat_field(span, pat);
self.pat_lang_item_variant(span, hir::LangItem::ControlFlowContinue, field)
}
fn pat_cf_break(&mut self, span: Span, pat: &'hir hir::Pat<'hir>) -> &'hir hir::Pat<'hir> {
let field = self.single_pat_field(span, pat);
self.pat_lang_item_variant(span, hir::LangItem::ControlFlowBreak, field)
}
fn pat_some(&mut self, span: Span, pat: &'hir hir::Pat<'hir>) -> &'hir hir::Pat<'hir> {
let field = self.single_pat_field(span, pat);
self.pat_lang_item_variant(span, hir::LangItem::OptionSome, field)
}
fn pat_none(&mut self, span: Span) -> &'hir hir::Pat<'hir> {
self.pat_lang_item_variant(span, hir::LangItem::OptionNone, &[])
}
fn single_pat_field(
&mut self,
span: Span,
pat: &'hir hir::Pat<'hir>,
) -> &'hir [hir::PatField<'hir>] {
let field = hir::PatField {
hir_id: self.next_id(),
ident: Ident::new(sym::integer(0), self.lower_span(span)),
is_shorthand: false,
pat,
span: self.lower_span(span),
};
arena_vec![self; field]
}
fn pat_lang_item_variant(
&mut self,
span: Span,
lang_item: hir::LangItem,
fields: &'hir [hir::PatField<'hir>],
) -> &'hir hir::Pat<'hir> {
let qpath = hir::QPath::LangItem(lang_item, self.lower_span(span));
self.pat(span, hir::PatKind::Struct(qpath, fields, false))
}
fn pat_ident(&mut self, span: Span, ident: Ident) -> (&'hir hir::Pat<'hir>, hir::HirId) {
self.pat_ident_binding_mode(span, ident, hir::BindingAnnotation::Unannotated)
}
fn pat_ident_mut(&mut self, span: Span, ident: Ident) -> (hir::Pat<'hir>, hir::HirId) {
self.pat_ident_binding_mode_mut(span, ident, hir::BindingAnnotation::Unannotated)
}
fn pat_ident_binding_mode(
&mut self,
span: Span,
ident: Ident,
bm: hir::BindingAnnotation,
) -> (&'hir hir::Pat<'hir>, hir::HirId) {
let (pat, hir_id) = self.pat_ident_binding_mode_mut(span, ident, bm);
(self.arena.alloc(pat), hir_id)
}
fn pat_ident_binding_mode_mut(
&mut self,
span: Span,
ident: Ident,
bm: hir::BindingAnnotation,
) -> (hir::Pat<'hir>, hir::HirId) {
let hir_id = self.next_id();
(
hir::Pat {
hir_id,
kind: hir::PatKind::Binding(bm, hir_id, self.lower_ident(ident), None),
span: self.lower_span(span),
default_binding_modes: true,
},
hir_id,
)
}
fn pat(&mut self, span: Span, kind: hir::PatKind<'hir>) -> &'hir hir::Pat<'hir> {
self.arena.alloc(hir::Pat {
hir_id: self.next_id(),
kind,
span: self.lower_span(span),
default_binding_modes: true,
})
}
fn pat_without_dbm(&mut self, span: Span, kind: hir::PatKind<'hir>) -> hir::Pat<'hir> {
hir::Pat {
hir_id: self.next_id(),
kind,
span: self.lower_span(span),
default_binding_modes: false,
}
}
fn ty_path(
&mut self,
mut hir_id: hir::HirId,
span: Span,
qpath: hir::QPath<'hir>,
) -> hir::Ty<'hir> {
let kind = match qpath {
hir::QPath::Resolved(None, path) => {
// Turn trait object paths into `TyKind::TraitObject` instead.
match path.res {
Res::Def(DefKind::Trait | DefKind::TraitAlias, _) => {
let principal = hir::PolyTraitRef {
bound_generic_params: &[],
trait_ref: hir::TraitRef { path, hir_ref_id: hir_id },
span: self.lower_span(span),
};
// The original ID is taken by the `PolyTraitRef`,
// so the `Ty` itself needs a different one.
hir_id = self.next_id();
hir::TyKind::TraitObject(
arena_vec![self; principal],
self.elided_dyn_bound(span),
TraitObjectSyntax::None,
)
}
_ => hir::TyKind::Path(hir::QPath::Resolved(None, path)),
}
}
_ => hir::TyKind::Path(qpath),
};
hir::Ty { hir_id, kind, span: self.lower_span(span) }
}
/// Invoked to create the lifetime argument for a type `&T`
/// with no explicit lifetime.
fn elided_ref_lifetime(&mut self, span: Span) -> hir::Lifetime {
match self.anonymous_lifetime_mode {
// Intercept when we are in an impl header or async fn and introduce an in-band
// lifetime.
// Hence `impl Foo for &u32` becomes `impl<'f> Foo for &'f u32` for some fresh
// `'f`.
AnonymousLifetimeMode::CreateParameter => {
let fresh_name = self.collect_fresh_in_band_lifetime(span);
hir::Lifetime {
hir_id: self.next_id(),
span: self.lower_span(span),
name: hir::LifetimeName::Param(fresh_name),
}
}
AnonymousLifetimeMode::ReportError => self.new_error_lifetime(None, span),
AnonymousLifetimeMode::PassThrough => self.new_implicit_lifetime(span),
}
}
/// Report an error on illegal use of `'_` or a `&T` with no explicit lifetime;
/// return an "error lifetime".
fn new_error_lifetime(&mut self, id: Option<NodeId>, span: Span) -> hir::Lifetime {
let (id, msg, label) = match id {
Some(id) => (id, "`'_` cannot be used here", "`'_` is a reserved lifetime name"),
None => (
self.resolver.next_node_id(),
"`&` without an explicit lifetime name cannot be used here",
"explicit lifetime name needed here",
),
};
let mut err = struct_span_err!(self.sess, span, E0637, "{}", msg,);
err.span_label(span, label);
err.emit();
self.new_named_lifetime(id, span, hir::LifetimeName::Error)
}
/// Invoked to create the lifetime argument(s) for a path like
/// `std::cell::Ref<T>`; note that implicit lifetimes in these
/// sorts of cases are deprecated. This may therefore report a warning or an
/// error, depending on the mode.
fn elided_path_lifetimes<'s>(
&'s mut self,
span: Span,
count: usize,
) -> impl Iterator<Item = hir::Lifetime> + Captures<'a> + Captures<'s> + Captures<'hir> {
(0..count).map(move |_| self.elided_path_lifetime(span))
}
fn elided_path_lifetime(&mut self, span: Span) -> hir::Lifetime {
match self.anonymous_lifetime_mode {
AnonymousLifetimeMode::CreateParameter => {
// We should have emitted E0726 when processing this path above
self.sess
.delay_span_bug(span, "expected 'implicit elided lifetime not allowed' error");
let id = self.resolver.next_node_id();
self.new_named_lifetime(id, span, hir::LifetimeName::Error)
}
// `PassThrough` is the normal case.
// `new_error_lifetime`, which would usually be used in the case of `ReportError`,
// is unsuitable here, as these can occur from missing lifetime parameters in a
// `PathSegment`, for which there is no associated `'_` or `&T` with no explicit
// lifetime. Instead, we simply create an implicit lifetime, which will be checked
// later, at which point a suitable error will be emitted.
AnonymousLifetimeMode::PassThrough | AnonymousLifetimeMode::ReportError => {
self.new_implicit_lifetime(span)
}
}
}
/// Invoked to create the lifetime argument(s) for an elided trait object
/// bound, like the bound in `Box<dyn Debug>`. This method is not invoked
/// when the bound is written, even if it is written with `'_` like in
/// `Box<dyn Debug + '_>`. In those cases, `lower_lifetime` is invoked.
fn elided_dyn_bound(&mut self, span: Span) -> hir::Lifetime {
match self.anonymous_lifetime_mode {
// NB. We intentionally ignore the create-parameter mode here.
// and instead "pass through" to resolve-lifetimes, which will apply
// the object-lifetime-defaulting rules. Elided object lifetime defaults
// do not act like other elided lifetimes. In other words, given this:
//
// impl Foo for Box<dyn Debug>
//
// we do not introduce a fresh `'_` to serve as the bound, but instead
// ultimately translate to the equivalent of:
//
// impl Foo for Box<dyn Debug + 'static>
//
// `resolve_lifetime` has the code to make that happen.
AnonymousLifetimeMode::CreateParameter => {}
AnonymousLifetimeMode::ReportError => {
// ReportError applies to explicit use of `'_`.
}
// This is the normal case.
AnonymousLifetimeMode::PassThrough => {}
}
let r = hir::Lifetime {
hir_id: self.next_id(),
span: self.lower_span(span),
name: hir::LifetimeName::ImplicitObjectLifetimeDefault,
};
debug!("elided_dyn_bound: r={:?}", r);
r
}
fn new_implicit_lifetime(&mut self, span: Span) -> hir::Lifetime {
hir::Lifetime {
hir_id: self.next_id(),
span: self.lower_span(span),
name: hir::LifetimeName::Implicit,
}
}
fn maybe_lint_bare_trait(&mut self, span: Span, id: NodeId, is_global: bool) {
// FIXME(davidtwco): This is a hack to detect macros which produce spans of the
// call site which do not have a macro backtrace. See #61963.
let is_macro_callsite = self
.sess
.source_map()
.span_to_snippet(span)
.map(|snippet| snippet.starts_with("#["))
.unwrap_or(true);
if !is_macro_callsite {
if span.edition() < Edition::Edition2021 {
self.resolver.lint_buffer().buffer_lint_with_diagnostic(
BARE_TRAIT_OBJECTS,
id,
span,
"trait objects without an explicit `dyn` are deprecated",
BuiltinLintDiagnostics::BareTraitObject(span, is_global),
)
} else {
let msg = "trait objects must include the `dyn` keyword";
let label = "add `dyn` keyword before this trait";
let mut err = struct_span_err!(self.sess, span, E0782, "{}", msg,);
err.span_suggestion_verbose(
span.shrink_to_lo(),
label,
String::from("dyn "),
Applicability::MachineApplicable,
);
err.emit();
}
}
}
}
/// Helper struct for delayed construction of GenericArgs.
struct GenericArgsCtor<'hir> {
args: SmallVec<[hir::GenericArg<'hir>; 4]>,
bindings: &'hir [hir::TypeBinding<'hir>],
parenthesized: bool,
span: Span,
}
impl<'hir> GenericArgsCtor<'hir> {
fn is_empty(&self) -> bool {
self.args.is_empty() && self.bindings.is_empty() && !self.parenthesized
}
fn into_generic_args(self, this: &LoweringContext<'_, 'hir>) -> &'hir hir::GenericArgs<'hir> {
let ga = hir::GenericArgs {
args: this.arena.alloc_from_iter(self.args),
bindings: self.bindings,
parenthesized: self.parenthesized,
span_ext: this.lower_span(self.span),
};
this.arena.alloc(ga)
}
}
fn lifetimes_from_impl_trait_bounds(
opaque_ty_id: NodeId,
bounds: hir::GenericBounds<'_>,
lifetimes_to_include: Option<&FxHashSet<hir::LifetimeName>>,
) -> Vec<(hir::LifetimeName, Span)> {
debug!(
"lifetimes_from_impl_trait_bounds(opaque_ty_id={:?}, \
bounds={:#?})",
opaque_ty_id, bounds,
);
// This visitor walks over `impl Trait` bounds and creates defs for all lifetimes that
// appear in the bounds, excluding lifetimes that are created within the bounds.
// E.g., `'a`, `'b`, but not `'c` in `impl for<'c> SomeTrait<'a, 'b, 'c>`.
struct ImplTraitLifetimeCollector<'r> {
collect_elided_lifetimes: bool,
currently_bound_lifetimes: Vec<hir::LifetimeName>,
already_defined_lifetimes: FxHashSet<hir::LifetimeName>,
lifetimes: Vec<(hir::LifetimeName, Span)>,
lifetimes_to_include: Option<&'r FxHashSet<hir::LifetimeName>>,
}
impl<'r, 'v> intravisit::Visitor<'v> for ImplTraitLifetimeCollector<'r> {
type Map = intravisit::ErasedMap<'v>;
fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::None
}
fn visit_generic_args(&mut self, span: Span, parameters: &'v hir::GenericArgs<'v>) {
// Don't collect elided lifetimes used inside of `Fn()` syntax.
if parameters.parenthesized {
let old_collect_elided_lifetimes = self.collect_elided_lifetimes;
self.collect_elided_lifetimes = false;
intravisit::walk_generic_args(self, span, parameters);
self.collect_elided_lifetimes = old_collect_elided_lifetimes;
} else {
intravisit::walk_generic_args(self, span, parameters);
}
}
fn visit_ty(&mut self, t: &'v hir::Ty<'v>) {
// Don't collect elided lifetimes used inside of `fn()` syntax.
if let hir::TyKind::BareFn(_) = t.kind {
let old_collect_elided_lifetimes = self.collect_elided_lifetimes;
self.collect_elided_lifetimes = false;
// Record the "stack height" of `for<'a>` lifetime bindings
// to be able to later fully undo their introduction.
let old_len = self.currently_bound_lifetimes.len();
intravisit::walk_ty(self, t);
self.currently_bound_lifetimes.truncate(old_len);
self.collect_elided_lifetimes = old_collect_elided_lifetimes;
} else {
intravisit::walk_ty(self, t)
}
}
fn visit_poly_trait_ref(
&mut self,
trait_ref: &'v hir::PolyTraitRef<'v>,
modifier: hir::TraitBoundModifier,
) {
// Record the "stack height" of `for<'a>` lifetime bindings
// to be able to later fully undo their introduction.
let old_len = self.currently_bound_lifetimes.len();
intravisit::walk_poly_trait_ref(self, trait_ref, modifier);
self.currently_bound_lifetimes.truncate(old_len);
}
fn visit_generic_param(&mut self, param: &'v hir::GenericParam<'v>) {
// Record the introduction of 'a in `for<'a> ...`.
if let hir::GenericParamKind::Lifetime { .. } = param.kind {
// Introduce lifetimes one at a time so that we can handle
// cases like `fn foo<'d>() -> impl for<'a, 'b: 'a, 'c: 'b + 'd>`.
let lt_name = hir::LifetimeName::Param(param.name);
self.currently_bound_lifetimes.push(lt_name);
}
intravisit::walk_generic_param(self, param);
}
fn visit_lifetime(&mut self, lifetime: &'v hir::Lifetime) {
let name = match lifetime.name {
hir::LifetimeName::Implicit | hir::LifetimeName::Underscore => {
if self.collect_elided_lifetimes {
// Use `'_` for both implicit and underscore lifetimes in
// `type Foo<'_> = impl SomeTrait<'_>;`.
hir::LifetimeName::Underscore
} else {
return;
}
}
hir::LifetimeName::Param(_) => lifetime.name,
// Refers to some other lifetime that is "in
// scope" within the type.
hir::LifetimeName::ImplicitObjectLifetimeDefault => return,
hir::LifetimeName::Error | hir::LifetimeName::Static => return,
};
if !self.currently_bound_lifetimes.contains(&name)
&& !self.already_defined_lifetimes.contains(&name)
&& self.lifetimes_to_include.map_or(true, |lifetimes| lifetimes.contains(&name))
{
self.already_defined_lifetimes.insert(name);
self.lifetimes.push((name, lifetime.span));
}
}
}
let mut lifetime_collector = ImplTraitLifetimeCollector {
collect_elided_lifetimes: true,
currently_bound_lifetimes: Vec::new(),
already_defined_lifetimes: FxHashSet::default(),
lifetimes: Vec::new(),
lifetimes_to_include,
};
for bound in bounds {
intravisit::walk_param_bound(&mut lifetime_collector, &bound);
}
lifetime_collector.lifetimes
}
|
// we're creating the type arguments for the return type:
//
// ```
|
tuple.rs
|
use super::{
int,
iter::IterStatus::{self, Active, Exhausted},
PyInt, PyTypeRef,
};
use crate::common::hash::PyHash;
use crate::{
function::OptionalArg,
protocol::PyIterReturn,
sequence::{self, SimpleSeq},
sliceable::PySliceableSequence,
slots::{
Comparable, Hashable, Iterable, IteratorIterable, PyComparisonOp, SlotConstructor,
SlotIterator,
},
utils::Either,
vm::{ReprGuard, VirtualMachine},
IdProtocol, IntoPyObject, PyArithmaticValue, PyClassDef, PyClassImpl, PyComparisonValue,
PyContext, PyObjectRef, PyRef, PyResult, PyValue, TransmuteFromObject, TryFromObject,
TypeProtocol,
};
use crossbeam_utils::atomic::AtomicCell;
use std::fmt;
use std::marker::PhantomData;
/// tuple() -> empty tuple
/// tuple(iterable) -> tuple initialized from iterable's items
///
/// If the argument is a tuple, the return value is the same object.
#[pyclass(module = false, name = "tuple")]
pub struct PyTuple {
elements: Box<[PyObjectRef]>,
}
impl fmt::Debug for PyTuple {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// TODO: implement more informational, non-recursive Debug formatter
f.write_str("tuple")
}
}
impl PyValue for PyTuple {
fn class(vm: &VirtualMachine) -> &PyTypeRef {
&vm.ctx.types.tuple_type
}
}
macro_rules! impl_intopyobj_tuple {
($(($T:ident, $idx:tt)),+) => {
impl<$($T: IntoPyObject),*> IntoPyObject for ($($T,)*) {
fn into_pyobject(self, vm: &VirtualMachine) -> PyObjectRef {
vm.ctx.new_tuple(vec![$(self.$idx.into_pyobject(vm)),*])
}
}
};
}
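// Illustrative expansion (sketch for clarity, not generated code): the invocation
// `impl_intopyobj_tuple!((A, 0), (B, 1))` below produces roughly:
//
// impl<A: IntoPyObject, B: IntoPyObject> IntoPyObject for (A, B) {
//     fn into_pyobject(self, vm: &VirtualMachine) -> PyObjectRef {
//         vm.ctx.new_tuple(vec![self.0.into_pyobject(vm), self.1.into_pyobject(vm)])
//     }
// }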
impl_intopyobj_tuple!((A, 0));
impl_intopyobj_tuple!((A, 0), (B, 1));
impl_intopyobj_tuple!((A, 0), (B, 1), (C, 2));
impl_intopyobj_tuple!((A, 0), (B, 1), (C, 2), (D, 3));
impl_intopyobj_tuple!((A, 0), (B, 1), (C, 2), (D, 3), (E, 4));
impl_intopyobj_tuple!((A, 0), (B, 1), (C, 2), (D, 3), (E, 4), (F, 5));
impl_intopyobj_tuple!((A, 0), (B, 1), (C, 2), (D, 3), (E, 4), (F, 5), (G, 6));
impl PyTuple {
pub(crate) fn fast_getitem(&self, idx: usize) -> PyObjectRef {
self.elements[idx].clone()
}
}
pub type PyTupleRef = PyRef<PyTuple>;
impl PyTupleRef {
pub(crate) fn with_elements(elements: Vec<PyObjectRef>, ctx: &PyContext) -> Self
|
}
impl SlotConstructor for PyTuple {
type Args = OptionalArg<PyObjectRef>;
fn py_new(cls: PyTypeRef, iterable: Self::Args, vm: &VirtualMachine) -> PyResult {
let elements = if let OptionalArg::Present(iterable) = iterable {
let iterable = if cls.is(&vm.ctx.types.tuple_type) {
match iterable.downcast_exact::<Self>(vm) {
Ok(tuple) => return Ok(tuple.into_object()),
Err(iterable) => iterable,
}
} else {
iterable
};
vm.extract_elements(&iterable)?
} else {
vec![]
};
// Return the shared empty tuple only when `cls` is exactly the tuple type and there are no elements.
if elements.is_empty() && cls.is(&vm.ctx.types.tuple_type) {
Ok(vm.ctx.empty_tuple.clone().into_object())
} else {
Self {
elements: elements.into_boxed_slice(),
}
.into_pyresult_with_type(vm, cls)
}
}
}
#[pyimpl(flags(BASETYPE), with(Hashable, Comparable, Iterable, SlotConstructor))]
impl PyTuple {
/// Creates a new tuple from the given boxed slice.
/// NOTE: in the usual case, you probably want to use `PyTupleRef::with_elements` instead.
/// Calling this function directly implies a micro-optimization for non-zero-sized tuples.
pub fn new_unchecked(elements: Box<[PyObjectRef]>) -> Self {
Self { elements }
}
pub fn as_slice(&self) -> &[PyObjectRef] {
&self.elements
}
#[pymethod(magic)]
fn add(
zelf: PyRef<Self>,
other: PyObjectRef,
vm: &VirtualMachine,
) -> PyArithmaticValue<PyRef<Self>> {
let added = other.downcast::<Self>().map(|other| {
if other.elements.is_empty() && zelf.class().is(&vm.ctx.types.tuple_type) {
zelf
} else if zelf.elements.is_empty() && other.class().is(&vm.ctx.types.tuple_type) {
other
} else {
let elements = zelf
.as_slice()
.iter()
.chain(other.as_slice())
.cloned()
.collect::<Box<[_]>>();
Self { elements }.into_ref(vm)
}
});
PyArithmaticValue::from_option(added.ok())
}
#[pymethod(magic)]
fn bool(&self) -> bool {
!self.elements.is_empty()
}
#[pymethod]
fn count(&self, needle: PyObjectRef, vm: &VirtualMachine) -> PyResult<usize> {
let mut count: usize = 0;
for element in self.elements.iter() {
if vm.identical_or_equal(element, &needle)? {
count += 1;
}
}
Ok(count)
}
#[pymethod(magic)]
#[inline]
pub fn len(&self) -> usize {
self.elements.len()
}
#[inline]
pub fn is_empty(&self) -> bool {
self.elements.is_empty()
}
#[pymethod(magic)]
fn repr(zelf: PyRef<Self>, vm: &VirtualMachine) -> PyResult<String> {
let s = if let Some(_guard) = ReprGuard::enter(vm, zelf.as_object()) {
let mut str_parts = Vec::with_capacity(zelf.elements.len());
for elem in zelf.elements.iter() {
let s = vm.to_repr(elem)?;
str_parts.push(s.as_str().to_owned());
}
if str_parts.len() == 1 {
format!("({},)", str_parts[0])
} else {
format!("({})", str_parts.join(", "))
}
} else {
"(...)".to_owned()
};
Ok(s)
}
#[pymethod(name = "__rmul__")]
#[pymethod(magic)]
fn mul(zelf: PyRef<Self>, value: isize, vm: &VirtualMachine) -> PyResult<PyRef<Self>> {
Ok(if zelf.elements.is_empty() || value == 0 {
vm.ctx.empty_tuple.clone()
} else if value == 1 && zelf.class().is(&vm.ctx.types.tuple_type) {
// Special case: when a `tuple` is multiplied by `1`,
// nothing really happens; we need to return the object itself
// with the same `id()` to be compatible with CPython.
// This only works for `tuple` itself, not its subclasses.
zelf
} else {
let elements = sequence::seq_mul(vm, &zelf.elements, value)?
.cloned()
.collect::<Vec<_>>()
.into_boxed_slice();
Self { elements }.into_ref(vm)
})
}
#[pymethod(magic)]
fn getitem(zelf: PyRef<Self>, needle: PyObjectRef, vm: &VirtualMachine) -> PyResult {
let result = match zelf.elements.as_ref().get_item(vm, needle, Self::NAME)? {
Either::A(obj) => obj,
Either::B(vec) => vm.ctx.new_tuple(vec),
};
Ok(result)
}
#[pymethod]
fn index(
&self,
needle: PyObjectRef,
start: OptionalArg<isize>,
stop: OptionalArg<isize>,
vm: &VirtualMachine,
) -> PyResult<usize> {
let mut start = start.into_option().unwrap_or(0);
if start < 0 {
start += self.as_slice().len() as isize;
if start < 0 {
start = 0;
}
}
let mut stop = stop.into_option().unwrap_or(isize::MAX);
if stop < 0 {
stop += self.as_slice().len() as isize;
if stop < 0 {
stop = 0;
}
}
for (index, element) in self
.elements
.iter()
.enumerate()
.take(stop as usize)
.skip(start as usize)
{
if vm.identical_or_equal(element, &needle)? {
return Ok(index);
}
}
Err(vm.new_value_error("tuple.index(x): x not in tuple".to_owned()))
}
#[pymethod(magic)]
fn contains(&self, needle: PyObjectRef, vm: &VirtualMachine) -> PyResult<bool> {
for element in self.elements.iter() {
if vm.identical_or_equal(element, &needle)? {
return Ok(true);
}
}
Ok(false)
}
#[pymethod(magic)]
fn getnewargs(zelf: PyRef<Self>, vm: &VirtualMachine) -> (PyTupleRef,) {
// The argument to pass to tuple() is just one tuple - so we'll be doing tuple(tup), which
// should just return tup, or tuplesubclass(tup), which will copy/validate (e.g. for a
// structseq)
let tup_arg = if zelf.class().is(&vm.ctx.types.tuple_type) {
zelf
} else {
PyTupleRef::with_elements(zelf.elements.clone().into_vec(), &vm.ctx)
};
(tup_arg,)
}
}
impl Hashable for PyTuple {
fn hash(zelf: &PyRef<Self>, vm: &VirtualMachine) -> PyResult<PyHash> {
crate::utils::hash_iter(zelf.elements.iter(), vm)
}
}
impl Comparable for PyTuple {
fn cmp(
zelf: &PyRef<Self>,
other: &PyObjectRef,
op: PyComparisonOp,
vm: &VirtualMachine,
) -> PyResult<PyComparisonValue> {
if let Some(res) = op.identical_optimization(zelf, other) {
return Ok(res.into());
}
let other = class_or_notimplemented!(Self, other);
let a = zelf.as_slice();
let b = other.as_slice();
sequence::cmp(vm, a.boxed_iter(), b.boxed_iter(), op).map(PyComparisonValue::Implemented)
}
}
impl Iterable for PyTuple {
fn iter(zelf: PyRef<Self>, vm: &VirtualMachine) -> PyResult {
Ok(PyTupleIterator {
position: AtomicCell::new(0),
status: AtomicCell::new(Active),
tuple: zelf,
}
.into_object(vm))
}
}
#[pyclass(module = false, name = "tuple_iterator")]
#[derive(Debug)]
pub(crate) struct PyTupleIterator {
position: AtomicCell<usize>,
status: AtomicCell<IterStatus>,
tuple: PyTupleRef,
}
impl PyValue for PyTupleIterator {
fn class(vm: &VirtualMachine) -> &PyTypeRef {
&vm.ctx.types.tuple_iterator_type
}
}
#[pyimpl(with(SlotIterator))]
impl PyTupleIterator {
#[pymethod(magic)]
fn length_hint(&self) -> usize {
match self.status.load() {
Active => self.tuple.len().saturating_sub(self.position.load()),
Exhausted => 0,
}
}
#[pymethod(magic)]
fn setstate(&self, state: PyObjectRef, vm: &VirtualMachine) -> PyResult<()> {
// When we're exhausted, just return.
if let Exhausted = self.status.load() {
return Ok(());
}
// Else, set to min of (pos, tuple_size).
if let Some(i) = state.payload::<PyInt>() {
let position = std::cmp::min(
int::try_to_primitive(i.as_bigint(), vm).unwrap_or(0),
self.tuple.len(),
);
self.position.store(position);
Ok(())
} else {
Err(vm.new_type_error("an integer is required.".to_owned()))
}
}
#[pymethod(magic)]
fn reduce(&self, vm: &VirtualMachine) -> PyResult {
let iter = vm.get_attribute(vm.builtins.clone(), "iter")?;
Ok(match self.status.load() {
Exhausted => vm
.ctx
.new_tuple(vec![iter, vm.ctx.new_tuple(vec![vm.ctx.new_list(vec![])])]),
Active => vm.ctx.new_tuple(vec![
iter,
vm.ctx.new_tuple(vec![self.tuple.clone().into_object()]),
vm.ctx.new_int(self.position.load()),
]),
})
}
}
impl IteratorIterable for PyTupleIterator {}
impl SlotIterator for PyTupleIterator {
fn next(zelf: &PyRef<Self>, _vm: &VirtualMachine) -> PyResult<PyIterReturn> {
if let Exhausted = zelf.status.load() {
return Ok(PyIterReturn::StopIteration(None));
}
let pos = zelf.position.fetch_add(1);
if let Some(obj) = zelf.tuple.as_slice().get(pos) {
Ok(PyIterReturn::Return(obj.clone()))
} else {
zelf.status.store(Exhausted);
Ok(PyIterReturn::StopIteration(None))
}
}
}
pub(crate) fn init(context: &PyContext) {
PyTuple::extend_class(context, &context.types.tuple_type);
PyTupleIterator::extend_class(context, &context.types.tuple_iterator_type);
}
pub struct PyTupleTyped<T: TransmuteFromObject> {
// SAFETY INVARIANT: T must be repr(transparent) over PyObjectRef, and the
// elements must be logically valid when transmuted to T
tuple: PyTupleRef,
_marker: PhantomData<Vec<T>>,
}
impl<T: TransmuteFromObject> TryFromObject for PyTupleTyped<T> {
fn try_from_object(vm: &VirtualMachine, obj: PyObjectRef) -> PyResult<Self> {
let tuple = PyTupleRef::try_from_object(vm, obj)?;
for elem in tuple.as_slice() {
T::check(vm, elem)?
}
// SAFETY: the contract of TransmuteFromObject upholds the invariant on `tuple`
Ok(Self {
tuple,
_marker: PhantomData,
})
}
}
impl<T: TransmuteFromObject> PyTupleTyped<T> {
#[inline]
pub fn as_slice(&self) -> &[T] {
unsafe { &*(self.tuple.as_slice() as *const [PyObjectRef] as *const [T]) }
}
#[inline]
pub fn len(&self) -> usize {
self.tuple.len()
}
#[inline]
pub fn is_empty(&self) -> bool {
self.tuple.is_empty()
}
}
impl<T: TransmuteFromObject> Clone for PyTupleTyped<T> {
fn clone(&self) -> Self {
Self {
tuple: self.tuple.clone(),
_marker: PhantomData,
}
}
}
impl<T: TransmuteFromObject + fmt::Debug> fmt::Debug for PyTupleTyped<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.as_slice().fmt(f)
}
}
impl<T: TransmuteFromObject> From<PyTupleTyped<T>> for PyTupleRef {
#[inline]
fn from(tup: PyTupleTyped<T>) -> Self {
tup.tuple
}
}
impl<T: TransmuteFromObject> IntoPyObject for PyTupleTyped<T> {
#[inline]
fn into_pyobject(self, _vm: &VirtualMachine) -> PyObjectRef {
self.tuple.into_object()
}
}
|
{
if elements.is_empty() {
ctx.empty_tuple.clone()
} else {
let elements = elements.into_boxed_slice();
Self::new_ref(PyTuple { elements }, ctx.types.tuple_type.clone(), None)
}
}
|
iam_admin_client.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import configparser
from airavata_custos import utils
from airavata_custos.settings import ProfileSettings
logger = logging.getLogger(__name__)
class IAMAdminClient(object):
def __init__(self, configuration_file_location):
"""
Constructor for the IAMAdminClient class
:param configuration_file_location: location of the ini file containing the server configuration
"""
self.profile_settings = ProfileSettings()
self._load_settings(configuration_file_location)
self.iamadmin_client_pool = utils.initialize_iamadmin_client_pool(self.profile_settings.PROFILE_SERVICE_HOST,
self.profile_settings.PROFILE_SERVICE_PORT)
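# Illustrative usage sketch (names and values below are assumptions, not part of this module):
#   client = IAMAdminClient('/path/to/airavata-custos.ini')
#   if client.is_username_available(authz_token, 'alice'):
#       client.register_user(authz_token, 'alice', 'alice@example.org', 'Alice', 'Doe', 'changeme')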
def is_username_available(self, authz_token, username):
"""
This method checks whether the username is available
:param authz_token: Object of AuthzToken class containing access token, username, gatewayId of the active user
:param username: The username whose availability needs to be verified
:return: boolean
"""
return self.iamadmin_client_pool.isUsernameAvailable(authz_token, username)
def register_user(self, authz_token, username, email_address, first_name, last_name, password):
"""
This method registers the user with the Keycloak instance and returns true if successful, false if the registration fails
:param authz_token: Object of AuthzToken class containing access token, username, gatewayId of the active user
:param username: The username of the user that needs to be registered
:param email_address: The email address of the user that needs to be registered
:param first_name: The first name of the user that needs to be registered
:param last_name: The last name of the user that needs to be registered
:param password: The password of the user that needs to be registered
:return: boolean
"""
return self.iamadmin_client_pool.registerUser(
authz_token,
username,
email_address,
first_name,
last_name,
password)
def is_user_enabled(self, authz_token, username):
"""
Checks whether the user is enabled or disabled in Keycloak. Only an enabled user can log in
:param authz_token: Object of AuthzToken class containing access token, username, gatewayId of the active user
:param username: The username of the user
:return: boolean
"""
return self.iamadmin_client_pool.isUserEnabled(authz_token, username)
def enable_user(self, authz_token, username):
"""
This method enables a disabled user
:param authz_token: Object of AuthzToken class containing access token, username, gatewayId of the active user
:param username: The username of the user
:return: Object of UserProfile class, containing user details
"""
return self.iamadmin_client_pool.enableUser(authz_token, username)
def delete_user(self, authz_token, username):
"""
This method deletes the user from Keycloak. Returns true if the deletion is successful
:param authz_token: Object of AuthzToken class containing access token, username, gatewayId of the active user
:param username: The username of the user
:return: boolean
"""
return self.iamadmin_client_pool.deleteUser(authz_token, username)
def
|
(self, authz_token, username):
"""
This method checks if the user exists in Keycloak. Returns true if the user exists, otherwise returns false
:param authz_token: Object of AuthzToken class containing access token, username, gatewayId of the active user
:param username: The username of the user
:return: boolean
"""
try:
return self.iamadmin_client_pool.isUserExist(authz_token, username)
except Exception:
return None
def get_user(self, authz_token, username):
"""
:param authz_token: Object of AuthzToken class containing access token, username, gatewayId of the active user
:param username: username of the user
:return: object of class UserProfile
"""
try:
return self.iamadmin_client_pool.getUser(authz_token, username)
except Exception:
return None
def get_users(self, authz_token, offset=0, limit=-1, search=None):
"""
:param authz_token: Object of AuthzToken class containing access token, username, gatewayId of the active user
:param offset: start index
:param limit: end index
:param search: search criteria for filtering users
:return: list of UserProfile class objects
"""
try:
return self.iamadmin_client_pool.getUsers(authz_token, offset, limit, search)
except Exception:
return None
def reset_user_password(self, authz_token, username, new_password):
"""
:param authz_token: Object of AuthzToken class containing access token, username, gatewayId of the active user
:param username: username of the user
:param new_password: new password for the user
:return:
"""
try:
return self.iamadmin_client_pool.resetUserPassword(
authz_token, username, new_password)
except Exception:
return None
def _load_settings(self, configuration_file_location):
config = configparser.ConfigParser()
config.read(configuration_file_location)
settings = config['ProfileServerSettings']
self.profile_settings.PROFILE_SERVICE_HOST = settings['PROFILE_SERVICE_HOST']
self.profile_settings.PROFILE_SERVICE_PORT = settings['PROFILE_SERVICE_PORT']
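# A minimal example of the expected ini file (section and key names taken from the code above;
# the values are illustrative assumptions only):
#
# [ProfileServerSettings]
# PROFILE_SERVICE_HOST = localhost
# PROFILE_SERVICE_PORT = 8962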
|
is_user_exist
|
copy_blob_builder.rs
|
use crate::blob::blob::responses::CopyBlobResponse;
use crate::blob::prelude::*;
use crate::RehydratePriority;
use azure_core::headers::COPY_SOURCE;
use azure_core::headers::{add_mandatory_header, add_optional_header, add_optional_header_ref};
use azure_core::prelude::*;
use std::convert::TryInto;
use url::Url;
#[derive(Debug, Clone)]
pub struct CopyBlobBuilder<'a> {
blob_client: &'a BlobClient,
source_url: &'a Url,
metadata: Option<&'a Metadata>,
sequence_number_condition: Option<SequenceNumberCondition>,
if_modified_since_condition: Option<IfModifiedSinceCondition>,
if_match_condition: Option<IfMatchCondition<'a>>,
access_tier: Option<AccessTier>,
timeout: Option<Timeout>,
lease_id: Option<&'a LeaseId>,
client_request_id: Option<ClientRequestId<'a>>,
if_source_since_condition: Option<IfSourceModifiedSinceCondition>,
if_source_match_condition: Option<IfSourceMatchCondition<'a>>,
source_lease_id: Option<&'a SourceLeaseId>,
rehydrate_priority: RehydratePriority,
}
impl<'a> CopyBlobBuilder<'a> {
pub(crate) fn new(blob_client: &'a BlobClient, source_url: &'a Url) -> Self {
Self {
blob_client,
source_url,
metadata: None,
sequence_number_condition: None,
if_modified_since_condition: None,
if_match_condition: None,
access_tier: None,
timeout: None,
lease_id: None,
client_request_id: None,
if_source_since_condition: None,
if_source_match_condition: None,
source_lease_id: None,
rehydrate_priority: RehydratePriority::Standard,
}
}
setters! {
metadata: &'a Metadata => Some(metadata),
sequence_number_condition: SequenceNumberCondition => Some(sequence_number_condition),
if_modified_since_condition: IfModifiedSinceCondition => Some(if_modified_since_condition),
if_match_condition: IfMatchCondition<'a> => Some(if_match_condition),
access_tier: AccessTier => Some(access_tier),
timeout: Timeout => Some(timeout),
lease_id: &'a LeaseId => Some(lease_id),
client_request_id: ClientRequestId<'a> => Some(client_request_id),
if_source_since_condition: IfSourceModifiedSinceCondition => Some(if_source_since_condition),
if_source_match_condition: IfSourceMatchCondition<'a> => Some(if_source_match_condition),
source_lease_id: &'a SourceLeaseId => Some(source_lease_id),
rehydrate_priority: RehydratePriority => rehydrate_priority,
}
pub async fn execute(
&self,
) -> Result<CopyBlobResponse, Box<dyn std::error::Error + Send + Sync>> {
let mut url = self.blob_client.url_with_segments(None)?;
self.timeout.append_to_url_query(&mut url);
trace!("url == {:?}", url);
let (request, _url) = self.blob_client.prepare_request(
url.as_str(),
&http::Method::PUT,
&|mut request| {
request = request.header(COPY_SOURCE, self.source_url.as_str());
request = add_optional_header(&self.metadata, request);
request = add_optional_header(&self.sequence_number_condition, request);
request = add_optional_header(&self.if_modified_since_condition, request);
request = add_optional_header(&self.if_match_condition, request);
request = add_optional_header(&self.access_tier, request);
request = add_optional_header_ref(&self.lease_id, request);
request = add_optional_header(&self.client_request_id, request);
request = add_optional_header(&self.if_source_since_condition, request);
request = add_optional_header(&self.if_source_match_condition, request);
request = add_optional_header_ref(&self.source_lease_id, request);
request = add_mandatory_header(&self.rehydrate_priority, request);
request
},
None,
)?;
let response = self
.blob_client
.http_client()
.execute_request_check_status(request, http::StatusCode::ACCEPTED)
.await?;
debug!("response.headers() == {:#?}", response.headers());
Ok((response.headers()).try_into()?)
}
}
|
blat_wrapper.py
|
#!/usr/bin/env python
import os
import sys
import tempfile
assert sys.version_info[:2] >= (2, 4)
def stop_err( msg ):
sys.stderr.write( "%s\n" % msg )
sys.exit()
def check_nib_file( dbkey, GALAXY_DATA_INDEX_DIR ):
nib_file = "%s/alignseq.loc" % GALAXY_DATA_INDEX_DIR
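# Each non-comment line of alignseq.loc is expected to look like (illustrative path):
#   seq<TAB><dbkey><TAB>/path/to/nib/dir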
nib_path = ''
nibs = {}
for i, line in enumerate( open( nib_file ) ):
line = line.rstrip( '\r\n' )
if line and not line.startswith( "#" ):
fields = line.split( '\t' )
if len( fields ) < 3:
continue
if fields[0] == 'seq':
nibs[( fields[1] )] = fields[2]
if dbkey in nibs:
nib_path = nibs[( dbkey )]
return nib_path
def check_twobit_file( dbkey, GALAXY_DATA_INDEX_DIR ):
twobit_file = "%s/twobit.loc" % GALAXY_DATA_INDEX_DIR
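# Each non-comment line of twobit.loc is expected to look like (illustrative path):
#   <dbkey><TAB>/path/to/genome.2bit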
twobit_path = ''
twobits = {}
for i, line in enumerate( open( twobit_file ) ):
line = line.rstrip( '\r\n' )
if line and not line.startswith( "#" ):
fields = line.split( '\t' )
if len( fields ) < 2:
continue
twobits[( fields[0] )] = fields[1]
if dbkey in twobits:
twobit_path = twobits[( dbkey )]
return twobit_path
def __main__():
# I/O
source_format = sys.argv[1] # 0: dbkey; 1: upload file
target_file = sys.argv[2]
query_file = sys.argv[3]
output_file = sys.argv[4]
min_iden = sys.argv[5]
tile_size = sys.argv[6]
one_off = sys.argv[7]
try:
float(min_iden)
except ValueError:
stop_err('Invalid value for minimum identity.')
try:
test = int(tile_size)
assert test >= 6 and test <= 18
except (ValueError, AssertionError):
stop_err('Invalid value for tile size. DNA word size must be between 6 and 18.')
try:
test = int(one_off)
assert test >= 0 and test <= int(tile_size)
except (ValueError, AssertionError):
stop_err('Invalid value for the number of mismatches in the word.')
GALAXY_DATA_INDEX_DIR = sys.argv[8]
all_files = []
if source_format == '0':
# check target genome
dbkey = target_file
nib_path = check_nib_file( dbkey, GALAXY_DATA_INDEX_DIR )
twobit_path = check_twobit_file( dbkey, GALAXY_DATA_INDEX_DIR )
if not os.path.exists( nib_path ) and not os.path.exists( twobit_path ):
stop_err("No sequences are available for %s, request them by reporting this error." % dbkey)
# check the query file, see whether all of them are legitimate sequence
if nib_path and os.path.isdir( nib_path ):
compress_files = os.listdir(nib_path)
target_path = nib_path
elif twobit_path:
compress_files = [twobit_path]
target_path = ""
else:
stop_err("Requested genome build has no available sequence.")
for file in compress_files:
file = "%s/%s" % ( target_path, file )
file = os.path.normpath(file)
all_files.append(file)
else:
all_files = [target_file]
for detail_file_path in all_files:
output_tempfile = tempfile.NamedTemporaryFile().name
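# Summary of the blat options used below (paraphrased from the BLAT usage text):
#   -tileSize     size of the match word that seeds an alignment
#   -oneOff       number of mismatches allowed in the word
#   -minIdentity  minimum percent sequence identity to report
#   -mask=lower   treat lower-case bases as masked repeats
#   -noHead       suppress the PSL header lines
#   -out=pslx     PSL output with the aligned sequence included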
command = "blat %s %s %s -oneOff=%s -tileSize=%s -minIdentity=%s -mask=lower -noHead -out=pslx 2>&1" % ( detail_file_path, query_file, output_tempfile, one_off, tile_size, min_iden )
os.system( command )
os.system( 'cat %s >> %s' % ( output_tempfile, output_file ) )
os.remove( output_tempfile )
if __name__ == '__main__':
__main__()
|
admins.js
|
const mongoose = require('mongoose')
const authPlugin = require('../plugins/auth_plugin')
const bcryptjs = require('bcryptjs')
const adminSchema = mongoose.Schema({
name: { type: String, required: true, trim: true, unique: true },
password: { type: String, required: true, trim: true },
read: { type: Boolean, required: true },
write: { type: Boolean, required: true },
execute: { type: Boolean, required: true },
tokens: [
{token: { type: String}}
]
});
adminSchema.plugin(authPlugin)
adminSchema.statics.findAdmin = async (name, password) => {
//Finding user by his name
const admin = await Admin.findOne({ name });
if (!admin) {
throw { message: "There is no admin with this name", data: "name" };
}
//Checking for correct password
const isMatch = await bcryptjs.compare(password, admin.password);
if (!isMatch) {
throw { message: "Incorect password", data: "password" };
}
//If all good send back admin
return admin;
};
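// Illustrative usage (caller and variable names are assumptions):
//   const admin = await Admin.findAdmin(name, password)
//   // throws { message, data } when the name is unknown or the password does not match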
// userSchema.statics.validate = async (data) => {
// const { name, password } = data;
// const user = await User.findOne({ name: name });
// if (!user) throw { message: "Incorrect name", data: "name" };
// if (user.password !== password) throw { message: "Incorrect password", data: "password" };
// };
const Admin = mongoose.model('Admin',adminSchema)
module.exports = Admin
|
primitive_types2.rs
|
// primitive_types2.rs
// Fill in the rest of the line that has code missing!
// No hints, there's no tricks, just get used to typing these :)
fn main() {
// Characters (`char`)
let my_first_initial = 'C';
if my_first_initial.is_alphabetic() {
println!("Alphabetical!");
} else if my_first_initial.is_numeric() {
println!("Numerical!");
} else {
println!("Neither alphabetic nor numeric!");
}
// Finish this line like the example! What's your favorite character?
// Try a letter, try a number, try a special character, try a character
// from a different language than your own, try an emoji!
let your_character = '0';
if your_character.is_alphabetic() {
println!("Alphabetical!");
} else if your_character.is_numeric() {
println!("Numerical!");
} else {
println!("Neither alphabetic nor numeric!");
}
}
|
freetype.rs
|
// font-kit/src/loaders/freetype.rs
//
// Copyright © 2018 The Pathfinder Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A cross-platform loader that uses the FreeType library to load and rasterize fonts.
//!
//! On macOS and Windows, the Cargo feature `loader-freetype-default` can be used to opt into this
//! loader by default.
use byteorder::{BigEndian, ReadBytesExt};
use freetype::freetype::{FT_Byte, FT_Done_Face, FT_Error, FT_Face, FT_FACE_FLAG_FIXED_WIDTH};
use freetype::freetype::{
FT_Fixed, FT_Get_Char_Index, FT_Get_Name_Index, FT_Get_Postscript_Name, FT_Pos,
};
use freetype::freetype::{FT_Get_Sfnt_Table, FT_Init_FreeType, FT_LcdFilter, FT_Library};
use freetype::freetype::{FT_Library_SetLcdFilter, FT_Load_Glyph, FT_LOAD_DEFAULT};
use freetype::freetype::{FT_Load_Sfnt_Table, FT_Long, FT_Matrix, FT_New_Memory_Face};
use freetype::freetype::{FT_Reference_Face, FT_Set_Char_Size, FT_Set_Transform, FT_Sfnt_Tag};
use freetype::freetype::{FT_UInt, FT_ULong, FT_UShort, FT_Vector, FT_STYLE_FLAG_ITALIC};
use freetype::freetype::{FT_LOAD_MONOCHROME, FT_LOAD_NO_HINTING, FT_LOAD_RENDER};
use freetype::tt_os2::TT_OS2;
use log::warn;
use pathfinder_geometry::line_segment::LineSegment2F;
use pathfinder_geometry::rect::{RectF, RectI};
use pathfinder_geometry::transform2d::Transform2F;
use pathfinder_geometry::vector::{Vector2F, Vector2I};
use pathfinder_simd::default::F32x4;
use std::f32;
use std::ffi::{CStr, CString};
use std::fmt::{self, Debug, Formatter};
use std::io::{Seek, SeekFrom};
use std::iter;
use std::mem;
use std::os::raw::{c_char, c_void};
use std::ptr;
use std::slice;
use std::sync::Arc;
use crate::canvas::{Canvas, Format, RasterizationOptions};
use crate::error::{FontLoadingError, GlyphLoadingError};
use crate::file_type::FileType;
use crate::handle::Handle;
use crate::hinting::HintingOptions;
use crate::loader::{FallbackResult, Loader};
use crate::metrics::Metrics;
use crate::outline::OutlineSink;
use crate::properties::{Properties, Stretch, Style, Weight};
use crate::utils;
#[cfg(not(target_arch = "wasm32"))]
use std::fs::File;
#[cfg(not(target_arch = "wasm32"))]
use std::path::Path;
const PS_DICT_FULL_NAME: u32 = 38;
const TT_NAME_ID_FULL_NAME: u16 = 4;
const TT_PLATFORM_APPLE_UNICODE: u16 = 0;
const FT_POINT_TAG_ON_CURVE: c_char = 0x01;
const FT_POINT_TAG_CUBIC_CONTROL: c_char = 0x02;
const FT_RENDER_MODE_NORMAL: u32 = 0;
const FT_RENDER_MODE_LIGHT: u32 = 1;
const FT_RENDER_MODE_MONO: u32 = 2;
const FT_RENDER_MODE_LCD: u32 = 3;
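// The constants below mirror FreeType's FT_LOAD_TARGET_() macro, which packs the
// render mode into a 4-bit field starting at bit 16 of the glyph load flags.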
const FT_LOAD_TARGET_LIGHT: u32 = (FT_RENDER_MODE_LIGHT & 15) << 16;
const FT_LOAD_TARGET_LCD: u32 = (FT_RENDER_MODE_LCD & 15) << 16;
const FT_LOAD_TARGET_MONO: u32 = (FT_RENDER_MODE_MONO & 15) << 16;
const FT_LOAD_TARGET_NORMAL: u32 = (FT_RENDER_MODE_NORMAL & 15) << 16;
const FT_PIXEL_MODE_MONO: u8 = 1;
const FT_PIXEL_MODE_GRAY: u8 = 2;
const FT_PIXEL_MODE_LCD: u8 = 5;
const FT_PIXEL_MODE_LCD_V: u8 = 6;
const OS2_FS_SELECTION_OBLIQUE: u16 = 1 << 9;
// Not in our FreeType bindings, so we define these ourselves.
#[allow(dead_code)]
const BDF_PROPERTY_TYPE_NONE: BDF_PropertyType = 0;
#[allow(dead_code)]
const BDF_PROPERTY_TYPE_ATOM: BDF_PropertyType = 1;
#[allow(dead_code)]
const BDF_PROPERTY_TYPE_INTEGER: BDF_PropertyType = 2;
#[allow(dead_code)]
const BDF_PROPERTY_TYPE_CARDINAL: BDF_PropertyType = 3;
thread_local! {
static FREETYPE_LIBRARY: FT_Library = {
unsafe {
let mut library = ptr::null_mut();
assert_eq!(FT_Init_FreeType(&mut library), 0);
FT_Library_SetLcdFilter(library, FT_LcdFilter::FT_LCD_FILTER_DEFAULT);
library
}
};
}
/// The handle that the FreeType API natively uses to represent a font.
pub type NativeFont = FT_Face;
// Not in our FreeType bindings, so we define this ourselves.
#[allow(non_camel_case_types)]
type BDF_PropertyType = i32;
// Not in our FreeType bindings, so we define this ourselves.
#[repr(C)]
struct BDF_PropertyRec {
property_type: BDF_PropertyType,
value: *const c_char,
}
/// A cross-platform loader that uses the FreeType library to load and rasterize fonts.
///
/// On macOS and Windows, the Cargo feature `loader-freetype-default` can be used to opt into this
/// loader by default.
pub struct Font {
freetype_face: FT_Face,
font_data: Arc<Vec<u8>>,
}
impl Font {
/// Loads a font from raw font data (the contents of a `.ttf`/`.otf`/etc. file).
///
/// If the data represents a collection (`.ttc`/`.otc`/etc.), `font_index` specifies the index
/// of the font to load from it. If the data represents a single font, pass 0 for `font_index`.
pub fn from_bytes(font_data: Arc<Vec<u8>>, font_index: u32) -> Result<Font, FontLoadingError> {
FREETYPE_LIBRARY.with(|freetype_library| unsafe {
let mut freetype_face = ptr::null_mut();
if FT_New_Memory_Face(
*freetype_library,
(*font_data).as_ptr(),
font_data.len() as FT_Long,
font_index as FT_Long,
&mut freetype_face,
) != 0
{
return Err(FontLoadingError::Parse);
}
setup_freetype_face(freetype_face);
Ok(Font {
freetype_face,
font_data,
})
})
}
/// Loads a font from a `.ttf`/`.otf`/etc. file.
///
/// If the file is a collection (`.ttc`/`.otc`/etc.), `font_index` specifies the index of the
/// font to load from it. If the file represents a single font, pass 0 for `font_index`.
#[cfg(not(target_arch = "wasm32"))]
pub fn from_file(file: &mut File, font_index: u32) -> Result<Font, FontLoadingError> {
file.seek(SeekFrom::Start(0))?;
let font_data = Arc::new(utils::slurp_file(file).map_err(FontLoadingError::Io)?);
Font::from_bytes(font_data, font_index)
}
/// Loads a font from the path to a `.ttf`/`.otf`/etc. file.
///
/// If the file is a collection (`.ttc`/`.otc`/etc.), `font_index` specifies the index of the
/// font to load from it. If the file represents a single font, pass 0 for `font_index`.
#[inline]
#[cfg(not(target_arch = "wasm32"))]
pub fn from_path<P>(path: P, font_index: u32) -> Result<Font, FontLoadingError>
where
P: AsRef<Path>,
{
// TODO(pcwalton): Perhaps use the native FreeType support for opening paths?
<Font as Loader>::from_path(path, font_index)
}
/// Creates a font from a native API handle.
pub unsafe fn from_native_font(freetype_face: NativeFont) -> Font {
// We make an in-memory copy of the underlying font data. This is because the native font
// does not necessarily hold a strong reference to the memory backing it.
const CHUNK_SIZE: usize = 4096;
let mut font_data = vec![];
loop {
font_data.extend(iter::repeat(0).take(CHUNK_SIZE));
let freetype_stream = (*freetype_face).stream;
let n_read = ((*freetype_stream).read.unwrap())(
freetype_stream,
font_data.len() as FT_ULong,
font_data.as_mut_ptr(),
CHUNK_SIZE as FT_ULong,
);
if n_read < CHUNK_SIZE as FT_ULong {
break;
}
}
Font::from_bytes(Arc::new(font_data), (*freetype_face).face_index as u32).unwrap()
}
/// Loads the font pointed to by a handle.
#[inline]
pub fn from_handle(handle: &Handle) -> Result<Self, FontLoadingError> {
<Self as Loader>::from_handle(handle)
}
/// Determines whether a blob of raw font data represents a supported font, and, if so, what
/// type of font it is.
pub fn analyze_bytes(font_data: Arc<Vec<u8>>) -> Result<FileType, FontLoadingError> {
FREETYPE_LIBRARY.with(|freetype_library| unsafe {
let mut freetype_face = ptr::null_mut();
if FT_New_Memory_Face(
*freetype_library,
(*font_data).as_ptr(),
font_data.len() as FT_Long,
0,
&mut freetype_face,
) != 0
{
return Err(FontLoadingError::Parse);
}
let font_type = match (*freetype_face).num_faces {
1 => FileType::Single,
num_faces => FileType::Collection(num_faces as u32),
};
FT_Done_Face(freetype_face);
Ok(font_type)
})
}
/// Determines whether a file represents a supported font, and, if so, what type of font it is.
#[cfg(not(target_arch = "wasm32"))]
pub fn analyze_file(file: &mut File) -> Result<FileType, FontLoadingError> {
FREETYPE_LIBRARY.with(|freetype_library| unsafe {
file.seek(SeekFrom::Start(0))?;
let font_data = Arc::new(utils::slurp_file(file).map_err(FontLoadingError::Io)?);
let mut freetype_face = ptr::null_mut();
if FT_New_Memory_Face(
*freetype_library,
(*font_data).as_ptr(),
font_data.len() as FT_Long,
0,
&mut freetype_face,
) != 0
{
return Err(FontLoadingError::Parse);
}
let font_type = match (*freetype_face).num_faces {
1 => FileType::Single,
num_faces => FileType::Collection(num_faces as u32),
};
FT_Done_Face(freetype_face);
Ok(font_type)
})
}
/// Determines whether a path points to a supported font, and, if so, what type of font it is.
#[inline]
#[cfg(not(target_arch = "wasm32"))]
pub fn analyze_path<P>(path: P) -> Result<FileType, FontLoadingError>
where
P: AsRef<Path>,
{
<Self as Loader>::analyze_path(path)
}
/// Returns the wrapped native font handle.
///
/// This function increments the reference count of the FreeType face before returning it.
/// Therefore, it is the caller's responsibility to free it with `FT_Done_Face`.
pub fn native_font(&self) -> NativeFont {
unsafe {
assert_eq!(FT_Reference_Face(self.freetype_face), 0);
self.freetype_face
}
}
/// Returns the PostScript name of the font. This should be globally unique.
pub fn postscript_name(&self) -> Option<String> {
unsafe {
let postscript_name = FT_Get_Postscript_Name(self.freetype_face);
if !postscript_name.is_null() {
return Some(CStr::from_ptr(postscript_name).to_str().unwrap().to_owned());
}
let font_format = FT_Get_Font_Format(self.freetype_face);
assert!(!font_format.is_null());
let font_format = CStr::from_ptr(font_format).to_str().unwrap();
if font_format != "BDF" && font_format != "PCF" {
return None;
}
let mut property = mem::zeroed();
if FT_Get_BDF_Property(
self.freetype_face,
"_DEC_DEVICE_FONTNAMES\0".as_ptr() as *const c_char,
&mut property,
) != 0
{
return None;
}
if property.property_type != BDF_PROPERTY_TYPE_ATOM {
return None;
}
let dec_device_fontnames = CStr::from_ptr(property.value).to_str().unwrap();
if !dec_device_fontnames.starts_with("PS=") {
return None;
}
Some(dec_device_fontnames[3..].to_string())
}
}
/// Returns the full name of the font (also known as "display name" on macOS).
pub fn full_name(&self) -> String {
self.get_type_1_or_sfnt_name(PS_DICT_FULL_NAME, TT_NAME_ID_FULL_NAME)
.unwrap_or_else(|| self.family_name())
}
/// Returns the name of the font family.
pub fn family_name(&self) -> String {
unsafe {
let ptr = (*self.freetype_face).family_name;
// FreeType doesn't guarantee a non-null family name (see issue #5).
if ptr.is_null() {
String::new()
} else {
CStr::from_ptr(ptr).to_str().unwrap().to_owned()
}
}
}
/// Returns true if and only if the font is monospace (fixed-width).
pub fn is_monospace(&self) -> bool {
unsafe { (*self.freetype_face).face_flags & (FT_FACE_FLAG_FIXED_WIDTH as FT_Long) != 0 }
}
/// Returns the values of various font properties, corresponding to those defined in CSS.
pub fn properties(&self) -> Properties {
unsafe {
let os2_table = self.get_os2_table();
let style = match os2_table {
Some(os2_table) if ((*os2_table).fsSelection & OS2_FS_SELECTION_OBLIQUE) != 0 => {
Style::Oblique
}
_ if ((*self.freetype_face).style_flags & (FT_STYLE_FLAG_ITALIC) as FT_Long)
!= 0 =>
{
Style::Italic
}
_ => Style::Normal,
};
let stretch = match os2_table {
Some(os2_table) if (1..=9).contains(&(*os2_table).usWidthClass) => {
Stretch(Stretch::MAPPING[((*os2_table).usWidthClass as usize) - 1])
}
_ => Stretch::NORMAL,
};
let weight = match os2_table {
None => Weight::NORMAL,
Some(os2_table) => Weight((*os2_table).usWeightClass as f32),
};
Properties {
style,
stretch,
weight,
}
}
}
/// Returns the usual glyph ID for a Unicode character.
///
/// Be careful with this function; typographically correct character-to-glyph mapping must be
/// done using a *shaper* such as HarfBuzz. This function is only useful for best-effort simple
/// use cases like "what does character X look like on its own".
#[inline]
pub fn glyph_for_char(&self, character: char) -> Option<u32> {
unsafe {
let res = FT_Get_Char_Index(self.freetype_face, character as FT_ULong);
match res {
0 => None,
_ => Some(res),
}
}
}
/// Returns the glyph ID for the specified glyph name.
#[inline]
pub fn glyph_by_name(&self, name: &str) -> Option<u32> {
if let Ok(ffi_name) = CString::new(name) {
let code =
unsafe { FT_Get_Name_Index(self.freetype_face, ffi_name.as_ptr() as *mut c_char) };
if code > 0 {
return Some(u32::from(code));
}
}
None
}
/// Returns the number of glyphs in the font.
///
/// Glyph IDs range from 0 inclusive to this value exclusive.
#[inline]
pub fn glyph_count(&self) -> u32 {
unsafe { (*self.freetype_face).num_glyphs as u32 }
}
/// Sends the vector path for a glyph to a path builder.
///
/// If `hinting_mode` is not None, this function performs grid-fitting as requested before
/// sending the hinted outlines to the builder.
///
/// TODO(pcwalton): What should we do for bitmap glyphs?
pub fn outline<S>(
&self,
glyph_id: u32,
hinting: HintingOptions,
sink: &mut S,
) -> Result<(), GlyphLoadingError>
where
S: OutlineSink,
{
unsafe {
let rasterization_options = RasterizationOptions::GrayscaleAa;
let load_flags = self
.hinting_and_rasterization_options_to_load_flags(hinting, rasterization_options);
let units_per_em = (*self.freetype_face).units_per_EM;
let grid_fitting_size = hinting.grid_fitting_size();
if let Some(size) = grid_fitting_size {
assert_eq!(
FT_Set_Char_Size(self.freetype_face, size.f32_to_ft_fixed_26_6(), 0, 0, 0),
0
);
}
if FT_Load_Glyph(self.freetype_face, glyph_id, load_flags as i32) != 0 {
return Err(GlyphLoadingError::NoSuchGlyph);
}
let outline = &(*(*self.freetype_face).glyph).outline;
let contours =
slice::from_raw_parts((*outline).contours, (*outline).n_contours as usize);
let point_positions =
slice::from_raw_parts((*outline).points, (*outline).n_points as usize);
let point_tags = slice::from_raw_parts((*outline).tags, (*outline).n_points as usize);
let mut current_point_index = 0;
for &last_point_index_in_contour in contours {
let last_point_index_in_contour = last_point_index_in_contour as usize;
let (mut first_point, first_tag) = get_point(
&mut current_point_index,
point_positions,
point_tags,
last_point_index_in_contour,
grid_fitting_size,
units_per_em,
);
if (first_tag & FT_POINT_TAG_ON_CURVE) == 0 {
// Rare, but can happen; e.g. with Inconsolata (see pathfinder#84).
//
// FIXME(pcwalton): I'm not sure this is right.
let mut temp_point_index = last_point_index_in_contour;
let (last_point, last_tag) = get_point(
&mut temp_point_index,
point_positions,
point_tags,
last_point_index_in_contour,
grid_fitting_size,
units_per_em,
);
if (last_tag & FT_POINT_TAG_ON_CURVE) != 0 {
first_point = last_point
} else {
first_point = last_point.lerp(first_point, 0.5)
}
// Back up so we properly process the first point as a control point.
current_point_index -= 1;
}
sink.move_to(first_point);
while current_point_index <= last_point_index_in_contour {
let (mut point0, tag0) = get_point(
&mut current_point_index,
point_positions,
point_tags,
last_point_index_in_contour,
grid_fitting_size,
units_per_em,
);
if (tag0 & FT_POINT_TAG_ON_CURVE) != 0 {
sink.line_to(point0);
continue;
}
loop {
if current_point_index > last_point_index_in_contour {
// The *last* point in the contour is off the curve. So we just need to
// close the contour with a quadratic Bézier curve.
sink.quadratic_curve_to(point0, first_point);
break;
}
let (point1, tag1) = get_point(
&mut current_point_index,
point_positions,
point_tags,
last_point_index_in_contour,
grid_fitting_size,
units_per_em,
);
if (tag0 & FT_POINT_TAG_CUBIC_CONTROL) != 0 {
let ctrl = LineSegment2F::new(point0, point1);
if current_point_index <= last_point_index_in_contour {
// FIXME(pcwalton): Can we have implied on-curve points for cubic
// control points too?
let (point2, _) = get_point(
&mut current_point_index,
point_positions,
point_tags,
last_point_index_in_contour,
grid_fitting_size,
units_per_em,
);
sink.cubic_curve_to(ctrl, point2);
} else {
// Last point on the contour. Use first_point as point2.
sink.cubic_curve_to(ctrl, first_point);
}
break;
}
if (tag1 & FT_POINT_TAG_ON_CURVE) != 0 {
sink.quadratic_curve_to(point0, point1);
break;
}
// We have an implied on-curve point midway between the two consecutive
// off-curve points.
let point_half = point0.lerp(point1, 0.5);
sink.quadratic_curve_to(point0, point_half);
point0 = point1;
}
}
sink.close();
}
if hinting.grid_fitting_size().is_some() {
reset_freetype_face_char_size((*self).freetype_face)
}
}
return Ok(());
fn get_point(
current_point_index: &mut usize,
point_positions: &[FT_Vector],
point_tags: &[c_char],
last_point_index_in_contour: usize,
grid_fitting_size: Option<f32>,
units_per_em: u16,
) -> (Vector2F, c_char) {
assert!(*current_point_index <= last_point_index_in_contour);
let point_position = point_positions[*current_point_index];
let point_tag = point_tags[*current_point_index];
*current_point_index += 1;
let point_position = Vector2I::new(point_position.x as i32, point_position.y as i32);
let mut point_position = point_position.ft_fixed_26_6_to_f32();
if let Some(grid_fitting_size) = grid_fitting_size {
point_position = point_position * (units_per_em as f32) / grid_fitting_size;
}
(point_position, point_tag)
}
}
/// Returns the boundaries of a glyph in font units.
pub fn typographic_bounds(&self, glyph_id: u32) -> Result<RectF, GlyphLoadingError> {
unsafe {
if FT_Load_Glyph(
self.freetype_face,
glyph_id,
(FT_LOAD_DEFAULT | FT_LOAD_NO_HINTING) as i32,
) != 0
{
return Err(GlyphLoadingError::NoSuchGlyph);
}
let metrics = &(*(*self.freetype_face).glyph).metrics;
let rect = RectI::new(
Vector2I::new(
metrics.horiBearingX as i32,
(metrics.horiBearingY - metrics.height) as i32,
),
Vector2I::new(metrics.width as i32, metrics.height as i32),
);
Ok(rect.ft_fixed_26_6_to_f32())
}
}
/// Returns the distance from the origin of the glyph with the given ID to the next, in font
/// units.
pub fn advance(&self, glyph_id: u32) -> Result<Vector2F, GlyphLoadingError> {
unsafe {
if FT_Load_Glyph(
self.freetype_face,
glyph_id,
(FT_LOAD_DEFAULT | FT_LOAD_NO_HINTING) as i32,
) != 0
{
return Err(GlyphLoadingError::NoSuchGlyph);
}
let advance = (*(*self.freetype_face).glyph).advance;
Ok(Vector2I::new(advance.x as i32, advance.y as i32).ft_fixed_26_6_to_f32())
}
}
/// Returns the amount that the given glyph should be displaced from the origin.
///
/// FIXME(pcwalton): This always returns zero on FreeType.
pub fn origin(&self, _: u32) -> Result<Vector2F, GlyphLoadingError> {
warn!("unimplemented");
Ok(Vector2F::default())
}
/// Retrieves various metrics that apply to the entire font.
pub fn metrics(&self) -> Metrics {
let os2_table = self.get_os2_table();
unsafe {
let ascender = (*self.freetype_face).ascender;
let descender = (*self.freetype_face).descender;
let underline_position = (*self.freetype_face).underline_position;
let underline_thickness = (*self.freetype_face).underline_thickness;
let bbox = (*self.freetype_face).bbox;
let bounding_box_origin = Vector2I::new(bbox.xMin as i32, bbox.yMin as i32);
let bounding_box_lower_right = Vector2I::new(bbox.xMax as i32, bbox.yMax as i32);
let bounding_box = RectI::from_points(bounding_box_origin, bounding_box_lower_right);
Metrics {
units_per_em: (*self.freetype_face).units_per_EM as u32,
ascent: ascender as f32,
descent: descender as f32,
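// FreeType's `height` is ascent - descent + line gap, so the gap is recovered here.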
line_gap: ((*self.freetype_face).height + descender - ascender) as f32,
underline_position: (underline_position + underline_thickness / 2) as f32,
underline_thickness: underline_thickness as f32,
cap_height: os2_table
.map(|table| (*table).sCapHeight as f32)
.unwrap_or(0.0),
x_height: os2_table
.map(|table| (*table).sxHeight as f32)
.unwrap_or(0.0),
bounding_box: bounding_box.to_f32(),
}
}
}
/// Returns true if and only if the font loader can perform hinting in the requested way.
///
/// Some APIs support only rasterizing glyphs with hinting, not retrieving hinted outlines. If
/// `for_rasterization` is false, this function returns true if and only if the loader supports
/// retrieval of hinted *outlines*. If `for_rasterization` is true, this function returns true
/// if and only if the loader supports *rasterizing* hinted glyphs.
#[inline]
pub fn supports_hinting_options(
&self,
hinting_options: HintingOptions,
for_rasterization: bool,
) -> bool {
match (hinting_options, for_rasterization) {
(HintingOptions::None, _)
| (HintingOptions::Vertical(_), true)
| (HintingOptions::VerticalSubpixel(_), true)
| (HintingOptions::Full(_), true) => true,
(HintingOptions::Vertical(_), false)
| (HintingOptions::VerticalSubpixel(_), false)
| (HintingOptions::Full(_), false) => false,
}
}
fn get_type_1_or_sfnt_name(&self, type_1_id: u32, sfnt_id: u16) -> Option<String> {
unsafe {
let ps_value_size =
FT_Get_PS_Font_Value(self.freetype_face, type_1_id, 0, ptr::null_mut(), 0);
if ps_value_size > 0 {
let mut buffer = vec![0; ps_value_size as usize];
if FT_Get_PS_Font_Value(
self.freetype_face,
type_1_id,
0,
buffer.as_mut_ptr() as *mut c_void,
buffer.len() as FT_Long,
) == 0
{
return String::from_utf8(buffer).ok();
}
}
let sfnt_name_count = FT_Get_Sfnt_Name_Count(self.freetype_face);
let mut sfnt_name = mem::zeroed();
for sfnt_name_index in 0..sfnt_name_count {
assert_eq!(
FT_Get_Sfnt_Name(self.freetype_face, sfnt_name_index, &mut sfnt_name),
0
);
if sfnt_name.name_id != sfnt_id {
continue;
}
match (sfnt_name.platform_id, sfnt_name.encoding_id) {
(TT_PLATFORM_APPLE_UNICODE, _) => {
let mut sfnt_name_bytes =
slice::from_raw_parts(sfnt_name.string, sfnt_name.string_len as usize);
let mut sfnt_name_string = Vec::with_capacity(sfnt_name_bytes.len() / 2);
while !sfnt_name_bytes.is_empty() {
sfnt_name_string.push(sfnt_name_bytes.read_u16::<BigEndian>().unwrap())
}
if let Ok(result) = String::from_utf16(&sfnt_name_string) {
return Some(result);
}
}
(platform_id, _) => {
warn!(
"get_type_1_or_sfnt_name(): found invalid platform ID {}",
platform_id
);
// TODO(pcwalton)
}
}
}
None
}
}
fn get_os2_table(&self) -> Option<*const TT_OS2> {
unsafe {
let table = FT_Get_Sfnt_Table(self.freetype_face, FT_Sfnt_Tag::FT_SFNT_OS2);
if table.is_null() {
None
} else {
Some(table as *const TT_OS2)
}
}
}
/// Returns the pixel boundaries that the glyph will take up when rendered using this loader's
/// rasterizer at the given size and origin.
#[inline]
pub fn raster_bounds(
&self,
glyph_id: u32,
point_size: f32,
transform: Transform2F,
hinting_options: HintingOptions,
rasterization_options: RasterizationOptions,
) -> Result<RectI, GlyphLoadingError> {
<Self as Loader>::raster_bounds(
self,
glyph_id,
point_size,
transform,
hinting_options,
rasterization_options,
)
}
/// Rasterizes a glyph to a canvas with the given size and origin.
///
/// Format conversion will be performed if the canvas format does not match the rasterization
/// options. For example, if bilevel (black and white) rendering is requested to an RGBA
/// surface, this function will automatically convert the 1-bit raster image to the 32-bit
/// format of the canvas. Note that this may result in a performance penalty, depending on the
/// loader.
///
/// If `hinting_options` is not None, the requested grid fitting is performed.
pub fn rasterize_glyph(
&self,
canvas: &mut Canvas,
glyph_id: u32,
point_size: f32,
transform: Transform2F,
hinting_options: HintingOptions,
rasterization_options: RasterizationOptions,
) -> Result<(), GlyphLoadingError> {
// TODO(pcwalton): This is woefully incomplete. See WebRender's code for a more complete
// implementation.
unsafe {
let matrix = transform.matrix.0 * F32x4::new(65536.0, -65536.0, -65536.0, 65536.0);
let matrix = matrix.to_i32x4();
let vector = transform.vector.f32_to_ft_fixed_26_6();
let mut delta = FT_Vector {
x: vector.x() as FT_Pos,
y: -vector.y() as FT_Pos,
};
let mut ft_shape = FT_Matrix {
xx: matrix.x() as FT_Fixed,
xy: matrix.y() as FT_Fixed,
yx: matrix.z() as FT_Fixed,
yy: matrix.w() as FT_Fixed,
};
FT_Set_Transform(self.freetype_face, &mut ft_shape, &mut delta);
assert_eq!(
FT_Set_Char_Size(
self.freetype_face,
point_size.f32_to_ft_fixed_26_6(),
0,
0,
0
),
0
);
let mut load_flags = FT_LOAD_DEFAULT | FT_LOAD_RENDER;
load_flags |= self.hinting_and_rasterization_options_to_load_flags(
hinting_options,
rasterization_options,
);
if FT_Load_Glyph(self.freetype_face, glyph_id, load_flags as i32) != 0 {
return Err(GlyphLoadingError::NoSuchGlyph);
}
// TODO(pcwalton): Use the FreeType "direct" API to save a copy here. Note that we will
// need to keep this around for bilevel rendering, as the direct API doesn't work with
// that mode.
let bitmap = &(*(*self.freetype_face).glyph).bitmap;
let bitmap_stride = (*bitmap).pitch as usize;
let bitmap_width = (*bitmap).width as i32;
let bitmap_height = (*bitmap).rows as i32;
let bitmap_size = Vector2I::new(bitmap_width, bitmap_height);
let bitmap_buffer = (*bitmap).buffer as *const i8 as *const u8;
let bitmap_length = bitmap_stride * bitmap_height as usize;
let buffer = slice::from_raw_parts(bitmap_buffer, bitmap_length);
let dst_point = Vector2I::new(
(*(*self.freetype_face).glyph).bitmap_left,
-(*(*self.freetype_face).glyph).bitmap_top,
);
// FIXME(pcwalton): This function should return a Result instead.
match (*bitmap).pixel_mode {
FT_PIXEL_MODE_GRAY => {
canvas.blit_from(dst_point, buffer, bitmap_size, bitmap_stride, Format::A8);
}
FT_PIXEL_MODE_LCD | FT_PIXEL_MODE_LCD_V => {
canvas.blit_from(dst_point, buffer, bitmap_size, bitmap_stride, Format::Rgb24);
}
FT_PIXEL_MODE_MONO => {
canvas.blit_from_bitmap_1bpp(dst_point, buffer, bitmap_size, bitmap_stride);
}
_ => panic!("Unexpected FreeType pixel mode!"),
}
FT_Set_Transform(self.freetype_face, ptr::null_mut(), ptr::null_mut());
reset_freetype_face_char_size(self.freetype_face);
Ok(())
}
}
fn hinting_and_rasterization_options_to_load_flags(
&self,
hinting: HintingOptions,
rasterization: RasterizationOptions,
) -> u32 {
let mut options = match (hinting, rasterization) {
(HintingOptions::VerticalSubpixel(_), _) | (_, RasterizationOptions::SubpixelAa) => {
FT_LOAD_TARGET_LCD
}
(HintingOptions::None, _) => FT_LOAD_TARGET_NORMAL | FT_LOAD_NO_HINTING,
(HintingOptions::Vertical(_), RasterizationOptions::Bilevel)
| (HintingOptions::Full(_), RasterizationOptions::Bilevel) => FT_LOAD_TARGET_MONO,
(HintingOptions::Vertical(_), _) => FT_LOAD_TARGET_LIGHT,
(HintingOptions::Full(_), _) => FT_LOAD_TARGET_NORMAL,
};
if rasterization == RasterizationOptions::Bilevel {
options |= FT_LOAD_MONOCHROME
}
options
}
/// Returns a handle to this font, if possible.
///
/// This is useful if you want to open the font with a different loader.
#[inline]
pub fn handle(&self) -> Option<Handle> {
<Self as Loader>::handle(self)
}
/// Attempts to return the raw font data (contents of the font file).
///
/// If this font is a member of a collection, this function returns the data for the entire
/// collection.
pub fn copy_font_data(&self) -> Option<Arc<Vec<u8>>> {
Some(self.font_data.clone())
}
/// Get font fallback results for the given text and locale.
///
/// Note: this is currently just a stub implementation, a proper implementation
/// would likely use FontConfig, at least on Linux. It's not clear what a
/// FreeType loader with a non-FreeType source should do.
fn get_fallbacks(&self, text: &str, _locale: &str) -> FallbackResult<Font> {
warn!("unsupported");
FallbackResult {
fonts: Vec::new(),
valid_len: text.len(),
}
}
/// Returns the raw contents of the OpenType table with the given tag.
///
/// Tags are four-character codes. A list of tags can be found in the [OpenType specification].
///
/// [OpenType specification]: https://docs.microsoft.com/en-us/typography/opentype/spec/
pub fn load_font_table(&self, table_tag: u32) -> Option<Box<[u8]>> {
unsafe {
let mut len = 0;
if 0 != FT_Load_Sfnt_Table(
self.freetype_face,
table_tag as FT_ULong,
0,
ptr::null_mut(),
&mut len,
) {
return None;
}
let mut buf = Box::<[u8]>::from(vec![0; len as usize]);
if 0 != FT_Load_Sfnt_Table(
self.freetype_face,
table_tag as FT_ULong,
0,
buf.as_mut_ptr() as *mut FT_Byte,
&mut len,
) {
return None;
}
Some(buf)
}
}
}
impl Clone for Font {
fn clone(&self) -> Font {
unsafe {
assert_eq!(FT_Reference_Face(self.freetype_face), 0);
Font {
freetype_face: self.freetype_face,
font_data: self.font_data.clone(),
}
}
}
}
impl Drop for Font {
fn drop(&mut self) {
unsafe {
if !self.freetype_face.is_null() {
assert_eq!(FT_Done_Face(self.freetype_face), 0);
}
}
}
}
impl Debug for Font {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), fmt::Error> {
self.family_name().fmt(fmt)
}
}
impl Loader for Font {
type NativeFont = NativeFont;
#[inline]
fn from_bytes(font_data: Arc<Vec<u8>>, font_index: u32) -> Result<Self, FontLoadingError> {
Font::from_bytes(font_data, font_index)
}
#[inline]
#[cfg(not(target_arch = "wasm32"))]
fn from_file(file: &mut File, font_index: u32) -> Result<Font, FontLoadingError> {
Font::from_file(file, font_index)
}
#[inline]
fn analyze_bytes(font_data: Arc<Vec<u8>>) -> Result<FileType, FontLoadingError> {
Font::analyze_bytes(font_data)
}
#[cfg(not(target_arch = "wasm32"))]
fn analyze_file(file: &mut File) -> Result<FileType, FontLoadingError> {
Font::analyze_file(file)
}
#[inline]
fn native_font(&self) -> Self::NativeFont {
self.native_font()
}
#[inline]
unsafe fn from_native_font(native_font: Self::NativeFont) -> Self {
Font::from_native_font(native_font)
}
#[inline]
fn postscript_name(&self) -> Option<String> {
self.postscript_name()
}
#[inline]
fn full_name(&self) -> String {
self.full_name()
}
#[inline]
fn family_name(&self) -> String {
self.family_name()
}
#[inline]
fn is_monospace(&self) -> bool {
self.is_monospace()
}
#[inline]
fn properties(&self) -> Properties {
self.properties()
}
#[inline]
fn glyph_for_char(&self, character: char) -> Option<u32> {
self.glyph_for_char(character)
}
#[inline]
fn glyph_by_name(&self, name: &str) -> Option<u32> {
self.glyph_by_name(name)
}
#[inline]
fn glyph_count(&self) -> u32 {
self.glyph_count()
}
#[inline]
fn outline<S>(
&self,
glyph_id: u32,
hinting_mode: HintingOptions,
sink: &mut S,
) -> Result<(), GlyphLoadingError>
where
S: OutlineSink,
{
self.outline(glyph_id, hinting_mode, sink)
}
#[inline]
fn typographic_bounds(&self, glyph_id: u32) -> Result<RectF, GlyphLoadingError> {
self.typographic_bounds(glyph_id)
}
#[inline]
fn advance(&self, glyph_id: u32) -> Result<Vector2F, GlyphLoadingError> {
self.advance(glyph_id)
}
#[inline]
fn origin(&self, origin: u32) -> Result<Vector2F, GlyphLoadingError> {
self.origin(origin)
}
#[inline]
fn metrics(&self) -> Metrics {
self.metrics()
}
#[inline]
fn copy_font_data(&self) -> Option<Arc<Vec<u8>>> {
self.copy_font_data()
}
#[inline]
fn supports_hinting_options(
&self,
hinting_options: HintingOptions,
for_rasterization: bool,
) -> bool {
self.supports_hinting_options(hinting_options, for_rasterization)
}
#[inline]
fn rasterize_glyph(
&self,
canvas: &mut Canvas,
glyph_id: u32,
point_size: f32,
transform: Transform2F,
hinting_options: HintingOptions,
rasterization_options: RasterizationOptions,
) -> Result<(), GlyphLoadingError> {
self.rasterize_glyph(
canvas,
glyph_id,
point_size,
transform,
hinting_options,
rasterization_options,
)
}
#[inline]
fn get_fallbacks(&self, text: &str, locale: &str) -> FallbackResult<Self> {
self.get_fallbacks(text, locale)
}
#[inline]
fn load_font_table(&self, table_tag: u32) -> Option<Box<[u8]>> {
self.load_font_table(table_tag)
}
}
unsafe fn setup_freetype_face(face: FT_Face) {
reset_freetype_face_char_size(face);
}
unsafe fn reset_freetype_face_char_size(face: FT_Face) {
// Apple Color Emoji has 0 units per em. Whee!
let units_per_em = (*face).units_per_EM as i64;
if units_per_em > 0 {
assert_eq!(
FT_Set_Char_Size(face, ((*face).units_per_EM as FT_Long) << 6, 0, 0, 0),
0
);
}
}
#[repr(C)]
struct FT_SfntName {
platform_id: FT_UShort,
encoding_id: FT_UShort,
language_id: FT_UShort,
name_id: FT_UShort,
string: *mut FT_Byte,
string_len: FT_UInt,
}
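// FreeType expresses many metrics in 26.6 fixed point (6 fractional bits), so 1.0
// corresponds to 64. The helpers below convert between f32-based values and that
// representation; e.g. 12.5 becomes 800, and 800 maps back to 12.5.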
trait F32ToFtFixed {
type Output;
fn f32_to_ft_fixed_26_6(self) -> Self::Output;
}
trait FtFixedToF32 {
type Output;
fn ft_fixed_26_6_to_f32(self) -> Self::Output;
}
impl F32ToFtFixed for Vector2F {
type Output = Vector2I;
#[inline]
fn f32_to_ft_fixed_26_6(self) -> Vector2I {
(self * 64.0).to_i32()
}
}
impl F32ToFtFixed for f32 {
type Output = FT_Fixed;
#[inline]
fn f32_to_ft_fixed_26_6(self) -> FT_Fixed {
(self * 64.0) as FT_Fixed
}
}
impl FtFixedToF32 for Vector2I {
type Output = Vector2F;
#[inline]
fn ft_fixed_26_6_to_f32(self) -> Vector2F {
(self.to_f32() * (1.0 / 64.0)).round()
}
}
impl FtFixedToF32 for RectI {
type Output = RectF;
#[inline]
fn ft_fixed_26_6_to_f32(self) -> RectF {
(self.to_f32() * (1.0 / 64.0))
}
}
extern "C" {
fn FT_Get_Font_Format(face: FT_Face) -> *const c_char;
fn FT_Get_BDF_Property(
face: FT_Face,
prop_name: *const c_char,
aproperty: *mut BDF_PropertyRec,
) -> FT_Error;
fn FT_Get_PS_Font_Value(
face: FT_Face,
key: u32,
idx: FT_UInt,
value: *mut c_void,
value_len: FT_Long,
) -> FT_Long;
fn FT_Get_Sfnt_Name(face: FT_Face, idx: FT_UInt, aname: *mut FT_SfntName) -> FT_Error;
fn FT_Get_Sfnt_Name_Count(face: FT_Face) -> FT_UInt;
}
#[cfg(test)]
mod test {
use crate::loaders::freetype::Font;
static PCF_FONT_PATH: &'static str = "resources/tests/times-roman-pcf/timR12.pcf";
static PCF_FONT_POSTSCRIPT_NAME: &'static str = "Times-Roman";
#[test]
fn get_pcf_postscript_name() {
let font = Font::from_path(PCF_FONT_PATH, 0).unwrap();
assert_eq!(font.postscript_name().unwrap(), PCF_FONT_POSTSCRIPT_NAME);
}
}
|
cgroups.rs
|
use super::{
stats::{to_value, ContainerStats},
Container, EventTx, Pid,
};
use crate::{
npk::manifest,
runtime::{CGroupEvent, ContainerEvent, Event, MemoryEvent},
};
use cgroups_rs::{
memory::MemController, BlkIoDeviceResource, BlkIoDeviceThrottleResource, BlkIoResources,
Controller, CpuResources, Hierarchy, MemoryResources,
};
use futures::stream::StreamExt;
use inotify::{Inotify, WatchMask};
use log::{debug, info, warn};
use std::{collections::HashMap, fmt::Debug, os::unix::io::AsRawFd, path::Path};
use thiserror::Error;
use tokio::{
fs,
io::{self, AsyncReadExt, AsyncWriteExt},
select,
sync::mpsc::error::TrySendError,
task::{self, JoinHandle},
time,
};
use tokio_eventfd::EventFd;
use tokio_util::sync::CancellationToken;
#[derive(Error, Debug)]
pub enum Error {
#[error("Io error: {0}: {1:?}")]
Io(String, io::Error),
#[error("CGroups error: {0}")]
CGroups(String),
}
/// Default runtime hierarchy that yields only implemented and supported controllers
/// instead of the default list.
fn hierarchy() -> Box<dyn Hierarchy> {
Box::new(RuntimeHierarchy::new())
}
/// Create the top level cgroups used by northstar
pub async fn init(name: &Path) -> Result<(), Error> {
// TODO: Add check for supported controllers
info!("Initializing cgroups",);
let cgroup = cgroups_rs::Cgroup::new(hierarchy(), name);
debug!("Using cgroups {}", if cgroup.v2() { "v2" } else { "v1" });
Ok(())
}
/// Shutdown the cgroups config by removing the dir
pub async fn shutdown(dir: &Path) -> Result<(), Error> {
info!("Shutting down cgroups");
cgroups_rs::Cgroup::new(hierarchy(), dir)
.delete()
.map_err(|e| Error::CGroups(e.to_string()))
}
/// Implement a custom type for Hierarchy that filters subsystems
#[derive(Debug)]
struct RuntimeHierarchy {
inner: Box<dyn Hierarchy>,
}
impl RuntimeHierarchy {
/// Create a new instance
fn new() -> RuntimeHierarchy {
RuntimeHierarchy {
inner: cgroups_rs::hierarchies::auto(),
}
}
}
impl Hierarchy for RuntimeHierarchy {
/// Filter unimplemented controllers
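/// Only the memory, cpu and blkio subsystems are kept; the rest are not used by the runtime.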
fn subsystems(&self) -> Vec<cgroups_rs::Subsystem> {
self.inner
.subsystems()
.drain(..)
.filter(|s| match s {
cgroups_rs::Subsystem::Pid(_) => false,
cgroups_rs::Subsystem::Mem(_) => true,
cgroups_rs::Subsystem::CpuSet(_) => false,
cgroups_rs::Subsystem::CpuAcct(_) => false,
cgroups_rs::Subsystem::Cpu(_) => true,
cgroups_rs::Subsystem::Devices(_) => false,
cgroups_rs::Subsystem::Freezer(_) => false,
cgroups_rs::Subsystem::NetCls(_) => false,
cgroups_rs::Subsystem::BlkIo(_) => true,
cgroups_rs::Subsystem::PerfEvent(_) => false,
cgroups_rs::Subsystem::NetPrio(_) => false,
cgroups_rs::Subsystem::HugeTlb(_) => false,
cgroups_rs::Subsystem::Rdma(_) => false,
cgroups_rs::Subsystem::Systemd(_) => false,
})
.collect()
}
fn root(&self) -> std::path::PathBuf {
self.inner.root()
}
fn root_control_group(&self) -> cgroups_rs::Cgroup {
self.inner.root_control_group()
}
fn v2(&self) -> bool {
self.inner.v2()
}
}
#[derive(Debug)]
pub struct CGroups {
cgroup: cgroups_rs::Cgroup,
memory_monitor: MemoryMonitor,
}
impl CGroups {
pub(super) async fn new(
top_level_dir: &str,
tx: EventTx,
container: &Container,
config: &manifest::cgroups::CGroups,
pid: Pid,
) -> Result<CGroups, Error> {
info!("Creating cgroups for {}", container);
let cgroup: cgroups_rs::Cgroup = cgroups_rs::Cgroup::new(
hierarchy(),
Path::new(top_level_dir).join(container.name().to_str()),
);
let resources = cgroups_rs::Resources {
memory: config.memory.clone().map(Into::into).unwrap_or_default(),
pid: cgroups_rs::PidResources::default(),
cpu: config.cpu.clone().map(Into::into).unwrap_or_default(),
devices: cgroups_rs::DeviceResources::default(),
network: cgroups_rs::NetworkResources::default(),
hugepages: cgroups_rs::HugePageResources::default(),
blkio: config.blkio.clone().map(Into::into).unwrap_or_default(),
};
cgroup
.apply(&resources)
.map_err(|e| Error::CGroups(e.to_string()))?;
// If adding the task fails it's a fault of the runtime or its integration
// and not of the container
debug!("Assigning pid {} of {} to cgroups", pid, container);
cgroup
.add_task(cgroups_rs::CgroupPid::from(pid as u64))
.expect("Failed to assign pid");
let memory_controller = cgroup
.controller_of::<MemController>()
.expect("Failed to get memory controller");
let memory_path = memory_controller.path();
let memory_monitor = if cgroup.v2() {
MemoryMonitor::new_v2(container.clone(), memory_path, tx).await
} else {
MemoryMonitor::new_v1(container.clone(), memory_path, tx).await
};
Ok(CGroups {
cgroup,
memory_monitor,
})
}
pub async fn destroy(self) {
self.memory_monitor.stop().await;
info!("Destroying cgroup");
self.cgroup.delete().expect("Failed to remove cgroups");
}
/// Gather statistics from controllers
pub(super) fn stats(&self) -> ContainerStats {
let mut stats = HashMap::new();
for c in self.cgroup.subsystems() {
match c {
cgroups_rs::Subsystem::BlkIo(c) => {
stats.insert("blkio".into(), to_value(c.blkio()).unwrap());
}
cgroups_rs::Subsystem::Cpu(c) => {
stats.insert("cpu".into(), to_value(c.cpu()).unwrap());
}
cgroups_rs::Subsystem::Mem(c) => {
let mut memory = HashMap::new();
memory.insert("memory".to_string(), to_value(c.memory_stat()).unwrap());
memory.insert("kmem".to_string(), to_value(c.kmem_stat()).unwrap());
memory.insert("kmem_tcp".to_string(), to_value(c.kmem_tcp_stat()).unwrap());
stats.insert("memory".to_string(), to_value(memory).unwrap());
}
_ => (),
}
}
stats
}
}
#[derive(Debug)]
struct MemoryMonitor {
token: CancellationToken,
task: JoinHandle<()>,
}
impl MemoryMonitor {
/// Setup an event fd and oom event listening.
async fn new_v1(container: Container, path: &Path, tx: EventTx) -> MemoryMonitor {
const OOM_CONTROL: &str = "memory.oom_control";
const EVENT_CONTROL: &str = "cgroup.event_control";
// Configure oom
let oom_control = path.join(OOM_CONTROL);
let event_control = path.join(EVENT_CONTROL);
let token = CancellationToken::new();
let mut event_fd = EventFd::new(0, false).expect("Failed to create eventfd");
debug!("Opening oom_control: {}", oom_control.display());
let oom_control = fs::OpenOptions::new()
.write(true)
.open(&oom_control)
.await
.expect("Failed to open oom_control");
debug!("Opening event_control: {}", event_control.display());
let mut event_control = fs::OpenOptions::new()
.write(true)
.open(&event_control)
.await
.expect("Failed to open event_control");
event_control
.write_all(format!("{} {}", event_fd.as_raw_fd(), oom_control.as_raw_fd()).as_bytes())
.await
.expect("Failed to setup event_control");
event_control
.flush()
.await
.expect("Failed to setup oom event fd");
// This task stops when the main loop receiver closes
let task = {
let stop = token.clone();
task::spawn(async move {
debug!("Listening for v1 oom events of {}", container);
let mut buffer = [0u8; 16];
'outer: loop {
select! {
_ = stop.cancelled() => {
debug!("Stopping oom monitor of {}", container);
break 'outer;
}
_ = tx.closed() => break 'outer,
_ = event_fd.read(&mut buffer) => {
'inner: loop {
warn!("Process {} is out of memory", container);
let event = Event::Container(container.clone(), ContainerEvent::CGroup(CGroupEvent::Memory(MemoryEvent {
oom: Some(1),
..Default::default()
})));
match tx.try_send(event) {
Ok(_) => break 'inner,
Err(TrySendError::Closed(_)) => break 'outer,
Err(TrySendError::Full(_)) => time::sleep(time::Duration::from_millis(1)).await,
}
}
}
}
}
})
};
MemoryMonitor { token, task }
}
/// Construct a new cgroups v2 memory monitor
async fn new_v2(container: Container, path: &Path, tx: EventTx) -> MemoryMonitor {
const MEMORY_EVENTS: &str = "memory.events";
let token = CancellationToken::new();
let path = path.join(MEMORY_EVENTS);
// This task stops when the main loop receiver closes
let task = {
let stop = token.clone();
let mut inotify = Inotify::init().expect("Error while initializing inotify instance");
inotify
.add_watch(&path, WatchMask::MODIFY)
.expect("Failed to add file watch");
task::spawn(async move {
debug!("Listening for v2 oom events of {}", container);
let mut buffer = [0; 1024];
let mut stream = inotify
.event_stream(&mut buffer)
.expect("Failed to initialize inotify event stream");
'outer: loop {
select! {
_ = stop.cancelled() => {
debug!("Stopping oom monitor of {}", container);
break 'outer;
}
_ = tx.closed() => break 'outer,
_ = stream.next() => {
let events = fs::read_to_string(&path).await.expect("Failed to read memory events");
let event = parse_cgroups_event(&events);
'inner: loop {
let event = Event::Container(container.clone(), ContainerEvent::CGroup(event.clone()));
warn!("Process {} is out of memory", container);
match tx.try_send(event) {
Ok(_) => break 'inner,
Err(TrySendError::Closed(_)) => break 'outer,
Err(TrySendError::Full(_)) => time::sleep(time::Duration::from_millis(1)).await,
}
}
}
}
}
})
};
MemoryMonitor { token, task }
}
/// Stop the monitor and wait for the task termination
async fn stop(self) {
self.token.cancel();
self.task.await.expect("Task error");
}
}
/// Parse the cgroup v2 memory.events file
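///
/// The file consists of `<key> <count>` lines; an illustrative example:
///
/// ```text
/// low 0
/// high 0
/// max 4
/// oom 1
/// oom_kill 1
/// ```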
fn parse_cgroups_event(s: &str) -> CGroupEvent {
let mut event = MemoryEvent::default();
for line in s.lines() {
let mut iter = line.split_whitespace().rev();
let value = iter.next().and_then(|s| s.parse::<u64>().ok());
match iter.next() {
Some("low") => event.low = value,
Some("high") => event.high = value,
Some("max") => event.max = value,
Some("oom") => event.oom = value,
Some("oom_kill") => event.oom_kill = value,
Some(_) | None => panic!("Invalid content of memory.events"),
}
}
CGroupEvent::Memory(event)
}
impl From<manifest::cgroups::CpuResources> for CpuResources {
fn from(v: manifest::cgroups::CpuResources) -> Self {
CpuResources {
cpus: v.cpus,
mems: v.mems,
shares: v.shares,
quota: v.quota,
period: v.period,
realtime_runtime: v.realtime_runtime,
realtime_period: v.realtime_period,
attrs: v.attrs,
}
}
}
impl From<manifest::cgroups::MemoryResources> for MemoryResources {
fn from(v: manifest::cgroups::MemoryResources) -> Self {
MemoryResources {
kernel_memory_limit: v.kernel_memory_limit,
memory_hard_limit: v.memory_hard_limit,
memory_soft_limit: v.memory_soft_limit,
kernel_tcp_memory_limit: v.kernel_tcp_memory_limit,
memory_swap_limit: v.memory_swap_limit,
swappiness: v.swappiness,
attrs: v.attrs,
}
}
}
impl From<manifest::cgroups::BlkIoResources> for BlkIoResources {
fn from(v: manifest::cgroups::BlkIoResources) -> Self {
BlkIoResources {
weight: v.weight,
leaf_weight: v.leaf_weight,
weight_device: v.weight_device.into_iter().map(Into::into).collect(),
throttle_read_bps_device: v
.throttle_read_bps_device
.into_iter()
.map(Into::into)
.collect(),
throttle_read_iops_device: v
.throttle_read_iops_device
.into_iter()
.map(Into::into)
.collect(),
throttle_write_bps_device: v
.throttle_write_bps_device
.into_iter()
.map(Into::into)
.collect(),
throttle_write_iops_device: v
.throttle_write_iops_device
.into_iter()
.map(Into::into)
.collect(),
}
}
}
impl From<manifest::cgroups::BlkIoDeviceResource> for BlkIoDeviceResource {
fn from(v: manifest::cgroups::BlkIoDeviceResource) -> Self {
BlkIoDeviceResource {
major: v.major,
minor: v.minor,
weight: v.weight,
leaf_weight: v.leaf_weight,
}
}
}
impl From<manifest::cgroups::BlkIoDeviceThrottleResource> for BlkIoDeviceThrottleResource {
fn from(v: manifest::cgroups::BlkIoDeviceThrottleResource) -> Self {
BlkIoDeviceThrottleResource {
major: v.major,
minor: v.minor,
rate: v.rate,
}
}
}
|
arraylist.go
|
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package arraylist implements the array list.
//
// Structure is not thread safe.
//
// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29
package arraylist
import (
"fmt"
"strings"
"github.com/kopite008/gods/lists"
"github.com/kopite008/gods/utils"
)
func assertListImplementation() {
var _ lists.List = (*List)(nil)
}
// List holds the elements in a slice
type List struct {
elements []interface{}
size int
}
const (
growthFactor = float32(2.0) // growth by 100%
shrinkFactor = float32(0.25) // shrink when size is 25% of capacity (0 means never shrink)
)
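// Illustrative example of the policy above: appending one element to a full list
// of capacity 8 grows it to int(2.0*(8+1)) = 18 slots, while a capacity-16 list
// shrinks back to its size once it holds 4 or fewer elements.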
// New instantiates a new list and adds the passed values, if any, to the list
func New(values ...interface{}) *List {
list := &List{}
if len(values) > 0 {
list.Add(values...)
}
return list
}
// Add appends a value at the end of the list
func (list *List) Add(values ...interface{}) {
list.growBy(len(values))
for _, value := range values {
list.elements[list.size] = value
list.size++
}
}
// Get returns the element at index.
// Second return parameter is true if index is within bounds of the array and array is not empty, otherwise false.
func (list *List) Get(index int) (interface{}, bool) {
if !list.withinRange(index) {
return nil, false
}
return list.elements[index], true
}
// Remove removes the element at the given index from the list.
func (list *List) Remove(index int) {
if !list.withinRange(index) {
return
}
list.elements[index] = nil // cleanup reference
copy(list.elements[index:], list.elements[index+1:list.size]) // shift to the left by one (slow operation, need ways to optimize this)
list.size--
list.shrink()
}
// Contains checks if elements (one or more) are present in the set.
// All elements have to be present in the set for the method to return true.
// Performance: O(n*m), where n is the list size and m is the number of values searched.
// Returns true if no arguments are passed at all, i.e. set is always super-set of empty set.
func (list *List) Contains(values ...interface{}) bool {
for _, searchValue := range values {
found := false
for _, element := range list.elements {
if element == searchValue {
found = true
break
}
}
if !found {
return false
}
}
return true
}
// Values returns all elements in the list.
func (list *List) Values() []interface{} {
newElements := make([]interface{}, list.size, list.size)
copy(newElements, list.elements[:list.size])
return newElements
}
// IndexOf returns the index of the provided element, or -1 if it is not found.
func (list *List) IndexOf(value interface{}) int {
if list.size == 0 {
return -1
}
for index, element := range list.elements {
if element == value {
return index
}
}
return -1
}
// Empty returns true if list does not contain any elements.
func (list *List) Empty() bool {
return list.size == 0
}
// Size returns number of elements within the list.
func (list *List) Size() int {
return list.size
}
// Clear removes all elements from the list.
func (list *List) Clear() {
list.size = 0
list.elements = []interface{}{}
}
// Sort sorts values (in-place) using the provided comparator.
func (list *List) Sort(comparator utils.Comparator) {
if len(list.elements) < 2 {
return
}
utils.Sort(list.elements[:list.size], comparator)
}
// Swap swaps the two values at the specified positions.
func (list *List) Swap(i, j int) {
if list.withinRange(i) && list.withinRange(j) {
list.elements[i], list.elements[j] = list.elements[j], list.elements[i]
}
}
// Insert inserts values at specified index position shifting the value at that position (if any) and any subsequent elements to the right.
// Does not do anything if position is negative or bigger than list's size
// Note: position equal to list's size is valid, i.e. append.
func (list *List) Insert(index int, values ...interface{}) {
if !list.withinRange(index) {
// Append
if index == list.size {
list.Add(values...)
}
return
}
l := len(values)
list.growBy(l)
list.size += l
copy(list.elements[index+l:], list.elements[index:list.size-l])
copy(list.elements[index:], values)
}
// Set the value at specified index
// Does not do anything if position is negative or bigger than list's size
// Note: position equal to list's size is valid, i.e. append.
func (list *List) Set(index int, value interface{}) {
if !list.withinRange(index) {
// Append
if index == list.size {
list.Add(value)
}
return
}
list.elements[index] = value
}
// String returns a string representation of container
func (list *List) String() string {
str := "ArrayList\n"
values := []string{}
for _, value := range list.elements[:list.size] {
values = append(values, fmt.Sprintf("%v", value))
}
str += strings.Join(values, ", ")
return str
}
// Check that the index is within bounds of the list
func (list *List) withinRange(index int) bool {
return index >= 0 && index < list.size
}
func (list *List) resize(cap int) {
newElements := make([]interface{}, cap, cap)
copy(newElements, list.elements)
list.elements = newElements
}
// Expand the array if necessary, i.e. capacity will be reached if we add n elements
func (list *List) growBy(n int) {
// When capacity is reached, grow by a factor of growthFactor and add number of elements
currentCapacity := cap(list.elements)
if list.size+n >= currentCapacity {
newCapacity := int(growthFactor * float32(currentCapacity+n))
list.resize(newCapacity)
}
}
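// Worked example (comment added for illustration, not in the original source):
// with growthFactor = 2.0, a slice at capacity 8 holding 8 elements that receives
// one more element via growBy(1) resizes to int(2.0*float32(8+1)) = 18, which
// keeps repeated Add calls amortized O(1).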
// Shrink the array if necessary, i.e. when size is shrinkFactor percent of current capacity
func (list *List) shrink() {
if shrinkFactor == 0.0 {
return
}
// Shrink when size is at shrinkFactor * capacity
currentCapacity := cap(list.elements)
if list.size <= int(float32(currentCapacity)*shrinkFactor) {
list.resize(list.size)
}
}
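// Minimal usage sketch (illustrative only, not part of the original package):
//
//	list := New("a", "c")
//	list.Insert(1, "b")               // a, b, c
//	list.Remove(0)                    // b, c
//	list.Sort(utils.StringComparator) // already sorted here
//	fmt.Println(list.Values())        // [b c]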
|
assertListImplementation
|
error.rs
|
// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use std::{
fmt::{self, Display},
io::ErrorKind,
};
#[derive(Debug)]
pub enum Error {
Db(migration::DbErr),
TinderCrypt(tindercrypt::errors::Error),
Macaroon(macaroon::MacaroonError),
Io(std::io::Error),
Secp256k1(bitcoin::secp256k1::Error),
Bdk(bdk::Error),
BitcoinRpc(bitcoincore_rpc::Error),
LdkApi(lightning::util::errors::APIError),
LdkMsg(lightning::ln::msgs::LightningError),
LdkInvoice(lightning_invoice::payment::PaymentError),
LdkInvoiceSign(lightning_invoice::SignOrCreationError),
LdkInvoiceParse(lightning_invoice::ParseOrSemanticError),
InvalidSeedLength,
FailedToWriteSeed,
Unauthenticated,
InvalidMacaroon,
AdminNodeNotStarted,
AdminNodeNotCreated,
}
impl Display for Error {
fn
|
(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let str = match self {
Error::Db(e) => e.to_string(),
Error::Macaroon(_e) => "macaroon error".to_string(),
Error::TinderCrypt(e) => e.to_string(),
Error::Io(e) => e.to_string(),
Error::Secp256k1(e) => e.to_string(),
Error::Bdk(e) => e.to_string(),
Error::BitcoinRpc(e) => e.to_string(),
Error::LdkApi(e) => format!("{:?}", e),
Error::LdkMsg(e) => format!("{:?}", e),
Error::LdkInvoice(e) => format!("{:?}", e),
Error::LdkInvoiceSign(e) => e.to_string(),
Error::LdkInvoiceParse(e) => e.to_string(),
Error::InvalidSeedLength => String::from("invalid seed length"),
Error::FailedToWriteSeed => String::from("failed to write seed"),
Error::Unauthenticated => String::from("unauthenticated"),
Error::InvalidMacaroon => String::from("invalid macaroon"),
Error::AdminNodeNotCreated => String::from("admin node not created"),
Error::AdminNodeNotStarted => String::from("admin node not started"),
};
write!(f, "{}", str)
}
}
impl From<migration::DbErr> for Error {
fn from(e: migration::DbErr) -> Error {
Error::Db(e)
}
}
impl From<bitcoin::secp256k1::Error> for Error {
fn from(e: bitcoin::secp256k1::Error) -> Error {
Error::Secp256k1(e)
}
}
impl From<std::io::Error> for Error {
fn from(e: std::io::Error) -> Error {
Error::Io(e)
}
}
impl From<bdk::Error> for Error {
fn from(e: bdk::Error) -> Error {
Error::Bdk(e)
}
}
impl From<bitcoincore_rpc::Error> for Error {
fn from(e: bitcoincore_rpc::Error) -> Error {
Error::BitcoinRpc(e)
}
}
impl From<lightning_invoice::payment::PaymentError> for Error {
fn from(e: lightning_invoice::payment::PaymentError) -> Self {
Error::LdkInvoice(e)
}
}
impl From<lightning_invoice::SignOrCreationError> for Error {
fn from(e: lightning_invoice::SignOrCreationError) -> Self {
Error::LdkInvoiceSign(e)
}
}
impl From<lightning_invoice::ParseOrSemanticError> for Error {
fn from(e: lightning_invoice::ParseOrSemanticError) -> Self {
Error::LdkInvoiceParse(e)
}
}
impl From<lightning::util::errors::APIError> for Error {
fn from(e: lightning::util::errors::APIError) -> Self {
Error::LdkApi(e)
}
}
impl From<lightning::ln::msgs::LightningError> for Error {
fn from(e: lightning::ln::msgs::LightningError) -> Self {
Error::LdkMsg(e)
}
}
impl From<tindercrypt::errors::Error> for Error {
fn from(e: tindercrypt::errors::Error) -> Self {
Error::TinderCrypt(e)
}
}
impl From<macaroon::MacaroonError> for Error {
fn from(e: macaroon::MacaroonError) -> Self {
Error::Macaroon(e)
}
}
impl From<Error> for std::io::Error {
fn from(e: Error) -> std::io::Error {
std::io::Error::new(ErrorKind::Other, e.to_string())
}
}
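// Illustrative sketch (not part of the original module): the From impls above
// let callers bubble errors up with `?`, e.g.
//
//     fn read_seed_file(path: &str) -> Result<Vec<u8>, Error> {
//         let bytes = std::fs::read(path)?; // std::io::Error -> Error::Io
//         Ok(bytes)
//     }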
|
fmt
|
import_hertsmere.py
|
from data_collection.management.commands import BaseShpStationsShpDistrictsImporter
class Command(BaseShpStationsShpDistrictsImporter):
council_id = 'E07000098'
srid = 27700
districts_srid = 27700
districts_name = 'PollingDistricts'
stations_name = 'PollingStations.shp'
elections = [
'local.hertfordshire.2017-05-04',
'parl.2017-06-08'
]
def district_record_to_dict(self, record):
return {
'internal_council_id': str(record[0]).strip(),
'name': str(record[1]).strip(),
'polling_station_id': str(record[0]).strip(),
}
def
|
(self, record):
address_parts = [record[x].strip() for x in range(3, 7)]
for i, part in enumerate(address_parts):
if part == b'':
address_parts[i] = ''
for i, part in enumerate(address_parts):
if len(part) <= 3 and len(part) > 0:
address_parts[i+1] = part + ' ' + address_parts[i+1]
address_parts[i] = ''
break
address = "\n".join(address_parts)
while "\n\n" in address:
address = address.replace("\n\n", "\n")
return address.strip()
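# Illustrative walk-through (hypothetical values, not taken from the shapefile):
# with address_parts = ['The', 'Village Hall', 'High Street', 'Bushey'] the short
# leading part 'The' is folded into the following part, giving
# 'The Village Hall\nHigh Street\nBushey' once empty lines are collapsed.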
def station_record_to_dict(self, record):
postcode = record[8].strip()
if postcode == b'':
postcode = ''
return {
'internal_council_id': str(record[1]).strip(),
'address' : self.format_address(record),
'postcode': postcode,
}
|
format_address
|
highlight.spec.ts
|
import { highlightSearch, revertHighlightSearch } from '../../src/common/highlight-search';
import {profile , inMB, getMemoryProfile} from './common.spec';
describe("highlightsearch", function () {
beforeAll(function () {
const isDef = (o: any) => o !== undefined && o !== null;
if (!isDef(window.performance)) {
console.log("Unsupported environment, window.performance.memory is unavailable");
this.skip(); //Skips test (in Chai)
return;
}
});
it("highlight contents", function () {
let divElement = document.createElement("div");
divElement.setAttribute('id', 'listItems');
let ulElement = document.createElement("ul");
for (let i: number = 0; i < 2; i++) {
let liElement = document.createElement("li");
let text: string = i === 0 ? "java" : "ajax";
let textNode1 = document.createTextNode(text);
liElement.appendChild(textNode1);
highlightSearch(liElement, "j", true);
ulElement.appendChild(liElement);
}
divElement.appendChild(ulElement);
document.body.appendChild(divElement);
let id = "listItems";
let query = "j";
let li = document.getElementById(id).querySelectorAll("li");
let element = document.getElementById(id).querySelector("span.e-highlight");
let MarkElement = document.getElementById(id).querySelectorAll("span.e-highlight");
expect(li.length).toBe(2); //List Elements
expect(element.textContent).toBe("j"); //Element going to be highlighted.
expect(query.length).toBe(1); //Input query
expect(MarkElement.length).toBe(2); // Highlighted text, <span class="e-highlight"> elements created
expect(element).not.toBeNull();
revertHighlightSearch(divElement); // <Span> is not Null
let spanEle = document.getElementById(id).querySelectorAll("span.e-highlight");
expect(spanEle.length).toBe(0); //Removing <Span> element.
highlightSearch(divElement, "", true);
let elements = document.getElementById(id).querySelectorAll("span.e-highlight");
expect(elements.length).toBe(0);
highlightSearch(divElement, "J", false);
let elements1 = document.getElementById(id).querySelectorAll("span.e-highlight");
expect(elements1.length).toBe(0);
highlightSearch(divElement, " ", false);
let elements2 = document.getElementById(id).querySelectorAll("span.e-highlight");
expect(elements2.length).toBe(0);
});
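// Illustrative sketch of the behaviour exercised above (not part of the suite):
// highlightSearch(li, "j", true) rewrites <li>java</li> as
// <li><span class="e-highlight">j</span>ava</li>, and revertHighlightSearch
// unwraps those spans again, which is why the span.e-highlight count returns to 0.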
it('memory leak', () => {
profile.sample();
let average: any = inMB(profile.averageChange)
//Check average change in memory samples to not be over 10MB
expect(average).toBeLessThan(10);
let memory: any = inMB(getMemoryProfile())
//Check the final memory usage against the first usage, there should be little change if everything was properly deallocated
expect(memory).toBeLessThan(profile.samples[0] + 0.25);
|
})
});
|
|
specificity_score_distance_neighbors.py
|
__author__ = 'Alexendar Perez'
#####################
# #
# Introduction #
# #
#####################
"""compute specificity score, Hamming, and Levinstein distance neighborhoods for strings"""
#################
# #
# Libraries #
# #
#################
import sys
import os
import pickle
import argparse
import sqlite3
import gzip
import numpy as np
import pandas as pd
from Bio import trie
#############################
# #
# CFD Scoring Functions #
# #
#############################
def
|
(wt,sg,pam,mm_scores,pam_scores):
#mm_scores,pam_scores = get_mm_pam_scores()
score = 1
sg = sg.replace('T','U')
wt = wt.replace('T','U')
s_list = list(sg)
wt_list = list(wt)
for i,sl in enumerate(s_list):
if wt_list[i] == sl:
score*=1
else:
try:
key = 'r'+wt_list[i]+':d'+revcom(sl)+','+str(i+1)
score*= mm_scores[key]
except KeyError:
continue
score*=pam_scores[pam]
return (score)
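# Illustrative sketch (hypothetical key, not taken from the scoring matrices):
# a single wild-type 'U' vs sgRNA 'G' mismatch at position 5 multiplies the running
# score by mm_scores['rU:dC,5']; the result is then scaled by pam_scores[pam], so a
# perfect protospacer match reduces to pam_scores[pam] alone.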
def get_mm_pam_scores(mms,pams):
try:
mm_scores = pickle.load(open(mms,'rb'))
pam_scores = pickle.load(open(pams,'rb'))
sys.stdout.write('CFD scoring matrices loaded\n')
return (mm_scores,pam_scores)
except:
raise Exception("Could not find file with mismatch scores or PAM scores")
def revcom(s):
basecomp = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'U': 'A'}
letters = list(s[::-1])
letters = [basecomp[base] for base in letters]
return ''.join(letters)
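# Illustrative example (not part of the original script):
# revcom('AAGG') reverses to 'GGAA' and complements to 'CCTT'; 'U' maps back to 'A',
# so guides already converted to RNA are handled as well.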
#########################
# #
#   Auxiliary Function  #
# #
#########################
def arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-i','--infile',help='absolute filepath to file with gRNA sequences',required=True)
parser.add_argument('-o','--outdir',help='absolute filepath to output directory',required=True)
parser.add_argument('-k','--kmer',help='absolute filepath to kmers_counted.txt file',required=True)
parser.add_argument('-t','--trie',help='absolute filepath to trie.dat file',required=True)
parser.add_argument('-m','--mismatch',help='absolute filepath to mismatch_score.pkl for CFD',required=True)
parser.add_argument('-p','--pam',help='absolute filepath to pam_scores.pkl for CFD',required=True)
parser.add_argument('--header',help='boolean value indicating whether a header is present in infile, default = True',default=True)
parser.add_argument('--sequence_field',help='if sequences not in first field of file, default = 0',default=0)
parser.add_argument('--cpf1',help='cpf1 enzyme processing',default=False)
args = parser.parse_args()
in_file = args.infile
outdir = args.outdir
kmer_counts_file = args.kmer
trie_file = args.trie
mismatch_score = args.mismatch
pam_score = args.pam
header = args.header
sequence_field = args.sequence_field
cpf1 = args.cpf1
return in_file,outdir,kmer_counts_file,trie_file,mismatch_score,pam_score,header,int(sequence_field),cpf1
def load_pickle(infile):
"""load pickle file
:param infile: absolute filepath to pickle
:return: deserialized pickle
"""
with open(infile, 'r') as in_file:
data = pickle.load(in_file)
return data
def sequence_data_extraction(data,header,sequence_field):
""" extract sequence data from data array
:param data: numpy array of data, first output of sequence_file_read_in()
:param header: boolean value indicating if a header is present in the data array; the first line is skipped when a header is present
:param sequence_field: if sequence data is not in 0 field specify; defaults to 0 field
:return: sequence data column
"""
if header:
sys.stdout.write('skipping first line in data array due to header\n')
if sequence_field:
sys.stdout.write('sequence field specified as %s\n' % sequence_field)
sequence_data = data[1:,sequence_field]
else:
sys.stdout.write('sequence field defaulted to 0\n')
sequence_data = data[1:,0]
else:
if sequence_field:
sys.stdout.write('sequence field specified as %s\n' % sequence_field)
sequence_data = data[:,sequence_field]
else:
sys.stdout.write('sequence field defaulted to 0\n')
sequence_data = data[:,0]
return sequence_data
def sequence_file_read_in(in_file):
"""read in file with sequences like gRNAs
:param in_file: absolute filepath to file containing sequences
:return: numpy array representation of data accessed through either pickle or pandas modules
"""
sys.stdout.write(
'%s is being used to compute features for ClassTask\n***Sequence data should be in first field***\n' % in_file)
try:
sys.stdout.write('attempting to read %s as pickle\n' % in_file)
file_format = 'pickle'
data = load_pickle(in_file)
except:
try:
sys.stdout.write('attempting to read %s with pandas as excel\n' % in_file)
file_format = 'excel'
data = np.array(pd.read_excel(in_file, header=None))
except:
try:
sys.stdout.write('attempting to read %s with pandas as text file\n' % in_file)
file_format = 'text'
data = np.array(pd.read_table(in_file, header=None))
except:
sys.stderr.write('%s file format not recognized as pickle, excel, or text file; aborting\n' % in_file)
sys.exit(1)
sys.stdout.write('%s successfully read in as %s\n' % (in_file, file_format))
return data
def load_trie(trie_file):
"""deserialize trie
:param trie_file: serialized trie from BioPython and produced through GuideScan processer.py: x__all_trie.dat
:return: deserialized trie object
"""
tr_file = open(trie_file, 'r')
tr = trie.load(tr_file)
tr_file.close()
sys.stdout.write('trie loaded into memory from %s \n' % trie_file)
return tr
# TODO: this is better stored as a DB; it also does not get used as the docstring
# suggests: the keys are parts[1], which are *genomic positions*, and the values
# are kmers.
def kmer_exact_occurrence_dictionary(kmer_counts_file):
"""generate genome-wide kmer occurrence dictionary as an sqlite database. If the
filename suggests that the kmer_counts_file is already a .db, then just return the cursor.
:param kmer_counts_file: absolute filepath to XXX_all_kmers_counted.txt.gz file, or a .db file
:return: connection object to the kmer-count dictionary
"""
if kmer_counts_file.endswith('db'):
conn = sqlite3.connect(kmer_counts_file)
return conn
kmer_dictionary = {}
records = 0
sqlite_file = 'hg38_kmers.db'
table_name = 'kmer_counts'
first = 'kmer'
first_type = 'text'
second = 'count'
second_type = 'INTEGER'
# Connect to the database file
try:
conn = sqlite3.connect(os.path.join(os.path.dirname(kmer_counts_file),sqlite_file))
except:
sys.stdout.write('Cannot open the sqlite db! \n')
sys.exit()
c = conn.cursor()
# Create the table
c.execute('CREATE TABLE {tn} ({fc} {ft}, {sc} {st})'\
.format(tn=table_name, fc=first, ft=first_type, sc=second, st=second_type))
my_open = gzip.open if kmer_counts_file.endswith('.gz') else open
with my_open(kmer_counts_file, 'r') as kmers:
for line in kmers:
clean_line = line.lstrip().rstrip()
parts = clean_line.split()
if kmer_dictionary.has_key(parts[1]):
sys.stdout.write('kmer duplication detected %s %s \n' % (parts[1], parts[0]))
else:
kmer_dictionary[parts[1]] = parts[0]
records += 1
# dump dict into the database, then reset it
if records > 100000:
for k, v in kmer_dictionary.items():
c.execute("INSERT INTO kmer_counts VALUES (?,?)", (k, v))
kmer_dictionary = {}
records = 0
# handle the remaining records
for k, v in kmer_dictionary.items():
c.execute("INSERT INTO kmer_counts VALUES (?,?)", (k, v))
sys.stdout.write('kmer dictionary generated \n')
# commit the changes to the db
conn.commit()
return conn
def add_features_to_feature_array(feature_array,augmenting_array):
"""add new features to features array
:param feature_array: numpy array with sequences as UI and previously computed features
:param augmenting_array: numpy array with new features to be added to feature array
:return: new feature array with features from the augmented array added
"""
equivalence = np.all(feature_array[:, 0].reshape(feature_array.shape[0], 1) == augmenting_array[:, 0].reshape(augmenting_array.shape[0], 1))
if equivalence:
try:
feature_array = np.concatenate((feature_array,augmenting_array[:,1:]),1)
return feature_array
except IndexError:
feature_array = np.concatenate((feature_array.reshape(feature_array.shape[0],1), augmenting_array[:, 1:]), 1)
return feature_array
else:
sys.stdout.write('original data array and new features array NOT in same order: attempt sort\n')
feature_array = feature_array[feature_array[:, 0].argsort()]
augmenting_array = augmenting_array[augmenting_array[:, 0].argsort()]
return add_features_to_feature_array(feature_array,augmenting_array)
def hamming_distance(s1, s2):
"""calculate the Hamming distance between two bit strings
:param s1: first string
:param s2: second string
:return: Hamming distance between s1 and s2
"""
assert len(s1) == len(s2)
return sum(c1 != c2 for c1, c2 in zip(s1, s2))
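# Illustrative example (not part of the original script):
# hamming_distance('GATTACA', 'GACTACA') == 1, while strings of unequal length
# trip the assert above instead of returning a distance.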
def query_db(c, key):
"""
Query the kmer file sqlite3 database with cursor `c` to extract the count associated with kmer `key`
"""
result = c.execute("SELECT count FROM kmer_counts WHERE kmer == ?", (key,))
result_list = result.fetchall()
if len(result_list) == 1:
return result_list[0][0]
elif len(result_list) == 0:
return 0
else:
# multiple rows for the same kmer: report them and fall back to the first hit
sys.stderr.write('querying db returned loads of hits for {0}: {1}\n'.format(key, ' '.join([str(r) for r in result_list])))
return result_list[0][0]
def compute_specificity_score_and_mismatch_neighborhoods(sequence_data, final_header, kmer_dictionary_cursor, tr, mm_scores,
pam_scores,cpf1):
"""compute GuideScan based features
:param sequence_data: numpy array with sequence data in 0 field
:param final_header: numpy array with header information
:param kmer_dictionary_cursor: cursor for the sqlite database; output of kmer_exact_occurrence_dictionary()
:param tr: trie datastructure from load_trie() function
:param mm_scores: first output of get_mm_pam_scores()
:param pam_scores: second output of get_mm_pam_scores()
:return: feature array with GuideScan derived features
"""
distance = 3
guidescan_array, seq_array = np.zeros((sequence_data.shape[0], 10)), np.empty((sequence_data.shape[0], 1)).astype(
str)
for j, on_target_sequence in enumerate(sequence_data[:, 0]):
# sequence array value
on_target_sequence_value = on_target_sequence
print "Processing guide " + str(j)
sys.stdout.flush()
# guidescan format
if cpf1:
on_target_sequence = 'TTTN%s' % (on_target_sequence)
else:
on_target_sequence = '%sNGG' % (on_target_sequence)
# query trie, get all near matches
query_sequences = tr.get_approximate(on_target_sequence, distance)
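# Note added for clarity: Bio.trie's get_approximate returns tuples of
# (matched sequence, stored value, mismatch count); the loop below uses i[0]
# for the sequence and i[2] for the mismatch count.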
# specificity score lists
cfd_lst, writeout_lst = [], []
# neighborhood enumeration dictionaries
hamming_key, levinstein_key = {}, {}
hamming_distance_dict = {0: 0, 1: 0, 2: 0, 3: 0}
levinstein_distance_dict = {1: 0, 2: 0, 3: 0}
for i in query_sequences:
# occurrence of sequence in genome
ot_sequence_occurence = int(query_db(kmer_dictionary_cursor,i[0]))
if hamming_distance(on_target_sequence, i[0]) <= distance:
# record key
if hamming_key.has_key(i[0]):
continue
else:
if cpf1:
pass
else:
# cfd computation
pam = i[0][-2:]
sg = i[0][:-3]
cfd_score = calc_cfd(on_target_sequence, sg, pam, mm_scores, pam_scores)
total_cfd_contribution = cfd_score * float(ot_sequence_occurence)
cfd_lst.append(total_cfd_contribution)
# augment count for Hamming neighbors at n mismatches
if hamming_distance_dict.has_key(hamming_distance(on_target_sequence, i[0])):
hamming_distance_dict[hamming_distance(on_target_sequence, i[0])] = \
hamming_distance_dict[hamming_distance(on_target_sequence, i[0])] + ot_sequence_occurence
hamming_key[i[0]] = 1
# establish count for Hamming neighbors at n mismatches
else:
hamming_distance_dict[hamming_distance(on_target_sequence, i[0])] = ot_sequence_occurence
hamming_key[i[0]] = 1
else:
# record key
if levinstein_key.has_key(i[0]):
continue
else:
# augment count for Levinstein neighbors at n mismatches
if levinstein_distance_dict.has_key(i[2]):
levinstein_distance_dict[i[2]] = levinstein_distance_dict[i[2]] + ot_sequence_occurence
levinstein_key[i[0]] = 1
# establish count for Levinstein neighbors at n mismatches
else:
levinstein_distance_dict[i[2]] = ot_sequence_occurence
levinstein_key[i[0]] = 1
# cfd composite specificity score
if cpf1:
cfd_aggregate_score = 0
else:
cfd_array = np.array(cfd_lst)
cfd_aggregate_score = 1.0 / (cfd_array.sum())
# fill in features into feature array
seq_array[j, 0] = on_target_sequence_value
guidescan_array[j, 0] = cfd_aggregate_score
guidescan_array[j, 1] = int(hamming_distance_dict[0])
guidescan_array[j, 2] = int(hamming_distance_dict[1])
guidescan_array[j, 3] = int(hamming_distance_dict[2])
guidescan_array[j, 4] = int(hamming_distance_dict[3])
guidescan_array[j, 5] = sum(hamming_distance_dict.values())
guidescan_array[j, 6] = int(levinstein_distance_dict[1])
guidescan_array[j, 7] = int(levinstein_distance_dict[2])
guidescan_array[j, 8] = int(levinstein_distance_dict[3])
guidescan_array[j, 9] = sum(levinstein_distance_dict.values())
"""
sys.stdout.write('Hamming enumerated neighbors = %s\nLevinstein enumerated neighbors = %s\nSpecificity score = %s\nhamming sequence neigbors = %s\n'
% (sum(hamming_distance_dict.values()),sum(levinstein_distance_dict.values()),cfd_aggregate_score,len(cfd_lst)))
"""
# generate final augmented features array
seq_guidescan_array = np.concatenate((seq_array, guidescan_array), 1)
sequence_data = add_features_to_feature_array(sequence_data, seq_guidescan_array)
header_value = np.array(['Specificity_Score', 'Occurrences_at_Hamming_0', 'Occurrences_at_Hamming_1',
'Occurrences_at_Hamming_2', 'Occurrences_at_Hamming_3', 'Sum_Hamming_Neighbors',
'Occurrences_at_Levinstein_1', 'Occurrences_at_Levinstein_2',
'Occurrences_at_Levinstein_3',
'Sum_Levinstein_Neighbors']).reshape(1, 10)
final_header = np.concatenate((final_header, header_value), 1)
sys.stdout.write('GuideScan based features computed\n')
return sequence_data, final_header
#####################
# #
# Main Function #
# #
#####################
def main():
"""
in_file = '/Users/pereza1/Projects/Jo/data/gecko_proper_excel/mouse_library_A_gecko.xlsx'
header = True
sequence_field = 0
"""
# user inputs
in_file,outdir,kmer_counts_file,trie_file,mismatch_score,pam_score,header,sequence_field,cpf1 = arg_parser()
# data read in
data = sequence_file_read_in(in_file)
# sequence data extraction
sequence_data = sequence_data_extraction(data,header,sequence_field)
sequence_data = sequence_data.reshape(sequence_data.shape[0],1)
final_header = np.array(['sequence']).reshape(1,1)
# compute or load kmer dictionary object
kmer_dictionary = kmer_exact_occurrence_dictionary(kmer_counts_file)
kmer_dictionary_cursor = kmer_dictionary.cursor()
# load CFD scoring matrices
mm_scores, pam_scores = get_mm_pam_scores(mismatch_score, pam_score)
# load trie
tr = load_trie(trie_file)
# compute specificity score and mismatch neighborhoods
sequence_data,final_header = compute_specificity_score_and_mismatch_neighborhoods(sequence_data,final_header,
kmer_dictionary_cursor,tr,mm_scores,
pam_scores,cpf1)
# generate final feature arrays
final_feature_array = np.concatenate((final_header,sequence_data),0)
#final_feature_array_standardized = np.concatenate((final_header,sequence_data_standardized),0)
sys.stdout.write('final feature arrays generated\n')
# write output to csv
column_length = final_feature_array.shape[1]
np.savetxt('%s/raw_features_computed_%s.csv' % (outdir,in_file.split('/')[-1].split('.')[0]), final_feature_array,
fmt='%' + '%ss' % (column_length), delimiter=',')
#np.savetxt('%s/standarized_features_computed_%s.csv' % (outdir,in_file.split('/')[-1].split('.')[0]), final_feature_array_standardized,
# fmt='%' + '%ss' % (column_length), delimiter=',')
sys.stdout.write('final arrays written to csv\n%s\n' % ('%s/features_computed_%s.csv' % (outdir,in_file.split('/')[-1].split('.')[0])))
# close the kmer_dictionary db
kmer_dictionary.close()
# completion stdout
sys.stdout.write('feature generation for %s complete\n' % (in_file))
if __name__ == '__main__':
main()
|
calc_cfd
|
handler_gosquared_out.go
|
package gosquared
import (
"encoding/json"
"errors"
"fmt"
"net/url"
"strings"
"time"
"github.com/grokify/chathooks/src/config"
"github.com/grokify/chathooks/src/handlers"
"github.com/grokify/chathooks/src/models"
cc "github.com/grokify/commonchat"
)
const (
DisplayName = "GoSquared"
HandlerKey = "gosquared"
DocumentationURL = "https://www.gosquared.com/customer/en/portal/articles/1996494-webhooks"
MessageBodyType = models.JSON
)
func NewHandler() handlers.Handler {
return handlers.Handler{MessageBodyType: MessageBodyType, Normalize: Normalize}
}
func Normalize(cfg config.Configuration, bytes []byte) (cc.Message, error) {
src, err := GosquaredOutBaseMessageFromBytes(bytes)
if err != nil {
return cc.NewMessage(), err
}
if len(src.Message.Id) > 0 {
return NormalizeLiveMessage(cfg, bytes)
} else if len(src.Person.Id) > 0 {
return NormalizeSmartGroup(cfg, bytes)
}
return NormalizeSiteTraffic(cfg, bytes)
}
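// Dispatch sketch (comment added for illustration, mirrors the checks above):
// a payload whose message object carries an id is normalized as a live chat
// message, one whose person object carries an id as a Smart Group transition,
// and anything else as a site-traffic alert.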
func NormalizeSiteTraffic(cfg config.Configuration, bytes []byte) (cc.Message, error) {
ccMsg := cc.NewMessage()
iconURL, err := cfg.GetAppIconURL(HandlerKey)
if err == nil {
ccMsg.IconURL = iconURL.String()
}
src, err := GosquaredOutMessageSiteTrafficFromBytes(bytes)
if err != nil {
return ccMsg, err
}
if src.TriggeredAlert.Boundary == "upper" {
ccMsg.Activity = "Site traffic spike"
} else { // if src.TriggeredAlert.Boundary == "lower" {
ccMsg.Activity = "Site traffic dip"
}
pluralSuffix := "s"
if src.Concurrents == int64(1) {
pluralSuffix = ""
}
ccMsg.Title = fmt.Sprintf("[%s](%s) has [%v visitor%s online](%s)",
src.SiteDetails.SiteName,
src.SiteDetails.URL,
src.Concurrents,
pluralSuffix,
src.SiteDetails.DashboardURL())
return ccMsg, nil
}
func NormalizeSmartGroup(cfg config.Configuration, bytes []byte) (cc.Message, error) {
ccMsg := cc.NewMessage()
iconURL, err := cfg.GetAppIconURL(HandlerKey)
if err == nil {
ccMsg.IconURL = iconURL.String()
}
src, err := GosquaredOutMessageSmartGroupFromBytes(bytes)
if err != nil {
return ccMsg, err
}
verb := "exited"
if src.Boundary == "enter" {
verb = "entered"
}
ccMsg.Activity = fmt.Sprintf("User has %s Smart Group", verb)
ccMsg.Title = fmt.Sprintf("%s has %s [%s](%s)",
src.Person.Name,
verb,
src.Group.Name,
src.GroupURL())
return ccMsg, nil
}
func NormalizeLiveMessage(cfg config.Configuration, bytes []byte) (cc.Message, error) {
ccMsg := cc.NewMessage()
iconURL, err := cfg.GetAppIconURL(HandlerKey)
if err == nil {
ccMsg.IconURL = iconURL.String()
}
src, err := GosquaredOutLiveMessageFromBytes(bytes)
if err != nil {
return ccMsg, err
}
ccMsg.Activity = "Live chat message"
personInboxURL, errURL := src.PersonInboxURL()
person := src.Person.DisplayName(false, true)
if errURL == nil
|
ccMsg.Title = fmt.Sprintf("%v sent a message", person)
attachment := cc.NewAttachment()
if len(strings.TrimSpace(src.Message.Content)) > 0 {
attachment.AddField(cc.Field{
Title: "Message",
Value: strings.TrimSpace(src.Message.Content)})
}
if src.Message.Timestamp > 0 {
epochMilli := src.Message.Timestamp
epochNano := epochMilli * 1000000
dt := time.Unix(0, epochNano)
attachment.AddField(cc.Field{
Title: "Time",
Value: dt.Format(time.RFC822)})
}
if len(attachment.Fields) > 0 {
ccMsg.AddAttachment(attachment)
}
return ccMsg, nil
}
type GosquaredOutBaseMessage struct {
TriggeredAlert GosquaredOutTriggeredAlert `json:"triggeredAlert,omitempty"`
Concurrents int64 `json:"concurrents,omitempty"`
Person GosquaredOutPerson `json:"person,omitempty"`
Message GosquaredOutLiveMessageMessage `json:"message,omitempty"`
}
func GosquaredOutBaseMessageFromBytes(bytes []byte) (GosquaredOutBaseMessage, error) {
msg := GosquaredOutBaseMessage{}
err := json.Unmarshal(bytes, &msg)
return msg, err
}
type GosquaredOutMessageSiteTraffic struct {
TriggeredAlert GosquaredOutTriggeredAlert `json:"triggeredAlert,omitempty"`
SiteDetails GosquaredOutSiteDetails `json:"siteDetails,omitempty"`
Concurrents int64 `json:"concurrents,omitempty"`
}
func GosquaredOutMessageSiteTrafficFromBytes(bytes []byte) (GosquaredOutMessageSiteTraffic, error) {
msg := GosquaredOutMessageSiteTraffic{}
err := json.Unmarshal(bytes, &msg)
return msg, err
}
type GosquaredOutLiveMessage struct {
Version string `json:"version,omitempty"`
SiteToken string `json:"site_token,omitempty"`
Timestamp string `json:"timestamp,omitempty"`
Person GosquaredOutPerson `json:"person,omitempty"`
Message GosquaredOutLiveMessageMessage `json:"message,omitempty"`
}
func (msg *GosquaredOutLiveMessage) PersonInboxURL() (string, error) {
if len(strings.TrimSpace(msg.SiteToken)) == 0 || len(strings.TrimSpace(msg.Person.Id)) == 0 {
return "", errors.New("Information missing for PersonInboxURL")
}
personIdEsc := url.QueryEscape(strings.TrimSpace(msg.Person.Id))
personInboxURL := fmt.Sprintf(
"https://www.gosquared.com/inbox/%v/inbox/%v",
strings.TrimSpace(msg.SiteToken),
personIdEsc)
return personInboxURL, nil
}
/*
https://www.gosquared.com/inbox/GSN-345166-V/inbox/Anon%20Chat:%20bba4b6264b073a17c74f1b0da7720114
The rule to build the url is
https://www.gosquared.com/inbox/<site_token>/inbox/<person id encoded>.
*/
func GosquaredOutLiveMessageFromBytes(bytes []byte) (GosquaredOutLiveMessage, error) {
msg := GosquaredOutLiveMessage{}
err := json.Unmarshal(bytes, &msg)
return msg, err
}
/*
{
"version":"0.0.1",
"site_token":"GSN-67890-A",
"timestamp":"2017-05-02T05:19:34.252Z",
"person":{
"id":"Anon Chat: bba4b6264b073a17c74f1b0da7720114",
"email":"",
"name":"",
"avatar":"",
"chat":{
"archived":null,
"latest":{
"message":{
"type":"message",
"id":"167c06cf-ff7c-4d09-880c-5533795f9673",
"content":"hello world",
"timestamp":1493702374252,
"from":"client",
"private":false,
"session":{
"title":"",
"href":"http://127.0.0.1:2015/"
},
*/
type GosquaredOutLiveMessageMessage struct {
Type string `json:"type,omitempty"`
Id string `json:"id,omitempty"`
Content string `json:"content,omitempty"`
Timestamp int64 `json:"timestamp,omitempty"`
From string `json:"from,omitempty"`
Private bool `json:"private,omitempty"`
Session GosquaredOutLiveMessageSession `json:"session,omitempty"`
Data interface{} `json:"data,omitempty"`
Entities []interface{} `json:"entities,omitempty"`
PersonID string `json:"person_id,omitempty"`
}
type GosquaredOutLiveMessageSession struct {
Title string `json:"title,omitempty"`
Href string `json:"href,omitempty"`
}
/*
"message":{
"type":"message",
"id":"167c06cf-ff7c-4d09-880c-5533795f9673",
"content":"hello world",
"timestamp":1493702374252,
"from":"client",
"private":false,
"session":{
"title":"",
"href":"http://127.0.0.1:2015/"
},
"data":{
},
"entities":[
],
"person_id":"Anon Chat: bba4b6264b073a17c74f1b0da7720114"
}
*/
type GosquaredOutMessageSmartGroup struct {
Version string `json:"version,omitempty"`
SiteToken string `json:"site_token,omitempty"`
Group GosquaredOutGroup `json:"group,omitempty"`
Boundary string `json:"boundary,omitempty"`
Person GosquaredOutPerson `json:"person,omitempty"`
}
func GosquaredOutMessageSmartGroupFromBytes(bytes []byte) (GosquaredOutMessageSmartGroup, error) {
msg := GosquaredOutMessageSmartGroup{}
err := json.Unmarshal(bytes, &msg)
return msg, err
}
type GosquaredOutGroup struct {
Name string `json:"name,omitempty"`
Id string `json:"id,omitempty"`
}
func (msg *GosquaredOutMessageSmartGroup) GroupURL() string {
// https://www.gosquared.com/people/GSN-466237-B/last-seen-1-day
return fmt.Sprintf("https://www.gosquared.com/people/%s/%s",
msg.SiteToken, msg.Group.Id)
}
type GosquaredOutPerson struct {
CreatedAt string `json:"created_at,omitempty"`
Phone string `json:"phone,omitempty"`
Avatar string `json:"avatar,omitempty"`
Description string `json:"description,omitempty"`
Username string `json:"username,omitempty"`
Email string `json:"email,omitempty"`
Name string `json:"name,omitempty"`
Id string `json:"id,omitempty"`
}
func (person *GosquaredOutPerson) DisplayName(extraEmail bool, anonymous bool) string {
displayName := ""
if len(strings.TrimSpace(person.Name)) > 0 {
displayName = strings.TrimSpace(person.Name)
} else if len(strings.TrimSpace(person.Username)) > 0 {
displayName = strings.TrimSpace(person.Username)
}
if len(strings.TrimSpace(person.Email)) > 0 {
if len(displayName) > 0 && extraEmail {
displayName = fmt.Sprintf("%v (%v)", displayName, strings.TrimSpace(person.Email))
} else {
displayName = strings.TrimSpace(person.Email)
}
}
if len(displayName) == 0 && anonymous {
displayName = "Anonymous user"
}
return displayName
}
type GosquaredOutMessageConcurrent struct {
TriggeredAlert GosquaredOutTriggeredAlert `json:"triggeredAlert,omitempty"`
Concurrents int64 `json:"concurrents,omitempty"`
SiteDetails GosquaredOutSiteDetails `json:"siteDetails,omitempty"`
}
type GosquaredOutTriggeredAlert struct {
Id int64 `json:"id,omitempty"`
Boundary string `json:"boundary,omitempty"`
Value string `json:"value,omitempty"`
Type string `json:"type,omitempty"`
}
type GosquaredOutSiteDetails struct {
UserId int64 `json:"user_id,omitempty"`
Acct string `json:"acct,omitempty"`
Email string `json:"email,omitempty"`
FirstName string `json:"first_name,omitempty"`
LastName string `json:"last_name,omitempty"`
SiteName string `json:"site_name,omitempty"`
Domain string `json:"domain,omitempty"`
URL string `json:"url,omitempty"`
Timezone string `json:"timezone,omitempty"`
}
func (site *GosquaredOutSiteDetails) DashboardURL() string {
return fmt.Sprintf("https://www.gosquared.com/now/%s", site.Acct)
}
func DashboardURL(siteToken string) string {
return fmt.Sprintf("https://www.gosquared.com/now/%s", siteToken)
}
func PeopleEveryoneURL(siteToken string) string {
// https://www.gosquared.com/people/GSN-466237-B/everyone
return fmt.Sprintf("https://www.gosquared.com/people/%s/everyone", siteToken)
}
|
{
person = fmt.Sprintf("[%v](%v)", person, personInboxURL)
}
|
lib.rs
|
mod api;
mod error;
pub use error::TIError;
pub struct TrayItem(api::TrayItemImpl);
impl TrayItem {
pub fn new(title: &str, icon: &str) -> Result<Self, TIError> {
Ok(
Self(
api::TrayItemImpl::new(title, icon)?
)
)
}
pub fn set_icon(&mut self, icon: &str) -> Result<(), TIError> {
self.0.set_icon(icon)
}
pub fn add_label(&mut self, label: &str) -> Result<(), TIError> {
self.0.add_label(label)
}
pub fn add_menu_item<F>(&mut self, label: &str, cb: F) -> Result<(), TIError>
where F: FnMut() -> () + Send + Sync + 'static
|
pub fn inner_mut(&mut self) -> &mut api::TrayItemImpl {
&mut self.0
}
}
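// Minimal usage sketch (illustrative only; icon names depend on the platform
// backend behind api::TrayItemImpl):
//
//     let mut tray = TrayItem::new("My App", "app-icon")?;
//     tray.add_label("Status: idle")?;
//     tray.add_menu_item("Quit", || std::process::exit(0))?;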
|
{
self.0.add_menu_item(label, cb)
}
|
views.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import functools
import json
import logging
import os
import re
import sys
import traceback
import uuid
from datetime import datetime
from django.utils import html
from six.moves.urllib.parse import urlparse
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AdminPasswordChangeForm
from django.contrib.auth.models import User
from django.contrib.auth.views import logout as django_logout
from django.core import cache
from django.core.mail.message import EmailMessage
from django.http import HttpResponseRedirect, HttpResponse, Http404, \
HttpResponseServerError, HttpResponseNotFound, HttpResponseBadRequest, \
HttpResponseForbidden, HttpResponsePermanentRedirect
from django.shortcuts import redirect, render
from django.template import loader
from django.template.loader import render_to_string
from django.template.response import TemplateResponse
from django.urls import resolve
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _, ugettext_noop, LANGUAGE_SESSION_KEY
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.http import require_GET, require_POST
from django.views.generic import TemplateView
from django.views.generic.base import View
from djangular.views.mixins import JSONResponseMixin
import httpagentparser
from couchdbkit import ResourceNotFound
from two_factor.views import LoginView
from two_factor.forms import AuthenticationTokenForm, BackupTokenForm
from corehq.apps.analytics import ab_tests
from corehq.apps.hqadmin.service_checks import CHECKS, run_checks
from corehq.apps.users.landing_pages import get_redirect_url, get_cloudcare_urlname
from corehq.apps.users.models import CouchUser
from corehq.form_processor.utils.general import should_use_sql_backend
from dimagi.utils.couch.cache.cache_core import get_redis_default_cache
from dimagi.utils.couch.database import get_db
from memoized import memoized
from dimagi.utils.django.request import mutable_querydict
from dimagi.utils.logging import notify_exception
from dimagi.utils.parsing import string_to_datetime
from dimagi.utils.web import get_url_base, json_response, get_site_domain
from no_exceptions.exceptions import Http403
from soil import DownloadBase
from soil import views as soil_views
from corehq.apps.accounting.models import Subscription
from corehq.apps.domain.decorators import require_superuser, login_and_domain_required, two_factor_exempt, \
track_domain_request
from corehq.apps.domain.models import Domain
from corehq.apps.domain.utils import normalize_domain_name, get_domain_from_url
from corehq.apps.dropbox.decorators import require_dropbox_session
from corehq.apps.dropbox.exceptions import DropboxUploadAlreadyInProgress, DropboxInvalidToken
from corehq.apps.dropbox.models import DropboxUploadHelper
from corehq.apps.dropbox.views import DROPBOX_ACCESS_TOKEN, DropboxAuthInitiate
from corehq.apps.hqadmin.management.commands.deploy_in_progress import DEPLOY_IN_PROGRESS_FLAG
from corehq.apps.hqwebapp.doc_info import get_doc_info, get_object_info
from corehq.apps.hqwebapp.encoders import LazyEncoder
from corehq.apps.hqwebapp.forms import EmailAuthenticationForm, CloudCareAuthenticationForm
from corehq.apps.hqwebapp.utils import get_environment_friendly_name, update_session_language
from corehq.apps.locations.permissions import location_safe
from corehq.apps.locations.models import SQLLocation
from corehq.apps.users.util import format_username
from corehq.form_processor.backends.sql.dbaccessors import FormAccessorSQL, CaseAccessorSQL
from corehq.form_processor.exceptions import XFormNotFound, CaseNotFound
from corehq.util.context_processors import commcare_hq_names
from corehq.util.datadog.const import DATADOG_UNKNOWN
from corehq.util.datadog.metrics import JSERROR_COUNT
from corehq.util.datadog.utils import create_datadog_event, sanitize_url
from corehq.util.datadog.gauges import datadog_counter, datadog_gauge
from corehq.util.python_compatibility import soft_assert_type_text
from corehq.util.view_utils import reverse
import six
from six.moves import range
def is_deploy_in_progress():
cache = get_redis_default_cache()
return cache.get(DEPLOY_IN_PROGRESS_FLAG) is not None
def format_traceback_the_way_python_does(type, exc, tb):
"""
Returns a traceback that looks like the one python gives you in the shell, e.g.
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
NameError: name 'name' is not defined
"""
if six.PY3:
exc_message = six.text_type(exc)
else:
exc_message = exc.message
if isinstance(exc_message, bytes):
exc_message = exc_message.decode('utf-8')
return 'Traceback (most recent call last):\n{}{}: {}'.format(
''.join(traceback.format_tb(tb)),
type.__name__,
exc_message
)
def server_error(request, template_name='500.html'):
"""
500 error handler.
"""
urlname = resolve(request.path).url_name
submission_urls = [
'receiver_secure_post',
'receiver_secure_post_with_app_id',
'receiver_post_with_app_id'
]
if urlname in submission_urls + ['app_aware_restore']:
return HttpResponse(status=500)
domain = get_domain_from_url(request.path) or ''
# hat tip: http://www.arthurkoziel.com/2009/01/15/passing-mediaurl-djangos-500-error-view/
t = loader.get_template(template_name)
type, exc, tb = sys.exc_info()
traceback_text = format_traceback_the_way_python_does(type, exc, tb)
traceback_key = uuid.uuid4().hex
cache.cache.set(traceback_key, traceback_text, 60*60)
return HttpResponseServerError(t.render(
context={
'MEDIA_URL': settings.MEDIA_URL,
'STATIC_URL': settings.STATIC_URL,
'domain': domain,
'500traceback': traceback_key,
},
request=request,
))
def not_found(request, template_name='404.html'):
"""
404 error handler.
"""
t = loader.get_template(template_name)
return HttpResponseNotFound(t.render(
context={
'MEDIA_URL': settings.MEDIA_URL,
'STATIC_URL': settings.STATIC_URL,
},
request=request,
))
@require_GET
@location_safe
def redirect_to_default(req, domain=None):
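# Routing summary (comment added for clarity, mirrors the branches below):
# anonymous users are sent to the (domain) login page, users missing required
# two-factor auth get a 403 page, single-domain users land on their role's landing
# page, CloudCare, or the dashboard, and multi-domain users go to domain select.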
if not req.user.is_authenticated:
if domain is not None:
url = reverse('domain_login', args=[domain])
else:
url = reverse('login')
elif domain and _two_factor_needed(domain, req):
return TemplateResponse(
request=req,
template='two_factor/core/otp_required.html',
status=403,
)
else:
if domain:
domain = normalize_domain_name(domain)
domains = [Domain.get_by_name(domain)]
else:
domains = Domain.active_for_user(req.user)
if 0 == len(domains) and not req.user.is_superuser:
from corehq.apps.registration.views import track_domainless_new_user
track_domainless_new_user(req)
return redirect('registration_domain')
elif 1 == len(domains):
from corehq.apps.dashboard.views import dashboard_default
from corehq.apps.users.models import DomainMembershipError
if domains[0]:
domain = domains[0].name
couch_user = req.couch_user
try:
role = couch_user.get_role(domain)
except DomainMembershipError:
# commcare users without roles should always be denied access
if couch_user.is_commcare_user():
raise Http404()
else:
# web users without roles are redirected to the dashboard default
# view since some domains allow web users to request access if they
# don't have it
return dashboard_default(req, domain)
else:
if role and role.default_landing_page:
url = get_redirect_url(role.default_landing_page, domain)
elif couch_user.is_commcare_user():
url = reverse(get_cloudcare_urlname(domain), args=[domain])
else:
return dashboard_default(req, domain)
else:
raise Http404()
else:
url = settings.DOMAIN_SELECT_URL
return HttpResponseRedirect(url)
def _two_factor_needed(domain_name, request):
domain_name = normalize_domain_name(domain_name)
domain_obj = Domain.get_by_name(domain_name)
if domain_obj:
return (
domain_obj.two_factor_auth
and not request.couch_user.two_factor_disabled
and not request.user.is_verified()
)
def yui_crossdomain(req):
x_domain = """<?xml version="1.0"?>
<!DOCTYPE cross-domain-policy SYSTEM "http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd">
<cross-domain-policy>
<allow-access-from domain="yui.yahooapis.com"/>
<allow-access-from domain="%s"/>
<site-control permitted-cross-domain-policies="master-only"/>
</cross-domain-policy>""" % get_site_domain()
return HttpResponse(x_domain, content_type="application/xml")
@login_required()
def password_change(req):
user_to_edit = User.objects.get(id=req.user.id)
if req.method == 'POST':
password_form = AdminPasswordChangeForm(user_to_edit, req.POST)
if password_form.is_valid():
password_form.save()
return HttpResponseRedirect('/')
else:
password_form = AdminPasswordChangeForm(user_to_edit)
template_name = "password_change.html"
return render(req, template_name, {"form": password_form})
def server_up(req):
"""
Health check view which can be hooked into server monitoring tools like 'pingdom'
Returns:
HttpResponse("success", status_code=200)
HttpResponse(error_message, status_code=500)
Hit serverup.txt to check all the default enabled services (always_check=True)
Hit serverup.txt?only={check_name} to only check a specific service
Hit serverup.txt?{check_name} to include a non-default check (currently only ``heartbeat``)
"""
only = req.GET.get('only', None)
if only and only in CHECKS:
checks_to_do = [only]
else:
checks_to_do = [
check
for check, check_info in CHECKS.items()
if check_info['always_check'] or req.GET.get(check, None) is not None
]
statuses = run_checks(checks_to_do)
failed_checks = [(check, status) for check, status in statuses if not status.success]
for check_name, status in statuses:
tags = [
'status:{}'.format('failed' if not status.success else 'ok'),
'check:{}'.format(check_name)
]
datadog_gauge('commcare.serverup.check', status.duration, tags=tags)
if failed_checks and not is_deploy_in_progress():
status_messages = [
html.linebreaks('<strong>{}</strong>: {}'.format(check, html.escape(status.msg)).strip())
for check, status in failed_checks
]
create_datadog_event(
'Serverup check failed', '\n'.join(status_messages),
alert_type='error', aggregation_key='serverup',
)
status_messages.insert(0, 'Failed Checks (%s):' % os.uname()[1])
return HttpResponse(''.join(status_messages), status=500)
else:
return HttpResponse("success")
def _no_permissions_message(request, template_name="403.html", message=None):
t = loader.get_template(template_name)
return t.render(
context={
'MEDIA_URL': settings.MEDIA_URL,
'STATIC_URL': settings.STATIC_URL,
'message': message,
},
request=request,
)
def no_permissions(request, redirect_to=None, template_name="403.html", message=None):
"""
403 error handler.
"""
return HttpResponseForbidden(_no_permissions_message(request, template_name, message))
def no_permissions_exception(request, template_name="403.html", message=None):
return Http403(_no_permissions_message(request, template_name, message))
def csrf_failure(request, reason=None, template_name="csrf_failure.html"):
t = loader.get_template(template_name)
return HttpResponseForbidden(t.render(
context={
'MEDIA_URL': settings.MEDIA_URL,
'STATIC_URL': settings.STATIC_URL,
},
request=request,
))
@sensitive_post_parameters('auth-password')
def _login(req, domain_name):
if req.user.is_authenticated and req.method == "GET":
redirect_to = req.GET.get('next', '')
if redirect_to:
return HttpResponseRedirect(redirect_to)
if not domain_name:
return HttpResponseRedirect(reverse('homepage'))
else:
return HttpResponseRedirect(reverse('domain_homepage', args=[domain_name]))
if req.method == 'POST' and domain_name and '@' not in req.POST.get('auth-username', '@'):
with mutable_querydict(req.POST):
req.POST['auth-username'] = format_username(req.POST['auth-username'], domain_name)
if 'auth-username' in req.POST:
couch_user = CouchUser.get_by_username(req.POST['auth-username'].lower())
if couch_user:
new_lang = couch_user.language
old_lang = req.session.get(LANGUAGE_SESSION_KEY)
update_session_language(req, old_lang, new_lang)
req.base_template = settings.BASE_TEMPLATE
context = {}
template_name = 'login_and_password/login.html'
custom_landing_page = settings.CUSTOM_LANDING_TEMPLATE
if custom_landing_page:
if isinstance(custom_landing_page, six.string_types):
soft_assert_type_text(custom_landing_page)
template_name = custom_landing_page
else:
template_name = custom_landing_page.get(req.get_host())
if template_name is None:
template_name = custom_landing_page.get('default', template_name)
elif domain_name:
domain_obj = Domain.get_by_name(domain_name)
req_params = req.GET if req.method == 'GET' else req.POST
context.update({
'domain': domain_name,
'hr_name': domain_obj.display_name(),
'next': req_params.get('next', '/a/%s/' % domain_name),
'allow_domain_requests': domain_obj.allow_domain_requests,
'current_page': {'page_name': _('Welcome back to %s!') % domain_obj.display_name()},
})
else:
commcare_hq_name = commcare_hq_names(req)['commcare_hq_names']["COMMCARE_HQ_NAME"]
context.update({
'current_page': {'page_name': _('Welcome back to %s!') % commcare_hq_name},
})
if settings.SERVER_ENVIRONMENT in settings.ICDS_ENVS:
auth_view = CloudCareLoginView
else:
auth_view = HQLoginView if not domain_name else CloudCareLoginView
demo_workflow_ab_v2 = ab_tests.SessionAbTest(ab_tests.DEMO_WORKFLOW_V2, req)
if settings.IS_SAAS_ENVIRONMENT:
context['demo_workflow_ab_v2'] = demo_workflow_ab_v2.context
response = auth_view.as_view(template_name=template_name, extra_context=context)(req)
if settings.IS_SAAS_ENVIRONMENT:
demo_workflow_ab_v2.update_response(response)
return response
@two_factor_exempt
@sensitive_post_parameters('auth-password')
def login(req):
# This is a wrapper around the _login view
if settings.SERVER_ENVIRONMENT in settings.ICDS_ENVS:
login_url = reverse('domain_login', kwargs={'domain': 'icds-cas'})
return HttpResponseRedirect(login_url)
req_params = req.GET if req.method == 'GET' else req.POST
domain = req_params.get('domain', None)
return _login(req, domain)
@location_safe
def domain_login(req, domain):
# This is a wrapper around the _login view which sets a different template
project = Domain.get_by_name(domain)
if not project:
raise Http404
# FYI, the domain context_processor will pick this up and apply the
# necessary domain contexts:
req.project = project
return _login(req, domain)
class HQLoginView(LoginView):
form_list = [
('auth', EmailAuthenticationForm),
('token', AuthenticationTokenForm),
('backup', BackupTokenForm),
]
extra_context = {}
def get_context_data(self, **kwargs):
context = super(HQLoginView, self).get_context_data(**kwargs)
context.update(self.extra_context)
context['implement_password_obfuscation'] = settings.OBFUSCATE_PASSWORD_FOR_NIC_COMPLIANCE
return context
class CloudCareLoginView(HQLoginView):
form_list = [
('auth', CloudCareAuthenticationForm),
('token', AuthenticationTokenForm),
('backup', BackupTokenForm),
]
@two_factor_exempt
def logout(req):
referer = req.META.get('HTTP_REFERER')
domain = get_domain_from_url(urlparse(referer).path) if referer else None
# we don't actually do anything with the response here:
django_logout(req, **{"template_name": settings.BASE_TEMPLATE})
if referer and domain:
domain_login_url = reverse('domain_login', kwargs={'domain': domain})
return HttpResponseRedirect('%s' % domain_login_url)
else:
return HttpResponseRedirect(reverse('login'))
@login_and_domain_required
@track_domain_request(calculated_prop='cp_n_downloads_custom_exports')
def retrieve_download(req, domain, download_id, template="hqwebapp/includes/file_download.html"):
next_url = req.GET.get('next', reverse('my_project_settings', args=[domain]))
return soil_views.retrieve_download(req, download_id, template,
extra_context={'domain': domain, 'next_url': next_url})
def dropbox_next_url(request, download_id):
return request.META.get('HTTP_REFERER', '/')
@login_required
@require_dropbox_session(next_url=dropbox_next_url)
def dropbox_upload(request, download_id):
download = DownloadBase.get(download_id)
if download is None:
logging.error("Download file request for expired/nonexistent file requested")
raise Http404
else:
filename = download.get_filename()
# Hack to get target filename from content disposition
match = re.search('filename="([^"]*)"', download.content_disposition)
dest = match.group(1) if match else 'download.txt'
try:
uploader = DropboxUploadHelper.create(
request.session.get(DROPBOX_ACCESS_TOKEN),
src=filename,
dest=dest,
download_id=download_id,
user=request.user,
)
except DropboxInvalidToken:
return HttpResponseRedirect(reverse(DropboxAuthInitiate.slug))
except DropboxUploadAlreadyInProgress:
uploader = DropboxUploadHelper.objects.get(download_id=download_id)
messages.warning(
request,
'The file is in the process of being synced to dropbox! It is {0:.2f}% '
'complete.'.format(uploader.progress * 100)
)
return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
uploader.upload()
messages.success(
request,
_("Apps/{app}/{dest} is queued to sync to dropbox! You will receive an email when it"
" completes.".format(app=settings.DROPBOX_APP_NAME, dest=dest))
)
return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
@require_superuser
def debug_notify(request):
try:
0 // 0
except ZeroDivisionError:
notify_exception(request,
"If you want to achieve a 500-style email-out but don't want the user to see a 500, use notify_exception(request[, message])")
return HttpResponse("Email should have been sent")
@require_POST
def jserror(request):
agent = request.META.get('HTTP_USER_AGENT', None)
os = browser_name = browser_version = bot = DATADOG_UNKNOWN
if agent:
parsed_agent = httpagentparser.detect(agent)
bot = parsed_agent.get('bot', False)
if 'os' in parsed_agent:
os = parsed_agent['os'].get('name', DATADOG_UNKNOWN)
if 'browser' in parsed_agent:
browser_version = parsed_agent['browser'].get('version', DATADOG_UNKNOWN)
browser_name = parsed_agent['browser'].get('name', DATADOG_UNKNOWN)
datadog_counter(JSERROR_COUNT, tags=[
'os:{}'.format(os),
'browser_version:{}'.format(browser_version),
'browser_name:{}'.format(browser_name),
'url:{}'.format(sanitize_url(request.POST.get('page', None))),
'file:{}'.format(request.POST.get('filename')),
'bot:{}'.format(bot),
])
return HttpResponse('')
@method_decorator([login_required], name='dispatch')
class BugReportView(View):
@property
def recipients(self):
"""
Returns:
list
"""
return settings.BUG_REPORT_RECIPIENTS
def post(self, req, *args, **kwargs):
report = dict([(key, req.POST.get(key, '')) for key in (
'subject',
'username',
'domain',
'url',
'message',
'app_id',
'cc',
'email',
'500traceback',
'sentry_id',
)])
try:
couch_user = req.couch_user
full_name = couch_user.full_name
if couch_user.is_commcare_user():
email = report['email']
else:
email = couch_user.get_email()
except Exception:
full_name = None
email = report['email']
report['full_name'] = full_name
report['email'] = email or report['username']
if report['domain']:
domain = report['domain']
elif len(couch_user.domains) == 1:
# This isn't a domain page, but the user has only one domain, so let's use that
domain = couch_user.domains[0]
else:
domain = "<no domain>"
message = (
"username: {username}\n"
"full name: {full_name}\n"
"domain: {domain}\n"
"url: {url}\n"
).format(**report)
domain_object = Domain.get_by_name(domain) if report['domain'] else None
debug_context = {
'datetime': datetime.utcnow(),
'self_started': '<unknown>',
'scale_backend': '<unknown>',
'has_handoff_info': '<unknown>',
'project_description': '<unknown>',
'sentry_error': '{}{}'.format(getattr(settings, 'SENTRY_QUERY_URL'), report['sentry_id'])
}
if domain_object:
current_project_description = domain_object.project_description if domain_object else None
new_project_description = req.POST.get('project_description')
if (domain_object and
req.couch_user.is_domain_admin(domain=domain) and
new_project_description and current_project_description != new_project_description):
domain_object.project_description = new_project_description
domain_object.save()
message += ((
"software plan: {software_plan}\n"
).format(
software_plan=Subscription.get_subscribed_plan_by_domain(domain),
))
debug_context.update({
'self_started': domain_object.internal.self_started,
'scale_backend': should_use_sql_backend(domain),
'has_handoff_info': bool(domain_object.internal.partner_contact),
'project_description': domain_object.project_description,
})
subject = '{subject} ({domain})'.format(subject=report['subject'], domain=domain)
cc = [el for el in report['cc'].strip().split(",") if el]
|
if full_name and not any([c in full_name for c in '<>"']):
reply_to = '"{full_name}" <{email}>'.format(**report)
else:
reply_to = report['email']
# if the person looks like a commcare user, fogbugz can't reply
# to their email, so just use the default
if settings.HQ_ACCOUNT_ROOT in reply_to:
reply_to = settings.SERVER_EMAIL
message += "Message:\n\n{message}\n".format(message=report['message'])
if req.POST.get('five-hundred-report'):
extra_message = ("This message was reported from a 500 error page! "
"Please fix this ASAP (as if you wouldn't anyway)...")
extra_debug_info = (
"datetime: {datetime}\n"
"Is self start: {self_started}\n"
"Is scale backend: {scale_backend}\n"
"Has Support Hand-off Info: {has_handoff_info}\n"
"Project description: {project_description}\n"
"Sentry Error: {sentry_error}\n"
).format(**debug_context)
traceback_info = cache.cache.get(report['500traceback']) or 'No traceback info available'
cache.cache.delete(report['500traceback'])
message = "\n\n".join([message, extra_debug_info, extra_message, traceback_info])
email = EmailMessage(
subject=subject,
body=message,
to=self.recipients,
headers={'Reply-To': reply_to},
cc=cc
)
uploaded_file = req.FILES.get('report_issue')
if uploaded_file:
filename = uploaded_file.name
content = uploaded_file.read()
email.attach(filename=filename, content=content)
# only fake the from email if it's an @dimagi.com account
is_icds_env = settings.SERVER_ENVIRONMENT in settings.ICDS_ENVS
if re.search(r'@dimagi\.com$', report['username']) and not is_icds_env:
email.from_email = report['username']
else:
email.from_email = settings.CCHQ_BUG_REPORT_EMAIL
email.send(fail_silently=False)
if req.POST.get('five-hundred-report'):
messages.success(
req,
"Your CommCare HQ Issue Report has been sent. We are working quickly to resolve this problem."
)
return HttpResponseRedirect(reverse('homepage'))
return HttpResponse()
def render_static(request, template, page_name):
"""
Takes an HTML template and renders it with CommCare HQ's styling.
"""
return render(request, "hqwebapp/blank.html",
{'tmpl': template, 'page_name': page_name})
def cda(request):
return render_static(request, "cda.html", _("Content Distribution Agreement"))
def apache_license(request):
return render_static(request, "apache_license.html", _("Apache License"))
def bsd_license(request):
return render_static(request, "bsd_license.html", _("BSD License"))
class BasePageView(TemplateView):
urlname = None # name of the view used in urls
page_title = None # what shows up in the <title>
template_name = 'hqwebapp/base_page.html'
@property
def page_name(self):
"""
This is what is visible to the user.
page_title is what shows up in <title> tags.
"""
return self.page_title
@property
def page_url(self):
raise NotImplementedError()
@property
def parent_pages(self):
"""
Specify parent pages as a list of
[{
'title': <name>,
'url: <url>,
}]
"""
return []
@property
def main_context(self):
"""
The shared context for rendering this page.
"""
return {
'current_page': {
'page_name': self.page_name,
'title': self.page_title,
'url': self.page_url,
'parents': self.parent_pages,
},
}
@property
def page_context(self):
"""
The Context for the settings page
"""
return {}
def get_context_data(self, **kwargs):
context = super(BasePageView, self).get_context_data(**kwargs)
context.update(self.main_context)
context.update(self.page_context)
return context
def render_to_response(self, context, **response_kwargs):
"""
Returns a response with a template rendered with the given context.
"""
return render(self.request, self.template_name, context)
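# Hedged usage sketch: the class name and context values below are illustrative,
# not part of this module. A concrete page normally only defines `urlname`,
# `page_title`, `page_url` and, optionally, `page_context`:
#
#     class ExamplePageView(BasePageView):
#         urlname = 'example_page'
#         page_title = ugettext_noop("Example Page")
#
#         @property
#         def page_url(self):
#             return reverse(self.urlname)
#
#         @property
#         def page_context(self):
#             return {'greeting': 'hello'}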
class BaseSectionPageView(BasePageView):
section_name = ""
template_name = "hqwebapp/base_section.html"
@property
def section_url(self):
raise NotImplementedError
@property
def main_context(self):
context = super(BaseSectionPageView, self).main_context
context.update({
'section': {
'page_name': self.section_name,
'url': self.section_url,
}
})
return context
class PaginatedItemException(Exception):
pass
class CRUDPaginatedViewMixin(object):
"""
Mix this in with a TemplateView view object.
For usage tips, see the docs for UI Helpers > Paginated CRUD View.
"""
DEFAULT_LIMIT = 10
limit_text = ugettext_noop("items per page")
empty_notification = ugettext_noop("You have no items.")
loading_message = ugettext_noop("Loading...")
deleted_items_header = ugettext_noop("Deleted Items:")
new_items_header = ugettext_noop("New Items:")
def _safe_escape(self, expression, default):
try:
return expression()
except ValueError:
return default
@property
def parameters(self):
"""
Specify GET or POST from a request object.
"""
raise NotImplementedError("you need to implement get_param_source")
@property
@memoized
def page(self):
return self._safe_escape(
lambda: int(self.parameters.get('page', 1)),
1
)
@property
@memoized
def limit(self):
return self._safe_escape(
lambda: int(self.parameters.get('limit', self.DEFAULT_LIMIT)),
self.DEFAULT_LIMIT
)
@property
def total(self):
raise NotImplementedError("You must implement total.")
@property
def sort_by(self):
return self.parameters.get('sortBy', 'abc')
@property
def skip(self):
return (self.page - 1) * self.limit
@property
def action(self):
action = self.parameters.get('action')
if action not in self.allowed_actions:
raise Http404()
return action
@property
def column_names(self):
raise NotImplementedError("you must return a list of column names")
@property
def pagination_context(self):
create_form = self.get_create_form()
return {
'pagination': {
'page': self.page,
'limit': self.limit,
'total': self.total,
'limit_options': list(range(self.DEFAULT_LIMIT, 51, self.DEFAULT_LIMIT)),
'column_names': self.column_names,
'num_columns': len(self.column_names),
'text': {
'limit': self.limit_text,
'empty': self.empty_notification,
'loading': self.loading_message,
'deleted_items': self.deleted_items_header,
'new_items': self.new_items_header,
},
'create_item_form': self.get_create_form_response(create_form) if create_form else None,
}
}
@property
def allowed_actions(self):
return [
'create',
'update',
'delete',
'paginate',
'refresh',
]
@property
def paginate_crud_response(self):
"""
Return this in the post method of your view class.
"""
response = getattr(self, '%s_response' % self.action)
return HttpResponse(json.dumps(response, cls=LazyEncoder))
@property
def create_response(self):
create_form = self.get_create_form()
new_item = None
if create_form.is_valid():
new_item = self.get_create_item_data(create_form)
create_form = self.get_create_form(is_blank=True)
return {
'newItem': new_item,
'form': self.get_create_form_response(create_form)
}
@property
def update_response(self):
update_form = self.get_update_form()
updated_item = None
if update_form.is_valid():
updated_item = self.get_updated_item_data(update_form)
return {
'updatedItem': updated_item,
'form': self.get_update_form_response(update_form),
}
@property
def refresh_response(self):
try:
self.refresh_item(self.item_id)
except PaginatedItemException as e:
return {
'error': _("<strong>Problem Refreshing List:</strong> %s") % e,
}
return {
'success': True,
'currentPage': self.page,
'total': self.total,
'paginatedList': list(self.paginated_list),
}
@property
def delete_response(self):
try:
response = self.get_deleted_item_data(self.item_id)
return {
'deletedItem': response
}
except PaginatedItemException as e:
return {
'error': _("<strong>Problem Deleting:</strong> %s") % e,
}
@property
def item_id(self):
try:
return self.parameters['itemId']
except KeyError:
raise PaginatedItemException(_("The item's ID was not passed to the server."))
@property
def paginate_response(self):
return {
'success': True,
'currentPage': self.page,
'total': self.total,
'paginatedList': list(self.paginated_list),
}
@property
def paginated_list(self):
"""
This should return a list (or generator object) of data formatted as follows:
[
{
'itemData': {
'id': <id of item>,
<json dict of item data for the knockout model to use>
},
'template': <knockout template id>
}
]
"""
raise NotImplementedError("Return a list of data for the request response.")
def get_create_form(self, is_blank=False):
"""
This should be a crispy form that creates an item.
It's not required if you just want a paginated view.
"""
pass
def get_create_form_response(self, create_form):
return render_to_string(
'hqwebapp/includes/create_item_form.html', {
'form': create_form
}
)
def get_update_form(self, initial_data=None):
raise NotImplementedError("You must return a form object that will update an Item")
def get_update_form_response(self, update_form):
return render_to_string(
'hqwebapp/partials/update_item_form.html', {
'form': update_form
}
)
def refresh_item(self, item_id):
"""
Process the item that triggered a list refresh here.
"""
raise NotImplementedError("You must implement refresh_item")
def get_create_item_data(self, create_form):
"""
This should return a dict of data for the created item.
{
'itemData': {
'id': <id of item>,
<json dict of item data for the knockout model to use>
},
'template': <knockout template id>
}
"""
raise NotImplementedError("You must implement get_new_item_data")
def get_updated_item_data(self, update_form):
"""
This should return a dict of data for the updated item.
{
'itemData': {
'id': <id of item>,
<json dict of item data for the knockout model to use>
},
'template': <knockout template id>
}
"""
raise NotImplementedError("You must implement get_updated_item_data")
def get_deleted_item_data(self, item_id):
"""
This should return a dict of data for the deleted item.
{
'itemData': {
'id': <id of item>,
<json dict of item data for the knockout model to use>
},
'template': <knockout template id>
}
"""
raise NotImplementedError("You must implement get_deleted_item_data")
@login_required
def quick_find(request):
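"""
Look up a document or object by its ID across the Couch databases, SQL
forms/cases, and selected Django models. GET parameters: ``q`` (the ID to
look up) and ``redirect`` (pass ``false`` to return the doc info as JSON
instead of redirecting to the matching page).
"""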
query = request.GET.get('q')
redirect = request.GET.get('redirect') != 'false'
if not query:
return HttpResponseBadRequest('GET param "q" must be provided')
def deal_with_doc(doc, domain, doc_info_fn):
if request.couch_user.is_superuser or (domain and request.couch_user.is_member_of(domain)):
doc_info = doc_info_fn(doc)
else:
raise Http404()
if redirect and doc_info.link:
messages.info(request, _("We've redirected you to the %s matching your query") % doc_info.type_display)
return HttpResponseRedirect(doc_info.link)
elif redirect and request.couch_user.is_superuser:
return HttpResponseRedirect('{}?id={}'.format(reverse('raw_doc'), doc.get('_id')))
else:
return json_response(doc_info)
couch_dbs = [None] + settings.COUCH_SETTINGS_HELPER.extra_db_names
for db_name in couch_dbs:
try:
doc = get_db(db_name).get(query)
except ResourceNotFound:
pass
else:
domain = doc.get('domain') or doc.get('domains', [None])[0]
doc_info_fn = functools.partial(get_doc_info, domain_hint=domain)
return deal_with_doc(doc, domain, doc_info_fn)
for accessor in (FormAccessorSQL.get_form, CaseAccessorSQL.get_case):
try:
doc = accessor(query)
except (XFormNotFound, CaseNotFound):
pass
else:
domain = doc.domain
return deal_with_doc(doc, domain, get_object_info)
for django_model in (SQLLocation,):
try:
if hasattr(django_model, 'by_id') and callable(django_model.by_id):
doc = django_model.by_id(query)
else:
doc = django_model.objects.get(pk=query)
except django_model.DoesNotExist:
continue
else:
if doc is None:
continue
domain = doc.domain
return deal_with_doc(doc, domain, get_object_info)
raise Http404()
def osdd(request, template='osdd.xml'):
response = render(request, template, {
'url_base': get_url_base(),
'env': get_environment_friendly_name()
})
response['Content-Type'] = 'application/xml'
return response
class MaintenanceAlertsView(BasePageView):
urlname = 'alerts'
page_title = ugettext_noop("Maintenance Alerts")
template_name = 'hqwebapp/maintenance_alerts.html'
@method_decorator(require_superuser)
def dispatch(self, request, *args, **kwargs):
return super(MaintenanceAlertsView, self).dispatch(request, *args, **kwargs)
@property
def page_context(self):
from corehq.apps.hqwebapp.models import MaintenanceAlert
return {
'alerts': [{
'created': six.text_type(alert.created),
'active': alert.active,
'html': alert.html,
'id': alert.id,
'domains': ", ".join(alert.domains) if alert.domains else "All domains",
} for alert in MaintenanceAlert.objects.order_by('-active', '-created')[:5]]
}
@property
def page_url(self):
return reverse(self.urlname)
@require_POST
@require_superuser
def create_alert(request):
from corehq.apps.hqwebapp.models import MaintenanceAlert
alert_text = request.POST.get('alert_text')
domains = request.POST.get('domains', '').split() or None
MaintenanceAlert(active=False, text=alert_text, domains=domains).save()
return HttpResponseRedirect(reverse('alerts'))
@require_POST
@require_superuser
def activate_alert(request):
from corehq.apps.hqwebapp.models import MaintenanceAlert
ma = MaintenanceAlert.objects.get(id=request.POST.get('alert_id'))
ma.active = True
ma.save()
return HttpResponseRedirect(reverse('alerts'))
@require_POST
@require_superuser
def deactivate_alert(request):
from corehq.apps.hqwebapp.models import MaintenanceAlert
ma = MaintenanceAlert.objects.get(id=request.POST.get('alert_id'))
ma.active = False
ma.save()
return HttpResponseRedirect(reverse('alerts'))
def redirect_to_dimagi(endpoint):
def _redirect(request, lang_code=None):
if settings.SERVER_ENVIRONMENT in [
'production',
'india',
'staging',
'changeme',
'localdev',
]:
return HttpResponsePermanentRedirect(
"https://www.dimagi.com/{}{}".format(
endpoint,
"?lang={}".format(lang_code) if lang_code else "",
)
)
return redirect_to_default(request)
return _redirect
def temporary_google_verify(request):
# will remove once google search console verify process completes
# BMB 4/20/18
return render(request, "google9633af922b8b0064.html")
| |
brb_orswot.rs
|
use crdts::{orswot, CmRDT};
use std::cmp::Ordering;
use std::{fmt::Debug, hash::Hash};
use brb::{Actor, BRBDataType};
use serde::Serialize;
#[derive(Debug, Serialize, PartialEq, Eq, Clone)]
pub struct BRBOrswot<M: Clone + Eq + Debug + Hash + Serialize> {
actor: Actor,
orswot: orswot::Orswot<M, Actor>,
}
impl<M: Clone + Eq + Debug + Hash + Serialize> BRBOrswot<M> {
pub fn add(&self, member: M) -> orswot::Op<M, Actor> {
let add_ctx = self.orswot.read_ctx().derive_add_ctx(self.actor);
self.orswot.add(member, add_ctx)
}
pub fn rm(&self, member: M) -> orswot::Op<M, Actor> {
let rm_ctx = self.orswot.read_ctx().derive_rm_ctx();
self.orswot.rm(member, rm_ctx)
}
pub fn contains(&self, member: &M) -> bool {
self.orswot.contains(member).val
}
pub fn actor(&self) -> &Actor {
&self.actor
}
pub fn orswot(&self) -> &orswot::Orswot<M, Actor> {
&self.orswot
}
}
impl<M: Clone + Eq + Debug + Hash + Serialize> BRBDataType for BRBOrswot<M> {
type Op = orswot::Op<M, Actor>;
fn new(actor: Actor) -> Self {
BRBOrswot {
actor,
orswot: orswot::Orswot::new(),
}
}
fn validate(&self, from: &Actor, op: &Self::Op) -> bool {
match op {
orswot::Op::Add { dot, members: _ } => {
if &dot.actor != from {
println!(
"[ORSWOT/INVALID] Attempting to add with a dot different from the source proc"
);
false
} else {
true
}
}
orswot::Op::Rm { clock, members } => {
if members.len() != 1 {
println!("[ORSWOT/INVALID] We only support removes of a single element");
false
} else if matches!(
clock.partial_cmp(&self.orswot.clock()),
None | Some(Ordering::Greater)
) {
// NOTE: this check renders all the "deferred_remove" logic in the ORSWOT obsolete.
// The deferred removes would buffer these out-of-order removes.
println!("[ORSWOT/INVALID] This rm op is removing data we have not yet seen");
false
} else {
true
}
}
}
}
fn apply(&mut self, op: Self::Op)
|
}
|
{
self.orswot.apply(op);
}
|
check_unsafety.rs
|
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::struct_span_err;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::intravisit;
use rustc_hir::Node;
use rustc_middle::mir::visit::{MutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
use rustc_middle::ty::cast::CastTy;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, TyCtxt};
use rustc_session::lint::builtin::{SAFE_PACKED_BORROWS, UNUSED_UNSAFE};
use rustc_span::symbol::{sym, Symbol};
use std::ops::Bound;
use crate::const_eval::{is_const_fn, is_min_const_fn};
use crate::util;
pub struct UnsafetyChecker<'a, 'tcx> {
body: &'a Body<'tcx>,
body_did: LocalDefId,
const_context: bool,
min_const_fn: bool,
violations: Vec<UnsafetyViolation>,
source_info: SourceInfo,
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
/// Mark an `unsafe` block as used, so we don't lint it.
used_unsafe: FxHashSet<hir::HirId>,
inherited_blocks: Vec<(hir::HirId, bool)>,
}
impl<'a, 'tcx> UnsafetyChecker<'a, 'tcx> {
fn new(
const_context: bool,
min_const_fn: bool,
body: &'a Body<'tcx>,
body_did: LocalDefId,
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> Self {
// sanity check
if min_const_fn {
assert!(const_context);
}
Self {
body,
body_did,
const_context,
min_const_fn,
violations: vec![],
source_info: SourceInfo::outermost(body.span),
tcx,
param_env,
used_unsafe: Default::default(),
inherited_blocks: vec![],
}
}
}
impl<'a, 'tcx> Visitor<'tcx> for UnsafetyChecker<'a, 'tcx> {
fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
self.source_info = terminator.source_info;
match terminator.kind {
TerminatorKind::Goto { .. }
| TerminatorKind::SwitchInt { .. }
| TerminatorKind::Drop { .. }
| TerminatorKind::Yield { .. }
| TerminatorKind::Assert { .. }
| TerminatorKind::DropAndReplace { .. }
| TerminatorKind::GeneratorDrop
| TerminatorKind::Resume
| TerminatorKind::Abort
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::FalseEdges { .. }
| TerminatorKind::FalseUnwind { .. } => {
// safe (at least as emitted during MIR construction)
}
TerminatorKind::Call { ref func, .. } => {
let func_ty = func.ty(self.body, self.tcx);
let sig = func_ty.fn_sig(self.tcx);
if let hir::Unsafety::Unsafe = sig.unsafety() {
self.require_unsafe(
"call to unsafe function",
"consult the function's documentation for information on how to avoid \
undefined behavior",
UnsafetyViolationKind::GeneralAndConstFn,
)
}
if let ty::FnDef(func_id, _) = func_ty.kind {
self.check_target_features(func_id);
}
}
}
self.super_terminator(terminator, location);
}
fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
self.source_info = statement.source_info;
match statement.kind {
StatementKind::Assign(..)
| StatementKind::FakeRead(..)
| StatementKind::SetDiscriminant { .. }
| StatementKind::StorageLive(..)
| StatementKind::StorageDead(..)
| StatementKind::Retag { .. }
| StatementKind::AscribeUserType(..)
| StatementKind::Nop => {
// safe (at least as emitted during MIR construction)
}
StatementKind::LlvmInlineAsm { .. } => self.require_unsafe(
"use of inline assembly",
"inline assembly is entirely unchecked and can cause undefined behavior",
UnsafetyViolationKind::General,
),
}
self.super_statement(statement, location);
}
fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
match rvalue {
Rvalue::Aggregate(box ref aggregate, _) => match aggregate {
&AggregateKind::Array(..) | &AggregateKind::Tuple => {}
&AggregateKind::Adt(ref def, ..) => {
match self.tcx.layout_scalar_valid_range(def.did) {
(Bound::Unbounded, Bound::Unbounded) => {}
_ => self.require_unsafe(
"initializing type with `rustc_layout_scalar_valid_range` attr",
"initializing a layout restricted type's field with a value \
outside the valid range is undefined behavior",
UnsafetyViolationKind::GeneralAndConstFn,
),
}
}
&AggregateKind::Closure(def_id, _) | &AggregateKind::Generator(def_id, _, _) => {
let UnsafetyCheckResult { violations, unsafe_blocks } =
self.tcx.unsafety_check_result(def_id.expect_local());
self.register_violations(&violations, &unsafe_blocks);
}
},
// casting pointers to ints is unsafe in const fn because the const evaluator cannot
// possibly know what the result of various operations like `address / 2` would be.
// Pointers during const evaluation have no integral address, only an abstract one.
Rvalue::Cast(CastKind::Misc, ref operand, cast_ty)
if self.const_context && self.tcx.features().const_raw_ptr_to_usize_cast =>
{
let operand_ty = operand.ty(self.body, self.tcx);
let cast_in = CastTy::from_ty(operand_ty).expect("bad input type for cast");
let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
match (cast_in, cast_out) {
(CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Int(_)) => {
self.require_unsafe(
"cast of pointer to int",
"casting pointers to integers in constants",
UnsafetyViolationKind::General,
);
}
_ => {}
}
}
// raw pointer and fn pointer operations are unsafe as it is not clear whether one
// pointer would be "less" or "equal" to another, because we cannot know where llvm
// or the linker will place various statics in memory. Without this information the
// result of a comparison of addresses would differ between runtime and compile-time.
Rvalue::BinaryOp(_, ref lhs, _)
if self.const_context && self.tcx.features().const_compare_raw_pointers =>
{
if let ty::RawPtr(_) | ty::FnPtr(..) = lhs.ty(self.body, self.tcx).kind {
self.require_unsafe(
"pointer operation",
"operations on pointers in constants",
UnsafetyViolationKind::General,
);
}
}
_ => {}
}
self.super_rvalue(rvalue, location);
}
fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, _location: Location) {
// On types with `scalar_valid_range`, prevent
// * `&mut x.field`
// * `x.field = y;`
// * `&x.field` if `field`'s type has interior mutability
// because either of these would allow modifying the layout constrained field and
// insert values that violate the layout constraints.
if context.is_mutating_use() || context.is_borrow() {
self.check_mut_borrowing_layout_constrained_field(*place, context.is_mutating_use());
}
if context.is_borrow() {
if util::is_disaligned(self.tcx, self.body, self.param_env, *place) {
let source_info = self.source_info;
let lint_root = self.body.source_scopes[source_info.scope]
.local_data
.as_ref()
.assert_crate_local()
.lint_root;
self.require_unsafe(
"borrow of packed field",
"fields of packed structs might be misaligned: dereferencing a \
misaligned pointer or even just creating a misaligned reference \
is undefined behavior",
UnsafetyViolationKind::BorrowPacked(lint_root),
);
}
}
for (i, elem) in place.projection.iter().enumerate() {
let proj_base = &place.projection[..i];
let old_source_info = self.source_info;
if let [] = proj_base {
let decl = &self.body.local_decls[place.local];
if decl.internal {
if let Some(box LocalInfo::StaticRef { def_id, .. }) = decl.local_info
|
else {
// Internal locals are used in the `move_val_init` desugaring.
// We want to check unsafety against the source info of the
// desugaring, rather than the source info of the RHS.
self.source_info = self.body.local_decls[place.local].source_info;
}
}
}
let base_ty = Place::ty_from(place.local, proj_base, self.body, self.tcx).ty;
match base_ty.kind {
ty::RawPtr(..) => self.require_unsafe(
"dereference of raw pointer",
"raw pointers may be NULL, dangling or unaligned; they can violate \
aliasing rules and cause data races: all of these are undefined \
behavior",
UnsafetyViolationKind::General,
),
ty::Adt(adt, _) => {
if adt.is_union() {
if context == PlaceContext::MutatingUse(MutatingUseContext::Store)
|| context == PlaceContext::MutatingUse(MutatingUseContext::Drop)
|| context == PlaceContext::MutatingUse(MutatingUseContext::AsmOutput)
{
let elem_ty = match elem {
ProjectionElem::Field(_, ty) => ty,
_ => span_bug!(
self.source_info.span,
"non-field projection {:?} from union?",
place
),
};
if !elem_ty.is_copy_modulo_regions(
self.tcx,
self.param_env,
self.source_info.span,
) {
self.require_unsafe(
"assignment to non-`Copy` union field",
"the previous content of the field will be dropped, which \
causes undefined behavior if the field was not properly \
initialized",
UnsafetyViolationKind::GeneralAndConstFn,
)
} else {
// write to non-move union, safe
}
} else {
self.require_unsafe(
"access to union field",
"the field may not be properly initialized: using \
uninitialized data will cause undefined behavior",
UnsafetyViolationKind::GeneralAndConstFn,
)
}
}
}
_ => {}
}
self.source_info = old_source_info;
}
}
}
impl<'a, 'tcx> UnsafetyChecker<'a, 'tcx> {
fn require_unsafe(
&mut self,
description: &'static str,
details: &'static str,
kind: UnsafetyViolationKind,
) {
let source_info = self.source_info;
self.register_violations(
&[UnsafetyViolation {
source_info,
description: Symbol::intern(description),
details: Symbol::intern(details),
kind,
}],
&[],
);
}
fn register_violations(
&mut self,
violations: &[UnsafetyViolation],
unsafe_blocks: &[(hir::HirId, bool)],
) {
let safety = self.body.source_scopes[self.source_info.scope]
.local_data
.as_ref()
.assert_crate_local()
.safety;
let within_unsafe = match safety {
// `unsafe` blocks are required in safe code
Safety::Safe => {
for violation in violations {
let mut violation = *violation;
match violation.kind {
UnsafetyViolationKind::GeneralAndConstFn
| UnsafetyViolationKind::General => {}
UnsafetyViolationKind::BorrowPacked(_) => {
if self.min_const_fn {
// const fns don't need to be backwards compatible and can
// emit these violations as a hard error instead of a backwards
// compat lint
violation.kind = UnsafetyViolationKind::General;
}
}
}
if !self.violations.contains(&violation) {
self.violations.push(violation)
}
}
false
}
// `unsafe` function bodies allow unsafe without additional unsafe blocks
Safety::BuiltinUnsafe | Safety::FnUnsafe => true,
Safety::ExplicitUnsafe(hir_id) => {
// mark unsafe block as used if there are any unsafe operations inside
if !violations.is_empty() {
self.used_unsafe.insert(hir_id);
}
// only some unsafety is allowed in const fn
if self.min_const_fn {
for violation in violations {
match violation.kind {
// these unsafe things are stable in const fn
UnsafetyViolationKind::GeneralAndConstFn => {}
// these things are forbidden in const fns
UnsafetyViolationKind::General
| UnsafetyViolationKind::BorrowPacked(_) => {
let mut violation = *violation;
// const fns don't need to be backwards compatible and can
// emit these violations as a hard error instead of a backwards
// compat lint
violation.kind = UnsafetyViolationKind::General;
if !self.violations.contains(&violation) {
self.violations.push(violation)
}
}
}
}
}
true
}
};
self.inherited_blocks.extend(
unsafe_blocks.iter().map(|&(hir_id, is_used)| (hir_id, is_used && !within_unsafe)),
);
}
fn check_mut_borrowing_layout_constrained_field(
&mut self,
place: Place<'tcx>,
is_mut_use: bool,
) {
let mut cursor = place.projection.as_ref();
while let &[ref proj_base @ .., elem] = cursor {
cursor = proj_base;
match elem {
// Modifications behind a dereference don't affect the value of
// the pointer.
ProjectionElem::Deref => return,
ProjectionElem::Field(..) => {
let ty =
Place::ty_from(place.local, proj_base, &self.body.local_decls, self.tcx).ty;
if let ty::Adt(def, _) = ty.kind {
if self.tcx.layout_scalar_valid_range(def.did)
!= (Bound::Unbounded, Bound::Unbounded)
{
let (description, details) = if is_mut_use {
(
"mutation of layout constrained field",
"mutating layout constrained fields cannot statically be \
checked for valid values",
)
// Check `is_freeze` as late as possible to avoid cycle errors
// with opaque types.
} else if !place.ty(self.body, self.tcx).ty.is_freeze(
self.tcx,
self.param_env,
self.source_info.span,
) {
(
"borrow of layout constrained field with interior \
mutability",
"references to fields of layout constrained fields \
lose the constraints. Coupled with interior mutability, \
the field can be changed to invalid values",
)
} else {
continue;
};
self.require_unsafe(
description,
details,
UnsafetyViolationKind::GeneralAndConstFn,
);
}
}
}
_ => {}
}
}
}
/// Checks whether calling `func_did` needs an `unsafe` context or not, i.e. whether
/// the called function has target features the calling function hasn't.
fn check_target_features(&mut self, func_did: DefId) {
let callee_features = &self.tcx.codegen_fn_attrs(func_did).target_features;
let self_features = &self.tcx.codegen_fn_attrs(self.body_did).target_features;
// Is `callee_features` a subset of `calling_features`?
if !callee_features.iter().all(|feature| self_features.contains(feature)) {
self.require_unsafe(
"call to function with `#[target_feature]`",
"can only be called if the required target features are available",
UnsafetyViolationKind::GeneralAndConstFn,
)
}
}
}
pub(crate) fn provide(providers: &mut Providers<'_>) {
*providers = Providers { unsafety_check_result, unsafe_derive_on_repr_packed, ..*providers };
}
struct UnusedUnsafeVisitor<'a> {
used_unsafe: &'a FxHashSet<hir::HirId>,
unsafe_blocks: &'a mut Vec<(hir::HirId, bool)>,
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for UnusedUnsafeVisitor<'a> {
type Map = intravisit::ErasedMap<'tcx>;
fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::None
}
fn visit_block(&mut self, block: &'tcx hir::Block<'tcx>) {
intravisit::walk_block(self, block);
if let hir::BlockCheckMode::UnsafeBlock(hir::UnsafeSource::UserProvided) = block.rules {
self.unsafe_blocks.push((block.hir_id, self.used_unsafe.contains(&block.hir_id)));
}
}
}
fn check_unused_unsafe(
tcx: TyCtxt<'_>,
def_id: LocalDefId,
used_unsafe: &FxHashSet<hir::HirId>,
unsafe_blocks: &mut Vec<(hir::HirId, bool)>,
) {
let body_id = tcx.hir().maybe_body_owned_by(tcx.hir().as_local_hir_id(def_id));
let body_id = match body_id {
Some(body) => body,
None => {
debug!("check_unused_unsafe({:?}) - no body found", def_id);
return;
}
};
let body = tcx.hir().body(body_id);
debug!("check_unused_unsafe({:?}, body={:?}, used_unsafe={:?})", def_id, body, used_unsafe);
let mut visitor = UnusedUnsafeVisitor { used_unsafe, unsafe_blocks };
intravisit::Visitor::visit_body(&mut visitor, body);
}
fn unsafety_check_result(tcx: TyCtxt<'_>, def_id: LocalDefId) -> UnsafetyCheckResult {
debug!("unsafety_violations({:?})", def_id);
// N.B., this borrow is valid because all the consumers of
// `mir_built` force this.
let body = &tcx.mir_built(def_id).borrow();
let param_env = tcx.param_env(def_id);
let id = tcx.hir().as_local_hir_id(def_id);
let (const_context, min_const_fn) = match tcx.hir().body_owner_kind(id) {
hir::BodyOwnerKind::Closure => (false, false),
hir::BodyOwnerKind::Fn => {
(is_const_fn(tcx, def_id.to_def_id()), is_min_const_fn(tcx, def_id.to_def_id()))
}
hir::BodyOwnerKind::Const | hir::BodyOwnerKind::Static(_) => (true, false),
};
let mut checker =
UnsafetyChecker::new(const_context, min_const_fn, body, def_id, tcx, param_env);
checker.visit_body(&body);
check_unused_unsafe(tcx, def_id, &checker.used_unsafe, &mut checker.inherited_blocks);
UnsafetyCheckResult {
violations: checker.violations.into(),
unsafe_blocks: checker.inherited_blocks.into(),
}
}
fn unsafe_derive_on_repr_packed(tcx: TyCtxt<'_>, def_id: DefId) {
let lint_hir_id = tcx.hir().as_local_hir_id(def_id.expect_local());
tcx.struct_span_lint_hir(SAFE_PACKED_BORROWS, lint_hir_id, tcx.def_span(def_id), |lint| {
// FIXME: when we make this a hard error, this should have its
// own error code.
let message = if tcx.generics_of(def_id).own_requires_monomorphization() {
"`#[derive]` can't be used on a `#[repr(packed)]` struct with \
type or const parameters (error E0133)"
.to_string()
} else {
"`#[derive]` can't be used on a `#[repr(packed)]` struct that \
does not derive Copy (error E0133)"
.to_string()
};
lint.build(&message).emit()
});
}
/// Returns the kind ("block" or "fn") and `HirId` of an enclosing scope that is also `unsafe`, if any.
fn is_enclosed(
tcx: TyCtxt<'_>,
used_unsafe: &FxHashSet<hir::HirId>,
id: hir::HirId,
) -> Option<(String, hir::HirId)> {
let parent_id = tcx.hir().get_parent_node(id);
if parent_id != id {
if used_unsafe.contains(&parent_id) {
Some(("block".to_string(), parent_id))
} else if let Some(Node::Item(&hir::Item {
kind: hir::ItemKind::Fn(ref sig, _, _), ..
})) = tcx.hir().find(parent_id)
{
match sig.header.unsafety {
hir::Unsafety::Unsafe => Some(("fn".to_string(), parent_id)),
hir::Unsafety::Normal => None,
}
} else {
is_enclosed(tcx, used_unsafe, parent_id)
}
} else {
None
}
}
fn report_unused_unsafe(tcx: TyCtxt<'_>, used_unsafe: &FxHashSet<hir::HirId>, id: hir::HirId) {
let span = tcx.sess.source_map().guess_head_span(tcx.hir().span(id));
tcx.struct_span_lint_hir(UNUSED_UNSAFE, id, span, |lint| {
let msg = "unnecessary `unsafe` block";
let mut db = lint.build(msg);
db.span_label(span, msg);
if let Some((kind, id)) = is_enclosed(tcx, used_unsafe, id) {
db.span_label(
tcx.sess.source_map().guess_head_span(tcx.hir().span(id)),
format!("because it's nested under this `unsafe` {}", kind),
);
}
db.emit();
});
}
fn builtin_derive_def_id(tcx: TyCtxt<'_>, def_id: DefId) -> Option<DefId> {
debug!("builtin_derive_def_id({:?})", def_id);
if let Some(impl_def_id) = tcx.impl_of_method(def_id) {
if tcx.has_attr(impl_def_id, sym::automatically_derived) {
debug!("builtin_derive_def_id({:?}) - is {:?}", def_id, impl_def_id);
Some(impl_def_id)
} else {
debug!("builtin_derive_def_id({:?}) - not automatically derived", def_id);
None
}
} else {
debug!("builtin_derive_def_id({:?}) - not a method", def_id);
None
}
}
pub fn check_unsafety(tcx: TyCtxt<'_>, def_id: DefId) {
debug!("check_unsafety({:?})", def_id);
// closures are handled by their parent fn.
if tcx.is_closure(def_id) {
return;
}
let UnsafetyCheckResult { violations, unsafe_blocks } =
tcx.unsafety_check_result(def_id.expect_local());
for &UnsafetyViolation { source_info, description, details, kind } in violations.iter() {
// Report an error.
match kind {
UnsafetyViolationKind::GeneralAndConstFn | UnsafetyViolationKind::General => {
struct_span_err!(
tcx.sess,
source_info.span,
E0133,
"{} is unsafe and requires unsafe function or block",
description
)
.span_label(source_info.span, &*description.as_str())
.note(&details.as_str())
.emit();
}
UnsafetyViolationKind::BorrowPacked(lint_hir_id) => {
if let Some(impl_def_id) = builtin_derive_def_id(tcx, def_id) {
tcx.ensure().unsafe_derive_on_repr_packed(impl_def_id);
} else {
tcx.struct_span_lint_hir(
SAFE_PACKED_BORROWS,
lint_hir_id,
source_info.span,
|lint| {
lint.build(&format!(
"{} is unsafe and requires unsafe function or block (error E0133)",
description
))
.note(&details.as_str())
.emit()
},
)
}
}
}
}
let (mut unsafe_used, mut unsafe_unused): (FxHashSet<_>, Vec<_>) = Default::default();
for &(block_id, is_used) in unsafe_blocks.iter() {
if is_used {
unsafe_used.insert(block_id);
} else {
unsafe_unused.push(block_id);
}
}
// The unused unsafe blocks might not be in source order; sort them so that the unused unsafe
// error messages are properly aligned and the issue-45107 and lint-unused-unsafe tests pass.
unsafe_unused.sort_by_cached_key(|hir_id| tcx.hir().span(*hir_id));
for &block_id in &unsafe_unused {
report_unused_unsafe(tcx, &unsafe_used, block_id);
}
}
|
{
if self.tcx.is_mutable_static(def_id) {
self.require_unsafe(
"use of mutable static",
"mutable statics can be mutated by multiple threads: aliasing \
violations or data races will cause undefined behavior",
UnsafetyViolationKind::General,
);
return;
} else if self.tcx.is_foreign_item(def_id) {
self.require_unsafe(
"use of extern static",
"extern statics are not controlled by the Rust type system: \
invalid data, aliasing violations or data races will cause \
undefined behavior",
UnsafetyViolationKind::General,
);
return;
}
}
|
simple-datepicker.js
|
define(["exports", "../../@polymer/polymer/polymer-element.js", "../hax-body-behaviors/lib/HAXWiring.js", "../hax-iconset/hax-iconset.js", "../../@polymer/paper-input/paper-input.js", "../../@polymer/paper-button/paper-button.js", "../../@polymer/iron-icon/iron-icon.js", "../../@polymer/iron-icons/av-icons.js"], function (_exports, _polymerElement, _HAXWiring, _haxIconset, _paperInput, _paperButton, _ironIcon, _avIcons) {
"use strict";
Object.defineProperty(_exports, "__esModule", {
value: true
});
_exports.SimpleDatepicker = void 0;
function _templateObject_2810a040c3a111e99204fd28bf05b7e9() {
var data = babelHelpers.taggedTemplateLiteral(["\n <style>\n :host {\n display: block;\n }\n :host([hidden]) {\n display: none;\n }\n :host paper-button {\n padding: 5px;\n margin: 0;\n cursor: pointer;\n border-radius: 0;\n min-width: 30px;\n }\n :host #calendar {\n font-size: 12px;\n border-collapse: collapse;\n }\n :host #calendar caption {\n padding: 0;\n }\n :host #calendar caption div {\n display: flex;\n justify-content: space-between;\n align-items: center;\n }\n :host #calendar caption paper-button {\n flex: 0 1 auto;\n --iron-icon-width: 16px;\n --iron-icon-height: 16px;\n }\n :host #calendarlabel {\n flex: 1 1 auto;\n text-align: center;\n margin: 0 5px;\n }\n :host #calendarlabel p {\n width: 100%;\n margin: 0;\n }\n :host #calendar {\n border: var(--simple-datepicker-calendar-border, 1px solid black);\n }\n :host #calendar,\n :host #calendar th,\n :host #calendar td {\n border: var(--simple-datepicker-calendar-days-border, none);\n }\n :host #calendar th {\n padding: 2px;\n }\n :host #calendar td {\n padding: 0;\n }\n :host #calendar td paper-button {\n width: 100%;\n height: 30px;\n cursor: pointer;\n }\n </style>\n <paper-input\n id=\"dateinput\"\n label$=\"[[label]]\"\n slot=\"heading\"\n value$=\"{{value}}\"\n type=\"date\"\n >\n <paper-button\n id=\"expand\"\n controls=\"content\"\n label=\"toggle datepicker\"\n tooltip=\"toggle datepicker\"\n slot=\"suffix\"\n >\n <iron-icon icon=\"hax:calendar\"></iron-icon>\n </paper-button>\n </paper-input>\n <div id=\"content\" role=\"application\">\n <table id=\"calendar\">\n <caption>\n <div>\n <paper-button\n controls=\"calendar\"\n label=\"previous year\"\n on-tap=\"prevYear\"\n >\n <iron-icon icon=\"av:fast-rewind\"></iron-icon>\n </paper-button>\n <paper-button\n controls=\"calendar\"\n label=\"previous month\"\n on-tap=\"prevMonth\"\n >\n <iron-icon icon=\"hax:arrow-left\"></iron-icon>\n </paper-button>\n <div id=\"calendarlabel\"><p>[[__calendarLabel]]</p></div>\n <paper-button\n controls=\"calendar\"\n label=\"next month\"\n on-tap=\"nextMonth\"\n >\n <iron-icon icon=\"hax:arrow-right\"></iron-icon>\n </paper-button>\n <paper-button\n controls=\"calendar\"\n label=\"next year\"\n on-tap=\"nextYear\"\n >\n <iron-icon\n icon=\"av:fast-forward\"\n controls=\"calendar\"\n ></iron-icon>\n </paper-button>\n </div>\n </caption>\n <thead>\n <tr>\n <template is=\"dom-repeat\" items=\"[[weekdays]]\" as=\"weekday\">\n <th scope=\"col\">[[weekday]]</th>\n </template>\n </tr>\n </thead>\n <tbody>\n <template is=\"dom-repeat\" items=\"[[__calendar]]\" as=\"week\" restamp>\n <tr>\n <template is=\"dom-repeat\" items=\"[[week]]\" as=\"day\" restamp>\n <td scope=\"row\">\n <paper-button\n class=\"day\"\n controls=\"dateinput\"\n day$=\"[[day]]\"\n disabled$=\"[[!disabled]]\"\n hidden$=\"[[!day]]\"\n >\n [[day]]\n </paper-button>\n </td>\n </template>\n </tr>\n </template>\n </tbody>\n </table>\n </div>\n "]);
_templateObject_2810a040c3a111e99204fd28bf05b7e9 = function _templateObject_2810a040c3a111e99204fd28bf05b7e9() {
return data;
};
return data;
}
/**
* `simple-datepicker`
* `a simple datepicker field`
*
* @microcopy - language worth noting:
* -
*
* @customElement
* @polymer
* @demo demo/index.html
*/
var SimpleDatepicker =
/*#__PURE__*/
function (_PolymerElement) {
babelHelpers.inherits(SimpleDatepicker, _PolymerElement);
function SimpleDatepicker() {
babelHelpers.classCallCheck(this, SimpleDatepicker);
return babelHelpers.possibleConstructorReturn(this, babelHelpers.getPrototypeOf(SimpleDatepicker).apply(this, arguments));
}
babelHelpers.createClass(SimpleDatepicker, [{
key: "connectedCallback",
/**
* life cycle, element is afixed to the DOM
*/
value: function connectedCallback() {
babelHelpers.get(babelHelpers.getPrototypeOf(SimpleDatepicker.prototype), "connectedCallback", this).call(this);
this.HAXWiring = new _HAXWiring.HAXWiring();
this.HAXWiring.setup(SimpleDatepicker.haxProperties, SimpleDatepicker.tag, this);
}
}, {
key: "nextMonth",
value: function nextMonth() {
var date = new Date(this.__calendarDate),
month = date.getMonth(),
year = date.getFullYear();
if (month < 11) {
date.setMonth(month + 1);
} else {
date.setMonth(0);
date.setYear(year + 1);
}
this.__calendarDate = date.toString();
}
}, {
key: "prevMonth",
value: function prevMonth() {
var date = new Date(this.__calendarDate),
month = date.getMonth(),
year = date.getFullYear();
if (month > 0) {
date.setMonth(month - 1);
} else {
date.setMonth(11);
date.setYear(year - 1);
}
this.__calendarDate = date.toString();
}
}, {
key: "nextYear",
value: function nextYear() {
var date = new Date(this.__calendarDate),
year = date.getFullYear();
date.setYear(year + 1);
this.__calendarDate = date.toString();
}
}, {
key: "prevYear",
value: function prevYear() {
var date = new Date(this.__calendarDate),
year = date.getFullYear();
date.setYear(year - 1);
this.__calendarDate = date.toString();
}
}, {
key: "updateCalendar",
value: function updateCalendar(__calendarDate) {
var label = this.shadowRoot ? this.shadowRoot.querySelector("#calendarlabel > p") : null,
date = new Date(__calendarDate),
month = this.monthNames[date.getMonth()],
year = date.getFullYear();
if (label) label.innerHTML = "".concat(month, " ").concat(year);
return "".concat(month, " ").concat(year);
}
}, {
key: "_getCalendarDate",
value: function _getCalendarDate(value) {
var date = value ? new Date(value) : new Date();
this.updateCalendar(date);
return date.toString();
}
}, {
key: "_getCalendar",
value: function _getCalendar(__calendarDate) {
var first = new Date(__calendarDate),
last = new Date(__calendarDate),
weeks = [],
start,
end,
cells,
rows;
first.setDate(1);
// last day of the current month: jump to day 0 of the following month
last.setMonth(last.getMonth() + 1, 0);
start = first.getDay();
end = 6 - last.getDay();
cells = start + end + last.getDate();
rows = cells / 7;
for (var i = 0; i < rows; i++) {
weeks[i] = [];
for (var j = 0; j < 7; j++) {
var cell = j + i * 7,
day = 1 + cell - start;
// slots before the 1st and after the last day of the month stay empty
weeks[i][j] = day < 1 || day > last.getDate() ? false : day;
}
}
return weeks;
}
/**
* life cycle, element is removed from the DOM
*/
//disconnectedCallback() {}
}], [{
key: "template",
// render function
get: function get() {
return (0, _polymerElement.html)(_templateObject_2810a040c3a111e99204fd28bf05b7e9());
} // haxProperty definition
}, {
key: "haxProperties",
get: function get() {
return {
canScale: true,
canPosition: true,
canEditSource: false,
gizmo: {
title: "Simple datepicker",
description: "a simple datepicker field",
icon: "hax:calendar",
color: "green",
groups: ["Datepicker"],
handles: [{
type: "todo:read-the-docs-for-usage"
}],
meta: {
author: "nikkimk",
owner: "The Pennsylvania State University"
}
},
settings: {
quick: [],
configure: [],
advanced: []
}
};
} // properties available to the custom element for data binding
}, {
key: "properties",
get: function get() {
return {
dateFormat: {
name: "dateFormat",
type: String,
value: "mm-dd-yyyy"
},
monthNames: {
name: "monthNames",
type: Array,
value: ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
},
value: {
name: "value",
type: String,
value: null
},
weekStart: {
name: "weekStart",
type: Number,
value: 0
|
weekdays: {
name: "weekdays",
type: Array,
value: ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"]
},
__calendar: {
name: "__calendar",
type: Array,
computed: "_getCalendar(__calendarDate)"
},
__calendarDate: {
name: "__calendarDate",
type: String,
computed: "_getCalendarDate(value)"
},
__calendarLabel: {
name: "__calendarLabel",
type: String,
computed: "updateCalendar(__calendarDate)"
}
};
}
/**
* Store the tag name to make it easier to obtain directly.
* @notice function name must be here for tooling to operate correctly
*/
}, {
key: "tag",
get: function get() {
return "simple-datepicker";
}
}]);
return SimpleDatepicker;
}(_polymerElement.PolymerElement);
_exports.SimpleDatepicker = SimpleDatepicker;
window.customElements.define(SimpleDatepicker.tag, SimpleDatepicker);
});
|
},
|
test_bzr_helper.py
|
import sys
import subprocess
from unittest import TestCase
from unittest.mock import patch
diffview = sys.modules["DiffView"]
BzrHelper = diffview.util.vcs.BzrHelper
class test_BzrHelper(TestCase):
def setUp(self):
|
def test_init(self):
bzr_helper = BzrHelper('/repo/base')
self.assertFalse(bzr_helper.got_changed_files)
@patch('subprocess.Popen')
def test_file_versions(self, mocked_Popen):
bzr_helper = BzrHelper('/repo/base')
self.assertEqual(
bzr_helper.get_file_versions(''),
('last:1', ''))
self.assertEqual(
bzr_helper.get_file_versions('branch_name'),
('branch_name', ''))
self.assertEqual(
bzr_helper.get_file_versions('branch_name..'),
('branch_name', ''))
self.assertEqual(
bzr_helper.get_file_versions('branch_name..other_branch_name'),
('branch_name', 'other_branch_name'))
self.assertEqual(
bzr_helper.get_file_versions('..other_branch_name'),
('', 'other_branch_name'))
class DummyProcess(object):
"""Dummy process to return values from `communicate()`.
Set `ret_vals` to use.
"""
def communicate(self, *args, **kwargs):
return self.ret_vals.pop(0)
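# Illustrative only (the values below are made up): a test seeds `ret_vals`
# and hands the dummy to the patched Popen, e.g.
#     dummy = DummyProcess()
#     dummy.ret_vals = [(b'bzr output', b'')]
#     mocked_Popen.return_value = dummy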
|
self.dummy_process = DummyProcess()
|
serde_support.rs
|
use serde_test;
use std::prelude::v1::*;
use uuid::prelude::*;
//#[test]
pub fn test_serialize_readable() {
use serde_test::Configure;
let uuid_str = "f9168c5e-ceb2-4faa-b6bf-329bf39fa1e4";
let u = Uuid::parse_str(uuid_str).unwrap();
serde_test::assert_tokens(
&u.readable(),
&[serde_test::Token::Str(uuid_str)],
);
}
//#[test]
pub fn test_serialize_compact()
|
{
use serde_test::Configure;
let uuid_bytes = b"F9168C5E-CEB2-4F";
let u = Uuid::from_slice(uuid_bytes).unwrap();
serde_test::assert_tokens(
&u.compact(),
&[serde_test::Token::Bytes(uuid_bytes)],
);
}
|
|
json_remove 2.rs
|
//Copyright 2021-2023 WHTCORPS INC
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file File except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use super::super::Result;
use super::modifier::BinaryModifier;
use super::local_path_expr::local_pathExpression;
use super::{Json, JsonRef};
impl<'a> JsonRef<'a> {
/// Removes elements from Json,
/// All local_path expressions cannot contain * or ** wildcard.
/// If any error occurs, the input won't be changed.
pub fn remove(&self, local_path_expr_list: &[local_pathExpression]) -> Result<Json> {
if local_path_expr_list
.iter()
.any(|expr| expr.legs.is_empty() || expr.contains_any_asterisk())
{
return Err(box_err!("Invalid local_path expression"));
}
let mut res = self.to_owned();
for expr in local_path_expr_list {
let modifier = BinaryModifier::new(res.as_ref());
res = modifier.remove(&expr.legs)?;
}
Ok(res)
}
}
#[braneg(test)]
mod tests {
use super::super::local_path_expr::parse_json_local_path_expr;
use super::*;
#[test]
fn test_json_remove() {
let test_cases = vec![
(r#"{"a": [3, 4]}"#, "$.a[0]", r#"{"a": [4]}"#, true),
(r#"{"a": [3, 4]}"#, "$.a", r#"{}"#, true),
(
r#"{"a": [3, 4], "b":1, "c":{"a":1}}"#,
"$.c.a",
r#"{"a": [3, 4],"b":1, "c":{}}"#,
true,
),
// Nothing changed because the local_path without last leg doesn't exist.
(r#"{"a": [3, 4]}"#, "$.b[1]", r#"{"a": [3, 4]}"#, true),
// Nothing changed because the local_path without last leg doesn't exist.
(r#"{"a": [3, 4]}"#, "$.a[0].b", r#"{"a": [3, 4]}"#, true),
// Bad local_path expression.
(r#"null"#, "$.*", r#"null"#, false),
(r#"null"#, "$[*]", r#"null"#, false),
(r#"null"#, "$**.a", r#"null"#, false),
(r#"null"#, "$**[3]", r#"null"#, false),
];
for (i, (json, local_path, expected, success)) in test_cases.into_iter().enumerate() {
let j: Result<Json> = json.parse();
assert!(j.is_ok(), "#{} expect json parse ok but got {:?}", i, j);
let p = parse_json_local_path_expr(local_path);
assert!(p.is_ok(), "#{} expect local_path parse ok but got {:?}", i, p);
let e: Result<Json> = expected.parse();
assert!(
e.is_ok(),
"#{} expect expected value parse ok but got {:?}",
|
i,
e
);
let (j, p, e) = (j.unwrap(), p.unwrap(), e.unwrap());
let r = j.as_ref().remove(vec![p].as_slice());
if success {
assert!(r.is_ok(), "#{} expect remove ok but got {:?}", i, r);
let j = r.unwrap();
assert_eq!(e, j, "#{} expect remove json {:?} == {:?}", i, j, e);
} else {
assert!(r.is_err(), "#{} expect remove error but got {:?}", i, r);
}
}
}
}
| |
CommSyslogClient.py
|
"""This module contains the general information for CommSyslogClient ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class CommSyslogClientConsts():
ADMIN_STATE_DISABLED = "disabled"
ADMIN_STATE_ENABLED = "enabled"
FORWARDING_FACILITY_LOCAL0 = "local0"
FORWARDING_FACILITY_LOCAL1 = "local1"
FORWARDING_FACILITY_LOCAL2 = "local2"
FORWARDING_FACILITY_LOCAL3 = "local3"
FORWARDING_FACILITY_LOCAL4 = "local4"
FORWARDING_FACILITY_LOCAL5 = "local5"
FORWARDING_FACILITY_LOCAL6 = "local6"
FORWARDING_FACILITY_LOCAL7 = "local7"
NAME_PRIMARY = "primary"
NAME_SECONDARY = "secondary"
NAME_TERTIARY = "tertiary"
SEVERITY_ALERTS = "alerts"
SEVERITY_CRITICAL = "critical"
SEVERITY_DEBUGGING = "debugging"
SEVERITY_EMERGENCIES = "emergencies"
SEVERITY_ERRORS = "errors"
SEVERITY_INFORMATION = "information"
SEVERITY_NOTIFICATIONS = "notifications"
SEVERITY_WARNINGS = "warnings"
class CommSyslogClient(ManagedObject):
"""This is CommSyslogClient class."""
consts = CommSyslogClientConsts()
naming_props = set([u'name'])
mo_meta = MoMeta("CommSyslogClient", "commSyslogClient", "client-[name]", VersionMeta.Version101e, "InputOutput", 0x3ff, [], ["admin", "operations"], [u'commSyslog'], [], ["Get", "Set"])
prop_meta = {
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["disabled", "enabled"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x4, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"forwarding_facility": MoPropertyMeta("forwarding_facility", "forwardingFacility", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"], []),
"hostname": MoPropertyMeta("hostname", "hostname", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version101e, MoPropertyMeta.NAMING, 0x40, None, None, None, ["primary", "secondary", "tertiary"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x80, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"severity": MoPropertyMeta("severity", "severity", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["alerts", "critical", "debugging", "emergencies", "errors", "information", "notifications", "warnings"], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x200, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"adminState": "admin_state",
"childAction": "child_action",
"dn": "dn",
"forwardingFacility": "forwarding_facility",
"hostname": "hostname",
"name": "name",
"rn": "rn",
"sacl": "sacl",
"severity": "severity",
"status": "status",
}
def __init__(self, parent_mo_or_dn, name, **kwargs):
|
self._dirty_mask = 0
self.name = name
self.admin_state = None
self.child_action = None
self.forwarding_facility = None
self.hostname = None
self.sacl = None
self.severity = None
self.status = None
ManagedObject.__init__(self, "CommSyslogClient", parent_mo_or_dn, **kwargs)
|
|
force_leave_test.go
|
package command
import (
"errors"
"fmt"
"strings"
"testing"
"github.com/hashicorp/consul/command/base"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/serf/serf"
"github.com/mitchellh/cli"
)
func testForceLeaveCommand(t *testing.T) (*cli.MockUi, *ForceLeaveCommand) {
ui := new(cli.MockUi)
return ui, &ForceLeaveCommand{
Command: base.Command{
Ui: ui,
Flags: base.FlagSetClientHTTP,
},
}
}
func TestForceLeaveCommand_implements(t *testing.T) {
var _ cli.Command = &ForceLeaveCommand{}
}
func TestForceLeaveCommandRun(t *testing.T) {
a1 := testAgent(t)
a2 := testAgent(t)
defer a1.Shutdown()
defer a2.Shutdown()
addr := fmt.Sprintf("127.0.0.1:%d", a2.config.Ports.SerfLan)
_, err := a1.agent.JoinLAN([]string{addr})
if err != nil {
t.Fatalf("err: %s", err)
}
|
// Forcibly shutdown a2 so that it appears "failed" in a1
a2.Shutdown()
ui, c := testForceLeaveCommand(t)
args := []string{
"-http-addr=" + a1.httpAddr,
a2.config.NodeName,
}
code := c.Run(args)
if code != 0 {
t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String())
}
m := a1.agent.LANMembers()
if len(m) != 2 {
t.Fatalf("should have 2 members: %#v", m)
}
if err := testutil.WaitForResult(func() (bool, error) {
m = a1.agent.LANMembers()
success := m[1].Status == serf.StatusLeft
return success, errors.New(m[1].Status.String())
}); err != nil {
t.Fatalf("member status is %v, should be left", err)
}
}
func TestForceLeaveCommandRun_noAddrs(t *testing.T) {
ui, c := testForceLeaveCommand(t)
args := []string{"-http-addr=foo"}
code := c.Run(args)
if code != 1 {
t.Fatalf("bad: %d", code)
}
if !strings.Contains(ui.ErrorWriter.String(), "node name") {
t.Fatalf("bad: %#v", ui.ErrorWriter.String())
}
}
| |
exchange.py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
__version__ = '1.61.55'
# -----------------------------------------------------------------------------
import asyncio
import concurrent.futures
import socket
import certifi
import aiohttp
import ssl
import sys
import yarl
# -----------------------------------------------------------------------------
from ccxt.async_support.base.throttler import Throttler
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
from ccxt.base.errors import BadSymbol
# -----------------------------------------------------------------------------
from ccxt.base.exchange import Exchange as BaseExchange
# -----------------------------------------------------------------------------
__all__ = [
'BaseExchange',
'Exchange',
]
# -----------------------------------------------------------------------------
class Exchange(BaseExchange):
synchronous = False
def __init__(self, config={}):
if 'asyncio_loop' in config:
self.asyncio_loop = config['asyncio_loop']
self.aiohttp_trust_env = config.get('aiohttp_trust_env', self.aiohttp_trust_env)
self.verify = config.get('verify', self.verify)
self.own_session = 'session' not in config
self.cafile = config.get('cafile', certifi.where())
super(Exchange, self).__init__(config)
self.throttle = None
self.init_rest_rate_limiter()
self.markets_loading = None
self.reloading_markets = False
def init_rest_rate_limiter(self):
self.throttle = Throttler(self.tokenBucket, self.asyncio_loop)
def __del__(self):
if self.session is not None:
self.logger.warning(self.id + " requires releasing all resources with an explicit call to the .close() coroutine. If you are using the exchange instance with async coroutines, add await exchange.close() to your code at a point where you are done with the exchange and no longer need the exchange instance (at the end of your async coroutine).")
if sys.version_info >= (3, 5):
async def __aenter__(self):
self.open()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
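# Hedged usage sketch (the concrete exchange class is illustrative): the async
# context manager pairs open() and close() so the aiohttp session is released:
#     async with ccxt.async_support.binance({'enableRateLimit': True}) as exchange:
#         markets = await exchange.load_markets()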
def
|
(self):
if self.asyncio_loop is None:
if sys.version_info >= (3, 7):
self.asyncio_loop = asyncio.get_running_loop()
else:
self.asyncio_loop = asyncio.get_event_loop()
self.throttle.loop = self.asyncio_loop
if self.own_session and self.session is None:
# Create our SSL context object with our CA cert file
context = ssl.create_default_context(cafile=self.cafile) if self.verify else self.verify
# Pass this SSL context to aiohttp and create a TCPConnector
connector = aiohttp.TCPConnector(ssl=context, loop=self.asyncio_loop, enable_cleanup_closed=True)
self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector, trust_env=self.aiohttp_trust_env)
async def close(self):
if self.session is not None:
if self.own_session:
await self.session.close()
self.session = None
async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None, config={}, context={}):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
cost = self.calculate_rate_limiter_cost(api, method, path, params, config, context)
# insert cost into here...
await self.throttle(cost)
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return await self.fetch(request['url'], request['method'], request['headers'], request['body'])
async def fetch(self, url, method='GET', headers=None, body=None):
"""Perform a HTTP request and return decoded JSON data"""
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
self.log("\nRequest:", method, url, headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
request_body = body
encoded_body = body.encode() if body else None
self.open()
session_method = getattr(self.session, method.lower())
http_response = None
http_status_code = None
http_status_text = None
json_response = None
try:
async with session_method(yarl.URL(url, encoded=True),
data=encoded_body,
headers=request_headers,
timeout=(self.timeout / 1000),
proxy=self.aiohttp_proxy) as response:
http_response = await response.text(errors='replace')
# CIMultiDictProxy
raw_headers = response.headers
headers = {}
for header in raw_headers:
if header in headers:
headers[header] = headers[header] + ', ' + raw_headers[header]
else:
headers[header] = raw_headers[header]
http_status_code = response.status
http_status_text = response.reason
http_response = self.on_rest_response(http_status_code, http_status_text, url, method, headers, http_response, request_headers, request_body)
json_response = self.parse_json(http_response)
if self.enableLastHttpResponse:
self.last_http_response = http_response
if self.enableLastResponseHeaders:
self.last_response_headers = headers
if self.enableLastJsonResponse:
self.last_json_response = json_response
if self.verbose:
self.log("\nResponse:", method, url, http_status_code, headers, http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
except socket.gaierror as e:
details = ' '.join([self.id, method, url])
raise ExchangeNotAvailable(details) from e
except (concurrent.futures.TimeoutError, asyncio.TimeoutError) as e:
details = ' '.join([self.id, method, url])
raise RequestTimeout(details) from e
except aiohttp.ClientConnectionError as e:
details = ' '.join([self.id, method, url])
raise ExchangeNotAvailable(details) from e
except aiohttp.ClientError as e: # base exception class
details = ' '.join([self.id, method, url])
raise ExchangeError(details) from e
self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
self.handle_http_status_code(http_status_code, http_status_text, url, method, http_response)
if json_response is not None:
return json_response
if self.is_text_response(headers):
return http_response
return response.content
async def load_markets_helper(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = await self.fetch_currencies()
markets = await self.fetch_markets(params)
return self.set_markets(markets, currencies)
async def load_markets(self, reload=False, params={}):
if (reload and not self.reloading_markets) or not self.markets_loading:
self.reloading_markets = True
coroutine = self.load_markets_helper(reload, params)
# coroutines can only be awaited once so we wrap it in a task
self.markets_loading = asyncio.ensure_future(coroutine)
try:
result = await self.markets_loading
except Exception as e:
self.reloading_markets = False
self.markets_loading = None
raise e
self.reloading_markets = False
return result
async def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = await self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = await self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
async def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, await self.fetch_fees())
return self.loaded_fees
async def fetch_markets(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.to_array(self.markets)
async def fetch_currencies(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.currencies
async def fetch_status(self, params={}):
if self.has['fetchTime']:
updated = await self.fetch_time(params)
self.status['updated'] = updated
return self.status
async def fetch_order_status(self, id, symbol=None, params={}):
order = await self.fetch_order(id, symbol, params)
return order['status']
async def fetch_partial_balance(self, part, params={}):
balance = await self.fetch_balance(params)
return balance[part]
async def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = await self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
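    # Illustrative note (behaviour of aggregate() assumed): entries that share a
    # price level are merged by summing their amounts, e.g.
    #   [[100.0, 1.0], [100.0, 2.0], [99.5, 3.0]] -> [[100.0, 3.0], [99.5, 3.0]]
    # bids are then sorted by price descending and asks by price ascending.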
async def perform_order_book_request(self, market, limit=None, params={}):
raise NotSupported(self.id + ' performOrderBookRequest not supported yet')
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.perform_order_book_request(market, limit, params)
return self.parse_order_book(orderbook, market, limit, params)
async def fetch_ohlcvc(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
raise NotSupported('fetch_ohlcv() not implemented yet')
await self.load_markets()
trades = await self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcvc(trades, timeframe, since, limit)
async def fetchOHLCVC(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return await self.fetch_ohlcvc(symbol, timeframe, since, limit, params)
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
ohlcvs = await self.fetch_ohlcvc(symbol, timeframe, since, limit, params)
return [ohlcv[0:-1] for ohlcv in ohlcvs]
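    # For example, fetch_ohlcv above drops the trailing trade count, so an OHLCVC
    # row [timestamp, open, high, low, close, volume, count] becomes the standard
    # OHLCV row [timestamp, open, high, low, close, volume].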
async def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return await self.fetch_ohlcv(symbol, timeframe, since, limit, params)
async def fetch_full_tickers(self, symbols=None, params={}):
return await self.fetch_tickers(symbols, params)
async def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
            raise ExchangeError('edit_order() requires enableRateLimit = true')
await self.cancel_order(id, symbol)
return await self.create_order(symbol, *args)
async def fetch_balance(self, params={}):
raise NotSupported('fetch_balance() not supported yet')
async def create_order(self, symbol, type, side, amount, price=None, params={}):
raise NotSupported('create_order() not supported yet')
async def cancel_order(self, id, symbol=None, params={}):
raise NotSupported('cancel_order() not supported yet')
async def fetch_trading_fees(self, params={}):
raise NotSupported('fetch_trading_fees() not supported yet')
async def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
raise NotSupported('fetch_trading_fee() not supported yet')
return await self.fetch_trading_fees(params)
async def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = await self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
async def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = await self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = await self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
async def fetch_ticker(self, symbol, params={}):
if self.has['fetchTickers']:
tickers = await self.fetch_tickers([symbol], params)
ticker = self.safe_value(tickers, symbol)
if ticker is None:
raise BadSymbol(self.id + ' fetchTickers could not find a ticker for ' + symbol)
else:
return ticker
else:
raise NotSupported(self.id + ' fetchTicker not supported yet')
async def fetch_transactions(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_transactions() is not supported yet')
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_deposits() is not supported yet')
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_withdrawals() is not supported yet')
async def fetch_deposit_address(self, code, params={}):
if self.has['fetchDepositAddresses']:
deposit_addresses = await self.fetch_deposit_addresses([code], params)
deposit_address = self.safe_value(deposit_addresses, code)
if deposit_address is None:
raise NotSupported(self.id + ' fetch_deposit_address could not find a deposit address for ' + code + ', make sure you have created a corresponding deposit address in your wallet on the exchange website')
else:
return deposit_address
else:
raise NotSupported(self.id + ' fetchDepositAddress not supported yet')
async def sleep(self, milliseconds):
return await asyncio.sleep(milliseconds / 1000)
|
open
|
err_context.rs
|
use crate::parser::{keywords, meta_types::MetaList};
use core::convert::TryFrom;
#[allow(clippy::large_enum_variant)]
#[derive(Debug, Clone)]
pub(crate) enum ErrContext {
Context(syn::Expr),
Format(syn::LitStr, Vec<syn::Expr>),
}
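// Sketch of the two attribute shapes this enum models (argument names are
// illustrative):
//   err_context(some_expression)              -> ErrContext::Context(expr)
//   err_context("failed at {}", self.offset)  -> ErrContext::Format(lit, args)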
fn is_lit_str(expr: &syn::Expr) -> bool {
matches!(
expr,
syn::Expr::Lit(syn::ExprLit {
lit: syn::Lit::Str(_),
..
})
)
}
fn as_lit_str(expr: &syn::Expr) -> syn::LitStr
|
impl TryFrom<MetaList<keywords::err_context, syn::Expr>> for ErrContext {
type Error = syn::Error;
fn try_from(value: MetaList<keywords::err_context, syn::Expr>) -> Result<Self, Self::Error> {
match value.fields.len() {
0 => Err(syn::Error::new_spanned(
value.ident,
"err_context requires a value but none were given",
)),
// format string
_ if is_lit_str(&value.fields[0]) => {
let format = as_lit_str(&value.fields[0]);
Ok(ErrContext::Format(
format,
value.fields.into_iter().skip(1).collect(),
))
}
// payload
1 => Ok(ErrContext::Context(value.fields[0].clone())),
_ => Err(syn::Error::new_spanned(
&value.fields[0],
"format string expected",
)),
}
}
}
|
{
if let syn::Expr::Lit(syn::ExprLit {
lit: syn::Lit::Str(lit_str),
..
}) = expr
{
lit_str.clone()
} else {
panic!("Not a string literal")
}
}
|
index.d.ts
|
import React, { Component } from 'react';
import { TextStyle, ViewStyle } from 'react-native';
type Props = {
options: string[];
onPress: (index: number) => void;
title?: string;
message?: string;
tintColor?: string;
cancelButtonIndex?: number;
destructiveButtonIndex?: number;
/**
* Only for Android or ActionSheetCustom
*/
styles?: {
titleBox?: ViewStyle,
titleText?: TextStyle,
messageBox?: ViewStyle,
messageText?: TextStyle,
buttonText?: TextStyle,
buttonBox?: ViewStyle,
cancelButtonBox?: ViewStyle,
overlay?: TextStyle,
wrapper?: ViewStyle,
body?: ViewStyle,
};
/**
* Change theme color
* @default system theme color
*/
userInterfaceStyle?: "light" | "dark"
}
type ActionSheetProps = Props & {
/**
* Android only.
* **ios** theme is similar to the iOS ActionSheet with rounded boxes
* @default flat
*/
theme?: "flat" | "ios"
}
export default class ActionSheet extends Component<ActionSheetProps> {
public show: () => void;
}
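// Minimal usage sketch (prop values are illustrative):
//   <ActionSheet
//     ref={(o) => (this.sheet = o)}
//     options={['Delete', 'Cancel']}
//     cancelButtonIndex={1}
//     destructiveButtonIndex={0}
//     onPress={(index) => { /* handle selection */ }}
//   />
//   this.sheet.show();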
type ActionSheetCustomProps = Props | {
title?: string | React.ReactNode
message?: string | React.ReactNode
options: (string | React.ReactChild)[]
/**
* Starting from v3.0.0 ActionSheetCustom uses a native-like theme build using react-native to allow React Components as options (or title or message)
*
* **flat** is the default option for Android (use theme="flat" to use it on iOS too)
*
* Use theme="ios" to use rounded boxes (like iOS theme) on Android
* @default flat for Android and native-like for iOS
*/
theme?: "flat" | "ios"
}
export class
|
extends Component<ActionSheetCustomProps> {
public show: () => void;
}
|
ActionSheetCustom
|
shellAsync.ts
|
import * as shell from "shelljs";
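// Usage sketch (illustrative): resolves to true when the command exits with code 0,
// false otherwise, and rejects if the process fails to spawn.
//   const ok = await ShellAsync.exec("git status");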
export default class ShellAsync {
static exec(commandString: string): Promise<boolean> {
    return new Promise<boolean>((resolve, reject) => {
      const result = shell.exec(commandString, {
        async: true,
        silent: false,
      });
      // Reject if the process fails to spawn; otherwise resolve with the exit status.
      result.on("error", (error) => reject(error));
      result.on("close", (code) => resolve(code === 0));
});
|
}
|
}
|
modeling_highway_bert.py
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
"""Calculate entropy of a pre-softmax logit Tensor"""
exp_x = torch.exp(x)
A = torch.sum(exp_x, dim=1) # sum of exp(x_i)
B = torch.sum(x * exp_x, dim=1) # sum of x_i * exp(x_i)
return torch.log(A) - B / A
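# Derivation sketch: with p_i = softmax(x)_i = exp(x_i) / A, the Shannon entropy is
#   H(p) = -sum_i p_i * log(p_i)
#        = -sum_i (exp(x_i) / A) * (x_i - log(A))
#        = log(A) - (sum_i x_i * exp(x_i)) / A
#        = log(A) - B / A,
# which is exactly what entropy() computes, one value per row of x.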
class DeeBertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
def set_early_exit_entropy(self, x):
if (type(x) is float) or (type(x) is int):
for i in range(len(self.early_exit_entropy)):
self.early_exit_entropy[i] = x
else:
self.early_exit_entropy = x
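    # For example (illustrative): set_early_exit_entropy(0.5) applies one threshold
    # to every layer, while set_early_exit_entropy([0.1, 0.2, ...]) sets a separate
    # threshold per layer; a layer exits early when its highway entropy drops below
    # its threshold (see forward() below).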
def init_highway_pooler(self, pooler):
loaded_model = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
all_hidden_states = ()
all_attentions = ()
all_highway_exits = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
)
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
current_outputs = (hidden_states,)
if self.output_hidden_states:
current_outputs = current_outputs + (all_hidden_states,)
if self.output_attentions:
current_outputs = current_outputs + (all_attentions,)
highway_exit = self.highway[i](current_outputs)
# logits, pooled_output
if not self.training:
highway_logits = highway_exit[0]
highway_entropy = entropy(highway_logits)
highway_exit = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
all_highway_exits = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(new_output, i + 1)
else:
all_highway_exits = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
outputs = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). ",
BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = DeeBertEncoder(config)
self.pooler = BertPooler(config)
self.init_weights()
def init_highway_pooler(self):
self.encoder.init_highway_pooler(self.pooler)
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during pre-training.
                This output is usually *not* a good summary of the semantic content of the input; you're
                often better off averaging or pooling the sequence of hidden-states over the whole input
                sequence.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
|
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
encoder_extended_attention_mask = encoder_extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
def __init__(self, message, exit_layer):
self.message = message
self.exit_layer = exit_layer # start from 1!
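# Control-flow sketch (mirrors DeeBertForSequenceClassification.forward below):
# during inference the encoder raises HighwayException as soon as an off-ramp is
# confident enough, and the caller recovers the early results from the exception:
#
#     try:
#         outputs = self.bert(input_ids, ...)
#     except HighwayException as e:
#         outputs = e.message        # (highway_logits, ...) + (all_highway_exits,)
#         exit_layer = e.exit_layer  # 1-based index of the exiting layer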
class BertHighway(nn.Module):
"""A module to provide a shortcut
from (the output of one non-final BertLayer in BertEncoder) to (cross-entropy computation in BertForSequenceClassification)
"""
def __init__(self, config):
super().__init__()
self.pooler = BertPooler(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, encoder_outputs):
# Pooler
pooler_input = encoder_outputs[0]
pooler_output = self.pooler(pooler_input)
# "return" pooler_output
# BertModel
bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
pooled_output = bmodel_output[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """,
BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.num_layers = config.num_hidden_layers
self.bert = DeeBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_layer=-1,
train_highway=False,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
            highway_exits (:obj:`tuple(tuple(torch.Tensor))`):
                Tuple of each early exit's results (total length: number of layers).
                Each tuple is itself a tuple of length 2: the first entry is the logits and the second entry is the hidden states.
"""
exit_layer = self.num_layers
try:
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
outputs = e.message
exit_layer = e.exit_layer
logits = outputs[0]
if not self.training:
original_entropy = entropy(logits)
highway_entropy = []
highway_logits_all = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
# work with highway exits
highway_losses = []
for highway_exit in outputs[-1]:
highway_logits = highway_exit[0]
if not self.training:
highway_logits_all.append(highway_logits)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
highway_losses.append(highway_loss)
if train_highway:
outputs = (sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
outputs = (loss,) + outputs
if not self.training:
outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # replace the logits with those from the selected highway exit layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
|
            highway_exits (:obj:`tuple(tuple(torch.Tensor))`):
                Tuple of each early exit's results (total length: number of layers).
                Each tuple is itself a tuple of length 2: the first entry is the logits and the second entry is the hidden states.
|
generic.py
|
# Copyright (c) 2014 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Driver for shares."""
import os
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
from oslo_utils import units
import retrying
import six
from manila.common import constants as const
from manila import compute
from manila import context
from manila import exception
from manila.i18n import _, _LE, _LI, _LW
from manila.share import driver
from manila.share.drivers import service_instance
from manila import utils
from manila import volume
LOG = log.getLogger(__name__)
share_opts = [
cfg.StrOpt('smb_template_config_path',
default='$state_path/smb.conf',
help="Path to smb config."),
cfg.StrOpt('volume_name_template',
default='manila-share-%s',
help="Volume name template."),
cfg.StrOpt('volume_snapshot_name_template',
default='manila-snapshot-%s',
help="Volume snapshot name template."),
cfg.StrOpt('share_mount_path',
default='/shares',
help="Parent path in service instance where shares "
"will be mounted."),
cfg.IntOpt('max_time_to_create_volume',
default=180,
help="Maximum time to wait for creating cinder volume."),
cfg.IntOpt('max_time_to_extend_volume',
default=180,
help="Maximum time to wait for extending cinder volume."),
cfg.IntOpt('max_time_to_attach',
default=120,
help="Maximum time to wait for attaching cinder volume."),
cfg.StrOpt('service_instance_smb_config_path',
default='$share_mount_path/smb.conf',
help="Path to SMB config in service instance."),
cfg.ListOpt('share_helpers',
default=[
'CIFS=manila.share.drivers.helpers.CIFSHelperIPAccess',
'NFS=manila.share.drivers.helpers.NFSHelper',
],
help='Specify list of share export helpers.'),
cfg.StrOpt('share_volume_fstype',
default='ext4',
choices=['ext4', 'ext3'],
help='Filesystem type of the share volume.'),
cfg.StrOpt('cinder_volume_type',
help='Name or id of cinder volume type which will be used '
'for all volumes created by driver.'),
]
CONF = cfg.CONF
CONF.register_opts(share_opts)
# NOTE(u_glide): These constants refer to the column number in the "df" output
BLOCK_DEVICE_SIZE_INDEX = 1
USED_SPACE_INDEX = 2
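# For example (illustrative), a typical data row of `df -h <path>` looks like:
#   /dev/vdb  10G  1.2G  8.8G  13% /shares/share-foo
# so column 1 is the block device size and column 2 is the used space.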
def ensure_server(f):
def wrap(self, context, *args, **kwargs):
server = kwargs.get('share_server')
if not self.driver_handles_share_servers:
if not server:
server = self.service_instance_manager.get_common_server()
kwargs['share_server'] = server
else:
raise exception.ManilaException(
_("Share server handling is not available. "
"But 'share_server' was provided. '%s'. "
"Share network should not be used.") % server.get('id'))
elif not server:
raise exception.ManilaException(
_("Share server handling is enabled. But 'share_server' "
"is not provided. Make sure you used 'share_network'."))
if not server.get('backend_details'):
raise exception.ManilaException(
_("Share server '%s' does not have backend details.") %
server['id'])
if not self.service_instance_manager.ensure_service_instance(
context, server['backend_details']):
raise exception.ServiceInstanceUnavailable()
return f(self, context, *args, **kwargs)
return wrap
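# The decorator is applied below as, e.g.:
#     @ensure_server
#     def create_share(self, context, share, share_server=None): ...
# so a missing 'share_server' is filled in with the common service instance when
# the driver does not handle share servers itself.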
class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
"""Executes commands relating to Shares."""
def __init__(self, *args, **kwargs):
"""Do initialization."""
super(GenericShareDriver, self).__init__(
[False, True], *args, **kwargs)
self.admin_context = context.get_admin_context()
self.configuration.append_config_values(share_opts)
self._helpers = {}
self.backend_name = self.configuration.safe_get(
'share_backend_name') or "Cinder_Volumes"
self.ssh_connections = {}
self._setup_service_instance_manager()
self.private_storage = kwargs.get('private_storage')
def _setup_service_instance_manager(self):
self.service_instance_manager = (
service_instance.ServiceInstanceManager(
driver_config=self.configuration))
def _ssh_exec(self, server, command, check_exit_code=True):
connection = self.ssh_connections.get(server['instance_id'])
ssh_conn_timeout = self.configuration.ssh_conn_timeout
if not connection:
ssh_pool = utils.SSHPool(server['ip'],
22,
ssh_conn_timeout,
server['username'],
server.get('password'),
server.get('pk_path'),
max_size=1)
ssh = ssh_pool.create()
self.ssh_connections[server['instance_id']] = (ssh_pool, ssh)
else:
ssh_pool, ssh = connection
if not ssh.get_transport().is_active():
ssh_pool.remove(ssh)
ssh = ssh_pool.create()
self.ssh_connections[server['instance_id']] = (ssh_pool, ssh)
# (aovchinnikov): ssh_execute does not behave well when passed
# parameters with spaces.
wrap = lambda token: "\"" + token + "\""
command = [wrap(tkn) if tkn.count(' ') else tkn for tkn in command]
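        # For example (illustrative): ['sudo', 'cp', 'a file', '/dst'] is sent as
        #   sudo cp "a file" /dst
        # so tokens containing spaces survive as single shell arguments.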
return processutils.ssh_execute(ssh, ' '.join(command),
check_exit_code=check_exit_code)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
def do_setup(self, context):
"""Any initialization the generic driver does while starting."""
super(GenericShareDriver, self).do_setup(context)
self.compute_api = compute.API()
self.volume_api = volume.API()
self._setup_helpers()
common_sv_available = False
share_server = None
sv_fetch_retry_interval = 5
while not (common_sv_available or self.driver_handles_share_servers):
try:
# Verify availability of common server
share_server = (
self.service_instance_manager.get_common_server())
common_sv_available = self._is_share_server_active(
context, share_server)
except Exception as ex:
LOG.error(ex)
if not common_sv_available:
time.sleep(sv_fetch_retry_interval)
LOG.warning(_LW("Waiting for the common service VM to become "
"available. "
"Driver is currently uninitialized. "
"Share server: %(share_server)s "
"Retry interval: %(retry_interval)s"),
dict(share_server=share_server,
retry_interval=sv_fetch_retry_interval))
def _setup_helpers(self):
"""Initializes protocol-specific NAS drivers."""
helpers = self.configuration.share_helpers
if helpers:
for helper_str in helpers:
share_proto, __, import_str = helper_str.partition('=')
helper = importutils.import_class(import_str)
self._helpers[share_proto.upper()] = helper(
self._execute,
self._ssh_exec,
self.configuration)
else:
raise exception.ManilaException(
"No protocol helpers selected for Generic Driver. "
"Please specify using config option 'share_helpers'.")
@ensure_server
def create_share(self, context, share, share_server=None):
"""Creates share."""
return self._create_share(
context, share,
snapshot=None,
share_server=share_server,
)
def _create_share(self, context, share, snapshot, share_server=None):
helper = self._get_helper(share)
server_details = share_server['backend_details']
volume = self._allocate_container(
self.admin_context, share, snapshot=snapshot)
volume = self._attach_volume(
self.admin_context, share, server_details['instance_id'], volume)
if not snapshot:
self._format_device(server_details, volume)
self._mount_device(share, server_details, volume)
export_locations = helper.create_exports(
server_details, share['name'])
return export_locations
@utils.retry(exception.ProcessExecutionError, backoff_rate=1)
def _is_device_file_available(self, server_details, volume):
"""Checks whether the device file is available"""
command = ['sudo', 'test', '-b', volume['mountpoint']]
self._ssh_exec(server_details, command)
def _format_device(self, server_details, volume):
"""Formats device attached to the service vm."""
self._is_device_file_available(server_details, volume)
command = ['sudo', 'mkfs.%s' % self.configuration.share_volume_fstype,
volume['mountpoint']]
self._ssh_exec(server_details, command)
def _is_device_mounted(self, mount_path, server_details, volume=None):
"""Checks whether volume already mounted or not."""
log_data = {
'mount_path': mount_path,
'server_id': server_details['instance_id'],
}
if volume and volume.get('mountpoint', ''):
log_data['volume_id'] = volume['id']
log_data['dev_mount_path'] = volume['mountpoint']
msg = ("Checking whether volume '%(volume_id)s' with mountpoint "
"'%(dev_mount_path)s' is mounted on mount path '%(mount_p"
"ath)s' on server '%(server_id)s' or not." % log_data)
else:
msg = ("Checking whether mount path '%(mount_path)s' exists on "
"server '%(server_id)s' or not." % log_data)
LOG.debug(msg)
mounts_list_cmd = ['sudo', 'mount']
output, __ = self._ssh_exec(server_details, mounts_list_cmd)
mounts = output.split('\n')
for mount in mounts:
mount_elements = mount.split(' ')
if (len(mount_elements) > 2 and mount_path == mount_elements[2]):
if volume:
# Mount goes with device path and mount path
if (volume.get('mountpoint', '') == mount_elements[0]):
return True
else:
# Unmount goes only by mount path
return True
return False
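    # Illustrative note (assumed `mount` output format): a line such as
    #   /dev/vdb on /shares/share-foo type ext4 (rw,relatime)
    # splits so that element 0 is the device and element 2 is the mount path,
    # matching the comparisons above.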
def _sync_mount_temp_and_perm_files(self, server_details):
"""Sync temporary and permanent files for mounted filesystems."""
try:
self._ssh_exec(
server_details,
['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE],
)
except exception.ProcessExecutionError as e:
LOG.error(_LE("Failed to sync mount files on server '%s'."),
server_details['instance_id'])
raise exception.ShareBackendException(msg=six.text_type(e))
try:
# Remount it to avoid postponed point of failure
self._ssh_exec(server_details, ['sudo', 'mount', '-a'])
except exception.ProcessExecutionError as e:
LOG.error(_LE("Failed to mount all shares on server '%s'."),
server_details['instance_id'])
raise exception.ShareBackendException(msg=six.text_type(e))
def _mount_device(self, share, server_details, volume):
"""Mounts block device to the directory on service vm.
Mounts attached and formatted block device to the directory if not
mounted yet.
"""
@utils.synchronized('generic_driver_mounts_'
'%s' % server_details['instance_id'])
def _mount_device_with_lock():
mount_path = self._get_mount_path(share)
device_path = volume['mountpoint']
log_data = {
'dev': device_path,
'path': mount_path,
'server': server_details['instance_id'],
}
try:
if not self._is_device_mounted(mount_path, server_details,
volume):
LOG.debug("Mounting '%(dev)s' to path '%(path)s' on "
"server '%(server)s'.", log_data)
mount_cmd = (
'sudo', 'mkdir', '-p', mount_path,
'&&', 'sudo', 'mount', device_path, mount_path,
'&&', 'sudo', 'chmod', '777', mount_path,
'&&', 'sudo', 'umount', mount_path,
# NOTE(vponomaryov): 'tune2fs' is required to make
# filesystem of share created from snapshot have
# unique ID, in case of LVM volumes, by default,
# it will have the same UUID as source volume one.
# 'tune2fs' command can be executed only when device
# is not mounted and also, in current case, it takes
# effect only after it was mounted. Closes #1645751
'&&', 'sudo', 'tune2fs', '-U', 'random', device_path,
'&&', 'sudo', 'mount', device_path, mount_path,
)
self._ssh_exec(server_details, mount_cmd)
# Add mount permanently
self._sync_mount_temp_and_perm_files(server_details)
else:
LOG.warning(_LW("Mount point '%(path)s' already exists on "
"server '%(server)s'."), log_data)
except exception.ProcessExecutionError as e:
raise exception.ShareBackendException(msg=six.text_type(e))
return _mount_device_with_lock()
@utils.retry(exception.ProcessExecutionError)
def _unmount_device(self, share, server_details):
"""Unmounts block device from directory on service vm."""
@utils.synchronized('generic_driver_mounts_'
'%s' % server_details['instance_id'])
def _unmount_device_with_lock():
mount_path = self._get_mount_path(share)
log_data = {
'path': mount_path,
'server': server_details['instance_id'],
}
if self._is_device_mounted(mount_path, server_details):
LOG.debug("Unmounting path '%(path)s' on server "
"'%(server)s'.", log_data)
unmount_cmd = ['sudo', 'umount', mount_path, '&&', 'sudo',
'rmdir', mount_path]
self._ssh_exec(server_details, unmount_cmd)
# Remove mount permanently
self._sync_mount_temp_and_perm_files(server_details)
else:
LOG.warning(_LW("Mount point '%(path)s' does not exist on "
"server '%(server)s'."), log_data)
return _unmount_device_with_lock()
def
|
(self, share):
"""Returns the path to use for mount device in service vm."""
return os.path.join(self.configuration.share_mount_path, share['name'])
def _attach_volume(self, context, share, instance_id, volume):
"""Attaches cinder volume to service vm."""
@utils.synchronized(
"generic_driver_attach_detach_%s" % instance_id, external=True)
def do_attach(volume):
if volume['status'] == 'in-use':
attached_volumes = [vol.id for vol in
self.compute_api.instance_volumes_list(
self.admin_context, instance_id)]
if volume['id'] in attached_volumes:
return volume
else:
raise exception.ManilaException(
_('Volume %s is already attached to another instance')
% volume['id'])
@retrying.retry(stop_max_attempt_number=3,
wait_fixed=2000,
retry_on_exception=lambda exc: True)
def attach_volume():
self.compute_api.instance_volume_attach(
self.admin_context, instance_id, volume['id'])
attach_volume()
t = time.time()
while time.time() - t < self.configuration.max_time_to_attach:
volume = self.volume_api.get(context, volume['id'])
if volume['status'] == 'in-use':
return volume
elif volume['status'] != 'attaching':
raise exception.ManilaException(
_('Failed to attach volume %s') % volume['id'])
time.sleep(1)
else:
err_msg = {
'volume_id': volume['id'],
'max_time': self.configuration.max_time_to_attach
}
raise exception.ManilaException(
_('Volume %(volume_id)s has not been attached in '
'%(max_time)ss. Giving up.') % err_msg)
return do_attach(volume)
def _get_volume_name(self, share_id):
return self.configuration.volume_name_template % share_id
def _get_volume(self, context, share_id):
"""Finds volume, associated to the specific share."""
volume_id = self.private_storage.get(share_id, 'volume_id')
if volume_id is not None:
return self.volume_api.get(context, volume_id)
else: # Fallback to legacy method
return self._get_volume_legacy(context, share_id)
def _get_volume_legacy(self, context, share_id):
# NOTE(u_glide): this method is deprecated and will be removed in
# future versions
volume_name = self._get_volume_name(share_id)
search_opts = {'name': volume_name}
if context.is_admin:
search_opts['all_tenants'] = True
volumes_list = self.volume_api.get_all(context, search_opts)
if len(volumes_list) == 1:
return volumes_list[0]
elif len(volumes_list) > 1:
LOG.error(
_LE("Expected only one volume in volume list with name "
"'%(name)s', but got more than one in a result - "
"'%(result)s'."), {
'name': volume_name, 'result': volumes_list})
raise exception.ManilaException(
_("Error. Ambiguous volumes for name '%s'") % volume_name)
return None
def _get_volume_snapshot(self, context, snapshot_id):
"""Find volume snapshot associated to the specific share snapshot."""
volume_snapshot_id = self.private_storage.get(
snapshot_id, 'volume_snapshot_id')
if volume_snapshot_id is not None:
return self.volume_api.get_snapshot(context, volume_snapshot_id)
else: # Fallback to legacy method
return self._get_volume_snapshot_legacy(context, snapshot_id)
def _get_volume_snapshot_legacy(self, context, snapshot_id):
# NOTE(u_glide): this method is deprecated and will be removed in
# future versions
volume_snapshot_name = (
self.configuration.volume_snapshot_name_template % snapshot_id)
volume_snapshot_list = self.volume_api.get_all_snapshots(
context, {'name': volume_snapshot_name})
volume_snapshot = None
if len(volume_snapshot_list) == 1:
volume_snapshot = volume_snapshot_list[0]
elif len(volume_snapshot_list) > 1:
LOG.error(
_LE("Expected only one volume snapshot in list with name "
"'%(name)s', but got more than one in a result - "
"'%(result)s'."), {
'name': volume_snapshot_name,
'result': volume_snapshot_list})
raise exception.ManilaException(
                _('Error. Ambiguous volume snapshots'))
return volume_snapshot
def _detach_volume(self, context, share, server_details):
"""Detaches cinder volume from service vm."""
instance_id = server_details['instance_id']
@utils.synchronized(
"generic_driver_attach_detach_%s" % instance_id, external=True)
def do_detach():
attached_volumes = [vol.id for vol in
self.compute_api.instance_volumes_list(
self.admin_context, instance_id)]
try:
volume = self._get_volume(context, share['id'])
except exception.VolumeNotFound:
LOG.warning(_LW("Volume not found for share %s. "
"Possibly already deleted."), share['id'])
volume = None
if volume and volume['id'] in attached_volumes:
self.compute_api.instance_volume_detach(
self.admin_context,
instance_id,
volume['id']
)
t = time.time()
while time.time() - t < self.configuration.max_time_to_attach:
volume = self.volume_api.get(context, volume['id'])
if volume['status'] in (const.STATUS_AVAILABLE,
const.STATUS_ERROR):
break
time.sleep(1)
else:
err_msg = {
'volume_id': volume['id'],
'max_time': self.configuration.max_time_to_attach
}
raise exception.ManilaException(
_('Volume %(volume_id)s has not been detached in '
'%(max_time)ss. Giving up.') % err_msg)
do_detach()
def _allocate_container(self, context, share, snapshot=None):
"""Creates cinder volume, associated to share by name."""
volume_snapshot = None
if snapshot:
volume_snapshot = self._get_volume_snapshot(context,
snapshot['id'])
volume = self.volume_api.create(
context,
share['size'],
self.configuration.volume_name_template % share['id'], '',
snapshot=volume_snapshot,
volume_type=self.configuration.cinder_volume_type,
availability_zone=share['availability_zone'])
self.private_storage.update(
share['id'], {'volume_id': volume['id']})
msg_error = _('Failed to create volume')
msg_timeout = (
_('Volume has not been created in %ss. Giving up') %
self.configuration.max_time_to_create_volume
)
return self._wait_for_available_volume(
volume, self.configuration.max_time_to_create_volume,
msg_error=msg_error, msg_timeout=msg_timeout
)
def _wait_for_available_volume(self, volume, timeout,
msg_error, msg_timeout,
expected_size=None):
t = time.time()
while time.time() - t < timeout:
if volume['status'] == const.STATUS_AVAILABLE:
if expected_size and volume['size'] != expected_size:
LOG.debug("The volume %(vol_id)s is available but the "
"volume size does not match the expected size. "
"A volume resize operation may be pending. "
"Expected size: %(expected_size)s, "
"Actual size: %(volume_size)s.",
dict(vol_id=volume['id'],
expected_size=expected_size,
volume_size=volume['size']))
else:
break
elif 'error' in volume['status'].lower():
raise exception.ManilaException(msg_error)
time.sleep(1)
volume = self.volume_api.get(self.admin_context, volume['id'])
else:
raise exception.ManilaException(msg_timeout)
return volume
def _deallocate_container(self, context, share):
"""Deletes cinder volume."""
try:
volume = self._get_volume(context, share['id'])
except exception.VolumeNotFound:
LOG.info(_LI("Volume not found. Already deleted?"))
volume = None
if volume:
if volume['status'] == 'in-use':
raise exception.ManilaException(
_('Volume is still in use and '
'cannot be deleted now.'))
self.volume_api.delete(context, volume['id'])
t = time.time()
while (time.time() - t <
self.configuration.max_time_to_create_volume):
try:
volume = self.volume_api.get(context, volume['id'])
except exception.VolumeNotFound:
LOG.debug('Volume was deleted successfully')
break
time.sleep(1)
else:
raise exception.ManilaException(
                    _('Volume has not been '
                      'deleted in %ss. Giving up')
% self.configuration.max_time_to_create_volume)
def _update_share_stats(self):
"""Retrieve stats info from share volume group."""
data = dict(
share_backend_name=self.backend_name,
storage_protocol='NFS_CIFS',
reserved_percentage=self.configuration.reserved_share_percentage,
)
super(GenericShareDriver, self)._update_share_stats(data)
@ensure_server
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
"""Is called to create share from snapshot."""
return self._create_share(
context, share,
snapshot=snapshot,
share_server=share_server,
)
@ensure_server
def extend_share(self, share, new_size, share_server=None):
server_details = share_server['backend_details']
helper = self._get_helper(share)
helper.disable_access_for_maintenance(server_details, share['name'])
self._unmount_device(share, server_details)
volume = self._get_volume(self.admin_context, share['id'])
if int(new_size) > volume['size']:
self._detach_volume(self.admin_context, share, server_details)
volume = self._extend_volume(self.admin_context, volume, new_size)
volume = self._attach_volume(
self.admin_context,
share,
server_details['instance_id'],
volume)
self._resize_filesystem(server_details, volume)
self._mount_device(share, server_details, volume)
helper.restore_access_after_maintenance(server_details,
share['name'])
def _extend_volume(self, context, volume, new_size):
self.volume_api.extend(context, volume['id'], new_size)
msg_error = _('Failed to extend volume %s') % volume['id']
msg_timeout = (
_('Volume has not been extended in %ss. Giving up') %
self.configuration.max_time_to_extend_volume
)
return self._wait_for_available_volume(
volume, self.configuration.max_time_to_extend_volume,
msg_error=msg_error, msg_timeout=msg_timeout,
expected_size=new_size
)
@ensure_server
def shrink_share(self, share, new_size, share_server=None):
server_details = share_server['backend_details']
helper = self._get_helper(share)
export_location = share['export_locations'][0]['path']
mount_path = helper.get_share_path_by_export_location(
server_details, export_location)
consumed_space = self._get_consumed_space(mount_path, server_details)
LOG.debug("Consumed space on share: %s", consumed_space)
if consumed_space >= new_size:
raise exception.ShareShrinkingPossibleDataLoss(
share_id=share['id'])
volume = self._get_volume(self.admin_context, share['id'])
helper.disable_access_for_maintenance(server_details, share['name'])
self._unmount_device(share, server_details)
try:
self._resize_filesystem(server_details, volume, new_size=new_size)
except exception.Invalid:
raise exception.ShareShrinkingPossibleDataLoss(
share_id=share['id'])
except Exception as e:
msg = _("Cannot shrink share: %s") % six.text_type(e)
raise exception.Invalid(msg)
finally:
self._mount_device(share, server_details, volume)
helper.restore_access_after_maintenance(server_details,
share['name'])
def _resize_filesystem(self, server_details, volume, new_size=None):
"""Resize filesystem of provided volume."""
check_command = ['sudo', 'fsck', '-pf', volume['mountpoint']]
self._ssh_exec(server_details, check_command)
command = ['sudo', 'resize2fs', volume['mountpoint']]
if new_size:
command.append("%sG" % six.text_type(new_size))
try:
self._ssh_exec(server_details, command)
except processutils.ProcessExecutionError as e:
if e.stderr.find('New size smaller than minimum') != -1:
msg = (_("Invalid 'new_size' provided: %s")
% six.text_type(new_size))
raise exception.Invalid(msg)
else:
msg = _("Cannot resize file-system: %s") % six.text_type(e)
raise exception.ManilaException(msg)
def _is_share_server_active(self, context, share_server):
"""Check if the share server is active."""
has_active_share_server = (
share_server and share_server.get('backend_details') and
self.service_instance_manager.ensure_service_instance(
context, share_server['backend_details']))
return has_active_share_server
def delete_share(self, context, share, share_server=None):
"""Deletes share."""
helper = self._get_helper(share)
if not self.driver_handles_share_servers:
share_server = self.service_instance_manager.get_common_server()
if self._is_share_server_active(context, share_server):
helper.remove_exports(
share_server['backend_details'], share['name'])
self._unmount_device(share, share_server['backend_details'])
self._detach_volume(self.admin_context, share,
share_server['backend_details'])
        # NOTE(jun): Deallocation below is intentionally done unconditionally, to
        # handle cases where the Nova instance is absent for any reason.
self._deallocate_container(self.admin_context, share)
self.private_storage.delete(share['id'])
def create_snapshot(self, context, snapshot, share_server=None):
"""Creates a snapshot."""
model_update = {}
volume = self._get_volume(self.admin_context, snapshot['share_id'])
volume_snapshot_name = (self.configuration.
volume_snapshot_name_template % snapshot['id'])
volume_snapshot = self.volume_api.create_snapshot_force(
self.admin_context, volume['id'], volume_snapshot_name, '')
t = time.time()
while time.time() - t < self.configuration.max_time_to_create_volume:
if volume_snapshot['status'] == const.STATUS_AVAILABLE:
break
if volume_snapshot['status'] == const.STATUS_ERROR:
raise exception.ManilaException(_('Failed to create volume '
'snapshot'))
time.sleep(1)
volume_snapshot = self.volume_api.get_snapshot(
self.admin_context,
volume_snapshot['id'])
# NOTE(xyang): We should look at whether we still need to save
# volume_snapshot_id in private_storage later, now that is saved
# in provider_location.
self.private_storage.update(
snapshot['id'], {'volume_snapshot_id': volume_snapshot['id']})
# NOTE(xyang): Need to update provider_location in the db so
# that it can be used in manage/unmanage snapshot tempest tests.
model_update['provider_location'] = volume_snapshot['id']
else:
raise exception.ManilaException(
                _('Volume snapshot has not been '
                  'created in %ss. Giving up') %
self.configuration.max_time_to_create_volume)
return model_update
def delete_snapshot(self, context, snapshot, share_server=None):
"""Deletes a snapshot."""
volume_snapshot = self._get_volume_snapshot(self.admin_context,
snapshot['id'])
if volume_snapshot is None:
return
self.volume_api.delete_snapshot(self.admin_context,
volume_snapshot['id'])
t = time.time()
while time.time() - t < self.configuration.max_time_to_create_volume:
try:
snapshot = self.volume_api.get_snapshot(self.admin_context,
volume_snapshot['id'])
except exception.VolumeSnapshotNotFound:
LOG.debug('Volume snapshot was deleted successfully')
self.private_storage.delete(snapshot['id'])
break
time.sleep(1)
else:
raise exception.ManilaException(
                _('Volume snapshot has not been '
                  'deleted in %ss. Giving up') %
self.configuration.max_time_to_create_volume)
@ensure_server
def ensure_share(self, context, share, share_server=None):
"""Ensure that storage are mounted and exported."""
helper = self._get_helper(share)
volume = self._get_volume(context, share['id'])
# NOTE(vponomaryov): volume can be None for managed shares
if volume:
volume = self._attach_volume(
context,
share,
share_server['backend_details']['instance_id'],
volume)
self._mount_device(share, share_server['backend_details'], volume)
helper.create_exports(
share_server['backend_details'], share['name'], recreate=True)
@ensure_server
def update_access(self, context, share, access_rules, add_rules,
delete_rules, share_server=None):
"""Update access rules for given share.
This driver has two different behaviors according to parameters:
1. Recovery after error - 'access_rules' contains all access_rules,
'add_rules' and 'delete_rules' shall be empty. Previously existing
access rules are cleared and then added back according
to 'access_rules'.
2. Adding/Deleting of several access rules - 'access_rules' contains
all access_rules, 'add_rules' and 'delete_rules' contain rules which
should be added/deleted. Rules in 'access_rules' are ignored and
only rules from 'add_rules' and 'delete_rules' are applied.
:param context: Current context
:param share: Share model with share data.
:param access_rules: All access rules for given share
:param add_rules: Empty List or List of access rules which should be
added. access_rules already contains these rules.
:param delete_rules: Empty List or List of access rules which should be
removed. access_rules doesn't contain these rules.
:param share_server: None or Share server model
"""
self._get_helper(share).update_access(share_server['backend_details'],
share['name'], access_rules,
add_rules=add_rules,
delete_rules=delete_rules)
def _get_helper(self, share):
helper = self._helpers.get(share['share_proto'])
if helper:
return helper
else:
raise exception.InvalidShare(
reason="Wrong, unsupported or disabled protocol")
def get_network_allocations_number(self):
"""Get number of network interfaces to be created."""
# NOTE(vponomaryov): Generic driver does not need allocations, because
# Nova will handle it. It is valid for all multitenant drivers, that
# use service instance provided by Nova.
return 0
def _setup_server(self, network_info, metadata=None):
msg = "Creating share server '%s'."
LOG.debug(msg % network_info['server_id'])
server = self.service_instance_manager.set_up_service_instance(
self.admin_context, network_info)
for helper in self._helpers.values():
helper.init_helper(server)
return server
def _teardown_server(self, server_details, security_services=None):
instance_id = server_details.get("instance_id")
LOG.debug("Removing share infrastructure for service instance '%s'.",
instance_id)
self.service_instance_manager.delete_service_instance(
self.admin_context, server_details)
def manage_existing(self, share, driver_options):
"""Manage existing share to manila.
Generic driver accepts only one driver_option 'volume_id'.
If an administrator provides this option, then appropriate Cinder
volume will be managed by Manila as well.
:param share: share data
:param driver_options: Empty dict or dict with 'volume_id' option.
:return: dict with share size, example: {'size': 1}
"""
helper = self._get_helper(share)
share_server = self.service_instance_manager.get_common_server()
server_details = share_server['backend_details']
old_export_location = share['export_locations'][0]['path']
mount_path = helper.get_share_path_by_export_location(
share_server['backend_details'], old_export_location)
LOG.debug("Manage: mount path = %s", mount_path)
mounted = self._is_device_mounted(mount_path, server_details)
LOG.debug("Manage: is share mounted = %s", mounted)
if not mounted:
msg = _("Provided share %s is not mounted.") % share['id']
raise exception.ManageInvalidShare(reason=msg)
def get_volume():
if 'volume_id' in driver_options:
try:
return self.volume_api.get(
self.admin_context, driver_options['volume_id'])
except exception.VolumeNotFound as e:
raise exception.ManageInvalidShare(reason=six.text_type(e))
# NOTE(vponomaryov): Manila can only combine volume name by itself,
# nowhere to get volume ID from. Return None since Cinder volume
# names are not unique or fixed, hence, they can not be used for
# sure.
return None
share_volume = get_volume()
if share_volume:
instance_volumes = self.compute_api.instance_volumes_list(
self.admin_context, server_details['instance_id'])
attached_volumes = [vol.id for vol in instance_volumes]
LOG.debug('Manage: attached volumes = %s',
six.text_type(attached_volumes))
if share_volume['id'] not in attached_volumes:
msg = _("Provided volume %s is not attached "
"to service instance.") % share_volume['id']
raise exception.ManageInvalidShare(reason=msg)
linked_volume_name = self._get_volume_name(share['id'])
if share_volume['name'] != linked_volume_name:
LOG.debug('Manage: volume_id = %s' % share_volume['id'])
self.volume_api.update(self.admin_context, share_volume['id'],
{'name': linked_volume_name})
self.private_storage.update(
share['id'], {'volume_id': share_volume['id']})
share_size = share_volume['size']
else:
share_size = self._get_mounted_share_size(
mount_path, share_server['backend_details'])
export_locations = helper.get_exports_for_share(
server_details, old_export_location)
return {'size': share_size, 'export_locations': export_locations}
def manage_existing_snapshot(self, snapshot, driver_options):
"""Manage existing share snapshot with manila.
:param snapshot: Snapshot data
:param driver_options: Not used by the Generic driver currently
:return: dict with share snapshot size, example: {'size': 1}
"""
model_update = {}
volume_snapshot = None
snapshot_size = snapshot.get('share_size', 0)
provider_location = snapshot.get('provider_location')
try:
volume_snapshot = self.volume_api.get_snapshot(
self.admin_context,
provider_location)
except exception.VolumeSnapshotNotFound as e:
raise exception.ManageInvalidShareSnapshot(
reason=six.text_type(e))
if volume_snapshot:
snapshot_size = volume_snapshot['size']
# NOTE(xyang): volume_snapshot_id is saved in private_storage
# in create_snapshot, so saving it here too for consistency.
# We should look at whether we still need to save it in
# private_storage later.
self.private_storage.update(
snapshot['id'], {'volume_snapshot_id': volume_snapshot['id']})
# NOTE(xyang): provider_location is used to map a Manila snapshot
# to its name on the storage backend and prevent managing of the
# same snapshot twice.
model_update['provider_location'] = volume_snapshot['id']
model_update['size'] = snapshot_size
return model_update
def unmanage_snapshot(self, snapshot):
"""Unmanage share snapshot with manila."""
self.private_storage.delete(snapshot['id'])
def _get_mount_stats_by_index(self, mount_path, server_details, index,
block_size='G'):
"""Get mount stats using df shell command.
:param mount_path: Share path on share server
:param server_details: Share server connection details
:param index: Data index in df command output:
BLOCK_DEVICE_SIZE_INDEX - Size of block device
USED_SPACE_INDEX - Used space
:param block_size: size of block (example: G, M, Mib, etc)
:returns: value of provided index
"""
share_size_cmd = ['df', '-PB%s' % block_size, mount_path]
output, __ = self._ssh_exec(server_details, share_size_cmd)
lines = output.split('\n')
return int(lines[1].split()[index][:-1])
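# Editorial sketch (assuming BLOCK_DEVICE_SIZE_INDEX == 1 and USED_SPACE_INDEX == 2 as
# used elsewhere in this driver): a typical `df -PBG <mount_path>` output looks like
#   Filesystem     1G-blocks  Used Available Capacity Mounted on
#   /dev/vdb             10G    1G        9G      10% /mnt/share
# so lines[1].split() yields ['/dev/vdb', '10G', '1G', ...] and stripping the trailing
# unit character with [:-1] returns the integer size or used-space value.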
def _get_mounted_share_size(self, mount_path, server_details):
try:
size = self._get_mount_stats_by_index(
mount_path, server_details, BLOCK_DEVICE_SIZE_INDEX)
except Exception as e:
msg = _("Cannot calculate size of share %(path)s : %(error)s") % {
'path': mount_path,
'error': six.text_type(e)
}
raise exception.ManageInvalidShare(reason=msg)
return size
def _get_consumed_space(self, mount_path, server_details):
try:
size = self._get_mount_stats_by_index(
mount_path, server_details, USED_SPACE_INDEX, block_size='M')
size /= float(units.Ki)
except Exception as e:
msg = _("Cannot calculate consumed space on share "
"%(path)s : %(error)s") % {
'path': mount_path,
'error': six.text_type(e)
}
raise exception.InvalidShare(reason=msg)
return size
|
_get_mount_path
|
main.rs
|
use std::{env, error::Error as StdError, fmt, fs::File as StdFile};
use futures::{stream::FuturesUnordered, TryStreamExt};
use plotters::{
data::fitting_range,
prelude::{
Boxplot, ChartBuilder, DrawingAreaErrorKind, IntoDrawingArea, IntoSegmentedCoord,
Quartiles, SVGBackend, SegmentValue,
},
style::WHITE,
};
use rosu_pp::{Beatmap, BeatmapExt, PerformanceAttributes};
use serde::Deserialize;
use tokio::{fs::File, runtime::Runtime};
fn main() {
dotenv::dotenv().expect("failed to read .env file");
Runtime::new()
.expect("failed to create runtime")
.block_on(async_main());
}
async fn async_main() {
let map_path_ = env::var("MAP_PATH").expect("missing `MAP_PATH` environment variable");
let map_path = map_path_.as_str();
println!("Deserializing data from output.json...");
let file = StdFile::open("../pp-gen/output.json").expect("failed to open `output.json` file");
let data: Vec<SimulateData> =
serde_json::from_reader(file).expect("failed to deserialize data");
println!(
"Calculating values for {} map-mod combinations...",
data.len()
);
// Calculate rosu-pp's PerformanceAttributes on all map-mod pairs
let result = data
.into_iter()
.map(|data| async move {
let path = format!("{}/{}.osu", map_path, data.score.map_id);
let file = File::open(path).await?;
let map = Beatmap::parse(file).await?;
let mods = parse_mods(&data.score.mods);
let attrs = map.max_pp(mods);
Ok::<_, Error>((data, attrs, mods))
})
.collect::<FuturesUnordered<_>>()
.try_collect::<Vec<_>>()
.await;
let tuples = match result {
Ok(attrs) => attrs,
Err(err) => return print_err(err),
};
println!("Evaluating values...");
// Compare the values from output.json with the PerformanceAttribute values
let mut evaluators = [
Evaluator::new("osu"),
Evaluator::new("taiko"),
Evaluator::new("catch"),
Evaluator::new("mania"),
];
for (data, attributes, mods) in tuples {
evaluators[data.score.mode as usize].process(data, attributes, mods);
}
for evaluator in evaluators {
let mode = evaluator.mode;
if let Err(err) = evaluator.plot() {
eprintln!("failed to plot for {}", mode);
print_err(err);
}
}
}
/// Mode-specific evaluator containing the differences
/// between values from `SimulateData` and `PerformanceAttributes`.
#[derive(Default)]
struct Evaluator {
mode: &'static str,
count: usize,
aim: Option<Vec<f64>>,
accuracy: Option<Vec<f64>>,
flashlight: Option<Vec<f64>>,
speed: Option<Vec<f64>>,
strain: Option<Vec<f64>>,
stars: Vec<f64>,
pp: Vec<f64>,
}
impl Evaluator {
fn new(mode: &'static str) -> Self {
Self {
mode,
..Default::default()
}
}
/// For all mode-specific data points, calculate the
/// differences of `data`'s value and `attrs`' value
fn process(&mut self, data: SimulateData, attrs: PerformanceAttributes, mods: u32) {
self.count += 1;
self.stars
.push(difference(data.difficulty.stars, attrs.stars()));
self.pp.push(difference(data.performance.pp, attrs.pp()));
match attrs {
PerformanceAttributes::Catch(_) => {}
PerformanceAttributes::Mania(attrs) => {
if let Some(acc) = data.performance.acc {
let values = self.accuracy.get_or_insert_with(Vec::new);
let entry = difference(acc, attrs.pp_acc);
values.push(entry);
}
if let Some(strain) = data.performance.difficulty {
let values = self.strain.get_or_insert_with(Vec::new);
let entry = difference(strain, attrs.pp_strain);
values.push(entry);
}
}
PerformanceAttributes::Osu(attrs) => {
if let Some(acc) = data.performance.acc {
let values = self.accuracy.get_or_insert_with(Vec::new);
let entry = difference(acc, attrs.pp_acc);
values.push(entry);
}
if let Some(aim) = data.performance.aim {
let values = self.aim.get_or_insert_with(Vec::new);
let entry = difference(aim, attrs.pp_aim);
values.push(entry);
}
if mods & 1024 > 0 {
if let Some(flashlight) = data.performance.flashlight {
let values = self.flashlight.get_or_insert_with(Vec::new);
let entry = difference(flashlight, attrs.pp_flashlight);
values.push(entry);
}
}
if let Some(speed) = data.performance.speed {
let values = self.speed.get_or_insert_with(Vec::new);
let entry = difference(speed, attrs.pp_speed);
values.push(entry);
}
}
PerformanceAttributes::Taiko(attrs) => {
if let Some(acc) = data.performance.acc {
let values = self.accuracy.get_or_insert_with(Vec::new);
let entry = difference(acc, attrs.pp_acc);
values.push(entry);
}
if let Some(strain) = data.performance.difficulty {
let values = self.strain.get_or_insert_with(Vec::new);
let entry = difference(strain, attrs.pp_strain);
values.push(entry);
}
}
}
}
/// Plot all gathered differences
fn plot(self) -> Result<(), Error> {
let mode = self.mode;
let output_path = format!("{}_accuracy.svg", mode);
let dataset = self.to_quartiles();
let kind_list: Vec<_> = dataset.iter().map(|(kind, _)| *kind).collect();
let height = kind_list.len() as u32 * 128;
let root = SVGBackend::new(&output_path, (1024, height)).into_drawing_area();
root.fill(&WHITE)?;
let root = root.margin(5, 5, 15, 15);
let values = dataset
.iter()
.map(|(_, quartiles)| quartiles.values())
.flatten()
.collect::<Vec<_>>();
let values_range = fitting_range(values.iter());
let caption = format!("{} ({} data points)", mode, self.count);
// Set the chart structure
let mut chart = ChartBuilder::on(&root)
.x_label_area_size(40)
.y_label_area_size(80)
.caption(caption, ("sans-serif", 20))
.build_cartesian_2d(
0.0..values_range.end + values_range.end * 0.2,
kind_list[..].into_segmented(),
)?;
chart
.configure_mesh()
.x_desc("Away from actual value")
.y_labels(kind_list.len())
.light_line_style(&WHITE)
.draw()?;
// Insert data into the chart
for (kind, quartile) in dataset.iter() {
chart.draw_series(std::iter::once(
Boxplot::new_horizontal(SegmentValue::CenterOf(kind), quartile)
.width(20)
.whisker_width(0.5),
))?;
}
root.present()?;
Ok(())
}
fn to_quartiles(&self) -> Vec<(&'static str, Quartiles)> {
let mut vec = Vec::new();
println!("---");
|
.iter()
.fold(0.0, |m, &n| if n > m { n } else { m });
let avg = self.stars.iter().copied().sum::<f64>() / self.stars.len() as f64;
println!("[{}] Stars: average={} | max={}", self.mode, avg, max);
vec.push(("stars", Quartiles::new(&self.stars)));
let max = self.pp.iter().fold(0.0, |m, &n| if n > m { n } else { m });
let avg = self.pp.iter().copied().sum::<f64>() / self.pp.len() as f64;
println!("[{}] PP: average={} | max={}", self.mode, avg, max);
vec.push(("pp", Quartiles::new(&self.pp)));
if let Some(ref acc) = self.accuracy {
if !acc.is_empty() {
let max = acc.iter().fold(0.0, |m, &n| if n > m { n } else { m });
let avg = acc.iter().copied().sum::<f64>() / acc.len() as f64;
println!("[{}] Accuracy: average={} | max={}", self.mode, avg, max);
}
vec.push(("accuracy pp", Quartiles::new(acc)));
}
if let Some(ref aim) = self.aim {
if !aim.is_empty() {
let max = aim.iter().fold(0.0, |m, &n| if n > m { n } else { m });
let avg = aim.iter().copied().sum::<f64>() / aim.len() as f64;
println!("[{}] Aim: average={} | max={}", self.mode, avg, max);
}
vec.push(("aim pp", Quartiles::new(aim)));
}
if let Some(ref fl) = self.flashlight {
if !fl.is_empty() {
let max = fl.iter().fold(0.0, |m, &n| if n > m { n } else { m });
let avg = fl.iter().copied().sum::<f64>() / fl.len() as f64;
println!("[{}] Flashlight: average={} | max={}", self.mode, avg, max);
}
vec.push(("flashlight pp", Quartiles::new(fl)));
}
if let Some(ref speed) = self.speed {
if !speed.is_empty() {
let max = speed.iter().fold(0.0, |m, &n| if n > m { n } else { m });
let avg = speed.iter().copied().sum::<f64>() / speed.len() as f64;
println!("[{}] Speed: average={} | max={}", self.mode, avg, max);
}
vec.push(("speed pp", Quartiles::new(speed)));
}
if let Some(ref strain) = self.strain {
if !strain.is_empty() {
let max = strain.iter().fold(0.0, |m, &n| if n > m { n } else { m });
let avg = strain.iter().copied().sum::<f64>() / strain.len() as f64;
println!("[{}] Strain: average={} | max={}", self.mode, avg, max);
}
vec.push(("strain pp", Quartiles::new(strain)));
}
vec.reverse();
vec
}
}
#[derive(Debug)]
enum Error {
DrawingArea(String),
Io(std::io::Error),
ParseMap(rosu_pp::ParseError),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::DrawingArea(src) => write!(f, "drawing area error: {}", src),
Self::Io(_) => f.write_str("io error"),
Self::ParseMap(_) => f.write_str("failed to parse map"),
}
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
Self::DrawingArea(_) => None,
Self::Io(src) => Some(src),
Self::ParseMap(src) => Some(src),
}
}
}
impl<E: StdError + Send + Sync> From<DrawingAreaErrorKind<E>> for Error {
fn from(e: DrawingAreaErrorKind<E>) -> Self {
Self::DrawingArea(e.to_string())
}
}
impl From<std::io::Error> for Error {
fn from(e: std::io::Error) -> Self {
Self::Io(e)
}
}
impl From<rosu_pp::ParseError> for Error {
fn from(e: rosu_pp::ParseError) -> Self {
Self::ParseMap(e)
}
}
fn difference(actual: f64, calculated: f64) -> f64 {
(actual - calculated).abs()
}
fn parse_mods(mods_list: &[String]) -> u32 {
let mut mods = 0;
for m in mods_list {
match m.as_str() {
"NF" => mods += 1,
"EZ" => mods += 2,
"TD" => mods += 4,
"HD" => mods += 8,
"HR" => mods += 16,
"DT" => mods += 64,
"RX" => mods += 128,
"HT" => mods += 256,
"FL" => mods += 1024,
_ => panic!("unrecognized mod: {}", m),
}
}
mods
}
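// Worked example (editorial): ["HD", "DT", "HR"] maps to 8 + 64 + 16 = 88; the flashlight
// branch in `Evaluator::process` checks `mods & 1024`, i.e. whether "FL" was present.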
fn print_err(err: Error) {
let mut e: &dyn StdError = &err;
eprintln!("{}", err);
while let Some(src) = e.source() {
eprintln!(" - caused by: {}", src);
e = src;
}
}
#[derive(Deserialize)]
struct SimulateData {
score: Score,
performance: Performance,
difficulty: Difficulty,
}
#[derive(Deserialize)]
struct Score {
mode: u32,
map_id: u32,
mods: Vec<String>,
total_score: u32,
acc: f64,
combo: u32,
stats: Statistics,
}
#[derive(Deserialize)]
struct Statistics {
#[serde(default)]
perfect: usize,
great: usize,
#[serde(default)]
good: usize,
ok: usize,
meh: usize,
miss: usize,
}
#[derive(Deserialize)]
struct Performance {
#[serde(default)]
aim: Option<f64>,
#[serde(default)]
speed: Option<f64>,
#[serde(default)]
acc: Option<f64>,
#[serde(default)]
flashlight: Option<f64>,
#[serde(default)]
effective_miss_count: Option<f64>,
#[serde(default)]
scaled_score: Option<f64>,
#[serde(default)]
difficulty: Option<f64>,
pp: f64,
}
#[derive(Deserialize)]
struct Difficulty {
stars: f64,
max_combo: u32,
#[serde(default)]
aim: Option<f64>,
#[serde(default)]
speed: Option<f64>,
#[serde(default)]
flashlight: Option<f64>,
#[serde(default)]
slider_factor: Option<f64>,
#[serde(default)]
stamina: Option<f64>,
#[serde(default)]
rhythm: Option<f64>,
#[serde(default)]
colour: Option<f64>,
#[serde(default)]
ar: Option<f64>,
#[serde(default)]
od: Option<f64>,
#[serde(default)]
great_hit_window: Option<f64>,
#[serde(default)]
score_multiplier: Option<f64>,
}
|
let max = self
.stars
|
id_bundle.rs
|
// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
use std::collections::BTreeSet;
use mz_expr::GlobalId;
/// A bundle of storage and compute collection identifiers.
#[derive(Debug, Default, Clone)]
pub struct CollectionIdBundle {
/// The identifiers for sources in the storage layer.
pub storage_ids: BTreeSet<GlobalId>,
/// The identifiers for indexes in the compute layer.
pub compute_ids: BTreeSet<GlobalId>,
}
impl CollectionIdBundle {
/// Reports whether the bundle contains any identifiers of any type.
pub fn is_empty(&self) -> bool {
self.storage_ids.is_empty() && self.compute_ids.is_empty()
}
/// Extends the bundle with the identifiers from `other`.
pub fn extend(&mut self, other: &CollectionIdBundle) {
self.storage_ids.extend(&other.storage_ids);
|
/// Returns a new bundle without the identifiers from `other`.
pub fn difference(&self, other: &CollectionIdBundle) -> CollectionIdBundle {
CollectionIdBundle {
storage_ids: &self.storage_ids - &other.storage_ids,
compute_ids: &self.compute_ids - &other.compute_ids,
}
}
/// Returns an iterator over all IDs in the bundle.
///
/// The IDs are iterated in an unspecified order.
pub fn iter(&self) -> impl Iterator<Item = GlobalId> + '_ {
self.storage_ids
.iter()
.copied()
.chain(self.compute_ids.iter().copied())
}
}
|
self.compute_ids.extend(&other.compute_ids);
}
|
main.rs
|
mod model;
pub mod row_data;
use gtk::{
glib::{self, clone},
prelude::*,
ResponseType,
};
use row_data::RowData;
fn main() {
let application = gtk::Application::new(
Some("com.github.gtk-rs.examples.listbox-model"),
Default::default(),
);
application.connect_activate(build_ui);
application.run();
}
fn
|
(application: &gtk::Application) {
let window = gtk::ApplicationWindow::new(application);
window.set_title("ListBox Model example");
window.set_border_width(10);
window.set_position(gtk::WindowPosition::Center);
window.set_default_size(320, 480);
let vbox = gtk::Box::new(gtk::Orientation::Vertical, 5);
// Create our list store and specify that the type stored in the
// list should be the RowData GObject we define at the bottom
let model = model::Model::new();
// And then create the UI part, the listbox and bind the list store
// model to it. Whenever the UI needs to show a new row, e.g. because
// it was notified that the model changed, it will call the callback
// with the corresponding item from the model and will ask for a new
// gtk::ListBoxRow that should be displayed.
//
// The gtk::ListBoxRow can contain any possible widgets.
let listbox = gtk::ListBox::new();
listbox.bind_model(Some(&model),
clone!(@weak window => @default-panic, move |item| {
let box_ = gtk::ListBoxRow::new();
let item = item.downcast_ref::<RowData>().expect("Row data is of wrong type");
let hbox = gtk::Box::new(gtk::Orientation::Horizontal, 5);
// Create the label and spin button that shows the two values
// of the item. We bind the properties for the two values to the
// corresponding properties of the widgets so that they are automatically
// updated whenever the item is changing. By specifying SYNC_CREATE the
// widget will automatically get the initial value of the item set.
//
// In case of the spin button the binding is bidirectional, that is any
// change of value in the spin button will be automatically reflected in
// the item.
let label = gtk::Label::new(None);
item.bind_property("name", &label, "label")
.flags(glib::BindingFlags::DEFAULT | glib::BindingFlags::SYNC_CREATE)
.build();
hbox.pack_start(&label, true, true, 0);
let spin_button = gtk::SpinButton::with_range(0.0, 100.0, 1.0);
item.bind_property("count", &spin_button, "value")
.flags(glib::BindingFlags::DEFAULT | glib::BindingFlags::SYNC_CREATE | glib::BindingFlags::BIDIRECTIONAL)
.build();
hbox.pack_start(&spin_button, false, false, 0);
// When the edit button is clicked, a new modal dialog is created for editing
// the corresponding row
let edit_button = gtk::Button::with_label("Edit");
edit_button.connect_clicked(clone!(@weak window, @strong item => move |_| {
let dialog = gtk::Dialog::with_buttons(Some("Edit Item"), Some(&window), gtk::DialogFlags::MODAL,
&[("Close", ResponseType::Close)]);
dialog.set_default_response(ResponseType::Close);
dialog.connect_response(|dialog, _| dialog.close());
let content_area = dialog.content_area();
// Similarly to the label and spin button inside the listbox, the text entry
// and spin button in the edit dialog are connected via property bindings to
// the item. Any changes will be immediately reflected inside the item and
// by the listbox
let entry = gtk::Entry::new();
item.bind_property("name", &entry, "text")
.flags(glib::BindingFlags::DEFAULT | glib::BindingFlags::SYNC_CREATE | glib::BindingFlags::BIDIRECTIONAL)
.build();
// Activating the entry (enter) will send response `ResponseType::Close` to the dialog
entry.connect_activate(clone!(@weak dialog => move |_| {
dialog.response(ResponseType::Close);
}));
content_area.add(&entry);
let spin_button = gtk::SpinButton::with_range(0.0, 100.0, 1.0);
item.bind_property("count", &spin_button, "value")
.flags(glib::BindingFlags::DEFAULT | glib::BindingFlags::SYNC_CREATE | glib::BindingFlags::BIDIRECTIONAL)
.build();
content_area.add(&spin_button);
dialog.show_all();
}));
hbox.pack_start(&edit_button, false, false, 0);
box_.add(&hbox);
// When a row is activated (select + enter) we simply emit the clicked
// signal on the corresponding edit button to open the edit dialog
box_.connect_activate(clone!(@weak edit_button => move |_| {
edit_button.emit_clicked();
}));
box_.show_all();
box_.upcast::<gtk::Widget>()
}));
let scrolled_window = gtk::ScrolledWindow::new(gtk::Adjustment::NONE, gtk::Adjustment::NONE);
scrolled_window.add(&listbox);
let hbox = gtk::Box::new(gtk::Orientation::Horizontal, 5);
// The add button opens a new dialog which is basically the same as the edit
// dialog, except that we don't have a corresponding item yet at that point
// and only create it once the Ok button in the dialog is clicked, and only
// then add it to the model. Once added to the model, it will immediately
// appear in the listbox UI
let add_button = gtk::Button::with_label("Add");
add_button.connect_clicked(clone!(@weak window, @weak model => move |_| {
let dialog = gtk::Dialog::with_buttons(Some("Add Item"), Some(&window), gtk::DialogFlags::MODAL,
&[("Ok", ResponseType::Ok), ("Cancel", ResponseType::Cancel)]);
dialog.set_default_response(ResponseType::Ok);
let content_area = dialog.content_area();
let entry = gtk::Entry::new();
entry.connect_activate(clone!(@weak dialog => move |_| {
dialog.response(ResponseType::Ok);
}));
content_area.add(&entry);
let spin_button = gtk::SpinButton::with_range(0.0, 100.0, 1.0);
content_area.add(&spin_button);
dialog.connect_response(clone!(@weak model, @weak entry, @weak spin_button => move |dialog, resp| {
let text = entry.text();
if !text.is_empty() && resp == ResponseType::Ok {
model.append(&RowData::new(&text, spin_button.value() as u32));
}
dialog.close();
}));
dialog.show_all();
}));
hbox.add(&add_button);
// Via the delete button we delete the item from the model that
// is at the index of the selected row. Also deleting from the
// model is immediately reflected in the listbox.
let delete_button = gtk::Button::with_label("Delete");
delete_button.connect_clicked(clone!(@weak model, @weak listbox => move |_| {
let selected = listbox.selected_row();
if let Some(selected) = selected {
let idx = selected.index();
model.remove(idx as u32);
}
}));
hbox.add(&delete_button);
vbox.pack_start(&hbox, false, false, 0);
vbox.pack_start(&scrolled_window, true, true, 0);
window.add(&vbox);
for i in 0..10 {
model.append(&RowData::new(&format!("Name {}", i), i * 10));
}
window.show_all();
}
|
build_ui
|
dashboard.go
|
package testdata
import (
"github.com/suzuki-shunsuke/go-ptr"
"github.com/suzuki-shunsuke/go-graylog/v11/graylog/graylog"
)
func Dashboard() graylog.Dashboard
|
{
return graylog.Dashboard{
Title: "test",
Description: "test",
CreatedAt: "2019-09-20T12:10:17.486Z",
ID: "5d84c1a92ab79c000d35d6c7",
Widgets: []graylog.Widget{
{
Description: "Quick values",
CreatorUserID: "admin",
ID: "78ae7029-0eb4-4064-b3a0-c51306093877",
CacheTime: ptr.PInt(10),
Config: &graylog.WidgetConfigQuickValues{
Timerange: &graylog.Timerange{
Type: "relative",
Range: 300,
},
StreamID: "5d84c1a92ab79c000d35d6ca",
Query: "",
Interval: "",
Field: "status",
SortOrder: "desc",
StackedFields: "",
ShowDataTable: true,
ShowPieChart: true,
Limit: 5,
DataTableLimit: 60,
},
},
{
Description: "Stream search result count change",
CreatorUserID: "admin",
ID: "ede5fd51-6286-40ee-9b82-249207808344",
CacheTime: ptr.PInt(10),
Config: &graylog.WidgetConfigStreamSearchResultCount{
Timerange: &graylog.Timerange{
Type: "relative",
Range: 400,
},
StreamID: "5d84c1a92ab79c000d35d6ca",
Query: "",
LowerIsBetter: true,
Trend: true,
},
},
},
Positions: []graylog.DashboardWidgetPosition{
{
WidgetID: "ede5fd51-6286-40ee-9b82-249207808344",
Width: 1,
Col: 0,
Row: 0,
Height: 1,
},
{
WidgetID: "78ae7029-0eb4-4064-b3a0-c51306093877",
Width: 2,
Col: 1,
Row: 0,
Height: 2,
},
},
CreatorUserID: "admin",
}
}
|
|
useful_functions_codes.js
|
function binarySearch(key, startIndex, endIndex, arr) {
if (startIndex > endIndex) {
return -1;
}
var middleIndex = (startIndex + (endIndex - startIndex) / 2 + 0.5) | 0;
if (key === arr[middleIndex]) {
return middleIndex;
}
if (key < arr[middleIndex]) {
return binarySearch(key, startIndex, middleIndex - 1, arr);
}
if (key > arr[middleIndex]) {
return binarySearch(key, middleIndex + 1, endIndex, arr);
}
return 'error';
}
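// Example usage (editorial sketch, array must already be sorted ascending):
// var sorted = [1, 3, 7, 9, 12, 20];
// binarySearch(7, 0, sorted.length - 1, sorted); // -> 2
// binarySearch(8, 0, sorted.length - 1, sorted); // -> -1 (not found)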
function stringFormat() {
var argCount = arguments.length;
if (argCount === 0) {
return undefined;
}
var i;
var str = arguments[0];
console.log('string to process = ' + str);
for (i = 1; i < argCount; i += 1) {
var placeHolderPattern = new RegExp('\\{' + (i - 1) + '+?\\}', 'g');
// console.log('Pattern to search for argument ' + i + ': ' + placeHolderPattern);
// console.log('Argument ' + i + ' = ' + arguments[i]);
str = str.replace(placeHolderPattern, arguments[i]);
}
return str;
}
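// Example usage (editorial): placeholders are zero-based and wrapped in braces.
// stringFormat('{0} scored {1} points', 'Gosho', 42); // -> 'Gosho scored 42 points'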
function deepCopy(oldObject) {
return JSON.parse(JSON.stringify(oldObject));
}
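// Editorial note: JSON round-tripping only copies plain data; functions, undefined values
// and Dates are dropped or stringified, so deepCopy suits simple data objects only.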
function toPower(base, pow) {
var result = 1,
i;
for (i = 0; i < pow; i += 1) {
result *= base;
}
return result;
}
function generatePeople(count) {
count = count || 20;
var fnamesMen = ['Gosho', 'Pencho', 'Slavi', 'Niki', 'Atanas', 'Asen', 'Lyubozar'];
var lnamesMen = ['Georgiev', 'Petrov', 'Ivanov', 'Kernadjiev', 'Perenski', 'Mahlenski'];
var fnamesWomen = ['Penka', 'Gergana', 'Slava', 'Nikita', 'Atanaska', 'Lyubozara', 'Asena'];
var lnamesWomen = ['Georgieva', 'Petrova', 'Ivanova', 'Kerandjieva', 'Perenska', 'Mahlenska'];
function
|
(fname, lname, age, gender) {
return {
fname: fname,
lname: lname,
age: age,
gender: gender
}
}
var pplArr = [];
var fname,
lname,
age,
gender;
for (var i = 0; i < count; i += 1) {
if (Math.round(Math.random())) {
fname = fnamesMen[(Math.random() * fnamesMen.length) | 0];
lname = lnamesMen[(Math.random() * lnamesMen.length) | 0];
gender = 'm';
} else {
fname = fnamesWomen[(Math.random() * fnamesWomen.length) | 0];
lname = lnamesWomen[(Math.random() * lnamesWomen.length) | 0];
gender = 'f';
}
age = Math.random() * 100 | 0;
pplArr.push(getPerson(fname, lname, age, gender));
}
return pplArr;
}
Function.prototype.extends = function(parent) {
this.prototype = Object.create(parent.prototype);
this.prototype.constructor = this;
Object.defineProperty(this.prototype, 'constructor', {
enumerable: false
});
}
var validator = {
validateIfConvertibleToNumber: function(num, callerID) {
if (isNaN(+num) || num === null || num === '') {
throw 'Argument must be convertible to number at ' + callerID;
}
},
validateIfNumber: function(num, callerID) {
callerID = callerID || '';
if (!(typeof num === 'number')) {
throw 'Argument must be a number at ' + callerID;
}
},
validateIfString: function(value, callerID) {
callerID = callerID || '';
if (!(typeof value === 'string')) {
throw 'Argument must be a string at ' + callerID;
}
},
validateIfObject: function(value, callerID) {
callerID = callerID || '';
if (!(typeof value === 'object')) {
throw 'Argument must be an object at ' + callerID;
}
},
validateIfUndefined: function(value, callerID) {
callerID = callerID || '';
if (typeof value === 'undefined') {
throw 'Argument must not be undefined at ' + callerID;
}
},
validateIfMatchRegex: function(value, pattern, errMsg) {
errMsg = errMsg || '';
if (!(pattern.test(value))) {
throw 'String does not match the pattern. ' + errMsg;
}
},
validateIfMatchLength: function(arg, minLength, maxLength, callerID) {
callerID = callerID || '';
if (arg.length < minLength || maxLength < arg.length) {
throw 'Argument length must be between ' + minLength + ' and ' + maxLength + ' at ' + callerID;
}
}
}
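// Example usage (editorial): validators throw plain string messages on failure, e.g.
// try { validator.validateIfNumber('abc', 'toPower'); } catch (err) { console.log(err); }
// // -> 'Argument must be a number at toPower'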
var common = {
indexOfElementByPropertyName: function(array, propName) {
var foundIndex = -1;
array.forEach(function(element, index) {
if (element[propName] !== undefined) {
foundIndex = index;
}
});
return foundIndex;
},
indexOfElementByPropertyValue: function(array, propName, propValue) {
var i, len;
for (i = 0, len = array.length; i < len; i++) {
if (array[i][propName] === propValue) {
return i;
}
}
return -1;
}
}
|
getPerson
|
msg.rs
|
//! Transaction messages
use bytes::BytesMut;
use prost::Message;
use prost_types::Any;
/// Transaction messages, encoded to allow arbitrary payloads
#[derive(Debug, Clone, PartialEq)]
pub struct Msg(pub(crate) Any);
impl Msg {
/// Create a new transaction message from a type URL and a Protobuf-encodable
/// value; the value is encoded into the wrapped `Any` payload.
pub fn new<V: prost::Message>(type_url: impl Into<String>, value: V) -> Self
|
}
impl From<Any> for Msg {
fn from(any: Any) -> Msg {
Msg(any)
}
}
impl From<Msg> for Any {
fn from(msg: Msg) -> Any {
msg.0
}
}
|
{
let size = Message::encoded_len(&value);
let mut buf = BytesMut::with_capacity(size);
// encoding should never fail so long as the buffer is big enough
Message::encode(&value, &mut buf).expect("Failed to encode!");
Msg(Any {
type_url: type_url.into(),
value: buf.to_vec(),
})
}
|
merge_sort.py
|
def mergesort(items):
if len(items) <= 1:
return items
mid = len(items) // 2
left = items[:mid]
right = items[mid:]
left = mergesort(left)
right = mergesort(right)
return merge(left, right)
def merge(left, right):
merged = []
left_index = 0
right_index = 0
while left_index < len(left) and right_index < len(right):
if left[left_index] > right[right_index]:
merged.append(right[right_index])
right_index += 1
else:
merged.append(left[left_index])
left_index += 1
|
merged += left[left_index:]
merged += right[right_index:]
return merged
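# Editorial trace: merge([1, 8], [0, 3]) takes 0, then 1, then 3 inside the loop,
# appends the leftover [8] afterwards, and returns [0, 1, 3, 8].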
test_list_1 = [8, 3, 1, 7, 0, 10, 2]
test_list_2 = [1, 0]
test_list_3 = [97, 98, 99]
print('{} to {}'.format(test_list_1, mergesort(test_list_1)))
print('{} to {}'.format(test_list_2, mergesort(test_list_2)))
print('{} to {}'.format(test_list_3, mergesort(test_list_3)))
| |
dyn_fn_param_closure_fail.rs
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Check that we can pass a dyn function pointer to a simple closure
#![feature(ptr_metadata)]
include!("../Helpers/vtable_utils_ignore.rs");
include!("../../rmc-prelude.rs");
|
fn takes_dyn_fun(fun: &dyn Fn() -> i32) {
let x = fun();
__VERIFIER_expect_fail(x != 5, "Wrong return");
/* The closure does not capture anything and thus has zero size */
__VERIFIER_expect_fail(size_from_vtable(vtable!(fun)) != 0, "Wrong size");
}
fn main() {
let closure = || 5;
takes_dyn_fun(&closure)
}
| |
index.py
|
from dataclasses import dataclass
from flask import render_template, request, redirect, url_for, flash
from flask_login import login_required, current_user
from app import alias_utils
from app.api.serializer import get_alias_infos_with_pagination_v3, get_alias_info_v3
from app.config import ALIAS_LIMIT, PAGE_LIMIT
from app.dashboard.base import dashboard_bp
from app.db import Session
from app.extensions import limiter
from app.log import LOG
from app.models import (
Alias,
AliasGeneratorEnum,
User,
EmailLog,
Contact,
)
@dataclass
class Stats:
nb_alias: int
nb_forward: int
nb_reply: int
nb_block: int
def get_stats(user: User) -> Stats:
nb_alias = Alias.filter_by(user_id=user.id).count()
nb_forward = (
Session.query(EmailLog)
.filter_by(user_id=user.id, is_reply=False, blocked=False, bounced=False)
.count()
)
|
Session.query(EmailLog)
.filter_by(user_id=user.id, is_reply=True, blocked=False, bounced=False)
.count()
)
nb_block = (
Session.query(EmailLog)
.filter_by(user_id=user.id, is_reply=False, blocked=True, bounced=False)
.count()
)
return Stats(
nb_alias=nb_alias, nb_forward=nb_forward, nb_reply=nb_reply, nb_block=nb_block
)
@dashboard_bp.route("/", methods=["GET", "POST"])
@limiter.limit(
ALIAS_LIMIT,
methods=["POST"],
exempt_when=lambda: request.form.get("form-name") != "create-random-email",
)
@login_required
def index():
query = request.args.get("query") or ""
sort = request.args.get("sort") or ""
alias_filter = request.args.get("filter") or ""
page = 0
if request.args.get("page"):
page = int(request.args.get("page"))
highlight_alias_id = None
if request.args.get("highlight_alias_id"):
try:
highlight_alias_id = int(request.args.get("highlight_alias_id"))
except ValueError:
LOG.w(
"highlight_alias_id must be a number, received %s",
request.args.get("highlight_alias_id"),
)
if request.method == "POST":
if request.form.get("form-name") == "create-custom-email":
if current_user.can_create_new_alias():
return redirect(url_for("dashboard.custom_alias"))
else:
flash("You need to upgrade your plan to create new alias.", "warning")
elif request.form.get("form-name") == "create-random-email":
if current_user.can_create_new_alias():
scheme = int(
request.form.get("generator_scheme") or current_user.alias_generator
)
if not scheme or not AliasGeneratorEnum.has_value(scheme):
scheme = current_user.alias_generator
alias = Alias.create_new_random(user=current_user, scheme=scheme)
alias.mailbox_id = current_user.default_mailbox_id
Session.commit()
LOG.d("create new random alias %s for user %s", alias, current_user)
flash(f"Alias {alias.email} has been created", "success")
return redirect(
url_for(
"dashboard.index",
highlight_alias_id=alias.id,
query=query,
sort=sort,
filter=alias_filter,
)
)
else:
flash("You need to upgrade your plan to create new alias.", "warning")
elif request.form.get("form-name") in ("delete-alias", "disable-alias"):
try:
alias_id = int(request.form.get("alias-id"))
except ValueError:
flash("unknown error", "error")
return redirect(request.url)
alias: Alias = Alias.get(alias_id)
if not alias or alias.user_id != current_user.id:
flash("Unknown error, sorry for the inconvenience", "error")
return redirect(
url_for(
"dashboard.index",
query=query,
sort=sort,
filter=alias_filter,
)
)
if request.form.get("form-name") == "delete-alias":
LOG.d("delete alias %s", alias)
email = alias.email
alias_utils.delete_alias(alias, current_user)
flash(f"Alias {email} has been deleted", "success")
elif request.form.get("form-name") == "disable-alias":
alias.enabled = False
Session.commit()
flash(f"Alias {alias.email} has been disabled", "success")
return redirect(
url_for("dashboard.index", query=query, sort=sort, filter=alias_filter)
)
mailboxes = current_user.mailboxes()
show_intro = False
if not current_user.intro_shown:
LOG.d("Show intro to %s", current_user)
show_intro = True
# to make sure not showing intro to user again
current_user.intro_shown = True
Session.commit()
stats = get_stats(current_user)
mailbox_id = None
if alias_filter and alias_filter.startswith("mailbox:"):
mailbox_id = int(alias_filter[len("mailbox:") :])
directory_id = None
if alias_filter and alias_filter.startswith("directory:"):
directory_id = int(alias_filter[len("directory:") :])
alias_infos = get_alias_infos_with_pagination_v3(
current_user,
page,
query,
sort,
alias_filter,
mailbox_id,
directory_id,
# load 1 alias more to know whether this is the last page
page_limit=PAGE_LIMIT + 1,
)
last_page = len(alias_infos) <= PAGE_LIMIT
# remove the last alias that's added to know whether this is the last page
alias_infos = alias_infos[:PAGE_LIMIT]
# add highlighted alias in case it's not included
if highlight_alias_id and highlight_alias_id not in [
alias_info.alias.id for alias_info in alias_infos
]:
highlight_alias_info = get_alias_info_v3(
current_user, alias_id=highlight_alias_id
)
if highlight_alias_info:
alias_infos.insert(0, highlight_alias_info)
return render_template(
"dashboard/index.html",
alias_infos=alias_infos,
highlight_alias_id=highlight_alias_id,
query=query,
AliasGeneratorEnum=AliasGeneratorEnum,
mailboxes=mailboxes,
show_intro=show_intro,
page=page,
last_page=last_page,
sort=sort,
filter=alias_filter,
stats=stats,
)
@dashboard_bp.route("/contacts/<int:contact_id>/toggle", methods=["POST"])
@login_required
def toggle_contact(contact_id):
"""
Block/Unblock contact
"""
contact = Contact.get(contact_id)
if not contact or contact.alias.user_id != current_user.id:
return "Forbidden", 403
contact.block_forward = not contact.block_forward
Session.commit()
if contact.block_forward:
toast_msg = f"{contact.website_email} can no longer send emails to {contact.alias.email}"
else:
toast_msg = (
f"{contact.website_email} can now send emails to {contact.alias.email}"
)
return render_template(
"partials/toggle_contact.html", contact=contact, toast_msg=toast_msg
)
|
nb_reply = (
|
merklebranches.go
|
package bc
import (
"encoding/hex"
"fmt"
"github.com/libsv/go-bk/crypto"
"github.com/libsv/go-bt/v2"
)
func getHashes(txHashes []string) []string {
// Allocate to the full length so the loop below can assign by index.
hashes := make([]string, len(txHashes))
for i, tx := range txHashes {
hashes[i] = ReverseHexString(tx)
}
return hashes
}
// GetMerkleBranches comment TODO:
func GetMerkleBranches(template []string) []string {
hashes := getHashes(template)
|
var walkBranch func(hashes []string) []string
walkBranch = func(hashes []string) []string {
var results []string
tot := len(hashes)
if len(hashes) < 2 {
return make([]string, 0)
}
branches = append(branches, hashes[1])
for i := 0; i < tot; i += 2 {
var a, _ = hex.DecodeString(hashes[i])
var b []byte
if (i + 1) < tot {
b, _ = hex.DecodeString(hashes[i+1])
} else {
b = a
}
concat := append(a, b...)
hash := crypto.Sha256d(concat)
results = append(results, hex.EncodeToString(hash[:]))
}
return walkBranch(results)
}
walkBranch(hashes)
return branches
}
// MerkleRootFromBranches returns a Merkle root given a transaction hash (txid), the index in
// which it is positioned in the Merkle tree, and the branches needed along the way (Merkle path).
func MerkleRootFromBranches(txHash string, txIndex int, branches []string) (string, error) {
hash, err := hex.DecodeString(txHash)
if err != nil {
return "", err
}
hash = bt.ReverseBytes(hash)
for _, b := range branches {
h, err := hex.DecodeString(b)
if err != nil {
return "", err
}
h = bt.ReverseBytes(h)
if txIndex&1 > 0 {
hash = crypto.Sha256d(append(h, hash...))
} else {
hash = crypto.Sha256d(append(hash, h...))
}
txIndex >>= 1
}
if txIndex > 0 {
return "", fmt.Errorf("index %d out of range for proof of length %d", txIndex, len(branches))
}
return hex.EncodeToString(bt.ReverseBytes(hash)), nil
}
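// Illustrative usage (editorial sketch, not part of the original file):
//   root, err := MerkleRootFromBranches(txHash, 3, branches)
// At each level the sibling hash goes on the left when the lowest bit of txIndex is set
// (the tx is a right child) and on the right otherwise; txIndex is then shifted for the
// next level, and err is non-nil when the index does not fit a proof of that length.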
|
var branches []string
|
pontibacterchinhatensis.py
|
"""
This file offers the methods to automatically retrieve the graph Pontibacter chinhatensis.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def PontibacterChinhatensis(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
|
"""Return new instance of the Pontibacter chinhatensis graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instance of Pontibacter chinhatensis graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="PontibacterChinhatensis",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
|
|
ccc.rs
|
//! The RFC 2228 Clear Command Channel (`CCC`) command
use crate::auth::UserDetail;
use crate::server::chancomms::InternalMsg;
use crate::server::controlchan::error::ControlChanError;
use crate::server::controlchan::handler::CommandContext;
use crate::server::controlchan::handler::CommandHandler;
use crate::server::controlchan::{Reply, ReplyCode};
use crate::storage;
use async_trait::async_trait;
use futures::channel::mpsc::Sender;
use futures::prelude::*;
use log::warn;
pub struct Ccc;
#[async_trait]
impl<S, U> CommandHandler<S, U> for Ccc
where
U: UserDetail + 'static,
S: 'static + storage::StorageBackend<U> + Sync + Send,
S::File: tokio::io::AsyncRead + Send,
S::Metadata: storage::Metadata,
{
async fn handle(&self, args: CommandContext<S, U>) -> Result<Reply, ControlChanError>
|
}
|
{
let mut tx: Sender<InternalMsg> = args.tx.clone();
let session = args.session.lock().await;
if session.cmd_tls {
tokio::spawn(async move {
if let Err(err) = tx.send(InternalMsg::PlaintextControlChannel).await {
warn!("{}", err);
}
});
Ok(Reply::new(ReplyCode::CommandOkay, "control channel in plaintext now"))
} else {
Ok(Reply::new(ReplyCode::Resp533, "control channel already in plaintext mode"))
}
}
|
tools.py
|
# Define global variables and helper methods
import numpy as np
import math
# import process.process_finger_data as pfd
# Image size currently in use
cur_pic_size = [640, 400]
# cur_pic_size = [1280, 800]
# Camera index to camera name mapping
camera_index_to_name = ['A', 'B', 'C', 'D', 'E', 'F']
# Extrinsic parameters of the 6 cameras
camera_a_outer_para = np.mat([[0.574322111, 0.771054881, 0.275006333, 0.93847817],
[0.565423192, -0.130698104, -0.814379899, -0.36935905],
[-0.591988790, 0.623211341, -0.511035123, 4.78810628],
[0, 0, 0, 1]])
camera_b_outer_para = np.mat([[0.456023570, 0.727006744, 0.513326112, 1.72205846],
[-0.146061166, 0.630108915, -0.762645980, -0.30452329],
[-0.877900131, 0.272807532, 0.393531969, 5.53092307],
[0, 0, 0, 1]])
camera_c_outer_para = np.mat([[0.609183831, 0.528225460, 0.591500569, 1.59956459],
[-0.738350101, 0.649953779, 0.179997814, 0.5030131],
[-0.289368602, -0.546386263, 0.785956655, 5.58635091],
[0, 0, 0, 1]])
camera_d_outer_para = np.mat([[0.771746127, 0.478767298, 0.418556793, 0.955855425],
[-0.476877262, 0.000270229651, 0.878969854, 0.477556906],
[0.420708915, -0.877941799, 0.228521787, 4.61760675],
[0, 0, 0, 1]])
camera_e_outer_para = np.mat([[0.788882832, 0.555210653, 0.263448302, 0.71648894],
[0.159053746, -0.598545227, 0.785140445, 0.00777088],
[0.593604063, -0.577481378, -0.560490387, 4.30437514],
[0, 0, 0, 1]])
camera_f_outer_para = np.mat([[0.712321206, 0.689000523, 0.133704068, 1.13938413],
[0.694227260, -0.719684989, 0.0101009224, -0.28640104],
[0.103184351, 0.0856259076, -0.990969825, 4.49819911],
[0, 0, 0, 1]])
# Intrinsic parameters of the six cameras
camera_a_inner_para = np.mat([[967.5377197, 0, 703.1273732, 0],
[0, 967.9393921, 351.0187561, 0],
[0, 0, 1, 0]])
camera_b_inner_para = np.mat([[963.2991943, 0, 589.8122291, 0],
[0, 962.7422485, 412.5244055, 0],
[0, 0, 1, 0]])
camera_c_inner_para = np.mat([[967.4086914, 0, 612.7826353, 0],
[0, 968.0758667, 451.7366286, 0],
[0, 0, 1, 0]])
camera_d_inner_para = np.mat([[961.0868530, 0, 692.7282436, 0],
[0, 960.6126708, 417.4375162, 0],
[0, 0, 1, 0]])
camera_e_inner_para = np.mat([[955.4882812, 0, 730.3056525, 0],
[0, 953.7589722, 451.5117967, 0],
[0, 0, 1, 0]])
camera_f_inner_para = np.mat([[962.0779419, 0, 595.2503222, 0],
[0, 961.0998535, 396.8389609, 0],
[0, 0, 1, 0]])
# Projection matrix of each camera: projection matrix = intrinsic x extrinsic
# Projection matrices of all cameras collected in one 3-D list (for 1280x800 images)
all_camera_projection_mat = [
[[1.39434783e+02, 1.18422163e+03, -9.32437833e+01, 4.27466162e+03],
[3.39496212e+02, 9.22510264e+01, -9.67653298e+02, 1.32319794e+03],
[-5.91988790e-01, 6.23211341e-01, -5.11035123e-01, 4.78810628e+00]],
[[-7.85090956e+01, 8.61230229e+02, 7.26596598e+02, 4.92106359e+03],
[-5.02774485e+02, 7.19172239e+02, -5.71889964e+02, 1.98846331e+03],
[-8.77900131e-01, 2.72807532e-01, 3.93531969e-01, 5.53092307e+00]],
[[4.12009678e+02, 1.76193887e+02, 1.05384338e+03, 4.97065152e+03],
[-8.45497311e+02, 3.82381880e+02, 5.29296949e+02, 3.01051417e+03],
[-2.89368602e-01, -5.46386263e-01, 7.85956655e-01, 5.58635091e+00]],
[[1.03315200e+03, -1.48038125e+02, 5.60572927e+02, 4.11740670e+03],
[-2.82474656e+02, -3.66226258e+02, 9.39743146e+02, 2.38630951e+03],
[4.20708915e-01, -8.77941799e-01, 2.28521787e-01, 4.61760675e+00]],
[[1.18728070e+03, 1.08759358e+02, -1.57607533e+02, 3.82810628e+03],
[4.19718174e+02, -8.31607535e+02, 4.95766722e+02, 1.95088770e+03],
[5.93604063e-01, -5.77481378e-01, -5.60490387e-01, 4.30437514e+00]],
[[7.46729038e+02, 7.13841054e+02, -4.61241373e+02, 3.77373081e+03],
[7.08169289e+02, -6.57709441e+02, -3.83547441e+02, 1.50980066e+03],
[1.03184351e-01, 8.56259076e-02, -9.90969825e-01, 4.49819911e+00]]
]
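# Editorial sketch (assumption, mirroring the comment above): each entry of
# all_camera_projection_mat should be reproducible as intrinsic @ extrinsic,
# up to rounding of the printed constants, e.g.
# proj_a = camera_a_inner_para @ camera_a_outer_para
# print(np.allclose(np.asarray(proj_a), np.asarray(all_camera_projection_mat[0]), rtol=1e-3))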
# camera_a_projection_mat = np.mat([[1.39434783e+02, 1.18422163e+03, -9.32437833e+01, 4.27466162e+03],
# [3.39496212e+02, 9.22510264e+01, -9.67653298e+02, 1.32319794e+03],
# [-5.91988790e-01, 6.23211341e-01, -5.11035123e-01, 4.78810628e+00]])
#
# camera_b_projection_mat = np.mat([[-7.85090956e+01, 8.61230229e+02, 7.26596598e+02, 4.92106359e+03],
# [-5.02774485e+02, 7.19172239e+02, -5.71889964e+02, 1.98846331e+03],
# [-8.77900131e-01, 2.72807532e-01, 3.93531969e-01, 5.53092307e+00]])
#
# camera_c_projection_mat = np.mat([[4.12009678e+02, 1.76193887e+02, 1.05384338e+03, 4.97065152e+03],
# [-8.45497311e+02, 3.82381880e+02, 5.29296949e+02, 3.01051417e+03],
# [-2.89368602e-01, -5.46386263e-01, 7.85956655e-01, 5.58635091e+00]])
#
# camera_d_projection_mat = np.mat([[1.03315200e+03, -1.48038125e+02, 5.60572927e+02, 4.11740670e+03],
# [-2.82474656e+02, -3.66226258e+02, 9.39743146e+02, 2.38630951e+03],
# [4.20708915e-01, -8.77941799e-01, 2.28521787e-01, 4.61760675e+00]])
#
# camera_e_projection_mat = np.mat([[1.18728070e+03, 1.08759358e+02, -1.57607533e+02, 3.82810628e+03],
# [4.19718174e+02, -8.31607535e+02, 4.95766722e+02, 1.95088770e+03],
# [5.93604063e-01, -5.77481378e-01, -5.60490387e-01, 4.30437514e+00]])
#
# camera_f_projection_mat = np.mat([[7.46729038e+02, 7.13841054e+02, -4.61241373e+02, 3.77373081e+03],
# [7.08169289e+02, -6.57709441e+02, -3.83547441e+02, 1.50980066e+03],
# [1.03184351e-01, 8.56259076e-02, -9.90969825e-01, 4.49819911e+00]])
# Camera intrinsics after shrinking the images to 640*400: all four parameters are halved
camera_a_inner_para_640_400 = np.mat([[483.76885985, 0, 351.5636866, 0],
[0, 483.96969605, 175.50937805, 0],
[0, 0, 1, 0]])
camera_b_inner_para_640_400 = np.mat([[481.64959715, 0, 294.90611455, 0],
[0, 481.37112425, 206.26220275, 0],
[0, 0, 1, 0]])
camera_c_inner_para_640_400 = np.mat([[483.7043457, 0, 306.39131765, 0],
[0, 484.03793335, 225.8683143, 0],
[0, 0, 1, 0]])
camera_d_inner_para_640_400 = np.mat([[480.5434265, 0, 346.3641218, 0],
[0, 480.3063354, 208.7187581, 0],
[0, 0, 1, 0]])
camera_e_inner_para_640_400 = np.mat([[477.7441406, 0, 365.15282625, 0],
[0, 476.8794861, 225.75589835, 0],
[0, 0, 1, 0]])
camera_f_inner_para_640_400 = np.mat([[481.03897095, 0, 297.6251611, 0],
[0, 480.54992675, 198.41948045, 0],
[0, 0, 1, 0]])
# Projection matrices after resizing the images to 640*400
all_camera_projection_mat_640_400 = [
[[6.97173914e+01, 5.92110817e+02, - 4.66218917e+01, 2.13733081e+03],
[1.69748106e+02, 4.61255132e+01, - 4.83826649e+02, 6.61598968e+02],
[-5.91988790e-01, 6.23211341e-01, - 5.11035123e-01, 4.78810628e+00]],
[[-3.92545478e+01, 4.30615115e+02, 3.63298299e+02, 2.46053180e+03],
[-2.51387243e+02, 3.59586119e+02, - 2.85944982e+02, 9.94231657e+02],
[-8.77900131e-01, 2.72807532e-01, 3.93531969e-01, 5.53092307e+00]],
[[2.06004839e+02, 8.80969434e+01, 5.26921691e+02, 2.48532576e+03],
[-4.22748655e+02, 1.91190940e+02, 2.64648475e+02, 1.50525708e+03],
[-2.89368602e-01, - 5.46386263e-01, 7.85956655e-01, 5.58635091e+00]],
[[5.16576002e+02, - 7.40190623e+01, 2.80286464e+02, 2.05870335e+03],
[-1.41237328e+02, - 1.83113129e+02, 4.69871573e+02, 1.19315475e+03],
[4.20708915e-01, - 8.77941799e-01, 2.28521787e-01, 4.61760675e+00]],
[[5.93640352e+02, 5.43796790e+01, - 7.88037663e+01, 1.91405314e+03],
[2.09859087e+02, - 4.15803768e+02, 2.47883361e+02, 9.75443850e+02],
[5.93604063e-01, - 5.77481378e-01, - 5.60490387e-01, 4.30437514e+00]],
[[3.73364519e+02, 3.56920527e+02, - 2.30620687e+02, 1.88686540e+03],
[3.54084644e+02, - 3.28854721e+02, - 1.91773720e+02, 7.54900332e+02],
[1.03184351e-01, 8.56259076e-02, - 9.90969825e-01, 4.49819911e+00]]
]
# Coordinates of the six cameras in the world coordinate system
cameras_coordinate = [[2.50436065, -3.75589484, 1.88800446],
[4.02581981, -2.56894275, -3.29281609],
[1.01348544, 1.88043939, -5.4273143],
[-2.45261002, 3.5962286, -1.87506165],
[-3.12155638, 2.09254542, 2.21770186],
[-1.07692383, -1.37631717, 4.3081322]]
# Parameters of the spatial plane formed by the six cameras: AX+BY+CZ+D=0
camera_plane_para = [19.467678495159983, 18.098947303577706, 10.253452426300939, 1.884526845005233]
# Camera coordinates after mapping the six cameras onto one plane; cameras B, C and D define the plane, so only A, E and F need to be projected onto it
cameras_coordinate_mapping = [[2.45592658, -3.80092362, 1.86249467],
[4.02581981, -2.56894275, -3.29281609],
[1.01348544, 1.88043939, -5.4273143],
[-2.45261002, 3.5962286, -1.87506165],
[-3.16297766, 2.05403639, 2.19588564],
[-1.08130466, -1.38038999, 4.30582486]]
# Pixel data of the six bmp images, stored in a global variable after reading to avoid re-reading each time
bmp_pixel = [[], [], [], [], [], []]
# Hash map storing the pixel uv information for each vertex
map_vertex_to_texture = dict()
# Hash map storing the vt index (line number) for each triangle-face vertex
map_vertex_to_vt_index = dict()
# Triangle faces assigned to each camera, e.g. faces_belong_camera_A=[[1,3,5],[2,3,5]...]
# faces_belong_camera_A = []
# faces_belong_camera_B = []
# faces_belong_camera_C = []
# faces_belong_camera_D = []
# faces_belong_camera_E = []
# faces_belong_camera_F = []
# Triangle faces for all cameras; camera A is at index 0, and so on
faces_belong_camera = [[], [], [], [], [], []]
# Crop range of the bmp for each camera, [Umin,Vmin,Umax,Vmax]; initialized with inverted min/max values (10000 and -100) since no coordinate can ever fall outside that range
bmp_crop_ranges = [[10000, 10000, -100, -100], [10000, 10000, -100, -100],
[10000, 10000, -100, -100], [10000, 10000, -100, -100],
[10000, 10000, -100, -100], [10000, 10000, -100, -100]]
# Pre-computed crop width u_width and height v_height, initialized to 0
crops_width_and_height = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
# Once crops_width_and_height is known, pre-compute the v range (0-1) that each camera's crop occupies in the png, e.g. A:0-0.25, B:0.25-0.4 ... F:0.8-1
crops_v_scale_in_png = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
# Width and height of uvmap_png
uv_map_size = [0, 0]
# Face index, used when hunting for bugs
face_index = 1
# Print the data points
def print_data_points(data_points):
for li in data_points:
print(li)
# Compute the cosine of the angle between two vectors
# Formula: cos<a,b> = a.b / (|a||b|), where a.b = (x1x2+y1y2+z1z2), |a| = √(x1^2+y1^2+z1^2), |b| = √(x2^2+y2^2+z2^2).
def calculate_cosine(vector1, vector2):
a = vector1[0] * vector2[0] + vector1[1] * vector2[1] + vector1[2] * vector2[2]
b = math.sqrt(vector1[0] * vector1[0] + vector1[1] * vector1[1] + vector1[2] * vector1[2])
c = math.sqrt(vector2[0] * vector2[0] + vector2[1] * vector2[1] + vector2[2] * vector2[2])
res = a / (b * c)
return res
# Compute the cross product of two vectors
# AB=(x1,y1,z1), CD=(x2,y2,z2), cross(AB,CD)=(y1*z2-y2*z1, z1*x2-z2*x1, x1*y2-x2*y1)
def calculate_vector_product(vector1, vector2):
vector_product = [vector1[1] * vector2[2] - vector1[2] * vector2[1],
vector1[2] * vector2[0] - vector1[0] * vector2[2],
vector1[0] * vector2[1] - vector1[1] * vector2[0]]
return vector_product
# Projection of a point onto a spatial plane (its mapped point)
def get_mapping_point_in_camera_plane(point, camera_plane_para):
a = camera_plane_para[0]
b = camera_plane_para[1]
c = camera_plane_para[2]
d = camera_plane_para[3]
x = point[0]
y = point[1]
z = point[2]
# Avoid repeated computation (not sure whether Python already optimizes this)
a_ = a * a
b_ = b * b
c_ = c * c
temp = a_ + b_ + c_
x_ = ((b_ + c_) * x - a * (b * y + c * z + d)) / temp
y_ = ((a_ + c_) * y - b * (a * x + c * z + d)) / temp
z_ = ((a_ + b_) * z - c * (a * x + b * y + d)) / temp
point_ = [x_, y_, z_]
return point_
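# Editorial note: the three expressions above are the closed form of projecting a point P
# onto the plane ax+by+cz+d=0, i.e. P' = P - ((a*x + b*y + c*z + d) / (a^2 + b^2 + c^2)) * (a, b, c),
# written out per coordinate.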
# # How some of the data in the global variables was derived (used directly in the main function); since the extrinsics are fixed, this data rarely changes, so it is precomputed to save work
# def pre_process():
# # Compute the coordinates of the six cameras in the world coordinate system
# cameras_coordinate = pfd.get_cameras_coordinate()
# # Compute the camera plane parameters
# camera_plane_para = pfd.get_camera_plane(cameras_coordinate)
# # Get the projected points of cameras A, E and F
# camera_a_point = get_mapping_point_in_camera_plane(cameras_coordinate[0], camera_plane_para)
# camera_e_point = get_map
|
ping_point_in_camera_plane(cameras_coordinate[4], camera_plane_para)
# camera_f_point = get_mapping_point_in_camera_plane(cameras_coordinate[5], camera_plane_para)
# # Coordinates after mapping all six cameras onto one plane: B, C and D stay unchanged, A, E and F are projected onto the BCD plane
# camera_point_mapping = [camera_a_point, cameras_coordinate[1], cameras_coordinate[2],
# cameras_coordinate[3], camera_e_point, camera_f_point]
# camera_point_mapping = np.array(camera_point_mapping)
|
|
IconAccessTimeFilled.tsx
|
import React from 'react'
import { IconProps } from './types'
const IconAccessTimeFilled: React.FC<IconProps> = ({ ...props }) => (
<svg
xmlns="http://www.w3.org/2000/svg"
enableBackground="new 0 0 24 24"
viewBox="0 0 24 24"
{...props}
|
</g>
<g>
<path d="M11.99,2C6.47,2,2,6.48,2,12s4.47,10,9.99,10C17.52,22,22,17.52,22,12S17.52,2,11.99,2z M15.29,16.71L11,12.41V7h2v4.59 l3.71,3.71L15.29,16.71z" />
</g>
</svg>
)
export { IconAccessTimeFilled as default }
|
>
{props.title && <title>{props.title}</title>}
<g>
<path d="M0,0h24v24H0V0z" fill="none" />
|
1633557587028-user-email-lower-case.ts
|
import { MigrationInterface, QueryRunner } from "typeorm"
export class userEmailLowerCase1633557587028 implements MigrationInterface {
name = "userEmailLowerCase1633557587028"
public async up(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(`
UPDATE "user_accounts" SET email = lower(email);
UPDATE listings SET leasing_agent_email = lower(leasing_agent_email);
UPDATE applicant SET email_address = lower(email_address);
UPDATE alternate_contact SET email_address = lower(email_address);
`)
}
|
public async down(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(`
UPDATE "user_accounts" SET email = lower(email);
UPDATE listings SET leasing_agent_email = lower(leasing_agent_email);
UPDATE applicant SET email_address = lower(email_address);
UPDATE alternate_contact SET email_address = lower(email_address);
`)
}
}
| |
fpn_roi_pooling.py
|
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified by Haozhi Qi, Yuwen Xiong
# --------------------------------------------------------
import mxnet as mx
import numpy as np
from mxnet.contrib import autograd
import gc
class FPNROIPoolingOperator(mx.operator.CustomOp):
def __init__(self, feat_strides, pooled_height, pooled_width, output_dim, with_deformable):
self.pooled_height = pooled_height
self.pooled_width = pooled_width
self.feat_strides = feat_strides
self.with_deformable = with_deformable
self.output_dim = output_dim
self.in_grad_hist_list = []
self.num_strides = len(self.feat_strides)
self.roi_pool = [None for _ in range(self.num_strides)]
self.feat_idx = [None for _ in range(self.num_strides)]
def forward(self, is_train, req, in_data, out_data, aux):
rois = in_data[-1].asnumpy()
w = rois[:, 3] - rois[:, 1] + 1
h = rois[:, 4] - rois[:, 2] + 1
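# FPN level assignment heuristic (cf. the FPN paper): level = floor(2 + log2(sqrt(w*h)/224)),
# clipped to the available strides, so larger RoIs are pooled from coarser feature maps.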
feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0, len(self.feat_strides) - 1)
pyramid_idx = []
rois_p = [None for _ in range(self.num_strides)]
for i in range(self.num_strides):
self.feat_idx[i] = np.where(feat_id == i)[0]
if len(self.feat_idx[i]) == 0:
# padding dummy roi
rois_p[i] = np.zeros((1, 5))
pyramid_idx.append(-1)
else:
rois_p[i] = rois[self.feat_idx[i]]
pyramid_idx.append(self.feat_idx[i])
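# recover the original RoI ordering: dummy entries (-1) sort to the front of the
# concatenated index list and are dropped by keeping only the last rois.shape[0] positions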
rois_idx = np.argsort(np.hstack(pyramid_idx))[-rois.shape[0]:]
if is_train:
for i in range(self.num_strides):
self.in_grad_hist_list.append(mx.nd.zeros_like(in_data[i]))
if self.with_deformable:
for i in range(self.num_strides, self.num_strides * 3):
self.in_grad_hist_list.append(mx.nd.zeros_like(in_data[i]))
autograd.mark_variables([in_data[i] for i in range(self.num_strides * 3)], self.in_grad_hist_list)
with autograd.train_section():
for i in range(self.num_strides):
roi_offset_t = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), group_size=1, pooled_size=7,
sample_per_part=4, no_trans=True, part_size=7, output_dim=256, spatial_scale=1.0 / self.feat_strides[i])
|
self.roi_pool[i] = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), trans=roi_offset_reshape,
group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7,
output_dim=self.output_dim, spatial_scale=1.0 / self.feat_strides[i], trans_std=0.1)
else:
autograd.mark_variables([in_data[i] for i in range(self.num_strides)], self.in_grad_hist_list)
with autograd.train_section():
for i in range(self.num_strides):
self.roi_pool[i] = mx.nd.ROIPooling(in_data[i], mx.nd.array(rois_p[i], in_data[i].context), (7, 7), spatial_scale=1.0 / self.feat_strides[i])
roi_pool = mx.nd.concatenate(self.roi_pool, axis=0)
else:
# during testing, there is no need to record variable, thus saving memory
roi_pool = [None for _ in range(self.num_strides)]
if self.with_deformable:
for i in range(self.num_strides):
roi_offset_t = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), group_size=1, pooled_size=7,
sample_per_part=4, no_trans=True, part_size=7, output_dim=256, spatial_scale=1.0 / self.feat_strides[i])
roi_offset = mx.nd.FullyConnected(data=roi_offset_t, num_hidden=7 * 7 * 2, weight=in_data[i * 2 + self.num_strides], bias=in_data[i * 2 + 1 + self.num_strides])
roi_offset_reshape = mx.nd.reshape(data=roi_offset, shape=(-1, 2, 7, 7))
roi_pool[i] = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), trans=roi_offset_reshape,
group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7,
output_dim=self.output_dim, spatial_scale=1.0 / self.feat_strides[i], trans_std=0.1)
else:
for i in range(self.num_strides):
roi_pool[i] = mx.nd.ROIPooling(in_data[i], mx.nd.array(rois_p[i], in_data[i].context), (7, 7), spatial_scale=1.0 / self.feat_strides[i])
roi_pool = mx.nd.concatenate(roi_pool, axis=0)
roi_pool = mx.nd.take(roi_pool, mx.nd.array(rois_idx, roi_pool.context))
self.assign(out_data[0], req[0], roi_pool)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
for i in range(len(in_grad)):
self.assign(in_grad[i], req[i], 0)
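# replay the pooling ops recorded in forward: multiplying the stored outputs by the
# incoming gradient and calling compute_gradient accumulates gradients into the
# buffers registered via mark_variables (in_grad_hist_list)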
with autograd.train_section():
for i in range(self.num_strides):
if len(self.feat_idx[i]) > 0:
autograd.compute_gradient([mx.nd.take(out_grad[0], mx.nd.array(self.feat_idx[i], out_grad[0].context)) * self.roi_pool[i]])
if self.with_deformable:
for i in range(0, self.num_strides * 3):
self.assign(in_grad[i], req[i], self.in_grad_hist_list[i])
else:
for i in range(0, self.num_strides):
self.assign(in_grad[i], req[i], self.in_grad_hist_list[i])
gc.collect()
@mx.operator.register('fpn_roi_pooling')
class FPNROIPoolingProp(mx.operator.CustomOpProp):
def __init__(self, feat_strides='(4,8,16,32)', pooled_height='7', pooled_width='7', with_deformable='False', output_dim='256'):
super(FPNROIPoolingProp, self).__init__(need_top_grad=True)
self.pooled_height = int(pooled_height)
self.pooled_width = int(pooled_width)
self.feat_strides = np.fromstring(feat_strides[1:-1], dtype=int, sep=',')
self.with_deformable = with_deformable == 'True'
self.output_dim = int(output_dim)
self.num_strides = len(self.feat_strides)
def list_arguments(self):
args_list = []
for i in range(self.num_strides):
args_list.append('data_p{}'.format(2 + i))
if self.with_deformable:
for i in range(self.num_strides):
args_list.extend(['offset_weight_p{}'.format(2 + i), 'offset_bias_p{}'.format(2 + i)])
args_list.append('rois')
return args_list
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
output_feat_shape = [in_shape[-1][0], in_shape[0][1], self.pooled_height, self.pooled_width]
if self.with_deformable:
offset_dim = self.pooled_height * self.pooled_width * 2
input_dim = self.pooled_height * self.pooled_width * self.output_dim
for i in range(self.num_strides):
in_shape[i * 2 + self.num_strides], in_shape[i * 2 + 1 + self.num_strides] = [offset_dim, input_dim], [offset_dim, ]
return in_shape, [output_feat_shape]
def create_operator(self, ctx, shapes, dtypes):
return FPNROIPoolingOperator(self.feat_strides, self.pooled_height, self.pooled_width, self.output_dim, self.with_deformable)
def declare_backward_dependency(self, out_grad, in_data, out_data):
return [out_grad[0]]
|
roi_offset = mx.nd.FullyConnected(data=roi_offset_t, num_hidden=7 * 7 * 2, weight=in_data[i * 2 + self.num_strides], bias=in_data[i * 2 + 1 + self.num_strides])
roi_offset_reshape = mx.nd.reshape(data=roi_offset, shape=(-1, 2, 7, 7))
|
runtime.py
|
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Unlock nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from unlock.util.observable import *
from unlock.util.saferef import *
from unlock.util.injector import *
from optparse import OptionParser
import json
import logging
import logging.config
import os
import sys
import traceback
__author__ = 'jpercent'
class RuntimeAssistant(object):
def __init__(self):
super(RuntimeAssistant, self).__init__()
@staticmethod
def configure(config, fact_instance):
assert fact_instance
dpi = DependencyInjector(fact_instance)
instance = dpi.configure_application(config)
assert instance
return instance
@staticmethod
def parse_json_config(conf):
with open(conf, 'rt') as file_descriptor:
json_string = file_descriptor.read()
config = json.loads(json_string)
return config
@staticmethod
def make_high_priority():
try:
import psutil
import os
p = psutil.Process(os.getpid())
p.set_nice(psutil.HIGH_PRIORITY_CLASS)
except Exception as e:
RuntimeAssistant.print_last_exception()
@staticmethod
def print_last_exception():
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
class JsonConfiguredRuntime(object):
def __init__(self, factory, path_to_default_config):
"""Initializes a JsonConfiguredRuntime."""
self.factory = factory
self.conf = None
self.logger = None
self.loglevel = logging.INFO
self.config = None
self.runtime_instance = None
self.args = None
self.options = None
self.parser = None
self.usage = "usage: %prog [options]"
conf_help = 'path to the configuration; if not set conf.json is used'
try:
self.parser = OptionParser(version="%prog 1.0", usage=self.usage)
self.default_conf = os.path.join(path_to_default_config, 'conf.json')
self.parser.add_option('-c', '--conf', type=str, dest='conf', default=self.default_conf, metavar='CONF', help=conf_help)
except Exception as e:
print(str(self.__class__.__name__)+': FATAL failed to parse program arguments')
RuntimeAssistant.print_last_exception()
raise e
def init(self):
assert self.parser
try:
(self.options, self.args) = self.parser.parse_args()
assert self.options.conf
self.config = RuntimeAssistant.parse_json_config(self.options.conf)
self.runtime_instance = RuntimeAssistant.configure(self.config, self.factory)
except Exception as e:
if not self.logger:
print(str(self.__class__.__name__)+': FATAL failed to initialize correctly; did not complete logging setup')
else:
self.logger.fatal('failed to initialize correctly')
if self.parser:
self.parser.print_help()
RuntimeAssistant.print_last_exception()
raise e
self.logger = logging.getLogger(__name__)
|
# Copyright (c) James Percent, Byron Galbraith and Unlock contributors.
|
|
saved_query.py
|
# -*- coding: utf-8 -*-
"""API for working with saved queries for assets."""
import warnings
from typing import Generator, List, Optional, Union
from ...constants.api import MAX_PAGE_SIZE
from ...exceptions import NotFoundError, ResponseError, ApiWarning
# from ...features import Features
from ...parsers.tables import tablize_sqs
from ...tools import check_gui_page_size, listify
from .. import json_api
from ..api_endpoints import ApiEndpoints
from ..mixins import ChildMixins
# XXX need update saved query
class SavedQuery(ChildMixins):
"""API object for working with saved queries for the parent asset type.
Examples:
Create a ``client`` using :obj:`axonius_api_client.connect.Connect` and assume
``apiobj`` is either ``client.devices`` or ``client.users``
>>> apiobj = client.devices # or client.users
* Get a saved query by name: :meth:`get_by_name`
* Get a saved query by UUID: :meth:`get_by_uuid`
* Get a saved query by tags: :meth:`get_by_tags`
* Get all saved query tags: :meth:`get_tags`
* Get all saved queries: :meth:`get`
* Add a saved query: :meth:`add`
* Delete a saved query by name: :meth:`delete_by_name`
* Delete a saved query by UUID or SQ object: :meth:`delete`
See Also:
* Device assets :obj:`axonius_api_client.api.assets.devices.Devices`
* User assets :obj:`axonius_api_client.api.assets.users.Users`
"""
def get_by_name(self, value: str) -> dict:
"""Get a saved query by name.
Examples:
Get a saved query by name
>>> sq = apiobj.saved_query.get_by_name(name="test")
>>> sq['tags']
['Unmanaged Devices']
>>> sq['description'][:80]
'Devices that have been seen by at least one agent or at least one endpoint manag'
>>> sq['view']['fields']
[
'adapters',
'specific_data.data.name',
'specific_data.data.hostname',
'specific_data.data.last_seen',
'specific_data.data.network_interfaces.manufacturer',
'specific_data.data.network_interfaces.mac',
'specific_data.data.network_interfaces.ips',
'specific_data.data.os.type',
'labels'
]
>>> sq['view']['query']['filter'][:80]
'(specific_data.data.adapter_properties == "Agent") or (specific_data.data.adapte'
Args:
value: name of saved query
"""
data = self.get()
found = [x for x in data if x["name"] == value]
if found:
return found[0]
err = f"Saved Query with name of {value!r} not found"
raise NotFoundError(tablize_sqs(data=data, err=err))
def get_by_uuid(self, value: str) -> dict:
"""Get a saved query by uuid.
Examples:
Get a saved query by uuid
>>> sq = apiobj.saved_query.get_by_uuid(value="5f76721ce4557d5cba93f59e")
Args:
value: uuid of saved query
"""
data = self.get()
found = [x for x in data if x["uuid"] == value]
if found:
return found[0]
err = f"Saved Query with UUID of {value!r} not found"
raise NotFoundError(tablize_sqs(data=data, err=err))
def get_by_tags(self, value: Union[str, List[str]], **kwargs) -> List[dict]:
|
def get_tags(self, **kwargs) -> List[str]:
"""Get all tags for saved queries.
Examples:
Get all known tags for all saved queries
>>> tags = apiobj.saved_query.get_tags()
>>> len(tags)
19
Args:
**kwargs: passed to :meth:`get`
"""
rows = self.get(**kwargs)
tags = [y for x in rows for y in x.get("tags", [])]
return sorted(list(set(tags)))
def get(self, generator: bool = False) -> Union[Generator[dict, None, None], List[dict]]:
"""Get all saved queries.
Examples:
Get all saved queries
>>> sqs = apiobj.saved_query.get()
>>> len(sqs)
39
Args:
generator: return an iterator
"""
gen = self.get_generator()
return gen if generator else list(gen)
def get_generator(self) -> Generator[dict, None, None]:
"""Get Saved Queries using a generator."""
offset = 0
while True:
rows = self._get(offset=offset)
offset += len(rows)
if not rows:
break
for row in rows:
yield row.to_dict()
def add(
self,
name: str,
query: Optional[str] = None,
tags: Optional[List[str]] = None,
description: Optional[str] = None,
expressions: Optional[List[str]] = None,
fields: Optional[Union[List[str], str]] = None,
fields_manual: Optional[Union[List[str], str]] = None,
fields_regex: Optional[Union[List[str], str]] = None,
fields_fuzzy: Optional[Union[List[str], str]] = None,
fields_default: bool = True,
fields_root: Optional[str] = None,
sort_field: Optional[str] = None,
sort_descending: bool = True,
column_filters: Optional[dict] = None,
gui_page_size: Optional[int] = None,
private: bool = False,
always_cached: bool = False,
**kwargs,
) -> dict:
"""Create a saved query.
Examples:
Create a saved query using a :obj:`axonius_api_client.api.wizards.wizard.Wizard`
>>> parsed = apiobj.wizard_text.parse(content="simple hostname contains blah")
>>> query = parsed["query"]
>>> expressions = parsed["expressions"]
>>> sq = apiobj.saved_query.add(
... name="test",
... query=query,
... expressions=expressions,
... description="meep meep",
... tags=["nyuck1", "nyuck2", "nyuck3"],
... )
Notes:
Saved Queries created without expressions will not be editable using the query wizard
in the GUI. Use :obj:`axonius_api_client.api.wizards.wizard.Wizard` to produce a query
and its corresponding expressions for the GUI query wizard.
Args:
name: name of saved query
description: description
tags: list of tags
expressions: expressions built by :obj:`axonius_api_client.api.wizards.wizard.Wizard`
query: query built by GUI or the CLI query wizard
fields: fields to return for each asset (will be validated)
fields_manual: fields to return for each asset (will NOT be validated)
fields_regex: regex of fields to return for each asset
fields_fuzzy: string to fuzzy match of fields to return for each asset
fields_default: include the default fields defined in the parent asset object
fields_root: include all fields of an adapter that are not complex sub-fields
sort_field: sort the returned assets on a given field
sort_descending: reverse the sort of the returned assets
column_filters: column filters keyed as field_name:value
gui_page_size: show N rows per page in GUI
private: make this saved query private to current user
"""
query_expr: Optional[str] = kwargs.get("query_expr", None) or query
gui_page_size = check_gui_page_size(size=gui_page_size)
fields = self.parent.fields.validate(
fields=fields,
fields_manual=fields_manual,
fields_regex=fields_regex,
fields_default=fields_default,
fields_root=fields_root,
fields_fuzzy=fields_fuzzy,
)
if sort_field:
sort_field = self.parent.fields.get_field_name(value=sort_field)
data_column_filters = {}
if column_filters:
for col_field, col_value in column_filters.items():
col_field = self.parent.fields.get_field_name(value=col_field)
data_column_filters[col_field] = col_value
dmeta = {} # TBD
dmeta["enforcementFilter"] = None # TBD
dmeta["uniqueAdapters"] = False # TBD
data_query = {}
data_query["filter"] = query or ""
if query_expr:
data_query["onlyExpressionsFilter"] = query_expr
data_query["expressions"] = expressions or []
data_query["search"] = None # TBD
data_query["meta"] = dmeta # TBD
data_sort = {}
data_sort["desc"] = sort_descending
data_sort["field"] = sort_field or ""
data_view = {}
data_view["query"] = data_query
data_view["sort"] = data_sort
data_view["fields"] = fields
data_view["pageSize"] = gui_page_size
# 4.5 SEMI_BREAKING_CHANGE: now a list of dict
# data_view["colFilters"] = listify(data_column_filters or {})
if data_column_filters:
msg = f"Column filters structure has changed and is currently not supported by the API client."
warnings.warn(message=msg, category=ApiWarning)
# 4.5 SEMI_BREAKING_CHANGE: now a list of dict
# data_view["colExcludedAdapters"] = listify({}) # TBD
# data = {}
# data["name"] = name
# data["query_type"] = "saved"
# data["description"] = description
# data["view"] = data_view
# data["tags"] = tags or []
# data["private"] = private
added = self._add(
name=name,
description=description,
view=data_view,
private=private,
always_cached=always_cached,
tags=tags,
)
return self.get_by_uuid(value=added.id)
def delete_by_name(self, value: str, **kwargs) -> dict:
"""Delete a saved query by name.
Examples:
Delete the saved query by name
>>> deleted = apiobj.saved_query.delete_by_name(name="test")
Args:
value: name of saved query to delete
**kwargs: passed to :meth:`get_by_name`
"""
row = self.get_by_name(value=value, **kwargs)
self._delete(uuid=row["uuid"])
return row
def delete(self, rows: Union[str, List[str], List[dict]]) -> List[str]:
"""Delete saved queries.
Args:
rows: list of UUIDs or rows previously fetched saved queries to delete
"""
rows = listify(rows)
deleted = []
for row in rows:
uuid = row["uuid"] if isinstance(row, dict) else row
self._delete(uuid=uuid)
deleted.append(uuid)
return deleted
def _add(
self,
name: str,
view: dict,
description: Optional[str] = "",
always_cached: bool = False,
private: bool = False,
tags: Optional[List[str]] = None,
) -> str:
"""Direct API method to create a saved query.
Args:
data: saved query metadata
"""
api_endpoint = ApiEndpoints.saved_queries.create
request_obj = api_endpoint.load_request(
name=name,
view=view,
description=description,
always_cached=always_cached,
private=private,
tags=tags or [],
)
return api_endpoint.perform_request(
http=self.auth.http, request_obj=request_obj, asset_type=self.parent.ASSET_TYPE
)
def _delete(self, uuid: str) -> json_api.generic.Metadata:
"""Direct API method to delete saved queries.
Args:
ids: list of uuid's to delete
"""
# NEW_IN: 05/31/21 cortex/develop
try:
api_endpoint = ApiEndpoints.saved_queries.delete
request_obj = api_endpoint.load_request()
return api_endpoint.perform_request(
http=self.auth.http,
request_obj=request_obj,
asset_type=self.parent.ASSET_TYPE,
uuid=uuid,
)
except ResponseError as exc:
if exc.is_incorrect_type:
api_endpoint = ApiEndpoints.saved_queries.delete_4_3
request_obj = api_endpoint.load_request()
return api_endpoint.perform_request(
http=self.auth.http,
request_obj=request_obj,
asset_type=self.parent.ASSET_TYPE,
uuid=uuid,
)
raise
def _get(
self, limit: int = MAX_PAGE_SIZE, offset: int = 0
) -> List[json_api.saved_queries.SavedQuery]:
"""Direct API method to get all users.
Args:
limit: limit to N rows per page
offset: start at row N
"""
api_endpoint = ApiEndpoints.saved_queries.get
request_obj = api_endpoint.load_request(page={"limit": limit, "offset": offset})
return api_endpoint.perform_request(
http=self.auth.http, request_obj=request_obj, asset_type=self.parent.ASSET_TYPE
)
|
"""Get saved queries by tags.
Examples:
Get all saved queries with tagged with 'AD'
>>> sqs = apiobj.saved_query.get_by_tags('AD')
>>> len(sqs)
2
Get all saved queries with tagged with 'AD' or 'AWS'
>>> sqs = apiobj.saved_query.get_by_tags(['AD', 'AWS'])
>>> len(sqs)
5
Args:
value: list of tags
**kwargs: passed to :meth:`get`
Raises:
:exc:`NotFoundError`: if no saved queries found tagged with supplied tags
"""
value = listify(value)
rows = self.get(**kwargs)
matches = []
known = set()
for row in rows:
for tag in row.get("tags", []):
known.add(tag)
if tag in value and row not in matches:
matches.append(row)
if not matches:
valid = "\n " + "\n ".join(sorted(list(known)))
msg = f"No saved query found with tags {value!r}, valid tags:{valid}"
raise NotFoundError(msg)
return matches
|
london_data_importer.py
|
import csv
import calendar
import datetime
from django.core.management.base import BaseCommand, CommandError
from api_mihai.models import CollectedData
class Command(BaseCommand):
help = 'Imports the CSV file from the collected data to the database'
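# Typical invocation (Django derives the command name from this module's file name):
#   python manage.py london_data_importer <path/to/data.csv> <dataset_id>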
def add_arguments(self, parser):
parser.add_argument('file_name', type=str)
parser.add_argument('dataset', type=int)
# method that converts date in date format from the london data into phoneTimestamp
# for database records
@staticmethod
def __convert_date_to_timestamp(date):
d = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S.%f").utctimetuple()
return calendar.timegm(d)
# method that gets time as a string from the date in the format
# of the london data, for database records
@staticmethod
def __get_time_string(date):
return datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S.%f").time().strftime("%H:%M:%S")
def handle(self, *args, **options):
bin_vals = ['bin'+str(num) for num in range(0,16)]
with open(options['file_name'], 'rt') as csvfile:
reader = csv.DictReader(csvfile, delimiter=',', quotechar='\"')
for row in reader:
if row['temperature'] == "" or row['humidity'] == "":
continue
|
feature = CollectedData(
phone_timestamp=timestamp,
pm1=float(row['pm1']),
pm2_5=float(row['pm2_5']),
pm10=float(row['pm10']),
temperature=float(row['temperature']),
humidity=float(row['humidity']),
latitude=float(row['gpsLatitude']),
longitude=float(row['gpsLongitude']),
time=time,
dataset_id=options['dataset'],
transport_label_id=row['environment_index'],
)
for val in bin_vals:
setattr(feature, val, row[val])
# save newly created feature
feature.save()
|
timestamp = self.__convert_date_to_timestamp(row['phoneTimestamp'])
time = self.__get_time_string(row['phoneTimestamp'])
|
conversion.go
|
package conversion
import (
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"math"
"math/big"
"sort"
"strconv"
"strings"
"github.com/binance-chain/tss-lib/crypto"
btss "github.com/binance-chain/tss-lib/tss"
"github.com/btcsuite/btcd/btcec"
coskey "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
sdk "github.com/cosmos/cosmos-sdk/types/bech32/legacybech32"
crypto2 "github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/peer"
"gitlab.com/thorchain/binance-sdk/common/types"
"github.com/akildemir/go-tss/messages"
)
// GetPeerIDFromSecp256PubKey convert the given pubkey into a peer.ID
func GetPeerIDFromSecp256PubKey(pk []byte) (peer.ID, error) {
if len(pk) == 0 {
return "", errors.New("empty public key raw bytes")
}
ppk, err := crypto2.UnmarshalSecp256k1PublicKey(pk)
if err != nil {
return "", fmt.Errorf("fail to convert pubkey to the crypto pubkey used in libp2p: %w", err)
}
return peer.IDFromPublicKey(ppk)
}
func GetPeerIDFromPartyID(partyID *btss.PartyID) (peer.ID, error) {
if partyID == nil || !partyID.ValidateBasic() {
return "", errors.New("invalid partyID")
}
pkBytes := partyID.KeyInt().Bytes()
return GetPeerIDFromSecp256PubKey(pkBytes)
}
func PartyIDtoPubKey(party *btss.PartyID) (string, error) {
if party == nil || !party.ValidateBasic() {
return "", errors.New("invalid party")
}
partyKeyBytes := party.GetKey()
pk := coskey.PubKey{
Key: partyKeyBytes,
}
pubKey, err := sdk.MarshalPubKey(sdk.AccPK, &pk)
if err != nil {
return "", err
}
return pubKey, nil
}
func AccPubKeysFromPartyIDs(partyIDs []string, partyIDMap map[string]*btss.PartyID) ([]string, error) {
pubKeys := make([]string, 0)
for _, partyID := range partyIDs {
blameParty, ok := partyIDMap[partyID]
if !ok {
return nil, errors.New("cannot find the blame party")
}
blamedPubKey, err := PartyIDtoPubKey(blameParty)
if err != nil {
return nil, err
}
pubKeys = append(pubKeys, blamedPubKey)
}
return pubKeys, nil
}
func SetupPartyIDMap(partiesID []*btss.PartyID) map[string]*btss.PartyID {
partyIDMap := make(map[string]*btss.PartyID)
for _, id := range partiesID {
partyIDMap[id.Id] = id
}
return partyIDMap
}
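// GetPeersID returns the peer IDs of all parties except the local peer.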
func GetPeersID(partyIDtoP2PID map[string]peer.ID, localPeerID string) []peer.ID {
if partyIDtoP2PID == nil {
return nil
}
peerIDs := make([]peer.ID, 0, len(partyIDtoP2PID)-1)
for _, value := range partyIDtoP2PID {
if value.String() == localPeerID {
continue
}
peerIDs = append(peerIDs, value)
}
return peerIDs
}
func SetupIDMaps(parties map[string]*btss.PartyID, partyIDtoP2PID map[string]peer.ID) error {
for id, party := range parties {
peerID, err := GetPeerIDFromPartyID(party)
if err != nil {
return err
}
partyIDtoP2PID[id] = peerID
}
return nil
}
func GetParties(keys []string, localPartyKey string) ([]*btss.PartyID, *btss.PartyID, error) {
var localPartyID *btss.PartyID
var unSortedPartiesID []*btss.PartyID
sort.Strings(keys)
for idx, item := range keys {
pk, err := sdk.UnmarshalPubKey(sdk.AccPK, item)
if err != nil {
return nil, nil, fmt.Errorf("fail to get account pub key address(%s): %w", item, err)
}
key := new(big.Int).SetBytes(pk.Bytes())
// Set up the parameters
// Note: The `id` and `moniker` fields are for convenience to allow you to easily track participants.
// The `id` should be a unique string representing this party in the network and `moniker` can be anything (even left blank).
// The `uniqueKey` is a unique identifying key for this peer (such as its p2p public key) as a big.Int.
partyID := btss.NewPartyID(strconv.Itoa(idx), "", key)
if item == localPartyKey {
localPartyID = partyID
}
unSortedPartiesID = append(unSortedPartiesID, partyID)
}
if localPartyID == nil {
return nil, nil, errors.New("local party is not in the list")
}
partiesID := btss.SortPartyIDs(unSortedPartiesID)
return partiesID, localPartyID, nil
}
func GetPreviousKeySignUicast(current string) string {
if strings.HasSuffix(current, messages.KEYSIGN1b) {
return messages.KEYSIGN1aUnicast
}
return messages.KEYSIGN2Unicast
}
func isOnCurve(x, y *big.Int) bool
|
func GetTssPubKey(pubKeyPoint *crypto.ECPoint) (string, types.AccAddress, error) {
// we check whether the point is on curve according to Kudelski report
if pubKeyPoint == nil || !isOnCurve(pubKeyPoint.X(), pubKeyPoint.Y()) {
return "", types.AccAddress{}, errors.New("invalid points")
}
tssPubKey := btcec.PublicKey{
Curve: btcec.S256(),
X: pubKeyPoint.X(),
Y: pubKeyPoint.Y(),
}
compressedPubkey := coskey.PubKey{
Key: tssPubKey.SerializeCompressed(),
}
pubKey, err := sdk.MarshalPubKey(sdk.AccPK, &compressedPubkey)
addr := types.AccAddress(compressedPubkey.Address().Bytes())
return pubKey, addr, err
}
func BytesToHashString(msg []byte) (string, error) {
h := sha256.New()
_, err := h.Write(msg)
if err != nil {
return "", fmt.Errorf("fail to caculate sha256 hash: %w", err)
}
return hex.EncodeToString(h.Sum(nil)), nil
}
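// GetThreshold returns ceil(2n/3) - 1 for n parties, so that threshold+1
// participants amount to at least two thirds of the group.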
func GetThreshold(value int) (int, error) {
if value < 0 {
return 0, errors.New("negative input")
}
threshold := int(math.Ceil(float64(value)*2.0/3.0)) - 1
return threshold, nil
}
|
{
curve := btcec.S256()
return curve.IsOnCurve(x, y)
}
|