file_name (string, 3-137 chars) | prefix (string, 0-918k chars) | suffix (string, 0-962k chars) | middle (string, 0-812k chars) |
---|---|---|---|
index.tsx | import Layout from '../components/Layout' | const IndexPage = () => (
<Layout index="0">
<ArticelList/>
</Layout>
)
export default IndexPage | import ArticelList from './articles/index'
|
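Each row above splits one source file into three spans for fill-in-the-middle training: the text before the masked span (prefix), the text after it (suffix), and the masked span itself (middle). Read that way, the index.tsx row reassembles into the original file by concatenating prefix, middle, and suffix in that order. A minimal sketch in Go; the helper name is hypothetical and the literal strings are copied from the row above:

```go
package main

import "fmt"

// reassemble rebuilds the original file text from one dataset row.
// The table columns appear in the order prefix | suffix | middle, but the
// original file is prefix + middle + suffix.
func reassemble(prefix, suffix, middle string) string {
	return prefix + middle + suffix
}

func main() {
	// Spans taken from the index.tsx row above (identifier spelling kept as-is).
	prefix := "import Layout from '../components/Layout'\n"
	suffix := "const IndexPage = () => (\n  <Layout index=\"0\">\n    <ArticelList/>\n  </Layout>\n)\nexport default IndexPage\n"
	middle := "import ArticelList from './articles/index'\n"
	fmt.Print(reassemble(prefix, suffix, middle))
}
```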
ref.go | package vars
import "fmt"
type Ref string
func NewRef(name string) Ref {
return Ref(name)
}
func (ref Ref) Name() string {
return string(ref)
}
func (ref Ref) Var() string {
return fmt.Sprintf("{{%s}}", ref.Name())
}
func (ref Ref) String() string {
return string(ref)
}
func (ref Ref) Expand(vars Vars) string { | } | return Expand(vars, ref.Var()) |
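In the ref.go row, the middle span completes Expand by rendering the reference in its {{name}} form and delegating to a package-level Expand helper (that helper and the Vars type are not shown in the row). A self-contained sketch of only the behaviour visible above, using a hypothetical variable name:

```go
package main

import "fmt"

// Ref mirrors the string-backed type defined in the ref.go row above.
type Ref string

// Var renders the reference in the {{name}} template form that Expand consumes.
func (ref Ref) Var() string {
	return fmt.Sprintf("{{%s}}", string(ref))
}

func main() {
	ref := Ref("region")   // "region" is a hypothetical variable name
	fmt.Println(ref.Var()) // prints {{region}}
}
```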
watch.go | //go:build sspl && linux
// Copyright (c) 2020-present Mutagen IO, Inc.
//
// This program is free software: you can redistribute it and/or modify it under
// the terms of the Server Side Public License, version 1, as published by
// MongoDB, Inc.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the Server Side Public License for more
// details.
//
// You should have received a copy of the Server Side Public License along with
// this program. If not, see
// <http://www.mongodb.com/licensing/server-side-public-license>.
package fanotify
import (
"context"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"golang.org/x/sys/unix"
"github.com/mutagen-io/mutagen/pkg/sidecar"
)
var (
// ErrWatchInternalOverflow indicates that a watcher saw an event buffering
// overflow in its underlying watching mechanism.
ErrWatchInternalOverflow = errors.New("internal event overflow")
// ErrWatchTerminated indicates that a watcher has been terminated.
ErrWatchTerminated = errors.New("watch terminated")
)
// RecursiveWatcher implements watching.RecursiveWatcher using fanotify.
type RecursiveWatcher struct {
// watch is a handle for closing the underlying fanotify watch descriptor.
watch io.Closer
// events is the event delivery channel.
events chan string
// writeErrorOnce ensures that only one error is written to errors.
writeErrorOnce sync.Once
// errors is the error delivery channel.
errors chan error
// cancel is the run loop cancellation function.
cancel context.CancelFunc
// done is the run loop completion signaling mechanism.
done sync.WaitGroup
}
// NewRecursiveWatcher creates a new fanotify-based recursive watcher using the
// specified target path.
func | (target string) (*RecursiveWatcher, error) {
// Enforce that the watch target path is absolute. This is necessary for our
// invocation of fanotify_mark and to adjust incoming event paths to be
// relative to the watch target.
if !filepath.IsAbs(target) {
return nil, errors.New("watch target path must be absolute")
}
// TODO: It's unclear if we want to perform symbolic link evaluation here
// like we do in other watchers. If we do, then we may want to make
// adjustments to our open and fanotify_mark calls below. At the moment,
// it's irrelevant to our very controlled use case. If we do perform
// symbolic link evaluation, then we can remove the filepath.Clean call.
// Ensure that the target is cleaned. This is necessary for adjusting event
// paths to be target-relative.
target = filepath.Clean(target)
// Determine the mount point for the path.
mountPoint := sidecar.VolumeMountPointForPath(target)
if mountPoint == "" {
return nil, errors.New("path does not exist at or below a mount point")
}
// Get a file descriptor for the mount point to use with open_by_handle_at.
// Despite the claim in the Linux open(2) man page, it doesn't appear that
// O_PATH returns a file descriptor suitable for use with all *at functions.
// Specifically, it doesn't work with open_by_handle_at, and thus we need to
// perform a "full" open operation.
mountDescriptor, err := unix.Open(
mountPoint,
unix.O_RDONLY|unix.O_CLOEXEC|unix.O_DIRECTORY|unix.O_NOFOLLOW, 0,
)
if err != nil {
return nil, fmt.Errorf("unable to open mount point: %w", err)
}
// Create an fanotify watch capable of detecting file events (i.e. using
// FAN_REPORT_FID). We set the descriptor for this watch to be non-blocking
// so that we can use an os.File to poll on it and enable read cancellation.
// The fanotify documentation explicitly states that this descriptor is
// compatible with epoll, poll, and select, so we know it will work with the
// Go poller. Also, because we're using FAN_REPORT_FID, we won't receive
// file descriptors automatically and thus don't need to provide flags for
// their construction.
watchDescriptor, err := unix.FanotifyInit(unix.FAN_REPORT_FID|unix.FAN_CLOEXEC|unix.FAN_NONBLOCK, 0)
if err != nil {
unix.Close(mountDescriptor)
return nil, fmt.Errorf("unable to initialize fanotify: %w", err)
}
// Add the target path to the watch. We notably exclude FAN_DELETE_SELF from
// our event mask because it is (almost) always accompanied by a stale file
// handle from which we cannot obtain a corresponding path. In rare cases
// (that come down to race conditions), it is possible to open a deleted
// file by its handle before it is fully removed from the filesystem, but
// the path will have " (deleted)" appended by the kernel. In any case, we
// don't need the deleted path when using accelerated scanning, only its
// parent path (which will come from the FAN_DELETE event). However, we do
// need FAN_MOVE_SELF, because FAN_MOVE alone will only tell accelerated
// scanning to rescan the parent, which in the case of file replacement on
// move wouldn't detect changes to the replaced file. We also exclude
// FAN_CLOSE_WRITE because it is generated in conjunction with a FAN_MODIFY
// event (which we already watch for) if changes are flushed to disk on
// closure, so we can skip it and avoid spurious events generated by closure
// of unmodified writable files. Also, despite what the documentation says,
// FAN_Q_OVERFLOW should not be specified as part of this mask, otherwise
// EINVAL will be returned. The generation of overflow events is automatic.
if err := unix.FanotifyMark(
watchDescriptor,
unix.FAN_MARK_ADD|unix.FAN_MARK_FILESYSTEM|unix.FAN_MARK_ONLYDIR|unix.FAN_MARK_DONT_FOLLOW,
unix.FAN_CREATE|unix.FAN_MOVE|unix.FAN_MODIFY|unix.FAN_ATTRIB|unix.FAN_DELETE|
unix.FAN_ONDIR,
-1, mountPoint,
); err != nil {
unix.Close(watchDescriptor)
unix.Close(mountDescriptor)
return nil, fmt.Errorf("unable to establish fanotify watch: %w", err)
}
// Convert the watch descriptor to an os.File so that it's pollable.
watch := os.NewFile(uintptr(watchDescriptor), "fanotify")
// Create a context to regulate the watcher's polling and run loops.
ctx, cancel := context.WithCancel(context.Background())
// Create the watcher.
watcher := &RecursiveWatcher{
watch: watch,
events: make(chan string),
errors: make(chan error, 1),
cancel: cancel,
}
// Track run loop termination.
watcher.done.Add(1)
// Start the run loop.
go func() {
err := watcher.run(ctx, watch, mountDescriptor, target)
unix.Close(mountDescriptor)
watcher.writeErrorOnce.Do(func() {
watcher.errors <- err
})
watcher.done.Done()
}()
// Success.
return watcher, nil
}
// run implements the event processing run loop for RecursiveWatcher.
func (w *RecursiveWatcher) run(ctx context.Context, watch io.Reader, mountDescriptor int, target string) error {
// Compute the prefix that we'll need to trim from event paths to make them
// target-relative (if they aren't the target itself). We know that target
// will be clean, and thus lacking a trailing slash (unless it's the system
// root path).
var eventPathTrimPrefix string
if target == "/" {
eventPathTrimPrefix = "/"
} else {
eventPathTrimPrefix = target + "/"
}
// Loop until cancellation or a read error occurs.
var buffer [fanotifyReadBufferSize]byte
for {
// Read the next group of events. Note that the fanotify API will only
// return whole events into the buffer, so there's no need to worry
// about partial event reads.
read, err := watch.Read(buffer[:])
if err != nil {
return fmt.Errorf("unable to read from fanotify watch: %w", err)
}
// Process the events.
populated := buffer[:read]
for len(populated) > 0 {
// Process a single event.
remaining, path, err := processEvent(mountDescriptor, populated)
if err != nil {
if err == ErrWatchInternalOverflow {
return err
}
return fmt.Errorf("unable to extract event path: %w", err)
}
populated = remaining
// If the path was stale, then just ignore it.
if path == pathStale {
continue
}
// Convert the event path to be target-relative. We have to ignore
// anything that doesn't fall at or below our watch target for two
// reasons:
//
// First, our watch and path resolution location is the mount point,
// not necessarily the watch target. Much like on Windows, we have
// to watch outside of the target in order to ensure a stable watch
// and to ensure that we're seeing changes to the target itself
// (though, in this case, we do it more for stability since we know
// the volume mounts aren't likely to disappear).
//
// Second, with fanotify, we're watching an entire filesystem, and
// thus we may see events that occur outside of the mount point that
// we're watching, especially if that mount point isn't mounting the
// filesystem root. If the event can't be referenced by a path
// beneath the mount point that we're using with open_by_handle_at,
// then a read of its path will result in "/". For example, watching
// container volumes that are just bind-mounted directories from the
// host filesystem will result in a watch of the entire host
// filesystem, but a modification to a path outside of the volume
// directory on the host filesystem will yield a notification with
// a path resolving to "/" (when resolved relative to the mount
// point). This is basically designed to indicate that the event
// occurred outside the mount point. Fortunately, as long as the
// target location can be referenced by a path beneath the mount
// point being used as the reference point for open_by_handle_at, a
// valid path will be returned. This means (e.g.) that a single
// volume mounted in multiple containers (even at different mount
// points) will still yield event notifications with paths relative
// to the mount point within the container performing the watch.
if path == target {
path = ""
} else if strings.HasPrefix(path, eventPathTrimPrefix) {
path = path[len(eventPathTrimPrefix):]
} else {
continue
}
// Transmit the path.
select {
case w.events <- path:
case <-ctx.Done():
return ErrWatchTerminated
}
}
}
}
// Events implements filesystem/watching.RecursiveWatcher.Events.
func (w *RecursiveWatcher) Events() <-chan string {
return w.events
}
// Errors implements filesystem/watching.RecursiveWatcher.Errors.
func (w *RecursiveWatcher) Errors() <-chan error {
return w.errors
}
// Terminate implements filesystem/watching.RecursiveWatcher.Terminate.
func (w *RecursiveWatcher) Terminate() error {
// Write a termination error to the errors channel since we're going to
// close the watch and we don't want a read error in the run loop to appear
// to the consumer if it's simply due to termination.
w.writeErrorOnce.Do(func() {
w.errors <- ErrWatchTerminated
})
// Signal termination to run loop. The run loop can block in multiple ways
// and thus needs both watch closure and context cancellation to signal
// termination. The mount descriptor used for resolving paths is closed by
// the run loop Goroutine when it exits.
err := w.watch.Close()
w.cancel()
// Wait for the run loop to exit.
w.done.Wait()
// Done.
return err
}
| NewRecursiveWatcher |
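The watch.go row defines the watcher's full public surface: NewRecursiveWatcher, Events, Errors, and Terminate. A minimal consumption sketch follows; the import path and the target path are assumptions, not taken from the row:

```go
package main

import (
	"log"

	// Hypothetical import path for the fanotify package defined above.
	"github.com/mutagen-io/mutagen/pkg/filesystem/watching/fanotify"
)

func main() {
	// The constructor requires an absolute, cleaned path at or below a
	// volume mount point; "/volumes/data" is a placeholder.
	watcher, err := fanotify.NewRecursiveWatcher("/volumes/data")
	if err != nil {
		log.Fatalf("unable to create watcher: %v", err)
	}
	defer watcher.Terminate()

	// Event paths arrive relative to the watch target; an empty string
	// refers to the target itself.
	for {
		select {
		case path := <-watcher.Events():
			log.Printf("change at %q", path)
		case err := <-watcher.Errors():
			log.Printf("watch ended: %v", err)
			return
		}
	}
}
```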
client_user.go | package users
import (
"fmt"
"github.com/logzio/logzio_terraform_client/client"
)
const (
userServiceEndpoint = "%s/v1/user-management"
)
const (
fldUserId string = "id"
fldUserUsername string = "username"
fldUserFullname string = "fullName"
fldUserAccountId string = "accountID"
fldUserRoles string = "roles"
fldUserActive string = "active"
)
const (
UserTypeUser int32 = 2
UserTypeAdmin int32 = 3
)
type User struct {
Id int64
Username string
Fullname string
AccountId int64
Roles []int32
Active bool
}
type UserError struct {
errorCode string
message string
requestId string
parameters map[string]interface{}
}
type UsersClient struct {
*client.Client
}
// Creates a new entry point into the users functions; accepts the user's logz.io API token and the API base URL
func New(apiToken, baseUrl string) (*UsersClient, error) {
if len(apiToken) == 0 |
if len(baseUrl) == 0 {
return nil, fmt.Errorf("Base URL not defined")
}
c := &UsersClient{
Client: client.New(apiToken, baseUrl),
}
return c, nil
}
func jsonToUser(json map[string]interface{}) User {
user := User{
Id: int64(json[fldUserId].(float64)),
Username: json[fldUserUsername].(string),
Fullname: json[fldUserFullname].(string),
AccountId: int64(json[fldUserAccountId].(float64)),
Active: json[fldUserActive].(bool),
}
roles := json[fldUserRoles].([]interface{})
var rs []int32
for _, num := range roles {
rs = append(rs, int32(num.(float64)))
}
user.Roles = rs
return user
}
| {
return nil, fmt.Errorf("API token not defined")
} |
pipe_windows.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Named pipes implementation for windows
//!
//! If you are unfortunate enough to be reading this code, I would like to first
//! apologize. This was my first encounter with windows named pipes, and it
//! didn't exactly turn out very cleanly. If you, too, are new to named pipes,
//! read on as I'll try to explain some fun things that I ran into.
//!
//! # Unix pipes vs Named pipes
//!
//! As with everything else, named pipes on windows are pretty different from
//! unix pipes on unix. On unix, you use one "server pipe" to accept new client
//! pipes. So long as this server pipe is active, new child pipes can
//! connect. On windows, you instead have a number of "server pipes", and each
//! of these server pipes can throughout their lifetime be attached to a client
//! or not. Once attached to a client, a server pipe may then disconnect at a
//! later date.
//!
//! # Accepting clients
//!
//! As with most other I/O interfaces, our Listener/Acceptor/Stream interfaces
//! are built around the unix flavors. This means that we have one "server
//! pipe" to which many clients can connect. In order to make this compatible
//! with the windows model, each connected client consumes ownership of a server
//! pipe, and then a new server pipe is created for the next client.
//!
//! Note that the server pipes attached to clients are never given back to the
//! listener for recycling. This could possibly be implemented with a channel so
//! the listener half can re-use server pipes, but for now I err'd on the simple
//! side of things. Each stream accepted by a listener will destroy the server
//! pipe after the stream is dropped.
//!
//! This model ends up having a small race or two, and you can find more details
//! on the `native_accept` method.
//!
//! # Simultaneous reads and writes
//!
//! In testing, I found that two simultaneous writes and two simultaneous reads
//! on a pipe ended up working out just fine, but problems were encountered when
//! a read was executed simultaneously with a write. After some googling around,
//! it sounded like named pipes just weren't built for this kind of interaction,
//! and the suggested solution was to use overlapped I/O.
//!
//! I don't really know what overlapped I/O is, but my basic understanding after
//! reading about it is that you have an external Event which is used to signal
//! I/O completion, passed around in some OVERLAPPED structures. As to what this
//! is, I'm not exactly sure.
//!
//! This problem implies that all named pipes are created with the
//! FILE_FLAG_OVERLAPPED option. This means that all of their I/O is
//! asynchronous. Each I/O operation has an associated OVERLAPPED structure, and
//! inside of this structure is a HANDLE from CreateEvent. After the I/O is
//! determined to be pending (may complete in the future), the
//! GetOverlappedResult function is used to block on the event, waiting for the
//! I/O to finish.
//!
//! This scheme ended up working well enough. There were two snags that I ran
//! into, however:
//!
//! * Each UnixStream instance needs its own read/write events to wait on. These
//! can't be shared among clones of the same stream because the documentation
//! states that it unsets the event when the I/O is started (would possibly
//! corrupt other events simultaneously waiting). For convenience's sake,
//! these events are lazily initialized.
//!
//! * Each server pipe needs to be created with FILE_FLAG_OVERLAPPED in addition
//! to all pipes created through `connect`. Notably this means that the
//! ConnectNamedPipe function is nonblocking, implying that the Listener needs
//! to have yet another event to do the actual blocking.
//!
//! # Conclusion
//!
//! The conclusion here is that I probably don't know the best way to work with
//! windows named pipes, but the solution here seems to work well enough to get
//! the test suite passing (the suite is in libstd), and that's good enough for
//! me!
use alloc::arc::Arc;
use libc;
use std::c_str::CString;
use std::mem;
use std::os;
use std::ptr;
use std::rt::rtio;
use std::rt::rtio::{IoResult, IoError};
use std::sync::atomic;
use std::rt::mutex;
use super::c;
use super::util;
use super::file::to_utf16;
struct Event(libc::HANDLE);
impl Event {
fn new(manual_reset: bool, initial_state: bool) -> IoResult<Event> {
let event = unsafe {
libc::CreateEventW(ptr::null_mut(),
manual_reset as libc::BOOL,
initial_state as libc::BOOL,
ptr::null())
};
if event as uint == 0 {
Err(super::last_error())
} else {
Ok(Event(event))
}
}
fn handle(&self) -> libc::HANDLE { let Event(handle) = *self; handle }
}
impl Drop for Event {
fn drop(&mut self) {
unsafe { let _ = libc::CloseHandle(self.handle()); }
}
}
struct Inner {
handle: libc::HANDLE,
lock: mutex::NativeMutex,
read_closed: atomic::AtomicBool,
write_closed: atomic::AtomicBool,
}
impl Inner {
fn new(handle: libc::HANDLE) -> Inner {
Inner {
handle: handle,
lock: unsafe { mutex::NativeMutex::new() },
read_closed: atomic::AtomicBool::new(false),
write_closed: atomic::AtomicBool::new(false),
}
}
}
impl Drop for Inner {
fn drop(&mut self) {
unsafe {
let _ = libc::FlushFileBuffers(self.handle);
let _ = libc::CloseHandle(self.handle);
}
}
}
unsafe fn pipe(name: *const u16, init: bool) -> libc::HANDLE {
libc::CreateNamedPipeW(
name,
libc::PIPE_ACCESS_DUPLEX |
if init {libc::FILE_FLAG_FIRST_PIPE_INSTANCE} else {0} |
libc::FILE_FLAG_OVERLAPPED,
libc::PIPE_TYPE_BYTE | libc::PIPE_READMODE_BYTE |
libc::PIPE_WAIT,
libc::PIPE_UNLIMITED_INSTANCES,
65536,
65536,
0,
ptr::null_mut()
)
}
pub fn await(handle: libc::HANDLE, deadline: u64,
events: &[libc::HANDLE]) -> IoResult<uint> {
use libc::consts::os::extra::{WAIT_FAILED, WAIT_TIMEOUT, WAIT_OBJECT_0};
// If we've got a timeout, use WaitForSingleObject in tandem with CancelIo
// to figure out if we should indeed get the result.
let ms = if deadline == 0 {
libc::INFINITE as u64
} else {
let now = ::io::timer::now();
if deadline < now {0} else {deadline - now}
};
let ret = unsafe {
c::WaitForMultipleObjects(events.len() as libc::DWORD,
events.as_ptr(),
libc::FALSE,
ms as libc::DWORD)
};
match ret {
WAIT_FAILED => Err(super::last_error()),
WAIT_TIMEOUT => unsafe {
let _ = c::CancelIo(handle);
Err(util::timeout("operation timed out"))
},
n => Ok((n - WAIT_OBJECT_0) as uint)
}
}
fn epipe() -> IoError {
IoError {
code: libc::ERROR_BROKEN_PIPE as uint,
extra: 0,
detail: None,
}
}
////////////////////////////////////////////////////////////////////////////////
// Unix Streams
////////////////////////////////////////////////////////////////////////////////
pub struct UnixStream {
inner: Arc<Inner>,
write: Option<Event>,
read: Option<Event>,
read_deadline: u64,
write_deadline: u64,
}
impl UnixStream {
fn try_connect(p: *const u16) -> Option<libc::HANDLE> {
// Note that most of this is lifted from the libuv implementation.
// The idea is that if we fail to open a pipe in read/write mode
// that we try afterwards in just read or just write
let mut result = unsafe {
libc::CreateFileW(p,
libc::GENERIC_READ | libc::GENERIC_WRITE,
0,
ptr::null_mut(),
libc::OPEN_EXISTING,
libc::FILE_FLAG_OVERLAPPED,
ptr::null_mut())
};
if result != libc::INVALID_HANDLE_VALUE {
return Some(result)
}
let err = unsafe { libc::GetLastError() };
if err == libc::ERROR_ACCESS_DENIED as libc::DWORD {
result = unsafe {
libc::CreateFileW(p,
libc::GENERIC_READ | libc::FILE_WRITE_ATTRIBUTES,
0,
ptr::null_mut(),
libc::OPEN_EXISTING,
libc::FILE_FLAG_OVERLAPPED,
ptr::null_mut())
};
if result != libc::INVALID_HANDLE_VALUE {
return Some(result)
}
}
let err = unsafe { libc::GetLastError() };
if err == libc::ERROR_ACCESS_DENIED as libc::DWORD {
result = unsafe {
libc::CreateFileW(p,
libc::GENERIC_WRITE | libc::FILE_READ_ATTRIBUTES,
0,
ptr::null_mut(),
libc::OPEN_EXISTING,
libc::FILE_FLAG_OVERLAPPED,
ptr::null_mut())
};
if result != libc::INVALID_HANDLE_VALUE {
return Some(result)
}
}
None
}
pub fn connect(addr: &CString, timeout: Option<u64>) -> IoResult<UnixStream> {
let addr = try!(to_utf16(addr));
let start = ::io::timer::now();
loop {
match UnixStream::try_connect(addr.as_ptr()) {
Some(handle) => {
let inner = Inner::new(handle);
let mut mode = libc::PIPE_TYPE_BYTE |
libc::PIPE_READMODE_BYTE |
libc::PIPE_WAIT;
let ret = unsafe {
libc::SetNamedPipeHandleState(inner.handle,
&mut mode,
ptr::null_mut(),
ptr::null_mut())
};
return if ret == 0 {
Err(super::last_error())
} else {
Ok(UnixStream {
inner: Arc::new(inner),
read: None,
write: None,
read_deadline: 0,
write_deadline: 0,
})
}
}
None => {}
}
// On windows, if you fail to connect, you may need to call the
// `WaitNamedPipe` function, and this is indicated with an error
// code of ERROR_PIPE_BUSY.
let code = unsafe { libc::GetLastError() };
if code as int != libc::ERROR_PIPE_BUSY as int {
return Err(super::last_error())
}
match timeout {
Some(timeout) => {
let now = ::io::timer::now();
let timed_out = (now - start) >= timeout || unsafe {
let ms = (timeout - (now - start)) as libc::DWORD;
libc::WaitNamedPipeW(addr.as_ptr(), ms) == 0
};
if timed_out {
return Err(util::timeout("connect timed out"))
}
}
// An example I found on Microsoft's website used 20
// seconds, libuv uses 30 seconds, hence we make the
// obvious choice of waiting for 25 seconds.
None => {
if unsafe { libc::WaitNamedPipeW(addr.as_ptr(), 25000) } == 0 {
return Err(super::last_error())
}
}
}
}
}
fn handle(&self) -> libc::HANDLE { self.inner.handle }
fn read_closed(&self) -> bool |
fn write_closed(&self) -> bool {
self.inner.write_closed.load(atomic::SeqCst)
}
fn cancel_io(&self) -> IoResult<()> {
match unsafe { c::CancelIoEx(self.handle(), ptr::null_mut()) } {
0 if os::errno() == libc::ERROR_NOT_FOUND as uint => {
Ok(())
}
0 => Err(super::last_error()),
_ => Ok(())
}
}
}
impl rtio::RtioPipe for UnixStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
if self.read.is_none() {
self.read = Some(try!(Event::new(true, false)));
}
let mut bytes_read = 0;
let mut overlapped: libc::OVERLAPPED = unsafe { mem::zeroed() };
overlapped.hEvent = self.read.as_ref().unwrap().handle();
// Pre-flight check to see if the reading half has been closed. This
// must be done before issuing the ReadFile request, but after we
// acquire the lock.
//
// See comments in close_read() about why this lock is necessary.
let guard = unsafe { self.inner.lock.lock() };
if self.read_closed() {
return Err(util::eof())
}
// Issue a nonblocking request, succeeding quickly if it happened to
// succeed.
let ret = unsafe {
libc::ReadFile(self.handle(),
buf.as_ptr() as libc::LPVOID,
buf.len() as libc::DWORD,
&mut bytes_read,
&mut overlapped)
};
if ret != 0 { return Ok(bytes_read as uint) }
// If our errno doesn't say that the I/O is pending, then we hit some
// legitimate error and return immediately.
if os::errno() != libc::ERROR_IO_PENDING as uint {
return Err(super::last_error())
}
// Now that we've issued a successful nonblocking request, we need to
// wait for it to finish. This can all be done outside the lock because
// we'll see any invocation of CancelIoEx. We also call this in a loop
// because if we're woken up when the writing half is closed, we just
// need to realize that the reading half wasn't closed and go right
// back to sleep.
drop(guard);
loop {
// Process a timeout if one is pending
let wait_succeeded = await(self.handle(), self.read_deadline,
[overlapped.hEvent]);
let ret = unsafe {
libc::GetOverlappedResult(self.handle(),
&mut overlapped,
&mut bytes_read,
libc::TRUE)
};
// If we succeeded, or we failed for some reason other than
// CancelIoEx, return immediately
if ret != 0 { return Ok(bytes_read as uint) }
if os::errno() != libc::ERROR_OPERATION_ABORTED as uint {
return Err(super::last_error())
}
// If the reading half is now closed, then we're done. If we woke up
// because the writing half was closed, keep trying.
if wait_succeeded.is_err() {
return Err(util::timeout("read timed out"))
}
if self.read_closed() {
return Err(util::eof())
}
}
}
fn write(&mut self, buf: &[u8]) -> IoResult<()> {
if self.write.is_none() {
self.write = Some(try!(Event::new(true, false)));
}
let mut offset = 0;
let mut overlapped: libc::OVERLAPPED = unsafe { mem::zeroed() };
overlapped.hEvent = self.write.as_ref().unwrap().handle();
while offset < buf.len() {
let mut bytes_written = 0;
// This sequence below is quite similar to the one found in read().
// Some careful looping is done to ensure that if close_write() is
// invoked we bail out early, and if close_read() is invoked we keep
// going after we woke up.
//
// See comments in close_read() about why this lock is necessary.
let guard = unsafe { self.inner.lock.lock() };
if self.write_closed() {
return Err(epipe())
}
let ret = unsafe {
libc::WriteFile(self.handle(),
buf[offset..].as_ptr() as libc::LPVOID,
(buf.len() - offset) as libc::DWORD,
&mut bytes_written,
&mut overlapped)
};
let err = os::errno();
drop(guard);
if ret == 0 {
if err != libc::ERROR_IO_PENDING as uint {
return Err(IoError {
code: err as uint,
extra: 0,
detail: Some(os::error_string(err as uint)),
})
}
// Process a timeout if one is pending
let wait_succeeded = await(self.handle(), self.write_deadline,
[overlapped.hEvent]);
let ret = unsafe {
libc::GetOverlappedResult(self.handle(),
&mut overlapped,
&mut bytes_written,
libc::TRUE)
};
// If we weren't aborted, this was a legit error, if we were
// aborted, then check to see if the write half was actually
// closed or whether we woke up from the read half closing.
if ret == 0 {
if os::errno() != libc::ERROR_OPERATION_ABORTED as uint {
return Err(super::last_error())
}
if !wait_succeeded.is_ok() {
let amt = offset + bytes_written as uint;
return if amt > 0 {
Err(IoError {
code: libc::ERROR_OPERATION_ABORTED as uint,
extra: amt,
detail: Some("short write during write".to_string()),
})
} else {
Err(util::timeout("write timed out"))
}
}
if self.write_closed() {
return Err(epipe())
}
continue // retry
}
}
offset += bytes_written as uint;
}
Ok(())
}
fn clone(&self) -> Box<rtio::RtioPipe + Send> {
box UnixStream {
inner: self.inner.clone(),
read: None,
write: None,
read_deadline: 0,
write_deadline: 0,
} as Box<rtio::RtioPipe + Send>
}
fn close_read(&mut self) -> IoResult<()> {
// On windows, there's no actual shutdown() method for pipes, so we're
// forced to emulate the behavior manually at the application level. To
// do this, we need to both cancel any pending requests, as well as
// prevent all future requests from succeeding. These two operations are
// not atomic with respect to one another, so we must use a lock to do
// so.
//
// The read() code looks like:
//
// 1. Make sure the pipe is still open
// 2. Submit a read request
// 3. Wait for the read request to finish
//
// The race this lock is preventing is if another thread invokes
// close_read() between steps 1 and 2. By atomically executing steps 1
// and 2 with a lock with respect to close_read(), we're guaranteed that
// no thread will erroneously sit in a read forever.
let _guard = unsafe { self.inner.lock.lock() };
self.inner.read_closed.store(true, atomic::SeqCst);
self.cancel_io()
}
fn close_write(&mut self) -> IoResult<()> {
// see comments in close_read() for why this lock is necessary
let _guard = unsafe { self.inner.lock.lock() };
self.inner.write_closed.store(true, atomic::SeqCst);
self.cancel_io()
}
fn set_timeout(&mut self, timeout: Option<u64>) {
let deadline = timeout.map(|a| ::io::timer::now() + a).unwrap_or(0);
self.read_deadline = deadline;
self.write_deadline = deadline;
}
fn set_read_timeout(&mut self, timeout: Option<u64>) {
self.read_deadline = timeout.map(|a| ::io::timer::now() + a).unwrap_or(0);
}
fn set_write_timeout(&mut self, timeout: Option<u64>) {
self.write_deadline = timeout.map(|a| ::io::timer::now() + a).unwrap_or(0);
}
}
////////////////////////////////////////////////////////////////////////////////
// Unix Listener
////////////////////////////////////////////////////////////////////////////////
pub struct UnixListener {
handle: libc::HANDLE,
name: CString,
}
impl UnixListener {
pub fn bind(addr: &CString) -> IoResult<UnixListener> {
// Although we technically don't need the pipe until much later, we
// create the initial handle up front to test the validity of the name
// and such.
let addr_v = try!(to_utf16(addr));
let ret = unsafe { pipe(addr_v.as_ptr(), true) };
if ret == libc::INVALID_HANDLE_VALUE {
Err(super::last_error())
} else {
Ok(UnixListener { handle: ret, name: addr.clone() })
}
}
pub fn native_listen(self) -> IoResult<UnixAcceptor> {
Ok(UnixAcceptor {
listener: self,
event: try!(Event::new(true, false)),
deadline: 0,
inner: Arc::new(AcceptorState {
abort: try!(Event::new(true, false)),
closed: atomic::AtomicBool::new(false),
}),
})
}
}
impl Drop for UnixListener {
fn drop(&mut self) {
unsafe { let _ = libc::CloseHandle(self.handle); }
}
}
impl rtio::RtioUnixListener for UnixListener {
fn listen(self: Box<UnixListener>)
-> IoResult<Box<rtio::RtioUnixAcceptor + Send>> {
self.native_listen().map(|a| {
box a as Box<rtio::RtioUnixAcceptor + Send>
})
}
}
pub struct UnixAcceptor {
inner: Arc<AcceptorState>,
listener: UnixListener,
event: Event,
deadline: u64,
}
struct AcceptorState {
abort: Event,
closed: atomic::AtomicBool,
}
impl UnixAcceptor {
pub fn native_accept(&mut self) -> IoResult<UnixStream> {
// This function has some funky implementation details when working with
// unix pipes. On windows, each server named pipe handle can be
// connected to one or zero clients. To the best of my knowledge, a
// named server is considered active and present if there exists at
// least one server named pipe for it.
//
// The model of this function is to take the current known server
// handle, connect a client to it, and then transfer ownership to the
// UnixStream instance. The next time accept() is invoked, it'll need a
// different server handle to connect a client to.
//
// Note that there is a possible race here. Once our server pipe is
// handed off to a `UnixStream` object, the stream could be closed,
// meaning that there would be no active server pipes, hence even though
// we have a valid `UnixAcceptor`, no one can connect to it. For this
// reason, we generate the next accept call's server pipe at the end of
// this function call.
//
// This provides us an invariant that we always have at least one server
// connection open at a time, meaning that all connects to this acceptor
// should succeed while this is active.
//
// The actual implementation of doing this is a little tricky. Once a
// server pipe is created, a client can connect to it at any time. I
// assume that which server a client connects to is nondeterministic, so
// we also need to guarantee that the only server able to be connected
// to is the one that we're calling ConnectNamedPipe on. This means that
// we have to create the second server pipe *after* we've already
// accepted a connection. In order to at least somewhat gracefully
// handle errors, this means that if the second server pipe creation
// fails that we disconnect the connected client and then just keep
// using the original server pipe.
let handle = self.listener.handle;
// If we've had an artificial call to close_accept, be sure to never
// proceed in accepting new clients in the future
if self.inner.closed.load(atomic::SeqCst) { return Err(util::eof()) }
let name = try!(to_utf16(&self.listener.name));
// Once we've got a "server handle", we need to wait for a client to
// connect. The ConnectNamedPipe function will block this thread until
// someone on the other end connects. This function can "fail" if a
// client connects after we created the pipe but before we got down
// here. Thanks windows.
let mut overlapped: libc::OVERLAPPED = unsafe { mem::zeroed() };
overlapped.hEvent = self.event.handle();
if unsafe { libc::ConnectNamedPipe(handle, &mut overlapped) == 0 } {
let mut err = unsafe { libc::GetLastError() };
if err == libc::ERROR_IO_PENDING as libc::DWORD {
// Process a timeout if one is pending
let wait_succeeded = await(handle, self.deadline,
[self.inner.abort.handle(),
overlapped.hEvent]);
// This will block until the overlapped I/O is completed. The
// timeout was previously handled, so this will either block in
// the normal case or succeed very quickly in the timeout case.
let ret = unsafe {
let mut transfer = 0;
libc::GetOverlappedResult(handle,
&mut overlapped,
&mut transfer,
libc::TRUE)
};
if ret == 0 {
if wait_succeeded.is_ok() {
err = unsafe { libc::GetLastError() };
} else {
return Err(util::timeout("accept timed out"))
}
} else {
// we succeeded, bypass the check below
err = libc::ERROR_PIPE_CONNECTED as libc::DWORD;
}
}
if err != libc::ERROR_PIPE_CONNECTED as libc::DWORD {
return Err(super::last_error())
}
}
// Now that we've got a connected client to our handle, we need to
// create a second server pipe. If this fails, we disconnect the
// connected client and return an error (see comments above).
let new_handle = unsafe { pipe(name.as_ptr(), false) };
if new_handle == libc::INVALID_HANDLE_VALUE {
let ret = Err(super::last_error());
// If our disconnection fails, then there's not really a whole lot
// that we can do, so panic
let err = unsafe { libc::DisconnectNamedPipe(handle) };
assert!(err != 0);
return ret;
} else {
self.listener.handle = new_handle;
}
// Transfer ownership of our handle into this stream
Ok(UnixStream {
inner: Arc::new(Inner::new(handle)),
read: None,
write: None,
read_deadline: 0,
write_deadline: 0,
})
}
}
impl rtio::RtioUnixAcceptor for UnixAcceptor {
fn accept(&mut self) -> IoResult<Box<rtio::RtioPipe + Send>> {
self.native_accept().map(|s| box s as Box<rtio::RtioPipe + Send>)
}
fn set_timeout(&mut self, timeout: Option<u64>) {
self.deadline = timeout.map(|i| i + ::io::timer::now()).unwrap_or(0);
}
fn clone(&self) -> Box<rtio::RtioUnixAcceptor + Send> {
let name = to_utf16(&self.listener.name).ok().unwrap();
box UnixAcceptor {
inner: self.inner.clone(),
event: Event::new(true, false).ok().unwrap(),
deadline: 0,
listener: UnixListener {
name: self.listener.name.clone(),
handle: unsafe {
let p = pipe(name.as_ptr(), false) ;
assert!(p != libc::INVALID_HANDLE_VALUE as libc::HANDLE);
p
},
},
} as Box<rtio::RtioUnixAcceptor + Send>
}
fn close_accept(&mut self) -> IoResult<()> {
self.inner.closed.store(true, atomic::SeqCst);
let ret = unsafe {
c::SetEvent(self.inner.abort.handle())
};
if ret == 0 {
Err(super::last_error())
} else {
Ok(())
}
}
}
| {
self.inner.read_closed.load(atomic::SeqCst)
} |
test_smtp.py | """integration tests for BridgeDB ."""
from __future__ import print_function
import smtplib
import asyncore
import threading
import queue
import random
import os
from smtpd import SMTPServer
from twisted.trial import unittest
from twisted.trial.unittest import FailTest
from twisted.trial.unittest import SkipTest
from bridgedb.test.util import processExists
from bridgedb.test.util import getBridgeDBPID
# ------------- SMTP Client Config
SMTP_DEBUG_LEVEL = 0 # set to 1 to see SMTP message exchange
BRIDGEDB_SMTP_SERVER_ADDRESS = "localhost"
BRIDGEDB_SMTP_SERVER_PORT = 6725
# %d is parameterised with a random integer to make the sender unique
FROM_ADDRESS_TEMPLATE = "test%[email protected]"
# Minimum value used to parameterise FROM_ADDRESS_TEMPLATE
MIN_FROM_ADDRESS = 1
# Max value used to parameterise FROM_ADDRESS_TEMPLATE. Needs to be pretty big
# to reduce the chance of collisions
MAX_FROM_ADDRESS = 10**8
TO_ADDRESS = "[email protected]"
MESSAGE_TEMPLATE = """From: %s
To: %s
Subject: testing
get bridges"""
# ------------- SMTP Server Setup
# Set up an SMTP server which we use to check for responses
# from bridgedb. This needs to be done before sending the actual mail
LOCAL_SMTP_SERVER_ADDRESS = 'localhost'
LOCAL_SMTP_SERVER_PORT = 2525 # Must be the same as bridgedb's EMAIL_SMTP_PORT
class EmailServer(SMTPServer):
def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
''' Overridden from SMTP server, called whenever a message is received'''
self.message_queue.put(data)
def thread_proc(self):
''' This function runs in a thread, and will continue looping
until the _stop Event object is set by the stop() function'''
while self._stop.is_set() == False:
asyncore.loop(timeout=0.0, count=1)
# Must close, or asyncore will hold on to the socket and subsequent
# tests will fail with 'Address not in use'.
self.close()
def start(self):
self.message_queue = queue.Queue()
self._stop = threading.Event()
self._thread = threading.Thread(target=self.thread_proc)
# Ensures that if any tests do fail, then threads will exit when the
# parent exits.
self._thread.setDaemon(True)
self._thread.start()
@classmethod
def startServer(cls):
#print("Starting SMTP server on %s:%s"
# % (LOCAL_SMTP_SERVER_ADDRESS, LOCAL_SMTP_SERVER_PORT))
server = EmailServer((LOCAL_SMTP_SERVER_ADDRESS,
LOCAL_SMTP_SERVER_PORT),
None)
server.start()
return server
def stop(self):
# Signal thread_proc to stop:
self._stop.set()
# Wait for thread_proc to return (shouldn't take long)
self._thread.join()
assert self._thread.is_alive() == False, "Thread is alive and kicking"
def getAndCheckMessageContains(self, text, timeoutInSecs=2.0):
try:
message = self.message_queue.get(block=True, timeout=timeoutInSecs)
# Queue.Empty, according to its documentation, is only supposed to be
# raised when Queue.get(block=False) or Queue.get_nowait() are called.
# I've no idea why it's getting raised here, when we're blocking for
# it, but nonetheless it causes occasional, non-deterministic CI
# failures:
#
# https://travis-ci.org/isislovecruft/bridgedb/jobs/58996136#L3281
except queue.Empty:
pass
else:
assert message.find(text) != -1, ("Message did not contain text '%s'. "
"Full message is:\n %s"
% (text, message))
def checkNoMessageReceived(self, timeoutInSecs=2.0):
try:
self.message_queue.get(block=True, timeout=timeoutInSecs)
except queue.Empty:
return True
assert False, "Found a message in the queue, but expected none"
def sendMail(fromAddress):
#print("Connecting to %s:%d"
# % (BRIDGEDB_SMTP_SERVER_ADDRESS, BRIDGEDB_SMTP_SERVER_PORT))
client = smtplib.SMTP(BRIDGEDB_SMTP_SERVER_ADDRESS,
BRIDGEDB_SMTP_SERVER_PORT)
client.set_debuglevel(SMTP_DEBUG_LEVEL)
#print("Sending mail TO:%s, FROM:%s"
# % (TO_ADDRESS, fromAddress))
result = client.sendmail(fromAddress, TO_ADDRESS,
MESSAGE_TEMPLATE % (fromAddress, TO_ADDRESS))
assert result == {}, "Failed to send mail"
client.quit()
class SMTPTests(unittest.TestCase):
def setUp(self):
'''Called at the start of each test, ensures that the SMTP server is
running.
'''
here = os.getcwd()
topdir = here.rstrip('_trial_temp')
self.rundir = os.path.join(topdir, 'run')
self.pidfile = os.path.join(self.rundir, 'bridgedb.pid')
self.pid = getBridgeDBPID(self.pidfile)
self.server = EmailServer.startServer()
def | (self):
'''Called after each test, ensures that the SMTP server is cleaned up.
'''
self.server.stop()
def test_getBridges(self):
if os.environ.get("CI"):
if not self.pid or not processExists(self.pid):
raise FailTest("Could not start BridgeDB process on CI server!")
if not self.pid or not processExists(self.pid):
raise SkipTest("Can't run test: no BridgeDB process running.")
# send the mail to bridgedb, choosing a random email address
sendMail(fromAddress=FROM_ADDRESS_TEMPLATE
% random.randint(MIN_FROM_ADDRESS, MAX_FROM_ADDRESS))
# then check that our local SMTP server received a response
# and that response contained some bridges
self.server.getAndCheckMessageContains(b"Here are your bridges")
def test_getBridges_rateLimitExceeded(self):
if os.environ.get("CI"):
if not self.pid or not processExists(self.pid):
raise FailTest("Could not start BridgeDB process on CI server!")
if not self.pid or not processExists(self.pid):
raise SkipTest("Can't run test: no BridgeDB process running.")
# send the mail to bridgedb, choosing a random email address
FROM_ADDRESS = FROM_ADDRESS_TEMPLATE % random.randint(
MIN_FROM_ADDRESS, MAX_FROM_ADDRESS)
sendMail(FROM_ADDRESS)
# then check that our local SMTP server received a response
# and that response contained some bridges
self.server.getAndCheckMessageContains(b"Here are your bridges")
# send another request from the same email address
sendMail(FROM_ADDRESS)
# this time, the email response should not contain any bridges
self.server.getAndCheckMessageContains(
b"You have exceeded the rate limit. Please slow down!")
# then we send another request from the same email address
sendMail(FROM_ADDRESS)
# now there should be no response at all (wait 1 second to make sure)
self.server.checkNoMessageReceived(timeoutInSecs=1.0)
def test_getBridges_stressTest(self):
'''Sends a large number of emails in a short period of time, and checks
that a response is received for each message.
'''
if os.environ.get("CI"):
if not self.pid or not processExists(self.pid):
raise FailTest("Could not start BridgeDB process on CI server!")
if not self.pid or not processExists(self.pid):
raise SkipTest("Can't run test: no BridgeDB process running.")
NUM_MAILS = 100
for i in range(NUM_MAILS):
# Note: if by chance two emails with the same FROM_ADDRESS are
# generated, this test will fail. Setting 'MAX_FROM_ADDRESS' to be
# a high value reduces the probability of this occurring, but does
# not rule it out
sendMail(fromAddress=FROM_ADDRESS_TEMPLATE
% random.randint(MIN_FROM_ADDRESS, MAX_FROM_ADDRESS))
for i in range(NUM_MAILS):
self.server.getAndCheckMessageContains(b"Here are your bridges")
| tearDown |
Event.js | import React from "react"
import TweenOne from "rc-tween-one"
import OverPack from "rc-scroll-anim/lib/ScrollOverPack"
import QueueAnim from "rc-queue-anim"
import { Row, Col } from "antd"
import { isImg } from "../utils/utils"
import Map from "./map"
class Event extends React.Component {
componentDidMount() {
console.log(this.props.event)
}
render() {
const event = this.props.event
return (
<div>
<OverPack>
<QueueAnim type="bottom" key="ul" leaveReverse></QueueAnim>
<TweenOne
animation={{ y: "+=30", opacity: 0, type: "from" }}
key="copyright"
>
<h1 style={{ marginTop: "1.5em" }}>Upcoming Event</h1>
<h2 className="meetup-title" style={{ textAlign: "center" }}>
{event.title}
</h2>
<div className="meetup-meta" style={{ marginTop: "1em" }}>
<p
className="meetup-metaField meetup-metaField--date"
style={{ textAlign: "center" }}
>
<span>
<span className="meetup-label">
<b>Date:</b>
</span>{" "}
{event.formattedDate}
</span>
<span style={{ marginLeft: "4em" }}>
<span className="meetup-label">
<b>Location:</b>
</span>{" "}
<a href={event.location.mapsLink}>{event.location.name}</a>
</span>
</p>
{/* <p className="meetup-metaField meetup-metaField--location"></p> */}
</div>
<div className="meetup-container">
<div className="presenters">
{event.presenters.map(presenter => {
return (
<div className="meetup-presenter">
<div className="meetup-presenterImageContainer">
<img
className="meetup-presenterImage"
src={presenter.image}
/>
<span className="meetup-presenterName">
{presenter.name}
</span>
</div>
<div className="meetup-presenterInfo">
<h3 className="meetup-presenterTitle">
{presenter.presentationTitle}
</h3> | <ul className="meetup-presenterLinks">
{presenter.links &&
presenter.links.map((link, index) => (
<li
key={index}
className="meetup-presenterLinkItem"
>
<a
className="meetup-presenterLink"
href={link.linkURL}
>
{link.linkText}
</a>
</li>
))}
</ul>
</div>
</div>
)
})}
</div>
</div>
<div style={{ height: "300px", margin: "0 auto" }}>
<Map
isMarkerShown
googleMapURL="https://maps.googleapis.com/maps/api/js?key=AIzaSyDUI4R2AGPOj9zBoKu6ulPMBg8lJ1aVm3k&v=3.exp&libraries=geometry,drawing,places"
loadingElement={<div style={{ height: `100%` }} />}
containerElement={<div style={{ height: `100%` }} />}
mapElement={<div style={{ height: `100%` }} />}
link={event.location.mapsLink}
latitude={event.location.mapsLatitude}
longitude={event.location.mapsLongitude}
/>
</div>
</TweenOne>
</OverPack>
</div>
)
}
}
export default Event | <p className="meetup-presenterText">{presenter.text}</p> |
page-changed-event.ts | import { Component } from '@angular/core';
import { PageChangedEvent } from 'ngx-bootstrap/pagination';
@Component({
// eslint-disable-next-line @angular-eslint/component-selector
selector: 'demo-pagination-page-changed-event',
templateUrl: './page-changed-event.html'
})
export class | {
currentPage = 4;
page?: number;
pageChanged(event: PageChangedEvent): void {
this.page = event.page;
}
}
| DemoPaginationPageChangedComponent |
combine.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
///////////////////////////////////////////////////////////////////////////
// # Type combining
//
// There are four type combiners: equate, sub, lub, and glb. Each
// implements the trait `Combine` and contains methods for combining
// two instances of various things and yielding a new instance. These
// combiner methods always yield a `Result<T>`. There is a lot of
// common code for these operations, implemented as default methods on
// the `Combine` trait.
//
// Each operation may have side-effects on the inference context,
// though these can be unrolled using snapshots. On success, the
// LUB/GLB operations return the appropriate bound. The Eq and Sub
// operations generally return the first operand.
//
// ## Contravariance
//
// When you are relating two things which have a contravariant
// relationship, you should use `contratys()` or `contraregions()`,
// rather than inverting the order of arguments! This is necessary
// because the order of arguments is not relevant for LUB and GLB. It
// is also useful to track which value is the "expected" value in
// terms of error reporting.
use super::equate::Equate;
use super::glb::Glb;
use super::lub::Lub;
use super::sub::Sub;
use super::InferCtxt;
use super::{MiscVariable, TypeTrace};
use hir::def_id::DefId;
use ty::{IntType, UintType};
use ty::{self, Ty, TyCtxt};
use ty::error::TypeError;
use ty::relate::{self, Relate, RelateResult, TypeRelation};
use ty::subst::Substs;
use traits::{Obligation, PredicateObligations};
use syntax::ast;
use syntax_pos::Span;
#[derive(Clone)]
pub struct CombineFields<'infcx, 'gcx: 'infcx+'tcx, 'tcx: 'infcx> {
pub infcx: &'infcx InferCtxt<'infcx, 'gcx, 'tcx>,
pub trace: TypeTrace<'tcx>,
pub cause: Option<ty::relate::Cause>,
pub param_env: ty::ParamEnv<'tcx>,
pub obligations: PredicateObligations<'tcx>,
}
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
pub enum RelationDir {
SubtypeOf, SupertypeOf, EqTo
}
impl<'infcx, 'gcx, 'tcx> InferCtxt<'infcx, 'gcx, 'tcx> {
pub fn super_combine_tys<R>(&self,
relation: &mut R,
a: Ty<'tcx>,
b: Ty<'tcx>)
-> RelateResult<'tcx, Ty<'tcx>>
where R: TypeRelation<'infcx, 'gcx, 'tcx>
|
fn unify_integral_variable(&self,
vid_is_expected: bool,
vid: ty::IntVid,
val: ty::IntVarValue)
-> RelateResult<'tcx, Ty<'tcx>>
{
self.int_unification_table
.borrow_mut()
.unify_var_value(vid, val)
.map_err(|e| int_unification_error(vid_is_expected, e))?;
match val {
IntType(v) => Ok(self.tcx.mk_mach_int(v)),
UintType(v) => Ok(self.tcx.mk_mach_uint(v)),
}
}
fn unify_float_variable(&self,
vid_is_expected: bool,
vid: ty::FloatVid,
val: ast::FloatTy)
-> RelateResult<'tcx, Ty<'tcx>>
{
self.float_unification_table
.borrow_mut()
.unify_var_value(vid, val)
.map_err(|e| float_unification_error(vid_is_expected, e))?;
Ok(self.tcx.mk_mach_float(val))
}
}
impl<'infcx, 'gcx, 'tcx> CombineFields<'infcx, 'gcx, 'tcx> {
pub fn tcx(&self) -> TyCtxt<'infcx, 'gcx, 'tcx> {
self.infcx.tcx
}
pub fn equate<'a>(&'a mut self, a_is_expected: bool) -> Equate<'a, 'infcx, 'gcx, 'tcx> {
Equate::new(self, a_is_expected)
}
pub fn sub<'a>(&'a mut self, a_is_expected: bool) -> Sub<'a, 'infcx, 'gcx, 'tcx> {
Sub::new(self, a_is_expected)
}
pub fn lub<'a>(&'a mut self, a_is_expected: bool) -> Lub<'a, 'infcx, 'gcx, 'tcx> {
Lub::new(self, a_is_expected)
}
pub fn glb<'a>(&'a mut self, a_is_expected: bool) -> Glb<'a, 'infcx, 'gcx, 'tcx> {
Glb::new(self, a_is_expected)
}
/// Here dir is either EqTo, SubtypeOf, or SupertypeOf. The
/// idea is that we should ensure that the type `a_ty` is equal
/// to, a subtype of, or a supertype of (respectively) the type
/// to which `b_vid` is bound.
///
/// Since `b_vid` has not yet been instantiated with a type, we
/// will first instantiate `b_vid` with a *generalized* version
/// of `a_ty`. Generalization introduces other inference
/// variables wherever subtyping could occur.
pub fn instantiate(&mut self,
a_ty: Ty<'tcx>,
dir: RelationDir,
b_vid: ty::TyVid,
a_is_expected: bool)
-> RelateResult<'tcx, ()>
{
use self::RelationDir::*;
// Get the actual variable that b_vid has been inferred to
debug_assert!(self.infcx.type_variables.borrow_mut().probe(b_vid).is_none());
debug!("instantiate(a_ty={:?} dir={:?} b_vid={:?})", a_ty, dir, b_vid);
// Generalize type of `a_ty` appropriately depending on the
// direction. As an example, assume:
//
// - `a_ty == &'x ?1`, where `'x` is some free region and `?1` is an
// inference variable,
// - and `dir` == `SubtypeOf`.
//
// Then the generalized form `b_ty` would be `&'?2 ?3`, where
// `'?2` and `?3` are fresh region/type inference
// variables. (Down below, we will relate `a_ty <: b_ty`,
// adding constraints like `'x: '?2` and `?1 <: ?3`.)
let Generalization { ty: b_ty, needs_wf } = self.generalize(a_ty, b_vid, dir)?;
debug!("instantiate(a_ty={:?}, dir={:?}, b_vid={:?}, generalized b_ty={:?})",
a_ty, dir, b_vid, b_ty);
self.infcx.type_variables.borrow_mut().instantiate(b_vid, b_ty);
if needs_wf {
self.obligations.push(Obligation::new(self.trace.cause.clone(),
self.param_env,
ty::Predicate::WellFormed(b_ty)));
}
// Finally, relate `b_ty` to `a_ty`, as described in previous comment.
//
// FIXME(#16847): This code is non-ideal because all these subtype
// relations wind up attributed to the same spans. We need
// to associate causes/spans with each of the relations in
// the stack to get this right.
match dir {
EqTo => self.equate(a_is_expected).relate(&a_ty, &b_ty),
SubtypeOf => self.sub(a_is_expected).relate(&a_ty, &b_ty),
SupertypeOf => self.sub(a_is_expected).relate_with_variance(
ty::Contravariant, &a_ty, &b_ty),
}?;
Ok(())
}
/// Attempts to generalize `ty` for the type variable `for_vid`.
/// This checks for cycles -- that is, whether the type `ty`
/// references `for_vid`. The `dir` is the "direction" for which we
/// are performing the generalization (i.e., are we producing a type
/// that can be used as a supertype etc).
///
/// Preconditions:
///
/// - `for_vid` is a "root vid"
fn generalize(&self,
ty: Ty<'tcx>,
for_vid: ty::TyVid,
dir: RelationDir)
-> RelateResult<'tcx, Generalization<'tcx>>
{
// Determine the ambient variance within which `ty` appears.
// The surrounding equation is:
//
// ty [op] ty2
//
// where `op` is either `==`, `<:`, or `:>`. This maps quite
// naturally.
let ambient_variance = match dir {
RelationDir::EqTo => ty::Invariant,
RelationDir::SubtypeOf => ty::Covariant,
RelationDir::SupertypeOf => ty::Contravariant,
};
let mut generalize = Generalizer {
infcx: self.infcx,
span: self.trace.cause.span,
for_vid_sub_root: self.infcx.type_variables.borrow_mut().sub_root_var(for_vid),
ambient_variance,
needs_wf: false,
root_ty: ty,
};
let ty = generalize.relate(&ty, &ty)?;
let needs_wf = generalize.needs_wf;
Ok(Generalization { ty, needs_wf })
}
}
struct Generalizer<'cx, 'gcx: 'cx+'tcx, 'tcx: 'cx> {
infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
/// Span, used when creating new type variables and things.
span: Span,
/// The vid of the type variable that is in the process of being
/// instantiated; if we find this within the type we are folding,
/// that means we would have created a cyclic type.
for_vid_sub_root: ty::TyVid,
/// Track the variance as we descend into the type.
ambient_variance: ty::Variance,
/// See the field `needs_wf` in `Generalization`.
needs_wf: bool,
/// The root type that we are generalizing. Used when reporting cycles.
root_ty: Ty<'tcx>,
}
/// Result from a generalization operation. This includes
/// not only the generalized type, but also a bool flag
/// indicating whether further WF checks are needed.
struct Generalization<'tcx> {
ty: Ty<'tcx>,
/// If true, then the generalized type may not be well-formed,
/// even if the source type is well-formed, so we should add an
/// additional check to enforce that it is. This arises in
/// particular around 'bivariant' type parameters that are only
/// constrained by a where-clause. As an example, imagine a type:
///
/// struct Foo<A, B> where A: Iterator<Item=B> {
/// data: A
/// }
///
/// here, `A` will be covariant, but `B` is
/// unconstrained. However, whatever it is, for `Foo` to be WF, it
/// must be equal to `A::Item`. If we have an input `Foo<?A, ?B>`,
/// then after generalization we will wind up with a type like
/// `Foo<?C, ?D>`. When we enforce that `Foo<?A, ?B> <: Foo<?C,
/// ?D>` (or `>:`), we will wind up with the requirement that `?A
/// <: ?C`, but no particular relationship between `?B` and `?D`
/// (after all, we do not know the variance of the normalized form
/// of `A::Item` with respect to `A`). If we do nothing else, this
/// may mean that `?D` goes unconstrained (as in #41677). So, in
/// this scenario where we create a new type variable in a
/// bivariant context, we set the `needs_wf` flag to true. This
/// will force the calling code to check that `WF(Foo<?C, ?D>)`
/// holds, which in turn implies that `?C::Item == ?D`. So once
/// `?C` is constrained, that should suffice to restrict `?D`.
needs_wf: bool,
}
impl<'cx, 'gcx, 'tcx> TypeRelation<'cx, 'gcx, 'tcx> for Generalizer<'cx, 'gcx, 'tcx> {
fn tcx(&self) -> TyCtxt<'cx, 'gcx, 'tcx> {
self.infcx.tcx
}
fn tag(&self) -> &'static str {
"Generalizer"
}
fn a_is_expected(&self) -> bool {
true
}
fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-> RelateResult<'tcx, ty::Binder<T>>
where T: Relate<'tcx>
{
Ok(ty::Binder(self.relate(a.skip_binder(), b.skip_binder())?))
}
fn relate_item_substs(&mut self,
item_def_id: DefId,
a_subst: &'tcx Substs<'tcx>,
b_subst: &'tcx Substs<'tcx>)
-> RelateResult<'tcx, &'tcx Substs<'tcx>>
{
if self.ambient_variance == ty::Variance::Invariant {
// Avoid fetching the variance if we are in an invariant
// context; no need, and it can induce dependency cycles
// (e.g. #41849).
relate::relate_substs(self, None, a_subst, b_subst)
} else {
let opt_variances = self.tcx().variances_of(item_def_id);
relate::relate_substs(self, Some(&opt_variances), a_subst, b_subst)
}
}
fn relate_with_variance<T: Relate<'tcx>>(&mut self,
variance: ty::Variance,
a: &T,
b: &T)
-> RelateResult<'tcx, T>
{
let old_ambient_variance = self.ambient_variance;
self.ambient_variance = self.ambient_variance.xform(variance);
let result = self.relate(a, b);
self.ambient_variance = old_ambient_variance;
result
}
fn tys(&mut self, t: Ty<'tcx>, t2: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
assert_eq!(t, t2); // we are abusing TypeRelation here; both LHS and RHS ought to be ==
// Check to see whether the type we are generalizing references
// any other type variable related to `vid` via
// subtyping. This is basically our "occurs check", preventing
// us from creating infinitely sized types.
match t.sty {
ty::TyInfer(ty::TyVar(vid)) => {
let mut variables = self.infcx.type_variables.borrow_mut();
let vid = variables.root_var(vid);
let sub_vid = variables.sub_root_var(vid);
if sub_vid == self.for_vid_sub_root {
// If sub-roots are equal, then `for_vid` and
// `vid` are related via subtyping.
return Err(TypeError::CyclicTy(self.root_ty));
} else {
match variables.probe_root(vid) {
Some(u) => {
drop(variables);
self.relate(&u, &u)
}
None => {
match self.ambient_variance {
// Invariant: no need to make a fresh type variable.
ty::Invariant => return Ok(t),
// Bivariant: make a fresh var, but we
// may need a WF predicate. See
// comment on `needs_wf` field for
// more info.
ty::Bivariant => self.needs_wf = true,
// Co/contravariant: this will be
// sufficiently constrained later on.
ty::Covariant | ty::Contravariant => (),
}
let origin = variables.origin(vid);
let new_var_id = variables.new_var(false, origin, None);
let u = self.tcx().mk_var(new_var_id);
debug!("generalize: replacing original vid={:?} with new={:?}",
vid, u);
return Ok(u);
}
}
}
}
ty::TyInfer(ty::IntVar(_)) |
ty::TyInfer(ty::FloatVar(_)) => {
// No matter what mode we are in,
// integer/floating-point types must be equal to be
// relatable.
Ok(t)
}
_ => {
relate::super_relate_tys(self, t, t)
}
}
}
fn regions(&mut self, r: ty::Region<'tcx>, r2: ty::Region<'tcx>)
-> RelateResult<'tcx, ty::Region<'tcx>> {
assert_eq!(r, r2); // we are abusing TypeRelation here; both LHS and RHS ought to be ==
match *r {
// Never make variables for regions bound within the type itself,
// nor for erased regions.
ty::ReLateBound(..) |
ty::ReErased => {
return Ok(r);
}
// Always make a fresh region variable for skolemized regions;
// the higher-ranked decision procedures rely on this.
ty::ReSkolemized(..) => { }
// For anything else, we make a region variable, unless we
// are *equating*, in which case it's just wasteful.
ty::ReEmpty |
ty::ReStatic |
ty::ReScope(..) |
ty::ReVar(..) |
ty::ReEarlyBound(..) |
ty::ReFree(..) => {
match self.ambient_variance {
ty::Invariant => return Ok(r),
ty::Bivariant | ty::Covariant | ty::Contravariant => (),
}
}
ty::ReClosureBound(..) => {
span_bug!(
self.span,
"encountered unexpected ReClosureBound: {:?}",
r,
);
}
}
// FIXME: This is non-ideal because we don't give a
// very descriptive origin for this region variable.
Ok(self.infcx.next_region_var(MiscVariable(self.span)))
}
}
pub trait RelateResultCompare<'tcx, T> {
fn compare<F>(&self, t: T, f: F) -> RelateResult<'tcx, T> where
F: FnOnce() -> TypeError<'tcx>;
}
impl<'tcx, T:Clone + PartialEq> RelateResultCompare<'tcx, T> for RelateResult<'tcx, T> {
fn compare<F>(&self, t: T, f: F) -> RelateResult<'tcx, T> where
F: FnOnce() -> TypeError<'tcx>,
{
self.clone().and_then(|s| {
if s == t {
self.clone()
} else {
Err(f())
}
})
}
}
fn int_unification_error<'tcx>(a_is_expected: bool, v: (ty::IntVarValue, ty::IntVarValue))
-> TypeError<'tcx>
{
let (a, b) = v;
TypeError::IntMismatch(ty::relate::expected_found_bool(a_is_expected, &a, &b))
}
fn float_unification_error<'tcx>(a_is_expected: bool,
v: (ast::FloatTy, ast::FloatTy))
-> TypeError<'tcx>
{
let (a, b) = v;
TypeError::FloatMismatch(ty::relate::expected_found_bool(a_is_expected, &a, &b))
}
| {
let a_is_expected = relation.a_is_expected();
match (&a.sty, &b.sty) {
// Relate integral variables to other types
(&ty::TyInfer(ty::IntVar(a_id)), &ty::TyInfer(ty::IntVar(b_id))) => {
self.int_unification_table
.borrow_mut()
.unify_var_var(a_id, b_id)
.map_err(|e| int_unification_error(a_is_expected, e))?;
Ok(a)
}
(&ty::TyInfer(ty::IntVar(v_id)), &ty::TyInt(v)) => {
self.unify_integral_variable(a_is_expected, v_id, IntType(v))
}
(&ty::TyInt(v), &ty::TyInfer(ty::IntVar(v_id))) => {
self.unify_integral_variable(!a_is_expected, v_id, IntType(v))
}
(&ty::TyInfer(ty::IntVar(v_id)), &ty::TyUint(v)) => {
self.unify_integral_variable(a_is_expected, v_id, UintType(v))
}
(&ty::TyUint(v), &ty::TyInfer(ty::IntVar(v_id))) => {
self.unify_integral_variable(!a_is_expected, v_id, UintType(v))
}
// Relate floating-point variables to other types
(&ty::TyInfer(ty::FloatVar(a_id)), &ty::TyInfer(ty::FloatVar(b_id))) => {
self.float_unification_table
.borrow_mut()
.unify_var_var(a_id, b_id)
.map_err(|e| float_unification_error(relation.a_is_expected(), e))?;
Ok(a)
}
(&ty::TyInfer(ty::FloatVar(v_id)), &ty::TyFloat(v)) => {
self.unify_float_variable(a_is_expected, v_id, v)
}
(&ty::TyFloat(v), &ty::TyInfer(ty::FloatVar(v_id))) => {
self.unify_float_variable(!a_is_expected, v_id, v)
}
// All other cases of inference are errors
(&ty::TyInfer(_), _) |
(_, &ty::TyInfer(_)) => {
Err(TypeError::Sorts(ty::relate::expected_found(relation, &a, &b)))
}
_ => {
ty::relate::super_relate_tys(relation, a, b)
}
}
} |
padregh.rs | #[doc = "Reader of register PADREGH"]
pub type R = crate::R<u32, super::PADREGH>;
#[doc = "Writer for register PADREGH"]
pub type W = crate::W<u32, super::PADREGH>;
#[doc = "Register PADREGH `reset()`'s with value 0x1818_1818"]
impl crate::ResetValue for super::PADREGH {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0x1818_1818
}
}
#[doc = "Pad 31 function select\n\nValue on reset: 3"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum PAD31FNCSEL_A {
#[doc = "0: Configure as the analog input for ADC single ended input 3"]
ADCSE3 = 0,
#[doc = "1: Configure as the SPI channel 4 nCE signal from IOMSTR0"]
M0NCE4 = 1,
#[doc = "2: Configure as the input/output signal from CTIMER A3"]
TCTA3 = 2,
#[doc = "3: Configure as GPIO31"]
GPIO31 = 3,
#[doc = "4: Configure as the UART0 RX input signal"]
UART0RX = 4,
#[doc = "5: Configure as the input/output signal from CTIMER B1"]
TCTB1 = 5,
#[doc = "6: Undefined/should not be used"]
UNDEF6 = 6,
#[doc = "7: Undefined/should not be used"]
UNDEF7 = 7,
}
impl From<PAD31FNCSEL_A> for u8 {
#[inline(always)]
fn from(variant: PAD31FNCSEL_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `PAD31FNCSEL`"]
pub type PAD31FNCSEL_R = crate::R<u8, PAD31FNCSEL_A>;
impl PAD31FNCSEL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD31FNCSEL_A {
match self.bits {
0 => PAD31FNCSEL_A::ADCSE3,
1 => PAD31FNCSEL_A::M0NCE4,
2 => PAD31FNCSEL_A::TCTA3,
3 => PAD31FNCSEL_A::GPIO31,
4 => PAD31FNCSEL_A::UART0RX,
5 => PAD31FNCSEL_A::TCTB1,
6 => PAD31FNCSEL_A::UNDEF6,
7 => PAD31FNCSEL_A::UNDEF7,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `ADCSE3`"]
#[inline(always)]
pub fn is_adcse3(&self) -> bool {
*self == PAD31FNCSEL_A::ADCSE3
}
#[doc = "Checks if the value of the field is `M0NCE4`"]
#[inline(always)]
pub fn is_m0n_ce4(&self) -> bool {
*self == PAD31FNCSEL_A::M0NCE4
}
#[doc = "Checks if the value of the field is `TCTA3`"]
#[inline(always)]
pub fn is_tcta3(&self) -> bool {
*self == PAD31FNCSEL_A::TCTA3
}
#[doc = "Checks if the value of the field is `GPIO31`"]
#[inline(always)]
pub fn is_gpio31(&self) -> bool {
*self == PAD31FNCSEL_A::GPIO31
}
#[doc = "Checks if the value of the field is `UART0RX`"]
#[inline(always)]
pub fn is_uart0rx(&self) -> bool {
*self == PAD31FNCSEL_A::UART0RX
}
#[doc = "Checks if the value of the field is `TCTB1`"]
#[inline(always)]
pub fn is_tctb1(&self) -> bool {
*self == PAD31FNCSEL_A::TCTB1
}
#[doc = "Checks if the value of the field is `UNDEF6`"]
#[inline(always)]
pub fn is_undef6(&self) -> bool {
*self == PAD31FNCSEL_A::UNDEF6
}
#[doc = "Checks if the value of the field is `UNDEF7`"]
#[inline(always)]
pub fn is_undef7(&self) -> bool {
*self == PAD31FNCSEL_A::UNDEF7
}
}
#[doc = "Write proxy for field `PAD31FNCSEL`"]
pub struct PAD31FNCSEL_W<'a> {
w: &'a mut W,
}
impl<'a> PAD31FNCSEL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD31FNCSEL_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "Configure as the analog input for ADC single ended input 3"]
#[inline(always)]
pub fn adcse3(self) -> &'a mut W {
self.variant(PAD31FNCSEL_A::ADCSE3)
}
#[doc = "Configure as the SPI channel 4 nCE signal from IOMSTR0"]
#[inline(always)]
pub fn m0n_ce4(self) -> &'a mut W {
self.variant(PAD31FNCSEL_A::M0NCE4)
}
#[doc = "Configure as the input/output signal from CTIMER A3"]
#[inline(always)]
pub fn tcta3(self) -> &'a mut W {
self.variant(PAD31FNCSEL_A::TCTA3)
}
#[doc = "Configure as GPIO31"]
#[inline(always)]
pub fn gpio31(self) -> &'a mut W {
self.variant(PAD31FNCSEL_A::GPIO31)
}
#[doc = "Configure as the UART0 RX input signal"]
#[inline(always)]
pub fn uart0rx(self) -> &'a mut W {
self.variant(PAD31FNCSEL_A::UART0RX)
}
#[doc = "Configure as the input/output signal from CTIMER B1"]
#[inline(always)]
pub fn tctb1(self) -> &'a mut W {
self.variant(PAD31FNCSEL_A::TCTB1)
}
#[doc = "Undefined/should not be used"]
#[inline(always)]
pub fn undef6(self) -> &'a mut W {
self.variant(PAD31FNCSEL_A::UNDEF6)
}
#[doc = "Undefined/should not be used"]
#[inline(always)]
pub fn undef7(self) -> &'a mut W {
self.variant(PAD31FNCSEL_A::UNDEF7)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 27)) | (((value as u32) & 0x07) << 27);
self.w
}
}
#[doc = "Pad 31 drive strength\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD31STRNG_A {
#[doc = "0: Low drive strength"]
LOW = 0,
#[doc = "1: High drive strength"]
HIGH = 1,
}
impl From<PAD31STRNG_A> for bool {
#[inline(always)]
fn from(variant: PAD31STRNG_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD31STRNG`"]
pub type PAD31STRNG_R = crate::R<bool, PAD31STRNG_A>;
impl PAD31STRNG_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD31STRNG_A {
match self.bits {
false => PAD31STRNG_A::LOW,
true => PAD31STRNG_A::HIGH,
}
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline(always)]
pub fn is_low(&self) -> bool {
*self == PAD31STRNG_A::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline(always)]
pub fn is_high(&self) -> bool {
*self == PAD31STRNG_A::HIGH
}
}
#[doc = "Write proxy for field `PAD31STRNG`"]
pub struct PAD31STRNG_W<'a> {
w: &'a mut W,
}
impl<'a> PAD31STRNG_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD31STRNG_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Low drive strength"]
#[inline(always)]
pub fn low(self) -> &'a mut W {
self.variant(PAD31STRNG_A::LOW)
}
#[doc = "High drive strength"]
#[inline(always)]
pub fn high(self) -> &'a mut W {
self.variant(PAD31STRNG_A::HIGH)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 26)) | (((value as u32) & 0x01) << 26);
self.w
}
}
#[doc = "Pad 31 input enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD31INPEN_A {
#[doc = "0: Pad input disabled"]
DIS = 0,
#[doc = "1: Pad input enabled"]
EN = 1,
}
impl From<PAD31INPEN_A> for bool {
#[inline(always)]
fn from(variant: PAD31INPEN_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD31INPEN`"]
pub type PAD31INPEN_R = crate::R<bool, PAD31INPEN_A>;
impl PAD31INPEN_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD31INPEN_A {
match self.bits {
false => PAD31INPEN_A::DIS,
true => PAD31INPEN_A::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline(always)]
pub fn is_dis(&self) -> bool {
*self == PAD31INPEN_A::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline(always)]
pub fn is_en(&self) -> bool {
*self == PAD31INPEN_A::EN
}
}
#[doc = "Write proxy for field `PAD31INPEN`"]
pub struct PAD31INPEN_W<'a> {
w: &'a mut W,
}
impl<'a> PAD31INPEN_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD31INPEN_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Pad input disabled"]
#[inline(always)]
pub fn dis(self) -> &'a mut W {
self.variant(PAD31INPEN_A::DIS)
}
#[doc = "Pad input enabled"]
#[inline(always)]
pub fn en(self) -> &'a mut W {
self.variant(PAD31INPEN_A::EN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 25)) | (((value as u32) & 0x01) << 25);
self.w
}
}
#[doc = "Pad 31 pullup enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD31PULL_A {
#[doc = "0: Pullup disabled"]
DIS = 0,
#[doc = "1: Pullup enabled"]
EN = 1,
}
impl From<PAD31PULL_A> for bool {
#[inline(always)]
fn from(variant: PAD31PULL_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD31PULL`"]
pub type PAD31PULL_R = crate::R<bool, PAD31PULL_A>;
impl PAD31PULL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD31PULL_A {
match self.bits {
false => PAD31PULL_A::DIS,
true => PAD31PULL_A::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline(always)]
pub fn is_dis(&self) -> bool {
*self == PAD31PULL_A::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline(always)]
pub fn is_en(&self) -> bool {
*self == PAD31PULL_A::EN
}
}
#[doc = "Write proxy for field `PAD31PULL`"]
pub struct PAD31PULL_W<'a> {
w: &'a mut W,
}
impl<'a> PAD31PULL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD31PULL_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Pullup disabled"]
#[inline(always)]
pub fn dis(self) -> &'a mut W {
self.variant(PAD31PULL_A::DIS)
}
#[doc = "Pullup enabled"]
#[inline(always)]
pub fn en(self) -> &'a mut W {
self.variant(PAD31PULL_A::EN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 24)) | (((value as u32) & 0x01) << 24);
self.w
}
}
#[doc = "Pad 30 function select\n\nValue on reset: 3"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum PAD30FNCSEL_A {
#[doc = "0: Undefined/should not be used"]
UNDEF0 = 0,
#[doc = "1: Configure as the SPI channel 7 nCE signal from IOMSTR1"]
M1NCE7 = 1,
#[doc = "2: Configure as the input/output signal from CTIMER B2"]
TCTB2 = 2,
#[doc = "3: Configure as GPIO30"]
GPIO30 = 3,
#[doc = "4: Configure as UART0 TX output signal"]
UART0TX = 4,
#[doc = "5: Configure as UART1 RTS output signal"]
UA1RTS = 5,
#[doc = "6: Undefined/should not be used"]
UNDEF6 = 6,
#[doc = "7: Configure as the I2S Data output signal"]
I2S_DAT = 7,
}
impl From<PAD30FNCSEL_A> for u8 {
#[inline(always)]
fn from(variant: PAD30FNCSEL_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `PAD30FNCSEL`"]
pub type PAD30FNCSEL_R = crate::R<u8, PAD30FNCSEL_A>;
impl PAD30FNCSEL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD30FNCSEL_A {
match self.bits {
0 => PAD30FNCSEL_A::UNDEF0,
1 => PAD30FNCSEL_A::M1NCE7,
2 => PAD30FNCSEL_A::TCTB2,
3 => PAD30FNCSEL_A::GPIO30,
4 => PAD30FNCSEL_A::UART0TX,
5 => PAD30FNCSEL_A::UA1RTS,
6 => PAD30FNCSEL_A::UNDEF6,
7 => PAD30FNCSEL_A::I2S_DAT,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `UNDEF0`"]
#[inline(always)]
pub fn is_undef0(&self) -> bool {
*self == PAD30FNCSEL_A::UNDEF0
}
#[doc = "Checks if the value of the field is `M1NCE7`"]
#[inline(always)]
pub fn is_m1n_ce7(&self) -> bool {
*self == PAD30FNCSEL_A::M1NCE7
}
#[doc = "Checks if the value of the field is `TCTB2`"]
#[inline(always)]
pub fn is_tctb2(&self) -> bool {
*self == PAD30FNCSEL_A::TCTB2
}
#[doc = "Checks if the value of the field is `GPIO30`"]
#[inline(always)]
pub fn is_gpio30(&self) -> bool {
*self == PAD30FNCSEL_A::GPIO30
}
#[doc = "Checks if the value of the field is `UART0TX`"]
#[inline(always)]
pub fn is_uart0tx(&self) -> bool {
*self == PAD30FNCSEL_A::UART0TX
}
#[doc = "Checks if the value of the field is `UA1RTS`"]
#[inline(always)]
pub fn is_ua1rts(&self) -> bool {
*self == PAD30FNCSEL_A::UA1RTS
}
#[doc = "Checks if the value of the field is `UNDEF6`"]
#[inline(always)]
pub fn is_undef6(&self) -> bool {
*self == PAD30FNCSEL_A::UNDEF6
}
#[doc = "Checks if the value of the field is `I2S_DAT`"]
#[inline(always)]
pub fn is_i2s_dat(&self) -> bool {
*self == PAD30FNCSEL_A::I2S_DAT
}
}
#[doc = "Write proxy for field `PAD30FNCSEL`"]
pub struct PAD30FNCSEL_W<'a> {
w: &'a mut W,
}
impl<'a> PAD30FNCSEL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD30FNCSEL_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "Undefined/should not be used"]
#[inline(always)]
pub fn undef0(self) -> &'a mut W {
self.variant(PAD30FNCSEL_A::UNDEF0)
}
#[doc = "Configure as the SPI channel 7 nCE signal from IOMSTR1"]
#[inline(always)]
pub fn m1n_ce7(self) -> &'a mut W {
self.variant(PAD30FNCSEL_A::M1NCE7)
}
#[doc = "Configure as the input/output signal from CTIMER B2"]
#[inline(always)]
pub fn tctb2(self) -> &'a mut W {
self.variant(PAD30FNCSEL_A::TCTB2)
}
#[doc = "Configure as GPIO30"]
#[inline(always)]
pub fn gpio30(self) -> &'a mut W {
self.variant(PAD30FNCSEL_A::GPIO30)
}
#[doc = "Configure as UART0 TX output signal"]
#[inline(always)]
pub fn uart0tx(self) -> &'a mut W {
self.variant(PAD30FNCSEL_A::UART0TX)
}
#[doc = "Configure as UART1 RTS output signal"]
#[inline(always)]
pub fn ua1rts(self) -> &'a mut W {
self.variant(PAD30FNCSEL_A::UA1RTS)
}
#[doc = "Undefined/should not be used"]
#[inline(always)]
pub fn undef6(self) -> &'a mut W {
self.variant(PAD30FNCSEL_A::UNDEF6)
}
#[doc = "Configure as the I2S Data output signal"]
#[inline(always)]
pub fn i2s_dat(self) -> &'a mut W {
self.variant(PAD30FNCSEL_A::I2S_DAT)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 19)) | (((value as u32) & 0x07) << 19);
self.w
}
}
#[doc = "Pad 30 drive strength\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD30STRNG_A {
#[doc = "0: Low drive strength"]
LOW = 0,
#[doc = "1: High drive strength"]
HIGH = 1,
}
impl From<PAD30STRNG_A> for bool {
#[inline(always)]
fn from(variant: PAD30STRNG_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD30STRNG`"]
pub type PAD30STRNG_R = crate::R<bool, PAD30STRNG_A>;
impl PAD30STRNG_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD30STRNG_A {
match self.bits {
false => PAD30STRNG_A::LOW,
true => PAD30STRNG_A::HIGH,
}
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline(always)]
pub fn is_low(&self) -> bool {
*self == PAD30STRNG_A::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline(always)]
pub fn is_high(&self) -> bool {
*self == PAD30STRNG_A::HIGH
}
}
#[doc = "Write proxy for field `PAD30STRNG`"]
pub struct PAD30STRNG_W<'a> {
w: &'a mut W,
}
impl<'a> PAD30STRNG_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD30STRNG_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Low drive strength"]
#[inline(always)]
pub fn low(self) -> &'a mut W {
self.variant(PAD30STRNG_A::LOW)
}
#[doc = "High drive strength"]
#[inline(always)]
pub fn high(self) -> &'a mut W {
self.variant(PAD30STRNG_A::HIGH)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18);
self.w
}
}
#[doc = "Pad 30 input enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD30INPEN_A {
#[doc = "0: Pad input disabled"]
DIS = 0,
#[doc = "1: Pad input enabled"]
EN = 1,
}
impl From<PAD30INPEN_A> for bool {
#[inline(always)]
fn from(variant: PAD30INPEN_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD30INPEN`"]
pub type PAD30INPEN_R = crate::R<bool, PAD30INPEN_A>;
impl PAD30INPEN_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD30INPEN_A {
match self.bits {
false => PAD30INPEN_A::DIS,
true => PAD30INPEN_A::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline(always)]
pub fn is_dis(&self) -> bool {
*self == PAD30INPEN_A::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline(always)]
pub fn is_en(&self) -> bool {
*self == PAD30INPEN_A::EN
}
}
#[doc = "Write proxy for field `PAD30INPEN`"]
pub struct PAD30INPEN_W<'a> {
w: &'a mut W,
}
impl<'a> PAD30INPEN_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD30INPEN_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Pad input disabled"]
#[inline(always)]
pub fn dis(self) -> &'a mut W {
self.variant(PAD30INPEN_A::DIS)
}
#[doc = "Pad input enabled"]
#[inline(always)]
pub fn en(self) -> &'a mut W {
self.variant(PAD30INPEN_A::EN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17);
self.w
}
}
#[doc = "Pad 30 pullup enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD30PULL_A {
#[doc = "0: Pullup disabled"]
DIS = 0,
#[doc = "1: Pullup enabled"]
EN = 1,
}
impl From<PAD30PULL_A> for bool {
#[inline(always)]
fn from(variant: PAD30PULL_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD30PULL`"]
pub type PAD30PULL_R = crate::R<bool, PAD30PULL_A>;
impl PAD30PULL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD30PULL_A {
match self.bits {
false => PAD30PULL_A::DIS,
true => PAD30PULL_A::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline(always)]
pub fn is_dis(&self) -> bool {
*self == PAD30PULL_A::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline(always)]
pub fn is_en(&self) -> bool {
*self == PAD30PULL_A::EN
}
}
#[doc = "Write proxy for field `PAD30PULL`"]
pub struct PAD30PULL_W<'a> {
w: &'a mut W,
}
impl<'a> PAD30PULL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD30PULL_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Pullup disabled"]
#[inline(always)]
pub fn dis(self) -> &'a mut W {
self.variant(PAD30PULL_A::DIS)
}
#[doc = "Pullup enabled"]
#[inline(always)]
pub fn en(self) -> &'a mut W {
self.variant(PAD30PULL_A::EN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16);
self.w
}
}
#[doc = "Pad 29 function select\n\nValue on reset: 3"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum PAD29FNCSEL_A {
#[doc = "0: Configure as the analog input for ADC single ended input 1"]
ADCSE1 = 0,
#[doc = "1: Configure as the SPI channel 6 nCE signal from IOMSTR1"]
M1NCE6 = 1,
#[doc = "2: Configure as the input/output signal from CTIMER A2"]
TCTA2 = 2,
#[doc = "3: Configure as GPIO29"]
GPIO29 = 3,
#[doc = "4: Configure as the UART0 CTS signal"]
UA0CTS = 4,
#[doc = "5: Configure as the UART1 CTS signal"]
UA1CTS = 5,
#[doc = "6: Configure as the SPI channel 0 nCE signal from IOMSTR4"]
M4NCE0 = 6,
#[doc = "7: Configure as PDM DATA input"]
PDM_DATA = 7,
}
impl From<PAD29FNCSEL_A> for u8 {
#[inline(always)]
fn from(variant: PAD29FNCSEL_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `PAD29FNCSEL`"]
pub type PAD29FNCSEL_R = crate::R<u8, PAD29FNCSEL_A>;
impl PAD29FNCSEL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD29FNCSEL_A {
match self.bits {
0 => PAD29FNCSEL_A::ADCSE1,
1 => PAD29FNCSEL_A::M1NCE6,
2 => PAD29FNCSEL_A::TCTA2,
3 => PAD29FNCSEL_A::GPIO29,
4 => PAD29FNCSEL_A::UA0CTS,
5 => PAD29FNCSEL_A::UA1CTS,
6 => PAD29FNCSEL_A::M4NCE0,
7 => PAD29FNCSEL_A::PDM_DATA,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `ADCSE1`"]
#[inline(always)]
pub fn is_adcse1(&self) -> bool {
*self == PAD29FNCSEL_A::ADCSE1
}
#[doc = "Checks if the value of the field is `M1NCE6`"]
#[inline(always)]
pub fn is_m1n_ce6(&self) -> bool {
*self == PAD29FNCSEL_A::M1NCE6
}
#[doc = "Checks if the value of the field is `TCTA2`"]
#[inline(always)]
pub fn is_tcta2(&self) -> bool {
*self == PAD29FNCSEL_A::TCTA2
}
#[doc = "Checks if the value of the field is `GPIO29`"]
#[inline(always)]
pub fn is_gpio29(&self) -> bool {
*self == PAD29FNCSEL_A::GPIO29
}
#[doc = "Checks if the value of the field is `UA0CTS`"]
#[inline(always)]
pub fn is_ua0cts(&self) -> bool {
*self == PAD29FNCSEL_A::UA0CTS
}
#[doc = "Checks if the value of the field is `UA1CTS`"]
#[inline(always)]
pub fn is_ua1cts(&self) -> bool {
*self == PAD29FNCSEL_A::UA1CTS
}
#[doc = "Checks if the value of the field is `M4NCE0`"]
#[inline(always)]
pub fn is_m4n_ce0(&self) -> bool {
*self == PAD29FNCSEL_A::M4NCE0
}
#[doc = "Checks if the value of the field is `PDM_DATA`"]
#[inline(always)]
pub fn is_pdm_data(&self) -> bool {
*self == PAD29FNCSEL_A::PDM_DATA
}
}
#[doc = "Write proxy for field `PAD29FNCSEL`"]
pub struct PAD29FNCSEL_W<'a> {
w: &'a mut W,
}
impl<'a> PAD29FNCSEL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD29FNCSEL_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "Configure as the analog input for ADC single ended input 1"]
#[inline(always)]
pub fn adcse1(self) -> &'a mut W {
self.variant(PAD29FNCSEL_A::ADCSE1)
}
#[doc = "Configure as the SPI channel 6 nCE signal from IOMSTR1"]
#[inline(always)]
pub fn m1n_ce6(self) -> &'a mut W {
self.variant(PAD29FNCSEL_A::M1NCE6)
}
#[doc = "Configure as the input/output signal from CTIMER A2"]
#[inline(always)]
pub fn tcta2(self) -> &'a mut W {
self.variant(PAD29FNCSEL_A::TCTA2)
}
#[doc = "Configure as GPIO29"]
#[inline(always)]
pub fn gpio29(self) -> &'a mut W {
self.variant(PAD29FNCSEL_A::GPIO29)
}
#[doc = "Configure as the UART0 CTS signal"]
#[inline(always)]
pub fn ua0cts(self) -> &'a mut W {
self.variant(PAD29FNCSEL_A::UA0CTS)
}
#[doc = "Configure as the UART1 CTS signal"]
#[inline(always)]
pub fn ua1cts(self) -> &'a mut W {
self.variant(PAD29FNCSEL_A::UA1CTS)
}
#[doc = "Configure as the SPI channel 0 nCE signal from IOMSTR4"]
#[inline(always)]
pub fn m4n_ce0(self) -> &'a mut W {
self.variant(PAD29FNCSEL_A::M4NCE0)
}
#[doc = "Configure as PDM DATA input"]
#[inline(always)]
pub fn pdm_data(self) -> &'a mut W {
self.variant(PAD29FNCSEL_A::PDM_DATA)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 11)) | (((value as u32) & 0x07) << 11);
self.w
}
}
#[doc = "Pad 29 drive strength\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD29STRNG_A {
#[doc = "0: Low drive strength"]
LOW = 0,
#[doc = "1: High drive strength"]
HIGH = 1,
}
impl From<PAD29STRNG_A> for bool {
#[inline(always)]
fn from(variant: PAD29STRNG_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD29STRNG`"]
pub type PAD29STRNG_R = crate::R<bool, PAD29STRNG_A>;
impl PAD29STRNG_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD29STRNG_A {
match self.bits {
false => PAD29STRNG_A::LOW,
true => PAD29STRNG_A::HIGH,
}
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline(always)]
pub fn is_low(&self) -> bool {
*self == PAD29STRNG_A::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline(always)]
pub fn is_high(&self) -> bool {
*self == PAD29STRNG_A::HIGH
}
}
#[doc = "Write proxy for field `PAD29STRNG`"]
pub struct PAD29STRNG_W<'a> {
w: &'a mut W,
}
impl<'a> PAD29STRNG_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD29STRNG_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Low drive strength"]
#[inline(always)]
pub fn low(self) -> &'a mut W {
self.variant(PAD29STRNG_A::LOW)
}
#[doc = "High drive strength"]
#[inline(always)]
pub fn high(self) -> &'a mut W {
self.variant(PAD29STRNG_A::HIGH)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
self.w
}
}
#[doc = "Pad 29 input enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD29INPEN_A {
#[doc = "0: Pad input disabled"]
DIS = 0,
#[doc = "1: Pad input enabled"]
EN = 1,
}
impl From<PAD29INPEN_A> for bool {
#[inline(always)]
fn from(variant: PAD29INPEN_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD29INPEN`"]
pub type PAD29INPEN_R = crate::R<bool, PAD29INPEN_A>;
impl PAD29INPEN_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD29INPEN_A {
match self.bits {
false => PAD29INPEN_A::DIS,
true => PAD29INPEN_A::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline(always)]
pub fn is_dis(&self) -> bool {
*self == PAD29INPEN_A::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline(always)]
pub fn is_en(&self) -> bool {
*self == PAD29INPEN_A::EN
}
}
#[doc = "Write proxy for field `PAD29INPEN`"]
pub struct PAD29INPEN_W<'a> {
w: &'a mut W,
}
impl<'a> PAD29INPEN_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD29INPEN_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Pad input disabled"]
#[inline(always)]
pub fn dis(self) -> &'a mut W {
self.variant(PAD29INPEN_A::DIS)
}
#[doc = "Pad input enabled"]
#[inline(always)]
pub fn en(self) -> &'a mut W {
self.variant(PAD29INPEN_A::EN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
self.w
}
}
#[doc = "Pad 29 pullup enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD29PULL_A {
#[doc = "0: Pullup disabled"]
DIS = 0,
#[doc = "1: Pullup enabled"]
EN = 1,
}
impl From<PAD29PULL_A> for bool {
#[inline(always)]
fn from(variant: PAD29PULL_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD29PULL`"]
pub type PAD29PULL_R = crate::R<bool, PAD29PULL_A>;
impl PAD29PULL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD29PULL_A {
match self.bits {
false => PAD29PULL_A::DIS,
true => PAD29PULL_A::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline(always)]
pub fn is_dis(&self) -> bool {
*self == PAD29PULL_A::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline(always)]
pub fn is_en(&self) -> bool {
*self == PAD29PULL_A::EN
}
}
#[doc = "Write proxy for field `PAD29PULL`"]
pub struct PAD29PULL_W<'a> {
w: &'a mut W,
}
impl<'a> PAD29PULL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD29PULL_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Pullup disabled"]
#[inline(always)]
pub fn dis(self) -> &'a mut W {
self.variant(PAD29PULL_A::DIS)
}
#[doc = "Pullup enabled"]
#[inline(always)]
pub fn en(self) -> &'a mut W {
self.variant(PAD29PULL_A::EN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
self.w
}
}
#[doc = "Pad 28 function select\n\nValue on reset: 3"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum PAD28FNCSEL_A {
#[doc = "0: Configure as the I2S Word Clock input"]
I2S_WCLK = 0,
#[doc = "1: Configure as the SPI channel 5 nCE signal from IOMSTR1"]
M1NCE5 = 1,
#[doc = "2: Configure as the input/output signal from CTIMER B1"]
TCTB1 = 2,
#[doc = "3: Configure as GPIO28"]
GPIO28 = 3,
#[doc = "4: Configure as the IOMSTR2 SPI 3-wire MOSI/MISO signal"]
M2WIR3 = 4,
#[doc = "5: Configure as the IOMSTR2 SPI MOSI output signal"]
M2MOSI = 5,
#[doc = "6: Configure as the SPI channel 3 nCE signal from IOMSTR5"]
M5NCE3 = 6,
#[doc = "7: Configure as the IOMSTR2 SPI 3-wire MOSI/MISO loopback signal from IOSLAVE"]
SLWIR3LB = 7,
}
impl From<PAD28FNCSEL_A> for u8 {
#[inline(always)]
fn from(variant: PAD28FNCSEL_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `PAD28FNCSEL`"]
pub type PAD28FNCSEL_R = crate::R<u8, PAD28FNCSEL_A>;
impl PAD28FNCSEL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD28FNCSEL_A {
match self.bits {
0 => PAD28FNCSEL_A::I2S_WCLK,
1 => PAD28FNCSEL_A::M1NCE5,
2 => PAD28FNCSEL_A::TCTB1,
3 => PAD28FNCSEL_A::GPIO28,
4 => PAD28FNCSEL_A::M2WIR3,
5 => PAD28FNCSEL_A::M2MOSI,
6 => PAD28FNCSEL_A::M5NCE3,
7 => PAD28FNCSEL_A::SLWIR3LB,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `I2S_WCLK`"]
#[inline(always)]
pub fn is_i2s_wclk(&self) -> bool {
*self == PAD28FNCSEL_A::I2S_WCLK
}
#[doc = "Checks if the value of the field is `M1NCE5`"]
#[inline(always)]
pub fn is_m1n_ce5(&self) -> bool {
*self == PAD28FNCSEL_A::M1NCE5
}
#[doc = "Checks if the value of the field is `TCTB1`"]
#[inline(always)]
pub fn is_tctb1(&self) -> bool {
*self == PAD28FNCSEL_A::TCTB1
}
#[doc = "Checks if the value of the field is `GPIO28`"]
#[inline(always)]
pub fn is_gpio28(&self) -> bool {
*self == PAD28FNCSEL_A::GPIO28
}
#[doc = "Checks if the value of the field is `M2WIR3`"]
#[inline(always)]
pub fn is_m2wir3(&self) -> bool {
*self == PAD28FNCSEL_A::M2WIR3
}
#[doc = "Checks if the value of the field is `M2MOSI`"]
#[inline(always)]
pub fn is_m2mosi(&self) -> bool {
*self == PAD28FNCSEL_A::M2MOSI
}
#[doc = "Checks if the value of the field is `M5NCE3`"]
#[inline(always)]
pub fn is_m5n_ce3(&self) -> bool {
*self == PAD28FNCSEL_A::M5NCE3
}
#[doc = "Checks if the value of the field is `SLWIR3LB`"]
#[inline(always)]
pub fn is_slwir3lb(&self) -> bool {
*self == PAD28FNCSEL_A::SLWIR3LB
}
}
#[doc = "Write proxy for field `PAD28FNCSEL`"]
pub struct PAD28FNCSEL_W<'a> {
w: &'a mut W,
}
impl<'a> PAD28FNCSEL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD28FNCSEL_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "Configure as the I2S Word Clock input"]
#[inline(always)]
pub fn i2s_wclk(self) -> &'a mut W {
self.variant(PAD28FNCSEL_A::I2S_WCLK)
}
#[doc = "Configure as the SPI channel 5 nCE signal from IOMSTR1"]
#[inline(always)]
pub fn m1n_ce5(self) -> &'a mut W {
self.variant(PAD28FNCSEL_A::M1NCE5)
}
#[doc = "Configure as the input/output signal from CTIMER B1"]
#[inline(always)]
pub fn tctb1(self) -> &'a mut W {
self.variant(PAD28FNCSEL_A::TCTB1)
}
#[doc = "Configure as GPIO28"]
#[inline(always)]
pub fn gpio28(self) -> &'a mut W {
self.variant(PAD28FNCSEL_A::GPIO28)
}
#[doc = "Configure as the IOMSTR2 SPI 3-wire MOSI/MISO signal"]
#[inline(always)]
pub fn m2wir3(self) -> &'a mut W {
self.variant(PAD28FNCSEL_A::M2WIR3)
}
#[doc = "Configure as the IOMSTR2 SPI MOSI output signal"]
#[inline(always)]
pub fn m2mosi(self) -> &'a mut W {
self.variant(PAD28FNCSEL_A::M2MOSI)
}
#[doc = "Configure as the SPI channel 3 nCE signal from IOMSTR5"]
#[inline(always)]
pub fn m5n_ce3(self) -> &'a mut W {
self.variant(PAD28FNCSEL_A::M5NCE3)
}
#[doc = "Configure as the IOMSTR2 SPI 3-wire MOSI/MISO loopback signal from IOSLAVE"]
#[inline(always)]
pub fn slwir3lb(self) -> &'a mut W {
self.variant(PAD28FNCSEL_A::SLWIR3LB)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 3)) | (((value as u32) & 0x07) << 3);
self.w
}
}
#[doc = "Pad 28 drive strength\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum | {
#[doc = "0: Low drive strength"]
LOW = 0,
#[doc = "1: High drive strength"]
HIGH = 1,
}
impl From<PAD28STRNG_A> for bool {
#[inline(always)]
fn from(variant: PAD28STRNG_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD28STRNG`"]
pub type PAD28STRNG_R = crate::R<bool, PAD28STRNG_A>;
impl PAD28STRNG_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD28STRNG_A {
match self.bits {
false => PAD28STRNG_A::LOW,
true => PAD28STRNG_A::HIGH,
}
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline(always)]
pub fn is_low(&self) -> bool {
*self == PAD28STRNG_A::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline(always)]
pub fn is_high(&self) -> bool {
*self == PAD28STRNG_A::HIGH
}
}
#[doc = "Write proxy for field `PAD28STRNG`"]
pub struct PAD28STRNG_W<'a> {
w: &'a mut W,
}
impl<'a> PAD28STRNG_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD28STRNG_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Low drive strength"]
#[inline(always)]
pub fn low(self) -> &'a mut W {
self.variant(PAD28STRNG_A::LOW)
}
#[doc = "High drive strength"]
#[inline(always)]
pub fn high(self) -> &'a mut W {
self.variant(PAD28STRNG_A::HIGH)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
self.w
}
}
#[doc = "Pad 28 input enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD28INPEN_A {
#[doc = "0: Pad input disabled"]
DIS = 0,
#[doc = "1: Pad input enabled"]
EN = 1,
}
impl From<PAD28INPEN_A> for bool {
#[inline(always)]
fn from(variant: PAD28INPEN_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD28INPEN`"]
pub type PAD28INPEN_R = crate::R<bool, PAD28INPEN_A>;
impl PAD28INPEN_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD28INPEN_A {
match self.bits {
false => PAD28INPEN_A::DIS,
true => PAD28INPEN_A::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline(always)]
pub fn is_dis(&self) -> bool {
*self == PAD28INPEN_A::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline(always)]
pub fn is_en(&self) -> bool {
*self == PAD28INPEN_A::EN
}
}
#[doc = "Write proxy for field `PAD28INPEN`"]
pub struct PAD28INPEN_W<'a> {
w: &'a mut W,
}
impl<'a> PAD28INPEN_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD28INPEN_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Pad input disabled"]
#[inline(always)]
pub fn dis(self) -> &'a mut W {
self.variant(PAD28INPEN_A::DIS)
}
#[doc = "Pad input enabled"]
#[inline(always)]
pub fn en(self) -> &'a mut W {
self.variant(PAD28INPEN_A::EN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
self.w
}
}
#[doc = "Pad 28 pullup enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD28PULL_A {
#[doc = "0: Pullup disabled"]
DIS = 0,
#[doc = "1: Pullup enabled"]
EN = 1,
}
impl From<PAD28PULL_A> for bool {
#[inline(always)]
fn from(variant: PAD28PULL_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD28PULL`"]
pub type PAD28PULL_R = crate::R<bool, PAD28PULL_A>;
impl PAD28PULL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD28PULL_A {
match self.bits {
false => PAD28PULL_A::DIS,
true => PAD28PULL_A::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline(always)]
pub fn is_dis(&self) -> bool {
*self == PAD28PULL_A::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline(always)]
pub fn is_en(&self) -> bool {
*self == PAD28PULL_A::EN
}
}
#[doc = "Write proxy for field `PAD28PULL`"]
pub struct PAD28PULL_W<'a> {
w: &'a mut W,
}
impl<'a> PAD28PULL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD28PULL_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Pullup disabled"]
#[inline(always)]
pub fn dis(self) -> &'a mut W {
self.variant(PAD28PULL_A::DIS)
}
#[doc = "Pullup enabled"]
#[inline(always)]
pub fn en(self) -> &'a mut W {
self.variant(PAD28PULL_A::EN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
impl R {
#[doc = "Bits 27:29 - Pad 31 function select"]
#[inline(always)]
pub fn pad31fncsel(&self) -> PAD31FNCSEL_R {
PAD31FNCSEL_R::new(((self.bits >> 27) & 0x07) as u8)
}
#[doc = "Bit 26 - Pad 31 drive strength"]
#[inline(always)]
pub fn pad31strng(&self) -> PAD31STRNG_R {
PAD31STRNG_R::new(((self.bits >> 26) & 0x01) != 0)
}
#[doc = "Bit 25 - Pad 31 input enable"]
#[inline(always)]
pub fn pad31inpen(&self) -> PAD31INPEN_R {
PAD31INPEN_R::new(((self.bits >> 25) & 0x01) != 0)
}
#[doc = "Bit 24 - Pad 31 pullup enable"]
#[inline(always)]
pub fn pad31pull(&self) -> PAD31PULL_R {
PAD31PULL_R::new(((self.bits >> 24) & 0x01) != 0)
}
#[doc = "Bits 19:21 - Pad 30 function select"]
#[inline(always)]
pub fn pad30fncsel(&self) -> PAD30FNCSEL_R {
PAD30FNCSEL_R::new(((self.bits >> 19) & 0x07) as u8)
}
#[doc = "Bit 18 - Pad 30 drive strength"]
#[inline(always)]
pub fn pad30strng(&self) -> PAD30STRNG_R {
PAD30STRNG_R::new(((self.bits >> 18) & 0x01) != 0)
}
#[doc = "Bit 17 - Pad 30 input enable"]
#[inline(always)]
pub fn pad30inpen(&self) -> PAD30INPEN_R {
PAD30INPEN_R::new(((self.bits >> 17) & 0x01) != 0)
}
#[doc = "Bit 16 - Pad 30 pullup enable"]
#[inline(always)]
pub fn pad30pull(&self) -> PAD30PULL_R {
PAD30PULL_R::new(((self.bits >> 16) & 0x01) != 0)
}
#[doc = "Bits 11:13 - Pad 29 function select"]
#[inline(always)]
pub fn pad29fncsel(&self) -> PAD29FNCSEL_R {
PAD29FNCSEL_R::new(((self.bits >> 11) & 0x07) as u8)
}
#[doc = "Bit 10 - Pad 29 drive strength"]
#[inline(always)]
pub fn pad29strng(&self) -> PAD29STRNG_R {
PAD29STRNG_R::new(((self.bits >> 10) & 0x01) != 0)
}
#[doc = "Bit 9 - Pad 29 input enable"]
#[inline(always)]
pub fn pad29inpen(&self) -> PAD29INPEN_R {
PAD29INPEN_R::new(((self.bits >> 9) & 0x01) != 0)
}
#[doc = "Bit 8 - Pad 29 pullup enable"]
#[inline(always)]
pub fn pad29pull(&self) -> PAD29PULL_R {
PAD29PULL_R::new(((self.bits >> 8) & 0x01) != 0)
}
#[doc = "Bits 3:5 - Pad 28 function select"]
#[inline(always)]
pub fn pad28fncsel(&self) -> PAD28FNCSEL_R {
PAD28FNCSEL_R::new(((self.bits >> 3) & 0x07) as u8)
}
#[doc = "Bit 2 - Pad 28 drive strength"]
#[inline(always)]
pub fn pad28strng(&self) -> PAD28STRNG_R {
PAD28STRNG_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 1 - Pad 28 input enable"]
#[inline(always)]
pub fn pad28inpen(&self) -> PAD28INPEN_R {
PAD28INPEN_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 0 - Pad 28 pullup enable"]
#[inline(always)]
pub fn pad28pull(&self) -> PAD28PULL_R {
PAD28PULL_R::new((self.bits & 0x01) != 0)
}
}
impl W {
#[doc = "Bits 27:29 - Pad 31 function select"]
#[inline(always)]
pub fn pad31fncsel(&mut self) -> PAD31FNCSEL_W {
PAD31FNCSEL_W { w: self }
}
#[doc = "Bit 26 - Pad 31 drive strength"]
#[inline(always)]
pub fn pad31strng(&mut self) -> PAD31STRNG_W {
PAD31STRNG_W { w: self }
}
#[doc = "Bit 25 - Pad 31 input enable"]
#[inline(always)]
pub fn pad31inpen(&mut self) -> PAD31INPEN_W {
PAD31INPEN_W { w: self }
}
#[doc = "Bit 24 - Pad 31 pullup enable"]
#[inline(always)]
pub fn pad31pull(&mut self) -> PAD31PULL_W {
PAD31PULL_W { w: self }
}
#[doc = "Bits 19:21 - Pad 30 function select"]
#[inline(always)]
pub fn pad30fncsel(&mut self) -> PAD30FNCSEL_W {
PAD30FNCSEL_W { w: self }
}
#[doc = "Bit 18 - Pad 30 drive strength"]
#[inline(always)]
pub fn pad30strng(&mut self) -> PAD30STRNG_W {
PAD30STRNG_W { w: self }
}
#[doc = "Bit 17 - Pad 30 input enable"]
#[inline(always)]
pub fn pad30inpen(&mut self) -> PAD30INPEN_W {
PAD30INPEN_W { w: self }
}
#[doc = "Bit 16 - Pad 30 pullup enable"]
#[inline(always)]
pub fn pad30pull(&mut self) -> PAD30PULL_W {
PAD30PULL_W { w: self }
}
#[doc = "Bits 11:13 - Pad 29 function select"]
#[inline(always)]
pub fn pad29fncsel(&mut self) -> PAD29FNCSEL_W {
PAD29FNCSEL_W { w: self }
}
#[doc = "Bit 10 - Pad 29 drive strength"]
#[inline(always)]
pub fn pad29strng(&mut self) -> PAD29STRNG_W {
PAD29STRNG_W { w: self }
}
#[doc = "Bit 9 - Pad 29 input enable"]
#[inline(always)]
pub fn pad29inpen(&mut self) -> PAD29INPEN_W {
PAD29INPEN_W { w: self }
}
#[doc = "Bit 8 - Pad 29 pullup enable"]
#[inline(always)]
pub fn pad29pull(&mut self) -> PAD29PULL_W {
PAD29PULL_W { w: self }
}
#[doc = "Bits 3:5 - Pad 28 function select"]
#[inline(always)]
pub fn pad28fncsel(&mut self) -> PAD28FNCSEL_W {
PAD28FNCSEL_W { w: self }
}
#[doc = "Bit 2 - Pad 28 drive strength"]
#[inline(always)]
pub fn pad28strng(&mut self) -> PAD28STRNG_W {
PAD28STRNG_W { w: self }
}
#[doc = "Bit 1 - Pad 28 input enable"]
#[inline(always)]
pub fn pad28inpen(&mut self) -> PAD28INPEN_W {
PAD28INPEN_W { w: self }
}
#[doc = "Bit 0 - Pad 28 pullup enable"]
#[inline(always)]
pub fn pad28pull(&mut self) -> PAD28PULL_W {
PAD28PULL_W { w: self }
}
}
| PAD28STRNG_A |
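// Hedged usage sketch for the PADREGH field accessors above (the peripheral
// access path `p.GPIO.padregh` is an assumption about the surrounding PAC
// crate, not something shown here):
//
//     p.GPIO.padregh.modify(|_, w| {
//         w.pad31fncsel().gpio31()   // pad 31 as plain GPIO
//          .pad31inpen().en()        // enable its input buffer
//          .pad30fncsel().uart0tx()  // pad 30 as UART0 TX
//     });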
IOTSocketClient.py | '''
Developed by Abhijith Boppe - linkedin.com/in/abhijith-boppe/
'''
import socket
import ssl
import time
data_maxLength = 65535
fields_maxLength =1024
sock = ''
device_id = ''
device_key = ''
time_stamps = []
def connectionSet(host, port, id_, key, Encrypt=1, cert_path=None):
global sock, device_id, device_key, time_stamps
device_id = id_
device_key = key
time_stamps = []
sock = socket.create_connection((host, port))
if Encrypt == 1:
ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT).load_verify_locations(cert_path)
sock = ssl.wrap_socket(sock, keyfile=None, certfile=None, server_side=False, cert_reqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_SSLv23)
sock.settimeout(1)
def chkTime(server_time, device_time):
"""
    Check that the server time matches the device time and
    that no time stamps are reused (to prevent replay attacks).
"""
global time_stamps
    time_drop_max = 3  # packets whose time difference exceeds this many seconds are rejected
device_time = float(device_time)
server_time = float(server_time)
if(server_time in time_stamps):
        raise Exception(f"ERROR: Replay attack observed. Time stamps:{time_stamps}, Replayed time: {server_time}")
return False
else:
        if len(time_stamps) < 100:  # allow at most 100 requests within the accepted time window
time_diff = abs(device_time - server_time)
if len(time_stamps) > 1: # to remove old time stamps (to reduce memory usage)
if (abs(time_stamps[-1] - server_time) > time_drop_max):
time_stamps = []
if (time_diff > time_drop_max):
return 0
elif (time_diff < time_drop_max):
time_stamps.append(server_time)
return 1
else:
raise Exception(
"ERROR: DOS attack more than 100 requests from server in 30sec")
def recvData():
time_now = f'{time.time():.4f}'
try:
# 65535 max data (including headers)
data = sock.recv(data_maxLength)
    except socket.timeout as _:
        data = b''
except Exception as _:
raise Exception("socket closed/refused by server")
data = data.decode()
if not data:
return ''
else:
        data = data.split('|#|')  # split data at the |#| delimiter
while '' in data:
data.remove('')
if data[0]: # clear the remaining queue/buffer and read only first element/data
data = data[0]
# split headers and data
fields, data = data.split("\r\n\r\n", 1)
fields, data = fields.strip() if len(
fields) < fields_maxLength else 0, data.strip() if len(data) < (data_maxLength-3000) else ''
headers = {}
for field in fields.split('\r\n'):
# split each line by http field name and value
key, value = field.split(':')
headers[key] = value
if len(headers) > 10:
break
if len(headers) != 5 or len(data) < 5:
raise Exception("ERROR: Header length issue ")
else:
if(headers['IOT'] == '1.1'):
time_chk = chkTime(headers['TIME'], time_now)
if(time_chk):
return data
else:
raise Exception(
f"ERROR: Incorrect time stamp. server time {headers['TIME']} client time {time_now}")
else:
raise Exception(
f"ERROR: Incorrect IOT version detected {headers['IOT']}")
def | ():
time_now = f'{time.time():.4f}'
headers = '''IOT:1.1
DATE:12/12/2019
TIME:{time_now}
DEVICE:{device_id}
KEY:{device_key}
'''.format(time_now=time_now, device_id= device_id, device_key=device_key)
return headers
def sendData(data):
if len(data) > 5 and len(data) < 60000:
try:
headers = _headers()
data = headers.replace('\n','\r\n') + data.replace('|#|','') + '|#|'
sock.send(data.encode())
except socket.timeout as e:
raise Exception("Socket time out")
except Exception as e:
raise Exception("Socket closed by server")
# ConnectionResetError(10054, 'An existing connection was forcibly closed by the remote host', None, 10054, None)
| _headers |
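# A minimal usage sketch for the client above. The host, port, device id, key
# and certificate path are placeholders, and a compatible IOT/1.1 server is
# assumed to be listening.
import IOTSocketClient as iot

iot.connectionSet('192.168.1.10', 8443, 'device01', 'secret-key',
                  Encrypt=1, cert_path='server.pem')
iot.sendData('{"temperature": 22.5}')   # payload is wrapped with the IOT/1.1 headers
while True:
    msg = iot.recvData()                 # '' on timeout; raises on a bad or replayed time stamp
    if msg:
        print(msg)
        break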
update_secret_urlbuilder.go | ///////////////////////////////////////////////////////////////////////
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
///////////////////////////////////////////////////////////////////////
// Code generated by go-swagger; DO NOT EDIT.
package secret
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the generate command
import (
"errors"
"net/url"
golangswaggerpaths "path"
"strings"
"github.com/go-openapi/swag"
)
// UpdateSecretURL generates a URL for the update secret operation
type UpdateSecretURL struct {
SecretName string
Tags []string
_basePath string
// avoid unkeyed usage
_ struct{}
}
// WithBasePath sets the base path for this url builder, only required when it's different from the
// base path specified in the swagger spec.
// When the value of the base path is an empty string
func (o *UpdateSecretURL) WithBasePath(bp string) *UpdateSecretURL {
o.SetBasePath(bp)
return o
}
// SetBasePath sets the base path for this url builder, only required when it's different from the
// base path specified in the swagger spec.
// When the value of the base path is an empty string
func (o *UpdateSecretURL) SetBasePath(bp string) {
o._basePath = bp
}
// Build a url path and query string
func (o *UpdateSecretURL) Build() (*url.URL, error) {
var result url.URL
var _path = "/{secretName}"
secretName := o.SecretName
if secretName != "" | else {
return nil, errors.New("SecretName is required on UpdateSecretURL")
}
_basePath := o._basePath
if _basePath == "" {
_basePath = "/v1/secret"
}
result.Path = golangswaggerpaths.Join(_basePath, _path)
qs := make(url.Values)
var tagsIR []string
for _, tagsI := range o.Tags {
tagsIS := tagsI
if tagsIS != "" {
tagsIR = append(tagsIR, tagsIS)
}
}
tags := swag.JoinByFormat(tagsIR, "multi")
for _, qsv := range tags {
qs.Add("tags", qsv)
}
result.RawQuery = qs.Encode()
return &result, nil
}
// Must is a helper function to panic when the url builder returns an error
func (o *UpdateSecretURL) Must(u *url.URL, err error) *url.URL {
if err != nil {
panic(err)
}
if u == nil {
panic("url can't be nil")
}
return u
}
// String returns the string representation of the path with query string
func (o *UpdateSecretURL) String() string {
return o.Must(o.Build()).String()
}
// BuildFull builds a full url with scheme, host, path and query string
func (o *UpdateSecretURL) BuildFull(scheme, host string) (*url.URL, error) {
if scheme == "" {
return nil, errors.New("scheme is required for a full url on UpdateSecretURL")
}
if host == "" {
return nil, errors.New("host is required for a full url on UpdateSecretURL")
}
base, err := o.Build()
if err != nil {
return nil, err
}
base.Scheme = scheme
base.Host = host
return base, nil
}
// StringFull returns the string representation of a complete url
func (o *UpdateSecretURL) StringFull(scheme, host string) string {
return o.Must(o.BuildFull(scheme, host)).String()
}
| {
_path = strings.Replace(_path, "{secretName}", secretName, -1)
} |
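// Hedged usage sketch for the URL builder above (values are illustrative; the
// importing package name "secret" matches the package clause above):
//
//	u := secret.UpdateSecretURL{SecretName: "db-password", Tags: []string{"env:prod"}}
//	rel := u.String()                                   // e.g. "/v1/secret/db-password?tags=env%3Aprod"
//	abs := u.Must(u.BuildFull("https", "vault.local"))  // *url.URL with scheme and host filled in
//	_, _ = rel, abs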
cache.rs | use std::time::{Duration, Instant};
use tokio::sync::Mutex;
use trust_dns_proto::op::Message;
use crate::doh::config::CacheConfiguration;
use crate::doh::request_key::RequestKey;
#[derive(Clone)]
pub struct CacheObject {
message: Message,
cache_time: Instant,
expiration_time: Instant,
}
impl CacheObject {
pub fn new(message: Message, cache_time: Instant, cache_duration: Duration) -> Self {
let expiration_time = cache_time + cache_duration;
CacheObject {
message,
cache_time,
expiration_time,
}
}
pub fn message(self) -> Message {
self.message
}
pub fn | (&mut self) -> &mut Message {
&mut self.message
}
pub fn expired(&self, now: Instant) -> bool {
now > self.expiration_time
}
pub fn duration_in_cache(&self, now: Instant) -> Duration {
now - self.cache_time
}
}
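// Hedged usage sketch of the expiry bookkeeping above (the 30s TTL is
// illustrative; `Message::new()` is assumed to be the trust_dns_proto constructor):
//
//     let obj = CacheObject::new(Message::new(), Instant::now(), Duration::from_secs(30));
//     assert!(!obj.expired(Instant::now()));
//     assert!(obj.duration_in_cache(Instant::now()) < Duration::from_secs(1));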
pub struct Cache {
cache_configuration: CacheConfiguration,
cache: Mutex<lru::LruCache<RequestKey, CacheObject>>,
}
impl Cache {
pub fn new(cache_configuration: CacheConfiguration) -> Self {
let max_size = cache_configuration.max_size();
Cache {
cache_configuration,
cache: Mutex::new(lru::LruCache::new(max_size)),
}
}
pub async fn get(&self, key: &RequestKey) -> Option<CacheObject> {
let mut mut_cache = self.cache.lock().await;
        mut_cache.get(key).cloned()
}
pub async fn put(&self, key: RequestKey, cache_object: CacheObject) {
let mut mut_cache = self.cache.lock().await;
mut_cache.put(key, cache_object);
}
pub async fn periodic_purge(&self) -> (usize, usize) {
let mut mut_cache = self.cache.lock().await;
let mut items_purged = 0;
let now = Instant::now();
while items_purged < self.cache_configuration.max_purges_per_timer_pop() {
let lru_key_and_value = match mut_cache.peek_lru() {
None => break,
Some(lru_key_and_value) => lru_key_and_value,
};
if lru_key_and_value.1.expired(now) {
let key_clone = lru_key_and_value.0.clone();
mut_cache.pop(&key_clone);
items_purged += 1;
} else {
break;
}
}
(mut_cache.len(), items_purged)
}
}
| message_mut |
home.go | package handlers
import "net/http"
func HomeHandler(w http.ResponseWriter, r *http.Request) { | w.Write([]byte("Gorilla!\n"))
} |
|
force_login.js | import {
getUserInfo
} from "../../api/users"
export default {
created: function () {
if (!this.$isLoggedIn()) {
this.$router.push("/login");
} else {
getUserInfo(this.storage.token, this.storage.loggedInUser.username) | this.storage.loggedInUser = user;
}
});
}
},
} | .then(([user, err]) => {
if (err == null) { |
macpmtcsr.rs | #[doc = "Register `MACPMTCSR` reader"]
pub struct R(crate::R<MACPMTCSR_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<MACPMTCSR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<MACPMTCSR_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<MACPMTCSR_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `MACPMTCSR` writer"]
pub struct W(crate::W<MACPMTCSR_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<MACPMTCSR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<MACPMTCSR_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<MACPMTCSR_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Power down\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PD_A {
#[doc = "1: All received frames will be dropped. Cleared automatically when a magic packet or wakeup frame is received"]
ENABLED = 1,
}
impl From<PD_A> for bool {
#[inline(always)]
fn from(variant: PD_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `PD` reader - Power down"]
pub struct PD_R(crate::FieldReader<bool, PD_A>);
impl PD_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
PD_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<PD_A> {
match self.bits {
true => Some(PD_A::ENABLED),
_ => None,
}
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
**self == PD_A::ENABLED
}
}
impl core::ops::Deref for PD_R {
type Target = crate::FieldReader<bool, PD_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `PD` writer - Power down"]
pub struct PD_W<'a> {
w: &'a mut W,
}
impl<'a> PD_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PD_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "All received frames will be dropped. Cleared automatically when a magic packet or wakeup frame is received"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(PD_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01);
self.w
}
}
#[doc = "Magic packet enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum MPE_A {
#[doc = "0: No power management event generated due to Magic Packet reception"]
DISABLED = 0,
#[doc = "1: Enable generation of a power management event due to Magic Packet reception"]
ENABLED = 1,
}
impl From<MPE_A> for bool {
#[inline(always)]
fn from(variant: MPE_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `MPE` reader - Magic packet enable"]
pub struct MPE_R(crate::FieldReader<bool, MPE_A>);
impl MPE_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
MPE_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> MPE_A {
match self.bits {
false => MPE_A::DISABLED,
true => MPE_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
**self == MPE_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
**self == MPE_A::ENABLED
}
}
impl core::ops::Deref for MPE_R {
type Target = crate::FieldReader<bool, MPE_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `MPE` writer - Magic packet enable"]
pub struct MPE_W<'a> {
w: &'a mut W,
}
impl<'a> MPE_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: MPE_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "No power management event generated due to Magic Packet reception"]
#[inline(always)]
pub fn disabled(self) -> &'a mut W {
self.variant(MPE_A::DISABLED)
}
#[doc = "Enable generation of a power management event due to Magic Packet reception"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(MPE_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1);
self.w
}
}
#[doc = "Wakeup frame enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum WFE_A {
#[doc = "0: No power management event generated due to wakeup frame reception"]
DISABLED = 0,
#[doc = "1: Enable generation of a power management event due to wakeup frame reception"]
ENABLED = 1,
}
impl From<WFE_A> for bool {
#[inline(always)]
fn from(variant: WFE_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `WFE` reader - Wakeup frame enable"]
pub struct WFE_R(crate::FieldReader<bool, WFE_A>);
impl WFE_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
WFE_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> WFE_A {
match self.bits {
false => WFE_A::DISABLED,
true => WFE_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
**self == WFE_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
**self == WFE_A::ENABLED
}
}
impl core::ops::Deref for WFE_R {
type Target = crate::FieldReader<bool, WFE_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `WFE` writer - Wakeup frame enable"]
pub struct WFE_W<'a> {
w: &'a mut W,
}
impl<'a> WFE_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: WFE_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "No power management event generated due to wakeup frame reception"]
#[inline(always)]
pub fn disabled(self) -> &'a mut W {
self.variant(WFE_A::DISABLED)
}
#[doc = "Enable generation of a power management event due to wakeup frame reception"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(WFE_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2);
self.w
}
}
#[doc = "Field `MPR` reader - Magic packet received"]
pub struct MPR_R(crate::FieldReader<bool, bool>);
impl MPR_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
MPR_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for MPR_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `MPR` writer - Magic packet received"]
pub struct MPR_W<'a> {
w: &'a mut W,
}
impl<'a> MPR_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 5)) | ((value as u32 & 0x01) << 5);
self.w
}
}
#[doc = "Field `WFR` reader - Wakeup frame received"]
pub struct WFR_R(crate::FieldReader<bool, bool>);
impl WFR_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
WFR_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for WFR_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `WFR` writer - Wakeup frame received"]
pub struct WFR_W<'a> {
w: &'a mut W,
}
impl<'a> WFR_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 6)) | ((value as u32 & 0x01) << 6);
self.w
}
}
#[doc = "Global unicast\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum GU_A {
#[doc = "0: Normal operation"]
DISABLED = 0,
#[doc = "1: Any unicast packet filtered by the MAC address recognition may be a wakeup frame"]
ENABLED = 1,
}
impl From<GU_A> for bool {
#[inline(always)]
fn from(variant: GU_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `GU` reader - Global unicast"]
pub struct GU_R(crate::FieldReader<bool, GU_A>);
impl GU_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
GU_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> GU_A {
match self.bits {
false => GU_A::DISABLED,
true => GU_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
**self == GU_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
**self == GU_A::ENABLED
}
}
impl core::ops::Deref for GU_R {
type Target = crate::FieldReader<bool, GU_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `GU` writer - Global unicast"]
pub struct GU_W<'a> {
w: &'a mut W,
}
impl<'a> GU_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: GU_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Normal operation"]
#[inline(always)]
pub fn disabled(self) -> &'a mut W {
self.variant(GU_A::DISABLED)
}
#[doc = "Any unicast packet filtered by the MAC address recognition may be a wakeup frame"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(GU_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 9)) | ((value as u32 & 0x01) << 9);
self.w
}
}
#[doc = "Wakeup frame filter register pointer reset\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum WFFRPR_A {
#[doc = "1: Reset wakeup frame filter register point to 0b000. Automatically cleared"]
RESET = 1,
}
impl From<WFFRPR_A> for bool {
#[inline(always)]
fn from(variant: WFFRPR_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `WFFRPR` reader - Wakeup frame filter register pointer reset"]
pub struct WFFRPR_R(crate::FieldReader<bool, WFFRPR_A>);
impl WFFRPR_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
WFFRPR_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<WFFRPR_A> {
match self.bits {
true => Some(WFFRPR_A::RESET),
_ => None,
}
}
#[doc = "Checks if the value of the field is `RESET`"]
#[inline(always)]
pub fn is_reset(&self) -> bool {
**self == WFFRPR_A::RESET
}
}
impl core::ops::Deref for WFFRPR_R {
type Target = crate::FieldReader<bool, WFFRPR_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `WFFRPR` writer - Wakeup frame filter register pointer reset"]
pub struct WFFRPR_W<'a> {
w: &'a mut W,
}
impl<'a> WFFRPR_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: WFFRPR_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Reset wakeup frame filter register point to 0b000. Automatically cleared"]
#[inline(always)]
pub fn reset(self) -> &'a mut W {
self.variant(WFFRPR_A::RESET)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 31)) | ((value as u32 & 0x01) << 31);
self.w
}
}
impl R {
#[doc = "Bit 0 - Power down"]
#[inline(always)]
pub fn pd(&self) -> PD_R {
PD_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 1 - Magic packet enable"]
#[inline(always)]
pub fn mpe(&self) -> MPE_R {
MPE_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 2 - Wakeup frame enable"]
#[inline(always)]
pub fn wfe(&self) -> WFE_R {
WFE_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 5 - Magic packet received"]
#[inline(always)]
pub fn mpr(&self) -> MPR_R {
MPR_R::new(((self.bits >> 5) & 0x01) != 0)
}
#[doc = "Bit 6 - Wakeup frame received"]
#[inline(always)]
pub fn wfr(&self) -> WFR_R {
WFR_R::new(((self.bits >> 6) & 0x01) != 0)
}
#[doc = "Bit 9 - Global unicast"]
#[inline(always)]
pub fn gu(&self) -> GU_R {
GU_R::new(((self.bits >> 9) & 0x01) != 0)
}
#[doc = "Bit 31 - Wakeup frame filter register pointer reset"]
#[inline(always)]
pub fn wffrpr(&self) -> WFFRPR_R {
WFFRPR_R::new(((self.bits >> 31) & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - Power down"]
#[inline(always)]
pub fn pd(&mut self) -> PD_W {
PD_W { w: self }
}
#[doc = "Bit 1 - Magic packet enable"]
#[inline(always)]
pub fn mpe(&mut self) -> MPE_W {
MPE_W { w: self }
}
#[doc = "Bit 2 - Wakeup frame enable"]
#[inline(always)]
pub fn wfe(&mut self) -> WFE_W {
WFE_W { w: self }
}
#[doc = "Bit 5 - Magic packet received"]
#[inline(always)]
pub fn mpr(&mut self) -> MPR_W {
MPR_W { w: self }
}
#[doc = "Bit 6 - Wakeup frame received"]
#[inline(always)]
pub fn wfr(&mut self) -> WFR_W {
WFR_W { w: self }
}
#[doc = "Bit 9 - Global unicast"]
#[inline(always)]
pub fn gu(&mut self) -> GU_W {
GU_W { w: self }
}
#[doc = "Bit 31 - Wakeup frame filter register pointer reset"]
#[inline(always)]
pub fn wffrpr(&mut self) -> WFFRPR_W {
WFFRPR_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
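// Illustrative usage sketch (how the peripheral struct is reached depends on the
// generated device crate; the `ethernet_mac` handle below is an assumption, not
// defined in this module):
//
//     ethernet_mac.macpmtcsr.modify(|_, w| w.mpe().enabled().wfe().enabled());
//     let magic_packet_seen = ethernet_mac.macpmtcsr.read().mpr().bit_is_set();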
#[doc = "Ethernet MAC PMT control and status register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [macpmtcsr](index.html) module"]
pub struct MACPMTCSR_SPEC;
impl crate::RegisterSpec for MACPMTCSR_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [macpmtcsr::R](R) reader structure"]
impl crate::Readable for MACPMTCSR_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [macpmtcsr::W](W) writer structure"]
impl crate::Writable for MACPMTCSR_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets MACPMTCSR to value 0"]
impl crate::Resettable for MACPMTCSR_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
} | } | |
TidbitUser.tsx | import { Link } from "react-router-dom"
import React from "react"
import * as urls from "../../urls"
import TidbitItem from "./TidbitItem"
interface TidbitUserProps {
user?: {
id: string
slug: string
name: string
} | null
name?: string | null
}
const TidbitUser: React.FC<TidbitUserProps> = ({ user, name }) => (
<TidbitItem className="tidbit-user">
{user ? <Link to={urls.user(user)}>{user.name}</Link> : name}
</TidbitItem>
) |
export default TidbitUser |
|
TransactionHandler.js | Object.defineProperty(exports,"__esModule",{value:true});exports.default=undefined;var _createClass=function(){function defineProperties(target,props){for(var i=0;i<props.length;i++){var descriptor=props[i];descriptor.enumerable=descriptor.enumerable||false;descriptor.configurable=true;if("value"in descriptor)descriptor.writable=true;Object.defineProperty(target,descriptor.key,descriptor);}}return function(Constructor,protoProps,staticProps){if(protoProps)defineProperties(Constructor.prototype,protoProps);if(staticProps)defineProperties(Constructor,staticProps);return Constructor;};}();
var _events=require('../../utils/events');
var _native=require('../../utils/native');
var _Transaction=require('./Transaction');var _Transaction2=_interopRequireDefault(_Transaction);function _interopRequireDefault(obj){return obj&&obj.__esModule?obj:{default:obj};}function _classCallCheck(instance,Constructor){if(!(instance instanceof Constructor)){throw new TypeError("Cannot call a class as a function");}}
var transactionId=0;
var generateTransactionId=function generateTransactionId(){return transactionId++;};var
TransactionHandler=function(){
function | (firestore){_classCallCheck(this,TransactionHandler);
this._pending={};
this._firestore=firestore;
_events.SharedEventEmitter.addListener(
(0,_events.getAppEventName)(this._firestore,'firestore_transaction_event'),
this._handleTransactionEvent.bind(this));
}_createClass(TransactionHandler,[{key:'_add',value:function _add(
updateFunction)
{var _this=this;
var id=generateTransactionId();
var meta={
id:id,
updateFunction:updateFunction,
stack:new Error().stack.
split('\n').
slice(2).
join('\n')};
this._pending[id]={
meta:meta,
transaction:new _Transaction2.default(this._firestore,meta)};
return new Promise(function(resolve,reject){
(0,_native.getNativeModule)(_this._firestore).transactionBegin(id);
meta.resolve=function(r){
resolve(r);
_this._remove(id);
};
meta.reject=function(e){
reject(e);
_this._remove(id);
};
});
}},{key:'_remove',value:function _remove(
id){
(0,_native.getNativeModule)(this._firestore).transactionDispose(id);
delete this._pending[id];
}},{key:'_handleTransactionEvent',value:function _handleTransactionEvent(
event){
switch(event.type){
case'update':
this._handleUpdate(event);
break;
case'error':
this._handleError(event);
break;
case'complete':
this._handleComplete(event);
break;}
}},{key:'_handleUpdate',value:function _handleUpdate(
event){var id,_pending$id,meta,transaction,updateFunction,reject,finalError,updateFailed,pendingResult,possiblePromise;return regeneratorRuntime.async(function _handleUpdate$(_context){while(1){switch(_context.prev=_context.next){case 0:
id=event.id;if(
this._pending[id]){_context.next=3;break;}return _context.abrupt('return',this._remove(id));case 3:_pending$id=
this._pending[id],meta=_pending$id.meta,transaction=_pending$id.transaction;
updateFunction=meta.updateFunction,reject=meta.reject;
transaction._prepare();
finalError=void 0;
updateFailed=void 0;
pendingResult=void 0;_context.prev=9;
possiblePromise=updateFunction(transaction);if(!(
!possiblePromise||!possiblePromise.then)){_context.next=15;break;}
finalError=new Error(
'Update function for `firestore.runTransaction(updateFunction)` must return a Promise.');_context.next=18;break;case 15:_context.next=17;return regeneratorRuntime.awrap(
possiblePromise);case 17:pendingResult=_context.sent;case 18:_context.next=24;break;case 20:_context.prev=20;_context.t0=_context['catch'](9);
updateFailed=true;
finalError=_context.t0;case 24:if(!(
updateFailed||finalError)){_context.next=26;break;}return _context.abrupt('return',
reject(finalError));case 26:
transaction._pendingResult=pendingResult;return _context.abrupt('return',
(0,_native.getNativeModule)(this._firestore).transactionApplyBuffer(
id,
transaction._commandBuffer));case 28:case'end':return _context.stop();}}},null,this,[[9,20]]);}},{key:'_handleError',value:function _handleError(
event){var
id=event.id,error=event.error;var
meta=this._pending[id].meta;
if(meta&&error){var
_code=error.code,_message=error.message;
var errorWithStack=new Error(_message);
errorWithStack.code=_code;
errorWithStack.stack='Error: '+_message+'\n'+meta.stack;
meta.reject(errorWithStack);
}
}},{key:'_handleComplete',value:function _handleComplete(
event){var
id=event.id;var _pending$id2=
this._pending[id],meta=_pending$id2.meta,transaction=_pending$id2.transaction;
if(meta){
var pendingResult=transaction._pendingResult;
meta.resolve(pendingResult);
}
}}]);return TransactionHandler;}();exports.default=TransactionHandler; | TransactionHandler |
lib.rs | pub mod actions;
mod collection; | pub mod resource_types;
pub mod values_ui;
mod versions;
pub use actions::*;
pub use collection::*;
pub use error::*;
pub use ext_type::*;
pub use features::*;
pub use values_ui::*; | mod error;
pub mod ext_type;
pub mod features; |
paymentMethodCard.ts | /**
* Adyen Checkout API
* Adyen Checkout API provides a simple and flexible way to initiate and authorise online payments. You can use the same integration for payments made with cards (including 3D Secure), mobile wallets, and local payment methods (for example, iDEAL and Sofort). This API reference provides information on available endpoints and how to interact with them. To learn more about the API, visit [Checkout documentation](https://docs.adyen.com/checkout). ## Authentication Each request to the Checkout API must be signed with an API key. For this, obtain an API Key from your Customer Area, as described in [How to get the API key](https://docs.adyen.com/user-management/how-to-get-the-api-key). Then set this key to the `X-API-Key` header value, for example: ``` curl -H \"Content-Type: application/json\" \\ -H \"X-API-Key: Your_Checkout_API_key\" \\ ... ``` Note that when going live, you need to generate a new API Key to access the [live endpoints](https://docs.adyen.com/development-resources/live-endpoints). ## Versioning Checkout API supports versioning of its endpoints through a version suffix in the endpoint URL. This suffix has the following format: \"vXX\", where XX is the version number. For example: ``` https://checkout-test.adyen.com/v52/payments ```
*
* The version of the OpenAPI document: 52
* Contact: [email protected]
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
export class | {
/**
* Must be set to **scheme**.
*/
'type': string;
/**
* The [card verification code](https://docs.adyen.com/payments-fundamentals/payment-glossary#card-security-code-cvc-cvv-cid) (1-20 characters). Depending on the card brand, it is known also as: * CVV2/CVC2 – length: 3 digits * CID – length: 4 digits
*/
'cvc'?: string;
/**
* The card expiry month. Format: 2 digits, zero-padded for single digits. For example: * 03 = March * 11 = November
*/
'expiryMonth': string;
/**
* The card expiry year. Format: 4 digits. For example: 2020
*/
'expiryYear': string;
/**
* The name of the cardholder, as printed on the card.
*/
'holderName': string;
/**
* The card number (4-19 characters). Do not use any separators.
*/
'number': string;
static discriminator: string | undefined = undefined;
static attributeTypeMap: Array<{name: string, baseName: string, type: string}> = [
{
"name": "type",
"baseName": "type",
"type": "string"
},
{
"name": "cvc",
"baseName": "cvc",
"type": "string"
},
{
"name": "expiryMonth",
"baseName": "expiryMonth",
"type": "string"
},
{
"name": "expiryYear",
"baseName": "expiryYear",
"type": "string"
},
{
"name": "holderName",
"baseName": "holderName",
"type": "string"
},
{
"name": "number",
"baseName": "number",
"type": "string"
} ];
static getAttributeTypeMap() {
return PaymentMethodCard.attributeTypeMap;
}
}
| PaymentMethodCard |
stub_test.go | package stub
import (
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/source"
"github.com/golang-migrate/migrate/v4/source/stub"
"testing"
dt "github.com/golang-migrate/migrate/v4/database/testing"
)
func | (t *testing.T) {
s := &Stub{}
d, err := s.Open("")
if err != nil {
t.Fatal(err)
}
dt.Test(t, d, []byte("/* foobar migration */"))
}
func TestMigrate(t *testing.T) {
s := &Stub{}
d, err := s.Open("")
if err != nil {
t.Fatal(err)
}
stubMigrations := source.NewMigrations()
stubMigrations.Append(&source.Migration{Version: 1, Direction: source.Up, Identifier: "CREATE 1"})
stubMigrations.Append(&source.Migration{Version: 1, Direction: source.Down, Identifier: "DROP 1"})
src := &stub.Stub{}
srcDrv, err := src.Open("")
if err != nil {
t.Fatal(err)
}
srcDrv.(*stub.Stub).Migrations = stubMigrations
m, err := migrate.NewWithInstance("stub", srcDrv, "", d)
if err != nil {
t.Fatalf("%v", err)
}
dt.TestMigrate(t, m, []byte("/* foobar migration */"))
}
| Test |
market_making.go | package marketmaking
import (
"github.com/shopspring/decimal"
)
// MakingStrategy defines the automated market making strategy, using a formula to calculate the price of the next trade.
type MakingStrategy struct {
Type int
formula MakingFormula
}
// MakingFormula defines the interface for implementing the formula to derive the spot price
type MakingFormula interface {
SpotPrice(spotPriceOpts interface{}) (spotPrice decimal.Decimal, err error)
OutGivenIn(outGivenInOpts interface{}, amountIn uint64) (amountOut uint64, err error)
InGivenOut(inGivenOutOpts interface{}, amountOut uint64) (amountIn uint64, err error)
FormulaType() int
}
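// Illustrative sketch only: any type satisfying MakingFormula can be wrapped into a
// strategy. The fixedPriceFormula type below is hypothetical and not part of this package.
//
//	type fixedPriceFormula struct{ price decimal.Decimal }
//	func (f fixedPriceFormula) SpotPrice(_ interface{}) (decimal.Decimal, error) { return f.price, nil }
//	func (f fixedPriceFormula) OutGivenIn(_ interface{}, amountIn uint64) (uint64, error) { return amountIn, nil }
//	func (f fixedPriceFormula) InGivenOut(_ interface{}, amountOut uint64) (uint64, error) { return amountOut, nil }
//	func (f fixedPriceFormula) FormulaType() int { return 1 }
//
//	strategy := NewStrategyFromFormula(fixedPriceFormula{price: decimal.NewFromInt(1)})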
// NewStrategyFromFormula returns the strategy struct with the name
func | (
formula MakingFormula,
) MakingStrategy {
strategy := MakingStrategy{
Type: formula.FormulaType(),
formula: formula,
}
return strategy
}
// IsZero checks if the given strategy is the zero value
func (ms MakingStrategy) IsZero() bool {
return ms.Type == 0
}
// Formula returns the mathematical formula of the MM strategy
func (ms MakingStrategy) Formula() MakingFormula {
return ms.formula
}
| NewStrategyFromFormula |
mod.rs | use std::fmt;
use std::net::{Ipv4Addr, Ipv6Addr};
use futures::future::BoxFuture;
extern crate clap;
use clap::{App, ArgMatches};
mod get_ip_by_url_detector;
mod set_ip_detector;
pub type SetIpDetector = set_ip_detector::SetIpDetector;
pub type GetIpByUrlDetector = get_ip_by_url_detector::GetIpByUrlDetector;
pub type SharedProgramOptions = super::option::SharedProgramOptions;
pub type HttpMethod = super::option::HttpMethod;
#[derive(Debug, Clone, PartialEq)]
pub enum Record {
A(Ipv4Addr),
AAAA(Ipv6Addr),
#[allow(dead_code)]
CNAME(String),
#[allow(dead_code)]
MX(String),
#[allow(dead_code)]
TXT(String),
}
impl fmt::Display for Record {
fn | (&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Record::A(ref v) => f.write_fmt(format_args!("A: {}", v)),
Record::AAAA(ref v) => f.write_fmt(format_args!("AAAA: {}", v)),
Record::CNAME(ref v) => f.write_fmt(format_args!("CNAME: {}", v)),
Record::MX(ref v) => f.write_fmt(format_args!("MX: {}", v)),
Record::TXT(ref v) => f.write_fmt(format_args!("TXT: {}", v)),
}
}
}
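// For example, `Record::A(Ipv4Addr::new(192, 0, 2, 1)).to_string()` yields "A: 192.0.2.1".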
pub type DetectorResult<'a> = Result<&'a Vec<Record>, ()>;
pub trait Detector {
fn initialize<'a>(&mut self, app: App<'a>) -> App<'a>;
fn parse_options(&mut self, matches: &ArgMatches, options: &mut SharedProgramOptions);
fn run<'a, 'b>(
&'a mut self,
options: &mut SharedProgramOptions,
) -> BoxFuture<'b, DetectorResult<'a>>
where
'a: 'b;
}
| fmt |
public_api.rs | use std::error;
use static_assertions::assert_impl_all;
use heim_process::ProcessError;
#[test]
fn test_public_api_contract() { | assert_impl_all!(ProcessError: Send, Sync, error::Error);
} |
|
LevelInput.tsx | import { Button, MenuItem, Select, TextField } from "@material-ui/core";
import React, { useContext } from "react";
import { Action, ActionType, AppContext } from "./state";
import { sampleLevels } from "./sampleLevel";
export const selectValueReducer = (
selectValue: string,
action: Action
): string => {
switch (action.type) {
case ActionType.SetSelectValue:
return action.value;
default:
return selectValue;
}
};
export const levelStringReducer = (
levelString: string,
action: Action
): string => {
switch (action.type) {
case ActionType.SetLevelString:
return action.value;
default:
return levelString;
}
};
const LevelInput = () => {
const { state, dispatch } = useContext(AppContext)!;
const setLevelString = (value: string) =>
dispatch({ type: ActionType.SetLevelString, value: value });
const setSelectValue = (value: string) =>
dispatch({ type: ActionType.SetSelectValue, value: value });
function onLevelStringChange(event: React.ChangeEvent<{ value: string }>) {
setLevelString(event.target.value);
}
function onSelectChange(event: React.ChangeEvent<{ value: unknown }>) {
const newLevelString = event.target.value as string;
setLevelString(newLevelString);
setSelectValue(newLevelString);
loadLevel(newLevelString);
}
function onClick() {
loadLevel(state.levelString);
}
function loadLevel(levelString: string) {
dispatch({ type: ActionType.Load, levelString: levelString });
}
return (
<>
<TextField
value={state.levelString}
rows={3}
onChange={onLevelStringChange}
multiline
variant="outlined"
style={{ width: 480 }} | onChange={onSelectChange}
style={{ width: 320 }}
>
{sampleLevels.map(({ label, data }) => (
<MenuItem key={label} value={data}>
{label}
</MenuItem>
))}
</Select>
<Button onClick={onClick} variant="contained" children="Refresh" />
</>
);
};
export default LevelInput; | />
<Select
variant="outlined"
value={state.selectValue} |
element.ts | import { SandKeyType, SandPropsType, SandTagType } from './type';
export class SandElement {
type: SandTagType;
key?: SandKeyType;
props: SandPropsType;
constructor(
type: SandTagType,
key: SandKeyType | undefined,
props: SandPropsType
) {
this.type = type;
this.key = key;
this.props = props; | }
} |
|
use-after-move-self.rs | struct S {
x: Box<isize>,
}
impl S {
pub fn foo(self) -> isize {
self.bar();
return *self.x; //~ ERROR use of moved value: `self`
}
pub fn bar(self) {}
}
fn main() | {
let x = S { x: 1.into() };
println!("{}", x.foo());
} |
|
add-chord.js | var RelativeElement = require('./abc_relative_element');
var spacing = require('./abc_spacing');
var addChord;
(function () {
"use strict";
addChord = function (getTextSize, abselem, elem, roomTaken, roomTakenRight) {
for (var i = 0; i < elem.chord.length; i++) {
var chord = elem.chord[i];
var x = 0;
var y;
var font;
var klass;
if (chord.position === "left" || chord.position === "right" || chord.position === "below" || chord.position === "above") {
font = 'annotationfont';
klass = "annotation";
} else {
font = 'gchordfont';
klass = "chord";
}
var attr = getTextSize.attr(font, klass);
var dim = getTextSize.calc(chord.name, font, klass);
var chordWidth = dim.width;
var chordHeight = dim.height / spacing.STEP;
switch (chord.position) {
case "left":
roomTaken += chordWidth + 7;
x = -roomTaken; // TODO-PER: This is just a guess from trial and error
y = elem.averagepitch;
abselem.addExtra(new RelativeElement(chord.name, x, chordWidth + 4, y, {
type: "text",
height: chordHeight,
dim: attr
}));
break;
case "right":
roomTakenRight += 4;
x = roomTakenRight;// TODO-PER: This is just a guess from trial and error
y = elem.averagepitch;
abselem.addRight(new RelativeElement(chord.name, x, chordWidth + 4, y, {
type: "text",
height: chordHeight,
dim: attr
}));
break; | // setting the y-coordinate to undefined for now: it will be overwritten later on, after we figure out what the highest element on the line is.
abselem.addRight(new RelativeElement(chord.name, 0, 0, undefined, {
type: "text",
position: "below",
height: chordHeight,
dim: attr
}));
break;
case "above":
// setting the y-coordinate to undefined for now: it will be overwritten later on, after we figure out what the highest element on the line is.
abselem.addRight(new RelativeElement(chord.name, 0, 0, undefined, {
type: "text",
height: chordHeight,
dim: attr
}));
break;
default:
if (chord.rel_position) {
var relPositionY = chord.rel_position.y + 3 * spacing.STEP; // TODO-PER: this is a fudge factor to make it line up with abcm2ps
abselem.addChild(new RelativeElement(chord.name, x + chord.rel_position.x, 0, elem.minpitch + relPositionY / spacing.STEP, {
type: "text",
height: chordHeight,
dim: attr
}));
} else {
// setting the y-coordinate to undefined for now: it will be overwritten later on, after we figure out what the highest element on the line is.
var pos2 = 'above';
if (elem.positioning && elem.positioning.chordPosition)
pos2 = elem.positioning.chordPosition;
abselem.addCentered(new RelativeElement(chord.name, x, chordWidth, undefined, {
type: "chord",
position: pos2,
height: chordHeight,
dim: attr
}));
}
}
}
return {roomTaken: roomTaken, roomTakenRight: roomTakenRight};
};
})();
module.exports = addChord; | case "below": |
test_introduction.py | # # Introduction
# In this notebook, we will load an example time series, fit a growth model
# and plot the signals.
#
# ## Load example time series
#
# Let's start by loading example time series data.
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
from typing import Iterable, List, Optional, cast
import matplotlib.pyplot as plt
import pytest
import seaborn as sns
import staticchar as ch
from psbutils.filecheck import Plottable, figure_found
from psbutils.misc import find_subrepo_directory
from staticchar.plotting.core import AnnotationSpec
SUBREPO_DIR = find_subrepo_directory()
S_SHAPE_FOLDER = SUBREPO_DIR / "tests/test_data/S-shape"
def | (name: str, ax: Optional[Plottable] = None) -> List[str]:
sns.despine()
found = figure_found(ax, f"test_introduction/{name}")
plt.clf()
return [] if found else [name]
@pytest.mark.timeout(10)
def test_introduction():
dataset = ch.datasets.Dataset(S_SHAPE_FOLDER) # type: ignore # auto
raw_timeseries = dataset.get_a_frame()
rth = raw_timeseries.head()
# As we can see, there is some non-zero signal at the beginning, which we attribute to
# the media absorbance and media fluorescence (as initially we have very low cell density).
assert sorted(rth.keys().to_list()) == sorted([ch.TIME, "EYFP", "OD", "ECFP", "OD700", "mRFP1"])
colors = {"EYFP": "yellow", "ECFP": "cyan", "mRFP1": "red", "OD": "black"}
plt.figure(figsize=(6.4, 4.8))
ax = cast(plt.Axes, plt.subplot())
ch.plot_signals_against_time(raw_timeseries, signals=colors.keys(), time_column="time", ax=ax, colors=colors)
ax.legend()
figures_not_found = []
figures_not_found += plot_figure("plot1_raw_timeseries", ax)
# ## Pre-processing
# Let's assume this is the background and subtract it.
# (A more precise, but also costly alternative is to estimate this using several blanks).
# In[ ]:
subtracted = ch.subtract_background(
raw_timeseries, columns=["OD", "ECFP", "EYFP", "mRFP1"], strategy=ch.BackgroundChoices.Minimum
)
ax = cast(plt.Axes, plt.subplot())
ch.plot_signals_against_time(subtracted, signals=colors.keys(), time_column="time", ax=ax, colors=colors)
ax.legend()
figures_not_found += plot_figure("plot2_subtracted_timeseries", ax)
# ## Run characterization on an example
# In[ ]:
yaml_path = find_subrepo_directory() / "tests/configs/integral_basic.yml"
config = ch.config.load(yaml_path, ch.config.CharacterizationConfig)
# config
# ### Fitting a growth model
#
# Let's fit a growth model to the OD signal.
model_params = ch.LogisticModel.fit(subtracted["time"], subtracted[config.growth_signal]) # type: ignore # auto
model = ch.LogisticModel(model_params)
# model_params = ch.GompertzModel.fit(subtracted["time"], subtracted[config.growth_signal])
# model = ch.GompertzModel(model_params)
print(f"Inferred parameters: {model_params}")
print(f"Growth phase: {model.growth_period}")
print(f"Time of maximal activity: {model.time_maximal_activity}")
print(f"Inferred (log of) initial density: {model.initial_density(log=True)}")
ch.plot_growth_model(subtracted["time"], subtracted[config.growth_signal], model=model) # type: ignore # auto
figures_not_found += plot_figure("plot3_growth_model_fit")
# ### Plotting the data
#
# Some time after the growth phase, we should observe a similar exponential production
# of the proteins. Suppose that this maturation time is about 50 minutes,
# that is about 0.85 hours.
#
# Then, fluorescence signals should be linear when drawn with respect to each other.
# Add offset to the growth phase
production_phase = model.growth_period + config.maturation_offset
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) # type: ignore
ch.plot_signals_against_time(subtracted, signals=colors.keys(), time_column="time", ax=ax1, colors=colors)
# Visualise the production phase
ch.mark_phase(ax1, interval=production_phase, color="green", alpha=0.1)
ch.plot_signals_against_reference(subtracted, signals=("EYFP", "ECFP"), reference="mRFP1", colors=colors, ax=ax2)
figures_not_found += plot_figure("plot4_fluorescence_signals", f)
# ### Truncate the time-series
#
# We see that this very well captures the growth phase of mRFP1 (the reference signal),
# but is a bit too late for EYFP and ECFP -- we won't have a linear dependence between
# the signals...
#
# Let's choose a more narrow interval.
another_production_phase = ch.TimePeriod(reference=12, left=2, right=2)
truncated_timeseries = ch.select_time_interval(subtracted, interval=another_production_phase)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) # type: ignore
ch.plot_signals_against_time(subtracted, signals=colors.keys(), time_column="time", ax=ax1, colors=colors)
# Visualise the production phase
ch.mark_phase(ax1, interval=another_production_phase, color="green", alpha=0.1)
ch.plot_signals_against_reference(
truncated_timeseries, signals=("EYFP", "ECFP"), reference="mRFP1", colors=colors, ax=ax2 # type: ignore # auto
)
figures_not_found += plot_figure("plot5_truncated")
# Run method
gradient, gradient_error = ch.transcriptional_activity_ratio(
truncated_timeseries, # type: ignore # auto
config.signals,
config.reference,
config.signal_properties,
model_params.growth_rate,
model.growth_period,
maturation_offset=config.maturation_offset,
)
# gradient
# ### Integration-based characterization
# Now assume that we want to integrate the signals over the production period.
signals = ["EYFP", "ECFP"]
ch.integrate(data=subtracted, signals=signals, interval=config.time_window)
# Now plot the output
f, axs = plt.subplots(1, len(config.signals), figsize=(12, 4))
for signal, ax in zip(config.signals, cast(Iterable, axs)):
ch.plot_integration(
subtracted,
signal,
config.time_window,
ax,
fillcolor=colors[signal],
annotation_spec=AnnotationSpec(title=True),
)
figures_not_found += plot_figure("plot6_integration", f)
assert figures_not_found == [], f"Figures not found: {', '.join(figures_not_found)}"
| plot_figure |
AlipayInsUnderwriteUserPolicyQueryResponse.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.InsPolicy import InsPolicy
class AlipayInsUnderwriteUserPolicyQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayInsUnderwriteUserPolicyQueryResponse, self).__init__()
self._policys = None
self._total = None
@property
def policys(self):
return self._policys
@policys.setter
def policys(self, value):
if isinstance(value, list):
self._policys = list()
for i in value:
if isinstance(i, InsPolicy):
self._policys.append(i)
else:
|
@property
def total(self):
return self._total
@total.setter
def total(self, value):
self._total = value
def parse_response_content(self, response_content):
response = super(AlipayInsUnderwriteUserPolicyQueryResponse, self).parse_response_content(response_content)
if 'policys' in response:
self.policys = response['policys']
if 'total' in response:
self.total = response['total']
| self._policys.append(InsPolicy.from_alipay_dict(i)) |
config.py | # -*- coding: utf-8 -*-
"""Config component.
This module defines the config Component.
<config>
</config>
"""
from typing import List
from . import AbstractComponent
class configComponent(AbstractComponent):
def __init__(self):
self._xmlns = {}
self.attributes = {}
self.parent_xmlns = {}
self._children: List[AbstractComponent] = []
self.childrenData = []
self.tag = 'config'
@property
def xmlns(self):
return self._xmlns
@xmlns.setter
def xmlns(self, xmlns):
|
def add(self, component) -> None:
self._children.append(component)
def remove(self, component) -> None:
self._children.remove(component)
def is_composite(self) -> bool:
return False
def getXMLNS(self):
childrenData = []
for child in self._children:
child.getXMLNS()
return self._xmlns
def parse(self, serializer):
self.childrenData = []
self.getXMLNS()
for child in self._children:
self.childrenData.append(child.parse(serializer))
return serializer.parse(self)
| self._xmlns = xmlns |
wkt.go | // Package wkt implements Well Known Text encoding and decoding.
package wkt
import (
"errors"
"github.com/twpayne/go-geom"
)
const (
tPoint = "POINT "
tMultiPoint = "MULTIPOINT "
tLineString = "LINESTRING "
tMultiLineString = "MULTILINESTRING "
tPolygon = "POLYGON "
tMultiPolygon = "MULTIPOLYGON "
tGeometryCollection = "GEOMETRYCOLLECTION "
tZ = "Z "
tM = "M "
tZm = "ZM "
tEmpty = "EMPTY"
)
// ErrBraceMismatch is returned when braces do not match.
var ErrBraceMismatch = errors.New("wkt: brace mismatch")
// Encoder encodes WKT based on specified parameters.
type Encoder struct {
maxDecimalDigits int
}
// NewEncoder returns a new encoder with the given options set.
func NewEncoder(applyOptFns ...EncodeOption) *Encoder {
encoder := &Encoder{
maxDecimalDigits: -1,
}
for _, applyOptFn := range applyOptFns {
applyOptFn(encoder)
}
return encoder
}
// EncodeOption is an option that configures the encoder.
type EncodeOption func(*Encoder)
// EncodeOptionWithMaxDecimalDigits sets the maximum number of decimal digits to encode.
func EncodeOptionWithMaxDecimalDigits(maxDecimalDigits int) EncodeOption { | }
// Marshal translates a geometry to the corresponding WKT.
func Marshal(g geom.T, applyOptFns ...EncodeOption) (string, error) {
return NewEncoder(applyOptFns...).Encode(g)
}
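// Usage sketch (illustrative; the point construction assumes the go-geom API):
//
//	p := geom.NewPointFlat(geom.XY, []float64{1.2345678, 2.3456789})
//	wktString, err := Marshal(p, EncodeOptionWithMaxDecimalDigits(3))
//	// wktString holds the POINT text with at most three decimal digits per coordinate.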
// Unmarshal translates a WKT to the corresponding geometry.
func Unmarshal(wkt string) (geom.T, error) {
return decode(wkt)
} | return func(e *Encoder) {
e.maxDecimalDigits = maxDecimalDigits
} |
rtc.rs | //! Support for the Real Time Clock (RTC) peripheral.
//! For more details, see
//! [ST AN4759](https://www.st.com/resource/en/application_note/dm00226326-using-the-hardware-realtime-clock-rtc-and-the-tamper-management-unit-tamp-with-stm32-microcontrollers-stmicroelectronics.pdf)
//! Uses [Chrono](https://docs.rs/chrono) for dates and times.
use crate::pac::{EXTI, PWR, RCC, RTC};
use core::convert::TryInto;
use cortex_m::interrupt::free;
use chrono::{Datelike, NaiveDate, NaiveDateTime, NaiveTime, Timelike};
use cfg_if::cfg_if;
// todo: QC use of ICSR vice SR and ISR wherever used in this module!
/// RTC Clock source.
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum RtcClockSource {
/// 01: LSE oscillator clock used as RTC clock
Lse = 0b01,
/// 10: LSI oscillator clock used as RTC clock
Lsi = 0b10,
/// 11: HSE oscillator clock divided by 32 used as RTC clock
Hse = 0b11,
}
/// RTC error type
#[derive(Debug)]
pub enum Error {
/// Invalid input error
InvalidInputData,
}
/// See ref man, section 27.6.3, or AN4759, section 2.4.2.
/// To be used with WakeupPrescaler
#[derive(Clone, Copy, Debug)]
enum WakeupDivision {
Sixteen,
Eight,
Four,
Two,
}
/// See AN4759, table 13.
#[derive(Clone, Copy, Debug)]
enum ClockConfig {
One(WakeupDivision),
Two,
Three,
}
/// Interrupt event
pub enum Event {
WakeupTimer,
AlarmA,
AlarmB,
Timestamp,
}
pub enum Alarm {
AlarmA,
AlarmB,
}
impl From<Alarm> for Event {
fn from(a: Alarm) -> Self {
match a {
Alarm::AlarmA => Event::AlarmA,
Alarm::AlarmB => Event::AlarmB,
}
}
}
/// Represents a Real Time Clock (RTC) peripheral.
pub struct Rtc {
/// RTC Peripheral register definition
regs: RTC,
config: RtcConfig,
}
#[derive(Copy, Clone, Debug, PartialEq)]
/// Configuration data for the RTC.
pub struct RtcConfig {
/// RTC clock source. Defaults to LSI (Low speed internal oscillator)
pub clock_source: RtcClockSource,
/// Asynchronous prescaler factor
/// This is the asynchronous division factor:
/// ck_apre frequency = RTCCLK frequency/(PREDIV_A+1)
/// ck_apre drives the subsecond register. Defaults to 127.
pub async_prescaler: u8,
/// Synchronous prescaler factor
/// This is the synchronous division factor:
/// ck_spre frequency = ck_apre frequency/(PREDIV_S+1)
/// ck_spre must be 1Hz. Defaults to 255.
pub sync_prescaler: u16,
/// Bypass LSE output - eg if you're using a self-powered external oscillator. This
/// saves power, and lets you use the LSE output pin as a GPIO.
pub bypass_lse_output: bool,
}
impl Default for RtcConfig {
/// LSI with prescalers assuming 32.768 kHz.
/// Raw sub-seconds in 1/256.
fn default() -> Self {
RtcConfig {
clock_source: RtcClockSource::Lsi,
async_prescaler: 127,
sync_prescaler: 255,
bypass_lse_output: false,
}
}
}
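// Minimal usage sketch (assumes `dp` is the PAC `Peripherals` instance; clock and
// power setup details are omitted):
//
//     let mut rtc = Rtc::new(dp.RTC, RtcConfig::default());
//     rtc.set_wakeup(1.5); // periodic wakeup every 1.5 seconds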
impl Rtc {
/// Initialize the RTC, including configuration register writes.
pub fn new(regs: RTC, config: RtcConfig) -> Self {
let mut result = Self { regs, config };
// Enable the peripheral clock for communication
// You must enable the `pwren()` bit before making RTC register writes, or they won't stay
// set. Enable the backup interface by setting PWREN.
// Note that unlike other RCC enabling processes, there's no corresponding reset
// field here.
// See L4 RM, `Backup domain access` section.
free(|_| {
let rcc = unsafe { &(*RCC::ptr()) };
let mut pwr = unsafe { &(*PWR::ptr()) };
cfg_if! {
if #[cfg(any(feature = "f3", feature = "f4"))] {
rcc.apb1enr.modify(|_, w| w.pwren().set_bit());
pwr.cr.read(); // read to allow the pwr clock to enable
pwr.cr.modify(|_, w| w.dbp().set_bit());
while pwr.cr.read().dbp().bit_is_clear() {}
} else if #[cfg(any(feature = "l4", feature = "l5", feature = "g4", feature = "l412", feature = "wb", feature = "wl"))] {
// 1. Enable the power interface clock by setting the PWREN bits in the Section 6.4.18:
// APB1 peripheral clock enable register 1 (RCC_APB1ENR1)
#[cfg(not(any(feature = "wb", feature = "wl")))]
rcc.apb1enr1.modify(|_, w| {
w.pwren().set_bit();
w.rtcapben().set_bit()
});
#[cfg(any(feature = "wb", feature = "wl"))]
rcc.apb1enr1.modify(|_, w| w.rtcapben().set_bit());
rcc.apb1smenr1.modify(|_, w| w.rtcapbsmen().set_bit()); // In sleep and stop modes.
pwr.cr1.read(); // Read to allow the pwr clock to enable
// 2. Set the DBP bit in the Power control register 1 (PWR_CR1) to enable access to the
// backup domain
pwr.cr1.modify(|_, w| w.dbp().set_bit()); // Unlock the backup domain
while pwr.cr1.read().dbp().bit_is_clear() {}
} else if #[cfg(any(feature = "g0"))] {
rcc.apbenr1.modify(|_, w| {
w.pwren().set_bit();
w.rtcapben().set_bit()
});
rcc.apbsmenr1.modify(|_, w| w.rtcapbsmen().set_bit()); // In sleep and stop modes.
pwr.cr1.read();
pwr.cr1.modify(|_, w| w.dbp().set_bit());
while pwr.cr1.read().dbp().bit_is_clear() {}
} else { // eg h7
rcc.apb4enr.modify(|_, w| w.rtcapben().set_bit());
rcc.apb4lpenr.modify(|_, w| w.rtcapblpen().set_bit()); // In sleep and stop modes.
pwr.cr1.read(); // read to allow the pwr clock to enable
pwr.cr1.modify(|_, w| w.dbp().set_bit());
while pwr.cr1.read().dbp().bit_is_clear() {}
}
}
// Set up the LSI or LSE as required.
match config.clock_source {
RtcClockSource::Lsi => {
cfg_if! {
if #[cfg(feature = "wb")] {
// todo: LSI2?
rcc.csr.modify(|_, w| w.lsi1on().set_bit());
while rcc.csr.read().lsi1rdy().bit_is_clear() {}
} else {
rcc.csr.modify(|_, w| w.lsion().set_bit());
while rcc.csr.read().lsirdy().bit_is_clear() {}
}
}
}
RtcClockSource::Lse => {
// Can only set lsebyp when lse is off, so do this as a separate step.
rcc.bdcr
.modify(|_, w| w.lsebyp().bit(config.bypass_lse_output));
rcc.bdcr.modify(|_, w| w.lseon().set_bit());
while rcc.bdcr.read().lserdy().bit_is_clear() {}
}
_ => (),
}
rcc.bdcr.modify(|_, w| {
// 3. Select the RTC clock source in the Backup domain control register (RCC_BDCR).
unsafe { w.rtcsel().bits(result.config.clock_source as u8) };
// 4. Enable the RTC clock by setting the RTCEN [15] bit in the Backup domain control
// register (RCC_BDCR)
w.rtcen().set_bit()
});
});
result.edit_regs(false, |regs| {
regs.cr.modify(
|_, w| {
unsafe {
w.fmt()
.clear_bit() // 24hr
.osel()
/*
00: Output disabled
01: Alarm A output enabled
10: Alarm B output enabled
11: Wakeup output enabled
*/
.bits(0b00)
.pol()
.clear_bit()
}
}, // pol high
);
regs.prer.modify(|_, w| unsafe {
w.prediv_s().bits(config.sync_prescaler);
w.prediv_a().bits(config.async_prescaler)
});
});
result
}
/// Sets calendar clock to 24 hr format
pub fn set_24h_fmt(&mut self) {
self.edit_regs(true, |regs| regs.cr.modify(|_, w| w.fmt().set_bit()));
}
/// Sets calendar clock to 12 hr format
pub fn set_12h_fmt(&mut self) {
self.edit_regs(true, |regs| regs.cr.modify(|_, w| w.fmt().clear_bit()));
}
/// Reads current hour format selection
pub fn is_24h_fmt(&self) -> bool {
self.regs.cr.read().fmt().bit()
}
// /// Setup the alarm. See AN4759, section 2.3.1.
// /// `sleep_time` is in ms. `Table 8` describes these steps.
// pub fn set_alarm(&mut self, exti: &mut EXTI) {
// note: STM3241x and 42x have diff addresses, and are PAC incompatible!
// exti.imr1.modify(|_, w| w.mr18().unmasked());
// exti.rtsr1.modify(|_, w| w.tr18().set_bit());
// exti.ftsr1.modify(|_, w| w.tr18().clear_bit());
//
// self.edit_regs(false, |regs| {
// regs.cr.modify(|_, w| w.alrae().clear_bit());
//
// while regs.cr.read().alrae().bit_is_set() {}
//
// // todo: Set the alarm time. This function will be broken until this is accomplished.
// // self.regs.alrmar.modify(|_, w| unsafe {});
//
// regs.cr.modify(|_, w| w.alrae().set_bit());
// while regs.cr.read().alrae().bit_is_clear() {}
// })
// }
/// Helper fn, to do the important bits of setting the interval, with
/// the registers already unlocked.
fn set_wakeup_interval_inner(&mut self, sleep_time: f32) {
// Program the value into the wakeup timer
// Set WUT[15:0] in RTC_WUTR register. For RTC3 the user must also program
// WUTOCLR bits.
// See ref man Section 2.4.2: Maximum and minimum RTC wakeup period.
// todo check ref man register table
// See notes reffed below about WUCKSEL. We choose one of 3 "modes" described in AN4759 based
// on sleep time. If in the overlap area, choose the lower (more precise) mode.
// These all assume a 1hz `ck_spre`.
let lfe_freq = match self.config.clock_source {
RtcClockSource::Lse => 32_768.,
RtcClockSource::Lsi => 40_000.,
RtcClockSource::Hse => 250_000., // Assuming 8Mhz HSE, which may not be the case
};
// sleep_time = (1/lfe_freq) * div * (wutr + 1)
// res = 1/lfe_freq * div
// sleep_time = res * WUTR = 1/lfe_freq * div * (wutr + 1)
// wutr = sleep_time * lfe_freq / div - 1
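// Worked example: with LSE at 32.768 kHz and sleep_time = 2.0 s, the branch below
// selects WakeupDivision::Two, so div = 2 and wutr = 2.0 * 32_768 / 2 - 1 = 32_767,
// which fits the 16-bit WUT field.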
let clock_cfg;
let wutr;
if sleep_time >= 0.00012207 && sleep_time < 32. {
let division;
let div;
if sleep_time < 4. {
division = WakeupDivision::Two; // Resolution: 61.035µs
div = 2.;
} else if sleep_time < 8. {
division = WakeupDivision::Four; // Resolution: 122.08µs
div = 4.;
} else if sleep_time < 16. {
division = WakeupDivision::Eight; // Resolution: 244.141µs
div = 8.;
} else {
division = WakeupDivision::Sixteen; // Resolution: 488.281µs
div = 16.;
}
clock_cfg = ClockConfig::One(division);
wutr = sleep_time * lfe_freq / div - 1.
} else if sleep_time < 65_536. {
// 32s to 18 hours (This mode goes 1s to 18 hours; we use Config1 for the overlap)
clock_cfg = ClockConfig::Two;
wutr = sleep_time; // This works out conveniently!
} else if sleep_time < 131_072. {
// 18 to 36 hours
clock_cfg = ClockConfig::Three;
wutr = sleep_time - 65_537.;
} else {
panic!("Wakeup period must be between 0122.07µs and 36 hours.")
}
self.regs
.wutr
.modify(|_, w| unsafe { w.wut().bits(wutr as u16) });
// Select the desired clock source. Program WUCKSEL[2:0] bits in RTC_CR register.
// See ref man Section 2.4.2: Maximum and minimum RTC wakeup period.
// todo: Check register docs and see what to set here.
// See AN4759, Table 13. RM, 27.3.6
// When ck_spre frequency is 1Hz, this allows to achieve a wakeup time from 1 s to
// around 36 hours with one-second resolution. This large programmable time range is
// divided in 2 parts:
// – from 1s to 18 hours when WUCKSEL [2:1] = 10
// – and from around 18h to 36h when WUCKSEL[2:1] = 11. In this last case 216 is
// added to the 16-bit counter current value.When the initialization sequence is
// complete (see Programming the wakeup timer on page 781), the timer starts
// counting down.When the wakeup function is enabled, the down-counting remains
// active in low-power modes. In addition, when it reaches 0, the WUTF flag is set in
// the RTC_ISR register, and the wakeup counter is automatically reloaded with its
// reload value (RTC_WUTR register value).
let word = match clock_cfg {
ClockConfig::One(division) => match division {
WakeupDivision::Sixteen => 0b000,
WakeupDivision::Eight => 0b001,
WakeupDivision::Four => 0b010,
WakeupDivision::Two => 0b011,
},
// for 2 and 3, what does `x` mean in the docs? Best guess is it doesn't matter.
ClockConfig::Two => 0b100, // eg 1s to 18h.
ClockConfig::Three => 0b110, // eg 18h to 36h
};
// 000: RTC/16 clock is selected
// 001: RTC/8 clock is selected
// 010: RTC/4 clock is selected
// 011: RTC/2 clock is selected
// 10x: ck_spre (usually 1 Hz) clock is selected
// 11x: ck_spre (usually 1 Hz) clock is selected and 216 is added to the WUT counter value
self.regs
.cr
.modify(|_, w| unsafe { w.wucksel().bits(word) });
}
#[cfg(not(feature = "f373"))]
/// Setup periodic auto-wakeup interrupts. See ST AN4759, Table 11, and more broadly,
/// section 2.4.1. See also reference manual, section 27.5.
/// In addition to running this function, set up the interrupt handling function by
/// adding the line `make_rtc_interrupt_handler!(RTC_WKUP);` somewhere in the body
/// of your program.
/// `sleep_time` is in seconds.
pub fn set_wakeup(&mut self, sleep_time: f32) {
// Configure and enable the EXTI line corresponding to the Wakeup timer even in
// interrupt mode and select the rising edge sensitivity.
// Sleep time is in seconds. See L4 RM, Table 47 to see that exti line 20 is the RTC wakeup
// timer. This appears to be the case for all families.
// L4 RM, 5.3.11: To wakeup from Stop mode with an RTC wakeup event, it is necessary to:
// • Configure the EXTI Line 20 to be sensitive to rising edge
// • Configure the RTC to generate the RTC alarm
let mut exti = unsafe { &(*EXTI::ptr()) };
cfg_if! {
if #[cfg(any(feature = "f3", feature = "l4"))] {
exti.imr1.modify(|_, w| w.mr20().unmasked());
exti.rtsr1.modify(|_, w| w.tr20().set_bit());
exti.ftsr1.modify(|_, w| w.tr20().clear_bit());
} else if #[cfg(feature = "f4")] {
exti.imr.modify(|_, w| w.mr20().unmasked());
exti.rtsr.modify(|_, w| w.tr20().set_bit());
exti.ftsr.modify(|_, w| w.tr20().clear_bit());
} else if #[cfg(feature = "g4")]{
exti.imr1.modify(|_, w| w.im20().unmasked());
exti.rtsr1.modify(|_, w| w.rt20().set_bit());
exti.ftsr1.modify(|_, w| w.ft20().clear_bit());
} else if #[cfg(any(feature = "l5", feature = "g0", feature = "wb", feature = "wl"))] {
// exti.imr1.modify(|_, w| w.mr20().unmasked());
// exti.rtsr1.modify(|_, w| w.rt20().set_bit());
// exti.ftsr1.modify(|_, w| w.ft20().clear_bit());
} else if #[cfg(any(feature = "h747cm4", feature = "h747cm7"))] {
exti.c1imr1.modify(|_, w| w.mr20().unmasked());
exti.rtsr1.modify(|_, w| w.tr20().set_bit());
exti.ftsr1.modify(|_, w| w.tr20().clear_bit());
} else { // H7
exti.cpuimr1.modify(|_, w| w.mr20().unmasked());
exti.rtsr1.modify(|_, w| w.tr20().set_bit());
exti.ftsr1.modify(|_, w| w.tr20().clear_bit());
}
}
// We can't use the `edit_regs` abstraction here due to being unable to call a method
// in the closure.
self.regs.wpr.write(|w| unsafe { w.bits(0xCA) });
self.regs.wpr.write(|w| unsafe { w.bits(0x53) });
// Disable the wakeup timer. Clear WUTE bit in RTC_CR register
self.regs.cr.modify(|_, w| w.wute().clear_bit());
// Ensure access to Wakeup auto-reload counter and bits WUCKSEL[2:0] is allowed.
// Poll WUTWF until it is set in RTC_ISR (RTC2)/RTC_ICSR (RTC3) (May not be avail on F3)
cfg_if! {
if #[cfg(any(feature = "l5", feature = "g0", feature = "g4", feature = "l412", feature = "wl"))] {
while self.regs.icsr.read().wutwf().bit_is_clear() {}
} else {
while self.regs.isr.read().wutwf().bit_is_clear() {}
}
}
self.set_wakeup_interval_inner(sleep_time);
// Re-enable the wakeup timer. Set WUTE bit in RTC_CR register.
// The wakeup timer restarts counting down.
self.regs.cr.modify(|_, w| w.wute().set_bit());
// Enable the wakeup timer interrupt.
self.regs.cr.modify(|_, w| w.wutie().set_bit());
cfg_if! {
if #[cfg(any(feature = "l412", feature = "l5", feature = "g0", feature = "g4", feature = "l412", feature = "wl"))] {
self.regs.scr.write(|w| w.cwutf().set_bit());
} else {
self.regs.isr.modify(|_, w| w.wutf().clear_bit());
}
}
self.regs.wpr.write(|w| unsafe { w.bits(0xFF) });
}
/// Enable the wakeup timer.
pub fn enable_wakeup(&mut self) {
unsafe {
self.regs.wpr.write(|w| w.bits(0xCA));
self.regs.wpr.write(|w| w.bits(0x53));
self.regs.cr.modify(|_, w| w.wute().set_bit());
self.regs.wpr.write(|w| w.bits(0xFF));
}
}
/// Disable the wakeup timer.
pub fn disable_wakeup(&mut self) {
unsafe {
self.regs.wpr.write(|w| w.bits(0xCA));
self.regs.wpr.write(|w| w.bits(0x53));
self.regs.cr.modify(|_, w| w.wute().clear_bit());
self.regs.wpr.write(|w| w.bits(0xFF));
}
}
/// Change the sleep time for the auto wakeup, after it's been set up.
/// Sleep time is in ms. Note: this largely duplicates the logic in `set_wakeup`.
pub fn set_wakeup_interval(&mut self, sleep_time: f32) {
// `sleep_time` is in seconds.
// See comments in `set_auto_wakeup` for what these writes do.
// We can't use the `edit_regs` abstraction here due to being unable to call a method
// in the closure.
self.regs.wpr.write(|w| unsafe { w.bits(0xCA) });
self.regs.wpr.write(|w| unsafe { w.bits(0x53) });
let started_enabled = self.regs.cr.read().wute().bit_is_set();
if started_enabled {
self.regs.cr.modify(|_, w| w.wute().clear_bit());
}
cfg_if! {
if #[cfg(any(feature = "l5", feature = "g0", feature = "g4", feature = "l412", feature = "wl"))] {
while self.regs.icsr.read().wutwf().bit_is_clear() {}
} else {
while self.regs.isr.read().wutwf().bit_is_clear() {}
}
}
self.set_wakeup_interval_inner(sleep_time);
if started_enabled {
self.regs.cr.modify(|_, w| w.wute().set_bit());
}
self.regs.wpr.write(|w| unsafe { w.bits(0xFF) });
}
/// Clears the wakeup flag. Must be cleared manually after every RTC wakeup.
/// Alternatively, you could call this in the RTC wakeup interrupt handler.
pub fn clear_wakeup_flag(&mut self) {
self.edit_regs(false, |regs| {
regs.cr.modify(|_, w| w.wute().clear_bit());
cfg_if! {
if #[cfg(any(feature = "l412", feature = "l5", feature = "g0", feature = "g4", feature = "wl"))] {
regs.scr.write(|w| w.cwutf().set_bit());
} else {
// Note that we clear this by writing 0, which isn't
// the standard convention, eg in other families, and
// other peripherals.
regs.isr.modify(|_, w| w.wutf().clear_bit());
}
}
regs.cr.modify(|_, w| w.wute().set_bit());
});
}
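    // Example usage (sketch; assumes an `rtc` handle and that the RTC_WKUP interrupt
    // handler has been registered, e.g. via `make_rtc_interrupt_handler!(RTC_WKUP)`):
    //
    //     rtc.set_wakeup(sleep_time);  // start the periodic wakeup
    //     // ... enter a low-power mode ...
    //     rtc.clear_wakeup_flag();     // call after each wakeup, e.g. from the RTC_WKUP ISR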
/// This function disables write protection while modifying an RTC register, then re-enables it.
/// It also optionally handles the additional step required to set a clock or calendar
/// value.
fn edit_regs<F>(&mut self, init_mode: bool, mut closure: F)
where
F: FnMut(&mut RTC),
{
// Disable write protection
// This is safe, as we're only writing the correct and expected values.
self.regs.wpr.write(|w| unsafe { w.bits(0xCA) });
self.regs.wpr.write(|w| unsafe { w.bits(0x53) });
// todo: L4 has ICSR and ISR regs. Maybe both for backwards compat?
cfg_if! {
if #[cfg(any(feature = "l5", feature = "g0", feature = "g4", feature = "l412", feature = "wl"))] {
// Enter init mode if required. This is generally used to edit the clock or calendar,
// but not for initial enabling steps.
if init_mode && self.regs.icsr.read().initf().bit_is_clear() {
// are we already in init mode?
self.regs.icsr.modify(|_, w| w.init().set_bit());
while self.regs.icsr.read().initf().bit_is_clear() {} // wait to return to init state
}
// Edit the regs specified in the closure, now that they're writable.
closure(&mut self.regs);
if init_mode {
self.regs.icsr.modify(|_, w| w.init().clear_bit()); // Exits init mode
while self.regs.icsr.read().initf().bit_is_set() {}
}
// } else if #[cfg(feature = "wl")] {
// if init_mode && self.regs.isr.read().initf().bit_is_clear() {
// self.regs.icsr.modify(|_, w| w.init().set_bit());
// while self.regs.icsr.read().initf().bit_is_clear() {} // wait to return to init state
// }
// closure(&mut self.regs);
// if init_mode {
// self.regs.icsr.modify(|_, w| w.init().clear_bit()); // Exits init mode
// while self.regs.sr.read().initf().bit_is_set() {}
// }
} else {
if init_mode && self.regs.isr.read().initf().bit_is_clear() {
self.regs.isr.modify(|_, w| w.init().set_bit());
while self.regs.isr.read().initf().bit_is_clear() {} // wait to return to init state
}
closure(&mut self.regs);
if init_mode {
self.regs.isr.modify(|_, w| w.init().clear_bit()); // Exits init mode
while self.regs.isr.read().initf().bit_is_set() {}
}
}
}
// Re-enable write protection.
// This is safe, as the field accepts the full range of 8-bit values.
self.regs.wpr.write(|w| unsafe { w.bits(0xFF) });
}
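    // Example (sketch): the pattern used throughout this impl — wrap a register write in
    // `edit_regs` so the WPR unlock/lock sequence (and optional init mode) is handled for you:
    //
    //     self.edit_regs(false, |regs| regs.cr.modify(|_, w| w.wutie().clear_bit()));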
/// set time using NaiveTime (ISO 8601 time without timezone)
/// Hour format is 24h
pub fn set_time(&mut self, time: &NaiveTime) -> Result<(), Error> {
self.set_24h_fmt();
let (ht, hu) = bcd2_encode(time.hour())?;
let (mnt, mnu) = bcd2_encode(time.minute())?;
let (st, su) = bcd2_encode(time.second())?;
self.edit_regs(true, |regs| {
regs.tr.write(|w| unsafe {
w.ht().bits(ht);
w.hu().bits(hu);
w.mnt().bits(mnt);
w.mnu().bits(mnu);
w.st().bits(st);
w.su().bits(su);
w.pm().clear_bit()
})
});
Ok(())
}
/// Set the seconds component of the RTC's current time.
pub fn set_seconds(&mut self, seconds: u8) -> Result<(), Error> {
if seconds > 59 {
return Err(Error::InvalidInputData);
}
let (st, su) = bcd2_encode(seconds as u32)?;
self.edit_regs(true, |regs| {
regs.tr
.modify(|_, w| unsafe { w.st().bits(st).su().bits(su) })
});
Ok(())
}
/// Set the minutes component of the RTC's current time.
pub fn set_minutes(&mut self, minutes: u8) -> Result<(), Error> {
if minutes > 59 {
return Err(Error::InvalidInputData);
}
let (mnt, mnu) = bcd2_encode(minutes as u32)?;
self.edit_regs(true, |regs| {
regs.tr
.modify(|_, w| unsafe { w.mnt().bits(mnt).mnu().bits(mnu) })
});
Ok(())
}
/// Set the hours component of the RTC's current time.
pub fn set_hours(&mut self, hours: u8) -> Result<(), Error> {
let (ht, hu) = bcd2_encode(hours as u32)?;
self.edit_regs(true, |regs| {
regs.tr
.modify(|_, w| unsafe { w.ht().bits(ht).hu().bits(hu) })
});
Ok(())
}
/// Set the weekday component of the RTC's current date.
pub fn set_weekday(&mut self, weekday: u8) -> Result<(), Error> {
if !(1..=7).contains(&weekday) {
return Err(Error::InvalidInputData);
}
self.edit_regs(true, |regs| {
regs.dr.modify(|_, w| unsafe { w.wdu().bits(weekday) })
});
Ok(())
}
/// Set the day component of the RTC's current date.
pub fn set_day(&mut self, day: u8) -> Result<(), Error> {
if !(1..=31).contains(&day) {
return Err(Error::InvalidInputData);
}
let (dt, du) = bcd2_encode(day as u32)?;
self.edit_regs(true, |regs| {
regs.dr
.modify(unsafe { |_, w| w.dt().bits(dt).du().bits(du) })
});
Ok(())
}
/// Set the month component of the RTC's current date.
pub fn set_month(&mut self, month: u8) -> Result<(), Error> {
if !(1..=12).contains(&month) {
return Err(Error::InvalidInputData);
}
let (mt, mu) = bcd2_encode(month as u32)?;
self.edit_regs(true, |regs| {
regs.dr
.modify(|_, w| unsafe { w.mt().bit(mt > 0).mu().bits(mu) })
});
Ok(())
}
/// Set the year component of the RTC's current date.
pub fn set_year(&mut self, year: u16) -> Result<(), Error> {
if !(1970..=2038).contains(&year) {
// todo: Is this right?
return Err(Error::InvalidInputData);
}
let (yt, yu) = bcd2_encode(year as u32 - 2_000)?;
// todo RTC is 2000 based ? Not sure best way to handle this.
self.edit_regs(true, |regs| {
regs.dr
.modify(|_, w| unsafe { w.yt().bits(yt).yu().bits(yu) })
});
Ok(())
}
/// Set the date using NaiveDate (ISO 8601 calendar date without timezone).
/// WeekDay is set using the `set_weekday` method
pub fn set_date(&mut self, date: &NaiveDate) -> Result<(), Error> {
if date.year() < 1970 {
// todo: Is this right?
return Err(Error::InvalidInputData);
}
let (yt, yu) = bcd2_encode((date.year() - 2_000) as u32)?;
let (mt, mu) = bcd2_encode(date.month())?;
let (dt, du) = bcd2_encode(date.day())?;
self.edit_regs(true, |regs| {
regs.dr.write(|w| unsafe {
w.dt().bits(dt);
w.du().bits(du);
w.mt().bit(mt > 0);
w.mu().bits(mu);
w.yt().bits(yt);
w.yu().bits(yu)
})
});
Ok(())
}
/// Set the current datetime.
pub fn set_datetime(&mut self, date: &NaiveDateTime) -> Result<(), Error> {
if date.year() < 1970 {
// todo is this right?
return Err(Error::InvalidInputData);
}
self.set_24h_fmt();
let (yt, yu) = bcd2_encode((date.year() - 2_000) as u32)?;
let (mt, mu) = bcd2_encode(date.month())?;
let (dt, du) = bcd2_encode(date.day())?;
let (ht, hu) = bcd2_encode(date.hour())?;
let (mnt, mnu) = bcd2_encode(date.minute())?;
let (st, su) = bcd2_encode(date.second())?;
self.edit_regs(true, |regs| {
regs.dr.write(|w| unsafe {
w.dt().bits(dt);
w.du().bits(du);
w.mt().bit(mt > 0);
w.mu().bits(mu);
w.yt().bits(yt); | });
self.edit_regs(true, |regs| {
regs.tr.write(|w| unsafe {
w.ht().bits(ht);
w.hu().bits(hu);
w.mnt().bits(mnt);
w.mnu().bits(mnu);
w.st().bits(st);
w.su().bits(su);
w.pm().clear_bit()
})
});
Ok(())
}
/// Get the seconds component of the current time.
pub fn get_seconds(&mut self) -> u8 {
let tr = self.regs.tr.read();
bcd2_decode(tr.st().bits(), tr.su().bits()) as u8
}
/// Get the minutes component of the current time.
pub fn get_minutes(&mut self) -> u8 {
let tr = self.regs.tr.read();
bcd2_decode(tr.mnt().bits(), tr.mnu().bits()) as u8
}
/// Get the hours component of the current time.
pub fn get_hours(&mut self) -> u8 {
let tr = self.regs.tr.read();
bcd2_decode(tr.ht().bits(), tr.hu().bits()) as u8
}
/// Get the current time.
pub fn get_time(&mut self) -> NaiveTime {
NaiveTime::from_hms(
self.get_hours().into(),
self.get_minutes().into(),
self.get_seconds().into(),
)
}
/// Get the weekday component of the current date.
pub fn get_weekday(&mut self) -> u8 {
let dr = self.regs.dr.read();
bcd2_decode(dr.wdu().bits(), 0x00) as u8
}
/// Get the day component of the current date.
pub fn get_day(&mut self) -> u8 {
let dr = self.regs.dr.read();
bcd2_decode(dr.dt().bits(), dr.du().bits()) as u8
}
/// Get the month component of the current date.
pub fn get_month(&mut self) -> u8 {
let dr = self.regs.dr.read();
let mt: u8 = if dr.mt().bit() { 1 } else { 0 };
bcd2_decode(mt, dr.mu().bits()) as u8
}
/// Get the year component of the current date.
pub fn get_year(&mut self) -> u16 {
let dr = self.regs.dr.read();
bcd2_decode(dr.yt().bits(), dr.yu().bits()) as u16
}
/// Get the current date.
pub fn get_date(&mut self) -> NaiveDate {
NaiveDate::from_ymd(
self.get_year().into(),
self.get_month().into(),
self.get_day().into(),
)
}
/// Get the current datetime.
pub fn get_datetime(&mut self) -> NaiveDateTime {
NaiveDate::from_ymd(
self.get_year().into(),
self.get_month().into(),
self.get_day().into(),
)
.and_hms(
self.get_hours().into(),
self.get_minutes().into(),
self.get_seconds().into(),
)
}
}
// Two 32-bit registers (RTC_TR and RTC_DR) contain the seconds, minutes, hours (12- or 24-hour format), day (day
// of week), date (day of month), month, and year, expressed in binary coded decimal format
// (BCD). The sub-seconds value is also available in binary format.
//
// The following helper functions encode into BCD format from integer and
// decode to an integer from a BCD value respectively.
fn bcd2_encode(word: u32) -> Result<(u8, u8), Error> {
let l = match (word / 10).try_into() {
Ok(v) => v,
Err(_) => {
return Err(Error::InvalidInputData);
}
};
let r = match (word % 10).try_into() {
Ok(v) => v,
Err(_) => {
return Err(Error::InvalidInputData);
}
};
Ok((l, r))
}
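// Example (sketch): bcd2_encode(59) yields Ok((5, 9)); the inverse, bcd2_decode(5, 9)
// (defined below), returns 59.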
fn bcd2_decode(fst: u8, snd: u8) -> u32 {
(fst * 10 + snd).into()
} | w.yu().bits(yu)
}) |
StateHistory.tsx | import React, { FC } from 'react';
import { uniqueId } from 'lodash';
import { AlertState, dateTimeFormat, GrafanaTheme } from '@grafana/data';
import { Alert, LoadingPlaceholder, useStyles } from '@grafana/ui';
import { css } from '@emotion/css';
import { StateHistoryItem, StateHistoryItemData } from 'app/types/unified-alerting';
import { DynamicTable, DynamicTableColumnProps, DynamicTableItemProps } from '../DynamicTable';
import { AlertStateTag } from './AlertStateTag';
import { useManagedAlertStateHistory } from '../../hooks/useManagedAlertStateHistory';
import { AlertLabel } from '../AlertLabel';
import { GrafanaAlertState, PromAlertingRuleState } from 'app/types/unified-alerting-dto';
type StateHistoryRowItem = {
id: string;
state: PromAlertingRuleState | GrafanaAlertState | AlertState;
text?: string;
data?: StateHistoryItemData;
timestamp?: number;
};
type StateHistoryRow = DynamicTableItemProps<StateHistoryRowItem>;
interface RuleStateHistoryProps {
alertId: string;
}
const StateHistory: FC<RuleStateHistoryProps> = ({ alertId }) => {
const { loading, error, result = [] } = useManagedAlertStateHistory(alertId);
if (loading && !error) {
return <LoadingPlaceholder text={'Loading history...'} />;
}
if (error && !loading) {
return <Alert title={'Failed to fetch alert state history'}>{error.message}</Alert>;
}
const columns: Array<DynamicTableColumnProps<StateHistoryRowItem>> = [
{ id: 'state', label: 'State', size: 'max-content', renderCell: renderStateCell },
{ id: 'value', label: '', size: 'auto', renderCell: renderValueCell },
{ id: 'timestamp', label: 'Time', size: 'max-content', renderCell: renderTimestampCell },
];
| acc.push({
id: String(item.id),
state: item.newState,
text: item.text,
data: item.data,
timestamp: item.updated,
});
// if the preceding state is not the same, create a separate state entry – this likely means the state was reset
if (!hasMatchingPrecedingState(index, result)) {
acc.push({ id: uniqueId(), state: item.prevState });
}
return acc;
}, [])
.map((historyItem) => ({
id: historyItem.id,
data: historyItem,
}));
return <DynamicTable cols={columns} items={items} />;
};
function renderValueCell(item: StateHistoryRow) {
const matches = item.data.data?.evalMatches ?? [];
return (
<>
{item.data.text}
<LabelsWrapper>
{matches.map((match) => (
<AlertLabel key={match.metric} labelKey={match.metric} value={String(match.value)} />
))}
</LabelsWrapper>
</>
);
}
function renderStateCell(item: StateHistoryRow) {
return <AlertStateTag state={item.data.state} />;
}
function renderTimestampCell(item: StateHistoryRow) {
return (
<div className={TimestampStyle}>{item.data.timestamp && <span>{dateTimeFormat(item.data.timestamp)}</span>}</div>
);
}
const LabelsWrapper: FC<{}> = ({ children }) => {
const { wrapper } = useStyles(getStyles);
return <div className={wrapper}>{children}</div>;
};
const TimestampStyle = css`
display: flex;
align-items: flex-end;
flex-direction: column;
`;
const getStyles = (theme: GrafanaTheme) => ({
wrapper: css`
& > * {
margin-right: ${theme.spacing.xs};
}
`,
});
// this function will figure out if a given historyItem has a preceding historyItem where the states match - in other words
// the newState of the previous historyItem is the same as the prevState of the current historyItem
function hasMatchingPrecedingState(index: number, items: StateHistoryItem[]): boolean {
const currentHistoryItem = items[index];
const previousHistoryItem = items[index + 1];
if (!previousHistoryItem) {
return false;
}
return previousHistoryItem.newState === currentHistoryItem.prevState;
}
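// Example (hypothetical data): with items = [
//   { prevState: 'Alerting', newState: 'Normal' },
//   { prevState: 'Pending', newState: 'Alerting' },
// ], hasMatchingPrecedingState(0, items) is true because items[1].newState ('Alerting')
// matches items[0].prevState ('Alerting'), while hasMatchingPrecedingState(1, items) is
// false because there is no items[2].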
export { StateHistory }; | const items: StateHistoryRow[] = result
.reduce((acc: StateHistoryRowItem[], item, index) => { |
fz.go | /*
Copyright © 2021 Michael Bruskov <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"github.com/spf13/cobra"
)
// fzCmd represents the fz command
var fzCmd = &cobra.Command{
Use: "fz",
Short: "Management of forwarding zones",
}
func init() { | rootCmd.AddCommand(fzCmd)
} |
|
child-grid-row.component.ts | import {
ChangeDetectionStrategy,
ChangeDetectorRef,
Component,
ElementRef,
HostBinding,
Input,
OnInit,
ViewChild,
AfterViewInit,
SimpleChanges,
ComponentFactoryResolver
} from '@angular/core';
import { GridBaseAPIService } from '.././api.service';
import { IgxRowIslandComponent } from './row-island.component';
import { IgxGridComponent } from '../grid/grid.component';
import { takeUntil } from 'rxjs/operators';
@Component({
changeDetection: ChangeDetectionStrategy.OnPush,
preserveWhitespaces: false,
selector: 'igx-child-grid-row',
templateUrl: './child-grid-row.component.html'
})
export class | implements AfterViewInit, OnInit {
private resolver;
/**
* Returns whether the row is expanded.
* ```typescript
* const RowExpanded = this.grid1.rowList.first.expanded;
* ```
*/
public expanded = false;
@Input()
layout: IgxRowIslandComponent;
/**
* @hidden
*/
public get parentHasScroll() {
return !this.parentGrid.verticalScrollContainer.dc.instance.notVirtual;
}
/**
* @hidden
*/
@Input()
public parentGridID: string;
/**
* The data passed to the row component.
*
* ```typescript
* // get the row data for the first selected row
* let selectedRowData = this.grid.selectedRows[0].rowData;
* ```
*/
@Input()
public rowData: any = [];
/**
* The index of the row.
*
* ```typescript
* // get the index of the second selected row
* let selectedRowIndex = this.grid.selectedRows[1].index;
* ```
*/
@Input()
public index: number;
@ViewChild('hgrid', { static: true })
private hGrid: any/* TODO: IgxHierarchicalGridComponent*/;
/**
* @hidden
*/
@HostBinding('attr.tabindex')
public tabindex = 0;
/**
* @hidden
*/
@HostBinding('attr.role')
public role = 'row';
/**
* Get a reference to the grid that contains the selected row.
*
* ```typescript
* handleRowSelection(event) {
* // the grid on which the onRowSelectionChange event was triggered
* const grid = event.row.grid;
* }
* ```
*
* ```html
* <igx-grid
* [data]="data"
* (onRowSelectionChange)="handleRowSelection($event)">
* </igx-grid>
* ```
*/
// TODO: Refactor
get parentGrid(): any/* TODO: IgxHierarchicalGridComponent*/ {
return this.gridAPI.grid;
}
@HostBinding('attr.data-level')
get level() {
return this.layout.level;
}
/**
* The native DOM element representing the row. Could be null in certain environments.
*
* ```typescript
* // get the nativeElement of the second selected row
* let selectedRowNativeElement = this.grid.selectedRows[1].nativeElement;
* ```
*/
get nativeElement() {
return this.element.nativeElement;
}
constructor(public gridAPI: GridBaseAPIService<any/* TODO: IgxHierarchicalGridComponent*/>,
public element: ElementRef,
resolver: ComponentFactoryResolver,
public cdr: ChangeDetectorRef) {
this.resolver = resolver;
}
/**
* @hidden
*/
ngOnInit() {
this.layout.onLayoutChange.subscribe((ch) => {
this._handleLayoutChanges(ch);
});
const changes = this.layout.initialChanges;
changes.forEach(change => {
this._handleLayoutChanges(change);
});
this.hGrid.parent = this.parentGrid;
this.hGrid.parentIsland = this.layout;
this.hGrid.childRow = this;
// handler logic that re-emits hgrid events on the row island
this.setupEventEmitters();
this.layout.onGridCreated.emit({
owner: this.layout,
parentID: this.rowData.rowID,
grid: this.hGrid
});
}
/**
* @hidden
*/
ngAfterViewInit() {
this.hGrid.childLayoutList = this.layout.children;
if (this.layout.childColumns.length > 0 && !this.hGrid.autoGenerate) {
this.hGrid.createColumnsList(this.layout.childColumns.toArray());
}
const layouts = this.hGrid.childLayoutList.toArray();
layouts.forEach((l) => this.hGrid.hgridAPI.registerChildRowIsland(l));
this.parentGrid.hgridAPI.registerChildGrid(this.rowData.rowID, this.layout.key, this.hGrid);
this.layout.rowIslandAPI.registerChildGrid(this.rowData.rowID, this.hGrid);
this.hGrid.cdr.detectChanges();
}
private setupEventEmitters() {
const destructor = takeUntil(this.hGrid.destroy$);
const factory = this.resolver.resolveComponentFactory(IgxGridComponent);
// exclude outputs related to two-way binding functionality
const inputNames = factory.inputs.map(input => input.propName);
const outputs = factory.outputs.filter(o => {
const matchingInputPropName = o.propName.slice(0, o.propName.indexOf('Change'));
return inputNames.indexOf(matchingInputPropName) === -1;
});
outputs.forEach(output => {
if (this.hGrid[output.propName]) {
this.hGrid[output.propName].pipe(destructor).subscribe((args) => {
if (!args) {
args = {};
}
args.owner = this.hGrid;
this.layout[output.propName].emit(args);
});
}
});
}
private _handleLayoutChanges(changes: SimpleChanges) {
for (const change in changes) {
if (changes.hasOwnProperty(change)) {
this.hGrid[change] = changes[change].currentValue;
}
}
}
}
| IgxChildGridRowComponent |
show_sp_res_response.py | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ShowSpResResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'used_accounts_count': 'int'
}
attribute_map = {
'used_accounts_count': 'usedAccountsCount'
}
def __init__(self, used_accounts_count=None):
"""ShowSpResResponse - a model defined in huaweicloud sdk"""
super().__init__()
self._used_accounts_count = None
self.discriminator = None
if used_accounts_count is not None:
self.used_accounts_count = used_accounts_count
@property
def used_accounts_count(self):
"""Gets the used_accounts_count of this ShowSpResResponse.
The number of enterprise concurrent accounts already in use
:return: The used_accounts_count of this ShowSpResResponse.
:rtype: int
"""
return self._used_accounts_count
@used_accounts_count.setter
def used_accounts_count(self, used_accounts_count):
"""Sets the used_accounts_count of this ShowSpResResponse.
The number of enterprise concurrent accounts already in use
:param used_accounts_count: The used_accounts_count of this ShowSpResResponse.
:type: int
"""
self._used_accounts_count = used_accounts_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict() | result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowSpResResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | elif isinstance(value, dict): |
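# Example usage (sketch):
#   resp = ShowSpResResponse(used_accounts_count=3)
#   resp.to_dict()   # -> {'used_accounts_count': 3}
#   print(resp)      # pretty-printed via __repr__/to_str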
event_listener_handlers.py | import sys
sys.path.append('/opt')
from common.logger import get_logger
from common.utils import handle_exception_with_slack_notification
from common.exception_handler import exception_handler
from event_pubsub.config import NETWORK_ID, SLACK_HOOK
from event_pubsub.listeners.event_listeners import MPEEventListener, RFAIEventListener, RegistryEventListener, \
TokenStakeEventListener, AirdropEventListener, OccamAirdropEventListener, ConverterAGIXEventListener, \
ConverterNTXEventListener
logger = get_logger(__name__)
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def registry_event_listener_handler(event, context):
RegistryEventListener().listen_and_publish_registry_events()
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def mpe_event_listener_handler(event, context):
MPEEventListener().listen_and_publish_mpe_events()
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def rfai_event_listener_handler(event, context):
RFAIEventListener().listen_and_publish_rfai_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def token_stake_event_listener_handler(event, context):
TokenStakeEventListener().listen_and_publish_token_stake_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def airdrop_event_listener_handler(event, context):
AirdropEventListener().listen_and_publish_airdrop_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def occam_airdrop_event_listener_handler(event, context):
OccamAirdropEventListener().listen_and_publish_occam_airdrop_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def | (event, context):
ConverterAGIXEventListener().listen_and_publish_converter_agix_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def converter_ntx_event_listener_handler(event, context):
ConverterNTXEventListener().listen_and_publish_converter_ntx_events() | converter_agix_event_listener_handler |
lib.rs | use error::*;
#[macro_use]
mod error;
pub mod client;
mod http;
mod protocol; | mod shared;
mod util; | pub mod server; |
01_operations_on_a_graph.py | # Operations on a Computational Graph
import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Create graph
sess = tf.Session()
# Create tensors
# Create data to feed in
x_vals = np.array([1., 3., 5., 7., 9.]) | prod = tf.mul(x_data, m)
for x_val in x_vals:
print(sess.run(prod, feed_dict={x_data: x_val}))
merged = tf.merge_all_summaries()
if not os.path.exists('tensorboard_logs/'):
os.makedirs('tensorboard_logs/')
my_writer = tf.train.SummaryWriter('tensorboard_logs/', sess.graph) | x_data = tf.placeholder(tf.float32)
m = tf.constant(3.)
# Multiplication |
cervical-cancer-screening-summary-resource.service.ts | import { map } from 'rxjs/operators';
import { Injectable } from '@angular/core';
import { AppSettingsService } from '../app-settings/app-settings.service';
import { Observable } from 'rxjs';
import { HttpClient, HttpParams } from '@angular/common/http';
@Injectable()
export class | {
constructor(
protected http: HttpClient,
protected appSettingsService: AppSettingsService
) {}
public getUrl(): string {
return this.appSettingsService.getEtlRestbaseurl().trim();
}
public getCervicalCancerScreeningSummary(
patientUuid: string
): Observable<any> {
let url = this.getUrl();
url += 'patient-cervical-cancer-screening-summary';
const params: HttpParams = new HttpParams().set(
'uuid',
patientUuid as string
);
return this.http
.get<any>(url, {
params: params
})
.pipe(
map((response) => {
return response.result;
})
);
}
}
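// Example usage (sketch; assumes the service is provided and injected via Angular DI,
// and that 'patient-uuid' is a hypothetical placeholder):
//   cervicalCancerScreeningService
//     .getCervicalCancerScreeningSummary('patient-uuid')
//     .subscribe((summary) => console.log(summary));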
| CervicalCancerScreeningSummaResourceService |
nedb-error.model.ts | export class NeDBError { | name: string = '';
stack: string = '';
errorType: string = '';
key: string = '';
} | message: string = ''; |
avatar.js | const Command = require("../../base/Command.js");
const Discord = require("discord.js")
const axios = require('axios')
class Avatar extends Command {
constructor(client) {
super(client, {
name: "avatar",
usage: ".avatar [@user]",
category: "Global",
description: "Belirttiğiniz kullanıcının avatarını görüntülersiniz.", | }
//
async run(message, args, data) {
//if(!message.member.permissions.has("VIEW_AUDIT_LOG")) return
let user = message.mentions.users.first() || this.client.users.cache.get(args[0]) || (args[0] && args[0].length ? this.client.users.cache.find(x => x.username.match(new RegExp(args.join(" "), "mgi"))) : null) || null;
if (!user)
try { user = await this.client.users.fetch(args[0]); }
catch (err) { user = message.author; }
const row = new Discord.MessageActionRow()
.addComponents(
new Discord.MessageSelectMenu()
.setCustomId('banner')
.setPlaceholder('Kullanıcının bannerini görüntülemek için tıkla!')
.addOptions([
{
label: 'Banner',
description: 'Kullanıcının bannerini görüntüleyin.',
value: 'banner',
}
]),
);
/*let embed = new Discord.MessageEmbed()
.setDescription(`**${user.tag}** adlı kullanıcının profil fotoğrafı!`)
.setImage(user.displayAvatarURL({dynamic: true, size: 2048}))
.setFooter({ text: `${message.author.tag} tarafından istendi!` })
*/
const avatar = `${user.displayAvatarURL({ dynamic: true, size: 4096 })}`
let msg = await message.channel.send({ content: avatar, components: [row] })
var filter = (menu) => menu.user.id === message.author.id;
const collector = msg.createMessageComponentCollector({ filter, max: 1, time: 30000 })
collector.on("collect", async (menu) => {
if(menu.values[0] === "banner") {
async function bannerURL(user, client) {
const response = await axios.get(`https://discord.com/api/v9/users/${user}`, { headers: { 'Authorization': `Bot ${client.token}` } });
if(!response.data.banner) return "Kullanıcının banneri bulunmamakta!"
if(response.data.banner.startsWith('a_')) return `https://cdn.discordapp.com/banners/${response.data.id}/${response.data.banner}.gif?size=512`
else return(`https://cdn.discordapp.com/banners/${response.data.id}/${response.data.banner}.png?size=512`)
}
let bannerurl = await bannerURL(user.id,this.client)
menu.reply({content: `> ${Discord.Formatters.hyperlink(`${user.tag}`, `${bannerurl}`, "Resimi büyütmek için tıkla")}`, ephemeral: true})
}
})
}
}
module.exports = Avatar; | aliases: ["av", "pp"]
}); |
configurations.py | import os
VERSION = '1.0.1'
SIEM_NAME = 'SentinelAddon'
XDR_HOSTS = {
'us': 'https://api.xdr.trendmicro.com',
'eu': 'https://api.eu.xdr.trendmicro.com',
'in': 'https://api.in.xdr.trendmicro.com',
'jp': 'https://api.xdr.trendmicro.co.jp',
'sg': 'https://api.sg.xdr.trendmicro.com',
'au': 'https://api.au.xdr.trendmicro.com',
'uae': 'https://api.uae.xdr.trendmicro.com/',
}
def get_workspace_id():
return os.environ['workspaceId']
def get_workspace_key():
return os.environ['workspaceKey']
def get_api_tokens():
is_key_vault_enabled = (
os.getenv('keyVaultUrl')
and os.getenv('keyVaultIdentityClientId')
and os.getenv('clpIds')
)
if is_key_vault_enabled:
# get tokens from key vault
from azure.keyvault.secrets import SecretClient
from azure.identity import DefaultAzureCredential
clp_ids = list(filter(None, os.getenv('clpIds').split(',')))
credential = DefaultAzureCredential(
managed_identity_client_id=os.getenv('keyVaultIdentityClientId')
)
client = SecretClient(vault_url=os.getenv('keyVaultUrl'), credential=credential)
return [client.get_secret(get_secret_name(clp_id)).value for clp_id in clp_ids]
else:
return list(filter(None, os.environ.get('apiTokens', '').split(',')))
def get_xdr_host_url():
xdr_host_url = os.environ.get('xdrHostUrl')
return xdr_host_url or XDR_HOSTS[os.environ['regionCode']]
def get_storage_connection_string():
return os.environ['AzureWebJobsStorage']
def get_max_workbench_query_minutes():
return int(os.environ.get('maxWorkbenchQueryMinutes', 60))
def get_default_workbench_query_minutes():
return int(os.environ.get('defaultWorkbenchQueryMinutes', 5))
def get_max_oat_query_minutes():
return int(os.environ.get('maxOatQueryMinutes', 30)) |
def get_oat_query_time_buffer_minutes():
return int(os.environ.get('defaultOatQueryTimeBufferMinutes', 15))
def get_datetime_format():
return '%Y-%m-%dT%H:%M:%S.000Z'
def get_wb_log_type():
return 'TrendMicro_XDR_WORKBENCH'
def get_health_check_log_type():
return 'TrendMicro_XDR_Health_Check'
def get_oat_health_check_log_type():
return 'TrendMicro_XDR_OAT_Health_Check'
def get_rca_log_type():
return 'TrendMicro_XDR_RCA_Result'
def get_rca_task_log_type():
return 'TrendMicro_XDR_RCA_Task'
def get_oat_log_type():
return 'TrendMicro_XDR_OAT'
def get_user_agent():
return f'TMXDR{SIEM_NAME}/{VERSION}'
def get_secret_name(clp_id):
return f'tmv1-entity-{clp_id}' |
def get_default_oat_query_minutes():
return int(os.environ.get('defaultOatQueryMinutes', 5)) |
mocks.go | package cltest
import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/smartcontractkit/chainlink/core/cmd"
"github.com/smartcontractkit/chainlink/core/logger"
"github.com/smartcontractkit/chainlink/core/services/chainlink"
"github.com/smartcontractkit/chainlink/core/services/keystore"
"github.com/smartcontractkit/chainlink/core/store"
"github.com/smartcontractkit/chainlink/core/store/config"
"github.com/smartcontractkit/chainlink/core/store/models"
"github.com/smartcontractkit/chainlink/core/web"
gethTypes "github.com/ethereum/go-ethereum/core/types"
"github.com/robfig/cron/v3"
"github.com/stretchr/testify/assert"
)
// MockSubscription a mock subscription
type MockSubscription struct {
mut sync.Mutex
channel interface{}
unsubscribed bool
Errors chan error
}
// EmptyMockSubscription return empty MockSubscription
func EmptyMockSubscription() *MockSubscription {
return &MockSubscription{Errors: make(chan error, 1), channel: make(chan struct{})}
}
// Err returns error channel from mes
func (mes *MockSubscription) Err() <-chan error { return mes.Errors }
// Unsubscribe closes the subscription
func (mes *MockSubscription) Unsubscribe() {
mes.mut.Lock()
defer mes.mut.Unlock()
if mes.unsubscribed {
return
}
mes.unsubscribed = true
switch mes.channel.(type) {
case chan struct{}:
close(mes.channel.(chan struct{}))
case chan gethTypes.Log:
close(mes.channel.(chan gethTypes.Log))
case chan *models.Head:
close(mes.channel.(chan *models.Head))
default:
logger.Fatal(fmt.Sprintf("Unable to close MockSubscription channel of type %T", mes.channel))
}
close(mes.Errors)
}
// InstantClock an InstantClock
type InstantClock struct{}
// Now returns the current local time
func (InstantClock) Now() time.Time {
return time.Now()
}
// After return channel of time
func (InstantClock) After(_ time.Duration) <-chan time.Time {
c := make(chan time.Time, 100)
c <- time.Now()
return c
}
// TriggerClock implements the AfterNower interface, but must be manually triggered
// to resume computation on After.
type TriggerClock struct {
triggers chan time.Time
t testing.TB
}
// NewTriggerClock returns a new TriggerClock, that a test can manually fire
// to continue processing in a Clock dependency.
func NewTriggerClock(t testing.TB) *TriggerClock {
return &TriggerClock{
triggers: make(chan time.Time),
t: t,
}
}
// Trigger sends a time to unblock the After call.
func (t *TriggerClock) Trigger() {
select {
case t.triggers <- time.Now():
case <-time.After(60 * time.Second):
t.t.Error("timed out while trying to trigger clock")
}
}
// TriggerWithoutTimeout is a special case where we know the trigger might
// block but don't care
func (t *TriggerClock) TriggerWithoutTimeout() {
t.triggers <- time.Now()
}
// Now returns the current local time
func (t TriggerClock) Now() time.Time {
return time.Now()
}
// After waits on a manual trigger.
func (t *TriggerClock) After(_ time.Duration) <-chan time.Time {
return t.triggers
}
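// Example usage (sketch): code under test that blocks on `clock.After(d)` can be released
// deterministically from the test by calling `clock.Trigger()`, instead of waiting in
// real time.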
// RendererMock a mock renderer
type RendererMock struct {
Renders []interface{}
}
// Render appends values to renderer mock
func (rm *RendererMock) Render(v interface{}, headers ...string) error {
rm.Renders = append(rm.Renders, v)
return nil
}
// InstanceAppFactory is an InstanceAppFactory
type InstanceAppFactory struct {
App chainlink.Application
}
// NewApplication creates a new application with specified config
func (f InstanceAppFactory) NewApplication(config config.EVMConfig) (chainlink.Application, error) {
return f.App, nil
}
type seededAppFactory struct {
Application chainlink.Application
}
func (s seededAppFactory) NewApplication(config config.EVMConfig) (chainlink.Application, error) {
return noopStopApplication{s.Application}, nil
}
type noopStopApplication struct {
chainlink.Application
}
// FIXME: Why bother with this wrapper?
func (a noopStopApplication) Stop() error {
return nil
}
// CallbackAuthenticator contains a call back authenticator method
type CallbackAuthenticator struct {
Callback func(*keystore.Eth, string) (string, error)
}
// Authenticate authenticates store and pwd with the callback authenticator
func (a CallbackAuthenticator) AuthenticateEthKey(ethKeyStore *keystore.Eth, pwd string) (string, error) {
return a.Callback(ethKeyStore, pwd)
}
func (a CallbackAuthenticator) AuthenticateVRFKey(vrfKeyStore *keystore.VRF, pwd string) error {
return nil
}
func (a CallbackAuthenticator) AuthenticateOCRKey(*keystore.OCR, string) error {
return nil
}
func (a CallbackAuthenticator) AuthenticateCSAKey(*keystore.CSA, string) error {
return nil
}
var _ cmd.KeyStoreAuthenticator = CallbackAuthenticator{}
// BlockedRunner is a Runner that blocks until its channel is posted to
type BlockedRunner struct {
Done chan struct{}
}
// Run runs the blocked runner, doesn't return until the channel is signalled
func (r BlockedRunner) Run(app chainlink.Application) error {
<-r.Done
return nil
}
// EmptyRunner is an EmptyRunner
type EmptyRunner struct{}
// Run runs the empty runner
func (r EmptyRunner) Run(app chainlink.Application) error {
return nil
}
// MockCountingPrompter is a mock counting prompt
type MockCountingPrompter struct {
T *testing.T
EnteredStrings []string
Count int
NotTerminal bool
}
// Prompt returns an entered string
func (p *MockCountingPrompter) Prompt(string) string {
i := p.Count
p.Count++
if len(p.EnteredStrings)-1 < i {
p.T.Errorf("Not enough passwords supplied to MockCountingPrompter, wanted %d", i)
p.T.FailNow()
}
return p.EnteredStrings[i]
}
// PasswordPrompt returns an entered string
func (p *MockCountingPrompter) PasswordPrompt(string) string {
i := p.Count
p.Count++
if len(p.EnteredStrings)-1 < i {
p.T.Errorf("Not enough passwords supplied to MockCountingPrompter, wanted %d", i)
p.T.FailNow()
}
return p.EnteredStrings[i]
}
// IsTerminal always returns true in tests
func (p *MockCountingPrompter) IsTerminal() bool {
return !p.NotTerminal
}
// NewHTTPMockServer create http test server with passed in parameters
func NewHTTPMockServer(
t *testing.T,
status int,
wantMethod string,
response string,
callback ...func(http.Header, string),
) (*httptest.Server, func()) {
called := false
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
b, err := ioutil.ReadAll(r.Body)
assert.NoError(t, err)
assert.Equal(t, wantMethod, r.Method)
if len(callback) > 0 {
callback[0](r.Header, string(b))
}
called = true
w.WriteHeader(status)
_, _ = io.WriteString(w, response) // Assignment for errcheck. Only used in tests so we can ignore.
})
server := httptest.NewServer(handler)
return server, func() {
server.Close()
assert.True(t, called, "expected call Mock HTTP endpoint '%s'", server.URL)
}
}
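// Example usage (sketch):
//
//   server, cleanup := NewHTTPMockServer(t, http.StatusOK, "POST", `{"ok":true}`)
//   defer cleanup()
//   // point the code under test at server.URL; cleanup asserts the endpoint was called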
// NewHTTPMockServerWithRequest creates http test server that makes the request
// available in the callback
func NewHTTPMockServerWithRequest(
t *testing.T,
status int,
response string,
callback func(r *http.Request),
) (*httptest.Server, func()) {
called := false
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
callback(r)
called = true
w.WriteHeader(status)
_, _ = io.WriteString(w, response) // Assignment for errcheck. Only used in tests so we can ignore.
})
server := httptest.NewServer(handler)
return server, func() {
server.Close()
assert.True(t, called, "expected call Mock HTTP endpoint '%s'", server.URL)
}
}
func NewHTTPMockServerWithAlterableResponse(
t *testing.T, response func() string) (server *httptest.Server) {
server = httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
_, _ = io.WriteString(w, response())
}))
return server
}
func NewHTTPMockServerWithAlterableResponseAndRequest(t *testing.T, response func() string, callback func(r *http.Request)) (server *httptest.Server) {
server = httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
callback(r)
w.WriteHeader(http.StatusOK)
_, _ = io.WriteString(w, response())
}))
return server
}
// MockCron represents a mock cron
type MockCron struct {
Entries []MockCronEntry
nextID cron.EntryID
}
// NewMockCron returns a new mock cron
func NewMockCron() *MockCron {
return &MockCron{}
}
// Start starts the mockcron
func (*MockCron) Start() {}
// Stop stops the mockcron
func (*MockCron) Stop() context.Context {
ctx, cancel := context.WithCancel(context.Background())
cancel()
return ctx
}
// AddFunc appends a schedule to mockcron entries
func (mc *MockCron) AddFunc(schd string, fn func()) (cron.EntryID, error) {
mc.Entries = append(mc.Entries, MockCronEntry{
Schedule: schd,
Function: fn,
})
mc.nextID++
return mc.nextID, nil
}
// RunEntries run every function for each mockcron entry
func (mc *MockCron) RunEntries() {
for _, entry := range mc.Entries {
entry.Function()
}
}
// MockCronEntry a cron schedule and function
type MockCronEntry struct {
Schedule string
Function func()
}
// MockHeadTrackable allows you to mock HeadTrackable
type MockHeadTrackable struct {
onNewHeadCount int32
}
// OnNewLongestChain increases the OnNewLongestChainCount count by one
func (m *MockHeadTrackable) OnNewLongestChain(context.Context, models.Head) {
atomic.AddInt32(&m.onNewHeadCount, 1)
}
// OnNewLongestChainCount returns the count of new heads, safely.
func (m *MockHeadTrackable) OnNewLongestChainCount() int32 {
return atomic.LoadInt32(&m.onNewHeadCount)
}
// NeverSleeper is a struct that never sleeps
type NeverSleeper struct{}
// Reset resets the never sleeper
func (ns NeverSleeper) Reset() {}
// Sleep puts the never sleeper to sleep
func (ns NeverSleeper) Sleep() {}
// After returns a duration
func (ns NeverSleeper) After() time.Duration { return 0 * time.Microsecond }
// Duration returns a duration
func (ns NeverSleeper) Duration() time.Duration { return 0 * time.Microsecond }
func MustRandomUser() models.User {
email := fmt.Sprintf("user-%[email protected]", NewRandomInt64())
r, err := models.NewUser(email, Password)
if err != nil {
logger.Panic(err)
}
return r
}
func | (t *testing.T, email, password string) models.User {
r, err := models.NewUser(email, password)
if err != nil {
t.Fatal(err)
}
return r
}
type MockAPIInitializer struct {
Count int
}
func (m *MockAPIInitializer) Initialize(store *store.Store) (models.User, error) {
if user, err := store.FindUser(); err == nil {
return user, err
}
m.Count++
user := MustRandomUser()
return user, store.SaveUser(&user)
}
func NewMockAuthenticatedHTTPClient(cfg cmd.HTTPClientConfig, sessionID string) cmd.HTTPClient {
return cmd.NewAuthenticatedHTTPClient(cfg, MockCookieAuthenticator{SessionID: sessionID}, models.SessionRequest{})
}
type MockCookieAuthenticator struct {
SessionID string
Error error
}
func (m MockCookieAuthenticator) Cookie() (*http.Cookie, error) {
return MustGenerateSessionCookie(m.SessionID), m.Error
}
func (m MockCookieAuthenticator) Authenticate(models.SessionRequest) (*http.Cookie, error) {
return MustGenerateSessionCookie(m.SessionID), m.Error
}
type MockSessionRequestBuilder struct {
Count int
Error error
}
func (m *MockSessionRequestBuilder) Build(string) (models.SessionRequest, error) {
m.Count++
if m.Error != nil {
return models.SessionRequest{}, m.Error
}
return models.SessionRequest{Email: APIEmail, Password: Password}, nil
}
type MockSecretGenerator struct{}
func (m MockSecretGenerator) Generate(string) ([]byte, error) {
return []byte(SessionSecret), nil
}
type MockChangePasswordPrompter struct {
web.UpdatePasswordRequest
err error
}
func (m MockChangePasswordPrompter) Prompt() (web.UpdatePasswordRequest, error) {
return m.UpdatePasswordRequest, m.err
}
type MockPasswordPrompter struct {
Password string
}
func (m MockPasswordPrompter) Prompt() string {
return m.Password
}
| MustNewUser |
forms.py | from django import forms
from django.forms import ModelForm, modelformset_factory
from .models import (
AcademicSession,
AcademicTerm,
SiteConfig,
StudentClass,
Subject,
)
SiteConfigForm = modelformset_factory(
SiteConfig,
fields=(
"key",
"value",
),
extra=0,
)
class AcademicSessionForm(ModelForm):
prefix = "Academic Session"
class Meta:
model = AcademicSession
fields = ["name", "current"]
class AcademicTermForm(ModelForm):
|
class SubjectForm(ModelForm):
prefix = "Subject"
class Meta:
model = Subject
fields = ["name"]
class StudentClassForm(ModelForm):
prefix = "Class"
class Meta:
model = StudentClass
fields = ["name"]
class CurrentSessionForm(forms.Form):
current_session = forms.ModelChoiceField(
queryset=AcademicSession.objects.all(),
help_text='Click <a href="/session/create/?next=current-session/">here</a> to add new session',
)
current_term = forms.ModelChoiceField(
queryset=AcademicTerm.objects.all(),
help_text='Click <a href="/term/create/?next=current-session/">here</a> to add new term',
)
| prefix = "Academic Term"
class Meta:
model = AcademicTerm
fields = ["name", "current"] |
CirclePlus.js | });
exports.default = void 0;
var _CirclePlus = _interopRequireDefault(require('./lib/icons/CirclePlus'));
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }
var _default = _CirclePlus;
exports.default = _default;
module.exports = _default; | 'use strict';
Object.defineProperty(exports, "__esModule", {
value: true |
|
rule.go | package rule
import (
"fmt"
"sync"
"time"
"k8s.io/klog/v2"
"github.com/kubeedge/beehive/pkg/core/model"
routerv1 "github.com/kubeedge/kubeedge/cloud/pkg/apis/rules/v1"
"github.com/kubeedge/kubeedge/cloud/pkg/common/modules"
"github.com/kubeedge/kubeedge/cloud/pkg/router/listener"
"github.com/kubeedge/kubeedge/cloud/pkg/router/provider"
)
var (
rules sync.Map
ruleEndpoints sync.Map
)
func init() {
registerListener()
}
func registerListener() {
endpointKey := fmt.Sprintf("%s/%s", modules.EdgeControllerModuleName, model.ResourceTypeRuleEndpoint)
listener.MessageHandlerInstance.AddListener(endpointKey, handleRuleEndpoint)
ruleKey := fmt.Sprintf("%s/%s", modules.EdgeControllerModuleName, model.ResourceTypeRule)
listener.MessageHandlerInstance.AddListener(ruleKey, handleRule)
}
// implement listener.Handle
func handleRuleEndpoint(data interface{}) (interface{}, error) {
message, ok := data.(*model.Message)
if !ok {
klog.Warningf("object type: %T unsupported", data)
return nil, fmt.Errorf("data type is %T", data)
}
ruleEndpoint, ok := message.Content.(*routerv1.RuleEndpoint)
if !ok {
klog.Warningf("object type: %T unsupported", message.Content)
return nil, fmt.Errorf("message content type should be ruleEndpoint type. operation: %s, resource: %s",
message.GetOperation(), message.GetResource())
}
switch message.GetOperation() {
case model.InsertOperation:
addRuleEndpoint(ruleEndpoint)
case model.DeleteOperation:
deleteRuleEndpoint(ruleEndpoint.Namespace, ruleEndpoint.Name)
default:
klog.Warningf("invalid message operation.")
}
return nil, nil
}
// implement listener.Handle
func handleRule(data interface{}) (interface{}, error) {
message, ok := data.(*model.Message)
if !ok {
klog.Warningf("object type: %T unsupported", data)
return nil, fmt.Errorf("data type is %T", data)
}
rule, ok := message.Content.(*routerv1.Rule)
if !ok {
klog.Warningf("object type: %T unsupported", message.Content)
return nil, fmt.Errorf("message content type should be rule type. operation: %s, resource: %s",
message.GetOperation(), message.GetResource())
}
switch message.GetOperation() {
case model.InsertOperation:
addRuleWithRetry(rule)
case model.DeleteOperation:
delRule(rule.Namespace, rule.Name)
default:
klog.Warningf("invalid message operation.")
}
return nil, nil
}
func addRuleEndpoint(ruleEndpoint *routerv1.RuleEndpoint) {
key := getKey(ruleEndpoint.Namespace, ruleEndpoint.Name)
ruleEndpoints.Store(key, ruleEndpoint)
klog.Infof("add ruleendpoint %s success.", key)
}
func deleteRuleEndpoint(namespace, name string) {
key := getKey(namespace, name)
ruleEndpoints.Delete(key)
klog.Infof("delete ruleendpoint %s success.", key)
}
// AddRule add rule
func addRule(rule *routerv1.Rule) error {
source, err := getSourceOfRule(rule)
if err != nil {
klog.Error(err)
return err
}
target, err := getTargetOfRule(rule)
if err != nil {
klog.Error(err)
return err
}
ruleKey := getKey(rule.Namespace, rule.Name)
if err := source.RegisterListener(func(data interface{}) (interface{}, error) {
//TODO Use goroutine pool later
var execResult ExecResult
resp, err := source.Forward(target, data)
if err != nil {
// rule.Status.Fail++
// record error info for rule
errMsg := ErrorMsg{Detail: err.Error(), Timestamp: time.Now()}
execResult = ExecResult{RuleID: rule.Name, ProjectID: rule.Namespace, Status: "FAIL", Error: errMsg}
} else {
execResult = ExecResult{RuleID: rule.Name, ProjectID: rule.Namespace, Status: "SUCCESS"}
}
ResultChannel <- execResult
return resp, nil
}); err != nil {
klog.Errorf("add rule %s failed, err: %v", ruleKey, err)
errMsg := ErrorMsg{Detail: err.Error(), Timestamp: time.Now()}
execResult := ExecResult{RuleID: rule.Name, ProjectID: rule.Namespace, Status: "FAIL", Error: errMsg}
ResultChannel <- execResult
return nil
}
rules.Store(ruleKey, rule)
klog.Infof("add rule success: %+v", rule)
return nil
}
// DelRule delete rule by rule id
func delRule(namespace, name string) {
ruleKey := getKey(namespace, name)
v, exist := rules.Load(ruleKey)
if !exist { |
source, err := getSourceOfRule(rule)
if err != nil {
klog.Error(err)
return
}
source.UnregisterListener()
rules.Delete(ruleKey)
klog.Infof("delete rule success: %s", ruleKey)
}
func getSourceOfRule(rule *routerv1.Rule) (provider.Source, error) {
sourceKey := getKey(rule.Namespace, rule.Spec.Source)
v, exist := ruleEndpoints.Load(sourceKey)
if !exist {
return nil, fmt.Errorf("source rule endpoint %s does not exist", sourceKey)
}
sourceEp := v.(*routerv1.RuleEndpoint)
sf, exist := provider.GetSourceFactory(sourceEp.Spec.RuleEndpointType)
if !exist {
return nil, fmt.Errorf("source definition %s does not exist", sourceEp.Spec.RuleEndpointType)
}
source := sf.GetSource(sourceEp, rule.Spec.SourceResource)
if source == nil {
return nil, fmt.Errorf("can't get source: %s", rule.Spec.Source)
}
return source, nil
}
func getTargetOfRule(rule *routerv1.Rule) (provider.Target, error) {
targetKey := getKey(rule.Namespace, rule.Spec.Target)
v, exist := ruleEndpoints.Load(targetKey)
if !exist {
return nil, fmt.Errorf("target rule endpoint %s does not exist", targetKey)
}
targetEp := v.(*routerv1.RuleEndpoint)
tf, exist := provider.GetTargetFactory(targetEp.Spec.RuleEndpointType)
if !exist {
return nil, fmt.Errorf("target definition %s does not exist", targetEp.Spec.RuleEndpointType)
}
target := tf.GetTarget(targetEp, rule.Spec.TargetResource)
if target == nil {
return nil, fmt.Errorf("can't get target: %s", rule.Spec.Target)
}
return target, nil
}
func getKey(namespace, name string) string {
return fmt.Sprintf("%s/%s", namespace, name)
}
func addRuleWithRetry(rule *routerv1.Rule) {
retry, waitTime := 3, 5
for i := 0; i <= retry; i++ {
if err := addRule(rule); err == nil {
break
}
klog.Errorf("add rule fail, wait to retry. retry time: %d", i+1)
time.Sleep(time.Duration(waitTime*(i+1)) * time.Second)
}
} | klog.Warningf("rule %s does not exist", ruleKey)
return
}
rule := v.(*routerv1.Rule) |
consolecapture.py | from typing import Any
import sys
import time
import os
import tempfile
class Logger2():
def __init__(self, file1: Any, file2: Any):
self.file1 = file1
self.file2 = file2
def write(self, data: str) -> None:
self.file1.write(data)
self.file2.write(data)
def | (self) -> None:
self.file1.flush()
self.file2.flush()
class ConsoleCapture():
def __init__(self):
self._console_out = ''
self._tmp_fname = None
self._file_handle = None
self._time_start = None
self._time_stop = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
def start_capturing(self) -> None:
self._tmp_fname = tempfile.mktemp(suffix='.txt')
self._file_handle = open(self._tmp_fname, 'w')
sys.stdout = Logger2(self._file_handle, self._original_stdout)
sys.stderr = Logger2(self._file_handle, self._original_stderr)
self._time_start = time.time()
def stop_capturing(self) -> None:
assert self._tmp_fname is not None
self._time_stop = time.time()
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._file_handle.close()
with open(self._tmp_fname, 'r') as f:
self._console_out = f.read()
os.unlink(self._tmp_fname)
def addToConsoleOut(self, txt: str) -> None:
self._file_handle.write(txt)
def runtimeInfo(self) -> dict:
assert self._time_start is not None
return dict(
start_time=self._time_start - 0,
end_time=self._time_stop - 0,
elapsed_sec=self._time_stop - self._time_start
)
def consoleOut(self) -> str:
return self._console_out
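    # Example usage (sketch):
    #   cc = ConsoleCapture()
    #   cc.start_capturing()
    #   print('hello')          # captured to a temp file and echoed to the real stdout
    #   cc.stop_capturing()
    #   cc.consoleOut()         # -> 'hello\n'
    #   cc.runtimeInfo()        # -> {'start_time': ..., 'end_time': ..., 'elapsed_sec': ...}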
| flush |
wire.go | package commands
import (
amino "my-tendermint/go-amino"
cryptoAmino "my-tendermint/tendermint/crypto/encoding/amino"
) | cryptoAmino.RegisterAmino(cdc)
} |
var cdc = amino.NewCodec()
func init() { |
karma.conf.js | const tmp = require('tmp');
tmp.setGracefulCleanup();
const webpack = require('webpack');
module.exports = function(config) {
config.set({
frameworks: ['mocha', 'chai'],
files: [
'test/**/*.spec.ts'
],
mime: { 'text/x-typescript': ['ts'] },
webpack: {
mode: 'development',
devtool: 'source-map',
resolve: {
extensions: ['.ts', '.js'],
alias: {
// Here we put stubs for non-browser modules that are used by tests, not core code.
// Core code stubs are set in pkgJson.browser.
"http-proxy-agent$": require.resolve('./test/empty-stub.js'),
"https-proxy-agent$": require.resolve('./test/empty-stub.js'),
"request-promise-native$": require.resolve('./test/empty-stub.js'),
"fs-extra$": require.resolve('./test/empty-stub.js'),
"portfinder$": require.resolve('./test/empty-stub.js')
}
},
module: {
rules: [
{ test: /\.ts$/, loader: 'ts-loader', exclude: /node_modules/ }
]
},
node: {
fs: 'empty', // Only required because brotli includes an unused 'fs' dependency | plugins: [
new webpack.DefinePlugin({
"process.version": '"' + process.version + '"'
})
],
output: {
path: tmp.dirSync()
}
},
webpackMiddleware: {
stats: 'error-only'
},
preprocessors: {
'src/**/*.ts': ['webpack', 'sourcemap'],
'test/**/*.ts': ['webpack', 'sourcemap']
},
reporters: ['progress'],
port: 9876,
logLevel: config.LOG_INFO,
browsers: ['ChromeHeadlessWithCert'],
customLaunchers: {
ChromeHeadlessWithCert: {
base: 'ChromeHeadless',
// This is the fingerprint for the test-ca.pem CA cert
flags: ['--ignore-certificate-errors-spki-list=dV1LxiEDeQEtLjeMCGZ4ON7Mu1TvULkgt/kg1DGk/vM=']
},
// Used for debugging (npm run test:browser:debug)
ChromeWithCert: {
base: 'Chrome',
// This is the fingerprint for the test-ca.pem CA cert
flags: ['--ignore-certificate-errors-spki-list=dV1LxiEDeQEtLjeMCGZ4ON7Mu1TvULkgt/kg1DGk/vM=']
}
},
autoWatch: false,
singleRun: true,
concurrency: Infinity
});
}; | __dirname: true
}, |
psftuneraliases.py | #!/usr/bin/env python
__doc__ = '''Merge lookup and feature aliases into TypeTuner feature file'''
__url__ = 'http://github.com/silnrsi/pysilfont'
__copyright__ = 'Copyright (c) 2019 SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
__author__ = 'Bob Hallissy'
from silfont.core import execute
from xml.etree import ElementTree as ET
from fontTools import ttLib
import csv
import struct
argspec = [
('input', {'help': 'Input TypeTuner feature file'}, {'type': 'infile'}),
('output', {'help': 'Output TypeTuner feature file'}, {}),
('-m','--mapping', {'help': 'Input csv mapping file'}, {'type': 'incsv'}),
('-f','--ttf', {'help': 'Compiled TTF file'}, {}),
('-l','--log',{'help': 'Optional log file'}, {'type': 'outfile', 'def': '_tuneraliases.log', 'optlog': True}),
]
def doit(args) :
logger = args.logger
if args.mapping is None and args.ttf is None:
logger.log("One or both of -m and -f must be provided", "S")
featdoc = ET.parse(args.input)
root = featdoc.getroot()
if root.tag != 'all_features':
logger.log("Invalid TypeTuner feature file: missing root element", "S")
# Whitespace to add after each new alias:
tail = '\n\t\t'
# Find or add aliases element
aliases = root.find('aliases')
if aliases is None:
aliases = ET.SubElement(root,'aliases')
aliases.tail = '\n'
added = set()
duplicates = set()
def setalias(name, value):
# detect duplicate names in input
if name in added:
duplicates.add(name)
else:
added.add(name)
# modify existing or add new alias
alias = aliases.find('alias[@name="{}"]'.format(name))
if alias is None:
alias = ET.SubElement(aliases, 'alias', {'name': name, 'value': value})
alias.tail = tail
else:
alias.set('value', value)
# Process mapping file if present:
if args.mapping:
# Mapping file is assumed to come from psfbuildfea, and should look like:
# lookupname,table,index
# e.g. DigitAlternates,GSUB,51
for (name,table,value) in args.mapping:
setalias(name, value)
# Process the ttf file if present
if args.ttf:
# Generate aliases for features.
# In this code featureID means the key used in FontUtils for finding the feature, e.g., "calt _2"
def dotable(t): # Common routine for GPOS and GSUB
currtag = None
currtagindex = None
flist = [] # list, in order, of (featureTag, featureID), per Font::TTF
for i in range(0,t.FeatureList.FeatureCount):
newtag = str(t.FeatureList.FeatureRecord[i].FeatureTag)
if currtag is None or currtag != newtag:
flist.append((newtag, newtag))
currtag = newtag
currtagindex = 0
else:
flist.append( (currtag, '{} _{}'.format(currtag, currtagindex)))
currtagindex += 1
fslList = {} # dictionary keyed by feature_script_lang values returning featureID
for s in t.ScriptList.ScriptRecord:
currtag = str(s.ScriptTag)
# At present only looking at the dflt lang entries
for findex in s.Script.DefaultLangSys.FeatureIndex:
fslList['{}_{}_dflt'.format(flist[findex][0],currtag)] = flist[findex][1]
# Now that we have them all, add them in sorted order.
for name, value in sorted(fslList.items()):
setalias(name,value)
# Open the TTF for processing
try:
f = ttLib.TTFont(args.ttf)
except Exception as e:
logger.log("Couldn't open font '{}' for reading : {}".format(args.ttf, str(e)),"S")
# Grab features from GSUB and GPOS
for tag in ('GSUB', 'GPOS'):
try:
dotable(f[tag].table)
except Exception as e:
logger.log("Failed to process {} table: {}".format(tag, str(e)), "W")
# Grab features from Graphite:
try:
for tag in sorted(f['Feat'].features.keys()):
if tag == '1':
continue
name = 'gr_' + tag
value = str(struct.unpack('>L', tag.encode())[0])
setalias(name,value)
except Exception as e:
logger.log("Failed to process Feat table: {}".format(str(e)), "W")
if len(duplicates):
logger.log("The following aliases defined more than once in input: {}".format(", ".join(sorted(duplicates))), "S")
# Success. Write the result
featdoc.write(args.output, encoding='UTF-8', xml_declaration=True)
def | () : execute(None,doit,argspec)
if __name__ == "__main__": cmd()
| cmd |
model_update_route_request.go | /*
* 3DS OUTSCALE API
*
* Welcome to the OUTSCALE API documentation.<br /> The OUTSCALE API enables you to manage your resources in the OUTSCALE Cloud. This documentation describes the different actions available along with code examples.<br /><br /> You can learn more about errors returned by the API in the dedicated [errors page](api/errors).<br /><br /> Note that the OUTSCALE Cloud is compatible with Amazon Web Services (AWS) APIs, but there are [differences in resource names](https://docs.outscale.com/en/userguide/OUTSCALE-APIs-Reference.html) between AWS and the OUTSCALE API.<br /> You can also manage your resources using the [Cockpit](https://docs.outscale.com/en/userguide/About-Cockpit.html) web interface.
*
* API version: 1.20
* Contact: [email protected]
*/
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package osc
import (
"encoding/json"
)
// UpdateRouteRequest struct for UpdateRouteRequest
type UpdateRouteRequest struct {
// The IP range used for the destination match, in CIDR notation (for example, `10.0.0.0/24`).
DestinationIpRange string `json:"DestinationIpRange"`
// If true, checks whether you have the required permissions to perform the action.
DryRun *bool `json:"DryRun,omitempty"`
// The ID of an Internet service or virtual gateway attached to your Net.
GatewayId *string `json:"GatewayId,omitempty"`
// The ID of a NAT service.
NatServiceId *string `json:"NatServiceId,omitempty"`
// The ID of a Net peering connection.
NetPeeringId *string `json:"NetPeeringId,omitempty"`
// The ID of a network interface card (NIC).
NicId *string `json:"NicId,omitempty"`
// The ID of the route table.
RouteTableId string `json:"RouteTableId"`
// The ID of a NAT VM in your Net.
VmId *string `json:"VmId,omitempty"`
}
// NewUpdateRouteRequest instantiates a new UpdateRouteRequest object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewUpdateRouteRequest(destinationIpRange string, routeTableId string) *UpdateRouteRequest {
this := UpdateRouteRequest{}
this.DestinationIpRange = destinationIpRange
this.RouteTableId = routeTableId
return &this
}
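// Illustrative usage sketch (not part of the generated file; the literal IDs below are placeholders):
//
//	req := NewUpdateRouteRequest("10.0.0.0/24", "rtb-12345678")
//	req.SetGatewayId("igw-12345678")
//	body, _ := json.Marshal(req) // only DestinationIpRange, RouteTableId and GatewayId are serialized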
// NewUpdateRouteRequestWithDefaults instantiates a new UpdateRouteRequest object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewUpdateRouteRequestWithDefaults() *UpdateRouteRequest {
this := UpdateRouteRequest{}
return &this
}
// GetDestinationIpRange returns the DestinationIpRange field value
func (o *UpdateRouteRequest) GetDestinationIpRange() string {
if o == nil {
var ret string
return ret
}
return o.DestinationIpRange
}
// GetDestinationIpRangeOk returns a tuple with the DestinationIpRange field value
// and a boolean to check if the value has been set.
func (o *UpdateRouteRequest) GetDestinationIpRangeOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.DestinationIpRange, true
}
// SetDestinationIpRange sets field value
func (o *UpdateRouteRequest) SetDestinationIpRange(v string) {
o.DestinationIpRange = v
}
// GetDryRun returns the DryRun field value if set, zero value otherwise.
func (o *UpdateRouteRequest) GetDryRun() bool {
if o == nil || o.DryRun == nil {
var ret bool
return ret
}
return *o.DryRun
}
// GetDryRunOk returns a tuple with the DryRun field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *UpdateRouteRequest) GetDryRunOk() (*bool, bool) {
if o == nil || o.DryRun == nil {
return nil, false
}
return o.DryRun, true
}
// HasDryRun returns a boolean if a field has been set.
func (o *UpdateRouteRequest) HasDryRun() bool {
if o != nil && o.DryRun != nil |
return false
}
// SetDryRun gets a reference to the given bool and assigns it to the DryRun field.
func (o *UpdateRouteRequest) SetDryRun(v bool) {
o.DryRun = &v
}
// GetGatewayId returns the GatewayId field value if set, zero value otherwise.
func (o *UpdateRouteRequest) GetGatewayId() string {
if o == nil || o.GatewayId == nil {
var ret string
return ret
}
return *o.GatewayId
}
// GetGatewayIdOk returns a tuple with the GatewayId field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *UpdateRouteRequest) GetGatewayIdOk() (*string, bool) {
if o == nil || o.GatewayId == nil {
return nil, false
}
return o.GatewayId, true
}
// HasGatewayId returns a boolean if a field has been set.
func (o *UpdateRouteRequest) HasGatewayId() bool {
if o != nil && o.GatewayId != nil {
return true
}
return false
}
// SetGatewayId gets a reference to the given string and assigns it to the GatewayId field.
func (o *UpdateRouteRequest) SetGatewayId(v string) {
o.GatewayId = &v
}
// GetNatServiceId returns the NatServiceId field value if set, zero value otherwise.
func (o *UpdateRouteRequest) GetNatServiceId() string {
if o == nil || o.NatServiceId == nil {
var ret string
return ret
}
return *o.NatServiceId
}
// GetNatServiceIdOk returns a tuple with the NatServiceId field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *UpdateRouteRequest) GetNatServiceIdOk() (*string, bool) {
if o == nil || o.NatServiceId == nil {
return nil, false
}
return o.NatServiceId, true
}
// HasNatServiceId returns a boolean if a field has been set.
func (o *UpdateRouteRequest) HasNatServiceId() bool {
if o != nil && o.NatServiceId != nil {
return true
}
return false
}
// SetNatServiceId gets a reference to the given string and assigns it to the NatServiceId field.
func (o *UpdateRouteRequest) SetNatServiceId(v string) {
o.NatServiceId = &v
}
// GetNetPeeringId returns the NetPeeringId field value if set, zero value otherwise.
func (o *UpdateRouteRequest) GetNetPeeringId() string {
if o == nil || o.NetPeeringId == nil {
var ret string
return ret
}
return *o.NetPeeringId
}
// GetNetPeeringIdOk returns a tuple with the NetPeeringId field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *UpdateRouteRequest) GetNetPeeringIdOk() (*string, bool) {
if o == nil || o.NetPeeringId == nil {
return nil, false
}
return o.NetPeeringId, true
}
// HasNetPeeringId returns a boolean if a field has been set.
func (o *UpdateRouteRequest) HasNetPeeringId() bool {
if o != nil && o.NetPeeringId != nil {
return true
}
return false
}
// SetNetPeeringId gets a reference to the given string and assigns it to the NetPeeringId field.
func (o *UpdateRouteRequest) SetNetPeeringId(v string) {
o.NetPeeringId = &v
}
// GetNicId returns the NicId field value if set, zero value otherwise.
func (o *UpdateRouteRequest) GetNicId() string {
if o == nil || o.NicId == nil {
var ret string
return ret
}
return *o.NicId
}
// GetNicIdOk returns a tuple with the NicId field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *UpdateRouteRequest) GetNicIdOk() (*string, bool) {
if o == nil || o.NicId == nil {
return nil, false
}
return o.NicId, true
}
// HasNicId returns a boolean if a field has been set.
func (o *UpdateRouteRequest) HasNicId() bool {
if o != nil && o.NicId != nil {
return true
}
return false
}
// SetNicId gets a reference to the given string and assigns it to the NicId field.
func (o *UpdateRouteRequest) SetNicId(v string) {
o.NicId = &v
}
// GetRouteTableId returns the RouteTableId field value
func (o *UpdateRouteRequest) GetRouteTableId() string {
if o == nil {
var ret string
return ret
}
return o.RouteTableId
}
// GetRouteTableIdOk returns a tuple with the RouteTableId field value
// and a boolean to check if the value has been set.
func (o *UpdateRouteRequest) GetRouteTableIdOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.RouteTableId, true
}
// SetRouteTableId sets field value
func (o *UpdateRouteRequest) SetRouteTableId(v string) {
o.RouteTableId = v
}
// GetVmId returns the VmId field value if set, zero value otherwise.
func (o *UpdateRouteRequest) GetVmId() string {
if o == nil || o.VmId == nil {
var ret string
return ret
}
return *o.VmId
}
// GetVmIdOk returns a tuple with the VmId field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *UpdateRouteRequest) GetVmIdOk() (*string, bool) {
if o == nil || o.VmId == nil {
return nil, false
}
return o.VmId, true
}
// HasVmId returns a boolean if a field has been set.
func (o *UpdateRouteRequest) HasVmId() bool {
if o != nil && o.VmId != nil {
return true
}
return false
}
// SetVmId gets a reference to the given string and assigns it to the VmId field.
func (o *UpdateRouteRequest) SetVmId(v string) {
o.VmId = &v
}
func (o UpdateRouteRequest) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if true {
toSerialize["DestinationIpRange"] = o.DestinationIpRange
}
if o.DryRun != nil {
toSerialize["DryRun"] = o.DryRun
}
if o.GatewayId != nil {
toSerialize["GatewayId"] = o.GatewayId
}
if o.NatServiceId != nil {
toSerialize["NatServiceId"] = o.NatServiceId
}
if o.NetPeeringId != nil {
toSerialize["NetPeeringId"] = o.NetPeeringId
}
if o.NicId != nil {
toSerialize["NicId"] = o.NicId
}
if true {
toSerialize["RouteTableId"] = o.RouteTableId
}
if o.VmId != nil {
toSerialize["VmId"] = o.VmId
}
return json.Marshal(toSerialize)
}
type NullableUpdateRouteRequest struct {
value *UpdateRouteRequest
isSet bool
}
func (v NullableUpdateRouteRequest) Get() *UpdateRouteRequest {
return v.value
}
func (v *NullableUpdateRouteRequest) Set(val *UpdateRouteRequest) {
v.value = val
v.isSet = true
}
func (v NullableUpdateRouteRequest) IsSet() bool {
return v.isSet
}
func (v *NullableUpdateRouteRequest) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableUpdateRouteRequest(val *UpdateRouteRequest) *NullableUpdateRouteRequest {
return &NullableUpdateRouteRequest{value: val, isSet: true}
}
func (v NullableUpdateRouteRequest) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableUpdateRouteRequest) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
}
| {
return true
} |
hexahue_map.py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
import yaml
class HexahueMap():
def __init__(self, space_color):
| pink = (255, 0, 255)
red = (255, 0, 0)
green = (0, 255, 0)
yellow = (255, 255, 0)
blue = (0, 0, 255)
sky = (0, 255, 255)
white = (255, 255, 255)
gray = (128, 128, 128)
black = (0, 0, 0)
self.hmap = {}
self.hmap[(pink, red, green, yellow, blue, sky)] = 'A'
self.hmap[(red, pink, green, yellow, blue, sky)] = 'B'
self.hmap[(red, green, pink, yellow, blue, sky)] = 'C'
self.hmap[(red, green, yellow, pink, blue, sky)] = 'D'
self.hmap[(red, green, yellow, blue, pink, sky)] = 'E'
self.hmap[(red, green, yellow, blue, sky, pink)] = 'F'
self.hmap[(green, red, yellow, blue, sky, pink)] = 'G'
self.hmap[(green, yellow, red, blue, sky, pink)] = 'H'
self.hmap[(green, yellow, blue, red, sky, pink)] = 'I'
self.hmap[(green, yellow, blue, sky, red, pink)] = 'J'
self.hmap[(green, yellow, blue, sky, pink, red)] = 'K'
self.hmap[(yellow, green, blue, sky, pink, red)] = 'L'
self.hmap[(yellow, blue, green, sky, pink, red)] = 'M'
self.hmap[(yellow, blue, sky, green, pink, red)] = 'N'
self.hmap[(yellow, blue, sky, pink, green, red)] = 'O'
self.hmap[(yellow, blue, sky, pink, red, green)] = 'P'
self.hmap[(blue, yellow, sky, pink, red, green)] = 'Q'
self.hmap[(blue, sky, yellow, pink, red, green)] = 'R'
self.hmap[(blue, sky, pink, yellow, red, green)] = 'S'
self.hmap[(blue, sky, pink, red, yellow, green)] = 'T'
self.hmap[(blue, sky, pink, red, green, yellow)] = 'U'
self.hmap[(sky, blue, pink, red, green, yellow)] = 'V'
self.hmap[(sky, pink, blue, red, green, yellow)] = 'W'
self.hmap[(sky, pink, red, blue, green, yellow)] = 'X'
self.hmap[(sky, pink, red, green, blue, yellow)] = 'Y'
self.hmap[(sky, pink, red, green, yellow, blue)] = 'Z'
self.hmap[(black, white, white, black, black, white)] = '.'
self.hmap[(white, black, black, white, white, black)] = ','
if space_color == 'black':
self.hmap[(black, black, black, black, black, black)] = ' '
elif space_color == 'white':
self.hmap[(white, white, white, white, white, white)] = ' '
elif space_color == 'all':
self.hmap[(black, black, black, black, black, black)] = ' '
self.hmap[(white, white, white, white, white, white)] = ' '
else:
raise Exception('[Error] invalid space setting: ' + space_color)
self.hmap[(black, gray, white, black, gray, white)] = '0'
self.hmap[(gray, black, white, black, gray, white)] = '1'
self.hmap[(gray, white, black, black, gray, white)] = '2'
self.hmap[(gray, white, black, gray, black, white)] = '3'
self.hmap[(gray, white, black, gray, white, black)] = '4'
self.hmap[(white, gray, black, gray, white, black)] = '5'
self.hmap[(white, black, gray, gray, white, black)] = '6'
self.hmap[(white, black, gray, white, gray, black)] = '7'
self.hmap[(white, black, gray, white, black, gray)] = '8'
self.hmap[(black, white, gray, white, black, gray)] = '9' |
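# Illustrative note (not in the original file): hmap is a lookup from a 6-tuple of RGB
# colours to the decoded character, e.g.
#   HexahueMap('black').hmap[((255, 0, 255), (255, 0, 0), (0, 255, 0), (255, 255, 0), (0, 0, 255), (0, 255, 255))]  # -> 'A'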
|
multitrait.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct S {
y: int
}
impl Cmp, ToStr for S { //~ ERROR: expected `{` but found `,`
fn eq(&&other: S) |
fn to_str(&self) -> String { "hi".to_string() }
}
| { false } |
non.py | from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import math
import scipy.stats as stats
def inter_p_value(p_value):
# interpretation
if p_value >= 0 and p_value < 0.01:
inter_p = 'Overwhelming Evidence'
elif p_value >= 0.01 and p_value < 0.05:
inter_p = 'Strong Evidence'
elif p_value >= 0.05 and p_value < 0.1:
inter_p = 'Weak Evidence'
elif p_value >= .1:
inter_p = 'No Evidence'
return inter_p
def grank(data):
if type(data) == np.ndarray or type(data) == list:
alldata = data.copy()
data = data.copy()
else:
alldata = data.values.copy()
data = data.values.copy()
alldata.sort()
tmp_df = pd.DataFrame({'value': alldata})
tmp_df['rank'] = tmp_df.index + 1
value_to_rank = tmp_df.groupby('value').mean().reset_index()
samp = pd.DataFrame({'value': data})
samp = pd.merge(samp, value_to_rank, how='left')
return samp['rank']
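# Illustrative sketch (not in the original module): grank assigns average ranks to ties,
# e.g. grank(np.array([10, 20, 20, 30])) gives ranks 1.0, 2.5, 2.5, 4.0.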
def ranksum_z_test(df=None, to_compute='', alternative=None, precision=4, alpha=0.05):
"""
df can only have two columns and df.shape[0] > 10
alternative has three options: 'two-sided', 'less', 'greater'
"""
# sort all data points by values
tmp_values = df.values.reshape(-1)
tmp_values = tmp_values[~np.isnan(tmp_values)]
tmp_values.sort()
# assign ranks
updated_df = pd.DataFrame({'value': tmp_values})
updated_df['rank'] = updated_df.index + 1
# average rank for identical value
updated_df = updated_df.groupby('value').mean().reset_index()
# display(updated_df)
# Compute Sum of Ranks
samp1 = pd.DataFrame({'value': df[to_compute].dropna().values})
samp1 = pd.merge(samp1, updated_df)
T = samp1['rank'].sum()
# compute mean and standard deviation
n1 = df.iloc[:, 0].dropna().shape[0]
n2 = df.iloc[:, 1].dropna().shape[0]
E_T = n1*(n1+n2+1)/2
sigmaT = (n1*n2*(n1+n2+1)/12) ** 0.5
z = (T-E_T)/sigmaT
# compute p-value
# right (greater)
p_value = 1 - stats.norm.cdf(z)
if alternative == 'greater':
pass
elif alternative == 'less':
p_value = stats.norm.cdf(z)
elif alternative == 'two-sided':
# two-tail
if p_value > 0.5:
p_value = stats.norm.cdf(z)
p_value *= 2
flag = False
if p_value < alpha:
flag = True
result = f'''======= z-test =======
T (sum of ranks) = {T}
(n1, n2) = ({n1}, {n2})
mu_t = {E_T}
sigma_t = {sigmaT}
z statistic value (observed) = {z:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 ({alternative}) → {flag}
'''
print(result)
result_dict = {'T': T, 'ET': E_T,
'sigmaT': sigmaT, 'z': z, 'p-value': p_value}
return updated_df, result_dict
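# Minimal usage sketch (hypothetical data; kept as a comment so nothing runs at import):
#   df = pd.DataFrame({'a': np.random.rand(15), 'b': np.append(np.random.rand(12), [np.nan] * 3)})
#   ranked_df, res = ranksum_z_test(df=df, to_compute='a', alternative='two-sided')
#   print(res['z'], res['p-value'])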
def sign_binom_test(diff=None, sign='+', alternative=None, precision=4, alpha=0.05):
n = diff.size - np.sum(diff == 0)
if sign == '+':
sign_count = np.sum(diff > 0)
else:
sign_count = np.sum(diff < 0)
if alternative == 'greater' or alternative == 'less':
# if the sign count exceeds half of n, switch to the other tail
if sign_count > n / 2:
p_value = 1 - stats.binom.cdf(sign_count - 1, n=n, p=0.5)
else:
p_value = stats.binom.cdf(sign_count, n=n, p=0.5)
elif alternative == 'two-sided':
p_value = stats.binom.cdf(sign_count, n=n, p=0.5)
if p_value > 0.5:
p_value = 1 - stats.binom.cdf(sign_count - 1, n=n, p=0.5)
p_value *= 2
flag = False
if p_value < alpha:
flag = True
result = f'''======= Sign Test - Binomial Distribution =======
(For small sample size (<= 10))
Targeted Sign: {sign}
n = {n}
Sign counts = {sign_count}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 ({alternative}) → {flag}
'''
print(result)
return sign_count, p_value
def sign_z_test(diff=None, sign='+', alternative=None, precision=4, alpha=0.05):
diff = diff[~(diff == 0)]
n = len(diff)
if sign == '+':
T = np.sum(diff > 0)
else:
T = np.sum(diff < 0)
z_stat = (T - 0.5 * n) / (.5 * (n ** 0.5))
# right tail
if alternative == 'greater':
p_value = 1 - stats.norm.cdf(z_stat)
elif alternative == 'less':
p_value = stats.norm.cdf(z_stat)
elif alternative == 'two-sided':
p_value = 1 - stats.norm.cdf(z_stat)
if p_value > 0.5:
p_value = stats.norm.cdf(z_stat)
p_value *= 2
flag = False
if p_value < alpha:
flag = True
result = f'''======= Sign Test - z Statistic =======
(For large sample size (> 10))
Targeted Sign: {sign}
n = {n}
Sign counts = {T}
z statistic = {z_stat:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 ({alternative}) → {flag}
'''
print(result)
return T, p_value
def wilcoxon_signed_ranksum_z_test(diff=None, sign='+', alternative=None, precision=4, alpha=0.05):
diff = diff[~(diff == 0)]
n = len(diff)
diff_abs = np.sort(np.abs(diff).to_numpy())
updated_diff = pd.DataFrame({'diff_abs': diff_abs})
updated_diff['rank'] = updated_diff.index + 1
updated_diff = updated_diff.groupby('diff_abs').mean().reset_index()
new_df = pd.DataFrame({'diff': diff, 'diff_abs': np.abs(diff)})
new_df = pd.merge(new_df, updated_diff)
if sign == '+':
T = np.sum(new_df['rank'][new_df['diff'] > 0])
else:
T = np.sum(new_df['rank'][new_df['diff'] < 0])
E_T = n * (n + 1) / 4
sigma_T = (n * (n + 1) * (2 * n + 1) / 24) ** 0.5
z_stat = (T - E_T) / sigma_T
if alternative == 'greater':
# right tail test
p_value = 1 - stats.norm.cdf(z_stat)
elif alternative == 'less':
# left tail test
p_value = stats.norm.cdf(z_stat)
elif alternative == 'two-sided':
# two-tailed test
p_value = 1 - stats.norm.cdf(z_stat)
if p_value > 0.5:
p_value = stats.norm.cdf(z_stat)
p_value *= 2
flag = False
if p_value < alpha:
flag = True
result = f'''======= Wilcoxon Signed Rank Sum Test - z Statistic =======
(For large sample size (> 30))
Targeted Sign: {sign}
n = {n}
Sum of rank (T statistic) = {T}
mu_t = {E_T}
sigma_t = {sigma_T}
z statistic value (observed) = {z_stat:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 ({alternative}) → {flag}
'''
print(result)
result_dict = {'n': n, 'T': T, 'E_T': E_T,
'sigma_T': sigma_T, 'z_stat': z_stat, 'p_value': p_value}
return new_df, result_dict
def kruskal_chi2_test(data=None, alpha=0.05, precision=4):
"""
col = the targets (populations) to compare
row = data for each target
"""
if type(data) == pd.DataFrame:
data = data.copy().to_numpy()
alldata = np.concatenate(data.copy())
else:
alldata = np.concatenate(data.copy())
k = data.shape[1]
alldata.sort()
tmp_df = pd.DataFrame(({'value': alldata}))
tmp_df['rank'] = tmp_df.index + 1 # rank
value_to_rank = tmp_df.groupby('value').mean().reset_index()
T = []
sample_rank_df = []
for i in range(k):
samp = pd.DataFrame(
{'value': data[:, i][~np.isnan(data[:, i])]})
samp = pd.merge(samp, value_to_rank)
sample_rank_df.append(samp)
T.append(samp['rank'].sum())
n = [len(data[:, i][~np.isnan(data[:, i])]) for i in range(k)]
# print(T)
# print(n)
rule_of_five_str = ""
if (np.sum(np.array(n) < 5) > 0):
rule_of_five_str += "!(At least one sample size is less than 5)"
else:
rule_of_five_str += "(All sample size >= 5)"
N = np.sum(n)
t_over_n = 0
for i in range(k):
t_over_n += T[i] ** 2 / n[i]
H = 12 / N / (N + 1) * t_over_n - 3 * (N + 1)
p_value = 1 - stats.chi2.cdf(H, k - 1)
chi2_stat = stats.chi2.ppf(1 - alpha, k - 1)
result_dict = {'H': H, 'p-value': p_value,
'T': T, 'sample_rank_df': sample_rank_df}
flag = p_value < alpha
result = f'''======= Kruskal-Wallis Test with Chi-squared Test =======
{rule_of_five_str}
H statistic value (observed) = {H:.{precision}f}
chi2 critical value = {chi2_stat:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 (Not all {k} population locations are the same) → {flag}
'''
print(result)
return result_dict
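# Minimal usage sketch (hypothetical data; comment only): columns are the k populations,
# rows are observations (pad with np.nan for unequal sample sizes):
#   data = pd.DataFrame({'m1': [27, 31, 42, 38, 45], 'm2': [37, 49, 38, 61, 58], 'm3': [19, 28, 33, 41, 40]})
#   res = kruskal_chi2_test(data=data, alpha=0.05)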
def friedman_chi2_test(data=None, alpha=0.05, precision=4):
"""
col = the targets (populations) to compare
row = blocked data for each target
"""
if type(data) == np.ndarray:
data = pd.DataFrame(data)
new_df = data.apply(grank, axis=1)
b, k = new_df.shape
rule_of_five_str = ""
if (b < 5 and k < 5):
rule_of_five_str += f"!(Number of blocks = {b} < 5 and number of populations = {k} < 5)"
else:
rule_of_five_str += f"(Number of blocks = {b} >= 5 or number of populations {k} >= 5)"
T = new_df.sum().to_numpy()
F_r = 12 / b / k / (k + 1) * np.sum(T ** 2) - 3 * b * (k + 1)
p_value = 1 - stats.chi2.cdf(F_r, k - 1)
chi2_stat = stats.chi2.ppf(1 - alpha, k - 1)
result_dict = {'F_r': F_r, 'p-value': p_value,
'T': T, 'sample_ranked_df': new_df}
flag = p_value < alpha
result = f'''======= Friedman Test with Chi-squared Test =======
{rule_of_five_str}
F_r statistic value (observed) = {F_r:.{precision}f}
chi2 critical value = {chi2_stat:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 (Not all {k} population locations are the same) → {flag}
'''
print(result)
return result_dict
def pearson_test(data=None, a=None, b=None, alpha=0.05, precision=4):
"""
a and b cannot be passed in yet
Make sure that data is | spearman_restult_cor, spearman_restult_p_value = stats.spearmanr(a, b)
# print(f'Correlation = {cor:.4f}, p-value={p_value:.4f}')
n = len(a)
rule_of_30_str = ''
results = f"""======= Spearman Rank Correlation Coefficient =======
[scipy.stats.spearmanr]
Coefficient of Correlation: {spearman_restult_cor:.{precision}f}
p-value={spearman_restult_p_value:.{precision}f} ({inter_p_value(spearman_restult_p_value)})
"""
if (n < 30):
rule_of_30_str += f"!(n = {n} < 30)"
flag = spearman_restult_p_value < alpha
results += f"""
Reject H_0 (There are relationship between two variables) → {flag}
"""
result_dict = {'spearman_result': [
spearman_restult_cor, spearman_restult_p_value]}
else:
rule_of_30_str += f"(n = {n} >= 30)"
flag = spearman_restult_p_value < alpha
results += f"""
Reject H_0 (There are relationship between two variables) → {flag}
"""
z_stat = spearman_restult_cor * ((n - 1) ** 0.5)
z_cv = stats.norm.ppf(1 - alpha/2)
p_value = stats.norm.sf(z_stat) * 2
if p_value > 1:
p_value = stats.norm.cdf(z_stat) * 2
flag = p_value < alpha
results += f"""
[z test statistic]
{rule_of_30_str}
r_s: {spearman_restult_cor:.{precision}f} (using spearmanr's result)
z stat (observed value) = {z_stat:.{precision}f}
z (critical value) = {z_cv:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 (There are relationship between two variables) → {flag}
"""
result_dict = {'spearman_result': [
spearman_restult_cor, spearman_restult_p_value], 'z_stat': z_stat, 'z_cv': z_cv, 'p-value': p_value}
print(results)
return result_dict
| in the form of [a, b]
"""
cov_mat = np.cov(data.values, rowvar=False)
cor_mat = np.corrcoef(data.values, rowvar=False)
cov = cov_mat[0][1]
cor = cor_mat[0][1]
n = data.shape[0]
d_of_f = n - 2
t_c = stats.t.ppf(1 - alpha / 2, df=d_of_f)
t_stat = cor * (((n - 2) / (1 - cor ** 2)) ** 0.5)
flag = abs(t_stat) > t_c
result_dict = {'cov': cov, 't_stat': t_stat, 'cor': cor, 't_c': t_c}
results = f"""======= Pearson Correlation Coefficient =======
Covariance: {cov:.{precision}f}
Coefficient of Correlation: {cor:.{precision}f}
t (Critical Value) = {t_c:.{precision}f}
t (Observed Value) = {t_stat:.{precision}f}
Reject H_0 (There are linear relationship between two variables) → {flag}
"""
print(results)
return result_dict
def spearman_test(a=None, b=None, alpha=0.05, precision=4):
|
vms.go | // Copyright 2020 VMware, Inc.
// SPDX-License-Identifier: BSD-2-Clause
package main
import (
"encoding/json"
"fmt"
"log"
"math/rand"
"time"
)
// VMState represents the current state of a VM
type VMState string
const (
// STOPPED VM is stopped; in this state it can be removed
STOPPED VMState = "Stopped"
// STARTING VM is transitioning from Stopped to Running; the simulated start takes about DefaultStartDelay time units
STARTING VMState = "Starting"
// RUNNING VM is online
RUNNING VMState = "Running"
// STOPPING VM is transitioning from Running to Stopped
STOPPING VMState = "Stopping"
)
const (
// DefaultStartDelay Start VM process simulated delay, measured in timeUnits
DefaultStartDelay = 10
// DefaultStopDelay Stop VM process simulated delay, measured in timeUnits
DefaultStopDelay = 5
)
// timeUnit allows unit tests to change the timescale
var timeUnit = time.Second
// StartDelay for launch operations
func StartDelay() time.Duration {
return randomDuration(timeUnit, 2*(DefaultStartDelay*timeUnit)-timeUnit)
}
// StopDelay for stop operations
func StopDelay() time.Duration {
return randomDuration(timeUnit, 2*(DefaultStopDelay*timeUnit)-timeUnit)
}
func | (min, max time.Duration) time.Duration {
return time.Duration(rand.Intn(int(max-min+1))) + min
}
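// Illustrative note (not in the original file): with the default timeUnit of one second,
// StartDelay() returns a uniform duration in [1s, 19s], so the expected start delay is
// about DefaultStartDelay (10) time units; StopDelay() similarly averages about 5 time units.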
// VMsJSON filename where to store initial VMs state list
const VMsJSON = "vms.json"
func dieOnError(err error, format string, args ...interface{}) {
if err != nil {
log.Fatalf("%s: %v\n", fmt.Sprintf(format, args...), err)
}
}
// VM is a Virtual Machine
type VM struct {
VCPUS int `json:"vcpus,omitempty"` // Number of processors
Clock float32 `json:"clock,omitempty"` // Frequency of 1 processor, in MHz (Megahertz)
RAM int `json:"ram,omitempty"` // Amount of internal memory, in MB (Megabytes)
Storage int `json:"storage,omitempty"` // Amount of persistent storage, in GB (Gigabytes)
Network int `json:"network,omitempty"` // Network device speed in Gb/s (Gigabits per second)
State VMState `json:"state,omitempty"` // Value within [Running, Stopped, Starting, Stopping]
}
// VM by default dumps itself in JSON format
func (vm VM) String() string {
vmJSON, err := json.Marshal(vm)
dieOnError(err, "Can't generate JSON for VM object %#v", vm)
return string(vmJSON)
}
// AllowedTransition lists allowed state transitions
var AllowedTransition = map[VMState]VMState{
STOPPED: STARTING,
STARTING: RUNNING,
RUNNING: STOPPING,
STOPPING: STOPPED,
}
// WithState returns a VM on the requested end state or an error,
// if the transition was illegal
func (vm VM) WithState(state VMState) (VM, error) {
if state == vm.State {
return vm, nil // NOP
}
if AllowedTransition[vm.State] != state {
return VM{}, fmt.Errorf("illegal transition from %q to %q", vm.State, state)
}
vm.State = state
return vm, nil
}
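// Illustrative usage sketch (not part of the original file):
//
//	vm := VM{State: STOPPED}
//	vm, _ = vm.WithState(STARTING)  // allowed: Stopped -> Starting
//	_, err := vm.WithState(STOPPED) // error: only Starting -> Running is allowed next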
// VMs defines a map of VMs with attached methods
type VMs map[int]VM
// clone returns a deep clone of the list, useful for snapshots
func (vms VMs) clone() VMs {
cloneList := make(VMs, len(vms))
for k, v := range vms {
cloneList[k] = v
}
return cloneList
}
// String in VMs by default dumps itself in JSON format skipping empty entries
func (vms VMs) String() string {
vmJSON, err := json.Marshal(vms)
dieOnError(err, "Can't generate JSON for VM object %#v", vms)
return string(vmJSON)
}
var defaultVMs = VMs{
0: {
VCPUS: 1, // Number of processors
Clock: 1500, // Frequency of 1 processor, expressed in MHz (Megahertz)
RAM: 4096, // Amount of internal memory, expressed in MB (Megabytes)
Storage: 128, // Amount of internal space available for storage, expressed in GB (Gigabytes)
Network: 1000, // Speed of the networking device, expressed in Gb/s (Gigabits per second)
State: STOPPED, // Value from within the set [Running, Stopped, Starting, Stopping]
},
1: {
VCPUS: 4,
Clock: 3600,
RAM: 32768,
Storage: 512,
Network: 10000,
State: STOPPED,
},
2: {
VCPUS: 2,
Clock: 2200,
RAM: 8192,
Storage: 256,
Network: 1000,
State: STOPPED,
},
}
| randomDuration |
api.py | # Databricks CLI
# Copyright 2017 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"), except
# that the use of services to which certain application programming
# interfaces (each, an "API") connect requires that the user first obtain
# a license for the use of the APIs from Databricks, Inc. ("Databricks"),
# by creating an account at www.databricks.com and agreeing to either (a)
# the Community Edition Terms of Service, (b) the Databricks Terms of
# Service, or (c) another written agreement between Licensee and Databricks
# for the use of the APIs.
#
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hashlib import sha1
import os
from six.moves import urllib
from databricks_cli.sdk import DeltaPipelinesService
from databricks_cli.dbfs.api import DbfsApi
from databricks_cli.dbfs.dbfs_path import DbfsPath
BUFFER_SIZE = 1024 * 64
base_pipelines_dir = 'dbfs:/pipelines/code'
supported_lib_types = {'jar', 'whl', 'maven'}
class PipelinesApi(object):
def __init__(self, api_client):
self.client = DeltaPipelinesService(api_client)
self.dbfs_client = DbfsApi(api_client)
def deploy(self, spec, headers=None):
lib_objects = LibraryObject.from_json(spec.get('libraries', []))
local_lib_objects, external_lib_objects = \
self._identify_local_libraries(lib_objects)
spec['libraries'] = LibraryObject.to_json(external_lib_objects +
self._upload_local_libraries(local_lib_objects))
pipeline_id = spec['id']
self.client.client.perform_query('PUT', '/pipelines/{}'.format(pipeline_id), data=spec,
headers=headers)
def delete(self, pipeline_id, headers=None):
self.client.delete(pipeline_id, headers)
def get(self, pipeline_id, headers=None):
return self.client.get(pipeline_id, headers)
def reset(self, pipeline_id, headers=None):
self.client.reset(pipeline_id, headers)
@staticmethod
def _identify_local_libraries(lib_objects):
"""
Partitions the given set of libraries into local and those already present in dbfs/s3 etc.
Local libraries are (currently) jar files with a file scheme or no scheme at all.
All other libraries should be present in a supported external source.
:param lib_objects: List[LibraryObject]
:return: List[List[LibraryObject], List[LibraryObject]] ([Local, External])
"""
local_lib_objects, external_lib_objects = [], []
for lib_object in lib_objects:
if lib_object.lib_type == 'maven':
external_lib_objects.append(lib_object)
continue
parsed_uri = urllib.parse.urlparse(lib_object.path)
if lib_object.lib_type in supported_lib_types and parsed_uri.scheme == '':
local_lib_objects.append(lib_object)
elif lib_object.lib_type in supported_lib_types and parsed_uri.scheme.lower() == 'file':
# file URIs must use exactly one or three slashes (file:/ or file:///)
if parsed_uri.path.startswith('//') or parsed_uri.netloc != '':
raise RuntimeError('invalid file uri scheme, '
'did you mean to use file:/ or file:///')
local_lib_objects.append(LibraryObject(lib_object.lib_type, parsed_uri.path))
else:
external_lib_objects.append(lib_object)
return local_lib_objects, external_lib_objects
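# Illustrative sketch (hypothetical paths; comment only): for libraries such as
#   [{'jar': 'dbfs:/libs/a.jar'}, {'jar': 'file:/tmp/b.jar'}, {'jar': '/tmp/c.jar'}]
# the dbfs:/ jar stays external, while the file:/ and bare-path jars are returned as
# local libraries to be uploaded by _upload_local_libraries.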
def _upload_local_libraries(self, local_lib_objects):
remote_lib_objects = [LibraryObject(llo.lib_type, self._get_hashed_path(llo.path))
for llo in local_lib_objects]
transformed_remote_lib_objects = [LibraryObject(rlo.lib_type, DbfsPath(rlo.path))
for rlo in remote_lib_objects]
upload_files = [llo_tuple for llo_tuple in
zip(local_lib_objects, transformed_remote_lib_objects)
if not self.dbfs_client.file_exists(llo_tuple[1].path)]
| self.dbfs_client.put_file(llo.path, rlo.path, False)
return remote_lib_objects
@staticmethod
def _get_hashed_path(path):
"""
Finds the corresponding dbfs file path for the file located at the supplied path by
calculating its hash using SHA1.
:param path: Local File Path
:return: Remote Path (pipeline_base_dir + file_hash (dot) file_extension)
"""
hash_buffer = sha1()
with open(path, 'rb') as f:
while True:
data = f.read(BUFFER_SIZE)
if not data:
break
hash_buffer.update(data)
file_hash = hash_buffer.hexdigest()
# splitext includes the period in the extension
extension = os.path.splitext(path)[1][1:]
if extension == 'whl':
# Wheels need to follow the format described in the PEP, so we simply
# nest the wheel under a directory named after the content hash, keeping the wheel file name
# basename in Python returns the extension as well
wheel_name = os.path.basename(path)
path = '{}/{}/{}'.format(base_pipelines_dir, file_hash, wheel_name)
else:
path = '{}/{}.{}'.format(base_pipelines_dir, file_hash, extension)
return path
class LibraryObject(object):
def __init__(self, lib_type, lib_path):
self.path = lib_path
self.lib_type = lib_type
@classmethod
def from_json(cls, libraries):
"""
Deserialize library dictionaries into LibraryObjects
:param libraries: List[Dictionary{String, String}]
:return: List[LibraryObject]
"""
lib_objects = []
for library in libraries:
for lib_type, path in library.items():
lib_objects.append(LibraryObject(lib_type, path))
return lib_objects
@classmethod
def to_json(cls, lib_objects):
"""
Serialize LibraryObjects into library dictionaries
:param lib_objects: List[LibraryObject]
:return: List[Dictionary{String, String}]
"""
libraries = []
for lib_object in lib_objects:
libraries.append({lib_object.lib_type: lib_object.path})
return libraries
def __eq__(self, other):
if not isinstance(other, LibraryObject):
return NotImplemented
return self.path == other.path and self.lib_type == other.lib_type | for llo, rlo in upload_files: |
config_schema.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from schema import Schema, And, Optional, Regex, Or
from .constants import SCHEMA_TYPE_ERROR, SCHEMA_RANGE_ERROR, SCHEMA_PATH_ERROR
def setType(key, valueType):
'''check key type'''
return And(valueType, error=SCHEMA_TYPE_ERROR % (key, valueType.__name__))
def setChoice(key, *args):
'''check choice'''
return And(lambda n: n in args, error=SCHEMA_RANGE_ERROR % (key, str(args)))
def setNumberRange(key, keyType, start, end):
'''check number range'''
return And(
And(keyType, error=SCHEMA_TYPE_ERROR % (key, keyType.__name__)),
And(lambda n: start <= n <= end, error=SCHEMA_RANGE_ERROR % (key, '(%s,%s)' % (start, end))),
)
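# Illustrative note (not in the original file): these helpers just compose schema validators, e.g.
#   setNumberRange('trialConcurrency', int, 1, 99999).validate(4)   # -> 4
#   setChoice('logLevel', 'info', 'debug').validate('warning')      # raises SchemaError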
def setPathCheck(key):
|
common_schema = {
'authorName': setType('authorName', str),
'experimentName': setType('experimentName', str),
Optional('description'): setType('description', str),
'trialConcurrency': setNumberRange('trialConcurrency', int, 1, 99999),
Optional('maxExecDuration'): And(Regex(r'^[1-9][0-9]*[s|m|h|d]$', error='ERROR: maxExecDuration format is [digit]{s,m,h,d}')),
Optional('maxTrialNum'): setNumberRange('maxTrialNum', int, 1, 99999),
'trainingServicePlatform': setChoice('trainingServicePlatform', 'remote', 'local', 'pai', 'kubeflow', 'frameworkcontroller'),
Optional('searchSpacePath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'searchSpacePath'),
Optional('multiPhase'): setType('multiPhase', bool),
Optional('multiThread'): setType('multiThread', bool),
Optional('nniManagerIp'): setType('nniManagerIp', str),
Optional('logDir'): And(os.path.isdir, error=SCHEMA_PATH_ERROR % 'logDir'),
Optional('debug'): setType('debug', bool),
Optional('versionCheck'): setType('versionCheck', bool),
Optional('logLevel'): setChoice('logLevel', 'trace', 'debug', 'info', 'warning', 'error', 'fatal'),
Optional('logCollection'): setChoice('logCollection', 'http', 'none'),
'useAnnotation': setType('useAnnotation', bool),
Optional('tuner'): dict,
Optional('advisor'): dict,
Optional('assessor'): dict,
Optional('localConfig'): {
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int),
Optional('useActiveGpu'): setType('useActiveGpu', bool)
}
}
tuner_schema_dict = {
('Anneal', 'SMAC'): {
'builtinTunerName': setChoice('builtinTunerName', 'Anneal', 'SMAC'),
Optional('classArgs'): {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
('Evolution'): {
'builtinTunerName': setChoice('builtinTunerName', 'Evolution'),
Optional('classArgs'): {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('population_size'): setNumberRange('population_size', int, 0, 99999),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
('BatchTuner', 'GridSearch', 'Random'): {
'builtinTunerName': setChoice('builtinTunerName', 'BatchTuner', 'GridSearch', 'Random'),
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'TPE': {
'builtinTunerName': 'TPE',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('parallel_optimize'): setType('parallel_optimize', bool),
Optional('constant_liar_type'): setChoice('constant_liar_type', 'min', 'max', 'mean')
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'NetworkMorphism': {
'builtinTunerName': 'NetworkMorphism',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('task'): setChoice('task', 'cv', 'nlp', 'common'),
Optional('input_width'): setType('input_width', int),
Optional('input_channel'): setType('input_channel', int),
Optional('n_output_node'): setType('n_output_node', int),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'MetisTuner': {
'builtinTunerName': 'MetisTuner',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('no_resampling'): setType('no_resampling', bool),
Optional('no_candidates'): setType('no_candidates', bool),
Optional('selection_num_starting_points'): setType('selection_num_starting_points', int),
Optional('cold_start_num'): setType('cold_start_num', int),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'GPTuner': {
'builtinTunerName': 'GPTuner',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('utility'): setChoice('utility', 'ei', 'ucb', 'poi'),
Optional('kappa'): setType('kappa', float),
Optional('xi'): setType('xi', float),
Optional('nu'): setType('nu', float),
Optional('alpha'): setType('alpha', float),
Optional('cold_start_num'): setType('cold_start_num', int),
Optional('selection_num_warm_up'): setType('selection_num_warm_up', int),
Optional('selection_num_starting_points'): setType('selection_num_starting_points', int),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'PPOTuner': {
'builtinTunerName': 'PPOTuner',
'classArgs': {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('trials_per_update'): setNumberRange('trials_per_update', int, 0, 99999),
Optional('epochs_per_update'): setNumberRange('epochs_per_update', int, 0, 99999),
Optional('minibatch_size'): setNumberRange('minibatch_size', int, 0, 99999),
Optional('ent_coef'): setType('ent_coef', float),
Optional('lr'): setType('lr', float),
Optional('vf_coef'): setType('vf_coef', float),
Optional('max_grad_norm'): setType('max_grad_norm', float),
Optional('gamma'): setType('gamma', float),
Optional('lam'): setType('lam', float),
Optional('cliprange'): setType('cliprange', float),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'customized': {
'codeDir': setPathCheck('codeDir'),
'classFileName': setType('classFileName', str),
'className': setType('className', str),
Optional('classArgs'): dict,
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
}
}
advisor_schema_dict = {
'Hyperband':{
'builtinAdvisorName': Or('Hyperband'),
'classArgs': {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('R'): setType('R', int),
Optional('eta'): setType('eta', int)
},
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'BOHB':{
'builtinAdvisorName': Or('BOHB'),
'classArgs': {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('min_budget'): setNumberRange('min_budget', int, 0, 9999),
Optional('max_budget'): setNumberRange('max_budget', int, 0, 9999),
Optional('eta'):setNumberRange('eta', int, 0, 9999),
Optional('min_points_in_model'): setNumberRange('min_points_in_model', int, 0, 9999),
Optional('top_n_percent'): setNumberRange('top_n_percent', int, 1, 99),
Optional('num_samples'): setNumberRange('num_samples', int, 1, 9999),
Optional('random_fraction'): setNumberRange('random_fraction', float, 0, 9999),
Optional('bandwidth_factor'): setNumberRange('bandwidth_factor', float, 0, 9999),
Optional('min_bandwidth'): setNumberRange('min_bandwidth', float, 0, 9999),
},
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'customized':{
'codeDir': setPathCheck('codeDir'),
'classFileName': setType('classFileName', str),
'className': setType('className', str),
Optional('classArgs'): dict,
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
}
}
assessor_schema_dict = {
'Medianstop': {
'builtinAssessorName': 'Medianstop',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('start_step'): setNumberRange('start_step', int, 0, 9999),
},
},
'Curvefitting': {
'builtinAssessorName': 'Curvefitting',
Optional('classArgs'): {
'epoch_num': setNumberRange('epoch_num', int, 0, 9999),
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('start_step'): setNumberRange('start_step', int, 0, 9999),
Optional('threshold'): setNumberRange('threshold', float, 0, 9999),
Optional('gap'): setNumberRange('gap', int, 1, 9999),
},
},
'customized': {
'codeDir': setPathCheck('codeDir'),
'classFileName': setType('classFileName', str),
'className': setType('className', str),
Optional('classArgs'): dict,
}
}
common_trial_schema = {
'trial':{
'command': setType('command', str),
'codeDir': setPathCheck('codeDir'),
Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode')
}
}
pai_trial_schema = {
'trial':{
'command': setType('command', str),
'codeDir': setPathCheck('codeDir'),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('authFile'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'authFile'),
Optional('shmMB'): setType('shmMB', int),
Optional('dataDir'): And(Regex(r'hdfs://(([0-9]{1,3}.){3}[0-9]{1,3})(:[0-9]{2,5})?(/.*)?'),\
error='ERROR: dataDir format error, dataDir format is hdfs://xxx.xxx.xxx.xxx:xxx'),
Optional('outputDir'): And(Regex(r'hdfs://(([0-9]{1,3}.){3}[0-9]{1,3})(:[0-9]{2,5})?(/.*)?'),\
error='ERROR: outputDir format error, outputDir format is hdfs://xxx.xxx.xxx.xxx:xxx'),
Optional('virtualCluster'): setType('virtualCluster', str),
Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode'),
Optional('portList'): [{
"label": setType('label', str),
"beginAt": setType('beginAt', int),
"portNumber": setType('portNumber', int)
}]
}
}
pai_config_schema = {
'paiConfig': Or({
'userName': setType('userName', str),
'passWord': setType('passWord', str),
'host': setType('host', str)
}, {
'userName': setType('userName', str),
'token': setType('token', str),
'host': setType('host', str)
})
}
kubeflow_trial_schema = {
'trial':{
'codeDir': setPathCheck('codeDir'),
Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode'),
Optional('ps'): {
'replicas': setType('replicas', int),
'command': setType('command', str),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
},
Optional('master'): {
'replicas': setType('replicas', int),
'command': setType('command', str),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
},
Optional('worker'):{
'replicas': setType('replicas', int),
'command': setType('command', str),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
}
}
}
kubeflow_config_schema = {
'kubeflowConfig':Or({
'operator': setChoice('operator', 'tf-operator', 'pytorch-operator'),
'apiVersion': setType('apiVersion', str),
Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
'nfs': {
'server': setType('server', str),
'path': setType('path', str)
}
}, {
'operator': setChoice('operator', 'tf-operator', 'pytorch-operator'),
'apiVersion': setType('apiVersion', str),
Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
'keyVault': {
'vaultName': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
error='ERROR: vaultName format error, vaultName support using (0-9|a-z|A-Z|-)'),
'name': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
error='ERROR: name format error, name support using (0-9|a-z|A-Z|-)')
},
'azureStorage': {
'accountName': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,31}'),\
error='ERROR: accountName format error, accountName support using (0-9|a-z|A-Z|-)'),
'azureShare': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,63}'),\
error='ERROR: azureShare format error, azureShare support using (0-9|a-z|A-Z|-)')
},
Optional('uploadRetryCount'): setNumberRange('uploadRetryCount', int, 1, 99999)
})
}
frameworkcontroller_trial_schema = {
'trial':{
'codeDir': setPathCheck('codeDir'),
'taskRoles': [{
'name': setType('name', str),
'taskNum': setType('taskNum', int),
'frameworkAttemptCompletionPolicy': {
'minFailedTaskCount': setType('minFailedTaskCount', int),
'minSucceededTaskCount': setType('minSucceededTaskCount', int),
},
'command': setType('command', str),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
}]
}
}
frameworkcontroller_config_schema = {
'frameworkcontrollerConfig':Or({
Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
Optional('serviceAccountName'): setType('serviceAccountName', str),
'nfs': {
'server': setType('server', str),
'path': setType('path', str)
}
}, {
Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
Optional('serviceAccountName'): setType('serviceAccountName', str),
'keyVault': {
'vaultName': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
error='ERROR: vaultName format error, vaultName support using (0-9|a-z|A-Z|-)'),
'name': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
error='ERROR: name format error, name support using (0-9|a-z|A-Z|-)')
},
'azureStorage': {
'accountName': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,31}'),\
error='ERROR: accountName format error, accountName support using (0-9|a-z|A-Z|-)'),
'azureShare': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,63}'),\
error='ERROR: azureShare format error, azureShare support using (0-9|a-z|A-Z|-)')
},
Optional('uploadRetryCount'): setNumberRange('uploadRetryCount', int, 1, 99999)
})
}
machine_list_schema = {
Optional('machineList'):[Or({
'ip': setType('ip', str),
Optional('port'): setNumberRange('port', int, 1, 65535),
'username': setType('username', str),
'passwd': setType('passwd', str),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int),
Optional('useActiveGpu'): setType('useActiveGpu', bool)
}, {
'ip': setType('ip', str),
Optional('port'): setNumberRange('port', int, 1, 65535),
'username': setType('username', str),
'sshKeyPath': setPathCheck('sshKeyPath'),
Optional('passphrase'): setType('passphrase', str),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int),
Optional('useActiveGpu'): setType('useActiveGpu', bool)
})]
}
LOCAL_CONFIG_SCHEMA = Schema({**common_schema, **common_trial_schema})
REMOTE_CONFIG_SCHEMA = Schema({**common_schema, **common_trial_schema, **machine_list_schema})
PAI_CONFIG_SCHEMA = Schema({**common_schema, **pai_trial_schema, **pai_config_schema})
KUBEFLOW_CONFIG_SCHEMA = Schema({**common_schema, **kubeflow_trial_schema, **kubeflow_config_schema})
FRAMEWORKCONTROLLER_CONFIG_SCHEMA = Schema({**common_schema, **frameworkcontroller_trial_schema, **frameworkcontroller_config_schema})
| '''check if path exist'''
return And(os.path.exists, error=SCHEMA_PATH_ERROR % key) |
2fa.go | // Copyright (c) 2020 KHS Films
//
// This file is a part of mtproto package.
// See https://github.com/xelaj/mtproto/blob/master/LICENSE for details
package srp
//! WARNING: if you want to understand this algorithm, go to https://core.telegram.org/api/srp, and try to
// follow this code on the right side against the algorithm description on the left side. Then, try to search via Cmd+F for the func
// descriptions in the algorithm description. Then bless your god and drink a few whiskeys. That is as far as I can help
// you to understand this secure-like shit created by the telegram developers.
import (
"crypto/sha256"
"crypto/sha512"
"math/big"
"github.com/pkg/errors"
"github.com/xelaj/go-dry"
"golang.org/x/crypto/pbkdf2"
)
const (
randombyteLen = 256 // 2048 bit
)
// GetInputCheckPassword computes the hashes needed for 2FA, as described in the Telegram docs:
// https://core.telegram.org/api/srp#checking-the-password-with-srp
func GetInputCheckPassword(password string, srpB []byte, mp *ModPow) (*SrpAnswer, error) {
return getInputCheckPassword(password, srpB, mp, dry.RandomBytes(randombyteLen))
}
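// Illustrative usage sketch (not part of the original file; names below are placeholders).
// srpB and mp come from the server's 2FA password settings (srp_B plus the
// PasswordKdfAlgo... parameters copied into ModPow):
//
//	answer, err := GetInputCheckPassword("my-2fa-password", srpB, mp)
//	// answer.GA and answer.M1 are then sent back as the InputCheckPasswordSRP object.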
func getInputCheckPassword(
password string,
srpB []byte,
mp *ModPow,
random []byte,
) (
*SrpAnswer, error,
) {
if password == "" {
return nil, nil
}
err := validateCurrentAlgo(srpB, mp)
if err != nil {
return nil, errors.Wrap(err, "validating CurrentAlgo")
}
p := bytesToBig(mp.P)
g := big.NewInt(int64(mp.G))
gBytes := pad256(g.Bytes())
// random 2048-bit number a
a := bytesToBig(random)
// g_a = pow(g, a) mod p
ga := pad256(bigExp(g, a, p).Bytes())
// g_b = srp_B
gb := pad256(srpB)
// u = H(g_a | g_b)
u := bytesToBig(calcSHA256(ga, gb))
// x = PH2(password, salt1, salt2)
x := bytesToBig(passwordHash2([]byte(password), mp.Salt1, mp.Salt2))
// v = pow(g, x) mod p
v := bigExp(g, x, p)
// k = H(p | g)
k := bytesToBig(calcSHA256(mp.P, gBytes))
// k_v = (k * v) % p
kv := k.Mul(k, v).Mod(k, p)
// t = (g_b - k_v) % p
t := bytesToBig(srpB)
if t.Sub(t, kv).Cmp(big.NewInt(0)) == -1 {
t.Add(t, p)
}
// s_a = pow(t, a + u * x) mod p
sa := pad256(bigExp(t, u.Mul(u, x).Add(u, a), p).Bytes())
// k_a = H(s_a)
ka := calcSHA256(sa)
// M1 := H(H(p) xor H(g) | H2(salt1) | H2(salt2) | g_a | g_b | k_a)
M1 := calcSHA256(
dry.BytesXor(calcSHA256(mp.P), calcSHA256(gBytes)),
calcSHA256(mp.Salt1),
calcSHA256(mp.Salt2),
ga,
gb,
ka,
)
return &SrpAnswer{
GA: ga,
M1: M1,
}, nil
}
// this is simpler struct, copied from PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow
type ModPow struct {
Salt1 []byte
Salt2 []byte
G int32
P []byte
}
// copy of InputCheckPasswordSRPObj
type SrpAnswer struct {
GA []byte
M1 []byte
}
// Validating mod pow from server side. just works, don't touch.
func validateCurrentAlgo(srpB []byte, mp *ModPow) error {
if dhHandshakeCheckConfigIsError(mp.G, mp.P) {
return errors.New("receive invalid config g")
}
p := bytesToBig(mp.P)
gb := bytesToBig(srpB)
//? awwww so cute ref (^_^), try to guess ↓↓↓
if big.NewInt(0).Cmp(gb) != -1 || gb.Cmp(p) != -1 || len(srpB) < 248 || len(srpB) > 256 {
return errors.New("receive invalid value of B")
}
return nil
}
// SH(data, salt) := H(salt | data | salt)
func saltingHashing(data, salt []byte) []byte {
return calcSHA256(salt, data, salt)
}
func passwordHash1(password, salt1, salt2 []byte) []byte {
return saltingHashing(saltingHashing(password, salt1), salt2)
}
func passwordHash2(password, salt1, salt2 []byte) []byte {
return saltingHashing(pbkdf2sha512(passwordHash1(password, salt1, salt2), salt1, 100000), salt2)
}
func pbkdf2sha512(hash1 []byte, salt1 []byte, i int) []byte {
return pbkdf2.Key(hash1, salt1, i, 64, sha512.New)
}
func pad256(b []byte) []byte {
if len(b) >= 256 {
return b[len(b)-256:]
}
tmp := make([]b | b):], b)
return tmp
}
// joining arrays into single one and calculating hash
// H(a | b | c)
func calcSHA256(arrays ...[]byte) []byte {
h := sha256.New()
for _, arr := range arrays {
h.Write(arr)
}
return h.Sum(nil)
}
func bytesToBig(b []byte) *big.Int {
return new(big.Int).SetBytes(b)
}
func bigExp(x, y, m *big.Int) *big.Int {
return new(big.Int).Exp(x, y, m)
}
func dhHandshakeCheckConfigIsError(gInt int32, primeStr []byte) bool {
//prime := new(big.Int).SetBytes(primeStr)
//_ = prime
// The function is described here and performs some validation checks.
// Too lazy to implement it for now.
// TODO: actually implement it
// or just leave it be, ¯\_(ツ)_/¯
// https://github.com/tdlib/td/blob/f9009cbc01e9c4c77d31120a61feb9c639c6aeda/td/mtproto/DhHandshake.cpp
return false
}
| yte, 256)
copy(tmp[256-len( |
mod.rs | pub(crate) mod multiple_keys;
use polars_arrow::utils::CustomIterTools;
use crate::frame::hash_join::multiple_keys::{
inner_join_multiple_keys, left_join_multiple_keys, outer_join_multiple_keys,
};
use crate::frame::select::Selection;
use crate::prelude::*;
use crate::utils::{set_partition_size, split_ca};
use crate::vector_hasher::{
create_hash_and_keys_threaded_vectorized, prepare_hashed_relation_threaded, this_partition,
AsU64, StrHash,
};
use crate::{datatypes::PlHashMap, POOL};
use ahash::RandomState;
use hashbrown::hash_map::{Entry, RawEntryMut};
use hashbrown::HashMap;
use itertools::Itertools;
use rayon::prelude::*;
use std::collections::HashSet;
use std::fmt::Debug;
use std::hash::{BuildHasher, Hash, Hasher};
use std::ops::Deref;
use unsafe_unwrap::UnsafeUnwrap;
#[cfg(feature = "private")]
pub use self::multiple_keys::private_left_join_multiple_keys;
use crate::frame::groupby::hashing::HASHMAP_INIT_SIZE;
use crate::utils::series::to_physical;
/// If Categorical types are created without a global string cache, or under
/// a different global string cache, the mapping will be incorrect.
#[cfg(feature = "dtype-categorical")]
pub(crate) fn check_categorical_src(l: &Series, r: &Series) -> Result<()> {
if let (Ok(l), Ok(r)) = (l.categorical(), r.categorical()) {
let l = l.categorical_map.as_ref().unwrap();
let r = r.categorical_map.as_ref().unwrap();
if !l.same_src(&*r) {
return Err(PolarsError::ValueError("joins on categorical dtypes can only happen if they are created under the same global string cache".into()));
}
}
Ok(())
}
macro_rules! det_hash_prone_order {
($self:expr, $other:expr) => {{
// The shortest relation will be used to create a hash table.
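        // Returns (probe, build, swap): `build` is the shorter side that gets hashed,
        // `probe` is the longer side, and `swap` tells the caller whether the inputs were
        // exchanged so the output tuples can be restored to (left, right) order.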
let left_first = $self.len() > $other.len();
let a;
let b;
if left_first {
a = $self;
b = $other;
} else {
b = $self;
a = $other;
}
(a, b, !left_first)
}};
}
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum JoinType {
Left,
Inner,
Outer,
#[cfg(feature = "asof_join")]
AsOf,
#[cfg(feature = "cross_join")]
Cross,
}
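/// Select the hash table (partition) that owns hash `h`. `len` must be a power of two so the
/// bitwise AND below can stand in for a modulo; it must pick the same partition the key was
/// assigned to when the tables were built.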
pub(crate) unsafe fn get_hash_tbl_threaded_join_partitioned<T, H>(
h: u64,
hash_tables: &[HashMap<T, Vec<u32>, H>],
len: u64,
) -> &HashMap<T, Vec<u32>, H> {
let mut idx = 0;
for i in 0..len {
// can only be done for powers of two.
// n % 2^i = n & (2^i - 1)
if (h + i) & (len - 1) == 0 {
idx = i as usize;
}
}
hash_tables.get_unchecked(idx)
}
#[allow(clippy::type_complexity)]
unsafe fn get_hash_tbl_threaded_join_mut_partitioned<T, H>(
h: u64,
hash_tables: &mut [HashMap<T, (bool, Vec<u32>), H>],
len: u64,
) -> &mut HashMap<T, (bool, Vec<u32>), H> {
let mut idx = 0;
for i in 0..len {
// can only be done for powers of two.
// n % 2^i = n & (2^i - 1)
if (h + i) & (len - 1) == 0 {
idx = i as usize;
}
}
hash_tables.get_unchecked_mut(idx)
}
/// Probe the build table and add tuples to the results (inner join)
fn probe_inner<T, F>(
probe: &[T],
hash_tbls: &[PlHashMap<T, Vec<u32>>],
results: &mut Vec<(u32, u32)>,
local_offset: usize,
n_tables: u64,
swap_fn: F,
) where
T: Send + Hash + Eq + Sync + Copy + AsU64,
F: Fn(u32, u32) -> (u32, u32),
{
assert!(hash_tbls.len().is_power_of_two());
probe.iter().enumerate().for_each(|(idx_a, k)| {
let idx_a = (idx_a + local_offset) as u32;
// probe table that contains the hashed value
let current_probe_table =
unsafe { get_hash_tbl_threaded_join_partitioned(k.as_u64(), hash_tbls, n_tables) };
let value = current_probe_table.get(k);
if let Some(indexes_b) = value {
let tuples = indexes_b.iter().map(|&idx_b| swap_fn(idx_a, idx_b));
results.extend(tuples);
}
});
}
pub(crate) fn create_probe_table<T, IntoSlice>(keys: Vec<IntoSlice>) -> Vec<PlHashMap<T, Vec<u32>>>
where
T: Send + Hash + Eq + Sync + Copy + AsU64,
IntoSlice: AsRef<[T]> + Send + Sync,
{
let n_partitions = set_partition_size();
// We will create a hashtable in every thread.
// We use the hash to partition the keys to the matching hashtable.
    // Every thread traverses all keys/hashes and ignores the ones that don't fall in its partition.
POOL.install(|| {
(0..n_partitions).into_par_iter().map(|partition_no| {
let partition_no = partition_no as u64;
let mut hash_tbl: PlHashMap<T, Vec<u32>> = PlHashMap::with_capacity(HASHMAP_INIT_SIZE);
let n_partitions = n_partitions as u64;
let mut offset = 0;
for keys in &keys {
let keys = keys.as_ref();
let len = keys.len() as u32;
let mut cnt = 0;
keys.iter().for_each(|k| {
let idx = cnt + offset;
cnt += 1;
if this_partition(k.as_u64(), partition_no, n_partitions) {
let entry = hash_tbl.entry(*k);
match entry {
Entry::Vacant(entry) => {
entry.insert(vec![idx]);
}
Entry::Occupied(mut entry) => {
let v = entry.get_mut();
v.push(idx);
}
}
}
});
offset += len;
}
hash_tbl
})
})
.collect()
}
fn hash_join_tuples_inner<T, IntoSlice>(
probe: Vec<IntoSlice>,
build: Vec<IntoSlice>,
    // Because b should be the shorter relation, we may need to swap to keep left as left and right as right.
swap: bool,
) -> Vec<(u32, u32)>
where
IntoSlice: AsRef<[T]> + Send + Sync,
T: Send + Hash + Eq + Sync + Copy + AsU64,
{
// NOTE: see the left join for more elaborate comments
// first we hash one relation
let hash_tbls = create_probe_table(build);
let n_tables = hash_tbls.len() as u64;
debug_assert!(n_tables.is_power_of_two());
let offsets = probe
.iter()
.map(|ph| ph.as_ref().len())
.scan(0, |state, val| {
let out = *state;
*state += val;
Some(out)
})
.collect::<Vec<_>>();
// next we probe the other relation
// code duplication is because we want to only do the swap check once
POOL.install(|| {
probe
.into_par_iter()
.zip(offsets)
.map(|(probe, offset)| {
let probe = probe.as_ref();
// local reference
let hash_tbls = &hash_tbls;
let mut results = Vec::with_capacity(probe.len());
let local_offset = offset;
// branch is to hoist swap out of the inner loop.
if swap {
probe_inner(
probe,
hash_tbls,
&mut results,
local_offset,
n_tables,
|idx_a, idx_b| (idx_b, idx_a),
)
} else {
probe_inner(
probe,
hash_tbls,
&mut results,
local_offset,
n_tables,
|idx_a, idx_b| (idx_a, idx_b),
)
}
results
})
.flatten()
.collect()
})
}
fn hash_join_tuples_left<T, IntoSlice>(
probe: Vec<IntoSlice>,
build: Vec<IntoSlice>,
) -> Vec<(u32, Option<u32>)>
where
IntoSlice: AsRef<[T]> + Send + Sync,
T: Send + Hash + Eq + Sync + Copy + AsU64,
{
// first we hash one relation
let hash_tbls = create_probe_table(build);
// we determine the offset so that we later know which index to store in the join tuples
let offsets = probe
.iter()
.map(|ph| ph.as_ref().len())
.scan(0, |state, val| {
let out = *state;
*state += val;
Some(out)
})
.collect::<Vec<_>>();
let n_tables = hash_tbls.len() as u64;
debug_assert!(n_tables.is_power_of_two());
// next we probe the other relation
POOL.install(|| {
probe
.into_par_iter()
.zip(offsets)
            // probe: the keys processed by this thread
            // offset: the index offset of this chunk
.map(|(probe, offset)| {
// local reference
let hash_tbls = &hash_tbls;
let probe = probe.as_ref();
                // assume the number of result tuples equals the number of keys processed by this thread.
let mut results = Vec::with_capacity(probe.len());
probe.iter().enumerate().for_each(|(idx_a, k)| {
let idx_a = (idx_a + offset) as u32;
// probe table that contains the hashed value
let current_probe_table = unsafe {
get_hash_tbl_threaded_join_partitioned(k.as_u64(), hash_tbls, n_tables)
};
// we already hashed, so we don't have to hash again.
let value = current_probe_table.get(k);
match value {
// left and right matches
Some(indexes_b) => {
results.extend(indexes_b.iter().map(|&idx_b| (idx_a, Some(idx_b))))
}
// only left values, right = null
None => results.push((idx_a, None)),
}
});
results
})
.flatten()
.collect()
})
}
/// Probe the build table and add tuples to the results (outer join)
fn probe_outer<T, F, G, H>(
probe_hashes: &[Vec<(u64, T)>],
hash_tbls: &mut [PlHashMap<T, (bool, Vec<u32>)>],
results: &mut Vec<(Option<u32>, Option<u32>)>,
n_tables: u64,
    // Function that gets index_a, index_b when there is a match and pushes to the results
    swap_fn_match: F,
    // Function that gets index_a when there is no match and pushes to the results
    swap_fn_no_match: G,
    // Function that gets index_b from the build table that did not match any in A and pushes to the results
    swap_fn_drain: H,
) where
T: Send + Hash + Eq + Sync + Copy,
// idx_a, idx_b -> ...
F: Fn(u32, u32) -> (Option<u32>, Option<u32>),
// idx_a -> ...
G: Fn(u32) -> (Option<u32>, Option<u32>),
// idx_b -> ...
H: Fn(u32) -> (Option<u32>, Option<u32>),
{
    // needed so that the bitwise partition selection (used instead of a modulo) is valid
assert!(n_tables.is_power_of_two());
let mut idx_a = 0;
for probe_hashes in probe_hashes {
for (h, key) in probe_hashes {
let h = *h;
// probe table that contains the hashed value
let current_probe_table =
unsafe { get_hash_tbl_threaded_join_mut_partitioned(h, hash_tbls, n_tables) };
let entry = current_probe_table
.raw_entry_mut()
.from_key_hashed_nocheck(h, key);
match entry {
// match and remove
RawEntryMut::Occupied(mut occupied) => {
let (tracker, indexes_b) = occupied.get_mut();
*tracker = true;
results.extend(indexes_b.iter().map(|&idx_b| swap_fn_match(idx_a, idx_b)))
}
// no match
RawEntryMut::Vacant(_) => results.push(swap_fn_no_match(idx_a)),
}
idx_a += 1;
}
}
for hash_tbl in hash_tbls {
hash_tbl.iter().for_each(|(_k, (tracker, indexes_b))| {
// remaining joined values from the right table
if !*tracker {
results.extend(indexes_b.iter().map(|&idx_b| swap_fn_drain(idx_b)))
}
});
}
}
/// Hash join outer. Both left and right can have no match so Options
fn hash_join_tuples_outer<T, I, J>(
a: Vec<I>,
b: Vec<J>,
swap: bool,
) -> Vec<(Option<u32>, Option<u32>)>
where
I: Iterator<Item = T> + Send + TrustedLen,
J: Iterator<Item = T> + Send + TrustedLen,
T: Hash + Eq + Copy + Sync + Send,
{
// This function is partially multi-threaded.
// Parts that are done in parallel:
// - creation of the probe tables
// - creation of the hashes
// during the probe phase values are removed from the tables, that's done single threaded to
// keep it lock free.
let size = a.iter().map(|a| a.size_hint().0).sum::<usize>()
+ b.iter().map(|b| b.size_hint().0).sum::<usize>();
let mut results = Vec::with_capacity(size);
// prepare hash table
let mut hash_tbls = prepare_hashed_relation_threaded(b);
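    // The probe hashes must be produced with the same RandomState as the build tables so the
    // raw-entry lookups by hash below stay consistent; hence the hasher is cloned from table 0.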
let random_state = hash_tbls[0].hasher().clone();
// we pre hash the probing values
let (probe_hashes, _) = create_hash_and_keys_threaded_vectorized(a, Some(random_state));
let n_tables = hash_tbls.len() as u64;
// probe the hash table.
// Note: indexes from b that are not matched will be None, Some(idx_b)
// Therefore we remove the matches and the remaining will be joined from the right
// branch is because we want to only do the swap check once
if swap {
probe_outer(
&probe_hashes,
&mut hash_tbls,
&mut results,
n_tables,
|idx_a, idx_b| (Some(idx_b), Some(idx_a)),
|idx_a| (None, Some(idx_a)),
|idx_b| (Some(idx_b), None),
)
} else {
probe_outer(
&probe_hashes,
&mut hash_tbls,
&mut results,
n_tables,
|idx_a, idx_b| (Some(idx_a), Some(idx_b)),
|idx_a| (Some(idx_a), None),
|idx_b| (None, Some(idx_b)),
)
}
results
}
pub(crate) trait HashJoin<T> {
fn hash_join_inner(&self, _other: &ChunkedArray<T>) -> Vec<(u32, u32)> {
unimplemented!()
}
fn hash_join_left(&self, _other: &ChunkedArray<T>) -> Vec<(u32, Option<u32>)> {
unimplemented!()
}
fn hash_join_outer(&self, _other: &ChunkedArray<T>) -> Vec<(Option<u32>, Option<u32>)> {
unimplemented!()
}
}
impl HashJoin<Float32Type> for Float32Chunked {
fn hash_join_inner(&self, other: &Float32Chunked) -> Vec<(u32, u32)> {
let ca = self.bit_repr_small();
let other = other.bit_repr_small();
ca.hash_join_inner(&other)
}
fn hash_join_left(&self, other: &Float32Chunked) -> Vec<(u32, Option<u32>)> {
let ca = self.bit_repr_small();
let other = other.bit_repr_small();
ca.hash_join_left(&other)
}
fn hash_join_outer(&self, other: &Float32Chunked) -> Vec<(Option<u32>, Option<u32>)> {
let ca = self.bit_repr_small();
let other = other.bit_repr_small();
ca.hash_join_outer(&other)
}
}
impl HashJoin<Float64Type> for Float64Chunked {
fn hash_join_inner(&self, other: &Float64Chunked) -> Vec<(u32, u32)> {
let ca = self.bit_repr_large();
let other = other.bit_repr_large();
ca.hash_join_inner(&other)
}
fn hash_join_left(&self, other: &Float64Chunked) -> Vec<(u32, Option<u32>)> {
let ca = self.bit_repr_large();
let other = other.bit_repr_large();
ca.hash_join_left(&other)
}
fn hash_join_outer(&self, other: &Float64Chunked) -> Vec<(Option<u32>, Option<u32>)> {
let ca = self.bit_repr_large();
let other = other.bit_repr_large();
ca.hash_join_outer(&other)
}
}
impl HashJoin<CategoricalType> for CategoricalChunked {
fn hash_join_inner(&self, other: &CategoricalChunked) -> Vec<(u32, u32)> {
self.deref().hash_join_inner(other.deref())
}
fn hash_join_left(&self, other: &CategoricalChunked) -> Vec<(u32, Option<u32>)> {
self.deref().hash_join_left(other.deref())
}
fn hash_join_outer(&self, other: &CategoricalChunked) -> Vec<(Option<u32>, Option<u32>)> {
self.deref().hash_join_outer(other.deref())
}
}
fn num_group_join_inner<T>(left: &ChunkedArray<T>, right: &ChunkedArray<T>) -> Vec<(u32, u32)>
where
T: PolarsIntegerType,
T::Native: Hash + Eq + Send + AsU64 + Copy,
Option<T::Native>: AsU64,
{
let n_threads = POOL.current_num_threads();
let (a, b, swap) = det_hash_prone_order!(left, right);
let splitted_a = split_ca(a, n_threads).unwrap();
let splitted_b = split_ca(b, n_threads).unwrap();
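    // Fast paths: with no nulls and single chunks the contiguous slices are borrowed directly;
    // with no nulls and multiple chunks the values are materialized; otherwise the (possibly
    // null) values are encoded as u64 before hashing.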
match (
left.has_validity(),
right.has_validity(),
left.chunks.len(),
right.chunks.len(),
) {
(false, false, 1, 1) => {
let keys_a = splitted_a
.iter()
.map(|ca| ca.cont_slice().unwrap())
.collect::<Vec<_>>();
let keys_b = splitted_b
.iter()
.map(|ca| ca.cont_slice().unwrap())
.collect::<Vec<_>>();
hash_join_tuples_inner(keys_a, keys_b, swap)
}
(false, false, _, _) => {
let keys_a = splitted_a
.iter()
.map(|ca| ca.into_no_null_iter().collect::<Vec<_>>())
.collect::<Vec<_>>();
let keys_b = splitted_b
.iter()
.map(|ca| ca.into_no_null_iter().collect::<Vec<_>>())
.collect::<Vec<_>>();
hash_join_tuples_inner(keys_a, keys_b, swap)
}
(_, _, 1, 1) => {
let keys_a = splitted_a
.iter()
.map(|ca| {
ca.downcast_iter()
.map(|v| v.into_iter().map(|v| v.copied().as_u64()))
.flatten()
.collect::<Vec<_>>()
})
.collect::<Vec<_>>();
let keys_b = splitted_b
.iter()
.map(|ca| {
ca.downcast_iter()
.map(|v| v.into_iter().map(|v| v.copied().as_u64()))
.flatten()
.collect::<Vec<_>>()
})
.collect::<Vec<_>>();
hash_join_tuples_inner(keys_a, keys_b, swap)
}
_ => {
let keys_a = splitted_a
.iter()
.map(|ca| ca.into_iter().map(|v| v.as_u64()).collect::<Vec<_>>())
.collect::<Vec<_>>();
let keys_b = splitted_b
.iter()
.map(|ca| ca.into_iter().map(|v| v.as_u64()).collect::<Vec<_>>())
.collect::<Vec<_>>();
hash_join_tuples_inner(keys_a, keys_b, swap)
}
}
}
fn num_group_join_left<T>(
left: &ChunkedArray<T>,
right: &ChunkedArray<T>,
) -> Vec<(u32, Option<u32>)>
where
T: PolarsIntegerType,
T::Native: Hash + Eq + Send + AsU64,
Option<T::Native>: AsU64,
{
let n_threads = POOL.current_num_threads();
let splitted_a = split_ca(left, n_threads).unwrap();
let splitted_b = split_ca(right, n_threads).unwrap();
match (
left.has_validity(),
right.has_validity(),
left.chunks.len(),
right.chunks.len(),
) {
(false, false, 1, 1) => {
let keys_a = splitted_a
.iter()
.map(|ca| ca.cont_slice().unwrap())
.collect::<Vec<_>>();
let keys_b = splitted_b
.iter()
.map(|ca| ca.cont_slice().unwrap())
.collect::<Vec<_>>();
hash_join_tuples_left(keys_a, keys_b)
}
(false, false, _, _) => {
let keys_a = splitted_a
.iter()
.map(|ca| ca.into_no_null_iter().collect_trusted::<Vec<_>>())
.collect::<Vec<_>>();
let keys_b = splitted_b
.iter()
.map(|ca| ca.into_no_null_iter().collect_trusted::<Vec<_>>())
.collect::<Vec<_>>();
hash_join_tuples_left(keys_a, keys_b)
}
(_, _, 1, 1) => {
let keys_a = splitted_a
.iter()
.map(|ca| {
ca.downcast_iter()
.map(|v| v.into_iter().map(|v| v.copied().as_u64()))
.flatten()
.trust_my_length(ca.len())
.collect_trusted::<Vec<_>>()
})
.collect::<Vec<_>>();
let keys_b = splitted_b
.iter()
.map(|ca| {
ca.downcast_iter()
.map(|v| v.into_iter().map(|v| v.copied().as_u64()))
.flatten()
.trust_my_length(ca.len())
.collect_trusted::<Vec<_>>()
})
.collect::<Vec<_>>();
hash_join_tuples_left(keys_a, keys_b)
}
_ => {
let keys_a = splitted_a
.iter()
.map(|ca| {
ca.into_iter()
.map(|v| v.as_u64())
.collect_trusted::<Vec<_>>()
})
.collect::<Vec<_>>();
let keys_b = splitted_b
.iter()
.map(|ca| {
ca.into_iter()
.map(|v| v.as_u64())
.collect_trusted::<Vec<_>>()
})
.collect::<Vec<_>>();
hash_join_tuples_left(keys_a, keys_b)
}
}
}
impl<T> HashJoin<T> for ChunkedArray<T>
where
T: PolarsIntegerType + Sync,
T::Native: Eq + Hash + num::NumCast,
{
fn hash_join_inner(&self, other: &ChunkedArray<T>) -> Vec<(u32, u32)> {
match self.dtype() {
DataType::UInt64 => {
// convince the compiler that we are this type.
let ca: &UInt64Chunked = unsafe {
&*(self as *const ChunkedArray<T> as *const ChunkedArray<UInt64Type>)
};
let other: &UInt64Chunked = unsafe {
&*(other as *const ChunkedArray<T> as *const ChunkedArray<UInt64Type>)
};
num_group_join_inner(ca, other)
}
DataType::UInt32 => {
// convince the compiler that we are this type.
let ca: &UInt32Chunked = unsafe {
&*(self as *const ChunkedArray<T> as *const ChunkedArray<UInt32Type>)
};
let other: &UInt32Chunked = unsafe {
&*(other as *const ChunkedArray<T> as *const ChunkedArray<UInt32Type>)
};
num_group_join_inner(ca, other)
}
DataType::Int64 | DataType::Float64 => {
let ca = self.bit_repr_large();
let other = other.bit_repr_large();
num_group_join_inner(&ca, &other)
}
DataType::Int32 | DataType::Float32 => {
let ca = self.bit_repr_small();
let other = other.bit_repr_small();
num_group_join_inner(&ca, &other)
}
_ => {
let ca = self.cast(&DataType::UInt32).unwrap();
let ca = ca.u32().unwrap();
let other = other.cast(&DataType::UInt32).unwrap();
let other = other.u32().unwrap();
num_group_join_inner(ca, other)
}
}
}
fn hash_join_left(&self, other: &ChunkedArray<T>) -> Vec<(u32, Option<u32>)> {
match self.dtype() {
DataType::UInt64 => {
// convince the compiler that we are this type.
let ca: &UInt64Chunked = unsafe {
&*(self as *const ChunkedArray<T> as *const ChunkedArray<UInt64Type>)
};
let other: &UInt64Chunked = unsafe {
&*(other as *const ChunkedArray<T> as *const ChunkedArray<UInt64Type>)
};
num_group_join_left(ca, other)
}
DataType::UInt32 => {
// convince the compiler that we are this type.
let ca: &UInt32Chunked = unsafe {
&*(self as *const ChunkedArray<T> as *const ChunkedArray<UInt32Type>)
};
let other: &UInt32Chunked = unsafe {
&*(other as *const ChunkedArray<T> as *const ChunkedArray<UInt32Type>)
};
num_group_join_left(ca, other)
}
DataType::Int64 | DataType::Float64 => {
let ca = self.bit_repr_large();
let other = other.bit_repr_large();
num_group_join_left(&ca, &other)
}
DataType::Int32 | DataType::Float32 => {
let ca = self.bit_repr_small();
let other = other.bit_repr_small();
num_group_join_left(&ca, &other)
}
_ => {
let ca = self.cast(&DataType::UInt32).unwrap();
let ca = ca.u32().unwrap();
let other = other.cast(&DataType::UInt32).unwrap();
let other = other.u32().unwrap();
num_group_join_left(ca, other)
}
}
}
fn hash_join_outer(&self, other: &ChunkedArray<T>) -> Vec<(Option<u32>, Option<u32>)> {
let (a, b, swap) = det_hash_prone_order!(self, other);
let n_partitions = set_partition_size();
let splitted_a = split_ca(a, n_partitions).unwrap();
let splitted_b = split_ca(b, n_partitions).unwrap();
match (a.has_validity(), b.has_validity()) {
(false, false) => {
let iters_a = splitted_a
.iter()
.map(|ca| ca.into_no_null_iter())
.collect_vec();
let iters_b = splitted_b
.iter()
.map(|ca| ca.into_no_null_iter())
.collect_vec();
hash_join_tuples_outer(iters_a, iters_b, swap)
}
_ => {
let iters_a = splitted_a.iter().map(|ca| ca.into_iter()).collect_vec();
let iters_b = splitted_b.iter().map(|ca| ca.into_iter()).collect_vec();
hash_join_tuples_outer(iters_a, iters_b, swap)
}
}
}
}
impl HashJoin<BooleanType> for BooleanChunked {
fn hash_join_inner(&self, other: &BooleanChunked) -> Vec<(u32, u32)> {
let ca = self.cast(&DataType::UInt32).unwrap();
let ca = ca.u32().unwrap();
let other = other.cast(&DataType::UInt32).unwrap();
let other = other.u32().unwrap();
ca.hash_join_inner(other)
}
fn hash_join_left(&self, other: &BooleanChunked) -> Vec<(u32, Option<u32>)> {
let ca = self.cast(&DataType::UInt32).unwrap();
let ca = ca.u32().unwrap();
let other = other.cast(&DataType::UInt32).unwrap();
let other = other.u32().unwrap();
ca.hash_join_left(other)
}
fn hash_join_outer(&self, other: &BooleanChunked) -> Vec<(Option<u32>, Option<u32>)> {
let (a, b, swap) = det_hash_prone_order!(self, other);
let n_partitions = set_partition_size();
let splitted_a = split_ca(a, n_partitions).unwrap();
let splitted_b = split_ca(b, n_partitions).unwrap();
match (a.has_validity(), b.has_validity()) {
(false, false) => {
let iters_a = splitted_a
.iter()
.map(|ca| ca.into_no_null_iter())
.collect_vec();
let iters_b = splitted_b
.iter()
.map(|ca| ca.into_no_null_iter())
.collect_vec();
hash_join_tuples_outer(iters_a, iters_b, swap)
}
_ => {
let iters_a = splitted_a.iter().map(|ca| ca.into_iter()).collect_vec();
let iters_b = splitted_b.iter().map(|ca| ca.into_iter()).collect_vec();
hash_join_tuples_outer(iters_a, iters_b, swap)
}
}
}
}
fn prepare_strs<'a>(been_split: &'a [Utf8Chunked], hb: &RandomState) -> Vec<Vec<StrHash<'a>>> {
POOL.install(|| {
been_split
.par_iter()
.map(|ca| {
ca.into_iter()
.map(|opt_s| {
let mut state = hb.build_hasher();
opt_s.hash(&mut state);
let hash = state.finish();
StrHash::new(opt_s, hash)
})
.collect::<Vec<_>>()
})
.collect()
})
}
impl HashJoin<Utf8Type> for Utf8Chunked {
fn hash_join_inner(&self, other: &Utf8Chunked) -> Vec<(u32, u32)> {
let n_threads = POOL.current_num_threads();
let (a, b, swap) = det_hash_prone_order!(self, other);
let hb = RandomState::default();
let splitted_a = split_ca(a, n_threads).unwrap();
let splitted_b = split_ca(b, n_threads).unwrap();
let str_hashes_a = prepare_strs(&splitted_a, &hb);
let str_hashes_b = prepare_strs(&splitted_b, &hb);
hash_join_tuples_inner(str_hashes_a, str_hashes_b, swap)
}
fn hash_join_left(&self, other: &Utf8Chunked) -> Vec<(u32, Option<u32>)> {
let n_threads = POOL.current_num_threads();
let hb = RandomState::default();
let splitted_a = split_ca(self, n_threads).unwrap();
let splitted_b = split_ca(other, n_threads).unwrap();
let str_hashes_a = prepare_strs(&splitted_a, &hb);
let str_hashes_b = prepare_strs(&splitted_b, &hb);
hash_join_tuples_left(str_hashes_a, str_hashes_b)
}
fn hash_join_outer(&self, other: &Utf8Chunked) -> Vec<(Option<u32>, Option<u32>)> {
let (a, b, swap) = det_hash_prone_order!(self, other);
let n_partitions = set_partition_size();
let splitted_a = split_ca(a, n_partitions).unwrap();
let splitted_b = split_ca(b, n_partitions).unwrap();
match (a.has_validity(), b.has_validity()) {
(false, false) => {
let iters_a = splitted_a
.iter()
.map(|ca| ca.into_no_null_iter())
.collect_vec();
let iters_b = splitted_b
.iter()
.map(|ca| ca.into_no_null_iter())
.collect_vec();
hash_join_tuples_outer(iters_a, iters_b, swap)
}
_ => {
let iters_a = splitted_a
.iter()
.map(|ca| ca.into_iter())
.collect::<Vec<_>>();
let iters_b = splitted_b
.iter()
.map(|ca| ca.into_iter())
.collect::<Vec<_>>();
hash_join_tuples_outer(iters_a, iters_b, swap)
}
}
}
}
pub trait ZipOuterJoinColumn {
fn zip_outer_join_column(
&self,
_right_column: &Series,
_opt_join_tuples: &[(Option<u32>, Option<u32>)],
) -> Series {
unimplemented!()
}
}
impl<T> ZipOuterJoinColumn for ChunkedArray<T>
where
T: PolarsIntegerType,
ChunkedArray<T>: IntoSeries,
{
fn zip_outer_join_column(
&self,
right_column: &Series,
opt_join_tuples: &[(Option<u32>, Option<u32>)],
) -> Series {
let right_ca = self.unpack_series_matching_type(right_column).unwrap();
let left_rand_access = self.take_rand();
let right_rand_access = right_ca.take_rand();
opt_join_tuples
.iter()
.map(|(opt_left_idx, opt_right_idx)| {
if let Some(left_idx) = opt_left_idx {
unsafe { left_rand_access.get_unchecked(*left_idx as usize) }
} else {
unsafe {
let right_idx = opt_right_idx.unsafe_unwrap();
right_rand_access.get_unchecked(right_idx as usize)
}
}
})
.collect_trusted::<ChunkedArray<T>>()
.into_series()
}
}
macro_rules! impl_zip_outer_join {
($chunkedtype:ident) => {
impl ZipOuterJoinColumn for $chunkedtype {
fn zip_outer_join_column(
&self,
right_column: &Series,
opt_join_tuples: &[(Option<u32>, Option<u32>)],
) -> Series {
let right_ca = self.unpack_series_matching_type(right_column).unwrap();
let left_rand_access = self.take_rand();
let right_rand_access = right_ca.take_rand();
opt_join_tuples
.iter()
.map(|(opt_left_idx, opt_right_idx)| {
if let Some(left_idx) = opt_left_idx {
unsafe { left_rand_access.get_unchecked(*left_idx as usize) }
} else {
unsafe {
let right_idx = opt_right_idx.unsafe_unwrap();
right_rand_access.get_unchecked(right_idx as usize)
}
}
})
.collect::<$chunkedtype>()
.into_series()
}
}
};
}
impl_zip_outer_join!(BooleanChunked);
impl_zip_outer_join!(Utf8Chunked);
impl ZipOuterJoinColumn for Float32Chunked {
fn zip_outer_join_column(
&self,
right_column: &Series,
opt_join_tuples: &[(Option<u32>, Option<u32>)],
) -> Series |
}
impl ZipOuterJoinColumn for Float64Chunked {
fn zip_outer_join_column(
&self,
right_column: &Series,
opt_join_tuples: &[(Option<u32>, Option<u32>)],
) -> Series {
self.apply_as_ints(|s| {
s.zip_outer_join_column(
&right_column.bit_repr_large().into_series(),
opt_join_tuples,
)
})
}
}
impl DataFrame {
/// Utility method to finish a join.
pub(crate) fn finish_join(
&self,
mut df_left: DataFrame,
mut df_right: DataFrame,
suffix: Option<String>,
) -> Result<DataFrame> {
let mut left_names = HashSet::with_capacity_and_hasher(df_left.width(), RandomState::new());
df_left.columns.iter().for_each(|series| {
left_names.insert(series.name());
});
let mut rename_strs = Vec::with_capacity(df_right.width());
df_right.columns.iter().for_each(|series| {
if left_names.contains(series.name()) {
rename_strs.push(series.name().to_owned())
}
});
let suffix = suffix.as_deref().unwrap_or("_right");
for name in rename_strs {
df_right.rename(&name, &format!("{}{}", name, suffix))?;
}
df_left.hstack_mut(&df_right.columns)?;
Ok(df_left)
}
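    /// Gather the left-hand rows of the join result. A left join that produced exactly one
    /// tuple per left row can reuse the frame as-is; otherwise rows are taken by index.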
fn create_left_df<B: Sync>(&self, join_tuples: &[(u32, B)], left_join: bool) -> DataFrame {
if left_join && join_tuples.len() == self.height() {
self.clone()
} else {
unsafe {
self.take_iter_unchecked(join_tuples.iter().map(|(left, _right)| *left as usize))
}
}
}
/// Generic join method. Can be used to join on multiple columns.
///
/// # Example
///
/// ```rust
/// use polars_core::df;
/// use polars_core::prelude::*;
///
/// fn example() -> Result<()> {
/// let df1: DataFrame = df!("Fruit" => &["Apple", "Banana", "Pear"],
/// "Phosphorus (mg/100g)" => &[11, 22, 12])?;
/// let df2: DataFrame = df!("Name" => &["Apple", "Banana", "Pear"],
/// "Potassium (mg/100g)" => &[107, 358, 115])?;
///
/// let df3: DataFrame = df1.join(&df2, "Fruit", "Name", JoinType::Inner, None)?;
/// assert_eq!(df3.shape(), (3, 3));
/// println!("{}", df3);
///
/// Ok(())
/// }
/// ```
///
/// Output:
///
/// ```text
/// shape: (3, 3)
/// +--------+----------------------+---------------------+
/// | Fruit | Phosphorus (mg/100g) | Potassium (mg/100g) |
/// | --- | --- | --- |
/// | str | i32 | i32 |
/// +========+======================+=====================+
/// | Apple | 11 | 107 |
/// +--------+----------------------+---------------------+
/// | Banana | 22 | 358 |
/// +--------+----------------------+---------------------+
/// | Pear | 12 | 115 |
/// +--------+----------------------+---------------------+
/// ```
pub fn join<'a, J, S1: Selection<'a, J>, S2: Selection<'a, J>>(
&self,
other: &DataFrame,
left_on: S1,
right_on: S2,
how: JoinType,
suffix: Option<String>,
) -> Result<DataFrame> {
#[cfg(feature = "cross_join")]
if let JoinType::Cross = how {
return self.cross_join(other);
}
#[allow(unused_mut)]
let mut selected_left = self.select_series(left_on)?;
#[allow(unused_mut)]
let mut selected_right = other.select_series(right_on)?;
if selected_right.len() != selected_left.len() {
return Err(PolarsError::ValueError(
"the number of columns given as join key should be equal".into(),
));
}
if selected_left
.iter()
.zip(&selected_right)
.any(|(l, r)| l.dtype() != r.dtype())
{
return Err(PolarsError::ValueError("the dtype of the join keys don't match. first cast your columns to the correct dtype".into()));
}
#[cfg(feature = "dtype-categorical")]
for (l, r) in selected_left.iter().zip(&selected_right) {
check_categorical_src(l, r)?
}
// Single keys
if selected_left.len() == 1 {
let s_left = self.column(selected_left[0].name())?;
let s_right = other.column(selected_right[0].name())?;
return match how {
JoinType::Inner => self.inner_join_from_series(other, s_left, s_right, suffix),
JoinType::Left => self.left_join_from_series(other, s_left, s_right, suffix),
JoinType::Outer => self.outer_join_from_series(other, s_left, s_right, suffix),
#[cfg(feature = "asof_join")]
JoinType::AsOf => {
self.join_asof(other, selected_left[0].name(), selected_right[0].name())
}
#[cfg(feature = "cross_join")]
JoinType::Cross => {
unreachable!()
}
};
}
fn remove_selected(df: &DataFrame, selected: &[Series]) -> DataFrame {
let mut new = None;
for s in selected {
new = match new {
None => Some(df.drop(s.name()).unwrap()),
Some(new) => Some(new.drop(s.name()).unwrap()),
}
}
new.unwrap()
}
// hack for a macro
impl DataFrame {
fn len(&self) -> usize {
self.height()
}
}
// make sure that we don't have logical types.
// we don't overwrite the original selected as that might be used to create a column in the new df
let selected_left_physical = to_physical(&selected_left);
let selected_right_physical = to_physical(&selected_right);
// multiple keys
match how {
JoinType::Inner => {
let left = DataFrame::new_no_checks(selected_left_physical);
let right = DataFrame::new_no_checks(selected_right_physical);
let (left, right, swap) = det_hash_prone_order!(left, right);
let join_tuples = inner_join_multiple_keys(&left, &right, swap);
let (df_left, df_right) = POOL.join(
|| self.create_left_df(&join_tuples, false),
|| unsafe {
// remove join columns
remove_selected(other, &selected_right).take_iter_unchecked(
join_tuples.iter().map(|(_left, right)| *right as usize),
)
},
);
self.finish_join(df_left, df_right, suffix)
}
JoinType::Left => {
let left = DataFrame::new_no_checks(selected_left_physical);
let right = DataFrame::new_no_checks(selected_right_physical);
let join_tuples = left_join_multiple_keys(&left, &right);
let (df_left, df_right) = POOL.join(
|| self.create_left_df(&join_tuples, true),
|| unsafe {
// remove join columns
remove_selected(other, &selected_right).take_opt_iter_unchecked(
join_tuples
.iter()
.map(|(_left, right)| right.map(|i| i as usize)),
)
},
);
self.finish_join(df_left, df_right, suffix)
}
JoinType::Outer => {
let left = DataFrame::new_no_checks(selected_left_physical);
let right = DataFrame::new_no_checks(selected_right_physical);
let (left, right, swap) = det_hash_prone_order!(left, right);
let opt_join_tuples = outer_join_multiple_keys(&left, &right, swap);
// Take the left and right dataframes by join tuples
let (mut df_left, df_right) = POOL.join(
|| unsafe {
remove_selected(self, &selected_left).take_opt_iter_unchecked(
opt_join_tuples
.iter()
.map(|(left, _right)| left.map(|i| i as usize)),
)
},
|| unsafe {
remove_selected(other, &selected_right).take_opt_iter_unchecked(
opt_join_tuples
.iter()
.map(|(_left, right)| right.map(|i| i as usize)),
)
},
);
for (s_left, s_right) in selected_left.iter().zip(&selected_right) {
let mut s = s_left.zip_outer_join_column(s_right, &opt_join_tuples);
s.rename(s_left.name());
df_left.hstack_mut(&[s])?;
}
self.finish_join(df_left, df_right, suffix)
}
#[cfg(feature = "asof_join")]
JoinType::AsOf => Err(PolarsError::ValueError(
"asof join not supported for join on multiple keys".into(),
)),
#[cfg(feature = "cross_join")]
JoinType::Cross => {
unreachable!()
}
}
}
/// Perform an inner join on two DataFrames.
///
/// # Example
///
/// ```
/// use polars_core::prelude::*;
/// fn join_dfs(left: &DataFrame, right: &DataFrame) -> Result<DataFrame> {
/// left.inner_join(right, "join_column_left", "join_column_right")
/// }
/// ```
pub fn inner_join(
&self,
other: &DataFrame,
left_on: &str,
right_on: &str,
) -> Result<DataFrame> {
let s_left = self.column(left_on)?;
let s_right = other.column(right_on)?;
self.inner_join_from_series(other, s_left, s_right, None)
}
pub(crate) fn inner_join_from_series(
&self,
other: &DataFrame,
s_left: &Series,
s_right: &Series,
suffix: Option<String>,
) -> Result<DataFrame> {
#[cfg(feature = "dtype-categorical")]
check_categorical_src(s_left, s_right)?;
let join_tuples = s_left.hash_join_inner(s_right);
let (df_left, df_right) = POOL.join(
|| self.create_left_df(&join_tuples, false),
|| unsafe {
other
.drop(s_right.name())
.unwrap()
.take_iter_unchecked(join_tuples.iter().map(|(_left, right)| *right as usize))
},
);
self.finish_join(df_left, df_right, suffix)
}
/// Perform a left join on two DataFrames
/// # Example
///
/// ```
/// use polars_core::prelude::*;
/// fn join_dfs(left: &DataFrame, right: &DataFrame) -> Result<DataFrame> {
/// left.left_join(right, "join_column_left", "join_column_right")
/// }
/// ```
pub fn left_join(&self, other: &DataFrame, left_on: &str, right_on: &str) -> Result<DataFrame> {
let s_left = self.column(left_on)?;
let s_right = other.column(right_on)?;
self.left_join_from_series(other, s_left, s_right, None)
}
pub(crate) fn left_join_from_series(
&self,
other: &DataFrame,
s_left: &Series,
s_right: &Series,
suffix: Option<String>,
) -> Result<DataFrame> {
#[cfg(feature = "dtype-categorical")]
check_categorical_src(s_left, s_right)?;
let opt_join_tuples = s_left.hash_join_left(s_right);
let (df_left, df_right) = POOL.join(
|| self.create_left_df(&opt_join_tuples, true),
|| unsafe {
other.drop(s_right.name()).unwrap().take_opt_iter_unchecked(
opt_join_tuples
.iter()
.map(|(_left, right)| right.map(|i| i as usize)),
)
},
);
self.finish_join(df_left, df_right, suffix)
}
/// Perform an outer join on two DataFrames
/// # Example
///
/// ```
/// use polars_core::prelude::*;
/// fn join_dfs(left: &DataFrame, right: &DataFrame) -> Result<DataFrame> {
/// left.outer_join(right, "join_column_left", "join_column_right")
/// }
/// ```
pub fn outer_join(
&self,
other: &DataFrame,
left_on: &str,
right_on: &str,
) -> Result<DataFrame> {
let s_left = self.column(left_on)?;
let s_right = other.column(right_on)?;
self.outer_join_from_series(other, s_left, s_right, None)
}
pub(crate) fn outer_join_from_series(
&self,
other: &DataFrame,
s_left: &Series,
s_right: &Series,
suffix: Option<String>,
) -> Result<DataFrame> {
#[cfg(feature = "dtype-categorical")]
check_categorical_src(s_left, s_right)?;
// Get the indexes of the joined relations
let opt_join_tuples = s_left.hash_join_outer(s_right);
// Take the left and right dataframes by join tuples
let (mut df_left, df_right) = POOL.join(
|| unsafe {
self.drop(s_left.name()).unwrap().take_opt_iter_unchecked(
opt_join_tuples
.iter()
.map(|(left, _right)| left.map(|i| i as usize)),
)
},
|| unsafe {
other.drop(s_right.name()).unwrap().take_opt_iter_unchecked(
opt_join_tuples
.iter()
.map(|(_left, right)| right.map(|i| i as usize)),
)
},
);
let mut s = s_left.zip_outer_join_column(s_right, &opt_join_tuples);
s.rename(s_left.name());
df_left.hstack_mut(&[s])?;
self.finish_join(df_left, df_right, suffix)
}
}
#[cfg(test)]
mod test {
use crate::df;
use crate::prelude::*;
fn create_frames() -> (DataFrame, DataFrame) {
let s0 = Series::new("days", &[0, 1, 2]);
let s1 = Series::new("temp", &[22.1, 19.9, 7.]);
let s2 = Series::new("rain", &[0.2, 0.1, 0.3]);
let temp = DataFrame::new(vec![s0, s1, s2]).unwrap();
let s0 = Series::new("days", &[1, 2, 3, 1]);
let s1 = Series::new("rain", &[0.1, 0.2, 0.3, 0.4]);
let rain = DataFrame::new(vec![s0, s1]).unwrap();
(temp, rain)
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_inner_join() {
let (temp, rain) = create_frames();
for i in 1..8 {
std::env::set_var("POLARS_MAX_THREADS", format!("{}", i));
let joined = temp.inner_join(&rain, "days", "days").unwrap();
let join_col_days = Series::new("days", &[1, 2, 1]);
let join_col_temp = Series::new("temp", &[19.9, 7., 19.9]);
let join_col_rain = Series::new("rain", &[0.1, 0.3, 0.1]);
let join_col_rain_right = Series::new("rain_right", [0.1, 0.2, 0.4].as_ref());
let true_df = DataFrame::new(vec![
join_col_days,
join_col_temp,
join_col_rain,
join_col_rain_right,
])
.unwrap();
println!("{}", joined);
assert!(joined.frame_equal(&true_df));
}
}
#[test]
#[allow(clippy::float_cmp)]
#[cfg_attr(miri, ignore)]
fn test_left_join() {
for i in 1..8 {
std::env::set_var("POLARS_MAX_THREADS", format!("{}", i));
let s0 = Series::new("days", &[0, 1, 2, 3, 4]);
let s1 = Series::new("temp", &[22.1, 19.9, 7., 2., 3.]);
let temp = DataFrame::new(vec![s0, s1]).unwrap();
let s0 = Series::new("days", &[1, 2]);
let s1 = Series::new("rain", &[0.1, 0.2]);
let rain = DataFrame::new(vec![s0, s1]).unwrap();
let joined = temp.left_join(&rain, "days", "days").unwrap();
println!("{}", &joined);
assert_eq!(
(joined.column("rain").unwrap().sum::<f32>().unwrap() * 10.).round(),
3.
);
assert_eq!(joined.column("rain").unwrap().null_count(), 3);
// test join on utf8
let s0 = Series::new("days", &["mo", "tue", "wed", "thu", "fri"]);
let s1 = Series::new("temp", &[22.1, 19.9, 7., 2., 3.]);
let temp = DataFrame::new(vec![s0, s1]).unwrap();
let s0 = Series::new("days", &["tue", "wed"]);
let s1 = Series::new("rain", &[0.1, 0.2]);
let rain = DataFrame::new(vec![s0, s1]).unwrap();
let joined = temp.left_join(&rain, "days", "days").unwrap();
println!("{}", &joined);
assert_eq!(
(joined.column("rain").unwrap().sum::<f32>().unwrap() * 10.).round(),
3.
);
assert_eq!(joined.column("rain").unwrap().null_count(), 3);
}
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_outer_join() -> Result<()> {
let (temp, rain) = create_frames();
let joined = temp.outer_join(&rain, "days", "days")?;
println!("{:?}", &joined);
assert_eq!(joined.height(), 5);
assert_eq!(joined.column("days")?.sum::<i32>(), Some(7));
let df_left = df!(
"a"=> ["a", "b", "a", "z"],
"b"=>[1, 2, 3, 4],
"c"=>[6, 5, 4, 3]
)?;
let df_right = df!(
"a"=> ["b", "c", "b", "a"],
"k"=> [0, 3, 9, 6],
"c"=> [1, 0, 2, 1]
)?;
let out = df_left.outer_join(&df_right, "a", "a")?;
assert_eq!(out.column("c_right")?.null_count(), 1);
Ok(())
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_join_with_nulls() {
let dts = &[20, 21, 22, 23, 24, 25, 27, 28];
let vals = &[1.2, 2.4, 4.67, 5.8, 4.4, 3.6, 7.6, 6.5];
let df = DataFrame::new(vec![Series::new("date", dts), Series::new("val", vals)]).unwrap();
let vals2 = &[Some(1.1), None, Some(3.3), None, None];
let df2 = DataFrame::new(vec![
Series::new("date", &dts[3..]),
Series::new("val2", vals2),
])
.unwrap();
let joined = df.left_join(&df2, "date", "date").unwrap();
assert_eq!(
joined
.column("val2")
.unwrap()
.f64()
.unwrap()
.get(joined.height() - 1),
None
);
}
fn get_dfs() -> (DataFrame, DataFrame) {
let df_a = df! {
"a" => &[1, 2, 1, 1],
"b" => &["a", "b", "c", "c"],
"c" => &[0, 1, 2, 3]
}
.unwrap();
let df_b = df! {
"foo" => &[1, 1, 1],
"bar" => &["a", "c", "c"],
"ham" => &["let", "var", "const"]
}
.unwrap();
(df_a, df_b)
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_join_multiple_columns() {
let (mut df_a, mut df_b) = get_dfs();
// First do a hack with concatenated string dummy column
let mut s = df_a
.column("a")
.unwrap()
.cast(&DataType::Utf8)
.unwrap()
.utf8()
.unwrap()
+ df_a.column("b").unwrap().utf8().unwrap();
s.rename("dummy");
df_a.with_column(s).unwrap();
let mut s = df_b
.column("foo")
.unwrap()
.cast(&DataType::Utf8)
.unwrap()
.utf8()
.unwrap()
+ df_b.column("bar").unwrap().utf8().unwrap();
s.rename("dummy");
df_b.with_column(s).unwrap();
let joined = df_a.left_join(&df_b, "dummy", "dummy").unwrap();
let ham_col = joined.column("ham").unwrap();
let ca = ham_col.utf8().unwrap();
let correct_ham = &[
Some("let"),
None,
Some("var"),
Some("const"),
Some("var"),
Some("const"),
];
assert_eq!(Vec::from(ca), correct_ham);
// now check the join with multiple columns
let joined = df_a
.join(&df_b, &["a", "b"], &["foo", "bar"], JoinType::Left, None)
.unwrap();
let ca = joined.column("ham").unwrap().utf8().unwrap();
dbg!(&df_a, &df_b);
assert_eq!(Vec::from(ca), correct_ham);
let joined_inner_hack = df_a.inner_join(&df_b, "dummy", "dummy").unwrap();
let joined_inner = df_a
.join(&df_b, &["a", "b"], &["foo", "bar"], JoinType::Inner, None)
.unwrap();
dbg!(&joined_inner_hack, &joined_inner);
assert!(joined_inner_hack
.column("ham")
.unwrap()
.series_equal_missing(joined_inner.column("ham").unwrap()));
let joined_outer_hack = df_a.outer_join(&df_b, "dummy", "dummy").unwrap();
let joined_outer = df_a
.join(&df_b, &["a", "b"], &["foo", "bar"], JoinType::Outer, None)
.unwrap();
assert!(joined_outer_hack
.column("ham")
.unwrap()
.series_equal_missing(joined_outer.column("ham").unwrap()));
}
#[test]
#[cfg_attr(miri, ignore)]
#[cfg(feature = "dtype-categorical")]
fn test_join_categorical() {
use crate::toggle_string_cache;
let _lock = crate::SINGLE_LOCK.lock();
toggle_string_cache(true);
let (mut df_a, mut df_b) = get_dfs();
df_a.may_apply("b", |s| s.cast(&DataType::Categorical))
.unwrap();
df_b.may_apply("bar", |s| s.cast(&DataType::Categorical))
.unwrap();
let out = df_a.join(&df_b, "b", "bar", JoinType::Left, None).unwrap();
assert_eq!(out.shape(), (6, 5));
let correct_ham = &[
Some("let"),
None,
Some("var"),
Some("const"),
Some("var"),
Some("const"),
];
let ham_col = out.column("ham").unwrap();
let ca = ham_col.utf8().unwrap();
assert_eq!(Vec::from(ca), correct_ham);
// Test an error when joining on different string cache
let (mut df_a, mut df_b) = get_dfs();
df_a.may_apply("b", |s| s.cast(&DataType::Categorical))
.unwrap();
// create a new cache
toggle_string_cache(false);
toggle_string_cache(true);
df_b.may_apply("bar", |s| s.cast(&DataType::Categorical))
.unwrap();
let out = df_a.join(&df_b, "b", "bar", JoinType::Left, None);
assert!(out.is_err())
}
#[test]
#[cfg_attr(miri, ignore)]
fn empty_df_join() {
let empty: Vec<String> = vec![];
let left = DataFrame::new(vec![
Series::new("key", &empty),
Series::new("lval", &empty),
])
.unwrap();
let right = DataFrame::new(vec![
Series::new("key", &["foo"]),
Series::new("rval", &[4]),
])
.unwrap();
let res = left.inner_join(&right, "key", "key");
assert!(res.is_ok());
assert_eq!(res.unwrap().height(), 0);
right.left_join(&left, "key", "key").unwrap();
right.inner_join(&left, "key", "key").unwrap();
right.outer_join(&left, "key", "key").unwrap();
}
#[test]
#[cfg_attr(miri, ignore)]
fn unit_df_join() -> Result<()> {
let df1 = df![
"a" => [1],
"b" => [2]
]?;
let df2 = df![
"a" => [1, 2, 3, 4],
"b" => [Some(1), None, Some(3), Some(4)]
]?;
let out = df1.left_join(&df2, "a", "a")?;
let expected = df![
"a" => [1],
"b" => [2],
"b_right" => [1]
]?;
assert!(out.frame_equal(&expected));
Ok(())
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_join_err() -> Result<()> {
let df1 = df![
"a" => [1, 2],
"b" => ["foo", "bar"]
]?;
let df2 = df![
"a" => [1, 2, 3, 4],
"b" => [true, true, true, false]
]?;
// dtypes don't match, error
assert!(df1
.join(&df2, vec!["a", "b"], vec!["a", "b"], JoinType::Left, None)
.is_err());
// length of join keys don't match error
assert!(df1
.join(&df2, vec!["a"], vec!["a", "b"], JoinType::Left, None)
.is_err());
Ok(())
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_joins_with_duplicates() -> Result<()> {
// test joins with duplicates in both dataframes
let df_left = df![
"col1" => [1, 1, 2],
"int_col" => [1, 2, 3]
]
.unwrap();
let df_right = df![
"join_col1" => [1, 1, 1, 1, 1, 3],
"dbl_col" => [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
]
.unwrap();
let df_inner_join = df_left.inner_join(&df_right, "col1", "join_col1").unwrap();
assert_eq!(df_inner_join.height(), 10);
assert_eq!(df_inner_join.column("col1")?.null_count(), 0);
assert_eq!(df_inner_join.column("int_col")?.null_count(), 0);
assert_eq!(df_inner_join.column("dbl_col")?.null_count(), 0);
let df_left_join = df_left.left_join(&df_right, "col1", "join_col1").unwrap();
assert_eq!(df_left_join.height(), 11);
assert_eq!(df_left_join.column("col1")?.null_count(), 0);
assert_eq!(df_left_join.column("int_col")?.null_count(), 0);
assert_eq!(df_left_join.column("dbl_col")?.null_count(), 1);
let df_outer_join = df_left.outer_join(&df_right, "col1", "join_col1").unwrap();
assert_eq!(df_outer_join.height(), 12);
assert_eq!(df_outer_join.column("col1")?.null_count(), 0);
assert_eq!(df_outer_join.column("int_col")?.null_count(), 1);
assert_eq!(df_outer_join.column("dbl_col")?.null_count(), 1);
Ok(())
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_multi_joins_with_duplicates() -> Result<()> {
// test joins with multiple join columns and duplicates in both
// dataframes
let df_left = df![
"col1" => [1, 1, 1],
"join_col2" => ["a", "a", "b"],
"int_col" => [1, 2, 3]
]
.unwrap();
let df_right = df![
"join_col1" => [1, 1, 1, 1, 1, 2],
"col2" => ["a", "a", "a", "a", "a", "c"],
"dbl_col" => [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
]
.unwrap();
let df_inner_join = df_left
.join(
&df_right,
&["col1", "join_col2"],
&["join_col1", "col2"],
JoinType::Inner,
None,
)
.unwrap();
assert_eq!(df_inner_join.height(), 10);
assert_eq!(df_inner_join.column("col1")?.null_count(), 0);
assert_eq!(df_inner_join.column("join_col2")?.null_count(), 0);
assert_eq!(df_inner_join.column("int_col")?.null_count(), 0);
assert_eq!(df_inner_join.column("dbl_col")?.null_count(), 0);
let df_left_join = df_left
.join(
&df_right,
&["col1", "join_col2"],
&["join_col1", "col2"],
JoinType::Left,
None,
)
.unwrap();
assert_eq!(df_left_join.height(), 11);
assert_eq!(df_left_join.column("col1")?.null_count(), 0);
assert_eq!(df_left_join.column("join_col2")?.null_count(), 0);
assert_eq!(df_left_join.column("int_col")?.null_count(), 0);
assert_eq!(df_left_join.column("dbl_col")?.null_count(), 1);
let df_outer_join = df_left
.join(
&df_right,
&["col1", "join_col2"],
&["join_col1", "col2"],
JoinType::Outer,
None,
)
.unwrap();
assert_eq!(df_outer_join.height(), 12);
assert_eq!(df_outer_join.column("col1")?.null_count(), 0);
assert_eq!(df_outer_join.column("join_col2")?.null_count(), 0);
assert_eq!(df_outer_join.column("int_col")?.null_count(), 1);
assert_eq!(df_outer_join.column("dbl_col")?.null_count(), 1);
Ok(())
}
#[test]
#[cfg_attr(miri, ignore)]
#[cfg(feature = "dtype-u64")]
fn test_join_floats() -> Result<()> {
let df_a = df! {
"a" => &[1.0, 2.0, 1.0, 1.0],
"b" => &["a", "b", "c", "c"],
"c" => &[0.0, 1.0, 2.0, 3.0]
}?;
let df_b = df! {
"foo" => &[1.0, 2.0, 1.0],
"bar" => &[1.0, 1.0, 1.0],
"ham" => &["let", "var", "const"]
}?;
let out = df_a.join(
&df_b,
vec!["a", "c"],
vec!["foo", "bar"],
JoinType::Left,
None,
)?;
assert_eq!(
Vec::from(out.column("ham")?.utf8()?),
&[None, Some("var"), None, None]
);
let out = df_a.join(
&df_b,
vec!["a", "c"],
vec!["foo", "bar"],
JoinType::Outer,
None,
)?;
assert_eq!(
out.dtypes(),
&[
DataType::Utf8,
DataType::Float64,
DataType::Float64,
DataType::Utf8
]
);
Ok(())
}
}
| {
self.apply_as_ints(|s| {
s.zip_outer_join_column(
&right_column.bit_repr_small().into_series(),
opt_join_tuples,
)
})
} |
order_manager.py | from typing import List
from tmtrader.entity.order import BasicOrder, FilledBasicOrder
from tmtrader.entity.trade import Trade
class OrderManager:
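    """Keeps track of open, filled and cancelled orders, as well as the resulting trades."""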
def __init__(self):
self.__open_orders: List[BasicOrder] = list()
self.__filled_orders: List[FilledBasicOrder] = list()
# TODO: refactor and define ClosedOrder and CancelledOrder
self.__cancelled_orders: List[BasicOrder] = list()
self.__trades: List[Trade] = list()
@property
def open_orders(self) -> List[BasicOrder]:
return self.__open_orders
@property
def filled_orders(self) -> List[FilledBasicOrder]:
return self.__filled_orders
@property
def cancelled_orders(self) -> List[BasicOrder]:
|
@property
def trades(self) -> List[Trade]:
return self.__trades
def add_open_orders(self, orders: List[BasicOrder]):
self.__open_orders.extend(orders)
def add_filled_orders(self, orders: List[FilledBasicOrder]):
self.__filled_orders.extend(orders)
def add_cancelled_orders(self, orders: List[BasicOrder]):
self.__cancelled_orders.extend(orders)
def add_trades(self, trades: List[Trade]):
self.__trades.extend(trades)
| return self.__cancelled_orders |
database.rs | // Copyright 2019. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::output_manager_service::{
error::OutputManagerStorageError,
service::Balance,
storage::models::{DbUnblindedOutput, KnownOneSidedPaymentScript, OutputStatus},
TxId,
};
use aes_gcm::Aes256Gcm;
use log::*;
use std::{
fmt::{Display, Error, Formatter},
sync::Arc,
};
use tari_common_types::types::{BlindingFactor, Commitment, HashOutput, PrivateKey};
use tari_core::transactions::transaction::TransactionOutput;
const LOG_TARGET: &str = "wallet::output_manager_service::database";
/// This trait defines the required behaviour that a storage backend must provide for the Output Manager service.
/// Data is passed to and from the backend via the [DbKey], [DbValue], and [DbValueKey] enums. If new data types are
/// required to be supported by the backends then these enums can be updated to reflect this requirement and the trait
/// will remain the same
pub trait OutputManagerBackend: Send + Sync + Clone {
/// Retrieve the record associated with the provided DbKey
fn fetch(&self, key: &DbKey) -> Result<Option<DbValue>, OutputManagerStorageError>;
/// Retrieve outputs that have been mined but not spent yet (have not been deleted)
fn fetch_mined_unspent_outputs(&self) -> Result<Vec<DbUnblindedOutput>, OutputManagerStorageError>;
/// Retrieve outputs that have not been found or confirmed in the block chain yet
fn fetch_unconfirmed_outputs(&self) -> Result<Vec<DbUnblindedOutput>, OutputManagerStorageError>;
/// Modify the state the of the backend with a write operation
fn write(&self, op: WriteOperation) -> Result<Option<DbValue>, OutputManagerStorageError>;
fn fetch_pending_incoming_outputs(&self) -> Result<Vec<DbUnblindedOutput>, OutputManagerStorageError>;
fn set_received_output_mined_height(
&self,
hash: Vec<u8>,
mined_height: u64,
mined_in_block: Vec<u8>,
mmr_position: u64,
confirmed: bool,
) -> Result<(), OutputManagerStorageError>;
fn set_output_to_unmined(&self, hash: Vec<u8>) -> Result<(), OutputManagerStorageError>;
fn mark_output_as_spent(
&self,
hash: Vec<u8>,
mark_deleted_at_height: u64,
mark_deleted_in_block: Vec<u8>,
confirmed: bool,
) -> Result<(), OutputManagerStorageError>;
fn mark_output_as_unspent(&self, hash: Vec<u8>) -> Result<(), OutputManagerStorageError>;
    /// This method encumbers the specified outputs into a `PendingTransactionOutputs` record. This is a short-term
    /// encumbrance in case the app is closed or crashes before transaction negotiation is complete. These will be
    /// cleared on startup of the service.
fn short_term_encumber_outputs(
&self,
tx_id: TxId,
outputs_to_send: &[DbUnblindedOutput],
outputs_to_receive: &[DbUnblindedOutput],
) -> Result<(), OutputManagerStorageError>;
/// This method confirms that a transaction negotiation is complete and outputs can be fully encumbered. This
/// reserves these outputs until the transaction is confirmed or cancelled
fn confirm_encumbered_outputs(&self, tx_id: TxId) -> Result<(), OutputManagerStorageError>;
    /// Clear all pending transaction encumbrances marked as short term. These are the result of an unfinished
    /// transaction negotiation.
fn clear_short_term_encumberances(&self) -> Result<(), OutputManagerStorageError>;
/// This method must take all the `outputs_to_be_spent` from the specified transaction and move them back into the
    /// `UnspentOutputs` pool. The `outputs_to_be_received` will be marked as cancelled inbound outputs in case they
/// need to be recovered.
fn cancel_pending_transaction(&self, tx_id: TxId) -> Result<(), OutputManagerStorageError>;
/// This method will increment the currently stored key index for the key manager config. Increment this after each
/// key is generated
fn increment_key_index(&self) -> Result<(), OutputManagerStorageError>;
/// This method will set the currently stored key index for the key manager
fn set_key_index(&self, index: u64) -> Result<(), OutputManagerStorageError>;
/// This method will update an output's metadata signature, akin to 'finalize output'
fn update_output_metadata_signature(&self, output: &TransactionOutput) -> Result<(), OutputManagerStorageError>;
/// If an invalid output is found to be valid this function will turn it back into an unspent output
fn revalidate_unspent_output(&self, spending_key: &Commitment) -> Result<(), OutputManagerStorageError>;
/// Apply encryption to the backend.
fn apply_encryption(&self, cipher: Aes256Gcm) -> Result<(), OutputManagerStorageError>;
/// Remove encryption from the backend.
fn remove_encryption(&self) -> Result<(), OutputManagerStorageError>;
/// Get the output that was most recently mined, ordered descending by mined height
fn get_last_mined_output(&self) -> Result<Option<DbUnblindedOutput>, OutputManagerStorageError>;
/// Get the output that was most recently spent, ordered descending by mined height
fn get_last_spent_output(&self) -> Result<Option<DbUnblindedOutput>, OutputManagerStorageError>;
    /// Check if there is a pending coinbase transaction at this block height; if there is, clear it.
fn clear_pending_coinbase_transaction_at_block_height(
&self,
block_height: u64,
) -> Result<(), OutputManagerStorageError>;
/// Set if a coinbase output is abandoned or not
fn set_coinbase_abandoned(&self, tx_id: TxId, abandoned: bool) -> Result<(), OutputManagerStorageError>;
/// Reinstate a cancelled inbound output
fn reinstate_cancelled_inbound_output(&self, tx_id: TxId) -> Result<(), OutputManagerStorageError>;
/// Return the available, time locked, pending incoming and pending outgoing balance
fn get_balance(
&self,
current_tip_for_time_lock_calculation: Option<u64>,
) -> Result<Balance, OutputManagerStorageError>;
}
/// Holds the state of the KeyManager being used by the Output Manager Service
#[derive(Clone, Debug, PartialEq)]
pub struct KeyManagerState {
pub master_key: PrivateKey,
pub branch_seed: String,
pub primary_key_index: u64,
}
#[derive(Debug, Clone, PartialEq)]
pub enum DbKey {
SpentOutput(BlindingFactor),
UnspentOutput(BlindingFactor),
AnyOutputByCommitment(Commitment),
TimeLockedUnspentOutputs(u64),
UnspentOutputs,
SpentOutputs,
KeyManagerState,
InvalidOutputs,
KnownOneSidedPaymentScripts,
OutputsByTxIdAndStatus(TxId, OutputStatus),
}
#[derive(Debug)]
pub enum DbValue {
SpentOutput(Box<DbUnblindedOutput>),
UnspentOutput(Box<DbUnblindedOutput>),
UnspentOutputs(Vec<DbUnblindedOutput>),
SpentOutputs(Vec<DbUnblindedOutput>),
InvalidOutputs(Vec<DbUnblindedOutput>),
KeyManagerState(KeyManagerState),
KnownOneSidedPaymentScripts(Vec<KnownOneSidedPaymentScript>),
AnyOutput(Box<DbUnblindedOutput>),
AnyOutputs(Vec<DbUnblindedOutput>),
}
pub enum DbKeyValuePair {
UnspentOutput(Commitment, Box<DbUnblindedOutput>),
UnspentOutputWithTxId(Commitment, (TxId, Box<DbUnblindedOutput>)),
OutputToBeReceived(Commitment, (TxId, Box<DbUnblindedOutput>, Option<u64>)),
KeyManagerState(KeyManagerState),
KnownOneSidedPaymentScripts(KnownOneSidedPaymentScript),
}
pub enum WriteOperation {
Insert(DbKeyValuePair),
Remove(DbKey),
}
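// A sketch of the round trip through these enums (it mirrors the service methods further below,
// e.g. `set_key_manager_state` / `get_key_manager_state`): a record is stored with
// `write(WriteOperation::Insert(DbKeyValuePair::KeyManagerState(state)))` and later read back
// with `fetch(&DbKey::KeyManagerState)`, which yields `Some(DbValue::KeyManagerState(state))`.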
/// This structure holds an inner type that implements the `OutputManagerBackend` trait and contains the more complex
/// data access logic required by the module built onto the functionality defined by the trait
#[derive(Clone)]
pub struct OutputManagerDatabase<T> {
db: Arc<T>,
}
impl<T> OutputManagerDatabase<T>
where T: OutputManagerBackend + 'static
{
pub fn new(db: T) -> Self {
Self { db: Arc::new(db) }
}
pub async fn get_key_manager_state(&self) -> Result<Option<KeyManagerState>, OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || match db_clone.fetch(&DbKey::KeyManagerState) {
Ok(None) => Ok(None),
Ok(Some(DbValue::KeyManagerState(c))) => Ok(Some(c)),
Ok(Some(other)) => unexpected_result(DbKey::KeyManagerState, other),
Err(e) => log_error(DbKey::KeyManagerState, e),
})
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))
.and_then(|inner_result| inner_result)
}
pub async fn set_key_manager_state(&self, state: KeyManagerState) -> Result<(), OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || {
db_clone.write(WriteOperation::Insert(DbKeyValuePair::KeyManagerState(state)))
})
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(())
}
pub async fn increment_key_index(&self) -> Result<(), OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || db_clone.increment_key_index())
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(())
}
pub async fn set_key_index(&self, index: u64) -> Result<(), OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || db_clone.set_key_index(index))
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(())
}
pub async fn add_unspent_output(&self, output: DbUnblindedOutput) -> Result<(), OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || {
db_clone.write(WriteOperation::Insert(DbKeyValuePair::UnspentOutput(
output.commitment.clone(),
Box::new(output),
)))
})
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(())
}
pub async fn add_unspent_output_with_tx_id(
&self,
tx_id: TxId,
output: DbUnblindedOutput,
) -> Result<(), OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || {
db_clone.write(WriteOperation::Insert(DbKeyValuePair::UnspentOutputWithTxId(
output.commitment.clone(),
(tx_id, Box::new(output)),
)))
})
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(())
}
pub async fn add_output_to_be_received(
&self,
tx_id: TxId,
output: DbUnblindedOutput,
coinbase_block_height: Option<u64>,
) -> Result<(), OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || {
db_clone.write(WriteOperation::Insert(DbKeyValuePair::OutputToBeReceived(
output.commitment.clone(),
(tx_id, Box::new(output), coinbase_block_height),
)))
})
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(())
}
pub async fn get_balance(
&self,
current_tip_for_time_lock_calculation: Option<u64>,
) -> Result<Balance, OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || db_clone.get_balance(current_tip_for_time_lock_calculation))
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))?
}
/// This method is called when a transaction is built to be sent. It will encumber unspent outputs against a pending
/// transaction in the short term.
pub async fn encumber_outputs(
&self,
tx_id: TxId,
outputs_to_send: Vec<DbUnblindedOutput>,
outputs_to_receive: Vec<DbUnblindedOutput>,
) -> Result<(), OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || {
db_clone.short_term_encumber_outputs(tx_id, &outputs_to_send, &outputs_to_receive)
})
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))
.and_then(|inner_result| inner_result)
}
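// Illustrative call sequence for the encumbrance flow (variable names here are
// assumptions, not defined in this module): outputs are first short-term
// encumbered while a transaction is negotiated, then either confirmed or cancelled.
//
// db.encumber_outputs(tx_id, outputs_to_send, outputs_to_receive).await?;
// // ... negotiation completes ...
// db.confirm_encumbered_outputs(tx_id).await?;
// // ... or, if negotiation fails:
// db.cancel_pending_transaction_outputs(tx_id).await?;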
/// This method is called when a transaction is finished being negotiated. This will fully encumber the outputs
/// against a pending transaction.
pub async fn confirm_encumbered_outputs(&self, tx_id: TxId) -> Result<(), OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || db_clone.confirm_encumbered_outputs(tx_id))
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))
.and_then(|inner_result| inner_result)
}
/// Clear all pending transaction encumbrances marked as short term. These are the result of an unfinished
/// transaction negotiation.
pub async fn clear_short_term_encumberances(&self) -> Result<(), OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || db_clone.clear_short_term_encumberances())
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))
.and_then(|inner_result| inner_result)
}
/// When a pending transaction is cancelled the encumbered outputs are moved back to the `unspent_outputs`
/// collection.
pub async fn cancel_pending_transaction_outputs(&self, tx_id: TxId) -> Result<(), OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || db_clone.cancel_pending_transaction(tx_id))
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))
.and_then(|inner_result| inner_result)
}
/// Check if there is a pending coinbase transaction at this block height; if there is, clear it.
pub async fn clear_pending_coinbase_transaction_at_block_height(
&self,
block_height: u64,
) -> Result<(), OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || db_clone.clear_pending_coinbase_transaction_at_block_height(block_height))
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))
.and_then(|inner_result| inner_result)
}
/// Retrieves UTXOs sorted by value from smallest to largest.
pub async fn fetch_sorted_unspent_outputs(&self) -> Result<Vec<DbUnblindedOutput>, OutputManagerStorageError> {
let db_clone = self.db.clone();
let mut uo = tokio::task::spawn_blocking(move || match db_clone.fetch(&DbKey::UnspentOutputs) {
Ok(None) => log_error(
DbKey::UnspentOutputs,
OutputManagerStorageError::UnexpectedResult("Could not retrieve unspent outputs".to_string()),
),
Ok(Some(DbValue::UnspentOutputs(uo))) => Ok(uo),
Ok(Some(other)) => unexpected_result(DbKey::UnspentOutputs, other),
Err(e) => log_error(DbKey::UnspentOutputs, e),
})
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
uo.sort();
Ok(uo)
}
pub async fn fetch_spent_outputs(&self) -> Result<Vec<DbUnblindedOutput>, OutputManagerStorageError> {
let db_clone = self.db.clone();
let uo = tokio::task::spawn_blocking(move || match db_clone.fetch(&DbKey::SpentOutputs) {
Ok(None) => log_error(
DbKey::SpentOutputs,
OutputManagerStorageError::UnexpectedResult("Could not retrieve spent outputs".to_string()),
),
Ok(Some(DbValue::SpentOutputs(uo))) => Ok(uo),
Ok(Some(other)) => unexpected_result(DbKey::SpentOutputs, other),
Err(e) => log_error(DbKey::SpentOutputs, e),
})
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(uo)
}
pub async fn fetch_unconfirmed_outputs(&self) -> Result<Vec<DbUnblindedOutput>, OutputManagerStorageError> {
let db_clone = self.db.clone();
let utxos = tokio::task::spawn_blocking(move || db_clone.fetch_unconfirmed_outputs())
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(utxos)
}
pub async fn fetch_mined_unspent_outputs(&self) -> Result<Vec<DbUnblindedOutput>, OutputManagerStorageError> {
let db_clone = self.db.clone();
let utxos = tokio::task::spawn_blocking(move || db_clone.fetch_mined_unspent_outputs())
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(utxos)
}
pub async fn get_timelocked_outputs(&self, tip: u64) -> Result<Vec<DbUnblindedOutput>, OutputManagerStorageError> {
let db_clone = self.db.clone();
let uo = tokio::task::spawn_blocking(move || match db_clone.fetch(&DbKey::TimeLockedUnspentOutputs(tip)) {
Ok(None) => log_error(
DbKey::UnspentOutputs,
OutputManagerStorageError::UnexpectedResult("Could not retrieve unspent outputs".to_string()),
),
Ok(Some(DbValue::UnspentOutputs(uo))) => Ok(uo),
Ok(Some(other)) => unexpected_result(DbKey::UnspentOutputs, other),
Err(e) => log_error(DbKey::UnspentOutputs, e),
})
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(uo)
}
pub async fn get_invalid_outputs(&self) -> Result<Vec<DbUnblindedOutput>, OutputManagerStorageError> {
let db_clone = self.db.clone();
let uo = tokio::task::spawn_blocking(move || match db_clone.fetch(&DbKey::InvalidOutputs) {
Ok(None) => log_error(
DbKey::InvalidOutputs,
OutputManagerStorageError::UnexpectedResult("Could not retrieve invalid outputs".to_string()),
),
Ok(Some(DbValue::InvalidOutputs(uo))) => Ok(uo),
Ok(Some(other)) => unexpected_result(DbKey::InvalidOutputs, other),
Err(e) => log_error(DbKey::InvalidOutputs, e),
})
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(uo)
}
pub async fn update_output_metadata_signature(
&self,
output: TransactionOutput,
) -> Result<(), OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || db_clone.update_output_metadata_signature(&output))
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))
.and_then(|inner_result| inner_result)
}
pub async fn revalidate_output(&self, commitment: Commitment) -> Result<(), OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || db_clone.revalidate_unspent_output(&commitment))
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))
.and_then(|inner_result| inner_result)
}
pub async fn reinstate_cancelled_inbound_output(&self, tx_id: TxId) -> Result<(), OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || db_clone.reinstate_cancelled_inbound_output(tx_id))
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))
.and_then(|inner_result| inner_result)
}
pub async fn apply_encryption(&self, cipher: Aes256Gcm) -> Result<(), OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || db_clone.apply_encryption(cipher))
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))
.and_then(|inner_result| inner_result)
}
pub async fn remove_encryption(&self) -> Result<(), OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || db_clone.remove_encryption())
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))
.and_then(|inner_result| inner_result)
}
pub async fn get_all_known_one_sided_payment_scripts(
&self,
) -> Result<Vec<KnownOneSidedPaymentScript>, OutputManagerStorageError> {
let db_clone = self.db.clone();
let scripts = tokio::task::spawn_blocking(move || match db_clone.fetch(&DbKey::KnownOneSidedPaymentScripts) {
Ok(None) => log_error(
DbKey::KnownOneSidedPaymentScripts,
OutputManagerStorageError::UnexpectedResult("Could not retrieve known scripts".to_string()),
),
Ok(Some(DbValue::KnownOneSidedPaymentScripts(scripts))) => Ok(scripts),
Ok(Some(other)) => unexpected_result(DbKey::KnownOneSidedPaymentScripts, other),
Err(e) => log_error(DbKey::KnownOneSidedPaymentScripts, e),
})
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(scripts)
}
pub async fn get_last_mined_output(&self) -> Result<Option<DbUnblindedOutput>, OutputManagerStorageError> {
self.db.get_last_mined_output()
}
pub async fn get_last_spent_output(&self) -> Result<Option<DbUnblindedOutput>, OutputManagerStorageError> {
self.db.get_last_spent_output()
}
pub async fn add_known_script(
&self,
known_script: KnownOneSidedPaymentScript,
) -> Result<(), OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || {
db_clone.write(WriteOperation::Insert(DbKeyValuePair::KnownOneSidedPaymentScripts(
known_script,
)))
})
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(())
}
pub async fn remove_output_by_commitment(&self, commitment: Commitment) -> Result<(), OutputManagerStorageError> {
let db_clone = self.db.clone();
tokio::task::spawn_blocking(move || {
match db_clone.write(WriteOperation::Remove(DbKey::AnyOutputByCommitment(commitment.clone()))) {
Ok(None) => Ok(()),
Ok(Some(DbValue::AnyOutput(_))) => Ok(()),
Ok(Some(other)) => unexpected_result(DbKey::AnyOutputByCommitment(commitment.clone()), other),
Err(e) => log_error(DbKey::AnyOutputByCommitment(commitment), e),
}
})
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(())
}
pub async fn set_received_output_mined_height(
&self,
hash: HashOutput,
mined_height: u64,
mined_in_block: HashOutput,
mmr_position: u64,
confirmed: bool,
) -> Result<(), OutputManagerStorageError> {
let db = self.db.clone();
tokio::task::spawn_blocking(move || {
db.set_received_output_mined_height(hash, mined_height, mined_in_block, mmr_position, confirmed)
})
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(())
}
pub async fn set_output_to_unmined(&self, hash: HashOutput) -> Result<(), OutputManagerStorageError> {
let db = self.db.clone();
tokio::task::spawn_blocking(move || db.set_output_to_unmined(hash))
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(())
}
pub async fn mark_output_as_spent(
&self,
hash: HashOutput,
deleted_height: u64,
deleted_in_block: HashOutput,
confirmed: bool,
) -> Result<(), OutputManagerStorageError> {
let db = self.db.clone();
tokio::task::spawn_blocking(move || db.mark_output_as_spent(hash, deleted_height, deleted_in_block, confirmed))
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(())
}
pub async fn mark_output_as_unspent(&self, hash: HashOutput) -> Result<(), OutputManagerStorageError> {
let db = self.db.clone();
tokio::task::spawn_blocking(move || db.mark_output_as_unspent(hash))
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(())
}
pub async fn set_coinbase_abandoned(&self, tx_id: TxId, abandoned: bool) -> Result<(), OutputManagerStorageError> {
let db = self.db.clone();
tokio::task::spawn_blocking(move || db.set_coinbase_abandoned(tx_id, abandoned))
.await
.map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??;
Ok(())
}
}
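// Minimal usage sketch of the wrapper above, assuming `backend` is some type that
// implements `OutputManagerBackend` and `current_tip` is a known chain height:
//
// let db = OutputManagerDatabase::new(backend);
// let balance = db.get_balance(Some(current_tip)).await?;
// let unspent = db.fetch_sorted_unspent_outputs().await?;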
fn unexpected_result<T>(req: DbKey, res: DbValue) -> Result<T, OutputManagerStorageError> {
let msg = format!("Unexpected result for database query {}. Response: {}", req, res);
error!(target: LOG_TARGET, "{}", msg);
Err(OutputManagerStorageError::UnexpectedResult(msg))
}
impl Display for DbKey {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match self {
DbKey::SpentOutput(_) => f.write_str("Spent Output Key"),
DbKey::UnspentOutput(_) => f.write_str("Unspent Output Key"),
DbKey::UnspentOutputs => f.write_str("Unspent Outputs Key"),
DbKey::SpentOutputs => f.write_str("Spent Outputs Key"),
DbKey::KeyManagerState => f.write_str("Key Manager State"),
DbKey::InvalidOutputs => f.write_str("Invalid Outputs Key"),
DbKey::TimeLockedUnspentOutputs(_t) => f.write_str("Timelocked Outputs"),
DbKey::KnownOneSidedPaymentScripts => f.write_str("Known claiming scripts"),
DbKey::AnyOutputByCommitment(_) => f.write_str("AnyOutputByCommitment"),
DbKey::OutputsByTxIdAndStatus(_, _) => f.write_str("OutputsByTxIdAndStatus"),
}
}
}
impl Display for DbValue {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match self {
DbValue::SpentOutput(_) => f.write_str("Spent Output"),
DbValue::UnspentOutput(_) => f.write_str("Unspent Output"),
DbValue::UnspentOutputs(_) => f.write_str("Unspent Outputs"),
DbValue::SpentOutputs(_) => f.write_str("Spent Outputs"),
DbValue::KeyManagerState(_) => f.write_str("Key Manager State"),
DbValue::InvalidOutputs(_) => f.write_str("Invalid Outputs"),
DbValue::KnownOneSidedPaymentScripts(_) => f.write_str("Known claiming scripts"),
DbValue::AnyOutput(_) => f.write_str("Any Output"),
DbValue::AnyOutputs(_) => f.write_str("Any Outputs"),
}
}
}
fn log_error<T>(req: DbKey, err: OutputManagerStorageError) -> Result<T, OutputManagerStorageError> {
error!(
target: LOG_TARGET,
"Database access error on request: {}: {}",
req,
err.to_string()
);
Err(err)
}
action-factory.ts | import {
AbortErroredProcess,
AbortMissingProcess,
AbortRunningProcess,
AbortStoppedProcess,
AbortUnknownProcess,
DaemonizeProcess,
RestartProcess,
RestartRunningProcess,
RestartRunningProcessWithPrompt,
} from "./actions";
import { AnyObject, Application, ProcessOptions } from "./contracts";
import { Identifiers, inject, injectable } from "./ioc";
/**
* @export
* @class ActionFactory
*/
@injectable()
export class ActionFactory {
/**
* @private
* @type {Application}
* @memberof ActionFactory
*/
@inject(Identifiers.Application)
protected readonly app!: Application;
/**
* @param {string} processName
* @returns {void}
* @memberof ActionFactory
*/
public abortErroredProcess(processName: string): void {
return this.app.get<AbortErroredProcess>(Identifiers.AbortErroredProcess).execute(processName);
}
/**
* @param {string} processName
* @returns {void}
* @memberof ActionFactory
*/
public abortMissingProcess(processName: string): void {
return this.app.get<AbortMissingProcess>(Identifiers.AbortMissingProcess).execute(processName);
}
/**
* @param {string} processName
* @returns {void}
* @memberof ActionFactory
*/
public abortRunningProcess(processName: string): void {
return this.app.get<AbortRunningProcess>(Identifiers.AbortRunningProcess).execute(processName);
}
/**
* @param {string} processName
* @returns {void}
* @memberof ActionFactory
*/
public abortStoppedProcess(processName: string): void {
return this.app.get<AbortStoppedProcess>(Identifiers.AbortStoppedProcess).execute(processName);
}
/**
* @param {string} processName
* @returns {void}
* @memberof ActionFactory
*/
public abortUnknownProcess(processName: string): void {
return this.app.get<AbortUnknownProcess>(Identifiers.AbortUnknownProcess).execute(processName);
}
/**
* @param {ProcessOptions} options
* @param {*} flags
* @returns {Promise<void>}
* @memberof ActionFactory
*/
public async daemonizeProcess(options: ProcessOptions, flags: AnyObject): Promise<void> {
return this.app.get<DaemonizeProcess>(Identifiers.DaemonizeProcess).execute(options, flags);
}
/**
* @param {string} processName
* @returns {void}
* @memberof ActionFactory
*/
public restartProcess(processName: string): void {
return this.app.get<RestartProcess>(Identifiers.RestartProcess).execute(processName);
}
/**
* @param {string} processName
* @returns {Promise<void>}
* @memberof ActionFactory
*/
public async restartRunningProcessWithPrompt(processName: string): Promise<void> {
return this.app
.get<RestartRunningProcessWithPrompt>(Identifiers.RestartRunningProcessWithPrompt)
.execute(processName);
}
/**
* @param {string} processName
* @returns {void}
* @memberof ActionFactory
*/
public restartRunningProcess(processName: string): void {
return this.app.get<RestartRunningProcess>(Identifiers.RestartRunningProcess).execute(processName);
}
}
listener.go | package transport
import (
"github.com/micro/network/mucp/tunnel"
"github.com/micro/network/mucp/transport"
)
type tunListener struct {
l tunnel.Listener
}
func (t *tunListener) Addr() string {
return t.l.Channel()
}
func (t *tunListener) Close() error {
return t.l.Close()
}
func (t *tunListener) Accept(fn func(socket transport.Socket)) error {
for {
// accept connection
c, err := t.l.Accept()
if err != nil {
return err
}
// execute the function
go fn(c)
}
}
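// Illustrative usage of the listener above; obtaining the underlying
// tunnel.Listener (`lis` here) is assumed to happen elsewhere:
//
//   tl := &tunListener{l: lis}
//   if err := tl.Accept(func(sock transport.Socket) {
//       // handle the accepted socket
//   }); err != nil {
//       // the tunnel listener was closed or failed
//   }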
request.py | from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
DefaultDict,
Dict,
List,
NamedTuple,
Optional,
Tuple,
Union,
)
from sanic_routing.route import Route # type: ignore
from sanic.models.http_types import Credentials
if TYPE_CHECKING: # no cov
from sanic.server import ConnInfo
from sanic.app import Sanic
import email.utils
import uuid
from collections import defaultdict
from http.cookies import SimpleCookie
from types import SimpleNamespace
from urllib.parse import parse_qs, parse_qsl, unquote, urlunparse
from httptools import parse_url # type: ignore
from sanic.compat import CancelledErrors, Header
from sanic.constants import DEFAULT_HTTP_CONTENT_TYPE
from sanic.exceptions import InvalidUsage, ServerError
from sanic.headers import (
AcceptContainer,
Options,
parse_accept,
parse_content_header,
parse_credentials,
parse_forwarded,
parse_host,
parse_xforwarded,
)
from sanic.http import Http, Stage
from sanic.log import error_logger, logger
from sanic.models.protocol_types import TransportProtocol
from sanic.response import BaseHTTPResponse, HTTPResponse
try:
from ujson import loads as json_loads # type: ignore
except ImportError:
from json import loads as json_loads # type: ignore
class RequestParameters(dict):
"""
Hosts a dict with lists as values where get returns the first
value of the list and getlist returns the whole shebang
"""
def get(self, name: str, default: Optional[Any] = None) -> Optional[Any]:
"""Return the first value, either the default or actual"""
return super().get(name, [default])[0]
def getlist(
self, name: str, default: Optional[Any] = None
) -> Optional[Any]:
"""
Return the entire list
"""
return super().get(name, default)
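# Illustrative behaviour of RequestParameters (the values are assumed):
#
#   params = RequestParameters({"tag": ["a", "b"]})
#   params.get("tag")      # -> "a"
#   params.getlist("tag")  # -> ["a", "b"]
#   params.get("missing")  # -> None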
class Request:
"""
Properties of an HTTP request such as URL, headers, etc.
"""
__slots__ = (
"__weakref__",
"_cookies",
"_id",
"_ip",
"_parsed_url",
"_port",
"_protocol",
"_remote_addr",
"_socket",
"_match_info",
"_name",
"app",
"body",
"conn_info",
"ctx",
"head",
"headers",
"method",
"parsed_accept",
"parsed_args",
"parsed_credentials",
"parsed_files",
"parsed_form",
"parsed_forwarded",
"parsed_json",
"parsed_not_grouped_args",
"parsed_token",
"raw_url",
"responded",
"request_middleware_started",
"route",
"stream",
"transport",
"version",
)
def __init__(
self,
url_bytes: bytes,
headers: Header,
version: str,
method: str,
transport: TransportProtocol,
app: Sanic,
head: bytes = b"",
):
self.raw_url = url_bytes
# TODO: Content-Encoding detection
self._parsed_url = parse_url(url_bytes)
self._id: Optional[Union[uuid.UUID, str, int]] = None
self._name: Optional[str] = None
self.app = app
self.headers = Header(headers)
self.version = version
self.method = method
self.transport = transport
self.head = head
# Init but do not inhale
self.body = b""
self.conn_info: Optional[ConnInfo] = None
self.ctx = SimpleNamespace()
self.parsed_forwarded: Optional[Options] = None
self.parsed_accept: Optional[AcceptContainer] = None
self.parsed_credentials: Optional[Credentials] = None
self.parsed_json = None
self.parsed_form = None
self.parsed_files = None
self.parsed_token: Optional[str] = None
self.parsed_args: DefaultDict[
Tuple[bool, bool, str, str], RequestParameters
] = defaultdict(RequestParameters)
self.parsed_not_grouped_args: DefaultDict[
Tuple[bool, bool, str, str], List[Tuple[str, str]]
] = defaultdict(list)
self.request_middleware_started = False
self._cookies: Optional[Dict[str, str]] = None
self._match_info: Dict[str, Any] = {}
self.stream: Optional[Http] = None
self.route: Optional[Route] = None
self._protocol = None
self.responded: bool = False
def __repr__(self):
class_name = self.__class__.__name__
return f"<{class_name}: {self.method} {self.path}>"
@classmethod
def generate_id(*_):
return uuid.uuid4()
def reset_response(self):
try:
if (
self.stream is not None
and self.stream.stage is not Stage.HANDLER
):
raise ServerError(
"Cannot reset response because previous response was sent."
)
self.stream.response.stream = None
self.stream.response = None
self.responded = False
except AttributeError:
pass
async def respond(
self,
response: Optional[BaseHTTPResponse] = None,
*,
status: int = 200,
headers: Optional[Union[Header, Dict[str, str]]] = None,
content_type: Optional[str] = None,
):
try:
if self.stream is not None and self.stream.response:
raise ServerError("Second respond call is not allowed.")
except AttributeError:
pass
# This logic of determining which response to use is subject to change
if response is None:
response = HTTPResponse(
status=status,
headers=headers,
content_type=content_type,
)
# Connect the response
if isinstance(response, BaseHTTPResponse) and self.stream:
response = self.stream.respond(response)
# Run response middleware
try:
response = await self.app._run_response_middleware(
self, response, request_name=self.name
)
except CancelledErrors:
raise
except Exception:
error_logger.exception(
"Exception occurred in one of response middleware handlers"
)
self.responded = True
return response
async def receive_body(self):
"""Receive request.body, if not already received.
Streaming handlers may call this to receive the full body. Sanic calls
this function before running any handlers of non-streaming routes.
Custom request classes can override this for custom handling of both
streaming and non-streaming routes.
"""
if not self.body:
self.body = b"".join([data async for data in self.stream])
@property
def name(self):
if self._name:
return self._name
elif self.route:
return self.route.name
return None
@property
def endpoint(self):
return self.name
@property
def uri_template(self):
return f"/{self.route.path}"
@property
def protocol(self):
if not self._protocol:
self._protocol = self.transport.get_protocol()
return self._protocol
@property
def raw_headers(self):
_, headers = self.head.split(b"\r\n", 1)
return bytes(headers)
@property
def request_line(self):
reqline, _ = self.head.split(b"\r\n", 1)
return bytes(reqline)
@property
def id(self) -> Optional[Union[uuid.UUID, str, int]]:
"""
A request ID passed from the client, or generated from the backend.
By default, this will look in a request header defined at:
``self.app.config.REQUEST_ID_HEADER``. It defaults to
``X-Request-ID``. Sanic will try to cast the ID into a ``UUID`` or an
``int``. If there is not a UUID from the client, then Sanic will try
to generate an ID by calling ``Request.generate_id()``. The default
behavior is to generate a ``UUID``. You can customize this behavior
by subclassing ``Request``.
.. code-block:: python
from sanic import Request, Sanic
from itertools import count
class IntRequest(Request):
counter = count()
def generate_id(self):
return next(self.counter)
app = Sanic("MyApp", request_class=IntRequest)
"""
if not self._id:
self._id = self.headers.getone(
self.app.config.REQUEST_ID_HEADER,
self.__class__.generate_id(self), # type: ignore
)
# Try casting to a UUID or an integer
if isinstance(self._id, str):
try:
self._id = uuid.UUID(self._id)
except ValueError:
try:
self._id = int(self._id) # type: ignore
except ValueError:
...
return self._id # type: ignore
@property
def json(self):
if self.parsed_json is None:
self.load_json()
return self.parsed_json
def load_json(self, loads=json_loads):
try:
self.parsed_json = loads(self.body)
except Exception:
if not self.body:
return None
raise InvalidUsage("Failed when parsing body as json")
return self.parsed_json
@property
def accept(self) -> AcceptContainer:
if self.parsed_accept is None:
accept_header = self.headers.getone("accept", "")
self.parsed_accept = parse_accept(accept_header)
return self.parsed_accept
@property
def token(self) -> Optional[str]:
"""Attempt to return the auth header token.
:return: token related to request
"""
if self.parsed_token is None:
prefixes = ("Bearer", "Token")
_, token = parse_credentials(
self.headers.getone("authorization", None), prefixes
)
self.parsed_token = token
return self.parsed_token
@property
def credentials(self) -> Optional[Credentials]:
"""Attempt to return the auth header value.
Covers NoAuth, Basic Auth, Bearer Token, Api Token authentication
schemas.
:return: A named tuple with token or username and password related
to request
"""
if self.parsed_credentials is None:
try:
prefix, credentials = parse_credentials(
self.headers.getone("authorization", None)
)
if credentials:
self.parsed_credentials = Credentials(
auth_type=prefix, token=credentials
)
except ValueError:
pass
return self.parsed_credentials
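# Example of the two auth helpers above, assuming the client sent the header
# "Authorization: Bearer abc123":
#
#   request.token                  # -> "abc123"
#   request.credentials.auth_type  # -> "Bearer"
#   request.credentials.token      # -> "abc123"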
@property
def form(self):
if self.parsed_form is None:
self.parsed_form = RequestParameters()
self.parsed_files = RequestParameters()
content_type = self.headers.getone(
"content-type", DEFAULT_HTTP_CONTENT_TYPE
)
content_type, parameters = parse_content_header(content_type)
try:
if content_type == "application/x-www-form-urlencoded":
self.parsed_form = RequestParameters(
parse_qs(self.body.decode("utf-8"))
)
elif content_type == "multipart/form-data":
# TODO: Stream this instead of reading to/from memory
boundary = parameters["boundary"].encode("utf-8")
self.parsed_form, self.parsed_files = parse_multipart_form(
self.body, boundary
)
except Exception:
error_logger.exception("Failed when parsing form")
return self.parsed_form
@property
def files(self):
if self.parsed_files is None:
self.form # compute form to get files
return self.parsed_files
def get_args(
self,
keep_blank_values: bool = False,
strict_parsing: bool = False,
encoding: str = "utf-8",
errors: str = "replace",
) -> RequestParameters:
"""
Method to parse `query_string` using `urllib.parse.parse_qs`.
This method is used by the `args` property.
Can be used directly if you need to change default parameters.
:param keep_blank_values:
flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
:type keep_blank_values: bool
:param strict_parsing:
flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
:type strict_parsing: bool
:param encoding:
specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
:type encoding: str
:param errors:
specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
:type errors: str
:return: RequestParameters
"""
if (
keep_blank_values,
strict_parsing,
encoding,
errors,
) not in self.parsed_args:
if self.query_string:
self.parsed_args[
(keep_blank_values, strict_parsing, encoding, errors)
] = RequestParameters(
parse_qs(
qs=self.query_string,
keep_blank_values=keep_blank_values,
strict_parsing=strict_parsing,
encoding=encoding,
errors=errors,
)
)
return self.parsed_args[
(keep_blank_values, strict_parsing, encoding, errors)
]
args = property(get_args)
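# Example, assuming a query string of "a=1&a=2&b=":
#
#   request.args                              # -> {"a": ["1", "2"]}
#   request.get_args(keep_blank_values=True)  # -> {"a": ["1", "2"], "b": [""]}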
def get_query_args(
self,
keep_blank_values: bool = False,
strict_parsing: bool = False,
encoding: str = "utf-8",
errors: str = "replace",
) -> list:
"""
Method to parse `query_string` using `urllib.parse.parse_qsl`.
This method is used by the `query_args` property.
Can be used directly if you need to change default parameters.
:param keep_blank_values:
flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
:type keep_blank_values: bool
:param strict_parsing:
flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
:type strict_parsing: bool
:param encoding:
specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
:type encoding: str
:param errors:
specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
:type errors: str
:return: list
"""
if (
keep_blank_values,
strict_parsing,
encoding,
errors,
) not in self.parsed_not_grouped_args:
if self.query_string:
self.parsed_not_grouped_args[
(keep_blank_values, strict_parsing, encoding, errors)
] = parse_qsl(
qs=self.query_string,
keep_blank_values=keep_blank_values,
strict_parsing=strict_parsing,
encoding=encoding,
errors=errors,
)
return self.parsed_not_grouped_args[
(keep_blank_values, strict_parsing, encoding, errors)
]
query_args = property(get_query_args)
"""
Convenience property to access :meth:`Request.get_query_args` with
default values.
"""
@property
def cookies(self) -> Dict[str, str]:
"""
:return: Incoming cookies on the request
:rtype: Dict[str, str]
"""
if self._cookies is None:
cookie = self.headers.getone("cookie", None)
if cookie is not None:
cookies: SimpleCookie = SimpleCookie()
cookies.load(cookie)
self._cookies = {
name: cookie.value for name, cookie in cookies.items()
}
else:
self._cookies = {}
return self._cookies
@property
def content_type(self) -> str:
"""
:return: Content-Type header from the request
:rtype: str
"""
return self.headers.getone("content-type", DEFAULT_HTTP_CONTENT_TYPE)
@property
def match_info(self):
"""
:return: matched info after resolving route
"""
return self._match_info
@match_info.setter
def match_info(self, value):
self._match_info = value
# Transport properties (obtained from local interface only)
@property
def ip(self) -> str:
"""
:return: peer ip of the socket
:rtype: str
"""
return self.conn_info.client_ip if self.conn_info else ""
@property
def port(self) -> int:
"""
:return: peer port of the socket
:rtype: int
"""
return self.conn_info.client_port if self.conn_info else 0
@property
def socket(self):
return self.conn_info.peername if self.conn_info else (None, None)
@property
def path(self) -> str:
"""
:return: path of the local HTTP request
:rtype: str
"""
return self._parsed_url.path.decode("utf-8")
# Proxy properties (using SERVER_NAME/forwarded/request/transport info)
@property
def forwarded(self) -> Options:
"""
Active proxy information obtained from request headers, as specified in
Sanic configuration.
Field names by, for, proto, host, port and path are normalized.
- for and by IPv6 addresses are bracketed
- port (int) is only set by port headers, not from host.
- path is url-unencoded
Additional values may be available from new style Forwarded headers.
:return: forwarded address info
:rtype: Dict[str, str]
"""
if self.parsed_forwarded is None:
self.parsed_forwarded = (
parse_forwarded(self.headers, self.app.config)
or parse_xforwarded(self.headers, self.app.config)
or {}
)
return self.parsed_forwarded
@property
def remote_addr(self) -> str:
"""
Client IP address, if available.
1. proxied remote address `self.forwarded['for']`
2. local remote address `self.ip`
:return: IPv4, bracketed IPv6, UNIX socket name or arbitrary string
:rtype: str
"""
if not hasattr(self, "_remote_addr"):
self._remote_addr = str(
self.forwarded.get("for", "")
) # or self.ip
return self._remote_addr
@property
def scheme(self) -> str:
"""
Determine request scheme.
1. `config.SERVER_NAME` if in full URL format
2. proxied proto/scheme
3. local connection protocol
:return: http|https|ws|wss or arbitrary value given by the headers.
:rtype: str
"""
if "//" in self.app.config.get("SERVER_NAME", ""):
return self.app.config.SERVER_NAME.split("//")[0]
if "proto" in self.forwarded:
return str(self.forwarded["proto"])
if (
self.app.websocket_enabled
and self.headers.getone("upgrade", "").lower() == "websocket"
):
scheme = "ws"
else:
scheme = "http"
if self.transport.get_extra_info("sslcontext"):
scheme += "s"
return scheme
@property
def host(self) -> str:
"""
The currently effective server 'host' (hostname or hostname:port).
1. `config.SERVER_NAME` overrides any client headers
2. proxied host of original request
3. request host header
hostname and port may be separated by
`sanic.headers.parse_host(request.host)`.
:return: the first matching host found, or empty string
:rtype: str
"""
server_name = self.app.config.get("SERVER_NAME")
if server_name:
return server_name.split("//", 1)[-1].split("/", 1)[0]
return str(
self.forwarded.get("host") or self.headers.getone("host", "")
)
@property
def server_name(self) -> str:
"""
:return: hostname the client connected to, by ``request.host``
:rtype: str
"""
return parse_host(self.host)[0] or ""
@property
def server_port(self) -> int:
"""
The port the client connected to, by forwarded ``port`` or
``request.host``.
Default port is returned as 80 and 443 based on ``request.scheme``.
:return: port number
:rtype: int
"""
port = self.forwarded.get("port") or parse_host(self.host)[1]
return int(port or (80 if self.scheme in ("http", "ws") else 443))
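# Example, assuming a Host header of "example.com" and no forwarded headers:
#
#   request.server_name  # -> "example.com"
#   request.server_port  # -> 80 for http/ws, 443 for https/wss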
@property
def server_path(self) -> str:
"""
:return: full path of current URL; uses proxied or local path
:rtype: str
"""
return str(self.forwarded.get("path") or self.path)
@property
def query_string(self) -> str:
"""
:return: representation of the requested query
:rtype: str
"""
if self._parsed_url.query:
return self._parsed_url.query.decode("utf-8")
else:
return ""
@property
def url(self) -> str:
"""
:return: the URL
:rtype: str
"""
return urlunparse(
(self.scheme, self.host, self.path, None, self.query_string, None)
)
def url_for(self, view_name: str, **kwargs) -> str:
"""
Same as :func:`sanic.Sanic.url_for`, but automatically determines
`scheme` and `netloc` based on the request. Since this method aims
to generate a correct scheme & netloc, `_external` is implied.
:param kwargs: takes same parameters as in :func:`sanic.Sanic.url_for`
:return: an absolute url to the given view
:rtype: str
"""
# Full URL SERVER_NAME can only be handled in app.url_for
try:
if "//" in self.app.config.SERVER_NAME:
return self.app.url_for(view_name, _external=True, **kwargs)
except AttributeError:
pass
scheme = self.scheme
host = self.server_name
port = self.server_port
if (scheme.lower() in ("http", "ws") and port == 80) or (
scheme.lower() in ("https", "wss") and port == 443
):
netloc = host
else:
netloc = f"{host}:{port}"
return self.app.url_for(
view_name, _external=True, _scheme=scheme, _server=netloc, **kwargs
)
class File(NamedTuple):
"""
Model for defining a file. It is a ``namedtuple``, therefore you can
iterate over the object, or access the parameters by name.
:param type: The mimetype, defaults to text/plain
:param body: Bytes of the file
:param name: The filename
"""
type: str
body: bytes
name: str
def parse_multipart_form(body, boundary):
"""
Parse a request body and returns fields and files
:param body: bytes request body
:param boundary: bytes multipart boundary
:return: fields (RequestParameters), files (RequestParameters)
"""
files = RequestParameters()
fields = RequestParameters()
form_parts = body.split(boundary)
for form_part in form_parts[1:-1]:
file_name = None
content_type = "text/plain"
content_charset = "utf-8"
field_name = None
line_index = 2
line_end_index = 0
while not line_end_index == -1:
line_end_index = form_part.find(b"\r\n", line_index)
form_line = form_part[line_index:line_end_index].decode("utf-8")
line_index = line_end_index + 2
if not form_line:
break
colon_index = form_line.index(":")
idx = colon_index + 2
form_header_field = form_line[0:colon_index].lower()
form_header_value, form_parameters = parse_content_header(
form_line[idx:]
)
if form_header_field == "content-disposition":
field_name = form_parameters.get("name")
file_name = form_parameters.get("filename")
# non-ASCII filenames in RFC2231, "filename*" format
if file_name is None and form_parameters.get("filename*"):
encoding, _, value = email.utils.decode_rfc2231(
form_parameters["filename*"]
)
file_name = unquote(value, encoding=encoding)
elif form_header_field == "content-type":
content_type = form_header_value
content_charset = form_parameters.get("charset", "utf-8")
if field_name:
post_data = form_part[line_index:-4]
if file_name is None:
value = post_data.decode(content_charset)
if field_name in fields:
fields[field_name].append(value)
else:
fields[field_name] = [value]
else:
form_file = File(
type=content_type, name=file_name, body=post_data
)
if field_name in files:
files[field_name].append(form_file)
else:
files[field_name] = [form_file]
else:
logger.debug(
"Form-data field does not have a 'name' parameter "
"in the Content-Disposition header"
)
return fields, files
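# Illustrative call (the boundary and body bytes are assumed values):
#
#   body = (b"--b0\r\n"
#           b'Content-Disposition: form-data; name="greeting"\r\n'
#           b"\r\n"
#           b"hello\r\n"
#           b"--b0--\r\n")
#   fields, files = parse_multipart_form(body, b"b0")
#   fields.get("greeting")  # -> "hello"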
constructors.go | /* SPDX-License-Identifier: Apache-2.0 */
/* Copyright(c) 2019 Wind River Systems, Inc. */
package v1
import (
"fmt"
"github.com/alecthomas/units"
"github.com/gophercloud/gophercloud/starlingx/inventory/v1/addresspools"
"github.com/gophercloud/gophercloud/starlingx/inventory/v1/certificates"
"github.com/gophercloud/gophercloud/starlingx/inventory/v1/controllerFilesystems"
"github.com/gophercloud/gophercloud/starlingx/inventory/v1/cpus"
"github.com/gophercloud/gophercloud/starlingx/inventory/v1/datanetworks"
"github.com/gophercloud/gophercloud/starlingx/inventory/v1/hostFilesystems"
"github.com/gophercloud/gophercloud/starlingx/inventory/v1/hosts"
"github.com/gophercloud/gophercloud/starlingx/inventory/v1/interfaces"
"github.com/gophercloud/gophercloud/starlingx/inventory/v1/licenses"
"github.com/gophercloud/gophercloud/starlingx/inventory/v1/memory"
"github.com/gophercloud/gophercloud/starlingx/inventory/v1/networks"
"github.com/gophercloud/gophercloud/starlingx/inventory/v1/physicalvolumes"
"github.com/gophercloud/gophercloud/starlingx/inventory/v1/snmpCommunity"
"github.com/gophercloud/gophercloud/starlingx/inventory/v1/snmpTrapDest"
"github.com/gophercloud/gophercloud/starlingx/inventory/v1/volumegroups"
v1info "github.com/wind-river/cloud-platform-deployment-manager/pkg/platform"
v1 "k8s.io/api/core/v1"
v1types "k8s.io/apimachinery/pkg/apis/meta/v1"
"regexp"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"strings"
)
var log = logf.Log.WithName("controller").WithName("host")
const (
ControllerToolsLabel = "controller-tools.k8s.io"
ControllerToolsVersion = "1.0"
)
const (
// Secret map key names.
SecretUsernameKey = "username"
SecretPasswordKey = "password"
)
// Group defines the API group currently in use.
const Group = "starlingx.windriver.com"
// Version defines the API version currently in use.
const Version = "v1"
const APIVersion = Group + "/" + Version
// Defines the current list of resource kinds.
const (
KindHost = "Host"
KindHostProfile = "HostProfile"
KindPlatformNetwork = "PlatformNetwork"
KindDataNetwork = "DataNetwork"
KindSystem = "System"
)
type PageSize string
// Defines the accepted hugepage memory page sizes.
const (
PageSize4K PageSize = "4KB"
PageSize2M PageSize = "2MB"
PageSize1G PageSize = "1GB"
)
// Bytes returns the page size in bytes.
func (v PageSize) Bytes() int {
switch v {
case PageSize1G:
return int(units.Gibibyte)
case PageSize2M:
return 2 * int(units.Mebibyte)
case PageSize4K:
return 4 * int(units.KiB)
}
// This is never expected to happen so no error is returned.
return 0
}
// Megabytes returns the page size in megabytes.
func (v PageSize) Megabytes() int {
switch v {
case PageSize1G:
return 1024
case PageSize2M:
return 2
}
// This is never expected to happen so no error is returned.
return 0
}
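// For reference, the mappings implemented above are:
//
//   PageSize4K.Bytes()     == 4096
//   PageSize2M.Bytes()     == 2 * 1024 * 1024
//   PageSize1G.Megabytes() == 1024
//   PageSize2M.Megabytes() == 2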
// Defines the valid host provisioning modes
const (
ProvioningModeStatic = "static"
ProvioningModeDynamic = "dynamic"
)
// Defines the default Secret name used for tracking license files.
const SystemDefaultLicenseName = "system-license"
// ErrMissingSystemResource defines an error to be used when reporting that
// an operation is unable to find a required system resource from the
// system API. This error is not intended for kubernetes resources that are
// missing. For those use ErrMissingKubernetesResource
type ErrMissingSystemResource struct {
message string
}
// Error returns the message associated with an error of this type.
func (in ErrMissingSystemResource) Error() string {
return in.message
}
// NewMissingSystemResource defines a constructor for the
// ErrMissingSystemResource error type.
func NewMissingSystemResource(msg string) error {
return ErrMissingSystemResource{msg}
}
// stripPartitionNumber is a utility function that removes the "-partNNN" suffix
// from the partition device path.
func stripPartitionNumber(path string) string {
re := regexp.MustCompile("-part[0-9]*")
return re.ReplaceAllString(path, "")
}
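// Example (the device path is an assumed value):
//
//   stripPartitionNumber("/dev/disk/by-path/pci-0000:00:0d.0-ata-1.0-part2")
//   // -> "/dev/disk/by-path/pci-0000:00:0d.0-ata-1.0"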
// parseLabelInfo is a utility which parses the label data as it is presented
// by the system API and stores the data in the form required by a profile spec.
func parseLabelInfo(profile *HostProfileSpec, host v1info.HostInfo) error {
result := make(map[string]string)
for _, l := range host.Labels {
result[l.Key] = l.Value
}
if len(result) > 0 {
profile.Labels = result
}
return nil
}
// parseProcessorInfo is a utility which parses the CPU data as it is presented
// by the system API and stores the data in the form required by a profile spec.
func parseProcessorInfo(profile *HostProfileSpec, host v1info.HostInfo) error {
var result []ProcessorInfo
// First, organize the data by node and function.
nodes := make(map[int]map[string]int)
for _, c := range host.CPU {
if c.Thread != 0 {
// Processor configurations are always done on a physical core
// basis so do not include hyper-thread cores.
continue
}
function := strings.ToLower(c.Function)
if function == cpus.CPUFunctionApplication {
// These cannot be configured. They are simply a placeholder
// for those CPUs that are not allocated for any other function.
continue
}
if f, ok := nodes[c.Processor]; !ok {
nodes[c.Processor] = map[string]int{function: 1}
if profile.HasWorkerSubFunction() && function != cpus.CPUFunctionVSwitch {
// Always add the vswitch function since if it is set to 0
// it won't show up in the list and will be missing from the
// profile that we create.
nodes[c.Processor][cpus.CPUFunctionVSwitch] = 0
}
} else {
f[function] = f[function] + 1
}
}
// Second, prepare the final data by converting the maps to arrays.
for key := range nodes {
node := ProcessorInfo{
Node: key,
}
for function, count := range nodes[key] {
data := ProcessorFunctionInfo{
Function: strings.ToLower(function),
Count: count,
}
node.Functions = append(node.Functions, data)
}
result = append(result, node)
}
profile.Processors = result
return nil
}
// parseMemoryInfo is a utility which parses the memory data as it is presented
// by the system API and stores the data in the form required by a profile spec.
func parseMemoryInfo(profile *HostProfileSpec, host v1info.HostInfo) error {
var result []MemoryNodeInfo
for _, m := range host.Memory {
info := MemoryNodeInfo{
Node: m.Processor,
}
// Platform memory allocations
platform := MemoryFunctionInfo{
Function: memory.MemoryFunctionPlatform,
PageSize: string(PageSize4K),
PageCount: (m.Platform * int(units.Mebibyte)) / PageSize4K.Bytes(),
}
info.Functions = append(info.Functions, platform)
if profile.HasWorkerSubFunction() {
// VSwitch memory allocations
vswitch := MemoryFunctionInfo{
Function: memory.MemoryFunctionVSwitch,
PageCount: m.VSwitchHugepagesCount,
}
if m.VSwitchHugepagesSize == PageSize2M.Megabytes() {
vswitch.PageSize = string(PageSize2M)
} else if m.VSwitchHugepagesSize == PageSize1G.Megabytes() {
vswitch.PageSize = string(PageSize1G)
} else {
vswitch.PageSize = string(PageSize4K)
}
info.Functions = append(info.Functions, vswitch)
// VM memory allocations
vm2m := MemoryFunctionInfo{
Function: memory.MemoryFunctionVM,
PageSize: string(PageSize2M),
PageCount: m.VM2MHugepagesCount,
}
if m.VSwitchHugepagesSize == PageSize2M.Megabytes() {
// The system API does not properly report the 2M pages that are
// exclusively reserved for VM use. If vswitch is also using
// 2M pages then its total is lumped in with the VM total so
// we need to separate them.
// TODO(alegacy): This needs to be fixed whenever the system
// api reports unique values.
if vm2m.PageCount >= vswitch.PageCount {
// On initial provisioning the memory does not seem to be
// accounted for properly so only do this if it does not
// result in a negative error.
vm2m.PageCount -= vswitch.PageCount
}
}
info.Functions = append(info.Functions, vm2m)
vm1g := MemoryFunctionInfo{
Function: memory.MemoryFunctionVM,
PageSize: string(PageSize1G),
PageCount: m.VM1GHugepagesCount,
}
info.Functions = append(info.Functions, vm1g)
}
result = append(result, info)
}
profile.Memory = result
return nil
}
// parseInterfaceInfo is a utility which parses the interface data as it is
// presented by the system API and stores the data in the form required by a
// profile spec.
func parseInterfaceInfo(profile *HostProfileSpec, host v1info.HostInfo) error {
result := InterfaceInfo{}
ethernets := make([]EthernetInfo, 0)
bonds := make([]BondInfo, 0)
vlans := make([]VLANInfo, 0)
vfs := make([]VFInfo, 0)
for _, iface := range host.Interfaces {
data := CommonInterfaceInfo{
Name: iface.Name,
Class: iface.Class,
}
mtu := iface.MTU
data.MTU = &mtu
if iface.Class == "" {
data.Class = interfaces.IFClassNone
}
nets := host.BuildInterfaceNetworkList(iface)
if iface.IPv4Pool != nil {
// TODO(alegacy): platform networks of type "other" exist to map
// address pools to data interfaces. This is (hopefully) a
// temporary measure until we can support these as actual networks.
pool := host.FindAddressPool(*iface.IPv4Pool)
if pool != nil {
nets = append(nets, pool.Name)
}
}
if iface.IPv6Pool != nil {
// TODO(alegacy): platform networks of type "other" exist to map
// address pools to data interfaces. This is (hopefully) a
// temporary measure until we can support these as actual networks.
pool := host.FindAddressPool(*iface.IPv6Pool)
if pool != nil {
nets = append(nets, pool.Name)
}
}
netList := StringList(nets)
data.PlatformNetworks = &netList
dataNets := host.BuildInterfaceDataNetworkList(iface)
dataNetList := StringList(dataNets)
data.DataNetworks = &dataNetList
data.PTPRole = iface.PTPRole
switch iface.Type {
case interfaces.IFTypeEthernet:
portname, found := host.FindInterfacePortName(iface.ID)
if !found {
msg := fmt.Sprintf("unable to find port name for interface id %s", iface.ID)
return NewMissingSystemResource(msg)
}
ethernet := EthernetInfo{
Port: EthernetPortInfo{
Name: portname}}
ethernet.CommonInterfaceInfo = data
if strings.EqualFold(iface.Class, interfaces.IFClassPCISRIOV) {
ethernet.VFCount = iface.VFCount
ethernet.VFDriver = iface.VFDriver
}
ethernets = append(ethernets, ethernet)
case interfaces.IFTypeVLAN:
vlan := VLANInfo{ |
case interfaces.IFTypeAE:
bond := BondInfo{
Mode: *iface.AEMode,
TransmitHashPolicy: iface.AETransmitHash,
Members: iface.Uses}
bond.CommonInterfaceInfo = data
bonds = append(bonds, bond)
case interfaces.IFTypeVirtual:
// Virtual interfaces are only used on AIO-SX systems so manage
// them as an Ethernet interface for simplicity sake.
ethernet := EthernetInfo{
Port: EthernetPortInfo{
Name: data.Name}}
ethernet.CommonInterfaceInfo = data
ethernets = append(ethernets, ethernet)
case interfaces.IFTypeVF:
vf := VFInfo{
VFCount: *iface.VFCount,
Lower: iface.Uses[0],
VFDriver: iface.VFDriver}
vf.CommonInterfaceInfo = data
vfs = append(vfs, vf)
}
}
if len(ethernets) > 0 {
result.Ethernet = ethernets
}
if len(vlans) > 0 {
result.VLAN = vlans
}
if len(bonds) > 0 {
result.Bond = bonds
}
if len(vfs) > 0 {
result.VF = vfs
}
profile.Interfaces = &result
return nil
}
// parseAddressInfo is a utility which parses the address data as it is
// presented by the system API and stores the data in the form required by a
// profile spec.
func parseAddressInfo(profile *HostProfileSpec, host v1info.HostInfo) error {
result := make([]AddressInfo, 0)
for _, a := range host.Addresses {
if host.IsSystemAddress(&a) {
// ignore these because they appear after creating or modifying
// interfaces and that makes it difficult to compare the current
// config to the desired profile because it always looks like we
// need to deal with a difference in the address list.
continue
}
address := AddressInfo{
Interface: a.InterfaceName,
Address: a.Address,
Prefix: a.Prefix,
}
result = append(result, address)
}
if len(result) > 0 {
profile.Addresses = result
}
return nil
}
// parseRouteInfo is a utility which parses the route data as it is presented
// by the system API and stores the data in the form required by a profile spec.
func parseRouteInfo(profile *HostProfileSpec, host v1info.HostInfo) error {
result := make([]RouteInfo, len(host.Routes))
for i, r := range host.Routes {
metric := r.Metric
route := RouteInfo{
Interface: r.InterfaceName,
Network: r.Network,
Prefix: r.Prefix,
Gateway: r.Gateway,
Metric: &metric,
}
result[i] = route
}
if len(result) > 0 {
profile.Routes = result
}
return nil
}
// parsePhysicalVolumeInfo is a utility which parses the physical volume data as
// it is presented by the system API and stores the data in the form required by
// a profile spec.
func parsePhysicalVolumeInfo(group *VolumeGroupInfo, vg *volumegroups.VolumeGroup, host v1info.HostInfo) error {
result := make([]PhysicalVolumeInfo, 0)
for _, pv := range host.PhysicalVolumes {
if pv.VolumeGroupID != vg.ID {
continue
}
physicalVolume := PhysicalVolumeInfo{
Type: pv.Type,
Path: pv.DevicePath,
}
if pv.Type == physicalvolumes.PVTypePartition {
if partition, ok := host.FindPartition(pv.DeviceUUID); ok {
size := partition.Gibibytes()
physicalVolume.Size = &size
physicalVolume.Path = stripPartitionNumber(partition.DevicePath)
} else {
msg := fmt.Sprintf("failed to lookup partition %s", pv.DeviceUUID)
return NewMissingSystemResource(msg)
}
}
result = append(result, physicalVolume)
}
group.PhysicalVolumes = result
return nil
}
// parsePartitionInfo is a utility which parses the partition data as it is
// presented by the system API and stores the data in the form required by a
// profile spec.
func parseVolumeGroupInfo(profile *HostProfileSpec, host v1info.HostInfo) error {
result := make([]VolumeGroupInfo, len(host.VolumeGroups))
for i, vg := range host.VolumeGroups {
group := VolumeGroupInfo{
Name: vg.Name,
}
if value := vg.Capabilities.LVMType; value != nil {
lvmType := *value
group.LVMType = &lvmType
}
if value := vg.Capabilities.ConcurrentDiskOperations; value != nil {
concurrentDiskOperations := *value
group.ConcurrentDiskOperations = &concurrentDiskOperations
}
err := parsePhysicalVolumeInfo(&group, &vg, host)
if err != nil {
return err
}
result[i] = group
}
if len(result) > 0 {
list := VolumeGroupList(result)
profile.Storage.VolumeGroups = &list
}
return nil
}
// parseOSDInfo is a utility which parses the OSD data as it is presented by the
// system API and stores the data in the form required by a profile spec.
func parseOSDInfo(profile *HostProfileSpec, host v1info.HostInfo) error {
result := make([]OSDInfo, 0)
for _, o := range host.OSDs {
osd := OSDInfo{
Function: o.Function,
}
clusterName, found := host.FindClusterNameByTier(o.TierUUID)
if found {
osd.ClusterName = &clusterName
}
disk, _ := host.FindDisk(o.DiskID)
if disk == nil {
log.Info("unable to find disk for OSD", "uuid", o.ID)
continue // skip
}
osd.Path = disk.DevicePath
if o.JournalInfo.Location != nil && *o.JournalInfo.Location != o.ID {
// If the journal points to a separate OSD then use that information
// to populate the profile info; otherwise if the journal is
// pointing to itself then there is no need to save that in the
// profile because that is system generated.
if o.JournalInfo.Path != nil {
path := stripPartitionNumber(*o.JournalInfo.Path)
journal := JournalInfo{
Location: path,
Size: o.JournalInfo.Gibibytes(),
}
osd.Journal = &journal
} else {
log.Info("unexpected nil OSD journal path", "uuid", o.ID)
}
}
result = append(result, osd)
}
if len(result) > 0 {
list := OSDList(result)
profile.Storage.OSDs = &list
}
return nil
}
// parseMonitorInfo is a utility which parses the Ceph Monitor data as it is
// presented by the system API and stores the data in the form required by a
// profile spec.
func parseMonitorInfo(profile *HostProfileSpec, host v1info.HostInfo) error {
for _, m := range host.Monitors {
if m.Hostname == host.Hostname {
size := m.Size
profile.Storage.Monitor = &MonitorInfo{
Size: &size,
}
return nil
}
}
return nil
}
func parseHostFileSystemInfo(spec *HostProfileSpec, fileSystems []hostFilesystems.FileSystem) error {
result := make([]FileSystemInfo, 0)
for _, fs := range fileSystems {
info := FileSystemInfo{
Name: fs.Name,
Size: fs.Size,
}
result = append(result, info)
}
list := FileSystemList(result)
spec.Storage.FileSystems = &list
return nil
}
// parseStorageInfo is a utility which parses the storage data as it is
// presented by the system API and stores the data in the form required by a
// profile spec.
func parseStorageInfo(profile *HostProfileSpec, host v1info.HostInfo) error {
var err error
storage := ProfileStorageInfo{}
profile.Storage = &storage
if host.Personality == hosts.PersonalityWorker {
// The monitors on the controllers are handled automatically so to avoid
// creating differences between the controller profiles and current
// configurations just avoid adding these to the dynamic profiles.
err = parseMonitorInfo(profile, host)
if err != nil {
return err
}
}
// Fill-in volume group attributes
err = parseVolumeGroupInfo(profile, host)
if err != nil {
return err
}
// Fill-in OSD attributes
err = parseOSDInfo(profile, host)
if err != nil {
return err
}
// Fill-in filesystem attributes
err = parseHostFileSystemInfo(profile, host.FileSystems)
if err != nil {
return err
}
if storage.OSDs == nil && storage.VolumeGroups == nil && storage.FileSystems == nil {
profile.Storage = nil
}
return nil
}
// autoGenerateBMSecretName is a utility which generates a host specific secret
// name to refer to the board management credentials for that specific host.
// NOTE: for now we are only going to generate a single secret for all nodes.
// If customization is required the user can manually clone what they need.
func autoGenerateBMSecretName() string {
return "bmc-secret"
}
// parseBoardManagementInfo is a utility which parses the board management data
// as it is presented by the system API and stores the data in the form required
// by a profile spec. Since the credentials are only partially presented by
// the API they are not stored in the profile.
func parseBoardManagementInfo(profile *HostProfileSpec, host v1info.HostInfo) error {
if host.BMType != nil {
info := BMInfo{
Type: host.BMType,
}
if host.BMAddress != nil {
info.Address = host.BMAddress
}
if host.BMUsername != nil {
info.Credentials = &BMCredentials{
Password: &BMPasswordInfo{
Secret: autoGenerateBMSecretName()},
}
}
profile.BoardManagement = &info
} else {
bmType := "none"
info := BMInfo{
Type: &bmType,
Address: nil,
Credentials: nil,
}
profile.BoardManagement = &info
}
return nil
}
func NewNamespace(name string) (*v1.Namespace, error) {
namespace := v1.Namespace{
TypeMeta: v1types.TypeMeta{
APIVersion: "v1",
Kind: "Namespace",
},
ObjectMeta: v1types.ObjectMeta{
Name: name,
},
}
return &namespace, nil
}
// fixDevicePath is a utility function that takes a legacy formatted device
// path (e.g., sda or /dev/sda) and converts it to the newer, more explicit
// format (e.g., /dev/disk/by-path/pci-0000:00:14.0-usb-0:1:1.0-scsi-0:0:0:0).
func fixDevicePath(path string, host v1info.HostInfo) string {
shortFormNode := regexp.MustCompile(`(?s)^\w+$`)
longFormNode := regexp.MustCompile(`(?s)^/dev/\w+$`)
var searchPath string
if shortFormNode.MatchString(path) {
searchPath = fmt.Sprintf("/dev/%s", path)
} else if longFormNode.MatchString(path) {
searchPath = path
} else {
// Likely already in the devicePath format
return path
}
if disk, ok := host.FindDiskByNode(searchPath); ok {
return disk.DevicePath
}
// No alternative found
return path
}
const zeroMAC = "00:00:00:00:00:00"
// NewHostProfileSpec takes the current set of host attributes and builds a
// fake host profile that can be used as a reference for the current settings
// applied to the host.
func NewHostProfileSpec(host v1info.HostInfo) (*HostProfileSpec, error) {
var err error
spec := HostProfileSpec{}
// Fill-in the basic attributes
spec.Personality = &host.Personality
subfunctions := strings.Split(host.SubFunctions, ",")
spec.SubFunctions = subfunctions
spec.AdministrativeState = &host.AdministrativeState
if host.BootMAC != zeroMAC {
// During initial configuration the first controller has a zero MAC
// address set as its boot MAC address. Storing that value in the
// defaults would cause a conflict once the real MAC is set up in the
// system: we would continuously try to set it back to the zero MAC
// and the system would reject it.
spec.BootMAC = &host.BootMAC
}
spec.Console = &host.Console
spec.InstallOutput = &host.InstallOutput
if host.Location.Name != nil && *host.Location.Name != "" {
spec.Location = host.Location.Name
}
bootDevice := fixDevicePath(host.BootDevice, host)
spec.BootDevice = &bootDevice
rootDevice := fixDevicePath(host.RootDevice, host)
spec.RootDevice = &rootDevice
spec.ClockSynchronization = host.ClockSynchronization
// Assume that the board is powered on unless there is a clear indication
// that it is not.
powerState := true
if host.AvailabilityStatus == hosts.AvailPowerOff {
if host.Task == nil || *host.Task != hosts.TaskPoweringOn {
powerState = false
}
} else if host.Task != nil && *host.Task == hosts.TaskPoweringOff {
powerState = false
}
spec.PowerOn = &powerState
err = parseBoardManagementInfo(&spec, host)
if err != nil {
return nil, err
}
err = parseLabelInfo(&spec, host)
if err != nil {
return nil, err
}
if spec.HasWorkerSubFunction() {
// Fill-in CPU attributes
err := parseProcessorInfo(&spec, host)
if err != nil {
return nil, err
}
// Fill-in Memory attributes
err = parseMemoryInfo(&spec, host)
if err != nil {
return nil, err
}
}
// Fill-in Interface attributes
err = parseInterfaceInfo(&spec, host)
if err != nil {
return nil, err
}
// Fill-in Address attributes
err = parseAddressInfo(&spec, host)
if err != nil {
return nil, err
}
// Fill-in Route attributes
err = parseRouteInfo(&spec, host)
if err != nil {
return nil, err
}
// Fill-in Storage attributes
err = parseStorageInfo(&spec, host)
if err != nil {
return nil, err
}
return &spec, nil
}
func NewHostProfile(name string, namespace string, hostInfo v1info.HostInfo) (*HostProfile, error) {
name = fmt.Sprintf("%s-profile", name)
profile := HostProfile{
TypeMeta: v1types.TypeMeta{
APIVersion: APIVersion,
Kind: KindHostProfile,
},
ObjectMeta: v1types.ObjectMeta{
Name: name,
Namespace: namespace,
Labels: map[string]string{
ControllerToolsLabel: ControllerToolsVersion,
},
},
}
spec, err := NewHostProfileSpec(hostInfo)
if err != nil {
return nil, err
}
spec.DeepCopyInto(&profile.Spec)
return &profile, nil
}
func autoGenerateCertName(certType string, certIndex int) string {
// Kubernetes does not accept underscores in resource names.
certType = strings.Replace(certType, "_", "-", -1)
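// e.g., a certificate of type "ssl_ca" at index 2 becomes "ssl-ca-cert-secret-2".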
return fmt.Sprintf("%s-cert-secret-%d", certType, certIndex)
}
func parseCertificateInfo(spec *SystemSpec, certificates []certificates.Certificate) error {
result := make([]CertificateInfo, 0)
for index, c := range certificates {
cert := CertificateInfo{
Type: c.Type,
// Use a fixed naming so that we can document how we auto-generate
// a full system description for cloning purposes
Secret: autoGenerateCertName(c.Type, index),
}
result = append(result, cert)
}
list := CertificateList(result)
spec.Certificates = &list
return nil
}
func parseSNMPCommunityInfo(spec *SystemSpec, communities []snmpCommunity.SNMPCommunity) error {
result := make([]string, 0)
for _, c := range communities {
result = append(result, c.Community)
}
if spec.SNMP == nil {
spec.SNMP = &SNMPInfo{}
}
list := StringList(result)
spec.SNMP.Communities = &list
return nil
}
func parseSNMPTrapDestInfo(spec *SystemSpec, trapDestinations []snmpTrapDest.SNMPTrapDest) error {
result := make([]TrapDestInfo, 0)
for _, t := range trapDestinations {
info := TrapDestInfo{
Community: t.Community,
IPAddress: t.IPAddress,
}
result = append(result, info)
}
if spec.SNMP == nil {
spec.SNMP = &SNMPInfo{}
}
list := TrapDestList(result)
spec.SNMP.TrapDestinations = &list
return nil
}
func parseFileSystemInfo(spec *SystemSpec, fileSystems []controllerFilesystems.FileSystem) error {
result := make([]ControllerFileSystemInfo, 0)
for _, fs := range fileSystems {
info := ControllerFileSystemInfo{
Name: fs.Name,
Size: fs.Size,
}
result = append(result, info)
}
if spec.Storage == nil {
spec.Storage = &SystemStorageInfo{}
}
list := ControllerFileSystemList(result)
spec.Storage.FileSystems = &list
return nil
}
func parseLicenseInfo(spec *SystemSpec, license *licenses.License) error {
if license != nil {
// Populate a Secret name reference but for now don't bother trying
// to set up the actual Secret with the license data. That may be
// necessary some day in order to properly compare the desired config
// with the current config.
spec.License = &LicenseInfo{Secret: SystemDefaultLicenseName}
}
return nil
}
func NewSystemStatus(systemInfo v1info.SystemInfo) (*SystemStatus, error) {
status := SystemStatus{}
if systemInfo.SystemType != "" {
status.SystemType = systemInfo.SystemType
}
if systemInfo.SystemMode != "" {
status.SystemMode = systemInfo.SystemMode
}
return &status, nil
}
func NewSystemSpec(systemInfo v1info.SystemInfo) (*SystemSpec, error) {
spec := SystemSpec{}
// Fill-in the basic attributes
if systemInfo.Location != "" {
spec.Location = &systemInfo.Location
}
if systemInfo.Description != "" {
spec.Description = &systemInfo.Description
}
if systemInfo.Contact != "" {
spec.Contact = &systemInfo.Contact
}
spec.VSwitchType = &systemInfo.Capabilities.VSwitchType
if systemInfo.DRBD != nil {
spec.Storage = &SystemStorageInfo{
DRBD: &DRBDConfiguration{
LinkUtilization: systemInfo.DRBD.LinkUtilization,
},
}
}
if systemInfo.DNS != nil {
if systemInfo.DNS.Nameservers != "" {
nameservers := StringList(strings.Split(systemInfo.DNS.Nameservers, ","))
spec.DNSServers = &nameservers
} else {
empty := StringList(make([]string, 0))
spec.DNSServers = &empty
}
}
if systemInfo.NTP != nil {
if systemInfo.NTP.NTPServers != "" {
nameservers := StringList(strings.Split(systemInfo.NTP.NTPServers, ","))
spec.NTPServers = &nameservers
} else {
empty := StringList(make([]string, 0))
spec.NTPServers = &empty
}
}
if systemInfo.PTP != nil {
spec.PTP = &PTPInfo{
Mode: &systemInfo.PTP.Mode,
Transport: &systemInfo.PTP.Transport,
Mechanism: &systemInfo.PTP.Mechanism,
}
}
if len(systemInfo.Certificates) > 0 {
err := parseCertificateInfo(&spec, systemInfo.Certificates)
if err != nil {
return nil, err
}
}
if len(systemInfo.SNMPCommunities) > 0 {
err := parseSNMPCommunityInfo(&spec, systemInfo.SNMPCommunities)
if err != nil {
return nil, err
}
}
if len(systemInfo.SNMPTrapDestinations) > 0 {
err := parseSNMPTrapDestInfo(&spec, systemInfo.SNMPTrapDestinations)
if err != nil {
return nil, err
}
}
if len(systemInfo.FileSystems) > 0 {
err := parseFileSystemInfo(&spec, systemInfo.FileSystems)
if err != nil {
return nil, err
}
}
if systemInfo.License != nil {
err := parseLicenseInfo(&spec, systemInfo.License)
if err != nil {
return nil, err
}
}
return &spec, nil
}
func NewSystem(namespace string, name string, systemInfo v1info.SystemInfo) (*System, error) {
system := System{
TypeMeta: v1types.TypeMeta{
APIVersion: APIVersion,
Kind: KindSystem,
},
ObjectMeta: v1types.ObjectMeta{
Name: name,
Namespace: namespace,
Labels: map[string]string{
ControllerToolsLabel: ControllerToolsVersion,
},
},
}
spec, err := NewSystemSpec(systemInfo)
if err != nil {
return nil, err
}
spec.DeepCopyInto(&system.Spec)
status, err := NewSystemStatus(systemInfo)
if err != nil {
return nil, err
}
status.DeepCopyInto(&system.Status)
return &system, nil
}
func NewBMSecret(name string, namespace string, username string) (*v1.Secret, error) {
// It is not possible to reconstruct the password info from a running
// system so scaffold it and allow the user to fill in the blanks.
fakePassword := []byte("")
secret := v1.Secret{
TypeMeta: v1types.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: v1types.ObjectMeta{
Name: name,
Namespace: namespace,
},
Type: v1.SecretTypeBasicAuth,
Data: map[string][]byte{
v1.BasicAuthUsernameKey: []byte(username),
v1.BasicAuthPasswordKey: fakePassword,
},
}
return &secret, nil
}
func NewLicenseSecret(name string, namespace string, content string) (*v1.Secret, error) {
secret := v1.Secret{
TypeMeta: v1types.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: v1types.ObjectMeta{
Name: name,
Namespace: namespace,
},
Type: v1.SecretTypeOpaque,
Data: map[string][]byte{
SecretLicenseContentKey: []byte(content),
},
}
return &secret, nil
}
func NewCertificateSecret(name string, namespace string) (*v1.Secret, error) {
// It is not possible to reconstruct the certificate info from a running
// system so scaffold it and allow the user to fill in the blanks.
fakeInput := []byte("")
secret := v1.Secret{
TypeMeta: v1types.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: v1types.ObjectMeta{
Name: name,
Namespace: namespace,
},
Type: v1.SecretTypeTLS,
Data: map[string][]byte{
v1.TLSCertKey: fakeInput,
v1.TLSPrivateKeyKey: fakeInput,
v1.ServiceAccountRootCAKey: fakeInput,
},
}
return &secret, nil
}
func NewHostSpec(hostInfo v1info.HostInfo) (*HostSpec, error) {
spec := HostSpec{}
// Fill-in the basic attributes
hostname := hostInfo.Hostname
if hostname == "" {
hostname = hostInfo.ID
}
spec.Profile = hostname
spec.Overrides = &HostProfileSpec{
ProfileBaseAttributes: ProfileBaseAttributes{
// Assume that hosts will all be statically provisioned for now.
BootMAC: &hostInfo.BootMAC},
}
return &spec, nil
}
func NewHost(name string, namespace string, hostInfo v1info.HostInfo) (*Host, error) {
host := Host{
TypeMeta: v1types.TypeMeta{
APIVersion: APIVersion,
Kind: KindHost,
},
ObjectMeta: v1types.ObjectMeta{
Name: name,
Namespace: namespace,
Labels: map[string]string{
ControllerToolsLabel: ControllerToolsVersion,
},
},
}
spec, err := NewHostSpec(hostInfo)
if err != nil {
return nil, err
}
spec.DeepCopyInto(&host.Spec)
return &host, nil
}
func NewDataNetworkSpec(net datanetworks.DataNetwork) (*DataNetworkSpec, error) {
spec := DataNetworkSpec{
Type: net.Type,
}
if net.MTU != datanetworks.DefaultMTU {
spec.MTU = &net.MTU
}
if net.Description != "" {
spec.Description = &net.Description
}
if net.Type == datanetworks.TypeVxLAN {
spec.VxLAN = &VxLANInfo{
EndpointMode: net.Mode,
UDPPortNumber: net.UDPPortNumber,
TTL: net.TTL,
}
if net.Mode != nil && *net.Mode == datanetworks.EndpointModeDynamic {
spec.VxLAN.MulticastGroup = net.MulticastGroup
}
}
return &spec, nil
}
func NewDataNetwork(name string, namespace string, net datanetworks.DataNetwork) (*DataNetwork, error) {
dataNetwork := DataNetwork{
TypeMeta: v1types.TypeMeta{
APIVersion: APIVersion,
Kind: KindDataNetwork,
},
ObjectMeta: v1types.ObjectMeta{
Name: name,
Namespace: namespace,
Labels: map[string]string{
ControllerToolsLabel: ControllerToolsVersion,
},
},
}
spec, err := NewDataNetworkSpec(net)
if err != nil {
return nil, err
}
spec.DeepCopyInto(&dataNetwork.Spec)
return &dataNetwork, nil
}
func NewPlatformNetworkSpec(pool addresspools.AddressPool) (*PlatformNetworkSpec, error) {
spec := PlatformNetworkSpec{
Type: networks.NetworkTypeOther,
Subnet: pool.Network,
Prefix: pool.Prefix,
Gateway: pool.Gateway,
Allocation: AllocationInfo{
Type: networks.AllocationOrderDynamic,
},
}
ranges := make([]AllocationRange, 0)
for _, r := range pool.Ranges {
obj := AllocationRange{
Start: r[0],
End: r[1],
}
ranges = append(ranges, obj)
}
spec.Allocation.Ranges = ranges
return &spec, nil
}
func NewPlatformNetwork(name string, namespace string, pool addresspools.AddressPool) (*PlatformNetwork, error) {
platformNetwork := PlatformNetwork{
TypeMeta: v1types.TypeMeta{
APIVersion: APIVersion,
Kind: KindPlatformNetwork,
},
ObjectMeta: v1types.ObjectMeta{
Name: name,
Namespace: namespace,
Labels: map[string]string{
ControllerToolsLabel: ControllerToolsVersion,
},
},
}
spec, err := NewPlatformNetworkSpec(pool)
if err != nil {
return nil, err
}
spec.DeepCopyInto(&platformNetwork.Spec)
return &platformNetwork, nil
} | VID: *iface.VID,
Lower: iface.Uses[0]}
vlan.CommonInterfaceInfo = data
vlans = append(vlans, vlan) |
api.py | # -*- coding: utf-8 -*-
# ======================================================================================================================
# Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France. =
# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory =
# ======================================================================================================================
from spectrochempy.utils import generate_api
# generate api
__all__ = generate_api(__file__)
# ======================================================================================================================
if __name__ == '__main__':
p | ass
|
|
cb_ping.rs | use std::thread;
use crossbeam_channel::bounded;
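/// Ping-style throughput probe: one thread sends `count` unit messages over a
/// zero-capacity (rendezvous) crossbeam channel while a second thread counts
/// what it receives; the received count is returned and should equal `count`.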
pub fn cb_ping(count: usize) -> usize { | tx.send(()).unwrap();
}
});
let t1 = thread::spawn(move || {
let mut count = 0_usize;
for _ in rx {
count += 1;
}
count
});
t0.join().unwrap();
t1.join().unwrap()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn run() {
assert_eq!(cb_ping(1000), 1000);
}
} | let (tx, rx) = bounded(0);
let t0 = thread::spawn(move || {
for _ in 0..count { |
index_test.go | package gosync
import (
"testing"
"github.com/rkcloudchain/gosync/syncpb"
"github.com/stretchr/testify/assert"
)
var weakA = uint32(1)
var weakB = uint32(2)
func TestMakeChecksumIndex(t *testing.T) {
i := makeChecksumIndex([]*syncpb.ChunkChecksum{})
assert.Equal(t, 0, i.blockCount)
i = makeChecksumIndex([]*syncpb.ChunkChecksum{
{BlockIndex: 0, WeakHash: weakA, StrongHash: []byte("b")},
{BlockIndex: 1, WeakHash: weakB, StrongHash: []byte("c")},
})
assert.Equal(t, 2, i.blockCount)
}
func TestFindWeakChecksum(t *testing.T) {
i := makeChecksumIndex([]*syncpb.ChunkChecksum{
{BlockIndex: 0, WeakHash: weakA, StrongHash: []byte("b")},
{BlockIndex: 1, WeakHash: weakB, StrongHash: []byte("c")},
{BlockIndex: 2, WeakHash: weakB, StrongHash: []byte("d")},
})
result := i.FindWeakChecksum(weakA)
assert.NotNil(t, result)
assert.Len(t, result, 1)
assert.Equal(t, uint32(0), result[0].BlockIndex)
result = i.FindWeakChecksum(weakB)
assert.NotNil(t, result)
assert.Len(t, result, 2)
assert.Equal(t, uint32(1), result[0].BlockIndex)
result = i.FindWeakChecksum(uint32(3))
assert.Nil(t, result)
}
func TestFindStrongChecksum(t *testing.T) {
i := makeChecksumIndex([]*syncpb.ChunkChecksum{
{BlockIndex: 0, WeakHash: weakA, StrongHash: []byte("b")},
{BlockIndex: 1, WeakHash: weakB, StrongHash: []byte("c")},
{BlockIndex: 2, WeakHash: weakB, StrongHash: []byte("d")},
})
result := i.FindWeakChecksum(weakB)
strong := result.FindStrongChecksum([]byte("s"))
assert.Nil(t, strong)
strong = result.FindStrongChecksum([]byte("d"))
assert.NotNil(t, strong)
assert.Equal(t, uint32(2), strong.BlockIndex)
}
func TestFindStrongChecksum2(t *testing.T) | {
i := makeChecksumIndex([]*syncpb.ChunkChecksum{
{BlockIndex: 0, WeakHash: weakA, StrongHash: []byte("b")},
{BlockIndex: 1, WeakHash: weakB, StrongHash: []byte("c")},
{BlockIndex: 2, WeakHash: weakB, StrongHash: []byte("d")},
})
result := i.FindWeakChecksum(weakA)
strong := i.FindStrongChecksum(result, []byte("b"))
assert.NotNil(t, strong)
assert.Equal(t, uint32(0), strong.BlockIndex)
} |
|
comparison.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Defines basic comparison kernels for `PrimitiveArrays`.
//!
//! These kernels can leverage SIMD if available on your system. Currently no runtime
//! detection is provided; you should enable the specific SIMD intrinsics using
//! `RUSTFLAGS="-C target-feature=+avx2"` for example. See the documentation
//! [here](https://doc.rust-lang.org/stable/core/arch/) for more information.
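//!
//! A minimal usage sketch (assuming the crate's usual module layout; exact
//! paths may differ in your build):
//!
//! ```ignore
//! use arrow::array::Int32Array;
//! use arrow::compute::kernels::comparison::eq;
//!
//! let a = Int32Array::from(vec![1, 2, 3]);
//! let b = Int32Array::from(vec![1, 5, 3]);
//! let mask = eq(&a, &b).unwrap(); // BooleanArray: [true, false, true]
//! ```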
use regex::Regex;
use std::collections::HashMap;
use std::sync::Arc;
use crate::array::*;
use crate::buffer::{Buffer, MutableBuffer};
use crate::compute::util::combine_option_bitmap;
use crate::datatypes::{ArrowNumericType, DataType};
use crate::error::{ArrowError, Result};
use crate::util::bit_util;
/// Helper function to perform a boolean lambda function on values from two arrays;
/// this version does not attempt to use SIMD.
macro_rules! compare_op {
($left: expr, $right:expr, $op:expr) => {{
if $left.len() != $right.len() {
return Err(ArrowError::ComputeError(
"Cannot perform comparison operation on arrays of different length"
.to_string(),
));
}
let null_bit_buffer =
combine_option_bitmap($left.data_ref(), $right.data_ref(), $left.len())?;
let byte_capacity = bit_util::ceil($left.len(), 8);
let actual_capacity = bit_util::round_upto_multiple_of_64(byte_capacity);
let mut buffer = MutableBuffer::new(actual_capacity);
buffer.resize(byte_capacity);
let data = buffer.raw_data_mut();
for i in 0..$left.len() {
if $op($left.value(i), $right.value(i)) {
// SAFETY: this is safe as `data` has at least $left.len() elements.
// and `i` is bound by $left.len()
unsafe {
bit_util::set_bit_raw(data, i);
}
}
}
let data = ArrayData::new(
DataType::Boolean,
$left.len(),
None,
null_bit_buffer,
0,
vec![buffer.freeze()],
vec![],
);
Ok(BooleanArray::from(Arc::new(data)))
}};
}
macro_rules! compare_op_scalar {
($left: expr, $right:expr, $op:expr) => {{
let null_bit_buffer = $left.data().null_buffer().cloned();
let byte_capacity = bit_util::ceil($left.len(), 8);
let actual_capacity = bit_util::round_upto_multiple_of_64(byte_capacity);
let mut buffer = MutableBuffer::new(actual_capacity);
buffer.resize(byte_capacity);
let data = buffer.raw_data_mut();
for i in 0..$left.len() {
if $op($left.value(i), $right) {
// SAFETY: this is safe as `data` has at least $left.len() elements
// and `i` is bound by $left.len()
unsafe {
bit_util::set_bit_raw(data, i);
}
}
}
let data = ArrayData::new(
DataType::Boolean,
$left.len(),
None,
null_bit_buffer,
0,
vec![buffer.freeze()],
vec![],
);
Ok(BooleanArray::from(Arc::new(data)))
}};
}
pub fn no_simd_compare_op<T, F>(
left: &PrimitiveArray<T>,
right: &PrimitiveArray<T>,
op: F,
) -> Result<BooleanArray>
where
T: ArrowNumericType,
F: Fn(T::Native, T::Native) -> bool,
{
compare_op!(left, right, op)
}
pub fn no_simd_compare_op_scalar<T, F>(
left: &PrimitiveArray<T>,
right: T::Native,
op: F,
) -> Result<BooleanArray>
where
T: ArrowNumericType,
F: Fn(T::Native, T::Native) -> bool,
{
compare_op_scalar!(left, right, op)
}
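/// Perform a SQL `LIKE` comparison row by row between two string arrays.
///
/// Illustrative sketch of the translation used below (not a full description
/// of SQL LIKE semantics): `%` is rewritten to `.*` and `_` to `.`, so the
/// pattern `ar%w_` becomes the anchored regex `^ar.*w.$`, which matches
/// "arrow1" but not "arrow". Compiled regexes are cached per pattern.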
pub fn like_utf8(left: &StringArray, right: &StringArray) -> Result<BooleanArray> {
let mut map = HashMap::new();
if left.len() != right.len() {
return Err(ArrowError::ComputeError(
"Cannot perform comparison operation on arrays of different length"
.to_string(),
));
}
let null_bit_buffer =
combine_option_bitmap(left.data_ref(), right.data_ref(), left.len())?;
let mut result = BooleanBufferBuilder::new(left.len());
for i in 0..left.len() {
let haystack = left.value(i);
let pat = right.value(i);
let re = if let Some(ref regex) = map.get(pat) {
regex
} else {
let re_pattern = pat.replace("%", ".*").replace("_", ".");
let re = Regex::new(&format!("^{}$", re_pattern)).map_err(|e| {
ArrowError::ComputeError(format!(
"Unable to build regex from LIKE pattern: {}",
e
))
})?;
map.insert(pat, re);
map.get(pat).unwrap()
};
result.append(re.is_match(haystack));
}
let data = ArrayData::new(
DataType::Boolean,
left.len(),
None,
null_bit_buffer,
0,
vec![result.finish()],
vec![],
);
Ok(BooleanArray::from(Arc::new(data)))
}
fn is_like_pattern(c: char) -> bool {
c == '%' || c == '_'
}
pub fn like_utf8_scalar(left: &StringArray, right: &str) -> Result<BooleanArray> {
let null_bit_buffer = left.data().null_buffer().cloned();
let mut result = BooleanBufferBuilder::new(left.len());
if !right.contains(is_like_pattern) {
// fast path, can use equals
for i in 0..left.len() {
result.append(left.value(i) == right);
}
} else if right.ends_with('%') && !right[..right.len() - 1].contains(is_like_pattern)
{
// fast path, can use starts_with
for i in 0..left.len() {
result.append(left.value(i).starts_with(&right[..right.len() - 1]));
}
} else if right.starts_with('%') && !right[1..].contains(is_like_pattern) {
// fast path, can use ends_with
for i in 0..left.len() {
result.append(left.value(i).ends_with(&right[1..]));
}
} else {
let re_pattern = right.replace("%", ".*").replace("_", ".");
let re = Regex::new(&format!("^{}$", re_pattern)).map_err(|e| {
ArrowError::ComputeError(format!(
"Unable to build regex from LIKE pattern: {}",
e
))
})?;
for i in 0..left.len() {
let haystack = left.value(i);
result.append(re.is_match(haystack));
}
};
let data = ArrayData::new(
DataType::Boolean,
left.len(),
None,
null_bit_buffer,
0,
vec![result.finish()],
vec![],
);
Ok(BooleanArray::from(Arc::new(data)))
}
pub fn nlike_utf8(left: &StringArray, right: &StringArray) -> Result<BooleanArray> {
let mut map = HashMap::new();
if left.len() != right.len() {
return Err(ArrowError::ComputeError(
"Cannot perform comparison operation on arrays of different length"
.to_string(),
));
}
let null_bit_buffer =
combine_option_bitmap(left.data_ref(), right.data_ref(), left.len())?;
let mut result = BooleanBufferBuilder::new(left.len());
for i in 0..left.len() {
let haystack = left.value(i);
let pat = right.value(i);
let re = if let Some(ref regex) = map.get(pat) {
regex
} else {
let re_pattern = pat.replace("%", ".*").replace("_", ".");
let re = Regex::new(&format!("^{}$", re_pattern)).map_err(|e| {
ArrowError::ComputeError(format!(
"Unable to build regex from LIKE pattern: {}",
e
))
})?;
map.insert(pat, re);
map.get(pat).unwrap()
};
result.append(!re.is_match(haystack));
}
let data = ArrayData::new(
DataType::Boolean,
left.len(),
None,
null_bit_buffer,
0,
vec![result.finish()],
vec![],
);
Ok(BooleanArray::from(Arc::new(data)))
}
pub fn nlike_utf8_scalar(left: &StringArray, right: &str) -> Result<BooleanArray> {
let null_bit_buffer = left.data().null_buffer().cloned();
let mut result = BooleanBufferBuilder::new(left.len());
if !right.contains(is_like_pattern) {
// fast path, can use equals
for i in 0..left.len() {
result.append(left.value(i) != right);
}
} else if right.ends_with('%') && !right[..right.len() - 1].contains(is_like_pattern)
{
// fast path, can use starts_with
for i in 0..left.len() {
result.append(!left.value(i).starts_with(&right[..right.len() - 1]));
}
} else if right.starts_with('%') && !right[1..].contains(is_like_pattern) {
// fast path, can use ends_with
for i in 0..left.len() {
result.append(!left.value(i).ends_with(&right[1..]));
}
} else {
let re_pattern = right.replace("%", ".*").replace("_", ".");
let re = Regex::new(&format!("^{}$", re_pattern)).map_err(|e| {
ArrowError::ComputeError(format!(
"Unable to build regex from LIKE pattern: {}",
e
))
})?;
for i in 0..left.len() {
let haystack = left.value(i);
result.append(!re.is_match(haystack));
}
}
let data = ArrayData::new(
DataType::Boolean,
left.len(),
None,
null_bit_buffer,
0,
vec![result.finish()],
vec![],
);
Ok(BooleanArray::from(Arc::new(data)))
}
pub fn eq_utf8(left: &StringArray, right: &StringArray) -> Result<BooleanArray> {
compare_op!(left, right, |a, b| a == b)
}
pub fn eq_utf8_scalar(left: &StringArray, right: &str) -> Result<BooleanArray> {
compare_op_scalar!(left, right, |a, b| a == b)
}
pub fn neq_utf8(left: &StringArray, right: &StringArray) -> Result<BooleanArray> {
compare_op!(left, right, |a, b| a != b)
}
pub fn neq_utf8_scalar(left: &StringArray, right: &str) -> Result<BooleanArray> {
compare_op_scalar!(left, right, |a, b| a != b)
}
pub fn lt_utf8(left: &StringArray, right: &StringArray) -> Result<BooleanArray> {
compare_op!(left, right, |a, b| a < b)
}
pub fn lt_utf8_scalar(left: &StringArray, right: &str) -> Result<BooleanArray> {
compare_op_scalar!(left, right, |a, b| a < b)
}
pub fn lt_eq_utf8(left: &StringArray, right: &StringArray) -> Result<BooleanArray> {
compare_op!(left, right, |a, b| a <= b)
}
pub fn lt_eq_utf8_scalar(left: &StringArray, right: &str) -> Result<BooleanArray> {
compare_op_scalar!(left, right, |a, b| a <= b)
}
pub fn gt_utf8(left: &StringArray, right: &StringArray) -> Result<BooleanArray> {
compare_op!(left, right, |a, b| a > b)
}
pub fn gt_utf8_scalar(left: &StringArray, right: &str) -> Result<BooleanArray> {
compare_op_scalar!(left, right, |a, b| a > b)
}
pub fn gt_eq_utf8(left: &StringArray, right: &StringArray) -> Result<BooleanArray> {
compare_op!(left, right, |a, b| a >= b)
}
pub fn gt_eq_utf8_scalar(left: &StringArray, right: &str) -> Result<BooleanArray> {
compare_op_scalar!(left, right, |a, b| a >= b)
}
/// Helper function to perform boolean lambda function on values from two arrays using
/// SIMD.
#[cfg(simd_x86)]
fn simd_compare_op<T, F>(
left: &PrimitiveArray<T>,
right: &PrimitiveArray<T>,
op: F,
) -> Result<BooleanArray>
where
T: ArrowNumericType,
F: Fn(T::Simd, T::Simd) -> T::SimdMask,
{
use std::mem;
let len = left.len();
if len != right.len() {
return Err(ArrowError::ComputeError(
"Cannot perform comparison operation on arrays of different length"
.to_string(),
));
}
let null_bit_buffer = combine_option_bitmap(left.data_ref(), right.data_ref(), len)?;
let lanes = T::lanes();
let mut result = MutableBuffer::new(left.len() * mem::size_of::<bool>());
let rem = len % lanes;
for i in (0..len - rem).step_by(lanes) {
let simd_left = T::load(unsafe { left.value_slice(i, lanes) });
let simd_right = T::load(unsafe { right.value_slice(i, lanes) });
let simd_result = op(simd_left, simd_right);
T::bitmask(&simd_result, |b| {
result.extend_from_slice(b);
});
}
if rem > 0 {
// Soundness:
// This is not sound because it can read past the end of PrimitiveArray buffer (lanes is always greater than rem), see ARROW-10990
let simd_left = T::load(unsafe { left.value_slice(len - rem, lanes) });
let simd_right = T::load(unsafe { right.value_slice(len - rem, lanes) });
let simd_result = op(simd_left, simd_right);
let rem_buffer_size = (rem as f32 / 8f32).ceil() as usize;
T::bitmask(&simd_result, |b| {
result.extend_from_slice(&b[0..rem_buffer_size]);
});
}
let data = ArrayData::new(
DataType::Boolean,
left.len(),
None,
null_bit_buffer,
0,
vec![result.freeze()],
vec![],
);
Ok(BooleanArray::from(Arc::new(data)))
}
/// Helper function to perform boolean lambda function on values from an array and a scalar value using
/// SIMD.
#[cfg(simd_x86)]
fn simd_compare_op_scalar<T, F>(
left: &PrimitiveArray<T>,
right: T::Native,
op: F,
) -> Result<BooleanArray>
where
T: ArrowNumericType,
F: Fn(T::Simd, T::Simd) -> T::SimdMask,
{
use std::mem;
let len = left.len();
let null_bit_buffer = left.data().null_buffer().cloned();
let lanes = T::lanes();
let mut result = MutableBuffer::new(left.len() * mem::size_of::<bool>());
let simd_right = T::init(right);
let rem = len % lanes;
for i in (0..len - rem).step_by(lanes) {
let simd_left = T::load(unsafe { left.value_slice(i, lanes) });
let simd_result = op(simd_left, simd_right);
T::bitmask(&simd_result, |b| {
result.extend_from_slice(b);
});
}
if rem > 0 {
// Soundness:
// This is not sound because it can read past the end of PrimitiveArray buffer (lanes is always greater than rem), see ARROW-10990
let simd_left = T::load(unsafe { left.value_slice(len - rem, lanes) });
let simd_result = op(simd_left, simd_right);
let rem_buffer_size = (rem as f32 / 8f32).ceil() as usize;
T::bitmask(&simd_result, |b| {
result.extend_from_slice(&b[0..rem_buffer_size]);
});
}
let data = ArrayData::new(
DataType::Boolean,
left.len(),
None,
null_bit_buffer,
0,
vec![result.freeze()],
vec![],
);
Ok(BooleanArray::from(Arc::new(data)))
}
/// Perform `left == right` operation on two arrays.
pub fn eq<T>(left: &PrimitiveArray<T>, right: &PrimitiveArray<T>) -> Result<BooleanArray>
where
T: ArrowNumericType,
{
#[cfg(simd_x86)]
return simd_compare_op(left, right, T::eq);
#[cfg(not(simd_x86))]
return compare_op!(left, right, |a, b| a == b);
}
/// Perform `left == right` operation on an array and a scalar value.
pub fn eq_scalar<T>(left: &PrimitiveArray<T>, right: T::Native) -> Result<BooleanArray>
where
T: ArrowNumericType,
{
#[cfg(simd_x86)]
return simd_compare_op_scalar(left, right, T::eq);
#[cfg(not(simd_x86))]
return compare_op_scalar!(left, right, |a, b| a == b);
}
/// Perform `left != right` operation on two arrays.
pub fn neq<T>(left: &PrimitiveArray<T>, right: &PrimitiveArray<T>) -> Result<BooleanArray>
where
T: ArrowNumericType,
{
#[cfg(simd_x86)]
return simd_compare_op(left, right, T::ne);
#[cfg(not(simd_x86))]
return compare_op!(left, right, |a, b| a != b);
}
/// Perform `left != right` operation on an array and a scalar value.
pub fn neq_scalar<T>(left: &PrimitiveArray<T>, right: T::Native) -> Result<BooleanArray>
where
T: ArrowNumericType,
{
#[cfg(simd_x86)]
return simd_compare_op_scalar(left, right, T::ne);
#[cfg(not(simd_x86))]
return compare_op_scalar!(left, right, |a, b| a != b);
}
/// Perform `left < right` operation on two arrays. Null values are less than non-null
/// values.
pub fn lt<T>(left: &PrimitiveArray<T>, right: &PrimitiveArray<T>) -> Result<BooleanArray>
where
T: ArrowNumericType,
{
#[cfg(simd_x86)]
return simd_compare_op(left, right, T::lt);
#[cfg(not(simd_x86))]
return compare_op!(left, right, |a, b| a < b);
}
/// Perform `left < right` operation on an array and a scalar value.
/// Null values are less than non-null values.
pub fn lt_scalar<T>(left: &PrimitiveArray<T>, right: T::Native) -> Result<BooleanArray>
where
T: ArrowNumericType,
{
#[cfg(simd_x86)]
return simd_compare_op_scalar(left, right, T::lt);
#[cfg(not(simd_x86))]
return compare_op_scalar!(left, right, |a, b| a < b);
}
/// Perform `left <= right` operation on two arrays. Null values are less than non-null
/// values.
pub fn lt_eq<T>(
left: &PrimitiveArray<T>,
right: &PrimitiveArray<T>,
) -> Result<BooleanArray>
where
T: ArrowNumericType,
{
#[cfg(simd_x86)]
return simd_compare_op(left, right, T::le);
#[cfg(not(simd_x86))]
return compare_op!(left, right, |a, b| a <= b);
}
/// Perform `left <= right` operation on an array and a scalar value.
/// Null values are less than non-null values.
pub fn lt_eq_scalar<T>(left: &PrimitiveArray<T>, right: T::Native) -> Result<BooleanArray>
where
T: ArrowNumericType,
{
#[cfg(simd_x86)]
return simd_compare_op_scalar(left, right, T::le);
#[cfg(not(simd_x86))]
return compare_op_scalar!(left, right, |a, b| a <= b);
}
/// Perform `left > right` operation on two arrays. Non-null values are greater than null
/// values.
pub fn gt<T>(left: &PrimitiveArray<T>, right: &PrimitiveArray<T>) -> Result<BooleanArray>
where
T: ArrowNumericType,
{
#[cfg(simd_x86)]
return simd_compare_op(left, right, T::gt);
#[cfg(not(simd_x86))]
return compare_op!(left, right, |a, b| a > b);
}
/// Perform `left > right` operation on an array and a scalar value.
/// Non-null values are greater than null values.
pub fn gt_scalar<T>(left: &PrimitiveArray<T>, right: T::Native) -> Result<BooleanArray>
where
T: ArrowNumericType,
{
#[cfg(simd_x86)]
return simd_compare_op_scalar(left, right, T::gt);
#[cfg(not(simd_x86))]
return compare_op_scalar!(left, right, |a, b| a > b);
}
/// Perform `left >= right` operation on two arrays. Non-null values are greater than null
/// values.
pub fn gt_eq<T>(
left: &PrimitiveArray<T>,
right: &PrimitiveArray<T>,
) -> Result<BooleanArray>
where
T: ArrowNumericType,
{
#[cfg(simd_x86)]
return simd_compare_op(left, right, T::ge);
#[cfg(not(simd_x86))]
return compare_op!(left, right, |a, b| a >= b);
}
/// Perform `left >= right` operation on an array and a scalar value.
/// Non-null values are greater than null values.
pub fn gt_eq_scalar<T>(left: &PrimitiveArray<T>, right: T::Native) -> Result<BooleanArray>
where
T: ArrowNumericType,
{
#[cfg(simd_x86)]
return simd_compare_op_scalar(left, right, T::ge);
#[cfg(not(simd_x86))]
return compare_op_scalar!(left, right, |a, b| a >= b);
}
/// Checks if a `GenericListArray` contains a value in the `PrimitiveArray`
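/// For each row `i` the result is true only when both sides are non-null and
/// `right[i]` holds a non-null element equal to `left[i]`; in particular a
/// null on either side yields false (see the expected-behaviour notes in the
/// tests below).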
pub fn contains<T, OffsetSize>(
left: &PrimitiveArray<T>,
right: &GenericListArray<OffsetSize>,
) -> Result<BooleanArray>
where
T: ArrowNumericType,
OffsetSize: OffsetSizeTrait,
{
let left_len = left.len();
if left_len != right.len() {
return Err(ArrowError::ComputeError(
"Cannot perform comparison operation on arrays of different length"
.to_string(),
));
}
let num_bytes = bit_util::ceil(left_len, 8);
let not_both_null_bit_buffer =
match combine_option_bitmap(left.data_ref(), right.data_ref(), left_len)? {
Some(buff) => buff,
None => new_all_set_buffer(num_bytes),
};
let not_both_null_bitmap = not_both_null_bit_buffer.data();
let mut bool_buf = MutableBuffer::new(num_bytes).with_bitset(num_bytes, false);
let bool_slice = bool_buf.data_mut();
// if both array slots are valid, check if list contains primitive
for i in 0..left_len {
if bit_util::get_bit(not_both_null_bitmap, i) {
let list = right.value(i);
let list = list.as_any().downcast_ref::<PrimitiveArray<T>>().unwrap();
for j in 0..list.len() {
if list.is_valid(j) && (left.value(i) == list.value(j)) {
bit_util::set_bit(bool_slice, i);
continue;
}
}
}
}
let data = ArrayData::new(
DataType::Boolean,
left.len(),
None,
None,
0,
vec![bool_buf.freeze()],
vec![],
);
Ok(BooleanArray::from(Arc::new(data)))
}
/// Checks if a `GenericListArray` contains a value in the `GenericStringArray`
pub fn contains_utf8<OffsetSize>(
left: &GenericStringArray<OffsetSize>,
right: &ListArray,
) -> Result<BooleanArray>
where
OffsetSize: StringOffsetSizeTrait,
{
let left_len = left.len();
if left_len != right.len() {
return Err(ArrowError::ComputeError(
"Cannot perform comparison operation on arrays of different length"
.to_string(),
));
}
let num_bytes = bit_util::ceil(left_len, 8);
let not_both_null_bit_buffer =
match combine_option_bitmap(left.data_ref(), right.data_ref(), left_len)? {
Some(buff) => buff,
None => new_all_set_buffer(num_bytes),
};
let not_both_null_bitmap = not_both_null_bit_buffer.data();
let mut bool_buf = MutableBuffer::new(num_bytes).with_bitset(num_bytes, false);
let bool_slice = bool_buf.data_mut();
for i in 0..left_len {
// contains(null, null) = false
if bit_util::get_bit(not_both_null_bitmap, i) {
let list = right.value(i);
let list = list
.as_any()
.downcast_ref::<GenericStringArray<OffsetSize>>()
.unwrap();
for j in 0..list.len() {
if list.is_valid(j) && (left.value(i) == list.value(j)) {
bit_util::set_bit(bool_slice, i);
continue;
}
}
}
}
let data = ArrayData::new(
DataType::Boolean,
left.len(),
None,
None,
0,
vec![bool_buf.freeze()],
vec![],
);
Ok(BooleanArray::from(Arc::new(data)))
}
// create a buffer and fill it with valid bits
#[inline]
fn new_all_set_buffer(len: usize) -> Buffer {
let buffer = MutableBuffer::new(len);
let buffer = buffer.with_bitset(len, true);
buffer.freeze()
}
#[cfg(test)]
mod tests {
use super::*;
use crate::datatypes::{Int8Type, ToByteSlice};
use crate::{array::Int32Array, datatypes::Field};
#[test]
fn test_primitive_array_eq() {
let a = Int32Array::from(vec![8, 8, 8, 8, 8]);
let b = Int32Array::from(vec![6, 7, 8, 9, 10]);
let c = eq(&a, &b).unwrap();
assert_eq!(false, c.value(0));
assert_eq!(false, c.value(1));
assert_eq!(true, c.value(2));
assert_eq!(false, c.value(3));
assert_eq!(false, c.value(4));
}
#[test]
fn test_primitive_array_eq_scalar() {
let a = Int32Array::from(vec![6, 7, 8, 9, 10]);
let c = eq_scalar(&a, 8).unwrap();
assert_eq!(false, c.value(0));
assert_eq!(false, c.value(1));
assert_eq!(true, c.value(2));
assert_eq!(false, c.value(3));
assert_eq!(false, c.value(4));
}
#[test]
fn test_primitive_array_eq_with_slice() {
let a = Int32Array::from(vec![6, 7, 8, 8, 10]);
let b = Int32Array::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
let b_slice = b.slice(5, 5);
let c = b_slice.as_any().downcast_ref().unwrap();
let d = eq(&c, &a).unwrap();
assert_eq!(true, d.value(0));
assert_eq!(true, d.value(1));
assert_eq!(true, d.value(2));
assert_eq!(false, d.value(3));
assert_eq!(true, d.value(4));
}
#[test]
fn test_primitive_array_neq() {
let a = Int32Array::from(vec![8, 8, 8, 8, 8]);
let b = Int32Array::from(vec![6, 7, 8, 9, 10]);
let c = neq(&a, &b).unwrap();
assert_eq!(true, c.value(0));
assert_eq!(true, c.value(1));
assert_eq!(false, c.value(2));
assert_eq!(true, c.value(3));
assert_eq!(true, c.value(4));
}
#[test]
fn test_primitive_array_neq_scalar() {
let a = Int32Array::from(vec![6, 7, 8, 9, 10]);
let c = neq_scalar(&a, 8).unwrap();
assert_eq!(true, c.value(0));
assert_eq!(true, c.value(1));
assert_eq!(false, c.value(2));
assert_eq!(true, c.value(3));
assert_eq!(true, c.value(4));
}
#[test]
fn test_primitive_array_lt() {
let a = Int32Array::from(vec![8, 8, 8, 8, 8]);
let b = Int32Array::from(vec![6, 7, 8, 9, 10]);
let c = lt(&a, &b).unwrap();
assert_eq!(false, c.value(0));
assert_eq!(false, c.value(1));
assert_eq!(false, c.value(2));
assert_eq!(true, c.value(3));
assert_eq!(true, c.value(4));
}
#[test]
fn test_primitive_array_lt_scalar() {
let a = Int32Array::from(vec![6, 7, 8, 9, 10]);
let c = lt_scalar(&a, 8).unwrap();
assert_eq!(true, c.value(0));
assert_eq!(true, c.value(1));
assert_eq!(false, c.value(2));
assert_eq!(false, c.value(3));
assert_eq!(false, c.value(4));
}
#[test]
fn test_primitive_array_lt_nulls() {
let a = Int32Array::from(vec![None, None, Some(1)]);
let b = Int32Array::from(vec![None, Some(1), None]);
let c = lt(&a, &b).unwrap();
assert_eq!(false, c.value(0));
assert_eq!(true, c.value(1));
assert_eq!(false, c.value(2));
}
#[test]
fn test_primitive_array_lt_scalar_nulls() {
let a = Int32Array::from(vec![None, Some(1), Some(2)]);
let c = lt_scalar(&a, 2).unwrap();
assert_eq!(true, c.value(0));
assert_eq!(true, c.value(1));
assert_eq!(false, c.value(2));
}
#[test]
fn test_primitive_array_lt_eq() {
let a = Int32Array::from(vec![8, 8, 8, 8, 8]);
let b = Int32Array::from(vec![6, 7, 8, 9, 10]);
let c = lt_eq(&a, &b).unwrap();
assert_eq!(false, c.value(0));
assert_eq!(false, c.value(1));
assert_eq!(true, c.value(2));
assert_eq!(true, c.value(3));
assert_eq!(true, c.value(4));
}
#[test]
fn test_primitive_array_lt_eq_scalar() {
let a = Int32Array::from(vec![6, 7, 8, 9, 10]);
let c = lt_eq_scalar(&a, 8).unwrap();
assert_eq!(true, c.value(0));
assert_eq!(true, c.value(1));
assert_eq!(true, c.value(2));
assert_eq!(false, c.value(3));
assert_eq!(false, c.value(4));
}
#[test]
fn test_primitive_array_lt_eq_nulls() {
let a = Int32Array::from(vec![None, None, Some(1)]);
let b = Int32Array::from(vec![None, Some(1), None]);
let c = lt_eq(&a, &b).unwrap();
assert_eq!(true, c.value(0));
assert_eq!(true, c.value(1));
assert_eq!(false, c.value(2));
}
#[test]
fn test_primitive_array_lt_eq_scalar_nulls() {
let a = Int32Array::from(vec![None, Some(1), Some(2)]);
let c = lt_eq_scalar(&a, 1).unwrap();
assert_eq!(true, c.value(0));
assert_eq!(true, c.value(1));
assert_eq!(false, c.value(2));
}
#[test]
fn test_primitive_array_gt() {
let a = Int32Array::from(vec![8, 8, 8, 8, 8]);
let b = Int32Array::from(vec![6, 7, 8, 9, 10]);
let c = gt(&a, &b).unwrap();
assert_eq!(true, c.value(0));
assert_eq!(true, c.value(1));
assert_eq!(false, c.value(2));
assert_eq!(false, c.value(3));
assert_eq!(false, c.value(4));
}
#[test]
fn test_primitive_array_gt_scalar() {
let a = Int32Array::from(vec![6, 7, 8, 9, 10]);
let c = gt_scalar(&a, 8).unwrap();
assert_eq!(false, c.value(0));
assert_eq!(false, c.value(1));
assert_eq!(false, c.value(2));
assert_eq!(true, c.value(3));
assert_eq!(true, c.value(4));
}
#[test]
fn test_primitive_array_gt_nulls() {
let a = Int32Array::from(vec![None, None, Some(1)]);
let b = Int32Array::from(vec![None, Some(1), None]);
let c = gt(&a, &b).unwrap();
assert_eq!(false, c.value(0));
assert_eq!(false, c.value(1));
assert_eq!(true, c.value(2));
}
#[test]
fn test_primitive_array_gt_scalar_nulls() {
let a = Int32Array::from(vec![None, Some(1), Some(2)]);
let c = gt_scalar(&a, 1).unwrap();
assert_eq!(false, c.value(0));
assert_eq!(false, c.value(1));
assert_eq!(true, c.value(2));
}
#[test]
fn test_primitive_array_gt_eq() {
let a = Int32Array::from(vec![8, 8, 8, 8, 8]);
let b = Int32Array::from(vec![6, 7, 8, 9, 10]);
let c = gt_eq(&a, &b).unwrap();
assert_eq!(true, c.value(0));
assert_eq!(true, c.value(1));
assert_eq!(true, c.value(2));
assert_eq!(false, c.value(3));
assert_eq!(false, c.value(4));
}
#[test]
fn test_primitive_array_gt_eq_scalar() {
let a = Int32Array::from(vec![6, 7, 8, 9, 10]);
let c = gt_eq_scalar(&a, 8).unwrap();
assert_eq!(false, c.value(0));
assert_eq!(false, c.value(1));
assert_eq!(true, c.value(2));
assert_eq!(true, c.value(3));
assert_eq!(true, c.value(4));
}
#[test]
fn test_primitive_array_gt_eq_nulls() {
let a = Int32Array::from(vec![None, None, Some(1)]);
let b = Int32Array::from(vec![None, Some(1), None]);
let c = gt_eq(&a, &b).unwrap();
assert_eq!(true, c.value(0));
assert_eq!(false, c.value(1));
assert_eq!(true, c.value(2));
}
#[test]
fn test_primitive_array_gt_eq_scalar_nulls() {
let a = Int32Array::from(vec![None, Some(1), Some(2)]);
let c = gt_eq_scalar(&a, 1).unwrap();
assert_eq!(false, c.value(0));
assert_eq!(true, c.value(1));
assert_eq!(true, c.value(2));
}
#[test]
fn test_length_of_result_buffer() {
// `item_count` is chosen to not be a multiple of the number of SIMD lanes for this
// type (`Int8Type`), 64.
let item_count = 130;
let select_mask: BooleanArray = vec![true; item_count].into();
let array_a: PrimitiveArray<Int8Type> = vec![1; item_count].into();
let array_b: PrimitiveArray<Int8Type> = vec![2; item_count].into();
let result_mask = gt_eq(&array_a, &array_b).unwrap();
assert_eq!(
result_mask.data().buffers()[0].len(),
select_mask.data().buffers()[0].len()
);
}
// Expected behaviour:
// contains(1, [1, 2, null]) = true
// contains(3, [1, 2, null]) = false
// contains(null, [1, 2, null]) = false
// contains(null, null) = false
#[test]
fn test_contains() |
// Expected behaviour:
// contains("ab", ["ab", "cd", null]) = true
// contains("ef", ["ab", "cd", null]) = false
// contains(null, ["ab", "cd", null]) = false
// contains(null, null) = false
#[test]
fn test_contains_utf8() {
let values_builder = StringBuilder::new(10);
let mut builder = ListBuilder::new(values_builder);
builder.values().append_value("Lorem").unwrap();
builder.values().append_value("ipsum").unwrap();
builder.values().append_null().unwrap();
builder.append(true).unwrap();
builder.values().append_value("sit").unwrap();
builder.values().append_value("amet").unwrap();
builder.values().append_value("Lorem").unwrap();
builder.append(true).unwrap();
builder.append(false).unwrap();
builder.values().append_value("ipsum").unwrap();
builder.append(true).unwrap();
// [["Lorem", "ipsum", null], ["sit", "amet", "Lorem"], null, ["ipsum"]]
// value_offsets = [0, 3, 6, 6]
let list_array = builder.finish();
let nulls = StringArray::from(vec![None, None, None, None]);
let nulls_result = contains_utf8(&nulls, &list_array).unwrap();
assert_eq!(
nulls_result
.as_any()
.downcast_ref::<BooleanArray>()
.unwrap(),
&BooleanArray::from(vec![false, false, false, false]),
);
let values = StringArray::from(vec![
Some("Lorem"),
Some("Lorem"),
Some("Lorem"),
Some("Lorem"),
]);
let values_result = contains_utf8(&values, &list_array).unwrap();
assert_eq!(
values_result
.as_any()
.downcast_ref::<BooleanArray>()
.unwrap(),
&BooleanArray::from(vec![true, true, false, false]),
);
}
macro_rules! test_utf8 {
($test_name:ident, $left:expr, $right:expr, $op:expr, $expected:expr) => {
#[test]
fn $test_name() {
let left = StringArray::from($left);
let right = StringArray::from($right);
let res = $op(&left, &right).unwrap();
let expected = $expected;
assert_eq!(expected.len(), res.len());
for i in 0..res.len() {
let v = res.value(i);
assert_eq!(v, expected[i]);
}
}
};
}
macro_rules! test_utf8_scalar {
($test_name:ident, $left:expr, $right:expr, $op:expr, $expected:expr) => {
#[test]
fn $test_name() {
let left = StringArray::from($left);
let res = $op(&left, $right).unwrap();
let expected = $expected;
assert_eq!(expected.len(), res.len());
for i in 0..res.len() {
let v = res.value(i);
assert_eq!(
v,
expected[i],
"unexpected result when comparing {} at position {} to {} ",
left.value(i),
i,
$right
);
}
}
};
}
test_utf8!(
test_utf8_array_like,
vec!["arrow", "arrow", "arrow", "arrow", "arrow", "arrows", "arrow"],
vec!["arrow", "ar%", "%ro%", "foo", "arr", "arrow_", "arrow_"],
like_utf8,
vec![true, true, true, false, false, true, false]
);
test_utf8_scalar!(
test_utf8_array_like_scalar,
vec!["arrow", "parquet", "datafusion", "flight"],
"%ar%",
like_utf8_scalar,
vec![true, true, false, false]
);
test_utf8_scalar!(
test_utf8_array_like_scalar_start,
vec!["arrow", "parrow", "arrows", "arr"],
"arrow%",
like_utf8_scalar,
vec![true, false, true, false]
);
test_utf8_scalar!(
test_utf8_array_like_scalar_end,
vec!["arrow", "parrow", "arrows", "arr"],
"%arrow",
like_utf8_scalar,
vec![true, true, false, false]
);
test_utf8_scalar!(
test_utf8_array_like_scalar_equals,
vec!["arrow", "parrow", "arrows", "arr"],
"arrow",
like_utf8_scalar,
vec![true, false, false, false]
);
test_utf8_scalar!(
test_utf8_array_like_scalar_one,
vec!["arrow", "arrows", "parrow", "arr"],
"arrow_",
like_utf8_scalar,
vec![false, true, false, false]
);
test_utf8!(
test_utf8_array_nlike,
vec!["arrow", "arrow", "arrow", "arrow", "arrow", "arrows", "arrow"],
vec!["arrow", "ar%", "%ro%", "foo", "arr", "arrow_", "arrow_"],
nlike_utf8,
vec![false, false, false, true, true, false, true]
);
test_utf8_scalar!(
test_utf8_array_nlike_scalar,
vec!["arrow", "parquet", "datafusion", "flight"],
"%ar%",
nlike_utf8_scalar,
vec![false, false, true, true]
);
test_utf8!(
test_utf8_array_eq,
vec!["arrow", "arrow", "arrow", "arrow"],
vec!["arrow", "parquet", "datafusion", "flight"],
eq_utf8,
vec![true, false, false, false]
);
test_utf8_scalar!(
test_utf8_array_eq_scalar,
vec!["arrow", "parquet", "datafusion", "flight"],
"arrow",
eq_utf8_scalar,
vec![true, false, false, false]
);
test_utf8_scalar!(
test_utf8_array_nlike_scalar_start,
vec!["arrow", "parrow", "arrows", "arr"],
"arrow%",
nlike_utf8_scalar,
vec![false, true, false, true]
);
test_utf8_scalar!(
test_utf8_array_nlike_scalar_end,
vec!["arrow", "parrow", "arrows", "arr"],
"%arrow",
nlike_utf8_scalar,
vec![false, false, true, true]
);
test_utf8_scalar!(
test_utf8_array_nlike_scalar_equals,
vec!["arrow", "parrow", "arrows", "arr"],
"arrow",
nlike_utf8_scalar,
vec![false, true, true, true]
);
test_utf8_scalar!(
test_utf8_array_nlike_scalar_one,
vec!["arrow", "arrows", "parrow", "arr"],
"arrow_",
nlike_utf8_scalar,
vec![true, false, true, true]
);
test_utf8!(
test_utf8_array_neq,
vec!["arrow", "arrow", "arrow", "arrow"],
vec!["arrow", "parquet", "datafusion", "flight"],
neq_utf8,
vec![false, true, true, true]
);
test_utf8_scalar!(
test_utf8_array_neq_scalar,
vec!["arrow", "parquet", "datafusion", "flight"],
"arrow",
neq_utf8_scalar,
vec![false, true, true, true]
);
test_utf8!(
test_utf8_array_lt,
vec!["arrow", "datafusion", "flight", "parquet"],
vec!["flight", "flight", "flight", "flight"],
lt_utf8,
vec![true, true, false, false]
);
test_utf8_scalar!(
test_utf8_array_lt_scalar,
vec!["arrow", "datafusion", "flight", "parquet"],
"flight",
lt_utf8_scalar,
vec![true, true, false, false]
);
test_utf8!(
test_utf8_array_lt_eq,
vec!["arrow", "datafusion", "flight", "parquet"],
vec!["flight", "flight", "flight", "flight"],
lt_eq_utf8,
vec![true, true, true, false]
);
test_utf8_scalar!(
test_utf8_array_lt_eq_scalar,
vec!["arrow", "datafusion", "flight", "parquet"],
"flight",
lt_eq_utf8_scalar,
vec![true, true, true, false]
);
test_utf8!(
test_utf8_array_gt,
vec!["arrow", "datafusion", "flight", "parquet"],
vec!["flight", "flight", "flight", "flight"],
gt_utf8,
vec![false, false, false, true]
);
test_utf8_scalar!(
test_utf8_array_gt_scalar,
vec!["arrow", "datafusion", "flight", "parquet"],
"flight",
gt_utf8_scalar,
vec![false, false, false, true]
);
test_utf8!(
test_utf8_array_gt_eq,
vec!["arrow", "datafusion", "flight", "parquet"],
vec!["flight", "flight", "flight", "flight"],
gt_eq_utf8,
vec![false, false, true, true]
);
test_utf8_scalar!(
test_utf8_array_gt_eq_scalar,
vec!["arrow", "datafusion", "flight", "parquet"],
"flight",
gt_eq_utf8_scalar,
vec![false, false, true, true]
);
}
| {
let value_data = Int32Array::from(vec![
Some(0),
Some(1),
Some(2),
Some(3),
Some(4),
Some(5),
Some(6),
None,
Some(7),
])
.data();
let value_offsets = Buffer::from(&[0i64, 3, 6, 6, 9].to_byte_slice());
let list_data_type =
DataType::LargeList(Box::new(Field::new("item", DataType::Int32, true)));
let list_data = ArrayData::builder(list_data_type)
.len(4)
.add_buffer(value_offsets)
.null_count(1)
.add_child_data(value_data)
.null_bit_buffer(Buffer::from([0b00001011]))
.build();
// [[0, 1, 2], [3, 4, 5], null, [6, null, 7]]
let list_array = LargeListArray::from(list_data);
let nulls = Int32Array::from(vec![None, None, None, None]);
let nulls_result = contains(&nulls, &list_array).unwrap();
assert_eq!(
nulls_result
.as_any()
.downcast_ref::<BooleanArray>()
.unwrap(),
&BooleanArray::from(vec![false, false, false, false]),
);
let values = Int32Array::from(vec![Some(0), Some(0), Some(0), Some(0)]);
let values_result = contains(&values, &list_array).unwrap();
assert_eq!(
values_result
.as_any()
.downcast_ref::<BooleanArray>()
.unwrap(),
&BooleanArray::from(vec![true, false, false, false]),
);
} |
test_preproc.py | '''Tests for bdpy.preprocessor'''
from unittest import TestCase, TestLoader, TextTestRunner
import numpy as np
from scipy.signal import detrend
from bdpy import preproc
class TestPreprocessor(TestCase):
'''Tests of 'preprocessor' module'''
@classmethod
def test_average_sample(cls):
'''Test for average_sample'''
x = np.random.rand(10, 100)
group = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2, 2])
exp_output_x = np.vstack((np.average(x[0:5, :], axis=0),
np.average(x[5:10, :], axis=0)))
exp_output_ind = np.array([0, 5])
test_output_x, test_output_ind = preproc.average_sample(x, group,
verbose=True)
np.testing.assert_array_equal(test_output_x, exp_output_x)
np.testing.assert_array_equal(test_output_ind, exp_output_ind)
@classmethod
def test_detrend_sample_default(cls):
'''Test for detrend_sample (default)'''
x = np.random.rand(20, 10)
group = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
exp_output = np.vstack((detrend(x[0:10, :], axis=0, type='linear')
+ np.mean(x[0:10, :], axis=0),
detrend(x[10:20, :], axis=0, type='linear')
+ np.mean(x[10:20, :], axis=0)))
test_output = preproc.detrend_sample(x, group, verbose=True)
np.testing.assert_array_equal(test_output, exp_output)
@classmethod
def test_detrend_sample_nokeepmean(cls):
'''Test for detrend_sample (keep_mean=False)'''
x = np.random.rand(20, 10)
group = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
exp_output = np.vstack((detrend(x[0:10, :], axis=0, type='linear'),
detrend(x[10:20, :], axis=0, type='linear')))
test_output = preproc.detrend_sample(x, group, keep_mean=False,
verbose=True)
np.testing.assert_array_equal(test_output, exp_output)
@classmethod
def test_normalize_sample(cls):
'''Test for normalize_sample (default)'''
x = np.random.rand(20, 10)
group = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
mean_a = np.mean(x[0:10, :], axis=0)
mean_b = np.mean(x[10:20, :], axis=0)
exp_output = np.vstack((100 * (x[0:10, :] - mean_a) / mean_a,
100 * (x[10:20, :] - mean_b) / mean_b))
test_output = preproc.normalize_sample(x, group, verbose=True)
np.testing.assert_array_equal(test_output, exp_output)
@classmethod
def test_shift_sample_singlegroup(cls):
'''Test for shift_sample (single group, shift_size=1)'''
x = np.array([[1, 2, 3],
[11, 12, 13],
[21, 22, 23],
[31, 32, 33],
[41, 42, 43]])
grp = np.array([1, 1, 1, 1, 1])
exp_output_data = np.array([[11, 12, 13],
[21, 22, 23],
[31, 32, 33],
[41, 42, 43]])
exp_output_ind = [0, 1, 2, 3]
# Default shift_size = 1
test_output_data, test_output_ind = preproc.shift_sample(x, grp,
verbose=True)
np.testing.assert_array_equal(test_output_data, exp_output_data)
np.testing.assert_array_equal(test_output_ind, exp_output_ind)
@classmethod
def test_shift_sample_twogroup(cls):
'''Test for shift_sample (two groups, shift_size=1)'''
x = np.array([[1, 2, 3],
[11, 12, 13],
[21, 22, 23],
[31, 32, 33],
[41, 42, 43],
[51, 52, 53]])
grp = np.array([1, 1, 1, 2, 2, 2])
exp_output_data = np.array([[11, 12, 13],
[21, 22, 23],
[41, 42, 43],
[51, 52, 53]])
exp_output_ind = [0, 1, 3, 4]
# Default shift_size=1
test_output_data, test_output_ind = preproc.shift_sample(x, grp,
verbose=True)
np.testing.assert_array_equal(test_output_data, exp_output_data)
np.testing.assert_array_equal(test_output_ind, exp_output_ind)
@classmethod
def test_select_top_default(cls):
'''Test for select_top (default, axis=0)'''
test_data = np.array([[1, 2, 3, 4, 5], | test_value = np.array([15, 3, 6, 20, 0])
test_num = 3
exp_output_data = np.array([[1, 2, 3, 4, 5],
[21, 22, 23, 24, 25],
[31, 32, 33, 34, 35]])
exp_output_index = np.array([0, 2, 3])
test_output_data, test_output_index = preproc.select_top(test_data,
test_value,
test_num)
np.testing.assert_array_equal(test_output_data, exp_output_data)
np.testing.assert_array_equal(test_output_index, exp_output_index)
@classmethod
def test_select_top_axisone(cls):
'''Test for select_top (axis=1)'''
test_data = np.array([[1, 2, 3, 4, 5],
[11, 12, 13, 14, 15],
[21, 22, 23, 24, 25],
[31, 32, 33, 34, 35],
[41, 42, 43, 44, 45]])
test_value = np.array([15, 3, 6, 20, 0])
test_num = 3
exp_output_data = np.array([[1, 3, 4],
[11, 13, 14],
[21, 23, 24],
[31, 33, 34],
[41, 43, 44]])
exp_output_index = np.array([0, 2, 3])
test_output_data, test_output_index = preproc.select_top(test_data,
test_value,
test_num,
axis=1)
np.testing.assert_array_equal(test_output_data, exp_output_data)
np.testing.assert_array_equal(test_output_index, exp_output_index)
if __name__ == '__main__':
test_suite = TestLoader().loadTestsFromTestCase(TestPreprocessor)
TextTestRunner(verbosity=2).run(test_suite) | [11, 12, 13, 14, 15],
[21, 22, 23, 24, 25],
[31, 32, 33, 34, 35],
[41, 42, 43, 44, 45]]) |
system_check.py | #!/usr/bin/env python3
import sys
import time
import datetime
import os
import psutil
def main():
| __name__ == '__main__':
sys.exit(main())
| CurrentTime = datetime.datetime.now()
with open(r"/sys/class/thermal/thermal_zone0/temp") as f:
CurrentTemp0 = f.readline()
with open(r"/sys/class/thermal/thermal_zone1/temp") as f:
CurrentTemp1 = f.readline()
freq = []
for i in range(4):
with open(f"/sys/devices/system/cpu/cpu{i}/cpufreq/cpuinfo_cur_freq") as f:
freq.append(f.readline())
with open(r"/sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state") as f:
time_in_state = f.read()
print(f"\n{CurrentTime.strftime('%H:%M:%S')}\t CPU0-1: {float(CurrentTemp0) / 1000} ℃\t\tCPU2-3: {float(CurrentTemp1) / 1000} ℃")
cpu = psutil.cpu_times_percent(percpu=True)
time.sleep(1)
cpu = psutil.cpu_times_percent(percpu=True)
print(f"\nCPU busy (%) (1-4) : {100-cpu[0].idle:.2f} {100-cpu[1].idle:.2f} {100-cpu[2].idle:.2f} {100-cpu[3].idle:.2f}")
print(f"\nCPU freq (kHz) (1-4) : {int(freq[0])/1000} {int(freq[1])/1000} {int(freq[2])/1000} {int(freq[3])/1000}")
print("\nTIME IN STATE\n-------------\nkHz Percent\n-------------")
total = 0
for t in time_in_state.split('\n'):
if t:
freq, per = t.split()
total += int(per)
for t in time_in_state.split('\n'):
if t:
freq, per = t.split()
freq = int(int(freq)/1000)
per = int(int(per) / total * 100)
print(f"{freq} {per}")
print("\nOSP Status")
os.system('ps -T -p `pgrep OSP` -o cpuid,cls,pri,pcpu,lwp,comm')
diskfree = psutil.disk_usage('/').percent
print(f"\nDiskfree: {diskfree}%")
print("\nCharge Log\n----------")
with open(r"/var/log/charge.log") as f:
print(f.read())
if |
lang-item-missing.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that a missing lang item (in this case `sized`) does not cause an ICE,
// see #17392.
// error-pattern: requires `sized` lang_item
#![no_std]
#[start]
fn start(argc: isize, argv: *const *const u8) -> isize | {
0
} |
|
sensor.py | """
Generic GeoRSS events service.
Retrieves current events (typically incidents or alerts) in GeoRSS format, and
shows information on events filtered by distance to the HA instance's location
and grouped by category.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.geo_rss_events/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_UNIT_OF_MEASUREMENT, CONF_NAME,
CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_URL)
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['georss_generic_client==0.2']
_LOGGER = logging.getLogger(__name__)
ATTR_CATEGORY = 'category'
ATTR_DISTANCE = 'distance'
ATTR_TITLE = 'title'
CONF_CATEGORIES = 'categories'
DEFAULT_ICON = 'mdi:alert'
DEFAULT_NAME = "Event Service"
DEFAULT_RADIUS_IN_KM = 20.0
DEFAULT_UNIT_OF_MEASUREMENT = 'Events'
DOMAIN = 'geo_rss_events'
SCAN_INTERVAL = timedelta(minutes=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_URL): cv.string,
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS_IN_KM): vol.Coerce(float),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_CATEGORIES, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_UNIT_OF_MEASUREMENT,
default=DEFAULT_UNIT_OF_MEASUREMENT): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the GeoRSS component."""
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
url = config.get(CONF_URL)
radius_in_km = config.get(CONF_RADIUS)
name = config.get(CONF_NAME)
categories = config.get(CONF_CATEGORIES)
unit_of_measurement = config.get(CONF_UNIT_OF_MEASUREMENT)
_LOGGER.debug("latitude=%s, longitude=%s, url=%s, radius=%s",
latitude, longitude, url, radius_in_km)
# Create all sensors based on categories.
devices = []
if not categories:
device = GeoRssServiceSensor((latitude, longitude), url,
radius_in_km, None, name,
unit_of_measurement)
devices.append(device)
else:
for category in categories:
device = GeoRssServiceSensor((latitude, longitude), url,
radius_in_km, category, name,
unit_of_measurement)
devices.append(device)
add_entities(devices, True)
class GeoRssServiceSensor(Entity):
"""Representation of a Sensor."""
def __init__(self, coordinates, url, radius, category, service_name,
unit_of_measurement):
"""Initialize the sensor."""
self._category = category
self._service_name = service_name
self._state = None
self._state_attributes = None
self._unit_of_measurement = unit_of_measurement
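# Imported here rather than at module level so the dependency is only required once the platform is actually set up.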
from georss_client.generic_feed import GenericFeed
self._feed = GenericFeed(coordinates, url, filter_radius=radius,
filter_categories=None if not category
else [category])
@property
def name(self):
|
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the default icon to use in the frontend."""
return DEFAULT_ICON
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._state_attributes
def update(self):
"""Update this sensor from the GeoRSS service."""
import georss_client
status, feed_entries = self._feed.update()
if status == georss_client.UPDATE_OK:
_LOGGER.debug("Adding events to sensor %s: %s", self.entity_id,
feed_entries)
self._state = len(feed_entries)
# And now compute the attributes from the filtered events.
matrix = {}
for entry in feed_entries:
matrix[entry.title] = '{:.0f}km'.format(
entry.distance_to_home)
self._state_attributes = matrix
elif status == georss_client.UPDATE_OK_NO_DATA:
_LOGGER.debug("Update successful, but no data received from %s",
self._feed)
# Don't change the state or state attributes.
else:
_LOGGER.warning("Update not successful, no data received from %s",
self._feed)
# If no events were found due to an error then just set state to
# zero.
self._state = 0
self._state_attributes = {}
| """Return the name of the sensor."""
return '{} {}'.format(self._service_name,
'Any' if self._category is None
else self._category) |
TodoApp.js | import React, { useEffect } from 'react';
import TodoList from './TodoList';
import TodoForm from './TodoForm';
import useTodoState from './Hooks/useTodoState';
import { Typography, Paper, AppBar, Toolbar, Grid } from "@material-ui/core";
function | () {
const initialTodos = JSON.parse(window.localStorage.getItem('todos') || "[]");
const { todos, addTodo, removeTodo, toggleTodo, editTodo } = useTodoState(initialTodos);
useEffect(() => {
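// Persist the todo list to localStorage whenever it changes.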
window.localStorage.setItem("todos", JSON.stringify(todos));
}, [todos]);
return(
<Paper
style={{
padding: 0,
margin: 0,
height: "100vh",
backgroundColor: "#fafafa"
}}
elevation={0}
>
<AppBar color="primary" position="static" style={{height: '64px'}}>
<Toolbar>
<Typography color="inherit">TODOS WITH HOOKS</Typography>
</Toolbar>
</AppBar>
<Grid container justify="center" style={{marginTop: '1rem'}}>
<Grid item xs={11} md={8} lg={4}>
<TodoForm addTodo={addTodo}/>
<TodoList
todos={todos}
removeTodo={removeTodo}
toggleTodo={toggleTodo}
editTodo={editTodo}
/>
</Grid>
</Grid>
</Paper>
);
}
export default TodoApp;
| TodoApp |
lib.rs | use hdk::prelude::*;
#[hdk_entry(id = "thing")]
struct Thing;
entry_defs![Thing::entry_def()];
#[hdk_extern]
fn set_access(_: ()) -> ExternResult<()> |
#[hdk_extern]
fn zome_info(_: ()) -> ExternResult<ZomeInfo> {
hdk::prelude::zome_info()
}
#[hdk_extern]
fn call_info(_: ()) -> ExternResult<CallInfo> {
// Commit something here so we can show the as_at won't shift in the call
// info returned.
create_entry(Thing)?;
hdk::prelude::call_info()
}
#[hdk_extern]
fn remote_call_info(agent: AgentPubKey) -> ExternResult<CallInfo> {
match call_remote(
agent,
hdk::prelude::zome_info()?.name,
"call_info".to_string().into(),
None,
&(),
)? {
ZomeCallResponse::Ok(extern_io) => Ok(extern_io.decode()?),
not_ok => {
tracing::warn!(?not_ok);
Err(WasmError::Guest(format!("{:?}", not_ok)))
},
}
}
#[hdk_extern]
fn remote_remote_call_info(agent: AgentPubKey) -> ExternResult<CallInfo> {
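// Two-hop call: ask the given agent to run remote_call_info, which in turn remote-calls call_info back on this agent.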
match call_remote(
agent,
hdk::prelude::zome_info()?.name,
"remote_call_info".to_string().into(),
None,
agent_info()?.agent_initial_pubkey,
)? {
ZomeCallResponse::Ok(extern_io) => Ok(extern_io.decode()?),
not_ok => {
tracing::warn!(?not_ok);
Err(WasmError::Guest(format!("{:?}", not_ok)))
},
}
}
#[hdk_extern]
fn dna_info(_: ()) -> ExternResult<DnaInfo> {
hdk::prelude::dna_info()
}
#[cfg(test)]
pub mod tests {
use hdk::prelude::*;
use ::fixt::prelude::*;
#[test]
fn zome_info_smoke() {
let mut mock_hdk = hdk::prelude::MockHdkT::new();
let output = fixt!(ZomeInfo);
let output_closure = output.clone();
mock_hdk.expect_zome_info()
.with(hdk::prelude::mockall::predicate::eq(()))
.times(1)
.return_once(move |_| Ok(output_closure));
hdk::prelude::set_hdk(mock_hdk);
let result = super::zome_info(());
assert_eq!(
result,
Ok(
output
)
);
}
} | {
let mut functions: GrantedFunctions = BTreeSet::new();
functions.insert((hdk::prelude::zome_info()?.name, "call_info".into()));
functions.insert((hdk::prelude::zome_info()?.name, "remote_call_info".into()));
create_cap_grant(CapGrantEntry {
tag: "".into(),
// empty access converts to unrestricted
access: ().into(),
functions,
})?;
Ok(())
} |
mask3.rs | #[doc = "Reader of register MASK3"]
pub type R = crate::R<u32, super::MASK3>;
#[doc = "Writer for register MASK3"]
pub type W = crate::W<u32, super::MASK3>;
#[doc = "Register MASK3 `reset()`'s with value 0"]
impl crate::ResetValue for super::MASK3 {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `RESERVED4`"]
pub type RESERVED4_R = crate::R<u32, u32>;
#[doc = "Write proxy for field `RESERVED4`"]
pub struct RESERVED4_W<'a> {
w: &'a mut W,
}
impl<'a> RESERVED4_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u32) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x0fff_ffff << 4)) | (((value as u32) & 0x0fff_ffff) << 4);
self.w
}
}
#[doc = "Reader of field `MASK`"]
pub type MASK_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `MASK`"]
pub struct MASK_W<'a> {
w: &'a mut W,
}
impl<'a> MASK_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn | (self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !0x0f) | ((value as u32) & 0x0f);
self.w
}
}
impl R {
#[doc = "Bits 4:31 - 31:4\\]
Software should not rely on the value of a reserved field. Writing any value other than the reset value may result in undefined behavior."]
#[inline(always)]
pub fn reserved4(&self) -> RESERVED4_R {
RESERVED4_R::new(((self.bits >> 4) & 0x0fff_ffff) as u32)
}
#[doc = "Bits 0:3 - 3:0\\]
Mask on data address when matching against COMP3. This is the size of the ignore mask. That is, DWT matching is performed as:(ADDR ANDed with (0xFFFF left bit-shifted by MASK)) == COMP3. However, the actual comparison is slightly more complex to enable matching an address wherever it appears on a bus. So, if COMP3 is 3, this matches a word access of 0, because 3 would be within the word."]
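// For example (assuming the usual DWT address masking), MASK = 2 ignores the low two address bits, so COMP3 = 0x2000_0000 matches any access to 0x2000_0000..=0x2000_0003.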
#[inline(always)]
pub fn mask(&self) -> MASK_R {
MASK_R::new((self.bits & 0x0f) as u8)
}
}
impl W {
#[doc = "Bits 4:31 - 31:4\\]
Software should not rely on the value of a reserved field. Writing any value other than the reset value may result in undefined behavior."]
#[inline(always)]
pub fn reserved4(&mut self) -> RESERVED4_W {
RESERVED4_W { w: self }
}
#[doc = "Bits 0:3 - 3:0\\]
Mask on data address when matching against COMP3. This is the size of the ignore mask. That is, DWT matching is performed as:(ADDR ANDed with (0xFFFF left bit-shifted by MASK)) == COMP3. However, the actual comparison is slightly more complex to enable matching an address wherever it appears on a bus. So, if COMP3 is 3, this matches a word access of 0, because 3 would be within the word."]
#[inline(always)]
pub fn mask(&mut self) -> MASK_W {
MASK_W { w: self }
}
}
| bits |
_tickfont.py | from plotly.basedatatypes import BaseTraceHierarchyType
import copy
class Tickfont(BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
*Arial*, *Balto*, *Courier New*, *Droid Sans*, *Droid Serif*,
*Droid Sans Mono*, *Gravitas One*, *Old Standard TT*, *Open
Sans*, *Overpass*, *PT Sans Narrow*, *Raleway*, *Times New
Roman*.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self['family']
@family.setter
def family(self, val):
self['family'] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self['size']
@size.setter
def size(self, val):
self['size'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'heatmapgl.colorbar'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include *Arial*, *Balto*, *Courier New*, *Droid Sans*,
*Droid Serif*, *Droid Sans Mono*, *Gravitas One*, *Old
Standard TT*, *Open Sans*, *Overpass*, *PT Sans
Narrow*, *Raleway*, *Times New Roman*.
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly.graph_objs.heatmapgl.colorbar.Tickfont
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include *Arial*, *Balto*, *Courier New*, *Droid Sans*,
*Droid Serif*, *Droid Sans Mono*, *Gravitas One*, *Old
Standard TT*, *Open Sans*, *Overpass*, *PT Sans
Narrow*, *Raleway*, *Times New Roman*.
size
Returns
-------
Tickfont
"""
super(Tickfont, self).__init__('tickfont')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
|
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.heatmapgl.colorbar.Tickfont
constructor must be a dict or
an instance of plotly.graph_objs.heatmapgl.colorbar.Tickfont"""
)
# Import validators
# -----------------
from plotly.validators.heatmapgl.colorbar import (
tickfont as v_tickfont
)
# Initialize validators
# ---------------------
self._validators['color'] = v_tickfont.ColorValidator()
self._validators['family'] = v_tickfont.FamilyValidator()
self._validators['size'] = v_tickfont.SizeValidator()
# Populate data dict with properties
# ----------------------------------
v = arg.pop('color', None)
self.color = color if color is not None else v
v = arg.pop('family', None)
self.family = family if family is not None else v
v = arg.pop('size', None)
self.size = size if size is not None else v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
| arg = arg.to_plotly_json() |
losses.py | import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
from sklearn.utils import class_weight
from utils.lovasz_losses import lovasz_softmax
import pdb
def make_one_hot(labels, classes):
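# Scatter 1s along the class dimension of an (N, C, H, W) zero tensor at the indices given by the (N, 1, H, W) label map.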
one_hot = torch.FloatTensor(labels.size()[0], classes, labels.size()[2], labels.size()[3]).zero_().to(labels.device)
target = one_hot.scatter_(1, labels.data, 1)
return target
def get_weights(target):
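# Median-frequency balancing: weight each class present in the target by median(counts) / count; absent classes keep weight 1.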
t_np = target.view(-1).data.cpu().numpy()
classes, counts = np.unique(t_np, return_counts=True)
cls_w = np.median(counts) / counts
#cls_w = class_weight.compute_class_weight('balanced', classes, t_np)
weights = np.ones(7)
weights[classes] = cls_w
return torch.from_numpy(weights).float().cuda()
class CrossEntropyLoss2d(nn.Module):
def __init__(self, weight=None, ignore_index=255, reduction='mean'):
super(CrossEntropyLoss2d, self).__init__()
self.CE = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction=reduction)
def forward(self, output, target):
loss = self.CE(output, target)
return loss
class DiceLoss(nn.Module):
def __init__(self, smooth=1., ignore_index=255):
super(DiceLoss, self).__init__()
self.ignore_index = ignore_index
self.smooth = smooth
def forward(self, output, target):
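# Soft Dice on flattened softmax probabilities vs. one-hot targets: loss = 1 - (2 * intersection + smooth) / (sums + smooth).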
if self.ignore_index not in range(target.min(), target.max()):
if (target == self.ignore_index).sum() > 0:
target[target == self.ignore_index] = target.min()
target = make_one_hot(target.unsqueeze(dim=1), classes=output.size()[1])
output = F.softmax(output, dim=1)
output_flat = output.contiguous().view(-1)
target_flat = target.contiguous().view(-1)
intersection = (output_flat * target_flat).sum()
loss = 1 - ((2. * intersection + self.smooth) /
(output_flat.sum() + target_flat.sum() + self.smooth))
return loss
class FocalLoss(nn.Module):
def __init__(self, gamma=2, alpha=None, ignore_index=255, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.size_average = size_average
self.CE_loss = nn.CrossEntropyLoss(reduction='none', ignore_index=ignore_index, weight=alpha)
def forward(self, output, target):
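# Focal loss: down-weight easy examples by scaling the per-element cross entropy with (1 - pt) ** gamma.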
logpt = self.CE_loss(output, target)
pt = torch.exp(-logpt)
loss = ((1-pt)**self.gamma) * logpt
if self.size_average:
return loss.mean()
return loss.sum()
class CE_DiceLoss(nn.Module):
def __init__(self, smooth=1, reduction='mean', ignore_index=255, weight=None):
super(CE_DiceLoss, self).__init__()
self.smooth = smooth
self.dice = DiceLoss()
self.cross_entropy = nn.CrossEntropyLoss(weight=weight, reduction=reduction, ignore_index=ignore_index)
def forward(self, output, target):
CE_loss = self.cross_entropy(output, target)
dice_loss = self.dice(output, target)
return CE_loss + dice_loss
class LovaszSoftmax(nn.Module):
| def __init__(self, classes='present', per_image=False, ignore_index=255):
super(LovaszSoftmax, self).__init__()
self.classes = classes
self.per_image = per_image
self.ignore_index = ignore_index
def forward(self, output, target):
logits = F.softmax(output, dim=1)
loss = lovasz_softmax(logits, target, ignore=self.ignore_index)
return loss |
|
gpio6_ctrl.rs | #[doc = "Reader of register GPIO6_CTRL"]
pub type R = crate::R<u32, super::GPIO6_CTRL>;
#[doc = "Writer for register GPIO6_CTRL"]
pub type W = crate::W<u32, super::GPIO6_CTRL>;
#[doc = "Register GPIO6_CTRL `reset()`'s with value 0x1f"]
impl crate::ResetValue for super::GPIO6_CTRL {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0x1f
}
}
#[doc = "\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum IRQOVER_A {
#[doc = "0: don't invert the interrupt"]
NORMAL = 0,
#[doc = "1: invert the interrupt"]
INVERT = 1,
#[doc = "2: drive interrupt low"]
LOW = 2,
#[doc = "3: drive interrupt high"]
HIGH = 3,
}
impl From<IRQOVER_A> for u8 {
#[inline(always)]
fn from(variant: IRQOVER_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `IRQOVER`"]
pub type IRQOVER_R = crate::R<u8, IRQOVER_A>;
impl IRQOVER_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> IRQOVER_A {
match self.bits {
0 => IRQOVER_A::NORMAL,
1 => IRQOVER_A::INVERT,
2 => IRQOVER_A::LOW,
3 => IRQOVER_A::HIGH,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `NORMAL`"]
#[inline(always)]
pub fn is_normal(&self) -> bool {
*self == IRQOVER_A::NORMAL
}
#[doc = "Checks if the value of the field is `INVERT`"]
#[inline(always)]
pub fn is_invert(&self) -> bool {
*self == IRQOVER_A::INVERT
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline(always)]
pub fn is_low(&self) -> bool {
*self == IRQOVER_A::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline(always)]
pub fn is_high(&self) -> bool {
*self == IRQOVER_A::HIGH
}
}
#[doc = "Write proxy for field `IRQOVER`"]
pub struct IRQOVER_W<'a> {
w: &'a mut W,
}
impl<'a> IRQOVER_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: IRQOVER_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "don't invert the interrupt"]
#[inline(always)]
pub fn normal(self) -> &'a mut W {
self.variant(IRQOVER_A::NORMAL)
}
#[doc = "invert the interrupt"]
#[inline(always)]
pub fn invert(self) -> &'a mut W {
self.variant(IRQOVER_A::INVERT)
}
#[doc = "drive interrupt low"]
#[inline(always)]
pub fn low(self) -> &'a mut W {
self.variant(IRQOVER_A::LOW)
}
#[doc = "drive interrupt high"]
#[inline(always)]
pub fn high(self) -> &'a mut W {
self.variant(IRQOVER_A::HIGH)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 28)) | (((value as u32) & 0x03) << 28);
self.w
}
}
#[doc = "\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum | {
#[doc = "0: don't invert the peri input"]
NORMAL = 0,
#[doc = "1: invert the peri input"]
INVERT = 1,
#[doc = "2: drive peri input low"]
LOW = 2,
#[doc = "3: drive peri input high"]
HIGH = 3,
}
impl From<INOVER_A> for u8 {
#[inline(always)]
fn from(variant: INOVER_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `INOVER`"]
pub type INOVER_R = crate::R<u8, INOVER_A>;
impl INOVER_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> INOVER_A {
match self.bits {
0 => INOVER_A::NORMAL,
1 => INOVER_A::INVERT,
2 => INOVER_A::LOW,
3 => INOVER_A::HIGH,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `NORMAL`"]
#[inline(always)]
pub fn is_normal(&self) -> bool {
*self == INOVER_A::NORMAL
}
#[doc = "Checks if the value of the field is `INVERT`"]
#[inline(always)]
pub fn is_invert(&self) -> bool {
*self == INOVER_A::INVERT
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline(always)]
pub fn is_low(&self) -> bool {
*self == INOVER_A::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline(always)]
pub fn is_high(&self) -> bool {
*self == INOVER_A::HIGH
}
}
#[doc = "Write proxy for field `INOVER`"]
pub struct INOVER_W<'a> {
w: &'a mut W,
}
impl<'a> INOVER_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: INOVER_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "don't invert the peri input"]
#[inline(always)]
pub fn normal(self) -> &'a mut W {
self.variant(INOVER_A::NORMAL)
}
#[doc = "invert the peri input"]
#[inline(always)]
pub fn invert(self) -> &'a mut W {
self.variant(INOVER_A::INVERT)
}
#[doc = "drive peri input low"]
#[inline(always)]
pub fn low(self) -> &'a mut W {
self.variant(INOVER_A::LOW)
}
#[doc = "drive peri input high"]
#[inline(always)]
pub fn high(self) -> &'a mut W {
self.variant(INOVER_A::HIGH)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 16)) | (((value as u32) & 0x03) << 16);
self.w
}
}
#[doc = "\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum OEOVER_A {
#[doc = "0: drive output enable from peripheral signal selected by funcsel"]
NORMAL = 0,
#[doc = "1: drive output enable from inverse of peripheral signal selected by funcsel"]
INVERT = 1,
#[doc = "2: disable output"]
DISABLE = 2,
#[doc = "3: enable output"]
ENABLE = 3,
}
impl From<OEOVER_A> for u8 {
#[inline(always)]
fn from(variant: OEOVER_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `OEOVER`"]
pub type OEOVER_R = crate::R<u8, OEOVER_A>;
impl OEOVER_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> OEOVER_A {
match self.bits {
0 => OEOVER_A::NORMAL,
1 => OEOVER_A::INVERT,
2 => OEOVER_A::DISABLE,
3 => OEOVER_A::ENABLE,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `NORMAL`"]
#[inline(always)]
pub fn is_normal(&self) -> bool {
*self == OEOVER_A::NORMAL
}
#[doc = "Checks if the value of the field is `INVERT`"]
#[inline(always)]
pub fn is_invert(&self) -> bool {
*self == OEOVER_A::INVERT
}
#[doc = "Checks if the value of the field is `DISABLE`"]
#[inline(always)]
pub fn is_disable(&self) -> bool {
*self == OEOVER_A::DISABLE
}
#[doc = "Checks if the value of the field is `ENABLE`"]
#[inline(always)]
pub fn is_enable(&self) -> bool {
*self == OEOVER_A::ENABLE
}
}
#[doc = "Write proxy for field `OEOVER`"]
pub struct OEOVER_W<'a> {
w: &'a mut W,
}
impl<'a> OEOVER_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: OEOVER_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "drive output enable from peripheral signal selected by funcsel"]
#[inline(always)]
pub fn normal(self) -> &'a mut W {
self.variant(OEOVER_A::NORMAL)
}
#[doc = "drive output enable from inverse of peripheral signal selected by funcsel"]
#[inline(always)]
pub fn invert(self) -> &'a mut W {
self.variant(OEOVER_A::INVERT)
}
#[doc = "disable output"]
#[inline(always)]
pub fn disable(self) -> &'a mut W {
self.variant(OEOVER_A::DISABLE)
}
#[doc = "enable output"]
#[inline(always)]
pub fn enable(self) -> &'a mut W {
self.variant(OEOVER_A::ENABLE)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 12)) | (((value as u32) & 0x03) << 12);
self.w
}
}
#[doc = "\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum OUTOVER_A {
#[doc = "0: drive output from peripheral signal selected by funcsel"]
NORMAL = 0,
#[doc = "1: drive output from inverse of peripheral signal selected by funcsel"]
INVERT = 1,
#[doc = "2: drive output low"]
LOW = 2,
#[doc = "3: drive output high"]
HIGH = 3,
}
impl From<OUTOVER_A> for u8 {
#[inline(always)]
fn from(variant: OUTOVER_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `OUTOVER`"]
pub type OUTOVER_R = crate::R<u8, OUTOVER_A>;
impl OUTOVER_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> OUTOVER_A {
match self.bits {
0 => OUTOVER_A::NORMAL,
1 => OUTOVER_A::INVERT,
2 => OUTOVER_A::LOW,
3 => OUTOVER_A::HIGH,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `NORMAL`"]
#[inline(always)]
pub fn is_normal(&self) -> bool {
*self == OUTOVER_A::NORMAL
}
#[doc = "Checks if the value of the field is `INVERT`"]
#[inline(always)]
pub fn is_invert(&self) -> bool {
*self == OUTOVER_A::INVERT
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline(always)]
pub fn is_low(&self) -> bool {
*self == OUTOVER_A::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline(always)]
pub fn is_high(&self) -> bool {
*self == OUTOVER_A::HIGH
}
}
#[doc = "Write proxy for field `OUTOVER`"]
pub struct OUTOVER_W<'a> {
w: &'a mut W,
}
impl<'a> OUTOVER_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: OUTOVER_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "drive output from peripheral signal selected by funcsel"]
#[inline(always)]
pub fn normal(self) -> &'a mut W {
self.variant(OUTOVER_A::NORMAL)
}
#[doc = "drive output from inverse of peripheral signal selected by funcsel"]
#[inline(always)]
pub fn invert(self) -> &'a mut W {
self.variant(OUTOVER_A::INVERT)
}
#[doc = "drive output low"]
#[inline(always)]
pub fn low(self) -> &'a mut W {
self.variant(OUTOVER_A::LOW)
}
#[doc = "drive output high"]
#[inline(always)]
pub fn high(self) -> &'a mut W {
self.variant(OUTOVER_A::HIGH)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 8)) | (((value as u32) & 0x03) << 8);
self.w
}
}
#[doc = "0-31 -> selects pin function according to the gpio table\\n 31 == NULL\n\nValue on reset: 31"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum FUNCSEL_A {
#[doc = "1: `1`"]
SPI0_SCLK = 1,
#[doc = "2: `10`"]
UART1_CTS = 2,
#[doc = "3: `11`"]
I2C1_SDA = 3,
#[doc = "4: `100`"]
PWM_A_3 = 4,
#[doc = "5: `101`"]
SIO_6 = 5,
#[doc = "6: `110`"]
PIO0_6 = 6,
#[doc = "7: `111`"]
PIO1_6 = 7,
#[doc = "8: `1000`"]
USB_MUXING_EXTPHY_SOFTCON = 8,
#[doc = "9: `1001`"]
USB_MUXING_OVERCURR_DETECT = 9,
#[doc = "31: `11111`"]
NULL = 31,
}
impl From<FUNCSEL_A> for u8 {
#[inline(always)]
fn from(variant: FUNCSEL_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `FUNCSEL`"]
pub type FUNCSEL_R = crate::R<u8, FUNCSEL_A>;
impl FUNCSEL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<u8, FUNCSEL_A> {
use crate::Variant::*;
match self.bits {
1 => Val(FUNCSEL_A::SPI0_SCLK),
2 => Val(FUNCSEL_A::UART1_CTS),
3 => Val(FUNCSEL_A::I2C1_SDA),
4 => Val(FUNCSEL_A::PWM_A_3),
5 => Val(FUNCSEL_A::SIO_6),
6 => Val(FUNCSEL_A::PIO0_6),
7 => Val(FUNCSEL_A::PIO1_6),
8 => Val(FUNCSEL_A::USB_MUXING_EXTPHY_SOFTCON),
9 => Val(FUNCSEL_A::USB_MUXING_OVERCURR_DETECT),
31 => Val(FUNCSEL_A::NULL),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `SPI0_SCLK`"]
#[inline(always)]
pub fn is_spi0_sclk(&self) -> bool {
*self == FUNCSEL_A::SPI0_SCLK
}
#[doc = "Checks if the value of the field is `UART1_CTS`"]
#[inline(always)]
pub fn is_uart1_cts(&self) -> bool {
*self == FUNCSEL_A::UART1_CTS
}
#[doc = "Checks if the value of the field is `I2C1_SDA`"]
#[inline(always)]
pub fn is_i2c1_sda(&self) -> bool {
*self == FUNCSEL_A::I2C1_SDA
}
#[doc = "Checks if the value of the field is `PWM_A_3`"]
#[inline(always)]
pub fn is_pwm_a_3(&self) -> bool {
*self == FUNCSEL_A::PWM_A_3
}
#[doc = "Checks if the value of the field is `SIO_6`"]
#[inline(always)]
pub fn is_sio_6(&self) -> bool {
*self == FUNCSEL_A::SIO_6
}
#[doc = "Checks if the value of the field is `PIO0_6`"]
#[inline(always)]
pub fn is_pio0_6(&self) -> bool {
*self == FUNCSEL_A::PIO0_6
}
#[doc = "Checks if the value of the field is `PIO1_6`"]
#[inline(always)]
pub fn is_pio1_6(&self) -> bool {
*self == FUNCSEL_A::PIO1_6
}
#[doc = "Checks if the value of the field is `USB_MUXING_EXTPHY_SOFTCON`"]
#[inline(always)]
pub fn is_usb_muxing_extphy_softcon(&self) -> bool {
*self == FUNCSEL_A::USB_MUXING_EXTPHY_SOFTCON
}
#[doc = "Checks if the value of the field is `USB_MUXING_OVERCURR_DETECT`"]
#[inline(always)]
pub fn is_usb_muxing_overcurr_detect(&self) -> bool {
*self == FUNCSEL_A::USB_MUXING_OVERCURR_DETECT
}
#[doc = "Checks if the value of the field is `NULL`"]
#[inline(always)]
pub fn is_null(&self) -> bool {
*self == FUNCSEL_A::NULL
}
}
#[doc = "Write proxy for field `FUNCSEL`"]
pub struct FUNCSEL_W<'a> {
w: &'a mut W,
}
impl<'a> FUNCSEL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: FUNCSEL_A) -> &'a mut W {
unsafe { self.bits(variant.into()) }
}
#[doc = "`1`"]
#[inline(always)]
pub fn spi0_sclk(self) -> &'a mut W {
self.variant(FUNCSEL_A::SPI0_SCLK)
}
#[doc = "`10`"]
#[inline(always)]
pub fn uart1_cts(self) -> &'a mut W {
self.variant(FUNCSEL_A::UART1_CTS)
}
#[doc = "`11`"]
#[inline(always)]
pub fn i2c1_sda(self) -> &'a mut W {
self.variant(FUNCSEL_A::I2C1_SDA)
}
#[doc = "`100`"]
#[inline(always)]
pub fn pwm_a_3(self) -> &'a mut W {
self.variant(FUNCSEL_A::PWM_A_3)
}
#[doc = "`101`"]
#[inline(always)]
pub fn sio_6(self) -> &'a mut W {
self.variant(FUNCSEL_A::SIO_6)
}
#[doc = "`110`"]
#[inline(always)]
pub fn pio0_6(self) -> &'a mut W {
self.variant(FUNCSEL_A::PIO0_6)
}
#[doc = "`111`"]
#[inline(always)]
pub fn pio1_6(self) -> &'a mut W {
self.variant(FUNCSEL_A::PIO1_6)
}
#[doc = "`1000`"]
#[inline(always)]
pub fn usb_muxing_extphy_softcon(self) -> &'a mut W {
self.variant(FUNCSEL_A::USB_MUXING_EXTPHY_SOFTCON)
}
#[doc = "`1001`"]
#[inline(always)]
pub fn usb_muxing_overcurr_detect(self) -> &'a mut W {
self.variant(FUNCSEL_A::USB_MUXING_OVERCURR_DETECT)
}
#[doc = "`11111`"]
#[inline(always)]
pub fn null(self) -> &'a mut W {
self.variant(FUNCSEL_A::NULL)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !0x1f) | ((value as u32) & 0x1f);
self.w
}
}
impl R {
#[doc = "Bits 28:29"]
#[inline(always)]
pub fn irqover(&self) -> IRQOVER_R {
IRQOVER_R::new(((self.bits >> 28) & 0x03) as u8)
}
#[doc = "Bits 16:17"]
#[inline(always)]
pub fn inover(&self) -> INOVER_R {
INOVER_R::new(((self.bits >> 16) & 0x03) as u8)
}
#[doc = "Bits 12:13"]
#[inline(always)]
pub fn oeover(&self) -> OEOVER_R {
OEOVER_R::new(((self.bits >> 12) & 0x03) as u8)
}
#[doc = "Bits 8:9"]
#[inline(always)]
pub fn outover(&self) -> OUTOVER_R {
OUTOVER_R::new(((self.bits >> 8) & 0x03) as u8)
}
#[doc = "Bits 0:4 - 0-31 -> selects pin function according to the gpio table\\n 31 == NULL"]
#[inline(always)]
pub fn funcsel(&self) -> FUNCSEL_R {
FUNCSEL_R::new((self.bits & 0x1f) as u8)
}
}
impl W {
#[doc = "Bits 28:29"]
#[inline(always)]
pub fn irqover(&mut self) -> IRQOVER_W {
IRQOVER_W { w: self }
}
#[doc = "Bits 16:17"]
#[inline(always)]
pub fn inover(&mut self) -> INOVER_W {
INOVER_W { w: self }
}
#[doc = "Bits 12:13"]
#[inline(always)]
pub fn oeover(&mut self) -> OEOVER_W {
OEOVER_W { w: self }
}
#[doc = "Bits 8:9"]
#[inline(always)]
pub fn outover(&mut self) -> OUTOVER_W {
OUTOVER_W { w: self }
}
#[doc = "Bits 0:4 - 0-31 -> selects pin function according to the gpio table\\n 31 == NULL"]
#[inline(always)]
pub fn funcsel(&mut self) -> FUNCSEL_W {
FUNCSEL_W { w: self }
}
}
| INOVER_A |
base.go | // Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package avaabi
import (
"context"
"errors"
"fmt"
"math/big"
"github.com/ava-labs/coreth"
"github.com/ava-labs/coreth/core/types"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/event"
)
// SignerFn is a signer function callback when a contract requires a method to
// sign the transaction before submission.
type SignerFn func(types.Signer, common.Address, *types.Transaction) (*types.Transaction, error)
// CallOpts is the collection of options to fine tune a contract call request.
type CallOpts struct {
Pending bool // Whether to operate on the pending state or the last known one
From common.Address // Optional: the sender address; otherwise the first account is used
BlockNumber *big.Int // Optional: the block number on which the call should be performed
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}
// TransactOpts is the collection of authorization data required to create a
// valid Ethereum transaction.
type TransactOpts struct {
From common.Address // Ethereum account to send the transaction from
Nonce *big.Int // Nonce to use for the transaction execution (nil = use pending state)
Signer SignerFn // Method to use for signing the transaction (mandatory)
Value *big.Int // Funds to transfer along the transaction (nil = 0 = no funds)
GasPrice *big.Int // Gas price to use for the transaction execution (nil = gas price oracle)
GasLimit uint64 // Gas limit to set for the transaction execution (0 = estimate)
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}
// FilterOpts is the collection of options to fine tune filtering for events
// within a bound contract.
type FilterOpts struct {
Start uint64 // Start of the queried range
End *uint64 // End of the range (nil = latest)
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}
// WatchOpts is the collection of options to fine tune subscribing for events
// within a bound contract.
type WatchOpts struct {
Start *uint64 // Start of the queried range (nil = latest)
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}
// BoundContract is the base wrapper object that reflects a contract on the
// Ethereum network. It contains a collection of methods that are used by the
// higher level contract bindings to operate.
type BoundContract struct {
address common.Address // Deployment address of the contract on the Ethereum blockchain
abi abi.ABI // Reflect based ABI to access the correct Ethereum methods
caller ContractCaller // Read interface to interact with the blockchain
transactor ContractTransactor // Write interface to interact with the blockchain
filterer ContractFilterer // Event filtering to interact with the blockchain
}
// NewBoundContract creates a low level contract interface through which calls
// and transactions may be made through.
func NewBoundContract(address common.Address, abi abi.ABI, caller ContractCaller, transactor ContractTransactor, filterer ContractFilterer) *BoundContract {
return &BoundContract{
address: address,
abi: abi,
caller: caller,
transactor: transactor,
filterer: filterer,
}
}
// DeployContract deploys a contract onto the Ethereum blockchain and binds the
// deployment address with a Go wrapper.
func DeployContract(opts *TransactOpts, abi abi.ABI, bytecode []byte, backend ContractBackend, params ...interface{}) (common.Address, *types.Transaction, *BoundContract, error) {
// Otherwise try to deploy the contract
c := NewBoundContract(common.Address{}, abi, backend, backend, backend)
input, err := c.abi.Pack("", params...)
if err != nil {
return common.Address{}, nil, nil, err
}
tx, err := c.transact(opts, nil, append(bytecode, input...))
if err != nil {
return common.Address{}, nil, nil, err
}
c.address = crypto.CreateAddress(opts.From, tx.Nonce())
return c.address, tx, c, nil
}
// Call invokes the (constant) contract method with params as input values and
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string, params ...interface{}) error {
// Don't crash on a lazy user
if opts == nil {
opts = new(CallOpts)
}
// Pack the input, call and unpack the results
input, err := c.abi.Pack(method, params...)
if err != nil {
return err
}
var (
msg = coreth.CallMsg{From: opts.From, To: &c.address, Data: input}
ctx = ensureContext(opts.Context)
code []byte
output []byte
)
if opts.Pending {
pb, ok := c.caller.(PendingContractCaller)
if !ok {
return ErrNoPendingState
}
output, err = pb.PendingCallContract(ctx, msg)
if err == nil && len(output) == 0 {
// Make sure we have a contract to operate on, and bail out otherwise.
if code, err = pb.PendingCodeAt(ctx, c.address); err != nil {
return err
} else if len(code) == 0 {
return ErrNoCode
}
}
} else {
output, err = c.caller.CallContract(ctx, msg, opts.BlockNumber)
if err == nil && len(output) == 0 {
// Make sure we have a contract to operate on, and bail out otherwise.
if code, err = c.caller.CodeAt(ctx, c.address, opts.BlockNumber); err != nil {
return err
} else if len(code) == 0 {
return ErrNoCode
}
}
}
if err != nil {
return err
}
return c.abi.Unpack(result, method, output)
}
// Transact invokes the (paid) contract method with params as input values.
func (c *BoundContract) Transact(opts *TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
// Otherwise pack up the parameters and invoke the contract
input, err := c.abi.Pack(method, params...)
if err != nil {
return nil, err
}
// todo(rjl493456442) check the method is payable or not,
// reject invalid transaction at the first place
return c.transact(opts, &c.address, input)
}
// RawTransact initiates a transaction with the given raw calldata as the input.
// It's usually used to initiates transaction for invoking **Fallback** function.
func (c *BoundContract) RawTransact(opts *TransactOpts, calldata []byte) (*types.Transaction, error) {
// todo(rjl493456442) check the method is payable or not,
// reject invalid transaction at the first place
return c.transact(opts, &c.address, calldata)
}
// Transfer initiates a plain transaction to move funds to the contract, calling
// its default method if one is available.
func (c *BoundContract) Transfer(opts *TransactOpts) (*types.Transaction, error) {
// todo(rjl493456442) check the payable fallback or receive is defined
// or not, reject invalid transaction at the first place
return c.transact(opts, &c.address, nil)
}
// transact executes an actual transaction invocation, first deriving any missing
// authorization fields, and then scheduling the transaction for execution.
func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) {
var err error
// Ensure a valid value field and resolve the account nonce
value := opts.Value | if value == nil {
value = new(big.Int)
}
var nonce uint64
if opts.Nonce == nil {
nonce, err = c.transactor.PendingNonceAt(ensureContext(opts.Context), opts.From)
if err != nil {
return nil, fmt.Errorf("failed to retrieve account nonce: %v", err)
}
} else {
nonce = opts.Nonce.Uint64()
}
// Figure out the gas allowance and gas price values
gasPrice := opts.GasPrice
if gasPrice == nil {
gasPrice, err = c.transactor.SuggestGasPrice(ensureContext(opts.Context))
if err != nil {
return nil, fmt.Errorf("failed to suggest gas price: %v", err)
}
}
gasLimit := opts.GasLimit
if gasLimit == 0 {
// Gas estimation cannot succeed without code for method invocations
if contract != nil {
if code, err := c.transactor.PendingCodeAt(ensureContext(opts.Context), c.address); err != nil {
return nil, err
} else if len(code) == 0 {
return nil, ErrNoCode
}
}
// If the contract surely has code (or code is not needed), estimate the transaction
msg := coreth.CallMsg{From: opts.From, To: contract, GasPrice: gasPrice, Value: value, Data: input}
gasLimit, err = c.transactor.EstimateGas(ensureContext(opts.Context), msg)
if err != nil {
return nil, fmt.Errorf("failed to estimate gas needed: %v", err)
}
}
// Create the transaction, sign it and schedule it for execution
var rawTx *types.Transaction
if contract == nil {
rawTx = types.NewContractCreation(nonce, value, gasLimit, gasPrice, input)
} else {
rawTx = types.NewTransaction(nonce, c.address, value, gasLimit, gasPrice, input)
}
if opts.Signer == nil {
return nil, errors.New("no signer to authorize the transaction with")
}
signedTx, err := opts.Signer(types.HomesteadSigner{}, opts.From, rawTx)
if err != nil {
return nil, err
}
if err := c.transactor.SendTransaction(ensureContext(opts.Context), signedTx); err != nil {
return nil, err
}
return signedTx, nil
}
// FilterLogs filters contract logs for past blocks, returning the necessary
// channels to construct a strongly typed bound iterator on top of them.
func (c *BoundContract) FilterLogs(opts *FilterOpts, name string, query ...[]interface{}) (chan types.Log, event.Subscription, error) {
// Don't crash on a lazy user
if opts == nil {
opts = new(FilterOpts)
}
// Append the event selector to the query parameters and construct the topic set
query = append([][]interface{}{{c.abi.Events[name].ID}}, query...)
topics, err := abi.MakeTopics(query...)
if err != nil {
return nil, nil, err
}
// Start the background filtering
logs := make(chan types.Log, 128)
config := coreth.FilterQuery{
Addresses: []common.Address{c.address},
Topics: topics,
FromBlock: new(big.Int).SetUint64(opts.Start),
}
if opts.End != nil {
config.ToBlock = new(big.Int).SetUint64(*opts.End)
}
/* TODO(karalabe): Replace the rest of the method below with this when supported
sub, err := c.filterer.SubscribeFilterLogs(ensureContext(opts.Context), config, logs)
*/
buff, err := c.filterer.FilterLogs(ensureContext(opts.Context), config)
if err != nil {
return nil, nil, err
}
sub, err := event.NewSubscription(func(quit <-chan struct{}) error {
for _, log := range buff {
select {
case logs <- log:
case <-quit:
return nil
}
}
return nil
}), nil
if err != nil {
return nil, nil, err
}
return logs, sub, nil
}
// WatchLogs filters subscribes to contract logs for future blocks, returning a
// subscription object that can be used to tear down the watcher.
func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]interface{}) (chan types.Log, event.Subscription, error) {
// Don't crash on a lazy user
if opts == nil {
opts = new(WatchOpts)
}
// Append the event selector to the query parameters and construct the topic set
query = append([][]interface{}{{c.abi.Events[name].ID}}, query...)
topics, err := abi.MakeTopics(query...)
if err != nil {
return nil, nil, err
}
// Start the background filtering
logs := make(chan types.Log, 128)
config := coreth.FilterQuery{
Addresses: []common.Address{c.address},
Topics: topics,
}
if opts.Start != nil {
config.FromBlock = new(big.Int).SetUint64(*opts.Start)
}
sub, err := c.filterer.SubscribeFilterLogs(ensureContext(opts.Context), config, logs)
if err != nil {
return nil, nil, err
}
return logs, sub, nil
}
// UnpackLog unpacks a retrieved log into the provided output structure.
func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) error {
if len(log.Data) > 0 {
if err := c.abi.Unpack(out, event, log.Data); err != nil {
return err
}
}
var indexed abi.Arguments
for _, arg := range c.abi.Events[event].Inputs {
if arg.Indexed {
indexed = append(indexed, arg)
}
}
return abi.ParseTopics(out, indexed, log.Topics[1:])
}
// UnpackLogIntoMap unpacks a retrieved log into the provided map.
func (c *BoundContract) UnpackLogIntoMap(out map[string]interface{}, event string, log types.Log) error {
if len(log.Data) > 0 {
if err := c.abi.UnpackIntoMap(out, event, log.Data); err != nil {
return err
}
}
var indexed abi.Arguments
for _, arg := range c.abi.Events[event].Inputs {
if arg.Indexed {
indexed = append(indexed, arg)
}
}
return abi.ParseTopicsIntoMap(out, indexed, log.Topics[1:])
}
// ensureContext is a helper method to ensure a context is not nil, even if the
// user specified it as such.
func ensureContext(ctx context.Context) context.Context {
if ctx == nil {
return context.TODO()
}
return ctx
} | |
conn.go | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// The MIT License (MIT)
//
// Copyright (c) 2014 wandoulabs
// Copyright (c) 2014 siddontang
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
package server
import (
"bytes"
"context"
"crypto/tls"
"encoding/binary"
goerr "errors"
"fmt"
"io"
"net"
"os/user"
"runtime"
"runtime/pprof"
"runtime/trace"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/opentracing/opentracing-go"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/auth"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/plugin"
"github.com/pingcap/tidb/privilege"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
storeerr "github.com/pingcap/tidb/store/driver/error"
"github.com/pingcap/tidb/tablecodec"
tidbutil "github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/arena"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/execdetails"
"github.com/pingcap/tidb/util/hack"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/memory"
"github.com/prometheus/client_golang/prometheus"
"github.com/tikv/client-go/v2/util"
"go.uber.org/zap"
)
const (
connStatusDispatching int32 = iota
connStatusReading
connStatusShutdown // Closed by server.
connStatusWaitShutdown // Notified by server to close.
)
var (
queryTotalCountOk = [...]prometheus.Counter{
mysql.ComSleep: metrics.QueryTotalCounter.WithLabelValues("Sleep", "OK"),
mysql.ComQuit: metrics.QueryTotalCounter.WithLabelValues("Quit", "OK"),
mysql.ComInitDB: metrics.QueryTotalCounter.WithLabelValues("InitDB", "OK"),
mysql.ComQuery: metrics.QueryTotalCounter.WithLabelValues("Query", "OK"),
mysql.ComPing: metrics.QueryTotalCounter.WithLabelValues("Ping", "OK"),
mysql.ComFieldList: metrics.QueryTotalCounter.WithLabelValues("FieldList", "OK"),
mysql.ComStmtPrepare: metrics.QueryTotalCounter.WithLabelValues("StmtPrepare", "OK"),
mysql.ComStmtExecute: metrics.QueryTotalCounter.WithLabelValues("StmtExecute", "OK"),
mysql.ComStmtFetch: metrics.QueryTotalCounter.WithLabelValues("StmtFetch", "OK"),
mysql.ComStmtClose: metrics.QueryTotalCounter.WithLabelValues("StmtClose", "OK"),
mysql.ComStmtSendLongData: metrics.QueryTotalCounter.WithLabelValues("StmtSendLongData", "OK"),
mysql.ComStmtReset: metrics.QueryTotalCounter.WithLabelValues("StmtReset", "OK"),
mysql.ComSetOption: metrics.QueryTotalCounter.WithLabelValues("SetOption", "OK"),
}
queryTotalCountErr = [...]prometheus.Counter{
mysql.ComSleep: metrics.QueryTotalCounter.WithLabelValues("Sleep", "Error"),
mysql.ComQuit: metrics.QueryTotalCounter.WithLabelValues("Quit", "Error"),
mysql.ComInitDB: metrics.QueryTotalCounter.WithLabelValues("InitDB", "Error"),
mysql.ComQuery: metrics.QueryTotalCounter.WithLabelValues("Query", "Error"),
mysql.ComPing: metrics.QueryTotalCounter.WithLabelValues("Ping", "Error"),
mysql.ComFieldList: metrics.QueryTotalCounter.WithLabelValues("FieldList", "Error"),
mysql.ComStmtPrepare: metrics.QueryTotalCounter.WithLabelValues("StmtPrepare", "Error"),
mysql.ComStmtExecute: metrics.QueryTotalCounter.WithLabelValues("StmtExecute", "Error"),
mysql.ComStmtFetch: metrics.QueryTotalCounter.WithLabelValues("StmtFetch", "Error"),
mysql.ComStmtClose: metrics.QueryTotalCounter.WithLabelValues("StmtClose", "Error"),
mysql.ComStmtSendLongData: metrics.QueryTotalCounter.WithLabelValues("StmtSendLongData", "Error"),
mysql.ComStmtReset: metrics.QueryTotalCounter.WithLabelValues("StmtReset", "Error"),
mysql.ComSetOption: metrics.QueryTotalCounter.WithLabelValues("SetOption", "Error"),
}
queryDurationHistogramUse = metrics.QueryDurationHistogram.WithLabelValues("Use")
queryDurationHistogramShow = metrics.QueryDurationHistogram.WithLabelValues("Show")
queryDurationHistogramBegin = metrics.QueryDurationHistogram.WithLabelValues("Begin")
queryDurationHistogramCommit = metrics.QueryDurationHistogram.WithLabelValues("Commit")
queryDurationHistogramRollback = metrics.QueryDurationHistogram.WithLabelValues("Rollback")
queryDurationHistogramInsert = metrics.QueryDurationHistogram.WithLabelValues("Insert")
queryDurationHistogramReplace = metrics.QueryDurationHistogram.WithLabelValues("Replace")
queryDurationHistogramDelete = metrics.QueryDurationHistogram.WithLabelValues("Delete")
queryDurationHistogramUpdate = metrics.QueryDurationHistogram.WithLabelValues("Update")
queryDurationHistogramSelect = metrics.QueryDurationHistogram.WithLabelValues("Select")
queryDurationHistogramExecute = metrics.QueryDurationHistogram.WithLabelValues("Execute")
queryDurationHistogramSet = metrics.QueryDurationHistogram.WithLabelValues("Set")
queryDurationHistogramGeneral = metrics.QueryDurationHistogram.WithLabelValues(metrics.LblGeneral)
disconnectNormal = metrics.DisconnectionCounter.WithLabelValues(metrics.LblOK)
disconnectByClientWithError = metrics.DisconnectionCounter.WithLabelValues(metrics.LblError)
disconnectErrorUndetermined = metrics.DisconnectionCounter.WithLabelValues("undetermined")
connIdleDurationHistogramNotInTxn = metrics.ConnIdleDurationHistogram.WithLabelValues("0")
connIdleDurationHistogramInTxn = metrics.ConnIdleDurationHistogram.WithLabelValues("1")
)
// newClientConn creates a *clientConn object.
func newClientConn(s *Server) *clientConn {
return &clientConn{
server: s,
connectionID: s.globalConnID.NextID(),
collation: mysql.DefaultCollationID,
alloc: arena.NewAllocator(32 * 1024),
chunkAlloc: chunk.NewAllocator(),
status: connStatusDispatching,
lastActive: time.Now(),
authPlugin: mysql.AuthNativePassword,
}
}
// clientConn represents a connection between the server and a client. It maintains
// connection-specific state and handles client queries.
type clientConn struct {
pkt *packetIO // a helper to read and write data in packet format.
bufReadConn *bufferedReadConn // a buffered-read net.Conn or buffered-read tls.Conn.
tlsConn *tls.Conn // TLS connection, nil if not TLS.
server *Server // a reference of server instance.
capability uint32 // client capability affects the way server handles client request.
connectionID uint64 // atomically allocated by a global variable, unique in process scope.
user string // user of the client.
dbname string // default database name.
salt []byte // random bytes used for authentication.
alloc arena.Allocator // a memory allocator used to reduce memory allocation overhead.
chunkAlloc chunk.Allocator
lastPacket []byte // latest sql query string, currently used for logging error.
ctx *TiDBContext // an interface to execute sql statements.
attrs map[string]string // attributes parsed from client handshake response, not used for now.
peerHost string // peer host
peerPort string // peer port
status int32 // dispatching/reading/shutdown/waitshutdown
lastCode uint16 // last error code
collation uint8 // collation used by client, may be different from the collation used by database.
lastActive time.Time // last active time
authPlugin string // default authentication plugin
isUnixSocket bool // connection is Unix Socket file
rsEncoder *resultEncoder // rsEncoder is used to encode the string result to different charsets.
socketCredUID uint32 // UID from the other end of the Unix Socket
// mu is used for cancelling the execution of current transaction.
mu struct {
sync.RWMutex
cancelFunc context.CancelFunc
}
}
func (cc *clientConn) String() string {
collationStr := mysql.Collations[cc.collation]
return fmt.Sprintf("id:%d, addr:%s status:%b, collation:%s, user:%s",
cc.connectionID, cc.bufReadConn.RemoteAddr(), cc.ctx.Status(), collationStr, cc.user,
)
}
// authSwitchRequest is used by the server to ask the client to switch to a different authentication
// plugin. MySQL 8.0 libmysqlclient based clients by default always try `caching_sha2_password`, even
// when the server advertises its default to be `mysql_native_password`. In addition to this, switching
// may be needed on a per-user basis, as the authentication method is set per user.
// https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchRequest
// https://bugs.mysql.com/bug.php?id=93044
func (cc *clientConn) authSwitchRequest(ctx context.Context, plugin string) ([]byte, error) {
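// Wire layout of the packet built below (after the 4-byte packet header), per the
// Protocol::AuthSwitchRequest documentation linked above:
//   1 byte   0xfe status tag (mysql.AuthSwitchRequest)
//   string   plugin name, NUL terminated
//   string   auth plugin data (cc.salt), NUL terminated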
enclen := 1 + len(plugin) + 1 + len(cc.salt) + 1
data := cc.alloc.AllocWithLen(4, enclen)
data = append(data, mysql.AuthSwitchRequest) // switch request
data = append(data, []byte(plugin)...)
data = append(data, byte(0x00)) // requires null
data = append(data, cc.salt...)
data = append(data, 0)
err := cc.writePacket(data)
if err != nil {
logutil.Logger(ctx).Debug("write response to client failed", zap.Error(err))
return nil, err
}
err = cc.flush(ctx)
if err != nil {
logutil.Logger(ctx).Debug("flush response to client failed", zap.Error(err))
return nil, err
}
resp, err := cc.readPacket()
if err != nil {
err = errors.SuspendStack(err)
if errors.Cause(err) == io.EOF {
logutil.Logger(ctx).Warn("authSwitchRequest response failed because the connection has been closed by the client side")
} else {
logutil.Logger(ctx).Warn("authSwitchRequest response fail", zap.Error(err))
}
return nil, err
}
cc.authPlugin = plugin
return resp, nil
}
// handshake works like TCP handshake, but in a higher level, it first writes initial packet to client,
// during handshake, client and server negotiate compatible features and do authentication.
// After handshake, client can send sql query to server.
func (cc *clientConn) handshake(ctx context.Context) error {
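// A rough sketch of the message flow implemented below:
//   server -> client: initial handshake packet (writeInitialHandshake)
//   client -> server: optional SSLRequest (followed by a TLS upgrade), then the handshake response
//   server:           authentication and init_connect (readOptionalSSLRequestAndHandshakeResponse, initConnect)
//   server -> client: OK packet, after which the command phase starts (see Run/dispatch)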
if err := cc.writeInitialHandshake(ctx); err != nil {
if errors.Cause(err) == io.EOF {
logutil.Logger(ctx).Debug("Could not send handshake because the connection has been closed by the client side")
} else {
logutil.Logger(ctx).Debug("Write init handshake to client fail", zap.Error(errors.SuspendStack(err)))
}
return err
}
if err := cc.readOptionalSSLRequestAndHandshakeResponse(ctx); err != nil {
err1 := cc.writeError(ctx, err)
if err1 != nil {
logutil.Logger(ctx).Debug("writeError failed", zap.Error(err1))
}
return err
}
// MySQL supports an "init_connect" query, which can be run on initial connection.
// The query must return a non-error or the client is disconnected.
if err := cc.initConnect(ctx); err != nil {
logutil.Logger(ctx).Warn("init_connect failed", zap.Error(err))
initErr := errNewAbortingConnection.FastGenByArgs(cc.connectionID, "unconnected", cc.user, cc.peerHost, "init_connect command failed")
if err1 := cc.writeError(ctx, initErr); err1 != nil {
terror.Log(err1)
}
return initErr
}
data := cc.alloc.AllocWithLen(4, 32)
data = append(data, mysql.OKHeader)
data = append(data, 0, 0)
if cc.capability&mysql.ClientProtocol41 > 0 {
data = dumpUint16(data, mysql.ServerStatusAutocommit)
data = append(data, 0, 0)
}
err := cc.writePacket(data)
cc.pkt.sequence = 0
if err != nil {
err = errors.SuspendStack(err)
logutil.Logger(ctx).Debug("write response to client failed", zap.Error(err))
return err
}
err = cc.flush(ctx)
if err != nil {
err = errors.SuspendStack(err)
logutil.Logger(ctx).Debug("flush response to client failed", zap.Error(err))
return err
}
return err
}
func (cc *clientConn) Close() error {
cc.server.rwlock.Lock()
delete(cc.server.clients, cc.connectionID)
connections := len(cc.server.clients)
cc.server.rwlock.Unlock()
return closeConn(cc, connections)
}
func closeConn(cc *clientConn, connections int) error {
metrics.ConnGauge.Set(float64(connections))
if cc.bufReadConn != nil {
err := cc.bufReadConn.Close()
terror.Log(err)
}
if cc.ctx != nil {
return cc.ctx.Close()
}
return nil
}
func (cc *clientConn) closeWithoutLock() error {
delete(cc.server.clients, cc.connectionID)
return closeConn(cc, len(cc.server.clients))
}
// writeInitialHandshake sends server version, connection ID, server capability, collation, server status
// and auth salt to the client.
func (cc *clientConn) writeInitialHandshake(ctx context.Context) error {
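// The packet assembled below follows Protocol::HandshakeV10. A rough field layout
// (this comment is a summary of the protocol documentation, not a normative spec):
//   1 byte    protocol version (always 10)
//   string    server version, NUL terminated
//   4 bytes   connection id (little endian)
//   8 bytes   auth-plugin-data-part-1, followed by 1 filler byte
//   2 bytes   capability flags (lower half)
//   1 byte    character set
//   2 bytes   status flags
//   2 bytes   capability flags (upper half)
//   1 byte    length of auth-plugin-data
//   10 bytes  reserved (all zero)
//   string    auth-plugin-data-part-2, NUL terminated
//   string    auth-plugin name, NUL terminated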
data := make([]byte, 4, 128)
// protocol version, always 10
data = append(data, 10)
// server version[00]
data = append(data, mysql.ServerVersion...)
data = append(data, 0)
// connection id
data = append(data, byte(cc.connectionID), byte(cc.connectionID>>8), byte(cc.connectionID>>16), byte(cc.connectionID>>24))
// auth-plugin-data-part-1
data = append(data, cc.salt[0:8]...)
// filler [00]
data = append(data, 0)
// capability flag lower 2 bytes, using default capability here
data = append(data, byte(cc.server.capability), byte(cc.server.capability>>8))
// charset
if cc.collation == 0 {
cc.collation = uint8(mysql.DefaultCollationID)
}
data = append(data, cc.collation)
// status
data = dumpUint16(data, mysql.ServerStatusAutocommit)
// the following 13 bytes may not be used
// capability flag upper 2 bytes, using default capability here
data = append(data, byte(cc.server.capability>>16), byte(cc.server.capability>>24))
// length of auth-plugin-data
data = append(data, byte(len(cc.salt)+1))
// reserved 10 [00]
data = append(data, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
// auth-plugin-data-part-2
data = append(data, cc.salt[8:]...)
data = append(data, 0)
// auth-plugin name
if cc.ctx == nil {
if err := cc.openSession(); err != nil {
return err
}
}
defAuthPlugin, err := variable.GetGlobalSystemVar(cc.ctx.GetSessionVars(), variable.DefaultAuthPlugin)
if err != nil {
return err
}
cc.authPlugin = defAuthPlugin
data = append(data, []byte(defAuthPlugin)...)
// Close the session to force this to be re-opened after we parse the response. This is needed
// to ensure we use the collation and client flags from the response for the session.
if err = cc.ctx.Close(); err != nil {
return err
}
cc.ctx = nil
data = append(data, 0)
if err = cc.writePacket(data); err != nil {
return err
}
return cc.flush(ctx)
}
func (cc *clientConn) readPacket() ([]byte, error) {
return cc.pkt.readPacket()
}
func (cc *clientConn) writePacket(data []byte) error {
failpoint.Inject("FakeClientConn", func() {
if cc.pkt == nil {
failpoint.Return(nil)
}
})
return cc.pkt.writePacket(data)
}
// getSessionVarsWaitTimeout gets the session variable wait_timeout.
func (cc *clientConn) getSessionVarsWaitTimeout(ctx context.Context) uint64 {
valStr, exists := cc.ctx.GetSessionVars().GetSystemVar(variable.WaitTimeout)
if !exists {
return variable.DefWaitTimeout
}
waitTimeout, err := strconv.ParseUint(valStr, 10, 64)
if err != nil {
logutil.Logger(ctx).Warn("get sysval wait_timeout failed, use default value", zap.Error(err))
// if parsing wait_timeout fails, use the default value
return variable.DefWaitTimeout
}
return waitTimeout
}
type handshakeResponse41 struct {
Capability uint32
Collation uint8
User string
DBName string
Auth []byte
AuthPlugin string
Attrs map[string]string
}
// parseOldHandshakeResponseHeader parses the old version handshake header HandshakeResponse320
func parseOldHandshakeResponseHeader(ctx context.Context, packet *handshakeResponse41, data []byte) (parsedBytes int, err error) {
// Ensure there are enough data to read:
// https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse320
logutil.Logger(ctx).Debug("try to parse handshake response as Protocol::HandshakeResponse320", zap.ByteString("packetData", data))
if len(data) < 2+3 {
logutil.Logger(ctx).Error("got malformed handshake response", zap.ByteString("packetData", data))
return 0, mysql.ErrMalformPacket
}
offset := 0
// capability
capability := binary.LittleEndian.Uint16(data[:2])
packet.Capability = uint32(capability)
// be compatible with Protocol::HandshakeResponse41
packet.Capability |= mysql.ClientProtocol41
offset += 2
// skip max packet size
offset += 3
// use default CharsetID
packet.Collation = mysql.CollationNames["utf8mb4_general_ci"]
return offset, nil
}
// parseOldHandshakeResponseBody parses the HandshakeResponse for Protocol::HandshakeResponse320 (except the common header part).
func parseOldHandshakeResponseBody(ctx context.Context, packet *handshakeResponse41, data []byte, offset int) (err error) {
defer func() {
// A malformed packet can cause an out-of-range slice access; recover here and return an error instead of panicking.
if r := recover(); r != nil {
logutil.Logger(ctx).Error("handshake panic", zap.ByteString("packetData", data), zap.Stack("stack"))
err = mysql.ErrMalformPacket
}
}()
// user name
packet.User = string(data[offset : offset+bytes.IndexByte(data[offset:], 0)])
offset += len(packet.User) + 1
if packet.Capability&mysql.ClientConnectWithDB > 0 {
if len(data[offset:]) > 0 {
idx := bytes.IndexByte(data[offset:], 0)
packet.DBName = string(data[offset : offset+idx])
offset = offset + idx + 1
}
if len(data[offset:]) > 0 {
packet.Auth = data[offset : offset+bytes.IndexByte(data[offset:], 0)]
}
} else {
packet.Auth = data[offset : offset+bytes.IndexByte(data[offset:], 0)]
}
return nil
}
// parseHandshakeResponseHeader parses the common header of SSLRequest and HandshakeResponse41.
func parseHandshakeResponseHeader(ctx context.Context, packet *handshakeResponse41, data []byte) (parsedBytes int, err error) {
// Ensure there are enough data to read:
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest
if len(data) < 4+4+1+23 {
logutil.Logger(ctx).Error("got malformed handshake response", zap.ByteString("packetData", data))
return 0, mysql.ErrMalformPacket
}
offset := 0
// capability
capability := binary.LittleEndian.Uint32(data[:4])
packet.Capability = capability
offset += 4
// skip max packet size
offset += 4
// charset; if you want to use another charset, use SET NAMES
packet.Collation = data[offset]
offset++
// skip reserved 23[00]
offset += 23
return offset, nil
}
// parseHandshakeResponseBody parses the HandshakeResponse (except the common header part).
func parseHandshakeResponseBody(ctx context.Context, packet *handshakeResponse41, data []byte, offset int) (err error) {
defer func() {
// A malformed packet can cause an out-of-range slice access; recover here and return an error instead of panicking.
if r := recover(); r != nil {
logutil.Logger(ctx).Error("handshake panic", zap.ByteString("packetData", data))
err = mysql.ErrMalformPacket
}
}()
// user name
packet.User = string(data[offset : offset+bytes.IndexByte(data[offset:], 0)])
offset += len(packet.User) + 1
if packet.Capability&mysql.ClientPluginAuthLenencClientData > 0 {
// The MySQL client may set this capability incorrectly: it sets this bit even when the
// server doesn't support ClientPluginAuthLenencClientData.
// https://github.com/mysql/mysql-server/blob/5.7/sql-common/client.c#L3478
if data[offset] == 0x1 { // No auth data
offset += 2
} else {
num, null, off := parseLengthEncodedInt(data[offset:])
offset += off
if !null {
packet.Auth = data[offset : offset+int(num)]
offset += int(num)
}
}
} else if packet.Capability&mysql.ClientSecureConnection > 0 {
// auth length and auth
authLen := int(data[offset])
offset++
packet.Auth = data[offset : offset+authLen]
offset += authLen
} else {
packet.Auth = data[offset : offset+bytes.IndexByte(data[offset:], 0)]
offset += len(packet.Auth) + 1
}
if packet.Capability&mysql.ClientConnectWithDB > 0 {
if len(data[offset:]) > 0 {
idx := bytes.IndexByte(data[offset:], 0)
packet.DBName = string(data[offset : offset+idx])
offset += idx + 1
}
}
if packet.Capability&mysql.ClientPluginAuth > 0 {
idx := bytes.IndexByte(data[offset:], 0)
s := offset
f := offset + idx
if s < f { // handle unexpected bad packets
packet.AuthPlugin = string(data[s:f])
}
offset += idx + 1
}
if packet.Capability&mysql.ClientConnectAtts > 0 {
if len(data[offset:]) == 0 {
// Defend against ill-formed packets; connection attributes are not important and can be ignored.
return nil
}
if num, null, off := parseLengthEncodedInt(data[offset:]); !null {
offset += off
row := data[offset : offset+int(num)]
attrs, err := parseAttrs(row)
if err != nil {
logutil.Logger(ctx).Warn("parse attrs failed", zap.Error(err))
return nil
}
packet.Attrs = attrs
}
}
return nil
}
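// Connection attributes (and several other protocol fields) use MySQL's length-encoded
// encoding. As a quick reference (values are examples, per the protocol documentation):
//   0x00..0xfa: the byte itself is the integer value
//   0xfb:       NULL
//   0xfc:       a 2-byte integer follows (e.g. fc 12 01 => 0x0112 = 274)
//   0xfd:       a 3-byte integer follows
//   0xfe:       an 8-byte integer follows
// A length-encoded string is a length-encoded integer followed by that many bytes.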
// parseAttrs parses the connection attributes sent as length-encoded key/value pairs.
// The body below is a minimal sketch of the missing implementation; it assumes the
// parseLengthEncodedBytes helper defined elsewhere in this package, which returns the
// decoded bytes, a null flag, the number of bytes consumed, and an error.
func parseAttrs(data []byte) (map[string]string, error) {
attrs := make(map[string]string)
pos := 0
for pos < len(data) {
key, _, off, err := parseLengthEncodedBytes(data[pos:])
if err != nil {
return attrs, err
}
pos += off
value, _, off, err := parseLengthEncodedBytes(data[pos:])
if err != nil {
return attrs, err
}
pos += off
attrs[string(key)] = string(value)
}
return attrs, nil
}
func (cc *clientConn) readOptionalSSLRequestAndHandshakeResponse(ctx context.Context) error {
// Read a packet. It may be a SSLRequest or HandshakeResponse.
data, err := cc.readPacket()
if err != nil {
err = errors.SuspendStack(err)
if errors.Cause(err) == io.EOF {
logutil.Logger(ctx).Debug("wait handshake response failed because the connection has been closed by the client side")
} else {
logutil.Logger(ctx).Debug("wait handshake response fail", zap.Error(err))
}
return err
}
isOldVersion := false
var resp handshakeResponse41
var pos int
if len(data) < 2 {
logutil.Logger(ctx).Error("got malformed handshake response", zap.ByteString("packetData", data))
return mysql.ErrMalformPacket
}
capability := uint32(binary.LittleEndian.Uint16(data[:2]))
if capability&mysql.ClientProtocol41 > 0 {
pos, err = parseHandshakeResponseHeader(ctx, &resp, data)
} else {
pos, err = parseOldHandshakeResponseHeader(ctx, &resp, data)
isOldVersion = true
}
if err != nil {
terror.Log(err)
return err
}
if resp.Capability&mysql.ClientSSL > 0 {
tlsConfig := (*tls.Config)(atomic.LoadPointer(&cc.server.tlsConfig))
if tlsConfig != nil {
// The packet is an SSLRequest, so switch to TLS.
if err = cc.upgradeToTLS(tlsConfig); err != nil {
return err
}
// Read the following HandshakeResponse packet.
data, err = cc.readPacket()
if err != nil {
logutil.Logger(ctx).Warn("read handshake response failure after upgrade to TLS", zap.Error(err))
return err
}
if isOldVersion {
pos, err = parseOldHandshakeResponseHeader(ctx, &resp, data)
} else {
pos, err = parseHandshakeResponseHeader(ctx, &resp, data)
}
if err != nil {
terror.Log(err)
return err
}
}
} else if config.GetGlobalConfig().Security.RequireSecureTransport {
err := errSecureTransportRequired.FastGenByArgs()
terror.Log(err)
return err
}
// Read the remaining part of the packet.
if isOldVersion {
err = parseOldHandshakeResponseBody(ctx, &resp, data, pos)
} else {
err = parseHandshakeResponseBody(ctx, &resp, data, pos)
}
if err != nil {
terror.Log(err)
return err
}
cc.capability = resp.Capability & cc.server.capability
cc.user = resp.User
cc.dbname = resp.DBName
cc.collation = resp.Collation
cc.attrs = resp.Attrs
err = cc.handleAuthPlugin(ctx, &resp)
if err != nil {
return err
}
switch resp.AuthPlugin {
case mysql.AuthCachingSha2Password:
resp.Auth, err = cc.authSha(ctx)
if err != nil {
return err
}
case mysql.AuthNativePassword:
case mysql.AuthSocket:
default:
return errors.New("Unknown auth plugin")
}
err = cc.openSessionAndDoAuth(resp.Auth, resp.AuthPlugin)
if err != nil {
logutil.Logger(ctx).Warn("open new session or authentication failure", zap.Error(err))
}
return err
}
func (cc *clientConn) handleAuthPlugin(ctx context.Context, resp *handshakeResponse41) error {
if resp.Capability&mysql.ClientPluginAuth > 0 {
newAuth, err := cc.checkAuthPlugin(ctx, resp)
if err != nil {
logutil.Logger(ctx).Warn("failed to check the user authplugin", zap.Error(err))
}
if len(newAuth) > 0 {
resp.Auth = newAuth
}
switch resp.AuthPlugin {
case mysql.AuthCachingSha2Password:
resp.Auth, err = cc.authSha(ctx)
if err != nil {
return err
}
case mysql.AuthNativePassword:
case mysql.AuthSocket:
default:
logutil.Logger(ctx).Warn("Unknown Auth Plugin", zap.String("plugin", resp.AuthPlugin))
}
} else {
logutil.Logger(ctx).Warn("Client without Auth Plugin support; Please upgrade client")
}
return nil
}
func (cc *clientConn) authSha(ctx context.Context) ([]byte, error) {
const (
ShaCommand = 1
RequestRsaPubKey = 2
FastAuthOk = 3
FastAuthFail = 4
)
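// A rough sketch of the exchange below: the server answers the client's scrambled password
// with "fast auth failed" (ShaCommand + FastAuthFail), which makes the client send its full
// authentication data; that payload is read back and trailing zero bytes are trimmed.
// This is a simplified flow compared to MySQL, which may answer FastAuthOk from its cache or
// negotiate an RSA public key before the full exchange.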
err := cc.writePacket([]byte{0, 0, 0, 0, ShaCommand, FastAuthFail})
if err != nil {
logutil.Logger(ctx).Error("authSha packet write failed", zap.Error(err))
return nil, err
}
err = cc.flush(ctx)
if err != nil {
logutil.Logger(ctx).Error("authSha packet flush failed", zap.Error(err))
return nil, err
}
data, err := cc.readPacket()
if err != nil {
logutil.Logger(ctx).Error("authSha packet read failed", zap.Error(err))
return nil, err
}
return bytes.Trim(data, "\x00"), nil
}
func (cc *clientConn) SessionStatusToString() string {
status := cc.ctx.Status()
inTxn, autoCommit := 0, 0
if status&mysql.ServerStatusInTrans > 0 {
inTxn = 1
}
if status&mysql.ServerStatusAutocommit > 0 {
autoCommit = 1
}
return fmt.Sprintf("inTxn:%d, autocommit:%d",
inTxn, autoCommit,
)
}
func (cc *clientConn) openSession() error {
var tlsStatePtr *tls.ConnectionState
if cc.tlsConn != nil {
tlsState := cc.tlsConn.ConnectionState()
tlsStatePtr = &tlsState
}
var err error
cc.ctx, err = cc.server.driver.OpenCtx(cc.connectionID, cc.capability, cc.collation, cc.dbname, tlsStatePtr)
if err != nil {
return err
}
err = cc.server.checkConnectionCount()
if err != nil {
return err
}
return nil
}
func (cc *clientConn) openSessionAndDoAuth(authData []byte, authPlugin string) error {
// Open a context unless this was done before.
if cc.ctx == nil {
err := cc.openSession()
if err != nil {
return err
}
}
hasPassword := "YES"
if len(authData) == 0 {
hasPassword = "NO"
}
host, port, err := cc.PeerHost(hasPassword)
if err != nil {
return err
}
if !cc.isUnixSocket && authPlugin == mysql.AuthSocket {
return errAccessDeniedNoPassword.FastGenByArgs(cc.user, host)
}
if !cc.ctx.Auth(&auth.UserIdentity{Username: cc.user, Hostname: host}, authData, cc.salt) {
return errAccessDenied.FastGenByArgs(cc.user, host, hasPassword)
}
cc.ctx.SetPort(port)
if cc.dbname != "" {
err = cc.useDB(context.Background(), cc.dbname)
if err != nil {
return err
}
}
cc.ctx.SetSessionManager(cc.server)
return nil
}
// checkAuthPlugin checks whether the authentication plugins of the server, client, and user configuration match.
func (cc *clientConn) checkAuthPlugin(ctx context.Context, resp *handshakeResponse41) ([]byte, error) {
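// In short, this resolves which plugin actually authenticates the user:
//   - auth_socket users are authenticated by the OS user on the other end of the Unix socket;
//   - users with no plugin recorded fall back to mysql_native_password;
//   - otherwise, if the plugin offered by the server or the client differs from the user's
//     configured plugin, an AuthSwitchRequest is sent and the client's new auth data is returned.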
// Open a context unless this was done before.
if cc.ctx == nil {
err := cc.openSession()
if err != nil {
return nil, err
}
}
authData := resp.Auth
hasPassword := "YES"
if len(authData) == 0 {
hasPassword = "NO"
}
host, _, err := cc.PeerHost(hasPassword)
if err != nil {
return nil, err
}
userplugin, err := cc.ctx.AuthPluginForUser(&auth.UserIdentity{Username: cc.user, Hostname: host})
if err != nil {
return nil, err
}
if userplugin == mysql.AuthSocket {
resp.AuthPlugin = mysql.AuthSocket
user, err := user.LookupId(fmt.Sprint(cc.socketCredUID))
if err != nil {
return nil, err
}
return []byte(user.Username), nil
}
if len(userplugin) == 0 {
logutil.Logger(ctx).Warn("No user plugin set, assuming MySQL Native Password",
zap.String("user", cc.user), zap.String("host", cc.peerHost))
resp.AuthPlugin = mysql.AuthNativePassword
return nil, nil
}
// If the authentication method sent by the server (cc.authPlugin) doesn't match
// the plugin configured for the user account in the mysql.user.plugin column,
// or if the authentication method sent by the server doesn't match the authentication
// method sent by the client (*authPlugin), then we need to switch the authentication
// method to match the one configured for that specific user.
if (cc.authPlugin != userplugin) || (cc.authPlugin != resp.AuthPlugin) {
authData, err := cc.authSwitchRequest(ctx, userplugin)
if err != nil {
return nil, err
}
resp.AuthPlugin = userplugin
return authData, nil
}
return nil, nil
}
func (cc *clientConn) PeerHost(hasPassword string) (host, port string, err error) {
if len(cc.peerHost) > 0 {
return cc.peerHost, "", nil
}
host = variable.DefHostname
if cc.isUnixSocket {
cc.peerHost = host
return
}
addr := cc.bufReadConn.RemoteAddr().String()
host, port, err = net.SplitHostPort(addr)
if err != nil {
err = errAccessDenied.GenWithStackByArgs(cc.user, addr, hasPassword)
return
}
cc.peerHost = host
cc.peerPort = port
return
}
// skipInitConnect follows MySQL's rules of when init-connect should be skipped.
// In 5.7 it is any user with SUPER privilege, but in 8.0 it is:
// - SUPER or the CONNECTION_ADMIN dynamic privilege.
// - (additional exception) users with expired passwords (not yet supported)
// In TiDB CONNECTION_ADMIN is satisfied by SUPER, so we only need to check once.
func (cc *clientConn) skipInitConnect() bool {
checker := privilege.GetPrivilegeManager(cc.ctx.Session)
activeRoles := cc.ctx.GetSessionVars().ActiveRoles
return checker != nil && checker.RequestDynamicVerification(activeRoles, "CONNECTION_ADMIN", false)
}
// initResultEncoder initializes the result encoder for the current connection.
func (cc *clientConn) initResultEncoder(ctx context.Context) {
chs, err := variable.GetSessionOrGlobalSystemVar(cc.ctx.GetSessionVars(), variable.CharacterSetResults)
if err != nil {
chs = ""
logutil.Logger(ctx).Warn("get character_set_results system variable failed", zap.Error(err))
}
cc.rsEncoder = newResultEncoder(chs)
}
// initConnect runs the init_connect SQL statement(s) if the variable has been specified.
// The semantics are MySQL compatible.
func (cc *clientConn) initConnect(ctx context.Context) error {
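// For illustration, init_connect is a global system variable set via SQL, e.g.:
//   SET GLOBAL init_connect = 'INSERT INTO audit.connlog VALUES (CONNECTION_ID(), NOW())';
// (the audit.connlog table here is hypothetical). Every statement in the value is executed
// below, and any error aborts the handshake for users that are not exempt via skipInitConnect.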
val, err := cc.ctx.GetSessionVars().GlobalVarsAccessor.GetGlobalSysVar(variable.InitConnect)
if err != nil {
return err
}
if val == "" || cc.skipInitConnect() {
return nil
}
logutil.Logger(ctx).Debug("init_connect starting")
stmts, err := cc.ctx.Parse(ctx, val)
if err != nil {
return err
}
for _, stmt := range stmts {
rs, err := cc.ctx.ExecuteStmt(ctx, stmt)
if err != nil {
return err
}
// init_connect does not care about the results,
// but they need to be drained because of lazy loading.
if rs != nil {
req := rs.NewChunk(nil)
for {
if err = rs.Next(ctx, req); err != nil {
return err
}
if req.NumRows() == 0 {
break
}
}
if err := rs.Close(); err != nil {
return err
}
}
}
logutil.Logger(ctx).Debug("init_connect complete")
return nil
}
// Run reads client queries and writes query results to the client in a loop. If there is a panic during
// query handling, it is recovered and the panic error is logged.
// This function returns, and the connection is closed, if there is an IO error or a panic.
func (cc *clientConn) Run(ctx context.Context) {
const size = 4096
defer func() {
r := recover()
if r != nil {
buf := make([]byte, size)
stackSize := runtime.Stack(buf, false)
buf = buf[:stackSize]
logutil.Logger(ctx).Error("connection running loop panic",
zap.Stringer("lastSQL", getLastStmtInConn{cc}),
zap.String("err", fmt.Sprintf("%v", r)),
zap.String("stack", string(buf)),
)
err := cc.writeError(ctx, errors.New(fmt.Sprintf("%v", r)))
terror.Log(err)
metrics.PanicCounter.WithLabelValues(metrics.LabelSession).Inc()
}
if atomic.LoadInt32(&cc.status) != connStatusShutdown {
err := cc.Close()
terror.Log(err)
}
}()
// Usually, client connection status changes between [dispatching] <=> [reading].
// When some event happens, server may notify this client connection by setting
// the status to special values, for example: kill or graceful shutdown.
// The client connection detects these events when it fails to change the status
// by a CAS operation; it then takes the corresponding actions.
for {
if !atomic.CompareAndSwapInt32(&cc.status, connStatusDispatching, connStatusReading) ||
// The check below should never be hit,
// but keep it as a reminder and as a code reference for connStatusWaitShutdown.
atomic.LoadInt32(&cc.status) == connStatusWaitShutdown {
return
}
cc.alloc.Reset()
// close connection when idle time is more than wait_timeout
waitTimeout := cc.getSessionVarsWaitTimeout(ctx)
cc.pkt.setReadTimeout(time.Duration(waitTimeout) * time.Second)
start := time.Now()
data, err := cc.readPacket()
if err != nil {
if terror.ErrorNotEqual(err, io.EOF) {
if netErr, isNetErr := errors.Cause(err).(net.Error); isNetErr && netErr.Timeout() {
idleTime := time.Since(start)
logutil.Logger(ctx).Info("read packet timeout, close this connection",
zap.Duration("idle", idleTime),
zap.Uint64("waitTimeout", waitTimeout),
zap.Error(err),
)
} else {
errStack := errors.ErrorStack(err)
if !strings.Contains(errStack, "use of closed network connection") {
logutil.Logger(ctx).Warn("read packet failed, close this connection",
zap.Error(errors.SuspendStack(err)))
}
}
}
disconnectByClientWithError.Inc()
return
}
if !atomic.CompareAndSwapInt32(&cc.status, connStatusReading, connStatusDispatching) {
return
}
startTime := time.Now()
err = cc.dispatch(ctx, data)
cc.chunkAlloc.Reset()
if err != nil {
cc.audit(plugin.Error) // tell the plugin API there was a dispatch error
if terror.ErrorEqual(err, io.EOF) {
cc.addMetrics(data[0], startTime, nil)
disconnectNormal.Inc()
return
} else if terror.ErrResultUndetermined.Equal(err) {
logutil.Logger(ctx).Error("result undetermined, close this connection", zap.Error(err))
disconnectErrorUndetermined.Inc()
return
} else if terror.ErrCritical.Equal(err) {
metrics.CriticalErrorCounter.Add(1)
logutil.Logger(ctx).Fatal("critical error, stop the server", zap.Error(err))
}
var txnMode string
if cc.ctx != nil {
txnMode = cc.ctx.GetSessionVars().GetReadableTxnMode()
}
metrics.ExecuteErrorCounter.WithLabelValues(metrics.ExecuteErrorToLabel(err)).Inc()
if storeerr.ErrLockAcquireFailAndNoWaitSet.Equal(err) {
logutil.Logger(ctx).Debug("Expected error for FOR UPDATE NOWAIT", zap.Error(err))
} else {
logutil.Logger(ctx).Info("command dispatched failed",
zap.String("connInfo", cc.String()),
zap.String("command", mysql.Command2Str[data[0]]),
zap.String("status", cc.SessionStatusToString()),
zap.Stringer("sql", getLastStmtInConn{cc}),
zap.String("txn_mode", txnMode),
zap.String("err", errStrForLog(err, cc.ctx.GetSessionVars().EnableRedactLog)),
)
}
err1 := cc.writeError(ctx, err)
terror.Log(err1)
}
cc.addMetrics(data[0], startTime, err)
cc.pkt.sequence = 0
}
}
// ShutdownOrNotify will Shutdown this client connection, or do its best to notify.
func (cc *clientConn) ShutdownOrNotify() bool {
if (cc.ctx.Status() & mysql.ServerStatusInTrans) > 0 {
return false
}
// If the client connection status is reading, it's safe to shutdown it.
if atomic.CompareAndSwapInt32(&cc.status, connStatusReading, connStatusShutdown) {
return true
}
// If the client connection status is dispatching, we can't shutdown it immediately,
// so set the status to WaitShutdown as a notification, the loop in clientConn.Run
// will detect it and then exit.
atomic.StoreInt32(&cc.status, connStatusWaitShutdown)
return false
}
func errStrForLog(err error, enableRedactLog bool) string {
if enableRedactLog {
// currently, only ErrParse is considered when enableRedactLog because it may contain sensitive information like
// password or accesskey
if parser.ErrParse.Equal(err) {
return "fail to parse SQL and can't redact when enable log redaction"
}
}
if kv.ErrKeyExists.Equal(err) || parser.ErrParse.Equal(err) || infoschema.ErrTableNotExists.Equal(err) {
// Do not log the stack for duplicate-entry, parse, or table-not-exists errors.
return err.Error()
}
return errors.ErrorStack(err)
}
func (cc *clientConn) addMetrics(cmd byte, startTime time.Time, err error) {
if cmd == mysql.ComQuery && cc.ctx.Value(sessionctx.LastExecuteDDL) != nil {
// Don't take DDL execute time into account.
// It's already recorded by other metrics in ddl package.
return
}
var counter prometheus.Counter
if err != nil && int(cmd) < len(queryTotalCountErr) {
counter = queryTotalCountErr[cmd]
} else if err == nil && int(cmd) < len(queryTotalCountOk) {
counter = queryTotalCountOk[cmd]
}
if counter != nil {
counter.Inc()
} else {
label := strconv.Itoa(int(cmd))
if err != nil {
metrics.QueryTotalCounter.WithLabelValues(label, "Error").Inc()
} else {
metrics.QueryTotalCounter.WithLabelValues(label, "OK").Inc()
}
}
stmtType := cc.ctx.GetSessionVars().StmtCtx.StmtType
sqlType := metrics.LblGeneral
if stmtType != "" {
sqlType = stmtType
}
cost := time.Since(startTime)
sessionVar := cc.ctx.GetSessionVars()
cc.ctx.GetTxnWriteThroughputSLI().FinishExecuteStmt(cost, cc.ctx.AffectedRows(), sessionVar.InTxn())
switch sqlType {
case "Use":
queryDurationHistogramUse.Observe(cost.Seconds())
case "Show":
queryDurationHistogramShow.Observe(cost.Seconds())
case "Begin":
queryDurationHistogramBegin.Observe(cost.Seconds())
case "Commit":
queryDurationHistogramCommit.Observe(cost.Seconds())
case "Rollback":
queryDurationHistogramRollback.Observe(cost.Seconds())
case "Insert":
queryDurationHistogramInsert.Observe(cost.Seconds())
case "Replace":
queryDurationHistogramReplace.Observe(cost.Seconds())
case "Delete":
queryDurationHistogramDelete.Observe(cost.Seconds())
case "Update":
queryDurationHistogramUpdate.Observe(cost.Seconds())
case "Select":
queryDurationHistogramSelect.Observe(cost.Seconds())
case "Execute":
queryDurationHistogramExecute.Observe(cost.Seconds())
case "Set":
queryDurationHistogramSet.Observe(cost.Seconds())
case metrics.LblGeneral:
queryDurationHistogramGeneral.Observe(cost.Seconds())
default:
metrics.QueryDurationHistogram.WithLabelValues(sqlType).Observe(cost.Seconds())
}
}
// dispatch handles a client request based on the command, which is the first byte of the data.
// It also gets a token from the server, which is used to limit the number of concurrently handled clients.
// The most frequently used command is ComQuery.
func (cc *clientConn) dispatch(ctx context.Context, data []byte) error {
defer func() {
// reset killed for each request
atomic.StoreUint32(&cc.ctx.GetSessionVars().Killed, 0)
}()
t := time.Now()
if (cc.ctx.Status() & mysql.ServerStatusInTrans) > 0 {
connIdleDurationHistogramInTxn.Observe(t.Sub(cc.lastActive).Seconds())
} else {
connIdleDurationHistogramNotInTxn.Observe(t.Sub(cc.lastActive).Seconds())
}
span := opentracing.StartSpan("server.dispatch")
cfg := config.GetGlobalConfig()
if cfg.OpenTracing.Enable {
ctx = opentracing.ContextWithSpan(ctx, span)
}
var cancelFunc context.CancelFunc
ctx, cancelFunc = context.WithCancel(ctx)
cc.mu.Lock()
cc.mu.cancelFunc = cancelFunc
cc.mu.Unlock()
cc.lastPacket = data
cmd := data[0]
data = data[1:]
if variable.TopSQLEnabled() {
defer pprof.SetGoroutineLabels(ctx)
}
if variable.EnablePProfSQLCPU.Load() {
label := getLastStmtInConn{cc}.PProfLabel()
if len(label) > 0 {
defer pprof.SetGoroutineLabels(ctx)
ctx = pprof.WithLabels(ctx, pprof.Labels("sql", label))
pprof.SetGoroutineLabels(ctx)
}
}
if trace.IsEnabled() {
lc := getLastStmtInConn{cc}
sqlType := lc.PProfLabel()
if len(sqlType) > 0 {
var task *trace.Task
ctx, task = trace.NewTask(ctx, sqlType)
defer task.End()
trace.Log(ctx, "sql", lc.String())
ctx = logutil.WithTraceLogger(ctx, cc.connectionID)
taskID := *(*uint64)(unsafe.Pointer(task))
ctx = pprof.WithLabels(ctx, pprof.Labels("trace", strconv.FormatUint(taskID, 10)))
pprof.SetGoroutineLabels(ctx)
}
}
token := cc.server.getToken()
defer func() {
// if handleChangeUser failed, cc.ctx may be nil
if cc.ctx != nil {
cc.ctx.SetProcessInfo("", t, mysql.ComSleep, 0)
}
cc.server.releaseToken(token)
span.Finish()
cc.lastActive = time.Now()
}()
vars := cc.ctx.GetSessionVars()
// reset killed for each request
atomic.StoreUint32(&vars.Killed, 0)
if cmd < mysql.ComEnd {
cc.ctx.SetCommandValue(cmd)
}
dataStr := string(hack.String(data))
switch cmd {
case mysql.ComPing, mysql.ComStmtClose, mysql.ComStmtSendLongData, mysql.ComStmtReset,
mysql.ComSetOption, mysql.ComChangeUser:
cc.ctx.SetProcessInfo("", t, cmd, 0)
case mysql.ComInitDB:
cc.ctx.SetProcessInfo("use "+dataStr, t, cmd, 0)
}
switch cmd {
case mysql.ComSleep:
// TODO: According to mysql document, this command is supposed to be used only internally.
// So it's just a temp fix, not sure if it's done right.
// Investigate this command and write test case later.
return nil
case mysql.ComQuit:
return io.EOF
case mysql.ComInitDB:
if err := cc.useDB(ctx, dataStr); err != nil {
return err
}
return cc.writeOK(ctx)
case mysql.ComQuery: // Most frequently used command.
// For issue 1989
// The input payload may end with a '\0' byte. We didn't find a related MySQL document about it, but the MySQL
// implementation accepts that case, so trim the trailing '\0' here as if the payload were a NUL-terminated string.
// See http://dev.mysql.com/doc/internals/en/com-query.html
if len(data) > 0 && data[len(data)-1] == 0 {
data = data[:len(data)-1]
dataStr = string(hack.String(data))
}
return cc.handleQuery(ctx, dataStr)
case mysql.ComFieldList:
return cc.handleFieldList(ctx, dataStr)
// ComCreateDB, ComDropDB
case mysql.ComRefresh:
return cc.handleRefresh(ctx, data[0])
case mysql.ComShutdown: // redirect to SQL
if err := cc.handleQuery(ctx, "SHUTDOWN"); err != nil {
return err
}
return cc.writeOK(ctx)
case mysql.ComStatistics:
return cc.writeStats(ctx)
// ComProcessInfo, ComConnect, ComProcessKill, ComDebug
case mysql.ComPing:
return cc.writeOK(ctx)
case mysql.ComChangeUser:
return cc.handleChangeUser(ctx, data)
// ComBinlogDump, ComTableDump, ComConnectOut, ComRegisterSlave
case mysql.ComStmtPrepare:
return cc.handleStmtPrepare(ctx, dataStr)
case mysql.ComStmtExecute:
return cc.handleStmtExecute(ctx, data)
case mysql.ComStmtSendLongData:
return cc.handleStmtSendLongData(data)
case mysql.ComStmtClose:
return cc.handleStmtClose(data)
case mysql.ComStmtReset:
return cc.handleStmtReset(ctx, data)
case mysql.ComSetOption:
return cc.handleSetOption(ctx, data)
case mysql.ComStmtFetch:
return cc.handleStmtFetch(ctx, data)
// ComDaemon, ComBinlogDumpGtid
case mysql.ComResetConnection:
return cc.handleResetConnection(ctx)
// ComEnd
default:
return mysql.NewErrf(mysql.ErrUnknown, "command %d not supported now", nil, cmd)
}
}
func (cc *clientConn) writeStats(ctx context.Context) error {
msg := []byte("Uptime: 0 Threads: 0 Questions: 0 Slow queries: 0 Opens: 0 Flush tables: 0 Open tables: 0 Queries per second avg: 0.000")
data := cc.alloc.AllocWithLen(4, len(msg))
data = append(data, msg...)
err := cc.writePacket(data)
if err != nil {
return err
}
return cc.flush(ctx)
}
func (cc *clientConn) useDB(ctx context.Context, db string) (err error) {
// If the input is "use `SELECT`", the mysql client just sends "SELECT",
// so we wrap db in backquotes.
stmts, err := cc.ctx.Parse(ctx, "use `"+db+"`")
if err != nil {
return err
}
_, err = cc.ctx.ExecuteStmt(ctx, stmts[0])
if err != nil {
return err
}
cc.dbname = db
return
}
func (cc *clientConn) flush(ctx context.Context) error {
defer func() {
trace.StartRegion(ctx, "FlushClientConn").End()
if cc.ctx != nil && cc.ctx.WarningCount() > 0 {
for _, err := range cc.ctx.GetWarnings() {
var warn *errors.Error
if ok := goerr.As(err.Err, &warn); ok {
code := uint16(warn.Code())
errno.IncrementWarning(code, cc.user, cc.peerHost)
}
}
}
}()
failpoint.Inject("FakeClientConn", func() {
if cc.pkt == nil {
failpoint.Return(nil)
}
})
return cc.pkt.flush()
}
func (cc *clientConn) writeOK(ctx context.Context) error {
msg := cc.ctx.LastMessage()
return cc.writeOkWith(ctx, msg, cc.ctx.AffectedRows(), cc.ctx.LastInsertID(), cc.ctx.Status(), cc.ctx.WarningCount())
}
func (cc *clientConn) writeOkWith(ctx context.Context, msg string, affectedRows, lastInsertID uint64, status, warnCnt uint16) error {
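// Wire layout produced below (after the 4-byte packet header), per the MySQL OK_Packet format:
//   1 byte    0x00 OK header
//   lenenc    affected rows
//   lenenc    last insert id
//   2 bytes   status flags   (only with CLIENT_PROTOCOL_41)
//   2 bytes   warning count  (only with CLIENT_PROTOCOL_41)
//   lenenc    info message   (optional)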
enclen := 0
if len(msg) > 0 {
enclen = lengthEncodedIntSize(uint64(len(msg))) + len(msg)
}
data := cc.alloc.AllocWithLen(4, 32+enclen)
data = append(data, mysql.OKHeader)
data = dumpLengthEncodedInt(data, affectedRows)
data = dumpLengthEncodedInt(data, lastInsertID)
if cc.capability&mysql.ClientProtocol41 > 0 {
data = dumpUint16(data, status)
data = dumpUint16(data, warnCnt)
}
if enclen > 0 {
// although MySQL manual says the info message is string<EOF>(https://dev.mysql.com/doc/internals/en/packet-OK_Packet.html),
// it is actually string<lenenc>
data = dumpLengthEncodedString(data, []byte(msg))
}
err := cc.writePacket(data)
if err != nil {
return err
}
return cc.flush(ctx)
}
func (cc *clientConn) writeError(ctx context.Context, e error) error {
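// Wire layout produced below (after the 4-byte packet header), per the MySQL ERR_Packet format:
//   1 byte    0xff ERR header
//   2 bytes   error code (little endian)
//   1 byte    '#' SQL state marker  (only with CLIENT_PROTOCOL_41)
//   5 bytes   SQL state             (only with CLIENT_PROTOCOL_41)
//   string    human-readable error message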
var (
m *mysql.SQLError
te *terror.Error
ok bool
)
originErr := errors.Cause(e)
if te, ok = originErr.(*terror.Error); ok {
m = terror.ToSQLError(te)
} else {
e := errors.Cause(originErr)
switch y := e.(type) {
case *terror.Error:
m = terror.ToSQLError(y)
default:
m = mysql.NewErrf(mysql.ErrUnknown, "%s", nil, e.Error())
}
}
cc.lastCode = m.Code
defer errno.IncrementError(m.Code, cc.user, cc.peerHost)
data := cc.alloc.AllocWithLen(4, 16+len(m.Message))
data = append(data, mysql.ErrHeader)
data = append(data, byte(m.Code), byte(m.Code>>8))
if cc.capability&mysql.ClientProtocol41 > 0 {
data = append(data, '#')
data = append(data, m.State...)
}
data = append(data, m.Message...)
err := cc.writePacket(data)
if err != nil {
return err
}
return cc.flush(ctx)
}
// writeEOF writes an EOF packet.
// Note this function won't flush the stream because maybe there are more
// packets following it.
// serverStatus is a flag-bits field representing server information
// in the packet.
func (cc *clientConn) writeEOF(serverStatus uint16) error {
data := cc.alloc.AllocWithLen(4, 9)
data = append(data, mysql.EOFHeader)
if cc.capability&mysql.ClientProtocol41 > 0 {
data = dumpUint16(data, cc.ctx.WarningCount())
status := cc.ctx.Status()
status |= serverStatus
data = dumpUint16(data, status)
}
err := cc.writePacket(data)
return err
}
func (cc *clientConn) writeReq(ctx context.Context, filePath string) error {
data := cc.alloc.AllocWithLen(4, 5+len(filePath))
data = append(data, mysql.LocalInFileHeader)
data = append(data, filePath...)
err := cc.writePacket(data)
if err != nil {
return err
}
return cc.flush(ctx)
}
func insertDataWithCommit(ctx context.Context, prevData,
curData []byte, loadDataInfo *executor.LoadDataInfo) ([]byte, error) {
var err error
var reachLimit bool
for {
prevData, reachLimit, err = loadDataInfo.InsertData(ctx, prevData, curData)
if err != nil {
return nil, err
}
if !reachLimit {
break
}
// push into commit task queue
err = loadDataInfo.EnqOneTask(ctx)
if err != nil {
return prevData, err
}
curData = prevData
prevData = nil
}
return prevData, nil
}
// processStream processes the input stream from the network.
func processStream(ctx context.Context, cc *clientConn, loadDataInfo *executor.LoadDataInfo, wg *sync.WaitGroup) {
var err error
var shouldBreak bool
var prevData, curData []byte
defer func() {
r := recover()
if r != nil {
logutil.Logger(ctx).Error("process routine panicked",
zap.Reflect("r", r),
zap.Stack("stack"))
}
if err != nil || r != nil {
loadDataInfo.ForceQuit()
} else {
loadDataInfo.CloseTaskQueue()
}
wg.Done()
}()
for {
curData, err = cc.readPacket()
if err != nil {
if terror.ErrorNotEqual(err, io.EOF) {
logutil.Logger(ctx).Error("read packet failed", zap.Error(err))
break
}
}
if len(curData) == 0 {
loadDataInfo.Drained = true
shouldBreak = true
if len(prevData) == 0 {
break
}
}
select {
case <-loadDataInfo.QuitCh:
err = errors.New("processStream forced to quit")
default:
}
if err != nil {
break
}
// prepare batch and enqueue task
prevData, err = insertDataWithCommit(ctx, prevData, curData, loadDataInfo)
if err != nil {
break
}
if shouldBreak {
break
}
}
if err != nil {
logutil.Logger(ctx).Error("load data process stream error", zap.Error(err))
return
}
if err = loadDataInfo.EnqOneTask(ctx); err != nil {
logutil.Logger(ctx).Error("load data process stream error", zap.Error(err))
return
}
}
// handleLoadData does the additional work after processing the 'load data' query.
// It sends the client a file path, then reads the file content from the client and inserts the data into the database.
func (cc *clientConn) handleLoadData(ctx context.Context, loadDataInfo *executor.LoadDataInfo) error {
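// handleLoadData is organized as a small producer/consumer pipeline, roughly:
//   processStream (producer): reads packets from the client, batches rows, and enqueues
//                             commit tasks via EnqOneTask;
//   CommitWork (consumer):    dequeues tasks and commits them in the current transaction.
// An empty packet from the client marks the end of the file content; on error, the
// remaining packets are drained so the connection is not reset.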
// If the server handles the load data request, the client has to set the ClientLocalFiles capability.
if cc.capability&mysql.ClientLocalFiles == 0 {
return errNotAllowedCommand
}
if loadDataInfo == nil {
return errors.New("load data info is empty")
}
if !loadDataInfo.Table.Meta().IsBaseTable() {
return errors.New("can only load data into base tables")
}
err := cc.writeReq(ctx, loadDataInfo.Path)
if err != nil {
return err
}
loadDataInfo.InitQueues()
loadDataInfo.SetMaxRowsInBatch(uint64(loadDataInfo.Ctx.GetSessionVars().DMLBatchSize))
loadDataInfo.StartStopWatcher()
// let stop watcher goroutine quit
defer loadDataInfo.ForceQuit()
err = loadDataInfo.Ctx.NewTxn(ctx)
if err != nil {
return err
}
// processStream processes the input data and enqueues commit tasks
wg := new(sync.WaitGroup)
wg.Add(1)
go processStream(ctx, cc, loadDataInfo, wg)
err = loadDataInfo.CommitWork(ctx)
wg.Wait()
if err != nil {
if !loadDataInfo.Drained {
logutil.Logger(ctx).Info("not drained yet, try reading left data from client connection")
}
// drain the data from the client connection until an empty packet is received, otherwise the connection will be reset
for !loadDataInfo.Drained {
// check the kill flag again, so the draining loop can quit if an empty packet cannot be received
if atomic.CompareAndSwapUint32(&loadDataInfo.Ctx.GetSessionVars().Killed, 1, 0) {
logutil.Logger(ctx).Warn("receiving kill, stop draining data, connection may be reset")
return executor.ErrQueryInterrupted
}
curData, err1 := cc.readPacket()
if err1 != nil {
logutil.Logger(ctx).Error("drain reading left data encounter errors", zap.Error(err1))
break
}
if len(curData) == 0 {
loadDataInfo.Drained = true
logutil.Logger(ctx).Info("draining finished for error", zap.Error(err))
break
}
}
}
loadDataInfo.SetMessage()
return err
}
// getDataFromPath gets file contents from file path.
func (cc *clientConn) getDataFromPath(ctx context.Context, path string) ([]byte, error) {
err := cc.writeReq(ctx, path)
if err != nil {
return nil, err
}
var prevData, curData []byte
for {
curData, err = cc.readPacket()
if err != nil && terror.ErrorNotEqual(err, io.EOF) {
return nil, err
}
if len(curData) == 0 {
break
}
prevData = append(prevData, curData...)
}
return prevData, nil
}
// handleLoadStats does the additional work after processing the 'load stats' query.
// It sends the client a file path, then reads the file content from the client and loads it into the storage.
func (cc *clientConn) handleLoadStats(ctx context.Context, loadStatsInfo *executor.LoadStatsInfo) error {
// If the server handles the load data request, the client has to set the ClientLocalFiles capability.
if cc.capability&mysql.ClientLocalFiles == 0 {
return errNotAllowedCommand
}
if loadStatsInfo == nil {
return errors.New("load stats: info is empty")
}
data, err := cc.getDataFromPath(ctx, loadStatsInfo.Path)
if err != nil {
return err
}
if len(data) == 0 {
return nil
}
return loadStatsInfo.Update(data)
}
// handleIndexAdvise does the index advise work and returns the advise result for index.
func (cc *clientConn) handleIndexAdvise(ctx context.Context, indexAdviseInfo *executor.IndexAdviseInfo) error {
if cc.capability&mysql.ClientLocalFiles == 0 {
return errNotAllowedCommand
}
if indexAdviseInfo == nil {
return errors.New("Index Advise: info is empty")
}
data, err := cc.getDataFromPath(ctx, indexAdviseInfo.Path)
if err != nil {
return err
}
if len(data) == 0 {
return errors.New("Index Advise: infile is empty")
}
if err := indexAdviseInfo.GetIndexAdvice(ctx, data); err != nil {
return err
}
// TODO: Write the rss []ResultSet. It will be done in another PR.
return nil
}
func (cc *clientConn) handlePlanReplayerLoad(ctx context.Context, planReplayerLoadInfo *executor.PlanReplayerLoadInfo) error {
if cc.capability&mysql.ClientLocalFiles == 0 {
return errNotAllowedCommand
}
if planReplayerLoadInfo == nil {
return errors.New("plan replayer load: info is empty")
}
data, err := cc.getDataFromPath(ctx, planReplayerLoadInfo.Path)
if err != nil {
return err
}
if len(data) == 0 {
return nil
}
return planReplayerLoadInfo.Update(data)
}
func (cc *clientConn) audit(eventType plugin.GeneralEvent) {
err := plugin.ForeachPlugin(plugin.Audit, func(p *plugin.Plugin) error {
audit := plugin.DeclareAuditManifest(p.Manifest)
if audit.OnGeneralEvent != nil {
cmd := mysql.Command2Str[byte(atomic.LoadUint32(&cc.ctx.GetSessionVars().CommandValue))]
ctx := context.WithValue(context.Background(), plugin.ExecStartTimeCtxKey, cc.ctx.GetSessionVars().StartTime)
audit.OnGeneralEvent(ctx, cc.ctx.GetSessionVars(), eventType, cmd)
}
return nil
})
if err != nil {
terror.Log(err)
}
}
// handleQuery executes the sql query string and writes result set or result ok to the client.
// As the execution time of this function represents the performance of TiDB, we do time log and metrics here.
// There is a special query `load data` that does not return result, which is handled differently.
// Query `load stats` does not return result either.
func (cc *clientConn) handleQuery(ctx context.Context, sql string) (err error) {
defer trace.StartRegion(ctx, "handleQuery").End()
sc := cc.ctx.GetSessionVars().StmtCtx
prevWarns := sc.GetWarnings()
stmts, err := cc.ctx.Parse(ctx, sql)
if err != nil {
return err
}
if len(stmts) == 0 {
return cc.writeOK(ctx)
}
warns := sc.GetWarnings()
parserWarns := warns[len(prevWarns):]
var pointPlans []plannercore.Plan
if len(stmts) > 1 {
// The client gets to choose if it allows multi-statements, and
// probably defaults to OFF. This helps protect against SQL injection attacks
// where the first statement is terminated early and an entirely new statement
// is then run.
capabilities := cc.ctx.GetSessionVars().ClientCapability
if capabilities&mysql.ClientMultiStatements < 1 {
// The client does not have multi-statement enabled. We now need to determine
// how to handle an unsafe situation based on the multiStmt sysvar.
switch cc.ctx.GetSessionVars().MultiStatementMode {
case variable.OffInt:
err = errMultiStatementDisabled
return err
case variable.OnInt:
// multi statement is fully permitted, do nothing
default:
warn := stmtctx.SQLWarn{Level: stmtctx.WarnLevelWarning, Err: errMultiStatementDisabled}
parserWarns = append(parserWarns, warn)
}
}
// Only pre-build point plans for multi-statement query
pointPlans, err = cc.prefetchPointPlanKeys(ctx, stmts)
if err != nil {
return err
}
}
if len(pointPlans) > 0 {
defer cc.ctx.ClearValue(plannercore.PointPlanKey)
}
var retryable bool
for i, stmt := range stmts {
if len(pointPlans) > 0 {
// Save the point plan in Session, so we don't need to build the point plan again.
cc.ctx.SetValue(plannercore.PointPlanKey, plannercore.PointPlanVal{Plan: pointPlans[i]})
}
retryable, err = cc.handleStmt(ctx, stmt, parserWarns, i == len(stmts)-1)
if err != nil {
if !retryable || !errors.ErrorEqual(err, storeerr.ErrTiFlashServerTimeout) {
break
}
_, allowTiFlashFallback := cc.ctx.GetSessionVars().AllowFallbackToTiKV[kv.TiFlash]
if !allowTiFlashFallback {
break
}
// When the TiFlash server seems down, we append a warning to remind the user to check the status of the TiFlash
// server and fallback to TiKV.
warns := append(parserWarns, stmtctx.SQLWarn{Level: stmtctx.WarnLevelError, Err: err})
delete(cc.ctx.GetSessionVars().IsolationReadEngines, kv.TiFlash)
_, err = cc.handleStmt(ctx, stmt, warns, i == len(stmts)-1)
cc.ctx.GetSessionVars().IsolationReadEngines[kv.TiFlash] = struct{}{}
if err != nil {
break
}
}
}
return err
}
// prefetchPointPlanKeys extracts the point keys in a multi-statement query and
// uses BatchGet to fetch them, so the values are cached in the snapshot cache, saving RPC round trips.
// For pessimistic transactions, the keys are batch locked.
func (cc *clientConn) prefetchPointPlanKeys(ctx context.Context, stmts []ast.StmtNode) ([]plannercore.Plan, error) {
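// As an illustrative (hypothetical) example of what benefits from this prefetch, a client
// with multi-statement enabled might send in one packet:
//   UPDATE t SET v = v + 1 WHERE id = 1; UPDATE t SET v = v + 1 WHERE id = 2;
// Both point-update keys are fetched with one BatchGet (and batch locked under pessimistic
// transactions) before the statements are executed one by one.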
txn, err := cc.ctx.Txn(false)
if err != nil {
return nil, err
}
if !txn.Valid() {
// Only prefetch in-transaction query for simplicity.
// Later we can support out-transaction multi-statement query.
return nil, nil
}
vars := cc.ctx.GetSessionVars()
if vars.TxnCtx.IsPessimistic {
if vars.IsIsolation(ast.ReadCommitted) {
// TODO: to support READ-COMMITTED, we need to avoid getting new TS for each statement in the query.
return nil, nil
}
if vars.TxnCtx.GetForUpdateTS() != vars.TxnCtx.StartTS {
// Do not handle the case that ForUpdateTS is changed for simplicity.
return nil, nil
}
}
pointPlans := make([]plannercore.Plan, len(stmts))
var idxKeys []kv.Key
var rowKeys []kv.Key
sc := vars.StmtCtx
for i, stmt := range stmts {
switch stmt.(type) {
case *ast.UseStmt:
// If there is a "use db" statement, we shouldn't cache even if it's possible.
// Consider the scenario where there are statements that could execute on multiple
// schemas, but the schema is actually different.
return nil, nil
}
// TODO: the preprocess is run twice, we should find some way to avoid doing it again.
// TODO: handle the PreprocessorReturn.
if err = plannercore.Preprocess(cc.ctx, stmt); err != nil {
return nil, err
}
p := plannercore.TryFastPlan(cc.ctx.Session, stmt)
pointPlans[i] = p
if p == nil {
continue
}
// Only support Update for now.
// TODO: support other point plans.
switch x := p.(type) {
case *plannercore.Update:
updateStmt := stmt.(*ast.UpdateStmt)
if pp, ok := x.SelectPlan.(*plannercore.PointGetPlan); ok {
if pp.PartitionInfo != nil {
continue
}
if pp.IndexInfo != nil {
executor.ResetUpdateStmtCtx(sc, updateStmt, vars)
idxKey, err1 := executor.EncodeUniqueIndexKey(cc.ctx, pp.TblInfo, pp.IndexInfo, pp.IndexValues, pp.TblInfo.ID)
if err1 != nil {
return nil, err1
}
idxKeys = append(idxKeys, idxKey)
} else {
rowKeys = append(rowKeys, tablecodec.EncodeRowKeyWithHandle(pp.TblInfo.ID, pp.Handle))
}
}
}
}
if len(idxKeys) == 0 && len(rowKeys) == 0 {
return pointPlans, nil
}
snapshot := txn.GetSnapshot()
idxVals, err1 := snapshot.BatchGet(ctx, idxKeys)
if err1 != nil {
return nil, err1
}
for idxKey, idxVal := range idxVals {
h, err2 := tablecodec.DecodeHandleInUniqueIndexValue(idxVal, false)
if err2 != nil {
return nil, err2
}
tblID := tablecodec.DecodeTableID(hack.Slice(idxKey))
rowKeys = append(rowKeys, tablecodec.EncodeRowKeyWithHandle(tblID, h))
}
if vars.TxnCtx.IsPessimistic {
allKeys := append(rowKeys, idxKeys...)
err = executor.LockKeys(ctx, cc.ctx, vars.LockWaitTimeout, allKeys...)
if err != nil {
// Suppress the lock error; we are not going to handle it here for simplicity.
// Log the error before clearing it, otherwise the logged error would always be nil.
logutil.BgLogger().Warn("lock keys error on prefetch", zap.Error(err))
err = nil
}
} else {
_, err = snapshot.BatchGet(ctx, rowKeys)
if err != nil {
return nil, err
}
}
return pointPlans, nil
}
// The first return value indicates whether the call of handleStmt has no side effect and can be retried.
// Currently, the first return value is used to fall back to TiKV when TiFlash is down.
func (cc *clientConn) handleStmt(ctx context.Context, stmt ast.StmtNode, warns []stmtctx.SQLWarn, lastStmt bool) (bool, error) {
ctx = context.WithValue(ctx, execdetails.StmtExecDetailKey, &execdetails.StmtExecDetails{})
ctx = context.WithValue(ctx, util.ExecDetailsKey, &util.ExecDetails{})
reg := trace.StartRegion(ctx, "ExecuteStmt")
cc.audit(plugin.Starting)
rs, err := cc.ctx.ExecuteStmt(ctx, stmt)
reg.End()
// The session tracker detachment from global tracker is solved in the `rs.Close` in most cases.
// If the rs is nil, the detachment will be done in the `handleNoDelay`.
if rs != nil {
defer terror.Call(rs.Close)
}
if err != nil {
return true, err
}
status := cc.ctx.Status()
if lastStmt {
cc.ctx.GetSessionVars().StmtCtx.AppendWarnings(warns)
} else {
status |= mysql.ServerMoreResultsExists
}
if rs != nil {
if connStatus := atomic.LoadInt32(&cc.status); connStatus == connStatusShutdown {
return false, executor.ErrQueryInterrupted
}
if retryable, err := cc.writeResultset(ctx, rs, false, status, 0); err != nil {
return retryable, err
}
return false, nil
}
handled, err := cc.handleQuerySpecial(ctx, status)
if handled {
if execStmt := cc.ctx.Value(session.ExecStmtVarKey); execStmt != nil {
execStmt.(*executor.ExecStmt).FinishExecuteStmt(0, err, false)
}
}
if err != nil {
return false, err
}
return false, nil
}
func (cc *clientConn) handleQuerySpecial(ctx context.Context, status uint16) (bool, error) {
handled := false
loadDataInfo := cc.ctx.Value(executor.LoadDataVarKey)
if loadDataInfo != nil {
handled = true
defer cc.ctx.SetValue(executor.LoadDataVarKey, nil)
if err := cc.handleLoadData(ctx, loadDataInfo.(*executor.LoadDataInfo)); err != nil {
return handled, err
}
}
loadStats := cc.ctx.Value(executor.LoadStatsVarKey)
if loadStats != nil {
handled = true
defer cc.ctx.SetValue(executor.LoadStatsVarKey, nil)
if err := cc.handleLoadStats(ctx, loadStats.(*executor.LoadStatsInfo)); err != nil {
return handled, err
}
}
indexAdvise := cc.ctx.Value(executor.IndexAdviseVarKey)
if indexAdvise != nil {
handled = true
defer cc.ctx.SetValue(executor.IndexAdviseVarKey, nil)
if err := cc.handleIndexAdvise(ctx, indexAdvise.(*executor.IndexAdviseInfo)); err != nil {
return handled, err
}
}
planReplayerLoad := cc.ctx.Value(executor.PlanReplayerLoadVarKey)
if planReplayerLoad != nil {
handled = true
defer cc.ctx.SetValue(executor.PlanReplayerLoadVarKey, nil)
if err := cc.handlePlanReplayerLoad(ctx, planReplayerLoad.(*executor.PlanReplayerLoadInfo)); err != nil {
return handled, err
}
}
return handled, cc.writeOkWith(ctx, cc.ctx.LastMessage(), cc.ctx.AffectedRows(), cc.ctx.LastInsertID(), status, cc.ctx.WarningCount())
}
// handleFieldList returns the field list for a table.
// The sql string is composed of a table name and a terminating character \x00.
func (cc *clientConn) handleFieldList(ctx context.Context, sql string) (err error) {
parts := strings.Split(sql, "\x00")
columns, err := cc.ctx.FieldList(parts[0])
if err != nil {
return err
}
data := cc.alloc.AllocWithLen(4, 1024)
cc.initResultEncoder(ctx)
defer cc.rsEncoder.clean()
for _, column := range columns {
// Currently we don't output defaultValue, but we reserve the defaultValue length byte to make the mariadb client happy.
// https://dev.mysql.com/doc/internals/en/com-query-response.html#column-definition
// TODO: fill the right DefaultValues.
column.DefaultValueLength = 0
column.DefaultValue = []byte{}
data = data[0:4]
data = column.Dump(data, cc.rsEncoder)
if err := cc.writePacket(data); err != nil {
return err
}
}
if err := cc.writeEOF(0); err != nil {
return err
}
return cc.flush(ctx)
}
// writeResultset writes the data of a ResultSet to the client, using rs.Next to fetch row data.
// If binary is true, the data is encoded in BINARY format.
// serverStatus is a flag-bits field representing server information.
// fetchSize is the desired number of rows to be fetched each time when the client uses a cursor.
// retryable indicates whether the call of writeResultset has no side effect and can be retried to correct the error. The call
// has side effects in cursor mode or once data has been sent to the client. Currently retryable is used to fall back to TiKV when
// TiFlash is down.
func (cc *clientConn) writeResultset(ctx context.Context, rs ResultSet, binary bool, serverStatus uint16, fetchSize int) (retryable bool, runErr error) {
defer func() {
// close ResultSet when cursor doesn't exist
r := recover()
if r == nil {
return
}
if str, ok := r.(string); !ok || !strings.HasPrefix(str, memory.PanicMemoryExceed) {
panic(r)
}
// TODO(jianzhang.zj: add metrics here)
runErr = errors.Errorf("%v", r)
buf := make([]byte, 4096)
stackSize := runtime.Stack(buf, false)
buf = buf[:stackSize]
logutil.Logger(ctx).Error("write query result panic", zap.Stringer("lastSQL", getLastStmtInConn{cc}), zap.String("stack", string(buf)))
}()
cc.initResultEncoder(ctx)
defer cc.rsEncoder.clean()
if mysql.HasCursorExistsFlag(serverStatus) {
if err := cc.writeChunksWithFetchSize(ctx, rs, serverStatus, fetchSize); err != nil {
return false, err
}
return false, cc.flush(ctx)
}
if retryable, err := cc.writeChunks(ctx, rs, binary, serverStatus); err != nil {
return retryable, err
}
return false, cc.flush(ctx)
}
func (cc *clientConn) writeColumnInfo(columns []*ColumnInfo, serverStatus uint16) error {
data := cc.alloc.AllocWithLen(4, 1024)
data = dumpLengthEncodedInt(data, uint64(len(columns)))
if err := cc.writePacket(data); err != nil {
return err
}
for _, v := range columns {
data = data[0:4]
data = v.Dump(data, cc.rsEncoder)
if err := cc.writePacket(data); err != nil {
return err
}
}
return cc.writeEOF(serverStatus)
}
// writeChunks writes data from a Chunk, which is filled with data by a ResultSet, into a connection.
// binary specifies the way to dump data. Any error encountered while dumping data is returned.
// serverStatus is a bit flag that carries server information.
// The first return value indicates whether an error occurs at the first call of ResultSet.Next.
func (cc *clientConn) writeChunks(ctx context.Context, rs ResultSet, binary bool, serverStatus uint16) (bool, error) {
data := cc.alloc.AllocWithLen(4, 1024)
req := rs.NewChunk(cc.chunkAlloc)
gotColumnInfo := false
firstNext := true
var stmtDetail *execdetails.StmtExecDetails
stmtDetailRaw := ctx.Value(execdetails.StmtExecDetailKey)
if stmtDetailRaw != nil {
stmtDetail = stmtDetailRaw.(*execdetails.StmtExecDetails)
}
for {
failpoint.Inject("fetchNextErr", func(value failpoint.Value) {
switch value.(string) {
case "firstNext":
failpoint.Return(firstNext, storeerr.ErrTiFlashServerTimeout)
case "secondNext":
if !firstNext {
failpoint.Return(firstNext, storeerr.ErrTiFlashServerTimeout)
}
}
})
// Here server.tidbResultSet implements Next method.
err := rs.Next(ctx, req)
if err != nil {
return firstNext, err
}
firstNext = false
if !gotColumnInfo {
// We need to call Next before we get columns.
// Otherwise, we will get incorrect columns info.
columns := rs.Columns()
if err = cc.writeColumnInfo(columns, serverStatus); err != nil {
return false, err
}
gotColumnInfo = true
}
rowCount := req.NumRows()
if rowCount == 0 {
break
}
reg := trace.StartRegion(ctx, "WriteClientConn")
start := time.Now()
for i := 0; i < rowCount; i++ {
data = data[0:4]
if binary {
data, err = dumpBinaryRow(data, rs.Columns(), req.GetRow(i), cc.rsEncoder)
} else {
data, err = dumpTextRow(data, rs.Columns(), req.GetRow(i), cc.rsEncoder)
}
if err != nil {
reg.End()
return false, err
}
if err = cc.writePacket(data); err != nil {
reg.End()
return false, err
}
}
reg.End()
if stmtDetail != nil {
stmtDetail.WriteSQLRespDuration += time.Since(start)
}
}
return false, cc.writeEOF(serverStatus)
}
// writeChunksWithFetchSize writes data from a Chunk, which is filled with data by a ResultSet, into a connection.
// Rows are always dumped in BINARY format since this path serves cursor fetches; any error encountered while dumping data is returned.
// serverStatus is a bit flag that carries server information.
// fetchSize is the desired number of rows to fetch each time when the client uses a cursor.
func (cc *clientConn) writeChunksWithFetchSize(ctx context.Context, rs ResultSet, serverStatus uint16, fetchSize int) error {
fetchedRows := rs.GetFetchedRows()
for len(fetchedRows) < fetchSize {
// If fetchedRows are not enough, fetch more data from the record set.
req := rs.NewChunk(cc.chunkAlloc)
// Here server.tidbResultSet implements Next method.
if err := rs.Next(ctx, req); err != nil {
return err
}
rowCount := req.NumRows()
if rowCount == 0 {
break
}
// filling fetchedRows with chunk
for i := 0; i < rowCount; i++ {
fetchedRows = append(fetchedRows, req.GetRow(i))
}
req = chunk.Renew(req, cc.ctx.GetSessionVars().MaxChunkSize)
}
// tell the client COM_STMT_FETCH has finished by setting proper serverStatus,
// and close ResultSet.
if len(fetchedRows) == 0 {
serverStatus &^= mysql.ServerStatusCursorExists
serverStatus |= mysql.ServerStatusLastRowSend
terror.Call(rs.Close)
return cc.writeEOF(serverStatus)
}
// construct the rows sent to the client according to fetchSize.
var curRows []chunk.Row
if fetchSize < len(fetchedRows) {
curRows = fetchedRows[:fetchSize]
fetchedRows = fetchedRows[fetchSize:]
} else {
curRows = fetchedRows
fetchedRows = fetchedRows[:0]
}
rs.StoreFetchedRows(fetchedRows)
data := cc.alloc.AllocWithLen(4, 1024)
var stmtDetail *execdetails.StmtExecDetails
stmtDetailRaw := ctx.Value(execdetails.StmtExecDetailKey)
if stmtDetailRaw != nil {
stmtDetail = stmtDetailRaw.(*execdetails.StmtExecDetails)
}
start := time.Now()
var err error
for _, row := range curRows {
data = data[0:4]
data, err = dumpBinaryRow(data, rs.Columns(), row, cc.rsEncoder)
if err != nil {
return err
}
if err = cc.writePacket(data); err != nil {
return err
}
}
if stmtDetail != nil {
stmtDetail.WriteSQLRespDuration += time.Since(start)
}
if cl, ok := rs.(fetchNotifier); ok {
cl.OnFetchReturned()
}
return cc.writeEOF(serverStatus)
}
func (cc *clientConn) setConn(conn net.Conn) {
cc.bufReadConn = newBufferedReadConn(conn)
if cc.pkt == nil {
cc.pkt = newPacketIO(cc.bufReadConn)
} else {
// Preserve current sequence number.
cc.pkt.setBufferedReadConn(cc.bufReadConn)
}
}
func (cc *clientConn) upgradeToTLS(tlsConfig *tls.Config) error {
// Important: read from buffered reader instead of the original net.Conn because it may contain data we need.
tlsConn := tls.Server(cc.bufReadConn, tlsConfig)
if err := tlsConn.Handshake(); err != nil {
return err
}
cc.setConn(tlsConn)
cc.tlsConn = tlsConn
return nil
}
func (cc *clientConn) handleChangeUser(ctx context.Context, data []byte) error {
user, data := parseNullTermString(data)
cc.user = string(hack.String(user))
if len(data) < 1 {
return mysql.ErrMalformPacket
}
passLen := int(data[0])
data = data[1:]
if passLen > len(data) {
return mysql.ErrMalformPacket
}
pass := data[:passLen]
data = data[passLen:]
dbName, _ := parseNullTermString(data)
cc.dbname = string(hack.String(dbName))
if err := cc.ctx.Close(); err != nil {
logutil.Logger(ctx).Debug("close old context failed", zap.Error(err))
}
if err := cc.openSessionAndDoAuth(pass, ""); err != nil {
return err
}
return cc.handleCommonConnectionReset(ctx)
}
func (cc *clientConn) handleResetConnection(ctx context.Context) error {
user := cc.ctx.GetSessionVars().User
err := cc.ctx.Close()
if err != nil {
logutil.Logger(ctx).Debug("close old context failed", zap.Error(err))
}
var tlsStatePtr *tls.ConnectionState
if cc.tlsConn != nil {
tlsState := cc.tlsConn.ConnectionState()
tlsStatePtr = &tlsState
}
cc.ctx, err = cc.server.driver.OpenCtx(cc.connectionID, cc.capability, cc.collation, cc.dbname, tlsStatePtr)
if err != nil {
return err
}
if !cc.ctx.AuthWithoutVerification(user) {
return errors.New("Could not reset connection")
}
if cc.dbname != "" { // Restore the current DB
err = cc.useDB(context.Background(), cc.dbname)
if err != nil {
return err
}
}
cc.ctx.SetSessionManager(cc.server)
return cc.handleCommonConnectionReset(ctx)
}
func (cc *clientConn) handleCommonConnectionReset(ctx context.Context) error {
if plugin.IsEnable(plugin.Audit) {
cc.ctx.GetSessionVars().ConnectionInfo = cc.connectInfo()
}
err := plugin.ForeachPlugin(plugin.Audit, func(p *plugin.Plugin) error {
authPlugin := plugin.DeclareAuditManifest(p.Manifest)
if authPlugin.OnConnectionEvent != nil {
connInfo := cc.ctx.GetSessionVars().ConnectionInfo
err := authPlugin.OnConnectionEvent(context.Background(), plugin.ChangeUser, connInfo)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
return cc.writeOK(ctx)
}
// handleRefresh is a safe no-op for every subcommand except 0x01 "FLUSH PRIVILEGES".
func (cc *clientConn) handleRefresh(ctx context.Context, subCommand byte) error {
if subCommand == 0x01 {
if err := cc.handleQuery(ctx, "FLUSH PRIVILEGES"); err != nil {
return err
}
}
return cc.writeOK(ctx)
}
var _ fmt.Stringer = getLastStmtInConn{}
type getLastStmtInConn struct {
*clientConn
}
func (cc getLastStmtInConn) String() string {
if len(cc.lastPacket) == 0 {
return ""
}
cmd, data := cc.lastPacket[0], cc.lastPacket[1:]
switch cmd {
case mysql.ComInitDB:
return "Use " + string(data)
case mysql.ComFieldList:
return "ListFields " + string(data)
case mysql.ComQuery, mysql.ComStmtPrepare:
sql := string(hack.String(data))
if cc.ctx.GetSessionVars().EnableRedactLog {
sql = parser.Normalize(sql)
}
return tidbutil.QueryStrForLog(sql)
case mysql.ComStmtExecute, mysql.ComStmtFetch:
stmtID := binary.LittleEndian.Uint32(data[0:4])
return tidbutil.QueryStrForLog(cc.preparedStmt2String(stmtID))
case mysql.ComStmtClose, mysql.ComStmtReset:
stmtID := binary.LittleEndian.Uint32(data[0:4])
return mysql.Command2Str[cmd] + " " + strconv.Itoa(int(stmtID))
default:
if cmdStr, ok := mysql.Command2Str[cmd]; ok {
return cmdStr
}
return string(hack.String(data))
}
}
// PProfLabel returns the SQL label used to tag pprof.
func (cc getLastStmtInConn) PProfLabel() string {
if len(cc.lastPacket) == 0 {
return ""
}
cmd, data := cc.lastPacket[0], cc.lastPacket[1:]
switch cmd {
case mysql.ComInitDB:
return "UseDB"
case mysql.ComFieldList:
return "ListFields"
case mysql.ComStmtClose:
return "CloseStmt"
case mysql.ComStmtReset:
return "ResetStmt"
case mysql.ComQuery, mysql.ComStmtPrepare:
return parser.Normalize(tidbutil.QueryStrForLog(string(hack.String(data))))
case mysql.ComStmtExecute, mysql.ComStmtFetch:
stmtID := binary.LittleEndian.Uint32(data[0:4])
return tidbutil.QueryStrForLog(cc.preparedStmt2StringNoArgs(stmtID))
default:
return ""
}
}
| {
attrs := make(map[string]string)
pos := 0
for pos < len(data) {
key, _, off, err := parseLengthEncodedBytes(data[pos:])
if err != nil {
return attrs, err
}
pos += off
value, _, off, err := parseLengthEncodedBytes(data[pos:])
if err != nil {
return attrs, err
}
pos += off
attrs[string(key)] = string(value)
}
return attrs, nil
} |
FlashlightOutline.js | 'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _react = require('react');
var _react2 = _interopRequireDefault(_react);
var _propTypes = require('prop-types');
var _propTypes2 = _interopRequireDefault(_propTypes);
var _SvgContainer = require('./SvgContainer');
var _SvgContainer2 = _interopRequireDefault(_SvgContainer);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var FlashlightOutline = function FlashlightOutline(props) {
return _react2.default.createElement(
_SvgContainer2.default,
{
height: props.height,
width: props.width,
color: props.color,
onClick: props.onClick,
rotate: props.rotate ? 1 : 0,
shake: props.shake ? 1 : 0,
beat: props.beat ? 1 : 0,
className: props.className
},
_react2.default.createElement(
'svg',
{ style: props.style, className: props.cssClasses, xmlns: 'http://www.w3.org/2000/svg', viewBox: '0 0 512 512' },
props.title ? _react2.default.createElement(
'title',
null,
props.title
) : '',
_react2.default.createElement('path', { d: 'M456.64 162.86L349.12 55.36c-13.15-13.14-28.68-7.17-41.82 6l-11.95 12c-26.13 26.13-27.62 58.38-29.42 83.31-.89 12.24-9.78 27.55-18.51 36.28L58.58 381.67c-16.35 16.33-12.69 39.42 3.73 55.84l12.17 12.17c16.36 16.35 39.43 20.16 55.86 3.74l188.83-188.8c8.74-8.74 24-17.55 36.29-18.52 24.87-1.86 58.62-4.85 83.26-29.49l11.94-11.94c13.15-13.14 19.12-28.67 5.98-41.81z', fill: 'none', stroke: 'currentColor', strokeMiterlimit: '10', strokeWidth: '32' }),
_react2.default.createElement('circle', { cx: '224.68', cy: '287.28', r: '20' }),
_react2.default.createElement('path', { fill: 'none', stroke: 'currentColor', strokeLinecap: 'round', strokeMiterlimit: '10', strokeWidth: '32', d: 'M289 81l142 142' })
)
);
};
FlashlightOutline.defaultProps = { | // style
style: {},
color: '#000000',
height: '22px',
width: '22px',
cssClasses: '',
title: '',
// animation
shake: false,
beat: false,
rotate: false
};
FlashlightOutline.propTypes = {
// style
style: _propTypes2.default.object,
color: _propTypes2.default.string,
height: _propTypes2.default.string,
width: _propTypes2.default.string,
cssClasses: _propTypes2.default.string,
title: _propTypes2.default.string,
// animation
shake: _propTypes2.default.bool,
beat: _propTypes2.default.bool,
rotate: _propTypes2.default.bool,
// functions
onClick: _propTypes2.default.func
};
exports.default = FlashlightOutline;
module.exports = exports['default'];
//# sourceMappingURL=FlashlightOutline.js.map | |
server.go | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package cdc
import (
"context"
"fmt"
"net/http"
"strings"
"sync"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/log"
"github.com/pingcap/ticdc/cdc/capture"
"github.com/pingcap/ticdc/cdc/kv"
"github.com/pingcap/ticdc/cdc/puller/sorter"
"github.com/pingcap/ticdc/pkg/config"
cerror "github.com/pingcap/ticdc/pkg/errors"
"github.com/pingcap/ticdc/pkg/httputil"
"github.com/pingcap/ticdc/pkg/util"
"github.com/pingcap/ticdc/pkg/version"
tidbkv "github.com/pingcap/tidb/kv"
"github.com/prometheus/client_golang/prometheus"
pd "github.com/tikv/pd/client"
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/mvcc"
"go.etcd.io/etcd/pkg/logutil"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"golang.org/x/sync/errgroup"
"golang.org/x/time/rate"
"google.golang.org/grpc"
"google.golang.org/grpc/backoff"
)
const (
ownerRunInterval = time.Millisecond * 500
)
// Server is the capture server
type Server struct {
captureV2 *capture.Capture
capture *Capture
owner *Owner
ownerLock sync.RWMutex
statusServer *http.Server
pdClient pd.Client
etcdClient *kv.CDCEtcdClient
kvStorage tidbkv.Storage
pdEndpoints []string
}
// NewServer creates a Server instance.
func | (pdEndpoints []string) (*Server, error) {
conf := config.GetGlobalServerConfig()
log.Info("creating CDC server",
zap.Strings("pd-addrs", pdEndpoints),
zap.Stringer("config", conf),
)
s := &Server{
pdEndpoints: pdEndpoints,
}
return s, nil
}
// Run runs the server.
func (s *Server) Run(ctx context.Context) error {
conf := config.GetGlobalServerConfig()
grpcTLSOption, err := conf.Security.ToGRPCDialOption()
if err != nil {
return errors.Trace(err)
}
pdClient, err := pd.NewClientWithContext(
ctx, s.pdEndpoints, conf.Security.PDSecurityOption(),
pd.WithGRPCDialOptions(
grpcTLSOption,
grpc.WithBlock(),
grpc.WithConnectParams(grpc.ConnectParams{
Backoff: backoff.Config{
BaseDelay: time.Second,
Multiplier: 1.1,
Jitter: 0.1,
MaxDelay: 3 * time.Second,
},
MinConnectTimeout: 3 * time.Second,
}),
))
if err != nil {
return cerror.WrapError(cerror.ErrServerNewPDClient, err)
}
s.pdClient = pdClient
if config.NewReplicaImpl {
tlsConfig, err := conf.Security.ToTLSConfig()
if err != nil {
return errors.Trace(err)
}
logConfig := logutil.DefaultZapLoggerConfig
logConfig.Level = zap.NewAtomicLevelAt(zapcore.ErrorLevel)
etcdCli, err := clientv3.New(clientv3.Config{
Endpoints: s.pdEndpoints,
TLS: tlsConfig,
Context: ctx,
LogConfig: &logConfig,
DialTimeout: 5 * time.Second,
DialOptions: []grpc.DialOption{
grpcTLSOption,
grpc.WithBlock(),
grpc.WithConnectParams(grpc.ConnectParams{
Backoff: backoff.Config{
BaseDelay: time.Second,
Multiplier: 1.1,
Jitter: 0.1,
MaxDelay: 3 * time.Second,
},
MinConnectTimeout: 3 * time.Second,
}),
},
})
if err != nil {
return errors.Annotate(cerror.WrapError(cerror.ErrNewCaptureFailed, err), "new etcd client")
}
etcdClient := kv.NewCDCEtcdClient(ctx, etcdCli)
s.etcdClient = &etcdClient
}
// To not block CDC server startup, we need to warn instead of error
// when TiKV is incompatible.
errorTiKVIncompatible := false
err = version.CheckClusterVersion(ctx, s.pdClient, s.pdEndpoints[0], conf.Security, errorTiKVIncompatible)
if err != nil {
return err
}
err = s.startStatusHTTP()
if err != nil {
return err
}
kv.InitWorkerPool()
kvStore, err := kv.CreateTiStore(strings.Join(s.pdEndpoints, ","), conf.Security)
if err != nil {
return errors.Trace(err)
}
defer func() {
err := kvStore.Close()
if err != nil {
log.Warn("kv store close failed", zap.Error(err))
}
}()
s.kvStorage = kvStore
ctx = util.PutKVStorageInCtx(ctx, kvStore)
if config.NewReplicaImpl {
s.captureV2 = capture.NewCapture(s.pdClient, s.kvStorage, s.etcdClient)
return s.run(ctx)
}
// When a capture suicides, restart it
for {
if err := s.run(ctx); cerror.ErrCaptureSuicide.NotEqual(err) {
return err
}
log.Info("server recovered", zap.String("capture-id", s.capture.info.ID))
}
}
func (s *Server) setOwner(owner *Owner) {
s.ownerLock.Lock()
defer s.ownerLock.Unlock()
s.owner = owner
}
func (s *Server) campaignOwnerLoop(ctx context.Context) error {
// In most failure cases, we don't return error directly, just run another
// campaign loop. We treat campaign loop as a special background routine.
conf := config.GetGlobalServerConfig()
rl := rate.NewLimiter(0.05, 2)
for {
err := rl.Wait(ctx)
if err != nil {
if errors.Cause(err) == context.Canceled {
return nil
}
return errors.Trace(err)
}
// Campaign to be an owner, it blocks until it becomes the owner
if err := s.capture.Campaign(ctx); err != nil {
switch errors.Cause(err) {
case context.Canceled:
return nil
case mvcc.ErrCompacted:
continue
}
log.Warn("campaign owner failed", zap.Error(err))
continue
}
captureID := s.capture.info.ID
log.Info("campaign owner successfully", zap.String("capture-id", captureID))
owner, err := NewOwner(ctx, s.pdClient, conf.Security, s.capture.session, conf.GcTTL, time.Duration(conf.OwnerFlushInterval))
if err != nil {
log.Warn("create new owner failed", zap.Error(err))
continue
}
s.setOwner(owner)
if err := owner.Run(ctx, ownerRunInterval); err != nil {
if errors.Cause(err) == context.Canceled {
log.Info("owner exited", zap.String("capture-id", captureID))
select {
case <-ctx.Done():
// only exits the campaignOwnerLoop if parent context is done
return ctx.Err()
default:
}
log.Info("owner exited", zap.String("capture-id", captureID))
}
err2 := s.capture.Resign(ctx)
if err2 != nil {
// if resigning the owner failed, return the error to let the capture exit
return errors.Annotatef(err2, "resign owner failed, capture: %s", captureID)
}
log.Warn("run owner failed", zap.Error(err))
}
// owner is resigned by API, reset owner and continue the campaign loop
s.setOwner(nil)
}
}
func (s *Server) etcdHealthChecker(ctx context.Context) error {
ticker := time.NewTicker(time.Second * 3)
defer ticker.Stop()
conf := config.GetGlobalServerConfig()
httpCli, err := httputil.NewClient(conf.Security)
if err != nil {
return err
}
defer httpCli.CloseIdleConnections()
metrics := make(map[string]prometheus.Observer)
for _, pdEndpoint := range s.pdEndpoints {
metrics[pdEndpoint] = etcdHealthCheckDuration.WithLabelValues(conf.AdvertiseAddr, pdEndpoint)
}
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
for _, pdEndpoint := range s.pdEndpoints {
start := time.Now()
ctx, cancel := context.WithTimeout(ctx, time.Duration(time.Second*10))
req, err := http.NewRequestWithContext(
ctx, http.MethodGet, fmt.Sprintf("%s/health", pdEndpoint), nil)
if err != nil {
log.Warn("etcd health check failed", zap.Error(err))
cancel()
continue
}
_, err = httpCli.Do(req)
if err != nil {
log.Warn("etcd health check error", zap.Error(err))
} else {
metrics[pdEndpoint].Observe(float64(time.Since(start)) / float64(time.Second))
}
cancel()
}
}
}
}
func (s *Server) run(ctx context.Context) (err error) {
if !config.NewReplicaImpl {
kvStorage, err := util.KVStorageFromCtx(ctx)
if err != nil {
return errors.Trace(err)
}
capture, err := NewCapture(ctx, s.pdEndpoints, s.pdClient, kvStorage)
if err != nil {
return err
}
s.capture = capture
s.etcdClient = &capture.etcdClient
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
wg, cctx := errgroup.WithContext(ctx)
if config.NewReplicaImpl {
wg.Go(func() error {
return s.captureV2.Run(cctx)
})
} else {
wg.Go(func() error {
return s.campaignOwnerLoop(cctx)
})
wg.Go(func() error {
return s.capture.Run(cctx)
})
}
wg.Go(func() error {
return s.etcdHealthChecker(cctx)
})
wg.Go(func() error {
return sorter.RunWorkerPool(cctx)
})
wg.Go(func() error {
return kv.RunWorkerPool(cctx)
})
return wg.Wait()
}
// Close closes the server.
func (s *Server) Close() {
if s.capture != nil {
if !config.NewReplicaImpl {
s.capture.Cleanup()
}
closeCtx, closeCancel := context.WithTimeout(context.Background(), time.Second*2)
err := s.capture.Close(closeCtx)
if err != nil {
log.Error("close capture", zap.Error(err))
}
closeCancel()
}
if s.captureV2 != nil {
s.captureV2.AsyncClose()
}
if s.statusServer != nil {
err := s.statusServer.Close()
if err != nil {
log.Error("close status server", zap.Error(err))
}
s.statusServer = nil
}
}
| NewServer |
structs3.rs | // structs3.rs
// Structs contain data, but can also have logic. In this exercise we have
// defined the Package struct and we want to test some logic attached to it.
// Make the code compile and the tests pass!
// If you have issues execute `rustlings hint structs3`
#[derive(Debug)]
struct Package {
sender_country: String,
recipient_country: String,
weight_in_grams: i32,
}
impl Package {
fn new(sender_country: String, recipient_country: String, weight_in_grams: i32) -> Package {
if weight_in_grams <= 0 {
// panic statement goes here...
panic!("Value must be bigger than 0, got {}.", weight_in_grams);
} else {
Package {
sender_country,
recipient_country,
weight_in_grams,
}
}
}
fn is_international(&self) -> bool {
// A package is international when the sender and recipient countries differ.
self.sender_country != self.recipient_country
}
fn get_fees(&self, cents_per_gram: i32) -> i32 {
// Fees are linear in weight.
self.weight_in_grams * cents_per_gram
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
#[should_panic]
fn | () {
let sender_country = String::from("Spain");
let recipient_country = String::from("Austria");
Package::new(sender_country, recipient_country, -2210);
}
#[test]
fn create_international_package() {
let sender_country = String::from("Spain");
let recipient_country = String::from("Russia");
let package = Package::new(sender_country, recipient_country, 1200);
assert!(package.is_international());
}
#[test]
fn create_local_package() {
let sender_country = String::from("Canada");
let recipient_country = sender_country.clone();
let package = Package::new(sender_country, recipient_country, 1200);
assert!(!package.is_international());
}
#[test]
fn calculate_transport_fees() {
let sender_country = String::from("Spain");
let recipient_country = String::from("Spain");
let cents_per_gram = 3;
let package = Package::new(sender_country, recipient_country, 1500);
assert_eq!(package.get_fees(cents_per_gram), 4500);
}
}
| fail_creating_weightless_package |
linux.go | package linux
import (
"os"
"path"
"fmt"
log "github.com/sirupsen/logrus"
gofig "github.com/akutz/gofig/types"
"github.com/akutz/goof"
"github.com/AVENTER-UG/rexray/libstorage/api/context"
"github.com/AVENTER-UG/rexray/libstorage/api/registry"
"github.com/AVENTER-UG/rexray/libstorage/api/types"
"github.com/AVENTER-UG/rexray/libstorage/api/utils"
apiconfig "github.com/AVENTER-UG/rexray/libstorage/api/utils/config"
)
const (
providerName = "linux"
defaultVolumeSize int64 = 16
)
var ctxExactMountKey = interface{}("exactmount")
type driver struct {
config gofig.Config
}
type volumeMapping struct {
Name string `json:"Name"`
VolumeMountPoint string `json:"Mountpoint"`
VolumeStatus map[string]interface{} `json:"Status"`
}
func (v *volumeMapping) VolumeName() string {
return v.Name
}
func (v *volumeMapping) MountPoint() string {
return v.VolumeMountPoint
}
func (v *volumeMapping) Status() map[string]interface{} {
return v.VolumeStatus
}
func init() {
registry.RegisterIntegrationDriver(providerName, newDriver)
}
func newDriver() types.IntegrationDriver {
return &driver{}
}
func (d *driver) Init(ctx types.Context, config gofig.Config) error {
d.config = config
ctx.WithFields(log.Fields{
types.ConfigIgVolOpsMountRootPath: d.volumeRootPath(),
types.ConfigIgVolOpsCreateDefaultType: d.volumeType(),
types.ConfigIgVolOpsCreateDefaultIOPS: d.iops(),
types.ConfigIgVolOpsCreateDefaultSize: d.size(),
types.ConfigIgVolOpsCreateDefaultAZ: d.availabilityZone(),
types.ConfigIgVolOpsCreateDefaultFsType: d.fsType(),
types.ConfigIgVolOpsMountPath: d.mountDirPath(),
types.ConfigIgVolOpsCreateImplicit: d.volumeCreateImplicit(),
}).Info("linux integration driver successfully initialized")
return nil
}
func (d *driver) Name() string {
return providerName
}
func buildVolumeStatus(v *types.Volume, service string) map[string]interface{} {
vs := make(map[string]interface{})
vs["name"] = v.Name
vs["size"] = v.Size
vs["iops"] = v.IOPS
vs["type"] = v.Type
vs["availabilityZone"] = v.AvailabilityZone
vs["fields"] = v.Fields
vs["service"] = service
vs["server"] = service
return vs
}
// List returns all available volume mappings.
func (d *driver) List(
ctx types.Context,
opts types.Store) ([]types.VolumeMapping, error) {
client := context.MustClient(ctx)
vols, err := client.Storage().Volumes(
ctx,
&types.VolumesOpts{
Attachments: opts.GetAttachments(),
Opts: opts,
},
)
if err != nil {
return nil, err
}
serviceName, serviceNameOK := context.ServiceName(ctx)
if !serviceNameOK {
return nil, goof.New("service name is missing")
}
volMaps := []types.VolumeMapping{}
for _, v := range vols {
vs := buildVolumeStatus(v, serviceName)
volMaps = append(volMaps, &volumeMapping{
Name: v.Name,
VolumeMountPoint: v.MountPoint(),
VolumeStatus: vs,
})
}
return volMaps, nil
}
// Inspect returns a specific volume as identified by the provided
// volume name.
func (d *driver) Inspect(
ctx types.Context,
volumeName string,
opts types.Store) (types.VolumeMapping, error) {
fields := log.Fields{
"volumeName": volumeName,
"opts": opts}
ctx.WithFields(fields).Info("inspecting volume")
serviceName, serviceNameOK := context.ServiceName(ctx)
if !serviceNameOK {
return nil, goof.New("service name is missing")
}
vol, err := d.volumeInspectByName(
ctx,
volumeName,
opts.GetAttachments(),
opts,
)
if err != nil {
return nil, err
}
if vol == nil {
return nil, utils.NewNotFoundError(volumeName)
}
vs := buildVolumeStatus(vol, serviceName)
obj := &volumeMapping{
Name: vol.Name,
VolumeMountPoint: vol.MountPoint(),
VolumeStatus: vs,
}
fields = log.Fields{
"volumeName": volumeName,
"volume": obj}
ctx.WithFields(fields).Info("volume inspected")
return obj, nil
}
// Mount will return a mount point path when specifying either a volumeName
// or volumeID. If an overwriteFs boolean is specified, it will overwrite
// the FS based on newFsType if it is detected that there is no FS present.
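//
// Illustrative call (a sketch only; it assumes an initialized driver and libStorage
// client context, and the volume name "data01" is made up):
//
//   mountPath, vol, err := d.Mount(ctx, "", "data01",
//       &types.VolumeMountOpts{Opts: utils.NewStore()})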
func (d *driver) Mount(
ctx types.Context,
volumeID, volumeName string,
opts *types.VolumeMountOpts) (string, *types.Volume, error) {
ctx.WithFields(log.Fields{
"volumeName": volumeName,
"volumeID": volumeID,
"opts": opts}).Info("mounting volume")
lsAtt := types.VolAttReqWithDevMapOnlyVolsAttachedToInstanceOrUnattachedVols
if opts.Preempt {
lsAtt = types.VolAttReqWithDevMapForInstance
}
vol, err := d.volumeInspectByIDOrName(
ctx, volumeID, volumeName, lsAtt, opts.Opts)
if isErrNotFound(err) && d.volumeCreateImplicit() {
if vol, err = d.Create(ctx, volumeName, &types.VolumeCreateOpts{
Opts: utils.NewStore(),
}); err != nil {
return "", nil, goof.WithError(
"problem creating volume implicitly", err)
}
} else if err != nil {
return "", nil, err
}
if vol == nil {
return "", nil, goof.New("no volume returned or created")
}
client := context.MustClient(ctx)
if vol.AttachmentState == types.VolumeAvailable ||
(opts.Preempt && vol.AttachmentState != types.VolumeAttached) {
mp, err := d.getVolumeMountPath(vol.Name)
if err != nil {
return "", nil, err
}
ctx.Debug("performing precautionary unmount")
_ = client.OS().Unmount(ctx, mp, opts.Opts)
var token string
vol, token, err = client.Storage().VolumeAttach(
ctx, vol.ID, &types.VolumeAttachOpts{
Force: opts.Preempt,
Opts: utils.NewStore(),
})
if err != nil {
return "", nil, err
}
if token != "" {
opts := &types.WaitForDeviceOpts{
LocalDevicesOpts: types.LocalDevicesOpts{
ScanType: apiconfig.DeviceScanType(d.config),
Opts: opts.Opts,
},
Token: token,
Timeout: apiconfig.DeviceAttachTimeout(d.config),
}
_, _, err = client.Executor().WaitForDevice(ctx, opts)
if err != nil {
return "", nil, goof.WithError(
"problem with device discovery", err)
}
}
vol, err = d.volumeInspectByIDOrName(
ctx, vol.ID, "", types.VolAttReqTrue, opts.Opts)
if err != nil {
return "", nil, err
}
}
if len(vol.Attachments) == 0 {
return "", nil, goof.New("volume did not attach")
}
inst, err := client.Storage().InstanceInspect(ctx, utils.NewStore())
if err != nil {
return "", nil, goof.New("problem getting instance ID")
}
var ma *types.VolumeAttachment
for _, att := range vol.Attachments {
if att.InstanceID.ID == inst.InstanceID.ID {
ma = att
break
}
}
if ma == nil {
return "", nil, goof.New("no local attachment found")
}
if ma.DeviceName == "" {
return "", nil, goof.New("no device name returned")
}
mountPath, err := d.getVolumeMountPath(vol.Name)
if err != nil {
return "", nil, err
}
mounts, err := client.OS().Mounts(
ctx, ma.DeviceName, "", opts.Opts)
if err != nil {
return "", nil, err
}
if len(mounts) > 0 {
if _, ok := ctx.Value(ctxExactMountKey).(interface{}); ok {
for _, m := range mounts {
if m.MountPoint == mountPath {
ctx.Debug("returning existing mount")
return d.volumeMountPath(
m.MountPoint), vol, nil
}
}
return "", nil, goof.New("device is already mounted")
}
return d.volumeMountPath(mounts[0].MountPoint), vol, nil
}
if opts.NewFSType == "" {
opts.NewFSType = d.fsType()
}
if err := client.OS().Format(
ctx,
ma.DeviceName,
&types.DeviceFormatOpts{
NewFSType: opts.NewFSType,
OverwriteFS: opts.OverwriteFS,
}); err != nil {
return "", nil, err
}
if err := os.MkdirAll(mountPath, 0755); err != nil {
return "", nil, err
}
if err := client.OS().Mount(
ctx,
ma.DeviceName,
mountPath,
&types.DeviceMountOpts{}); err != nil {
return "", nil, err
}
mntPath := d.volumeMountPath(mountPath)
fields := log.Fields{
"vol": vol,
"mntPath": mntPath,
}
ctx.WithFields(fields).Info("volume mounted")
return mntPath, vol, nil
}
// Unmount will unmount the specified volume by volumeName or volumeID.
func (d *driver) Unmount(
ctx types.Context,
volumeID, volumeName string,
opts types.Store) (*types.Volume, error) {
ctx.WithFields(log.Fields{
"volumeName": volumeName,
"volumeID": volumeID,
"opts": opts}).Info("unmounting volume")
if volumeName == "" && volumeID == "" {
return nil, goof.New("missing volume name or ID")
}
vol, err := d.volumeInspectByIDOrName(
ctx, volumeID, volumeName,
types.VolAttReqWithDevMapOnlyVolsAttachedToInstance, opts)
if err != nil {
return nil, err
}
if len(vol.Attachments) == 0 {
return nil, nil
}
client := context.MustClient(ctx)
inst, err := client.Storage().InstanceInspect(ctx, utils.NewStore())
if err != nil {
return nil, goof.New("problem getting instance ID")
}
var ma *types.VolumeAttachment
for _, att := range vol.Attachments {
if att.InstanceID.ID == inst.InstanceID.ID {
ma = att
break
}
}
if ma == nil {
return nil, goof.New("no attachment found for instance")
}
if ma.DeviceName == "" {
return nil, goof.New("no device name found for attachment")
}
mounts, err := client.OS().Mounts(
ctx, ma.DeviceName, "", opts)
if err != nil {
return nil, err
}
for _, mount := range mounts {
ctx.WithField("mount", mount).Debug("retrieved mount")
}
if len(mounts) > 0 {
for _, mount := range mounts {
ctx.WithField("mount", mount).Debug("unmounting mount point")
err = client.OS().Unmount(ctx, mount.MountPoint, opts)
if err != nil {
return nil, err
}
}
}
vol, err = client.Storage().VolumeDetach(ctx, vol.ID,
&types.VolumeDetachOpts{
Force: opts.GetBool("force"),
Opts: utils.NewStore(),
})
if err != nil {
return nil, err
}
ctx.WithFields(log.Fields{
"vol": vol}).Info("unmounted and detached volume")
return vol, nil
}
// Path will return the mounted path of the volumeName or volumeID.
func (d *driver) Path(
ctx types.Context,
volumeID, volumeName string,
opts types.Store) (string, error) {
ctx.WithFields(log.Fields{
"volumeName": volumeName,
"volumeID": volumeID,
"opts": opts}).Info("getting path to volume")
vol, err := d.volumeInspectByIDOrName(
ctx, volumeID, volumeName, types.VolAttReqTrue, opts)
if err != nil {
return "", err
} else if vol == nil {
return "", utils.NewNotFoundError(
fmt.Sprintf("volumeID=%s,volumeName=%s", volumeID, volumeName))
}
if len(vol.Attachments) == 0 {
return "", nil
}
client := context.MustClient(ctx)
mounts, err := client.OS().Mounts(
ctx, vol.Attachments[0].DeviceName, "", opts)
if err != nil {
return "", err
}
if len(mounts) == 0 {
return "", nil
}
volPath := d.volumeMountPath(mounts[0].MountPoint)
ctx.WithFields(log.Fields{
"volPath": volPath,
"vol": vol}).Info("returning path to volume")
return volPath, nil
}
// Create will create a new volume with the volumeName and opts.
func (d *driver) Create(
ctx types.Context,
volumeName string,
opts *types.VolumeCreateOpts) (*types.Volume, error) {
if volumeName == "" {
return nil, goof.New("missing volume name or ID")
}
optsNew := &types.VolumeCreateOpts{}
az := d.availabilityZone()
optsNew.AvailabilityZone = &az
size := d.size()
optsNew.Size = &size
volumeType := d.volumeType()
optsNew.Type = &volumeType
iops := d.iops()
optsNew.IOPS = &iops
optsNew.Encrypted = opts.Encrypted
optsNew.EncryptionKey = opts.EncryptionKey
if opts.Opts.IsSet("availabilityZone") {
az = opts.Opts.GetString("availabilityZone")
}
if opts.Opts.IsSet("size") {
size = opts.Opts.GetInt64("size")
}
if opts.Opts.IsSet("volumeType") {
volumeType = opts.Opts.GetString("volumeType")
}
if opts.Opts.IsSet("type") {
volumeType = opts.Opts.GetString("type")
}
if opts.Opts.IsSet("iops") {
iops = opts.Opts.GetInt64("iops")
}
optsNew.Opts = opts.Opts
ctx.WithFields(log.Fields{
"volumeName": volumeName,
"availabilityZone": az,
"size": size,
"volumeType": volumeType,
"IOPS": iops,
"encrypted": optsNew.Encrypted,
"encryptionKey": optsNew.EncryptionKey,
"opts": opts}).Info("creating volume")
client := context.MustClient(ctx)
vol, err := client.Storage().VolumeCreate(ctx, volumeName, optsNew)
if err != nil {
return nil, err
}
ctx.WithFields(log.Fields{
"volumeName": volumeName,
"vol": vol}).Info("volume created")
return vol, nil
}
// Remove will remove a volume of volumeName.
func (d *driver) Remove(
ctx types.Context,
volumeName string,
opts *types.VolumeRemoveOpts) error {
if volumeName == "" {
return goof.New("missing volume name or ID")
}
vol, err := d.volumeInspectByIDOrName(
ctx, "", volumeName, 0, opts.Opts)
if err != nil {
return err
}
if vol == nil {
return goof.New("volume not found")
}
client := context.MustClient(ctx)
// check to see if there is a config override for force remove
if !opts.Force {
opts.Force = d.volumeRemoveForce()
}
return client.Storage().VolumeRemove(ctx, vol.ID, opts)
}
// Attach will attach a volume based on volumeName to the instance of
// instanceID.
func (d *driver) Attach(
ctx types.Context,
volumeName string,
opts *types.VolumeAttachOpts) (string, error) {
return "", nil
}
// Detach will detach a volume based on volumeName to the instance of
// instanceID.
func (d *driver) Detach(
ctx types.Context,
volumeName string,
opts *types.VolumeDetachOpts) error {
return nil
}
// NetworkName will return an identifier of a volume that is relevant when
// correlating a local device to the device that the volumeName refers to
// for the local instanceID.
func (d *driver) NetworkName(
ctx types.Context,
volumeName string,
opts types.Store) (string, error) {
return "", nil
}
func (d *driver) volumeRootPath() string {
return d.config.GetString(types.ConfigIgVolOpsMountRootPath)
}
func (d *driver) volumeType() string { | }
func (d *driver) iops() int64 {
return int64(d.config.GetInt(types.ConfigIgVolOpsCreateDefaultIOPS))
}
func (d *driver) size() int64 {
return int64(d.config.GetInt(types.ConfigIgVolOpsCreateDefaultSize))
}
func (d *driver) availabilityZone() string {
return d.config.GetString(types.ConfigIgVolOpsCreateDefaultAZ)
}
func (d *driver) fsType() string {
return d.config.GetString(types.ConfigIgVolOpsCreateDefaultFsType)
}
func (d *driver) mountDirPath() string {
return d.config.GetString(types.ConfigIgVolOpsMountPath)
}
func (d *driver) volumeCreateImplicit() bool {
return d.config.GetBool(types.ConfigIgVolOpsCreateImplicit)
}
func (d *driver) volumeRemoveForce() bool {
return d.config.GetBool(types.ConfigIgVolOpsRemoveForce)
}
// register the gofig configuration
func init() {
registry.RegisterConfigReg(
"Integration",
func(ctx types.Context, r gofig.ConfigRegistration) {
r.Key(
gofig.String,
"", "ext4", "",
types.ConfigIgVolOpsCreateDefaultFsType)
r.Key(
gofig.String,
"", "", "", types.ConfigIgVolOpsCreateDefaultType)
r.Key(
gofig.String,
"", "", "",
types.ConfigIgVolOpsCreateDefaultIOPS)
r.Key(
gofig.String,
"", "16", "",
types.ConfigIgVolOpsCreateDefaultSize)
r.Key(
gofig.String,
"", "", "",
types.ConfigIgVolOpsCreateDefaultAZ)
r.Key(
gofig.String,
"",
path.Join(context.MustPathConfig(ctx).Lib, "volumes"),
"",
types.ConfigIgVolOpsMountPath)
r.Key(
gofig.String,
"", "/data", "",
types.ConfigIgVolOpsMountRootPath)
r.Key(
gofig.Bool,
"", true, "",
types.ConfigIgVolOpsCreateImplicit)
r.Key(
gofig.Bool,
"", false, "",
types.ConfigIgVolOpsMountPreempt)
})
} | return d.config.GetString(types.ConfigIgVolOpsCreateDefaultType) |
gc.rs | use std::{
alloc::{Alloc, Layout},
any::Any,
fmt,
marker::Unsize,
mem::{forget, transmute, ManuallyDrop},
ops::{CoerceUnsized, Deref, DerefMut},
ptr::NonNull,
};
use crate::allocator::Block;
use crate::GC_ALLOCATOR;
/// A garbage collected pointer. 'Gc' stands for 'Garbage collected'.
///
/// The type `Gc<T>` provides shared ownership of a value of type `T`,
/// allocated in the heap. `Gc` pointers are `Copy`, so new pointers to
/// the same value in the heap can be produced trivially. The lifetime of
/// `T` is tracked automatically: it is freed when the application
/// determines that no references to `T` are in scope. This does not happen
/// deterministically, and no guarantees are given about when a value
/// managed by `Gc` is freed.
///
/// Shared references in Rust disallow mutation by default, and `Gc` is no
/// exception: you cannot generally obtain a mutable reference to something
/// inside an `Gc`. If you need mutability, put a `Cell` or `RefCell` inside
/// the `Gc`.
///
/// Unlike `Rc<T>`, cycles between `Gc` pointers are allowed and can be
/// deallocated without issue.
///
/// `Gc<T>` automatically dereferences to `T` (via the `Deref` trait), so
/// you can call `T`'s methods on a value of type `Gc<T>`.
///
/// `Gc<T>` is implemented using a tracing mark-sweep garbage collection
/// algorithm. This means that by using `Gc` pointers in a Rust application,
/// you pull in the overhead of a run-time garbage collector to manage and
/// free `Gc` values behind the scenes.
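///
/// # Example
///
/// A minimal usage sketch (marked `ignore` because it assumes this crate's
/// collector backs the `Gc` allocation):
///
/// ```ignore
/// let shared = Gc::new(42);
/// let alias = shared; // `Gc` is `Copy`; both handles refer to the same heap value
/// assert_eq!(*shared + *alias, 84);
/// ```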
#[derive(PartialEq, Eq, Debug)]
pub struct Gc<T: ?Sized> {
pub(crate) objptr: NonNull<GcBox<T>>,
}
impl<T> Gc<T> {
/// Constructs a new `Gc<T>`.
pub fn new(v: T) -> Self {
Gc {
objptr: unsafe { NonNull::new_unchecked(GcBox::new(v)) },
}
}
}
impl Gc<dyn Any> {
pub fn downcast<T: Any>(&self) -> Result<Gc<T>, Gc<dyn Any>> {
if (*self).is::<T>() {
let ptr = self.objptr.cast::<GcBox<T>>();
forget(self);
Ok(Gc { objptr: ptr })
} else {
Err(Gc {
objptr: self.objptr,
})
}
}
}
impl<T: ?Sized> Gc<T> {
/// Get a raw pointer to the underlying value `T`.
pub fn into_raw(this: Self) -> *const T {
let ptr: *const T = &*this;
ptr
}
pub fn ptr_eq(this: &Self, other: &Self) -> bool {
this.objptr.as_ptr() == other.objptr.as_ptr()
}
}
impl<T: ?Sized + fmt::Display> fmt::Display for Gc<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
/// A `GcBox` is a 0-cost wrapper which allows a single `Drop` implementation
/// while also permitting multiple, copyable `Gc` references. The `drop` method
/// on `GcBox` acts as a guard, preventing the destructors on its contents from
/// running unless the object is really dead.
pub(crate) struct GcBox<T: ?Sized>(ManuallyDrop<T>);
impl<T> GcBox<T> {
fn new(value: T) -> *mut GcBox<T> {
let layout = Layout::new::<T>();
let ptr = unsafe { GC_ALLOCATOR.alloc(layout).unwrap().as_ptr() } as *mut GcBox<T>;
let gcbox = GcBox(ManuallyDrop::new(value));
unsafe {
ptr.copy_from_nonoverlapping(&gcbox, 1);
}
forget(gcbox);
unsafe {
let fatptr: &dyn Drop = &*ptr;
let vptr = transmute::<*const dyn Drop, (usize, *mut u8)>(fatptr).1;
(*ptr).block().set_drop_vptr(vptr);
}
ptr
}
}
impl<T: ?Sized> GcBox<T> {
fn block(&self) -> Block {
Block::new(self as *const GcBox<T> as *mut u8)
}
}
impl<T: ?Sized> Deref for Gc<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
unsafe { &*(self.objptr.as_ptr() as *const T) }
}
}
impl<T: ?Sized> DerefMut for Gc<T> {
fn deref_mut(&mut self) -> &mut Self::Target |
}
impl<T: ?Sized> Drop for GcBox<T> {
fn drop(&mut self) {
println!("Dropping GcBox");
if self.block().colour() == Colour::Black {
return;
}
unsafe { ManuallyDrop::drop(&mut self.0) };
}
}
/// `Copy` and `Clone` are implemented manually because a reference to `Gc<T>`
/// should be copyable regardless of `T`. It differs subtly from `#[derive(Copy,
/// Clone)]` in that the latter only makes `Gc<T>` copyable if `T` is.
impl<T: ?Sized> Copy for Gc<T> {}
impl<T: ?Sized> Clone for Gc<T> {
fn clone(&self) -> Self {
Gc {
objptr: self.objptr,
}
}
}
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Gc<U>> for Gc<T> {}
/// Colour of an object used during marking phase (see Dijkstra tri-colour
/// abstraction)
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
pub(crate) enum Colour {
Black,
White,
}
#[cfg(test)]
mod tests {
use super::*;
use std::mem::size_of;
#[test]
fn test_trait_obj() {
trait HelloWorld {
fn hello(&self) -> usize;
}
struct HelloWorldStruct(usize);
impl HelloWorld for HelloWorldStruct {
fn hello(&self) -> usize {
self.0
}
}
let s = HelloWorldStruct(123);
let gcto: Gc<dyn HelloWorld> = Gc::new(s);
assert_eq!(size_of::<Gc<dyn HelloWorld>>(), 2 * size_of::<usize>());
assert_eq!(gcto.hello(), 123);
}
#[test]
fn test_unsized() {
let foo: Gc<[i32]> = Gc::new([1, 2, 3]);
assert_eq!(foo, foo.clone());
}
#[test]
fn test_nonnull_opt() {
assert_eq!(size_of::<Option<Gc<usize>>>(), size_of::<usize>())
}
}
| {
unsafe { &mut *(self.objptr.as_ptr() as *mut T) }
} |
index.ts | import chalk from 'chalk';
import axios from 'axios';
class | {
listMyTeams = async () => {
const response = await axios.create({
baseURL: `https://api.clickup.com/api/v2/team/`,
timeout: 30000,
headers: {
Authorization: process.env.CLICKUP_TOKEN,
'Content-Type': 'application/json; charset=utf-8',
},
}).get('');
if (response.status === 200) {
const teams = response.data.teams;
console.log(`Your ${chalk.red('teams')}:`);
teams.forEach((team, index) => {
console.log(`Team [ ${chalk.green(index + 1)} ]`)
console.log(`\tid:${' '.repeat(5)}${chalk.red(team.id)}`);
console.log(`\tmembers: `);
const members = [...team.members].sort((a, b) => {
if (a.user.username > b.user.username) {
return 1;
}
if (a.user.username < b.user.username) {
return -1;
}
return 0;
})
members.forEach(mem => {
const name = mem.user.username
if (name) {
const memberId = String(mem.user.id)
console.log(`\t${' '.repeat(5)}[ ${chalk.blue(memberId.padEnd(7, ' '))} ] ${mem.user.username}`)
}
})
})
}
}
}
const ClickUpTeam = new ClickUpTeamConstructor()
export default ClickUpTeam; | ClickUpTeamConstructor |
lazer_reader.rs | use anyhow::Result;
use byteorder::{LittleEndian, ReadBytesExt};
use lz4::Decoder;
use pasture_core::{
containers::{
InterleavedPointBufferMut, PerAttributePointView, PerAttributeVecPointStorage,
PointBufferWriteable,
},
layout::{
attributes::{CLASSIFICATION, COLOR_RGB, INTENSITY, POSITION_3D},
conversion::{get_converter_for_attributes, AttributeConversionFn},
FieldAlignment, PointLayout,
},
nalgebra::Vector3,
util::view_raw_bytes,
};
use pasture_io::{
base::{PointReader, SeekToPoint},
las::LASMetadata,
las_rs::{
raw::{self},
Header,
},
};
use std::collections::HashMap;
use std::convert::TryInto;
use std::fs::File;
use std::io::{BufReader, Cursor, Read, Seek, SeekFrom};
use std::path::Path;
trait SeekRead: Seek + Read {}
impl<T: Seek + Read> SeekRead for T {}
pub struct LAZERSource {
reader: Box<dyn SeekRead>,
raw_las_header: raw::Header,
metadata: LASMetadata,
default_point_layout: PointLayout,
has_colors: bool,
current_point_index: usize,
block_size: u64,
number_of_attributes: usize,
block_offsets: Vec<u64>,
block_byte_sizes: Vec<u64>,
current_block_cache: Box<[u8]>,
decoders_per_attribute: HashMap<&'static str, Decoder<Cursor<&'static [u8]>>>,
}
impl LAZERSource {
pub fn new<P: AsRef<Path>>(path: P) -> Result<Self> {
let reader = BufReader::new(File::open(path)?);
Self::from(reader)
}
pub fn from<R: 'static + std::io::Read + Seek>(mut reader: R) -> Result<Self> {
let raw_las_header = Self::read_las_header(&mut reader)?;
let las_header = Header::from_raw(raw_las_header.clone())?;
let offset_to_point_data: u64 = raw_las_header.offset_to_point_data.into();
reader.seek(SeekFrom::Start(offset_to_point_data))?;
// Read the block size and the block offsets
let block_size = reader.read_u64::<LittleEndian>()?;
let num_blocks = (las_header.number_of_points() + (block_size - 1)) / block_size;
let mut block_offsets = Vec::with_capacity(num_blocks as usize);
for _ in 0..num_blocks {
block_offsets.push(reader.read_u64::<LittleEndian>()?);
}
// Get size of file
let cur_reader_pos = reader.seek(SeekFrom::Current(0))?;
let file_size = reader.seek(SeekFrom::End(0))?;
reader.seek(SeekFrom::Start(cur_reader_pos))?;
let mut block_byte_sizes = Vec::with_capacity(num_blocks as usize);
for block_idx in 0..num_blocks {
let size_of_block = if block_idx == num_blocks - 1 {
file_size - block_offsets[block_idx as usize]
} else {
block_offsets[block_idx as usize + 1] - block_offsets[block_idx as usize]
};
block_byte_sizes.push(size_of_block);
}
// TODO More attributes from LAS format are supported by LAZER, but for this showcase, we only use these 4
let mut point_layout =
PointLayout::from_attributes(&[POSITION_3D, INTENSITY, CLASSIFICATION]);
let mut number_of_attributes = 8;
if las_header.point_format().has_color {
number_of_attributes += 1;
point_layout.add_attribute(COLOR_RGB, FieldAlignment::Packed(1));
}
if las_header.point_format().has_gps_time {
number_of_attributes += 1;
}
if las_header.point_format().has_waveform {
number_of_attributes += 1;
}
if las_header.point_format().has_nir {
number_of_attributes += 1;
}
let mut myself = Self {
reader: Box::new(reader),
raw_las_header: raw_las_header,
metadata: LASMetadata::from(&las_header),
default_point_layout: point_layout,
has_colors: las_header.point_format().has_color,
current_point_index: 0,
number_of_attributes: number_of_attributes,
block_size: block_size,
block_offsets: block_offsets,
block_byte_sizes: block_byte_sizes,
current_block_cache: Vec::new().into_boxed_slice(),
decoders_per_attribute: HashMap::new(),
};
myself.move_decoders_to_point_in_block(0, 0)?;
Ok(myself)
}
pub fn block_size(&self) -> u64 {
self.block_size
}
fn read_las_header<R: std::io::Read>(reader: R) -> Result<raw::Header> {
let raw_header = raw::Header::read_from(reader)?;
Ok(raw_header)
}
fn move_decoders_to_point_in_block(
&mut self,
block_index: usize,
point_in_block: usize,
) -> Result<()> {
// This only works by reading the full compressed block into a temporary memory buffer
// and pointing each decoder to a specific non-overlapping section in this buffer
// Block contains one u64 attribute offset per attribute, followed by the compressed attribute blobs
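// An illustrative sketch of the assumed block layout (inferred from the reads below,
// not an authoritative format spec):
//
//   [u64 offset of blob 0] ... [u64 offset of blob n-1]   <- one per attribute
//   [LZ4 blob 0] [LZ4 blob 1] ... [LZ4 blob n-1]          <- compressed attribute data
//
// The stored offsets are absolute file positions; they are rebased below to be relative
// to the first compressed blob before being used to slice the block cache.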
let block_offset_in_file = self.block_offsets[block_index];
self.reader.seek(SeekFrom::Start(block_offset_in_file))?;
// read the offsets to the compressed blobs
let offsets_to_compressed_blobs = (0..self.number_of_attributes)
.map(|_| self.reader.read_u64::<LittleEndian>())
.collect::<Result<Vec<_>, _>>()?;
// The offsets in the block header are relative to the file, we want them relative to the
// first byte of the first compressed attribute in the block because we read only the compressed
// attributes into self.current_block_cache
let offset_to_compressed_blobs_in_cache = offsets_to_compressed_blobs
.iter()
.map(|offset| offset - offsets_to_compressed_blobs[0])
.collect::<Vec<_>>();
// read all compressed blobs into a single buffer
let block_size = self.block_byte_sizes[block_index];
let compressed_attributes_size = block_size - (self.number_of_attributes as u64 * 8);
if self.current_block_cache.len() < compressed_attributes_size as usize {
self.current_block_cache =
(vec![0; compressed_attributes_size as usize]).into_boxed_slice();
}
// Read exactly compressed_attributes_size bytes. self.current_block_cache may be larger
// because a previous block was larger
self.reader
.read_exact(&mut self.current_block_cache[0..compressed_attributes_size as usize])?;
// split the buffer into chunks based on the offsets that we read. Create Decoders for each chunk
let offset_to_positions = offset_to_compressed_blobs_in_cache[0] as usize;
let offset_to_blob_after_positions = offset_to_compressed_blobs_in_cache[1] as usize;
// TODO The Cursor inside the Decoder references memory that belongs to self, so we need
// an appropriate lifetime. The current method is called from a trait method however and
// this trait method has no lifetime...
let mut positions_decoder = unsafe {
let ptr = self.current_block_cache.as_ptr();
let start_ptr = ptr.add(offset_to_positions);
let slice = std::slice::from_raw_parts(
start_ptr,
offset_to_blob_after_positions - offset_to_positions,
);
Decoder::new(Cursor::new(slice))?
};
// Move decoder to correct point index
for _ in 0..point_in_block {
// TODO Could maybe be a single read call because we know the size (point_in_block * 12)
positions_decoder.read_i32::<LittleEndian>()?;
positions_decoder.read_i32::<LittleEndian>()?;
positions_decoder.read_i32::<LittleEndian>()?;
}
self.decoders_per_attribute
.insert(POSITION_3D.name(), positions_decoder);
let offset_to_intensities = offset_to_compressed_blobs_in_cache[1] as usize;
let offset_to_blob_after_intensities = offset_to_compressed_blobs_in_cache[2] as usize;
let mut intensities_decoder = unsafe {
let ptr = self.current_block_cache.as_ptr();
let start_ptr = ptr.add(offset_to_intensities);
let slice = std::slice::from_raw_parts(
start_ptr,
offset_to_blob_after_intensities - offset_to_intensities,
);
Decoder::new(Cursor::new(slice))?
};
for _ in 0..point_in_block {
intensities_decoder.read_u16::<LittleEndian>()?;
}
self.decoders_per_attribute
.insert(INTENSITY.name(), intensities_decoder);
let offset_to_classifications = offset_to_compressed_blobs_in_cache[3] as usize;
let offset_to_blob_after_classifications = offset_to_compressed_blobs_in_cache[4] as usize;
let mut classifications_decoder = unsafe {
let ptr = self.current_block_cache.as_ptr();
let start_ptr = ptr.add(offset_to_classifications);
let slice = std::slice::from_raw_parts(
start_ptr,
offset_to_blob_after_classifications - offset_to_classifications,
);
Decoder::new(Cursor::new(slice))?
};
for _ in 0..point_in_block {
classifications_decoder.read_u8()?;
}
self.decoders_per_attribute
.insert(CLASSIFICATION.name(), classifications_decoder);
if self.has_colors {
let offset_to_colors = offset_to_compressed_blobs_in_cache[8] as usize;
let offset_to_blob_after_colors = if offset_to_compressed_blobs_in_cache.len() > 9 {
offset_to_compressed_blobs_in_cache[9] as usize
} else {
block_offset_in_file as usize + block_size as usize
};
let mut colors_decoder = unsafe {
let ptr = self.current_block_cache.as_ptr();
let start_ptr = ptr.add(offset_to_colors);
let slice = std::slice::from_raw_parts(
start_ptr,
offset_to_blob_after_colors - offset_to_colors,
);
Decoder::new(Cursor::new(slice))?
};
for _ in 0..point_in_block {
colors_decoder.read_u16::<LittleEndian>()?;
colors_decoder.read_u16::<LittleEndian>()?;
colors_decoder.read_u16::<LittleEndian>()?;
}
self.decoders_per_attribute
.insert(COLOR_RGB.name(), colors_decoder);
}
Ok(())
}
fn is_last_block(&self, block_index: u64) -> bool {
(block_index as usize) == (self.block_offsets.len() - 1)
}
fn _read_into_interleaved(
&mut self,
_point_buffer: &dyn InterleavedPointBufferMut,
_count: usize,
) -> Result<usize> {
todo!()
// let num_points_to_read = usize::min(
// count,
// self.metadata.point_count() - self.current_point_index,
// ) as u64;
// if num_points_to_read == 0 {
// return Ok(0);
// }
// let first_block_index = self.current_point_index as u64 / self.block_size;
// let last_block_index_inclusive =
// (self.current_point_index as u64 + num_points_to_read) / self.block_size;
// let first_point_index = self.current_point_index as u64;
// let last_point_index = first_point_index + num_points_to_read;
// fn get_attribute_parser(
// name: &str,
// source_layout: &PointLayout,
// target_layout: &PointLayout,
// ) -> Option<(usize, usize, Option<AttributeConversionFn>)> {
// target_layout
// .get_attribute_by_name(name)
// .map_or(None, |target_attribute| {
// let converter =
// source_layout
// .get_attribute_by_name(name)
// .and_then(|source_attribute| {
// get_converter_for_attributes(
// &source_attribute.into(),
// &target_attribute.into(),
// )
// });
// let offset_of_attribute = target_attribute.offset() as usize;
// let size_of_attribute = target_attribute.size() as usize;
// Some((offset_of_attribute, size_of_attribute, converter))
// })
// }
// fn run_parser<U>(
// decoder_fn: impl Fn(Option<&mut Decoder<Cursor<&[u8]>>>) -> Result<U>,
// maybe_parser: Option<(usize, usize, Option<AttributeConversionFn>)>,
// start_of_target_point_in_chunk: usize,
// size_of_attribute: Option<usize>,
// decoder: Option<&mut Decoder<Cursor<&[u8]>>>,
// chunk_buffer: &mut [u8],
// ) -> Result<()> {
// if let Some((offset, size, maybe_converter)) = maybe_parser {
// let source_data = decoder_fn(decoder)?;
// let source_slice = unsafe { view_raw_bytes(&source_data) };
// let pos_start = start_of_target_point_in_chunk + offset;
// let pos_end = pos_start + size;
// let target_slice = &mut chunk_buffer[pos_start..pos_end];
// if let Some(converter) = maybe_converter {
// unsafe {
// converter(source_slice, target_slice);
// }
// } else {
// target_slice.copy_from_slice(source_slice);
// }
// } else if let Some(bytes_to_skip) = size_of_attribute {
// if let Some(actual_decoder) = decoder {
// for _ in 0..bytes_to_skip {
// actual_decoder.read_u8()?;
// }
// }
// }
// Ok(())
// }
// let source_layout = self.get_default_point_layout();
// let target_layout = point_buffer.point_layout().clone();
// let target_point_size = target_layout.size_of_point_entry();
// // This format currently only supports positions, intensities, classififcations and colors
// let position_parser = get_attribute_parser(POSITION_3D.name(), source_layout, &target_layout);
// let intensity_parser = get_attribute_parser(INTENSITY.name(), source_layout, &target_layout);
// let classification_parser = get_attribute_parser(CLASSIFICATION.name(), source_layout, &target_layout);
// let color_parser = get_attribute_parser(COLOR_RGB.name(), source_layout, &target_layout);
// let mut block_buffer = vec![0u8; (target_point_size * self.block_size).try_into().unwrap()];
// for block_idx in first_block_index..last_block_index_inclusive + 1 {
// let block_start_point = block_idx * self.block_size;
// let point_in_block_start = if first_point_index < block_start_point {
// 0
// } else {
// first_point_index - block_start_point
// };
// let point_in_block_end =
// u64::min(last_point_index - block_start_point, self.block_size);
// let num_points_to_read_cur_block = point_in_block_end - point_in_block_start;
// // Read data from block
// {
// let positions_decoder = self
// .decoders_per_attribute
// .get_mut(&POSITION_3D.name())
// .expect("Positions Decoder was None");
// let offset_x = self.raw_las_header.x_offset;
// let offset_y = self.raw_las_header.y_offset;
// let offset_z = self.raw_las_header.z_offset;
// let scale_x = self.raw_las_header.x_scale_factor;
// let scale_y = self.raw_las_header.y_scale_factor;
// let scale_z = self.raw_las_header.z_scale_factor;
// for idx in 0..num_points_to_read_cur_block {
// let start_of_target_point_in_chunk = (idx * target_point_size) as usize;
// run_parser(|maybe_decoder| {
// let decoder = maybe_decoder.unwrap();
// let x = decoder.read_i32::<LittleEndian>()?;
// let y = decoder.read_i32::<LittleEndian>()?;
// let z = decoder.read_i32::<LittleEndian>()?;
// Ok(Vector3::new(
// offset_x + (x as f64 * scale_x),
// offset_y + (y as f64 * scale_y),
// offset_z + (z as f64 * scale_z),
// ))
// },
// position_parser, start_of_target_point_in_chunk, Some(12), Some(positions_decoder), &mut block_buffer)?;
// }
// }
// {
// let intensities_decoder = self
// .decoders_per_attribute
// .get_mut(&INTENSITY.name())
// .expect("Intensity Decoder was None");
// for idx in 0..num_points_to_read_cur_block {
// let start_of_target_point_in_chunk = (idx * target_point_size) as usize;
// run_parser(|maybe_decoder| {
// let decoder = maybe_decoder.unwrap();
// let intensity = decoder.read_u16::<LittleEndian>()?;
// Ok(intensity)
// },
// intensity_parser, start_of_target_point_in_chunk, Some(2), Some(intensities_decoder), &mut block_buffer)?;
// }
// }
// {
// let classifications_decoder = self
// .decoders_per_attribute
// .get_mut(&CLASSIFICATION.name())
// .expect("Classification Decoder was None");
// for idx in 0..num_points_to_read_cur_block {
// let start_of_target_point_in_chunk = (idx * target_point_size) as usize;
// run_parser(|maybe_decoder| {
// let decoder = maybe_decoder.unwrap();
// let classification = decoder.read_u8()?;
// Ok(classification)
// },
// classification_parser, start_of_target_point_in_chunk, Some(1), Some(classifications_decoder), &mut block_buffer)?;
// }
// }
// {
// let colors_decoder = self
// .decoders_per_attribute
// .get_mut(&COLOR_RGB.name());
// match colors_decoder {
// Some(dec) => {
// for idx in 0..num_points_to_read_cur_block {
// let start_of_target_point_in_chunk = (idx * target_point_size) as usize;
// run_parser(|decoder| {
// match decoder {
// Some(dec) => {
// let r = dec.read_u16::<LittleEndian>()?;
// let g = dec.read_u16::<LittleEndian>()?;
// let b = dec.read_u16::<LittleEndian>()?;
// Ok(Vector3::new(r,g,b))
// },
// None => {
// Ok(Vector3::default())
// }
// }
// },
// color_parser, start_of_target_point_in_chunk, Some(6), Some(dec), &mut block_buffer)?;
// }
// },
// None => {
// for idx in 0..num_points_to_read_cur_block {
// let start_of_target_point_in_chunk = (idx * target_point_size) as usize;
// run_parser(|decoder| {
// match decoder {
// Some(dec) => {
// let r = dec.read_u16::<LittleEndian>()?;
// let g = dec.read_u16::<LittleEndian>()?;
// let b = dec.read_u16::<LittleEndian>()?;
// Ok(Vector3::new(r,g,b))
// },
// None => {
// Ok(Vector3::default())
// }
// }
// },
// color_parser, start_of_target_point_in_chunk, Some(6), None, &mut block_buffer)?;
// }
// }
// }
// }
// // Push data into point_buffer
// // TODO The parsers assume that the 'block_buffer' stores interleaved data. This might be inefficient in case that the
// // 'point_buffer' stores per-attribute data, because LAZER also stores per-attribute data...
// point_buffer.push(&InterleavedPointView::from_raw_slice(block_buffer.as_slice(), target_layout.clone()));
// // If we finished reading this block, then we have to move to the next block
// // This is because read_into assumes that the decoders are already at the correct
// // position within the file. If we skip moving to the next block, then a future
// // call to read_into will read garbage!
// if !self.is_last_block(block_idx) && point_in_block_end == self.block_size {
// self.move_decoders_to_point_in_block(block_idx as usize + 1, 0)?;
// }
// self.current_point_index += num_points_to_read_cur_block as usize;
// }
// Ok(num_points_to_read as usize)
}
}
impl PointReader for LAZERSource {
fn read_into(
&mut self,
point_buffer: &mut dyn PointBufferWriteable,
count: usize,
) -> Result<usize> {
// Since moving around in the compressed file is slow (Decoder has no random access), we move only in the seek
// function. read_into assumes that we are at the correct position!
// TODO read_into for now only works on per-attribute buffers
point_buffer
.as_per_attribute()
.expect("LAZERSource currently only supports reading into PerAttribute buffers");
let num_points_to_read = usize::min(
count,
self.metadata.point_count() - self.current_point_index,
) as u64;
if num_points_to_read == 0 {
return Ok(0);
}
let first_block_index = self.current_point_index as u64 / self.block_size;
let last_block_index_inclusive =
(self.current_point_index as u64 + num_points_to_read) / self.block_size;
let first_point_index = self.current_point_index as u64;
let last_point_index = first_point_index + num_points_to_read;
let source_layout = self.get_default_point_layout();
let target_layout = point_buffer.point_layout().clone();
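// Allocate one scratch buffer per target attribute, each large enough to hold a full block of that attribute.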
let mut temporary_buffers_per_attribute = target_layout
.attributes()
.map(|a| vec![0; (a.size() * self.block_size).try_into().unwrap()])
.collect::<Vec<Vec<u8>>>();
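// Helper: look up a conversion function for the named attribute between the source and target layouts.
// Returns None if either layout lacks the attribute or if no datatype conversion is required.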
fn get_attribute_parser(
name: &str,
source_layout: &PointLayout,
target_layout: &PointLayout,
) -> Option<AttributeConversionFn> |
let positions_converter =
get_attribute_parser(POSITION_3D.name(), source_layout, &target_layout);
let intensity_converter =
get_attribute_parser(INTENSITY.name(), source_layout, &target_layout);
let class_converter =
get_attribute_parser(CLASSIFICATION.name(), source_layout, &target_layout);
let color_converter = get_attribute_parser(COLOR_RGB.name(), source_layout, &target_layout);
for block_idx in first_block_index..last_block_index_inclusive + 1 {
let block_start_point = block_idx * self.block_size;
let point_in_block_start = if first_point_index < block_start_point {
0
} else {
first_point_index - block_start_point
};
let point_in_block_end =
u64::min(last_point_index - block_start_point, self.block_size);
let num_points_to_read_cur_block = point_in_block_end - point_in_block_start;
// Read data from block
if let Some(pos_buffer) = target_layout
.index_of(&POSITION_3D)
.map(|idx| &mut temporary_buffers_per_attribute[idx])
{
let pos_decoder = self
.decoders_per_attribute
.get_mut(POSITION_3D.name())
.expect("No positions decoder found");
let pos_size_in_target = target_layout
.get_attribute_by_name(POSITION_3D.name())
.unwrap()
.size();
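// Decode the raw i32 coordinates and apply the LAS scale/offset to obtain world-space f64 positions.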
for idx in 0..num_points_to_read_cur_block {
let x = pos_decoder.read_i32::<LittleEndian>()?;
let y = pos_decoder.read_i32::<LittleEndian>()?;
let z = pos_decoder.read_i32::<LittleEndian>()?;
let world_space_pos = Vector3::new(
self.raw_las_header.x_offset
+ self.raw_las_header.x_scale_factor * x as f64,
self.raw_las_header.y_offset
+ self.raw_las_header.y_scale_factor * y as f64,
self.raw_las_header.z_offset
+ self.raw_las_header.z_scale_factor * z as f64,
);
let pos_start_idx = (idx * pos_size_in_target) as usize;
let pos_end_idx = pos_start_idx + pos_size_in_target as usize;
let target_slice = &mut pos_buffer[pos_start_idx..pos_end_idx];
if let Some(ref converter) = positions_converter {
unsafe {
converter(view_raw_bytes(&world_space_pos), target_slice);
}
} else {
unsafe {
target_slice.copy_from_slice(view_raw_bytes(&world_space_pos));
}
}
}
}
if let Some(intensity_buffer) = target_layout
.index_of(&INTENSITY)
.map(|idx| &mut temporary_buffers_per_attribute[idx])
{
let intensity_decoder = self
.decoders_per_attribute
.get_mut(INTENSITY.name())
.expect("No intensity decoder found");
let intensity_size_in_target = target_layout
.get_attribute_by_name(INTENSITY.name())
.unwrap()
.size();
for idx in 0..num_points_to_read_cur_block {
let intensity = intensity_decoder.read_u16::<LittleEndian>()?;
let intensity_start_idx = (idx * intensity_size_in_target) as usize;
let intensity_end_idx = intensity_start_idx + intensity_size_in_target as usize;
let target_slice =
&mut intensity_buffer[intensity_start_idx..intensity_end_idx];
if let Some(ref converter) = intensity_converter {
unsafe {
converter(view_raw_bytes(&intensity), target_slice);
}
} else {
unsafe {
target_slice.copy_from_slice(view_raw_bytes(&intensity));
}
}
}
}
if let Some(class_buffer) = target_layout
.index_of(&CLASSIFICATION)
.map(|idx| &mut temporary_buffers_per_attribute[idx])
{
let class_decoder = self
.decoders_per_attribute
.get_mut(CLASSIFICATION.name())
.expect("No classification decoder found");
let class_size_in_target = target_layout
.get_attribute_by_name(CLASSIFICATION.name())
.unwrap()
.size();
for idx in 0..num_points_to_read_cur_block {
let class = class_decoder.read_u8()?;
let class_start_idx = (idx * class_size_in_target) as usize;
let class_end_idx = class_start_idx + class_size_in_target as usize;
let target_slice = &mut class_buffer[class_start_idx..class_end_idx];
if let Some(ref converter) = class_converter {
unsafe {
converter(view_raw_bytes(&class), target_slice);
}
} else {
unsafe {
target_slice.copy_from_slice(view_raw_bytes(&class));
}
}
}
}
if let Some(color_buffer) = target_layout
.index_of(&COLOR_RGB)
.map(|idx| &mut temporary_buffers_per_attribute[idx])
{
if let Some(color_decoder) = self.decoders_per_attribute.get_mut(COLOR_RGB.name()) {
let color_size_in_target = target_layout
.get_attribute_by_name(COLOR_RGB.name())
.unwrap()
.size();
for idx in 0..num_points_to_read_cur_block {
let r = color_decoder.read_u16::<LittleEndian>()?;
let g = color_decoder.read_u16::<LittleEndian>()?;
let b = color_decoder.read_u16::<LittleEndian>()?;
let color = Vector3::new(r, g, b);
let color_start_idx = (idx * color_size_in_target) as usize;
let color_end_idx = color_start_idx + color_size_in_target as usize;
let target_slice = &mut color_buffer[color_start_idx..color_end_idx];
if let Some(ref converter) = color_converter {
unsafe {
converter(view_raw_bytes(&color), target_slice);
}
} else {
unsafe {
target_slice.copy_from_slice(view_raw_bytes(&color));
}
}
}
}
}
let current_slices = target_layout
.attributes()
.enumerate()
.map(|(idx, attribute)| {
let size = (num_points_to_read_cur_block * attribute.size()) as usize;
&temporary_buffers_per_attribute[idx][..size]
})
.collect::<Vec<_>>();
point_buffer.push(&PerAttributePointView::from_slices(
current_slices,
target_layout.clone(),
));
// If we finished reading this block, then we have to move to the next block
// This is because read_into assumes that the decoders are already at the correct
// position within the file. If we skip moving to the next block, then a future
// call to read_into will read garbage!
if !self.is_last_block(block_idx) && point_in_block_end == self.block_size {
self.move_decoders_to_point_in_block(block_idx as usize + 1, 0)?;
}
self.current_point_index += num_points_to_read_cur_block as usize;
}
Ok(num_points_to_read as usize)
}
// fn seek(&mut self, index: usize) {
// if index == self.current_point_index {
// return;
// }
// let block_index_of_point = index / self.block_size as usize;
// let block_start_index = block_index_of_point * self.block_size as usize;
// let index_within_block = index - block_start_index;
// self.move_decoders_to_point_in_block(block_index_of_point, index_within_block)
// .expect("Seek failed (TODO make seek return Result)");
// self.current_point_index = index;
// }
// fn current_index(&self) -> usize {
// self.current_point_index
// }
fn read(&mut self, count: usize) -> Result<Box<dyn pasture_core::containers::PointBuffer>> {
let mut buffer = PerAttributeVecPointStorage::new(self.default_point_layout.clone());
self.read_into(&mut buffer, count)?;
Ok(Box::new(buffer))
}
fn get_metadata(&self) -> &dyn pasture_core::meta::Metadata {
&self.metadata
}
fn get_default_point_layout(&self) -> &pasture_core::layout::PointLayout {
&self.default_point_layout
}
}
impl SeekToPoint for LAZERSource {
fn seek_point(&mut self, _position: SeekFrom) -> Result<usize> {
todo!()
// if index == self.current_point_index {
// return;
// }
// let block_index_of_point = index / self.block_size as usize;
// let block_start_index = block_index_of_point * self.block_size as usize;
// let index_within_block = index - block_start_index;
// self.move_decoders_to_point_in_block(block_index_of_point, index_within_block)
// .expect("Seek failed (TODO make seek return Result)");
// self.current_point_index = index;
}
}
#[cfg(test)]
mod tests {
use pasture_core::containers::PointBufferExt;
use pasture_io::las::LASReader;
use super::*;
#[test]
fn test_lazer_reader() -> Result<()> {
let mut las_source = LASReader::from_path("/Users/pbormann/data/geodata/pointclouds/datasets/navvis_m6_3rdFloor/navvis_m6_HQ3rdFloor.laz")?;
let mut lazer_source = LAZERSource::new("/Users/pbormann/data/projects/progressive_indexing/experiment_data/navvis_m6_HQ3rdFloor.lazer")?;
let count = lazer_source.block_size() as usize;
for _ in 0..10 {
let las_points = las_source.read(count)?;
let lazer_points = lazer_source.read(count)?;
for idx in 0..count {
let las_pos = las_points.get_attribute::<Vector3<f64>>(&POSITION_3D, idx);
let lazer_pos = lazer_points.get_attribute::<Vector3<f64>>(&POSITION_3D, idx);
assert_eq!(las_pos, lazer_pos);
let las_col = las_points.get_attribute::<Vector3<u16>>(&COLOR_RGB, idx);
let lazer_col = lazer_points.get_attribute::<Vector3<u16>>(&COLOR_RGB, idx);
assert_eq!(las_col, lazer_col);
}
}
Ok(())
}
}
| {
target_layout
.get_attribute_by_name(name)
.map_or(None, |target_attribute| {
source_layout
.get_attribute_by_name(name)
.and_then(|source_attribute| {
get_converter_for_attributes(
&source_attribute.into(),
&target_attribute.into(),
)
})
})
} |
search-indexer.js | 'use strict';
const Writer = require('broccoli-caching-writer');
const lunr = require('lunr');
const striptags = require('striptags');
const Entities = require('html-entities').AllHtmlEntities;
const fs = require('fs-extra');
const path = require('path');
const htmlEntities = new Entities();
module.exports = class SearchIndexCompiler extends Writer {
constructor(input, options) {
super([input]);
this.config = options.config;
this.outputFile = options.outputFile;
}
build() {
let writer = this;
let documents = {};
let index = lunr(function() {
this.ref('id');
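// Keep token position metadata so consumers of the index can highlight where matches occur.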
this.metadataWhitelist = ['position'];
this.field('title');
this.field('text');
this.field('keywords');
this.tokenizer.separator = new RegExp(writer.config['ember-cli-addon-docs'].searchTokenSeparator);
for (let doc of writer.buildDocuments()) {
if (doc) {
documents[doc.id] = doc;
this.add(doc);
}
}
});
let destFile = path.join(this.outputPath, this.outputFile);
let output = { index: index.toJSON(), documents };
fs.ensureDirSync(path.dirname(destFile));
fs.writeJsonSync(destFile, output);
}
*buildDocuments() {
let { projectName } = this.config['ember-cli-addon-docs'];
for (let filePath of this.listFiles()) {
if (/\.template-contents$/.test(filePath)) {
yield this.buildTemplateDocument(filePath);
} else if (new RegExp(`${projectName}.json`).test(filePath)) {
yield* this.buildApiItemDocuments(filePath);
}
}
}
buildTemplateDocument(filePath) {
let relativePath = filePath.replace(this.inputPaths[0] + '/', '');
let contents = fs.readJsonSync(filePath, 'utf-8');
// This will need to change for module unification...
let modulePrefix = this.config.modulePrefix;
let podModulePrefix = this.config.podModulePrefix || modulePrefix;
let routePath;
if (relativePath.indexOf(podModulePrefix) === 0 && POD_TEMPLATE_REGEX.test(relativePath)) {
routePath = relativePath.replace(`${podModulePrefix}/`, '').replace(POD_TEMPLATE_REGEX, '');
} else if (relativePath.indexOf(modulePrefix) === 0 && /\.template-contents$/.test(relativePath)) {
routePath = relativePath.replace(`${modulePrefix}/templates/`, '').replace(/\.template-contents$/, '');
}
if (routePath && routePath.indexOf('components/') !== 0) {
return {
id: `template:${routePath}`,
type: 'template',
title: normalizeText(contents.title),
text: normalizeText(contents.body),
route: routePath.replace(/\//g, '.'),
keywords: [], // TODO allow for specifying keywords
};
}
}
*buildApiItemDocuments(filePath) {
let projectVersionDoc = fs.readJsonSync(filePath, 'utf-8');
let apiItems = projectVersionDoc.included || [];
for (let item of apiItems) {
if (item.type === 'module') {
yield this.buildModuleDocument(item);
} else if (item.type === 'component') {
yield this.buildComponentDocument(item);
} else {
continue;
}
}
}
buildModuleDocument(item) {
let keywords = [
...item.attributes.functions.map(x => x.name),
...item.attributes.variables.map(x => x.name)
];
return {
id: `module:${item.id}`,
type: 'module',
title: item.attributes.file,
keywords: keywords,
item
};
}
buildComponentDocument(item) {
let keywords = [
...item.attributes.yields.map(x => x.name),
...item.attributes.fields.map(x => x.name),
...item.attributes.accessors.map(x => x.name),
...item.attributes.methods.map(x => x.name)
];
return {
id: `component:${item.id}`,
type: 'component',
title: item.attributes.name,
keywords: keywords,
text: htmlEntities.decode(striptags(normalizeText(item.attributes.description))),
item
};
}
}
const POD_TEMPLATE_REGEX = /\/template\.template-contents$/;
function | (text) {
if (!text) { return text; }
return text.replace(/\s+/g, ' ');
}
| normalizeText |
webtrekk.js | /**
* Copyright 2018 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export const WEBTREKK_CONFIG = /** @type {!JsonObject} */ ({
'requests': {
'trackURL': 'https://${trackDomain}/${trackId}/wt',
'parameterPrefix':
'?p=432,${contentId},1,' +
'${screenWidth}x${screenHeight},${screenColorDepth},1,' +
'${timestamp},${documentReferrer},${viewportWidth}x' +
'${viewportHeight},0&tz=${timezone}' +
'&eid=${clientId(amp-wt3-eid)}&la=${browserLanguage}',
'parameterSuffix': '&pu=${sourceUrl}',
'pageParameter':
'&cp1=${pageParameter1}' +
'&cp2=${pageParameter2}&cp3=${pageParameter3}' +
'&cp4=${pageParameter4}&cp5=${pageParameter5}' +
'&cp6=${pageParameter6}&cp7=${pageParameter7}' +
'&cp8=${pageParameter8}&cp9=${pageParameter9}' +
'&cp10=${pageParameter10}',
'pageCategories': | '&cg6=${pageCategory6}&cg7=${pageCategory7}' +
'&cg8=${pageCategory8}&cg9=${pageCategory9}' +
'&cg10=${pageCategory10}',
'pageview':
'${trackURL}${parameterPrefix}${pageParameter}' +
'${pageCategories}${parameterSuffix}',
'actionParameter':
'&ck1=${actionParameter1}' +
'&ck2=${actionParameter2}&ck3=${actionParameter3}' +
'&ck4=${actionParameter4}&ck5=${actionParameter5}',
'event':
'${trackURL}${parameterPrefix}&ct=${actionName}' +
'${actionParameter}${parameterSuffix}',
},
'transport': {
'beacon': false,
'xhrpost': false,
'image': true,
},
}); | '&cg1=${pageCategory1}' +
'&cg2=${pageCategory2}&cg3=${pageCategory3}' +
'&cg4=${pageCategory4}&cg5=${pageCategory5}' + |
test_corosync_config.py | import sys
import mock
from iml_common.test.command_capture_testcase import (
CommandCaptureTestCase,
CommandCaptureCommand,
)
from iml_common.lib.firewall_control import FirewallControlEL7
from iml_common.lib.service_control import ServiceControlEL7
from iml_common.lib.agent_rpc import agent_result_ok
class FakeEtherInfo(object):
def __init__(self, attrs):
self.__dict__.update(attrs)
def __getattr__(self, attr):
return self.__dict__[attr]
class fake_ethtool(object):
IFF_SLAVE = 2048
def __init__(self, interfaces={}):
self.interfaces = interfaces
def get_interfaces_info(self, name):
return [FakeEtherInfo(self.interfaces[name])]
def get_devices(self):
return self.interfaces.keys()
def get_hwaddr(self, name):
return self.interfaces[name]["mac_address"]
def get_flags(self, name):
# just hard-code this for now
return 4163
class TestConfigureCorosync(CommandCaptureTestCase):
def setUp(self):
super(TestConfigureCorosync, self).setUp()
from chroma_agent.lib.corosync import CorosyncRingInterface
from chroma_agent.lib.corosync import env
def get_shared_ring():
return CorosyncRingInterface("eth0.1.1?1b34*430")
mock.patch("chroma_agent.lib.corosync.get_shared_ring", get_shared_ring).start()
self.interfaces = {
"eth0.1.1?1b34*430": {
"device": "eth0.1.1?1b34*430",
"mac_address": "de:ad:be:ef:ca:fe",
"ipv4_address": "192.168.1.1",
"ipv4_netmask": "255.255.255.0",
"link_up": True,
},
"eth1": {
"device": "eth1",
"mac_address": "ba:db:ee:fb:aa:af",
"ipv4_address": None,
"ipv4_netmask": 0,
"link_up": True,
},
}
# Just mock out the entire module ... This will make the tests
# run on OS X or on Linux without the python-ethtool package.
self.old_ethtool = sys.modules.get("ethtool", None)
ethtool = fake_ethtool(self.interfaces)
sys.modules["ethtool"] = ethtool
self.write_ifcfg = mock.patch("chroma_agent.lib.node_admin.write_ifcfg").start()
self.unmanage_network = mock.patch(
"chroma_agent.lib.node_admin.unmanage_network"
).start()
self.write_config_to_file = mock.patch(
"chroma_agent.action_plugins.manage_corosync.write_config_to_file"
).start()
mock.patch(
"chroma_agent.action_plugins.manage_pacemaker.unconfigure_pacemaker"
).start()
old_set_address = CorosyncRingInterface.set_address
def set_address(obj, address, prefix):
if self.interfaces[obj.name]["ipv4_address"] is None:
self.interfaces[obj.name]["ipv4_address"] = address
self.interfaces[obj.name]["ipv4_netmask"] = prefix
old_set_address(obj, address, prefix)
mock.patch(
"chroma_agent.lib.corosync.CorosyncRingInterface.set_address", set_address
).start()
@property
def has_link(obj):
return self.interfaces[obj.name]["link_up"]
self.link_patcher = mock.patch(
"chroma_agent.lib.corosync.CorosyncRingInterface.has_link", has_link
)
self.link_patcher.start()
mock.patch(
"chroma_agent.lib.corosync.find_unused_port", return_value=4242
).start()
mock.patch("chroma_agent.lib.corosync.discover_existing_mcastport").start()
self.conf_template = env.get_template("corosync.conf")
# mock out firewall control calls and check with assert_has_calls in tests
self.mock_add_port = mock.patch.object(
FirewallControlEL7, "_add_port", return_value=None
).start()
self.mock_remove_port = mock.patch.object(
FirewallControlEL7, "_remove_port", return_value=None
).start()
# mock out service control objects with ServiceControlEL7 spec and check with assert_has_calls in tests
# this assumes, quite rightly, that manage_corosync and manage_corosync2 will not both be used in the same test
self.mock_corosync_service = mock.create_autospec(ServiceControlEL7)
self.mock_corosync_service.enable.return_value = None
self.mock_corosync_service.disable.return_value = None
mock.patch(
"chroma_agent.action_plugins.manage_corosync.corosync_service",
self.mock_corosync_service,
).start()
mock.patch(
"chroma_agent.action_plugins.manage_corosync2.corosync_service",
self.mock_corosync_service,
).start()
self.mock_pcsd_service = mock.create_autospec(ServiceControlEL7)
self.mock_pcsd_service.enable.return_value = None
self.mock_pcsd_service.start.return_value = None
mock.patch(
"chroma_agent.action_plugins.manage_corosync2.pcsd_service",
self.mock_pcsd_service,
).start()
mock.patch(
"chroma_agent.action_plugins.manage_corosync.firewall_control",
FirewallControlEL7(),
).start()
mock.patch(
"chroma_agent.action_plugins.manage_corosync2.firewall_control",
FirewallControlEL7(),
).start()
# Guaranteed cleanup with unittest2
self.addCleanup(mock.patch.stopall)
def tearDown(self):
if self.old_ethtool:
sys.modules["ethtool"] = self.old_ethtool
def _ring_iface_info(self, mcast_port):
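# Build fake ring-interface info (ring number, bindnetaddr, mcast address/port) from the mocked interfaces.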
from netaddr import IPNetwork
interfaces = []
for name in sorted(self.interfaces.keys()):
interface = self.interfaces[name]
bindnetaddr = IPNetwork(
"%s/%s" % (interface["ipv4_address"], interface["ipv4_netmask"])
).network
ringnumber = name[-1]
interfaces.append(
FakeEtherInfo(
{
"ringnumber": ringnumber,
"bindnetaddr": bindnetaddr,
"mcastaddr": "226.94.%s.1" % ringnumber,
"mcastport": mcast_port,
}
)
)
return interfaces
def _render_test_config(self, mcast_port):
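# Render the corosync.conf template with the fake ring-interface info above.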
return self.conf_template.render(interfaces=self._ring_iface_info(mcast_port))
def test_manual_ring1_config(self):
from chroma_agent.action_plugins.manage_corosync_common import configure_network
from chroma_agent.action_plugins.manage_corosync import configure_corosync
ring0_name = "eth0.1.1?1b34*430"
ring1_name = "eth1"
ring1_ipaddr = "10.42.42.42"
ring1_netmask = "255.255.255.0"
old_mcast_port = None
new_mcast_port = "4242"
# add shell commands to be expected
self.add_commands(
CommandCaptureCommand(("/sbin/ip", "link", "set", "dev", ring1_name, "up")),
CommandCaptureCommand(
(
"/sbin/ip",
"addr",
"add",
"%s/%s" % (ring1_ipaddr, ring1_netmask),
"dev",
ring1_name,
)
),
)
# now a two-step process! first network...
self.assertEqual(
agent_result_ok,
configure_network(
ring0_name,
ring1_name=ring1_name,
ring1_ipaddr=ring1_ipaddr,
ring1_prefix=ring1_netmask,
),
)
self.write_ifcfg.assert_called_with(
ring1_name, "ba:db:ee:fb:aa:af", "10.42.42.42", "255.255.255.0"
)
self.unmanage_network.assert_called_with(ring1_name, "ba:db:ee:fb:aa:af")
# ...then corosync
self.assertEqual(
agent_result_ok,
configure_corosync(ring0_name, ring1_name, old_mcast_port, new_mcast_port),
)
test_config = self._render_test_config(new_mcast_port)
self.write_config_to_file.assert_called_with(
"/etc/corosync/corosync.conf", test_config
)
# check correct firewall and service calls were made
self.mock_add_port.assert_has_calls([mock.call(new_mcast_port, "udp")])
self.mock_remove_port.assert_not_called()
self.mock_corosync_service.enable.assert_called_once_with()
self.assertRanAllCommandsInOrder()
self.mock_remove_port.reset_mock()
self.mock_add_port.reset_mock()
self.mock_corosync_service.reset_mock()
# ...now change corosync mcast port
old_mcast_port = "4242"
new_mcast_port = "4246"
self.assertEqual(
agent_result_ok,
configure_corosync(ring0_name, ring1_name, old_mcast_port, new_mcast_port),
)
test_config = self._render_test_config(new_mcast_port)
# check we try to write template with new_mcast_port value
self.write_config_to_file.assert_called_with(
"/etc/corosync/corosync.conf", test_config
)
# check correct firewall and service calls were made
self.mock_remove_port.assert_has_calls([mock.call(old_mcast_port, "udp")])
self.mock_add_port.assert_has_calls([mock.call(new_mcast_port, "udp")])
self.mock_corosync_service.enable.assert_called_once_with()
def _test_manual_ring1_config_corosync2(self, fqdn=False):
import socket
from chroma_agent.action_plugins.manage_corosync2 import (
configure_corosync2_stage_1,
)
from chroma_agent.action_plugins.manage_corosync2 import (
configure_corosync2_stage_2,
)
from chroma_agent.action_plugins.manage_corosync2 import PCS_TCP_PORT
from chroma_agent.action_plugins.manage_corosync_common import configure_network
ring0_name = "eth0.1.1?1b34*430"
ring1_name = "eth1"
ring1_ipaddr = "10.42.42.42"
ring1_netmask = "255.255.255.0"
mcast_port = "4242"
new_node_fqdn = "servera.somewhere.org"
pcs_password = "bondJAMESbond"
# add shell commands to be expected
self.add_commands(
CommandCaptureCommand(("/sbin/ip", "link", "set", "dev", ring1_name, "up")),
CommandCaptureCommand(
(
"/sbin/ip",
"addr",
"add",
"/".join([ring1_ipaddr, ring1_netmask]),
"dev",
ring1_name,
)
),
)
if fqdn:
self.add_commands(
CommandCaptureCommand(("hostnamectl", "set-hostname", new_node_fqdn))
)
self.add_commands(
CommandCaptureCommand(
("bash", "-c", "echo bondJAMESbond | passwd --stdin hacluster")
),
CommandCaptureCommand(
tuple(
["pcs", "cluster", "auth"]
+ [new_node_fqdn]
+ ["-u", "hacluster", "-p", "bondJAMESbond"]
)
),
)
# now a two-step process! first network...
self.assertEqual(
agent_result_ok,
configure_network(
ring0_name,
ring1_name=ring1_name,
ring1_ipaddr=ring1_ipaddr,
ring1_prefix=ring1_netmask,
),
)
self.write_ifcfg.assert_called_with(
ring1_name, "ba:db:ee:fb:aa:af", "10.42.42.42", "255.255.255.0"
)
# fetch ring info
r0, r1 = self._ring_iface_info(mcast_port)
# add shell commands to be expected populated with ring interface info
self.add_command(
(
"pcs",
"cluster",
"setup",
"--name",
"lustre-ha-cluster",
"--force",
new_node_fqdn,
"--transport",
"udp",
"--rrpmode",
"passive",
"--addr0",
str(r0.bindnetaddr),
"--mcast0",
str(r0.mcastaddr),
"--mcastport0",
str(r0.mcastport),
"--addr1",
str(r1.bindnetaddr),
"--mcast1",
str(r1.mcastaddr),
"--mcastport1",
str(r1.mcastport),
"--token",
"17000",
"--fail_recv_const",
"10",
)
)
# ...then corosync / pcsd
if fqdn:
self.assertEqual(
agent_result_ok,
configure_corosync2_stage_1(mcast_port, pcs_password, new_node_fqdn),
)
else:
self.assertEqual(
agent_result_ok, configure_corosync2_stage_1(mcast_port, pcs_password)
)
self.assertEqual(
agent_result_ok,
configure_corosync2_stage_2(
ring0_name, ring1_name, new_node_fqdn, mcast_port, pcs_password, True
),
)
# check correct firewall and service calls were made
self.mock_add_port.assert_has_calls(
[mock.call(mcast_port, "udp"), mock.call(PCS_TCP_PORT, "tcp")]
)
self.mock_remove_port.assert_not_called()
self.mock_pcsd_service.start.assert_called_once_with()
self.mock_corosync_service.enable.assert_called_once_with()
self.mock_pcsd_service.enable.assert_called_once_with()
self.mock_remove_port.reset_mock()
self.mock_add_port.reset_mock()
self.mock_corosync_service.reset_mock()
self.assertRanAllCommandsInOrder()
def test_manual_ring1_config_corosync2(self):
self._test_manual_ring1_config_corosync2(False)
def test_manual_ring1_config_corosync2_fqdn(self):
self._test_manual_ring1_config_corosync2(True)
def | (self):
from chroma_agent.action_plugins.manage_corosync2 import unconfigure_corosync2
from chroma_agent.action_plugins.manage_corosync2 import PCS_TCP_PORT
host_fqdn = "serverb.somewhere.org"
mcast_port = "4242"
# add shell commands to be expected
self.add_commands(
CommandCaptureCommand(("pcs", "status", "nodes", "corosync")),
CommandCaptureCommand(
("pcs", "--force", "cluster", "node", "remove", host_fqdn)
),
)
self.assertEqual(agent_result_ok, unconfigure_corosync2(host_fqdn, mcast_port))
self.mock_corosync_service.disable.assert_called_once_with()
self.mock_remove_port.assert_has_calls(
[mock.call(PCS_TCP_PORT, "tcp"), mock.call(mcast_port, "udp")]
)
self.assertRanAllCommandsInOrder()
def test_find_subnet(self):
from chroma_agent.lib.corosync import find_subnet
from netaddr import IPNetwork
test_map = {
("192.168.1.0", "24"): IPNetwork("10.0.0.0/24"),
("10.0.1.0", "24"): IPNetwork("10.128.0.0/24"),
("10.128.0.0", "9"): IPNetwork("10.0.0.0/9"),
("10.127.255.254", "9"): IPNetwork("10.128.0.0/9"),
("10.255.255.255", "32"): IPNetwork("10.0.0.0/32"),
}
for args, output in test_map.items():
self.assertEqual(output, find_subnet(*args))
def test_link_state_unknown(self):
with mock.patch("__builtin__.open", mock.mock_open(read_data="unknown")):
with mock.patch(
"chroma_agent.lib.corosync.CorosyncRingInterface.__getattr__",
return_value=False,
):
with mock.patch("os.path.exists", return_value=True):
self.link_patcher.stop()
from chroma_agent.lib.corosync import get_shared_ring
iface = get_shared_ring()
# add shell commands to be expected
self.add_commands(
CommandCaptureCommand(
("/sbin/ip", "link", "set", "dev", iface.name, "up")
),
CommandCaptureCommand(
("/sbin/ip", "link", "set", "dev", iface.name, "down")
),
)
self.assertFalse(iface.has_link)
self.assertRanAllCommandsInOrder()
| test_unconfigure_corosync2 |
sidecarServer.go | package sidecar
import (
"fmt"
"github.com/Open-Twin/citymesh/complete_mesh/ddns"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"log"
"net"
_ "os"
)
const (
hostname = "Sidecar"
ipadr = "127.0.0.1"
rtype = "store"
listeningport = "9000"
)
func NewServer() {
// Make the service reachable: hostname + service IP + service type (store etc.)
ddns.Register(hostname,ipadr,rtype)
// create a TCP Listener on Port 9000
lis, err := net.Listen("tcp", ":"+listeningport)
// this is how you handle errors in Go
if err != nil {
log.Fatalf("Failed to listen on port 9000 %v", err)
}
// this is just a structure that has an interface with needed function SayHello
s := Server{}
fmt.Println("Sidecar: GRPC-Server started ... ")
//create the GRCP Server |
creds, _ := credentials.NewServerTLSFromFile("cert/service.pem", "cert/service.key")
grpcServer := grpc.NewServer(grpc.Creds(creds), grpc.MaxSendMsgSize(1000*1024*1024), grpc.MaxRecvMsgSize(1000*1024*1024))
// error handling omitted
//grpcServer := grpc.NewServer()
RegisterChatServiceServer(grpcServer, &s)
fmt.Println("Sidecar: after test")
// start listening on port 9000 for rpc calls
if err := grpcServer.Serve(lis); err != nil {
log.Fatalf("Failed to serve gRPC server over port 9000 %v", err)
}
} | |
colorizer.py | from colored import *
import staticconf
"""
You might find the colored documentation very useful:
https://pypi.python.org/pypi/colored
"""
ENABLE_COLORIZER = staticconf.read_string('enable_colorizer', default='false').lower() == 'true'
def colorizer_enabled(function):
|
# attr and colors
ATTR_RESET = attr('reset')
COLOR_INDEX = fg(199)
COLOR_TITLE = fg(45)
COLOR_TAG_0 = fg(10) + attr('bold')
COLOR_TAG_1 = fg(10)
COLOR_TAG_2 = fg(87)
COLOR_TAG_3 = fg(208)
COLOR_TAG_4 = fg(252)
@colorizer_enabled
def color_index(index):
return COLOR_INDEX + index + ATTR_RESET
@colorizer_enabled
def color_title(title):
return COLOR_TITLE + title + ATTR_RESET
def _color_by_score(score):
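# Map a tag's relevance score to a terminal color: higher scores get brighter/bolder colors.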
if score >= 1:
return COLOR_TAG_0
elif score >= 0.9:
return COLOR_TAG_1
elif score >= 0.8:
return COLOR_TAG_2
elif score >= 0.7:
return COLOR_TAG_3
return COLOR_TAG_4
@colorizer_enabled
def _color_tag(tag, score):
return _color_by_score(score) + tag + ATTR_RESET
def color_tags(scored_tags):
return ", ".join((_color_tag(tag, score) for tag, score in scored_tags))
| """do not colorize if it's not enabled"""
def wrapper(*args):
if ENABLE_COLORIZER:
return function(*args)
elif args:
return args[0]
else:
return args
return wrapper |
FindSingleNumber.py | def find_single(arr, n): | return res | res = arr[0]
for i in range(1,n):
res = res ^ arr[i] |
application.rs | use std::marker::PhantomData;
use crate::{
runtime::Runtime,
window::{RuntimeWindow, Window, WindowCreator},
};
/// A trait that describes the application's behavior.
pub trait Application: Sized + Send + Sync {
/// Executed upon application launch.
fn initialize(&mut self) {}
/// Return true if the app should exit. Default implementation returns true
/// once [`Application::open_window_count()`] returns zero.
fn should_exit(&mut self) -> bool {
Self::open_window_count() == 0
}
/// Returns the number of open windows.
#[must_use]
fn open_window_count() -> usize {
RuntimeWindow::count()
}
}
/// An [`Application`] implementation that begins with a single window.
///
/// If feature `multiwindow` is enabled, multiple windows can still be opened.
/// This structure just provides a way to run an app without explicitly
/// implementing [`Application`] on one of your types.
pub struct | <T> {
phantom: PhantomData<T>,
}
impl<T> Application for SingleWindowApplication<T> where T: Window + WindowCreator + 'static {}
impl<T> SingleWindowApplication<T>
where
T: Window + WindowCreator + 'static,
{
/// Runs the app. Does not return.
pub fn run(window: T) -> ! {
let app = Self {
phantom: PhantomData::default(),
};
Runtime::new(app).run(window.get_window_builder(), window)
}
}
| SingleWindowApplication |
connections.rs | use super::error::ErrorKind;
use super::scalars::BlockCount;
use super::{Block, Context, ExplorerDB};
use blockcfg;
use futures::Future;
use juniper::{FieldResult, ParseScalarResult, ParseScalarValue, Value};
use std::convert::TryFrom;
#[derive(Clone)]
pub struct BlockCursor(blockcfg::ChainLength);
juniper::graphql_scalar!(BlockCursor where Scalar = <S> {
description: "Opaque cursor to use in block pagination, a client should not rely in its representation"
// FIXME: Cursors are recommended to be opaque, but I'm not sure it is worth to
// obfuscate its representation
resolve(&self) -> Value {
Value::scalar(self.0.to_string())
}
from_input_value(v: &InputValue) -> Option<BlockCursor> {
v.as_scalar_value::<String>()
.and_then(|s| s.parse::<u32>().ok())
.map(|n| BlockCursor(blockcfg::ChainLength::from(n)))
}
from_str<'a>(value: ScalarToken<'a>) -> ParseScalarResult<'a, S> {
<String as ParseScalarValue<S>>::from_str(value)
}
});
impl From<u32> for BlockCursor {
fn from(number: u32) -> BlockCursor {
BlockCursor(blockcfg::ChainLength::from(number))
}
}
impl From<BlockCursor> for u32 {
fn from(number: BlockCursor) -> u32 {
number.0.into()
}
}
impl From<blockcfg::ChainLength> for BlockCursor {
fn from(length: blockcfg::ChainLength) -> BlockCursor {
BlockCursor(length)
}
}
pub struct PageInfo {
pub has_next_page: bool,
pub has_previous_page: bool,
pub start_cursor: BlockCursor,
pub end_cursor: BlockCursor,
}
#[juniper::object(
Context = Context
)]
impl PageInfo {
pub fn has_next_page(&self) -> bool {
self.has_next_page
}
pub fn has_previous_page(&self) -> bool {
self.has_previous_page
}
pub fn start_cursor(&self) -> &BlockCursor {
&self.start_cursor
}
pub fn end_cursor(&self) -> &BlockCursor {
&self.end_cursor
}
}
pub struct BlockEdge {
pub node: Block,
pub cursor: BlockCursor,
}
#[juniper::object(
Context = Context
)]
impl BlockEdge {
pub fn node(&self) -> &Block {
&self.node
}
/// A cursor for use in pagination
pub fn cursor(&self) -> &BlockCursor {
&self.cursor
}
}
pub struct BlockConnection {
pub page_info: PageInfo,
pub edges: Vec<BlockEdge>,
pub total_count: BlockCount,
}
#[juniper::object(
Context = Context
)]
impl BlockConnection {
pub fn page_info(&self) -> &PageInfo {
&self.page_info
}
pub fn edges(&self) -> &Vec<BlockEdge> {
&self.edges
}
/// A count of the total number of objects in this connection, ignoring pagination.
pub fn total_count(&self) -> &BlockCount {
&self.total_count
}
}
impl BlockConnection {
// The lower and upper bound are used to define all the blocks this connection will show
// In particular, they are used to paginate Epoch blocks from first block in epoch to
// last.
pub fn new(
lower_bound: BlockCursor,
upper_bound: BlockCursor,
first: Option<i32>,
last: Option<i32>,
before: Option<BlockCursor>,
after: Option<BlockCursor>,
db: &ExplorerDB,
) -> FieldResult<BlockConnection> {
use std::cmp::{max, min};
let lower_bound = u32::from(lower_bound);
let upper_bound = u32::from(upper_bound);
// Compute the required range of blocks in two variables: [from, to]
// Both ends are inclusive
let mut from = match after {
Some(cursor) => u32::from(cursor) + 1,
// If `after` is not set, start from the beginning
None => lower_bound,
};
let mut to = match before {
Some(cursor) => u32::from(cursor) - 1,
// If `before` is not set, go all the way to the end
None => upper_bound,
};
// Move `to` enough values to make the result have `first` blocks | if let Some(first) = first {
if first < 0 {
return Err(ErrorKind::ArgumentError(
"first argument should be positive".to_owned(),
)
.into());
} else {
to = min(
from.checked_add(u32::try_from(first).unwrap())
.or(Some(to))
.unwrap()
- 1,
to,
);
}
}
// Move `from` enough values to make the result have `last` blocks
if let Some(last) = last {
if last < 0 {
return Err(ErrorKind::ArgumentError(
"last argument should be positive".to_owned(),
)
.into());
} else {
from = max(
u32::from(to)
.checked_sub(u32::try_from(last).unwrap())
.or(Some(from))
.unwrap()
+ 1,
from,
);
}
}
let has_next_page = to < upper_bound;
let has_previous_page = from > lower_bound;
let edges: Vec<_> = db
.get_block_hash_range(from.into(), (to + 1).into())
.wait()?
.iter()
.map(|(hash, chain_length)| BlockEdge {
node: Block::from_valid_hash(*hash),
cursor: (*chain_length).into(),
})
.collect();
let start_cursor = edges.first().expect("to be at least 1 edge").cursor.clone();
let end_cursor = edges
.last()
.map(|e| e.cursor.clone())
.unwrap_or(start_cursor.clone());
Ok(BlockConnection {
edges,
page_info: PageInfo {
has_next_page,
has_previous_page,
start_cursor,
end_cursor,
},
total_count: (upper_bound - lower_bound).into(),
})
}
} | |
WebNativeCryptor.d.ts | import { CryptorService, KeyEncryptionProvider } from './CryptorService';
import { EncryptedData, HexString, Injectable, InternalReactNativeEncryptedKey } from 'ferrum-plumbing';
export declare function utf8ToHex(hexStr: HexString): HexString;
export declare function hexToUtf8(hexStr: HexString): string;
export declare function arrayBufferToHex(ab: Uint8Array): HexString;
export declare function ripemd160(hex: HexString): string;
export declare function randomBytes(size: number): HexString;
export declare function hexToArrayBuffer(hex: HexString): Uint8Array;
/**
* Convert a hex string to a byte array
*
* Note: Implementation from crypto-js
*
* @method hexToBytes
* @param {string} hex
* @return {Array} the byte array
*/
export declare function hexToBase64(hex: HexString): string;
export declare function hexToBase58(hex: HexString): string;
export declare function base64ToHex(base64: string): HexString;
export declare function sha256(hexData: string): Promise<HexString>;
export declare function sha256sync(hexData: string): HexString;
export declare function hmac(secret: HexString, dataUtf8: string): string;
export declare function sha1(hexData: HexString): HexString;
export declare function sha3(hexData: string): HexString;
export declare class | implements CryptorService, Injectable {
private keyProvider;
constructor(keyProvider: KeyEncryptionProvider);
protected decryptKey(key: InternalReactNativeEncryptedKey, overrideKey?: HexString): Promise<string>;
protected newKey(overrideKey?: HexString): Promise<{
encryptedKey: HexString;
keyId: string;
unEncrypedKey: string;
}>;
decryptToHex(encData: EncryptedData, overrideKey?: string): Promise<HexString>;
encryptHex(data: HexString, overrideKey?: HexString): Promise<EncryptedData>;
sha256(hexData: string): Promise<HexString>;
__name__(): string;
}
//# sourceMappingURL=WebNativeCryptor.d.ts.map | WebNativeCryptor |
gitfs.py | # -*- coding:utf-8 -*- | from common.sso import access_required
from common.audit_log import audit_log
from flask import g
from resources.sls import delete_sls
import base64
logger = loggers()
parser = reqparse.RequestParser()
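# Request arguments shared by all endpoints in this module.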
parser.add_argument("product_id", type=str, required=True, trim=True)
parser.add_argument("branch", type=str, default="master", trim=True)
parser.add_argument("path", type=str, default="", trim=True)
parser.add_argument("project_type", type=str, required=True, trim=True)
parser.add_argument("action", type=str, default="", trim=True)
parser.add_argument("content", type=str, default="", trim=True)
# Get all branches
class BranchList(Resource):
@access_required(role_dict["common_user"])
def get(self):
args = parser.parse_args()
project, _ = gitlab_project(args["product_id"], args["project_type"])
if isinstance(project, dict):
return project, 500
else:
branch_list = []
try:
branch = project.branches.list()
for b in branch:
branch_list.append(b.name)
except Exception as e:
logger.error("Get branch error: %s" % e)
return {"status": False, "message": str(e)}, 500
return {"data": branch_list, "status": True, "message": ""}, 200
# Get the directory tree
class FilesList(Resource):
@access_required(role_dict["common_user"])
def get(self):
args = parser.parse_args()
project, product_name = gitlab_project(args["product_id"], args["project_type"])
if isinstance(project, dict):
return project, 500
else:
file_list = []
try:
items = project.repository_tree(path=args["path"], ref_name=args["branch"])
except Exception as e:
logger.error("Get file list error: %s" % e)
return {"status": False, "message": str(e)}, 404
if args["path"] == "/" or args["path"] is "":
for i in items:
if i["type"] == "tree":
file_list.append({"title": i["name"],
"type": i["type"],
"path": i["name"],
"loading": False,
"children": []
})
else:
file_list.append({"title": i["name"],
"type": i["type"],
"path": i["name"],
})
return {"data": [{
"title": product_name,
"expand": True,
"children": file_list,
"type": "tree",
}], "status": True, "message": ""}, 200
else:
for i in items:
if i["type"] == "tree":
file_list.append({"title": i["name"],
"type": i["type"],
"path": args["path"] + "/" + i["name"],
"loading": False,
"children": []
})
else:
file_list.append({"title": i["name"],
"type": i["type"],
"path": args["path"] + "/" + i["name"],
})
return {"data": file_list, "status": True, "message": ""}, 200
# Get file content
class FileContent(Resource):
@access_required(role_dict["common_user"])
def get(self):
args = parser.parse_args()
project, _ = gitlab_project(args["product_id"], args["project_type"])
if isinstance(project, dict):
return project, 500
else:
try:
content = project.files.get(file_path=args["path"], ref=args["branch"])
content_decode = content.decode().decode("utf-8")
except Exception as e:
logger.error("Get file content: %s" % e)
return {"status": False, "message": str(e)}, 404
return {"data": content_decode, "status": True, "message": ""}, 200
# Create, modify, and commit files
class Commit(Resource):
@access_required(role_dict["common_user"])
def post(self):
args = parser.parse_args()
user = g.user_info["username"]
project, _ = gitlab_project(args["product_id"], args["project_type"])
# Supported actions: create, delete, move, update
data = {
'branch': args["branch"],
'commit_message': args["action"] + " " + args["path"],
'actions': [
{
'action': args["action"],
'file_path': args["path"],
'content': args["content"]
}
]
}
if isinstance(project, dict):
return project, 500
else:
try:
project.commits.create(data)
# If the action is delete, also remove the SLS record stored in the database
if args["action"] == "delete":
delete_sls(args["path"])
audit_log(user, args["path"], args["product_id"], "sls", args["action"])
except Exception as e:
logger.error("Commit file: %s" % e)
return {"status": False, "message": str(e)}, 500
return {"status": True, "message": ""}, 200
# Upload a file
class Upload(Resource):
@access_required(role_dict["common_user"])
def post(self):
args = parser.parse_args()
user = g.user_info["username"]
project, _ = gitlab_project(args["product_id"], args["project_type"])
file = request.files['file']
if args["path"]:
file_path = args["path"] + "/" + file.filename
content = file.read()
try:
content_decode = content.decode()
actions = [
{
'action': 'create',
'file_path': file_path,
'content': content_decode
}
]
except Exception as e:
return {"status": False, "message": str(e)}, 500
# try:
# content_decode = content.decode()
# actions = [
# {
# 'action': args["action"],
# 'file_path': file_path,
# 'content': base64.b64encode(content_decode),
# 'encoding': 'base64',
# }
# ]
# except Exception as e:
# print(e)
data = {
'branch': args["branch"],
'commit_message': args["action"] + " " + args["path"],
'actions': actions
}
if isinstance(project, dict):
return project, 500
else:
try:
project.commits.create(data)
audit_log(user, file_path, args["product_id"], "sls", "upload")
except Exception as e:
logger.error("Upload file: %s" % e)
return {"status": False, "message": str(e)}, 500
return {"status": True, "message": ""}, 200 | from flask_restful import Resource, reqparse, request
from fileserver.git_fs import gitlab_project
from common.const import role_dict
from common.log import loggers |