file_name | prefix | suffix | middle |
---|---|---|---|
validate.py | """Validate some things around restore."""
from __future__ import annotations
from typing import Any
import voluptuous as vol
from ..backups.const import BackupType
from ..const import (
ATTR_ADDONS,
ATTR_COMPRESSED,
ATTR_CRYPTO,
ATTR_DATE,
ATTR_DOCKER,
ATTR_FOLDERS,
ATTR_HOMEASSISTANT,
ATTR_NAME,
ATTR_PROTECTED,
ATTR_REPOSITORIES,
ATTR_SIZE,
ATTR_SLUG,
ATTR_TYPE,
ATTR_VERSION,
CRYPTO_AES128,
FOLDER_ADDONS,
FOLDER_HOMEASSISTANT,
FOLDER_MEDIA,
FOLDER_SHARE,
FOLDER_SSL,
)
from ..validate import SCHEMA_DOCKER_CONFIG, repositories, version_tag
ALL_FOLDERS = [
FOLDER_SHARE,
FOLDER_ADDONS,
FOLDER_SSL,
FOLDER_MEDIA,
]
def unique_addons(addons_list):
"""Validate that an add-on is unique."""
single = {addon[ATTR_SLUG] for addon in addons_list}
if len(single) != len(addons_list):
raise vol.Invalid("Invalid addon list in backup!") from None
return addons_list
def v1_homeassistant(
homeassistant_data: dict[str, Any] | None
) -> dict[str, Any] | None:
"""Cleanup homeassistant artefacts from v1."""
if not homeassistant_data:
return None
if homeassistant_data.get(ATTR_VERSION) is None:
return None
return homeassistant_data
def v1_folderlist(folder_data: list[str]) -> list[str]:
|
def v1_protected(protected: bool | str) -> bool:
"""Cleanup old protected handling."""
if isinstance(protected, bool):
return protected
return True
# pylint: disable=no-value-for-parameter
SCHEMA_BACKUP = vol.Schema(
{
vol.Optional(ATTR_VERSION, default=1): vol.All(vol.Coerce(int), vol.In((1, 2))),
vol.Required(ATTR_SLUG): str,
vol.Required(ATTR_TYPE): vol.Coerce(BackupType),
vol.Required(ATTR_NAME): str,
vol.Required(ATTR_DATE): str,
vol.Optional(ATTR_COMPRESSED, default=True): vol.Boolean(),
vol.Optional(ATTR_PROTECTED, default=False): vol.All(
v1_protected, vol.Boolean()
),
vol.Optional(ATTR_CRYPTO, default=None): vol.Maybe(CRYPTO_AES128),
vol.Optional(ATTR_HOMEASSISTANT, default=None): vol.All(
v1_homeassistant,
vol.Maybe(
vol.Schema(
{
vol.Required(ATTR_VERSION): version_tag,
vol.Optional(ATTR_SIZE, default=0): vol.Coerce(float),
},
extra=vol.REMOVE_EXTRA,
)
),
),
vol.Optional(ATTR_DOCKER, default=dict): SCHEMA_DOCKER_CONFIG,
vol.Optional(ATTR_FOLDERS, default=list): vol.All(
v1_folderlist, [vol.In(ALL_FOLDERS)], vol.Unique()
),
vol.Optional(ATTR_ADDONS, default=list): vol.All(
[
vol.Schema(
{
vol.Required(ATTR_SLUG): str,
vol.Required(ATTR_NAME): str,
vol.Required(ATTR_VERSION): version_tag,
vol.Optional(ATTR_SIZE, default=0): vol.Coerce(float),
},
extra=vol.REMOVE_EXTRA,
)
],
unique_addons,
),
vol.Optional(ATTR_REPOSITORIES, default=list): repositories,
},
extra=vol.ALLOW_EXTRA,
)
| """Cleanup folder artefacts from v1."""
if FOLDER_HOMEASSISTANT in folder_data:
folder_data.remove(FOLDER_HOMEASSISTANT)
return folder_data |
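A minimal usage sketch of the validators above (hypothetical data; it assumes the Supervisor constant `ATTR_SLUG` resolves to the key `"slug"`): `unique_addons` rejects any add-on list whose entries share a slug.

```python
# Hypothetical sketch, assuming ATTR_SLUG == "slug".
import voluptuous as vol

addons = [{"slug": "core_ssh"}, {"slug": "core_ssh"}]
try:
    unique_addons(addons)
except vol.Invalid:
    print("duplicate add-on slugs rejected")
```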
server_test.go | package server
import (
"testing" | func Test_startServer(t *testing.T) {
Start()
} | )
|
main.go | package main
import (
"context"
"net/http"
"github.com/wfnuser/gcloak"
oidc "github.com/coreos/go-oidc"
"github.com/gin-gonic/gin"
"golang.org/x/oauth2"
)
func setupRouter() *gin.Engine {
// Initialize the gin entity
r := gin.Default()
configURL := "http://localhost:8080/auth/realms/cloud"
ctx := context.Background()
provider, err := oidc.NewProvider(ctx, configURL)
if err != nil {
panic(err)
}
clientID := "myclient"
clientSecret := "FZu9jL7sG7A1lYvjTO9D7RzXkbNBAGa4"
redirectURL := "http://localhost:8181/v1/token"
// Configure an OpenID Connect aware OAuth2 client.
oauth2Config := oauth2.Config{
ClientID: clientID,
ClientSecret: clientSecret,
RedirectURL: redirectURL,
Endpoint: provider.Endpoint(),
Scopes: []string{oidc.ScopeOpenID, "profile", "email", "roles"},
}
conf := gcloak.GCloakConf{
Endpoint: "http://localhost:8080/auth/realms/cloud",
RedirectURL: "http://localhost:8181/v1/token",
IDTokenCookieName: "KeyCloakCloudID",
AccessTokenCookieName: "KeyCloakCloudAccess",
ClientID: "myclient",
ClientSecret: "FZu9jL7sG7A1lYvjTO9D7RzXkbNBAGa4",
TTL: 120,
}
var accessMap map[string][]string = map[string][]string{
"/v1/ping": {"dev"},
}
v1 := r.Group("v1")
{
// routes with authorization
a := v1.Group("")
a.Use(gcloak.KeyCloakAuth(conf, accessMap))
// ping for testing
a.GET("/ping", func(c *gin.Context) {
c.String(http.StatusOK, "pong")
})
}
v1.GET("/token", gcloak.TokenHandler(conf))
v1.GET("/login", func(c *gin.Context) {
c.Redirect(http.StatusFound, oauth2Config.AuthCodeURL("state"))
})
return r
}
func main() | {
r := setupRouter()
r.Run(":8181")
} |
|
code.rs | use std::collections::HashMap;
use super::command::{Command, Comp, Dest, Jump};
struct SymbolTable {
table: HashMap<String, u16>,
}
impl SymbolTable {
pub fn new(program: &[Command]) -> Self {
let mut table = vec![
("SP".to_string(), 0),
("LCL".to_string(), 1),
("ARG".to_string(), 2),
("THIS".to_string(), 3),
("THAT".to_string(), 4),
("R0".to_string(), 0),
("R1".to_string(), 1),
("R2".to_string(), 2),
("R3".to_string(), 3),
("R4".to_string(), 4),
("R5".to_string(), 5),
("R6".to_string(), 6),
("R7".to_string(), 7),
("R8".to_string(), 8),
("R9".to_string(), 9),
("R10".to_string(), 10),
("R11".to_string(), 11),
("R12".to_string(), 12),
("R13".to_string(), 13),
("R14".to_string(), 14),
("R15".to_string(), 15),
("SCREEN".to_string(), 16384),
("KBD".to_string(), 24576),
].into_iter().collect::<HashMap::<_, _>>();
let mut pc = 0;
for command in program.iter() {
match command {
Command::L(label) => {
table.insert(label.clone(), pc);
},
_ => pc += 1,
}
}
let mut address = 0x10;
for command in program.iter() {
if let Command::ASymbol(label) = command {
if !table.contains_key(label) {
table.insert(label.clone(), address);
address += 1;
}
}
}
Self { table }
}
pub fn get(&self, label: &str) -> Option<u16> {
self.table.get(label).copied()
}
}
fn assemble_command(command: &Command, symbol_table: &SymbolTable) -> anyhow::Result<Option<u16>> {
match command {
Command::AImm(imm) => Ok(Some(*imm)),
Command::ASymbol(symbol) => {
if let Some(v) = symbol_table.get(symbol) {
Ok(Some(v))
} else {
Err(anyhow::anyhow!("Undefined symbol: {}", symbol))
}
}
Command::C(dest, comp, jump) => {
let bits = 0xe000 | (comp.assemble() << 6) | (dest.assemble() << 3) | jump.assemble();
Ok(Some(bits))
}
_ => Ok(None),
}
}
pub fn parse(lines: &[String]) -> anyhow::Result<Vec<Command>> |
pub fn assemble(program: &[Command]) -> anyhow::Result<Vec<u16>> {
let symbol_table = SymbolTable::new(program);
let mut result = Vec::new();
for command in program {
if let Some(bits) = assemble_command(command, &symbol_table)? {
result.push(bits);
}
}
Ok(result)
} | {
lines.iter()
.filter(|line| {
let line = line.trim();
!line.is_empty() && !line.starts_with("//")
})
.map(|s| Command::parse(&s))
.collect::<anyhow::Result<Vec<_>>>()
} |
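As a quick check on the bit packing in `assemble_command` above, here is an illustrative Python sketch (not part of the assembler) reproducing the `0xe000 | (comp << 6) | (dest << 3) | jump` layout for one instruction; the concrete field values follow the published Hack spec and are an assumption about what `Comp::assemble`, `Dest::assemble`, and `Jump::assemble` return.

```python
# C-instruction layout: 111 a c1-c6 d1-d3 j1-j3 (16 bits).
def pack_c_instruction(comp: int, dest: int, jump: int) -> int:
    return 0xE000 | (comp << 6) | (dest << 3) | jump

# Example "D=M;JGT": comp "M" (a-bit included) = 0b1110000,
# dest "D" = 0b010, jump "JGT" = 0b001 per the Hack specification.
word = pack_c_instruction(0b1110000, 0b010, 0b001)
print(f"{word:016b}")  # 1111110000010001
```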
models_info_m.go | // Beego (http://beego.me/)
// @description beego is an open-source, high-performance web framework for the Go programming language.
// @link http://github.com/astaxie/beego for the canonical source repository
// @license http://github.com/astaxie/beego/blob/master/LICENSE
// @authors astaxie, slene
package orm
import (
"errors"
"fmt"
"os"
"reflect"
)
// single model info
type modelInfo struct {
pkg string
name string
fullName string
table string
model interface{}
fields *fields
manual bool
addrField reflect.Value
uniques []string
isThrough bool
}
// new model info
func newModelInfo(val reflect.Value) (info *modelInfo) {
var (
err error
fi *fieldInfo
sf reflect.StructField
)
info = &modelInfo{}
info.fields = newFields()
ind := reflect.Indirect(val)
typ := ind.Type()
info.addrField = val
info.name = typ.Name()
info.fullName = getFullName(typ)
for i := 0; i < ind.NumField(); i++ {
field := ind.Field(i)
sf = ind.Type().Field(i)
if sf.PkgPath != "" {
continue
}
fi, err = newFieldInfo(info, field, sf)
if err != nil {
if err == errSkipField {
err = nil
continue
}
break
}
added := info.fields.Add(fi)
if !added {
err = fmt.Errorf("duplicate column name: %s", fi.column)
break
}
if fi.pk {
if info.fields.pk != nil {
err = errors.New(fmt.Sprintf("one model must have one pk field only"))
break
} else {
info.fields.pk = fi
}
}
fi.fieldIndex = i
fi.mi = info
fi.inModel = true
}
if err != nil {
fmt.Println(fmt.Errorf("field: %s.%s, %s", ind.Type(), sf.Name, err))
os.Exit(2)
}
return
}
// combine related model info to new model info.
// prepare for relation models query.
func newM2MModelInfo(m1, m2 *modelInfo) (info *modelInfo) | {
info = new(modelInfo)
info.fields = newFields()
info.table = m1.table + "_" + m2.table + "s"
info.name = camelString(info.table)
info.fullName = m1.pkg + "." + info.name
fa := new(fieldInfo)
f1 := new(fieldInfo)
f2 := new(fieldInfo)
fa.fieldType = TypeBigIntegerField
fa.auto = true
fa.pk = true
fa.dbcol = true
fa.name = "Id"
fa.column = "id"
fa.fullName = info.fullName + "." + fa.name
f1.dbcol = true
f2.dbcol = true
f1.fieldType = RelForeignKey
f2.fieldType = RelForeignKey
f1.name = camelString(m1.table)
f2.name = camelString(m2.table)
f1.fullName = info.fullName + "." + f1.name
f2.fullName = info.fullName + "." + f2.name
f1.column = m1.table + "_id"
f2.column = m2.table + "_id"
f1.rel = true
f2.rel = true
f1.relTable = m1.table
f2.relTable = m2.table
f1.relModelInfo = m1
f2.relModelInfo = m2
f1.mi = info
f2.mi = info
info.fields.Add(fa)
info.fields.Add(f1)
info.fields.Add(f2)
info.fields.pk = fa
info.uniques = []string{f1.column, f2.column}
return
} |
|
auth-config.js | module.exports = {
"acme":
{
"displayName": "AcmeCorp",
"clientId": "",
"tenantId": "",
"secret": "",
"oauthServerUrl": ""
},
"allthings":
{
"displayName": "AllThings LTD",
"clientId": "", | "oauthServerUrl": ""
}
} | "tenantId": "",
"secret": "", |
client.py | """Library used by components for communication with Monitor Server
This module provides a low-level interface (connect/Client) and a high-level
interface (run_component) for communication with Monitor Server.
:func:`connect` establishes a single chatter-based connection with Monitor
Server, represented by :class:`Client`. Termination of the connection is
signaled with :meth:`Client.closed`.
Example of low-level interface usage::
client = await hat.monitor.client.connect({
'name': 'client1',
'group': 'group1',
'monitor_address': 'tcp+sbs://127.0.0.1:23010',
'component_address': None})
assert client.info in client.components
try:
await client.closed
finally:
await client.async_close()
:func:`run_component` provides a high-level interface for communication with
Monitor Server. This function first establishes a connection to Monitor
Server and then listens for component changes, calling or cancelling the
`async_run_cb` callback according to the blessing and ready tokens.
When the blessing token matches the ready token, `async_run_cb` is called.
While `async_run_cb` is running, if the blessing token changes, `async_run_cb`
is canceled. If `async_run_cb` finishes or raises an exception, this function
closes the connection to the monitor server and returns the `async_run_cb`
result. If the connection to the monitor server is closed, this function
raises an exception.
Example of high-level interface usage::
async def monitor_async_run(monitor):
await asyncio.sleep(10)
return 13
res = await hat.monitor.client.run_component(
conf={'name': 'client',
'group': 'test clients',
'monitor_address': 'tcp+sbs://127.0.0.1:23010',
'component_address': None},
async_run_cb=monitor_async_run)
assert res == 13
Attributes:
mlog (logging.Logger): module logger
"""
import asyncio
import logging
from hat import chatter
from hat import util
from hat.monitor import common
from hat.util import aio
mlog = logging.getLogger(__name__)
async def connect(conf):
"""Connect to local monitor server
Connection is established once chatter communication is established.
Args:
conf (hat.json.Data): configuration as defined by
``hat://monitor/client.yaml#``
Returns:
Client
"""
client = Client()
client._name = conf['name']
client._group = conf['group']
client._address = conf['component_address']
client._components = []
client._info = None
client._ready = None
client._change_cbs = util.CallbackRegistry()
client._async_group = aio.Group()
client._conn = await chatter.connect(common.sbs_repo,
conf['monitor_address'])
client._async_group.spawn(aio.call_on_cancel, client._conn.async_close)
mlog.debug("connected to local monitor server %s", conf['monitor_address'])
client._async_group.spawn(client._receive_loop)
return client
class Client:
@property
def closed(self):
"""asyncio.Future: closed future"""
return self._async_group.closed
@property
def info(self):
"""Optional[common.ComponentInfo]: client's component info"""
return self._info
@property
def components(self):
"""List[common.ComponentInfo]: global component state"""
return self._components
def register_change_cb(self, cb):
"""Register change callback
Registered callback is called once info and/or components changes.
Args:
cb (Callable[[],None]): callback
Returns:
util.RegisterCallbackHandle
"""
return self._change_cbs.register(cb)
async def async_close(self):
"""Async close"""
await self._async_group.async_close()
def set_ready(self, token):
"""Set ready token
Args:
token (Optional[int]): ready token
"""
if token == self._ready:
return
self._ready = token
self._send_msg_client()
def _send_msg_client(self):
self._conn.send(chatter.Data(
module='HatMonitor',
type='MsgClient',
data=common.create_msg_client_sbs(
name=self._name,
group=self._group,
address=self._address,
ready=self._ready)))
def _set_components(self, msg_server):
if (msg_server.data.module != 'HatMonitor' or
msg_server.data.type != 'MsgServer'):
raise Exception('Message received from server malformed: message '
'MsgServer from HatMonitor module expected')
self._components = [common.component_info_from_sbs(i) | self._info = util.first(
self._components,
lambda i:
i.cid == msg_server.data.data['cid'] and
i.mid == msg_server.data.data['mid'])
self._change_cbs.notify()
async def _receive_loop(self):
try:
self._send_msg_client()
while True:
msg = await self._conn.receive()
self._set_components(msg)
except chatter.ConnectionClosedError:
mlog.debug('connection closed')
finally:
self._async_group.close()
async def run_component(conf, async_run_cb):
"""Run component
This method opens new connection to Monitor server and starts client's
loop which manages blessing/ready states.
When blessing token matches ready token, `async_run_cb` is called. While
`async_run_cb` is running, if blessing token changes, `async_run_cb` is
canceled.
If `async_run_cb` finishes or raises exception, this function closes
connection to monitor server and returns `async_run_cb` result. If
connection to monitor server is closed, this function raises exception.
TODO:
* provide opportunity for user to react to blessing token prior to
setting ready token (additional async_ready_cb)
Args:
conf (hat.json.Data): configuration as defined by
``hat://monitor/client.yaml#``
async_run_cb (Callable[[Client],None]): run callback
Returns:
Any
"""
client = await connect(conf)
try:
while True:
await _wait_until_blessed_and_ready(client)
async_group = aio.Group()
run_future = async_group.spawn(async_run_cb, client)
blessed_and_ready_future = async_group.spawn(
_wait_while_blessed_and_ready, client)
try:
done, _ = await asyncio.wait(
[run_future, blessed_and_ready_future, client.closed],
return_when=asyncio.FIRST_COMPLETED)
if run_future.done():
mlog.debug('async_run_cb finished or raised an exception')
return run_future.result()
if client.closed.done():
raise Exception('connection to monitor server closed!')
finally:
if not client.closed.done():
client.set_ready(None)
await async_group.async_close()
except asyncio.CancelledError:
raise
except Exception as e:
mlog.error('run component exception: %s', e, exc_info=e)
raise
finally:
await client.async_close()
mlog.debug('component closed')
async def _wait_until_blessed_and_ready(client):
queue = aio.Queue()
with client.register_change_cb(lambda: queue.put_nowait(None)):
while (client.info is None or client.info.blessing is None or
client.info.blessing != client.info.ready):
await queue.get_until_empty()
if client.info is None:
continue
client.set_ready(client.info.blessing)
async def _wait_while_blessed_and_ready(client):
queue = aio.Queue()
with client.register_change_cb(lambda: queue.put_nowait(None)):
while (client.info is not None and
client.info.blessing is not None and
client.info.blessing == client.info.ready):
await queue.get_until_empty() | for i in msg_server.data.data['components']] |
intflag.rs | #[doc = r" Value read from the register"]
pub struct R {
bits: u8,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u8,
}
impl super::INTFLAG {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits };
let mut w = W { bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct ALARM0R {
bits: bool,
}
impl ALARM0R {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct SYNCRDYR {
bits: bool,
}
impl SYNCRDYR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct OVFR {
bits: bool,
}
impl OVFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Proxy"]
pub struct _ALARM0W<'a> {
w: &'a mut W,
}
impl<'a> _ALARM0W<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u8) << OFFSET);
self.w.bits |= ((value & MASK) as u8) << OFFSET;
self.w | }
}
#[doc = r" Proxy"]
pub struct _SYNCRDYW<'a> {
w: &'a mut W,
}
impl<'a> _SYNCRDYW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 6;
self.w.bits &= !((MASK as u8) << OFFSET);
self.w.bits |= ((value & MASK) as u8) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _OVFW<'a> {
w: &'a mut W,
}
impl<'a> _OVFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 7;
self.w.bits &= !((MASK as u8) << OFFSET);
self.w.bits |= ((value & MASK) as u8) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
#[doc = "Bit 0 - Alarm 0"]
#[inline]
pub fn alarm0(&self) -> ALARM0R {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u8) != 0
};
ALARM0R { bits }
}
#[doc = "Bit 6 - Synchronization Ready"]
#[inline]
pub fn syncrdy(&self) -> SYNCRDYR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 6;
((self.bits >> OFFSET) & MASK as u8) != 0
};
SYNCRDYR { bits }
}
#[doc = "Bit 7 - Overflow"]
#[inline]
pub fn ovf(&self) -> OVFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 7;
((self.bits >> OFFSET) & MASK as u8) != 0
};
OVFR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u8) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bit 0 - Alarm 0"]
#[inline]
pub fn alarm0(&mut self) -> _ALARM0W {
_ALARM0W { w: self }
}
#[doc = "Bit 6 - Synchronization Ready"]
#[inline]
pub fn syncrdy(&mut self) -> _SYNCRDYW {
_SYNCRDYW { w: self }
}
#[doc = "Bit 7 - Overflow"]
#[inline]
pub fn ovf(&mut self) -> _OVFW {
_OVFW { w: self }
}
} | |
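The write proxies above all follow the same clear-then-set pattern (`w.bits &= !(MASK << OFFSET); w.bits |= (value & MASK) << OFFSET`). A tiny illustrative Python sketch of that pattern, using the OVF field at bit 7 of the 8-bit register as the example:

```python
# Clear the field bit at `offset`, then OR in the requested value,
# mirroring the _ALARM0W/_SYNCRDYW/_OVFW proxies above.
def write_bit(reg: int, offset: int, value: bool) -> int:
    reg &= ~(1 << offset) & 0xFF
    reg |= int(value) << offset
    return reg

print(bin(write_bit(0b0100_0001, 7, True)))   # 0b11000001 (OVF set)
print(bin(write_bit(0b1100_0001, 7, False)))  # 0b1000001  (OVF cleared)
```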
left_right.rs | use crate::*;
test_case!(left_right, async move {
use gluesql_core::{
executor::EvaluateError,
prelude::{Payload, Value::*},
translate::TranslateError,
};
let test_cases = vec![
(
r#"CREATE TABLE Item (name TEXT DEFAULT LEFT("abc", 1))"#,
Ok(Payload::Create),
),
(
r#"INSERT INTO Item VALUES ("Blop mc blee"), ("B"), ("Steven the &long named$ folken!")"#,
Ok(Payload::Insert(3)),
),
("CREATE TABLE SingleItem (id INTEGER)", Ok(Payload::Create)),
(
r#"INSERT INTO SingleItem VALUES (0)"#,
Ok(Payload::Insert(1)),
),
(
"CREATE TABLE NullName (name TEXT NULL)",
Ok(Payload::Create),
),
(
r#"INSERT INTO NullName VALUES (NULL)"#,
Ok(Payload::Insert(1)),
),
(
"CREATE TABLE NullNumber (number INTEGER NULL)",
Ok(Payload::Create),
),
(
r#"INSERT INTO NullNumber VALUES (NULL)"#,
Ok(Payload::Insert(1)),
),
(
"CREATE TABLE NullableName (name TEXT NULL)",
Ok(Payload::Create),
),
(
r#"INSERT INTO NullableName VALUES ('name')"#,
Ok(Payload::Insert(1)),
),
(
r#"SELECT LEFT(name, 3) AS test FROM Item"#,
Ok(select!(
"test"
Str;
"Blo".to_owned();
"B".to_owned();
"Ste".to_owned()
)),
),
(
r#"SELECT RIGHT(name, 10) AS test FROM Item"#,
Ok(select!(
"test"
Str;
"op mc blee".to_owned();
"B".to_owned();
"d$ folken!".to_owned()
)),
),
// TODO Concatenation
/*(
r#"SELECT LEFT((name + 'bobbert'), 10) AS test FROM Item"#,
Ok(select!(
"test"
OptStr;
"Blop mc blee".to_owned();
"Bbobbert".to_owned();
"Steven the".to_owned()
)),
),*/
( | "blue".to_owned()
)),
),
(
r#"SELECT LEFT("blunder", 3) AS test FROM SingleItem"#,
Ok(select!(
"test"
Str;
"blu".to_owned()
)),
),
(
r#"SELECT LEFT(name, 3) AS test FROM NullName"#,
Ok(select_with_null!(test; Null)),
),
(
r#"SELECT LEFT('Words', number) AS test FROM NullNumber"#,
Ok(select_with_null!(test; Null)),
),
(
r#"SELECT LEFT(name, number) AS test FROM NullNumber INNER JOIN NullName ON 1 = 1"#,
Ok(select_with_null!(test; Null)),
),
(
r#"SELECT LEFT(name, 1) AS test FROM NullableName"#,
Ok(select!(
"test"
Str;
"n".to_owned()
)),
),
(
r#"SELECT RIGHT(name, 10, 10) AS test FROM SingleItem"#,
Err(TranslateError::FunctionArgsLengthNotMatching {
name: "RIGHT".to_owned(),
expected: 2,
found: 3,
}
.into()),
),
(
r#"SELECT RIGHT(name) AS test FROM SingleItem"#,
Err(TranslateError::FunctionArgsLengthNotMatching {
name: "RIGHT".to_owned(),
expected: 2,
found: 1,
}
.into()),
),
(
r#"SELECT RIGHT() AS test FROM SingleItem"#,
Err(TranslateError::FunctionArgsLengthNotMatching {
name: "RIGHT".to_owned(),
expected: 2,
found: 0,
}
.into()),
),
(
r#"SELECT RIGHT(1, 1) AS test FROM SingleItem"#,
Err(EvaluateError::FunctionRequiresStringValue("RIGHT".to_string()).into()),
),
(
r#"SELECT RIGHT('Words', 1.1) AS test FROM SingleItem"#,
Err(EvaluateError::FunctionRequiresIntegerValue("RIGHT".to_string()).into()),
),
(
r#"SELECT RIGHT('Words', -4) AS test FROM SingleItem"#,
Err(EvaluateError::FunctionRequiresUSizeValue("RIGHT".to_string()).into()),
),
];
for (sql, expected) in test_cases {
test!(expected, sql);
}
}); | r#"SELECT LEFT('blue', 10) AS test FROM SingleItem"#,
Ok(select!(
"test"
Str; |
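For intuition only, the LEFT/RIGHT semantics exercised by the test cases above map onto simple string slicing (the GlueSQL functions themselves are implemented in Rust; this Python sketch is not part of the test):

```python
def left(s: str, n: int) -> str:
    return s[:n]

def right(s: str, n: int) -> str:
    return s[-n:] if n else ""

print(left("Blop mc blee", 3))    # "Blo"
print(right("Blop mc blee", 10))  # "op mc blee"
```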
test_log_stream.py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import pytest
from channels.testing import WebsocketCommunicator
from backend.accounts.middlewares import BCSChannelAuthMiddlewareStack
from backend.container_service.observability.log_stream.views import LogStreamHandler
@pytest.fixture
def session_id(api_client, project_id, cluster_id, namespace, pod_name, container_name):
response = api_client.post(
f'/api/logs/projects/{project_id}/clusters/{cluster_id}/namespaces/{namespace}/pods/{pod_name}/stdlogs/sessions/', # noqa
{"container_name": container_name},
)
result = response.json()
return result['data']['session_id']
@pytest.mark.skip(reason='暂时跳过标准日志部分单元测试')
@pytest.mark.django_db
@pytest.mark.asyncio
async def test_log_stream(project_id, cluster_id, namesp | session_id):
app = BCSChannelAuthMiddlewareStack(LogStreamHandler.as_asgi())
# Test a normal connection
communicator = WebsocketCommunicator(
app,
f'/ws/logs/projects/{project_id}/clusters/{cluster_id}/namespaces/{namespace}/pods/{pod_name}/stdlogs/stream/?session_id={session_id}', # noqa
)
communicator.scope['url_route'] = {
'kwargs': {
'project_id': project_id,
'cluster_id': cluster_id,
'namespace': namespace,
'pod': pod_name,
}
}
connected, _ = await communicator.connect()
assert connected
# Test sending text
await communicator.send_to(text_data="hello")
# Close out
await communicator.disconnect()
| ace, pod_name, |
logic_delete.rs | use std::fmt::{Debug, Display};
use serde_json::Value;
use crate::core::db::DriverType;
use crate::core::Error;
use crate::sql::rule::SqlRule;
use crate::crud::CRUDTable;
use std::ops::Deref;
use std::collections::HashMap;
use serde::{Serialize, Deserialize, Serializer, Deserializer};
/// Logic Delete Plugin trait
pub trait LogicDelete: Send + Sync + Debug {
///the name
fn name(&self) -> &str {
std::any::type_name::<Self>()
}
/// database column
fn column(&self) -> &str;
/// deleted data,must be i32
fn deleted(&self) -> i32;
/// un deleted data,must be i32
fn un_deleted(&self) -> i32;
/// create_remove_sql
fn create_remove_sql(
&self,
driver_type: &DriverType,
table_name: &str,
table_fields: &str,
sql_where: &str,
) -> Result<String, crate::core::Error>;
}
#[derive(Debug)]
pub struct RbatisLogicDeletePlugin {
pub excludes: Vec<String>,
pub column: String,
pub deleted: i32,
pub un_deleted: i32,
}
impl RbatisLogicDeletePlugin {
pub fn new(column: &str) -> Self {
Self {
excludes: vec![],
column: column.to_string(),
deleted: 1,
un_deleted: 0,
}
}
pub fn new_opt(column: &str, deleted: i32, un_deleted: i32) -> Self {
if deleted == un_deleted {
panic!("[rbaits] deleted can not equal to un_deleted on RbatisLogicDeletePlugin::new_opt(column: &str, deleted: i32, un_deleted: i32)")
}
Self {
excludes: vec![],
column: column.to_string(),
deleted,
un_deleted,
}
}
}
impl LogicDelete for RbatisLogicDeletePlugin {
fn column(&self) -> &str {
self.column.as_str()
}
fn deleted(&self) -> i32 {
self.deleted
}
fn un_deleted(&self) -> i32 {
self.un_deleted
}
fn create_remove_sql(
&self,
driver_type: &DriverType,
table_name: &str,
table_fields: &str,
sql_where: &str,
) -> Result<String, Error> {
return if table_fields.contains(self.column()) {
//fields have column
if sql_where.is_empty() {
let new_sql = format!(
"{} {} {} {} = {}",
crate::sql::TEMPLATE.update.value,
table_name,
crate::sql::TEMPLATE.set.value,
self.column(),
self.deleted()
) + sql_where;
Ok(new_sql)
} else {
let new_sql = format!(
"{} {} {} {} = {} {}",
crate::sql::TEMPLATE.update.value,
table_name,
crate::sql::TEMPLATE.set.value,
self.column(),
self.deleted(),
sql_where.trim_start()
);
Ok(new_sql)
}
} else if !sql_where.is_empty() {
let new_sql = format!(
"{} {} {}",
crate::sql::TEMPLATE.delete_from.value,
table_name,
sql_where.trim_start()
);
Ok(new_sql)
} else {
Err(Error::from("[rbatis] del data must have where sql!"))
};
}
}
/// use this context will not use logic del
pub struct TableNoLogic<T> where T: CRUDTable {
pub table: T,
}
impl<T> Serialize for TableNoLogic<T> where T: CRUDTable {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error> where
S: Serializer {
T::serialize(&self.table, serializer)
}
}
impl<'de, T> Deserialize<'de> for TableNoLogic<T> where T: CRUDTable {
fn deserialize<D>(deserializer: D) -> Result<Self, <D as Deserializer<'de>>::Error> where
D: Deserializer<'de> {
let result = T::deserialize(deserializer)?;
return Ok(TableNoLogic {
table: result,
});
}
}
impl<T> CRUDTable for TableNoLogic<T> where T: CRUDTable {
fn is_use_plugin(plugin_name: &str) -> bool {
if plugin_name.eq(std::any::type_name::<RbatisLogicDeletePlugin>()) {
return false;
}
return true;
} |
fn table_columns() -> String {
T::table_columns()
}
fn formats(driver_type: &DriverType) -> HashMap<String, fn(arg: &str) -> String> {
T::formats(driver_type)
}
fn make_value_sql_arg(
&self,
db_type: &DriverType,
index: &mut usize,
) -> crate::Result<(String, String, Vec<serde_json::Value>)> {
T::make_value_sql_arg(&self.table, db_type, index)
}
}
impl<T> Deref for TableNoLogic<T> where T: CRUDTable {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.table
}
}
impl<T> From<T> for TableNoLogic<T> where T: CRUDTable {
fn from(arg: T) -> Self {
TableNoLogic {
table: arg
}
}
} |
fn table_name() -> String {
T::table_name()
} |
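A compact illustrative sketch (Python, with a hypothetical table name; rbatis itself is the Rust code above) of the branching in `create_remove_sql`: soft-delete via UPDATE when the logic-delete column is among the table fields, otherwise fall back to a plain DELETE, and refuse a DELETE without a WHERE clause.

```python
def create_remove_sql(table, fields, where, column="del", deleted=1):
    # Table has the logic-delete column: rewrite the delete as a soft delete.
    if column in fields:
        return f"UPDATE {table} SET {column} = {deleted} {where}".rstrip()
    # No logic-delete column: a hard delete requires a WHERE clause.
    if where:
        return f"DELETE FROM {table} {where}"
    raise ValueError("[rbatis] del data must have where sql!")

print(create_remove_sql("biz_activity", ["id", "del"], "WHERE id = 1"))
# UPDATE biz_activity SET del = 1 WHERE id = 1
```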
httpclient.py | #!/usr/bin/env python3
# coding: utf-8
# Copyright 2016 Abram Hindle, https://github.com/tywtyw2002, and https://github.com/treedust
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do not use urllib's HTTP GET and POST mechanisms.
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
import sys
import socket
import re
# you may use urllib to encode data appropriately
from urllib.parse import urlparse
def help():
print("httpclient.py [GET/POST] [URL]\n")
class HTTPResponse(object):
def __init__(self, code=200, body=""):
self.code = code
self.body = body
class HTTPClient(object):
#def get_host_port(self,url):
def connect(self, host, port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
return None
def get_code(self, data):
return int(data.splitlines()[0].split()[1])
def get_headers(self,data):
header = data.split("\r\n\r\n")[0].splitlines()
return " ".join(header[0].split()[1:]) + "\r\n" + "\r\n".join(header[1:]) + "\r\n"
def get_body(self, data):
return data.split("\r\n\r\n")[1]
def sendall(self, data):
self.socket.sendall(data.encode('utf-8'))
def close(self):
self.socket.close()
# read everything from the socket
def recvall(self, sock):
buffer = bytearray()
done = False
while not done:
part = sock.recv(1024)
if (part):
buffer.extend(part)
else:
done = not part
return buffer.decode('utf-8')
def GET(self, url, args=None):
code = 500
body = ""
parsed_url = urlparse(url)
host = parsed_url.hostname
port = parsed_url.port
if not port:
if parsed_url.scheme.lower() == 'http':
port = 80
else:
port = 443
path = parsed_url.path if parsed_url.path else "/"
if parsed_url.query:
path += "?"
path += parsed_url.query
self.connect(host, port)
request = "GET {} HTTP/1.1\r\n".format(path)
request += "Host: {}\r\n".format(host)
request += "Accept: */*\r\n"
request += "Connection: close\r\n\r\n"
#print(request)
self.sendall(request)
# print("Request Sent")
response = self.recvall(self.socket)
# print("Response Recieved")
self.close()
code = self.get_code(response)
body = self.get_body(response)
header = self.get_headers(response)
print("\n#####Response Header#####")
print(header)
print("#######################\n")
print("\n*****Response Body*****")
print(body)
print("***********************\n")
return HTTPResponse(code, body)
def POST(self, url, args=None):
code = 500
body = ""
content = ""
parsed_url = urlparse(url)
host = parsed_url.hostname
port = parsed_url.port
if not port:
if parsed_url.scheme.lower() == 'http':
port = 80
else:
port = 443
path = parsed_url.path if parsed_url.path else "/"
if args:
content = ""
for key, value in args.items():
content += "{}={}&".format(key, value)
content = content[:-1]
content_len = len(content) |
request = "POST {} HTTP/1.1\r\n".format(path)
request += "Host: {}\r\n".format(host)
request += "Content-Type: {}\r\n".format("application/x-www-form-urlencoded")
request += "Content-Length: {}\r\n\r\n".format(content_len)
request += "{}\r\n\r\n".format(content)
self.sendall(request)
response = self.recvall(self.socket)
self.close()
code = self.get_code(response)
body = self.get_body(response)
header = self.get_headers(response)
print("\n#####Response Header#####")
print(header)
print("#######################\n")
print("\n*****Response Body*****")
print(body)
print("***********************\n")
return HTTPResponse(code, body)
def command(self, url, command="GET", args=None):
if (command == "POST"):
return self.POST( url, args )
else:
return self.GET( url, args )
if __name__ == "__main__":
client = HTTPClient()
command = "GET"
if (len(sys.argv) <= 1):
help()
sys.exit(1)
elif (len(sys.argv) == 3):
print(client.command( sys.argv[2], sys.argv[1] ))
else:
print(client.command( sys.argv[1] )) |
self.connect(host, port) |
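For orientation, a short hypothetical usage sketch of the client above (the URLs are placeholders); `command` dispatches to `GET` or `POST` and returns an `HTTPResponse` carrying `code` and `body`:

```python
client = HTTPClient()

# GET a page and inspect the parsed status code.
resp = client.command("http://example.com/", "GET")
print(resp.code)

# POST form-encoded args; they are serialized as key=value pairs.
resp = client.command("http://example.com/submit", "POST", {"name": "ada", "id": "42"})
print(resp.body)
```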
index.ts | import SupabaseClient from './SupabaseClient'
import { SupabaseClientOptions, SupabaseRealtimePayload } from './lib/types'
import { User as AuthUser, Session as AuthSession } from '@supabase/gotrue-js'
export * from '@supabase/gotrue-js'
export * from '@supabase/realtime-js'
/**
* Creates a new Supabase Client.
*/
const createClient = (
supabaseUrl: string,
supabaseKey: string,
options?: SupabaseClientOptions
) => {
return new SupabaseClient(supabaseUrl, supabaseKey, options)
}
export {
createClient,
SupabaseClient,
SupabaseClientOptions, | SupabaseRealtimePayload,
AuthUser,
AuthSession,
} | |
RNNSequenceTypeRename.py | """
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.back.replacement import BackReplacementPattern
from mo.graph.graph import Graph
class RNNSequence(BackReplacementPattern):
| """
This transform changes the type of RNNSequence (an internal MO type covering all recurrent layers)
to the correct operation name.
"""
enabled = True
def pattern(self):
return dict(
nodes=[
('rnn_layer', {'type': 'RNNSequence'})
],
edges=[]
)
_supported_ops = ['RNN', 'LSTM', 'GRU']
def replace_pattern(self, graph: Graph, match: dict):
rnn_layer = match['rnn_layer']
assert rnn_layer['op'] in self._supported_ops
rnn_layer['type'] = rnn_layer['op'] + 'Sequence' |
|
one.py | print(*range(1, int(input()) + 1), sep="") |
||
tmmPCECalc.py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 4 12:29:21 2021
@author: aduell
"""
#import numpy as np
from numpy import pi, linspace, array, exp
#import tmm
from tmm import inc_tmm, inc_absorp_in_each_layer, inf
#import pandas as pd
#import tmm_vw as tmm
#import matplotlib.pyplot as plt
from matplotlib.pyplot import plot,figure,xlabel,ylabel,show,ylim,legend
from wpv import Layer, Stack
#import scipy.interpolate, scipy.integrate, pandas, sys
from scipy.interpolate import interp1d
from scipy.integrate import quad, trapz
from scipy.optimize import fsolve#, Bounds
import scipy.optimize
from pandas import read_excel
import sys
#import scipy
#from numericalunits import W, K, nm, m, cm, s, eV, meV, V, mA, c0, hPlanck, kB, e, A, ohm
#import sympy
#import sympy.solvers.solvers
assert sys.version_info >= (3,6), 'Requires Python 3.6+'
from pvlib.pvsystem import singlediode
import tmmPVColor as pvc
import CalculateVLTFromSpectrum as cvs
from CalculateVLTFromSpectrum import AM15G, cieplf
import vegas
# This whole thing uses microns for length
'''We determine the incident angle of the sun shining on the cell. Input is in degrees'''
def giveincangle(angle):
degree = pi/180
return angle*degree
inc_angle = giveincangle(0)
'''We determine the size and scaling of the photon wavelength scale. Units are um'''
num_lams = 500
lams = linspace(0.3,2.5,num=num_lams) #um
'''We are constants and help control units'''
q = 1.602176634e-19 #coulombs. elementary charge
c0 = 299792458 #m/s #Speed of light
hPlanck = 6.62607015e-34 #J*s 4.135667516e-15 #eV*s
kB = 1.380649e-23 #J/K 8.61733034e-5 #eV/K
'''Some units and terms'''
'''Tcell, Ti, To are cell temperature, inside temp and outside temp. Always in kelvin'''
'''Ui and Uo are the overall heat-transfer coefficients for inside and outside. W/(m**2 *K)'''
'''AbsorberLayer is a number indicating the photoactive layer. If the fourth layer is the PV layer, input is 4'''
'''Rs is series resistance, Rsh is shunt resistance in ohms. See pveducation.org for more info'''
'''eta is the electron-hole pair extraction efficiency term. eta times all absorbed light in the PV layer gives the EQE'''
'''n = diode ideality factor. Used in singlediode equation
Ns = number of cells in series. Used in singlediode equation'''
'''Rtot is total thermal resistance of the window'''
'''We are all the different materials currently available
Thickness is in microns'''
def Glass(Thickness = 6000):
return Layer(Thickness,'nkLowFeGlass','i') | def MAPI(Thickness = 0.130):
return Layer(Thickness,'nkMAPI','c')
def AZO(Thickness = 0.200):
return Layer(Thickness,'nkAZO','c')
def ITO(Thickness = 0.200):
return Layer(Thickness,'nkITO','c')
def ITOlowE(Thickness = 0.075):
return Layer(Thickness,'nkITO','c')
def SnO2(Thickness = 0.05):
return Layer(Thickness,'nkSnO2','c')
def SnO2lowE(Thickness = 0.030):
return Layer(Thickness,'nkSnO2','c')
def SnO2lowEfat(Thickness = 0.050):
return Layer(Thickness,'nkSnO2','c')
def SiO2(Thickness = 0.024):
return Layer(Thickness,'nkSiO2','c')
def NiO(Thickness = 0.050):
return Layer(Thickness,'nkNiO','c')
def Ag(Thickness = 0.015):
return Layer(Thickness,'nkAg','c')
def TiO2lowE(Thickness = 0.030):
return Layer(Thickness,'nkTiO2','c')
def TiO2lowEfat(Thickness = 0.060):
return Layer(Thickness,'nkTiO2','c')
def Bleach(Thickness = 0.370):
return Layer(Thickness,'nkBleach','c')
def ClAlPc(Thickness = 0.300):
return Layer(Thickness,'nkClAlPc','c')
def C60(Thickness = 0.200):
return Layer(Thickness,'nkC60','c')
def IR(Thickness = 0.060):
return Layer(Thickness,'nkPTB7_ThIEICO_4F','c')
def MAPBr(Thickness = 0.500):
return Layer(Thickness,'nkMAPbBr3','c')
def EVA(Thickness = 3000):
return Layer(Thickness,'nkEVA','i')
'''We are boundary conditions corresponding to each material type
Can be changed to tune optimization range'''
GlassBound = (5999,6001)
TiO2Bound = (0.025,.1)
FTOBound = (0.1,0.5)
MAPIBound = (.06,.260)
AZOBound = (.1,.4)
ITOBound = (.1,.4)
ITOlowEBound = (0.03,.15)
SnO2Bound = (.025,.1)
SnO2lowEBound = (.015,.06)
SnO2lowEfatBound = (0.025,.1)
SiO2Bound = (.012,.05)
NiOBound = (.025,.1)
AgBound = (.0149, .0151)
TiO2lowEBound = (.015, .070)
TiO2lowEfatBound = (.03,.12)
BleachBound = (.180, .500)
ClAlPcBound = (.150, .600)
C60Bound = (.100,.400)
IRBound = (.030, .12)
MAPBrBound = (.250,1)
EVABound = (2999,3001)
'''I assemble a list of layer objects using Thicknesses and Materials'''
def GiveLayers(Thickness,Materials):
x = len(Materials)
if x == len(Thickness):
Layers = []
for i in range(x):
Layers.append(Materials[i](Thickness[i]))
return Layers
else:
raise ValueError ('layers and Thickness lengths do not match')
'''I give a list of boundaries from a list of materials. Dict is a dictionary containing the boundary conditions
All items in the dictionary are labelled as 'Material'+'Bound' '''
'''
def GiveBounds(Materials, DictBound):
x = len(Materials)
Bounds = []
for i in range(x):
Bounds.append(DictBound[Materials[i].__name__ + 'Bound'])
Bounds = array(Bounds)
return Bounds
'''
'''I produce a Bounds object that defines the boundary conditions for optimization
The version above can be used to produce a list of bounds rather than an object'''
def GiveBounds(Materials, DictBound):
x = len(Materials)
lb = []
ub = []
for i in range(x):
lb.append(DictBound[Materials[i].__name__ + 'Bound'][0])
for i in range(x):
ub.append(DictBound[Materials[i].__name__ + 'Bound'][1])
bounds = scipy.optimize.Bounds(lb,ub)
return bounds
'''I give a list of thicknesses from a list of materials. Dict is a dictionary containing the thickness values
All items in the dictionary are labelled as 'Material'+'Th' '''
def GiveThicks(Materials, DictTh):
x = len(Materials)
Th = []
for i in range(x):
Th.append(DictTh[Materials[i].__name__ + 'Th'])
return Th
'''Calculates Spectra Based on the layers of the cell
AbsorberLayer is an integer giving the position of the PV layer in the stack. Currently supports 1 PV layer'''
def Spectra(layers, AbsorberLayer):
thicks = [inf]
iorcs = ['i']
for layer in layers:
thicks.append(layer.d)
iorcs.append(layer.i_or_c)
thicks.append(inf)
iorcs.append('i')
thicks_bw = thicks[::-1]
iorcs_bw = iorcs[::-1]
Ts = []
Rfs = []
Rbs = []
AbsByAbsorbers = []
#EQEs2 = []
#IREQEs = []
layerchoice = AbsorberLayer
#layerchoice2 = 5
for lam in lams:
nks = [1]
for layer in layers:
nks.append(layer.nk(lam))
nks.append(1)
nks_bw = nks[::-1]
front_spol = inc_tmm('s',nks,thicks,iorcs,inc_angle,lam)
front_ppol = inc_tmm('p',nks,thicks,iorcs,inc_angle,lam)
back_spol = inc_tmm('s',nks_bw,thicks_bw,iorcs_bw,inc_angle,lam)
back_ppol = inc_tmm('p',nks_bw,thicks_bw,iorcs_bw,inc_angle,lam)
AbsByAbsorber_spol = inc_absorp_in_each_layer(front_spol)[layerchoice]
AbsByAbsorber_ppol = inc_absorp_in_each_layer(front_ppol)[layerchoice]
AbsByAbsorbers.append( (AbsByAbsorber_spol + AbsByAbsorber_ppol) / 2. )
# EQE_spol2 = tmm.inc_absorp_in_each_layer(front_spol)[layerchoice2]
# EQE_ppol2 = tmm.inc_absorp_in_each_layer(front_ppol)[layerchoice2]
# EQEs2.append( (EQE_spol2 + EQE_ppol2) / 2. )
Rfs.append( (front_spol['R']+front_ppol['R']) / 2.)
Rbs.append( (back_spol['R']+back_ppol['R']) / 2.)
Ts.append( (front_spol['T']+front_ppol['T']) / 2. )
Ts = array(Ts)
Rfs = array(Rfs)
Rbs = array(Rbs)
As = 1-Ts-Rfs
sanities = Ts+Rfs+As
AbsByAbsorbers = array(AbsByAbsorbers)
Spectra = {'AbsByAbsorbers':AbsByAbsorbers, 'Ts':Ts,'Rfs':Rfs,'Rbs':Rbs,'As':As,'Total':sanities}
return Spectra
''' Here I calculate VLT and spit it out to the screen'''
'''Gives a spectrum of VLT. Used for plotting'''
def VLTSpectrum(layers):
return Stack(layers)
'''Gives VLT as a single number'''
def VLT(layers):
VLTstack=Stack(layers)
return VLTstack.get_visible_light_transmission(lams,inc_angle)
'''This gives VLT as a single number. eliminates
need to recalculate AM15G and cieplf every iteration. Unclear if this will work for
optimization'''
def getFancyVLT(layers):#,lamrange,inc_angle):
integ = vegas.Integrator([lams])
Trans=Stack(layers)
numerator = integ(lambda lam: AM15G(lam)*cieplf(lam)*Trans.get_RAT(lam,inc_angle)[2], nitn=10, neval=100)[0]
denominator = integ(lambda lam: AM15G(lam)*cieplf(lam), nitn=10, neval=100)[0]
VLT = numerator/denominator
return VLT.mean
'''Gives minimum and maximum VLT based exclusively on the PV layer.
Only useful for judging VLT constraint for a given PV material
Requires input of single absorber layer with a tuple of (lb,ub)'''
def GiveMinMaxVLT(AbsorberType, Bounds):
minThick = GiveLayers([Bounds[0]], [AbsorberType])
maxThick = GiveLayers([Bounds[1]], [AbsorberType])
minimum = VLT(maxThick)
maximum = VLT(minThick)
return {'Material':AbsorberType.__name__,'minVLT':minimum, 'maxVLT':maximum, 'minThick':Bounds[0],
'maxThick':Bounds[1]}
'''Gives minimum and maximum VLT based exclusively on the PV layer.
Requires list of materials, absorbing layer, and absorber bounds'''
def GiveMinMaxVLTFromMaterials(Materials, AbsorberLayer, Bounds):
AbsorberType = Materials[AbsorberLayer-1]
minThick = GiveLayers([Bounds[0]], [AbsorberType])
maxThick = GiveLayers([Bounds[1]], [AbsorberType])
minimum = VLT(maxThick)
maximum = VLT(minThick)
return {'Material':AbsorberType.__name__,'minVLT':minimum, 'maxVLT':maximum, 'minThick':Bounds[0],
'maxThick':Bounds[1]}
# ******************** Here I add PCE calculation *********************#
'''This stuff imports a spreadsheet of the solar spectrum'''
#worksheet = pandas.read_excel('https://www.nrel.gov/grid/solar-resource/assets/data/astmg173.xls')
worksheet = read_excel('./Data/ASTMG173.xls')#('https://www.nrel.gov/grid/solar-resource/assets/data/astmg173.xls')
#worksheet = pandas.read_excel('/Users/lwheeler/Code/pv-window-bem/Data/astmg173.xls')
downloaded_array = array(worksheet)
# Wavelength is in column 0, AM1.5G data is column 2
AM15 = downloaded_array[1:, [0,2]]
# The first line should be 280.0 , 4.7309E-23
# The last line should be 4000.0, 7.1043E-03
# print(AM15)
# Interpolate to get a continuous function which I will be able to do integrals on:
'''Interpolated solar spectrum
when using, inputs must be within 300-2500 nm'''
AM15interp = interp1d(AM15[:,0]/1000, AM15[:,1])
# Here’s the plot, it looks correct:
'''Plot of the solar spectrum for verification'''
'''
y_values = np.array([AM15interp(x) for x in lams])
figure()
plot(lams , y_values)
xlabel("Wavelength (nm)")
ylabel("Spectral intensity (W/m$^2$/nm)")
title("Light from the sun");
show()
'''
'''I convert wavelength to energy. E_min and max are used for integration limits '''
Ephoton = hPlanck * c0 / lams *1e6 #J
E_min = min(Ephoton) #J energy units from hPlanck
E_max = max(Ephoton) #J energy units from hPlanck
'''I give the number of photons per......'''
def SPhotonsPerTEA(Ephoton):
λ = hPlanck * c0 / Ephoton *1e6 #um
return AM15interp(λ) * (1 / Ephoton) * (hPlanck * c0 / Ephoton**2) * 1e9
'''I give the power for each......'''
def PowerPerTEA(Ephoton):
return Ephoton * SPhotonsPerTEA(Ephoton)
'''I give the solar constant which is the W/m*2 emitted by the sun. Should be ~1000'''
def Solar_Constant(Ephoton):
#PowerPerTEA = lambda E : E * SPhotonsPerTEA(E)
return quad(PowerPerTEA,E_min,E_max, full_output=1)[0]
# quad() is ordinary integration; full_output=1 is (surprisingly) how you hide
# the messages warning about poor accuracy in integrating.
'''This is the solar constant value. It is called by optimization and used in a variety of functions here
Should always be ~1000'''
solar_constant = Solar_Constant(Ephoton)
'''I return an interpolated function of a spectrum relative to photon wavelength. Used for plotting'''
def GivelamsInterp(Parameter):
Curve = Parameter.round(8)
return interp1d(lams, Curve)
'''I return an interpolated function of a spectrum relative to photon energy'''
def GiveEInterp(Parameter):
Curve = Parameter.round(8)
return interp1d(Ephoton, Curve)
'''I give Q based on a given spectrum. Units are W/m^2
Input is a spectrum interpolated with respect to energy, E
eta should only be used if looking at a PV layer. Otherwise it is set to 1'''
def GiveQ(Spectra, eta = 1):#Spectra must be an interpolated function
def integrand(E):
return eta * Spectra(E) * PowerPerTEA(E)
return quad(integrand, E_min, E_max, full_output=1)[0]
'''
#trapz calcs
def GiveQ(Spectra, eta = 1):#Spectra must be an array
integrand = eta*Spectra*PowerPerTEA(Ephoton)
return -np.trapz(integrand, Ephoton)
'''
'''
def GivePhotons(Spectra, eta):#Spectra must be an interpolated function
def integrand(E):
return eta * Spectra(E) * SPhotonsPerTEA(E)
return quad(integrand, E_min, E_max)[0]
'''
# Here I input the spectrum of photons absorbed by the absorber material (Absorbed)
# and the electron-hole pair extraction efficiency (eta). EQE = eta * Absorbed
'''I give the rate of recombination for the solar cell, Units are photons/(s*m**2)'''
def RR0(eta,Absorbed,Tcell):
integrand = lambda E : eta * Absorbed(E) * (E)**2 / (exp(E / (kB * Tcell)) - 1)
integral = quad(integrand, E_min, E_max, full_output=1)[0]
return ((2 * pi) / (c0**2 * hPlanck**3)) * integral# / 1.60218e-19 #J/eV
#units = photons/(s*m**2)
'''I give the amount of energy converted to electricity in terms of photons, units are photons(s/m**2)'''
def Generated(eta,Absorbed):
integrand = lambda E : eta * Absorbed(E) * SPhotonsPerTEA(E)
# integral = quad(integrand, E_min, E_max, full_output=1)[0]
return quad(integrand, E_min, E_max, full_output=1)[0]
#units photons/(s*m**2)
'''
#Using trapezoidal rule for integration instaed of quad
#AbsByAbsorbers is an aray of intensities, not an interpolated function.
def RR0(eta,Absorbed,Tcell):
AbsByAbsorbers = AbsByAbsorbers.round(8)
integrand = eta * AbsByAbsorbers * (Ephoton)**2 / (np.exp(Ephoton / (kB * Tcell)) - 1)
integral = trapz(integrand, Ephoton)
return ((2 * np.pi) / (c0**2 * hPlanck**3)) * integral
def Generated(eta,Absorbed):
Absorbed = Absorbed.round(8)
integrand = eta * Absorbed * SPhotonsPerTEA(Ephoton)
# integral = quad(integrand, E_min, E_max, full_output=1)[0]
return np.trapz(integrand, Ephoton)
'''
'''I use the single diode equation to return the max power of the cell in watts
Check PVlib documentation for details'''
def Give_Pmp(eta, Absorbed, Rs, Rsh, Tcell, n = 1, Ns = 1):
data = singlediode(Generated(eta, Absorbed)*q, RR0(eta, Absorbed,Tcell)*q, Rs, Rsh, n*Ns*kB*Tcell/q, ivcurve_pnts = 500)
return data['p_mp']
'''I calculate the equilibrium temperature of the cell assuming the cell is infinitely thin
TotalAbs is the full absorptance of the stack as an array of intensities, uninterpolated.
Absorbed is PV layer absorptance interpolated
Temperature calculation is implicit so the numerical solver fsolve is used.
This equation is derived from Wheeler and Wheeler Detailed Balance Analysis of Photovoltaic Windows'''
def TcellCalc(TotalAbs, eta, Ti,To, Absorbed, Ui, Uo, Rs, Rsh):
AbsTotal = GiveEInterp(TotalAbs)
Qabs = GiveQ(AbsTotal)
Temp = lambda Tcell: (Qabs - Give_Pmp(eta,Absorbed,Rs,Rsh, Tcell) + Ui*Ti + Uo*To)/(Ui + Uo)-Tcell
return fsolve(Temp, 300)[0]
'''I use the single diode equation to produce an IV curve and power plot
I also return related values such as Voc, Isc, and Pmp in units volts, amps, and watts
See pvlib singlediode equation for more information'''
def GiveIVData(eta, Absorbed, Rs, Rsh,Tcell, n = 1, Ns = 1):
data = singlediode(Generated(eta, Absorbed)*q, RR0(eta, Absorbed, Tcell)*q, Rs, Rsh, n*Ns*kB*Tcell/q, ivcurve_pnts = 500)
Isc = data['i_sc']
Voc = data['v_oc']
Imp = data['i_mp']
Vmp = data['v_mp']
Pmp = data['p_mp']
Vvalues = array(data['v'])
Ivalues = array(data['i'])
#print('Isc = ', Isc, ', Voc = ', Voc, ', Imp = ', Imp, ', Vmp = ', Vmp, ', Pmp =', Pmp)
figure()
plot(Vvalues,Ivalues, label = 'IV')
xlabel('Voltage, (V)')
ylabel('Current (A) or Power (W/m^2)')
P_values = array([Ivalues * Vvalues])
plot(Vvalues , P_values.T, label = 'Power')
ylim(-1, 150)
legend(loc = 'upper right')
show()
return data
'''I give the solar heat gain coefficient, a unitless number between 0 and 1
Ts is the transmission spectrum. Must be a list of intensities, not an interpolated function
This equation comes from a combination of Wheeler and Wheeler Detailed Balance Analysis of Photovoltaic Windows
and equation 3.18 from Fundamentals of Heat and Mass Transfer 6ed Incropera'''
def SHGC(Ts, Ti, To, Tcell, Ui):
#Tcell = TcellCalc(As,Ti,To,eta,Absorbed)
Rtot = 1/Ui #This is approximate because Ui is assumed
#Included in GiveQ for simplicity but should not be used for calculating SHGC
TransTotal = GiveEInterp(Ts)
Qtrans = GiveQ(TransTotal,1)
return (Qtrans + Ui*(Tcell-Ti) - ((To-Ti)/Rtot))/solar_constant
'''I give max efficiency also called PCE'''
'''Absorbed must be an interpolated function of the absorption spectrum of the PV layer'''
def max_efficiency(eta,Absorbed,Tcell, Rs, Rsh):
#Tcell = TcellCalc(As,Ti,To,eta,Absorbed)
return Give_Pmp(eta, Absorbed, Rs, Rsh, Tcell) / solar_constant
'''I give important info about a solar cell such as PCE, SHGC, Temperature, etc'''
def GiveImportantInfo(Thickness, Materials,eta,Ti,To,Ui,Uo,Rs,Rsh,AbsorberLayer,Angle=0):
global inc_angle
inc_angle = giveincangle(Angle)
layers = GiveLayers(Thickness,Materials)
spectra = Spectra(layers ,AbsorberLayer)
AbsByAbsorbers = spectra['AbsByAbsorbers']
Ts = spectra['Ts']
Rfs = spectra['Rfs']
Rbs = spectra['Rbs']
As = spectra['As']
sanities = spectra['Total']
Absorbed = GiveEInterp(AbsByAbsorbers)
VLTcalc = cvs.getVLT(Ts,lams)#VLT(layers)
Tcell = TcellCalc(As,eta, Ti,To, Absorbed, Ui, Uo, Rs, Rsh)
#Absorbed = tpc.GiveEInterp(tpc.Spectra(tpc.GiveLayers(Thickness, Materials),4)['AbsByAbsorbers'])
data = GiveIVData(eta, Absorbed, Rs, Rsh,Tcell, n = 1, Ns = 1)
Isc = data['i_sc']
Voc = data['v_oc']
Imp = data['i_mp']
Vmp = data['v_mp']
Pmp = data['p_mp']
SHGCcalc = SHGC(Ts, Ti, To, Tcell, Ui)
PCE = max_efficiency(eta,Absorbed,Tcell, Rs, Rsh)
#Spectral Curves
figure()
plot(lams,Rfs,color='magenta',marker=None,label="$R_f$")
plot(lams,Ts,color='green',marker=None,label="$T$")
plot(lams,Rbs,color='purple',marker=None,label="$R_b$")
plot(lams,As,color='black',marker=None,label="A")
plot(lams,AbsByAbsorbers,color='black',linestyle='--',marker=None,label="AbsByAbsorber")
plot(lams,sanities,color='gold',marker=None,label="R+A+T")
plot(lams,VLTSpectrum(layers).cieplf(lams),color='red',marker=None,label="photopic")
xlabel('wavelength, $\mu$m')
ylabel('Intensity')
legend(loc = 'upper right')
show()
EphotoneV = Ephoton*6.241509e+18
figure()
plot(EphotoneV, Ts, color='magenta',marker=None,label="$T$")
plot(EphotoneV, Rfs,color='green',marker=None,label="$R_f$")
plot(EphotoneV, Rbs,color='orange',marker=None,label="$R_b$")
plot(EphotoneV, AbsByAbsorbers,color='black',marker=None,label="Abs")
#plot(Ephoton,tpc.VLTSpectrum(layers).cieplf(lams),color='red',marker=None,label="photopic")
legend(loc = 'upper right')
xlabel('Energy, eV')
ylabel('Intensity')
show()
pvc.GiveColorSwatch(Ts, Rfs)
pvc.plot_xy_on_fin(Ts, Rfs)
print('PCE = ',PCE,'VLT = ', VLTcalc, 'SHGC = ',SHGCcalc, 'Tcell = ',Tcell)#,'time to calculate PCE from scratch in seconds = ', TimePCE, 'Time to run optimizer in minutes = ',TimeOptimize/60)
return {'PCE':PCE, 'VLT':VLTcalc, 'SHGC':SHGCcalc, 'Tcell':Tcell,'Isc':Isc, 'Voc': Voc, 'Imp': Imp, 'Vmp': Vmp,'Pmp': Pmp} | def TiO2(Thickness = 0.050):
return Layer(Thickness,'nkTiO2','c')
def FTO(Thickness = 0.250):
return Layer(Thickness,'nkFTO','c') |
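As a numeric sanity check of the wavelength-to-energy conversion used above (`Ephoton = hPlanck * c0 / lams * 1e6`, with `lams` in micrometres), a small standalone sketch:

```python
# E = h*c/lambda; constants match those defined in the script.
hPlanck = 6.62607015e-34   # J*s
c0 = 299792458             # m/s
lam_um = 0.55              # green light, micrometres
E_joule = hPlanck * c0 / (lam_um * 1e-6)
print(E_joule)                       # ~3.61e-19 J
print(E_joule / 1.602176634e-19)     # ~2.25 eV
```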
gossip_test.go | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvserver_test
import (
"context"
"fmt"
"reflect"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/retry"
"github.com/stretchr/testify/require"
)
func TestGossipFirstRange(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
tc := testcluster.StartTestCluster(t, 3,
base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
})
defer tc.Stopper().Stop(context.Background())
errors := make(chan error, 1)
descs := make(chan *roachpb.RangeDescriptor)
unregister := tc.Servers[0].Gossip().RegisterCallback(gossip.KeyFirstRangeDescriptor,
func(_ string, content roachpb.Value) {
var desc roachpb.RangeDescriptor
if err := content.GetProto(&desc); err != nil {
select {
case errors <- err:
default:
}
} else {
select {
case descs <- &desc:
case <-time.After(45 * time.Second):
t.Logf("had to drop descriptor %+v", desc)
}
}
},
// Redundant callbacks are required by this test.
gossip.Redundant,
)
// Unregister the callback before attempting to stop the stopper to prevent
// deadlock. This is still flaky in theory since a callback can fire between
// the last read from the channels and this unregister, but testing has
// shown this solution to be sufficiently robust for now.
defer unregister()
// Wait for the specified descriptor to be gossiped for the first range. We
// loop because the timing of replica addition and lease transfer can cause
// extra gossiping of the first range.
waitForGossip := func(desc roachpb.RangeDescriptor) {
for {
select {
case err := <-errors:
t.Fatal(err)
case gossiped := <-descs:
if reflect.DeepEqual(&desc, gossiped) {
return
}
log.Infof(context.Background(), "expected\n%+v\nbut found\n%+v", desc, gossiped)
}
}
}
// Expect an initial callback of the first range descriptor.
select {
case err := <-errors:
t.Fatal(err)
case <-descs:
}
// Add two replicas. The first range descriptor should be gossiped after each
// addition.
var desc roachpb.RangeDescriptor
firstRangeKey := keys.MinKey
for i := 1; i <= 2; i++ {
var err error
if desc, err = tc.AddVoters(firstRangeKey, tc.Target(i)); err != nil {
t.Fatal(err)
}
waitForGossip(desc)
}
// Transfer the lease to a new node. This should cause the first range to be
// gossiped again.
if err := tc.TransferRangeLease(desc, tc.Target(1)); err != nil {
t.Fatal(err)
}
waitForGossip(desc)
// Remove a non-lease holder replica.
desc, err := tc.RemoveVoters(firstRangeKey, tc.Target(0))
if err != nil {
t.Fatal(err)
}
waitForGossip(desc)
// TODO(peter): Re-enable or remove when we've resolved the discussion
// about removing the lease-holder replica. See #7872.
// // Remove the lease holder replica.
// leaseHolder, err := tc.FindRangeLeaseHolder(desc, nil)
// desc, err = tc.RemoveVoters(firstRangeKey, leaseHolder)
// if err != nil {
// t.Fatal(err)
// }
// select {
// case err := <-errors:
// t.Fatal(err)
// case gossiped := <-descs:
// if !reflect.DeepEqual(desc, gossiped) {
// t.Fatalf("expected\n%+v\nbut found\n%+v", desc, gossiped)
// }
// }
}
// TestGossipHandlesReplacedNode tests that we can shut down a node and
// replace it with a new node at the same address (simulating a node getting
// restarted after losing its data) without the cluster breaking.
func TestGossipHandlesReplacedNode(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
// Skipping as part of test-infra-team flaky test cleanup.
skip.WithIssue(t, 50024)
// As of Nov 2018 it takes 3.6s.
skip.UnderShort(t)
ctx := context.Background()
// Shorten the raft tick interval and election timeout to make range leases
// much shorter than normal. This keeps us from having to wait so long for
// the replaced node's leases to time out, but has still shown itself to be
// long enough to avoid flakes.
serverArgs := base.TestServerArgs{
Addr: util.IsolatedTestAddr.String(),
Insecure: true, // because our certs are only valid for 127.0.0.1
RetryOptions: retry.Options{
InitialBackoff: 10 * time.Millisecond,
MaxBackoff: 50 * time.Millisecond,
},
}
serverArgs.RaftTickInterval = 50 * time.Millisecond
serverArgs.RaftElectionTimeoutTicks = 10
tc := testcluster.StartTestCluster(t, 3,
base.TestClusterArgs{
ServerArgs: serverArgs,
})
defer tc.Stopper().Stop(context.Background())
// Take down the first node and replace it with a new one.
oldNodeIdx := 0
newServerArgs := serverArgs
newServerArgs.Addr = tc.Servers[oldNodeIdx].ServingRPCAddr()
newServerArgs.SQLAddr = tc.Servers[oldNodeIdx].ServingSQLAddr()
newServerArgs.PartOfCluster = true
newServerArgs.JoinAddr = tc.Servers[1].ServingRPCAddr()
log.Infof(ctx, "stopping server %d", oldNodeIdx)
tc.StopServer(oldNodeIdx)
tc.AddAndStartServer(t, newServerArgs)
tc.WaitForNStores(t, tc.NumServers(), tc.Server(1).GossipI().(*gossip.Gossip))
// Ensure that all servers still running are responsive. If the two remaining
// original nodes don't refresh their connection to the address of the first
// node, they can get stuck here.
for i, server := range tc.Servers {
if i == oldNodeIdx {
continue
}
kvClient := server.DB()
if err := kvClient.Put(ctx, fmt.Sprintf("%d", i), i); err != nil {
t.Errorf("failed Put to node %d: %+v", i, err)
}
}
}
// TestGossipAfterAbortOfSystemConfigTransactionAfterFailureDueToIntents tests
// that failures to gossip the system config due to intents are rectified when
// later intents are aborted.
func TestGossipAfterAbortOfSystemConfigTransactionAfterFailureDueToIntents(t *testing.T) | {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{})
defer tc.Stopper().Stop(ctx)
require.NoError(t, tc.WaitForFullReplication())
db := tc.Server(0).DB()
txA := db.NewTxn(ctx, "a")
txB := db.NewTxn(ctx, "b")
require.NoError(t, txA.SetSystemConfigTrigger(true /* forSystemTenant */))
db1000 := dbdesc.NewInitial(1000, "1000", security.AdminRoleName())
require.NoError(t, txA.Put(ctx,
keys.SystemSQLCodec.DescMetadataKey(1000),
db1000.DescriptorProto()))
require.NoError(t, txB.SetSystemConfigTrigger(true /* forSystemTenant */))
db2000 := dbdesc.NewInitial(2000, "2000", security.AdminRoleName())
require.NoError(t, txB.Put(ctx,
keys.SystemSQLCodec.DescMetadataKey(2000),
db2000.DescriptorProto()))
const someTime = 10 * time.Millisecond
clearNotifications := func(ch <-chan struct{}) {
for {
select {
case <-ch:
case <-time.After(someTime):
return
}
}
}
systemConfChangeCh := tc.Server(0).GossipI().(*gossip.Gossip).RegisterSystemConfigChannel()
clearNotifications(systemConfChangeCh)
require.NoError(t, txB.Commit(ctx))
select {
case <-systemConfChangeCh:
// This case is rare but happens sometimes. We gossip the node liveness
// in a bunch of cases so we just let the test finish here. The important
// thing is that sometimes we get to the next phase.
t.Log("got unexpected update. This can happen for a variety of " +
"reasons like lease transfers. The test is exiting without testing anything")
return
case <-time.After(someTime):
// Did not expect an update so this is the happy case
}
// Roll back the transaction which had laid down the intent which blocked the
// earlier gossip update, make sure we get a gossip notification now.
const aLongTime = 20 * someTime
require.NoError(t, txA.Rollback(ctx))
select {
case <-systemConfChangeCh:
// Got an update.
case <-time.After(aLongTime):
t.Fatal("expected update")
}
} |
|
cloud.py | import taichi as ti
class Atom:
def | (self, radius, dim=3):
self.radius = radius
self.dim = dim
self.color = ti.Vector.field(dim, ti.f32, shape=1)
self.pos = ti.Vector.field(dim, ti.f32, shape=1)
def display(self, scene):
scene.particles(self.pos, self.radius, per_vertex_color=self.color)
@ti.data_oriented
class Proton(Atom):
@ti.kernel
def initialize(self, color: ti.template(), pos: ti.template()):
self.color[0] = color
self.pos[0] = pos
@ti.data_oriented
class Neutron(Atom):
@ti.kernel
def initialize(self, color: ti.template(), pos: ti.template()):
self.color[0] = color
self.pos[0] = pos
@ti.data_oriented
class Electron(Atom):
def __init__(self, radius, dim=3):
super().__init__(radius)
self.vel = ti.Vector.field(dim, ti.f32, shape=1)
@ti.kernel
def initialize(self, color: ti.template(), pos: ti.template(), vel: ti.template()):
self.color[0] = color
self.pos[0] = pos
self.vel[0] = vel
@ti.data_oriented
class ElectronCloud:
def __init__(self):
self.protons = []
self.neutrons = []
self.electrons = []
self.step = 0
self.time = 0.0
def add_proton(self, proton):
self.protons.append(proton)
def add_neutron(self, neutron):
self.neutrons.append(neutron)
def add_electron(self, electron):
self.electrons.append(electron)
def display(self, scene):
for i in self.protons:
i.display(scene)
for j in self.neutrons:
j.display(scene)
for k in self.electrons:
k.display(scene)
| __init__ |
Category.js | const { Model, DataTypes } = require('sequelize');
const sequelize = require('../config/connection');
class Category extends Model {}
Category.init(
{
id: {
type: DataTypes.INTEGER,
allowNull: false,
primaryKey: true, | category_name: {
type: DataTypes.STRING,
allowNull: false,
},
},
{
sequelize,
timestamps: false,
freezeTableName: true,
underscored: true,
modelName: 'category',
}
);
module.exports = Category; | autoIncrement: true,
}, |
svg_test.py | import pytest
import numpy as np
import cirq
from cirq.contrib.svg import circuit_to_svg
def test_svg():
a, b, c = cirq.LineQubit.range(3)
svg_text = circuit_to_svg(
cirq.Circuit(
cirq.CNOT(a, b),
cirq.CZ(b, c),
cirq.SWAP(a, c),
cirq.PhasedXPowGate(exponent=0.123, phase_exponent=0.456).on(c),
cirq.Z(a),
cirq.measure(a, b, c, key='z'),
cirq.MatrixGate(np.eye(2)).on(a),
))
assert '<svg' in svg_text
assert '</svg>' in svg_text
def test_svg_noise():
noise_model = cirq.ConstantQubitNoiseModel(cirq.DepolarizingChannel(p=1e-3))
q = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.X(q))
circuit = cirq.Circuit(noise_model.noisy_moments(circuit.moments, [q]))
svg = circuit_to_svg(circuit)
assert '>D(0.001)</text>' in svg
def test_validation():
with pytest.raises(ValueError):
circuit_to_svg(cirq.Circuit())
| q0 = cirq.LineQubit(0)
with pytest.raises(ValueError):
circuit_to_svg(
cirq.Circuit([cirq.Moment([cirq.X(q0)]),
cirq.Moment([])])) | |
issue-10392.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct A { foo: int }
struct B { a: int, b: int, c: int }
fn mka() -> A { fail!() }
fn mkb() -> B { fail!() }
fn test() {
let A { foo, } = mka();
let A {
foo,
} = mka();
let B { a, b, c, } = mkb();
match mka() {
A { foo: _foo, } => {}
}
match Some(mka()) {
Some(A { foo: _foo, }) => {}
None => {}
}
}
pub fn main() {
if false { test() } | } |
|
scheduling_group_request_builder.go | package item
import (
ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9 "github.com/microsoft/kiota/abstractions/go"
i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization"
i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87 "github.com/microsoftgraph/msgraph-sdk-go/models/microsoft/graph"
)
// SchedulingGroupRequestBuilder builds and executes requests for operations under \teams\{team-id}\schedule\schedulingGroups\{schedulingGroup-id}
type SchedulingGroupRequestBuilder struct {
// Path parameters for the request
pathParameters map[string]string;
// The request adapter to use to execute the requests.
requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter;
// Url template to use to build the URL for the current request builder
urlTemplate string;
}
// SchedulingGroupRequestBuilderDeleteOptions options for Delete | H map[string]string;
// Request options
O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption;
// Response handler to use in place of the default response handling provided by the core service
ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler;
}
// SchedulingGroupRequestBuilderGetOptions options for Get
type SchedulingGroupRequestBuilderGetOptions struct {
// Request headers
H map[string]string;
// Request options
O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption;
// Request query parameters
Q *SchedulingGroupRequestBuilderGetQueryParameters;
// Response handler to use in place of the default response handling provided by the core service
ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler;
}
// SchedulingGroupRequestBuilderGetQueryParameters the logical grouping of users in the schedule (usually by role).
type SchedulingGroupRequestBuilderGetQueryParameters struct {
// Select properties to be returned
Select_escaped []string;
}
// SchedulingGroupRequestBuilderPatchOptions options for Patch
type SchedulingGroupRequestBuilderPatchOptions struct {
//
Body *i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.SchedulingGroup;
// Request headers
H map[string]string;
// Request options
O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption;
// Response handler to use in place of the default response handling provided by the core service
ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler;
}
// NewSchedulingGroupRequestBuilderInternal instantiates a new SchedulingGroupRequestBuilder and sets the default values.
func NewSchedulingGroupRequestBuilderInternal(pathParameters map[string]string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*SchedulingGroupRequestBuilder) {
m := &SchedulingGroupRequestBuilder{
}
m.urlTemplate = "{+baseurl}/teams/{team_id}/schedule/schedulingGroups/{schedulingGroup_id}{?select}";
urlTplParams := make(map[string]string)
for idx, item := range pathParameters {
urlTplParams[idx] = item
}
m.pathParameters = pathParameters;
m.requestAdapter = requestAdapter;
return m
}
// NewSchedulingGroupRequestBuilder instantiates a new SchedulingGroupRequestBuilder and sets the default values.
func NewSchedulingGroupRequestBuilder(rawUrl string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*SchedulingGroupRequestBuilder) {
urlParams := make(map[string]string)
urlParams["request-raw-url"] = rawUrl
return NewSchedulingGroupRequestBuilderInternal(urlParams, requestAdapter)
}
// CreateDeleteRequestInformation the logical grouping of users in the schedule (usually by role).
func (m *SchedulingGroupRequestBuilder) CreateDeleteRequestInformation(options *SchedulingGroupRequestBuilderDeleteOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) {
requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.DELETE
if options != nil && options.H != nil {
requestInfo.Headers = options.H
}
if options != nil && len(options.O) != 0 {
err := requestInfo.AddRequestOptions(options.O...)
if err != nil {
return nil, err
}
}
return requestInfo, nil
}
// CreateGetRequestInformation the logical grouping of users in the schedule (usually by role).
func (m *SchedulingGroupRequestBuilder) CreateGetRequestInformation(options *SchedulingGroupRequestBuilderGetOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) {
requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.GET
if options != nil && options.Q != nil {
requestInfo.AddQueryParameters(*(options.Q))
}
if options != nil && options.H != nil {
requestInfo.Headers = options.H
}
if options != nil && len(options.O) != 0 {
err := requestInfo.AddRequestOptions(options.O...)
if err != nil {
return nil, err
}
}
return requestInfo, nil
}
// CreatePatchRequestInformation the logical grouping of users in the schedule (usually by role).
func (m *SchedulingGroupRequestBuilder) CreatePatchRequestInformation(options *SchedulingGroupRequestBuilderPatchOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) {
requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.PATCH
requestInfo.SetContentFromParsable(m.requestAdapter, "application/json", options.Body)
if options != nil && options.H != nil {
requestInfo.Headers = options.H
}
if options != nil && len(options.O) != 0 {
err := requestInfo.AddRequestOptions(options.O...)
if err != nil {
return nil, err
}
}
return requestInfo, nil
}
// Delete the logical grouping of users in the schedule (usually by role).
func (m *SchedulingGroupRequestBuilder) Delete(options *SchedulingGroupRequestBuilderDeleteOptions)(error) {
requestInfo, err := m.CreateDeleteRequestInformation(options);
if err != nil {
return err
}
err = m.requestAdapter.SendNoContentAsync(*requestInfo, nil)
if err != nil {
return err
}
return nil
}
// Get the logical grouping of users in the schedule (usually by role).
func (m *SchedulingGroupRequestBuilder) Get(options *SchedulingGroupRequestBuilderGetOptions)(*i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.SchedulingGroup, error) {
requestInfo, err := m.CreateGetRequestInformation(options);
if err != nil {
return nil, err
}
res, err := m.requestAdapter.SendAsync(*requestInfo, func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.NewSchedulingGroup() }, nil)
if err != nil {
return nil, err
}
return res.(*i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.SchedulingGroup), nil
}
// Patch the logical grouping of users in the schedule (usually by role).
func (m *SchedulingGroupRequestBuilder) Patch(options *SchedulingGroupRequestBuilderPatchOptions)(error) {
requestInfo, err := m.CreatePatchRequestInformation(options);
if err != nil {
return err
}
err = m.requestAdapter.SendNoContentAsync(*requestInfo, nil)
if err != nil {
return err
}
return nil
} | type SchedulingGroupRequestBuilderDeleteOptions struct {
// Request headers |
common.py | import numpy as np
from collections import defaultdict
# the type of float to use throughout the session.
_FLOATX = 'float32'
_EPSILON = 10e-8
_UID_PREFIXES = defaultdict(int)
_IMAGE_DIM_ORDERING = 'tf'
_LEGACY_WEIGHT_ORDERING = False
def epsilon():
'''Returns the value of the fuzz
factor used in numeric expressions.
# Returns
A float.
# Example
```python
>>> keras.backend.epsilon()
1e-08
```
'''
return _EPSILON
def set_epsilon(e):
'''Sets the value of the fuzz
factor used in numeric expressions.
# Arguments
e: float. New value of epsilon.
# Example
```python
>>> from keras import backend as K
>>> K.epsilon()
1e-08
>>> K.set_epsilon(1e-05)
>>> K.epsilon()
1e-05
```
'''
global _EPSILON
_EPSILON = e
def floatx():
'''Returns the default float type, as a string
(e.g. 'float16', 'float32', 'float64').
# Returns
String, the current default float type.
# Example
```python
>>> keras.backend.floatx()
'float32'
```
'''
return _FLOATX
def | (floatx):
'''Sets the default float type.
# Arguments
String: 'float16', 'float32', or 'float64'.
# Example
```python
>>> from keras import backend as K
>>> K.floatx()
'float32'
>>> K.set_floatx('float16')
>>> K.floatx()
'float16'
```
'''
global _FLOATX
if floatx not in {'float16', 'float32', 'float64'}:
raise ValueError('Unknown floatx type: ' + str(floatx))
_FLOATX = str(floatx)
def cast_to_floatx(x):
'''Cast a Numpy array to the default Keras float type.
# Arguments
x: Numpy array.
# Returns
The same Numpy array, cast to its new type.
# Example
```python
>>> from keras import backend as K
>>> K.floatx()
'float32'
>>> arr = numpy.array([1.0, 2.0], dtype='float64')
>>> arr.dtype
dtype('float64')
>>> new_arr = K.cast_to_floatx(arr)
>>> new_arr
array([ 1., 2.], dtype=float32)
>>> new_arr.dtype
dtype('float32')
```
'''
return np.asarray(x, dtype=_FLOATX)
def image_dim_ordering():
'''Returns the default image dimension ordering
convention ('th' or 'tf').
# Returns
A string, either `'th'` or `'tf'`
# Example
```python
>>> keras.backend.image_dim_ordering()
'th'
```
'''
return _IMAGE_DIM_ORDERING
def set_image_dim_ordering(dim_ordering):
'''Sets the value of the image dimension
ordering convention ('th' or 'tf').
# Arguments
dim_ordering: string. `'th'` or `'tf'`.
# Example
```python
>>> from keras import backend as K
>>> K.image_dim_ordering()
'th'
>>> K.set_image_dim_ordering('tf')
>>> K.image_dim_ordering()
'tf'
```
'''
global _IMAGE_DIM_ORDERING
if dim_ordering not in {'tf', 'th'}:
raise ValueError('Unknown dim_ordering:', dim_ordering)
_IMAGE_DIM_ORDERING = str(dim_ordering)
def get_uid(prefix=''):
'''Provides a unique UID given a string prefix.
# Arguments
prefix: string.
# Returns
An integer.
# Example
```
>>> keras.backend.get_uid('dense')
>>> 1
>>> keras.backend.get_uid('dense')
>>> 2
```
'''
_UID_PREFIXES[prefix] += 1
return _UID_PREFIXES[prefix]
def reset_uids():
global _UID_PREFIXES
_UID_PREFIXES = defaultdict(int)
def is_keras_tensor(x):
'''Returns whether `x` is a Keras tensor.
# Arguments
x: a potential tensor.
# Returns
A boolean: whether the argument is a Keras tensor.
# Examples
```python
>>> from keras import backend as K
>>> np_var = numpy.array([1, 2])
>>> K.is_keras_tensor(np_var)
False
>>> keras_var = K.variable(np_var)
>>> K.is_keras_tensor(keras_var) # A variable is not a Tensor.
False
>>> keras_placeholder = K.placeholder(shape=(2, 4, 5))
>>> K.is_keras_tensor(keras_placeholder) # A placeholder is a Tensor.
True
```
'''
if hasattr(x, '_keras_shape'):
return True
else:
return False
def set_legacy_weight_ordering(value):
global _LEGACY_WEIGHT_ORDERING
assert value in {True, False}
_LEGACY_WEIGHT_ORDERING = value
def legacy_weight_ordering():
return _LEGACY_WEIGHT_ORDERING
| set_floatx |
examples_complete_test.go | package test
import (
"fmt"
"testing"
"github.com/gruntwork-io/terratest/modules/terraform"
"github.com/stretchr/testify/assert"
)
// Test the Terraform module in examples/complete using Terratest.
func TestExamplesComplete(t *testing.T) {
t.Parallel()
// We need to create the ALB first because terraform does not wait for it to be in the ready state before creating ECS target group
terraformOptions := &terraform.Options{
// The path to where our Terraform code is located
TerraformDir: "../../examples/complete",
Upgrade: true,
// Variables to pass to our Terraform code using -var-file options
VarFiles: []string{"fixtures.us-east-2.tfvars"},
}
// At the end of the test, run `terraform destroy` to clean up any resources that were created
defer func() {
if r := recover(); r != nil {
terraform.Destroy(t, terraformOptions)
assert.Fail(t, fmt.Sprintf("Panicked: %v", r))
} else {
terraform.Destroy(t, terraformOptions)
}
}()
// This will run `terraform init` and `terraform apply` and fail the test if there are any errors
terraform.InitAndApply(t, terraformOptions)
// Run `terraform output` to get the value of an output variable
vpcCidr := terraform.Output(t, terraformOptions, "vpc_cidr")
// Verify we're getting back the outputs we expect
assert.Equal(t, "172.16.0.0/16", vpcCidr)
| assert.Equal(t, []string{"172.16.0.0/19", "172.16.32.0/19"}, privateSubnetCidrs)
// Run `terraform output` to get the value of an output variable
publicSubnetCidrs := terraform.OutputList(t, terraformOptions, "public_subnet_cidrs")
// Verify we're getting back the outputs we expect
assert.Equal(t, []string{"172.16.96.0/19", "172.16.128.0/19"}, publicSubnetCidrs)
// Run `terraform output` to get the value of an output variable
securityGroupName := terraform.Output(t, terraformOptions, "security_group_name")
// Verify we're getting back the outputs we expect
assert.Equal(t, "eg-test-documentdb-cluster", securityGroupName)
// Run `terraform output` to get the value of an output variable
clusterName := terraform.Output(t, terraformOptions, "cluster_name")
// Verify we're getting back the outputs we expect
assert.Equal(t, "eg-test-documentdb-cluster", clusterName)
// Run `terraform output` to get the value of an output variable
endpoint := terraform.Output(t, terraformOptions, "endpoint")
// Verify we're getting back the outputs we expect
assert.Contains(t, endpoint, "eg-test-documentdb-cluster.cluster")
// Run `terraform output` to get the value of an output variable
readerEndpoint := terraform.Output(t, terraformOptions, "reader_endpoint")
// Verify we're getting back the outputs we expect
assert.Contains(t, readerEndpoint, "eg-test-documentdb-cluster.cluster-ro")
} | // Run `terraform output` to get the value of an output variable
privateSubnetCidrs := terraform.OutputList(t, terraformOptions, "private_subnet_cidrs")
// Verify we're getting back the outputs we expect |
parser.go | package uql
import (
"errors"
"fmt"
"strings"
)
var errAlias = errors.New("alias is required: expr AS alias")
func (p *queryParser) parseQuery() (any, error) {
{
var conds []Cond
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := len(_tok.Text) == 5 && (_tok.Text[0] == 'w' || _tok.Text[0] == 'W') && (_tok.Text[1] == 'h' || _tok.Text[1] == 'H') && (_tok.Text[2] == 'e' || _tok.Text[2] == 'E') && (_tok.Text[3] == 'r' || _tok.Text[3] == 'R') && (_tok.Text[4] == 'e' || _tok.Text[4] == 'E')
if !_match {
p.ResetPos(_pos1)
goto i0_group_end
}
}
{
var _err error
conds, _err = p.conds()
if _err != nil && _err != errBacktrack {
return nil, _err
}
_match := _err == nil
if !_match {
p.ResetPos(_pos1)
goto i0_group_end
}
}
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := _tok.ID == EOF_TOKEN
if !_match {
p.ResetPos(_pos1)
conds = nil
goto i0_group_end
}
}
return &Where{Conds: conds}, nil
i0_group_end:
}
{
var names []Name
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := len(_tok.Text) == 5 && (_tok.Text[0] == 'g' || _tok.Text[0] == 'G') && (_tok.Text[1] == 'r' || _tok.Text[1] == 'R') && (_tok.Text[2] == 'o' || _tok.Text[2] == 'O') && (_tok.Text[3] == 'u' || _tok.Text[3] == 'U') && (_tok.Text[4] == 'p' || _tok.Text[4] == 'P')
if !_match {
p.ResetPos(_pos1)
goto r1_i0_group_end
}
}
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := len(_tok.Text) == 2 && (_tok.Text[0] == 'b' || _tok.Text[0] == 'B') && (_tok.Text[1] == 'y' || _tok.Text[1] == 'Y')
if !_match {
p.ResetPos(_pos1)
goto r1_i0_group_end
}
}
{
var _err error
names, _err = p.names()
if _err != nil && _err != errBacktrack {
return nil, _err
}
_match := _err == nil
if !_match {
p.ResetPos(_pos1)
goto r1_i0_group_end
}
}
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := _tok.ID == EOF_TOKEN
if !_match {
p.ResetPos(_pos1)
names = nil
goto r1_i0_group_end
}
}
return &Group{Names: names}, nil
r1_i0_group_end:
}
{
var columns []Name
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := len(_tok.Text) == 6 && (_tok.Text[0] == 's' || _tok.Text[0] == 'S') && (_tok.Text[1] == 'e' || _tok.Text[1] == 'E') && (_tok.Text[2] == 'l' || _tok.Text[2] == 'L') && (_tok.Text[3] == 'e' || _tok.Text[3] == 'E') && (_tok.Text[4] == 'c' || _tok.Text[4] == 'C') && (_tok.Text[5] == 't' || _tok.Text[5] == 'T')
if !_match {
p.ResetPos(_pos1)
goto r2_i0_group_end
}
}
{
var _err error
columns, _err = p.columns()
if _err != nil && _err != errBacktrack {
return nil, _err
}
_match := _err == nil
if !_match {
p.ResetPos(_pos1)
goto r2_i0_group_end
}
}
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := _tok.ID == EOF_TOKEN
if !_match {
p.ResetPos(_pos1)
columns = nil
goto r2_i0_group_end
}
}
return &Columns{Names: columns}, nil
r2_i0_group_end:
}
var columns []Name
{
var _err error
columns, _err = p.columns()
if _err != nil && _err != errBacktrack {
return nil, _err
}
_match := _err == nil
if !_match {
return nil, errBacktrack
}
}
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := _tok.ID == EOF_TOKEN
if !_match {
return nil, errBacktrack
}
}
return &Columns{Names: columns}, nil
}
//------------------------------------------------------------------------------
func (p *queryParser) conds() ([]Cond, error) {
var conds []Cond
{
var compOp string
var simples []string
var value Value
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := _tok.Text == "{"
if !_match {
p.ResetPos(_pos1)
goto i0_group_end
}
}
{
var _err error
simples, _err = p.simples()
if _err != nil && _err != errBacktrack {
return nil, _err
}
_match := _err == nil
if !_match {
p.ResetPos(_pos1)
goto i0_group_end
}
}
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := _tok.Text == "}"
if !_match {
p.ResetPos(_pos1)
simples = nil
goto i0_group_end
}
}
{
var _err error
compOp, _err = p.compOp()
if _err != nil && _err != errBacktrack {
return nil, _err
}
_match := _err == nil
if !_match {
p.ResetPos(_pos1)
simples = nil
goto i0_group_end
}
}
{
var _err error
value, _err = p.value()
if _err != nil && _err != errBacktrack {
return nil, _err
}
_match := _err == nil
if !_match {
p.ResetPos(_pos1)
simples = nil
compOp = ""
goto i0_group_end
}
}
{
for _, attrKey := range simples {
conds = append(conds, Cond{
Sep: CondSep{Op: OrOp},
Left: Name{AttrKey: attrKey},
Op: compOp,
Right: value,
})
}
return conds, nil
}
i0_group_end:
}
var cond Cond
var not *Token
{
_pos1 := p.Pos()
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := len(_tok.Text) == 3 && (_tok.Text[0] == 'n' || _tok.Text[0] == 'N') && (_tok.Text[1] == 'o' || _tok.Text[1] == 'O') && (_tok.Text[2] == 't' || _tok.Text[2] == 'T')
if _match {
not = _tok
} else {
p.ResetPos(_pos1)
}
}
{
var _err error
cond, _err = p.cond()
if _err != nil && _err != errBacktrack {
return nil, _err
}
_match := _err == nil
if !_match {
return nil, errBacktrack
}
}
{
if not != nil {
cond.Sep.Negate = true
}
conds = append(conds, cond)
p.cut()
}
{
var cond Cond
var condSep CondSep
var _matchCount int
for {
_pos1 := p.Pos()
{
var _err error
condSep, _err = p.condSep()
if _err != nil && _err != errBacktrack {
return nil, _err
}
_match := _err == nil
if !_match {
p.ResetPos(_pos1)
goto r2_i0_no_match
} | var _err error
cond, _err = p.cond()
if _err != nil && _err != errBacktrack {
return nil, _err
}
_match := _err == nil
if !_match {
p.ResetPos(_pos1)
condSep = CondSep{}
goto r2_i0_no_match
}
}
_matchCount = _matchCount + 1
{
cond.Sep = condSep
conds = append(conds, cond)
p.cut()
}
continue
r2_i0_no_match:
p.ResetPos(_pos1)
if _matchCount >= 0 {
break
}
return nil, errBacktrack
}
}
return conds, nil
}
func (p *queryParser) condSep() (CondSep, error) {
var sep CondSep
{
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return CondSep{}, _err
}
_match := len(_tok.Text) == 3 && (_tok.Text[0] == 'a' || _tok.Text[0] == 'A') && (_tok.Text[1] == 'n' || _tok.Text[1] == 'N') && (_tok.Text[2] == 'd' || _tok.Text[2] == 'D')
if !_match {
p.ResetPos(_pos1)
goto i0_group_end
}
}
sep.Op = AndOp
i0_group_end:
}
if sep.Op == "" {
{
_tok, _err := p.NextToken()
if _err != nil {
return CondSep{}, _err
}
_match := len(_tok.Text) == 2 && (_tok.Text[0] == 'o' || _tok.Text[0] == 'O') && (_tok.Text[1] == 'r' || _tok.Text[1] == 'R')
if !_match {
return CondSep{}, errBacktrack
}
}
sep.Op = OrOp
}
{
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return CondSep{}, _err
}
_match := len(_tok.Text) == 3 && (_tok.Text[0] == 'n' || _tok.Text[0] == 'N') && (_tok.Text[1] == 'o' || _tok.Text[1] == 'O') && (_tok.Text[2] == 't' || _tok.Text[2] == 'T')
if !_match {
p.ResetPos(_pos1)
goto r2_i0_group_end
}
}
sep.Negate = true
r2_i0_group_end:
}
return sep, nil
}
func (p *queryParser) cond() (Cond, error) {
{
var compOp string
var name Name
var value Value
_pos1 := p.Pos()
{
var _err error
name, _err = p.name()
if _err != nil && _err != errBacktrack {
return Cond{}, _err
}
_match := _err == nil
if !_match {
p.ResetPos(_pos1)
goto i0_group_end
}
}
{
var _err error
compOp, _err = p.compOp()
if _err != nil && _err != errBacktrack {
return Cond{}, _err
}
_match := _err == nil
if !_match {
p.ResetPos(_pos1)
name = Name{}
goto i0_group_end
}
}
{
var _err error
value, _err = p.value()
if _err != nil && _err != errBacktrack {
return Cond{}, _err
}
_match := _err == nil
if !_match {
p.ResetPos(_pos1)
name = Name{}
compOp = ""
goto i0_group_end
}
}
return Cond{
Left: name,
Op: compOp,
Right: value,
}, nil
i0_group_end:
}
{
var key *Token
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return Cond{}, _err
}
_match := _tok.ID == IDENT_TOKEN
if !_match {
p.ResetPos(_pos1)
goto r1_i0_group_end
}
key = _tok
}
{
_pos3 := p.Pos()
_tok, _err := p.NextToken()
if _err != nil {
return Cond{}, _err
}
_match := len(_tok.Text) == 4 && (_tok.Text[0] == 'd' || _tok.Text[0] == 'D') && (_tok.Text[1] == 'o' || _tok.Text[1] == 'O') && (_tok.Text[2] == 'e' || _tok.Text[2] == 'E') && (_tok.Text[3] == 's' || _tok.Text[3] == 'S')
if _match {
} else {
p.ResetPos(_pos3)
}
}
{
_tok, _err := p.NextToken()
if _err != nil {
return Cond{}, _err
}
_match := len(_tok.Text) == 3 && (_tok.Text[0] == 'n' || _tok.Text[0] == 'N') && (_tok.Text[1] == 'o' || _tok.Text[1] == 'O') && (_tok.Text[2] == 't' || _tok.Text[2] == 'T')
if !_match {
p.ResetPos(_pos1)
key = nil
goto r1_i0_group_end
}
}
// "exist"
{
_pos5 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return Cond{}, _err
}
_match := len(_tok.Text) == 5 && (_tok.Text[0] == 'e' || _tok.Text[0] == 'E') && (_tok.Text[1] == 'x' || _tok.Text[1] == 'X') && (_tok.Text[2] == 'i' || _tok.Text[2] == 'I') && (_tok.Text[3] == 's' || _tok.Text[3] == 'S') && (_tok.Text[4] == 't' || _tok.Text[4] == 'T')
if !_match {
p.ResetPos(_pos5)
goto r1_i0_i3_alt1
}
}
goto r1_i0_i3_has_match
}
r1_i0_i3_alt1:
// "exists"
{
{
_tok, _err := p.NextToken()
if _err != nil {
return Cond{}, _err
}
_match := len(_tok.Text) == 6 && (_tok.Text[0] == 'e' || _tok.Text[0] == 'E') && (_tok.Text[1] == 'x' || _tok.Text[1] == 'X') && (_tok.Text[2] == 'i' || _tok.Text[2] == 'I') && (_tok.Text[3] == 's' || _tok.Text[3] == 'S') && (_tok.Text[4] == 't' || _tok.Text[4] == 'T') && (_tok.Text[5] == 's' || _tok.Text[5] == 'S')
if !_match {
p.ResetPos(_pos1)
key = nil
goto r1_i0_group_end
}
}
}
r1_i0_i3_has_match:
return Cond{
Left: Name{AttrKey: key.Text},
Op: DoesNotExistOp,
}, nil
r1_i0_group_end:
}
{
var key *Token
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return Cond{}, _err
}
_match := _tok.ID == IDENT_TOKEN
if !_match {
p.ResetPos(_pos1)
goto r2_i0_group_end
}
key = _tok
}
// "exist"
{
_pos3 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return Cond{}, _err
}
_match := len(_tok.Text) == 5 && (_tok.Text[0] == 'e' || _tok.Text[0] == 'E') && (_tok.Text[1] == 'x' || _tok.Text[1] == 'X') && (_tok.Text[2] == 'i' || _tok.Text[2] == 'I') && (_tok.Text[3] == 's' || _tok.Text[3] == 'S') && (_tok.Text[4] == 't' || _tok.Text[4] == 'T')
if !_match {
p.ResetPos(_pos3)
goto r2_i0_i1_alt1
}
}
goto r2_i0_i1_has_match
}
r2_i0_i1_alt1:
// "exists"
{
{
_tok, _err := p.NextToken()
if _err != nil {
return Cond{}, _err
}
_match := len(_tok.Text) == 6 && (_tok.Text[0] == 'e' || _tok.Text[0] == 'E') && (_tok.Text[1] == 'x' || _tok.Text[1] == 'X') && (_tok.Text[2] == 'i' || _tok.Text[2] == 'I') && (_tok.Text[3] == 's' || _tok.Text[3] == 'S') && (_tok.Text[4] == 't' || _tok.Text[4] == 'T') && (_tok.Text[5] == 's' || _tok.Text[5] == 'S')
if !_match {
p.ResetPos(_pos1)
key = nil
goto r2_i0_group_end
}
}
}
r2_i0_i1_has_match:
return Cond{
Left: Name{AttrKey: key.Text},
Op: ExistsOp,
}, nil
r2_i0_group_end:
}
var key *Token
{
_tok, _err := p.NextToken()
if _err != nil {
return Cond{}, _err
}
_match := _tok.ID == IDENT_TOKEN
if !_match {
return Cond{}, errBacktrack
}
key = _tok
}
return Cond{
Left: Name{AttrKey: key.Text},
Op: EqualOp,
Right: Value{
Kind: NumberValue,
Text: "1",
},
}, nil
}
func (p *queryParser) compOp() (string, error) {
{
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 2 && _tok.Text[0] == '>' && _tok.Text[1] == '='
if !_match {
p.ResetPos(_pos1)
goto i0_group_end
}
}
return ">=", nil
i0_group_end:
}
{
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 2 && _tok.Text[0] == '<' && _tok.Text[1] == '='
if !_match {
p.ResetPos(_pos1)
goto r1_i0_group_end
}
}
return "<=", nil
r1_i0_group_end:
}
{
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 2 && _tok.Text[0] == '=' && _tok.Text[1] == '='
if !_match {
p.ResetPos(_pos1)
goto r2_i0_group_end
}
}
return EqualOp, nil
r2_i0_group_end:
}
{
_pos1 := p.Pos()
// "!="
{
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 2 && _tok.Text[0] == '!' && _tok.Text[1] == '='
if !_match {
p.ResetPos(_pos1)
goto r3_i0_alt1
}
}
goto r3_i0_has_match
}
r3_i0_alt1:
// "<>"
{
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 2 && _tok.Text[0] == '<' && _tok.Text[1] == '>'
if !_match {
p.ResetPos(_pos1)
goto r3_i0_group_end
}
}
}
r3_i0_has_match:
return NotEqualOp, nil
r3_i0_group_end:
}
{
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 2 && _tok.Text[0] == '!' && _tok.Text[1] == '~'
if !_match {
p.ResetPos(_pos1)
goto r4_i0_group_end
}
}
return DoesNotMatchOp, nil
r4_i0_group_end:
}
{
var t *Token
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := _tok.Text == "<" || _tok.Text == ">" || _tok.Text == "=" || _tok.Text == "~"
if !_match {
p.ResetPos(_pos1)
goto r5_i0_group_end
}
t = _tok
}
return t.Text, nil
r5_i0_group_end:
}
{
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 4 && (_tok.Text[0] == 'd' || _tok.Text[0] == 'D') && (_tok.Text[1] == 'o' || _tok.Text[1] == 'O') && (_tok.Text[2] == 'e' || _tok.Text[2] == 'E') && (_tok.Text[3] == 's' || _tok.Text[3] == 'S')
if _match {
} else {
p.ResetPos(_pos1)
}
}
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 3 && (_tok.Text[0] == 'n' || _tok.Text[0] == 'N') && (_tok.Text[1] == 'o' || _tok.Text[1] == 'O') && (_tok.Text[2] == 't' || _tok.Text[2] == 'T')
if !_match {
p.ResetPos(_pos1)
goto r6_i0_group_end
}
}
// "contain"
{
_pos4 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 7 && (_tok.Text[0] == 'c' || _tok.Text[0] == 'C') && (_tok.Text[1] == 'o' || _tok.Text[1] == 'O') && (_tok.Text[2] == 'n' || _tok.Text[2] == 'N') && (_tok.Text[3] == 't' || _tok.Text[3] == 'T') && (_tok.Text[4] == 'a' || _tok.Text[4] == 'A') && (_tok.Text[5] == 'i' || _tok.Text[5] == 'I') && (_tok.Text[6] == 'n' || _tok.Text[6] == 'N')
if !_match {
p.ResetPos(_pos4)
goto r6_i0_i2_alt1
}
}
goto r6_i0_i2_has_match
}
r6_i0_i2_alt1:
// "contains"
{
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 8 && (_tok.Text[0] == 'c' || _tok.Text[0] == 'C') && (_tok.Text[1] == 'o' || _tok.Text[1] == 'O') && (_tok.Text[2] == 'n' || _tok.Text[2] == 'N') && (_tok.Text[3] == 't' || _tok.Text[3] == 'T') && (_tok.Text[4] == 'a' || _tok.Text[4] == 'A') && (_tok.Text[5] == 'i' || _tok.Text[5] == 'I') && (_tok.Text[6] == 'n' || _tok.Text[6] == 'N') && (_tok.Text[7] == 's' || _tok.Text[7] == 'S')
if !_match {
p.ResetPos(_pos1)
goto r6_i0_group_end
}
}
}
r6_i0_i2_has_match:
return DoesNotContainOp, nil
r6_i0_group_end:
}
{
_pos1 := p.Pos()
// "contain"
{
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 7 && (_tok.Text[0] == 'c' || _tok.Text[0] == 'C') && (_tok.Text[1] == 'o' || _tok.Text[1] == 'O') && (_tok.Text[2] == 'n' || _tok.Text[2] == 'N') && (_tok.Text[3] == 't' || _tok.Text[3] == 'T') && (_tok.Text[4] == 'a' || _tok.Text[4] == 'A') && (_tok.Text[5] == 'i' || _tok.Text[5] == 'I') && (_tok.Text[6] == 'n' || _tok.Text[6] == 'N')
if !_match {
p.ResetPos(_pos1)
goto r7_i0_alt1
}
}
goto r7_i0_has_match
}
r7_i0_alt1:
// "contains"
{
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 8 && (_tok.Text[0] == 'c' || _tok.Text[0] == 'C') && (_tok.Text[1] == 'o' || _tok.Text[1] == 'O') && (_tok.Text[2] == 'n' || _tok.Text[2] == 'N') && (_tok.Text[3] == 't' || _tok.Text[3] == 'T') && (_tok.Text[4] == 'a' || _tok.Text[4] == 'A') && (_tok.Text[5] == 'i' || _tok.Text[5] == 'I') && (_tok.Text[6] == 'n' || _tok.Text[6] == 'N') && (_tok.Text[7] == 's' || _tok.Text[7] == 'S')
if !_match {
p.ResetPos(_pos1)
goto r7_i0_group_end
}
}
}
r7_i0_has_match:
return ContainsOp, nil
r7_i0_group_end:
}
{
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 3 && (_tok.Text[0] == 'n' || _tok.Text[0] == 'N') && (_tok.Text[1] == 'o' || _tok.Text[1] == 'O') && (_tok.Text[2] == 't' || _tok.Text[2] == 'T')
if !_match {
p.ResetPos(_pos1)
goto r8_i0_group_end
}
}
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 4 && (_tok.Text[0] == 'l' || _tok.Text[0] == 'L') && (_tok.Text[1] == 'i' || _tok.Text[1] == 'I') && (_tok.Text[2] == 'k' || _tok.Text[2] == 'K') && (_tok.Text[3] == 'e' || _tok.Text[3] == 'E')
if !_match {
p.ResetPos(_pos1)
goto r8_i0_group_end
}
}
return NotLikeOp, nil
r8_i0_group_end:
}
{
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 4 && (_tok.Text[0] == 'l' || _tok.Text[0] == 'L') && (_tok.Text[1] == 'i' || _tok.Text[1] == 'I') && (_tok.Text[2] == 'k' || _tok.Text[2] == 'K') && (_tok.Text[3] == 'e' || _tok.Text[3] == 'E')
if !_match {
p.ResetPos(_pos1)
goto r9_i0_group_end
}
}
return LikeOp, nil
r9_i0_group_end:
}
{
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 7 && (_tok.Text[0] == 'm' || _tok.Text[0] == 'M') && (_tok.Text[1] == 'a' || _tok.Text[1] == 'A') && (_tok.Text[2] == 't' || _tok.Text[2] == 'T') && (_tok.Text[3] == 'c' || _tok.Text[3] == 'C') && (_tok.Text[4] == 'h' || _tok.Text[4] == 'H') && (_tok.Text[5] == 'e' || _tok.Text[5] == 'E') && (_tok.Text[6] == 's' || _tok.Text[6] == 'S')
if !_match {
p.ResetPos(_pos1)
goto r10_i0_group_end
}
}
return MatchesOp, nil
r10_i0_group_end:
}
{
_pos1 := p.Pos()
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 4 && (_tok.Text[0] == 'd' || _tok.Text[0] == 'D') && (_tok.Text[1] == 'o' || _tok.Text[1] == 'O') && (_tok.Text[2] == 'e' || _tok.Text[2] == 'E') && (_tok.Text[3] == 's' || _tok.Text[3] == 'S')
if _match {
} else {
p.ResetPos(_pos1)
}
}
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 3 && (_tok.Text[0] == 'n' || _tok.Text[0] == 'N') && (_tok.Text[1] == 'o' || _tok.Text[1] == 'O') && (_tok.Text[2] == 't' || _tok.Text[2] == 'T')
if !_match {
return "", errBacktrack
}
}
// "match"
{
_pos3 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 5 && (_tok.Text[0] == 'm' || _tok.Text[0] == 'M') && (_tok.Text[1] == 'a' || _tok.Text[1] == 'A') && (_tok.Text[2] == 't' || _tok.Text[2] == 'T') && (_tok.Text[3] == 'c' || _tok.Text[3] == 'C') && (_tok.Text[4] == 'h' || _tok.Text[4] == 'H')
if !_match {
p.ResetPos(_pos3)
goto r11_i2_alt1
}
}
goto r11_i2_has_match
}
r11_i2_alt1:
// "matches"
{
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 7 && (_tok.Text[0] == 'm' || _tok.Text[0] == 'M') && (_tok.Text[1] == 'a' || _tok.Text[1] == 'A') && (_tok.Text[2] == 't' || _tok.Text[2] == 'T') && (_tok.Text[3] == 'c' || _tok.Text[3] == 'C') && (_tok.Text[4] == 'h' || _tok.Text[4] == 'H') && (_tok.Text[5] == 'e' || _tok.Text[5] == 'E') && (_tok.Text[6] == 's' || _tok.Text[6] == 'S')
if !_match {
return "", errBacktrack
}
}
}
r11_i2_has_match:
return DoesNotMatchOp, nil
}
func (p *queryParser) value() (Value, error) {
{
var t *Token
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return Value{}, _err
}
_match := _tok.ID == NUMBER_TOKEN
if !_match {
p.ResetPos(_pos1)
goto i0_group_end
}
t = _tok
}
return Value{
Kind: NumberValue,
Text: t.Text,
}, nil
i0_group_end:
}
{
var t *Token
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return Value{}, _err
}
_match := _tok.ID == DURATION_TOKEN
if !_match {
p.ResetPos(_pos1)
goto r1_i0_group_end
}
t = _tok
}
return Value{
Kind: DurationValue,
Text: t.Text,
}, nil
r1_i0_group_end:
}
var t *Token
// t=IDENT
{
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return Value{}, _err
}
_match := _tok.ID == IDENT_TOKEN
if !_match {
p.ResetPos(_pos1)
goto r2_i0_alt1
}
t = _tok
}
goto r2_i0_has_match
}
r2_i0_alt1:
// t=VALUE
{
{
_tok, _err := p.NextToken()
if _err != nil {
return Value{}, _err
}
_match := _tok.ID == VALUE_TOKEN
if !_match {
return Value{}, errBacktrack
}
t = _tok
}
}
r2_i0_has_match:
return Value{
Kind: StringValue,
Text: t.Text,
}, nil
}
//------------------------------------------------------------------------------
func (p *queryParser) names() ([]Name, error) {
var names []Name
var name Name
{
var _err error
name, _err = p.name()
if _err != nil && _err != errBacktrack {
return nil, _err
}
_match := _err == nil
if !_match {
return nil, errBacktrack
}
}
{
names = append(names, name)
p.cut()
}
{
var name Name
var _matchCount int
for {
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := _tok.Text == ","
if !_match {
p.ResetPos(_pos1)
goto r1_i0_no_match
}
}
{
var _err error
name, _err = p.name()
if _err != nil && _err != errBacktrack {
return nil, _err
}
_match := _err == nil
if !_match {
p.ResetPos(_pos1)
goto r1_i0_no_match
}
}
_matchCount = _matchCount + 1
{
names = append(names, name)
p.cut()
}
continue
r1_i0_no_match:
p.ResetPos(_pos1)
if _matchCount >= 0 {
break
}
return nil, errBacktrack
}
}
return names, nil
}
func (p *queryParser) name() (Name, error) {
{
var attr *Token
var fn *Token
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return Name{}, _err
}
_match := _tok.ID == IDENT_TOKEN
if !_match {
p.ResetPos(_pos1)
goto i0_group_end
}
fn = _tok
}
{
_tok, _err := p.NextToken()
if _err != nil {
return Name{}, _err
}
_match := _tok.Text == "("
if !_match {
p.ResetPos(_pos1)
fn = nil
goto i0_group_end
}
}
{
_tok, _err := p.NextToken()
if _err != nil {
return Name{}, _err
}
_match := _tok.ID == IDENT_TOKEN
if !_match {
p.ResetPos(_pos1)
fn = nil
goto i0_group_end
}
attr = _tok
}
{
_tok, _err := p.NextToken()
if _err != nil {
return Name{}, _err
}
_match := _tok.Text == ")"
if !_match {
p.ResetPos(_pos1)
fn = nil
attr = nil
goto i0_group_end
}
}
{
funcName := strings.ToLower(fn.Text)
switch funcName {
case "p50", "p75", "p90", "p95", "p99",
"min", "max", "sum", "avg",
"top3", "top10",
"any", "uniq":
return Name{
FuncName: funcName,
AttrKey: attr.Text,
}, nil
default:
return Name{}, fmt.Errorf("unknown function: %q", fn.Text)
}
}
i0_group_end:
}
var t *Token
{
_tok, _err := p.NextToken()
if _err != nil {
return Name{}, _err
}
_match := _tok.ID == IDENT_TOKEN
if !_match {
return Name{}, errBacktrack
}
t = _tok
}
return Name{
AttrKey: t.Text,
}, nil
}
func (p *queryParser) alias() (string, error) {
{
_tok, _err := p.NextToken()
if _err != nil {
return "", _err
}
_match := len(_tok.Text) == 2 && (_tok.Text[0] == 'a' || _tok.Text[0] == 'A') && (_tok.Text[1] == 's' || _tok.Text[1] == 'S')
if !_match {
return "", errBacktrack
}
}
tok, err := p.NextToken()
if err != nil {
return "", err
}
if tok.ID != IDENT_TOKEN {
return "", errAlias
}
return tok.Text, nil
}
//------------------------------------------------------------------------------
func (p *queryParser) simples() ([]string, error) {
var ss []string
var t *Token
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := _tok.ID == IDENT_TOKEN
if !_match {
return nil, errBacktrack
}
t = _tok
}
ss = append(ss, t.Text)
{
var t *Token
var _matchCount int
for {
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := _tok.Text == ","
if !_match {
p.ResetPos(_pos1)
goto r1_i0_no_match
}
}
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := _tok.ID == IDENT_TOKEN
if !_match {
p.ResetPos(_pos1)
goto r1_i0_no_match
}
t = _tok
}
_matchCount = _matchCount + 1
ss = append(ss, t.Text)
continue
r1_i0_no_match:
p.ResetPos(_pos1)
if _matchCount >= 0 {
break
}
return nil, errBacktrack
}
}
return ss, nil
}
func (p *queryParser) columns() ([]Name, error) {
var columns []Name
var column []Name
{
var _err error
column, _err = p.column()
if _err != nil && _err != errBacktrack {
return nil, _err
}
_match := _err == nil
if !_match {
return nil, errBacktrack
}
}
{
columns = append(columns, column...)
p.cut()
}
{
var column []Name
var _matchCount int
for {
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := _tok.Text == ","
if !_match {
p.ResetPos(_pos1)
goto r1_i0_no_match
}
}
{
var _err error
column, _err = p.column()
if _err != nil && _err != errBacktrack {
return nil, _err
}
_match := _err == nil
if !_match {
p.ResetPos(_pos1)
goto r1_i0_no_match
}
}
_matchCount = _matchCount + 1
{
columns = append(columns, column...)
p.cut()
}
continue
r1_i0_no_match:
p.ResetPos(_pos1)
if _matchCount >= 0 {
break
}
return nil, errBacktrack
}
}
return columns, nil
}
func (p *queryParser) column() ([]Name, error) {
{
var attr *Token
var simples []string
_pos1 := p.Pos()
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := _tok.Text == "{"
if !_match {
p.ResetPos(_pos1)
goto i0_group_end
}
}
{
var _err error
simples, _err = p.simples()
if _err != nil && _err != errBacktrack {
return nil, _err
}
_match := _err == nil
if !_match {
p.ResetPos(_pos1)
goto i0_group_end
}
}
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := _tok.Text == "}"
if !_match {
p.ResetPos(_pos1)
simples = nil
goto i0_group_end
}
}
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := _tok.Text == "("
if !_match {
p.ResetPos(_pos1)
simples = nil
goto i0_group_end
}
}
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := _tok.ID == IDENT_TOKEN
if !_match {
p.ResetPos(_pos1)
simples = nil
goto i0_group_end
}
attr = _tok
}
{
_tok, _err := p.NextToken()
if _err != nil {
return nil, _err
}
_match := _tok.Text == ")"
if !_match {
p.ResetPos(_pos1)
simples = nil
attr = nil
goto i0_group_end
}
}
{
columns := make([]Name, len(simples))
for i, funcName := range simples {
columns[i] = Name{
FuncName: funcName,
AttrKey: attr.Text,
}
}
return columns, nil
}
i0_group_end:
}
var name Name
{
var _err error
name, _err = p.name()
if _err != nil && _err != errBacktrack {
return nil, _err
}
_match := _err == nil
if !_match {
return nil, errBacktrack
}
}
return []Name{name}, nil
} | }
{ |
data.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and DMLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT for QA datasets."""
import collections
import multiprocessing as mp
import time
from functools import partial
from mxnet.gluon.data import SimpleDataset
from gluonnlp.data.utils import whitespace_splitter
import numpy as np
__all__ = ['SQuADTransform', 'preprocess_dataset']
class SquadExample:
"""A single training/test example for SQuAD question.
For examples without an answer, the start and end position are -1.
"""
def | (self,
qas_id,
question_text,
doc_tokens,
example_id,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
self.example_id = example_id
def _worker_fn(example, transform):
"""Function for processing data in worker process."""
feature = transform(example)
return feature
def preprocess_dataset(dataset, transform, num_workers=8):
"""Use multiprocessing to perform transform for dataset.
Parameters
----------
dataset: dataset-like object
Source dataset.
transform: callable
Transformer function.
num_workers: int, default 8
The number of multiprocessing workers to use for data preprocessing.
"""
worker_fn = partial(_worker_fn, transform=transform)
start = time.time()
pool = mp.Pool(num_workers)
dataset_transform = []
dataset_len = []
for data in pool.map(worker_fn, dataset):
if data:
for _data in data:
dataset_transform.append(_data[:-1])
dataset_len.append(_data[-1])
dataset = SimpleDataset(dataset_transform).transform(
lambda x: (x[0], x[1], x[2], x[3], x[4], x[5]))
end = time.time()
pool.close()
print('Done! Transform dataset costs %.2f seconds.' % (end-start))
return dataset, dataset_len
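# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how SQuADTransform and preprocess_dataset are
# typically wired together. The tokenizer/vocab setup below is an assumption
# based on the public gluonnlp 0.x API, so it is left commented out:
#
#   import gluonnlp as nlp
#   _, vocab = nlp.model.get_model('bert_12_768_12',
#                                  dataset_name='book_corpus_wiki_en_uncased',
#                                  pretrained=True, use_classifier=False,
#                                  use_decoder=False)
#   tokenizer = nlp.data.BERTTokenizer(vocab, lower=True)
#   dev_data = nlp.data.SQuAD('dev', version='1.1')
#   transform = SQuADTransform(tokenizer, max_seq_length=384, is_training=False)
#   dev_dataset, dev_feature_lengths = preprocess_dataset(dev_data, transform,
#                                                         num_workers=4)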
class SQuADFeature:
"""Single feature of a single example transform of the SQuAD question.
"""
def __init__(self,
example_id,
qas_id,
doc_tokens,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
valid_length,
segment_ids,
start_position,
end_position,
is_impossible):
self.example_id = example_id
self.qas_id = qas_id
self.doc_tokens = doc_tokens
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.valid_length = valid_length
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
class SQuADTransform:
"""Dataset Transformation for BERT-style QA.
The transformation is processed in the following steps:
- Convert from gluonnlp.data.SQuAD's record to SquadExample.
- Tokenize the question_text in the example.
- For examples where the document is too long,
use a sliding window to split into multiple features and
record whether each token is a maximum context.
- Tokenize the split document chunks.
- Combine the token of question_text with the token
of the document and insert [CLS] and [SEP].
- Generate the start position and end position of the answer.
- Generate valid length.
E.g:
Inputs:
question_text: 'When did BBC Japan begin broadcasting?'
doc_tokens: ['BBC','Japan','was','a','general','entertainment','channel,',
'which','operated','between','December','2004','and','April',
'2006.','It','ceased','operations','after','its','Japanese',
'distributor','folded.']
start_position: 10
end_position: 11
orig_answer_text: 'December 2004'
Processed:
tokens: ['[CLS]','when','did','bbc','japan','begin','broadcasting','?',
'[SEP]','bbc','japan','was','a','general','entertainment','channel',
',','which','operated','between','december','2004','and','april',
'2006','.','it','ceased','operations','after','its','japanese',
'distributor','folded','.','[SEP]']
segment_ids: [0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
start_position: 20
end_position: 21
valid_length: 36
Because of the sliding window approach taken to scoring documents, a single
token can appear in multiple documents.
So you need to record whether each token is a maximum context. E.g.
Doc: the man went to the store and bought a gallon of milk
Span A: the man went to the
Span B: to the store and bought
Span C: and bought a gallon of
...
Now the word 'bought' will have two scores from spans B and C. We only
want to consider the score with "maximum context", which we define as
the *minimum* of its left and right context (the *sum* of left and
right context will always be the same, of course).
In the example the maximum context for 'bought' would be span C since
it has 1 left context and 3 right context, while span B has 4 left context
and 0 right context.
Parameters
----------
tokenizer : BERTTokenizer.
Tokenizer for the sentences.
max_seq_length : int, default 384
Maximum sequence length of the sentences.
doc_stride : int, default 128
When splitting up a long document into chunks,
how much stride to take between chunks.
max_query_length : int, default 64
The maximum length of the query tokens.
is_pad : bool, default True
Whether to pad the sentences to maximum length.
is_training : bool, default True
Whether to run training.
do_lookup : bool, default True
        Whether to do vocabulary lookup to convert tokens to indices.
"""
def __init__(self,
tokenizer,
max_seq_length=384,
doc_stride=128,
max_query_length=64,
is_pad=True,
is_training=True,
do_lookup=True):
self.tokenizer = tokenizer
self.max_seq_length = max_seq_length
self.max_query_length = max_query_length
self.doc_stride = doc_stride
self.is_pad = is_pad
self.is_training = is_training
self.do_lookup = do_lookup
def _is_whitespace(self, c):
if c == ' ' or c == '\t' or c == '\r' or c == '\n' or ord(
c) == 0x202F:
return True
return False
def _toSquadExample(self, record):
example_id = record[0]
qas_id = record[1]
question_text = record[2]
paragraph_text = record[3]
orig_answer_text = record[4][0] if record[4] else ''
answer_offset = record[5][0] if record[5] else ''
is_impossible = record[6] if len(record) == 7 else False
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if self._is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
start_position = -1
end_position = -1
if self.is_training:
if not is_impossible:
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[
answer_offset + answer_length - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = ' '.join(
doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = ' '.join(
whitespace_splitter(orig_answer_text.strip()))
if actual_text.find(cleaned_answer_text) == -1:
print('Could not find answer: %s vs. %s' %
(actual_text, cleaned_answer_text))
return None
else:
start_position = -1
end_position = -1
orig_answer_text = ''
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
example_id=example_id,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
return example
def _transform(self, *record):
example = self._toSquadExample(record)
if not example:
return None
padding = self.tokenizer.vocab.padding_token
if self.do_lookup:
padding = self.tokenizer.vocab[padding]
features = []
query_tokens = self.tokenizer(example.question_text)
if len(query_tokens) > self.max_query_length:
query_tokens = query_tokens[0:self.max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = self.tokenizer(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if self.is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if self.is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position +
1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position,
self.tokenizer, example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = self.max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
        # of up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
'DocSpan', ['start', 'length'])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, self.doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append(self.tokenizer.vocab.cls_token)
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append(self.tokenizer.vocab.sep_token)
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(
tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(
doc_spans, doc_span_index, split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append(self.tokenizer.vocab.sep_token)
segment_ids.append(1)
if self.do_lookup:
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
else:
input_ids = tokens
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
valid_length = len(input_ids)
# Zero-pad up to the sequence length.
if self.is_pad:
while len(input_ids) < self.max_seq_length:
input_ids.append(padding)
segment_ids.append(padding)
assert len(input_ids) == self.max_seq_length
assert len(segment_ids) == self.max_seq_length
start_position = 0
end_position = 0
if self.is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start
and tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if self.is_training and example.is_impossible:
start_position = 0
end_position = 0
features.append(SQuADFeature(example_id=example.example_id,
qas_id=example.qas_id,
doc_tokens=example.doc_tokens,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
valid_length=valid_length,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible))
return features
def __call__(self, record):
examples = self._transform(*record)
if not examples:
return None
features = []
for _example in examples:
feature = []
feature.append(_example.example_id)
feature.append(_example.input_ids)
feature.append(_example.segment_ids)
feature.append(_example.valid_length)
feature.append(_example.start_position)
feature.append(_example.end_position)
feature.append(len(_example.input_ids))
features.append(feature)
return features
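        # For reference, each entry in the `features` list returned above has
        # the layout (derived from the appends in this method):
        #   [example_id, input_ids, segment_ids, valid_length,
        #    start_position, end_position, len(input_ids)]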
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
    # Question: What country is the top exporter of electronics?
    # Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = ' '.join(tokenizer(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = ' '.join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
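    # Worked example of the score computed below, using the spans above: in
    # span B (length 5) 'bought' has 4 tokens of left context and 0 of right
    # context, so its score is min(4, 0) + 0.01 * 5 = 0.05; in span C
    # (length 5) it has 1 left and 3 right, so min(1, 3) + 0.01 * 5 = 1.05,
    # and span C is selected.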
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + \
0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
| __init__ |
models.py | import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from passlib.apps import custom_app_context as pwd_context
import random, string
from itsdangerous import(TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
Base = declarative_base()
secret_key = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in xrange(32))
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key = True)
username = Column(String)
picture = Column (String)
description = Column(String)
name = Column(String)
password_hash = Column(String(64))
def hash_password(self,password):
self.password_hash = pwd_context.hash(password)
def verify_password(self, password):
return pwd_context.verify(password, self.password_hash)
def generate_auth_token(self, expiration = 600):
s = Serializer(secret_key, expires_in = expiration)
return s.dumps({'id': self.id})
#Verify auth tokens
@staticmethod
def verify_auth_token(token):
s = Serializer(secret_key)
try:
data = s.loads(token)
except SignatureExpired:
#Valid but expired
return None
except BadSignature:
#Invalid token
return None
user_id = data['id']
return user_id
@property
def serialize(self):
return {
'id': self.id,
'user_about': self.description,
'username': self.username,
'picture': self.picture,
'name' : self.name
}
class Post(Base):
|
engine = create_engine('sqlite:///simpelapi.db')
Base.metadata.create_all(engine) | __tablename__ = 'posts'
id = Column(Integer, primary_key = True)
content = Column(String(250))
likes = Column(Integer)
user_id = Column(Integer,ForeignKey('user.id'))
user = relationship(User)
@property
def serialize(self):
return {
'id': self.id,
'zcontent': self.content,
'zlikes': self.likes,
'zauthor': self.user_id
} |
simple.rs | use rand::{Rng, SeedableRng};
use rand::rngs::SmallRng;
use diamond_types::list::ListCRDT;
fn random_str(len: usize, rng: &mut SmallRng) -> String {
let mut str = String::new();
let alphabet: Vec<char> = "abcdefghijklmnop ".chars().collect();
for _ in 0..len {
str.push(alphabet[rng.gen_range(0..alphabet.len())]);
}
str
}
fn random_inserts_deletes() {
let mut doc = ListCRDT::new();
let agent = doc.get_or_create_agent_id("seph"); // agent 0
// Stable between runs for reproducing bugs.
let mut rng = SmallRng::seed_from_u64(1234);
for i in 0..1000000 {
let doc_len = doc.len();
if i % 10000 == 0 {
println!("i {} doc len {}", i, doc_len);
}
let insert_weight = if doc_len < 100 { 0.55 } else { 0.45 };
if doc_len == 0 || rng.gen_bool(insert_weight) {
// Insert something.
let pos = rng.gen_range(0..=doc_len);
let len: usize = rng.gen_range(1..2); // Ideally skew toward smaller inserts.
// let len: usize = rng.gen_range(1..10); // Ideally skew toward smaller inserts.
let content = random_str(len as usize, &mut rng);
// println!("Inserting '{}' at position {}", content, pos);
// rope.insert(pos, content.as_str());
doc.local_insert(agent, pos, &content)
} else {
// Delete something
let pos = rng.gen_range(0..doc_len);
// println!("range {}", u32::min(10, doc_len - pos));
let span = rng.gen_range(1..=usize::min(10, doc_len - pos));
// dbg!(&state.marker_tree, pos, len);
// println!("deleting {} at position {}", span, pos);
// rope.remove(pos..pos+span);
doc.local_delete(agent, pos, span)
}
}
}
fn main() {
random_inserts_deletes();
// let mut state = CRDTState::new();
//
// state.insert_name("fred", 0, "a"); | // state.insert_name("george", 1, "bC");
//
// state.insert_name("fred", 3, "D");
// state.insert_name("george", 4, "EFgh");
//
// // println!("tree {:#?}", state.content_tree);
// // Delete CDEF
// let _result = state.delete_name("amanda", 2, 4);
// // eprintln!("delete result {:#?}", result);
// assert_eq!(state.len(), 4);
} | |
parse.ts | // Dependencies:
import * as esquery from 'esquery';
import { SyntaxKind } from 'typescript';
import { TSQuerySelectorNode } from './tsquery-types';
// Constants:
const IDENTIFIER_QUERY = 'identifier';
export function parse (selector: string): TSQuerySelectorNode { | if (!selector) {
return selector;
}
if (selector.selectors) {
selector.selectors.map(validateParse);
}
if (selector.left) {
validateParse(selector.left);
}
if (selector.right) {
validateParse(selector.right);
}
if (selector.type as string === IDENTIFIER_QUERY) {
if (SyntaxKind[selector.value as any] == null) {
throw SyntaxError(`"${selector.value}" is not a valid TypeScript Node kind.`);
}
}
return selector;
} | return validateParse(esquery.parse(selector));
}
function validateParse (selector: TSQuerySelectorNode): TSQuerySelectorNode { |
arc009b.py | def conv(x):
|
b = input().split()
N = int(input())
a = [input() for _ in range(N)]
t = {b[i]: str(i) for i in range(10)}
a.sort(key = lambda x: conv(x))
print(*a, sep='\n')
| return int(''.join(t[c] for c in x)) |
pidfile.py | import psutil
import logging
from pathlib import Path
from typing import Union
logger = logging.getLogger(__name__)
class | :
def __init__(self, path: Union[str, Path]):
self.path = Path(path)
self._pid = None
@property
def pid(self):
return self._pid or self.load_pid()
def load_pid(self) -> int:
try:
with self.path.open("r") as raw:
self._pid = int(raw.read())
except ValueError:
self.path.unlink()
except FileNotFoundError:
pass
logger.debug(f"Loaded PID {self._pid} from {self.path}")
return self._pid
def write_pid(self, pid: int):
with self.path.open("w") as raw:
raw.write(str(pid))
self._pid = pid
@property
def process(self) -> psutil.Process:
if self.pid is None:
raise UnknownProcessError(self)
try:
return psutil.Process(self.pid)
except psutil.NoSuchProcess:
raise UnknownProcessError(self)
def unlink(self):
return self.path.unlink()
def __str__(self) -> str:
return str(self.path)
class UnknownProcessError(Exception):
"""Occurs when the PIDFile doesn't yield a readable PID."""
def __init__(self, pid_file: PIDFile):
self.pid_file = pid_file
super().__init__(f"{pid_file} doesn't refer to a valid Process.")
| PIDFile |
th-training-summary.tsx | import { Component } from '@stencil/core';
@Component({
tag: 'th-training-summary',
styleUrl: 'th-training-summary.css',
})
export class | {
render() {
return [
<div class="container">
<div class="content">
<h1>The Theracode Advantage</h1>
<p>Our training sessions have proven effective time and time again. We understand that Ionic is a part of a broader solution, and in order for a team to be successful they need to understand and execute the entire software development life cycle. By the end of our workshops, attendees will have a clear picture of how to accomplish that.</p>
</div>
</div>
];
}
} | ThTrainingSummary |
any.rs | //! This module implements the `Any` trait, which enables dynamic typing
//! of any `'static` type through runtime reflection.
//!
//! `Any` itself can be used to get a `TypeId`, and has more features when used
//! as a trait object. As `&dyn Any` (a borrowed trait object), it has the `is`
//! and `downcast_ref` methods, to test if the contained value is of a given type,
//! and to get a reference to the inner value as a type. As `&mut dyn Any`, there
//! is also the `downcast_mut` method, for getting a mutable reference to the
//! inner value. `Box<dyn Any>` adds the `downcast` method, which attempts to
//! convert to a `Box<T>`. See the [`Box`] documentation for the full details.
//!
//! Note that `&dyn Any` is limited to testing whether a value is of a specified
//! concrete type, and cannot be used to test whether a type implements a trait.
//!
//! [`Box`]: ../../std/boxed/struct.Box.html
//!
//! # Examples
//!
//! Consider a situation where we want to log out a value passed to a function.
//! We know the value we're working on implements Debug, but we don't know its
//! concrete type. We want to give special treatment to certain types: in this
//! case printing out the length of String values prior to their value.
//! We don't know the concrete type of our value at compile time, so we need to
//! use runtime reflection instead.
//!
//! ```rust
//! use std::fmt::Debug;
//! use std::any::Any;
//!
//! // Logger function for any type that implements Debug.
//! fn log<T: Any + Debug>(value: &T) {
//! let value_any = value as &dyn Any;
//!
//! // Try to convert our value to a `String`. If successful, we want to
//! // output the `String`'s length as well as its value. If not, it's a
//! // different type: just print it out unadorned.
//! match value_any.downcast_ref::<String>() {
//! Some(as_string) => {
//! println!("String ({}): {}", as_string.len(), as_string);
//! }
//! None => {
//! println!("{:?}", value);
//! }
//! }
//! }
//!
//! // This function wants to log its parameter out prior to doing work with it.
//! fn do_work<T: Any + Debug>(value: &T) {
//! log(value);
//! // ...do some other work
//! }
//!
//! fn main() {
//! let my_string = "Hello World".to_string();
//! do_work(&my_string);
//!
//! let my_i8: i8 = 100;
//! do_work(&my_i8);
//! }
//! ```
#![stable(feature = "rust1", since = "1.0.0")]
use crate::fmt;
use crate::intrinsics;
///////////////////////////////////////////////////////////////////////////////
// Any trait
///////////////////////////////////////////////////////////////////////////////
/// A type to emulate dynamic typing.
///
/// Most types implement `Any`. However, any type which contains a non-`'static` reference does not.
/// See the [module-level documentation][mod] for more details.
///
/// [mod]: index.html
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Any: 'static {
/// Gets the `TypeId` of `self`.
///
/// # Examples
///
/// ```
/// use std::any::{Any, TypeId};
///
/// fn is_string(s: &dyn Any) -> bool {
/// TypeId::of::<String>() == s.type_id()
/// }
///
/// assert_eq!(is_string(&0), false);
/// assert_eq!(is_string(&"cookie monster".to_string()), true);
/// ```
#[stable(feature = "get_type_id", since = "1.34.0")]
fn type_id(&self) -> TypeId;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: 'static + ?Sized > Any for T {
fn type_id(&self) -> TypeId { TypeId::of::<T>() }
}
///////////////////////////////////////////////////////////////////////////////
// Extension methods for Any trait objects.
///////////////////////////////////////////////////////////////////////////////
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for dyn Any {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("Any")
}
}
// Ensure that the result of e.g., joining a thread can be printed and
// hence used with `unwrap`. May eventually no longer be needed if
// dispatch works with upcasting.
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for dyn Any + Send {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("Any")
}
}
#[stable(feature = "any_send_sync_methods", since = "1.28.0")]
impl fmt::Debug for dyn Any + Send + Sync {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("Any")
}
}
impl dyn Any {
/// Returns `true` if the boxed type is the same as `T`.
///
/// # Examples
///
/// ```
/// use std::any::Any;
///
/// fn is_string(s: &dyn Any) {
/// if s.is::<String>() {
/// println!("It's a string!");
/// } else {
/// println!("Not a string...");
/// }
/// }
///
/// is_string(&0);
/// is_string(&"cookie monster".to_string());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn is<T: Any>(&self) -> bool {
// Get `TypeId` of the type this function is instantiated with.
let t = TypeId::of::<T>();
// Get `TypeId` of the type in the trait object.
let concrete = self.type_id();
// Compare both `TypeId`s on equality.
t == concrete
}
/// Returns some reference to the boxed value if it is of type `T`, or
/// `None` if it isn't.
///
/// # Examples
///
/// ```
/// use std::any::Any;
///
/// fn print_if_string(s: &dyn Any) {
/// if let Some(string) = s.downcast_ref::<String>() {
/// println!("It's a string({}): '{}'", string.len(), string);
/// } else {
/// println!("Not a string...");
/// }
/// }
///
/// print_if_string(&0);
/// print_if_string(&"cookie monster".to_string());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
if self.is::<T>() {
unsafe {
Some(&*(self as *const dyn Any as *const T))
}
} else |
}
/// Returns some mutable reference to the boxed value if it is of type `T`, or
/// `None` if it isn't.
///
/// # Examples
///
/// ```
/// use std::any::Any;
///
/// fn modify_if_u32(s: &mut dyn Any) {
/// if let Some(num) = s.downcast_mut::<u32>() {
/// *num = 42;
/// }
/// }
///
/// let mut x = 10u32;
/// let mut s = "starlord".to_string();
///
/// modify_if_u32(&mut x);
/// modify_if_u32(&mut s);
///
/// assert_eq!(x, 42);
/// assert_eq!(&s, "starlord");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn downcast_mut<T: Any>(&mut self) -> Option<&mut T> {
if self.is::<T>() {
unsafe {
Some(&mut *(self as *mut dyn Any as *mut T))
}
} else {
None
}
}
}
impl dyn Any+Send {
/// Forwards to the method defined on the type `Any`.
///
/// # Examples
///
/// ```
/// use std::any::Any;
///
/// fn is_string(s: &(dyn Any + Send)) {
/// if s.is::<String>() {
/// println!("It's a string!");
/// } else {
/// println!("Not a string...");
/// }
/// }
///
/// is_string(&0);
/// is_string(&"cookie monster".to_string());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn is<T: Any>(&self) -> bool {
Any::is::<T>(self)
}
/// Forwards to the method defined on the type `Any`.
///
/// # Examples
///
/// ```
/// use std::any::Any;
///
/// fn print_if_string(s: &(dyn Any + Send)) {
/// if let Some(string) = s.downcast_ref::<String>() {
/// println!("It's a string({}): '{}'", string.len(), string);
/// } else {
/// println!("Not a string...");
/// }
/// }
///
/// print_if_string(&0);
/// print_if_string(&"cookie monster".to_string());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
Any::downcast_ref::<T>(self)
}
/// Forwards to the method defined on the type `Any`.
///
/// # Examples
///
/// ```
/// use std::any::Any;
///
/// fn modify_if_u32(s: &mut (dyn Any + Send)) {
/// if let Some(num) = s.downcast_mut::<u32>() {
/// *num = 42;
/// }
/// }
///
/// let mut x = 10u32;
/// let mut s = "starlord".to_string();
///
/// modify_if_u32(&mut x);
/// modify_if_u32(&mut s);
///
/// assert_eq!(x, 42);
/// assert_eq!(&s, "starlord");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn downcast_mut<T: Any>(&mut self) -> Option<&mut T> {
Any::downcast_mut::<T>(self)
}
}
impl dyn Any+Send+Sync {
/// Forwards to the method defined on the type `Any`.
///
/// # Examples
///
/// ```
/// use std::any::Any;
///
/// fn is_string(s: &(dyn Any + Send + Sync)) {
/// if s.is::<String>() {
/// println!("It's a string!");
/// } else {
/// println!("Not a string...");
/// }
/// }
///
/// is_string(&0);
/// is_string(&"cookie monster".to_string());
/// ```
#[stable(feature = "any_send_sync_methods", since = "1.28.0")]
#[inline]
pub fn is<T: Any>(&self) -> bool {
Any::is::<T>(self)
}
/// Forwards to the method defined on the type `Any`.
///
/// # Examples
///
/// ```
/// use std::any::Any;
///
/// fn print_if_string(s: &(dyn Any + Send + Sync)) {
/// if let Some(string) = s.downcast_ref::<String>() {
/// println!("It's a string({}): '{}'", string.len(), string);
/// } else {
/// println!("Not a string...");
/// }
/// }
///
/// print_if_string(&0);
/// print_if_string(&"cookie monster".to_string());
/// ```
#[stable(feature = "any_send_sync_methods", since = "1.28.0")]
#[inline]
pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
Any::downcast_ref::<T>(self)
}
/// Forwards to the method defined on the type `Any`.
///
/// # Examples
///
/// ```
/// use std::any::Any;
///
/// fn modify_if_u32(s: &mut (dyn Any + Send + Sync)) {
/// if let Some(num) = s.downcast_mut::<u32>() {
/// *num = 42;
/// }
/// }
///
/// let mut x = 10u32;
/// let mut s = "starlord".to_string();
///
/// modify_if_u32(&mut x);
/// modify_if_u32(&mut s);
///
/// assert_eq!(x, 42);
/// assert_eq!(&s, "starlord");
/// ```
#[stable(feature = "any_send_sync_methods", since = "1.28.0")]
#[inline]
pub fn downcast_mut<T: Any>(&mut self) -> Option<&mut T> {
Any::downcast_mut::<T>(self)
}
}
///////////////////////////////////////////////////////////////////////////////
// TypeID and its methods
///////////////////////////////////////////////////////////////////////////////
/// A `TypeId` represents a globally unique identifier for a type.
///
/// Each `TypeId` is an opaque object which does not allow inspection of what's
/// inside but does allow basic operations such as cloning, comparison,
/// printing, and showing.
///
/// A `TypeId` is currently only available for types which ascribe to `'static`,
/// but this limitation may be removed in the future.
///
/// While `TypeId` implements `Hash`, `PartialOrd`, and `Ord`, it is worth
/// noting that the hashes and ordering will vary between Rust releases. Beware
/// of relying on them inside of your code!
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct TypeId {
t: u64,
}
impl TypeId {
/// Returns the `TypeId` of the type this generic function has been
/// instantiated with.
///
/// # Examples
///
/// ```
/// use std::any::{Any, TypeId};
///
/// fn is_string<T: ?Sized + Any>(_s: &T) -> bool {
/// TypeId::of::<String>() == TypeId::of::<T>()
/// }
///
/// assert_eq!(is_string(&0), false);
/// assert_eq!(is_string(&"cookie monster".to_string()), true);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature="const_type_id")]
pub const fn of<T: ?Sized + 'static>() -> TypeId {
TypeId {
t: unsafe { intrinsics::type_id::<T>() },
}
}
}
/// Returns the name of a type as a string slice.
///
/// # Note
///
/// This is intended for diagnostic use. The exact contents and format of the
/// string are not specified, other than being a best-effort description of the
/// type. For example, `type_name::<Option<String>>()` could return the
/// `"Option<String>"` or `"std::option::Option<std::string::String>"`, but not
/// `"foobar"`. In addition, the output may change between versions of the
/// compiler.
///
/// The type name should not be considered a unique identifier of a type;
/// multiple types may share the same type name.
///
/// The current implementation uses the same infrastructure as compiler
/// diagnostics and debuginfo, but this is not guaranteed.
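///
/// # Examples
///
/// A minimal illustration; the exact string printed is a best-effort,
/// compiler-dependent description:
///
/// ```
/// println!("{}", std::any::type_name::<Option<String>>());
/// ```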
#[stable(feature = "type_name", since = "1.38.0")]
#[rustc_const_unstable(feature = "const_type_name")]
pub const fn type_name<T: ?Sized>() -> &'static str {
intrinsics::type_name::<T>()
}
| {
None
} |
lib.rs | // Copyright 2018-2019 Benjamin Fry <[email protected]>
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! Postgres extension library for Rust.
#![warn(missing_docs)]
use std::mem;
use std::os::raw::c_int;
use std::sync::atomic::compiler_fence;
use std::sync::atomic::Ordering;
pub mod pg_alloc;
pub mod pg_sys;
#[macro_use]
pub mod pg_bool;
pub mod pg_datum;
pub mod pg_error;
pub mod pg_fdw;
pub mod pg_type;
pub mod log;
pub mod native;
/// A macro for marking a library compatible with the Postgres extension framework.
///
/// This macro was initially inspired from the `pg_module` macro in https://github.com/thehydroimpulse/postgres-extension.rs
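///
/// # Example
///
/// A sketch of the expected call site, assuming the generated `pg_sys`
/// bindings expose a `PG_VERSION_NUM` constant (as bindgen normally does):
///
/// ```ignore
/// pg_magic!(version: pg_sys::PG_VERSION_NUM);
/// ```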
#[macro_export]
macro_rules! pg_magic {
(version: $vers:expr) => {
#[no_mangle]
#[allow(non_snake_case)]
#[allow(unused)]
#[link_name = "Pg_magic_func"]
pub extern "C" fn Pg_magic_func() -> &'static pg_extend::pg_sys::Pg_magic_struct {
use pg_extend::{pg_sys, register_panic_handler};
use std::mem::size_of;
use std::os::raw::c_int;
const my_magic: pg_extend::pg_sys::Pg_magic_struct = pg_sys::Pg_magic_struct {
len: size_of::<pg_sys::Pg_magic_struct>() as c_int,
version: $vers as std::os::raw::c_int / 100,
funcmaxargs: pg_sys::FUNC_MAX_ARGS as c_int,
indexmaxkeys: pg_sys::INDEX_MAX_KEYS as c_int,
namedatalen: pg_sys::NAMEDATALEN as c_int,
float4byval: pg_sys::USE_FLOAT4_BYVAL as c_int,
float8byval: pg_sys::USE_FLOAT8_BYVAL as c_int,
};
// TODO: is this a good idea here?
// register panic_handler
register_panic_handler();
// return the magic
&my_magic
}
};
}
#[cfg(feature = "postgres-12")]
type FunctionCallInfoData = pg_sys::FunctionCallInfoBaseData;
#[cfg(not(feature = "postgres-12"))]
type FunctionCallInfoData = pg_sys::FunctionCallInfoData;
/// Returns an iterator of argument Datums
pub fn get_args<'a>(
func_call_info: &'a FunctionCallInfoData,
) -> impl 'a + Iterator<Item = Option<pg_sys::Datum>> {
let num_args = func_call_info.nargs as usize;
// PostgreSQL 12+: Convert from pg_sys::NullableDatum
#[cfg(feature = "postgres-12")]
return unsafe { func_call_info.args.as_slice(num_args) }
.iter()
.map(|nullable| {
if nullable.isnull {
None
} else {
Some(nullable.value)
}
});
// Older versions store two separate arrays for 'isnull' and datums
#[cfg(not(feature = "postgres-12"))]
return {
let args = &func_call_info.arg[..num_args];
let args_null = &func_call_info.argnull[..num_args];
args.iter().zip(args_null.iter()).map(|(value, isnull)| {
if pg_bool::Bool::from(*isnull).into() {
None
} else {
Some(*value)
}
})
};
}
/// Information for a longjmp
struct | {
jump_value: c_int,
}
/// This will replace the current panic_handler
pub fn register_panic_handler() {
use std::panic;
    // Set (and replace the existing) panic handler; this will tell Postgres that the call failed.
    // A level of Fatal will force the DB connection to be killed.
panic::set_hook(Box::new(|info| {
// downcast info, check if it's the value we need.
// this must check if the panic was due to a longjmp
        // the fence is to make sure the longjmp is not reordered.
compiler_fence(Ordering::SeqCst);
if let Some(panic_context) = info.payload().downcast_ref::<JumpContext>() {
            // WARNING: do not set this level above Notice (ERROR, FATAL, PANIC), as it will cause
// the following longjmp to execute.
notice!("continuing longjmp: {}", info);
// the panic came from a pg longjmp... so unwrap it and rethrow
unsafe {
pg_sys_longjmp(
pg_sys::PG_exception_stack as *mut _,
panic_context.jump_value,
);
}
} else {
// error level will cause a longjmp in Postgres
error!("panic in Rust extension: {}", info);
}
unreachable!("all above statements should have cause a longjmp to Postgres");
}));
}
cfg_if::cfg_if! {
if #[cfg(windows)] {
unsafe fn pg_sys_longjmp(_buf: *mut pg_sys::_JBTYPE, _value: ::std::os::raw::c_int) {
pg_sys::longjmp(_buf, _value);
}
} else if #[cfg(target_os = "macos")] {
unsafe fn pg_sys_longjmp(_buf: *mut c_int, _value: ::std::os::raw::c_int) {
pg_sys::siglongjmp(_buf, _value);
}
} else if #[cfg(unix)] {
unsafe fn pg_sys_longjmp(_buf: *mut pg_sys::__jmp_buf_tag, _value: ::std::os::raw::c_int) {
pg_sys::siglongjmp(_buf, _value);
}
}
}
/// Provides a barrier between Rust and Postgres' usage of the C set/longjmp
///
/// In the case of a longjmp being caught, this will convert that to a panic. For this to work
/// properly, there must be a Rust panic handler (see `crate::register_panic_handler`).
/// If the `pg_extern` attribute macro is used for exposing Rust functions to Postgres, then
/// this is already handled.
///
/// See the man pages for info on setjmp http://man7.org/linux/man-pages/man3/setjmp.3.html
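///
/// A minimal sketch of the intended call pattern; `call_into_postgres_c` is a
/// hypothetical placeholder for any C call that might `elog(ERROR, ...)`:
///
/// ```ignore
/// let value = unsafe { guard_pg(|| call_into_postgres_c()) };
/// ```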
#[cfg(unix)]
#[inline(never)]
pub(crate) unsafe fn guard_pg<R, F: FnOnce() -> R>(f: F) -> R {
// setup the check protection
let original_exception_stack: *mut pg_sys::sigjmp_buf = pg_sys::PG_exception_stack;
let mut local_exception_stack: mem::MaybeUninit<pg_sys::sigjmp_buf> =
mem::MaybeUninit::uninit();
let jumped = pg_sys::sigsetjmp(
        // grab a mutable reference, cast to a mutable pointer, then cast to the expected erased pointer type
local_exception_stack.as_mut_ptr() as *mut pg_sys::sigjmp_buf as *mut _,
1,
);
// now that we have the local_exception_stack, we set that for any PG longjmps...
if jumped != 0 {
notice!("PG longjmped: {}", jumped);
pg_sys::PG_exception_stack = original_exception_stack;
        // The C side panicked! Hand control to the Rust panic handler
compiler_fence(Ordering::SeqCst);
panic!(JumpContext { jump_value: jumped });
}
// replace the exception stack with ours to jump to the above point
pg_sys::PG_exception_stack = local_exception_stack.as_mut_ptr() as *mut _;
// enforce that the setjmp is not reordered, though that's probably unlikely...
compiler_fence(Ordering::SeqCst);
let result = f();
compiler_fence(Ordering::SeqCst);
pg_sys::PG_exception_stack = original_exception_stack;
result
}
/// Provides a barrier between Rust and Postgres' usage of the C set/longjmp
///
/// In the case of a longjmp being caught, this will convert that to a panic. For this to work
/// properly, there must be a Rust panic handler (see `crate::register_panic_handler`).
/// If the `pg_extern` attribute macro is used for exposing Rust functions to Postgres, then
/// this is already handled.
///
/// See the man pages for info on setjmp http://man7.org/linux/man-pages/man3/setjmp.3.html
#[cfg(windows)]
#[inline(never)]
pub(crate) unsafe fn guard_pg<R, F: FnOnce() -> R>(f: F) -> R {
// setup the check protection
let original_exception_stack: *mut pg_sys::jmp_buf = pg_sys::PG_exception_stack;
let mut local_exception_stack: mem::MaybeUninit<pg_sys::jmp_buf> = mem::MaybeUninit::uninit();
let jumped = pg_sys::_setjmp(
        // grab a mutable reference, cast to a mutable pointer, then cast to the expected erased pointer type
local_exception_stack.as_mut_ptr() as *mut pg_sys::jmp_buf as *mut _,
);
// now that we have the local_exception_stack, we set that for any PG longjmps...
if jumped != 0 {
notice!("PG longjmped: {}", jumped);
pg_sys::PG_exception_stack = original_exception_stack;
        // The C side panicked! Hand control to the Rust panic handler
compiler_fence(Ordering::SeqCst);
panic!(JumpContext { jump_value: jumped });
}
// replace the exception stack with ours to jump to the above point
pg_sys::PG_exception_stack = local_exception_stack.as_mut_ptr() as *mut _;
// enforce that the setjmp is not reordered, though that's probably unlikely...
compiler_fence(Ordering::SeqCst);
let result = f();
compiler_fence(Ordering::SeqCst);
pg_sys::PG_exception_stack = original_exception_stack;
result
}
/// auto generate function to output a SQL create statement for the function
///
/// Until `concat_idents!` stabilizes, this requires the name to be passed with the appended string
/// `_pg_create_stmt`
///
/// # Example
///
/// Create a binary for the library, such as `bin.rs`, and this will generate a `main()` function in it.
///
/// ```text
/// extern crate pg_extend;
///
/// use pg_extend::pg_create_stmt_bin;
///
/// pg_create_stmt_bin!(
/// add_one_pg_create_stmt,
/// add_big_one_pg_create_stmt,
/// add_small_one_pg_create_stmt,
/// add_together_pg_create_stmt
/// );
/// ```
#[macro_export]
macro_rules! pg_create_stmt_bin {
( $( $func:ident ),* ) => {
use std::env;
        // because the lib is a cdylib... maybe there's a better way?
mod lib;
#[cfg(target_os = "linux")]
const DYLIB_EXT: &str = "so";
#[cfg(target_os = "macos")]
const DYLIB_EXT: &str = "dylib";
#[cfg(target_os = "windows")]
const DYLIB_EXT: &str = "dll";
fn main() {
const LIB_NAME: &str = env!("CARGO_PKG_NAME");
let lib_path = env::args().nth(1).unwrap_or_else(|| format!("target/release/lib{}.{}", LIB_NAME, DYLIB_EXT));
$( println!("{}", lib::$func(&lib_path)); )*
}
};
}
| JumpContext |
redundant_clone.rs | use crate::utils::{
fn_has_unsatisfiable_preds, has_drop, is_copy, is_type_diagnostic_item, match_def_path, match_type, paths,
snippet_opt, span_lint_hir, span_lint_hir_and_then, walk_ptrs_ty_depth,
};
use if_chain::if_chain;
use rustc_data_structures::{fx::FxHashMap, transitive_relation::TransitiveRelation};
use rustc_errors::Applicability;
use rustc_hir::intravisit::FnKind;
use rustc_hir::{def_id, Body, FnDecl, HirId};
use rustc_index::bit_set::{BitSet, HybridBitSet};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::mir::{
self, traversal,
visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor as _},
};
use rustc_middle::ty::{self, fold::TypeVisitor, Ty};
use rustc_mir::dataflow::BottomValue;
use rustc_mir::dataflow::{Analysis, AnalysisDomain, GenKill, GenKillAnalysis, ResultsCursor};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::source_map::{BytePos, Span};
use std::convert::TryFrom;
macro_rules! unwrap_or_continue {
($x:expr) => {
match $x {
Some(x) => x,
None => continue,
}
};
}
declare_clippy_lint! {
/// **What it does:** Checks for a redundant `clone()` (and its relatives) which clones an owned
/// value that is going to be dropped without further use.
///
/// **Why is this bad?** It is not always possible for the compiler to eliminate useless
/// allocations and deallocations generated by redundant `clone()`s.
///
/// **Known problems:**
///
/// False-negatives: analysis performed by this lint is conservative and limited.
///
/// **Example:**
/// ```rust
/// # use std::path::Path;
/// # #[derive(Clone)]
/// # struct Foo;
/// # impl Foo {
/// # fn new() -> Self { Foo {} }
/// # }
/// # fn call(x: Foo) {}
/// {
/// let x = Foo::new();
/// call(x.clone());
/// call(x.clone()); // this can just pass `x`
/// }
///
/// ["lorem", "ipsum"].join(" ").to_string();
///
/// Path::new("/a/b").join("c").to_path_buf();
/// ```
pub REDUNDANT_CLONE,
perf,
"`clone()` of an owned value that is going to be dropped immediately"
}
declare_lint_pass!(RedundantClone => [REDUNDANT_CLONE]);
impl<'tcx> LateLintPass<'tcx> for RedundantClone {
#[allow(clippy::too_many_lines)]
fn check_fn(
&mut self,
cx: &LateContext<'tcx>,
_: FnKind<'tcx>,
_: &'tcx FnDecl<'_>,
body: &'tcx Body<'_>,
_: Span,
_: HirId,
) {
let def_id = cx.tcx.hir().body_owner_def_id(body.id());
// Building MIR for `fn`s with unsatisfiable preds results in ICE.
if fn_has_unsatisfiable_preds(cx, def_id.to_def_id()) {
return;
}
let mir = cx.tcx.optimized_mir(def_id.to_def_id());
let maybe_storage_live_result = MaybeStorageLive
.into_engine(cx.tcx, mir, def_id.to_def_id())
.iterate_to_fixpoint()
.into_results_cursor(mir);
let mut possible_borrower = {
let mut vis = PossibleBorrowerVisitor::new(cx, mir);
vis.visit_body(&mir);
vis.into_map(cx, maybe_storage_live_result)
};
for (bb, bbdata) in mir.basic_blocks().iter_enumerated() {
let terminator = bbdata.terminator();
if terminator.source_info.span.from_expansion() {
continue;
}
// Give up on loops
if terminator.successors().any(|s| *s == bb) {
continue;
}
let (fn_def_id, arg, arg_ty, clone_ret) =
unwrap_or_continue!(is_call_with_ref_arg(cx, mir, &terminator.kind));
let from_borrow = match_def_path(cx, fn_def_id, &paths::CLONE_TRAIT_METHOD)
|| match_def_path(cx, fn_def_id, &paths::TO_OWNED_METHOD)
|| (match_def_path(cx, fn_def_id, &paths::TO_STRING_METHOD)
&& is_type_diagnostic_item(cx, arg_ty, sym!(string_type)));
let from_deref = !from_borrow
&& (match_def_path(cx, fn_def_id, &paths::PATH_TO_PATH_BUF)
|| match_def_path(cx, fn_def_id, &paths::OS_STR_TO_OS_STRING));
if !from_borrow && !from_deref {
continue;
}
if let ty::Adt(ref def, _) = arg_ty.kind {
if match_def_path(cx, def.did, &paths::MEM_MANUALLY_DROP) {
continue;
}
} | let loc = mir::Location {
block: bb,
statement_index: bbdata.statements.len(),
};
// `Local` to be cloned, and a local of `clone` call's destination
let (local, ret_local) = if from_borrow {
// `res = clone(arg)` can be turned into `res = move arg;`
// if `arg` is the only borrow of `cloned` at this point.
if cannot_move_out || !possible_borrower.only_borrowers(&[arg], cloned, loc) {
continue;
}
(cloned, clone_ret)
} else {
// `arg` is a reference as it is `.deref()`ed in the previous block.
// Look into the predecessor block and find out the source of deref.
let ps = &mir.predecessors()[bb];
if ps.len() != 1 {
continue;
}
let pred_terminator = mir[ps[0]].terminator();
// receiver of the `deref()` call
let (pred_arg, deref_clone_ret) = if_chain! {
if let Some((pred_fn_def_id, pred_arg, pred_arg_ty, res)) =
is_call_with_ref_arg(cx, mir, &pred_terminator.kind);
if res == cloned;
if match_def_path(cx, pred_fn_def_id, &paths::DEREF_TRAIT_METHOD);
if match_type(cx, pred_arg_ty, &paths::PATH_BUF)
|| match_type(cx, pred_arg_ty, &paths::OS_STRING);
then {
(pred_arg, res)
} else {
continue;
}
};
let (local, cannot_move_out) =
unwrap_or_continue!(find_stmt_assigns_to(cx, mir, pred_arg, true, ps[0]));
let loc = mir::Location {
block: bb,
statement_index: mir.basic_blocks()[bb].statements.len(),
};
// This can be turned into `res = move local` if `arg` and `cloned` are not borrowed
// at the last statement:
//
// ```
// pred_arg = &local;
// cloned = deref(pred_arg);
// arg = &cloned;
// StorageDead(pred_arg);
// res = to_path_buf(cloned);
// ```
if cannot_move_out || !possible_borrower.only_borrowers(&[arg, cloned], local, loc) {
continue;
}
(local, deref_clone_ret)
};
let is_temp = mir.local_kind(ret_local) == mir::LocalKind::Temp;
// 1. `local` can be moved out if it is not used later.
// 2. If `ret_local` is a temporary and is neither consumed nor mutated, we can remove this `clone`
// call anyway.
let (used, consumed_or_mutated) = traversal::ReversePostorder::new(&mir, bb).skip(1).fold(
(false, !is_temp),
|(used, consumed), (tbb, tdata)| {
// Short-circuit
if (used && consumed) ||
// Give up on loops
tdata.terminator().successors().any(|s| *s == bb)
{
return (true, true);
}
let mut vis = LocalUseVisitor {
used: (local, false),
consumed_or_mutated: (ret_local, false),
};
vis.visit_basic_block_data(tbb, tdata);
(used || vis.used.1, consumed || vis.consumed_or_mutated.1)
},
);
if !used || !consumed_or_mutated {
let span = terminator.source_info.span;
let scope = terminator.source_info.scope;
let node = mir.source_scopes[scope]
.local_data
.as_ref()
.assert_crate_local()
.lint_root;
if_chain! {
if let Some(snip) = snippet_opt(cx, span);
if let Some(dot) = snip.rfind('.');
then {
let sugg_span = span.with_lo(
span.lo() + BytePos(u32::try_from(dot).unwrap())
);
let mut app = Applicability::MaybeIncorrect;
let mut call_snip = &snip[dot + 1..];
// Machine applicable when `call_snip` looks like `foobar()`
if call_snip.ends_with("()") {
call_snip = call_snip[..call_snip.len()-2].trim();
if call_snip.as_bytes().iter().all(|b| b.is_ascii_alphabetic() || *b == b'_') {
app = Applicability::MachineApplicable;
}
}
span_lint_hir_and_then(cx, REDUNDANT_CLONE, node, sugg_span, "redundant clone", |diag| {
diag.span_suggestion(
sugg_span,
"remove this",
String::new(),
app,
);
if used {
diag.span_note(
span,
"cloned value is neither consumed nor mutated",
);
} else {
diag.span_note(
span.with_hi(span.lo() + BytePos(u32::try_from(dot).unwrap())),
"this value is dropped without further use",
);
}
});
} else {
span_lint_hir(cx, REDUNDANT_CLONE, node, span, "redundant clone");
}
}
}
}
}
}
/// If `kind` is `y = func(x: &T)` where `T: !Copy`, returns `(DefId of func, x, T, y)`.
fn is_call_with_ref_arg<'tcx>(
cx: &LateContext<'tcx>,
mir: &'tcx mir::Body<'tcx>,
kind: &'tcx mir::TerminatorKind<'tcx>,
) -> Option<(def_id::DefId, mir::Local, Ty<'tcx>, mir::Local)> {
if_chain! {
if let mir::TerminatorKind::Call { func, args, destination, .. } = kind;
if args.len() == 1;
if let mir::Operand::Move(mir::Place { local, .. }) = &args[0];
if let ty::FnDef(def_id, _) = func.ty(&*mir, cx.tcx).kind;
if let (inner_ty, 1) = walk_ptrs_ty_depth(args[0].ty(&*mir, cx.tcx));
if !is_copy(cx, inner_ty);
then {
Some((def_id, *local, inner_ty, destination.as_ref().map(|(dest, _)| dest)?.as_local()?))
} else {
None
}
}
}
type CannotMoveOut = bool;
/// Finds the first `to = (&)from`, and returns
/// ``Some((from, whether `from` cannot be moved out))``.
fn find_stmt_assigns_to<'tcx>(
cx: &LateContext<'tcx>,
mir: &mir::Body<'tcx>,
to_local: mir::Local,
by_ref: bool,
bb: mir::BasicBlock,
) -> Option<(mir::Local, CannotMoveOut)> {
let rvalue = mir.basic_blocks()[bb].statements.iter().rev().find_map(|stmt| {
if let mir::StatementKind::Assign(box (mir::Place { local, .. }, v)) = &stmt.kind {
return if *local == to_local { Some(v) } else { None };
}
None
})?;
match (by_ref, &*rvalue) {
(true, mir::Rvalue::Ref(_, _, place)) | (false, mir::Rvalue::Use(mir::Operand::Copy(place))) => {
base_local_and_movability(cx, mir, *place)
},
(false, mir::Rvalue::Ref(_, _, place)) => {
if let [mir::ProjectionElem::Deref] = place.as_ref().projection {
base_local_and_movability(cx, mir, *place)
} else {
None
}
},
_ => None,
}
}
/// Extracts and returns the undermost base `Local` of given `place`. Returns `place` itself
/// if it is already a `Local`.
///
/// Also reports whether given `place` cannot be moved out.
fn base_local_and_movability<'tcx>(
cx: &LateContext<'tcx>,
mir: &mir::Body<'tcx>,
place: mir::Place<'tcx>,
) -> Option<(mir::Local, CannotMoveOut)> {
use rustc_middle::mir::PlaceRef;
// Dereference. You cannot move things out from a borrowed value.
let mut deref = false;
// Accessing a field of an ADT that has `Drop`. Moving the field out will cause E0509.
let mut field = false;
// If projection is a slice index then clone can be removed only if the
// underlying type implements Copy
let mut slice = false;
let PlaceRef { local, mut projection } = place.as_ref();
while let [base @ .., elem] = projection {
projection = base;
deref |= matches!(elem, mir::ProjectionElem::Deref);
field |= matches!(elem, mir::ProjectionElem::Field(..))
&& has_drop(cx, mir::Place::ty_from(local, projection, &mir.local_decls, cx.tcx).ty);
slice |= matches!(elem, mir::ProjectionElem::Index(..))
&& !is_copy(cx, mir::Place::ty_from(local, projection, &mir.local_decls, cx.tcx).ty);
}
Some((local, deref || field || slice))
}
struct LocalUseVisitor {
used: (mir::Local, bool),
consumed_or_mutated: (mir::Local, bool),
}
impl<'tcx> mir::visit::Visitor<'tcx> for LocalUseVisitor {
fn visit_basic_block_data(&mut self, block: mir::BasicBlock, data: &mir::BasicBlockData<'tcx>) {
let statements = &data.statements;
for (statement_index, statement) in statements.iter().enumerate() {
self.visit_statement(statement, mir::Location { block, statement_index });
}
self.visit_terminator(
data.terminator(),
mir::Location {
block,
statement_index: statements.len(),
},
);
}
fn visit_place(&mut self, place: &mir::Place<'tcx>, ctx: PlaceContext, _: mir::Location) {
let local = place.local;
if local == self.used.0
&& !matches!(ctx, PlaceContext::MutatingUse(MutatingUseContext::Drop) | PlaceContext::NonUse(_))
{
self.used.1 = true;
}
if local == self.consumed_or_mutated.0 {
match ctx {
PlaceContext::NonMutatingUse(NonMutatingUseContext::Move)
| PlaceContext::MutatingUse(MutatingUseContext::Borrow) => {
self.consumed_or_mutated.1 = true;
},
_ => {},
}
}
}
}
/// Determines liveness of each local purely based on `StorageLive`/`Dead`.
#[derive(Copy, Clone)]
struct MaybeStorageLive;
impl<'tcx> AnalysisDomain<'tcx> for MaybeStorageLive {
type Idx = mir::Local;
const NAME: &'static str = "maybe_storage_live";
fn bits_per_block(&self, body: &mir::Body<'tcx>) -> usize {
body.local_decls.len()
}
fn initialize_start_block(&self, body: &mir::Body<'tcx>, state: &mut BitSet<Self::Idx>) {
for arg in body.args_iter() {
state.insert(arg);
}
}
}
impl<'tcx> GenKillAnalysis<'tcx> for MaybeStorageLive {
fn statement_effect(&self, trans: &mut impl GenKill<Self::Idx>, stmt: &mir::Statement<'tcx>, _: mir::Location) {
match stmt.kind {
mir::StatementKind::StorageLive(l) => trans.gen(l),
mir::StatementKind::StorageDead(l) => trans.kill(l),
_ => (),
}
}
fn terminator_effect(
&self,
_trans: &mut impl GenKill<Self::Idx>,
_terminator: &mir::Terminator<'tcx>,
_loc: mir::Location,
) {
}
fn call_return_effect(
&self,
_in_out: &mut impl GenKill<Self::Idx>,
_block: mir::BasicBlock,
_func: &mir::Operand<'tcx>,
_args: &[mir::Operand<'tcx>],
_return_place: mir::Place<'tcx>,
) {
// Nothing to do when a call returns successfully
}
}
impl BottomValue for MaybeStorageLive {
/// bottom = dead
const BOTTOM_VALUE: bool = false;
}
/// Collects the possible borrowers of each local.
/// For example, `b = &a; c = &a;` will make `b` and (transitively) `c`
/// possible borrowers of `a`.
struct PossibleBorrowerVisitor<'a, 'tcx> {
possible_borrower: TransitiveRelation<mir::Local>,
body: &'a mir::Body<'tcx>,
cx: &'a LateContext<'tcx>,
}
impl<'a, 'tcx> PossibleBorrowerVisitor<'a, 'tcx> {
fn new(cx: &'a LateContext<'tcx>, body: &'a mir::Body<'tcx>) -> Self {
Self {
possible_borrower: TransitiveRelation::default(),
cx,
body,
}
}
fn into_map(
self,
cx: &LateContext<'tcx>,
maybe_live: ResultsCursor<'tcx, 'tcx, MaybeStorageLive>,
) -> PossibleBorrowerMap<'a, 'tcx> {
let mut map = FxHashMap::default();
for row in (1..self.body.local_decls.len()).map(mir::Local::from_usize) {
if is_copy(cx, self.body.local_decls[row].ty) {
continue;
}
let borrowers = self.possible_borrower.reachable_from(&row);
if !borrowers.is_empty() {
let mut bs = HybridBitSet::new_empty(self.body.local_decls.len());
for &c in borrowers {
if c != mir::Local::from_usize(0) {
bs.insert(c);
}
}
if !bs.is_empty() {
map.insert(row, bs);
}
}
}
let bs = BitSet::new_empty(self.body.local_decls.len());
PossibleBorrowerMap {
map,
maybe_live,
bitset: (bs.clone(), bs),
}
}
}
impl<'a, 'tcx> mir::visit::Visitor<'tcx> for PossibleBorrowerVisitor<'a, 'tcx> {
fn visit_assign(&mut self, place: &mir::Place<'tcx>, rvalue: &mir::Rvalue<'_>, _location: mir::Location) {
let lhs = place.local;
match rvalue {
mir::Rvalue::Ref(_, _, borrowed) => {
self.possible_borrower.add(borrowed.local, lhs);
},
other => {
if !ContainsRegion.visit_ty(place.ty(&self.body.local_decls, self.cx.tcx).ty) {
return;
}
rvalue_locals(other, |rhs| {
if lhs != rhs {
self.possible_borrower.add(rhs, lhs);
}
});
},
}
}
fn visit_terminator(&mut self, terminator: &mir::Terminator<'_>, _loc: mir::Location) {
if let mir::TerminatorKind::Call {
args,
destination: Some((mir::Place { local: dest, .. }, _)),
..
} = &terminator.kind
{
// If the call returns something with lifetimes,
            // let's conservatively assume the returned value contains the lifetimes of all the arguments.
// For example, given `let y: Foo<'a> = foo(x)`, `y` is considered to be a possible borrower of `x`.
if !ContainsRegion.visit_ty(&self.body.local_decls[*dest].ty) {
return;
}
for op in args {
match op {
mir::Operand::Copy(p) | mir::Operand::Move(p) => {
self.possible_borrower.add(p.local, *dest);
},
_ => (),
}
}
}
}
}
struct ContainsRegion;
impl TypeVisitor<'_> for ContainsRegion {
fn visit_region(&mut self, _: ty::Region<'_>) -> bool {
true
}
}
fn rvalue_locals(rvalue: &mir::Rvalue<'_>, mut visit: impl FnMut(mir::Local)) {
use rustc_middle::mir::Rvalue::{Aggregate, BinaryOp, Cast, CheckedBinaryOp, Repeat, UnaryOp, Use};
let mut visit_op = |op: &mir::Operand<'_>| match op {
mir::Operand::Copy(p) | mir::Operand::Move(p) => visit(p.local),
_ => (),
};
match rvalue {
Use(op) | Repeat(op, _) | Cast(_, op, _) | UnaryOp(_, op) => visit_op(op),
Aggregate(_, ops) => ops.iter().for_each(visit_op),
BinaryOp(_, lhs, rhs) | CheckedBinaryOp(_, lhs, rhs) => {
visit_op(lhs);
visit_op(rhs);
},
_ => (),
}
}
/// Result of `PossibleBorrowerVisitor`.
struct PossibleBorrowerMap<'a, 'tcx> {
/// Mapping `Local -> its possible borrowers`
map: FxHashMap<mir::Local, HybridBitSet<mir::Local>>,
maybe_live: ResultsCursor<'a, 'tcx, MaybeStorageLive>,
// Caches to avoid allocation of `BitSet` on every query
bitset: (BitSet<mir::Local>, BitSet<mir::Local>),
}
impl PossibleBorrowerMap<'_, '_> {
/// Returns true if the set of borrowers of `borrowed` living at `at` matches with `borrowers`.
fn only_borrowers(&mut self, borrowers: &[mir::Local], borrowed: mir::Local, at: mir::Location) -> bool {
self.maybe_live.seek_after_primary_effect(at);
self.bitset.0.clear();
let maybe_live = &mut self.maybe_live;
if let Some(bitset) = self.map.get(&borrowed) {
for b in bitset.iter().filter(move |b| maybe_live.contains(*b)) {
self.bitset.0.insert(b);
}
} else {
return false;
}
self.bitset.1.clear();
for b in borrowers {
self.bitset.1.insert(*b);
}
self.bitset.0 == self.bitset.1
}
} |
// `{ cloned = &arg; clone(move cloned); }` or `{ cloned = &arg; to_path_buf(cloned); }`
let (cloned, cannot_move_out) = unwrap_or_continue!(find_stmt_assigns_to(cx, mir, arg, from_borrow, bb));
|
pipe.go | package core
import (
"time"
cfg "github.com/ColorPlatform/prism/config"
"github.com/ColorPlatform/prism/consensus"
"github.com/ColorPlatform/prism/crypto"
dbm "github.com/ColorPlatform/prism/libs/db"
"github.com/ColorPlatform/prism/libs/log"
mempl "github.com/ColorPlatform/prism/mempool"
"github.com/ColorPlatform/prism/p2p"
"github.com/ColorPlatform/prism/proxy"
sm "github.com/ColorPlatform/prism/state"
"github.com/ColorPlatform/prism/state/txindex"
"github.com/ColorPlatform/prism/types"
)
const (
// see README
defaultPerPage = 30
maxPerPage = 100
// SubscribeTimeout is the maximum time we wait to subscribe for an event.
// must be less than the server's write timeout (see rpcserver.DefaultConfig)
SubscribeTimeout = 5 * time.Second
)
//----------------------------------------------
// These interfaces are used by RPC and must be thread safe
type Consensus interface {
GetState() sm.State
GetValidators() (int64, []*types.Validator)
GetLastHeight() int64
GetRoundStateJSON() ([]byte, error)
GetRoundStateSimpleJSON() ([]byte, error)
}
type transport interface {
Listeners() []string
IsListening() bool
NodeInfo() p2p.NodeInfo
}
type peers interface {
DialPeersAsync(p2p.AddrBook, []string, bool) error
NumPeers() (outbound, inbound, dialig int)
Peers() p2p.IPeerSet
}
//----------------------------------------------
// These package level globals come with setters
// that are expected to be called only once, on startup
var (
// external, thread safe interfaces
proxyAppQuery proxy.AppConnQuery
// interfaces defined in types and above
stateDB dbm.DB
blockStore sm.BlockStore
evidencePool sm.EvidencePool
consensusState Consensus
p2pPeers peers
p2pTransport transport
// objects
pubKey crypto.PubKey
genDoc *types.GenesisDoc // cache the genesis structure
addrBook p2p.AddrBook
txIndexer txindex.TxIndexer
consensusReactor *consensus.ConsensusReactor
eventBus *types.EventBus // thread safe
mempool *mempl.Mempool
logger log.Logger
config cfg.RPCConfig
)
func SetStateDB(db dbm.DB) {
stateDB = db
}
func SetBlockStore(bs sm.BlockStore) |
func SetMempool(mem *mempl.Mempool) {
mempool = mem
}
func SetEvidencePool(evpool sm.EvidencePool) {
evidencePool = evpool
}
func SetConsensusState(cs Consensus) {
consensusState = cs
}
func SetP2PPeers(p peers) {
p2pPeers = p
}
func SetP2PTransport(t transport) {
p2pTransport = t
}
func SetPubKey(pk crypto.PubKey) {
pubKey = pk
}
func SetGenesisDoc(doc *types.GenesisDoc) {
genDoc = doc
}
func SetAddrBook(book p2p.AddrBook) {
addrBook = book
}
func SetProxyAppQuery(appConn proxy.AppConnQuery) {
proxyAppQuery = appConn
}
func SetTxIndexer(indexer txindex.TxIndexer) {
txIndexer = indexer
}
func SetConsensusReactor(conR *consensus.ConsensusReactor) {
consensusReactor = conR
}
func SetLogger(l log.Logger) {
logger = l
}
func SetEventBus(b *types.EventBus) {
eventBus = b
}
// SetConfig sets an RPCConfig.
func SetConfig(c cfg.RPCConfig) {
config = c
}
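// validatePage clamps the requested page number to the range [1, pages], where
// pages is derived from totalCount and perPage; a non-positive perPage
// short-circuits to page 1.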
func validatePage(page, perPage, totalCount int) int {
if perPage < 1 {
return 1
}
pages := ((totalCount - 1) / perPage) + 1
if page < 1 {
page = 1
} else if page > pages {
page = pages
}
return page
}
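// validatePerPage falls back to defaultPerPage for non-positive values and
// caps the result at maxPerPage.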
func validatePerPage(perPage int) int {
if perPage < 1 {
return defaultPerPage
} else if perPage > maxPerPage {
return maxPerPage
}
return perPage
}
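// validateSkipCount converts a page/perPage pair into the number of results to
// skip, never returning a negative offset.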
func validateSkipCount(page, perPage int) int {
skipCount := (page - 1) * perPage
if skipCount < 0 {
return 0
}
return skipCount
}
| {
blockStore = bs
} |
client_test.go | package stackdriver
import (
"fmt"
"testing"
"github.com/egnyte/ax/pkg/backend/common"
)
func TestAttributeDecoding(t *testing.T) |
func TestQueryToFilter(t *testing.T) {
if queryToFilter(common.Query{}, "my-project", "my-log") != `logName = "projects/my-project/logs/my-log"` {
t.Error("Empty search")
}
if queryToFilter(common.Query{QueryString: "My query"}, "my-project", "my-log") != `logName = "projects/my-project/logs/my-log" AND "My query"` {
t.Error("Basic search filter")
}
if queryToFilter(common.Query{
EqualityFilters: []common.EqualityFilter{
{
FieldName: "name",
Operator: "=",
Value: "pete",
},
}}, "my-project", "my-log") != `logName = "projects/my-project/logs/my-log" AND jsonPayload.name = "pete"` {
t.Error("Where filter fail")
}
}
| {
exampleJson := []byte(`{"fields":{"age":{"Kind":{"NumberValue":34}},"bool":{"Kind":{"BoolValue":true}},"list":{"Kind":{"ListValue":{"values":[{"Kind":{"NumberValue":1}},{"Kind":{"NumberValue":2}},{"Kind":{"NumberValue":3}},{"Kind":{"NumberValue":4}}]}}},"name":{"Kind":{"StringValue":"test"}},"obj":{"Kind":{"StructValue":{"fields":{"name":{"Kind":{"StringValue":"test"}}}}}},"slist":{"Kind":{"ListValue":{"values":[{"Kind":{"StringValue":"aap"}},{"Kind":{"StringValue":"noot"}},{"Kind":{"StringValue":"mies"}}]}}}}}`)
m := payloadToAttributes(exampleJson)
fmt.Printf("%+v\n", m)
if m["age"] != int64(34) {
t.Error("age")
}
if m["bool"] != true {
t.Error("bool")
}
if len(m["list"].([]interface{})) != 4 {
t.Error("list")
}
if len(m["slist"].([]interface{})) != 3 {
t.Error("slist")
}
obj, ok := m["obj"].(map[string]interface{})
if !ok {
t.Error("obj")
}
if obj["name"] != "test" {
t.Error("obj.name")
}
} |
array-to-slice-cast.rs |
const fn foo() {
let x = [1, 2, 3, 4, 5];
let y: &[_] = &x;
struct Foo<T: ?Sized>(bool, T);
let x: Foo<[u8; 3]> = Foo(true, [1, 2, 3]);
let y: &Foo<[u8]> = &x;
} | // check-pass
fn main() {} |
|
spatial.rs | use std::fs::File;
use std::ffi::CStr;
use std::os::raw::c_char;
use std::io::BufWriter;
use std::path::Path;
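/// FFI wrapper around `indus::spatial::generate_stats` for Moran's I. All three
/// arguments must be non-null, NUL-terminated C strings holding valid UTF-8
/// paths; the function panics on a null pointer or an unwritable output path
/// and returns true once the statistics have been written.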
#[no_mangle]
pub extern fn | (
weight_file_char: *const c_char,
values_file_char: *const c_char,
out_path: *const c_char,
) -> bool {
let weight_file_str = unsafe {
assert!(!weight_file_char.is_null());
CStr::from_ptr(weight_file_char)
};
let values_file_str = unsafe {
assert!(!values_file_char.is_null());
CStr::from_ptr(values_file_char)
};
let out_file_str = unsafe {
assert!(!out_path.is_null());
CStr::from_ptr(out_path)
};
let out_file = BufWriter::new(File::create(out_file_str.to_str().unwrap()).expect("can't open file"));
indus::spatial::generate_stats(
Path::new(weight_file_str.to_str().unwrap()).to_path_buf(),
Path::new(values_file_str.to_str().unwrap()).to_path_buf(),
out_file,
Some("Moransi"),
).unwrap();
true
}
#[no_mangle]
pub extern fn gearys_c(
weight_file_char: *const c_char,
values_file_char: *const c_char,
out_path: *const c_char,
) -> bool {
let weight_file_str = unsafe {
assert!(!weight_file_char.is_null());
CStr::from_ptr(weight_file_char)
};
let values_file_str = unsafe {
assert!(!values_file_char.is_null());
CStr::from_ptr(values_file_char)
};
let out_file_str = unsafe {
assert!(!out_path.is_null());
CStr::from_ptr(out_path)
};
let out_file = BufWriter::new(File::create(out_file_str.to_str().unwrap()).expect("can't open file"));
indus::spatial::generate_stats(
Path::new(weight_file_str.to_str().unwrap()).to_path_buf(),
Path::new(values_file_str.to_str().unwrap()).to_path_buf(),
out_file,
Some("Gearyc"),
).unwrap();
true
} | morans_i |
622_test.go | package medium
import "testing"
func TestMyCircularQueue(t *testing.T) {
myCircularQueue := Constructor(3)
if !myCircularQueue.IsEmpty() {
t.FailNow()
}
if !myCircularQueue.EnQueue(1) {
t.FailNow()
}
if !myCircularQueue.EnQueue(2) {
t.FailNow()
}
if !myCircularQueue.EnQueue(3) {
t.FailNow()
}
if myCircularQueue.EnQueue(4) {
t.FailNow()
}
if myCircularQueue.Front() != 1 {
t.FailNow()
}
if myCircularQueue.IsEmpty() {
t.FailNow()
}
if !myCircularQueue.DeQueue() || !myCircularQueue.DeQueue() || !myCircularQueue.DeQueue() {
t.FailNow()
}
if !myCircularQueue.IsEmpty() {
t.FailNow()
}
| if myCircularQueue.Front() != -1 {
t.FailNow()
}
} |
|
http_server.go | // Copyright (c) 2014 Ashley Jeffs
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package input
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"mime"
"mime/multipart"
"net/http"
"net/textproto"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/Jeffail/benthos/lib/log"
"github.com/Jeffail/benthos/lib/message"
"github.com/Jeffail/benthos/lib/message/metadata"
"github.com/Jeffail/benthos/lib/message/roundtrip"
"github.com/Jeffail/benthos/lib/message/tracing"
"github.com/Jeffail/benthos/lib/metrics"
"github.com/Jeffail/benthos/lib/types"
httputil "github.com/Jeffail/benthos/lib/util/http"
"github.com/Jeffail/benthos/lib/util/throttle"
"github.com/gorilla/websocket"
"github.com/opentracing/opentracing-go"
)
//------------------------------------------------------------------------------
func init() {
Constructors[TypeHTTPServer] = TypeSpec{
constructor: NewHTTPServer,
description: `
Receive messages POSTed over HTTP(S). HTTP 2.0 is supported when using TLS,
which is enabled when key and cert files are specified.
You can leave the 'address' config field blank in order to use the instance wide
HTTP server.
The field ` + "`rate_limit`" + ` allows you to specify an optional
` + "[`rate_limit` resource](../rate_limits/README.md)" + `, which will be
applied to each HTTP request made and each websocket payload received. | When the rate limit is breached HTTP requests will have a 429 response returned
with a Retry-After header. Websocket payloads will be dropped and an optional
response payload will be sent as per ` + "`ws_rate_limit_message`" + `.
### Responses
EXPERIMENTAL: It's possible to return a response for each message received using
[synchronous responses](../sync_responses.md). This feature is considered
experimental and therefore subject to change outside of major version releases.
### Endpoints
The following fields specify endpoints that are registered for sending messages:
#### ` + "`path` (defaults to `/post`)" + `
This endpoint expects POST requests where the entire request body is consumed as
a single message.
If the request contains a multipart ` + "`content-type`" + ` header as per
[rfc1341](https://www.w3.org/Protocols/rfc1341/7_2_Multipart.html) then the
multiple parts are consumed as a batch of messages, where each body part is a
message of the batch.
#### ` + "`ws_path` (defaults to `/post/ws`)" + `
Creates a websocket connection, where payloads received on the socket are passed
through the pipeline as a batch of one message.
You may specify an optional ` + "`ws_welcome_message`" + `, which is a static
payload to be sent to all clients once a websocket connection is first
established.
It's also possible to specify a ` + "`ws_rate_limit_message`" + `, which is a
static payload to be sent to clients that have triggered the server's rate limit.
### Metadata
This input adds the following metadata fields to each message:
` + "``` text" + `
- http_server_user_agent
- All headers (only first values are taken)
- All cookies
` + "```" + `
You can access these metadata fields using
[function interpolation](../config_interpolation.md#metadata).`,
}
}
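// As a rough illustration of the fields documented above, a config for this
// input might look like the following sketch; the listen address and the
// "my_limit" rate_limit resource name are placeholders for this example, not
// values taken from the Benthos docs:
//
//   input:
//     http_server:
//       address: 0.0.0.0:4195
//       path: /post
//       ws_path: /post/ws
//       timeout: 5s
//       rate_limit: my_limit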
//------------------------------------------------------------------------------
// HTTPServerConfig contains configuration for the HTTPServer input type.
type HTTPServerConfig struct {
Address string `json:"address" yaml:"address"`
Path string `json:"path" yaml:"path"`
WSPath string `json:"ws_path" yaml:"ws_path"`
WSWelcomeMessage string `json:"ws_welcome_message" yaml:"ws_welcome_message"`
WSRateLimitMessage string `json:"ws_rate_limit_message" yaml:"ws_rate_limit_message"`
Timeout string `json:"timeout" yaml:"timeout"`
RateLimit string `json:"rate_limit" yaml:"rate_limit"`
CertFile string `json:"cert_file" yaml:"cert_file"`
KeyFile string `json:"key_file" yaml:"key_file"`
}
// NewHTTPServerConfig creates a new HTTPServerConfig with default values.
func NewHTTPServerConfig() HTTPServerConfig {
return HTTPServerConfig{
Address: "",
Path: "/post",
WSPath: "/post/ws",
WSWelcomeMessage: "",
WSRateLimitMessage: "",
Timeout: "5s",
RateLimit: "",
CertFile: "",
KeyFile: "",
}
}
//------------------------------------------------------------------------------
// HTTPServer is an input type that registers a range of HTTP endpoints where
// requests can send messages through Benthos. The endpoints are registered on
// the general Benthos HTTP server by default. It is also possible to specify a
// custom address to bind a new server to which the endpoints will be registered
// on instead.
type HTTPServer struct {
running int32
conf Config
stats metrics.Type
log log.Modular
ratelimit types.RateLimit
mux *http.ServeMux
server *http.Server
timeout time.Duration
transactions chan types.Transaction
closeChan chan struct{}
closedChan chan struct{}
mCount metrics.StatCounter
mRateLimited metrics.StatCounter
mWSRateLimited metrics.StatCounter
mPartsCount metrics.StatCounter
mRcvd metrics.StatCounter
mPartsRcvd metrics.StatCounter
mSyncCount metrics.StatCounter
mSyncErr metrics.StatCounter
mSyncSucc metrics.StatCounter
mWSCount metrics.StatCounter
mTimeout metrics.StatCounter
mErr metrics.StatCounter
mWSErr metrics.StatCounter
mSucc metrics.StatCounter
mWSSucc metrics.StatCounter
mAsyncErr metrics.StatCounter
mAsyncSucc metrics.StatCounter
}
// NewHTTPServer creates a new HTTPServer input type.
func NewHTTPServer(conf Config, mgr types.Manager, log log.Modular, stats metrics.Type) (Type, error) {
var mux *http.ServeMux
var server *http.Server
if len(conf.HTTPServer.Address) > 0 {
mux = http.NewServeMux()
server = &http.Server{Addr: conf.HTTPServer.Address, Handler: mux}
}
var timeout time.Duration
if len(conf.HTTPServer.Timeout) > 0 {
var err error
if timeout, err = time.ParseDuration(conf.HTTPServer.Timeout); err != nil {
return nil, fmt.Errorf("failed to parse timeout string: %v", err)
}
}
var ratelimit types.RateLimit
if len(conf.HTTPServer.RateLimit) > 0 {
var err error
if ratelimit, err = mgr.GetRateLimit(conf.HTTPServer.RateLimit); err != nil {
return nil, fmt.Errorf("unable to locate rate_limit resource '%v': %v", conf.HTTPServer.RateLimit, err)
}
}
h := HTTPServer{
running: 1,
conf: conf,
stats: stats,
log: log,
mux: mux,
ratelimit: ratelimit,
server: server,
timeout: timeout,
transactions: make(chan types.Transaction),
closeChan: make(chan struct{}),
closedChan: make(chan struct{}),
mCount: stats.GetCounter("count"),
mRateLimited: stats.GetCounter("rate_limited"),
mWSRateLimited: stats.GetCounter("ws.rate_limited"),
mPartsCount: stats.GetCounter("parts.count"),
mRcvd: stats.GetCounter("batch.received"),
mPartsRcvd: stats.GetCounter("received"),
mWSCount: stats.GetCounter("ws.count"),
mTimeout: stats.GetCounter("send.timeout"),
mErr: stats.GetCounter("send.error"),
mWSErr: stats.GetCounter("ws.send.error"),
mSucc: stats.GetCounter("send.success"),
mWSSucc: stats.GetCounter("ws.send.success"),
mAsyncErr: stats.GetCounter("send.async_error"),
mAsyncSucc: stats.GetCounter("send.async_success"),
}
postHdlr := httputil.GzipHandler(h.postHandler)
wsHdlr := httputil.GzipHandler(h.wsHandler)
if mux != nil {
if len(h.conf.HTTPServer.Path) > 0 {
mux.HandleFunc(h.conf.HTTPServer.Path, postHdlr)
}
if len(h.conf.HTTPServer.WSPath) > 0 {
mux.HandleFunc(h.conf.HTTPServer.WSPath, wsHdlr)
}
} else {
if len(h.conf.HTTPServer.Path) > 0 {
mgr.RegisterEndpoint(
h.conf.HTTPServer.Path, "Post a message into Benthos.", postHdlr,
)
}
if len(h.conf.HTTPServer.WSPath) > 0 {
mgr.RegisterEndpoint(
h.conf.HTTPServer.WSPath, "Post messages via websocket into Benthos.", wsHdlr,
)
}
}
go h.loop()
return &h, nil
}
//------------------------------------------------------------------------------
func extractMessageFromRequest(r *http.Request) (types.Message, error) {
msg := message.New(nil)
mediaType, params, err := mime.ParseMediaType(r.Header.Get("Content-Type"))
if err != nil {
return nil, err
}
if strings.HasPrefix(mediaType, "multipart/") {
mr := multipart.NewReader(r.Body, params["boundary"])
for {
var p *multipart.Part
if p, err = mr.NextPart(); err != nil {
if err == io.EOF {
err = nil
break
}
return nil, err
}
var msgBytes []byte
if msgBytes, err = ioutil.ReadAll(p); err != nil {
return nil, err
}
msg.Append(message.NewPart(msgBytes))
}
} else {
var msgBytes []byte
if msgBytes, err = ioutil.ReadAll(r.Body); err != nil {
return nil, err
}
msg.Append(message.NewPart(msgBytes))
}
meta := metadata.New(nil)
meta.Set("http_server_user_agent", r.UserAgent())
for k, v := range r.Header {
if len(v) > 0 {
meta.Set(k, v[0])
}
}
for _, c := range r.Cookies() {
meta.Set(c.Name, c.Value)
}
message.SetAllMetadata(msg, meta)
// Try to either extract parent span from headers, or create a new one.
carrier := opentracing.HTTPHeadersCarrier(r.Header)
if clientSpanContext, serr := opentracing.GlobalTracer().Extract(opentracing.HTTPHeaders, carrier); serr == nil {
tracing.InitSpansFromParent("input_http_server_post", clientSpanContext, msg)
} else {
tracing.InitSpans("input_http_server_post", msg)
}
return msg, nil
}
func (h *HTTPServer) postHandler(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
if atomic.LoadInt32(&h.running) != 1 {
http.Error(w, "Server closing", http.StatusServiceUnavailable)
return
}
if r.Method != "POST" {
http.Error(w, "Incorrect method", http.StatusMethodNotAllowed)
return
}
if h.ratelimit != nil {
if tUntil, err := h.ratelimit.Access(); err != nil {
http.Error(w, "Server error", http.StatusBadGateway)
h.log.Warnf("Failed to access rate limit: %v\n", err)
return
} else if tUntil > 0 {
w.Header().Add("Retry-After", strconv.Itoa(int(tUntil.Seconds())))
http.Error(w, "Too Many Requests", http.StatusTooManyRequests)
h.mRateLimited.Incr(1)
return
}
}
var err error
defer func() {
if err != nil {
http.Error(w, "Bad request", http.StatusBadRequest)
h.log.Warnf("Request read failed: %v\n", err)
return
}
}()
msg, err := extractMessageFromRequest(r)
if err != nil {
return
}
defer tracing.FinishSpans(msg)
store := roundtrip.NewResultStore()
roundtrip.AddResultStore(msg, store)
h.mCount.Incr(1)
h.mPartsCount.Incr(int64(msg.Len()))
h.mPartsRcvd.Incr(int64(msg.Len()))
h.mRcvd.Incr(1)
resChan := make(chan types.Response)
select {
case h.transactions <- types.NewTransaction(msg, resChan):
case <-time.After(h.timeout):
h.mTimeout.Incr(1)
http.Error(w, "Request timed out", http.StatusRequestTimeout)
return
case <-h.closeChan:
http.Error(w, "Server closing", http.StatusServiceUnavailable)
return
}
select {
case res, open := <-resChan:
if !open {
http.Error(w, "Server closing", http.StatusServiceUnavailable)
return
} else if res.Error() != nil {
h.mErr.Incr(1)
http.Error(w, res.Error().Error(), http.StatusBadGateway)
return
}
h.mSucc.Incr(1)
case <-time.After(h.timeout):
h.mTimeout.Incr(1)
http.Error(w, "Request timed out", http.StatusRequestTimeout)
go func() {
// Even if the request times out, we still need to drain a response.
resAsync := <-resChan
if resAsync.Error() != nil {
h.mAsyncErr.Incr(1)
h.mErr.Incr(1)
} else {
h.mAsyncSucc.Incr(1)
h.mSucc.Incr(1)
}
}()
return
}
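	// Gather any synchronous response parts recorded in the roundtrip result
	// store by downstream components and write them back to the caller: a
	// single part is returned as a plain body, several parts as a multipart
	// response.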
var parts []types.Part
for _, responseMsg := range store.Get() {
responseMsg.Iter(func(i int, part types.Part) error {
parts = append(parts, part)
return nil
})
}
if plen := len(parts); plen == 1 {
payload := parts[0].Get()
w.Header().Set("Content-Type", http.DetectContentType(payload))
w.Write(payload)
} else if plen > 1 {
writer := multipart.NewWriter(w)
var merr error
for i := 0; i < plen && merr == nil; i++ {
payload := parts[i].Get()
var part io.Writer
if part, merr = writer.CreatePart(textproto.MIMEHeader{
"Content-Type": []string{http.DetectContentType(payload)},
}); merr == nil {
_, merr = io.Copy(part, bytes.NewReader(payload))
}
}
if merr != nil {
h.log.Errorf("Failed to return sync response: %v\n", merr)
}
}
return
}
func (h *HTTPServer) wsHandler(w http.ResponseWriter, r *http.Request) {
var err error
defer func() {
if err != nil {
http.Error(w, "Bad request", http.StatusBadRequest)
h.log.Warnf("Websocket request failed: %v\n", err)
return
}
}()
upgrader := websocket.Upgrader{}
var ws *websocket.Conn
if ws, err = upgrader.Upgrade(w, r, nil); err != nil {
return
}
defer ws.Close()
resChan := make(chan types.Response)
throt := throttle.New(throttle.OptCloseChan(h.closeChan))
if welMsg := h.conf.HTTPServer.WSWelcomeMessage; len(welMsg) > 0 {
if err = ws.WriteMessage(websocket.BinaryMessage, []byte(welMsg)); err != nil {
h.log.Errorf("Failed to send welcome message: %v\n", err)
}
}
var msgBytes []byte
for atomic.LoadInt32(&h.running) == 1 {
if msgBytes == nil {
if _, msgBytes, err = ws.ReadMessage(); err != nil {
return
}
h.mWSCount.Incr(1)
h.mCount.Incr(1)
}
if h.ratelimit != nil {
if tUntil, err := h.ratelimit.Access(); err != nil || tUntil > 0 {
if err != nil {
h.log.Warnf("Failed to access rate limit: %v\n", err)
}
if rlMsg := h.conf.HTTPServer.WSRateLimitMessage; len(rlMsg) > 0 {
if err = ws.WriteMessage(websocket.BinaryMessage, []byte(rlMsg)); err != nil {
h.log.Errorf("Failed to send rate limit message: %v\n", err)
}
}
h.mWSRateLimited.Incr(1)
continue
}
}
msg := message.New([][]byte{msgBytes})
meta := msg.Get(0).Metadata()
meta.Set("http_server_user_agent", r.UserAgent())
for k, v := range r.Header {
if len(v) > 0 {
meta.Set(k, v[0])
}
}
for _, c := range r.Cookies() {
meta.Set(c.Name, c.Value)
}
tracing.InitSpans("input_http_server_websocket", msg)
store := roundtrip.NewResultStore()
roundtrip.AddResultStore(msg, store)
select {
case h.transactions <- types.NewTransaction(msg, resChan):
case <-h.closeChan:
return
}
select {
case res, open := <-resChan:
if !open {
return
}
if res.Error() != nil {
h.mWSErr.Incr(1)
h.mErr.Incr(1)
throt.Retry()
} else {
h.mWSSucc.Incr(1)
h.mSucc.Incr(1)
msgBytes = nil
throt.Reset()
}
case <-h.closeChan:
return
}
for _, responseMsg := range store.Get() {
if err := responseMsg.Iter(func(i int, part types.Part) error {
return ws.WriteMessage(websocket.TextMessage, part.Get())
}); err != nil {
h.log.Errorf("Failed to send sync response over websocket: %v\n", err)
}
}
tracing.FinishSpans(msg)
}
}
//------------------------------------------------------------------------------
func (h *HTTPServer) loop() {
mRunning := h.stats.GetGauge("running")
defer func() {
atomic.StoreInt32(&h.running, 0)
if h.server != nil {
h.server.Shutdown(context.Background())
}
mRunning.Decr(1)
close(h.transactions)
close(h.closedChan)
}()
mRunning.Incr(1)
if h.server != nil {
go func() {
if len(h.conf.HTTPServer.KeyFile) > 0 || len(h.conf.HTTPServer.CertFile) > 0 {
h.log.Infof(
"Receiving HTTPS messages at: https://%s\n",
h.conf.HTTPServer.Address+h.conf.HTTPServer.Path,
)
if err := h.server.ListenAndServeTLS(
h.conf.HTTPServer.CertFile, h.conf.HTTPServer.KeyFile,
); err != http.ErrServerClosed {
h.log.Errorf("Server error: %v\n", err)
}
} else {
h.log.Infof(
"Receiving HTTP messages at: http://%s\n",
h.conf.HTTPServer.Address+h.conf.HTTPServer.Path,
)
if err := h.server.ListenAndServe(); err != http.ErrServerClosed {
h.log.Errorf("Server error: %v\n", err)
}
}
}()
}
<-h.closeChan
}
// TransactionChan returns a transactions channel for consuming messages from
// this input.
func (h *HTTPServer) TransactionChan() <-chan types.Transaction {
return h.transactions
}
// Connected returns a boolean indicating whether this input is currently
// connected to its target.
func (h *HTTPServer) Connected() bool {
return true
}
// CloseAsync shuts down the HTTPServer input and stops processing requests.
func (h *HTTPServer) CloseAsync() {
if atomic.CompareAndSwapInt32(&h.running, 1, 0) {
close(h.closeChan)
}
}
// WaitForClose blocks until the HTTPServer input has closed down.
func (h *HTTPServer) WaitForClose(timeout time.Duration) error {
select {
case <-h.closedChan:
case <-time.After(timeout):
return types.ErrTimeout
}
return nil
}
//------------------------------------------------------------------------------ | |
distance_view.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v9.enums.types import (
distance_bucket as gage_distance_bucket,
)
__protobuf__ = proto.module(
package="google.ads.googleads.v9.resources",
marshal="google.ads.googleads.v9",
manifest={"DistanceView",},
)
class DistanceView(proto.Message):
|
__all__ = tuple(sorted(__protobuf__.manifest))
| r"""A distance view with metrics aggregated by the user's
distance from an advertiser's location extensions. Each
DistanceBucket includes all impressions that fall within its
distance and a single impression will contribute to the metrics
for all DistanceBuckets that include the user's distance.
Attributes:
resource_name (str):
Output only. The resource name of the distance view.
Distance view resource names have the form:
``customers/{customer_id}/distanceViews/1~{distance_bucket}``
distance_bucket (google.ads.googleads.v9.enums.types.DistanceBucketEnum.DistanceBucket):
Output only. Grouping of user distance from
location extensions.
metric_system (bool):
Output only. True if the DistanceBucket is
using the metric system, false otherwise.
This field is a member of `oneof`_ ``_metric_system``.
"""
resource_name = proto.Field(proto.STRING, number=1,)
distance_bucket = proto.Field(
proto.ENUM,
number=2,
enum=gage_distance_bucket.DistanceBucketEnum.DistanceBucket,
)
metric_system = proto.Field(proto.BOOL, number=4, optional=True,) |
pyunit_h2oparse_raw.py | from __future__ import print_function
import sys
sys.path.insert(1,"../../../") | import h2o
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame
def h2oparse_raw():
"""
Python API test: h2o.parse_raw(setup, id=None, first_line_is_header=0)
copied from pyunit_hexdev_29_parse_false.py
"""
fraw = h2o.import_file(pyunit_utils.locate("smalldata/jira/hexdev_29.csv"), parse=False)
assert isinstance(fraw, list)
fhex = h2o.parse_raw(h2o.parse_setup(fraw), id='hexdev_29.hex', first_line_is_header=0)
fhex.summary()
assert_is_type(fhex, H2OFrame)
if __name__ == "__main__":
pyunit_utils.standalone_test(h2oparse_raw)
else:
h2oparse_raw() | from tests import pyunit_utils |
login.component.ts | import { Component, OnInit } from '@angular/core';
import { FormBuilder, FormGroup, Validators } from '@angular/forms';
import { Router } from '@angular/router';
import { ToastrService } from 'ngx-toastr';
import { User } from 'src/app/models/user';
import { AuthService } from 'src/app/services/auth.service';
import { LocalStorageService } from 'src/app/services/local-storage.service';
@Component({
selector: 'app-login',
templateUrl: './login.component.html',
styleUrls: ['./login.component.css']
})
export class | implements OnInit {
user:User = new User();
  baseUrl = 'https://localhost:44334/';
  logo = this.baseUrl + "images/logo.jpg";
loginForm:FormGroup;
constructor(private formBuilder:FormBuilder, private authService:AuthService,
private toastrService:ToastrService,private localStorageService:LocalStorageService,
private router:Router,
) { }
ngOnInit(): void {
this.createLoginForm();
}
createLoginForm(){
this.loginForm=this.formBuilder.group({
email:["",Validators.required],
password:["",Validators.required]
})
}
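  // Validates the form, sends the credentials through AuthService and, on
  // success, stores the returned token and the entered e-mail in local storage
  // before navigating home and reloading the page.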
login(){
if(this.loginForm.valid){
let loginModel = Object.assign({},this.loginForm.value);
this.authService.login(loginModel).subscribe(response=>{
this.toastrService.info(response.message);
this.localStorageService.setItem("token", response.data.token);
this.localStorageService.setItem('email',this.loginForm.value.email);
        this.toastrService.success("Login successful");
this.router.navigate(["/"]).then(r => window.location.reload());
},responseError=>{
        this.toastrService.error("The e-mail or password you entered is incorrect.", responseError.error);
})
}
}
} | LoginComponent |
frame.py | """
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import annotations
import collections
from collections import abc
import datetime
from io import StringIO
import itertools
import mmap
from textwrap import dedent
from typing import (
IO,
TYPE_CHECKING,
Any,
AnyStr,
Dict,
FrozenSet,
Hashable,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
cast,
overload,
)
import warnings
import numpy as np
import numpy.ma as ma
from pandas._config import get_option
from pandas._libs import algos as libalgos, lib, properties
from pandas._libs.lib import no_default
from pandas._typing import (
AggFuncType,
ArrayLike,
Axes,
Axis,
CompressionOptions,
Dtype,
FilePathOrBuffer,
FrameOrSeriesUnion,
IndexKeyFunc,
Label,
Level,
Renamer,
StorageOptions,
ValueKeyFunc,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
deprecate_kwarg,
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_axis_style_args,
validate_bool_kwarg,
validate_percentile,
)
from pandas.core.dtypes.cast import (
cast_scalar_to_array,
coerce_to_dtypes,
construct_1d_arraylike_from_scalar,
find_common_type,
infer_dtype_from_scalar,
invalidate_string_dtypes,
maybe_box_datetimelike,
maybe_cast_to_datetime,
maybe_casted_values,
maybe_convert_platform,
maybe_downcast_to_dtype,
maybe_infer_to_datetimelike,
maybe_upcast,
validate_numeric_casting,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
infer_dtype_from_object,
is_bool_dtype,
is_dataclass,
is_datetime64_any_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_hashable,
is_integer,
is_integer_dtype,
is_iterator,
is_list_like,
is_named_tuple,
is_object_dtype,
is_scalar,
is_sequence,
pandas_dtype,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import algorithms, common as com, generic, nanops, ops
from pandas.core.accessor import CachedAccessor
from pandas.core.aggregation import (
aggregate,
reconstruct_func,
relabel_result,
transform,
)
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import Categorical, ExtensionArray
from pandas.core.arrays.sparse import SparseFrameAccessor
from pandas.core.construction import extract_array
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import (
DatetimeIndex,
Index,
PeriodIndex,
ensure_index,
ensure_index_from_sequences,
)
from pandas.core.indexes.multi import MultiIndex, maybe_droplevels
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.core.internals import BlockManager
from pandas.core.internals.construction import (
arrays_to_mgr,
dataclasses_to_dicts,
get_names_from_index,
init_dict,
init_ndarray,
masked_rec_array_to_mgr,
reorder_arrays,
sanitize_index,
to_arrays,
)
from pandas.core.reshape.melt import melt
from pandas.core.series import Series
from pandas.core.sorting import get_group_index, lexsort_indexer, nargsort
from pandas.io.common import get_handle
from pandas.io.formats import console, format as fmt
from pandas.io.formats.info import BaseInfo, DataFrameInfo
import pandas.plotting
if TYPE_CHECKING:
from typing import Literal
from pandas.core.groupby.generic import DataFrameGroupBy
from pandas.io.formats.style import Styler
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = {
"axes": "index, columns",
"klass": "DataFrame",
"axes_single_arg": "{0 or 'index', 1 or 'columns'}",
"axis": """axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index': apply function to each column.
If 1 or 'columns': apply function to each row.""",
"optional_by": """
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels.
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels.""",
"optional_labels": """labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
"optional_axis": """axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
}
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame or named Series objects with a database-style join.
The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.
When performing a cross merge, no column specifications to merge on are
allowed.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner', 'cross'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order.
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order.
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys.
* cross: creates the cartesian product from both frames, preserves the order
of the left keys.
.. versionadded:: 1.2.0
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
suffixes : list-like, default is ("_x", "_y")
A length-2 sequence where each element is optionally a string
indicating the suffix to add to overlapping column names in
`left` and `right` respectively. Pass a value of `None` instead
of a string to indicate that the column name from `left` or
`right` should be left as-is, with no suffix. At least one of the
values must not be None.
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to the output DataFrame called "_merge" with
information on the source of each row. The column can be given a different
name by providing a string argument. The column will have a Categorical
type with the value of "left_only" for observations whose merge key only
appears in the left DataFrame, "right_only" for observations
whose merge key only appears in the right DataFrame, and "both"
if the observation's merge key is found in both DataFrames.
validate : str, optional
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
See Also
--------
merge_ordered : Merge with optional filling/interpolation.
merge_asof : Merge on nearest keys.
DataFrame.join : Similar method using indices.
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Support for merging named Series objects was added in version 0.24.0
Examples
--------
>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]})
>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]})
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> df1.merge(df2, left_on='lkey', right_on='rkey')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2 with specified left and right suffixes
appended to any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey',
... suffixes=('_left', '_right'))
lkey value_left rkey value_right
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
Traceback (most recent call last):
...
ValueError: columns overlap but no suffix specified:
Index(['value'], dtype='object')
>>> df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]})
>>> df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]})
>>> df1
a b
0 foo 1
1 bar 2
>>> df2
a c
0 foo 3
1 baz 4
>>> df1.merge(df2, how='inner', on='a')
a b c
0 foo 1 3
>>> df1.merge(df2, how='left', on='a')
a b c
0 foo 1 3.0
1 bar 2 NaN
>>> df1 = pd.DataFrame({'left': ['foo', 'bar']})
>>> df2 = pd.DataFrame({'right': [7, 8]})
>>> df1
left
0 foo
1 bar
>>> df2
right
0 7
1 8
>>> df1.merge(df2, how='cross')
left right
0 foo 7
1 foo 8
2 bar 7
3 bar 8
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame, OpsMixin):
"""
Two-dimensional, size-mutable, potentially heterogeneous tabular data.
Data structure also contains labeled axes (rows and columns).
Arithmetic operations align on both row and column labels. Can be
thought of as a dict-like container for Series objects. The primary
pandas data structure.
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, dataclass or list-like objects. If
data is a dict, column order follows insertion-order.
.. versionchanged:: 0.25.0
If data is a list of dicts, column order follows insertion-order.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided.
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided.
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer.
copy : bool, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input.
See Also
--------
DataFrame.from_records : Constructor from tuples, also record arrays.
DataFrame.from_dict : From dicts of Series, arrays, or dicts.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_table : Read general delimited file into DataFrame.
read_clipboard : Read text from clipboard into DataFrame.
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
... columns=['a', 'b', 'c'])
>>> df2
a b c
0 1 2 3
1 4 5 6
2 7 8 9
Constructing DataFrame from dataclass:
>>> from dataclasses import make_dataclass
>>> Point = make_dataclass("Point", [("x", int), ("y", int)])
>>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)])
x y
0 0 0
1 0 3
2 2 3
"""
_internal_names_set = {"columns", "index"} | NDFrame._internal_names_set
_typ = "dataframe"
_HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray)
@property
def _constructor(self) -> Type[DataFrame]:
return DataFrame
_constructor_sliced: Type[Series] = Series
_hidden_attrs: FrozenSet[str] = NDFrame._hidden_attrs | frozenset([])
_accessors: Set[str] = {"sparse"}
@property
def _constructor_expanddim(self):
# GH#31549 raising NotImplementedError on a property causes trouble
# for `inspect`
def constructor(*args, **kwargs):
raise NotImplementedError("Not supported for DataFrames!")
return constructor
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data=None,
index: Optional[Axes] = None,
columns: Optional[Axes] = None,
dtype: Optional[Dtype] = None,
copy: bool = False,
):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._mgr
if isinstance(data, BlockManager):
if index is None and columns is None and dtype is None and copy is False:
# GH#33357 fastpath
NDFrame.__init__(self, data)
return
mgr = self._init_mgr(
data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy
)
elif isinstance(data, dict):
mgr = init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data.soften_mask() # set hardmask False if it was True
data[mask] = fill_value
else:
data = data.copy()
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = {k: data[k] for k in data_columns}
if columns is None:
columns = data_columns
mgr = init_dict(data, index, columns, dtype=dtype)
elif getattr(data, "name", None) is not None:
mgr = init_dict({data.name: data}, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
# For data is list-like, or Iterable (will consume into list)
elif isinstance(data, abc.Iterable) and not isinstance(data, (str, bytes)):
if not isinstance(data, (abc.Sequence, ExtensionArray)):
data = list(data)
if len(data) > 0:
if is_dataclass(data[0]):
data = dataclasses_to_dicts(data)
if is_list_like(data[0]) and getattr(data[0], "ndim", 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = to_arrays(data, columns, dtype=dtype)
columns = ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = ibase.default_index(len(data[0]))
else:
index = ibase.default_index(len(data))
mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
else:
mgr = init_dict({}, index, columns, dtype=dtype)
# For data is scalar
else:
if index is None or columns is None:
raise ValueError("DataFrame constructor not properly called!")
if not dtype:
dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True)
# For data is a scalar extension dtype
if is_extension_array_dtype(dtype):
values = [
construct_1d_arraylike_from_scalar(data, len(index), dtype)
for _ in range(len(columns))
]
mgr = arrays_to_mgr(values, columns, index, columns, dtype=None)
else:
# Attempt to coerce to a numpy array
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as err:
exc = TypeError(
"DataFrame constructor called with "
f"incompatible data and dtype: {err}"
)
raise exc from err
if arr.ndim != 0:
raise ValueError("DataFrame constructor not properly called!")
values = cast_scalar_to_array(
(len(index), len(columns)), data, dtype=dtype
)
mgr = init_ndarray(
values, index, columns, dtype=values.dtype, copy=False
)
NDFrame.__init__(self, mgr)
# ----------------------------------------------------------------------
@property
def axes(self) -> List[Index]:
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
[RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
dtype='object')]
"""
return [self.index, self.columns]
@property
def shape(self) -> Tuple[int, int]:
"""
Return a tuple representing the dimensionality of the DataFrame.
See Also
--------
ndarray.shape : Tuple of array dimensions.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self.index), len(self.columns)
@property
def _is_homogeneous_type(self) -> bool:
"""
Whether all the columns in a DataFrame have the same type.
Returns
-------
bool
See Also
--------
Index._is_homogeneous_type : Whether the object has a single
dtype.
MultiIndex._is_homogeneous_type : Whether all the levels of a
MultiIndex have the same dtype.
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
True
>>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
False
Items with the same type but different sizes are considered
different types.
>>> DataFrame({
... "A": np.array([1, 2], dtype=np.int32),
... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
False
"""
if self._mgr.any_extension_types:
return len({block.dtype for block in self._mgr.blocks}) == 1
else:
return not self._is_mixed_type
@property
def _can_fast_transpose(self) -> bool:
"""
Can we transpose this DataFrame without creating any new array objects.
"""
if self._mgr.any_extension_types:
# TODO(EA2D) special case would be unnecessary with 2D EAs
return False
return len(self._mgr.blocks) == 1
# ----------------------------------------------------------------------
# Rendering Methods
def _repr_fits_vertical_(self) -> bool:
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns.
In case of non-interactive session, no boundaries apply.
`ignore_width` is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if (max_columns and nb_columns > max_columns) or (
(not ignore_width) and width and nb_columns > (width // 2)
):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not console.in_interactive_session():
return True
if get_option("display.width") is not None or console.in_ipython_frontend():
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
        if max_rows is not None:
            # min of two, where one may be None
            d = d.iloc[: min(max_rows, len(d))]
        else:
            # max_rows is None means unlimited rows, so assume the repr fits
            return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max(len(line) for line in value.split("\n"))
return repr_width < width
def _info_repr(self) -> bool:
"""
True if the repr should show the info view.
"""
info_repr_option = get_option("display.large_repr") == "info"
return info_repr_option and not (
self._repr_fits_horizontal_() and self._repr_fits_vertical_()
)
def __repr__(self) -> str:
"""
Return a string representation for a particular DataFrame.
"""
buf = StringIO("")
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
max_colwidth = get_option("display.max_colwidth")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(
buf=buf,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
line_width=width,
max_colwidth=max_colwidth,
show_dimensions=show_dimensions,
)
return buf.getvalue()
def _repr_html_(self) -> Optional[str]:
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
if self._info_repr():
buf = StringIO("")
self.info(buf=buf)
# need to escape the <class>, should be the first line.
            val = buf.getvalue().replace("<", r"&lt;", 1)
            val = val.replace(">", r"&gt;", 1)
return "<pre>" + val + "</pre>"
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
formatter = fmt.DataFrameFormatter(
self,
columns=None,
col_space=None,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
justify=None,
index_names=True,
header=True,
index=True,
bold_rows=True,
escape=True,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=".",
)
return fmt.DataFrameRenderer(formatter).to_html(notebook=True)
else:
return None
@Substitution(
header_type="bool or sequence",
header="Write out the column names. If a list of strings "
"is given, it is assumed to be aliases for the "
"column names",
col_space_type="int, list or dict of int",
col_space="The minimum width of each column",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_string(
self,
buf: Optional[FilePathOrBuffer[str]] = None,
columns: Optional[Sequence[str]] = None,
col_space: Optional[int] = None,
header: Union[bool, Sequence[str]] = True,
index: bool = True,
na_rep: str = "NaN",
formatters: Optional[fmt.FormattersType] = None,
float_format: Optional[fmt.FloatFormatType] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
justify: Optional[str] = None,
max_rows: Optional[int] = None,
min_rows: Optional[int] = None,
max_cols: Optional[int] = None,
show_dimensions: bool = False,
decimal: str = ".",
line_width: Optional[int] = None,
max_colwidth: Optional[int] = None,
encoding: Optional[str] = None,
) -> Optional[str]:
"""
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
max_colwidth : int, optional
Max width to truncate each column in characters. By default, no limit.
.. versionadded:: 1.0.0
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
"""
from pandas import option_context
with option_context("display.max_colwidth", max_colwidth):
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header,
index=index,
min_rows=min_rows,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
)
return fmt.DataFrameRenderer(formatter).to_string(
buf=buf,
encoding=encoding,
line_width=line_width,
)
# ----------------------------------------------------------------------
@property
def style(self) -> Styler:
"""
Returns a Styler object.
Contains methods for building a styled HTML representation of the DataFrame.
See Also
--------
io.formats.style.Styler : Helps style a DataFrame or Series according to the
data with HTML and CSS.
"""
from pandas.io.formats.style import Styler
return Styler(self)
_shared_docs[
"items"
] = r"""
Iterate over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.items():
... print(f'label: {label}')
... print(f'content: {content}', sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
@Appender(_shared_docs["items"])
def items(self) -> Iterable[Tuple[Label, Series]]:
if self.columns.is_unique and hasattr(self, "_item_cache"):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
@Appender(_shared_docs["items"])
def iteritems(self) -> Iterable[Tuple[Label, Series]]:
yield from self.items()
def iterrows(self) -> Iterable[Tuple[Label, Series]]:
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : Series
The data of the row as a Series.
See Also
--------
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index: bool = True, name: Optional[str] = "Pandas"):
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Returns
-------
iterator
An object to iterate over namedtuples for each row in the
DataFrame with the first field possibly being the index and
following fields being the column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
On python versions < 3.7 regular tuples are returned for DataFrames
with a large number of columns (>254).
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = list(self.columns)
if index:
arrays.append(self.index)
fields.insert(0, "Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
if name is not None:
# https://github.com/python/mypy/issues/9046
# error: namedtuple() expects a string literal as the first argument
itertuple = collections.namedtuple( # type: ignore[misc]
name, fields, rename=True
)
return map(itertuple._make, zip(*arrays))
# fallback to regular tuples
return zip(*arrays)
def __len__(self) -> int:
"""
Returns length of info axis, but here we use the index.
"""
return len(self.index)
def dot(self, other):
"""
Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
values of an other Series, DataFrame or a numpy array.
It can also be called using ``self @ other`` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the matrix product with.
Returns
-------
Series or DataFrame
If other is a Series, return the matrix product between self and
other as a Series. If other is a DataFrame or a numpy.array, return
the matrix product of self and other in a DataFrame or a np.ndarray.
See Also
--------
Series.dot: Similar method for Series.
Notes
-----
The dimensions of DataFrame and other must be compatible in order to
compute the matrix multiplication. In addition, the column names of
DataFrame and the index of other must contain the same values, as they
will be aligned prior to the multiplication.
The dot method for Series computes the inner product, instead of the
matrix product here.
Examples
--------
Here we multiply a DataFrame with a Series.
>>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
>>> s = pd.Series([1, 1, 2, 1])
>>> df.dot(s)
0 -4
1 5
dtype: int64
Here we multiply a DataFrame with another DataFrame.
>>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(other)
0 1
0 1 4
1 2 2
Note that the dot method gives the same result as @
>>> df @ other
0 1
0 1 4
1 2 2
The dot method also works if other is a np.ndarray.
>>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(arr)
0 1
0 1 4
1 2 2
Note how shuffling of the objects does not change the result.
>>> s2 = s.reindex([1, 0, 2, 3])
>>> df.dot(s2)
0 -4
1 5
dtype: int64
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if len(common) > len(self.columns) or len(common) > len(other.index):
raise ValueError("matrices are not aligned")
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right._values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError(
f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
)
if isinstance(other, DataFrame):
return self._constructor(
np.dot(lvals, rvals), index=left.index, columns=other.columns
)
elif isinstance(other, Series):
return self._constructor_sliced(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return self._constructor_sliced(result, index=left.index)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
def __matmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
def __rmatmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
try:
return self.T.dot(np.transpose(other)).T
except ValueError as err:
if "shape mismatch" not in str(err):
raise
# GH#21581 give exception message for original shapes
msg = f"shapes {np.shape(other)} and {self.shape} not aligned"
raise ValueError(msg) from err
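# Editor's note: an illustrative sketch, not part of the original source. With a
# NumPy array on the left-hand side of ``@``, NumPy defers to the DataFrame and
# ``__rmatmul__`` is invoked, which is equivalent to ``df.T.dot(arr.T).T``:
#   >>> df = pd.DataFrame([[0, 1], [1, 0]])
#   >>> arr = np.array([[1, 2]])   # shape (1, 2)
#   >>> arr @ df                   # dispatches to DataFrame.__rmatmul__
#   # yields a 1x2 DataFrame with values [[2, 1]]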
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient="columns", dtype=None, columns=None) -> DataFrame:
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from structured ndarray, sequence
of tuples or dicts, or DataFrame.
DataFrame : DataFrame object creation using constructor.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d
"""
index = None
orient = orient.lower()
if orient == "index":
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient == "columns":
if columns is not None:
raise ValueError("cannot use columns parameter with orient='columns'")
else: # pragma: no cover
raise ValueError("only recognize index or columns for orient")
return cls(data, index=index, columns=columns, dtype=dtype)
def to_numpy(
self, dtype=None, copy: bool = False, na_value=lib.no_default
) -> np.ndarray:
"""
Convert the DataFrame to a NumPy array.
.. versionadded:: 0.24.0
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
``float16`` and ``float32``, the results dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the dtypes of the DataFrame columns.
.. versionadded:: 1.1.0
Returns
-------
numpy.ndarray
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df['C'] = pd.date_range('2000', periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
"""
self._consolidate_inplace()
result = self._mgr.as_array(
transpose=self._AXIS_REVERSED, dtype=dtype, copy=copy, na_value=na_value
)
if result.dtype is not dtype:
result = np.array(result, dtype=dtype, copy=False)
return result
def to_dict(self, orient="dict", into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
See Also
--------
DataFrame.from_dict: Create a DataFrame from a dictionary.
DataFrame.to_json: Convert a DataFrame to JSON format.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df.to_dict()
{'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': row1 1
row2 2
Name: col1, dtype: int64,
'col2': row1 0.50
row2 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]]}
>>> df.to_dict('records')
[{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]
>>> df.to_dict('index')
{'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
"""
if not self.columns.is_unique:
warnings.warn(
"DataFrame columns are not unique, some columns will be omitted.",
UserWarning,
stacklevel=2,
)
# GH16122
into_c = com.standardize_mapping(into)
orient = orient.lower()
# GH32515
if orient.startswith(("d", "l", "s", "r", "i")) and orient not in {
"dict",
"list",
"series",
"split",
"records",
"index",
}:
warnings.warn(
"Using short name for 'orient' is deprecated. Only the "
"options: ('dict', list, 'series', 'split', 'records', 'index') "
"will be used in a future version. Use one of the above "
"to silence this warning.",
FutureWarning,
)
if orient.startswith("d"):
orient = "dict"
elif orient.startswith("l"):
orient = "list"
elif orient.startswith("sp"):
orient = "split"
elif orient.startswith("s"):
orient = "series"
elif orient.startswith("r"):
orient = "records"
elif orient.startswith("i"):
orient = "index"
if orient == "dict":
return into_c((k, v.to_dict(into)) for k, v in self.items())
elif orient == "list":
return into_c((k, v.tolist()) for k, v in self.items())
elif orient == "split":
return into_c(
(
("index", self.index.tolist()),
("columns", self.columns.tolist()),
(
"data",
[
list(map(maybe_box_datetimelike, t))
for t in self.itertuples(index=False, name=None)
],
),
)
)
elif orient == "series":
return into_c((k, maybe_box_datetimelike(v)) for k, v in self.items())
elif orient == "records":
columns = self.columns.tolist()
rows = (
dict(zip(columns, row))
for row in self.itertuples(index=False, name=None)
)
return [
into_c((k, maybe_box_datetimelike(v)) for k, v in row.items())
for row in rows
]
elif orient == "index":
if not self.index.is_unique:
raise ValueError("DataFrame index must be unique for orient='index'.")
return into_c(
(t[0], dict(zip(self.columns, t[1:])))
for t in self.itertuples(name=None)
)
else:
raise ValueError(f"orient '{orient}' not understood")
def to_gbq(
self,
destination_table,
project_id=None,
chunksize=None,
reauth=False,
if_exists="fail",
auth_local_webserver=False,
table_schema=None,
location=None,
progress_bar=True,
credentials=None,
) -> None:
"""
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists raise pandas_gbq.gbq.TableCreationError.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
List of BigQuery table fields to which the DataFrame
columns conform, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to
override default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service
Account :class:`google.oauth2.service_account.Credentials`
directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
read_gbq : Read a DataFrame from Google BigQuery.
"""
from pandas.io import gbq
gbq.to_gbq(
self,
destination_table,
project_id=project_id,
chunksize=chunksize,
reauth=reauth,
if_exists=if_exists,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema,
location=location,
progress_bar=progress_bar,
credentials=credentials,
)
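# Editor's note: a minimal usage sketch, not from the original source. The
# project, dataset and table names below are hypothetical and assume the
# pandas-gbq package is installed and credentials are already configured:
#   >>> df = pd.DataFrame({"name": ["a", "b"], "value": [1, 2]})
#   >>> df.to_gbq("my_dataset.my_table", project_id="my-project",
#   ...           if_exists="append")  # doctest: +SKIP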
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float=False,
nrows=None,
) -> DataFrame:
"""
Convert structured or record ndarray to DataFrame.
Creates a DataFrame object from a structured ndarray, sequence of
tuples or dicts, or DataFrame.
Parameters
----------
data : structured ndarray, sequence of tuples or dicts, or DataFrame
Structured input data.
index : str, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use.
exclude : sequence, default None
Columns or fields to exclude.
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns).
coerce_float : bool, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
nrows : int, default None
Number of rows to read if data is an iterator.
Returns
-------
DataFrame
See Also
--------
DataFrame.from_dict : DataFrame from dict of array-like or dicts.
DataFrame : DataFrame object creation using constructor.
Examples
--------
Data can be provided as a structured ndarray:
>>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')],
... dtype=[('col_1', 'i4'), ('col_2', 'U1')])
>>> pd.DataFrame.from_records(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Data can be provided as a list of dicts:
>>> data = [{'col_1': 3, 'col_2': 'a'},
... {'col_1': 2, 'col_2': 'b'},
... {'col_1': 1, 'col_2': 'c'},
... {'col_1': 0, 'col_2': 'd'}]
>>> pd.DataFrame.from_records(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Data can be provided as a list of tuples with corresponding columns:
>>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')]
>>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2'])
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, "dtype") and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns_list = []
for k, v in data.items():
if k in columns:
arr_columns_list.append(k)
arrays.append(v)
arrays, arr_columns = reorder_arrays(arrays, arr_columns_list, columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = to_arrays(data, columns)
if columns is not None:
columns = ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = to_arrays(data, columns, coerce_float=coerce_float)
arr_columns = ensure_index(arr_columns)
if columns is not None:
columns = ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if isinstance(index, str) or not hasattr(index, "__iter__"):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
index_data = [arrays[arr_columns.get_loc(field)] for field in index]
except (KeyError, TypeError):
# raised by get_loc, see GH#29258
result_index = index
else:
result_index = ensure_index_from_sequences(index_data, names=index)
exclude.update(index)
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
def to_records(
self, index=True, column_dtypes=None, index_dtypes=None
) -> np.recarray:
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
column_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
If the DataFrame index has no label then the recarray field name
is set to 'index'. If the index has a label then this is used as the
field name:
>>> df.index = df.index.rename("I")
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False)
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"})
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])
As well as for the index:
>>> df.to_records(index_dtypes="<S2")
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])
>>> index_dtypes = f"<S{df.index.str.len().max()}"
>>> df.to_records(index_dtypes=index_dtypes)
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
"""
if index:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = list(map(np.array, zip(*self.index._values)))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [
np.asarray(self.iloc[:, i]) for i in range(len(self.columns))
]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = f"level_{count}"
count += 1
elif index_names[0] is None:
index_names = ["index"]
names = [str(name) for name in itertools.chain(index_names, self.columns)]
else:
arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))]
names = [str(c) for c in self.columns]
index_names = []
index_len = len(index_names)
formats = []
for i, v in enumerate(arrays):
index = i
# When the names and arrays are collected, we
# first collect those in the DataFrame's index,
# followed by those in its columns.
#
# Thus, the total length of the array is:
# len(index_names) + len(DataFrame.columns).
#
# This check allows us to see whether we are
# handling a name / array in the index or column.
if index < index_len:
dtype_mapping = index_dtypes
name = index_names[index]
else:
index -= index_len
dtype_mapping = column_dtypes
name = self.columns[index]
# We have a dictionary, so we get the data type
# associated with the index or column (which can
# be denoted by its name in the DataFrame or its
# position in DataFrame's array of indices or
# columns), whichever is applicable.
if is_dict_like(dtype_mapping):
if name in dtype_mapping:
dtype_mapping = dtype_mapping[name]
elif index in dtype_mapping:
dtype_mapping = dtype_mapping[index]
else:
dtype_mapping = None
# If no mapping can be found, use the array's
# dtype attribute for formatting.
#
# A valid dtype must either be a type or
# string naming a type.
if dtype_mapping is None:
formats.append(v.dtype)
elif isinstance(dtype_mapping, (type, np.dtype, str)):
formats.append(dtype_mapping)
else:
element = "row" if i < index_len else "column"
msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}"
raise ValueError(msg)
return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})
@classmethod
def _from_arrays(
cls,
arrays,
columns,
index,
dtype: Optional[Dtype] = None,
verify_integrity: bool = True,
) -> DataFrame:
"""
Create DataFrame from a list of arrays corresponding to the columns.
Parameters
----------
arrays : list-like of arrays
Each array in the list corresponds to one column, in order.
columns : list-like, Index
The column names for the resulting DataFrame.
index : list-like, Index
The rows labels for the resulting DataFrame.
dtype : dtype, optional
Optional dtype to enforce for all arrays.
verify_integrity : bool, default True
Validate and homogenize all input. If set to False, it is assumed
that all elements of `arrays` are actual arrays, as they will be
stored in a block (numpy ndarray or ExtensionArray), have the same
length as and are aligned with the index, and that `columns` and
`index` are ensured to be an Index object.
Returns
-------
DataFrame
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
mgr = arrays_to_mgr(
arrays,
columns,
index,
columns,
dtype=dtype,
verify_integrity=verify_integrity,
)
return cls(mgr)
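# Editor's note: an illustrative sketch, not from the original source, of the
# private ``_from_arrays`` constructor; the column and index values are made up:
#   >>> arrays = [np.array([1, 2, 3]), np.array(["a", "b", "c"])]
#   >>> DataFrame._from_arrays(arrays, columns=["num", "label"], index=[0, 1, 2])
# With the default ``verify_integrity=True`` the inputs are validated and
# homogenized before the block manager is built.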
@doc(storage_options=generic._shared_docs["storage_options"])
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_stata(
self,
path: FilePathOrBuffer,
convert_dates: Optional[Dict[Label, str]] = None,
write_index: bool = True,
byteorder: Optional[str] = None,
time_stamp: Optional[datetime.datetime] = None,
data_label: Optional[str] = None,
variable_labels: Optional[Dict[Label, str]] = None,
version: Optional[int] = 114,
convert_strl: Optional[Sequence[Label]] = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
) -> None:
"""
Export DataFrame object to Stata dta format.
Writes the DataFrame to a Stata dataset file.
"dta" files contain a Stata dataset.
Parameters
----------
path : str, buffer or path object
String, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
.. versionchanged:: 1.0.0
Previously this was "fname"
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str, optional
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
version : {{114, 117, 118, 119, None}}, default 114
Version to use in the output dta file. Set to None to let pandas
decide between 118 or 119 formats depending on the number of
columns in the frame. Version 114 can be read by Stata 10 and
later. Version 117 can be read by Stata 13 or later. Version 118
is supported in Stata 14 and later. Version 119 is supported in
Stata 15 and later. Version 114 limits string variables to 244
characters or fewer while versions 117 and later allow strings
with lengths up to 2,000,000 characters. Versions 118 and 119
support Unicode characters, and version 119 supports more than
32,767 variables.
Version 119 should usually only be used when the number of
variables exceeds the capacity of dta format 118. Exporting
smaller datasets in format 119 may have unintended consequences,
and, as of November 2020, Stata SE cannot read version 119 files.
.. versionchanged:: 1.0.0
Added support for formats 118 and 119.
convert_strl : list, optional
List of column names to convert to string columns to Stata StrL
format. Only available if version is 117. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
compression : str or dict, default 'infer'
For on-the-fly compression of the output dta. If string, specifies
compression mode. If dict, value at key 'method' specifies
compression mode. Compression mode must be one of {{'infer', 'gzip',
'bz2', 'zip', 'xz', None}}. If compression mode is 'infer' and
`fname` is path-like, then detect compression from the following
extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
compression). If dict and compression mode is one of {{'zip',
'gzip', 'bz2'}}, or inferred as one of the above, other entries
passed as additional compression options.
.. versionadded:: 1.1.0
{storage_options}
.. versionadded:: 1.2.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
or datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
See Also
--------
read_stata : Import Stata data files.
io.stata.StataWriter : Low-level writer for Stata data files.
io.stata.StataWriter117 : Low-level writer for version 117 files.
Examples
--------
>>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}})
>>> df.to_stata('animals.dta') # doctest: +SKIP
"""
if version not in (114, 117, 118, 119, None):
raise ValueError("Only formats 114, 117, 118 and 119 are supported.")
if version == 114:
if convert_strl is not None:
raise ValueError("strl is not supported in format 114")
from pandas.io.stata import StataWriter as statawriter
elif version == 117:
# mypy: Name 'statawriter' already defined (possibly by an import)
from pandas.io.stata import ( # type: ignore[no-redef]
StataWriter117 as statawriter,
)
else: # versions 118 and 119
# mypy: Name 'statawriter' already defined (possibly by an import)
from pandas.io.stata import ( # type: ignore[no-redef]
StataWriterUTF8 as statawriter,
)
kwargs: Dict[str, Any] = {}
if version is None or version >= 117:
# strl conversion is only supported >= 117
kwargs["convert_strl"] = convert_strl
if version is None or version >= 118:
# Specifying the version is only supported for UTF8 (118 or 119)
kwargs["version"] = version
# mypy: Too many arguments for "StataWriter"
writer = statawriter( # type: ignore[call-arg]
path,
self,
convert_dates=convert_dates,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
write_index=write_index,
variable_labels=variable_labels,
compression=compression,
storage_options=storage_options,
**kwargs,
)
writer.write_file()
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_feather(self, path: FilePathOrBuffer[AnyStr], **kwargs) -> None:
"""
Write a DataFrame to the binary Feather format.
Parameters
----------
path : str or file-like object
If a string, it will be used as Root Directory path.
**kwargs :
Additional keywords passed to :func:`pyarrow.feather.write_feather`.
Starting with pyarrow 0.17, this includes the `compression`,
`compression_level`, `chunksize` and `version` keywords.
.. versionadded:: 1.1.0
"""
from pandas.io.feather_format import to_feather
to_feather(self, path, **kwargs)
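# Editor's note: a minimal usage sketch, not from the original source; the file
# name is hypothetical and pyarrow must be installed:
#   >>> df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.5]})
#   >>> df.to_feather("frame.feather")        # doctest: +SKIP
#   >>> pd.read_feather("frame.feather")      # doctest: +SKIP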
@doc(
Series.to_markdown,
klass=_shared_doc_kwargs["klass"],
storage_options=_shared_docs["storage_options"],
examples="""Examples
--------
>>> df = pd.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(df.to_markdown())
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
Output markdown with a tabulate option.
>>> print(df.to_markdown(tablefmt="grid"))
+----+------------+------------+
| | animal_1 | animal_2 |
+====+============+============+
| 0 | elk | dog |
+----+------------+------------+
| 1 | pig | quetzal |
+----+------------+------------+
""",
)
def to_markdown(
self,
buf: Optional[Union[IO[str], str]] = None,
mode: str = "wt",
index: bool = True,
storage_options: StorageOptions = None,
**kwargs,
) -> Optional[str]:
if "showindex" in kwargs:
warnings.warn(
"'showindex' is deprecated. Only 'index' will be used "
"in a future version. Use 'index' to silence this warning.",
FutureWarning,
stacklevel=2,
)
kwargs.setdefault("headers", "keys")
kwargs.setdefault("tablefmt", "pipe")
kwargs.setdefault("showindex", index)
tabulate = import_optional_dependency("tabulate")
result = tabulate.tabulate(self, **kwargs)
if buf is None:
return result
with get_handle(buf, mode, storage_options=storage_options) as handles:
assert not isinstance(handles.handle, (str, mmap.mmap))
handles.handle.writelines(result)
return None
@doc(storage_options=generic._shared_docs["storage_options"])
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_parquet(
self,
path: Optional[FilePathOrBuffer] = None,
engine: str = "auto",
compression: Optional[str] = "snappy",
index: Optional[bool] = None,
partition_cols: Optional[List[str]] = None,
storage_options: StorageOptions = None,
**kwargs,
) -> Optional[bytes]:
"""
Write a DataFrame to the binary parquet format.
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
path : str or file-like object, default None
If a string, it will be used as Root Directory path
when writing a partitioned dataset. By file-like object,
we refer to objects with a write() method, such as a file handle
(e.g. via builtin open function) or io.BytesIO. The engine
fastparquet does not accept file-like objects. If path is None,
a bytes object is returned.
.. versionchanged:: 1.2.0
Previously this was "fname"
engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file.
If ``None``, similar to ``True`` the dataframe's index(es)
will be saved. However, instead of being saved as values,
the RangeIndex will be stored as a range in the metadata so it
doesn't require much space and is faster. Other indexes will
be included as columns in the file output.
.. versionadded:: 0.24.0
partition_cols : list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
Must be None if path is not a string.
.. versionadded:: 0.24.0
{storage_options}
.. versionadded:: 1.2.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
Returns
-------
bytes if no path argument is provided else None
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}})
>>> df.to_parquet('df.parquet.gzip',
... compression='gzip') # doctest: +SKIP
>>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP
col1 col2
0 1 3
1 2 4
If you want to get a buffer to the parquet content you can use a io.BytesIO
object, as long as you don't use partition_cols, which creates multiple files.
>>> import io
>>> f = io.BytesIO()
>>> df.to_parquet(f)
>>> f.seek(0)
0
>>> content = f.read()
"""
from pandas.io.parquet import to_parquet
return to_parquet(
self,
path,
engine,
compression=compression,
index=index,
partition_cols=partition_cols,
storage_options=storage_options,
**kwargs,
)
@Substitution(
header_type="bool",
header="Whether to print column labels, default True",
col_space_type="str or int, list or dict of int or str",
col_space="The minimum width of each column in CSS length "
"units. An int is assumed to be px units.\n\n"
" .. versionadded:: 0.25.0\n"
" Ability to use str",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_html(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
justify=None,
max_rows=None,
max_cols=None,
show_dimensions=False,
decimal=".",
bold_rows=True,
classes=None,
escape=True,
notebook=False,
border=None,
table_id=None,
render_links=False,
encoding=None,
):
"""
Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.display.html.border``.
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
table_id : str, optional
A CSS id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links.
.. versionadded:: 0.24.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string.
"""
if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
header=header,
index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
justify=justify,
index_names=index_names,
escape=escape,
decimal=decimal,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
)
# TODO: a generic formatter would be in DataFrameFormatter
return fmt.DataFrameRenderer(formatter).to_html(
buf=buf,
classes=classes,
notebook=notebook,
border=border,
encoding=encoding,
table_id=table_id,
render_links=render_links,
)
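# Editor's note: a brief usage sketch, not from the original source, showing the
# common buffer-less call, which returns the rendered HTML as a string:
#   >>> df = pd.DataFrame({"a": [1, 2]})
#   >>> html = df.to_html(classes="table table-striped", border=0)
#   >>> html.startswith("<table")
#   True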
# ----------------------------------------------------------------------
@Substitution(
klass="DataFrame",
type_sub=" and columns",
max_cols_sub=dedent(
"""\
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used. By default, the setting in
``pandas.options.display.max_info_columns`` is used."""
),
show_counts_sub=dedent(
"""\
show_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
only if the DataFrame is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
null_counts : bool, optional
.. deprecated:: 1.2.0
Use show_counts instead."""
),
examples_sub=dedent(
"""\
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
... "float_col": float_values})
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information of all columns:
>>> df.info(verbose=True)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 248.0+ bytes
Prints a summary of columns count and its dtypes but not per column
information:
>>> df.info(verbose=False)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
memory usage: 248.0+ bytes
Pipe output of DataFrame.info to buffer instead of sys.stdout, get
buffer content and writes to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open("df_info.txt", "w",
... encoding="utf-8") as f: # doctest: +SKIP
... f.write(s)
260
The `memory_usage` parameter allows deep introspection mode, especially
useful for big DataFrames and fine-tuning memory optimization:
>>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
>>> df = pd.DataFrame({
... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
... })
>>> df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 column_1 1000000 non-null object
1 column_2 1000000 non-null object
2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 22.9+ MB
>>> df.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 column_1 1000000 non-null object
1 column_2 1000000 non-null object
2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 165.9 MB"""
),
see_also_sub=dedent(
"""\
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
DataFrame.memory_usage: Memory usage of DataFrame columns."""
),
version_added_sub="",
)
@doc(BaseInfo.render)
def info(
self,
verbose: Optional[bool] = None,
buf: Optional[IO[str]] = None,
max_cols: Optional[int] = None,
memory_usage: Optional[Union[bool, str]] = None,
show_counts: Optional[bool] = None,
null_counts: Optional[bool] = None,
) -> None:
if null_counts is not None:
if show_counts is not None:
raise ValueError("null_counts used with show_counts. Use show_counts.")
warnings.warn(
"null_counts is deprecated. Use show_counts instead",
FutureWarning,
stacklevel=2,
)
show_counts = null_counts
info = DataFrameInfo(
data=self,
memory_usage=memory_usage,
)
info.render(
buf=buf,
max_cols=max_cols,
verbose=verbose,
show_counts=show_counts,
)
def memory_usage(self, index=True, deep=False) -> Series:
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
are the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 1.0+0.0j 1 True
1 1 1.0 1.0+0.0j 1 True
2 1 1.0 1.0+0.0j 1 True
3 1 1.0 1.0+0.0j 1 True
4 1 1.0 1.0+0.0j 1 True
>>> df.memory_usage()
Index 128
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default:
>>> df.memory_usage(deep=True)
Index 128
int64 40000
float64 40000
complex128 80000
object 180000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5244
"""
result = self._constructor_sliced(
[c.memory_usage(index=False, deep=deep) for col, c in self.items()],
index=self.columns,
)
if index:
result = self._constructor_sliced(
self.index.memory_usage(deep=deep), index=["Index"]
).append(result)
return result
def transpose(self, *args, copy: bool = False) -> DataFrame:
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
*args : tuple, optional
Accepted for compatibility with NumPy.
copy : bool, default False
Whether to copy the data after transposing, even for DataFrames
with a single dtype.
Note that a copy is always required for mixed dtype DataFrames,
or for DataFrames with any extension types.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8.0
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, {})
# construct the args
dtypes = list(self.dtypes)
if self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]):
# We have EAs with the same dtype. We can preserve that dtype in transpose.
dtype = dtypes[0]
arr_type = dtype.construct_array_type()
values = self.values
new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values]
result = self._constructor(
dict(zip(self.index, new_values)), index=self.columns
)
else:
new_values = self.values.T
if copy:
new_values = new_values.copy()
result = self._constructor(
new_values, index=self.columns, columns=self.index
)
return result.__finalize__(self, method="transpose")
@property
def T(self) -> DataFrame:
return self.transpose()
# ----------------------------------------------------------------------
# Indexing Methods
def _ixs(self, i: int, axis: int = 0):
"""
Parameters
----------
i : int
axis : int
Notes
-----
If slice passed, the resulting data will be a view.
"""
# irow
if axis == 0:
new_values = self._mgr.fast_xs(i)
# if we are a copy, mark as such
copy = isinstance(new_values, np.ndarray) and new_values.base is None
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype,
)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
label = self.columns[i]
values = self._mgr.iget(i)
result = self._box_col_values(values, i)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def _get_column_array(self, i: int) -> ArrayLike:
"""
Get the values of the i'th column (ndarray or ExtensionArray, as stored
in the Block)
"""
return self._mgr.iget_values(i)
def _iter_column_arrays(self) -> Iterator[ArrayLike]:
"""
Iterate over the arrays of all columns in order.
This returns the values as stored in the Block (ndarray or ExtensionArray).
"""
for i in range(len(self.columns)):
yield self._get_column_array(i)
def __getitem__(self, key):
key = lib.item_from_zerodim(key)
key = com.apply_if_callable(key, self)
if is_hashable(key):
# shortcut if the key is in columns
if self.columns.is_unique and key in self.columns:
if isinstance(self.columns, MultiIndex):
return self._getitem_multilevel(key)
return self._get_item_cache(key)
# Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
if isinstance(indexer, np.ndarray):
indexer = lib.maybe_indices_to_slice(
indexer.astype(np.intp, copy=False), len(self)
)
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._slice(indexer, axis=0)
# Do we have a (boolean) DataFrame?
if isinstance(key, DataFrame):
return self.where(key)
# Do we have a (boolean) 1d indexer?
if com.is_bool_indexer(key):
return self._getitem_bool_array(key)
# We are left with two options: a single key, and a collection of keys,
# We interpret tuples as collections only for non-MultiIndex
is_single_key = isinstance(key, tuple) or not is_list_like(key)
if is_single_key:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
indexer = self.columns.get_loc(key)
if is_integer(indexer):
indexer = [indexer]
else:
if is_iterator(key):
key = list(key)
indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1]
# take() does not accept boolean indexers
if getattr(indexer, "dtype", None) == bool:
indexer = np.where(indexer)[0]
data = self._take_with_is_copy(indexer, axis=1)
if is_single_key:
# What does looking for a single key in a non-unique index return?
# The behavior is inconsistent. It returns a Series, except when
# - the key itself is repeated (test on data.shape, #9519), or
# - we have a MultiIndex on columns (test on self.columns, #21309)
if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):
# GH#26490 using data[key] can cause RecursionError
data = data._get_item_cache(key)
return data
def _getitem_bool_array(self, key):
# also raises Exception if object array with NA values
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn(
"Boolean Series key will be reindexed to match DataFrame index.",
UserWarning,
stacklevel=3,
)
elif len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}."
)
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self._take_with_is_copy(indexer, axis=0)
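# Editor's note: illustrative only, not part of the original source. Boolean
# masks reach this helper via ``__getitem__``, e.g.:
#   >>> df = pd.DataFrame({"a": [-1, 2, 3]})
#   >>> df[df["a"] > 0]          # rows where the mask is True
# A boolean Series with a mismatched index is reindexed (with a warning), and a
# plain list/array mask must match ``len(df.index)`` exactly.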
def _getitem_multilevel(self, key):
# self.columns is a MultiIndex
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(
new_values, index=self.index, columns=result_columns
)
result = result.__finalize__(self)
# If there is only one column being returned, and its name is
# either an empty string, or a tuple with an empty string as its
# first element, then treat the empty string as a placeholder
# and return the column as if the user had provided that empty
# string in the key. If the result is a Series, exclude the
# implied empty string from its name.
if len(result.columns) == 1:
top = result.columns[0]
if isinstance(top, tuple):
top = top[0]
if top == "":
result = result[""]
if isinstance(result, Series):
result = self._constructor_sliced(
result, index=self.index, name=key
)
result._set_is_copy(self)
return result
else:
# loc is neither a slice nor ndarray, so must be an int
return self._ixs(loc, axis=1)
def _get_value(self, index, col, takeable: bool = False):
"""
Quickly retrieve single value at passed column and index.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
scalar
"""
if takeable:
series = self._ixs(col, axis=1)
return series._values[index]
series = self._get_item_cache(col)
engine = self.index._engine
try:
loc = engine.get_loc(index)
return series._values[loc]
except KeyError:
# GH 20629
if self.index.nlevels > 1:
# partial indexing forbidden
raise
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
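# Editor's note: an illustrative sketch, not from the original source. This
# private helper backs ``.at``/``.iat``; label-based and positional lookups:
#   >>> df = pd.DataFrame({"A": [10, 20]}, index=["x", "y"])
#   >>> df._get_value("y", "A")                 # label-based, like df.at["y", "A"]
#   20
#   >>> df._get_value(0, 0, takeable=True)      # positional, like df.iat[0, 0]
#   10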
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key: slice, value):
# NB: we can't just use self.loc[key] = value because that
# operates on labels and we need to operate positional for
# backwards-compat, xref GH#31469
self._check_setitem_copy()
self.iloc._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}!"
)
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.iloc._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError("Columns must be same length as key")
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
self.loc._ensure_listlike_indexer(key, axis=1, value=value)
indexer = self.loc._get_listlike_indexer(
key, axis=1, raise_missing=False
)[1]
self._check_setitem_copy()
self.iloc._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
key = self._constructor(key, **self._construct_axes_dict())
if key.size and not is_bool_dtype(key.values):
raise TypeError(
"Must pass DataFrame or 2-d ndarray with boolean values only"
)
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _iset_item(self, loc: int, value):
self._ensure_valid_index(value)
# technically _sanitize_column expects a label, not a position,
# but the behavior is the same as long as we pass broadcast=False
value = self._sanitize_column(loc, value, broadcast=False)
NDFrame._iset_item(self, loc, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If the value is a numpy array (not a Series), it must be the
same length as the DataFrame's index or an error will be thrown.
A Series will be conformed to the DataFrame's index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
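# Editor's note: a small illustrative sketch, not part of the original source,
# of the alignment behaviour described in the docstring above:
#   >>> df = pd.DataFrame({"a": [1, 2]}, index=["x", "y"])
#   >>> df["b"] = pd.Series({"y": 20, "z": 30})
#   # "z" is dropped and row "x" becomes NaN: the Series is conformed to the
#   # frame's index before being stored.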
def _set_value(self, index, col, value, takeable: bool = False):
"""
Put single value at passed column and index.
Parameters
----------
index : row label
col : column label
value : scalar
takeable : interpret the index/col as indexers, default False
"""
try:
if takeable is True:
series = self._ixs(col, axis=1)
series._set_value(index, value, takeable=True)
return
series = self._get_item_cache(col)
engine = self.index._engine
loc = engine.get_loc(index)
validate_numeric_casting(series.dtype, value)
series._values[loc] = value
# Note: trying to use series._set_value breaks tests in
# tests.frame.indexing.test_indexing and tests.indexing.test_partial
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
if takeable:
self.iloc[index, col] = value
else:
self.loc[index, col] = value
self._item_cache.pop(col, None)
def _ensure_valid_index(self, value):
"""
Ensure that if we don't have an index, that we can create one from the
passed value.
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value) and len(value):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError) as err:
raise ValueError(
"Cannot set a frame with no defined index "
"and a value that cannot be converted to a Series"
) from err
# GH31368 preserve name of index
index_copy = value.index.copy()
if self.index.name is not None:
index_copy.name = self.index.name
self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan)
def _box_col_values(self, values, loc: int) -> Series:
"""
Provide boxed values for a column.
"""
# Lookup in columns so that if e.g. a str datetime was passed
# we attach the Timestamp object as the name.
name = self.columns[loc]
klass = self._constructor_sliced
return klass(values, index=self.index, name=name, fastpath=True)
# ----------------------------------------------------------------------
# Unsorted
def query(self, expr, inplace=False, **kwargs):
"""
Query the columns of a DataFrame with a boolean expression.
Parameters
----------
expr : str
The query string to evaluate.
You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
You can refer to column names that are not valid Python variable names
by surrounding them in backticks. Thus, column names containing spaces
or punctuations (besides underscores) or starting with digits must be
        surrounded by backticks. (For example, a column named "Area (cm^2)" would
        be referenced as `Area (cm^2)`). Column names which are Python keywords
        (like "list", "for", "import", etc.) cannot be used.
For example, if one of your columns is called ``a a`` and you want
to sum it with ``b``, your query should be ```a a` + b``.
.. versionadded:: 0.25.0
Backtick quoting introduced.
.. versionadded:: 1.0.0
Expanding functionality of backtick quoting for more than only spaces.
inplace : bool
Whether the query should modify the data in place or return
a modified copy.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
DataFrame or None
DataFrame resulting from the provided query expression or
None if ``inplace=True``.
See Also
--------
eval : Evaluate a string describing operations on
DataFrame columns.
DataFrame.eval : Evaluate a string describing operations on
DataFrame columns.
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
        :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python;
        however, the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
*Backtick quoted variables*
Backtick quoted variables are parsed as literal Python code and
are converted internally to a Python valid identifier.
This can lead to the following problems.
During parsing a number of disallowed characters inside the backtick
quoted string are replaced by strings that are allowed as a Python identifier.
These characters include all operators in Python, the space character, the
question mark, the exclamation mark, the dollar sign, and the euro sign.
For other characters that fall outside the ASCII range (U+0001..U+007F)
and those that are not further specified in PEP 3131,
the query parser will raise an error.
        This excludes whitespace other than the space character, as well as
        the hash character (as it is used for comments) and the backtick
        itself (the backtick cannot be escaped).
In a special case, quotes that make a pair around a backtick can
confuse the parser.
For example, ```it's` > `that's``` will raise an error,
as it forms a quoted string (``'s > `that'``) with a backtick inside.
See also the Python documentation about lexical analysis
(https://docs.python.org/3/reference/lexical_analysis.html)
in combination with the source code in :mod:`pandas.core.computation.parsing`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6),
... 'B': range(10, 0, -2),
... 'C C': range(10, 5, -1)})
>>> df
A B C C
0 1 10 10
1 2 8 9
2 3 6 8
3 4 4 7
4 5 2 6
>>> df.query('A > B')
A B C C
4 5 2 6
The previous expression is equivalent to
>>> df[df.A > df.B]
A B C C
4 5 2 6
For columns with spaces in their name, you can use backtick quoting.
>>> df.query('B == `C C`')
A B C C
0 1 10 10
The previous expression is equivalent to
>>> df[df.B == df['C C']]
A B C C
0 1 10 10
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not isinstance(expr, str):
msg = f"expr must be a string to be evaluated, {type(expr)} given"
raise ValueError(msg)
kwargs["level"] = kwargs.pop("level", 0) + 1
kwargs["target"] = None
res = self.eval(expr, **kwargs)
try:
result = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
result = self[res]
if inplace:
self._update_inplace(result)
else:
return result
def eval(self, expr, inplace=False, **kwargs):
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ndarray, scalar, pandas object, or None
The result of the evaluation or None if ``inplace=True``.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
eval : Evaluate a Python expression as a string using various
backends.
Notes
-----
For more details see the API documentation for :func:`~eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
Multiple columns can be assigned to using multi-line expressions:
>>> df.eval(
... '''
... C = A + B
... D = A - B
... '''
... )
A B C D
0 1 10 11 -9
1 2 8 10 -6
2 3 6 9 -3
3 4 4 8 0
4 5 2 7 3
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, "inplace")
resolvers = kwargs.pop("resolvers", None)
kwargs["level"] = kwargs.pop("level", 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
column_resolvers = self._get_cleaned_column_resolvers()
resolvers = column_resolvers, index_resolvers
if "target" not in kwargs:
kwargs["target"] = self
kwargs["resolvers"] = kwargs.get("resolvers", ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None) -> DataFrame:
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
See Also
--------
DataFrame.dtypes: Return Series with the data type of each column.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<https://numpy.org/doc/stable/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int64'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = (frozenset(include), frozenset(exclude))
if not any(selection):
raise ValueError("at least one of include or exclude must be nonempty")
# convert the myriad valid dtypes object to a single representation
include = frozenset(infer_dtype_from_object(x) for x in include)
exclude = frozenset(infer_dtype_from_object(x) for x in exclude)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError(f"include and exclude overlap on {(include & exclude)}")
# We raise when both include and exclude are empty
# Hence, we can just shrink the columns we want to keep
keep_these = np.full(self.shape[1], True)
def extract_unique_dtypes_from_dtypes_set(
dtypes_set: FrozenSet[Dtype], unique_dtypes: np.ndarray
) -> List[Dtype]:
extracted_dtypes = [
unique_dtype
for unique_dtype in unique_dtypes
# error: Argument 1 to "tuple" has incompatible type
# "FrozenSet[Union[ExtensionDtype, str, Any, Type[str],
# Type[float], Type[int], Type[complex], Type[bool]]]";
# expected "Iterable[Union[type, Tuple[Any, ...]]]"
if issubclass(
unique_dtype.type, tuple(dtypes_set) # type: ignore[arg-type]
)
]
return extracted_dtypes
unique_dtypes = self.dtypes.unique()
if include:
included_dtypes = extract_unique_dtypes_from_dtypes_set(
include, unique_dtypes
)
keep_these &= self.dtypes.isin(included_dtypes)
if exclude:
excluded_dtypes = extract_unique_dtypes_from_dtypes_set(
exclude, unique_dtypes
)
keep_these &= ~self.dtypes.isin(excluded_dtypes)
return self.iloc[:, keep_these.values]
def insert(self, loc, column, value, allow_duplicates=False) -> None:
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
            Insertion index. Must satisfy 0 <= loc <= len(columns).
column : str, number, or hashable object
Label of the inserted column.
value : int, Series, or array-like
allow_duplicates : bool, optional
"""
if allow_duplicates and not self.flags.allows_duplicate_labels:
raise ValueError(
"Cannot specify 'allow_duplicates=True' when "
"'self.flags.allows_duplicate_labels' is False."
)
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates)
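    # Sketch of typical ``insert`` usage (output assumes the default repr):
    #
    #   >>> df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
    #   >>> df.insert(1, "newcol", [99, 99])
    #   >>> df
    #      col1  newcol  col2
    #   0     1      99     3
    #   1     2      99     4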
def assign(self, **kwargs) -> DataFrame:
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
Later items in '\*\*kwargs' may refer to newly created or modified
columns in 'df'; items are computed and assigned into 'df' in order.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
You can create multiple columns within the same assign where one
of the columns depends on another one defined within the same assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
"""
data = self.copy()
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
return data
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
numpy.ndarray
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except ValueError as err:
# raised in MultiIndex.from_tuples, see test_insert_error_msmgs
if not value.index.is_unique:
# duplicate axis
raise err
# other
raise TypeError(
"incompatible index of inserted column with frame index"
) from err
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, ExtensionArray):
# Explicitly copy here, instead of in sanitize_index,
# as sanitize_index won't copy an EA, even with copy=True
value = value.copy()
value = sanitize_index(value, self.index)
elif isinstance(value, Index) or is_sequence(value):
# turn me into an ndarray
value = sanitize_index(value, self.index)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com.asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# cast ignores pandas dtypes. so save the dtype first
infer_dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)
# upcast
if is_extension_array_dtype(infer_dtype):
value = construct_1d_arraylike_from_scalar(
value, len(self.index), infer_dtype
)
else:
# pandas\core\frame.py:3827: error: Argument 1 to
# "cast_scalar_to_array" has incompatible type "int"; expected
# "Tuple[Any, ...]" [arg-type]
value = cast_scalar_to_array(
len(self.index), value # type: ignore[arg-type]
)
value = maybe_cast_to_datetime(value, infer_dtype)
# return internal types directly
if is_extension_array_dtype(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if not self.columns.is_unique or isinstance(self.columns, MultiIndex):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
@property
def _series(self):
return {
item: Series(
self._mgr.iget(idx), index=self.index, name=item, fastpath=True
)
for idx, item in enumerate(self.columns)
}
def lookup(self, row_labels, col_labels) -> np.ndarray:
"""
Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
.. deprecated:: 1.2.0
DataFrame.lookup is deprecated,
use DataFrame.melt and DataFrame.loc instead.
For an example see :meth:`~pandas.DataFrame.lookup`
in the user guide.
Parameters
----------
row_labels : sequence
The row labels to use for lookup.
col_labels : sequence
The column labels to use for lookup.
Returns
-------
numpy.ndarray
The found values.
"""
        msg = (
            "The 'lookup' method is deprecated and will be "
            "removed in a future version. "
            "You can use DataFrame.melt and DataFrame.loc "
            "as a substitute."
        )
warnings.warn(msg, FutureWarning, stacklevel=2)
n = len(row_labels)
if n != len(col_labels):
raise ValueError("Row labels must have same size as column labels")
if not (self.index.is_unique and self.columns.is_unique):
# GH#33041
raise ValueError("DataFrame.lookup requires unique index and columns")
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError("One or more row labels was not found")
if (cidx == -1).any():
raise KeyError("One or more column labels was not found")
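            # Convert (row, column) position pairs into offsets into the
            # flattened (row-major) values array: row * ncols + col.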
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype="O")
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):
frame = self
columns = axes["columns"]
if columns is not None:
frame = frame._reindex_columns(
columns, method, copy, level, fill_value, limit, tolerance
)
index = axes["index"]
if index is not None:
frame = frame._reindex_index(
index, method, copy, level, fill_value, limit, tolerance
)
return frame
    def _reindex_index(
self,
new_index,
method,
copy,
level,
fill_value=np.nan,
limit=None,
tolerance=None,
):
new_index, indexer = self.index.reindex(
new_index, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{0: [new_index, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_columns(
self,
new_columns,
method,
copy,
level,
fill_value=None,
limit=None,
tolerance=None,
):
new_columns, indexer = self.columns.reindex(
new_columns, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{1: [new_columns, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_multi(self, axes, copy, fill_value) -> DataFrame:
"""
We are guaranteed non-Nones in the axes.
"""
new_index, row_indexer = self.index.reindex(axes["index"])
new_columns, col_indexer = self.columns.reindex(axes["columns"])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(
self.values, indexer, fill_value=fill_value
)
return self._constructor(new_values, index=new_index, columns=new_columns)
else:
return self._reindex_with_indexers(
{0: [new_index, row_indexer], 1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value,
)
@doc(NDFrame.align, **_shared_doc_kwargs)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
) -> DataFrame:
return super().align(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
Change the row labels.
>>> df.set_axis(['a', 'b', 'c'], axis='index')
A B
a 1 4
b 2 5
c 3 6
Change the column labels.
>>> df.set_axis(['I', 'II'], axis='columns')
I II
0 1 4
1 2 5
2 3 6
Now, update the labels inplace.
>>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)
>>> df
i ii
0 1 4
1 2 5
2 3 6
"""
)
@Substitution(
**_shared_doc_kwargs,
extended_summary_sub=" column or",
axis_description_sub=", and 1 identifies the columns",
see_also_sub=" or columns",
)
@Appender(NDFrame.set_axis.__doc__)
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
return super().set_axis(labels, axis=axis, inplace=inplace)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.reindex.__doc__)
@rewrite_axis_style_signature(
"labels",
[
("method", None),
("copy", True),
("level", None),
("fill_value", np.nan),
("limit", None),
("tolerance", None),
],
)
def reindex(self, *args, **kwargs) -> DataFrame:
axes = validate_axis_style_args(self, args, kwargs, "labels", "reindex")
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop("axis", None)
kwargs.pop("labels", None)
return super().reindex(**kwargs)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index : single label or list-like
Alternative to specifying axis (``labels, axis=0``
is equivalent to ``index=labels``).
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If False, return a copy. Otherwise, do operation
inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
DataFrame or None
DataFrame without the removed index or column labels or
None if ``inplace=True``.
Raises
------
KeyError
If any of the labels is not found in the selected axis.
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns.
Series.drop : Return Series with specified index labels removed.
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3, 0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
return super().drop(
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
@rewrite_axis_style_signature(
"mapper",
[("copy", True), ("inplace", False), ("level", None), ("errors", "ignore")],
)
def rename(
self,
mapper: Optional[Renamer] = None,
*,
index: Optional[Renamer] = None,
columns: Optional[Renamer] = None,
axis: Optional[Axis] = None,
copy: bool = True,
inplace: bool = False,
level: Optional[Level] = None,
errors: str = "ignore",
) -> Optional[DataFrame]:
"""
Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper : dict-like or function
Dict-like or function transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
index : dict-like or function
Alternative to specifying axis (``mapper, axis=0``
is equivalent to ``index=mapper``).
columns : dict-like or function
Alternative to specifying axis (``mapper, axis=1``
is equivalent to ``columns=mapper``).
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
DataFrame or None
DataFrame with the renamed axis labels or None if ``inplace=True``.
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
DataFrame.rename_axis : Set the name of the axis.
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Rename columns using a mapping:
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
Rename index using a mapping:
>>> df.rename(index={0: "x", 1: "y", 2: "z"})
A B
x 1 4
y 2 5
z 3 6
Cast index labels to a different type:
>>> df.index
RangeIndex(start=0, stop=3, step=1)
>>> df.rename(index=str).index
Index(['0', '1', '2'], dtype='object')
>>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise")
Traceback (most recent call last):
KeyError: ['C'] not found in axis
Using axis-style parameters:
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
return super().rename(
mapper=mapper,
index=index,
columns=columns,
axis=axis,
copy=copy,
inplace=inplace,
level=level,
errors=errors,
)
@doc(NDFrame.fillna, **_shared_doc_kwargs)
def fillna(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
) -> Optional[DataFrame]:
return super().fillna(
value=value,
method=method,
axis=axis,
inplace=inplace,
limit=limit,
downcast=downcast,
)
def pop(self, item: Label) -> Series:
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : label
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
return super().pop(item=item)
@doc(NDFrame.replace, **_shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
return super().replace(
to_replace=to_replace,
value=value,
inplace=inplace,
limit=limit,
regex=regex,
method=method,
)
def _replace_columnwise(
self, mapping: Dict[Label, Tuple[Any, Any]], inplace: bool, regex
):
"""
Dispatch to Series.replace column-wise.
Parameters
----------
mapping : dict
of the form {col: (target, value)}
inplace : bool
regex : bool or same types as `to_replace` in DataFrame.replace
Returns
-------
DataFrame or None
"""
# Operate column-wise
res = self if inplace else self.copy()
ax = self.columns
for i in range(len(ax)):
if ax[i] in mapping:
ser = self.iloc[:, i]
target, value = mapping[ax[i]]
newobj = ser.replace(target, value, regex=regex)
res.iloc[:, i] = newobj
if inplace:
return
return res.__finalize__(self)
@doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"])
def shift(
self, periods=1, freq=None, axis=0, fill_value=lib.no_default
) -> DataFrame:
axis = self._get_axis_number(axis)
ncols = len(self.columns)
if axis == 1 and periods != 0 and fill_value is lib.no_default and ncols > 0:
# We will infer fill_value to match the closest column
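            # Shifting an existing column by ``len(self)`` produces an all-NA
            # column whose dtype follows from that column's dtype, so the
            # columns introduced by the shift match their neighbours instead
            # of defaulting to a plain NaN fill.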
if periods > 0:
result = self.iloc[:, :-periods]
for col in range(min(ncols, abs(periods))):
# TODO(EA2D): doing this in a loop unnecessary with 2D EAs
# Define filler inside loop so we get a copy
filler = self.iloc[:, 0].shift(len(self))
result.insert(0, col, filler, allow_duplicates=True)
else:
result = self.iloc[:, -periods:]
for col in range(min(ncols, abs(periods))):
# Define filler inside loop so we get a copy
filler = self.iloc[:, -1].shift(len(self))
result.insert(
len(result.columns), col, filler, allow_duplicates=True
)
result.columns = self.columns.copy()
return result
return super().shift(
periods=periods, freq=freq, axis=axis, fill_value=fill_value
)
def set_index(
self, keys, drop=True, append=False, inplace=False, verify_integrity=False
):
"""
Set the DataFrame index using existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and
instances of :class:`~collections.abc.Iterator`.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
If True, modifies the DataFrame in place (do not create a new object).
verify_integrity : bool, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method.
Returns
-------
DataFrame or None
Changed row labels or None if ``inplace=True``.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]})
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month')
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a MultiIndex using an Index and a column:
>>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Create a MultiIndex using two Series:
>>> s = pd.Series([1, 2, 3, 4])
>>> df.set_index([s, s**2])
month year sale
1 1 1 2012 55
2 4 4 2014 40
3 9 7 2013 84
4 16 10 2014 31
"""
inplace = validate_bool_kwarg(inplace, "inplace")
self._check_inplace_and_allows_duplicate_labels(inplace)
if not isinstance(keys, list):
keys = [keys]
err_msg = (
'The parameter "keys" may be a column key, one-dimensional '
"array, or a list containing only valid column keys and "
"one-dimensional arrays."
)
missing: List[Label] = []
for col in keys:
if isinstance(col, (Index, Series, np.ndarray, list, abc.Iterator)):
# arrays are fine as long as they are one-dimensional
# iterators get converted to list below
if getattr(col, "ndim", 1) != 1:
raise ValueError(err_msg)
else:
# everything else gets tried as a key; see GH 24969
try:
found = col in self.columns
except TypeError as err:
raise TypeError(
f"{err_msg}. Received column of type {type(col)}"
) from err
else:
if not found:
missing.append(col)
if missing:
raise KeyError(f"None of {missing} are in the columns")
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names: List[Label] = []
if append:
names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove: List[Label] = []
for col in keys:
if isinstance(col, MultiIndex):
for n in range(col.nlevels):
arrays.append(col._get_level_values(n))
names.extend(col.names)
elif isinstance(col, (Index, Series)):
# if Index then not MultiIndex (treated above)
arrays.append(col)
names.append(col.name)
elif isinstance(col, (list, np.ndarray)):
arrays.append(col)
names.append(None)
elif isinstance(col, abc.Iterator):
arrays.append(list(col))
names.append(None)
# from here, col can only be a column label
else:
arrays.append(frame[col]._values)
names.append(col)
if drop:
to_remove.append(col)
if len(arrays[-1]) != len(self):
# check newest element against length of calling frame, since
# ensure_index_from_sequences would not raise for append=False.
raise ValueError(
f"Length mismatch: Expected {len(self)} rows, "
f"received array of length {len(arrays[-1])}"
)
index = ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index[index.duplicated()].unique()
raise ValueError(f"Index has duplicate keys: {duplicates}")
# use set to handle duplicate column names gracefully in case of drop
for c in set(to_remove):
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
@overload
# https://github.com/python/mypy/issues/6580
# Overloaded function signatures 1 and 2 overlap with incompatible return types
def reset_index( # type: ignore[misc]
self,
level: Optional[Union[Hashable, Sequence[Hashable]]] = ...,
drop: bool = ...,
inplace: Literal[False] = ...,
col_level: Hashable = ...,
col_fill: Label = ...,
) -> DataFrame:
...
@overload
def reset_index(
self,
level: Optional[Union[Hashable, Sequence[Hashable]]] = ...,
drop: bool = ...,
inplace: Literal[True] = ...,
col_level: Hashable = ...,
col_fill: Label = ...,
) -> None:
...
def reset_index(
self,
level: Optional[Union[Hashable, Sequence[Hashable]]] = None,
drop: bool = False,
inplace: bool = False,
col_level: Hashable = 0,
col_fill: Label = "",
) -> Optional[DataFrame]:
"""
Reset the index, or a level of it.
Reset the index of the DataFrame, and use the default one instead.
If the DataFrame has a MultiIndex, this method can remove one or more
levels.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame or None
DataFrame with the new index or None if ``inplace=True``.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, "inplace")
self._check_inplace_and_allows_duplicate_labels(inplace)
if inplace:
new_obj = self
else:
new_obj = self.copy()
new_index = ibase.default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
to_insert: Iterable[Tuple[Any, Optional[Any]]]
if isinstance(self.index, MultiIndex):
names = [
(n if n is not None else f"level_{i}")
for i, n in enumerate(self.index.names)
]
to_insert = zip(self.index.levels, self.index.codes)
else:
default = "index" if "index" not in self else "level_0"
names = [default] if self.index.name is None else [self.index.name]
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
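            # Iterate in reverse and always insert at position 0 so that the
            # restored index levels end up in their original order at the
            # front of the frame.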
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = list(name) if isinstance(name, tuple) else [name]
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError(
"col_fill=None is incompatible "
f"with incomplete column name {name}"
)
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
return None
# ----------------------------------------------------------------------
# Reindex-based selection methods
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isna(self) -> DataFrame:
result = self._constructor(self._mgr.isna(func=isna))
return result.__finalize__(self, method="isna")
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isnull(self) -> DataFrame:
return self.isna()
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
def notna(self) -> DataFrame:
return ~self.isna()
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
def notnull(self) -> DataFrame:
return ~self.isna()
def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
"""
Remove missing values.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
            * 1, or 'columns' : Drop columns which contain missing values.
            .. versionchanged:: 1.0.0
               Only a single axis is allowed; passing a tuple or list of
               axes is no longer supported.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame or None
DataFrame with NA entries dropped from it or None if ``inplace=True``.
See Also
--------
DataFrame.isna: Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.
Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
... pd.NaT]})
>>> df
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'toy'])
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if isinstance(axis, (tuple, list)):
# GH20987
raise TypeError("supplying multiple axes to axis is no longer supported.")
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
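        # ``count`` holds the number of non-NA values per label along the
        # retained axis; the masks below keep labels that meet the
        # thresh / how criterion.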
if thresh is not None:
mask = count >= thresh
elif how == "any":
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == "all":
mask = count > 0
else:
if how is not None:
raise ValueError(f"invalid how option: {how}")
else:
raise TypeError("must specify how or thresh")
result = self.loc(axis=axis)[mask]
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(
self,
subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
keep: Union[str, bool] = "first",
inplace: bool = False,
ignore_index: bool = False,
) -> Optional[DataFrame]:
"""
Return DataFrame with duplicate rows removed.
        Considering certain columns is optional. Indexes, including time
        indexes, are ignored.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : bool, default False
Whether to drop duplicates in place or to return a copy.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
DataFrame or None
DataFrame with duplicates removed or None if ``inplace=True``.
See Also
--------
DataFrame.value_counts: Count unique combinations of columns.
Examples
--------
Consider dataset containing ramen rating.
>>> df = pd.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
By default, it removes duplicate rows based on all columns.
>>> df.drop_duplicates()
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
To remove duplicates on specific column(s), use ``subset``.
>>> df.drop_duplicates(subset=['brand'])
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
To remove duplicates and keep last occurrences, use ``keep``.
>>> df.drop_duplicates(subset=['brand', 'style'], keep='last')
brand style rating
1 Yum Yum cup 4.0
2 Indomie cup 3.5
4 Indomie pack 5.0
"""
if self.empty:
return self.copy()
inplace = validate_bool_kwarg(inplace, "inplace")
ignore_index = validate_bool_kwarg(ignore_index, "ignore_index")
duplicated = self.duplicated(subset, keep=keep)
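        # Unary minus on a boolean Series is logical inversion here, so this
        # selects the rows that are *not* flagged as duplicates.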
result = self[-duplicated]
if ignore_index:
result.index = ibase.default_index(len(result))
if inplace:
self._update_inplace(result)
return None
else:
return result
def duplicated(
self,
subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
keep: Union[str, bool] = "first",
) -> Series:
"""
Return boolean Series denoting duplicate rows.
Considering certain columns is optional.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to mark.
- ``first`` : Mark duplicates as ``True`` except for the first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
Series
            Boolean series indicating whether each row is a duplicate.
See Also
--------
Index.duplicated : Equivalent method on index.
Series.duplicated : Equivalent method on Series.
Series.drop_duplicates : Remove duplicate values from Series.
DataFrame.drop_duplicates : Remove duplicate values from DataFrame.
Examples
--------
Consider dataset containing ramen rating.
>>> df = pd.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
By default, for each set of duplicated values, the first occurrence
        is set to False and all others to True.
>>> df.duplicated()
0 False
1 True
2 False
3 False
4 False
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
        is set to False and all others to True.
>>> df.duplicated(keep='last')
0 True
1 False
2 False
3 False
4 False
dtype: bool
        By setting ``keep`` to False, all duplicates are marked True.
>>> df.duplicated(keep=False)
0 True
1 True
2 False
3 False
4 False
dtype: bool
To find duplicates on specific column(s), use ``subset``.
>>> df.duplicated(subset=['brand'])
0 False
1 True
2 False
3 True
4 True
dtype: bool
"""
from pandas._libs.hashtable import SIZE_HINT_LIMIT, duplicated_int64
if self.empty:
return self._constructor_sliced(dtype=bool)
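        # Strategy: factorize each relevant column into integer codes, then
        # combine the per-column codes into a single integer id per row
        # (get_group_index, similar in spirit to np.ravel_multi_index);
        # rows that share an id are duplicates of one another.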
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), SIZE_HINT_LIMIT)
)
return labels.astype("i8", copy=False), len(shape)
if subset is None:
subset = self.columns
elif (
not np.iterable(subset)
or isinstance(subset, str)
or isinstance(subset, tuple)
and subset in self.columns
):
subset = (subset,)
# needed for mypy since can't narrow types using np.iterable
subset = cast(Iterable, subset)
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
vals = (col.values for name, col in self.items() if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
result = self._constructor_sliced(duplicated_int64(ids, keep), index=self.index)
return result.__finalize__(self, method="duplicated")
# ----------------------------------------------------------------------
# Sorting
# TODO: Just move the sort_values doc here.
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_values.__doc__)
# error: Signature of "sort_values" incompatible with supertype "NDFrame"
def sort_values( # type: ignore[override]
self,
by,
axis=0,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
ignore_index=False,
key: ValueKeyFunc = None,
):
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError(
f"Length of ascending ({len(ascending)}) != length of by ({len(by)})"
)
if len(by) > 1:
keys = [self._get_label_or_level_values(x, axis=axis) for x in by]
# need to rewrap columns in Series to apply key function
if key is not None:
keys = [Series(k, name=name) for (k, name) in zip(keys, by)]
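            # lexsort_indexer produces one indexer that sorts lexicographically
            # over all keys at once, honouring the per-key ``ascending`` flags,
            # ``na_position`` and the optional key function.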
indexer = lexsort_indexer(
keys, orders=ascending, na_position=na_position, key=key
)
indexer = ensure_platform_int(indexer)
else:
by = by[0]
k = self._get_label_or_level_values(by, axis=axis)
# need to rewrap column in Series to apply key function
if key is not None:
k = Series(k, name=by)
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(
k, kind=kind, ascending=ascending, na_position=na_position, key=key
)
new_data = self._mgr.take(
indexer, axis=self._get_block_manager_axis(axis), verify=False
)
if ignore_index:
new_data.axes[1] = ibase.default_index(len(indexer))
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="sort_values")
def sort_index(
self,
axis=0,
level=None,
ascending: bool = True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
sort_remaining: bool = True,
ignore_index: bool = False,
key: IndexKeyFunc = None,
):
"""
Sort object by labels (along an axis).
Returns a new DataFrame sorted by label if `inplace` argument is
``False``, otherwise updates the original DataFrame and returns None.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis along which to sort. The value 0 identifies the rows,
and 1 identifies the columns.
level : int or level name or list of ints or list of level names
If not None, sort on values in specified index level(s).
ascending : bool or list of bools, default True
Sort ascending vs. descending. When the index is a MultiIndex the
sort direction can be controlled for each level individually.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ndarray.np.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
key : callable, optional
If not None, apply the key function to the index values
before sorting. This is similar to the `key` argument in the
builtin :meth:`sorted` function, with the notable difference that
this `key` function should be *vectorized*. It should expect an
``Index`` and return an ``Index`` of the same shape. For MultiIndex
inputs, the key is applied *per level*.
.. versionadded:: 1.1.0
Returns
-------
DataFrame or None
The original DataFrame sorted by the labels or None if ``inplace=True``.
See Also
--------
Series.sort_index : Sort Series by the index.
DataFrame.sort_values : Sort DataFrame by the value.
Series.sort_values : Sort Series by the value.
Examples
--------
>>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150],
... columns=['A'])
>>> df.sort_index()
A
1 4
29 2
100 1
150 5
234 3
By default, it sorts in ascending order, to sort in descending order,
use ``ascending=False``
>>> df.sort_index(ascending=False)
A
234 3
150 5
100 1
29 2
1 4
A key function can be specified which is applied to the index before
sorting. For a ``MultiIndex`` this is applied to each level separately.
>>> df = pd.DataFrame({"a": [1, 2, 3, 4]}, index=['A', 'b', 'C', 'd'])
>>> df.sort_index(key=lambda x: x.str.lower())
a
A 1
b 2
C 3
d 4
"""
return super().sort_index(
axis,
level,
ascending,
inplace,
kind,
na_position,
sort_remaining,
ignore_index,
key,
)
def value_counts(
self,
subset: Optional[Sequence[Label]] = None,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
):
"""
Return a Series containing counts of unique rows in the DataFrame.
.. versionadded:: 1.1.0
Parameters
----------
subset : list-like, optional
Columns to use when counting unique combinations.
normalize : bool, default False
Return proportions rather than frequencies.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
Returns
-------
Series
See Also
--------
Series.value_counts: Equivalent method on Series.
Notes
-----
The returned Series will have a MultiIndex with one level per input
column. By default, rows that contain any NA values are omitted from
the result. By default, the resulting Series will be in descending
order so that the first element is the most frequently-occurring row.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6],
... 'num_wings': [2, 0, 0, 0]},
... index=['falcon', 'dog', 'cat', 'ant'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
cat 4 0
ant 6 0
>>> df.value_counts()
num_legs num_wings
4 0 2
2 2 1
6 0 1
dtype: int64
>>> df.value_counts(sort=False)
num_legs num_wings
2 2 1
4 0 2
6 0 1
dtype: int64
>>> df.value_counts(ascending=True)
num_legs num_wings
2 2 1
6 0 1
4 0 2
dtype: int64
>>> df.value_counts(normalize=True)
num_legs num_wings
4 0 0.50
2 2 0.25
6 0 0.25
dtype: float64
"""
if subset is None:
subset = self.columns.tolist()
counts = self.groupby(subset).grouper.size()
if sort:
counts = counts.sort_values(ascending=ascending)
if normalize:
counts /= counts.sum()
# Force MultiIndex for single column
if len(subset) == 1:
counts.index = MultiIndex.from_arrays(
[counts.index], names=[counts.index.name]
)
return counts
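# Illustrative note on the single-column branch above (assuming pandas as pd):
# even when ``subset`` names only one column, the result index is forced to a
# MultiIndex so the return type matches the multi-column case.
#
#   >>> df = pd.DataFrame({"num_legs": [2, 4, 4]})
#   >>> type(df.value_counts().index).__name__
#   'MultiIndex'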
def nlargest(self, n, columns, keep="first") -> DataFrame:
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".
>>> df.nlargest(3, 'population')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'population', keep='last')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'population', keep='all')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
To order by the largest values in column "population" and then "GDP",
we can specify multiple columns like in the next example.
>>> df.nlargest(3, ['population', 'GDP'])
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
"""
return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()
def nsmallest(self, n, columns, keep="first") -> DataFrame:
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 337000,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 337000 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "population".
>>> df.nsmallest(3, 'population')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'population', keep='last')
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
Nauru 337000 182 NR
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'population', keep='all')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
Nauru 337000 182 NR
To order by the smallest values in column "population" and then "GDP", we can
specify multiple columns like in the next example.
>>> df.nsmallest(3, ['population', 'GDP'])
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Nauru 337000 182 NR
"""
return algorithms.SelectNFrame(
self, n=n, keep=keep, columns=columns
).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0) -> DataFrame:
"""
Swap levels i and j in a MultiIndex on a particular axis.
Parameters
----------
i, j : int or str
Levels of the indices to be swapped. Can pass level name as string.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to swap levels on. 0 or 'index' for row-wise, 1 or
'columns' for column-wise.
Returns
-------
DataFrame
"""
result = self.copy()
axis = self._get_axis_number(axis)
if not isinstance(result._get_axis(axis), MultiIndex): # pragma: no cover
raise TypeError("Can only swap levels on a hierarchical axis.")
if axis == 0:
assert isinstance(result.index, MultiIndex)
result.index = result.index.swaplevel(i, j)
else:
assert isinstance(result.columns, MultiIndex)
result.columns = result.columns.swaplevel(i, j)
return result
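# A small usage sketch for ``swaplevel`` (illustrative, assuming pandas as pd):
# with the default ``i=-2, j=-1`` the two innermost levels trade places.
#
#   >>> idx = pd.MultiIndex.from_tuples([("a", 1), ("b", 2)],
#   ...                                 names=["outer", "inner"])
#   >>> pd.DataFrame({"v": [10, 20]}, index=idx).swaplevel().index.names
#   FrozenList(['inner', 'outer'])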
def reorder_levels(self, order, axis=0) -> DataFrame:
"""
Rearrange index levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : {0 or 'index', 1 or 'columns'}, default 0
Where to reorder levels.
Returns
-------
DataFrame
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover
raise TypeError("Can only reorder levels on a hierarchical axis.")
result = self.copy()
if axis == 0:
assert isinstance(result.index, MultiIndex)
result.index = result.index.reorder_levels(order)
else:
assert isinstance(result.columns, MultiIndex)
result.columns = result.columns.reorder_levels(order)
return result
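# A small usage sketch for ``reorder_levels`` (illustrative, assuming pandas as
# pd): levels can be referenced by name or position, and every existing level
# must appear exactly once in ``order``.
#
#   >>> idx = pd.MultiIndex.from_tuples([("a", 1), ("b", 2)],
#   ...                                 names=["outer", "inner"])
#   >>> df = pd.DataFrame({"v": [10, 20]}, index=idx)
#   >>> df.reorder_levels(["inner", "outer"]).index.names
#   FrozenList(['inner', 'outer'])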
# ----------------------------------------------------------------------
# Arithmetic Methods
def _cmp_method(self, other, op):
axis = 1 # only relevant for Series other case
self, other = ops.align_method_FRAME(self, other, axis, flex=False, level=None)
# See GH#4537 for discussion of scalar op behavior
new_data = self._dispatch_frame_op(other, op, axis=axis)
return self._construct_result(new_data)
def _arith_method(self, other, op):
if ops.should_reindex_frame_op(self, other, op, 1, 1, None, None):
return ops.frame_arith_method_with_reindex(self, other, op)
axis = 1 # only relevant for Series other case
self, other = ops.align_method_FRAME(self, other, axis, flex=True, level=None)
new_data = self._dispatch_frame_op(other, op, axis=axis)
return self._construct_result(new_data)
_logical_method = _arith_method
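# Illustrative contrast between the two alignment modes above (assuming pandas
# as pd): comparisons use ``flex=False`` and require identically-labeled
# frames, while arithmetic uses ``flex=True`` and aligns on the union of
# labels, filling NaN where a label exists on only one side.
#
#   >>> left = pd.DataFrame({"a": [1, 2]})
#   >>> right = pd.DataFrame({"a": [1, 0], "b": [5, 6]})
#   >>> sorted((left + right).columns)   # arithmetic: union of columns
#   ['a', 'b']
#   >>> # left == right would raise ValueError: comparison needs identical labels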
def _dispatch_frame_op(self, right, func, axis: Optional[int] = None):
"""
Evaluate the frame operation func(left, right) by evaluating
column-by-column, dispatching to the Series implementation.
Parameters
----------
right : scalar, Series, or DataFrame
func : arithmetic or comparison operator
axis : {None, 0, 1}
Returns
-------
DataFrame
"""
# Get the appropriate array-op to apply to each column/block's values.
array_op = ops.get_array_op(func)
right = lib.item_from_zerodim(right)
if not is_list_like(right):
# i.e. scalar, faster than checking np.ndim(right) == 0
bm = self._mgr.apply(array_op, right=right)
return type(self)(bm)
elif isinstance(right, DataFrame):
assert self.index.equals(right.index)
assert self.columns.equals(right.columns)
# TODO: The previous assertion `assert right._indexed_same(self)`
# fails in cases with empty columns reached via
# _frame_arith_method_with_reindex
bm = self._mgr.operate_blockwise(right._mgr, array_op)
return type(self)(bm)
elif isinstance(right, Series) and axis == 1:
# axis=1 means we want to operate row-by-row
assert right.index.equals(self.columns)
right = right._values
# maybe_align_as_frame ensures we do not have an ndarray here
assert not isinstance(right, np.ndarray)
arrays = [
array_op(_left, _right)
for _left, _right in zip(self._iter_column_arrays(), right)
]
elif isinstance(right, Series):
assert right.index.equals(self.index) # Handle other cases later
right = right._values
arrays = [array_op(left, right) for left in self._iter_column_arrays()]
else:
# Remaining cases have less-obvious dispatch rules
raise NotImplementedError(right)
return type(self)._from_arrays(
arrays, self.columns, self.index, verify_integrity=False
)
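# Illustrative sketch of the dispatch cases above (assuming pandas as pd):
# a scalar right-hand side is applied blockwise, while a Series combined along
# ``axis=1`` is aligned against the columns and applied row-by-row.
#
#   >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
#   >>> (df + 10).values.tolist()                                # scalar case
#   [[11, 13], [12, 14]]
#   >>> df.add(pd.Series({"a": 10, "b": 100})).values.tolist()   # Series vs. columns
#   [[11, 103], [12, 104]]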
def _combine_frame(self, other: DataFrame, func, fill_value=None):
# at this point we have `self._indexed_same(other)`
if fill_value is None:
# since _arith_op may be called in a loop, avoid function call
# overhead if possible by doing this check once
_arith_op = func
else:
def _arith_op(left, right):
# for the mixed_type case where we iterate over columns,
# _arith_op(left, right) is equivalent to
# left._binop(right, func, fill_value=fill_value)
left, right = ops.fill_binop(left, right, fill_value)
return func(left, right)
new_data = self._dispatch_frame_op(other, _arith_op)
return new_data
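# A small sketch of the ``fill_value`` handling above, reached via the public
# flex methods (illustrative, assuming pandas as pd and numpy as np): a NaN on
# one side is replaced by ``fill_value`` before the op, while positions that
# are NaN on both sides stay NaN.
#
#   >>> df1 = pd.DataFrame({"a": [1.0, np.nan, np.nan]})
#   >>> df2 = pd.DataFrame({"a": [10.0, 20.0, np.nan]})
#   >>> df1.add(df2, fill_value=0).values.ravel().tolist()
#   [11.0, 20.0, nan]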
def _construct_result(self, result) -> DataFrame:
"""
Wrap the result of an arithmetic, comparison, or logical operation.
Parameters
----------
result : DataFrame
Returns
-------
DataFrame
"""
out = self._constructor(result, copy=False)
# Pin columns instead of passing to constructor for compat with
# non-unique columns case
out.columns = self.columns
out.index = self.index
return out
def __divmod__(self, other) -> Tuple[DataFrame, DataFrame]:
# Naive implementation, room for optimization
div = self // other
mod = self - div * other
return div, mod
def __rdivmod__(self, other) -> Tuple[DataFrame, DataFrame]:
# Naive implementation, room for optimization
div = other // self
mod = other - div * self
return div, mod
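# A quick sketch of the divmod identity implemented above (illustrative,
# assuming pandas as pd): ``div * other + mod`` reconstructs the original.
#
#   >>> df = pd.DataFrame({"a": [7, 9]})
#   >>> div, mod = divmod(df, 4)
#   >>> (div["a"].tolist(), mod["a"].tolist())
#   ([1, 2], [3, 1])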
# ----------------------------------------------------------------------
# Combination-Related
@doc(
_shared_docs["compare"],
"""
Returns
-------
DataFrame
DataFrame that shows the differences stacked side by side.
The resulting index will be a MultiIndex with 'self' and 'other'
stacked alternately at the inner level.
Raises
------
ValueError
When the two DataFrames don't have identical labels or shape.
See Also
--------
Series.compare : Compare with another Series and show differences.
DataFrame.equals : Test whether two objects contain the same elements.
Notes
-----
Matching NaNs will not appear as a difference.
Can only compare identically-labeled
(i.e. same shape, identical row and column labels) DataFrames.
Examples
--------
>>> df = pd.DataFrame(
... {{
... "col1": ["a", "a", "b", "b", "a"],
... "col2": [1.0, 2.0, 3.0, np.nan, 5.0],
... "col3": [1.0, 2.0, 3.0, 4.0, 5.0]
... }},
... columns=["col1", "col2", "col3"],
... )
>>> df
col1 col2 col3
0 a 1.0 1.0
1 a 2.0 2.0
2 b 3.0 3.0
3 b NaN 4.0
4 a 5.0 5.0
>>> df2 = df.copy()
>>> df2.loc[0, 'col1'] = 'c'
>>> df2.loc[2, 'col3'] = 4.0
>>> df2
col1 col2 col3
0 c 1.0 1.0
1 a 2.0 2.0
2 b 3.0 4.0
3 b NaN 4.0
4 a 5.0 5.0
Align the differences on columns
>>> df.compare(df2)
col1 col3
self other self other
0 a c NaN NaN
2 NaN NaN 3.0 4.0
Stack the differences on rows
>>> df.compare(df2, align_axis=0)
col1 col3
0 self a NaN
other c NaN
2 self NaN 3.0
other NaN 4.0
Keep the equal values
>>> df.compare(df2, keep_equal=True)
col1 col3
self other self other
0 a c 1.0 1.0
2 b b 3.0 4.0
Keep all original rows and columns
>>> df.compare(df2, keep_shape=True)
col1 col2 col3
self other self other self other
0 a c NaN NaN NaN NaN
1 NaN NaN NaN NaN NaN NaN
2 NaN NaN NaN NaN 3.0 4.0
3 NaN NaN NaN NaN NaN NaN
4 NaN NaN NaN NaN NaN NaN
Keep all original rows and columns and also all original values
>>> df.compare(df2, keep_shape=True, keep_equal=True)
col1 col2 col3
self other self other self other
0 a c 1.0 1.0 1.0 1.0
1 a a 2.0 2.0 2.0 2.0
2 b b 3.0 3.0 3.0 4.0
3 b b NaN NaN 4.0 4.0
4 a a 5.0 5.0 5.0 5.0
""",
klass=_shared_doc_kwargs["klass"],
)
def compare(
self,
other: DataFrame,
align_axis: Axis = 1,
keep_shape: bool = False,
keep_equal: bool = False,
) -> DataFrame:
return super().compare(
other=other,
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
)
def combine(
self, other: DataFrame, func, fill_value=None, overwrite=True
) -> DataFrame:
"""
Perform column-wise combine with another DataFrame.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two Series as inputs and returns a Series or a
scalar. Used to merge the two DataFrames column by column.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : bool, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
DataFrame
Combination of the provided DataFrames.
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method.
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
the axis differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
# don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
if col not in self.columns:
# If `col` is not in self (it exists only in `other`), the aligned
# series is all NaN; try to cast it to other_dtype.
new_dtype = other_dtype
try:
series = series.astype(new_dtype, copy=False)
except ValueError:
# e.g. new_dtype is integer types
pass
else:
# if we have different dtypes, possibly promote
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
arr = func(series, otherSeries)
arr = maybe_downcast_to_dtype(arr, new_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index, columns=new_columns)
def combine_first(self, other: DataFrame) -> DataFrame:
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function.
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
import pandas.core.computation.expressions as expressions
def combiner(x, y):
mask = extract_array(isna(x))
x_values = extract_array(x, extract_numpy=True)
y_values = extract_array(y, extract_numpy=True)
# If the column y in other DataFrame is not in first DataFrame,
# just return y_values.
if y.name not in self.columns:
return y_values
return expressions.where(mask, y_values, x_values)
return self.combine(other, combiner, overwrite=False)
def update(
self, other, join="left", overwrite=True, filter_func=None, errors="ignore"
) -> None:
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged:: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-column(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != "left": # pragma: no cover
raise NotImplementedError("Only left join is supported")
if errors not in ["ignore", "raise"]:
raise ValueError("The parameter errors must be either 'ignore' or 'raise'")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col]._values
that = other[col]._values
if filter_func is not None:
with np.errstate(all="ignore"):
mask = ~filter_func(this) | isna(that)
else:
if errors == "raise":
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
# don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that)
# ----------------------------------------------------------------------
# Data reshaping
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]})
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean()
Max Speed
Animal
Falcon 375.0
Parrot 25.0
**Hierarchical Indexes**
We can groupby different levels of a hierarchical index
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},
... index=index)
>>> df
Max Speed
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
>>> df.groupby(level=0).mean()
Max Speed
Animal
Falcon 370.0
Parrot 25.0
>>> df.groupby(level="Type").mean()
Max Speed
Type
Captive 210.0
Wild 185.0
We can also choose to include NA in group keys or not by setting
`dropna` parameter, the default setting is `True`:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = pd.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum()
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum()
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
>>> l = [["a", 12, 12], [None, 12.3, 33.], ["b", 12.3, 123], ["a", 1, 1]]
>>> df = pd.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by="a").sum()
b c
a
a 13.0 13.0
b 12.3 123.0
>>> df.groupby(by="a", dropna=False).sum()
b c
a
a 13.0 13.0
b 12.3 123.0
NaN 12.3 33.0
"""
)
@Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
def groupby(
self,
by=None,
axis=0,
level=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = no_default,
observed: bool = False,
dropna: bool = True,
) -> DataFrameGroupBy:
from pandas.core.groupby.generic import DataFrameGroupBy
if squeeze is not no_default:
warnings.warn(
(
"The `squeeze` parameter is deprecated and "
"will be removed in a future version."
),
FutureWarning,
stacklevel=2,
)
else:
squeeze = False
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
return DataFrameGroupBy(
obj=self,
keys=by,
axis=axis,
level=level,
as_index=as_index,
sort=sort,
group_keys=group_keys,
squeeze=squeeze,
observed=observed,
dropna=dropna,
)
_shared_docs[
"pivot"
] = """
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation, multiple values will result in a MultiIndex in the
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
----------%s
index : str or object or a list of str, optional
Column to use to make new frame's index. If None, uses
existing index.
.. versionchanged:: 1.1.0
Also accept list of index names.
columns : str or object or a list of str
Column to use to make new frame's columns.
.. versionchanged:: 1.1.0
Also accept list of columns names.
values : str, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
Returns
-------
DataFrame
Returns reshaped DataFrame.
Raises
------
ValueError:
When there are any `index`, `columns` combinations with multiple
values. Use `DataFrame.pivot_table` when you need to aggregate.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : Pivot based on the index values instead of a
column.
wide_to_long : Wide panel to long format. Less flexible but more
user-friendly than melt.
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods.
Examples
--------
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
baz zoo
bar A B C A B C
foo
one 1 2 3 x y z
two 4 5 6 q w t
You could also assign a list of column names or a list of index names.
>>> df = pd.DataFrame({
... "lev1": [1, 1, 1, 2, 2, 2],
... "lev2": [1, 1, 2, 1, 1, 2],
... "lev3": [1, 2, 1, 2, 1, 2],
... "lev4": [1, 2, 3, 4, 5, 6],
... "values": [0, 1, 2, 3, 4, 5]})
>>> df
lev1 lev2 lev3 lev4 values
0 1 1 1 1 0
1 1 1 2 2 1
2 1 2 1 3 2
3 2 1 2 4 3
4 2 1 1 5 4
5 2 2 2 6 5
>>> df.pivot(index="lev1", columns=["lev2", "lev3"],values="values")
lev2 1 2
lev3 1 2 1 2
lev1
1 0.0 1.0 2.0 NaN
2 4.0 3.0 NaN 5.0
>>> df.pivot(index=["lev1", "lev2"], columns=["lev3"],values="values")
lev3 1 2
lev1 lev2
1 1 0.0 1.0
2 2.0 NaN
2 1 4.0 3.0
2 NaN 5.0
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]})
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
Notice that the first two rows are the same for our `index`
and `columns` arguments.
>>> df.pivot(index='foo', columns='bar', values='baz')
Traceback (most recent call last):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
@Substitution("")
@Appender(_shared_docs["pivot"])
def pivot(self, index=None, columns=None, values=None) -> DataFrame:
from pandas.core.reshape.pivot import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs[
"pivot_table"
] = """
Create a spreadsheet-style pivot table as a DataFrame.
The levels in the pivot table will be stored in MultiIndex objects
(hierarchical indexes) on the index and columns of the result DataFrame.
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
it is used in the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
it is used in the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level contains the function names
(inferred from the function objects themselves).
If a dict is passed, the key is the column to aggregate and the value
is a function or a list of functions.
fill_value : scalar, default None
Value to replace missing values with (in the resulting pivot table,
after aggregation).
margins : bool, default False
Add all row / column margins (e.g. for subtotal / grand totals).
dropna : bool, default True
Do not include columns whose entries are all NaN.
margins_name : str, default 'All'
Name of the row / column that will contain the totals
when margins is True.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionchanged:: 0.25.0
Returns
-------
DataFrame
An Excel style pivot table.
See Also
--------
DataFrame.pivot : Pivot without aggregation that can handle
non-numeric data.
DataFrame.melt: Unpivot a DataFrame from wide to long format,
optionally leaving identifiers set.
wide_to_long : Wide panel to long format. Less flexible but more
user-friendly than melt.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]})
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
We can also fill missing values using the `fill_value` parameter.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum, fill_value=0)
>>> table
C large small
A B
bar one 4 5
two 7 6
foo one 4 1
two 0 6
The next example aggregates by taking the mean across multiple columns.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': np.mean})
>>> table
D E
A C
bar large 5.500000 7.500000
small 5.500000 8.500000
foo large 2.000000 4.500000
small 2.333333 4.333333
We can also calculate multiple types of aggregations for any given
value column.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max mean min
A C
bar large 5.500000 9.0 7.500000 6.0
small 5.500000 9.0 8.500000 8.0
foo large 2.000000 5.0 4.500000 4.0
small 2.333333 6.0 4.333333 2.0
"""
@Substitution("")
@Appender(_shared_docs["pivot_table"])
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
observed=False,
) -> DataFrame:
from pandas.core.reshape.pivot import pivot_table
return pivot_table(
self,
values=values,
index=index,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
observed=observed,
)
def stack(self, level=-1, dropna=True):
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
Parameters
----------
level : int, str, list, default -1
Level(s) to stack from the column axis onto the index
axis, defined as one index or label, or a list of indices
or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with
missing values. Stacking a column level onto the index
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat weight 0
height 1
dog weight 2
height 3
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height NaN 2.0
weight 1.0 NaN
dog height NaN 4.0
weight 3.0 NaN
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
**Dropping missing values**
>>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
... index=['cat', 'dog'],
... columns=multicol2)
Note that rows where all values are missing are dropped by
default but this behaviour can be controlled via the dropna
keyword parameter:
>>> df_multi_level_cols3
weight height
kg m
cat NaN 1.0
dog 2.0 3.0
>>> df_multi_level_cols3.stack(dropna=False)
height weight
cat kg NaN NaN
m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
>>> df_multi_level_cols3.stack(dropna=True)
height weight
cat m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
result = stack_multiple(self, level, dropna=dropna)
else:
result = stack(self, level, dropna=dropna)
return result.__finalize__(self, method="stack")
def explode(
self, column: Union[str, Tuple], ignore_index: bool = False
) -> DataFrame:
"""
Transform each element of a list-like to a row, replicating index values.
.. versionadded:: 0.25.0
Parameters
----------
column : str or tuple
Column to explode.
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
Exploded lists to rows of the subset columns;
index will be duplicated for these rows.
Raises
------
ValueError :
if columns of the frame are not unique.
See Also
--------
DataFrame.unstack : Pivot a level of the (necessarily hierarchical)
index labels.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
Series.explode : Explode a Series from list-like entries to long format.
Notes
-----
This routine will explode list-likes including lists, tuples, sets,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged, and empty list-likes will
result in a np.nan for that row. In addition, the ordering of rows in the
output will be non-deterministic when exploding sets.
Examples
--------
>>> df = pd.DataFrame({'A': [[1, 2, 3], 'foo', [], [3, 4]], 'B': 1})
>>> df
A B
0 [1, 2, 3] 1
1 foo 1
2 [] 1
3 [3, 4] 1
>>> df.explode('A')
A B
0 1 1
0 2 1
0 3 1
1 foo 1
2 NaN 1
3 3 1
3 4 1
"""
if not (is_scalar(column) or isinstance(column, tuple)):
raise ValueError("column must be a scalar")
if not self.columns.is_unique:
raise ValueError("columns must be unique")
df = self.reset_index(drop=True)
result = df[column].explode()
result = df.drop([column], axis=1).join(result)
if ignore_index:
result.index = ibase.default_index(len(result))
else:
result.index = self.index.take(result.index)
result = result.reindex(columns=self.columns, copy=False)
return result
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels.
Returns a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
Parameters
----------
level : int, str, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name.
fill_value : int, str or dict
Replace NaN with this value if the unstack produces missing values.
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
"""
from pandas.core.reshape.reshape import unstack
result = unstack(self, level, fill_value)
return result.__finalize__(self, method="unstack")
@Appender(_shared_docs["melt"] % {"caller": "df.melt(", "other": "melt"})
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
ignore_index=True,
) -> DataFrame:
return melt(
self,
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
ignore_index=ignore_index,
)
# ----------------------------------------------------------------------
# Time series-related
@doc(
Series.diff,
klass="Dataframe",
extra_params="axis : {0 or 'index', 1 or 'columns'}, default 0\n "
"Take difference over rows (0) or columns (1).\n",
other_klass="Series",
examples=dedent(
"""
Difference with previous row
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]})
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(axis=1)
a b c
0 NaN 0 0
1 NaN -1 3
2 NaN -1 7
3 NaN -1 13
4 NaN 0 20
5 NaN 2 28
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
Overflow in input dtype
>>> df = pd.DataFrame({'a': [1, 0]}, dtype=np.uint8)
>>> df.diff()
a
0 NaN
1 255.0"""
),
)
def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame:
if not isinstance(periods, int):
if not (is_float(periods) and periods.is_integer()):
raise ValueError("periods must be an integer")
periods = int(periods)
bm_axis = self._get_block_manager_axis(axis)
if bm_axis == 0 and periods != 0:
return self - self.shift(periods, axis=axis)
new_data = self._mgr.diff(n=periods, axis=bm_axis)
return self._constructor(new_data).__finalize__(self, "diff")
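# Illustrative check of the ``axis=1`` fast path above (assuming pandas as pd):
# column-wise differences reduce to ``self - self.shift(periods, axis=1)``.
#
#   >>> df = pd.DataFrame({"a": [1, 2], "b": [4, 8]})
#   >>> df.diff(axis=1).equals(df - df.shift(1, axis=1))
#   True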
# ----------------------------------------------------------------------
# Function application
def _gotitem(
self,
key: Union[Label, List[Label]],
ndim: int,
subset: Optional[FrameOrSeriesUnion] = None,
) -> FrameOrSeriesUnion:
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
elif subset.ndim == 1: # is Series
return subset
# TODO: _shallow_copy(subset)?
return subset[key]
_agg_summary_and_see_also_doc = dedent(
"""
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to
``numpy.mean(arr_2d, axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
core.groupby.GroupBy : Perform operations over groups.
core.resample.Resampler : Perform operations over resampled bins.
core.window.Rolling : Perform operations over rolling window.
core.window.Expanding : Perform operations over expanding window.
core.window.ExponentialMovingWindow : Perform operation over exponential weighted
window.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])
A B C
sum 12.0 15.0 18.0
min 1.0 2.0 3.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
sum 12.0 NaN
min 1.0 2.0
max NaN 8.0
Aggregate different functions over the columns and rename the index of the resulting
DataFrame.
>>> df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))
A B C
x 7.0 NaN NaN
y NaN 2.0 NaN
z NaN NaN 6.0
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
0 2.0
1 5.0
2 8.0
3 NaN
dtype: float64
"""
)
@doc(
_shared_docs["aggregate"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
see_also=_agg_summary_and_see_also_doc,
examples=_agg_examples_doc,
)
def aggregate(self, func=None, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
result = None
try:
result, how = self._aggregate(func, axis, *args, **kwargs)
except TypeError as err:
exc = TypeError(
"DataFrame constructor called with "
f"incompatible data and dtype: {err}"
)
raise exc from err
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
if relabeling:
# This is to keep the order to columns occurrence unchanged, and also
# keep the order of new columns occurrence unchanged
# For the return values of reconstruct_func, if relabeling is
# False, columns and order will be None.
assert columns is not None
assert order is not None
result_in_dict = relabel_result(result, func, columns, order)
result = DataFrame(result_in_dict, index=columns)
return result
def _aggregate(self, arg, axis=0, *args, **kwargs):
if axis == 1:
# NDFrame.aggregate returns a tuple, and we need to transpose
# only result
result, how = aggregate(self.T, arg, *args, **kwargs)
result = result.T if result is not None else result
return result, how
return aggregate(self, arg, *args, **kwargs)
agg = aggregate
@doc(
_shared_docs["transform"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
)
def transform(
self, func: AggFuncType, axis: Axis = 0, *args, **kwargs
) -> DataFrame:
result = transform(self, func, axis, *args, **kwargs)
assert isinstance(result, DataFrame)
return result
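# A minimal usage sketch for ``transform`` (illustrative, assuming pandas as
# pd): the callable must return a result of the same length as its input
# (reductions are rejected).
#
#   >>> df = pd.DataFrame({"a": [1, 2, 3]})
#   >>> df.transform(lambda s: s - s.mean())["a"].tolist()
#   [-1.0, 0.0, 1.0]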
def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``). By default (``result_type=None``), the final return type
is inferred from the return type of the applied function. Otherwise,
it depends on the `result_type` argument.
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
raw : bool, default False
Determines if row or column is passed as a Series or ndarray object:
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects
instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
result_type : {'expand', 'reduce', 'broadcast', None}, default None
These only act when ``axis=1`` (columns):
* 'expand' : list-like results will be turned into columns.
* 'reduce' : returns a Series if possible rather than expanding
list-like results. This is the opposite of 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the DataFrame, the original index and columns will be
retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwds
Additional keyword arguments to pass as keywords arguments to
`func`.
Returns
-------
Series or DataFrame
Result of applying ``func`` along the given axis of the
DataFrame.
See Also
--------
DataFrame.applymap: For elementwise operations.
DataFrame.aggregate: Only perform aggregating type operations.
DataFrame.transform: Only perform transforming type operations.
Examples
--------
>>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> df.apply(np.sqrt)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
Using a reducing function on either axis
>>> df.apply(np.sum, axis=0)
A 12
B 27
dtype: int64
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
dtype: int64
Returning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
dtype: object
Passing ``result_type='expand'`` will expand list-like results
to columns of a Dataframe
>>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
0 1
0 1 2
1 1 2
2 1 2
Returning a Series inside the function is similar to passing
``result_type='expand'``. The resulting column names
will be the Series index.
>>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
foo bar
0 1 2
1 1 2
2 1 2
Passing ``result_type='broadcast'`` will ensure the same shape
result, whether list-like or scalar is returned by the function,
and broadcast it along the axis. The resulting column names will
be the originals.
>>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
A B
0 1 2
1 1 2
2 1 2
"""
from pandas.core.apply import frame_apply
op = frame_apply(
self,
func=func,
axis=axis,
raw=raw,
result_type=result_type,
args=args,
kwds=kwds,
)
return op.get_result()
def applymap(self, func, na_action: Optional[str] = None) -> DataFrame:
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
na_action : {None, 'ignore'}, default None
If 'ignore', propagate NaN values, without passing them to func.
.. versionadded:: 1.2
Returns
-------
DataFrame
Transformed DataFrame.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
Examples
--------
>>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> df.applymap(lambda x: len(str(x)))
0 1
0 3 4
1 5 5
Like Series.map, NA values can be ignored:
>>> df_copy = df.copy()
>>> df_copy.iloc[0, 0] = pd.NA
>>> df_copy.applymap(lambda x: len(str(x)), na_action='ignore')
0 1
0 <NA> 4
1 5 5
Note that a vectorized version of `func` often exists, which will
be much faster. You could square each number elementwise.
>>> df.applymap(lambda x: x**2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
But it's better to avoid applymap in that case.
>>> df ** 2
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
if na_action not in {"ignore", None}:
raise ValueError(
f"na_action must be 'ignore' or None. Got {repr(na_action)}"
)
ignore_na = na_action == "ignore"
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func, ignore_na=ignore_na)
return lib.map_infer(x.astype(object)._values, func, ignore_na=ignore_na)
return self.apply(infer).__finalize__(self, "applymap")
# ----------------------------------------------------------------------
# Merging / joining methods
def append(
self, other, ignore_index=False, verify_integrity=False, sort=False
) -> DataFrame:
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
verify_integrity : bool, default False
If True, raise ValueError on creating index with duplicates.
sort : bool, default False
Sort columns if the columns of `self` and `other` are not aligned.
.. versionchanged:: 1.0.0
Changed to not sort by default.
Returns
-------
DataFrame
See Also
--------
concat : General function to concatenate DataFrame or Series objects.
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following, while not recommended methods for generating DataFrames,
show two ways to generate a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
if not ignore_index:
raise TypeError("Can only append a dict if ignore_index=True")
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True "
"or if the Series has a name"
)
index = Index([other.name], name=self.index.name)
idx_diff = other.index.difference(self.columns)
try:
combined_columns = self.columns.append(idx_diff)
except TypeError:
combined_columns = self.columns.astype(object).append(idx_diff)
other = (
other.reindex(combined_columns, copy=False)
.to_frame()
.T.infer_objects()
.rename_axis(index.names, copy=False)
)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list):
if not other:
pass
elif not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.reindex(columns=self.columns)
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self, *other]
else:
to_concat = [self, other]
return (
concat(
to_concat,
ignore_index=ignore_index,
verify_integrity=verify_integrity,
sort=sort,
)
).__finalize__(self, method="append")
def join(
self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False
) -> DataFrame:
"""
Join columns of another DataFrame.
Join columns with `other` DataFrame either on index or on a key
column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame.
on : str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation.
how : {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use `other`'s index.
* outer: form union of calling frame's index (or column if on is
specified) with `other`'s index, and sort it lexicographically.
* inner: form intersection of calling frame's index (or column if
on is specified) with `other`'s index, preserving the order
of the calling's one.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from right frame's overlapping columns.
sort : bool, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword).
Returns
-------
DataFrame
A dataframe containing columns from both the caller and `other`.
See Also
--------
DataFrame.merge : For column(s)-on-column(s) operations.
Notes
-----
Parameters `on`, `lsuffix`, and `rsuffix` are not supported when
passing a list of `DataFrame` objects.
Support for specifying index levels as the `on` parameter was added
in version 0.23.0.
Examples
--------
>>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> df
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
4 K4 A4
5 K5 A5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> df.join(other, lsuffix='_caller', rsuffix='_other')
key_caller A key_other B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 NaN NaN
4 K4 A4 NaN NaN
5 K5 A5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both `df` and `other`. The joined DataFrame will have
key as its index.
>>> df.set_index('key').join(other.set_index('key'))
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the `on`
parameter. DataFrame.join always uses `other`'s index but we can use
any column in `df`. This method preserves the original DataFrame's
index in the result.
>>> df.join(other.set_index('key'), on='key')
key A B
0 K0 A0 B0
1 K1 A1 B1
2 K2 A2 B2
3 K3 A3 NaN
4 K4 A4 NaN
5 K5 A5 NaN
"""
return self._join_compat(
other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort
)
def _join_compat(
self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False
):
from pandas.core.reshape.concat import concat
from pandas.core.reshape.merge import merge
if isinstance(other, Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
if how == "cross":
return merge(
self,
other,
how=how,
on=on,
suffixes=(lsuffix, rsuffix),
sort=sort,
)
return merge(
self,
other,
left_on=on,
how=how,
left_index=on is None,
right_index=True,
suffixes=(lsuffix, rsuffix),
sort=sort,
)
else:
if on is not None:
raise ValueError(
"Joining multiple DataFrames only supported for joining on index"
)
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
# join indexes only using concat
if can_concat:
if how == "left":
res = concat(
frames, axis=1, join="outer", verify_integrity=True, sort=sort
)
return res.reindex(self.index, copy=False)
else:
return concat(
frames, axis=1, join=how, verify_integrity=True, sort=sort
)
joined = frames[0]
for frame in frames[1:]:
joined = merge(
joined, frame, how=how, left_index=True, right_index=True
)
return joined
@Substitution("")
@Appender(_merge_doc, indents=2)
def merge(
self,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
) -> DataFrame:
from pandas.core.reshape.merge import merge
return merge(
self,
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
def round(self, decimals=0, *args, **kwargs) -> DataFrame:
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
*args
Additional keywords have no effect but might be accepted for
compatibility with numpy.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
A DataFrame with the affected columns rounded to the specified
number of decimal places.
See Also
--------
numpy.around : Round a numpy array to the given number of decimals.
Series.round : Round a Series to the given number of decimals.
Examples
--------
>>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.21 0.32
1 0.01 0.67
2 0.66 0.03
3 0.21 0.18
By providing an integer each column is rounded to the same number
of decimal places
>>> df.round(1)
dogs cats
0 0.2 0.3
1 0.0 0.7
2 0.7 0.0
3 0.2 0.2
With a dict, the number of places for specific columns can be
specified with the column names as key and the number of decimal
places as value
>>> df.round({'dogs': 1, 'cats': 0})
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
Using a Series, the number of places for specific columns can be
specified with the column names as index and the number of
decimal places as value
>>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])
>>> df.round(decimals)
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.items():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = list(_dict_round(self, decimals))
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals) for _, v in self.items()]
else:
raise TypeError("decimals must be an integer, a dict-like or a Series")
if len(new_cols) > 0:
return self._constructor(
concat(new_cols, axis=1), index=self.index, columns=self.columns
)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method="pearson", min_periods=1) -> DataFrame:
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float. Note that the returned matrix from corr
will have 1 along the diagonals and will be symmetric
regardless of the callable's behavior.
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for Pearson
and Spearman correlation.
Returns
-------
DataFrame
Correlation matrix.
See Also
--------
DataFrame.corrwith : Compute pairwise correlation with another
DataFrame or Series.
Series.corr : Compute the correlation between two Series.
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)
if method == "pearson":
correl = libalgos.nancorr(mat, minp=min_periods)
elif method == "spearman":
correl = libalgos.nancorr_spearman(mat, minp=min_periods)
elif method == "kendall" or callable(method):
if min_periods is None:
min_periods = 1
mat = mat.T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.0
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
else:
raise ValueError(
"method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
f"'{method}' was supplied"
)
return self._constructor(correl, index=idx, columns=cols)
def cov(
self, min_periods: Optional[int] = None, ddof: Optional[int] = 1
) -> DataFrame:
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
ddof : int, default 1
Delta degrees of freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
Series.cov : Compute covariance with another Series.
core.window.ExponentialMovingWindow.cov: Exponential weighted sample covariance.
core.window.Expanding.cov : Expanding sample covariance.
core.window.Rolling.cov : Rolling sample covariance.
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-ddof.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
because the estimated covariance matrix is not guaranteed to be positive
semi-definite. This could lead to estimated correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
<https://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_
matrices>`__ for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
base_cov = np.empty((mat.shape[1], mat.shape[1]))
base_cov.fill(np.nan)
else:
base_cov = np.cov(mat.T, ddof=ddof)
base_cov = base_cov.reshape((len(cols), len(cols)))
else:
base_cov = libalgos.nancorr(mat, cov=True, minp=min_periods)
return self._constructor(base_cov, index=idx, columns=cols)
def corrwith(self, other, axis=0, drop=False, method="pearson") -> Series:
"""
Compute pairwise correlation.
Pairwise correlation is computed between rows or columns of
DataFrame with rows or columns of Series or DataFrame. DataFrames
are first aligned along both axes before computing the
correlations.
Parameters
----------
other : DataFrame, Series
Object with which to compute correlations.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for
row-wise.
drop : bool, default False
Drop missing indices from result.
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float.
.. versionadded:: 0.24.0
Returns
-------
Series
Pairwise correlations.
See Also
--------
DataFrame.corr : Compute pairwise correlation of columns.
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
if isinstance(other, Series):
return this.apply(lambda x: other.corr(x, method=method), axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join="inner", copy=False)
if axis == 1:
left = left.T
right = right.T
if method == "pearson":
# mask missing values
left = left + right * 0
right = right + left * 0
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
elif method in ["kendall", "spearman"] or callable(method):
def c(x):
return nanops.nancorr(x[0], x[1], method=method)
correl = self._constructor_sliced(
map(c, zip(left.values.T, right.values.T)), index=left.columns
)
else:
raise ValueError(
f"Invalid method {method} was passed, "
"valid methods are: 'pearson', 'kendall', "
"'spearman', or callable"
)
if not drop:
# Find non-matching labels along the given axis
# and append missing correlations (GH 22375)
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
idx_diff = result_index.difference(correl.index)
if len(idx_diff) > 0:
correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff))
return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Count non-NA cells for each column or row.
The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
on `pandas.options.mode.use_inf_as_na`) are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column.
If 1 or 'columns' counts are generated for each row.
level : int or str, optional
If the axis is a `MultiIndex` (hierarchical), count along a
particular `level`, collapsing into a `DataFrame`.
A `str` specifies the level name.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series or DataFrame
For each column/row the number of non-NA/null entries.
If `level` is specified returns a `DataFrame`.
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.value_counts: Count unique combinations of columns.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = pd.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
Counts for each **row**:
>>> df.count(axis='columns')
0 3
1 2
2 3
3 3
4 3
dtype: int64
Counts for one level of a `MultiIndex`:
>>> df.set_index(["Person", "Single"]).count(level="Person")
Age
Person
John 2
Lewis 1
Myla 1
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis, numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = self._constructor_sliced(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type or frame._mgr.any_extension_types:
# the or any_extension_types is really only hit for single-
# column frames with an extension array
result = notna(frame).sum(axis=axis)
else:
# GH13407
series_counts = notna(frame).sum(axis=axis)
counts = series_counts.values
result = self._constructor_sliced(
counts, index=frame._get_agg_axis(axis)
)
return result.astype("int64")
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError(
f"Can only count levels on hierarchical {self._get_axis_name(axis)}."
)
# Mask NaNs: Mask rows or columns where the index level is NaN, and all
# values in the DataFrame that are NaN
if frame._is_mixed_type:
# Since we have mixed types, calling notna(frame.values) might
# upcast everything to object
values_mask = notna(frame).values
else:
# But use the speedup when we have homogeneous dtypes
values_mask = notna(frame.values)
index_mask = notna(count_axis.get_level_values(level=level))
if axis == 1:
mask = index_mask & values_mask
else:
mask = index_mask.reshape(-1, 1) & values_mask
if isinstance(level, str):
level = count_axis._get_level_number(level)
level_name = count_axis._names[level]
level_index = count_axis.levels[level]._shallow_copy(name=level_name)
level_codes = ensure_int64(count_axis.codes[level])
counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis)
if axis == 1:
result = self._constructor(counts, index=agg_axis, columns=level_index)
else:
result = self._constructor(counts, index=level_index, columns=agg_axis)
return result
def _reduce(
self,
op,
name: str,
*,
axis=0,
skipna=True,
numeric_only=None,
filter_type=None,
**kwds,
):
assert filter_type is None or filter_type == "bool", filter_type
out_dtype = "bool" if filter_type == "bool" else None
own_dtypes = [arr.dtype for arr in self._iter_column_arrays()]
dtype_is_dt = np.array(
[is_datetime64_any_dtype(dtype) for dtype in own_dtypes],
dtype=bool,
)
if numeric_only is None and name in ["mean", "median"] and dtype_is_dt.any():
warnings.warn(
"DataFrame.mean and DataFrame.median with numeric_only=None "
"will include datetime64 and datetime64tz columns in a "
"future version.",
FutureWarning,
stacklevel=5,
)
cols = self.columns[~dtype_is_dt]
self = self[cols]
# TODO: Make other agg func handle axis=None properly GH#21597
axis = self._get_axis_number(axis)
labels = self._get_agg_axis(axis)
assert axis in [0, 1]
def func(values):
if is_extension_array_dtype(values.dtype):
return extract_array(values)._reduce(name, skipna=skipna, **kwds)
else:
return op(values, axis=axis, skipna=skipna, **kwds)
def blk_func(values):
if isinstance(values, ExtensionArray):
return values._reduce(name, skipna=skipna, **kwds)
else:
return op(values, axis=1, skipna=skipna, **kwds)
def _get_data() -> DataFrame:
if filter_type is None:
data = self._get_numeric_data()
else:
# GH#25101, GH#24434
assert filter_type == "bool"
data = self._get_bool_data()
return data
if numeric_only is not None or axis == 0:
# For numeric_only non-None and axis non-None, we know
# which blocks to use and no try/except is needed.
# For numeric_only=None, only the unambiguous case (axis==0 with no
# object dtypes) can be handled with BlockManager.reduce
# Case with EAs see GH#35881
df = self
if numeric_only is True:
df = _get_data()
if axis == 1:
df = df.T
axis = 0
ignore_failures = numeric_only is None
# After possibly _get_data and transposing, we are now in the
# simple case where we can use BlockManager.reduce
res, indexer = df._mgr.reduce(blk_func, ignore_failures=ignore_failures)
out = df._constructor(res).iloc[0]
if out_dtype is not None:
out = out.astype(out_dtype)
if axis == 0 and is_object_dtype(out.dtype):
# GH#35865 careful to cast explicitly to object
nvs = coerce_to_dtypes(out.values, df.dtypes.iloc[np.sort(indexer)])
out[:] = np.array(nvs, dtype=object)
if axis == 0 and len(self) == 0 and name in ["sum", "prod"]:
# Even if we are object dtype, follow numpy and return
# float64, see test_apply_funcs_over_empty
out = out.astype(np.float64)
return out
assert numeric_only is None
data = self
values = data.values
try:
result = func(values)
except TypeError:
# e.g. in nanops trying to convert strs to float
data = _get_data()
labels = data._get_agg_axis(axis)
values = data.values
with np.errstate(all="ignore"):
result = func(values)
if filter_type == "bool" and notna(result).all():
result = result.astype(np.bool_)
elif filter_type is None and is_object_dtype(result.dtype):
try:
result = result.astype(np.float64)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = coerce_to_dtypes(result, data.dtypes)
result = self._constructor_sliced(result, index=labels)
return result
def nunique(self, axis=0, dropna=True) -> Series:
"""
Count distinct observations over requested axis.
Return Series with number of distinct observations. Can ignore NaN
values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for
column-wise.
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
Series
See Also
--------
Series.nunique: Method nunique for Series.
DataFrame.count: Count non-NA cells for each column or row.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
dtype: int64
>>> df.nunique(axis=1)
0 1
1 2
2 2
dtype: int64
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis=0, skipna=True) -> Series:
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of minima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmin : Return index of the minimum element.
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
Examples
--------
Consider a dataset containing food consumption in Argentina.
>>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
... 'co2_emissions': [37.2, 19.66, 1712]},
... index=['Pork', 'Wheat Products', 'Beef'])
>>> df
consumption co2_emissions
Pork 10.51 37.20
Wheat Products 103.11 19.66
Beef 55.48 1712.00
By default, it returns the index for the minimum value in each column.
>>> df.idxmin()
consumption Pork
co2_emissions Wheat Products
dtype: object
To return the index for the minimum value in each row, use ``axis="columns"``.
>>> df.idxmin(axis="columns")
Pork consumption
Wheat Products co2_emissions
Beef consumption
dtype: object
"""
axis = self._get_axis_number(axis)
res = self._reduce(
nanops.nanargmin, "argmin", axis=axis, skipna=skipna, numeric_only=False
)
indices = res._values
# indices will always be np.ndarray since axis is not None and
# values is a 2d array for DataFrame
# error: Item "int" of "Union[int, Any]" has no attribute "__iter__"
assert isinstance(indices, np.ndarray) # for mypy
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return self._constructor_sliced(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True) -> Series:
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of maxima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmax : Return index of the maximum element.
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
Examples
--------
Consider a dataset containing food consumption in Argentina.
>>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
... 'co2_emissions': [37.2, 19.66, 1712]},
... index=['Pork', 'Wheat Products', 'Beef'])
>>> df
consumption co2_emissions
Pork 10.51 37.20
Wheat Products 103.11 19.66
Beef 55.48 1712.00
By default, it returns the index for the maximum value in each column.
>>> df.idxmax()
consumption Wheat Products
co2_emissions Beef
dtype: object
To return the index for the maximum value in each row, use ``axis="columns"``.
>>> df.idxmax(axis="columns")
Pork co2_emissions
Wheat Products consumption
Beef co2_emissions
dtype: object
"""
axis = self._get_axis_number(axis)
res = self._reduce(
nanops.nanargmax, "argmax", axis=axis, skipna=skipna, numeric_only=False
)
indices = res._values
# indices will always be np.ndarray since axis is not None and
# values is a 2d array for DataFrame
# error: Item "int" of "Union[int, Any]" has no attribute "__iter__"
assert isinstance(indices, np.ndarray) # for mypy
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return self._constructor_sliced(result, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num: int) -> Index:
"""
Let's be explicit about this.
"""
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})")
def mode(self, axis=0, numeric_only=False, dropna=True) -> DataFrame:
"""
Get the mode(s) of each element along the selected axis.
The mode of a set of values is the value that appears most often.
It can be multiple values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to iterate over while searching for the mode:
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row.
numeric_only : bool, default False
If True, only apply to numeric columns.
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The modes of each column or row.
See Also
--------
Series.mode : Return the highest frequency value in a Series.
Series.value_counts : Return the counts of values in a Series.
Examples
--------
>>> df = pd.DataFrame([('bird', 2, 2),
... ('mammal', 4, np.nan),
... ('arthropod', 8, 0),
... ('bird', 2, np.nan)],
... index=('falcon', 'horse', 'spider', 'ostrich'),
... columns=('species', 'legs', 'wings'))
>>> df
species legs wings
falcon bird 2 2.0
horse mammal 4 NaN
spider arthropod 8 0.0
ostrich bird 2 NaN
By default, missing values are not considered, and the modes of wings
are both 0 and 2. Because the resulting DataFrame has two rows,
the second row of ``species`` and ``legs`` contains ``NaN``.
>>> df.mode()
species legs wings
0 bird 2.0 0.0
1 NaN NaN 2.0
Setting ``dropna=False``, ``NaN`` values are considered and they can be
the mode (like for wings).
>>> df.mode(dropna=False)
species legs wings
0 bird 2 NaN
Setting ``numeric_only=True``, only the mode of numeric columns is
computed, and columns of other types are ignored.
>>> df.mode(numeric_only=True)
legs wings
0 2.0 0.0
1 NaN 2.0
To compute the mode over columns and not rows, use the axis parameter:
>>> df.mode(axis='columns', numeric_only=True)
0 1
falcon 2.0 NaN
horse 4.0 NaN
spider 0.0 8.0
ostrich 2.0 NaN
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode(dropna=dropna)
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"):
"""
Return values at the given quantile over requested axis.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Value between 0 <= q <= 1, the quantile(s) to compute.
axis : {0, 1, 'index', 'columns'}, default 0
Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be
computed as well.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
Series or DataFrame
If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
See Also
--------
core.window.Rolling.quantile: Rolling quantile.
numpy.percentile: Numpy function to compute the percentile.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
... columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
Name: 0.1, dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
Specifying `numeric_only=False` will also compute the quantile of
datetime and timedelta data.
>>> df = pd.DataFrame({'A': [1, 2],
... 'B': [pd.Timestamp('2010'),
... pd.Timestamp('2011')],
... 'C': [pd.Timedelta('1 days'),
... pd.Timedelta('2 days')]})
>>> df.quantile(0.5, numeric_only=False)
A 1.5
B 2010-07-02 12:00:00
C 1 days 12:00:00
Name: 0.5, dtype: object
"""
validate_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
if len(data.columns) == 0:
# GH#23925 _get_numeric_data may have dropped all columns
cols = Index([], name=self.columns.name)
if is_list_like(q):
return self._constructor([], index=q, columns=cols)
return self._constructor_sliced([], index=cols, name=q, dtype=np.float64)
result = data._mgr.quantile(
qs=q, axis=1, interpolation=interpolation, transposed=is_transposed
)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
def to_timestamp(
self, freq=None, how: str = "start", axis: Axis = 0, copy: bool = True
) -> DataFrame:
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period.
Parameters
----------
freq : str, default frequency of PeriodIndex
Desired frequency.
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with DatetimeIndex
"""
new_obj = self.copy(deep=copy)
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
if not isinstance(old_ax, PeriodIndex):
raise TypeError(f"unsupported Type {type(old_ax).__name__}")
new_ax = old_ax.to_timestamp(freq=freq, how=how)
setattr(new_obj, axis_name, new_ax)
return new_obj
def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> DataFrame:
"""
Convert DataFrame from DatetimeIndex to PeriodIndex.
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed).
Parameters
----------
freq : str, optional
Frequency of the PeriodIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with PeriodIndex
"""
new_obj = self.copy(deep=copy)
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
if not isinstance(old_ax, DatetimeIndex):
raise TypeError(f"unsupported Type {type(old_ax).__name__}")
new_ax = old_ax.to_period(freq=freq)
setattr(new_obj, axis_name, new_ax)
return new_obj
def isin(self, values) -> DataFrame:
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dict
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dict, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
See Also
--------
DataFrame.eq: Equality test for DataFrame.
Series.isin: Equivalent method on Series.
Series.str.contains: Test if pattern or regex is contained within a
string of a Series or Index.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list, check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
When ``values`` is a Series or DataFrame the index and column must
match. Note that 'falcon' does not match based on the number of legs
in df2.
>>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},
... index=['spider', 'falcon'])
>>> df.isin(other)
num_legs num_wings
falcon True True
dog False False
"""
if isinstance(values, dict):
from pandas.core.reshape.concat import concat
values = collections.defaultdict(list, values)
return concat(
(
self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)
),
axis=1,
)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self), axis="index")
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError(
"only list-like or dict-like objects are allowed "
"to be passed to DataFrame.isin(), "
f"you passed a '{type(values).__name__}'"
)
return self._constructor(
algorithms.isin(self.values.ravel(), values).reshape(self.shape),
self.index,
self.columns,
)
# ----------------------------------------------------------------------
# Add index and columns
_AXIS_ORDERS = ["index", "columns"]
_AXIS_TO_AXIS_NUMBER: Dict[Axis, int] = {
**NDFrame._AXIS_TO_AXIS_NUMBER,
1: 1,
"columns": 1,
}
_AXIS_REVERSED = True
_AXIS_LEN = len(_AXIS_ORDERS)
_info_axis_number = 1
_info_axis_name = "columns"
index: Index = properties.AxisProperty(
axis=1, doc="The index (row labels) of the DataFrame."
)
columns: Index = properties.AxisProperty(
axis=0, doc="The column labels of the DataFrame."
)
@property
def _AXIS_NUMBERS(self) -> Dict[str, int]:
""".. deprecated:: 1.1.0"""
super()._AXIS_NUMBERS
return {"index": 0, "columns": 1}
@property
def _AXIS_NAMES(self) -> Dict[int, str]:
""".. deprecated:: 1.1.0"""
super()._AXIS_NAMES
return {0: "index", 1: "columns"}
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
hist = pandas.plotting.hist_frame
boxplot = pandas.plotting.boxplot_frame
sparse = CachedAccessor("sparse", SparseFrameAccessor)
DataFrame._add_numeric_operations()
ops.add_flex_arithmetic_methods(DataFrame)
def _from_nested_dict(data) -> collections.defaultdict:
new_data: collections.defaultdict = collections.defaultdict(dict)
for index, s in data.items():
for col, v in s.items():
new_data[col][index] = v
return new_data
| _reindex_index |
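DataFrame.corrwith above is documented without an Examples section; a minimal illustrative sketch of column-wise use (the two frames below are invented for the example, not taken from the pandas docs):
import pandas as pd
df1 = pd.DataFrame({"a": [1, 2, 3, 4], "b": [4, 3, 2, 1]})
df2 = pd.DataFrame({"a": [2, 4, 6, 8], "b": [1, 2, 3, 4]})
# Pairwise Pearson correlation of the matching columns, returned as a Series
print(df1.corrwith(df2))
# a    1.0   (df1["a"] rises exactly with df2["a"])
# b   -1.0   (df1["b"] falls while df2["b"] rises)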
babylon.depthRenderer.ts | module BABYLON {
export class DepthRenderer {
private _scene: Scene;
private _depthMap: RenderTargetTexture;
private _effect: Effect;
private _viewMatrix = Matrix.Zero();
private _projectionMatrix = Matrix.Zero();
private _transformMatrix = Matrix.Zero();
private _worldViewProjection = Matrix.Zero();
private _cachedDefines: string;
constructor(scene: Scene, type: number = Engine.TEXTURETYPE_FLOAT) {
this._scene = scene;
var engine = scene.getEngine();
// Render target
this._depthMap = new RenderTargetTexture("depthMap", { width: engine.getRenderWidth(), height: engine.getRenderHeight() }, this._scene, false, true, type);
this._depthMap.wrapU = Texture.CLAMP_ADDRESSMODE;
this._depthMap.wrapV = Texture.CLAMP_ADDRESSMODE;
this._depthMap.refreshRate = 1;
this._depthMap.renderParticles = false;
this._depthMap.renderList = null;
// set default depth value to 1.0 (far away)
this._depthMap.onClear = (engine: Engine) => {
engine.clear(new Color4(1.0, 1.0, 1.0, 1.0), true, true);
}
// Custom render function
var renderSubMesh = (subMesh: SubMesh): void => {
var mesh = subMesh.getRenderingMesh();
var scene = this._scene;
var engine = scene.getEngine();
// Culling
engine.setState(subMesh.getMaterial().backFaceCulling);
// Managing instances
var batch = mesh._getInstancesRenderList(subMesh._id);
if (batch.mustReturn) {
return;
}
var hardwareInstancedRendering = (engine.getCaps().instancedArrays !== null) && (batch.visibleInstances[subMesh._id] !== null);
if (this.isReady(subMesh, hardwareInstancedRendering)) {
engine.enableEffect(this._effect);
mesh._bind(subMesh, this._effect, Material.TriangleFillMode);
var material = subMesh.getMaterial();
this._effect.setMatrix("viewProjection", scene.getTransformMatrix());
this._effect.setFloat("far", scene.activeCamera.maxZ);
// Alpha test
if (material && material.needAlphaTesting()) {
var alphaTexture = material.getAlphaTestTexture();
this._effect.setTexture("diffuseSampler", alphaTexture);
this._effect.setMatrix("diffuseMatrix", alphaTexture.getTextureMatrix());
}
// Bones
if (mesh.useBones && mesh.computeBonesUsingShaders) {
this._effect.setMatrices("mBones", mesh.skeleton.getTransformMatrices(mesh));
}
// Draw
mesh._processRendering(subMesh, this._effect, Material.TriangleFillMode, batch, hardwareInstancedRendering,
(isInstance, world) => this._effect.setMatrix("world", world));
}
};
this._depthMap.customRenderFunction = (opaqueSubMeshes: SmartArray<SubMesh>, alphaTestSubMeshes: SmartArray<SubMesh>): void => {
var index;
for (index = 0; index < opaqueSubMeshes.length; index++) {
renderSubMesh(opaqueSubMeshes.data[index]);
}
for (index = 0; index < alphaTestSubMeshes.length; index++) {
renderSubMesh(alphaTestSubMeshes.data[index]);
}
};
}
public isReady(subMesh: SubMesh, useInstances: boolean): boolean {
var material: any = subMesh.getMaterial();
if (material.disableDepthWrite) {
return false;
}
var defines = [];
var attribs = [VertexBuffer.PositionKind];
var mesh = subMesh.getMesh();
var scene = mesh.getScene();
// Alpha test
if (material && material.needAlphaTesting()) {
defines.push("#define ALPHATEST");
if (mesh.isVerticesDataPresent(VertexBuffer.UVKind)) {
attribs.push(VertexBuffer.UVKind);
defines.push("#define UV1");
}
if (mesh.isVerticesDataPresent(VertexBuffer.UV2Kind)) {
attribs.push(VertexBuffer.UV2Kind);
defines.push("#define UV2");
}
}
// Bones
if (mesh.useBones && mesh.computeBonesUsingShaders) {
attribs.push(VertexBuffer.MatricesIndicesKind);
attribs.push(VertexBuffer.MatricesWeightsKind);
if (mesh.numBoneInfluencers > 4) {
attribs.push(VertexBuffer.MatricesIndicesExtraKind);
attribs.push(VertexBuffer.MatricesWeightsExtraKind);
}
defines.push("#define NUM_BONE_INFLUENCERS " + mesh.numBoneInfluencers);
defines.push("#define BonesPerMesh " + (mesh.skeleton.bones.length + 1));
} else {
defines.push("#define NUM_BONE_INFLUENCERS 0"); | // Instances
if (useInstances) {
defines.push("#define INSTANCES");
attribs.push("world0");
attribs.push("world1");
attribs.push("world2");
attribs.push("world3");
}
// Get correct effect
var join = defines.join("\n");
if (this._cachedDefines !== join) {
this._cachedDefines = join;
this._effect = this._scene.getEngine().createEffect("depth",
attribs,
["world", "mBones", "viewProjection", "diffuseMatrix", "far"],
["diffuseSampler"], join);
}
return this._effect.isReady();
}
public getDepthMap(): RenderTargetTexture {
return this._depthMap;
}
// Methods
public dispose(): void {
this._depthMap.dispose();
}
}
} | }
|
AddEPSG3857Projection101.py | #ArcGIS Server 10.1 service editor
#view your service properties at: http://[your server URL]/arcgis/admin/services/
#put a ?f=json at the end of a service name to see the json properties -
#the JSON is what is being edited here
#Loops through the services in a particular folder and edits the
#listSupportedCRS property (adds "EPSG:3857" - to be google-riffic) for each WMS service in the folder
#created by Doug Curl, Kentucky Geological Survey, 9/12/2013
# For HTTP calls
import httplib, urllib, json
# For system tools
import sys
# For reading passwords without echoing
import getpass
def main(argv=None):
# Ask for server name & port
#serverName = "kgs.uky.edu"
serverName = raw_input("Enter server name (server URL): ")
# Ask for server port - usually 6080:
serverPort = raw_input("Enter server port (usually 6080): ")
#Ask for server admin directory:
serverFolder = raw_input("Enter folder in your service directory to edit services (assumes the root is '/arcgis/admin/services/'): ")
# Ask for admin/publisher user name and password
username = raw_input("Enter admin user name: ")
password = getpass.getpass("Enter password: ")
# Get a token
token = getToken(username, password, serverName, serverPort)
# Get the root info
#serverURL = "/arcgis/admin/services/aasggeothermal/"
serverURL = "/arcgis/admin/services/"+serverFolder+"/"
# This request only needs the token and the response formatting parameter
params = urllib.urlencode({'token': token, 'f': 'json'})
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
# Connect to URL and post parameters
httpConn = httplib.HTTPConnection(serverName, serverPort)
httpConn.request("POST", serverURL, params, headers)
# Read response
response = httpConn.getresponse()
if (response.status != 200):
httpConn.close()
print "Could not read folder information."
return
else:
data = response.read()
# Check that data returned is not an error object
if not assertJsonSuccess(data):
print "Error when reading server information. " + str(data)
return
else:
print "Processed server information successfully. Now processing folders..."
# Deserialize response into Python object
dataObj = json.loads(data)
httpConn.close()
# Loop through each service in the folder
for item in dataObj['services']:
print item["serviceName"]
print item["type"]
if item["type"] == "MapServer":
service = item["serviceName"]+"."+item["type"]
#sUrl = "/arcgis/admin/services/%s.%s" %(item["serviceName"], item["type"])
print service
serviceURL = serverURL + service
print serviceURL
# This request only needs the token and the response formatting parameter
params = urllib.urlencode({'token': token, 'f': 'json'})
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
# Connect to service to get its current JSON definition
httpConn = httplib.HTTPConnection(serverName, serverPort)
httpConn.request("POST", serviceURL, params, headers)
# Read response
response = httpConn.getresponse()
if (response.status != 200):
httpConn.close()
print "Could not read service information."
return
else:
data = response.read()
# Check that data returned is not an error object
if not assertJsonSuccess(data):
print "Error when reading service information. " + str(data)
else:
print "Service information read successfully. Now changing properties..."
# Deserialize response into Python object
dataObj = json.loads(data)
httpConn.close()
#print data
for ext in dataObj["extensions"]:
if ext["typeName"] == "WMSServer":
#Edit the supported CRS property - add the one for google for WMS:
ext["properties"]["listSupportedCRS"] = "EPSG:3857"
# Serialize back into JSON
updatedSvcJson = json.dumps(dataObj)
#print updatedSvcJson
# Call the edit operation on the service. Pass in modified JSON.
editSvcURL = serverURL + service + "/edit"
params = urllib.urlencode({'token': token, 'f': 'json', 'service': updatedSvcJson})
httpConn.request("POST", editSvcURL, params, headers)
# Read service edit response
editResponse = httpConn.getresponse()
if (editResponse.status != 200):
httpConn.close()
print "Error while executing edit."
return
else:
editData = editResponse.read()
# Check that data returned is not an error object
if not assertJsonSuccess(editData):
print "Error returned while editing service" + str(editData)
else:
print "Service edited successfully."
#httpConn.close()
#return
else:
# Close the connection to the current service
httpConn.close()
# A function to generate a token given username, password and the adminURL.
def getToken(username, password, serverName, serverPort):
# Token URL is typically http://server[:port]/arcgis/admin/generateToken
tokenURL = "/arcgis/admin/generateToken"
params = urllib.urlencode({'username': username, 'password': password, 'client': 'requestip', 'f': 'json'})
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
# Connect to URL and post parameters
httpConn = httplib.HTTPConnection(serverName, serverPort)
httpConn.request("POST", tokenURL, params, headers)
# Read response
response = httpConn.getresponse()
if (response.status != 200):
httpConn.close()
print "Error while fetching tokens from admin URL. Please check the URL and try again."
return
else:
data = response.read()
httpConn.close()
# Check that data returned is not an error object
if not assertJsonSuccess(data):
return
# Extract the token from it
token = json.loads(data)
return token['token']
# A function that checks that the input JSON object
# is not an error object.
def assertJsonSuccess(data):
|
# Script start
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| obj = json.loads(data)
if 'status' in obj and obj['status'] == "error":
print "Error: JSON object returns an error. " + str(obj)
return False
else:
return True |
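The script above targets Python 2 (httplib, urllib, raw_input, print statements). For readers on Python 3, a rough sketch of the same token-then-edit flow using the third-party requests library; the server URL, folder, service name and credentials below are placeholders, and the admin endpoints are simply the ones the script itself posts to:
import json
import requests  # third-party; the original script uses only the Python 2 stdlib

ADMIN = "http://example-server:6080/arcgis/admin"  # hypothetical server URL
token = requests.post(
    ADMIN + "/generateToken",
    data={"username": "admin", "password": "secret",
          "client": "requestip", "f": "json"},
).json()["token"]

svc_url = ADMIN + "/services/someFolder/someService.MapServer"  # hypothetical service
svc = requests.post(svc_url, data={"token": token, "f": "json"}).json()

# Same edit the script performs: advertise EPSG:3857 for the WMS extension
for ext in svc.get("extensions", []):
    if ext.get("typeName") == "WMSServer":
        ext["properties"]["listSupportedCRS"] = "EPSG:3857"

result = requests.post(
    svc_url + "/edit",
    data={"token": token, "f": "json", "service": json.dumps(svc)},
).json()
print(result)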
network_test.go | // Copyright 2021 Anapaya Systems
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package launcher_test
import (
"context"
"net"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/scionproto/scion/go/pkg/app/launcher"
)
func TestWaitForNetworkReady(t *testing.T) | {
testCases := map[string]struct {
IPs []net.IP
Setup func(t *testing.T) (context.Context, func())
AssertErr assert.ErrorAssertionFunc
}{
"no IPs": {
IPs: nil,
Setup: func(_ *testing.T) (context.Context, func()) {
return context.Background(), func() {}
},
AssertErr: assert.NoError,
},
"IPs not found time out": {
IPs: []net.IP{net.ParseIP("192.0.2.42")},
Setup: func(_ *testing.T) (context.Context, func()) {
ctx, cancelF := context.WithTimeout(context.Background(), time.Millisecond*200)
return ctx, cancelF
},
AssertErr: assert.Error,
},
"localhost": {
IPs: []net.IP{net.ParseIP("127.0.0.1")},
Setup: func(_ *testing.T) (context.Context, func()) {
ctx, cancelF := context.WithTimeout(context.Background(), time.Millisecond*500)
return ctx, cancelF
},
AssertErr: assert.NoError,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
ctx, cleanup := tc.Setup(t)
defer cleanup()
tc.AssertErr(t, launcher.WaitForNetworkReady(ctx, tc.IPs))
})
}
} |
|
__init__.py | import pathlib
import pkg_resources
from mopidy import config, ext
__version__ = pkg_resources.get_distribution(
"Mopidy-MusicBox-Webclient"
).version
class Extension(ext.Extension):
dist_name = "Mopidy-MusicBox-Webclient"
ext_name = "musicbox_webclient"
version = __version__
def get_default_config(self):
return config.read(pathlib.Path(__file__).parent / "ext.conf")
def get_config_schema(self):
schema = super().get_config_schema()
schema["musicbox"] = config.Boolean(optional=True)
schema["websocket_host"] = config.Hostname(optional=True)
schema["websocket_port"] = config.Port(optional=True)
schema["on_track_click"] = config.String(
optional=True,
choices=[
"PLAY_NOW",
"PLAY_NEXT",
"ADD_THIS_BOTTOM",
"ADD_ALL_BOTTOM",
"PLAY_ALL",
"DYNAMIC",
],
)
return schema
def setup(self, registry):
registry.add(
"http:app", {"name": self.ext_name, "factory": self.factory}
)
def factory(self, config, core):
| from tornado.web import RedirectHandler
from .web import IndexHandler, StaticHandler
path = pathlib.Path(__file__).parent / "static"
return [
(r"/", RedirectHandler, {"url": "index.html"}),
(r"/(index.html)", IndexHandler, {"config": config, "path": path}),
(r"/(.*)", StaticHandler, {"path": path}),
] |
|
types.rs | //! Protocol Buffers well-known wrapper types.
//!
//! This module provides implementations of `Message` for Rust standard library types which
//! correspond to a Protobuf well-known wrapper type. The remaining well-known types are defined in
//! the `prost-types` crate in order to avoid a cyclic dependency between `prost` and
//! `prost-build`.
use bytes::{Buf, BufMut};
use DecodeError;
use Message;
use encoding::*;
/// `google.protobuf.BoolValue`
impl Message for bool {
fn encode_raw<B>(&self, buf: &mut B) where B: BufMut {
if *self {
bool::encode(1, self, buf)
}
}
fn merge_field<B>(&mut self, buf: &mut B) -> Result<(), DecodeError> where B: Buf {
let (tag, wire_type) = decode_key(buf)?;
if tag == 1 {
bool::merge(wire_type, self, buf)
} else {
skip_field(wire_type, buf)
}
}
fn encoded_len(&self) -> usize {
if *self { 2 } else { 0 }
}
fn clear(&mut self) {
*self = false;
}
}
/// `google.protobuf.UInt32Value`
impl Message for u32 {
fn encode_raw<B>(&self, buf: &mut B) where B: BufMut {
if *self != 0 {
uint32::encode(1, self, buf)
}
}
fn merge_field<B>(&mut self, buf: &mut B) -> Result<(), DecodeError> where B: Buf {
let (tag, wire_type) = decode_key(buf)?;
if tag == 1 {
uint32::merge(wire_type, self, buf)
} else {
skip_field(wire_type, buf)
}
}
fn encoded_len(&self) -> usize {
if *self != 0 { uint32::encoded_len(1, self) } else { 0 }
}
fn clear(&mut self) {
*self = 0;
}
}
/// `google.protobuf.UInt64Value`
impl Message for u64 {
fn encode_raw<B>(&self, buf: &mut B) where B: BufMut {
if *self != 0 {
uint64::encode(1, self, buf)
}
}
fn merge_field<B>(&mut self, buf: &mut B) -> Result<(), DecodeError> where B: Buf {
let (tag, wire_type) = decode_key(buf)?;
if tag == 1 {
uint64::merge(wire_type, self, buf)
} else {
skip_field(wire_type, buf)
}
}
fn encoded_len(&self) -> usize {
if *self != 0 { uint64::encoded_len(1, self) } else { 0 }
}
fn clear(&mut self) {
*self = 0;
}
}
/// `google.protobuf.Int32Value`
impl Message for i32 {
fn encode_raw<B>(&self, buf: &mut B) where B: BufMut {
if *self != 0 {
int32::encode(1, self, buf)
}
}
fn merge_field<B>(&mut self, buf: &mut B) -> Result<(), DecodeError> where B: Buf {
let (tag, wire_type) = decode_key(buf)?;
if tag == 1 {
int32::merge(wire_type, self, buf)
} else {
skip_field(wire_type, buf)
}
}
fn encoded_len(&self) -> usize {
if *self != 0 { int32::encoded_len(1, self) } else { 0 }
}
fn clear(&mut self) {
*self = 0;
}
}
/// `google.protobuf.Int64Value`
impl Message for i64 {
fn encode_raw<B>(&self, buf: &mut B) where B: BufMut {
if *self != 0 {
int64::encode(1, self, buf)
}
}
fn merge_field<B>(&mut self, buf: &mut B) -> Result<(), DecodeError> where B: Buf {
let (tag, wire_type) = decode_key(buf)?;
if tag == 1 {
int64::merge(wire_type, self, buf)
} else {
skip_field(wire_type, buf)
}
}
fn encoded_len(&self) -> usize {
if *self != 0 { int64::encoded_len(1, self) } else |
}
fn clear(&mut self) {
*self = 0;
}
}
/// `google.protobuf.FloatValue`
impl Message for f32 {
fn encode_raw<B>(&self, buf: &mut B) where B: BufMut {
if *self != 0.0 {
float::encode(1, self, buf)
}
}
fn merge_field<B>(&mut self, buf: &mut B) -> Result<(), DecodeError> where B: Buf {
let (tag, wire_type) = decode_key(buf)?;
if tag == 1 {
float::merge(wire_type, self, buf)
} else {
skip_field(wire_type, buf)
}
}
fn encoded_len(&self) -> usize {
if *self != 0.0 { float::encoded_len(1, self) } else { 0 }
}
fn clear(&mut self) {
*self = 0.0;
}
}
/// `google.protobuf.DoubleValue`
impl Message for f64 {
fn encode_raw<B>(&self, buf: &mut B) where B: BufMut {
if *self != 0.0 {
double::encode(1, self, buf)
}
}
fn merge_field<B>(&mut self, buf: &mut B) -> Result<(), DecodeError> where B: Buf {
let (tag, wire_type) = decode_key(buf)?;
if tag == 1 {
double::merge(wire_type, self, buf)
} else {
skip_field(wire_type, buf)
}
}
fn encoded_len(&self) -> usize {
if *self != 0.0 { double::encoded_len(1, self) } else { 0 }
}
fn clear(&mut self) {
*self = 0.0;
}
}
/// `google.protobuf.StringValue`
impl Message for String {
fn encode_raw<B>(&self, buf: &mut B) where B: BufMut {
if !self.is_empty() {
string::encode(1, self, buf)
}
}
fn merge_field<B>(&mut self, buf: &mut B) -> Result<(), DecodeError> where B: Buf {
let (tag, wire_type) = decode_key(buf)?;
if tag == 1 {
string::merge(wire_type, self, buf)
} else {
skip_field(wire_type, buf)
}
}
fn encoded_len(&self) -> usize {
if !self.is_empty() { string::encoded_len(1, self) } else { 0 }
}
fn clear(&mut self) {
self.clear();
}
}
/// `google.protobuf.BytesValue`
impl Message for Vec<u8> {
fn encode_raw<B>(&self, buf: &mut B) where B: BufMut {
if !self.is_empty() {
bytes::encode(1, self, buf)
}
}
fn merge_field<B>(&mut self, buf: &mut B) -> Result<(), DecodeError> where B: Buf {
let (tag, wire_type) = decode_key(buf)?;
if tag == 1 {
bytes::merge(wire_type, self, buf)
} else {
skip_field(wire_type, buf)
}
}
fn encoded_len(&self) -> usize {
if !self.is_empty() { bytes::encoded_len(1, self) } else { 0 }
}
fn clear(&mut self) {
self.clear();
}
}
/// `google.protobuf.Empty`
impl Message for () {
fn encode_raw<B>(&self, _buf: &mut B) where B: BufMut { }
fn merge_field<B>(&mut self, buf: &mut B) -> Result<(), DecodeError> where B: Buf {
let (_, wire_type) = decode_key(buf)?;
skip_field(wire_type, buf)
}
fn encoded_len(&self) -> usize { 0 }
fn clear(&mut self) { }
}
| { 0 } |
find.py | import pytest
from webdriver.transport import Response
from tests.support.asserts import assert_error, assert_same_element, assert_success
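# Helper that POSTs directly to the "find elements from shadow root" endpoint so each test controls the raw payload.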
def find_elements(session, shadow_id, using, value):
return session.transport.send(
"POST", "session/{session_id}/shadow/{shadow_id}/elements".format(
session_id=session.session_id,
shadow_id=shadow_id),
{"using": using, "value": value})
def test_null_parameter_value(session, http, get_shadow_page):
    session.url = get_shadow_page("<div><a href=# id=linkText>full link text</a></div>")
custom_element = session.find.css("custom-shadow-element", all=False)
shadow_root = custom_element.shadow_root
path = "/session/{session_id}/shadow/{shadow_id}/elements".format(
session_id=session.session_id, shadow_id=shadow_root.id)
with http.post(path, None) as response:
assert_error(Response.from_http(response), "invalid argument")
def test_no_top_browsing_context(session, closed_window):
response = find_elements(session, "notReal", "css selector", "foo")
assert_error(response, "no such window")
def test_no_browsing_context(session, closed_frame):
response = find_elements(session, "notReal", "css selector", "foo")
assert_error(response, "no such window")
@pytest.mark.parametrize("using", [("a"), (True), (None), (1), ([]), ({})])
def test_invalid_using_argument(session, using):
# Step 1 - 2
response = find_elements(session, "notReal", using, "value")
assert_error(response, "invalid argument")
@pytest.mark.parametrize("value", [None, [], {}])
def test_invalid_selector_argument(session, value):
# Step 3 - 4
response = find_elements(session, "notReal", "css selector", value)
assert_error(response, "invalid argument")
def test_detached_shadow_root(session, get_shadow_page):
session.url = get_shadow_page("<div><input type='checkbox'/></div>")
custom_element = session.find.css("custom-shadow-element", all=False)
shadow_root = custom_element.shadow_root
session.refresh()
    response = find_elements(session, shadow_root.id, "css selector", "input")
assert_error(response, "detached shadow root")
def test_find_elements_equivalence(session, get_shadow_page):
session.url = get_shadow_page("<div><input id='check' type='checkbox'/><input id='text'/></div>")
custom_element = session.find.css("custom-shadow-element", all=False)
shadow_root = custom_element.shadow_root
    response = find_elements(session, shadow_root.id, "css selector", "input")
assert_success(response)
@pytest.mark.parametrize("using,value",
[("css selector", "#linkText"),
("link text", "full link text"),
("partial link text", "link text"),
("tag name", "a"),
("xpath", "//a")])
def test_find_elements(session, get_shadow_page, using, value):
# Step 8 - 9
session.url = get_shadow_page("<div><a href=# id=linkText>full link text</a></div>")
custom_element = session.find.css("custom-shadow-element", all=False)
shadow_root = custom_element.shadow_root
response = find_elements(session, shadow_root.id, using, value)
assert_success(response)
@pytest.mark.parametrize("document,value", [
("<a href=#>link text</a>", "link text"),
("<a href=#> link text </a>", "link text"),
("<a href=#>link<br>text</a>", "link\ntext"),
("<a href=#>link&text</a>", "link&text"),
("<a href=#>LINK TEXT</a>", "LINK TEXT"),
("<a href=# style='text-transform: uppercase'>link text</a>", "LINK TEXT"),
])
def test_find_elements_link_text(session, get_shadow_page, document, value):
# Step 8 - 9
session.url = get_shadow_page("<div><a href=#>not wanted</a><br/>{0}</div>".format(document))
element = session.find.css("div", all=False)
custom_element = session.find.css("custom-shadow-element", all=False)
shadow_root = custom_element.shadow_root
expected = session.execute_script("return arguments[0].shadowRoot.querySelectorAll('a')[1]",
args=(custom_element,))
|
found_element = value[0]
assert_same_element(session, found_element, expected)
@pytest.mark.parametrize("document,value", [
("<a href=#>partial link text</a>", "link"),
("<a href=#> partial link text </a>", "link"),
("<a href=#>partial link text</a>", "k t"),
("<a href=#>partial link<br>text</a>", "k\nt"),
("<a href=#>partial link&text</a>", "k&t"),
("<a href=#>PARTIAL LINK TEXT</a>", "LINK"),
("<a href=# style='text-transform: uppercase'>partial link text</a>", "LINK"),
])
def test_find_elements_partial_link_text(session, get_shadow_page, document, value):
# Step 8 - 9
session.url = get_shadow_page("<div><a href=#>not wanted</a><br/>{0}</div>".format(document))
element = session.find.css("div", all=False)
custom_element = session.find.css("custom-shadow-element", all=False)
shadow_root = custom_element.shadow_root
expected = session.execute_script("return arguments[0].shadowRoot.querySelectorAll('a')[1]",
args=(custom_element,))
response = find_elements(session, shadow_root.id, "partial link text", value)
value = assert_success(response)
assert isinstance(value, list)
assert len(value) == 1
found_element = value[0]
assert_same_element(session, found_element, expected)
@pytest.mark.parametrize("using,value", [("css selector", "#wontExist")])
def test_no_element(session, get_shadow_page, using, value):
# Step 8 - 9
session.url = get_shadow_page("<div></div>")
custom_element = session.find.css("custom-shadow-element", all=False)
shadow_root = custom_element.shadow_root
response = find_elements(session, shadow_root.id, using, value)
assert response.body["value"] == [] | response = find_elements(session, shadow_root.id, "link text", value)
value = assert_success(response)
assert isinstance(value, list)
assert len(value) == 1 |
consume.go | // Copyright 2020 The searKing Author. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package token
import (
"bytes"
"regexp"
"strings"
"unicode"
)
// A mode value is a set of flags (or 0).
// They control scanner behavior.
//
type Mode uint
const (
ModeCaseSensitive Mode = 1 << iota
ModeRegexpPerl
ModeRegexpPosix
)
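// ConsumeIdentifier scans an identifier made of letters, digits, '_' and '.' starting at
// current and returns it as a token of runeType; on no match it returns TypeILLEGAL and
// restores the original position.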
func ConsumeIdentifier(inputs []rune, current int, runeType Type) (token Token, next int) {
posBegin := current
if current < 0 {
current = 0
}
if current >= len(inputs) {
return Token{
Typ: TypeEOF,
Value: "",
}, len(inputs)
}
char := inputs[current]
var value bytes.Buffer
// identifier = letter { letter | unicode_digit } .
// letter = unicode_letter | "_" .
// decimal_digit = "0" … "9" .
// octal_digit = "0" … "7" .
// hex_digit = "0" … "9" | "A" … "F" | "a" … "f" .
// newline = /* the Unicode code point U+000A */ .
// unicode_char = /* an arbitrary Unicode code point except newline */ .
// unicode_letter = /* a Unicode code point classified as "Letter" */ .
// unicode_digit = /* a Unicode code point classified as "Number, decimal digit" */ .
if unicode.IsLetter(char) || char == '_' {
for unicode.IsLetter(char) || char == '_' || unicode.IsNumber(char) || char == '.' {
value.WriteRune(char)
current++
if current >= len(inputs) {
break
}
char = inputs[current]
}
return Token{
Typ: runeType,
Value: value.String(),
}, current
}
// restore pos
return Token{Typ: TypeILLEGAL}, posBegin
}
func ComsumeRunesAny(inputs []rune, current int, runeType Type, expectRunes ...rune) (token Token, next int) {
posBegi | sumeStringsAny(inputs []rune, current int, runeType Type, mode Mode, expectStrs ...string) (token Token, next int) {
posBegin := current
if current < 0 {
current = 0
}
if current >= len(inputs) {
return Token{
Typ: TypeEOF,
Value: "",
}, len(inputs)
}
// regex mode
if mode&(ModeRegexpPerl|ModeRegexpPosix) != 0 {
for _, expect := range expectStrs {
var reg *regexp.Regexp
if mode&ModeRegexpPosix != 0 {
reg = regexp.MustCompilePOSIX(expect)
} else {
reg = regexp.MustCompile(expect)
}
matches := reg.FindStringSubmatch(string(inputs[current:]))
if len(matches) == 0 {
continue
}
current = current + len(matches[0])
return Token{
Typ: runeType,
Value: string(matches[0]),
}, current
}
// restore pos
return Token{Typ: TypeILLEGAL}, posBegin
}
// none regexp
for _, expect := range expectStrs {
endPos := current + len(expect)
if endPos > len(inputs) {
continue
}
selected := inputs[current:endPos]
if ((mode&ModeCaseSensitive != 0) && strings.EqualFold(string(selected), expect)) ||
string(selected) == expect {
return Token{
Typ: runeType,
Value: string(selected),
}, endPos
}
}
// restore pos
return Token{Typ: TypeILLEGAL}, posBegin
}
| n := current
if current < 0 {
current = 0
}
if current >= len(inputs) {
return Token{
Typ: TypeEOF,
Value: "",
}, len(inputs)
}
char := inputs[current]
current++
for _, expect := range expectRunes {
if char == expect {
return Token{
Typ: runeType,
Value: "",
}, current
}
}
// restore pos
return Token{Typ: TypeILLEGAL}, posBegin
}
func Com |
test_index_functions.py | from unittest import mock
from django.test import TestCase
from django.db.models.query import QuerySet
from elasticsearch_django.index import (
_prune_hit,
bulk_actions,
create_index,
delete_index, | prune_index,
scan_index,
update_index,
)
from .models import ExampleModel, ExampleModelManager
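# All Elasticsearch interaction (get_client, helpers.bulk) is mocked, so these tests run without a live cluster.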
class IndexFunctionTests(TestCase):
"""Test index functions."""
@mock.patch("elasticsearch_django.index.get_client")
@mock.patch("elasticsearch_django.index.get_index_mapping")
def test_create_index(self, mock_mapping, mock_client):
"""Test the create_index function."""
mock_client.return_value = mock.Mock()
create_index("foo")
mock_client.assert_called_once_with()
mock_mapping.assert_called_once_with("foo")
mock_client.return_value.indices.create.assert_called_once_with(
index="foo", body=mock_mapping.return_value
)
@mock.patch.object(QuerySet, "iterator")
@mock.patch("elasticsearch_django.index.get_client")
@mock.patch("elasticsearch_django.index.bulk_actions")
@mock.patch("elasticsearch_django.index.get_index_models")
@mock.patch("elasticsearch.helpers.bulk")
def test_update_index(
self, mock_bulk, mock_models, mock_actions, mock_client, mock_qs
):
"""Test the update_index function."""
mock_foo = mock.Mock()
mock_foo.search_doc_type = mock.PropertyMock(return_value="bar")
mock_foo.objects = mock.PropertyMock(return_value=mock.Mock())
mock_models.return_value = [mock_foo]
responses = update_index("foo")
self.assertEqual(responses, [mock_bulk.return_value])
@mock.patch("elasticsearch_django.index.get_client")
def test_delete_index(self, mock_client):
"""Test the delete_index function."""
delete_index("foo")
mock_client.assert_called_once()
mock_client.return_value.indices.delete.assert_called_once_with(index="foo")
@mock.patch("elasticsearch_django.index.helpers")
@mock.patch("elasticsearch_django.index.scan_index")
@mock.patch("elasticsearch_django.index._prune_hit")
@mock.patch("elasticsearch_django.index.bulk_actions")
@mock.patch("elasticsearch_django.index.get_index_models")
@mock.patch("elasticsearch_django.index.get_client")
def test_prune_index(
self,
mock_client,
mock_models,
mock_actions,
mock_prune,
mock_scan,
mock_helpers,
):
"""Test the prune_index function."""
# this forces one single evaluation of the outer and inner for loop
mock_models.return_value = [ExampleModel]
mock_scan.return_value = ["hit"]
# _prune_hit returns an object, so bulk should be called
mock_prune.return_value = ExampleModel()
# should return a list with one item in it
self.assertEqual(prune_index("foo"), [mock_helpers.bulk.return_value])
# should have called actions and bulk once each
mock_actions.assert_called_once()
mock_helpers.bulk.assert_called_once()
mock_actions.reset_mock()
mock_helpers.bulk.reset_mock()
# if there are no objects to prune
mock_prune.return_value = None
# should return an empty list
self.assertEqual(prune_index("foo"), [])
# shouldn't call either actions or bulk (as there's no need)
mock_actions.assert_not_called()
mock_helpers.bulk.assert_not_called()
@mock.patch.object(ExampleModelManager, "in_search_queryset")
def test__prune_hit(self, mock_qs):
"""Test the _prune_hit function."""
hit = {"_id": 1, "_index": "foo"}
mock_qs.return_value = True
self.assertIsNone(_prune_hit(hit, ExampleModel))
mock_qs.return_value = False
# should now return an instance of ExampleModel
obj = _prune_hit(hit, ExampleModel)
self.assertIsInstance(obj, ExampleModel)
self.assertEqual(obj.id, hit["_id"])
@mock.patch("elasticsearch_django.index.get_client")
@mock.patch("elasticsearch_django.index.helpers")
def test_scan_index(self, mock_helpers, mock_client):
"""Test the scan_index function."""
query = {"query": {"type": {"value": "examplemodel"}}}
# mock_helpers.scan.return_value = ['foo', 'bar']
# cast to list to force evaluation of the generator
response = list(scan_index("foo", ExampleModel))
mock_helpers.scan.assert_called_once_with(
mock_client.return_value, query=query, index="foo"
)
self.assertEqual(response, list(mock_helpers.scan.return_value))
@mock.patch.object(ExampleModel, "as_search_action")
def test_bulk_actions(self, mock_action):
"""Test the bulk_actions function."""
# cannot pass in in '_all' as the bulk_actions
with self.assertRaises(ValueError):
list(bulk_actions([], "_all", "index"))
mock_action.return_value = "foo"
objects = [ExampleModel(), ExampleModel()]
self.assertEqual(list(bulk_actions(objects, "foo", "update")), ["foo", "foo"])
# now let's add in a bad object, and check we still get the good one
self.assertEqual(
list(bulk_actions([ExampleModel(), "bad"], "foo", "update")), ["foo"]
) | |
fromcallback.py | from typing import Any, Callable, Optional
from reactivex import Observable, abc, typing
from reactivex.disposable import Disposable
def from_callback_(
func: Callable[..., Callable[..., None]],
mapper: Optional[typing.Mapper[Any, Any]] = None,
) -> Callable[[], Observable[Any]]:
"""Converts a callback function to an observable sequence.
Args:
func: Function with a callback as the last argument to
convert to an Observable sequence.
mapper: [Optional] A mapper which takes the arguments
from the callback to produce a single item to yield on next.
Returns:
        A function that, when executed with the required arguments minus
        the callback, produces an Observable sequence whose single value is
        the list of arguments passed to the callback.
"""
def function(*args: Any) -> Observable[Any]:
arguments = list(args)
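        # Copy the positional arguments; the callback handler is appended as the final argument before invoking func.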
def subscribe(
observer: abc.ObserverBase[Any],
scheduler: Optional[abc.SchedulerBase] = None,
) -> abc.DisposableBase:
def handler(*args: Any) -> None:
|
arguments.append(handler)
func(*arguments)
return Disposable()
return Observable(subscribe)
return function
__all__ = ["from_callback_"]
| results = list(args)
if mapper:
try:
results = mapper(args)
except Exception as err: # pylint: disable=broad-except
observer.on_error(err)
return
observer.on_next(results)
else:
if len(results) <= 1:
observer.on_next(*results)
else:
observer.on_next(results)
observer.on_completed() |
pvst.py | import re
import utilities.utils as utils
from spytest import st
from spytest.utils import filter_and_select
from spytest.utils import exec_foreach, exec_all
import utilities.common as utility
import apis.switching.portchannel as portchannel
import apis.system.basic as basic
from utilities.parallel import ensure_no_exception
from datetime import datetime,timedelta
debug_log_path = r"/var/log/stplog"
SHOW_STP_VLAN = "show spanning_tree vlan {}"
BLOCKING_STATE = "BLOCKING"
def config_spanning_tree(dut, feature="pvst", mode="enable", vlan=None, cli_type='click'):
"""
:param dut:
:param feature:
:param mode:
:param vlan:
:param cli_type:
:return:
"""
command = ''
no_form = 'no'
if mode == 'enable':
no_form = ''
st.log("{} spanning_tree {}".format(mode, feature))
if cli_type == 'click':
if vlan:
command = "config spanning_tree vlan {} {}".format(mode, vlan)
else:
command = "config spanning_tree {} {}".format(mode, feature)
elif cli_type == 'klish':
if mode == 'disable':
feature = ''
if vlan:
command = "{} spanning-tree vlan {}".format(no_form, vlan)
else:
command = "{} spanning-tree mode {}".format(no_form, feature)
st.config(dut, command, type=cli_type)
def config_stp_parameters(dut, cli_type='click', no_form='', **kwargs):
"""
:param dut:
:param cli_type:
:param no_form:
:param kwargs:
:return:
"""
no_form = 'no' if no_form else ''
for each_key in kwargs.keys():
if cli_type == 'click':
command = "config spanning_tree {} {}".format(each_key, kwargs[each_key])
elif cli_type == 'klish':
command = "{} spanning-tree {} {}".format(no_form, each_key, kwargs[each_key])
else:
st.error("Invalid CLI type - {}".format(cli_type))
return
st.config(dut, command, type=cli_type)
def config_stp_vlan_parameters(dut, vlan, **kwargs):
"""
:param dut:
:param vlan:
:param kwargs:
:return:
"""
cli_type = kwargs.setdefault('cli_type', 'click')
no_form = 'no' if kwargs.setdefault('no_form', False) else ''
del kwargs['cli_type']
del kwargs['no_form']
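    # Map click-style option names onto their klish keyword equivalents.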
click_2_klish = {'forward_delay': 'forward-time', 'hello': 'hello-time', 'max_age': 'max-age'}
for each_key, value in kwargs.items():
if cli_type == 'click':
command = "config spanning_tree vlan {} {} {}".format(each_key, vlan, value)
elif cli_type == 'klish':
each_key1 = click_2_klish.get(each_key, each_key)
command = "{} spanning-tree vlan {} {} {}".format(no_form, vlan, each_key1, value)
else:
st.error("Invalid CLI type - {}".format(cli_type))
return
st.config(dut, command, type=cli_type)
def config_stp_vlan_parameters_parallel(dut_list, thread=True, **kwargs):
"""
Author : chaitanya lohith bollapragada
This will configure the "config_stp_vlan_parameters" in parallel to all DUTs mentioned.
:param dut_list:
:param vlan: list of vlans
:param priority: list of STP priorities
:param thread: True | False
:return:
"""
st.log("Configuring STP vlan parameters in paraller on all DUT's ... ")
dut_li = list(dut_list) if isinstance(dut_list, list) else [dut_list]
vlan_li = list(kwargs['vlan']) if isinstance(kwargs['vlan'], list) else [kwargs['vlan']]
priority_li = list(kwargs['priority']) if isinstance(kwargs['priority'], list) else [kwargs['priority']]
if not len(dut_li) == len(vlan_li) == len(priority_li):
return False
params = list()
for i,each in enumerate(dut_list):
params.append(utility.ExecAllFunc(config_stp_vlan_parameters, each, vlan_li[i], priority=priority_li[i]))
[out, exceptions] = exec_all(thread, params)
st.log(exceptions)
return False if False in out else True
def config_stp_vlan_interface(dut, vlan, iface, value, mode='cost', **kwargs):
"""
:param dut:
:param vlan:
:param iface:
:param value:
:param mode:
:return:
"""
cli_type = kwargs.get('cli_type', 'click')
no_form = 'no' if kwargs.get('no_form') else ''
if mode in ['cost', 'priority']:
if cli_type == 'click':
command = "config spanning_tree vlan interface {} {} {} {} ".format(mode, vlan, iface, value)
elif cli_type == 'klish':
if mode == 'priority':
mode = 'port-priority'
interface_data = utils.get_interface_number_from_name(iface)
command = ['interface {} {}'.format(interface_data["type"], interface_data["number"]),
'{} spanning-tree vlan {} {} {}'.format(no_form, vlan, mode, value), "exit"]
else:
st.error("Invalid CLI type - {}".format(cli_type))
return
else:
st.log("Invalid mode = {}".format(mode))
return
st.config(dut, command, type=cli_type)
def config_stp_enable_interface(dut, iface, mode="enable"):
"""
:param dut:
:param iface:
:param mode:
:return:
"""
command = "config spanning_tree interface {} {}".format(mode, iface)
st.config(dut, command)
def config_stp_interface_params(dut, iface, **kwargs):
"""
:param dut:
:param iface:
:param cli_type:
:param kwargs:
:return:
"""
cli_type = kwargs.setdefault('cli_type', 'click')
del kwargs['cli_type']
click_2_klish = {"root_guard": " guard root", "bpdu_guard": "bpduguard ", "portfast": "portfast",
"uplink_fast": "uplinkfast"}
if cli_type == 'click':
for each_key in kwargs.keys():
if each_key == "priority" or each_key == "cost":
command = "config spanning_tree interface {} {} {}".format(each_key, iface, kwargs[each_key])
elif each_key == "bpdu_guard_action":
command = "config spanning_tree interface bpdu_guard enable {} {}".format(iface, kwargs[each_key])
else:
command = "config spanning_tree interface {} {} {}".format(each_key, kwargs[each_key], iface)
st.config(dut, command)
elif cli_type == 'klish':
interface_data = utils.get_interface_number_from_name(iface)
command = ['interface {} {}'.format(interface_data["type"], interface_data["number"])]
for each_key in kwargs.keys():
no_form = 'no' if kwargs[each_key] == 'disable' else ''
if each_key == "priority" or each_key == "cost":
command.append('spanning-tree {} {}'.format(each_key, kwargs[each_key]))
elif each_key == "bpdu_guard_action":
command.append('{} spanning-tree bpduguard port-shutdown'.format(no_form))
else:
command.append("{} spanning-tree {}".format(no_form, click_2_klish[each_key]))
command.append('exit')
st.config(dut, command, type=cli_type)
def | (dut, iface, mode="enable"):
"""
:param dut:
:param iface:
:param mode:
:return:
"""
command = "config spanning_tree interface {} {} ".format(mode, iface)
st.config(dut, command)
def show_stp(dut, **kwargs):
"""
:param dut:
:return:
"""
cli_type = kwargs.get("cli_type", 'click')
command = "show spanning_tree"
if 'sub_cmd' in kwargs:
command = "show spanning_tree {}".format(kwargs['sub_cmd'])
return st.show(dut, command, type=cli_type)
def show_stp_vlan(dut, vlan, cli_type="click"):
"""
:param dut:
:param vlan:
:param cli_type:
:return:
"""
st.log("show spanning_tree vlan <id>")
command = SHOW_STP_VLAN.format(vlan)
return st.show(dut, command, type=cli_type)
def show_stp_vlan_iface(dut, vlan, iface, cli_type="click"):
"""
:param dut:
:param vlan:
:param iface:
:return:
"""
if cli_type == "click":
command = "show spanning_tree vlan interface {} {}".format(vlan, iface)
elif cli_type == "klish":
command = "show spanning_tree vlan {} interface {}".format(vlan, iface)
else:
st.log("Unsupported CLI type {}".format(cli_type))
return list()
return st.show(dut, command, type="cli_type")
def show_stp_stats(dut):
"""
:param dut:
:return:
"""
command = "show spanning_tree statistics"
return st.show(dut, command)
def show_stp_stats_vlan(dut, vlan):
"""
:param dut:
:param vlan:
:return:
"""
command = "show spanning_tree statistics vlan {} ".format(vlan)
return st.show(dut, command)
def debug_stp(dut, *argv):
"""
:param dut:
:param argv:
:return:
Usage:
debug_stp(dut)
debug_stp(dut, "reset")
debug_stp(dut, "vlan 100", "interface Ethernet0")
debug_stp(dut, "vlan 100 -d", "interface Ethernet0 -d")
"""
command = 'debug spanning_tree'
if not argv:
st.config(dut, command)
for each in argv:
command2 = "{} {}".format(command, each)
st.config(dut, command2)
return True
def get_debug_stp_log(dut, filter_list=[]):
""""
:param dut:
:param filter_list:
:return:
"""
if isinstance(filter_list, list):
filter_list = list(filter_list)
else:
filter_list = [filter_list]
command = "cat {}".format(debug_log_path)
for each_filter in filter_list:
command += " | grep '{}'".format(each_filter)
output = st.show(dut, command, skip_tmpl=True, skip_error_check=True)
reg_output = utils.remove_last_line_from_string(output)
out_list = reg_output.split('\n')
return out_list
def clear_debug_stp_log(dut):
"""
:param dut:
:return:
"""
command = "dd if=/dev/null of={}".format(debug_log_path)
st.config(dut, command)
return True
def verify_stp_vlan_iface(dut, **kwargs):
"""
:param dut:
:param kwargs:
:return:
"""
output = show_stp_vlan_iface(dut, kwargs["vlan"], kwargs["iface"])
for each in kwargs.keys():
match = {each: kwargs[each]}
entries = filter_and_select(output, None, match)
if not entries:
st.log("{} and {} is not match ".format(each, kwargs[each]))
return False
return True
def verify_stp_statistics_vlan(dut, **kwargs):
"""
:param dut:
:param kwargs:
:return:
"""
output = show_stp_stats_vlan(dut, kwargs["vlan"])
for each in kwargs.keys():
match = {each: kwargs[each]}
entries = filter_and_select(output, None, match)
if not entries:
st.log("{} and {} is not match ".format(each, kwargs[each]))
return False
return True
def check_dut_is_root_bridge_for_vlan(dut, vlanid):
"""
:param dut:
:param vlanid:
:return:
"""
cmd = SHOW_STP_VLAN.format(vlanid)
stp_output = st.show(dut, cmd)
root_bridge=stp_output[0]["rt_id"]
dut_bridge_id=stp_output[0]["br_id"]
return (root_bridge == dut_bridge_id) and stp_output[0]["rt_port"] == "Root"
def get_stp_bridge_param(dut, vlanid, bridge_param):
"""
This is used to provide value of the bridge_param for given dut and vlanid
:param dut:
:param vlanid:
:param bridge_param: should be one of the below strings
stp_mode Returns STP mode
vid Returns vlanid
inst Returns STP intance id
br_id Returns Bridge id
br_maxage Returns Bridge max age
br_hello Returns Bridge Hello timer value
br_fwddly Returns Bridge Forward Delay
br_hold Returns Bridge Hold Timer value
rt_id Returns Root Bridge id
rt_pathcost Returns RootPath Cost
rt_desigbridgeid Returns DesignatedBridge id
rt_port Returns Root
rt_maxage Returns Root max age
rt_hello Returns Root Bridge Hello Timer value
rt_fwddly Returns Root Bridge Forward Delay
:return: Returns value of the bridge_param for given dut and vlanid
"""
stp_bridge_param_list = ['stp_mode',
'vid',
'inst',
'br_id',
'br_maxage',
'br_hello',
'br_fwddly',
'br_hold',
'br_lasttopo',
'br_topoch',
'rt_id',
'rt_pathcost',
'rt_desigbridgeid',
'rt_port',
'rt_maxage',
'rt_hello',
'rt_fwddly']
if bridge_param not in stp_bridge_param_list:
st.error("Please provide the valid stp bridge parameter")
return
cmd = SHOW_STP_VLAN.format(vlanid)
stp_output = st.show(dut, cmd)
return stp_output[0][bridge_param]
def get_stp_port_param(dut, vlanid, ifname, ifparam):
"""
This is used to provide value of the bridge_param for given dut and vlanid
:param dut:
:param vlanid:
:param bridge_param: should be one of the below strings
port_name Returns Port Name
port_priority Returns Port Priority
port_pathcost Returns Port pathcost
port_portfast Returns Portfast Enabled(Y) or Not(N)
port_uplinkfast Returns Uplinkfast is Enabled(Y) or Not(N)
port_state Returns Port state
port_desigcost Returns Port Designated cost
port_desigrootid Returns Port Designated Root id
port_desigbridgeid Returns Port Designated Bridge id
:return:
"""
stp_port_param_list = ['port_name',
'port_priority',
'port_pathcost',
'port_portfast',
'port_uplinkfast',
'port_state',
'port_desigcost',
'port_desigrootid',
'port_desigbridgeid']
if ifparam not in stp_port_param_list:
st.error("Please provide the valid stp port parameter")
return
cmd = SHOW_STP_VLAN.format(vlanid)+" interface {}".format(ifname)
stp_output = st.show(dut, cmd)
return None if len(stp_output) == 0 else stp_output[0][ifparam]
def get_default_root_bridge(dut_list):
"""
This is used to get the root bridge with default config
    :param dut_list: List of DUTs
:return: Returns root bridge like D1 or D2
"""
duts_mac_list = basic.get_dut_mac_address_thread(dut_list)
if duts_mac_list:
min_mac_addr = min(duts_mac_list.values())
root_bridge = [dut for dut, mac_addr in duts_mac_list.items() if mac_addr == min_mac_addr][0]
return [dut for dut in dut_list if dut==root_bridge][0]
else:
return None
def get_duts_mac_address(duts):
"""
This is used to get the Duts and its mac addresses mapping
:param duts: List of DUTs
:return : Duts and its mac addresses mapping
"""
duts_mac_addresses = {}
cmd = "show platform syseeprom"
for dut in duts:
if st.is_vsonic(dut):
mac = basic.get_ifconfig_ether(dut)
duts_mac_addresses[dut] = mac
continue
eeprom_details = st.show(dut, cmd, skip_error_check=True)
if not eeprom_details:
iteration=3
for i in range(1, iteration+1):
st.wait(2)
eeprom_details = st.show(dut, cmd, skip_error_check=True)
if eeprom_details:
break
                if not eeprom_details and i >= iteration:
st.log("EEPROM data not found for {}".format(dut))
st.report_fail("eeprom_data_not_found", dut)
st.log("EEPROM DETAILS -- {}".format(eeprom_details))
if eeprom_details:
for data in eeprom_details:
if "tlv_name" in data and data["tlv_name"] == "Base MAC Address":
duts_mac_addresses[dut] = data["value"].replace(":","")
st.log("DUT MAC ADDRESS -- {}".format(duts_mac_addresses))
return duts_mac_addresses
def _get_duts_list_in_order(vars):
"""
This is used to get the DUTs and their mac addresses in ascending order of Mac addresses
:param duts: List of DUTs
:return : Duts and its mac addresses mapping
"""
duts_mac_addresses = get_duts_mac_address(vars["dut_list"])
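    # Sort the (mac, dut) pairs ascending so index 0 holds the DUT with the lowest base MAC.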
return sorted(zip(duts_mac_addresses.values(), duts_mac_addresses.keys()))
def get_ports_based_on_state(vars, vlanid, port_state, dut=None, cli_type='click'):
"""
This is used to get the blocked ports on none-root bridge
:param duts: List of DUTs
:return : Duts and its mac addresses mapping
"""
selected_non_root = ""
if dut is None:
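        # No DUT given: pick the one with the highest base MAC, which loses root election under default bridge priorities.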
duts_list = _get_duts_list_in_order(vars)
dut_with_max_mac_address = duts_list[len(duts_list) - 1][1]
selected_non_root = [dut_key for dut_key, dut_value in vars.items() if dut_value == dut_with_max_mac_address][0]
else:
selected_non_root = [dut_key for dut_key, dut_value in vars.items() if dut_value == dut][0]
stp_output = show_stp_vlan(vars[selected_non_root], vlanid, cli_type=cli_type)
ports_list = [row["port_name"] for row in stp_output if
row["port_state"] == port_state and int(row["vid"]) == vlanid]
return ports_list
def poll_for_root_switch(dut, vlanid, iteration=20, delay=1):
"""
API to poll for root switch
:param dut:
:param vlanid:
:param iteration:
:param delay:
:return:
"""
i = 1
while True:
if check_dut_is_root_bridge_for_vlan(dut, vlanid):
st.log("Observed dut is root bridge {} iteration".format(i))
return True
if i > iteration:
st.log("Max iterations {} reached".format(i))
return False
i += 1
st.wait(delay)
def poll_for_stp_status(dut, vlanid, interface, status, iteration=20, delay=1):
"""
    API to poll for stp status for an interface
:param dut:
:param vlanid:
:param iteration:
:param delay:
:return:
"""
i = 1
while True:
if get_stp_port_param(dut, vlanid, interface, "port_state") == status:
st.log("Port status is changed to {} after {} sec".format(status, i))
return True
if i > iteration:
st.log("Max iterations {} reached".format(i))
return False
i += 1
st.wait(delay)
def get_root_guard_details(dut, vlan=None, ifname=None , rg_param="rg_timeout"):
"""
API will return Root Guard timeout if vlan and interface won't provide , otherwise Root Guard state will return
:param dut:
:param vlan:
:param ifname:
:return:
"""
cmd = "show spanning_tree root_guard"
output = st.show(dut, cmd)
if vlan is None and ifname is None:
rg_value = int(output[0][rg_param])
else:
rg_value = [row[rg_param] for row in output if row["rg_ifname"] == ifname and int(row["rg_vid"]) == vlan][0]
return rg_value
def check_rg_current_state(dut, vlan, ifname):
"""
API will check the Root Guard status for given interface and vlan
:param dut:
:param vlan:
:param ifname:
:return:
"""
rg_status = get_root_guard_details(dut, vlan, ifname, "rg_status")
#show_stp_config_using_klish(dut, "root_guard", vlan)
return rg_status == "Consistent state"
def check_bpdu_guard_action(dut, ifname, **kwargs):
"""
API will check the BPDU Guard action config and it's operational status
:param dut:
:param ifname:
:param kwargs:
config_shut : BPDU shutdown configuration
opr_shut : status of the port shut due to BPDU Guard
:return:
"""
cmd = "show spanning_tree bpdu_guard"
show_out = st.show(dut, cmd)
#show_stp_config_using_klish(dut, "bpdu_guard")
if_out = [row for row in show_out if row['bg_ifname'] == ifname][0]
config_shut, opr_shut = if_out['bg_cfg_shut'], if_out['bg_oper_shut']
return kwargs['config_shut'] == config_shut and kwargs['opr_shut'] == opr_shut
def stp_clear_stats(dut, **kwargs):
"""
:param dut:
:param kwargs:
vlan :vlan id
interface : interface name
:return:
"""
cmd = "sonic-clear spanning_tree statistics"
if 'vlan' in kwargs and 'interface' not in kwargs:
cmd += ' vlan {}'.format(kwargs['vlan'])
if 'vlan' in kwargs and 'interface' in kwargs:
cmd += ' vlan-interface {} {}'.format(kwargs['vlan'], kwargs['interface'])
output = st.config(dut, cmd)
def get_stp_stats(dut, vlan, interface, param):
"""
:param dut:
:param vlan:
:param interface:
:param param:
tx_bpdu : BPDU Transmission count
rx_bpdu : BPDU Receive count
tx_tcn : TCN Transmission count
rx_tcn : TCN Receive count
:return:
"""
output = show_stp_stats_vlan(dut, vlan)
#show_stp_config_using_klish(dut, 'statistics', vlan)
value_list = [row[param] for row in output if int(row['st_vid']) == vlan and row['st_portno'] == interface]
utils.banner_log(value_list)
return None if len(output) == 0 else int(value_list[0])
def verify_stp_ports_by_state(dut, vlan, port_state, port_list, cli_type='click'):
"""
API Will check the port state in the VLAN.
Author: Prudvi Mangadu ([email protected])
:param dut:
:param vlan:
:param state:
:param port_list:
:param cli_type:
:return:
"""
port_li = list(port_list) if isinstance(port_list, list) else [port_list]
stp_output = show_stp_vlan(dut, vlan, cli_type=cli_type)
ports_list = [row["port_name"] for row in stp_output if
row["port_state"] == port_state and int(row["vid"]) == vlan]
result = True
for each_port in port_li:
if each_port not in ports_list:
st.log("{} is not {} state ".format(each_port, port_state))
result = False
else:
st.log("{} is {} state ".format(each_port, port_state))
return result
def get_stp_port_list(dut, vlan, exclude_port=[], cli_type='click'):
"""
API will return all ports of VLAN instance.
Author: Prudvi Mangadu ([email protected])
:param dut:
:param vlan:
:param exclude_port:
:param cli_type:
:return:
"""
ex_port_li = list(exclude_port) if isinstance(exclude_port, list) else [exclude_port]
stp_output = show_stp_vlan(dut, vlan, cli_type=cli_type)
ports_list = [row["port_name"] for row in stp_output]
for each_int in ex_port_li:
if each_int in ports_list:
ports_list.remove(each_int)
st.log("{} is excluded".format(each_int))
return ports_list
def get_stp_root_port(dut, vlan, cli_type='click'):
"""
API will return Root/Forwarding port of the device in the VLAN.
Author: Prudvi Mangadu ([email protected])
:param dut:
:param vlan:
:param cli_type:
:return:
"""
out = show_stp_vlan(dut, vlan, cli_type=cli_type)
if not out:
st.error("No Root/Forwarding port found")
return False
if out[0]['rt_port'] == "Root":
st.error("Given device is ROOT Bridge.")
return False
return out[0]['rt_port']
def get_stp_next_root_port(dut, vlan, cli_type='click'):
"""
API will return Next possible Root/Forwarding port of the device in the VLAN.
Author: Prudvi Mangadu ([email protected])
:param dut:
:param vlan:
:param cli_type:
:return:
"""
partner = None
next_root_port = None
sort_list = lambda list1, list2: [x for _, x in sorted(zip(list2, list1))]
out = show_stp_vlan(dut, vlan, cli_type=cli_type)
if not out:
st.error("No Initial Root/Forwarding port found")
return next_root_port
if out[0]['rt_port'] == "Root":
st.error("Given device is ROOT Bridge.")
return next_root_port
partner_ports = st.get_dut_links(dut)
root_port = out[0]['rt_port']
root_cost = int(filter_and_select(out, ['port_pathcost'], {'port_name': root_port})[0]['port_pathcost'])
st.log('root_port : {}, root_cost: {}'.format(root_port, root_cost))
# Finding the Root port connected partner
for each in partner_ports:
if not partner:
if root_port == each[0]:
partner = each[1]
st.log("partner : {}".format(partner))
if not partner:
st.error("No Partner found for Root/Forwarding Port.")
return next_root_port
# Dut Partner port mapping
dut_partner_ports = st.get_dut_links(dut, partner)
dut_partner_ports_map = {all[0]: all[2] for all in dut_partner_ports}
dut_partner_ports_map_rev = {all[2]: all[0] for all in dut_partner_ports}
st.log('dut_partner_ports_map : {}'.format(str(dut_partner_ports_map)))
st.log('dut_partner_ports_map_rev : {}'.format(str(dut_partner_ports_map_rev)))
# Preparing DATA to process and find the next Root/Forwarding port.
cut_data = {}
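    # cut_data: partner-side port name -> [numeric ifindex, STP port state, path cost].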
pc_list = [each['teamdev'] for each in portchannel.get_portchannel_list(partner)]
for each in out:
port = each['port_name']
if "Ethernet" in port and port in dut_partner_ports_map:
port = dut_partner_ports_map[each['port_name']]
ifindex = int(re.findall(r'\d+', port)[0])
cut_data[port] = [ifindex, each['port_state'], int(each['port_pathcost'])]
elif port in pc_list:
ifindex = int(re.findall(r'\d+', port)[0])
cut_data[port] = [ifindex, each['port_state'], int(each['port_pathcost'])]
else:
pass
st.log('cut_data == {}'.format(str(cut_data)))
cost_vs_port = {}
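    # cost_vs_port: path cost -> [[physical Ethernet ports], [port-channel ports]], excluding the current root port.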
for each in cut_data:
if each != dut_partner_ports_map[root_port]:
if 'Ethernet' in each:
if cut_data[each][2] not in cost_vs_port:
cost_vs_port[cut_data[each][2]] = [[each], []]
else:
cost_vs_port[cut_data[each][2]][0].append(each)
else:
if cut_data[each][2] not in cost_vs_port:
cost_vs_port[cut_data[each][2]] = [[], [each]]
else:
cost_vs_port[cut_data[each][2]][1].append(each)
sorted_cost = sorted(cost_vs_port.keys())
st.log("cost_vs_port : {}".format(cost_vs_port))
st.log("sorted_cost : {}".format(sorted_cost))
# Logic to find next Root/Forwarding port
if root_cost in cost_vs_port and (len(cost_vs_port[root_cost][0]) or len(cost_vs_port[root_cost][1])):
st.debug("When 2 or more ports has configured with same root port cost.")
if len(cost_vs_port[root_cost][0]):
port_list = cost_vs_port[root_cost][0]
port_index_li = [cut_data[e][0] for e in port_list]
next_root_port = sort_list(port_list, port_index_li)[0]
return dut_partner_ports_map_rev[next_root_port]
else:
port_list = cost_vs_port[root_cost][1]
port_index_li = [cut_data[e][0] for e in port_list]
next_root_port = sort_list(port_list, port_index_li)[0]
return next_root_port
elif len(sorted_cost):
st.debug("When NO 2 or more ports has root port cost configured. So checking next larger cost ports")
next_root_cost = sorted_cost[0]
if len(cost_vs_port[next_root_cost][0]):
port_list = cost_vs_port[next_root_cost][0]
port_index_li = [cut_data[e][0] for e in port_list]
next_root_port = sort_list(port_list, port_index_li)[0]
return dut_partner_ports_map_rev[next_root_port]
else:
port_list = cost_vs_port[next_root_cost][1]
port_index_li = [cut_data[e][0] for e in port_list]
next_root_port = sort_list(port_list, port_index_li)[0]
return next_root_port
else:
st.error("No Match")
return next_root_port
def config_stp_in_parallel(dut_list, feature="pvst", mode="enable", vlan=None, thread=True):
"""
API to configure stp in parallel on all the provided DUT's
Author: Chaitanya Vella ([email protected])
:param dut_list:
:param feature:
:param mode:
:param vlan:
:param thread:
:return:
"""
st.log("Configuring {} on all the DUT's with mode as {}".format(feature.capitalize(), mode))
dut_li = list([str(e) for e in dut_list]) if isinstance(dut_list, list) else [dut_list]
params = list()
for dut in dut_li:
params.append([config_spanning_tree, dut, feature, mode, vlan])
if params:
exec_all(thread, params)
def show_stp_in_parallel(dut_list, thread=True, cli_type='click'):
"""
API to show the stp configuration in parallel in all the provided DUT's
Author: Chaitanya Vella ([email protected])
:param dut_list:
:param thread:
:param cli_type:
:return:
"""
st.log("Displaying STP result on all the DUT's in parallel ....")
dut_li = utility.make_list(dut_list)
exec_foreach(thread, dut_li, show_stp, cli_type=cli_type)
def get_root_bridge_for_vlan(dut_vlan_data, thread=True):
params = list()
result = dict()
for dut, vlan in dut_vlan_data.items():
params.append([check_dut_is_root_bridge_for_vlan, dut, vlan])
if params:
[out, exceptions] = exec_all(thread, params)
utils.banner_log("Getting root bridge details")
for i,response in enumerate(out):
result[params[i][1]] = response
print(result)
return result
def check_for_single_root_bridge_per_vlan(dut_list, vlan_list, dut_vlan_data, cli_type='click'):
"""
API to check for single root bridge per VLAN
Author: Chaitanya Vella ([email protected])
:param dut:
:param vlanid:
:param cli_type:
:return:
"""
st.log("Verifying the single root bridge per vlan ...")
dut_li = list([str(e) for e in dut_list]) if isinstance(dut_list, list) else [dut_list]
vlan_li = list([str(e) for e in vlan_list]) if isinstance(vlan_list, list) else [vlan_list]
if len(vlan_list) != len(dut_list):
st.log("Invalid data provided to check the root bridge per vlan ...")
st.report_fail("invalid_data_for_root_bridge_per_vlan")
for vlan in vlan_li:
root_count = 0
params = list()
for dut in dut_li:
params.append([show_stp_vlan, dut, vlan, cli_type])
stp_output, exceptions = exec_all(True, params)
st.log(stp_output)
st.log(exceptions)
for value in exceptions:
st.log("Exceptions observed {}".format(value))
if value is not None:
st.log("Exception occured {}".format(value))
return False
if not stp_output:
st.log("STP output not found on {} for {} instance".format(dut_li, vlan))
st.report_fail("stp_output_not_found", dut_li, vlan)
for index, stp_out in enumerate(stp_output):
if len(stp_out) <= 0:
st.log("STP OUTPUT IS NOT OBSERVED --- {}".format(stp_out))
st.report_fail("stp_output_not_found")
root_bridge = stp_out[0]["rt_id"]
dut_bridge_id = stp_out[0]["br_id"]
if root_bridge == dut_bridge_id and stp_out[0]["rt_port"] == "Root":
if dut_vlan_data[dut_li[index]] != int(vlan.strip()):
st.log("Expected DUT {} is not root for {} instance".format(dut_li[index], vlan))
st.report_fail("expected_dut_not_root", dut_li[index], vlan)
root_count += 1
if root_count > 1:
st.log("Observed more than 1 root bridge per {} instance".format(vlan))
st.report_fail("observed_more_than_1_root_bridge", vlan)
return True
def verify_root_bridge_interface_state(dut, vlan, interface_list, cli_type='click'):
"""
API to verify the root bridge interface state to be forwarded
Author: Chaitanya Vella ([email protected])
:param dut:
:param vlan:
:param interface_list:
:param cli_type:
:return:
"""
fail_states = ["BLOCKING", "DISABLED", "DISCARDING"]
pass_states = ["FORWARDING"]
forwarding_counter = 0
result = show_stp_vlan(dut, vlan, cli_type=cli_type)
if result:
for data in result:
if data["port_name"] not in interface_list:
st.log("Interface {} not found in expected list ...".format(data["port_name"]))
if data["port_state"] in fail_states:
st.log("Observed that interface {} state is {} for root bridge".format(data["port_name"],fail_states))
if data["port_state"] in pass_states:
forwarding_counter+=1
        return forwarding_counter == len(interface_list)
else:
st.log("No STP data found for {} and {} instance".format(dut, vlan))
return False
def poll_root_bridge_interfaces(dut_vlan_list, interfaces_list, iteration=30, delay=1):
"""
API to get the root bridge interfaces to be forwarded
Author: Chaitanya Vella ([email protected])
:param dut_vlan_list:
:param interfaces_list:
:param iteration:
:param delay:
:return:
"""
st.log("Polling for root bridge interfaces ...")
if dut_vlan_list and interfaces_list:
no_of_duts = len(dut_vlan_list)
check=0
for dut, vlan in dut_vlan_list.items():
i=1
while True:
if verify_root_bridge_interface_state(dut, vlan, interfaces_list[dut]):
st.log("Root bridge interface verification succeeded.")
check+=1
break
if i > iteration:
st.log("Max iteration limit reached.")
break
i+=1
st.wait(delay)
if check != no_of_duts:
st.log("Number of root DUTs check failed ...")
return False
return True
else:
st.log("Empty DUT VLAN LIST dut_vlan_list AND INTERFACE LIST interfaces_list")
return False
def verify_root_bridge_on_stp_instances(dut_list, vlan, bridge_identifier):
"""
API to verify the bridge identifier with root bridge identifier
:param dut_list:
:param vlan:
:param bridge_identifier:
:return:
"""
dut_li = list(dut_list) if isinstance(dut_list, list) else [dut_list]
params = list()
for dut in dut_li:
params.append([get_stp_bridge_param, dut, vlan, "rt_id"])
if params:
[out, exceptions] = exec_all(True, params)
st.log("#########OUTPUT###########")
st.log(out)
st.log(exceptions)
for value in exceptions:
st.log("Exceptions observed {}".format(value))
if value is not None:
return False
for identifier in out:
st.log("Comparing ROOT bridge ID {} with Provided ID {}".format(identifier, bridge_identifier))
if identifier != bridge_identifier:
st.log("Mismatch in root and bridge identifiers")
return False
else:
st.log("Root Bridge Identifier {} is matched with provided identifier {}".format(identifier, bridge_identifier))
return True
return False
def config_bpdu_filter(dut, **kwargs):
"""
API to config BPDU filter for global and interface level
Usage:
======
Interface level config:
=========================
config_bpdu_filter(dut, interface="Ethernet8", action="enable", cli_type="klish")
config_bpdu_filter(dut, interface="Ethernet8", no_form=True, cli_type="klish")
Global level config:
====================
config_bpdu_filter(dut, cli_type="klish")
config_bpdu_filter(dut, ,no_form=True, cli_type="klish")
:param dut:
:param kwargs:
:return:
"""
cli_type = kwargs.get("cli_type", "klish")
interface=kwargs.get("interface",None)
no_form=kwargs.get("no_form", None)
action=kwargs.get("action", "enable")
commands = list()
if not interface:
command = "spanning-tree edge-port bpdufilter default"
if no_form:
command = "no {}".format(command)
commands.append(command)
else:
interface_details = utils.get_interface_number_from_name(interface)
if not interface_details:
st.log("Interface details not found {}".format(interface_details))
return False
commands.append("interface {} {}".format(interface_details.get("type"), interface_details.get("number")))
command = "spanning-tree bpdufilter"
if no_form:
command = "no {}".format(command)
elif action:
command = "{} {}".format(command, action)
else:
command = ""
if command:
commands.append(command)
if commands:
st.config(dut, commands, type=cli_type)
return True
return False
def config_stp_root_bridge_by_vlan(stp_data):
"""
:param stp_data: {dut1: {"vlan":10, "priority": "0"}, dut2: {"vlan":20, "priority": "0"}, dut3: {"vlan":30, "priority": "0"}}
"""
params = list()
for dut, data in stp_data.items():
params.append(utility.ExecAllFunc(config_stp_vlan_parameters, dut, data["vlan"], priority=data["priority"]))
[out, exceptions] = exec_all(True, params)
ensure_no_exception(exceptions)
def config_port_type(dut, interface, stp_type="rpvst", port_type="edge", no_form=False, cli_type="klish"):
"""
API to config/unconfig the port type in RPVST
:param dut:
:param port_type:
:param no_form:
:return:
"""
commands = list()
command = "spanning-tree port type {}".format(port_type) if not no_form else "no spanning-tree port type"
interface_details = utils.get_interface_number_from_name(interface)
if not interface_details:
st.log("Interface details not found {}".format(interface_details))
return False
commands.append("interface {} {}".format(interface_details.get("type"), interface_details.get("number")))
commands.append(command)
commands.append('exit')
st.config(dut, commands, type=cli_type)
return True
def show_stp_config_using_klish(dut, type="", vlan="", intf=""):
if type == 'statistics':
command = "show spanning-tree counters vlan {}".format(vlan)
elif type == 'root_guard':
command = "show spanning-tree inconsistentports vlan {}".format(vlan)
elif type == 'bpdu_guard':
command = "show spanning-tree bpdu-guard"
elif type == "vlan_intf":
command = "show spanning-tree vlan {} interface {}".format(vlan, intf)
# elif type == "vlan":
# command = "show spanning-tree vlan {}".format(vlan)
st.show(dut, command, type="klish", skip_tmpl=True)
def verify_stp_intf_status(dut, vlanid, interface, status):
"""
API to poll for stp stauts for an interface
:param dut:
:param vlanid:
:param interface:
:param status:
:return:
"""
if get_stp_port_param(dut, vlanid, interface, "port_state") == status:
st.log("Port status is changed to {}".format(status))
return True
return False | config_stp_interface |
test_sequence_sampler.py | import collections
import functools
import mxnet as mx
import numpy as np
import scipy
import pytest
from mxnet.gluon import nn, HybridBlock
from numpy.testing import assert_allclose
from gluonnlp.sequence_sampler import BeamSearchScorer, BeamSearchSampler
mx.npx.set_np()
@pytest.mark.parametrize('length', [False, True])
@pytest.mark.parametrize('alpha', [0.0, 1.0])
@pytest.mark.parametrize('K', [1.0, 5.0])
@pytest.mark.parametrize('batch_size', [1, 2])
@pytest.mark.parametrize('vocab_size', [2, 5])
@pytest.mark.parametrize('from_logits', [False, True])
@pytest.mark.parametrize('hybridize', [False, True])
def test_beam_search_score(length, alpha, K, batch_size, vocab_size, from_logits, hybridize):
scorer = BeamSearchScorer(alpha=alpha, K=K, from_logits=from_logits)
if hybridize:
|
sum_log_probs = mx.np.zeros((batch_size,))
scores = mx.np.zeros((batch_size,))
for step in range(1, length + 1):
if not from_logits:
log_probs = np.random.normal(0, 1, (batch_size, vocab_size))
log_probs = np.log((scipy.special.softmax(log_probs, axis=-1)))
else:
log_probs = np.random.uniform(-10, 0, (batch_size, vocab_size))
log_probs = mx.np.array(log_probs, dtype=np.float32)
sum_log_probs += log_probs[:, 0]
scores = scorer(log_probs, scores, mx.np.array(step))[:, 0]
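    # Reference score: sum of chosen log-probs divided by the GNMT length penalty ((K + L)^alpha / (K + 1)^alpha).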
lp = (K + length) ** alpha / (K + 1) ** alpha
assert_allclose(scores.asnumpy(), sum_log_probs.asnumpy() / lp, 1E-5, 1E-5)
# TODO(sxjscience) Test for the state_batch_axis
@pytest.mark.parametrize('early_return', [False, True])
@pytest.mark.parametrize('eos_id', [0, None])
def test_beam_search(early_return, eos_id):
class SimpleStepDecoder(HybridBlock):
def __init__(self, vocab_size=5, hidden_units=4):
super().__init__()
self.x2h_map = nn.Embedding(input_dim=vocab_size, output_dim=hidden_units)
self.h2h_map = nn.Dense(units=hidden_units, flatten=False)
self.vocab_map = nn.Dense(units=vocab_size, flatten=False)
@property
def state_batch_axis(self):
return 0
@property
def data_batch_axis(self):
return 0
def hybrid_forward(self, F, data, state):
"""
Parameters
----------
F
data :
(batch_size,)
states :
(batch_size, C)
Returns
-------
out :
(batch_size, vocab_size)
new_state :
(batch_size, C)
"""
new_state = self.h2h_map(state)
out = self.vocab_map(self.x2h_map(data) + new_state)
return out, new_state
vocab_size = 3
batch_size = 2
hidden_units = 3
beam_size = 4
step_decoder = SimpleStepDecoder(vocab_size, hidden_units)
step_decoder.initialize()
sampler = BeamSearchSampler(beam_size=4, decoder=step_decoder, eos_id=eos_id, vocab_size=vocab_size,
max_length_b=100, early_return=early_return)
states = mx.np.random.normal(0, 1, (batch_size, hidden_units))
inputs = mx.np.random.randint(0, vocab_size, (batch_size,))
samples, scores, valid_length = sampler(inputs, states)
samples = samples.asnumpy()
valid_length = valid_length.asnumpy()
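    # Each beam should end with eos_id (when set), be padded with -1 past its valid length, and start from the fed input token.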
for i in range(batch_size):
for j in range(beam_size):
vl = valid_length[i, j]
if eos_id is not None:
assert samples[i, j, vl - 1] == eos_id
if vl < samples.shape[2]:
assert (samples[i, j, vl:] == -1).all()
assert (samples[i, :, 0] == inputs[i].asnumpy()).all()
# TODO(sxjscience) Test for the state_batch_axis
@pytest.mark.parametrize('early_return', [False, True])
@pytest.mark.parametrize('eos_id', [0, None])
def test_beam_search_stochastic(early_return, eos_id):
class SimpleStepDecoder(HybridBlock):
def __init__(self, vocab_size=5, hidden_units=4):
super().__init__()
self.x2h_map = nn.Embedding(input_dim=vocab_size, output_dim=hidden_units)
self.h2h_map = nn.Dense(units=hidden_units, flatten=False)
self.vocab_map = nn.Dense(units=vocab_size, flatten=False)
@property
def state_batch_axis(self):
return 0
@property
def data_batch_axis(self):
return 0
def hybrid_forward(self, F, data, state):
"""
Parameters
----------
F
data :
(batch_size,)
states :
(batch_size, C)
Returns
-------
out :
(batch_size, vocab_size)
new_state :
(batch_size, C)
"""
new_state = self.h2h_map(state)
out = self.vocab_map(self.x2h_map(data) + new_state)
return out, new_state
vocab_size = 3
batch_size = 2
hidden_units = 3
beam_size = 4
step_decoder = SimpleStepDecoder(vocab_size, hidden_units)
step_decoder.initialize()
sampler = BeamSearchSampler(beam_size=4, decoder=step_decoder, eos_id=eos_id, vocab_size=vocab_size,
stochastic=True, max_length_b=100, early_return=early_return)
states = mx.np.random.normal(0, 1, (batch_size, hidden_units))
inputs = mx.np.random.randint(0, vocab_size, (batch_size,))
samples, scores, valid_length = sampler(inputs, states)
samples = samples.asnumpy()
valid_length = valid_length.asnumpy()
for i in range(batch_size):
for j in range(beam_size):
vl = valid_length[i, j]
if eos_id is not None:
assert samples[i, j, vl-1] == eos_id
if vl < samples.shape[2]:
assert (samples[i, j, vl:] == -1).all()
assert (samples[i, :, 0] == inputs[i].asnumpy()).all()
    # verify that repeated stochastic sampling can produce different samples
has_different_sample = False
for _ in range(10):
new_samples, scores, valid_length = sampler(inputs, states)
if not np.array_equal(new_samples.asnumpy(), samples):
has_different_sample = True
break
assert has_different_sample
@pytest.mark.parametrize('early_return', [False, True])
@pytest.mark.parametrize('sampling_paras', [(-1.0, -1), (0.05, -1), (-1.0, 1), (-1.0, 3)])
@pytest.mark.parametrize('eos_id', [0, None])
def test_multinomial_sampling(early_return, sampling_paras, eos_id):
class SimpleStepDecoder(HybridBlock):
def __init__(self, vocab_size=5, hidden_units=4):
super().__init__()
self.x2h_map = nn.Embedding(input_dim=vocab_size, output_dim=hidden_units)
self.h2h_map = nn.Dense(units=hidden_units, flatten=False)
self.vocab_map = nn.Dense(units=vocab_size, flatten=False)
@property
def state_batch_axis(self):
return 0
@property
def data_batch_axis(self):
return 0
def hybrid_forward(self, F, data, state):
new_state = self.h2h_map(state)
out = self.vocab_map(self.x2h_map(data) + new_state)
return out, new_state
vocab_size = 5
batch_size = 2
hidden_units = 3
beam_size = 4
step_decoder = SimpleStepDecoder(vocab_size, hidden_units)
step_decoder.initialize()
sampling_topp, sampling_topk = sampling_paras
sampler = BeamSearchSampler(beam_size=4, decoder=step_decoder, eos_id=eos_id, vocab_size=vocab_size,
stochastic=False,
sampling=True, sampling_topp=sampling_topp, sampling_topk=sampling_topk,
max_length_b=100, early_return=early_return)
states = mx.np.random.normal(0, 1, (batch_size, hidden_units))
inputs = mx.np.random.randint(0, vocab_size, (batch_size,))
samples, scores, valid_length = sampler(inputs, states)
samples = samples.asnumpy()
valid_length = valid_length.asnumpy()
for i in range(batch_size):
for j in range(beam_size):
vl = valid_length[i, j]
if eos_id is not None:
assert samples[i, j, vl - 1] == eos_id
if vl < samples.shape[2]:
assert (samples[i, j, vl:] == -1).all()
assert (samples[i, :, 0] == inputs[i].asnumpy()).all()
| scorer.hybridize() |
common.py | """ common utilities """
import itertools
import numpy as np
from pandas import (
DataFrame,
Float64Index,
MultiIndex,
Series,
UInt64Index,
date_range,
)
import pandas._testing as tm
def _mklbl(prefix, n):
return [f"{prefix}{i}" for i in range(n)]
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class Base:
""" indexing comprehensive base class """
_kinds = {"series", "frame"}
_typs = {
"ints",
"uints",
"labels",
"mixed",
"ts",
"floats",
"empty",
"ts_rev",
"multi",
}
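    # setup_method builds one Series and one DataFrame fixture per typ and groups them into the self.series / self.frame dicts.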
def setup_method(self, method):
self.series_ints = Series(np.random.rand(4), index=np.arange(0, 8, 2))
self.frame_ints = DataFrame(
np.random.randn(4, 4), index=np.arange(0, 8, 2), columns=np.arange(0, 12, 3)
)
self.series_uints = Series(
np.random.rand(4), index=UInt64Index(np.arange(0, 8, 2))
)
self.frame_uints = DataFrame(
np.random.randn(4, 4),
index=UInt64Index(range(0, 8, 2)),
columns=UInt64Index(range(0, 12, 3)),
)
self.series_floats = Series(
np.random.rand(4), index=Float64Index(range(0, 8, 2))
)
self.frame_floats = DataFrame(
np.random.randn(4, 4),
index=Float64Index(range(0, 8, 2)),
columns=Float64Index(range(0, 12, 3)),
)
m_idces = [
MultiIndex.from_product([[1, 2], [3, 4]]),
MultiIndex.from_product([[5, 6], [7, 8]]),
MultiIndex.from_product([[9, 10], [11, 12]]),
]
self.series_multi = Series(np.random.rand(4), index=m_idces[0])
self.frame_multi = DataFrame(
np.random.randn(4, 4), index=m_idces[0], columns=m_idces[1]
)
self.series_labels = Series(np.random.randn(4), index=list("abcd"))
self.frame_labels = DataFrame(
np.random.randn(4, 4), index=list("abcd"), columns=list("ABCD")
)
self.series_mixed = Series(np.random.randn(4), index=[2, 4, "null", 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4), index=[2, 4, "null", 8])
self.series_ts = Series(
np.random.randn(4), index=date_range("20130101", periods=4)
)
self.frame_ts = DataFrame(
np.random.randn(4, 4), index=date_range("20130101", periods=4)
)
dates_rev = date_range("20130101", periods=4).sort_values(ascending=False)
self.series_ts_rev = Series(np.random.randn(4), index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4), index=dates_rev)
self.frame_empty = DataFrame()
self.series_empty = Series(dtype=object)
# form agglomerates
for kind in self._kinds:
d = {}
for typ in self._typs:
d[typ] = getattr(self, f"{kind}_{typ}")
setattr(self, kind, d)
def generate_indices(self, f, values=False):
"""
generate the indices
if values is True, use the axis values;
if False, use the range
"""
axes = f.axes
if values:
axes = (list(range(len(ax))) for ax in axes)
return itertools.product(*axes)
def get_value(self, name, f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
elif name == "iat":
return f.iloc[i]
else:
assert name == "at"
return f.loc[i]
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indices = itertools.product(*axes)
for i in indices:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, method, key, typs=None, axes=None, fails=None):
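"""
apply the indexer `method` with `key` along each requested axis for every
selected container kind/typ; exceptions are only tolerated when their type
is listed in `fails`
"""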
def | (axis, obj, key):
""" compare equal for these 2 keys """
axified = _axify(obj, key, axis)
try:
getattr(obj, method).__getitem__(axified)
except (IndexError, TypeError, KeyError) as detail:
# if the exception type is listed in fails, that's ok; otherwise re-raise it
if fails is not None:
if isinstance(detail, fails):
return
raise
if typs is None:
typs = self._typs
if axes is None:
axes = [0, 1]
else:
assert axes in [0, 1]
axes = [axes]
# check
for kind in self._kinds:
d = getattr(self, kind)
for ax in axes:
for typ in typs:
assert typ in self._typs
obj = d[typ]
if ax < obj.ndim:
_eq(axis=ax, obj=obj, key=key)
| _eq |
coins.ts | import { Requester, Validator } from '@chainlink/ea-bootstrap'
import { ExecuteWithConfig, Config, InputParameters } from '@chainlink/types'
import overrides from '../config/symbols.json'
export const supportedEndpoints = ['coins']
export const inputParameters: InputParameters = {}
export interface CoinsResponse {
id: string
name: string
symbol: string
rank: number
is_new: boolean
is_active: boolean
type: string
}
export const execute: ExecuteWithConfig<Config> = async (request, _, config) => {
const validator = new Validator(request, inputParameters, {}, { overrides }) | const jobRunID = validator.validated.id
const url = '/v1/coins'
const options = {
...config.api,
url,
}
const response = await Requester.request<CoinsResponse[]>(options)
return Requester.success(jobRunID, response, true)
} | |
admission_test.go | /*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podsecurity
import (
"context"
"fmt"
"io/ioutil"
"strings"
"testing"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/authentication/user"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/apiserver/pkg/warning"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/core"
v1 "k8s.io/kubernetes/pkg/apis/core/v1"
"k8s.io/kubernetes/pkg/features"
podsecurityadmission "k8s.io/pod-security-admission/admission"
"k8s.io/utils/pointer"
"sigs.k8s.io/yaml"
)
func TestConvert(t *testing.T) {
extractor := podsecurityadmission.DefaultPodSpecExtractor{}
internalTypes := map[schema.GroupResource]runtime.Object{
core.Resource("pods"): &core.Pod{},
core.Resource("replicationcontrollers"): &core.ReplicationController{},
core.Resource("podtemplates"): &core.PodTemplate{},
apps.Resource("replicasets"): &apps.ReplicaSet{},
apps.Resource("deployments"): &apps.Deployment{},
apps.Resource("statefulsets"): &apps.StatefulSet{},
apps.Resource("daemonsets"): &apps.DaemonSet{},
batch.Resource("jobs"): &batch.Job{},
batch.Resource("cronjobs"): &batch.CronJob{},
}
for _, r := range extractor.PodSpecResources() {
internalType, ok := internalTypes[r]
if !ok {
t.Errorf("no internal type registered for %s", r.String())
continue
}
externalType, err := convert(internalType)
if err != nil {
t.Errorf("error converting %T: %v", internalType, err)
continue
}
_, _, err = extractor.ExtractPodSpec(externalType)
if err != nil {
t.Errorf("error extracting from %T: %v", externalType, err)
continue
}
}
}
func BenchmarkVerifyPod(b *testing.B) {
defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, features.PodSecurity, true)()
p, err := newPlugin(nil)
if err != nil {
b.Fatal(err)
}
p.InspectFeatureGates(utilfeature.DefaultFeatureGate)
enforceImplicitPrivilegedNamespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "enforce-implicit", Labels: map[string]string{}}}
enforcePrivilegedNamespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "enforce-privileged", Labels: map[string]string{"pod-security.kubernetes.io/enforce": "privileged"}}}
enforceBaselineNamespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "enforce-baseline", Labels: map[string]string{"pod-security.kubernetes.io/enforce": "baseline"}}}
enforceRestrictedNamespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "enforce-restricted", Labels: map[string]string{"pod-security.kubernetes.io/enforce": "restricted"}}}
warnBaselineNamespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "warn-baseline", Labels: map[string]string{"pod-security.kubernetes.io/warn": "baseline"}}}
warnRestrictedNamespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "warn-restricted", Labels: map[string]string{"pod-security.kubernetes.io/warn": "restricted"}}}
enforceWarnAuditBaseline := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "enforce-warn-audit-baseline", Labels: map[string]string{"pod-security.kubernetes.io/enforce": "baseline", "pod-security.kubernetes.io/warn": "baseline", "pod-security.kubernetes.io/audit": "baseline"}}}
warnBaselineAuditRestrictedNamespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "warn-baseline-audit-restricted", Labels: map[string]string{"pod-security.kubernetes.io/warn": "baseline", "pod-security.kubernetes.io/audit": "restricted"}}}
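// The namespaces above cover the enforce/warn/audit pod-security levels (and combinations)
// exercised by the benchmark cases below.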
c := fake.NewSimpleClientset(
enforceImplicitPrivilegedNamespace,
enforcePrivilegedNamespace,
enforceBaselineNamespace,
enforceRestrictedNamespace,
warnBaselineNamespace,
warnRestrictedNamespace,
enforceWarnAuditBaseline,
warnBaselineAuditRestrictedNamespace,
)
p.SetExternalKubeClientSet(c)
informerFactory := informers.NewSharedInformerFactory(c, 0)
p.SetExternalKubeInformerFactory(informerFactory)
stopCh := make(chan struct{})
defer close(stopCh)
informerFactory.Start(stopCh)
informerFactory.WaitForCacheSync(stopCh)
if err := p.ValidateInitialization(); err != nil {
b.Fatal(err)
}
corePod := &core.Pod{}
v1Pod := &corev1.Pod{}
data, err := ioutil.ReadFile("testdata/pod_restricted.yaml")
if err != nil {
b.Fatal(err)
}
if err := yaml.Unmarshal(data, v1Pod); err != nil {
b.Fatal(err)
}
if err := v1.Convert_v1_Pod_To_core_Pod(v1Pod, corePod, nil); err != nil {
b.Fatal(err)
}
appsDeployment := &apps.Deployment{
ObjectMeta: metav1.ObjectMeta{Name: "mydeployment"},
Spec: apps.DeploymentSpec{
Template: core.PodTemplateSpec{
ObjectMeta: corePod.ObjectMeta,
Spec: corePod.Spec,
},
},
}
namespaces := []string{
"enforce-implicit", "enforce-privileged", "enforce-baseline", "enforce-restricted",
"warn-baseline", "warn-restricted",
"enforce-warn-audit-baseline", "warn-baseline-audit-restricted",
}
for _, namespace := range namespaces {
b.Run(namespace+"_pod", func(b *testing.B) {
ctx := context.Background()
attrs := admission.NewAttributesRecord(
corePod.DeepCopy(), nil,
schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"},
namespace, "mypod",
schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"},
"",
admission.Create, &metav1.CreateOptions{}, false,
&user.DefaultInfo{Name: "myuser"},
)
b.ResetTimer()
for i := 0; i < b.N; i++ {
if err := p.Validate(ctx, attrs, nil); err != nil {
b.Fatal(err)
}
}
})
b.Run(namespace+"_deployment", func(b *testing.B) {
ctx := context.Background()
attrs := admission.NewAttributesRecord(
appsDeployment.DeepCopy(), nil,
schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"},
namespace, "mydeployment",
schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"},
"",
admission.Create, &metav1.CreateOptions{}, false,
&user.DefaultInfo{Name: "myuser"},
)
b.ResetTimer()
for i := 0; i < b.N; i++ {
if err := p.Validate(ctx, attrs, nil); err != nil {
b.Fatal(err)
}
}
})
}
}
func BenchmarkVerifyNamespace(b *testing.B) {
defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, features.PodSecurity, true)()
p, err := newPlugin(nil)
if err != nil {
b.Fatal(err)
}
p.InspectFeatureGates(utilfeature.DefaultFeatureGate)
namespace := "enforce"
enforceNamespaceBaselineV1 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace, Labels: map[string]string{"pod-security.kubernetes.io/enforce": "baseline"}}}
enforceNamespaceRestrictedV1 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace, Labels: map[string]string{"pod-security.kubernetes.io/enforce": "restricted"}}}
enforceNamespaceBaselineCore := &core.Namespace{}
if err := v1.Convert_v1_Namespace_To_core_Namespace(enforceNamespaceBaselineV1, enforceNamespaceBaselineCore, nil); err != nil {
b.Fatal(err)
}
enforceNamespaceRestrictedCore := &core.Namespace{}
if err := v1.Convert_v1_Namespace_To_core_Namespace(enforceNamespaceRestrictedV1, enforceNamespaceRestrictedCore, nil); err != nil |
v1Pod := &corev1.Pod{}
data, err := ioutil.ReadFile("testdata/pod_baseline.yaml")
if err != nil {
b.Fatal(err)
}
if err := yaml.Unmarshal(data, v1Pod); err != nil {
b.Fatal(err)
}
// https://github.com/kubernetes/community/blob/master/sig-scalability/configs-and-limits/thresholds.md#kubernetes-thresholds
ownerA := metav1.OwnerReference{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: "myapp-123123",
UID: types.UID("7610a7f4-8f80-4f88-95b5-6cefdd8e9dbd"),
Controller: pointer.Bool(true),
}
ownerB := metav1.OwnerReference{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: "myapp-234234",
UID: types.UID("7610a7f4-8f80-4f88-95b5-as765as76f55"),
Controller: pointer.Bool(true),
}
// number of warnings printed for the entire namespace
namespaceWarningCount := 1
podCount := 3000
objects := make([]runtime.Object, 0, podCount+1)
objects = append(objects, enforceNamespaceBaselineV1)
for i := 0; i < podCount; i++ {
v1PodCopy := v1Pod.DeepCopy()
v1PodCopy.Name = fmt.Sprintf("pod%d", i)
v1PodCopy.UID = types.UID(fmt.Sprintf("pod%d", i))
v1PodCopy.Namespace = namespace
switch i % 3 {
case 0:
v1PodCopy.OwnerReferences = []metav1.OwnerReference{ownerA}
case 1:
v1PodCopy.OwnerReferences = []metav1.OwnerReference{ownerB}
default:
// no owner references
}
objects = append(objects, v1PodCopy)
}
c := fake.NewSimpleClientset(
objects...,
)
p.SetExternalKubeClientSet(c)
informerFactory := informers.NewSharedInformerFactory(c, 0)
p.SetExternalKubeInformerFactory(informerFactory)
stopCh := make(chan struct{})
defer close(stopCh)
informerFactory.Start(stopCh)
informerFactory.WaitForCacheSync(stopCh)
if err := p.ValidateInitialization(); err != nil {
b.Fatal(err)
}
ctx := context.Background()
attrs := admission.NewAttributesRecord(
enforceNamespaceRestrictedCore.DeepCopy(), enforceNamespaceBaselineCore.DeepCopy(),
schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"},
namespace, namespace,
schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
"",
admission.Update, &metav1.UpdateOptions{}, false,
&user.DefaultInfo{Name: "myuser"},
)
b.ResetTimer()
for i := 0; i < b.N; i++ {
dc := dummyRecorder{agent: "", text: ""}
ctxWithRecorder := warning.WithWarningRecorder(ctx, &dc)
if err := p.Validate(ctxWithRecorder, attrs, nil); err != nil {
b.Fatal(err)
}
// should either be a single aggregated warning, or a unique warning per pod
if dc.count != (1+namespaceWarningCount) && dc.count != (podCount+namespaceWarningCount) {
b.Fatalf("expected either %d or %d warnings, got %d", 1+namespaceWarningCount, podCount+namespaceWarningCount, dc.count)
}
// warning should contain the runAsNonRoot issue
if e, a := "runAsNonRoot", dc.text; !strings.Contains(a, e) {
b.Fatalf("expected warning containing %q, got %q", e, a)
}
}
}
type dummyRecorder struct {
count int
agent string
text string
}
func (r *dummyRecorder) AddWarning(agent, text string) {
r.count++
r.agent = agent
r.text = text
return
}
var _ warning.Recorder = &dummyRecorder{}
| {
b.Fatal(err)
} |
main.go | // Copyright © 2018 Inanc Gumus
// Learn Go Programming Course
// License: https://creativecommons.org/licenses/by-nc-sa/4.0/
//
// For more tutorials : https://learngoprogramming.com
// In-person training : https://www.linkedin.com/in/inancgumus/
// Follow me on twitter: https://twitter.com/inancgumus
package main
// ---------------------------------------------------------
// EXERCISE: Slice the numbers
//
// We've a string that contains even and odd numbers.
//
// 1. Convert the string to an []int
//
// 2. Print the slice
//
// 3. Slice it for the even numbers and print it (assign it to a new slice variable)
//
// 4. Slice it for the odd numbers and print it (assign it to a new slice variable)
//
// 5. Slice it for the two numbers at the middle
//
// 6. Slice it for the first two numbers
//
// 7. Slice it for the last two numbers (use the len function)
//
// 8. Slice the evens slice for the last one number
//
// 9. Slice the odds slice for the last two numbers
//
//
// EXPECTED OUTPUT
// go run main.go
//
// nums : [2 4 6 1 3 5]
// evens : [2 4 6]
// odds : [1 3 5]
// middle : [6 1]
// first 2 : [2 4]
// last 2 : [3 5]
// evens last 1: [6]
// odds last 2 : [3 5]
//
//
// NOTE
//
// You can also use my prettyslice package for printing the slices.
//
//
// HINT
//
// Find a function in the strings package for splitting the string into []string
//
// ---------------------------------------------------------
func main() { |
// uncomment the declaration below
// data := "2 4 6 1 3 5"
}
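// A minimal sketch of one possible solution (not part of the original exercise), assuming
// strings.Fields and strconv.Atoi from the standard library; kept as comments so main stays empty.
//
//   parts := strings.Fields(data)                 // ["2" "4" "6" "1" "3" "5"]
//   nums := make([]int, len(parts))
//   for i, p := range parts {
//       nums[i], _ = strconv.Atoi(p)              // 2. nums: [2 4 6 1 3 5]
//   }
//   evens, odds := nums[:3], nums[3:]             // 3-4. [2 4 6] and [1 3 5]
//   middle := nums[2:4]                           // 5. [6 1]
//   first2, last2 := nums[:2], nums[len(nums)-2:] // 6-7. [2 4] and [3 5]
//   evensLast1 := evens[len(evens)-1:]            // 8. [6]
//   oddsLast2 := odds[len(odds)-2:]               // 9. [3 5]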
|
|
19_SortBy2Criteria.js | function sortByTwo(args) { |
console.log(args.join(' '));
}
sortByTwo(['alpha', 'beta', 'gamma']);
sortByTwo(['Isacc', 'Theodor', 'Jack', 'Harrison', 'George', 'Za', 'Zaazaaazaaaa']);
sortByTwo(['test', 'Deny', 'omen', 'Default']); | args.sort().sort((a, b) => a.length - b.length);
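// Two chained sorts rely on Array.prototype.sort being stable (guaranteed since ES2019):
// sort alphabetically first, then by length, so equal-length names keep alphabetical order.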
//(a, b) => a.toLowerCase() - b.toLowerCase()
// console.log(a.toLowerCase().length);
// console.log(a.toUpperCase().length); |
command_line_risk.py | """NFSP agents trained on simplified risk."""
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
import numpy as np
import igraph as ig
import cairocffi
import random
import pyspiel
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import nfsp
simple_adj = [ [1, 2],[0, 2, 3],[0, 1, 3],[1, 2, 4],[3, 5, 6],[6, 7, 8, 10, 11],[4, 5, 8, 34],[5, 9, 11, 12, 14],[5, 6, 10, 40],[7, 14],[5, 8, 11, 40],[5, 7, 10, 12, 13],[7, 11, 13, 14],[11, 12, 14],[7, 9, 12, 13, 15],[14, 16, 20],[15, 17, 19, 20],[16, 18, 19, 38],[17, 19, 22],[16, 17, 18, 22, 21, 20],[15, 16, 19, 21],[19, 20, 22, 23],[18, 19, 21, 23],[21, 22, 24],[23, 25, 27],[24, 26, 27],[25, 27],[24, 25, 26, 28],[27, 29, 33, 35, 36],[28, 30, 32],[29, 31, 32],[30, 32],[29, 30, 33, 34],[28, 32, 34, 35],[6, 32, 33, 40],[28, 33, 34, 36, 40, 41],[28, 35, 37, 41],[36, 38, 39, 41],[37, 39],[37, 38, 40, 41],[8, 10, 34, 35, 39, 41],[35, 36, 37, 39, 40] ]
vertex_lis = []
for i in range(len(simple_adj)):
for j in simple_adj[i]:
if (i,j) not in vertex_lis and (j,i) not in vertex_lis:
vertex_lis.append((i,j))
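# vertex_lis now lists each undirected border exactly once (only one of (i, j)/(j, i) is kept)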
FLAGS = flags.FLAGS
SEED = 12983641
final_policy_type = pyspiel.ISMCTSFinalPolicyType.MAX_VISIT_COUNT
"""final_policy_type = pyspiel.ISMCTSFinalPolicyType.MAX_VALUE"""
evaluator = pyspiel.RandomRolloutEvaluator(1, SEED)
bot = pyspiel.ISMCTSBot(SEED, evaluator, 0.75, 1000, -1, final_policy_type,
False, False)
human_id = 1
bot_id = 0
def visualise(state,player_id):
g = ig.Graph()
g.add_vertices(42)
for i in vertex_lis:
g.add_edges([(i[0],i[1])])
colour_dict = {0:'red',0.5:'black',1:'blue'}
g.vs["terr_no"]=[i for i in range(42)]
troops=[0 for i in range(42)]
ownership=[0.5 for i in range(42)] | for terr in range(42):
if player == 0 and info_state[44+terr]>0:
ownership[terr]=0
troops[terr]=info_state[44+terr]
if player == 1 and info_state[86+terr]>0:
ownership[terr]=1
troops[terr]=info_state[86+terr]
g.vs["player"]=ownership
g.vs["troops"]=troops
g.vs["label"]=["______"+str(g.vs["terr_no"][i])+","+str(g.vs["troops"][i]) for i in range(42)]
layout = g.layout_kamada_kawai()
return(ig.plot(g,layout=layout,vertex_color = [colour_dict[player] for player in g.vs["player"]]))
def main(unused_argv):
game = pyspiel.load_game("risk")
state = game.new_initial_state()
count = 0
while not state.is_terminal():
"""if count <160:
actions = state.legal_actions()
action = random.choice(actions)
state.apply_action(action)
count+=1
continue"""
current_player = state.current_player()
if state.is_chance_node():
state.apply_action(0)
elif current_player ==human_id:
visualise(state,human_id)
info_state = state.information_state_tensor(human_id)
print(info_state[:42])
print(info_state[-4:-2])
legal = state.legal_actions()
print(state.legal_actions())
action = "1000"
while int(action) not in legal:
action = input("Action:")
if action =="":
action = "1000"
state.apply_action(int(action))
elif current_player == bot_id:
action = bot.step(state)
print("Bot action:"+str(action))
state.apply_action(action)
print(state.rewards())
if __name__ == "__main__":
app.run(main) | info_state =state.information_state_tensor(player_id)
for player in range(2): |
surface.rs | use {Scalar, TOLERANCE};
use maths::{CrossProduct, DotProduct, UnitVec3D, Vec3D};
/// Represents a `Surface` for a given set of points.
#[derive(Copy, Clone)]
pub struct Surface {
/// The `Surface` normal
pub normal: UnitVec3D,
/// The node indices associated with the `Surface`
pub nodes: [usize; 3],
}
impl Surface {
/// Creates a new `Surface` from the point cloud and indices provided.
pub fn new(vertices: &Vec<Vec3D>, index_0: usize, index_1: usize, index_2: usize) -> Surface {
let reference_point = vertices.iter()
.fold(Vec3D::zero(), |total, &vector| {
total + vector
}) / (vertices.len() as Scalar);
let base = vertices[index_0];
let relative_to_reference = base - reference_point;
let edge_0 = vertices[index_1] - base;
let edge_1 = vertices[index_2] - base;
let mut normal = edge_0.cross(edge_1).normalize();
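// Flip the normal if it points towards the centroid of the point cloud,
// so that every surface normal faces outward.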
if normal.dot(relative_to_reference) < TOLERANCE {
normal = -normal;
}
return Surface {
normal: normal,
nodes: [index_0, index_1, index_2],
};
}
/// Computes the centroid of a `Surface` using the node indices in the
/// `Surface` and the point cloud provided.
pub fn compute_centroid(surface: &Surface, vertices: &Vec<Vec3D>) -> Vec3D |
}
| {
return surface.nodes.iter()
.fold(Vec3D::zero(), |total, &index| {
total + vertices[index]
}) / 3.0;
} |
moose.rs | use krator::{Manifest, ObjectState, ObjectStatus, Operator, State, Transition, TransitionTo};
use kube::api::{ListParams, Resource};
use kube::CustomResourceExt;
use kube_derive::CustomResource;
use rand::seq::IteratorRandom;
use rand::Rng;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use structopt::StructOpt;
use tokio::sync::RwLock;
use tracing::info;
#[cfg(feature = "admission-webhook")]
use krator_derive::AdmissionWebhook;
#[cfg(feature = "admission-webhook")]
use krator::admission;
#[cfg(feature = "admission-webhook")]
use k8s_openapi::api::core::v1::Secret;
#[cfg(not(feature = "admission-webhook"))]
#[derive(CustomResource, Debug, Serialize, Deserialize, Clone, Default, JsonSchema)]
#[kube(
group = "animals.com",
version = "v1",
kind = "Moose",
derive = "Default",
status = "MooseStatus",
namespaced
)]
struct MooseSpec {
height: f64,
weight: f64,
antlers: bool,
}
#[cfg(feature = "admission-webhook")]
#[derive(
AdmissionWebhook, CustomResource, Debug, Serialize, Deserialize, Clone, Default, JsonSchema,
)]
#[admission_webhook_features(secret, service, admission_webhook_config)]
#[kube(
group = "animals.com",
version = "v1",
kind = "Moose",
derive = "Default",
status = "MooseStatus",
namespaced
)]
struct MooseSpec {
height: f64,
weight: f64,
antlers: bool,
}
#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)]
enum MoosePhase {
Asleep,
Hungry,
Roaming,
}
#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)]
struct MooseStatus {
phase: Option<MoosePhase>,
message: Option<String>,
}
impl ObjectStatus for MooseStatus {
fn failed(e: &str) -> MooseStatus {
MooseStatus {
message: Some(format!("Error tracking moose: {}.", e)),
phase: None,
}
}
fn json_patch(&self) -> serde_json::Value {
// Generate a map containing only set fields.
let mut status = serde_json::Map::new();
if let Some(phase) = self.phase.clone() {
status.insert("phase".to_string(), serde_json::json!(phase));
};
if let Some(message) = self.message.clone() {
status.insert("message".to_string(), serde_json::Value::String(message));
};
// Create status patch with map.
serde_json::json!({ "status": serde_json::Value::Object(status) })
}
}
struct MooseState {
name: String,
food: f64,
}
#[async_trait::async_trait]
impl ObjectState for MooseState {
type Manifest = Moose;
type Status = MooseStatus;
type SharedState = SharedMooseState;
async fn async_drop(self, shared: &mut Self::SharedState) {
shared.friends.remove(&self.name);
}
}
#[derive(Debug, Default)]
/// Moose was tagged for tracking.
struct Tagged;
#[async_trait::async_trait]
impl State<MooseState> for Tagged {
async fn next(
self: Box<Self>,
shared: Arc<RwLock<SharedMooseState>>,
state: &mut MooseState,
_manifest: Manifest<Moose>,
) -> Transition<MooseState> {
info!("Found new moose named {}!", state.name);
shared
.write()
.await
.friends
.insert(state.name.clone(), HashSet::new());
Transition::next(self, Roam)
}
async fn status(
&self,
_state: &mut MooseState,
_manifest: &Moose,
) -> anyhow::Result<MooseStatus> {
Ok(MooseStatus {
phase: Some(MoosePhase::Roaming),
message: None,
})
}
}
// Explicitly implement TransitionTo
impl TransitionTo<Roam> for Tagged {}
// Derive TransitionTo
#[derive(Debug, Default, TransitionTo)]
// Specify valid next states.
#[transition_to(Eat)]
/// Moose is roaming the wilderness.
struct Roam;
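/// Pick another moose at random that has not already befriended `name`,
/// record the new friendship in the shared state, and return its name.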
async fn make_friend(name: &str, shared: &Arc<RwLock<SharedMooseState>>) -> Option<String> {
let mut mooses = shared.write().await;
let mut rng = rand::thread_rng();
let other_meese = mooses
.friends
.keys()
.map(|s| s.to_owned())
.choose_multiple(&mut rng, mooses.friends.len());
for other_moose in other_meese {
if name == other_moose {
continue;
}
let friends = mooses.friends.get_mut(&other_moose).unwrap();
if !friends.contains(name) {
friends.insert(name.to_string());
return Some(other_moose.to_string());
}
}
return None;
}
#[async_trait::async_trait]
impl State<MooseState> for Roam {
async fn next(
self: Box<Self>,
shared: Arc<RwLock<SharedMooseState>>,
state: &mut MooseState,
_manifest: Manifest<Moose>,
) -> Transition<MooseState> {
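// Roam until hungry: burn 5 food every 2 seconds, occasionally (~5% per tick)
// make a friend, and transition to Eat once food drops to 10 or below.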
loop {
tokio::time::sleep(std::time::Duration::from_secs(2)).await;
state.food -= 5.0;
if state.food <= 10.0 {
return Transition::next(self, Eat);
}
let r: f64 = {
let mut rng = rand::thread_rng();
rng.gen()
};
if r < 0.05 {
if let Some(other_moose) = make_friend(&state.name, &shared).await {
info!("{} made friends with {}!", state.name, other_moose);
}
}
}
}
async fn status(
&self,
_state: &mut MooseState,
_manifest: &Moose,
) -> anyhow::Result<MooseStatus> |
}
#[derive(Debug, Default, TransitionTo)]
#[transition_to(Sleep)]
/// Moose is eating.
struct Eat;
#[async_trait::async_trait]
impl State<MooseState> for Eat {
async fn next(
self: Box<Self>,
_shared: Arc<RwLock<SharedMooseState>>,
state: &mut MooseState,
manifest: Manifest<Moose>,
) -> Transition<MooseState> {
let moose = manifest.latest();
state.food = moose.spec.weight / 10.0;
tokio::time::sleep(std::time::Duration::from_secs((state.food / 10.0) as u64)).await;
Transition::next(self, Sleep)
}
async fn status(
&self,
_state: &mut MooseState,
_manifest: &Moose,
) -> anyhow::Result<MooseStatus> {
Ok(MooseStatus {
phase: Some(MoosePhase::Hungry),
message: Some("*munch*".to_string()),
})
}
}
#[derive(Debug, Default, TransitionTo)]
#[transition_to(Roam)]
/// Moose is sleeping.
struct Sleep;
#[async_trait::async_trait]
impl State<MooseState> for Sleep {
async fn next(
self: Box<Self>,
_shared: Arc<RwLock<SharedMooseState>>,
_state: &mut MooseState,
_manifest: Manifest<Moose>,
) -> Transition<MooseState> {
tokio::time::sleep(std::time::Duration::from_secs(20)).await;
Transition::next(self, Roam)
}
async fn status(
&self,
_state: &mut MooseState,
_manifest: &Moose,
) -> anyhow::Result<MooseStatus> {
Ok(MooseStatus {
phase: Some(MoosePhase::Asleep),
message: Some("zzzzzz".to_string()),
})
}
}
#[derive(Debug, Default)]
/// Moose was released from our care.
struct Released;
#[async_trait::async_trait]
impl State<MooseState> for Released {
async fn next(
self: Box<Self>,
_shared: Arc<RwLock<SharedMooseState>>,
_state: &mut MooseState,
_manifest: Manifest<Moose>,
) -> Transition<MooseState> {
info!("Moose tagged for release!");
Transition::Complete(Ok(()))
}
async fn status(
&self,
state: &mut MooseState,
_manifest: &Moose,
) -> anyhow::Result<MooseStatus> {
Ok(MooseStatus {
phase: None,
message: Some(format!("Bye, {}!", state.name)),
})
}
}
struct SharedMooseState {
friends: HashMap<String, HashSet<String>>,
#[cfg(feature = "admission-webhook")]
client: kube::Client,
}
struct MooseTracker {
shared: Arc<RwLock<SharedMooseState>>,
}
impl MooseTracker {
#[cfg(feature = "admission-webhook")]
fn new(client: &kube::Client) -> Self {
let shared = Arc::new(RwLock::new(SharedMooseState {
friends: HashMap::new(),
client: client.to_owned(),
}));
MooseTracker { shared }
}
#[cfg(not(feature = "admission-webhook"))]
fn new() -> Self {
let shared = Arc::new(RwLock::new(SharedMooseState {
friends: HashMap::new(),
}));
MooseTracker { shared }
}
}
#[async_trait::async_trait]
impl Operator for MooseTracker {
type Manifest = Moose;
type Status = MooseStatus;
type InitialState = Tagged;
type DeletedState = Released;
type ObjectState = MooseState;
async fn initialize_object_state(
&self,
manifest: &Self::Manifest,
) -> anyhow::Result<Self::ObjectState> {
let name = manifest.meta().name.clone().unwrap();
Ok(MooseState {
name,
food: manifest.spec.weight / 10.0,
})
}
async fn shared_state(&self) -> Arc<RwLock<SharedMooseState>> {
Arc::clone(&self.shared)
}
#[cfg(feature = "admission-webhook")]
async fn admission_hook(
&self,
manifest: Self::Manifest,
) -> krator::admission::AdmissionResult<Self::Manifest> {
use k8s_openapi::apimachinery::pkg::apis::meta::v1::Status;
// All moose names start with "M"
let name = manifest.meta().name.clone().unwrap();
info!("Processing admission hook for moose named {}", name);
match name.chars().next() {
Some('m') | Some('M') => krator::admission::AdmissionResult::Allow(manifest),
_ => krator::admission::AdmissionResult::Deny(Status {
code: Some(400),
message: Some("Mooses may only have names starting with 'M'.".to_string()),
status: Some("Failure".to_string()),
..Default::default()
}),
}
}
#[cfg(feature = "admission-webhook")]
async fn admission_hook_tls(&self) -> anyhow::Result<krator::admission::AdmissionTls> {
let client = self.shared.read().await.client.clone();
let secret_name = Moose::admission_webhook_secret_name();
let opt = Opt::from_args();
let secret = kube::Api::<Secret>::namespaced(client, &opt.webhook_namespace)
.get(&secret_name)
.await?;
Ok(admission::AdmissionTls::from(&secret)?)
}
}
#[derive(Debug, StructOpt)]
#[structopt(
name = "moose",
about = "An example Operator for `Moose` custom resources."
)]
struct Opt {
/// Send traces to Jaeger.
/// Configure with the standard environment variables:
/// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#jaeger-exporter
#[structopt(long)]
jaeger: bool,
/// Configure logger to emit JSON output.
#[structopt(long)]
json: bool,
/// output moose crd manifest
#[structopt(long)]
output_crd: bool,
#[cfg(feature = "admission-webhook")]
/// output webhook resources manifests for the given namespace
#[structopt(long)]
output_webhook_resources_for_namespace: Option<String>,
#[cfg(feature = "admission-webhook")]
/// namespace where to install the admission webhook service and secret
#[structopt(long, default_value = "default")]
webhook_namespace: String,
}
fn init_logger(opt: &Opt) -> anyhow::Result<Option<opentelemetry_jaeger::Uninstall>> {
// This isn't very DRY, but all of these combinations have different types,
// and Boxing them doesn't seem to work.
let guard = if opt.json {
let subscriber = tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.json()
.finish();
if opt.jaeger {
use tracing_subscriber::layer::SubscriberExt;
let (tracer, _uninstall) = opentelemetry_jaeger::new_pipeline()
.from_env()
.with_service_name("moose_operator")
.install()?;
let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
let subscriber = subscriber.with(telemetry);
tracing::subscriber::set_global_default(subscriber)?;
Some(_uninstall)
} else {
tracing::subscriber::set_global_default(subscriber)?;
None
}
} else {
let subscriber = tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.pretty()
.finish();
if opt.jaeger {
use tracing_subscriber::layer::SubscriberExt;
let (tracer, _uninstall) = opentelemetry_jaeger::new_pipeline()
.from_env()
.with_service_name("moose_operator")
.install()?;
let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
let subscriber = subscriber.with(telemetry);
tracing::subscriber::set_global_default(subscriber)?;
Some(_uninstall)
} else {
tracing::subscriber::set_global_default(subscriber)?;
None
}
};
Ok(guard)
}
#[tokio::main(flavor = "multi_thread")]
async fn main() -> anyhow::Result<()> {
let opt = Opt::from_args();
let _guard = init_logger(&opt)?;
if opt.output_crd {
println!("{}", serde_yaml::to_string(&Moose::crd()).unwrap());
return Ok(());
}
let kubeconfig = kube::Config::infer().await?;
let tracker;
#[cfg(feature = "admission-webhook")]
{
use anyhow::Context;
use kube::api::ResourceExt;
let client = kube::Client::try_default().await?;
let api = kube::Api::<k8s_openapi::apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition>::all(client.to_owned());
let crd = api.get(&Moose::crd().name()).await.context("moose crd needs to be installed first -- generate the necessary manifests with --output-crd")?;
if let Some(namespace) = opt.output_webhook_resources_for_namespace {
let resources = krator::admission::WebhookResources::from(
Moose::admission_webhook_resources(&namespace),
)
.add_owner(&crd);
println!("{}", resources);
return Ok(());
}
tracker = MooseTracker::new(&client);
}
#[cfg(not(feature = "admission-webhook"))]
{
tracker = MooseTracker::new();
}
// Only track mooses in Glacier NP
let params = ListParams::default().labels("nps.gov/park=glacier");
info!("starting mooses operator");
#[cfg(feature = "admission-webhook")]
info!(
r#"
If you run this example outside of Kubernetes (i.e. with `cargo run`), you need to make the webhook available.
Try the script example/assets/use-external-endpoint.sh to redirect webhook traffic to this process. If this
operator runs within Kubernetes and you use the webhook resources provided by the admission-webhook macro,
make sure your deployment has the following labels set:
app={}
"#,
Moose::admission_webhook_service_app_selector()
);
info!(
r#"
Running moose example. Try to install some of the manifests provided in examples/assets
"#
);
// New API does not currently support Webhooks, so use legacy API if enabled.
#[cfg(feature = "admission-webhook")]
{
use krator::OperatorRuntime;
let mut runtime = OperatorRuntime::new(&kubeconfig, tracker, Some(params));
runtime.start().await;
}
#[cfg(not(feature = "admission-webhook"))]
{
use krator::{ControllerBuilder, Manager};
let mut manager = Manager::new(&kubeconfig);
let controller = ControllerBuilder::new(tracker).with_params(params);
manager.register_controller(controller);
manager.start().await;
}
Ok(())
}
| {
Ok(MooseStatus {
phase: Some(MoosePhase::Roaming),
message: Some("Gahrooo!".to_string()),
})
} |
hybridConnection.ts | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
import * as pulumi from "@pulumi/pulumi";
import * as utilities from "../utilities";
/**
* Manages an App Service Hybrid Connection for an existing App Service, Relay and Service Bus.
*
* ## Example Usage
*
* This example provisions an App Service, a Relay Hybrid Connection, and a Service Bus using their outputs to create the App Service Hybrid Connection.
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as azure from "@pulumi/azure";
*
* const exampleResourceGroup = new azure.core.ResourceGroup("exampleResourceGroup", {location: "West Europe"});
* const examplePlan = new azure.appservice.Plan("examplePlan", {
* location: exampleResourceGroup.location,
* resourceGroupName: exampleResourceGroup.name,
* sku: {
* tier: "Standard",
* size: "S1",
* },
* });
* const exampleAppService = new azure.appservice.AppService("exampleAppService", {
* location: exampleResourceGroup.location,
* resourceGroupName: exampleResourceGroup.name,
* appServicePlanId: examplePlan.id,
* });
* const exampleNamespace = new azure.relay.Namespace("exampleNamespace", {
* location: exampleResourceGroup.location,
* resourceGroupName: exampleResourceGroup.name,
* skuName: "Standard",
* });
* const exampleHybridConnection = new azure.relay.HybridConnection("exampleHybridConnection", {
* resourceGroupName: exampleResourceGroup.name,
* relayNamespaceName: exampleNamespace.name,
* userMetadata: "examplemetadata",
* });
* const exampleAppservice_hybridConnectionHybridConnection = new azure.appservice.HybridConnection("exampleAppservice/hybridConnectionHybridConnection", {
* appServiceName: exampleAppService.name,
* resourceGroupName: exampleResourceGroup.name,
* relayId: exampleHybridConnection.id,
* hostname: "testhostname.example",
* port: 8080,
* sendKeyName: "exampleSharedAccessKey",
* });
* ```
*
* ## Import
*
* App Service Hybrid Connections can be imported using the `resource id`, e.g.
*
* ```sh
* $ pulumi import azure:appservice/hybridConnection:HybridConnection example /subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/exampleResourceGroup1/providers/Microsoft.Web/sites/exampleAppService1/hybridConnectionNamespaces/exampleRN1/relays/exampleRHC1
* ```
*/
export class HybridConnection extends pulumi.CustomResource {
/**
* Get an existing HybridConnection resource's state with the given name, ID, and optional extra
* properties used to qualify the lookup.
*
* @param name The _unique_ name of the resulting resource.
* @param id The _unique_ provider ID of the resource to lookup.
* @param state Any extra arguments used during the lookup.
* @param opts Optional settings to control the behavior of the CustomResource.
*/
public static get(name: string, id: pulumi.Input<pulumi.ID>, state?: HybridConnectionState, opts?: pulumi.CustomResourceOptions): HybridConnection {
return new HybridConnection(name, <any>state, { ...opts, id: id });
}
/** @internal */
public static readonly __pulumiType = 'azure:appservice/hybridConnection:HybridConnection';
/**
* Returns true if the given object is an instance of HybridConnection. This is designed to work even | */
public static isInstance(obj: any): obj is HybridConnection {
if (obj === undefined || obj === null) {
return false;
}
return obj['__pulumiType'] === HybridConnection.__pulumiType;
}
/**
* Specifies the name of the App Service. Changing this forces a new resource to be created.
*/
public readonly appServiceName!: pulumi.Output<string>;
/**
* The hostname of the endpoint.
*/
public readonly hostname!: pulumi.Output<string>;
/**
* The name of the Relay Namespace.
*/
public /*out*/ readonly namespaceName!: pulumi.Output<string>;
/**
* The port of the endpoint.
*/
public readonly port!: pulumi.Output<number>;
/**
* The ID of the Service Bus Relay. Changing this forces a new resource to be created.
*/
public readonly relayId!: pulumi.Output<string>;
public /*out*/ readonly relayName!: pulumi.Output<string>;
/**
* The name of the resource group in which to create the App Service. Changing this forces a new resource to be created.
*/
public readonly resourceGroupName!: pulumi.Output<string>;
/**
* The name of the Service Bus key which has Send permissions. Defaults to `RootManageSharedAccessKey`.
*/
public readonly sendKeyName!: pulumi.Output<string | undefined>;
/**
* The value of the Service Bus Primary Access key.
*/
public /*out*/ readonly sendKeyValue!: pulumi.Output<string>;
/**
* The name of the Service Bus namespace.
*/
public /*out*/ readonly serviceBusNamespace!: pulumi.Output<string>;
/**
* The suffix for the service bus endpoint.
*/
public /*out*/ readonly serviceBusSuffix!: pulumi.Output<string>;
/**
* Create a HybridConnection resource with the given unique name, arguments, and options.
*
* @param name The _unique_ name of the resource.
* @param args The arguments to use to populate this resource's properties.
* @param opts A bag of options that control this resource's behavior.
*/
constructor(name: string, args: HybridConnectionArgs, opts?: pulumi.CustomResourceOptions)
constructor(name: string, argsOrState?: HybridConnectionArgs | HybridConnectionState, opts?: pulumi.CustomResourceOptions) {
let resourceInputs: pulumi.Inputs = {};
opts = opts || {};
if (opts.id) {
const state = argsOrState as HybridConnectionState | undefined;
resourceInputs["appServiceName"] = state ? state.appServiceName : undefined;
resourceInputs["hostname"] = state ? state.hostname : undefined;
resourceInputs["namespaceName"] = state ? state.namespaceName : undefined;
resourceInputs["port"] = state ? state.port : undefined;
resourceInputs["relayId"] = state ? state.relayId : undefined;
resourceInputs["relayName"] = state ? state.relayName : undefined;
resourceInputs["resourceGroupName"] = state ? state.resourceGroupName : undefined;
resourceInputs["sendKeyName"] = state ? state.sendKeyName : undefined;
resourceInputs["sendKeyValue"] = state ? state.sendKeyValue : undefined;
resourceInputs["serviceBusNamespace"] = state ? state.serviceBusNamespace : undefined;
resourceInputs["serviceBusSuffix"] = state ? state.serviceBusSuffix : undefined;
} else {
const args = argsOrState as HybridConnectionArgs | undefined;
if ((!args || args.appServiceName === undefined) && !opts.urn) {
throw new Error("Missing required property 'appServiceName'");
}
if ((!args || args.hostname === undefined) && !opts.urn) {
throw new Error("Missing required property 'hostname'");
}
if ((!args || args.port === undefined) && !opts.urn) {
throw new Error("Missing required property 'port'");
}
if ((!args || args.relayId === undefined) && !opts.urn) {
throw new Error("Missing required property 'relayId'");
}
if ((!args || args.resourceGroupName === undefined) && !opts.urn) {
throw new Error("Missing required property 'resourceGroupName'");
}
resourceInputs["appServiceName"] = args ? args.appServiceName : undefined;
resourceInputs["hostname"] = args ? args.hostname : undefined;
resourceInputs["port"] = args ? args.port : undefined;
resourceInputs["relayId"] = args ? args.relayId : undefined;
resourceInputs["resourceGroupName"] = args ? args.resourceGroupName : undefined;
resourceInputs["sendKeyName"] = args ? args.sendKeyName : undefined;
resourceInputs["namespaceName"] = undefined /*out*/;
resourceInputs["relayName"] = undefined /*out*/;
resourceInputs["sendKeyValue"] = undefined /*out*/;
resourceInputs["serviceBusNamespace"] = undefined /*out*/;
resourceInputs["serviceBusSuffix"] = undefined /*out*/;
}
opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts);
super(HybridConnection.__pulumiType, name, resourceInputs, opts);
}
}
/**
* Input properties used for looking up and filtering HybridConnection resources.
*/
export interface HybridConnectionState {
/**
* Specifies the name of the App Service. Changing this forces a new resource to be created.
*/
appServiceName?: pulumi.Input<string>;
/**
* The hostname of the endpoint.
*/
hostname?: pulumi.Input<string>;
/**
* The name of the Relay Namespace.
*/
namespaceName?: pulumi.Input<string>;
/**
* The port of the endpoint.
*/
port?: pulumi.Input<number>;
/**
* The ID of the Service Bus Relay. Changing this forces a new resource to be created.
*/
relayId?: pulumi.Input<string>;
relayName?: pulumi.Input<string>;
/**
* The name of the resource group in which to create the App Service. Changing this forces a new resource to be created.
*/
resourceGroupName?: pulumi.Input<string>;
/**
* The name of the Service Bus key which has Send permissions. Defaults to `RootManageSharedAccessKey`.
*/
sendKeyName?: pulumi.Input<string>;
/**
* The value of the Service Bus Primary Access key.
*/
sendKeyValue?: pulumi.Input<string>;
/**
* The name of the Service Bus namespace.
*/
serviceBusNamespace?: pulumi.Input<string>;
/**
* The suffix for the service bus endpoint.
*/
serviceBusSuffix?: pulumi.Input<string>;
}
/**
* The set of arguments for constructing a HybridConnection resource.
*/
export interface HybridConnectionArgs {
/**
* Specifies the name of the App Service. Changing this forces a new resource to be created.
*/
appServiceName: pulumi.Input<string>;
/**
* The hostname of the endpoint.
*/
hostname: pulumi.Input<string>;
/**
* The port of the endpoint.
*/
port: pulumi.Input<number>;
/**
* The ID of the Service Bus Relay. Changing this forces a new resource to be created.
*/
relayId: pulumi.Input<string>;
/**
* The name of the resource group in which to create the App Service. Changing this forces a new resource to be created.
*/
resourceGroupName: pulumi.Input<string>;
/**
* The name of the Service Bus key which has Send permissions. Defaults to `RootManageSharedAccessKey`.
*/
sendKeyName?: pulumi.Input<string>;
} | * when multiple copies of the Pulumi SDK have been loaded into the same process. |
multiarray.py | """
Create the numpy.core.multiarray namespace for backward compatibility. In v1.16
the multiarray and umath c-extension modules were merged into a single
_multiarray_umath extension module. So we replicate the old namespace
by importing from the extension module.
"""
import functools
import warnings
from . import overrides
from . import _multiarray_umath
from ._multiarray_umath import * # noqa: F403
# These imports are needed for backward compatibility,
# do not change them. issue gh-15518
# _get_ndarray_c_version is semi-public, on purpose not added to __all__
from ._multiarray_umath import (
_fastCopyAndTranspose, _flagdict, _insert, _reconstruct, _vec_string,
_ARRAY_API, _monotonicity, _get_ndarray_c_version, _set_madvise_hugepage,
)
__all__ = [
'_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS',
'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS',
'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI',
'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', '_fastCopyAndTranspose',
'_flagdict', '_insert', '_reconstruct', '_vec_string', '_monotonicity',
'add_docstring', 'arange', 'array', 'bincount', 'broadcast',
'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast',
'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2',
'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
'digitize', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'inner',
'interp', 'interp_complex', 'is_busday', 'lexsort',
'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer',
'nested_iters', 'normalize_axis_index', 'packbits',
'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar',
'set_datetimeparse_function', 'set_legacy_print_mode', 'set_numeric_ops',
'set_string_function', 'set_typeDict', 'shares_memory',
'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot',
'where', 'zeros']
# For backward compatibility, make sure pickle imports these functions from here
_reconstruct.__module__ = 'numpy.core.multiarray'
scalar.__module__ = 'numpy.core.multiarray'
arange.__module__ = 'numpy'
array.__module__ = 'numpy'
datetime_data.__module__ = 'numpy'
empty.__module__ = 'numpy'
frombuffer.__module__ = 'numpy'
fromfile.__module__ = 'numpy'
fromiter.__module__ = 'numpy'
frompyfunc.__module__ = 'numpy'
fromstring.__module__ = 'numpy'
geterrobj.__module__ = 'numpy'
may_share_memory.__module__ = 'numpy'
nested_iters.__module__ = 'numpy'
promote_types.__module__ = 'numpy'
set_numeric_ops.__module__ = 'numpy'
seterrobj.__module__ = 'numpy'
zeros.__module__ = 'numpy'
# We can't verify dispatcher signatures because NumPy's C functions don't
# support introspection.
array_function_from_c_func_and_dispatcher = functools.partial(
overrides.array_function_from_dispatcher,
module='numpy', docs_from_dispatcher=True, verify=False)
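# Each function decorated below is only a dispatcher: its body returns the array-like
# arguments that should be inspected for an ``__array_function__`` override, while the
# docstring is attached to the C implementation via ``docs_from_dispatcher=True``.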
@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)
def empty_like(prototype, dtype=None, order=None, subok=None, shape=None):
"""
empty_like(prototype, dtype=None, order='K', subok=True, shape=None)
Return a new array with the same shape and type as a given array.
Parameters
----------
prototype : array_like
The shape and data-type of `prototype` define these same attributes
of the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `prototype` is Fortran
contiguous, 'C' otherwise. 'K' means match the layout of `prototype`
as closely as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `prototype`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `prototype`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = ([1,2,3], [4,5,6]) # a is array-like
>>> np.empty_like(a)
array([[-1073741821, -1073741821, 3], # uninitialized
[ 0, 0, -1073741821]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
"""
return (prototype,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None):
"""
concatenate((a1, a2, ...), axis=0, out=None, dtype=None, casting="same_kind")
Join a sequence of arrays along an existing axis.
Parameters
----------
a1, a2, ... : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. If axis is None,
arrays are flattened before use. Default is 0.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what concatenate would have returned if no
out argument were specified.
dtype : str or dtype
If provided, the destination array will have this dtype. Cannot be
provided together with `out`.
.. versionadded:: 1.20.0
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'same_kind'.
.. versionadded:: 1.20.0
Returns
-------
res : ndarray
The concatenated array.
See Also
--------
ma.concatenate : Concatenate function that preserves input masks.
array_split : Split an array into multiple sub-arrays of equal or
near-equal size.
split : Split array into a list of multiple sub-arrays of equal size.
hsplit : Split array into multiple sub-arrays horizontally (column wise).
vsplit : Split array into multiple sub-arrays vertically (row wise).
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
stack : Stack a sequence of arrays along a new axis.
block : Assemble arrays from blocks.
hstack : Stack arrays in sequence horizontally (column wise).
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
column_stack : Stack 1-D arrays as columns into a 2-D array.
Notes
-----
When one or more of the arrays to be concatenated is a MaskedArray,
this function will return a MaskedArray object instead of an ndarray,
but the input masks are *not* preserved. In cases where a MaskedArray
is expected as input, use the ma.concatenate function from the masked
array module instead.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.concatenate((a, b.T), axis=1)
array([[1, 2, 5],
[3, 4, 6]])
>>> np.concatenate((a, b), axis=None)
array([1, 2, 3, 4, 5, 6])
This function will not preserve masking of MaskedArray inputs.
>>> a = np.ma.arange(3)
>>> a[1] = np.ma.masked
>>> b = np.arange(2, 5)
>>> a
masked_array(data=[0, --, 2],
mask=[False, True, False],
fill_value=999999)
>>> b
array([2, 3, 4])
>>> np.concatenate([a, b])
masked_array(data=[0, 1, 2, 2, 3, 4],
mask=False,
fill_value=999999)
>>> np.ma.concatenate([a, b])
masked_array(data=[0, --, 2, 2, 3, 4],
mask=[False, True, False, False, False, False],
fill_value=999999)
"""
if out is not None:
# optimize for the typical case where only arrays is provided
arrays = list(arrays)
arrays.append(out)
return arrays
@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
def inner(a, b):
"""
inner(a, b)
Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : array_like
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
`out.shape = a.shape[:-1] + b.shape[:-1]`
Raises
------
ValueError
If the last dimensions of `a` and `b` have different sizes.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
Notes
-----
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
= sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
2
A multidimensional example:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> np.inner(a, b)
array([[ 14, 38, 62],
[ 86, 110, 134]])
An example where `b` is a scalar:
>>> np.inner(np.eye(2), 7)
array([[7., 0.],
[0., 7.]])
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
def where(condition, x=None, y=None):
"""
where(condition, [x, y])
Return elements chosen from `x` or `y` depending on `condition`.
.. note::
When only `condition` is provided, this function is a shorthand for
``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
preferred, as it behaves correctly for subclasses. The rest of this
documentation covers only the case where all three arguments are
provided.
Parameters
----------
condition : array_like, bool
Where True, yield `x`, otherwise yield `y`.
x, y : array_like
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape.
Returns
-------
out : ndarray
An array with elements from `x` where `condition` is True, and elements
from `y` elsewhere.
See Also
--------
choose
nonzero : The function that is called when x and y are omitted
Notes
-----
If all the arrays are 1-D, `where` is equivalent to::
[xv if c else yv
for c, xv, yv in zip(condition, x, y)]
Examples
--------
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.where(a < 5, a, 10*a)
array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90])
This can be used on multidimensional arrays too:
>>> np.where([[True, False], [True, True]],
... [[1, 2], [3, 4]],
... [[9, 8], [7, 6]])
array([[1, 8],
[3, 4]])
The shapes of x, y, and the condition are broadcast together:
>>> x, y = np.ogrid[:3, :4]
>>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
array([[10, 0, 0, 0],
[10, 11, 1, 1],
[10, 11, 12, 2]])
>>> a = np.array([[0, 1, 2],
... [0, 2, 4],
... [0, 3, 6]])
>>> np.where(a < 4, a, -1) # -1 is broadcast
array([[ 0, 1, 2],
[ 0, 2, -1],
[ 0, 3, -1]])
"""
return (condition, x, y)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)
def lexsort(keys, axis=None):
"""
lexsort(keys, axis=-1)
Perform an indirect stable sort using a sequence of keys.
Given multiple sorting keys, which can be interpreted as columns in a
spreadsheet, lexsort returns an array of integer indices that describes
the sort order by multiple columns. The last key in the sequence is used
for the primary sort order, the second-to-last key for the secondary sort
order, and so on. The keys argument must be a sequence of objects that
can be converted to arrays of the same shape. If a 2D array is provided
for the keys argument, its rows are interpreted as the sorting keys and
sorting is according to the last row, second last row etc.
Parameters
----------
keys : (k, N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
Axis to be indirectly sorted. By default, sort over the last axis.
Returns
-------
indices : (N,) ndarray of ints
Array of indices that sort the keys along the specified axis.
See Also
--------
argsort : Indirect sort.
ndarray.sort : In-place sort.
sort : Return a sorted copy of an array.
Examples
--------
Sort names: first by surname, then by name.
>>> surnames = ('Hertz', 'Galilei', 'Hertz')
>>> first_names = ('Heinrich', 'Galileo', 'Gustav')
>>> ind = np.lexsort((first_names, surnames))
>>> ind
array([1, 2, 0])
>>> [surnames[i] + ", " + first_names[i] for i in ind]
['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
Sort two columns of numbers:
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
>>> ind
array([2, 0, 4, 6, 5, 3, 1])
>>> [(a[i],b[i]) for i in ind]
[(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
Note that sorting is first according to the elements of ``a``.
Secondary sorting is according to the elements of ``b``.
A normal ``argsort`` would have yielded:
>>> [(a[i],b[i]) for i in np.argsort(a)]
[(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
Structured arrays are sorted lexically by ``argsort``:
>>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
... dtype=np.dtype([('x', int), ('y', int)]))
>>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
array([2, 0, 4, 6, 5, 3, 1])
"""
if isinstance(keys, tuple):
return keys
else:
return (keys,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
def can_cast(from_, to, casting=None):
"""
can_cast(from_, to, casting='safe')
Returns True if cast between data types can occur according to the
casting rule. If from is a scalar or array scalar, also returns
True if the scalar value can be cast without overflow or truncation
to an integer.
Parameters
----------
from_ : dtype, dtype specifier, scalar, or array
Data type, scalar, or array to cast from.
to : dtype or dtype specifier
Data type to cast to.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Returns
-------
out : bool
True if cast can occur according to the casting rule.
Notes
-----
.. versionchanged:: 1.17.0
Casting between a simple data type and a structured one is possible only
for "unsafe" casting. Casting to multiple fields is allowed, but
casting from multiple fields is not.
.. versionchanged:: 1.9.0
Casting from numeric to string types in 'safe' casting mode requires
that the string dtype length is long enough to store the maximum
integer/float value converted.
See also
--------
dtype, result_type
Examples
--------
Basic examples
>>> np.can_cast(np.int32, np.int64)
True
>>> np.can_cast(np.float64, complex)
True
>>> np.can_cast(complex, float)
False
>>> np.can_cast('i8', 'f8')
True
>>> np.can_cast('i8', 'f4')
False
>>> np.can_cast('i4', 'S4')
False
Casting scalars
>>> np.can_cast(100, 'i1')
True
>>> np.can_cast(150, 'i1')
False
>>> np.can_cast(150, 'u1')
True
>>> np.can_cast(3.5e100, np.float32)
False
>>> np.can_cast(1000.0, np.float32)
True
Array scalar checks the value, array does not
>>> np.can_cast(np.array(1000.0), np.float32)
True
>>> np.can_cast(np.array([1000.0]), np.float32)
False
Using the casting rules
>>> np.can_cast('i8', 'i8', 'no')
True
>>> np.can_cast('<i8', '>i8', 'no')
False
>>> np.can_cast('<i8', '>i8', 'equiv')
True
>>> np.can_cast('<i4', '>i8', 'equiv')
False
>>> np.can_cast('<i4', '>i8', 'safe')
True
>>> np.can_cast('<i8', '>i4', 'safe')
False
>>> np.can_cast('<i8', '>i4', 'same_kind')
True
>>> np.can_cast('<i8', '>u4', 'same_kind')
False
>>> np.can_cast('<i8', '>u4', 'unsafe')
True
"""
return (from_,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
def min_scalar_type(a):
"""
min_scalar_type(a)
For scalar ``a``, returns the data type with the smallest size
and smallest scalar kind which can hold its value. For non-scalar
array ``a``, returns the vector's dtype unmodified.
Floating point values are not demoted to integers,
and complex values are not demoted to floats.
Parameters
----------
a : scalar or array_like
The value whose minimal data type is to be found.
Returns
-------
out : dtype
The minimal data type.
Notes
-----
.. versionadded:: 1.6.0
See Also
--------
result_type, promote_types, dtype, can_cast
Examples
--------
>>> np.min_scalar_type(10)
dtype('uint8')
>>> np.min_scalar_type(-260)
dtype('int16')
>>> np.min_scalar_type(3.1)
dtype('float16')
>>> np.min_scalar_type(1e50)
dtype('float64')
>>> np.min_scalar_type(np.arange(4,dtype='f8'))
dtype('float64')
"""
return (a,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)
def result_type(*arrays_and_dtypes):
"""
result_type(*arrays_and_dtypes)
Returns the type that results from applying the NumPy
type promotion rules to the arguments.
Type promotion in NumPy works similarly to the rules in languages
like C++, with some slight differences. When both scalars and
arrays are used, the array's type takes precedence and the actual value
of the scalar is taken into account.
For example, calculating 3*a, where a is an array of 32-bit floats,
intuitively should result in a 32-bit float output. If the 3 is a
32-bit integer, the NumPy rules indicate it can't convert losslessly
into a 32-bit float, so a 64-bit float should be the result type.
By examining the value of the constant, '3', we see that it fits in
an 8-bit integer, which can be cast losslessly into the 32-bit float.
Parameters
----------
arrays_and_dtypes : list of arrays and dtypes
The operands of some operation whose result type is needed.
Returns
-------
out : dtype
The result type.
See also
--------
dtype, promote_types, min_scalar_type, can_cast
Notes
-----
.. versionadded:: 1.6.0
The specific algorithm used is as follows.
Categories are determined by first checking which of boolean,
integer (int/uint), or floating point (float/complex) the maximum
kind of all the arrays and the scalars are.
If there are only scalars or the maximum category of the scalars
is higher than the maximum category of the arrays,
the data types are combined with :func:`promote_types`
to produce the return value.
Otherwise, `min_scalar_type` is called on each array, and
the resulting data types are all combined with :func:`promote_types`
to produce the return value.
The set of int values is not a subset of the uint values for types
with the same number of bits, something not reflected in
:func:`min_scalar_type`, but handled as a special case in `result_type`.
Examples
--------
>>> np.result_type(3, np.arange(7, dtype='i1'))
dtype('int8')
>>> np.result_type('i4', 'c8')
dtype('complex128')
>>> np.result_type(3.0, -2)
dtype('float64')
"""
return arrays_and_dtypes
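# Worked example of the value-based rules described above (as implemented in
# this NumPy version): np.result_type(3.0, np.arange(2, dtype='f4')) sees a
# float scalar against a float32 array, so the scalar is reduced with
# min_scalar_type(3.0) -> float16 and promoted with float32, giving
# dtype('float32'). With an integer array, e.g.
# np.result_type(3.0, np.arange(2, dtype='i4')), the scalar kind is higher,
# so the plain dtypes are promoted instead and the result is dtype('float64').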
@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot)
def dot(a, b, out=None):
"""
dot(a, b, out=None)
Dot product of two arrays. Specifically,
- If both `a` and `b` are 1-D arrays, it is inner product of vectors
(without complex conjugation).
- If both `a` and `b` are 2-D arrays, it is matrix multiplication,
but using :func:`matmul` or ``a @ b`` is preferred.
- If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`
and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred.
- If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
the last axis of `a` and `b`.
- If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a
sum product over the last axis of `a` and the second-to-last axis of `b`::
dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
matmul : '@' operator as method with out parameter.
linalg.multi_dot : Chained dot product.
Examples
--------
>>> np.dot(3, 4)
12
Neither argument is complex-conjugated:
>>> np.dot([2j, 3j], [2j, 3j])
(-13+0j)
For 2-D arrays it is the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.dot(a, b)
array([[4, 1],
[2, 2]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
>>> np.dot(a, b)[2,3,2,1,2,2]
499128
>>> sum(a[2,3,2,:] * b[1,2,:,2])
499128
"""
return (a, b, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)
def vdot(a, b):
"""
vdot(a, b)
Return the dot product of two vectors.
The vdot(`a`, `b`) function handles complex numbers differently than
dot(`a`, `b`). If the first argument is complex the complex conjugate
of the first argument is used for the calculation of the dot product.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : array_like
If `a` is complex the complex conjugate is taken before calculation
of the dot product.
b : array_like
Second argument to the dot product.
Returns
-------
output : ndarray
Dot product of `a` and `b`. Can be an int, float, or
complex depending on the types of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
>>> a = np.array([1+2j,3+4j])
>>> b = np.array([5+6j,7+8j])
>>> np.vdot(a, b)
(70-8j)
>>> np.vdot(b, a)
(70+8j)
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
30
>>> np.vdot(b, a)
30
>>> 1*4 + 4*1 + 5*2 + 6*2
30
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)
def bincount(x, weights=None, minlength=None):
"""
bincount(x, weights=None, minlength=0)
Count number of occurrences of each value in array of non-negative ints.
The number of bins (of size 1) is one larger than the largest value in
`x`. If `minlength` is specified, there will be at least this number
of bins in the output array (though it will be longer if necessary,
depending on the contents of `x`).
Each bin gives the number of occurrences of its index value in `x`.
If `weights` is specified the input array is weighted by it, i.e. if a
value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
of ``out[n] += 1``.
Parameters
----------
x : array_like, 1 dimension, nonnegative ints
Input array.
weights : array_like, optional
Weights, array of the same shape as `x`.
minlength : int, optional
A minimum number of bins for the output array.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray of ints
The result of binning the input array.
The length of `out` is equal to ``np.amax(x)+1``.
Raises
------
ValueError
If the input is not 1-dimensional, or contains elements with negative
values, or if `minlength` is negative.
TypeError
If the type of the input is float or complex.
See Also
--------
histogram, digitize, unique
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
The input array needs to be of integer dtype, otherwise a
TypeError is raised:
>>> np.bincount(np.arange(5, dtype=float))
Traceback (most recent call last):
...
TypeError: Cannot cast array data from dtype('float64') to dtype('int64')
according to the rule 'safe'
A possible use of ``bincount`` is to perform sums over
variable-size chunks of an array, using the ``weights`` keyword.
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
"""
return (x, weights)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
def ravel_multi_index(multi_index, dims, mode=None, order=None):
"""
ravel_multi_index(multi_index, dims, mode='raise', order='C')
Converts a tuple of index arrays into an array of flat
indices, applying boundary modes to the multi-index.
Parameters
----------
multi_index : tuple of array_like
A tuple of integer arrays, one array for each dimension.
dims : tuple of ints
The shape of array into which the indices from ``multi_index`` apply.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices are handled. Can specify
either one mode or a tuple of modes, one mode per index.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
In 'clip' mode, a negative index which would normally
wrap will clip to 0 instead.
order : {'C', 'F'}, optional
Determines whether the multi-index should be viewed as
indexing in row-major (C-style) or column-major
(Fortran-style) order.
Returns
-------
raveled_indices : ndarray
An array of indices into the flattened version of an array
of dimensions ``dims``.
See Also
--------
unravel_index
Notes
-----
.. versionadded:: 1.6.0
Examples
--------
>>> arr = np.array([[3,6,6],[4,5,1]])
>>> np.ravel_multi_index(arr, (7,6))
array([22, 41, 37])
>>> np.ravel_multi_index(arr, (7,6), order='F')
array([31, 41, 13])
>>> np.ravel_multi_index(arr, (4,6), mode='clip')
array([22, 23, 19])
>>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
array([12, 13, 13])
>>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
1621
"""
return multi_index
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
def unravel_index(indices, shape=None, order=None):
"""
unravel_index(indices, shape, order='C')
Converts a flat index or array of flat indices into a tuple
of coordinate arrays.
Parameters
----------
indices : array_like
An integer array whose elements are indices into the flattened
version of an array of dimensions ``shape``. Before version 1.6.0,
this function accepted just one index value.
shape : tuple of ints
The shape of the array to use for unraveling ``indices``.
.. versionchanged:: 1.16.0
Renamed from ``dims`` to ``shape``.
order : {'C', 'F'}, optional
Determines whether the indices should be viewed as indexing in
row-major (C-style) or column-major (Fortran-style) order.
.. versionadded:: 1.6.0
Returns
-------
unraveled_coords : tuple of ndarray
Each array in the tuple has the same shape as the ``indices``
array.
See Also
--------
ravel_multi_index
Examples
--------
>>> np.unravel_index([22, 41, 37], (7,6))
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index([31, 41, 13], (7,6), order='F')
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index(1621, (6,7,8,9))
(3, 1, 4, 1)
"""
return (indices,)
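# ravel_multi_index and unravel_index are inverses for in-bounds indices:
# np.ravel_multi_index(np.unravel_index([22, 41, 37], (7, 6)), (7, 6))
# gives back array([22, 41, 37]).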
@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)
def copyto(dst, src, casting=None, where=None):
"""
copyto(dst, src, casting='same_kind', where=True)
Copies values from one array to another, broadcasting as necessary.
Raises a TypeError if the `casting` rule is violated, and if
`where` is provided, it selects which elements to copy.
.. versionadded:: 1.7.0
Parameters
----------
dst : ndarray
The array into which values are copied.
src : array_like
The array from which values are copied.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when copying.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `dst`, and selects elements to copy from `src` to `dst`
wherever it contains the value True.
"""
return (dst, src, where)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)
def putmask(a, mask, values):
"""
putmask(a, mask, values)
Changes elements of an array based on conditional and input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
This gives behavior different from ``a[mask] = values``.
Parameters
----------
a : ndarray
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
values : array_like
Values to put into `a` where `mask` is True. If `values` is smaller
than `a` it will be repeated.
See Also
--------
place, put, take, copyto
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> np.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = np.arange(5)
>>> np.putmask(x, x>1, [-33, -44])
>>> x
array([ 0, 1, -33, -44, -33])
"""
return (a, mask, values)
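# The repeat behaviour is what distinguishes putmask from plain boolean
# assignment: with x = np.arange(5), ``x[x > 1] = [-33, -44]`` raises a
# shape-mismatch ValueError (3 selected elements, 2 values), whereas
# np.putmask(x, x > 1, [-33, -44]) cycles through the values as shown above.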
@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
def | (a, axis=None, bitorder='big'):
"""
packbits(a, axis=None, bitorder='big')
Packs the elements of a binary-valued array into bits in a uint8 array.
The result is padded to full bytes by inserting zero bits at the end.
Parameters
----------
a : array_like
An array of integers or booleans whose elements should be packed to
bits.
axis : int, optional
The dimension over which bit-packing is done.
``None`` implies packing the flattened array.
bitorder : {'big', 'little'}, optional
The order of the input bits. 'big' will mimic bin(val),
``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will
reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``.
Defaults to 'big'.
.. versionadded:: 1.17.0
Returns
-------
packed : ndarray
Array of type uint8 whose elements represent bits corresponding to the
logical (0 or nonzero) value of the input elements. The shape of
`packed` has the same number of dimensions as the input (unless `axis`
is None, in which case the output is 1-D).
See Also
--------
unpackbits: Unpacks elements of a uint8 array into a binary-valued output
array.
Examples
--------
>>> a = np.array([[[1,0,1],
... [0,1,0]],
... [[1,1,0],
... [0,0,1]]])
>>> b = np.packbits(a, axis=-1)
>>> b
array([[[160],
[ 64]],
[[192],
[ 32]]], dtype=uint8)
Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
and 32 = 0010 0000.
"""
return (a,)
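# Because the packed output is padded to whole bytes, packbits followed by
# unpackbits only round-trips if the original length is supplied again, e.g.
# np.unpackbits(np.packbits(bits), count=len(bits)) recovers the 0/1 values
# of a 1-D ``bits`` array.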
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
def unpackbits(a, axis=None, count=None, bitorder='big'):
"""
unpackbits(a, axis=None, count=None, bitorder='big')
Unpacks elements of a uint8 array into a binary-valued output array.
Each element of `a` represents a bit-field that should be unpacked
into a binary-valued output array. The shape of the output array is
either 1-D (if `axis` is ``None``) or the same shape as the input
array with unpacking done along the axis specified.
Parameters
----------
a : ndarray, uint8 type
Input array.
axis : int, optional
The dimension over which bit-unpacking is done.
``None`` implies unpacking the flattened array.
count : int or None, optional
The number of elements to unpack along `axis`, provided as a way
of undoing the effect of packing a size that is not a multiple
of eight. A non-negative number means to only unpack `count`
bits. A negative number means to trim off that many bits from
the end. ``None`` means to unpack the entire array (the
default). Counts larger than the available number of bits will
add zero padding to the output. Negative counts must not
exceed the available number of bits.
.. versionadded:: 1.17.0
bitorder : {'big', 'little'}, optional
The order of the returned bits. 'big' will mimic bin(val),
``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse
the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``.
Defaults to 'big'.
.. versionadded:: 1.17.0
Returns
-------
unpacked : ndarray, uint8 type
The elements are binary-valued (0 or 1).
See Also
--------
packbits : Packs the elements of a binary-valued array into bits in
a uint8 array.
Examples
--------
>>> a = np.array([[2], [7], [23]], dtype=np.uint8)
>>> a
array([[ 2],
[ 7],
[23]], dtype=uint8)
>>> b = np.unpackbits(a, axis=1)
>>> b
array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
>>> c = np.unpackbits(a, axis=1, count=-3)
>>> c
array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0]], dtype=uint8)
>>> p = np.packbits(b, axis=0)
>>> np.unpackbits(p, axis=0)
array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> np.array_equal(b, np.unpackbits(p, axis=0, count=b.shape[0]))
True
"""
return (a,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)
def shares_memory(a, b, max_work=None):
"""
shares_memory(a, b, max_work=None)
Determine if two arrays share memory.
.. warning::
This function can be exponentially slow for some inputs, unless
`max_work` is set to a finite number or ``MAY_SHARE_BOUNDS``.
If in doubt, use `numpy.may_share_memory` instead.
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem (maximum number
of candidate solutions to consider). The following special
values are recognized:
max_work=MAY_SHARE_EXACT (default)
The problem is solved exactly. In this case, the function returns
True only if there is an element shared between the arrays. Finding
the exact solution may take extremely long in some cases.
max_work=MAY_SHARE_BOUNDS
Only the memory bounds of a and b are checked.
Raises
------
numpy.TooHardError
Exceeded max_work.
Returns
-------
out : bool
See Also
--------
may_share_memory
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> np.shares_memory(x, np.array([5, 6, 7]))
False
>>> np.shares_memory(x[::2], x)
True
>>> np.shares_memory(x[::2], x[1::2])
False
Checking whether two arrays share memory is NP-complete, and
runtime may increase exponentially in the number of
dimensions. Hence, `max_work` should generally be set to a finite
number, as it is possible to construct examples that take
extremely long to run:
>>> from numpy.lib.stride_tricks import as_strided
>>> x = np.zeros([192163377], dtype=np.int8)
>>> x1 = as_strided(x, strides=(36674, 61119, 85569), shape=(1049, 1049, 1049))
>>> x2 = as_strided(x[64023025:], strides=(12223, 12224, 1), shape=(1049, 1049, 1))
>>> np.shares_memory(x1, x2, max_work=1000)
Traceback (most recent call last):
...
numpy.TooHardError: Exceeded max_work
Running ``np.shares_memory(x1, x2)`` without `max_work` set takes
around 1 minute for this case. It is possible to find problems
that take still significantly longer.
"""
return (a, b)
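# For a cheap check, np.shares_memory(a, b, max_work=np.MAY_SHARE_BOUNDS)
# performs only the bounds comparison, which is the same test that
# may_share_memory (below) applies by default.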
@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)
def may_share_memory(a, b, max_work=None):
"""
may_share_memory(a, b, max_work=None)
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem. See
`shares_memory` for details. Default for ``may_share_memory``
is to do a bounds check.
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)
def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
"""
is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
Calculates which of the given dates are valid days, and which are not.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of bool, optional
If provided, this array is filled with the result.
Returns
-------
out : array of bool
An array with the same shape as ``dates``, containing True for
each valid day, and False for each invalid day.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # The weekdays are Friday, Saturday, and Monday
... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
array([False, False, True])
"""
return (dates, weekmask, holidays, out)
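# The abbreviated weekmask form also works here: np.is_busday('2011-07-16')
# is False (a Saturday under the default '1111100' mask), while
# np.is_busday('2011-07-16', weekmask='Sat Sun') is True.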
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)
def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
busdaycal=None, out=None):
"""
busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
First adjusts the date to fall on a valid day according to
the ``roll`` rule, then applies offsets to the given dates
counted in valid days.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
offsets : array_like of int
The array of offsets, which is broadcast with ``dates``.
roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
How to treat dates that do not fall on a valid day. The default
is 'raise'.
* 'raise' means to raise an exception for an invalid day.
* 'nat' means to return a NaT (not-a-time) for an invalid day.
* 'forward' and 'following' mean to take the first valid day
later in time.
* 'backward' and 'preceding' mean to take the first valid day
earlier in time.
* 'modifiedfollowing' means to take the first valid day
later in time unless it is across a Month boundary, in which
case to take the first valid day earlier in time.
* 'modifiedpreceding' means to take the first valid day
earlier in time unless it is across a Month boundary, in which
case to take the first valid day later in time.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of datetime64[D], optional
If provided, this array is filled with the result.
Returns
-------
out : array of datetime64[D]
An array with a shape from broadcasting ``dates`` and ``offsets``
together, containing the dates with offsets applied.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # First business day in October 2011 (not accounting for holidays)
... np.busday_offset('2011-10', 0, roll='forward')
numpy.datetime64('2011-10-03')
>>> # Last business day in February 2012 (not accounting for holidays)
... np.busday_offset('2012-03', -1, roll='forward')
numpy.datetime64('2012-02-29')
>>> # Third Wednesday in January 2011
... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
numpy.datetime64('2011-01-19')
>>> # 2012 Mother's Day in Canada and the U.S.
... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
numpy.datetime64('2012-05-13')
>>> # First business day on or after a date
... np.busday_offset('2011-03-20', 0, roll='forward')
numpy.datetime64('2011-03-21')
>>> np.busday_offset('2011-03-22', 0, roll='forward')
numpy.datetime64('2011-03-22')
>>> # First business day after a date
... np.busday_offset('2011-03-20', 1, roll='backward')
numpy.datetime64('2011-03-21')
>>> np.busday_offset('2011-03-22', 1, roll='backward')
numpy.datetime64('2011-03-23')
"""
return (dates, offsets, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
def busday_count(begindates, enddates, weekmask=None, holidays=None,
busdaycal=None, out=None):
"""
busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
Counts the number of valid days between `begindates` and
`enddates`, not including the day of `enddates`.
If ``enddates`` specifies a date value that is earlier than the
corresponding ``begindates`` date value, the count will be negative.
.. versionadded:: 1.7.0
Parameters
----------
begindates : array_like of datetime64[D]
The array of the first dates for counting.
enddates : array_like of datetime64[D]
The array of the end dates for counting, which are excluded
from the count themselves.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of int, optional
If provided, this array is filled with the result.
Returns
-------
out : array of int
An array with a shape from broadcasting ``begindates`` and ``enddates``
together, containing the number of valid days between
the begin and end dates.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
Examples
--------
>>> # Number of weekdays in January 2011
... np.busday_count('2011-01', '2011-02')
21
>>> # Number of weekdays in 2011
>>> np.busday_count('2011', '2012')
260
>>> # Number of Saturdays in 2011
... np.busday_count('2011', '2012', weekmask='Sat')
53
"""
return (begindates, enddates, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(
_multiarray_umath.datetime_as_string)
def datetime_as_string(arr, unit=None, timezone=None, casting=None):
"""
datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
Convert an array of datetimes into an array of strings.
Parameters
----------
arr : array_like of datetime64
The array of UTC timestamps to format.
unit : str
One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.
timezone : {'naive', 'UTC', 'local'} or tzinfo
Timezone information to use when displaying the datetime. If 'UTC', end
with a Z to indicate UTC time. If 'local', convert to the local timezone
first, and suffix with a +-#### timezone offset. If a tzinfo object,
then do as with 'local', but use the specified timezone.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}
Casting to allow when changing between datetime units.
Returns
-------
str_arr : ndarray
An array of strings the same shape as `arr`.
Examples
--------
>>> import pytz
>>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
>>> d
array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
'2002-10-27T07:30'], dtype='datetime64[m]')
Setting the timezone to UTC shows the same information, but with a Z suffix
>>> np.datetime_as_string(d, timezone='UTC')
array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
'2002-10-27T07:30Z'], dtype='<U35')
Note that we picked datetimes that cross a DST boundary. Passing in a
``pytz`` timezone object will print the appropriate offset
>>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
'2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
Passing in a unit will change the precision
>>> np.datetime_as_string(d, unit='h')
array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
dtype='<U32')
>>> np.datetime_as_string(d, unit='s')
array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
'2002-10-27T07:30:00'], dtype='<U38')
'casting' can be used to specify whether precision can be changed
>>> np.datetime_as_string(d, unit='h', casting='safe')
Traceback (most recent call last):
...
TypeError: Cannot create a datetime string as units 'h' from a NumPy
datetime with units 'm' according to the rule 'safe'
"""
return (arr,)
| packbits |
__init__.py | default_app_config = 'authmultitoken.apps.AuthMultiTokenConfig' | ||
tornado_server.py | import json, os
from threading import Thread
from tornado.ioloop import IOLoop
import tornado.web
from hbi.model import Host, Filter
from hbi.server import Service
class RootHandler(tornado.web.RequestHandler):
def get(self):
self.write("boop")
class EntitiesPoster(tornado.web.RequestHandler):
def post(self):
hosts_json = json.loads(self.request.body)
hosts = (Host.from_json(h) for h in hosts_json)
ret = self.application.service.create_or_update(hosts)
self.write(json.dumps([h.to_json() for h in ret]))
class EntitiesSearcher(tornado.web.RequestHandler):
def post(self):
filters_json = json.loads(self.request.body) if self.request.body else None
filters = [Filter.from_json(h) for h in filters_json] if filters_json else None
ret = self.application.service.get(filters)
self.write(json.dumps([h.to_json() for h in ret]))
def serve_tornado():
|
if __name__ == "__main__":
app, loop = serve_tornado()
| app = tornado.web.Application([
(r"/", RootHandler),
(r"/entities/search", EntitiesSearcher),
(r"/entities", EntitiesPoster),
])
app.listen(int(os.environ.get("PORT", "50051")))
app.service = Service()
loop = IOLoop.current()
class TornadoRunThread(Thread):
def run(self):
loop.start()
TornadoRunThread().start()
return app, loop |
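# serve_tornado() leaves the IOLoop running on a background thread; to shut it
# down from another thread one would typically schedule the stop through the
# loop itself, e.g. loop.add_callback(loop.stop), since add_callback is the
# only IOLoop method that is safe to call from outside the loop's thread.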
part_2.go | package main
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
func main() | {
	file, err := os.Open("input.txt")
	if err != nil {
		panic(err)
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
valid := 0
for scanner.Scan() {
line := scanner.Text()
s := strings.Split(line, " ")
limits := strings.Split(s[0], "-")
a, _ := strconv.Atoi(limits[0])
b, _ := strconv.Atoi(limits[1])
token := strings.Split(s[1], ":")[0][0]
text := s[2]
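		// Part 2 rule: exactly one of the two 1-based positions may hold the
		// token, i.e. an XOR of the two position checks below.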
if (text[a-1] == token && text[b-1] != token) || (text[a-1] != token && text[b-1] == token) {
valid += 1
}
}
fmt.Println(valid)
} |
|
data_manager_vep_cache_download.py | #!/usr/bin/env python
import json
import os
import re
import sys
import tarfile
from urllib.request import urlretrieve
def main():
# Read in given out_file and create target directory for file download
with open(sys.argv[1]) as fh:
params = json.load(fh)
target_directory = params['output_data'][0]['extra_files_path']
os.mkdir(target_directory)
# Process parameters for metadata and file download
url = params['param_dict']['url'].rstrip("/") + "/" + params['param_dict']['file_name'].lstrip("/")
m = re.search(r"(.*?)(merged|refseq)?_vep_(\d+?)_", params['param_dict']['file_name'])
version = str(m.group(3))
cache_type = m.group(2) if m.group(2) else "default"
species = m.group(1).rstrip("_")
display_name = f"{species.capitalize().replace('_', ' ')} {params['param_dict']['dbkey']} (V{version}{'' if cache_type == 'default' else ', ' + cache_type.capitalize()})"
# Download and extract given cache archive, remove archive afterwards
final_file, headers = urlretrieve(url, os.path.join(target_directory, params['param_dict']['file_name']))
tar = tarfile.open(final_file, "r:gz")
tar.extractall(target_directory)
tar.close()
os.remove(final_file)
    # Construct metadata for the new data table entry.
    # str.strip(".tar.gz") would strip a set of characters, not the suffix,
    # so the extracted directory name is computed explicitly.
    extracted_name = params['param_dict']['file_name']
    if extracted_name.endswith(".tar.gz"):
        extracted_name = extracted_name[:-len(".tar.gz")]
    data_manager_dict = {
        'data_tables': {
            'vep_versioned_annotation_cache': [
                {
                    'value': extracted_name,
                    'dbkey': params['param_dict']['dbkey'],
                    'version': version,
                    'cachetype': cache_type,
                    'name': display_name,
                    'species': species,
                    'path': './%s' % extracted_name
                }
            ]
        }
    }
# Save metadata to out_file
with open(sys.argv[1], 'w') as fh:
json.dump(data_manager_dict, fh, sort_keys=True) | if __name__ == "__main__":
main() | |
train_example_deit.py | import argparse
from hugsvision.nnet.VisionClassifierTrainer import VisionClassifierTrainer
from hugsvision.dataio.VisionDataset import VisionDataset
from torchvision.datasets import ImageFolder
from transformers import DeiTFeatureExtractor, DeiTForImageClassification
| parser.add_argument('--imgs', type=str, default="./images/", help='The directory of the input images')
parser.add_argument('--output', type=str, default="./out/", help='The output directory of the model')
parser.add_argument('--epochs', type=int, default=1, help='Number of Epochs')
args = parser.parse_args()
# Load the dataset
train, test, id2label, label2id = VisionDataset.fromImageFolder(
args.imgs,
test_ratio = 0.15,
balanced = True,
augmentation = True,
)
# # Load the dataset
# train, test, id2label, label2id = VisionDataset.fromImageFolders(
# "/<PATH>/train/",
# "/<PATH>/test/",
# )
huggingface_model = "facebook/deit-base-distilled-patch16-224"
# Train the model
trainer = VisionClassifierTrainer(
model_name = args.name,
train = train,
test = test,
output_dir = args.output,
max_epochs = args.epochs,
cores = 4,
batch_size = 32,
model = DeiTForImageClassification.from_pretrained(
huggingface_model,
num_labels = len(label2id),
label2id = label2id,
id2label = id2label
),
feature_extractor = DeiTFeatureExtractor.from_pretrained(
huggingface_model
),
)
# Evaluate on the test sub-dataset
ref, hyp = trainer.evaluate_f1_score()
# Test on a single image
trainer.testing(img='./data/demo/42.png',expected=2)
trainer.testing(img='./data/demo/3.jpg',expected=0)
trainer.testing(img='./data/demo/5.jpg',expected=2)
trainer.testing(img='./data/demo/4.jpg',expected=1) |
parser = argparse.ArgumentParser(description='Image classifier')
parser.add_argument('--name', type=str, default="MyVitModel", help='The name of the model')
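# Example invocation (paths and model name are placeholders):
#   python train_example_deit.py --name MyDeiTModel --imgs ./images/ --output ./out/ --epochs 1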
|
documents_api.py | import logging
from werkzeug.exceptions import BadRequest, NotFound
from flask import Blueprint, redirect, send_file, request
from apikit import jsonify, Pager, request_data
from aleph.core import archive, url_for, db
from aleph.model import Document, DocumentRecord, Entity, Reference
from aleph.logic import update_document
from aleph.events import log_event
from aleph.views.cache import enable_cache
from aleph.search import QueryState
from aleph.search import records_query, execute_records_query
from aleph.search.util import next_params
from aleph.views.util import get_document
from aleph.util import PDF_MIME
log = logging.getLogger(__name__)
blueprint = Blueprint('documents_api', __name__)
@blueprint.route('/api/1/documents', methods=['GET'])
def index():
authz = request.authz
collections = request.args.getlist('collection')
collections = authz.collections_intersect(authz.READ, collections)
q = Document.all()
q = q.filter(Document.collection_id.in_(collections))
hashes = request.args.getlist('content_hash')
if len(hashes):
q = q.filter(Document.content_hash.in_(hashes))
return jsonify(Pager(q))
@blueprint.route('/api/1/documents/<int:document_id>')
def view(document_id):
doc = get_document(document_id)
enable_cache()
data = doc.to_dict()
if doc.parent is not None:
data['parent'] = doc.parent.to_dict()
log_event(request, document_id=doc.id)
data['data_url'] = archive.generate_url(doc.content_hash)
if data['data_url'] is None:
data['data_url'] = url_for('documents_api.file',
document_id=document_id)
if doc.pdf_version:
data['pdf_url'] = url_for('documents_api.pdf',
document_id=document_id)
return jsonify(data)
@blueprint.route('/api/1/documents/<int:document_id>', methods=['POST', 'PUT'])
def | (document_id):
document = get_document(document_id, action=request.authz.WRITE)
data = request_data()
document.update(data)
db.session.commit()
log_event(request, document_id=document.id)
update_document(document)
return view(document_id)
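# Example request against this endpoint (document id and payload fields are
# hypothetical; the accepted fields depend on Document.update()):
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"title": "Updated title"}' http://localhost:5000/api/1/documents/42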
@blueprint.route('/api/1/documents/<int:document_id>/references')
def references(document_id):
doc = get_document(document_id)
q = db.session.query(Reference)
q = q.filter(Reference.document_id == doc.id)
q = q.filter(Reference.origin == 'regex')
q = q.join(Entity)
q = q.filter(Entity.state == Entity.STATE_ACTIVE)
q = q.filter(Entity.collection_id.in_(request.authz.collections_read))
q = q.order_by(Reference.weight.desc())
return jsonify(Pager(q, document_id=document_id))
@blueprint.route('/api/1/documents/<int:document_id>/file')
def file(document_id):
document = get_document(document_id)
enable_cache(server_side=True)
log_event(request, document_id=document.id)
url = archive.generate_url(document.content_hash,
file_name=document.file_name,
mime_type=document.mime_type)
if url is not None:
return redirect(url)
local_path = archive.load_file(document.content_hash,
file_name=document.file_name)
if local_path is None:
raise NotFound("File does not exist.")
fh = open(local_path, 'rb')
return send_file(fh, as_attachment=True,
attachment_filename=document.file_name,
mimetype=document.mime_type)
@blueprint.route('/api/1/documents/<int:document_id>/pdf')
def pdf(document_id):
document = get_document(document_id)
enable_cache(server_side=True)
log_event(request, document_id=document.id)
if document.type != Document.TYPE_TEXT:
raise BadRequest("PDF is only available for text documents")
url = archive.generate_url(document.pdf_version, mime_type=PDF_MIME)
if url is not None:
return redirect(url)
path = archive.load_file(document.pdf_version,
file_name=document.file_name)
if path is None:
raise NotFound("Missing PDF file.")
return send_file(open(path, 'rb'), mimetype=PDF_MIME)
@blueprint.route('/api/1/documents/<int:document_id>/tables/<int:table_id>')
def table(document_id, table_id):
document = get_document(document_id)
enable_cache(vary_user=True)
try:
return jsonify(document.tables[table_id])
except IndexError:
raise NotFound("No such table: %s" % table_id)
@blueprint.route('/api/1/documents/<int:document_id>/records')
def records(document_id):
document = get_document(document_id)
enable_cache(vary_user=True)
state = QueryState(request.args, request.authz)
query = records_query(document.id, state)
result = execute_records_query(document.id, state, query)
params = next_params(request.args, result)
if params is not None:
result['next'] = url_for('documents_api.records',
document_id=document_id,
**params)
return jsonify(result)
@blueprint.route('/api/1/documents/<int:document_id>/records/<int:index>')
def record(document_id, index):
document = get_document(document_id)
q = db.session.query(DocumentRecord)
q = q.filter(DocumentRecord.document_id == document.id)
q = q.filter(DocumentRecord.index == index)
record = q.first()
if record is None:
raise NotFound("No such page: %s" % index)
enable_cache(server_side=True)
return jsonify(record)
| update |
encoder.rs | #![allow(clippy::too_many_arguments)]
use std::convert::TryFrom;
use std::io::{self, Write};
use byteorder::{BigEndian, WriteBytesExt};
use num_iter::range_step;
use crate::{Bgr, Bgra, ColorType, GenericImageView, ImageBuffer, Luma, LumaA, Pixel, Rgb, Rgba};
use crate::error::{ImageError, ImageResult, ParameterError, ParameterErrorKind, UnsupportedError, UnsupportedErrorKind};
use crate::image::{ImageEncoder, ImageFormat};
use crate::math::utils::clamp;
use super::entropy::build_huff_lut;
use super::transform;
// Markers
// Baseline DCT
static SOF0: u8 = 0xC0;
// Huffman Tables
static DHT: u8 = 0xC4;
// Start of Image (standalone)
static SOI: u8 = 0xD8;
// End of image (standalone)
static EOI: u8 = 0xD9;
// Start of Scan
static SOS: u8 = 0xDA;
// Quantization Tables
static DQT: u8 = 0xDB;
// Application segments start and end
static APP0: u8 = 0xE0;
// section K.1
// table K.1
#[rustfmt::skip]
static STD_LUMA_QTABLE: [u8; 64] = [
16, 11, 10, 16, 24, 40, 51, 61,
12, 12, 14, 19, 26, 58, 60, 55,
14, 13, 16, 24, 40, 57, 69, 56,
14, 17, 22, 29, 51, 87, 80, 62,
18, 22, 37, 56, 68, 109, 103, 77,
24, 35, 55, 64, 81, 104, 113, 92,
49, 64, 78, 87, 103, 121, 120, 101,
72, 92, 95, 98, 112, 100, 103, 99,
];
// table K.2
#[rustfmt::skip]
static STD_CHROMA_QTABLE: [u8; 64] = [
17, 18, 24, 47, 99, 99, 99, 99,
18, 21, 26, 66, 99, 99, 99, 99,
24, 26, 56, 99, 99, 99, 99, 99,
47, 66, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99,
];
// section K.3
// Code lengths and values for table K.3
static STD_LUMA_DC_CODE_LENGTHS: [u8; 16] = [
0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
];
static STD_LUMA_DC_VALUES: [u8; 12] = [
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
];
// Code lengths and values for table K.4
static STD_CHROMA_DC_CODE_LENGTHS: [u8; 16] = [
0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
];
static STD_CHROMA_DC_VALUES: [u8; 12] = [
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
];
// Code lengths and values for table k.5
static STD_LUMA_AC_CODE_LENGTHS: [u8; 16] = [
0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7D,
];
static STD_LUMA_AC_VALUES: [u8; 162] = [
0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0,
0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28,
0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5,
0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE1, 0xE2,
0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8,
0xF9, 0xFA,
];
// Code lengths and values for table k.6
static STD_CHROMA_AC_CODE_LENGTHS: [u8; 16] = [
0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77,
];
static STD_CHROMA_AC_VALUES: [u8; 162] = [
0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0,
0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26,
0x27, 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5,
0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3,
0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA,
0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8,
0xF9, 0xFA,
];
static DCCLASS: u8 = 0;
static ACCLASS: u8 = 1;
static LUMADESTINATION: u8 = 0;
static CHROMADESTINATION: u8 = 1;
static LUMAID: u8 = 1;
static CHROMABLUEID: u8 = 2;
static CHROMAREDID: u8 = 3;
/// The permutation of dct coefficients.
#[rustfmt::skip]
static UNZIGZAG: [u8; 64] = [
0, 1, 8, 16, 9, 2, 3, 10,
17, 24, 32, 25, 18, 11, 4, 5,
12, 19, 26, 33, 40, 48, 41, 34,
27, 20, 13, 6, 7, 14, 21, 28,
35, 42, 49, 56, 57, 50, 43, 36,
29, 22, 15, 23, 30, 37, 44, 51,
58, 59, 52, 45, 38, 31, 39, 46,
53, 60, 61, 54, 47, 55, 62, 63,
];
/// A representation of a JPEG component
#[derive(Copy, Clone)]
struct Component {
/// The Component's identifier
id: u8,
/// Horizontal sampling factor
h: u8,
/// Vertical sampling factor
v: u8,
/// The quantization table selector
tq: u8,
/// Index to the Huffman DC Table
dc_table: u8,
/// Index to the AC Huffman Table
ac_table: u8,
/// The dc prediction of the component
_dc_pred: i32,
}
pub(crate) struct BitWriter<'a, W: 'a> {
w: &'a mut W,
accumulator: u32,
nbits: u8,
}
impl<'a, W: Write + 'a> BitWriter<'a, W> {
fn new(w: &'a mut W) -> Self {
BitWriter {
w,
accumulator: 0,
nbits: 0,
}
}
fn write_bits(&mut self, bits: u16, size: u8) -> io::Result<()> {
if size == 0 {
return Ok(());
}
self.nbits += size;
self.accumulator |= u32::from(bits) << (32 - self.nbits) as usize;
while self.nbits >= 8 {
let byte = self.accumulator >> 24;
self.w.write_all(&[byte as u8])?;
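            // JPEG byte stuffing: a literal 0xFF data byte must be followed by
            // 0x00 so that decoders do not read it as a marker.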
if byte == 0xFF {
self.w.write_all(&[0x00])?;
}
self.nbits -= 8;
self.accumulator <<= 8;
}
Ok(())
}
fn pad_byte(&mut self) -> io::Result<()> {
self.write_bits(0x7F, 7)
}
fn huffman_encode(&mut self, val: u8, table: &[(u8, u16)]) -> io::Result<()> {
let (size, code) = table[val as usize];
if size > 16 {
panic!("bad huffman value");
}
self.write_bits(code, size)
}
fn write_block(
&mut self,
block: &[i32],
prevdc: i32,
dctable: &[(u8, u16)],
actable: &[(u8, u16)],
) -> io::Result<i32> {
// Differential DC encoding
let dcval = block[0];
let diff = dcval - prevdc;
let (size, value) = encode_coefficient(diff);
self.huffman_encode(size, dctable)?;
self.write_bits(value, size)?;
// Figure F.2
let mut zero_run = 0;
for k in 1usize..=63 {
if block[UNZIGZAG[k] as usize] == 0 {
zero_run += 1;
} else {
while zero_run > 15 {
self.huffman_encode(0xF0, actable)?;
zero_run -= 16;
}
let (size, value) = encode_coefficient(block[UNZIGZAG[k] as usize]);
let symbol = (zero_run << 4) | size;
self.huffman_encode(symbol, actable)?;
self.write_bits(value, size)?;
zero_run = 0;
if k == 63 {
break;
}
}
}
if block[UNZIGZAG[63] as usize] == 0 {
self.huffman_encode(0x00, actable)?;
}
Ok(dcval)
}
fn write_segment(&mut self, marker: u8, data: Option<&[u8]>) -> io::Result<()> |
}
/// Represents a unit in which the density of an image is measured
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum PixelDensityUnit {
/// Represents the absence of a unit, the values indicate only a
/// [pixel aspect ratio](https://en.wikipedia.org/wiki/Pixel_aspect_ratio)
PixelAspectRatio,
/// Pixels per inch (2.54 cm)
Inches,
/// Pixels per centimeter
Centimeters,
}
/// Represents the pixel density of an image
///
/// For example, a 300 DPI image is represented by:
///
/// ```rust
/// use image::jpeg::*;
/// let hdpi = PixelDensity::dpi(300);
/// assert_eq!(hdpi, PixelDensity {density: (300,300), unit: PixelDensityUnit::Inches})
/// ```
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct PixelDensity {
/// A couple of values for (Xdensity, Ydensity)
pub density: (u16, u16),
/// The unit in which the density is measured
pub unit: PixelDensityUnit,
}
impl PixelDensity {
/// Creates the most common pixel density type:
/// the horizontal and the vertical density are equal,
/// and measured in pixels per inch.
pub fn dpi(density: u16) -> Self {
PixelDensity {
density: (density, density),
unit: PixelDensityUnit::Inches,
}
}
}
impl Default for PixelDensity {
/// Returns a pixel density with a pixel aspect ratio of 1
fn default() -> Self {
PixelDensity {
density: (1, 1),
unit: PixelDensityUnit::PixelAspectRatio,
}
}
}
/// The representation of a JPEG encoder
pub struct JpegEncoder<'a, W: 'a> {
writer: BitWriter<'a, W>,
components: Vec<Component>,
tables: Vec<u8>,
luma_dctable: Vec<(u8, u16)>,
luma_actable: Vec<(u8, u16)>,
chroma_dctable: Vec<(u8, u16)>,
chroma_actable: Vec<(u8, u16)>,
pixel_density: PixelDensity,
}
/// JPEG Encoder
///
/// An alias of [`JpegEncoder`].
///
/// TODO: remove
///
/// [`JpegEncoder`]: struct.JpegEncoder.html
#[allow(dead_code)]
#[deprecated(note = "Use `JpegEncoder` instead")]
pub type JPEGEncoder<'a, W> = JpegEncoder<'a, W>;
impl<'a, W: Write> JpegEncoder<'a, W> {
/// Create a new encoder that writes its output to ```w```
pub fn new(w: &mut W) -> JpegEncoder<W> {
JpegEncoder::new_with_quality(w, 75)
}
/// Create a new encoder that writes its output to ```w```, and has
/// the quality parameter ```quality``` with a value in the range 1-100
/// where 1 is the worst and 100 is the best.
pub fn new_with_quality(w: &mut W, quality: u8) -> JpegEncoder<W> {
let ld = build_huff_lut(&STD_LUMA_DC_CODE_LENGTHS, &STD_LUMA_DC_VALUES);
let la = build_huff_lut(&STD_LUMA_AC_CODE_LENGTHS, &STD_LUMA_AC_VALUES);
let cd = build_huff_lut(&STD_CHROMA_DC_CODE_LENGTHS, &STD_CHROMA_DC_VALUES);
let ca = build_huff_lut(&STD_CHROMA_AC_CODE_LENGTHS, &STD_CHROMA_AC_VALUES);
let components = vec![
Component {
id: LUMAID,
h: 1,
v: 1,
tq: LUMADESTINATION,
dc_table: LUMADESTINATION,
ac_table: LUMADESTINATION,
_dc_pred: 0,
},
Component {
id: CHROMABLUEID,
h: 1,
v: 1,
tq: CHROMADESTINATION,
dc_table: CHROMADESTINATION,
ac_table: CHROMADESTINATION,
_dc_pred: 0,
},
Component {
id: CHROMAREDID,
h: 1,
v: 1,
tq: CHROMADESTINATION,
dc_table: CHROMADESTINATION,
ac_table: CHROMADESTINATION,
_dc_pred: 0,
},
];
// Derive our quantization table scaling value using the libjpeg algorithm
let scale = u32::from(clamp(quality, 1, 100));
let scale = if scale < 50 {
5000 / scale
} else {
200 - scale * 2
};
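        // libjpeg-style mapping: quality 50 keeps the reference tables
        // unchanged (scale 100%), lower quality scales entries up and higher
        // quality scales them down; each entry is clamped to 1..=255 below.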
let mut tables = Vec::new();
let scale_value = |&v: &u8| {
let value = (u32::from(v) * scale + 50) / 100;
clamp(value, 1, u32::from(u8::max_value())) as u8
};
tables.extend(STD_LUMA_QTABLE.iter().map(&scale_value));
tables.extend(STD_CHROMA_QTABLE.iter().map(&scale_value));
JpegEncoder {
writer: BitWriter::new(w),
components,
tables,
luma_dctable: ld,
luma_actable: la,
chroma_dctable: cd,
chroma_actable: ca,
pixel_density: PixelDensity::default(),
}
}
/// Set the pixel density of the images the encoder will encode.
/// If this method is not called, then a default pixel aspect ratio of 1x1 will be applied,
/// and no DPI information will be stored in the image.
pub fn set_pixel_density(&mut self, pixel_density: PixelDensity) {
self.pixel_density = pixel_density;
}
/// Encodes the image stored in the raw byte buffer ```image```
/// that has dimensions ```width``` and ```height```
/// and ```ColorType``` ```c```
///
    /// The image is encoded without chroma subsampling (all components use 1x1 sampling factors)
pub fn encode(
&mut self,
image: &[u8],
width: u32,
height: u32,
color_type: ColorType,
) -> ImageResult<()> {
match color_type {
ColorType::L8 => {
let image: ImageBuffer<Luma<_>, _> = ImageBuffer::from_raw(width, height, image).unwrap();
self.encode_image(&image)
},
ColorType::La8 => {
let image: ImageBuffer<LumaA<_>, _> = ImageBuffer::from_raw(width, height, image).unwrap();
self.encode_image(&image)
},
ColorType::Rgb8 => {
let image: ImageBuffer<Rgb<_>, _> = ImageBuffer::from_raw(width, height, image).unwrap();
self.encode_image(&image)
},
ColorType::Rgba8 => {
let image: ImageBuffer<Rgba<_>, _> = ImageBuffer::from_raw(width, height, image).unwrap();
self.encode_image(&image)
},
ColorType::Bgr8 => {
let image: ImageBuffer<Bgr<_>, _> = ImageBuffer::from_raw(width, height, image).unwrap();
self.encode_image(&image)
},
ColorType::Bgra8 => {
let image: ImageBuffer<Bgra<_>, _> = ImageBuffer::from_raw(width, height, image).unwrap();
self.encode_image(&image)
},
_ => {
return Err(ImageError::Unsupported(
UnsupportedError::from_format_and_kind(
ImageFormat::Jpeg.into(),
UnsupportedErrorKind::Color(color_type.into()),
),
))
},
}
}
/// Encodes the given image.
///
/// As a special feature, this does not require the whole image to be present in memory at the
/// same time, so it may be computed on the fly; this is why the method exists on this
/// encoder but not on others. Instead, the encoder iterates over 8-by-8 blocks of pixels at
/// a time, inspecting each pixel exactly once. You can rely on this behaviour when calling
/// this method.
///
/// The image is encoded with subsampling ratio 4:2:2
pub fn encode_image<I: GenericImageView>(
&mut self,
image: &I,
) -> ImageResult<()> {
let n = I::Pixel::CHANNEL_COUNT;
let num_components = if n == 1 || n == 2 { 1 } else { 3 };
self.writer.write_segment(SOI, None)?;
let mut buf = Vec::new();
build_jfif_header(&mut buf, self.pixel_density);
self.writer.write_segment(APP0, Some(&buf))?;
build_frame_header(
&mut buf,
8,
// TODO: not idiomatic yet. Should be an EncodingError and mention jpg. Further it
// should check dimensions prior to writing.
u16::try_from(image.width()).map_err(|_| {
ImageError::Parameter(ParameterError::from_kind(
ParameterErrorKind::DimensionMismatch,
))
})?,
u16::try_from(image.height()).map_err(|_| {
ImageError::Parameter(ParameterError::from_kind(
ParameterErrorKind::DimensionMismatch,
))
})?,
&self.components[..num_components],
);
self.writer.write_segment(SOF0, Some(&buf))?;
assert_eq!(self.tables.len() / 64, 2);
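// Grayscale output only needs the luma quantization table; color output also emits the chroma table.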
let numtables = if num_components == 1 { 1 } else { 2 };
for (i, table) in self.tables.chunks(64).enumerate().take(numtables) {
build_quantization_segment(&mut buf, 8, i as u8, table);
self.writer.write_segment(DQT, Some(&buf))?;
}
build_huffman_segment(
&mut buf,
DCCLASS,
LUMADESTINATION,
&STD_LUMA_DC_CODE_LENGTHS,
&STD_LUMA_DC_VALUES,
);
self.writer.write_segment(DHT, Some(&buf))?;
build_huffman_segment(
&mut buf,
ACCLASS,
LUMADESTINATION,
&STD_LUMA_AC_CODE_LENGTHS,
&STD_LUMA_AC_VALUES,
);
self.writer.write_segment(DHT, Some(&buf))?;
if num_components == 3 {
build_huffman_segment(
&mut buf,
DCCLASS,
CHROMADESTINATION,
&STD_CHROMA_DC_CODE_LENGTHS,
&STD_CHROMA_DC_VALUES,
);
self.writer.write_segment(DHT, Some(&buf))?;
build_huffman_segment(
&mut buf,
ACCLASS,
CHROMADESTINATION,
&STD_CHROMA_AC_CODE_LENGTHS,
&STD_CHROMA_AC_VALUES,
);
self.writer.write_segment(DHT, Some(&buf))?;
}
build_scan_header(&mut buf, &self.components[..num_components]);
self.writer.write_segment(SOS, Some(&buf))?;
if I::Pixel::COLOR_TYPE.has_color() {
self.encode_rgb(image)
} else {
self.encode_gray(image)
}?;
self.writer.pad_byte()?;
self.writer.write_segment(EOI, None)?;
Ok(())
}
fn encode_gray<I: GenericImageView>(
&mut self,
image: &I,
) -> io::Result<()> {
let mut yblock = [0u8; 64];
let mut y_dcprev = 0;
let mut dct_yblock = [0i32; 64];
for y in range_step(0, image.height(), 8) {
for x in range_step(0, image.width(), 8) {
copy_blocks_gray(image, x, y, &mut yblock);
// Level shift and fdct
// Coeffs are scaled by 8
transform::fdct(&yblock, &mut dct_yblock);
// Quantization
for (i, dct) in dct_yblock.iter_mut().enumerate().take(64) {
*dct = ((*dct / 8) as f32 / f32::from(self.tables[i])).round() as i32;
}
let la = &*self.luma_actable;
let ld = &*self.luma_dctable;
y_dcprev = self.writer.write_block(&dct_yblock, y_dcprev, ld, la)?;
}
}
Ok(())
}
fn encode_rgb<I: GenericImageView>(
&mut self,
image: &I,
) -> io::Result<()> {
let mut y_dcprev = 0;
let mut cb_dcprev = 0;
let mut cr_dcprev = 0;
let mut dct_yblock = [0i32; 64];
let mut dct_cb_block = [0i32; 64];
let mut dct_cr_block = [0i32; 64];
let mut yblock = [0u8; 64];
let mut cb_block = [0u8; 64];
let mut cr_block = [0u8; 64];
for y in range_step(0, image.height(), 8) {
for x in range_step(0, image.width(), 8) {
// RGB -> YCbCr
copy_blocks_ycbcr(
image,
x,
y,
&mut yblock,
&mut cb_block,
&mut cr_block,
);
// Level shift and fdct
// Coeffs are scaled by 8
transform::fdct(&yblock, &mut dct_yblock);
transform::fdct(&cb_block, &mut dct_cb_block);
transform::fdct(&cr_block, &mut dct_cr_block);
// Quantization
for i in 0usize..64 {
dct_yblock[i] =
((dct_yblock[i] / 8) as f32 / f32::from(self.tables[i])).round() as i32;
dct_cb_block[i] = ((dct_cb_block[i] / 8) as f32
/ f32::from(self.tables[64+i]))
.round() as i32;
dct_cr_block[i] = ((dct_cr_block[i] / 8) as f32
/ f32::from(self.tables[64+i]))
.round() as i32;
}
let la = &*self.luma_actable;
let ld = &*self.luma_dctable;
let cd = &*self.chroma_dctable;
let ca = &*self.chroma_actable;
y_dcprev = self.writer.write_block(&dct_yblock, y_dcprev, ld, la)?;
cb_dcprev = self.writer.write_block(&dct_cb_block, cb_dcprev, cd, ca)?;
cr_dcprev = self.writer.write_block(&dct_cr_block, cr_dcprev, cd, ca)?;
}
}
Ok(())
}
}
impl<'a, W: Write> ImageEncoder for JpegEncoder<'a, W> {
fn write_image(
mut self,
buf: &[u8],
width: u32,
height: u32,
color_type: ColorType,
) -> ImageResult<()> {
self.encode(buf, width, height, color_type)
}
}
fn build_jfif_header(m: &mut Vec<u8>, density: PixelDensity) {
m.clear();
// TODO: More idiomatic would be extend_from_slice, to_be_bytes
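// APP0 payload layout: "JFIF\0" identifier, version 1.02, density unit,
// X density, Y density, and a 0x0 (absent) thumbnail.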
let _ = write!(m, "JFIF");
let _ = m.write_all(&[0]);
let _ = m.write_all(&[0x01]);
let _ = m.write_all(&[0x02]);
let _ = m.write_all(&[match density.unit {
PixelDensityUnit::PixelAspectRatio => 0x00,
PixelDensityUnit::Inches => 0x01,
PixelDensityUnit::Centimeters => 0x02,
}]);
let _ = m.write_u16::<BigEndian>(density.density.0);
let _ = m.write_u16::<BigEndian>(density.density.1);
let _ = m.write_all(&[0]);
let _ = m.write_all(&[0]);
}
fn build_frame_header(
m: &mut Vec<u8>,
precision: u8,
width: u16,
height: u16,
components: &[Component],
) {
m.clear();
// TODO: More idiomatic would be extend_from_slice, to_be_bytes
let _ = m.write_all(&[precision]);
let _ = m.write_u16::<BigEndian>(height);
let _ = m.write_u16::<BigEndian>(width);
let _ = m.write_all(&[components.len() as u8]);
for &comp in components.iter() {
let _ = m.write_all(&[comp.id]);
let hv = (comp.h << 4) | comp.v;
let _ = m.write_all(&[hv]);
let _ = m.write_all(&[comp.tq]);
}
}
fn build_scan_header(m: &mut Vec<u8>, components: &[Component]) {
m.clear();
// TODO: More idiomatic would be extend_from_slice, to_be_bytes
let _ = m.write_all(&[components.len() as u8]);
for &comp in components.iter() {
let _ = m.write_all(&[comp.id]);
let tables = (comp.dc_table << 4) | comp.ac_table;
let _ = m.write_all(&[tables]);
}
// spectral start and end, approx. high and low
let _ = m.write_all(&[0]);
let _ = m.write_all(&[63]);
let _ = m.write_all(&[0]);
}
fn build_huffman_segment(
m: &mut Vec<u8>,
class: u8,
destination: u8,
numcodes: &[u8],
values: &[u8],
) {
m.clear();
// TODO: More idiomatic would be pub, extend_from_slice
let tcth = (class << 4) | destination;
let _ = m.write_u8(tcth);
assert_eq!(numcodes.len(), 16);
let _ = m.write_all(numcodes);
let mut sum = 0usize;
for &i in numcodes.iter() {
sum += i as usize;
}
assert_eq!(sum, values.len());
let _ = m.write_all(values);
}
fn build_quantization_segment(m: &mut Vec<u8>, precision: u8, identifier: u8, qtable: &[u8]) {
assert_eq!(qtable.len() % 64, 0);
m.clear();
// TODO: More idiomatic would be pub, extend_from_slice
let p = if precision == 8 { 0 } else { 1 };
let pqtq = (p << 4) | identifier;
let _ = m.write_u8(pqtq);
for &i in &UNZIGZAG[..] {
let _ = m.write_u8(qtable[i as usize]);
}
}
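// JPEG magnitude-category coding: returns the number of bits needed for |coefficient|
// together with the value bits to append (negative values are stored as coefficient - 1,
// masked to that width). For example, 5 maps to (3, 0b101) and -3 maps to (2, 0b00).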
fn encode_coefficient(coefficient: i32) -> (u8, u16) {
let mut magnitude = coefficient.abs() as u16;
let mut num_bits = 0u8;
while magnitude > 0 {
magnitude >>= 1;
num_bits += 1;
}
let mask = (1 << num_bits as usize) - 1;
let val = if coefficient < 0 {
(coefficient - 1) as u16 & mask
} else {
coefficient as u16 & mask
};
(num_bits, val)
}
#[inline]
fn rgb_to_ycbcr<P: Pixel>(pixel: P) -> (u8, u8, u8) {
use num_traits::{cast::ToPrimitive, bounds::Bounded};
let [r, g, b] = pixel.to_rgb().0;
let max: f32 = P::Subpixel::max_value().to_f32().unwrap();
let r: f32 = r.to_f32().unwrap();
let g: f32 = g.to_f32().unwrap();
let b: f32 = b.to_f32().unwrap();
// Coefficients from JPEG File Interchange Format (Version 1.02), multiplied for 255 maximum.
let y = 76.245 / max * r + 149.685 / max * g + 29.07 / max * b;
let cb = -43.0185 / max * r - 84.4815 / max * g + 127.5 / max * b + 128.;
let cr = 127.5 / max * r - 106.7685 / max * g - 20.7315 / max * b + 128.;
(y as u8, cb as u8, cr as u8)
}
/// Returns the pixel at (x,y) if (x,y) is in the image,
/// otherwise the closest pixel in the image
#[inline]
fn pixel_at_or_near<I: GenericImageView>(source: &I, x: u32, y: u32) -> I::Pixel {
if source.in_bounds(x, y) {
source.get_pixel(x, y)
} else {
source.get_pixel(
x.min(source.width() - 1),
y.min(source.height() - 1),
)
}
}
fn copy_blocks_ycbcr<I: GenericImageView>(
source: &I,
x0: u32,
y0: u32,
yb: &mut [u8; 64],
cbb: &mut [u8; 64],
crb: &mut [u8; 64],
) {
for y in 0..8 {
for x in 0..8 {
let pixel = pixel_at_or_near(source, x + x0, y + y0);
let (yc, cb, cr) = rgb_to_ycbcr(pixel);
yb[(y * 8 + x) as usize] = yc;
cbb[(y * 8 + x) as usize] = cb;
crb[(y * 8 + x) as usize] = cr;
}
}
}
fn copy_blocks_gray<I: GenericImageView>(
source: &I,
x0: u32,
y0: u32,
gb: &mut [u8; 64],
) {
use num_traits::cast::ToPrimitive;
for y in 0..8 {
for x in 0..8 {
let pixel = pixel_at_or_near(source, x0 + x, y0 + y);
let [luma] = pixel.to_luma().0;
gb[(y * 8 + x) as usize] = luma.to_u8().unwrap();
}
}
}
#[cfg(test)]
mod tests {
use std::io::Cursor;
use crate::{Bgra, ImageBuffer, ImageEncoder, ImageError};
use crate::color::ColorType;
use crate::error::ParameterErrorKind::DimensionMismatch;
use crate::image::ImageDecoder;
use super::{build_jfif_header, JpegEncoder, PixelDensity};
use super::super::JpegDecoder;
fn decode(encoded: &[u8]) -> Vec<u8> {
let decoder = JpegDecoder::new(Cursor::new(encoded))
.expect("Could not decode image");
let mut decoded = vec![0; decoder.total_bytes() as usize];
decoder.read_image(&mut decoded).expect("Could not decode image");
decoded
}
#[test]
fn roundtrip_sanity_check() {
// create a 1x1 8-bit image buffer containing a single red pixel
let img = [255u8, 0, 0];
// encode it into a memory buffer
let mut encoded_img = Vec::new();
{
let encoder = JpegEncoder::new_with_quality(&mut encoded_img, 100);
encoder
.write_image(&img, 1, 1, ColorType::Rgb8)
.expect("Could not encode image");
}
// decode it from the memory buffer
{
let decoded = decode(&encoded_img);
// note that, even with the encode quality set to 100, we do not get the same image
// back. Therefore, we're going to assert that it's at least red-ish:
assert_eq!(3, decoded.len());
assert!(decoded[0] > 0x80);
assert!(decoded[1] < 0x80);
assert!(decoded[2] < 0x80);
}
}
#[test]
fn grayscale_roundtrip_sanity_check() {
// create a 2x2 8-bit image buffer containing a white diagonal
let img = [255u8, 0, 0, 255];
// encode it into a memory buffer
let mut encoded_img = Vec::new();
{
let encoder = JpegEncoder::new_with_quality(&mut encoded_img, 100);
encoder
.write_image(&img[..], 2, 2, ColorType::L8)
.expect("Could not encode image");
}
// decode it from the memory buffer
{
let decoded = decode(&encoded_img);
// note that, even with the encode quality set to 100, we do not get the same image
// back. Therefore, we're going to assert that the diagonal is at least white-ish:
assert_eq!(4, decoded.len());
assert!(decoded[0] > 0x80);
assert!(decoded[1] < 0x80);
assert!(decoded[2] < 0x80);
assert!(decoded[3] > 0x80);
}
}
#[test]
fn jfif_header_density_check() {
let mut buffer = Vec::new();
build_jfif_header(&mut buffer, PixelDensity::dpi(300));
assert_eq!(buffer, vec![
b'J', b'F', b'I', b'F',
0, 1, 2, // JFIF version 1.2
1, // density is in dpi
300u16.to_be_bytes()[0], 300u16.to_be_bytes()[1],
300u16.to_be_bytes()[0], 300u16.to_be_bytes()[1],
0, 0, // No thumbnail
]
);
}
#[test]
fn test_image_too_large() {
// JPEG cannot encode images larger than 65,535×65,535
// create a 65,536×1 8-bit black image buffer
let img = [0; 65_536];
// Try to encode an image that is too large
let mut encoded = Vec::new();
let encoder = JpegEncoder::new_with_quality(&mut encoded, 100);
let result = encoder.write_image(&img, 65_536, 1, ColorType::L8);
match result {
Err(ImageError::Parameter(err)) => {
assert_eq!(err.kind(), DimensionMismatch)
}
other => {
assert!(false, "Encoding an image that is too large should return a DimensionMismatch error, \
but it returned {:?} instead", other)
}
}
}
#[test]
fn test_bgra16() {
// Test encoding a BGRA 16-bit image.
// JPEG is 8-bit RGB, so the conversion should be done on the fly
let mut encoded = Vec::new();
let max = std::u16::MAX;
let image: ImageBuffer<Bgra<u16>, _> = ImageBuffer::from_raw(
1, 1, vec![0, max / 2, max, max]).unwrap();
let mut encoder = JpegEncoder::new_with_quality(&mut encoded, 100);
encoder.encode_image(&image).unwrap();
let decoded = decode(&encoded);
assert!(decoded[0] > 200, "bad red channel in {:?}", &decoded);
assert!(100 < decoded[1] && decoded[1] < 150, "bad green channel in {:?}", &decoded);
assert!(decoded[2] < 50, "bad blue channel in {:?}", &decoded);
}
}
| {
self.w.write_all(&[0xFF, marker])?;
if let Some(b) = data {
self.w.write_u16::<BigEndian>(b.len() as u16 + 2)?;
self.w.write_all(b)?;
}
Ok(())
} |
models.rs | #[cfg(test)]
mod model_test {
use crate::tests::*;
use raylib::prelude::*;
ray_test!(test_load_model);
fn test_load_model(thread: &RaylibThread) {
let mut handle = TEST_HANDLE.write().unwrap();
let rl = handle.as_mut().unwrap();
let _ = rl.load_model(thread, "resources/cube.obj");
let _ = rl.load_model(thread, "resources/pbr/trooper.obj");
}
ray_test!(test_load_meshes);
fn test_load_meshes(_thread: &RaylibThread) {
// TODO run this test when Raysan implements LoadMeshes
// let m = Mesh::load_meshes(thread, "resources/cube.obj").expect("couldn't load any meshes");
}
// ray_test!(test_load_anims);
#[test]
fn test_load_anims() {
let _ = ModelAnimation::load_model_animations("resources/guy/guyanim.iqm") | .expect("could not load model animations");
}
} |
|
__init__.py | from dataclasses import dataclass
from dechainy.plugins import Probe
from dechainy.ebpf import EbpfCompiler
| class Valid(Probe):
def __post_init__(self):
self.ingress.required = True
self.ingress.cflags.append("-DCUSTOM_VARIABLE=0")
self.egress.required = False
super().__post_init__(path=__file__)
def autopatch(self):
self.ingress.cflags[-1] = "-DCUSTOM_VARIABLE=1"
EbpfCompiler().patch_hook("ingress", self._programs.ingress,
self.ingress.code, self.ingress.cflags) |
@dataclass |
snl_block.py | import torch
import torch.nn as nn
class ImprovedSNL(nn.Module):
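"""Spectral-style non-local block: builds a symmetrically degree-normalized affinity
matrix over all spatial positions and uses it to aggregate column- and row-transformed
features, which are fused and added back to the input as a residual. The description
reflects the code below; the exact paper it follows is not stated in this file."""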
def __init__(self, in_channels, transfer_channels, stage_num=2):
super(ImprovedSNL, self).__init__()
self.in_channels = in_channels
self.transfer_channels = transfer_channels
self.stage_num = stage_num
self.transform_t = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)
self.transform_p = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)
self.row_transform = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)
self.column_transform = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)
self.w1 = nn.Conv2d(transfer_channels, in_channels, kernel_size=1, stride=1, bias=False)
self.w2 = nn.Conv2d(transfer_channels, in_channels, kernel_size=1, stride=1, bias=False)
self.bn = nn.BatchNorm2d(in_channels)
self._init_params()
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def getAtt(self, x):
t = self.transform_t(x)
p = self.transform_p(x)
b, c, h, w = t.size()
t = t.view(b, c, -1).permute(0, 2, 1)
p = p.view(b, c, -1)
m = torch.bmm(torch.relu(t), torch.relu(p))
m += m.permute(0, 2, 1)
m_hat = m / 2
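# Symmetric normalization D^(-1/2) * m_hat * D^(-1/2), where D is the row-degree of m_hat;
# entries with zero degree keep a zero scale factor, avoiding a division by zero.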
degree = torch.sum(m_hat, dim=2)
degree[degree != 0] = torch.sqrt(1.0 / degree[degree != 0])
affinity_matrix = m_hat * degree.unsqueeze(1)
affinity_matrix *= degree.unsqueeze(2)
return affinity_matrix
def stage(self, x):
affinity_matrix = self.getAtt(x)
column_features = self.column_transform(x)
b, c, h, w = column_features.size()
column_features = column_features.view(b, c, -1)
column_features = torch.bmm(column_features, affinity_matrix).contiguous().view(b,c,h,w)
column_features = self.w1(column_features)
row_features = self.row_transform(x)
b, c, h, w = row_features.size()
row_features = row_features.view(b, c, -1).permute(0, 2, 1)
row_features = torch.bmm(affinity_matrix, row_features).permute(0, 2, 1).contiguous().view(b,c,h,w)
row_features = self.w2(row_features)
output = column_features + row_features
output = self.bn(output)
output = output + x
return output
def | (self, x):
for stage in range(self.stage_num):
x = self.stage(x)
return x
| forward |
inf_nan.rs | /*!
Representation of the IEEE-754 special values positive/negative infinity and NaN.

The [`InfNan`] enum converts to and from [`InexactReal`] and parses from, and displays as,
the corresponding lexical string forms.
*/
use std::convert::TryFrom;
use std::fmt::{Debug, Display, Formatter};
use std::str::FromStr;
use num::Float;
use crate::error::{Error, ErrorKind};
use crate::read::syntax_str::{
VALUE_MATH_INFINITY_NEGATIVE, VALUE_MATH_INFINITY_POSITIVE, VALUE_MATH_NAN_NEGATIVE,
VALUE_MATH_NAN_POSITIVE,
};
use crate::types::numbers::{InexactReal, TYPE_NAME_INEXACT_REAL};
// ------------------------------------------------------------------------------------------------
// Public Types
// ------------------------------------------------------------------------------------------------
#[derive(Clone, Debug, PartialEq)]
pub enum InfNan {
PositiveInfinity,
NegativeInfinity,
PositiveNan,
NegativeNan,
}
const TYPE_NAME_INF_NAN: &str = "inf-nan";
// ------------------------------------------------------------------------------------------------
// Private Types
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Public Functions
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Implementations
// ------------------------------------------------------------------------------------------------
impl Display for InfNan {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.as_str())
}
}
impl From<InfNan> for InexactReal {
fn from(v: InfNan) -> Self {
match v {
InfNan::PositiveInfinity => InexactReal::infinity(),
InfNan::NegativeInfinity => InexactReal::neg_infinity(),
InfNan::PositiveNan => InexactReal::nan(),
InfNan::NegativeNan => -InexactReal::nan(),
}
}
}
impl TryFrom<InexactReal> for InfNan {
type Error = Error;
fn try_from(v: InexactReal) -> Result<Self, Self::Error> {
match (v.is_sign_negative(), v.is_infinite(), v.is_nan()) {
(false, true, _) => Ok(InfNan::PositiveInfinity),
(true, true, _) => Ok(InfNan::NegativeInfinity),
(false, _, true) => Ok(InfNan::PositiveNan),
(true, _, true) => Ok(InfNan::NegativeNan),
_ => Err(ErrorKind::TypeCast {
from: TYPE_NAME_INEXACT_REAL.to_string(),
to: TYPE_NAME_INF_NAN.to_string(),
}
.into()),
}
}
}
impl FromStr for InfNan {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s == VALUE_MATH_INFINITY_POSITIVE {
Ok(InfNan::PositiveInfinity)
} else if s == VALUE_MATH_INFINITY_NEGATIVE {
Ok(InfNan::NegativeInfinity)
} else if s == VALUE_MATH_NAN_POSITIVE {
Ok(InfNan::PositiveNan)
} else if s == VALUE_MATH_NAN_NEGATIVE {
Ok(InfNan::NegativeNan)
} else {
Err(ErrorKind::ParseValue {
kind: TYPE_NAME_INF_NAN.to_string(),
value: s.to_string(),
}
.into())
}
}
}
impl InfNan {
pub fn as_str(&self) -> &'static str {
match self {
InfNan::PositiveInfinity => VALUE_MATH_INFINITY_POSITIVE,
InfNan::NegativeInfinity => VALUE_MATH_INFINITY_NEGATIVE,
InfNan::PositiveNan => VALUE_MATH_NAN_POSITIVE,
InfNan::NegativeNan => VALUE_MATH_NAN_NEGATIVE,
}
}
}
// ------------------------------------------------------------------------------------------------
// Private Functions
// ------------------------------------------------------------------------------------------------ | // ------------------------------------------------------------------------------------------------ |
// ------------------------------------------------------------------------------------------------
// Modules |
test_compat.py | # -*- coding: utf-8 -*-
# Define source file encoding to support raw unicode characters in Python 2
import sys
# Third party
import pytest
# Project
from ddtrace.compat import to_unicode, PY2, reraise, get_connection_response
# Use different test suites for each Python version; this lets us assert the expected
# results for each version rather than writing a generic "works for both" test suite
if PY2:
class TestCompatPY2(object):
def test_to_unicode_string(self):
# Calling `compat.to_unicode` on a non-unicode string
res = to_unicode('test')
assert type(res) == unicode
assert res == 'test'
def test_to_unicode_unicode_encoded(self):
# Calling `compat.to_unicode` on a unicode encoded string
res = to_unicode('\xc3\xbf')
assert type(res) == unicode
assert res == u'ÿ'
def test_to_unicode_unicode_double_decode(self):
# Calling `compat.to_unicode` on a unicode decoded string
# This represents the double-decode issue, which can cause a `UnicodeEncodeError`
# `'\xc3\xbf'.decode('utf-8').decode('utf-8')`
r | def test_to_unicode_unicode_string(self):
# Calling `compat.to_unicode` on a unicode string
res = to_unicode(u'ÿ')
assert type(res) == unicode
assert res == u'ÿ'
def test_to_unicode_bytearray(self):
# Calling `compat.to_unicode` with a `bytearray` containing unicode
res = to_unicode(bytearray('\xc3\xbf'))
assert type(res) == unicode
assert res == u'ÿ'
def test_to_unicode_bytearray_double_decode(self):
# Calling `compat.to_unicode` with an already decoded `bytearray`
# This represents the double-decode issue, which can cause a `UnicodeEncodeError`
# `bytearray('\xc3\xbf').decode('utf-8').decode('utf-8')`
res = to_unicode(bytearray('\xc3\xbf').decode('utf-8'))
assert type(res) == unicode
assert res == u'ÿ'
def test_to_unicode_non_string(self):
# Calling `compat.to_unicode` on non-string types
assert to_unicode(1) == u'1'
assert to_unicode(True) == u'True'
assert to_unicode(None) == u'None'
assert to_unicode(dict(key='value')) == u'{\'key\': \'value\'}'
def test_get_connection_response(self):
"""Ensure that buffering is in kwargs."""
class MockConn(object):
def getresponse(self, *args, **kwargs):
assert 'buffering' in kwargs
mock = MockConn()
get_connection_response(mock)
else:
class TestCompatPY3(object):
def test_to_unicode_string(self):
# Calling `compat.to_unicode` on a non-unicode string
res = to_unicode('test')
assert type(res) == str
assert res == 'test'
def test_to_unicode_unicode_encoded(self):
# Calling `compat.to_unicode` on a unicode encoded string
res = to_unicode('\xff')
assert type(res) == str
assert res == 'ÿ'
def test_to_unicode_unicode_string(self):
# Calling `compat.to_unicode` on a unicode string
res = to_unicode('ÿ')
assert type(res) == str
assert res == 'ÿ'
def test_to_unicode_bytearray(self):
# Calling `compat.to_unicode` with a `bytearray` containing unicode
res = to_unicode(bytearray('\xff', 'utf-8'))
assert type(res) == str
assert res == 'ÿ'
def test_to_unicode_non_string(self):
# Calling `compat.to_unicode` on non-string types
assert to_unicode(1) == '1'
assert to_unicode(True) == 'True'
assert to_unicode(None) == 'None'
assert to_unicode(dict(key='value')) == '{\'key\': \'value\'}'
def test_get_connection_response(self):
"""Ensure that buffering is NOT in kwargs."""
class MockConn(object):
def getresponse(self, *args, **kwargs):
assert 'buffering' not in kwargs
mock = MockConn()
get_connection_response(mock)
class TestPy2Py3Compat(object):
"""Common tests to ensure functions are both Python 2 and
Python 3 compatible.
"""
def test_reraise(self):
# ensure the `raise` function is Python 2/3 compatible
with pytest.raises(Exception) as ex:
try:
raise Exception('Ouch!')
except Exception:
# original exception we want to re-raise
(typ, val, tb) = sys.exc_info()
try:
# this exception doesn't allow a re-raise, and we need
# to use the previous one collected via `exc_info()`
raise Exception('Obfuscate!')
except Exception:
pass
# this call must be Python 2 and 3 compatible
raise reraise(typ, val, tb)
assert ex.value.args[0] == 'Ouch!'
| es = to_unicode('\xc3\xbf'.decode('utf-8'))
assert type(res) == unicode
assert res == u'ÿ'
|
matlab_to_year.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 30 20:12:44 2019
@author: ben
"""
def | (t):
# Approximate conversion of a MATLAB datenum to a decimal year. Uses the MATLAB conversion
# datenum('jan 1 2000') -> 730486
return (t-730486.)/365.25+2000.
| matlab_to_year |
screen-service.ts | import { reactive } from '@vue/reactivity';
import { debounce } from '../../utils/utils';
import { EventTopic } from '../system/event/event-topic';
/**
* Media query breakpoints.
*/
const SM_WIDTH = 768;
const MD_WIDTH = 992;
const LG_WIDTH = 1200;
/**
* The HiDPI breakpoint.
* Any resolution above this breakpoint will be considered HiDPI.
* http://bjango.com/articles/min-device-pixel-ratio/
*/
const HIDPI_BREAKPOINT = 1.5;
export const onScreenResize = new EventTopic<void>();
class ScreenService {
/**
* The actual width of the browser/screen context. Either in actual pixels,
* or device pixels if we can.
*/
width = 0;
/**
* The actual height of the browser/screen context. Either in actual pixels,
* or device pixels if we can.
*/
height = 0;
isXs = false;
isSm = false;
isMd = false;
/**
* lg is the default true state.
*/
isLg = true;
get breakpoint() {
return this.isXs ? 'xs' : this.isSm ? 'sm' : this.isMd ? 'md' : 'lg';
}
get isMobile() {
return this.isXs || this.isSm;
}
get isDesktop() {
return !this.isMobile;
}
/**
* If it's Retina/HiDPI or not.
*/
isHiDpi = import.meta.env.SSR
? false
: window.matchMedia(
'only screen and (-webkit-min-device-pixel-ratio: ' +
HIDPI_BREAKPOINT +
')' +
', only screen and (min--moz-device-pixel-ratio: ' +
HIDPI_BREAKPOINT +
')' +
', only screen and (-o-min-device-pixel-ratio: ' +
HIDPI_BREAKPOINT +
' / 1)' +
', only screen and (min-resolution: ' +
HIDPI_BREAKPOINT +
'dppx)' +
', only screen and (min-resolution: ' +
HIDPI_BREAKPOINT * 96 +
'dpi)'
).matches;
isPointerMouse = import.meta.env.SSR
? true
: window.matchMedia('not screen and (pointer: coarse)').matches;
}
export const Screen = reactive(new ScreenService()) as ScreenService;
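// Screen is a reactive singleton: reading fields like Screen.isMobile inside Vue's
// computed()/watchEffect() will re-run when _onResize() updates them.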
if (!import.meta.env.SSR) {
// Check the breakpoints on app load.
_onResize();
/**
* This is used internally to check things every time window resizes.
* We debounce this and afterwards fire the resizeChanges for everyone else.
*/
window.addEventListener(
'resize',
debounce(() => _onResize(), 250)
);
}
async function _onResize() {
Screen.isXs = false;
Screen.isSm = false;
Screen.isMd = false;
Screen.isLg = false;
// Get everything for the window first.
if (window.matchMedia('only screen and (max-width: ' + (SM_WIDTH - 1) + 'px)').matches) {
Screen.isXs = true;
} else if (
window.matchMedia(
'only screen and (min-width: ' +
SM_WIDTH +
'px) and (max-width: ' +
(MD_WIDTH - 1) +
'px)'
).matches
) {
Screen.isSm = true;
} else if (
window.matchMedia(
'only screen and (min-width: ' +
MD_WIDTH +
'px) and (max-width: ' +
(LG_WIDTH - 1) +
'px)'
).matches
) {
Screen.isMd = true;
} else if (window.matchMedia('only screen and (min-width: ' + LG_WIDTH + 'px)').matches) {
Screen.isLg = true; | }
Screen.width = window.innerWidth > 0 ? window.innerWidth : (window as any)['width'];
Screen.height = window.innerHeight > 0 ? window.innerHeight : (window as any)['height'];
// Emit every time we resize.
onScreenResize.next();
}
/**
* Can be used to tell the Screen service to check sizing again in case things
* have shifted around without the screen actually resizing.
*/
export function triggerOnScreenResize() {
_onResize();
} | |
concat.rs | use syntax::ast;
use syntax::ext::base::{self, DummyResult};
use syntax::symbol::Symbol;
use syntax::tokenstream::TokenStream;
use std::string::String;
pub fn expand_concat(
cx: &mut base::ExtCtxt<'_>,
sp: syntax_pos::Span,
tts: TokenStream,
) -> Box<dyn base::MacResult + 'static> | {
let es = match base::get_exprs_from_tts(cx, sp, tts) {
Some(e) => e,
None => return DummyResult::any(sp),
};
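// Stringify every literal argument into one accumulated string; spans of non-literal
// arguments are collected so they can all be reported in a single error below.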
let mut accumulator = String::new();
let mut missing_literal = vec![];
let mut has_errors = false;
for e in es {
match e.kind {
ast::ExprKind::Lit(ref lit) => match lit.kind {
ast::LitKind::Str(ref s, _)
| ast::LitKind::Float(ref s, _)
| ast::LitKind::FloatUnsuffixed(ref s) => {
accumulator.push_str(&s.as_str());
}
ast::LitKind::Char(c) => {
accumulator.push(c);
}
ast::LitKind::Int(i, ast::LitIntType::Unsigned(_))
| ast::LitKind::Int(i, ast::LitIntType::Signed(_))
| ast::LitKind::Int(i, ast::LitIntType::Unsuffixed) => {
accumulator.push_str(&i.to_string());
}
ast::LitKind::Bool(b) => {
accumulator.push_str(&b.to_string());
}
ast::LitKind::Byte(..) | ast::LitKind::ByteStr(..) => {
cx.span_err(e.span, "cannot concatenate a byte string literal");
}
ast::LitKind::Err(_) => {
has_errors = true;
}
},
ast::ExprKind::Err => {
has_errors = true;
}
_ => {
missing_literal.push(e.span);
}
}
}
if !missing_literal.is_empty() {
let mut err = cx.struct_span_err(missing_literal, "expected a literal");
err.note("only literals (like `\"foo\"`, `42` and `3.14`) can be passed to `concat!()`");
err.emit();
return DummyResult::any(sp);
} else if has_errors {
return DummyResult::any(sp);
}
let sp = cx.with_def_site_ctxt(sp);
base::MacEager::expr(cx.expr_str(sp, Symbol::intern(&accumulator)))
} |
|
web.go | package web
import (
"encoding/json"
"errors"
"fmt"
"html/template"
"io/ioutil"
"log"
"net/http"
"strconv"
pb "github.com/buoyantio/emojivoto/emojivoto-web/gen/proto"
)
type WebApp struct {
emojiServiceClient pb.EmojiServiceClient
votingServiceClient pb.VotingServiceClient
indexBundle string
webpackDevServer string
}
func (app *WebApp) listEmojiHandler(w http.ResponseWriter, r *http.Request) {
serviceResponse, err := app.emojiServiceClient.ListAll(r.Context(), &pb.ListAllEmojiRequest{})
if err != nil {
writeError(err, w, r, http.StatusInternalServerError)
return
}
list := make([]map[string]string, 0)
for _, e := range serviceResponse.List {
list = append(list, map[string]string{
"shortcode": e.Shortcode,
"unicode": e.Unicode,
})
}
err = writeJsonBody(w, http.StatusOK, list)
if err != nil {
writeError(err, w, r, http.StatusInternalServerError)
}
}
func (app *WebApp) leaderboardHandler(w http.ResponseWriter, r *http.Request) {
results, err := app.votingServiceClient.Results(r.Context(), &pb.ResultsRequest{})
if err != nil {
writeError(err, w, r, http.StatusInternalServerError)
return
}
representations := make([]map[string]string, 0)
for _, result := range results.Results {
findByShortcodeRequest := &pb.FindByShortcodeRequest{
Shortcode: result.Shortcode,
}
findByShortcodeResponse, err := app.emojiServiceClient.FindByShortcode(r.Context(), findByShortcodeRequest)
if err != nil {
writeError(err, w, r, http.StatusInternalServerError)
return
}
emoji := findByShortcodeResponse.Emoji
representation := make(map[string]string)
representation["votes"] = strconv.Itoa(int(result.Votes))
representation["unicode"] = emoji.Unicode
representation["shortcode"] = emoji.Shortcode
representations = append(representations, representation)
}
err = writeJsonBody(w, http.StatusOK, representations)
if err != nil {
writeError(err, w, r, http.StatusInternalServerError)
}
}
func (app *WebApp) voteEmojiHandler(w http.ResponseWriter, r *http.Request) {
emojiShortcode := r.FormValue("choice")
if emojiShortcode == "" {
err := errors.New(fmt.Sprintf("Emoji choice [%s] is mandatory", emojiShortcode))
writeError(err, w, r, http.StatusBadRequest)
return
}
request := &pb.FindByShortcodeRequest{
Shortcode: emojiShortcode,
}
response, err := app.emojiServiceClient.FindByShortcode(r.Context(), request)
if err != nil {
writeError(err, w, r, http.StatusInternalServerError)
return
}
if response.Emoji == nil {
err = errors.New(fmt.Sprintf("Choosen emoji shortcode [%s] doesnt exist", emojiShortcode))
writeError(err, w, r, http.StatusBadRequest)
return
}
voteRequest := &pb.VoteRequest{}
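// The voting service exposes one RPC per emoji, so the shortcode has to be mapped
// to its RPC explicitly here.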
switch emojiShortcode {
case ":poop:":
_, err = app.votingServiceClient.VotePoop(r.Context(), voteRequest)
case ":joy:":
_, err = app.votingServiceClient.VoteJoy(r.Context(), voteRequest)
case ":sunglasses:":
_, err = app.votingServiceClient.VoteSunglasses(r.Context(), voteRequest)
case ":relaxed:":
_, err = app.votingServiceClient.VoteRelaxed(r.Context(), voteRequest)
case ":stuck_out_tongue_winking_eye:":
_, err = app.votingServiceClient.VoteStuckOutTongueWinkingEye(r.Context(), voteRequest)
case ":money_mouth_face:":
_, err = app.votingServiceClient.VoteMoneyMouthFace(r.Context(), voteRequest)
case ":flushed:":
_, err = app.votingServiceClient.VoteFlushed(r.Context(), voteRequest)
case ":mask:":
_, err = app.votingServiceClient.VoteMask(r.Context(), voteRequest)
case ":nerd_face:":
_, err = app.votingServiceClient.VoteNerdFace(r.Context(), voteRequest)
case ":ghost:":
_, err = app.votingServiceClient.VoteGhost(r.Context(), voteRequest)
case ":skull_and_crossbones:":
_, err = app.votingServiceClient.VoteSkullAndCrossbones(r.Context(), voteRequest)
case ":heart_eyes_cat:":
_, err = app.votingServiceClient.VoteHeartEyesCat(r.Context(), voteRequest)
case ":hear_no_evil:":
_, err = app.votingServiceClient.VoteHearNoEvil(r.Context(), voteRequest)
case ":see_no_evil:":
_, err = app.votingServiceClient.VoteSeeNoEvil(r.Context(), voteRequest)
case ":speak_no_evil:":
_, err = app.votingServiceClient.VoteSpeakNoEvil(r.Context(), voteRequest)
case ":boy:":
_, err = app.votingServiceClient.VoteBoy(r.Context(), voteRequest)
case ":girl:":
_, err = app.votingServiceClient.VoteGirl(r.Context(), voteRequest)
case ":man:":
_, err = app.votingServiceClient.VoteMan(r.Context(), voteRequest)
case ":woman:":
_, err = app.votingServiceClient.VoteWoman(r.Context(), voteRequest)
case ":older_man:":
_, err = app.votingServiceClient.VoteOlderMan(r.Context(), voteRequest)
case ":policeman:":
_, err = app.votingServiceClient.VotePoliceman(r.Context(), voteRequest)
case ":guardsman:":
_, err = app.votingServiceClient.VoteGuardsman(r.Context(), voteRequest)
case ":construction_worker_man:":
_, err = app.votingServiceClient.VoteConstructionWorkerMan(r.Context(), voteRequest)
case ":prince:":
_, err = app.votingServiceClient.VotePrince(r.Context(), voteRequest)
case ":princess:":
_, err = app.votingServiceClient.VotePrincess(r.Context(), voteRequest)
case ":man_in_tuxedo:":
_, err = app.votingServiceClient.VoteManInTuxedo(r.Context(), voteRequest)
case ":bride_with_veil:":
_, err = app.votingServiceClient.VoteBrideWithVeil(r.Context(), voteRequest)
case ":mrs_claus:":
_, err = app.votingServiceClient.VoteMrsClaus(r.Context(), voteRequest)
case ":santa:":
_, err = app.votingServiceClient.VoteSanta(r.Context(), voteRequest)
case ":turkey:":
_, err = app.votingServiceClient.VoteTurkey(r.Context(), voteRequest)
case ":rabbit:":
_, err = app.votingServiceClient.VoteRabbit(r.Context(), voteRequest)
case ":no_good_woman:":
_, err = app.votingServiceClient.VoteNoGoodWoman(r.Context(), voteRequest)
case ":ok_woman:":
_, err = app.votingServiceClient.VoteOkWoman(r.Context(), voteRequest)
case ":raising_hand_woman:":
_, err = app.votingServiceClient.VoteRaisingHandWoman(r.Context(), voteRequest)
case ":bowing_man:":
_, err = app.votingServiceClient.VoteBowingMan(r.Context(), voteRequest)
case ":man_facepalming:":
_, err = app.votingServiceClient.VoteManFacepalming(r.Context(), voteRequest)
case ":woman_shrugging:":
_, err = app.votingServiceClient.VoteWomanShrugging(r.Context(), voteRequest)
case ":massage_woman:":
_, err = app.votingServiceClient.VoteMassageWoman(r.Context(), voteRequest)
case ":walking_man:":
_, err = app.votingServiceClient.VoteWalkingMan(r.Context(), voteRequest)
case ":running_man:":
_, err = app.votingServiceClient.VoteRunningMan(r.Context(), voteRequest)
case ":dancer:":
_, err = app.votingServiceClient.VoteDancer(r.Context(), voteRequest)
case ":man_dancing:":
_, err = app.votingServiceClient.VoteManDancing(r.Context(), voteRequest)
case ":dancing_women:":
_, err = app.votingServiceClient.VoteDancingWomen(r.Context(), voteRequest)
case ":rainbow:":
_, err = app.votingServiceClient.VoteRainbow(r.Context(), voteRequest)
case ":skier:":
_, err = app.votingServiceClient.VoteSkier(r.Context(), voteRequest)
case ":golfing_man:":
_, err = app.votingServiceClient.VoteGolfingMan(r.Context(), voteRequest)
case ":surfing_man:":
_, err = app.votingServiceClient.VoteSurfingMan(r.Context(), voteRequest)
case ":basketball_man:":
_, err = app.votingServiceClient.VoteBasketballMan(r.Context(), voteRequest)
case ":biking_man:":
_, err = app.votingServiceClient.VoteBikingMan(r.Context(), voteRequest)
case ":point_up_2:":
_, err = app.votingServiceClient.VotePointUp2(r.Context(), voteRequest)
case ":vulcan_salute:":
_, err = app.votingServiceClient.VoteVulcanSalute(r.Context(), voteRequest)
case ":metal:":
_, err = app.votingServiceClient.VoteMetal(r.Context(), voteRequest)
case ":call_me_hand:":
_, err = app.votingServiceClient.VoteCallMeHand(r.Context(), voteRequest)
case ":thumbsup:":
_, err = app.votingServiceClient.VoteThumbsup(r.Context(), voteRequest)
case ":wave:":
_, err = app.votingServiceClient.VoteWave(r.Context(), voteRequest)
case ":clap:":
_, err = app.votingServiceClient.VoteClap(r.Context(), voteRequest)
case ":raised_hands:":
_, err = app.votingServiceClient.VoteRaisedHands(r.Context(), voteRequest)
case ":pray:":
_, err = app.votingServiceClient.VotePray(r.Context(), voteRequest)
case ":dog:":
_, err = app.votingServiceClient.VoteDog(r.Context(), voteRequest)
case ":cat2:":
_, err = app.votingServiceClient.VoteCat2(r.Context(), voteRequest)
case ":pig:":
_, err = app.votingServiceClient.VotePig(r.Context(), voteRequest)
case ":hatching_chick:":
_, err = app.votingServiceClient.VoteHatchingChick(r.Context(), voteRequest)
case ":snail:":
_, err = app.votingServiceClient.VoteSnail(r.Context(), voteRequest)
case ":bacon:":
_, err = app.votingServiceClient.VoteBacon(r.Context(), voteRequest)
case ":pizza:":
_, err = app.votingServiceClient.VotePizza(r.Context(), voteRequest)
case ":taco:":
_, err = app.votingServiceClient.VoteTaco(r.Context(), voteRequest)
case ":burrito:":
_, err = app.votingServiceClient.VoteBurrito(r.Context(), voteRequest)
case ":ramen:":
_, err = app.votingServiceClient.VoteRamen(r.Context(), voteRequest)
case ":doughnut:":
_, err = app.votingServiceClient.VoteDoughnut(r.Context(), voteRequest)
case ":champagne:":
_, err = app.votingServiceClient.VoteChampagne(r.Context(), voteRequest)
case ":tropical_drink:":
_, err = app.votingServiceClient.VoteTropicalDrink(r.Context(), voteRequest)
case ":beer:":
_, err = app.votingServiceClient.VoteBeer(r.Context(), voteRequest)
case ":tumbler_glass:":
_, err = app.votingServiceClient.VoteTumblerGlass(r.Context(), voteRequest)
case ":world_map:":
_, err = app.votingServiceClient.VoteWorldMap(r.Context(), voteRequest)
case ":beach_umbrella:":
_, err = app.votingServiceClient.VoteBeachUmbrella(r.Context(), voteRequest)
case ":mountain_snow:":
_, err = app.votingServiceClient.VoteMountainSnow(r.Context(), voteRequest)
case ":camping:":
_, err = app.votingServiceClient.VoteCamping(r.Context(), voteRequest)
case ":steam_locomotive:":
_, err = app.votingServiceClient.VoteSteamLocomotive(r.Context(), voteRequest)
case ":flight_departure:":
_, err = app.votingServiceClient.VoteFlightDeparture(r.Context(), voteRequest)
case ":rocket:":
_, err = app.votingServiceClient.VoteRocket(r.Context(), voteRequest)
case ":star2:":
_, err = app.votingServiceClient.VoteStar2(r.Context(), voteRequest)
case ":sun_behind_small_cloud:":
_, err = app.votingServiceClient.VoteSunBehindSmallCloud(r.Context(), voteRequest)
case ":cloud_with_rain:":
_, err = app.votingServiceClient.VoteCloudWithRain(r.Context(), voteRequest)
case ":fire:":
_, err = app.votingServiceClient.VoteFire(r.Context(), voteRequest)
case ":jack_o_lantern:":
_, err = app.votingServiceClient.VoteJackOLantern(r.Context(), voteRequest)
case ":balloon:":
_, err = app.votingServiceClient.VoteBalloon(r.Context(), voteRequest)
case ":tada:":
_, err = app.votingServiceClient.VoteTada(r.Context(), voteRequest)
case ":trophy:":
_, err = app.votingServiceClient.VoteTrophy(r.Context(), voteRequest)
case ":iphone:":
_, err = app.votingServiceClient.VoteIphone(r.Context(), voteRequest)
case ":pager:":
_, err = app.votingServiceClient.VotePager(r.Context(), voteRequest)
case ":fax:":
_, err = app.votingServiceClient.VoteFax(r.Context(), voteRequest)
case ":bulb:":
_, err = app.votingServiceClient.VoteBulb(r.Context(), voteRequest)
case ":money_with_wings:":
_, err = app.votingServiceClient.VoteMoneyWithWings(r.Context(), voteRequest)
case ":crystal_ball:":
_, err = app.votingServiceClient.VoteCrystalBall(r.Context(), voteRequest)
case ":underage:":
_, err = app.votingServiceClient.VoteUnderage(r.Context(), voteRequest)
case ":interrobang:":
_, err = app.votingServiceClient.VoteInterrobang(r.Context(), voteRequest)
case ":100:":
_, err = app.votingServiceClient.Vote100(r.Context(), voteRequest)
case ":checkered_flag:":
_, err = app.votingServiceClient.VoteCheckeredFlag(r.Context(), voteRequest)
case ":crossed_swords:":
_, err = app.votingServiceClient.VoteCrossedSwords(r.Context(), voteRequest)
case ":floppy_disk:":
_, err = app.votingServiceClient.VoteFloppyDisk(r.Context(), voteRequest)
}
if err != nil {
writeError(err, w, r, http.StatusInternalServerError)
return
}
}
func (app *WebApp) indexHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/html")
indexTemplate := `
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Emoji Vote</title>
<link rel="icon" href="/img/favicon.ico">
</head>
<body>
<div id="main" class="main"></div>
</body>
{{ if ne . ""}}
<script type="text/javascript" src="{{ . }}/dist/index_bundle.js" async></script>
{{else}}
<script type="text/javascript" src="/js" async></script>
{{end}}
</html>`
t, err := template.New("indexTemplate").Parse(indexTemplate)
if err != nil {
panic(err)
}
t.Execute(w, app.webpackDevServer)
}
func (app *WebApp) jsHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/javascript")
f, err := ioutil.ReadFile(app.indexBundle)
if err != nil {
panic(err)
}
fmt.Fprint(w, string(f))
}
func (app *WebApp) faviconHandler(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "./web/favicon.ico")
}
func writeJsonBody(w http.ResponseWriter, status int, body interface{}) error {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(status)
return json.NewEncoder(w).Encode(body)
}
func | (err error, w http.ResponseWriter, r *http.Request, status int) {
log.Printf("Error serving request [%v]: %v", r, err)
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(status)
errorMessage := make(map[string]string)
errorMessage["error"] = fmt.Sprintf("%v", err)
json.NewEncoder(w).Encode(errorMessage)
}
func StartServer(webPort, webpackDevServer, indexBundle string, emojiServiceClient pb.EmojiServiceClient, votingClient pb.VotingServiceClient) {
webApp := &WebApp{
emojiServiceClient: emojiServiceClient,
votingServiceClient: votingClient,
indexBundle: indexBundle,
webpackDevServer: webpackDevServer,
}
log.Printf("Starting web server on WEB_PORT=[%s]", webPort)
http.HandleFunc("/", webApp.indexHandler)
http.HandleFunc("/leaderboard", webApp.indexHandler)
http.HandleFunc("/js", webApp.jsHandler)
http.HandleFunc("/img/favicon.ico", webApp.faviconHandler)
http.HandleFunc("/api/list", webApp.listEmojiHandler)
http.HandleFunc("/api/vote", webApp.voteEmojiHandler)
http.HandleFunc("/api/leaderboard", webApp.leaderboardHandler)
// TODO: make static assets dir configurable
http.Handle("/dist/", http.StripPrefix("/dist/", http.FileServer(http.Dir("dist"))))
err := http.ListenAndServe(fmt.Sprintf(":%s", webPort), nil)
if err != nil {
panic(err)
}
}
| writeError |
teams_invite.go | package cmd
import (
"fmt"
"errors"
"github.com/gosuri/uitable"
"github.com/mittwald/spacectl/client/teams"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var teamInviteFlags struct {
Email string
UserID string
Message string
Role string
}
// inviteCmd represents the invite command
var teamInviteCmd = &cobra.Command{
Use: "invite -t <team-id> -e <email> -m <message>",
Short: "Invite new users to your team",
Long: `Invite a new user into your team`,
RunE: func(cmd *cobra.Command, args []string) error {
var err error
var invite teams.Invite
teamID := viper.GetString("teamID")
if teamID == "" {
return errors.New("must provide team (--team-id or -t)")
}
if teamInviteFlags.Email == "" && teamInviteFlags.UserID == "" {
return errors.New("must provide user (either --email|-e or --user-id|-u)")
}
userTemplate := "inviting user \"%s\" into team %s\n"
if teamInviteFlags.UserID != "" {
fmt.Printf(userTemplate, teamInviteFlags.UserID, teamID)
invite, err = api.Teams().InviteByUID(
teamID,
teamInviteFlags.UserID,
teamInviteFlags.Message,
teamInviteFlags.Role,
)
} else if teamInviteFlags.Email != "" {
fmt.Printf(userTemplate, teamInviteFlags.Email, teamID)
invite, err = api.Teams().InviteByEmail(
teamID,
teamInviteFlags.Email,
teamInviteFlags.Message,
teamInviteFlags.Role,
)
}
if err != nil {
return err
}
fmt.Printf("invite %s issued\n", invite.ID)
table := uitable.New()
table.MaxColWidth = 80
table.Wrap = true
table.AddRow("ID:", invite.ID)
table.AddRow("Message:", invite.Message)
table.AddRow("State:", invite.State)
fmt.Println(table)
return nil
},
}
func | () {
teamsCmd.AddCommand(teamInviteCmd)
teamInviteCmd.Flags().StringVarP(&teamInviteFlags.Email, "email", "e", "", "Email address of the user to invite")
teamInviteCmd.Flags().StringVarP(&teamInviteFlags.UserID, "user-id", "u", "", "User ID of the user to invite")
teamInviteCmd.Flags().StringVarP(&teamInviteFlags.Message, "message", "m", "", "Invitation message")
teamInviteCmd.Flags().StringVarP(&teamInviteFlags.Role, "role", "r", "", "User role")
}
| init |
10_request_shutdown.rs | //! This example demonstrates how a subsystem can initiate
//! a shutdown.
use env_logger::{Builder, Env};
use miette::Result;
use tokio::time::{sleep, Duration};
use tokio_graceful_shutdown::{SubsystemHandle, Toplevel};
struct CountdownSubsystem {}
impl CountdownSubsystem {
fn new() -> Self {
Self {}
}
async fn countdown(&self) {
for i in (1..10).rev() {
log::info!("Shutting down in: {}", i); | sleep(Duration::from_millis(1000)).await;
}
}
async fn run(self, subsys: SubsystemHandle) -> Result<()> {
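// Race the countdown against an externally requested shutdown; if the countdown
// finishes first, this subsystem initiates the shutdown itself.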
tokio::select! {
_ = subsys.on_shutdown_requested() => {
log::info!("Countdown cancelled.");
},
_ = self.countdown() => {
subsys.request_shutdown();
}
};
Ok(())
}
}
#[tokio::main]
async fn main() -> Result<()> {
// Init logging
Builder::from_env(Env::default().default_filter_or("debug")).init();
// Create toplevel
Toplevel::new()
.start("Countdown", |h| CountdownSubsystem::new().run(h))
.catch_signals()
.handle_shutdown_requests(Duration::from_millis(1000))
.await
.map_err(Into::into)
} | |
thing.ts | /* Generated from:
* ap-northeast-1 (https://d33vqc0rt9ld30.cloudfront.net/latest/gzip/CloudFormationResourceSpecification.json), version 14.2.0,
* ap-northeast-2 (https://d1ane3fvebulky.cloudfront.net/latest/gzip/CloudFormationResourceSpecification.json), version 14.2.0,
* ap-northeast-3 (https://d2zq80gdmjim8k.cloudfront.net/latest/gzip/CloudFormationResourceSpecification.json), version 14.2.0,
* ap-south-1 (https://d2senuesg1djtx.cloudfront.net/latest/gzip/CloudFormationResourceSpecification.json), version 14.2.0,
* ap-southeast-1 (https://doigdx0kgq9el.cloudfront.net/latest/gzip/CloudFormationResourceSpecification.json), version 14.2.0,
* ap-southeast-2 (https://d2stg8d246z9di.cloudfront.net/latest/gzip/CloudFormationResourceSpecification.json), version 14.2.0,
* ca-central-1 (https://d2s8ygphhesbe7.cloudfront.net/latest/gzip/CloudFormationResourceSpecification.json), version 14.2.0,
* eu-central-1 (https://d1mta8qj7i28i2.cloudfront.net/latest/gzip/CloudFormationResourceSpecification.json), version 14.2.0,
* eu-west-1 (https://d3teyb21fexa9r.cloudfront.net/latest/gzip/CloudFormationResourceSpecification.json), version 14.2.0,
* eu-west-2 (https://d1742qcu2c1ncx.cloudfront.net/latest/gzip/CloudFormationResourceSpecification.json), version 14.2.0,
* eu-west-3 (https://d2d0mfegowb3wk.cloudfront.net/latest/gzip/CloudFormationResourceSpecification.json), version 14.2.0,
* sa-east-1 (https://d3c9jyj3w509b0.cloudfront.net/latest/gzip/CloudFormationResourceSpecification.json), version 14.2.0,
* us-east-1 (https://d1uauaxba7bl26.cloudfront.net/latest/gzip/CloudFormationResourceSpecification.json), version 14.2.0,
* us-east-2 (https://dnwj8swjjbsbt.cloudfront.net/latest/gzip/CloudFormationResourceSpecification.json), version 14.2.0,
* us-west-1 (https://d68hl49wbnanq.cloudfront.net/latest/gzip/CloudFormationResourceSpecification.json), version 14.2.0,
* us-west-2 (https://d201a2mn26r7lk.cloudfront.net/latest/gzip/CloudFormationResourceSpecification.json), version 14.2.0
*/
import {ResourceBase} from '../resource'
import {Value, List} from '../dataTypes'
export class | {
Attributes?: {[key: string]: Value<string>}
constructor(properties: AttributePayload) {
Object.assign(this, properties)
}
}
export interface ThingProperties {
AttributePayload?: AttributePayload
ThingName?: Value<string>
}
export default class Thing extends ResourceBase<ThingProperties> {
static AttributePayload = AttributePayload
constructor(properties?: ThingProperties) {
super('AWS::IoT::Thing', properties || {})
}
}
| AttributePayload |
PostView.js | import React, { Component, Fragment } from 'react'
import PropTypes from 'prop-types'
import Paper from '@material-ui/core/Paper'
import Typography from '@material-ui/core/Typography'
import Divider from '@material-ui/core/Divider'
import Helmet from 'react-helmet-async'
import { withStyles } from '@material-ui/core/styles'
import { observer, inject } from 'mobx-react'
import { placeHolder } from './PlaceHolder'
import { styles } from './PostViewStyles'
import { db } from 'components/commons/firebase/firebase'
import { currentPath } from 'components/commons/utils'
import PostActions from 'components/shared/postactions/PostActions'
import MapCard from 'components/shared/map/MapCard'
import Media from 'components/mobile/postview/Media'
import PostDetails from 'components/mobile/postview/PostDetails'
import Caption from 'components/mobile/postcard/Caption'
import Author from 'components/mobile/postview/Author'
@inject('usersStore')
@inject('postsStore')
@inject('bookmarksStore')
@inject('notificationsStore')
@observer
class | extends Component {
state = {
post: {},
user: {}
}
componentDidMount () {
const currentPost = this.props.postsStore.currentPost
if (!currentPost) {
this.loadFromDB()
return
}
this.loadUser(currentPost)
}
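// Fallback used when the post is not already in the MobX store,
// e.g. when the view is opened directly via its URL.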
loadFromDB () {
const postRef = db.collection('posts').doc(currentPath(2))
postRef.get()
.then(result => {
if (result.exists && !result.data().deleted) {
const post = result.data()
post.id = result.id
this.loadUser(post)
} else {
// throw 404
}
}).catch(error => {
console.error('Unable to fetch post information', error)
})
}
loadUser = post => {
const userRef = db.collection('users').doc(post.userId)
userRef.get()
.then(user => {
if (user.exists) {
// Load post and user here to avoid double loading
this.setState({user: user.data()})
this.setState({post: post})
}
}).catch(error => {
console.error('Unable to fetch user information', error)
})
}
handleSold = (post) => this.setState({post: post})
render() {
const { classes } = this.props
const { post, user } = this.state
return (
<Fragment>
<Helmet>
{ post.title && <title>{post.title} near {post.locText} | Nearo</title> }
</Helmet>
<Paper square elevation={0} style={{minHeight: '100vh'}}>
<div style={{padding: 10, paddingBottom: 0}}>
<Typography component="h1" variant="subtitle2" >
{ post.title }
{ post.category === 'forsale' &&
post.price !== 0 &&
<span className={classes.price}>
{"$" + post.price }
</span>
}
</Typography>
{ post.id && <Caption post={post} /> }
</div>
<div style={{padding: 10, paddingBottom: 0}}>
{ post.id ? <Media post={post} />: placeHolder() }
</div>
<div style={{padding: 10, paddingBottom: 0}}>
{ post.id && <PostActions post={post}/> }
</div>
<div style={{padding: 10, paddingBottom: 0}}>
{ post.id && <PostDetails post={ post } classes={ classes }/> }
</div>
<div style={{padding: 10}}>
<Divider />
</div>
<div style={{padding: 10}}>
{ user && <Author user={ user }/> }
</div>
<div style={{padding: 10, paddingBottom: 0}}>
<Divider />
</div>
<div style={{padding: 10}}>
<Typography variant="caption" gutterBottom>
Approximate Location
</Typography>
<MapCard height={150} center={ post._geoloc }/>
</div>
</Paper>
</Fragment>
)
}
}
PostView.propTypes = {
classes: PropTypes.object.isRequired,
}
export default withStyles(styles)(PostView)
| PostView |
lib.rs | // Copyright 2019 Authors of Red Sift
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
/*!
Procedural macros to help writing eBPF programs using the `redbpf-probes`
crate.
# Overview
`redbpf-macros` is part of the `redbpf` project. Together with
[`redbpf-probes`](https://ingraind.org/api/redbpf_probes/), it
provides an idiomatic Rust API to write programs that can be compiled to eBPF
bytecode and executed by the Linux in-kernel eBPF virtual machine.
To streamline the process of working with eBPF programs even further,
`redbpf` also provides
[`cargo-bpf`](https://ingraind.org/api/cargo_bpf/) - a cargo subcommand
to simplify creating and building eBPF programs.
# Example
```no_run
#![no_std]
#![no_main]
use redbpf_probes::xdp::prelude::*;
// configure kernel version compatibility and license
program!(0xFFFFFFFE, "GPL");
#[xdp]
fn example_xdp_probe(ctx: XdpContext) -> XdpResult {
// do something here
Ok(XdpAction::Pass)
}
```
*/
#![cfg_attr(RUSTC_IS_NIGHTLY, feature(proc_macro_diagnostic))]
extern crate proc_macro;
extern crate proc_macro2;
use proc_macro::TokenStream;
#[cfg(RUSTC_IS_NIGHTLY)]
use proc_macro::{Diagnostic, Level};
use proc_macro2::{Ident, Span, TokenStream as TokenStream2};
use quote::quote;
use std::str;
use syn::parse::{Parse, ParseStream};
use syn::punctuated::Punctuated;
use syn::token::Comma;
use syn::{
parse_macro_input, parse_quote, parse_str, Expr, ExprLit, File, ItemFn, ItemStatic, Lit, Meta,
Result,
};
fn inline_string_literal(e: &Expr) -> (TokenStream2, TokenStream2) {
let bytes = match e {
Expr::Lit(ExprLit {
lit: Lit::Str(s), ..
}) => s.value().into_bytes(),
_ => panic!("expected string literal"),
};
inline_bytes(bytes)
}
fn inline_bytes(mut bytes: Vec<u8>) -> (TokenStream2, TokenStream2) {
bytes.push(0u8);
let len = bytes.len();
let bytes = bytes;
let ty = quote!([u8; #len]);
let array_lit = quote!([#(#bytes),*]);
(ty, array_lit)
}
struct Args(Punctuated<Expr, Comma>);
impl Parse for Args {
fn parse(input: ParseStream) -> Result<Args> {
Ok(Args(Punctuated::parse_terminated(input)?))
}
}
/// Generates program metadata.
///
/// Takes two arguments, the `LINUX_VERSION_CODE` the program is compatible with,
/// and the license. The special version code `0xFFFFFFFE` can be used to signify
/// any kernel version.
///
/// # Example
///
/// ```no_run
/// #![no_std]
/// #![no_main]
/// use redbpf_macros::program;
/// program!(0xFFFFFFFE, "GPL");
/// # fn main() {}
/// ```
///
#[proc_macro]
pub fn program(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as Args);
let mut args = input.0.iter();
let version = args.next().expect("no version");
let license = args.next().expect("no license");
let (license_ty, license) = inline_string_literal(&license);
let (panic_ty, panic_msg) = inline_bytes(b"panic".to_vec());
let mut tokens = quote! {
#[no_mangle]
#[link_section = "license"]
pub static _license: #license_ty = #license;
#[no_mangle]
#[link_section = "version"]
pub static _version: u32 = #version;
#[panic_handler]
#[no_mangle]
pub extern "C" fn rust_begin_panic(info: &::core::panic::PanicInfo) -> ! {
use ::redbpf_probes::helpers::{bpf_trace_printk};
let msg: #panic_ty = #panic_msg;
bpf_trace_printk(&msg);
unsafe { core::hint::unreachable_unchecked() }
}
};
let mem = str::from_utf8(include_bytes!("mem.rs")).unwrap();
let mem: File = parse_str(&mem).unwrap();
tokens.extend(quote! {
#mem
});
tokens.into()
}
#[doc(hidden)]
#[proc_macro]
pub fn impl_network_buffer_array(_: TokenStream) -> TokenStream {
let mut tokens = TokenStream2::new();
for i in 1..=512usize {
tokens.extend(quote! {
impl NetworkBufferArray for [u8; #i] {}
});
}
tokens.into()
}
/// Attribute macro that must be used when creating [eBPF
/// maps](https://ingraind.org/api/redbpf_probes/maps/index.html).
///
/// The default `#[map]` places the map into a section of the resulting
/// ELF binary called `maps/<item_name>`.
///
/// If you wish to set the section name manually for BPF programs that
/// require strict naming conventions, use `#[map(link_section = "foo")]`,
/// which places the map into a section called `foo`.
///
/// **NOTE:** The `#[map("foo")]` form (which uses link section `maps/foo`) has
/// been deprecated in favor of `#[map]` or `#[map(link_section = "maps/foo")]`.
///
/// # Example
///
/// ```no_run
/// # use redbpf_probes::kprobe::prelude::*;
/// // Will be linked into the ELF in the section 'maps/counts'
/// #[map]
/// static mut counts: PerfMap<u64> = PerfMap::with_max_entries(10240);
///
/// // Will be linked into the ELF in the section 'dns_queries'
/// #[map(link_section = "dns_queries")]
/// static mut queries: PerfMap<Query> = PerfMap::with_max_entries(1024);
///
/// struct Query {
/// // ...
/// }
/// ```
#[proc_macro_attribute]
pub fn map(attrs: TokenStream, item: TokenStream) -> TokenStream {
let section_name = if attrs.is_empty() {
let item = item.clone();
let item = parse_macro_input!(item as ItemStatic);
format!("maps/{}", item.ident.to_string())
} else {
match syn::parse::<Meta>(attrs.clone()) {
            // First try #[map(link_section = "..")]
Ok(Meta::NameValue(mnv)) => {
if !mnv.path.is_ident("link_section") {
panic!("expected #[map(link_section = \"...\")]");
}
match mnv.lit {
Lit::Str(lit_str) => lit_str.value(),
_ => panic!("expected #[map(link_section = \"...\")]"),
}
}
// Fallback to deprecated #[map("..")]
_ => match syn::parse::<Expr>(attrs) {
Ok(Expr::Lit(ExprLit {
lit: Lit::Str(s), ..
})) => {
#[cfg(RUSTC_IS_NIGHTLY)]
Diagnostic::new(Level::Warning, "`#[map(\"..\")` has been deprecated in favor of `#[map]` or `#[map(link_section = \"..\")]`")
.emit();
format!("maps/{}", s.value())
}
_ => panic!("expected #[map(\"...\")]"),
},
}
};
let item = TokenStream2::from(item);
let tokens = quote! {
#[no_mangle]
#[link_section = #section_name]
#item
};
tokens.into()
}
fn probe_impl(ty: &str, attrs: TokenStream, item: ItemFn, mut name: String) -> TokenStream {
if !attrs.is_empty() {
name = match parse_macro_input!(attrs as Expr) {
Expr::Lit(ExprLit {
lit: Lit::Str(s), ..
}) => s.value(),
_ => panic!("expected string literal"),
}
};
let section_name = format!("{}/{}", ty, name);
let tokens = quote! {
#[no_mangle]
#[link_section = #section_name]
#item
};
tokens.into()
}
fn wrap_kprobe(item: ItemFn) -> ItemFn {
let ident = item.sig.ident.clone();
let outer_ident = Ident::new(&format!("outer_{}", ident), Span::call_site());
parse_quote! {
fn #outer_ident(ctx: *mut c_void) -> i32 {
let regs = ::redbpf_probes::registers::Registers::from(ctx);
let _ = #ident(regs);
return 0;
#item
}
}
}
/// Attribute macro that must be used to define [`kprobes`](https://www.kernel.org/doc/Documentation/kprobes.txt).
///
/// # Example
/// ```no_run
/// use redbpf_probes::kprobe::prelude::*;
///
/// #[kprobe("__x64_sys_clone")]
/// fn clone_enter(regs: Registers) {
/// // this is executed when clone() is invoked
/// }
/// ```
#[proc_macro_attribute]
pub fn kprobe(attrs: TokenStream, item: TokenStream) -> TokenStream {
let item = parse_macro_input!(item as ItemFn);
let name = item.sig.ident.to_string();
let wrapper = wrap_kprobe(item);
probe_impl("kprobe", attrs, wrapper, name)
}
/// Attribute macro that must be used to define [`kretprobes`](https://www.kernel.org/doc/Documentation/kprobes.txt).
///
/// # Example
/// ```no_run
/// use redbpf_probes::kprobe::prelude::*;
///
/// #[kretprobe("__x64_sys_clone")]
/// fn clone_exit(regs: Registers) {
/// // this is executed when clone() returns
/// }
/// ```
#[proc_macro_attribute]
pub fn kretprobe(attrs: TokenStream, item: TokenStream) -> TokenStream {
let item = parse_macro_input!(item as ItemFn);
let name = item.sig.ident.to_string();
let wrapper = wrap_kprobe(item);
probe_impl("kretprobe", attrs, wrapper, name)
}
/// Attribute macro that must be used to define [`uprobes`](https://www.kernel.org/doc/Documentation/trace/uprobetracer.txt).
///
/// # Example
/// ```no_run
/// use redbpf_probes::uprobe::prelude::*;
///
/// #[uprobe]
/// fn getaddrinfo(regs: Registers) {
/// // this is executed when getaddrinfo() is invoked
/// }
/// ```
#[proc_macro_attribute]
pub fn uprobe(attrs: TokenStream, item: TokenStream) -> TokenStream {
let item = parse_macro_input!(item as ItemFn);
let name = item.sig.ident.to_string();
let wrapper = wrap_kprobe(item);
probe_impl("uprobe", attrs, wrapper, name)
}
/// Attribute macro that must be used to define [`uretprobes`](https://www.kernel.org/doc/Documentation/trace/uprobetracer.txt).
///
/// # Example
/// ```no_run
/// use redbpf_probes::uprobe::prelude::*;
///
/// #[uretprobe]
/// fn getaddrinfo(regs: Registers) {
/// // this is executed when getaddrinfo() returns
/// }
/// ```
#[proc_macro_attribute]
pub fn uretprobe(attrs: TokenStream, item: TokenStream) -> TokenStream {
let item = parse_macro_input!(item as ItemFn);
let name = item.sig.ident.to_string();
let wrapper = wrap_kprobe(item);
probe_impl("uretprobe", attrs, wrapper, name)
}
/// Attribute macro that must be used to define [`XDP` probes](https://www.iovisor.org/technology/xdp).
///
/// See also the [`XDP` API provided by
/// `redbpf-probes`](https://ingraind.org/api/redbpf_probes/xdp/index.html).
///
/// # Example
/// ```no_run
/// use redbpf_probes::xdp::prelude::*;
///
/// #[xdp]
/// fn probe(ctx: XdpContext) -> XdpResult {
/// // do something with the packet
///
/// Ok(XdpAction::Pass)
/// }
/// ```
#[proc_macro_attribute]
pub fn xdp(attrs: TokenStream, item: TokenStream) -> TokenStream {
let item = parse_macro_input!(item as ItemFn);
let name = item.sig.ident.to_string();
let ident = item.sig.ident.clone();
let outer_ident = Ident::new(&format!("outer_{}", ident), Span::call_site());
let wrapper = parse_quote! {
fn #outer_ident(ctx: *mut ::redbpf_probes::bindings::xdp_md) -> ::redbpf_probes::xdp::XdpAction {
let ctx = ::redbpf_probes::xdp::XdpContext { ctx };
return match #ident(ctx) {
Ok(action) => action,
Err(_) => ::redbpf_probes::xdp::XdpAction::Pass
};
#item
}
};
probe_impl("xdp", attrs, wrapper, name)
}
/// Attribute macro that must be used to define [`socket
/// filter`](https://www.kernel.org/doc/Documentation/networking/filter.txt)
/// probes.
///
/// See also the [`socket filter` API provided by
/// `redbpf-probes`](https://ingraind.org/api/redbpf_probes/socket_filter/index.html).
///
/// # Example
/// ```no_run
/// use redbpf_probes::socket_filter::prelude::*;
///
/// #[socket_filter]
/// fn probe(skb: SkBuff) -> SkBuffResult {
/// Ok(SkBuffAction::SendToUserspace)
/// }
/// ```
#[proc_macro_attribute]
pub fn socket_filter(attrs: TokenStream, item: TokenStream) -> TokenStream {
let item = parse_macro_input!(item as ItemFn);
let name = item.sig.ident.to_string();
let ident = item.sig.ident.clone();
let outer_ident = Ident::new(&format!("outer_{}", ident), Span::call_site());
let wrapper = parse_quote! {
fn #outer_ident(skb: *const ::redbpf_probes::bindings::__sk_buff) -> i32 {
let skb = ::redbpf_probes::socket_filter::SkBuff { skb };
return match #ident(skb) {
Ok(::redbpf_probes::socket_filter::SkBuffAction::SendToUserspace) => -1,
_ => 0
};
#item
}
};
probe_impl("socketfilter", attrs, wrapper, name)
}
/// Attribute macro for defining BPF programs of `stream parser`s. A `sockmap`
/// can be attached to the stream parser. The role of a stream parser is to find
/// the message boundary of a TCP stream and return the length of a message. If it
/// returns a proper message length, then a `stream verdict` BPF program will
/// be called.
///
/// # Example
/// ```no_run
/// use core::ptr;
/// use memoffset::offset_of;
/// use redbpf_probes::sockmap::prelude::*;
///
/// #[stream_parser]
/// fn parse_message_boundary(skb: SkBuff) -> StreamParserResult {
/// let len: u32 = unsafe {
/// let addr = (skb.skb as usize + offset_of!(__sk_buff, len)) as *const u32;
/// ptr::read(addr)
/// };
/// Ok(StreamParserAction::MessageLength(len))
/// }
/// ```
#[proc_macro_attribute]
pub fn stream_parser(attrs: TokenStream, item: TokenStream) -> TokenStream |
/// Attribute macro for defining BPF programs of `stream verdict`s. A `sockmap`
/// can be attached to the stream verdict. The role of a stream verdict is to
/// decide to which socket a message should be redirected.
///
/// # Example
/// ```no_run
/// use redbpf_probes::sockmap::prelude::*;
/// #[map(link_section = "maps/echo_sockmap")]
/// static mut SOCKMAP: SockMap = SockMap::with_max_entries(10240);
///
/// #[stream_verdict]
/// fn verdict(skb: SkBuff) -> SkAction {
/// match unsafe { SOCKMAP.redirect(skb.skb as *mut _, 0) } {
/// Ok(_) => SkAction::Pass,
/// Err(_) => SkAction::Drop,
/// }
/// }
/// ```
#[proc_macro_attribute]
pub fn stream_verdict(attrs: TokenStream, item: TokenStream) -> TokenStream {
let item = parse_macro_input!(item as ItemFn);
let name = item.sig.ident.to_string();
let ident = item.sig.ident.clone();
let outer_ident = Ident::new(&format!("outer_{}", ident), Span::call_site());
let wrapper = parse_quote! {
fn #outer_ident(skb: *const ::redbpf_probes::bindings::__sk_buff) -> i32 {
let skb = ::redbpf_probes::socket::SkBuff { skb };
use ::redbpf_probes::socket::SkAction;
return match #ident(skb) {
SkAction::Pass => ::redbpf_probes::bindings::sk_action_SK_PASS,
SkAction::Drop => ::redbpf_probes::bindings::sk_action_SK_DROP,
} as i32;
#item
}
};
probe_impl("streamverdict", attrs, wrapper, name)
}
/// Define [tc action BPF programs](https://man7.org/linux/man-pages/man8/tc-bpf.8.html)
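///
/// # Example
///
/// A minimal sketch inferred from the wrapper this macro generates below: the
/// probe takes a `SkBuff` and returns a `Result` whose `Ok` value is a
/// `redbpf_probes::tc::TcAction`. The error type is arbitrary here, since the
/// generated wrapper only checks for `Err(_)`.
///
/// ```ignore
/// use redbpf_probes::socket::SkBuff;
/// use redbpf_probes::tc::TcAction;
///
/// #[tc_action]
/// fn accept_all(_skb: SkBuff) -> Result<TcAction, ()> {
///     // Let every packet through unchanged.
///     Ok(TcAction::Ok)
/// }
/// ```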
#[proc_macro_attribute]
pub fn tc_action(attrs: TokenStream, item: TokenStream) -> TokenStream {
let item = parse_macro_input!(item as ItemFn);
let name = item.sig.ident.to_string();
let ident = item.sig.ident.clone();
let outer_ident = Ident::new(&format!("outer_{}", ident), Span::call_site());
let wrapper = parse_quote! {
fn #outer_ident(skb: *const ::redbpf_probes::bindings::__sk_buff) -> i32 {
let skb = ::redbpf_probes::socket::SkBuff { skb };
return match #ident(skb) {
Ok(::redbpf_probes::tc::TcAction::Ok) => 0,
Ok(::redbpf_probes::tc::TcAction::Shot) => 2,
Ok(::redbpf_probes::tc::TcAction::Unspec) => -1,
Ok(::redbpf_probes::tc::TcAction::Pipe) => 3,
Ok(::redbpf_probes::tc::TcAction::Reclassify) => 1,
Err(_) => -1
};
#item
}
};
probe_impl("tc_action", attrs, wrapper, name)
}
| {
let item = parse_macro_input!(item as ItemFn);
let name = item.sig.ident.to_string();
let ident = item.sig.ident.clone();
let outer_ident = Ident::new(&format!("outer_{}", ident), Span::call_site());
let wrapper = parse_quote! {
fn #outer_ident(skb: *const ::redbpf_probes::bindings::__sk_buff) -> i32 {
let skb = ::redbpf_probes::socket::SkBuff { skb };
use ::redbpf_probes::sockmap::StreamParserAction::*;
return match #ident(skb) {
Ok(MessageLength(len)) if len > 0 => len as i32,
Ok(MoreDataWanted) => 0,
Ok(SendToUserspace) => -86, // -ESTRPIPE
_ => -1, // error
};
#item
}
};
probe_impl("streamparser", attrs, wrapper, name)
} |
test_post_commit_hooks.py | from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import json
import logging
from urllib.parse import urlencode
import mock
from django_dynamic_fixture import get
from django.test import TestCase
from readthedocs.builds.models import Version
from readthedocs.projects.models import Project
from readthedocs.projects import tasks
log = logging.getLogger(__name__)
class BasePostCommitTest(TestCase):
def _setup(self):
self.rtfd = get(
Project, repo='https://github.com/rtfd/readthedocs.org', slug='read-the-docs')
self.rtfd_not_ok = get(
Version, project=self.rtfd, slug='not_ok', identifier='not_ok', active=False)
self.rtfd_awesome = get(
Version, project=self.rtfd, slug='awesome', identifier='awesome', active=True)
self.pip = get(Project, repo='https://bitbucket.org/pip/pip', repo_type='hg')
self.pip_not_ok = get(
Version, project=self.pip, slug='not_ok', identifier='not_ok', active=False)
self.sphinx = get(Project, repo='https://bitbucket.org/sphinx/sphinx', repo_type='git')
self.mocks = [mock.patch('readthedocs.core.views.hooks.trigger_build')]
self.patches = [m.start() for m in self.mocks]
self.client.login(username='eric', password='test')
class GitLabWebHookTest(BasePostCommitTest):
fixtures = ["eric"]
def setUp(self):
self._setup()
self.payload = {
"object_kind": "push",
"before": "95790bf891e76fee5e1747ab589903a6a1f80f22",
"after": "da1560886d4f094c3e6c9ef40349f7d38b5d27d7",
"ref": "refs/heads/awesome",
"checkout_sha": "da1560886d4f094c3e6c9ef40349f7d38b5d27d7",
"user_id": 4,
"user_name": "John Smith",
"user_email": "[email protected]",
"project_id": 15,
"project":{
"name":"readthedocs",
"description":"",
"web_url":"http://example.com/mike/diaspora",
"avatar_url": None,
"git_ssh_url":"[email protected]:rtfd/readthedocs.org.git",
"git_http_url":"http://github.com/rtfd/readthedocs.org.git",
"namespace":"Mike",
"visibility_level":0,
"path_with_namespace":"mike/diaspora",
"default_branch":"master",
"homepage":"http://example.com/mike/diaspora",
"url":"[email protected]/rtfd/readthedocs.org.git",
"ssh_url":"[email protected]/rtfd/readthedocs.org.git",
"http_url":"http://github.com/rtfd/readthedocs.org.git"
},
"repository":{
"name": "Diaspora",
"url": "[email protected]:rtfd/readthedocs.org.git",
"description": "",
"homepage": "http://github.com/rtfd/readthedocs.org",
"git_http_url": "http://github.com/rtfd/readthedocs.org.git",
"git_ssh_url": "[email protected]:rtfd/readthedocs.org.git",
"visibility_level": 0
},
"commits": [
{
"id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
"message": "Update Catalan translation to e38cb41.",
"timestamp": "2011-12-12T14:27:31+02:00",
"url": "http://example.com/mike/diaspora/commit/b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
"author": {
"name": "Jordi Mallach",
"email": "[email protected]"
},
"added": ["CHANGELOG"],
"modified": ["app/controller/application.rb"],
"removed": []
},
{
"id": "da1560886d4f094c3e6c9ef40349f7d38b5d27d7",
"message": "fixed readme",
"timestamp": "2012-01-03T23:36:29+02:00",
"url": "http://example.com/mike/diaspora/commit/da1560886d4f094c3e6c9ef40349f7d38b5d27d7",
"author": {
"name": "GitLab dev user",
"email": "gitlabdev@dv6700.(none)"
},
"added": ["CHANGELOG"],
"modified": ["app/controller/application.rb"],
"removed": []
}
],
"total_commits_count": 4
}
def test_gitlab_post_commit_hook_builds_branch_docs_if_it_should(self):
"""GitLab webhook should only build active versions"""
r = self.client.post('/gitlab/', data=json.dumps(self.payload),
content_type='application/json')
self.assertContains(r, '(URL Build) Build Started: github.com/rtfd/readthedocs.org [awesome]')
self.payload['ref'] = 'refs/heads/not_ok'
r = self.client.post('/gitlab/', data=json.dumps(self.payload),
content_type='application/json')
self.assertContains(r, '(URL Build) Not Building: github.com/rtfd/readthedocs.org [not_ok]')
self.payload['ref'] = 'refs/heads/unknown'
r = self.client.post('/gitlab/', data=json.dumps(self.payload),
content_type='application/json')
self.assertContains(r, '(URL Build) No known branches were pushed to.')
def test_gitlab_post_commit_knows_default_branches(self):
"""
Test the gitlab post commit hook so that the default branch
will be respected and built as the latest version.
"""
rtd = Project.objects.get(slug='read-the-docs')
old_default = rtd.default_branch
rtd.default_branch = 'master'
rtd.save()
self.payload['ref'] = 'refs/heads/master'
r = self.client.post('/gitlab/', data=json.dumps(self.payload),
content_type='application/json')
self.assertContains(r, '(URL Build) Build Started: github.com/rtfd/readthedocs.org [latest]')
rtd.default_branch = old_default
rtd.save()
class GitHubPostCommitTest(BasePostCommitTest):
fixtures = ["eric"]
def setUp(self):
self._setup()
self.payload = {
"after": "5ad757394b926e5637ffeafe340f952ef48bd270",
"base_ref": "refs/heads/master",
"before": "5b4e453dc913b08642b1d4fb10ed23c9d6e5b129",
"commits": [
{
"added": [],
"author": {
"email": "[email protected]",
"name": "Eric Holscher",
"username": "ericholscher"
},
"distinct": False,
"id": "11f229c6a78f5bc8cb173104a3f7a68cdb7eb15a",
"message": "Fix it on the front list as well.",
"modified": [
"readthedocs/templates/core/project_list_detailed.html"
],
"removed": [],
"timestamp": "2011-09-12T19:38:55-07:00",
"url": ("https://github.com/wraithan/readthedocs.org/"
"commit/11f229c6a78f5bc8cb173104a3f7a68cdb7eb15a")
},
],
"compare": ("https://github.com/wraithan/readthedocs.org/compare/"
"5b4e453...5ad7573"),
"created": False,
"deleted": False,
"forced": False,
"pusher": {
"name": "none"
},
"ref": "refs/heads/awesome",
"repository": {
"created_at": "2011/09/09 14:20:13 -0700",
"description": "source code to readthedocs.org",
"fork": True,
"forks": 0,
"has_downloads": True,
"has_issues": False,
"has_wiki": True,
"homepage": "http://rtfd.org/",
"language": "Python",
"name": "readthedocs.org",
"open_issues": 0,
"owner": {
"email": "[email protected]",
"name": "wraithan"
},
"private": False,
"pushed_at": "2011/09/12 22:33:34 -0700",
"size": 140,
"url": "https://github.com/rtfd/readthedocs.org",
"ssh_url": "[email protected]:rtfd/readthedocs.org.git",
"watchers": 1
}
}
def test_post_types(self):
"""Ensure various POST formats"""
r = self.client.post('/github/',
data=json.dumps(self.payload),
content_type='application/json')
self.assertEqual(r.status_code, 200)
r = self.client.post('/github/',
data=urlencode({'payload': json.dumps(self.payload)}),
content_type='application/x-www-form-urlencoded')
self.assertEqual(r.status_code, 200)
def test_github_upper_case_repo(self):
"""
Test the github post commit hook will build properly with upper case
repository.
        This allows for capitalization differences in post-commit hook URLs.
"""
payload = self.payload.copy()
payload['repository']['url'] = payload['repository']['url'].upper()
r = self.client.post('/github/', data=json.dumps(payload),
content_type='application/json')
self.assertContains(r, '(URL Build) Build Started: HTTPS://GITHUB.COM/RTFD/READTHEDOCS.ORG [awesome]')
self.payload['ref'] = 'refs/heads/not_ok'
def test_400_on_no_ref(self):
"""
GitHub sometimes sends us a post-commit hook without a ref.
This means we don't know what branch to build,
so return a 400.
"""
payload = self.payload.copy()
del payload['ref']
r = self.client.post('/github/', data=json.dumps(payload),
content_type='application/json')
self.assertEqual(r.status_code, 400)
def test_private_repo_mapping(self):
"""
Test for private GitHub repo mapping.
        Previously we failed to trigger post-commit hooks because
we only compared against the *public* ``github.com/user/repo`` URL.
Users can also enter a ``github.com:user/repo`` URL,
which we should support.
"""
self.rtfd.repo = '[email protected]:rtfd/readthedocs.org'
self.rtfd.save()
payload = self.payload.copy()
r = self.client.post('/github/', data=json.dumps(payload),
content_type='application/json')
self.assertContains(r, '(URL Build) Build Started: github.com/rtfd/readthedocs.org [awesome]')
def test_github_post_commit_hook_builds_branch_docs_if_it_should(self):
"""
Test the github post commit hook to see if it will only build
versions that are set to be built if the branch they refer to
        is updated. Otherwise it is a no-op.
"""
r = self.client.post('/github/', data=json.dumps(self.payload),
content_type='application/json')
self.assertContains(r, '(URL Build) Build Started: github.com/rtfd/readthedocs.org [awesome]')
self.payload['ref'] = 'refs/heads/not_ok'
r = self.client.post('/github/', data=json.dumps(self.payload),
content_type='application/json')
self.assertContains(r, '(URL Build) Not Building: github.com/rtfd/readthedocs.org [not_ok]')
self.payload['ref'] = 'refs/heads/unknown'
r = self.client.post('/github/', data=json.dumps(self.payload),
content_type='application/json')
self.assertContains(r, '(URL Build) No known branches were pushed to.')
def test_github_post_commit_knows_default_branches(self):
"""
Test the github post commit hook so that the default branch
will be respected and built as the latest version.
"""
rtd = Project.objects.get(slug='read-the-docs')
old_default = rtd.default_branch
rtd.default_branch = 'master'
rtd.save()
self.payload['ref'] = 'refs/heads/master'
r = self.client.post('/github/', data=json.dumps(self.payload),
content_type='application/json')
self.assertContains(r, '(URL Build) Build Started: github.com/rtfd/readthedocs.org [latest]')
rtd.default_branch = old_default
rtd.save()
class CorePostCommitTest(BasePostCommitTest):
fixtures = ["eric"]
def setUp(self):
self._setup()
def test_core_commit_hook(self):
rtd = Project.objects.get(slug='read-the-docs')
rtd.default_branch = 'master'
rtd.save()
r = self.client.post('/build/%s' % rtd.pk, {'version_slug': 'master'})
self.assertEqual(r.status_code, 302)
self.assertEqual(r._headers['location'][1],
'/projects/read-the-docs/builds/')
def test_hook_state_tracking(self):
rtd = Project.objects.get(slug='read-the-docs')
self.assertEqual(Project.objects.get(slug='read-the-docs').has_valid_webhook, False)
self.client.post('/build/%s' % rtd.pk, {'version_slug': 'latest'})
# Need to re-query to get updated DB entry
self.assertEqual(Project.objects.get(slug='read-the-docs').has_valid_webhook, True)
class BitBucketHookTests(BasePostCommitTest):
def setUp(self):
self._setup()
self.hg_payload = {
"canon_url": "https://bitbucket.org",
"commits": [
{
"author": "marcus",
"branch": "default",
"files": [
{
"file": "somefile.py",
"type": "modified"
}
],
"message": "Added some feature things",
"node": "d14d26a93fd2",
"parents": [
"1b458191f31a"
],
"raw_author": "Marcus Bertrand <[email protected]>",
"raw_node": "d14d26a93fd28d3166fa81c0cd3b6f339bb95bfe",
"revision": 3,
"size": -1,
"timestamp": "2012-05-30 06:07:03",
"utctimestamp": "2012-05-30 04:07:03+00:00"
}
],
"repository": {
"absolute_url": "/pip/pip/",
"fork": False,
"is_private": True,
"name": "Project X",
"owner": "marcus",
"scm": "hg",
"slug": "project-x",
"website": ""
},
"user": "marcus"
}
self.git_payload = {
"canon_url": "https://bitbucket.org",
"commits": [
{
"author": "marcus",
"branch": "master",
"files": [
{
"file": "somefile.py",
"type": "modified"
}
],
"message": "Added some more things to somefile.py\n",
"node": "620ade18607a",
"parents": [
"702c70160afc"
],
"raw_author": "Marcus Bertrand <[email protected]>",
"raw_node": "620ade18607ac42d872b568bb92acaa9a28620e9",
"revision": None,
"size": -1,
"timestamp": "2012-05-30 05:58:56",
"utctimestamp": "2012-05-30 03:58:56+00:00"
}
],
"repository": {
"absolute_url": "/sphinx/sphinx/",
"fork": False,
"is_private": True,
"name": "Project X",
"owner": "marcus",
"scm": "git",
"slug": "project-x",
"website": "https://atlassian.com/"
},
"user": "marcus"
}
def test_post_types(self):
"""Ensure various POST formats"""
r = self.client.post('/bitbucket/',
data=json.dumps(self.hg_payload),
content_type='application/json')
self.assertEqual(r.status_code, 200)
r = self.client.post('/bitbucket/',
data=urlencode({'payload': json.dumps(self.hg_payload)}),
content_type='application/x-www-form-urlencoded')
self.assertEqual(r.status_code, 200)
def test_bitbucket_post_commit(self):
r = self.client.post('/bitbucket/', data=json.dumps(self.hg_payload),
content_type='application/json')
self.assertContains(r, '(URL Build) Build Started: bitbucket.org/pip/pip [latest]')
r = self.client.post('/bitbucket/', data=json.dumps(self.git_payload),
content_type='application/json')
self.assertContains(r, '(URL Build) Build Started: bitbucket.org/sphinx/sphinx [latest]')
def | (self):
self.hg_payload['commits'] = []
self.git_payload['commits'] = []
r = self.client.post('/bitbucket/', data=json.dumps(self.hg_payload),
content_type='application/json')
self.assertContains(r, 'Commit/branch not found', status_code=404)
r = self.client.post('/bitbucket/', data=json.dumps(self.git_payload),
content_type='application/json')
self.assertContains(r, 'Commit/branch not found', status_code=404)
def test_bitbucket_post_commit_non_existent_url(self):
self.hg_payload['repository']['absolute_url'] = '/invalid/repository'
self.git_payload['repository']['absolute_url'] = '/invalid/repository'
r = self.client.post('/bitbucket/', data=json.dumps(self.hg_payload),
content_type='application/json')
self.assertContains(r, 'Project match not found', status_code=404)
r = self.client.post('/bitbucket/', data=json.dumps(self.git_payload),
content_type='application/json')
self.assertContains(r, 'Project match not found', status_code=404)
def test_bitbucket_post_commit_hook_builds_branch_docs_if_it_should(self):
"""
Test the bitbucket post commit hook to see if it will only build
versions that are set to be built if the branch they refer to
        is updated. Otherwise it is a no-op.
"""
r = self.client.post('/bitbucket/', data=json.dumps(self.hg_payload),
content_type='application/json')
self.assertContains(r, '(URL Build) Build Started: bitbucket.org/pip/pip [latest]')
self.hg_payload['commits'] = [{
"branch": "not_ok",
}]
r = self.client.post('/bitbucket/', data=json.dumps(self.hg_payload),
content_type='application/json')
self.assertContains(r, '(URL Build) Not Building: bitbucket.org/pip/pip [not_ok]')
self.hg_payload['commits'] = [{
"branch": "unknown",
}]
r = self.client.post('/bitbucket/', data=json.dumps(self.hg_payload),
content_type='application/json')
self.assertContains(r, '(URL Build) No known branches were pushed to.')
def test_bitbucket_default_branch(self):
self.test_project = get(
Project, repo='HTTPS://bitbucket.org/test/project', slug='test-project',
default_branch='integration', repo_type='git',
)
self.git_payload['commits'] = [{
"branch": "integration",
}]
self.git_payload['repository'] = {
'absolute_url': '/test/project/'
}
r = self.client.post('/bitbucket/', data=json.dumps(self.git_payload),
content_type='application/json')
self.assertContains(r, '(URL Build) Build Started: bitbucket.org/test/project [latest]')
| test_bitbucket_post_commit_empty_commit_list |
archiveSubmission.ts | export interface ArchiveSubmission { | fileUploadPlan: any[];
errors: any[];
_links: object;
} | created: string;
dspUuid: string;
dspUrl: string;
submissionUuid?: any; |
cube_sim.py | from .legacy_cube import CubeClass
class Cube(CubeClass):
def __init__(self,faces):
super().__init__(faces)
self.algo = []
        self.rotation_dict = {'r': lambda x: x.R(),
                              'l': lambda x: x.L(),
                              'u': lambda x: x.U(),
                              'f': lambda x: x.F(),
                              'b': lambda x: x.B(),
                              'd': lambda x: x.D(),
                              'ri': lambda x: x.Ri(),
                              'li': lambda x: x.Li(),
                              'ui': lambda x: x.Ui(),
                              'fi': lambda x: x.Fi(),
                              'bi': lambda x: x.Bi(),
                              'di': lambda x: x.Di()}
def R(self):
self.rotate("clockwise","right")
self.algo.append('r')
return 'R'
def Ri(self):
self.rotate("counterClockwise","right")
self.algo.append('ri')
return 'Ri'
def R2(self):
self.rotate("clockwise","right")
self.rotate("clockwise","right")
self.algo.append('r')
self.algo.append('r')
return 'R2'
def L(self):
self.rotate("clockwise","left")
self.algo.append('l')
return 'L'
def | (self):
self.rotate("counterClockwise","left")
self.algo.append('li')
return 'Li'
def L2(self):
self.rotate("clockwise","left")
self.rotate("clockwise","left")
self.algo.append('l')
self.algo.append('l')
return 'L2'
def F(self):
self.rotate("clockwise","front")
self.algo.append('f')
return 'F'
def Fi(self):
self.rotate("counterClockwise","front")
self.algo.append('fi')
return 'Fi'
def F2(self):
self.rotate("clockwise","front")
self.rotate("clockwise","front")
self.algo.append('f')
self.algo.append('f')
return 'F2'
def B(self):
self.rotate("clockwise","back")
self.algo.append('b')
return 'B'
def Bi(self):
self.rotate("counterClockwise","back")
self.algo.append('bi')
return 'Bi'
def B2(self):
self.rotate("clockwise","back")
self.rotate("clockwise","back")
self.algo.append('b')
self.algo.append('b')
return 'B2'
def U(self):
self.rotate("clockwise","top")
self.algo.append('u')
return 'U'
def Ui(self):
self.rotate("counterClockwise","top")
self.algo.append('ui')
return 'Ui'
def U2(self):
self.rotate("clockwise","top")
self.rotate("clockwise","top")
self.algo.append('u')
self.algo.append('u')
return 'U2'
def D(self):
self.rotate("clockwise","bottom")
self.algo.append('d')
return 'D'
def Di(self):
self.rotate("counterClockwise","bottom")
self.algo.append('di')
return 'Di'
def D2(self):
self.rotate("clockwise","bottom")
self.rotate("clockwise","bottom")
self.algo.append('d')
self.algo.append('d')
return 'D2'
def compressAlgo(self):
#print("in compression stage")
if len(self.algo)<3:
return
        inv = {'r': 'ri', 'ri': 'r', 'l': 'li', 'li': 'l', 'u': 'ui', 'ui': 'u',
               'f': 'fi', 'fi': 'f', 'b': 'bi', 'bi': 'b', 'd': 'di', 'di': 'd'}
algo = self.algo
if len(algo)==3:
if algo[0] == algo[1] == algo[2]:
self.algo = [inv[algo[0]]]
return
flag = True
        while flag:
            # The checks below look at windows of up to four moves; once fewer
            # than four moves remain there is nothing left to cancel, and
            # without this guard the loop would never terminate.
            if len(algo) < 4:
                break
            for i in range(len(algo)-3):
if algo[i] == algo[i+1] == algo[i+2] == algo[i+3]:
del algo[i]; del algo[i]; del algo[i]; del algo[i]
break
if algo[i] == inv[algo[i+1]]:
del algo[i]; del algo[i]
break
if algo[i] == algo[i+1] == algo[i+2]:
del algo[i]; del algo[i]; algo[i] = inv[algo[i]]
break
if algo[i+1] == algo[i+2] == algo[i+3]:
del algo[i+1]; del algo[i+1]; algo[i+1] = inv[algo[i+1]]
break
if (i == len(algo)-4) or (len(algo)<4):
flag = False
self.algo = algo
| Li |
ron.rs | use std::collections::HashMap;
use std::error::Error;
use crate::value::{Value, ValueKind};
pub fn parse(
uri: Option<&String>,
text: &str,
) -> Result<HashMap<String, Value>, Box<dyn Error + Send + Sync>> {
let value = from_ron_value(uri, ron::from_str(text)?)?;
match value.kind {
ValueKind::Table(map) => Ok(map),
_ => Ok(HashMap::new()),
}
}
fn from_ron_value(
uri: Option<&String>,
value: ron::Value,
) -> Result<Value, Box<dyn Error + Send + Sync>> | {
let kind = match value {
ron::Value::Option(value) => match value {
Some(value) => from_ron_value(uri, *value)?.kind,
None => ValueKind::Nil,
},
ron::Value::Unit => ValueKind::Nil,
ron::Value::Bool(value) => ValueKind::Boolean(value),
ron::Value::Number(value) => match value {
ron::Number::Float(value) => ValueKind::Float(value.get()),
ron::Number::Integer(value) => ValueKind::Integer(value),
},
ron::Value::Char(value) => ValueKind::String(value.to_string()),
ron::Value::String(value) => ValueKind::String(value),
ron::Value::Seq(values) => {
let array = values
.into_iter()
.map(|value| from_ron_value(uri, value))
.collect::<Result<Vec<_>, _>>()?;
ValueKind::Array(array)
}
ron::Value::Map(values) => {
let map = values
.iter()
.map(|(key, value)| -> Result<_, Box<dyn Error + Send + Sync>> {
let key = key.clone().into_rust::<String>()?;
let value = from_ron_value(uri, value.clone())?;
Ok((key, value))
})
.collect::<Result<HashMap<_, _>, _>>()?;
ValueKind::Table(map)
}
};
Ok(Value::new(uri, kind))
} |
|
commands_test.go | package websockets
import (
"encoding/json"
"io/ioutil"
"testing"
"github.com/rubblelabs/ripple/data"
. "gopkg.in/check.v1"
)
func Test(t *testing.T) { TestingT(t) }
type MessagesSuite struct{}
var _ = Suite(&MessagesSuite{})
func readResponseFile(c *C, msg interface{}, path string) {
b, err := ioutil.ReadFile(path)
if err != nil {
c.Error(err)
}
if err = json.Unmarshal(b, msg); err != nil {
c.Error(err)
}
}
func (s *MessagesSuite) TestLedgerResponse(c *C) {
msg := &LedgerCommand{}
readResponseFile(c, msg, "testdata/ledger.json")
// Response fields
c.Assert(msg.Status, Equals, "success")
c.Assert(msg.Type, Equals, "response")
// Result fields
c.Assert(msg.Result.Ledger.LedgerSequence, Equals, uint32(6917762))
c.Assert(msg.Result.Ledger.Accepted, Equals, true)
c.Assert(msg.Result.Ledger.CloseTime.String(), Equals, "2014-May-30 13:11:50")
c.Assert(msg.Result.Ledger.Closed, Equals, true)
c.Assert(msg.Result.Ledger.Hash.String(), Equals, "0C5C5B39EA40D40ACA6EB47E50B2B85FD516D1A2BA67BA3E050349D3EF3632A4")
c.Assert(msg.Result.Ledger.PreviousLedger.String(), Equals, "F8F0363803C30E659AA24D6A62A6512BA24BEA5AC52A29731ABA1E2D80796E8B")
c.Assert(msg.Result.Ledger.TotalXRP, Equals, uint64(99999990098968782))
c.Assert(msg.Result.Ledger.StateHash.String(), Equals, "46D3E36FE845B9A18293F4C0F134D7DAFB06D4D9A1C7E4CB03F8B293CCA45FA0")
c.Assert(msg.Result.Ledger.TransactionHash.String(), Equals, "757CCB586D44F3C58E366EC7618988C0596277D3D5D0B412E49563B5EEDF04FF")
c.Assert(msg.Result.Ledger.Transactions, HasLen, 7)
tx0 := msg.Result.Ledger.Transactions[0]
c.Assert(tx0.GetHash().String(), Equals, "2D0CE11154B655A2BFE7F3F857AAC344622EC7DAB11B1EBD920DCDB00E8646FF")
c.Assert(tx0.MetaData.AffectedNodes, HasLen, 4)
}
func (s *MessagesSuite) TestLedgerHeaderResponse(c *C) {
msg := &LedgerHeaderCommand{}
readResponseFile(c, msg, "testdata/ledger_header.json")
// Response fields
c.Assert(msg.Status, Equals, "success")
c.Assert(msg.Type, Equals, "response")
// Result fields
c.Assert(len(msg.Result.LedgerData), Equals, 118)
c.Assert(msg.Result.LedgerSequence, Equals, uint32(32570))
c.Assert(msg.Result.Ledger.LedgerSequence, Equals, uint32(32570))
c.Assert(msg.Result.Ledger.Accepted, Equals, true)
c.Assert(msg.Result.Ledger.CloseTime.String(), Equals, "2013-Jan-01 03:21:10")
c.Assert(msg.Result.Ledger.Closed, Equals, true)
c.Assert(msg.Result.Ledger.Hash.String(), Equals, "4109C6F2045FC7EFF4CDE8F9905D19C28820D86304080FF886B299F0206E42B5")
c.Assert(msg.Result.Ledger.PreviousLedger.String(), Equals, "60A01EBF11537D8394EA1235253293508BDA7131D5F8710EFE9413AA129653A2")
c.Assert(msg.Result.Ledger.TotalXRP, Equals, uint64(99999999999996320))
c.Assert(msg.Result.Ledger.StateHash.String(), Equals, "3806AF8F22037DE598D30D38C8861FADF391171D26F7DE34ACFA038996EA6BEB") | }
func (s *MessagesSuite) TestTxResponse(c *C) {
msg := &TxCommand{}
readResponseFile(c, msg, "testdata/tx.json")
// Response fields
c.Assert(msg.Status, Equals, "success")
c.Assert(msg.Type, Equals, "response")
// Result fields
c.Assert(msg.Result.Date.String(), Equals, "2014-May-30 13:11:50")
c.Assert(msg.Result.Validated, Equals, true)
c.Assert(msg.Result.MetaData.AffectedNodes, HasLen, 4)
c.Assert(msg.Result.MetaData.TransactionResult.String(), Equals, "tesSUCCESS")
offer := msg.Result.Transaction.(*data.OfferCreate)
c.Assert(msg.Result.GetHash().String(), Equals, "2D0CE11154B655A2BFE7F3F857AAC344622EC7DAB11B1EBD920DCDB00E8646FF")
c.Assert(offer.GetType(), Equals, "OfferCreate")
c.Assert(offer.Account.String(), Equals, "rwpxNWdpKu2QVgrh5LQXEygYLshhgnRL1Y")
c.Assert(offer.Fee.String(), Equals, "0.00001")
c.Assert(offer.SigningPubKey.String(), Equals, "02BD6F0CFD0182F2F408512286A0D935C58FF41169DAC7E721D159D711695DFF85")
c.Assert(offer.TxnSignature.String(), Equals, "30440220216D42DF672C1CC7EF0CA9C7840838A2AF5FEDD4DEFCBA770C763D7509703C8702203C8D831BFF8A8BC2CC993BECB4E6C7BE1EA9D394AB7CE7C6F7542B6CDA781467")
c.Assert(offer.Sequence, Equals, uint32(1681497))
}
func (s *MessagesSuite) TestAccountTxResponse(c *C) {
msg := &AccountTxCommand{}
readResponseFile(c, msg, "testdata/account_tx.json")
// Response fields
c.Assert(msg.Status, Equals, "success")
c.Assert(msg.Type, Equals, "response")
c.Assert(len(msg.Result.Transactions), Equals, 2)
c.Assert(msg.Result.Transactions[1].Date.String(), Equals, "2014-Jun-19 14:14:40")
offer := msg.Result.Transactions[1].Transaction.(*data.OfferCreate)
c.Assert(offer.TakerPays.String(), Equals, "0.034800328/BTC/rvYAfWj5gh67oV6fW32ZzP3Aw4Eubs59B")
}
func (s *MessagesSuite) TestLedgerDataResponse(c *C) {
msg := &LedgerDataCommand{}
readResponseFile(c, msg, "testdata/ledger_data.json")
// Response fields
c.Assert(msg.Status, Equals, "success")
c.Assert(msg.Type, Equals, "response")
c.Assert(msg.Result.LedgerSequence, Equals, uint32(6281820))
c.Assert(msg.Result.Hash.String(), Equals, "83CC350B1CDD9792D47F60D3DBB7673518FD6E71821070673E6EAE65DE69086B")
c.Assert(msg.Result.Marker.String(), Equals, "02DE1A2AD4332A1AF01C59F16E45218FA70E5792BD963B6D7ACF188D6D150607")
c.Assert(len(msg.Result.State), Equals, 2048)
c.Assert(msg.Result.State[0].GetType(), Equals, "AccountRoot")
c.Assert(msg.Result.State[0].GetLedgerIndex().String(), Equals, "00001A2969BE1FC85F1D7A55282FA2E6D95C71D2E4B9C0FDD3D9994F3C00FF8F")
}
func (s *MessagesSuite) TestRipplePathFindResponse(c *C) {
msg := &RipplePathFindCommand{}
readResponseFile(c, msg, "testdata/ripple_path_find.json")
// Response fields
c.Assert(msg.Status, Equals, "success")
c.Assert(msg.Type, Equals, "response")
c.Assert(msg.Result.DestAccount.String(), Equals, "r9Dr5xwkeLegBeXq6ujinjSBLQzQ1zQGjH")
c.Assert(msg.Result.DestCurrencies, HasLen, 6)
c.Assert(msg.Result.Alternatives, HasLen, 1)
c.Assert(msg.Result.Alternatives[0].SrcAmount.String(), Equals, "0.9940475268/USD/rvYAfWj5gh67oV6fW32ZzP3Aw4Eubs59B")
c.Assert(msg.Result.Alternatives[0].PathsComputed, HasLen, 4)
c.Assert(msg.Result.Alternatives[0].PathsCanonical, HasLen, 0)
c.Assert(msg.Result.Alternatives[0].PathsComputed[0], HasLen, 2)
c.Assert(msg.Result.Alternatives[0].PathsComputed[0].String(), Equals, "XRP => SGD/r9Dr5xwkeLegBeXq6ujinjSBLQzQ1zQGjH")
}
func (s *MessagesSuite) TestAccountInfoResponse(c *C) {
msg := &AccountInfoCommand{}
readResponseFile(c, msg, "testdata/account_info.json")
// Response fields
c.Assert(msg.Status, Equals, "success")
c.Assert(msg.Type, Equals, "response")
c.Assert(msg.Result.LedgerSequence, Equals, uint32(7636529))
c.Assert(*msg.Result.AccountData.TransferRate, Equals, uint32(1002000000))
c.Assert(msg.Result.AccountData.LedgerEntryType, Equals, data.ACCOUNT_ROOT)
c.Assert(*msg.Result.AccountData.Sequence, Equals, uint32(546))
c.Assert(msg.Result.AccountData.Balance.String(), Equals, "10321199.422233")
} | c.Assert(msg.Result.Ledger.TransactionHash.String(), Equals, "0000000000000000000000000000000000000000000000000000000000000000")
c.Assert(msg.Result.Ledger.Transactions, HasLen, 0) |
DeprecatedColorPropType.d.ts | /** | declare var colorPropType: (props: any, propName: string, componentName: string, location: string, propFullName?: null | undefined | string) => null | undefined | Error;
declare var ColorPropType: typeof colorPropType & {
isRequired: typeof colorPropType;
};
declare const $f2tExportDefault: typeof ColorPropType;
export default $f2tExportDefault; | * TODO: Figure out why these are not included in the Flow dump
*/ |
move.go | package main
import (
"os"
"path"
"path/filepath"
"strconv"
"github.com/urfave/cli"
// "strconv"
)
func | (c *cli.Context) error {
toCategory := c.String("category")
var files []string
if c.Args().Present() {
err := filepath.Walk(NotesPath, getAppendWalkFunction(&files))
printErr(err, "unable to find files")
for _, n := range c.Args() {
intN, err := strconv.Atoi(n)
printErr(err, "Unable to convert to int")
oldPath := files[intN]
newPath := filepath.Join(NotesPath, toCategory, path.Base(oldPath))
err2 := os.Rename(oldPath, newPath)
printErr(err2, "Unable to rename file")
}
}
return nil
}
| moveAction |
lib.rs | //! # libgit2 bindings for Rust
//!
//! This library contains bindings to the [libgit2][1] C library which is used
//! to manage git repositories. The library itself is a work in progress and is
//! likely lacking some bindings here and there, so be warned.
//!
//! [1]: https://libgit2.github.com/
//!
//! The git2-rs library strives to be as close to libgit2 as possible, but also
//! strives to make using libgit2 as safe as possible. All resource management
//! is automatic, and strong types are added to all interfaces (including
//! `Result`).
//!
//! ## Creating a `Repository`
//!
//! The `Repository` is the source from which almost all other objects in git2-rs
//! are spawned. A repository can be created through opening, initializing, or
//! cloning.
//!
//! ### Initializing a new repository
//!
//! The `init` method will create a new repository, assuming one does not
//! already exist.
//!
//! ```no_run
//! # #![allow(unstable)]
//! use git2::Repository;
//!
//! let repo = match Repository::init("/path/to/a/repo") {
//! Ok(repo) => repo,
//! Err(e) => panic!("failed to init: {}", e),
//! };
//! ```
//!
//! ### Opening an existing repository
//!
//! ```no_run
//! # #![allow(unstable)]
//! use git2::Repository;
//!
//! let repo = match Repository::open("/path/to/a/repo") {
//! Ok(repo) => repo,
//! Err(e) => panic!("failed to open: {}", e),
//! };
//! ```
//!
//! ### Cloning an existing repository
//!
//! ```no_run
//! # #![allow(unstable)]
//! use git2::Repository;
//!
//! let url = "https://github.com/alexcrichton/git2-rs";
//! let repo = match Repository::clone(url, "/path/to/a/repo") {
//! Ok(repo) => repo,
//! Err(e) => panic!("failed to clone: {}", e),
//! };
//! ```
//!
//! ## Working with a `Repository`
//!
//! All derivative objects, references, etc. are attached to the lifetime of the
//! source `Repository`, to ensure that they do not outlive the repository
//! itself.
#![doc(html_root_url = "https://docs.rs/git2/0.6")]
#![allow(trivial_numeric_casts, trivial_casts)]
#![deny(missing_docs)]
#![cfg_attr(test, deny(warnings))]
extern crate libc;
extern crate url;
extern crate libgit2_sys as raw;
#[macro_use] extern crate bitflags;
#[macro_use] extern crate log;
#[cfg(test)] extern crate tempdir;
use std::ffi::{CStr, CString};
use std::fmt;
use std::str;
use std::sync::{Once, ONCE_INIT};
pub use blame::{Blame, BlameHunk, BlameIter, BlameOptions};
pub use blob::{Blob, BlobWriter};
pub use branch::{Branch, Branches};
pub use buf::Buf;
pub use commit::{Commit, Parents};
pub use config::{Config, ConfigEntry, ConfigEntries};
pub use cred::{Cred, CredentialHelper};
pub use describe::{Describe, DescribeFormatOptions, DescribeOptions};
pub use diff::{Diff, DiffDelta, DiffFile, DiffOptions, Deltas};
pub use diff::{DiffBinary, DiffBinaryFile, DiffBinaryKind};
pub use diff::{DiffLine, DiffHunk, DiffStats, DiffFindOptions};
pub use error::Error;
pub use index::{Index, IndexConflict, IndexConflicts, IndexEntry, IndexEntries, IndexMatchedPath};
pub use merge::{AnnotatedCommit, MergeOptions};
pub use message::{message_prettify, DEFAULT_COMMENT_CHAR};
pub use note::{Note, Notes};
pub use object::Object;
pub use oid::Oid;
pub use packbuilder::{PackBuilder, PackBuilderStage};
pub use pathspec::{Pathspec, PathspecMatchList, PathspecFailedEntries};
pub use pathspec::{PathspecDiffEntries, PathspecEntries};
pub use patch::Patch;
pub use proxy_options::ProxyOptions;
pub use rebase::{Rebase, RebaseOptions, RebaseOperation, RebaseOperationType};
pub use reference::{Reference, References, ReferenceNames};
pub use reflog::{Reflog, ReflogEntry, ReflogIter};
pub use refspec::Refspec;
pub use remote::{Remote, RemoteConnection, Refspecs, RemoteHead, FetchOptions, PushOptions};
pub use remote_callbacks::{RemoteCallbacks, Credentials, TransferProgress};
pub use remote_callbacks::{TransportMessage, Progress, UpdateTips};
pub use repo::{Repository, RepositoryInitOptions};
pub use revspec::Revspec;
pub use revwalk::Revwalk;
pub use signature::Signature;
pub use status::{StatusOptions, Statuses, StatusIter, StatusEntry, StatusShow};
pub use stash::{StashApplyOptions, StashCb, StashApplyProgressCb};
pub use submodule::{Submodule, SubmoduleUpdateOptions};
pub use tag::Tag;
pub use time::{Time, IndexTime};
pub use tree::{Tree, TreeEntry, TreeIter, TreeWalkMode, TreeWalkResult};
pub use treebuilder::TreeBuilder;
pub use odb::{Odb, OdbObject, OdbReader, OdbWriter};
pub use util::IntoCString;
// Create a convenience method on a bitflags struct which checks the given flag
macro_rules! is_bit_set {
($name:ident, $flag:expr) => (
#[allow(missing_docs)]
pub fn $name(&self) -> bool {
self.intersects($flag)
}
)
}
/// An enumeration of possible errors that can happen when working with a git
/// repository.
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum ErrorCode {
/// Generic error
GenericError,
/// Requested object could not be found
NotFound,
/// Object exists preventing operation
Exists,
/// More than one object matches
Ambiguous,
/// Output buffer too short to hold data
BufSize,
/// User-generated error
User,
/// Operation not allowed on bare repository
BareRepo,
/// HEAD refers to branch with no commits
UnbornBranch,
/// Merge in progress prevented operation
Unmerged,
/// Reference was not fast-forwardable
NotFastForward,
/// Name/ref spec was not in a valid format
InvalidSpec,
/// Checkout conflicts prevented operation
Conflict,
/// Lock file prevented operation
Locked,
/// Reference value does not match expected
Modified,
/// Authentication error
Auth,
/// Server certificate is invalid
Certificate,
/// Patch/merge has already been applied
Applied,
/// The requested peel operation is not possible
Peel,
/// Unexpected EOF
Eof,
/// Invalid operation or input
Invalid,
/// Uncommitted changes in index prevented operation
Uncommitted,
    /// Operation was not valid for a directory
Directory,
}
/// An enumeration of possible categories of things that can have
/// errors when working with a git repository.
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum ErrorClass {
/// Uncategorized
None,
/// Out of memory or insufficient allocated space
NoMemory,
/// Syscall or standard system library error
Os,
/// Invalid input
Invalid,
/// Error resolving or manipulating a reference
Reference,
/// ZLib failure
Zlib,
/// Bad repository state
Repository,
/// Bad configuration
Config,
/// Regex failure
Regex,
/// Bad object
Odb,
/// Invalid index data
Index,
/// Error creating or obtaining an object
Object,
/// Network error
Net,
    /// Error manipulating a tag
Tag,
/// Invalid value in tree
Tree,
/// Hashing or packing error
Indexer,
/// Error from SSL
Ssl,
    /// Error involving submodules
Submodule,
/// Threading error
Thread,
/// Error manipulating a stash
Stash,
/// Checkout failure
Checkout,
/// Invalid FETCH_HEAD
FetchHead,
/// Merge failure
Merge,
/// SSH failure
Ssh,
/// Error manipulating filters
Filter,
/// Error reverting commit
Revert,
/// Error from a user callback
Callback,
/// Error cherry-picking commit
CherryPick,
/// Can't describe object
Describe,
/// Error during rebase
Rebase,
/// Filesystem-related error
Filesystem,
}
/// A listing of the possible states that a repository can be in.
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
#[allow(missing_docs)]
pub enum RepositoryState {
Clean,
Merge,
Revert,
RevertSequence,
CherryPick,
CherryPickSequence,
Bisect,
Rebase,
RebaseInteractive,
RebaseMerge,
ApplyMailbox,
ApplyMailboxOrRebase,
}
/// An enumeration of the possible directions for a remote.
#[derive(Copy, Clone)]
pub enum Direction {
/// Data will be fetched (read) from this remote.
Fetch,
/// Data will be pushed (written) to this remote.
Push,
}
/// An enumeration of the operations that can be performed for the `reset`
/// method on a `Repository`.
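///
/// # Example
///
/// A minimal sketch of discarding local changes by resetting to `HEAD`; the
/// repository path is a placeholder and errors are simply unwrapped.
///
/// ```no_run
/// use git2::{Repository, ResetType};
///
/// let repo = Repository::open("/path/to/a/repo").unwrap();
/// // Resolve HEAD to an object and move the branch, index and working tree to it.
/// let head = repo.revparse_single("HEAD").unwrap();
/// repo.reset(&head, ResetType::Hard, None).unwrap();
/// ```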
#[derive(Copy, Clone)]
pub enum ResetType {
/// Move the head to the given commit.
Soft,
/// Soft plus reset the index to the commit.
Mixed,
/// Mixed plus changes in the working tree are discarded.
Hard,
}
/// An enumeration of all possible kinds objects may have.
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub enum ObjectType {
/// Any kind of git object
Any,
/// An object which corresponds to a git commit
Commit,
/// An object which corresponds to a git tree
Tree,
/// An object which corresponds to a git blob
Blob,
/// An object which corresponds to a git tag
Tag,
}
/// An enumeration of all possible kinds of references.
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub enum ReferenceType {
/// A reference which points at an object id.
Oid,
/// A reference which points at another reference.
Symbolic,
}
/// An enumeration for the possible types of branches
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
pub enum BranchType {
/// A local branch not on a remote.
Local,
/// A branch for a remote.
Remote,
}
/// An enumeration of the possible priority levels of a config file.
///
/// The levels correspond to the escalation logic (higher to lower) used when
/// searching for config entries.
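///
/// # Example
///
/// A minimal sketch of reading a value from a single configuration level; the
/// key looked up is only illustrative.
///
/// ```no_run
/// use git2::{Config, ConfigLevel};
///
/// // Restrict lookups to the global (per-user) file, e.g. ~/.gitconfig.
/// let cfg = Config::open_default().unwrap();
/// let global = cfg.open_level(ConfigLevel::Global).unwrap();
/// let email = global.get_string("user.email").unwrap();
/// ```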
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
pub enum ConfigLevel {
/// System-wide on Windows, for compatibility with portable git
ProgramData,
/// System-wide configuration file, e.g. /etc/gitconfig
System,
/// XDG-compatible configuration file, e.g. ~/.config/git/config
XDG,
/// User-specific configuration, e.g. ~/.gitconfig
Global,
/// Repository specific config, e.g. $PWD/.git/config
Local,
/// Application specific configuration file
App,
/// Highest level available
Highest,
}
/// Merge file favor options for `MergeOptions` instruct the file-level
/// merging functionality how to deal with conflicting regions of the files.
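///
/// # Example
///
/// A minimal sketch of configuring the file-level favor before a merge; the
/// resulting options would then be passed to `Repository::merge`.
///
/// ```no_run
/// use git2::{FileFavor, MergeOptions};
///
/// let mut opts = MergeOptions::new();
/// // Resolve conflicting regions by always taking "our" side.
/// opts.file_favor(FileFavor::Ours);
/// ```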
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
pub enum FileFavor {
/// When a region of a file is changed in both branches, a conflict will be
/// recorded in the index so that git_checkout can produce a merge file with
/// conflict markers in the working directory. This is the default.
Normal,
/// When a region of a file is changed in both branches, the file created
/// in the index will contain the "ours" side of any conflicting region.
/// The index will not record a conflict.
Ours,
/// When a region of a file is changed in both branches, the file created
/// in the index will contain the "theirs" side of any conflicting region.
/// The index will not record a conflict.
Theirs,
/// When a region of a file is changed in both branches, the file created
/// in the index will contain each unique line from each side, which has
/// the result of combining both files. The index will not record a conflict.
Union,
}
bitflags! {
/// Orderings that may be specified for Revwalk iteration.
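    ///
    /// # Example
    ///
    /// A minimal sketch of configuring a revision walker; the repository path
    /// is a placeholder. Iterating the walker afterwards yields the ids of
    /// commits reachable from `HEAD` in the requested order.
    ///
    /// ```no_run
    /// use git2::{Repository, Sort};
    ///
    /// let repo = Repository::open("/path/to/a/repo").unwrap();
    /// let mut walk = repo.revwalk().unwrap();
    /// walk.push_head().unwrap();
    /// // Combine topological ordering with ordering by commit time.
    /// walk.set_sorting(Sort::TOPOLOGICAL | Sort::TIME);
    /// ```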
pub struct Sort: u32 {
/// Sort the repository contents in no particular ordering.
///
/// This sorting is arbitrary, implementation-specific, and subject to
/// change at any time. This is the default sorting for new walkers.
const NONE = raw::GIT_SORT_NONE as u32;
/// Sort the repository contents in topological order (children before
/// parents).
///
/// This sorting mode can be combined with time sorting.
const TOPOLOGICAL = raw::GIT_SORT_TOPOLOGICAL as u32;
/// Sort the repository contents by commit time.
///
/// This sorting mode can be combined with topological sorting.
const TIME = raw::GIT_SORT_TIME as u32;
/// Iterate through the repository contents in reverse order.
///
/// This sorting mode can be combined with any others.
const REVERSE = raw::GIT_SORT_REVERSE as u32;
}
}
impl Sort {
is_bit_set!(is_none, Sort::NONE);
is_bit_set!(is_topological, Sort::TOPOLOGICAL);
is_bit_set!(is_time, Sort::TIME);
is_bit_set!(is_reverse, Sort::REVERSE);
}
bitflags! {
/// Types of credentials that can be requested by a credential callback.
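    ///
    /// # Example
    ///
    /// A minimal sketch of a credentials callback that inspects the allowed
    /// types; falling back to `Cred::default()` is just one possible strategy.
    ///
    /// ```no_run
    /// use git2::{Cred, RemoteCallbacks};
    ///
    /// let mut callbacks = RemoteCallbacks::new();
    /// callbacks.credentials(|_url, username_from_url, allowed_types| {
    ///     if allowed_types.is_ssh_key() {
    ///         // Try whatever key the local ssh-agent currently holds.
    ///         Cred::ssh_key_from_agent(username_from_url.unwrap_or("git"))
    ///     } else {
    ///         Cred::default()
    ///     }
    /// });
    /// ```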
pub struct CredentialType: u32 {
#[allow(missing_docs)]
const USER_PASS_PLAINTEXT = raw::GIT_CREDTYPE_USERPASS_PLAINTEXT as u32;
#[allow(missing_docs)]
const SSH_KEY = raw::GIT_CREDTYPE_SSH_KEY as u32;
#[allow(missing_docs)]
const SSH_MEMORY = raw::GIT_CREDTYPE_SSH_MEMORY as u32;
#[allow(missing_docs)]
const SSH_CUSTOM = raw::GIT_CREDTYPE_SSH_CUSTOM as u32;
#[allow(missing_docs)]
const DEFAULT = raw::GIT_CREDTYPE_DEFAULT as u32;
#[allow(missing_docs)]
const SSH_INTERACTIVE = raw::GIT_CREDTYPE_SSH_INTERACTIVE as u32;
#[allow(missing_docs)]
const USERNAME = raw::GIT_CREDTYPE_USERNAME as u32;
}
}
impl CredentialType {
is_bit_set!(is_user_pass_plaintext, CredentialType::USER_PASS_PLAINTEXT);
is_bit_set!(is_ssh_key, CredentialType::SSH_KEY);
is_bit_set!(is_ssh_memory, CredentialType::SSH_MEMORY);
is_bit_set!(is_ssh_custom, CredentialType::SSH_CUSTOM);
is_bit_set!(is_default, CredentialType::DEFAULT);
is_bit_set!(is_ssh_interactive, CredentialType::SSH_INTERACTIVE);
is_bit_set!(is_username, CredentialType::USERNAME);
}
impl Default for CredentialType {
fn default() -> Self {
CredentialType::DEFAULT
}
}
bitflags! {
/// Flags for the `flags` field of an IndexEntry.
pub struct IndexEntryFlag: u16 {
/// Set when the `extended_flags` field is valid.
const EXTENDED = raw::GIT_IDXENTRY_EXTENDED as u16;
/// "Assume valid" flag
const VALID = raw::GIT_IDXENTRY_VALID as u16;
}
}
impl IndexEntryFlag {
is_bit_set!(is_extended, IndexEntryFlag::EXTENDED);
is_bit_set!(is_valid, IndexEntryFlag::VALID);
}
bitflags! {
/// Flags for the `extended_flags` field of an IndexEntry.
pub struct IndexEntryExtendedFlag: u16 {
/// An "intent to add" entry from "git add -N"
const INTENT_TO_ADD = raw::GIT_IDXENTRY_INTENT_TO_ADD as u16;
/// Skip the associated worktree file, for sparse checkouts
const SKIP_WORKTREE = raw::GIT_IDXENTRY_SKIP_WORKTREE as u16;
/// Reserved for a future on-disk extended flag
const EXTENDED2 = raw::GIT_IDXENTRY_EXTENDED2 as u16;
#[allow(missing_docs)]
const UPDATE = raw::GIT_IDXENTRY_UPDATE as u16;
#[allow(missing_docs)]
const REMOVE = raw::GIT_IDXENTRY_REMOVE as u16;
#[allow(missing_docs)]
const UPTODATE = raw::GIT_IDXENTRY_UPTODATE as u16;
#[allow(missing_docs)]
const ADDED = raw::GIT_IDXENTRY_ADDED as u16;
#[allow(missing_docs)]
const HASHED = raw::GIT_IDXENTRY_HASHED as u16;
#[allow(missing_docs)]
const UNHASHED = raw::GIT_IDXENTRY_UNHASHED as u16;
#[allow(missing_docs)]
const WT_REMOVE = raw::GIT_IDXENTRY_WT_REMOVE as u16;
#[allow(missing_docs)]
const CONFLICTED = raw::GIT_IDXENTRY_CONFLICTED as u16;
#[allow(missing_docs)]
const UNPACKED = raw::GIT_IDXENTRY_UNPACKED as u16;
#[allow(missing_docs)]
const NEW_SKIP_WORKTREE = raw::GIT_IDXENTRY_NEW_SKIP_WORKTREE as u16;
}
}
impl IndexEntryExtendedFlag {
is_bit_set!(is_intent_to_add, IndexEntryExtendedFlag::INTENT_TO_ADD);
is_bit_set!(is_skip_worktree, IndexEntryExtendedFlag::SKIP_WORKTREE);
is_bit_set!(is_extended2, IndexEntryExtendedFlag::EXTENDED2);
is_bit_set!(is_update, IndexEntryExtendedFlag::UPDATE);
is_bit_set!(is_remove, IndexEntryExtendedFlag::REMOVE);
is_bit_set!(is_up_to_date, IndexEntryExtendedFlag::UPTODATE);
is_bit_set!(is_added, IndexEntryExtendedFlag::ADDED);
is_bit_set!(is_hashed, IndexEntryExtendedFlag::HASHED);
is_bit_set!(is_unhashed, IndexEntryExtendedFlag::UNHASHED);
is_bit_set!(is_wt_remove, IndexEntryExtendedFlag::WT_REMOVE);
is_bit_set!(is_conflicted, IndexEntryExtendedFlag::CONFLICTED);
is_bit_set!(is_unpacked, IndexEntryExtendedFlag::UNPACKED);
is_bit_set!(is_new_skip_worktree, IndexEntryExtendedFlag::NEW_SKIP_WORKTREE);
}
bitflags! {
/// Flags for APIs that add files matching pathspec
pub struct IndexAddOption: u32 {
#[allow(missing_docs)]
const DEFAULT = raw::GIT_INDEX_ADD_DEFAULT as u32;
#[allow(missing_docs)]
const FORCE = raw::GIT_INDEX_ADD_FORCE as u32;
#[allow(missing_docs)]
const DISABLE_PATHSPEC_MATCH =
raw::GIT_INDEX_ADD_DISABLE_PATHSPEC_MATCH as u32;
#[allow(missing_docs)]
const CHECK_PATHSPEC = raw::GIT_INDEX_ADD_CHECK_PATHSPEC as u32;
}
}
impl IndexAddOption {
is_bit_set!(is_default, IndexAddOption::DEFAULT);
is_bit_set!(is_force, IndexAddOption::FORCE);
is_bit_set!(is_disable_pathspec_match, IndexAddOption::DISABLE_PATHSPEC_MATCH);
is_bit_set!(is_check_pathspec, IndexAddOption::CHECK_PATHSPEC);
}
impl Default for IndexAddOption {
fn default() -> Self {
IndexAddOption::DEFAULT
}
}
bitflags! {
/// Flags for `Repository::open_ext`
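    ///
    /// # Example
    ///
    /// A minimal sketch of opening exactly one path without walking upward;
    /// the path is a placeholder and the empty slice means no ceiling
    /// directories.
    ///
    /// ```no_run
    /// use std::ffi::OsStr;
    /// use git2::{Repository, RepositoryOpenFlags};
    ///
    /// let repo = Repository::open_ext(
    ///     "/path/to/a/repo",
    ///     RepositoryOpenFlags::NO_SEARCH,
    ///     &[] as &[&OsStr],
    /// ).unwrap();
    /// ```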
pub struct RepositoryOpenFlags: u32 {
/// Only open the specified path; don't walk upward searching.
const NO_SEARCH = raw::GIT_REPOSITORY_OPEN_NO_SEARCH as u32;
/// Search across filesystem boundaries.
const CROSS_FS = raw::GIT_REPOSITORY_OPEN_CROSS_FS as u32;
/// Force opening as bare repository, and defer loading its config.
const BARE = raw::GIT_REPOSITORY_OPEN_BARE as u32;
/// Don't try appending `/.git` to the specified repository path.
const NO_DOTGIT = raw::GIT_REPOSITORY_OPEN_NO_DOTGIT as u32;
/// Respect environment variables like `$GIT_DIR`.
const FROM_ENV = raw::GIT_REPOSITORY_OPEN_FROM_ENV as u32;
}
}
impl RepositoryOpenFlags {
is_bit_set!(is_no_search, RepositoryOpenFlags::NO_SEARCH);
is_bit_set!(is_cross_fs, RepositoryOpenFlags::CROSS_FS);
is_bit_set!(is_bare, RepositoryOpenFlags::BARE);
is_bit_set!(is_no_dotgit, RepositoryOpenFlags::NO_DOTGIT);
is_bit_set!(is_from_env, RepositoryOpenFlags::FROM_ENV);
}
bitflags! {
/// Flags for the return value of `Repository::revparse`
pub struct RevparseMode: u32 {
/// The spec targeted a single object
const SINGLE = raw::GIT_REVPARSE_SINGLE as u32;
/// The spec targeted a range of commits
const RANGE = raw::GIT_REVPARSE_RANGE as u32;
/// The spec used the `...` operator, which invokes special semantics.
const MERGE_BASE = raw::GIT_REVPARSE_MERGE_BASE as u32;
}
}
impl RevparseMode {
is_bit_set!(is_no_single, RevparseMode::SINGLE);
is_bit_set!(is_range, RevparseMode::RANGE);
is_bit_set!(is_merge_base, RevparseMode::MERGE_BASE);
}
bitflags! {
/// The results of `merge_analysis` indicating the merge opportunities.
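    ///
    /// # Example
    ///
    /// A minimal sketch of checking whether a fetched branch can be
    /// fast-forwarded; the reference name is a placeholder and errors are
    /// simply unwrapped.
    ///
    /// ```no_run
    /// use git2::Repository;
    ///
    /// let repo = Repository::open("/path/to/a/repo").unwrap();
    /// let reference = repo.find_reference("refs/remotes/origin/master").unwrap();
    /// let their_head = repo.reference_to_annotated_commit(&reference).unwrap();
    /// let (analysis, _preference) = repo.merge_analysis(&[&their_head]).unwrap();
    /// if analysis.is_fast_forward() {
    ///     // a fast-forward is possible; the caller can simply move HEAD
    /// }
    /// ```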
pub struct MergeAnalysis: u32 {
/// No merge is possible.
const ANALYSIS_NONE = raw::GIT_MERGE_ANALYSIS_NONE as u32;
/// A "normal" merge; both HEAD and the given merge input have diverged
/// from their common ancestor. The divergent commits must be merged.
const ANALYSIS_NORMAL = raw::GIT_MERGE_ANALYSIS_NORMAL as u32;
/// All given merge inputs are reachable from HEAD, meaning the
/// repository is up-to-date and no merge needs to be performed.
const ANALYSIS_UP_TO_DATE = raw::GIT_MERGE_ANALYSIS_UP_TO_DATE as u32;
/// The given merge input is a fast-forward from HEAD and no merge
/// needs to be performed. Instead, the client can check out the
/// given merge input.
const ANALYSIS_FASTFORWARD = raw::GIT_MERGE_ANALYSIS_FASTFORWARD as u32;
/// The HEAD of the current repository is "unborn" and does not point to
/// a valid commit. No merge can be performed, but the caller may wish
/// to simply set HEAD to the target commit(s).
const ANALYSIS_UNBORN = raw::GIT_MERGE_ANALYSIS_UNBORN as u32;
}
}
impl MergeAnalysis {
is_bit_set!(is_none, MergeAnalysis::ANALYSIS_NONE);
is_bit_set!(is_normal, MergeAnalysis::ANALYSIS_NORMAL);
is_bit_set!(is_up_to_date, MergeAnalysis::ANALYSIS_UP_TO_DATE);
is_bit_set!(is_fast_forward, MergeAnalysis::ANALYSIS_FASTFORWARD);
is_bit_set!(is_unborn, MergeAnalysis::ANALYSIS_UNBORN);
}
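// Illustrative sketch (not part of the original source): one way a caller
// might act on these analysis bits after a merge analysis. The function name
// and return strings are hypothetical; only the `MergeAnalysis` accessors
// defined above are assumed.
#[allow(dead_code)]
fn choose_merge_strategy(analysis: MergeAnalysis) -> &'static str {
    if analysis.is_up_to_date() {
        // Nothing to merge; HEAD already contains the inputs.
        "up-to-date: nothing to do"
    } else if analysis.is_fast_forward() {
        // Move the branch reference and check out the target commit.
        "fast-forward"
    } else if analysis.is_normal() {
        // Perform a true merge and create a merge commit.
        "normal merge"
    } else {
        // Unborn HEAD or no merge possible; handle separately.
        "manual handling required"
    }
}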
bitflags! {
/// The user's stated preference for merges.
pub struct MergePreference: u32 {
/// No configuration was found that suggests a preferred behavior for
/// merge.
const NONE = raw::GIT_MERGE_PREFERENCE_NONE as u32;
/// There is a `merge.ff=false` configuration setting, suggesting that
/// the user does not want to allow a fast-forward merge.
const NO_FAST_FORWARD = raw::GIT_MERGE_PREFERENCE_NO_FASTFORWARD as u32;
/// There is a `merge.ff=only` configuration setting, suggesting that
/// the user only wants fast-forward merges.
const FASTFORWARD_ONLY = raw::GIT_MERGE_PREFERENCE_FASTFORWARD_ONLY as u32;
}
}
impl MergePreference {
is_bit_set!(is_none, MergePreference::NONE);
is_bit_set!(is_no_fast_forward, MergePreference::NO_FAST_FORWARD);
is_bit_set!(is_fastforward_only, MergePreference::FASTFORWARD_ONLY);
}
#[cfg(test)] #[macro_use] mod test;
#[macro_use] mod panic;
mod call;
mod util;
pub mod build;
pub mod cert;
pub mod string_array;
pub mod oid_array;
pub mod transport;
mod blame;
mod blob;
mod branch;
mod buf;
mod commit;
mod config;
mod cred;
mod describe;
mod diff;
mod error;
mod index;
mod merge;
mod message;
mod note;
mod object;
mod odb;
mod oid;
mod packbuilder;
mod pathspec;
mod patch;
mod proxy_options;
mod rebase;
mod reference;
mod reflog;
mod refspec;
mod remote;
mod remote_callbacks;
mod repo;
mod revspec;
mod revwalk;
mod signature;
mod status;
mod submodule;
mod stash;
mod tag;
mod time;
mod tree;
mod treebuilder;
fn init() {
static INIT: Once = ONCE_INIT;
INIT.call_once(|| {
openssl_env_init();
});
raw::init();
}
#[cfg(all(unix, not(target_os = "macos"), not(target_os = "ios"), feature = "https"))]
fn openssl_env_init() {
extern crate openssl_probe;
// Currently, libgit2 leverages OpenSSL for SSL support when cloning
// repositories over HTTPS. This means that we're picking up an OpenSSL
// dependency on non-Windows platforms (where it has its own HTTPS
// subsystem). As a result, we need to link to OpenSSL.
//
// Now actually *linking* to OpenSSL isn't so hard. We just need to make
// sure to use pkg-config to discover any relevant system dependencies for
// differences between distributions like CentOS and Ubuntu. The actual
// trickiness comes about when we start *distributing* the resulting
// binaries. Currently Cargo is distributed in binary form as nightlies,
// which means we're distributing a binary with OpenSSL linked in.
//
// For historical reasons, the Linux nightly builder is running a CentOS
// distribution in order to have as much ABI compatibility with other
// distributions as possible. Sadly, however, this compatibility does not
// extend to OpenSSL. Currently OpenSSL has two major versions, 0.9 and 1.0,
// which are incompatible (many ABI differences). The CentOS builder we
// build on has version 1.0, as do most distributions today. Some still have
// 0.9, however. This means that if we are to distribute the binaries built
// by the CentOS machine, we would only be compatible with OpenSSL 1.0 and
// we would fail to run (a dynamic linker error at runtime) on systems with
// only 0.9.8 installed (hopefully).
//
// But wait, the plot thickens! Apparently CentOS has dubbed their OpenSSL
// library as `libssl.so.10`, notably the `10` is included at the end. On
// the other hand Ubuntu, for example, only distributes `libssl.so`. This
// means that the binaries created at CentOS are hard-wired to probe for a
// file called `libssl.so.10` at runtime (using the LD_LIBRARY_PATH), which
// will not be found on ubuntu. The conclusion of this is that binaries
// built on CentOS cannot be distributed to Ubuntu and run successfully.
//
// There are a number of sneaky things we could do, including, but not
// limited to:
//
// 1. Create a shim program which runs "just before" cargo runs. The
// responsibility of this shim program would be to locate `libssl.so`,
// whatever it's called, on the current system, make sure there's a
// symlink *somewhere* called `libssl.so.10`, and then set up
// LD_LIBRARY_PATH and run the actual cargo.
//
// This approach definitely seems unconventional, and is borderline
// overkill for this problem. It's also dubious if we can find a
// libssl.so reliably on the target system.
//
// 2. Somehow re-work the CentOS installation so that the linked-against
// library is called libssl.so instead of libssl.so.10
//
// The problem with this approach is that systems with 0.9 installed will
// start to silently fail, due to also having libraries called libssl.so
// (probably symlinked under a more appropriate version).
//
// 3. Compile Cargo against both OpenSSL 1.0 *and* OpenSSL 0.9, and
// distribute both. Also make sure that the linked-against name of the
// library is `libssl.so`. At runtime we determine which version is
// installed, and we then run the appropriate binary.
//
// This approach clearly has drawbacks in terms of infrastructure and
// feasibility.
//
// 4. Build a nightly of Cargo for each distribution we'd like to support.
// You would then pick the appropriate Cargo nightly to install locally.
//
// So, with all this in mind, the decision was made to *statically* link
// OpenSSL. This solves any problem of relying on a downstream OpenSSL
// version being available. This does, however, open a can of worms related
// to security issues. It's generally a good idea to dynamically link
// OpenSSL as you'll get security updates over time without having to do
// anything (the system administrator will update the local openssl
// package). By statically linking, we're forfeiting this feature.
//
// The conclusion was that it is likely appropriate for the Cargo nightlies
// to statically link OpenSSL, while highly encouraging distributions and
// packagers of Cargo to dynamically link OpenSSL. Packagers are targeting
// one system and are distributing to only that system, so none of the
// problems mentioned above would arise.
//
// In order to support this, a new package was made: openssl-static-sys.
// This package currently performs a fairly simple task:
//
// 1. Run pkg-config to discover where openssl is installed.
// 2. If openssl is installed in a nonstandard location, *and* static copies
// of the libraries are available, copy them to $OUT_DIR.
//
// This library will bring in libssl.a and libcrypto.a into the local build,
// allowing them to be picked up by this crate. This allows us to configure
// our own buildbots to have pkg-config point to these local pre-built
// copies of a static OpenSSL (with very few dependencies) while allowing
// most other builds of Cargo to naturally dynamically link OpenSSL.
//
// So in summary, if you're with me so far, we've statically linked OpenSSL
// to the Cargo binary (or any binary, for that matter) and we're ready to
// distribute it to *all* linux distributions. Remember that our original
// intent for openssl was for HTTPS support, which implies that we need some
// for of CA certificate store to validate certificates. This is normally
// installed in a standard system location.
//
// Unfortunately, as one might imagine, OpenSSL is configured for where this
// standard location is at *build time*, but it often varies widely
// per-system. Consequently, it was discovered that OpenSSL will respect the
// SSL_CERT_FILE and SSL_CERT_DIR environment variables in order to assist
// in discovering the location of this file (hurray!).
//
// So, finally getting to the point, this function solely exists to support
// our static builds of OpenSSL by probing for the "standard system
// location" of certificates and setting relevant environment variable to
// point to them.
//
// Ah, and as a final note, this is only a problem on Linux, not on OS X. On
// OS X the OpenSSL binaries are stable enough that we can just rely on
// dynamic linkage (plus they have some weird modifications to OpenSSL which
// means we wouldn't want to link statically).
openssl_probe::init_ssl_cert_env_vars();
}
#[cfg(any(windows, target_os = "macos", target_os = "ios", not(feature = "https")))]
fn openssl_env_init() {}
unsafe fn opt_bytes<'a, T>(_anchor: &'a T,
c: *const libc::c_char) -> Option<&'a [u8]> |
fn opt_cstr<T: IntoCString>(o: Option<T>) -> Result<Option<CString>, Error> {
match o {
Some(s) => s.into_c_string().map(Some),
None => Ok(None)
}
}
impl ObjectType {
/// Convert an object type to its string representation.
pub fn str(&self) -> &'static str {
unsafe {
let ptr = call!(raw::git_object_type2string(*self)) as *const _;
let data = CStr::from_ptr(ptr).to_bytes();
str::from_utf8(data).unwrap()
}
}
/// Determine if the given git_otype is a valid loose object type.
pub fn is_loose(&self) -> bool {
unsafe { (call!(raw::git_object_typeisloose(*self)) == 1) }
}
/// Convert a raw git_otype to an ObjectType
pub fn from_raw(raw: raw::git_otype) -> Option<ObjectType> {
match raw {
raw::GIT_OBJ_ANY => Some(ObjectType::Any),
raw::GIT_OBJ_COMMIT => Some(ObjectType::Commit),
raw::GIT_OBJ_TREE => Some(ObjectType::Tree),
raw::GIT_OBJ_BLOB => Some(ObjectType::Blob),
raw::GIT_OBJ_TAG => Some(ObjectType::Tag),
_ => None,
}
}
/// Convert this kind into its raw representation
pub fn raw(&self) -> raw::git_otype {
call::convert(self)
}
/// Convert a string object type representation to its object type.
pub fn from_str(s: &str) -> Option<ObjectType> {
let raw = unsafe { call!(raw::git_object_string2type(CString::new(s).unwrap())) };
ObjectType::from_raw(raw)
}
}
impl fmt::Display for ObjectType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.str().fmt(f)
}
}
impl ReferenceType {
/// Convert an object type to its string representation.
pub fn str(&self) -> &'static str {
match self {
&ReferenceType::Oid => "oid",
&ReferenceType::Symbolic => "symbolic",
}
}
/// Convert a raw git_ref_t to a ReferenceType.
pub fn from_raw(raw: raw::git_ref_t) -> Option<ReferenceType> {
match raw {
raw::GIT_REF_OID => Some(ReferenceType::Oid),
raw::GIT_REF_SYMBOLIC => Some(ReferenceType::Symbolic),
_ => None,
}
}
}
impl fmt::Display for ReferenceType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.str().fmt(f)
}
}
impl ConfigLevel {
/// Converts a raw configuration level to a ConfigLevel
pub fn from_raw(raw: raw::git_config_level_t) -> ConfigLevel {
match raw {
raw::GIT_CONFIG_LEVEL_PROGRAMDATA => ConfigLevel::ProgramData,
raw::GIT_CONFIG_LEVEL_SYSTEM => ConfigLevel::System,
raw::GIT_CONFIG_LEVEL_XDG => ConfigLevel::XDG,
raw::GIT_CONFIG_LEVEL_GLOBAL => ConfigLevel::Global,
raw::GIT_CONFIG_LEVEL_LOCAL => ConfigLevel::Local,
raw::GIT_CONFIG_LEVEL_APP => ConfigLevel::App,
raw::GIT_CONFIG_HIGHEST_LEVEL => ConfigLevel::Highest,
n => panic!("unknown config level: {}", n),
}
}
}
bitflags! {
/// Status flags for a single file
///
/// A combination of these values will be returned to indicate the status of
/// a file. Status compares the working directory, the index, and the
/// current HEAD of the repository. The `STATUS_INDEX_*` set of flags
/// represents the status of a file in the index relative to the HEAD, and the
/// `STATUS_WT_*` set of flags represents the status of the file in the
/// working directory relative to the index.
pub struct Status: u32 {
#[allow(missing_docs)]
const CURRENT = raw::GIT_STATUS_CURRENT as u32;
#[allow(missing_docs)]
const INDEX_NEW = raw::GIT_STATUS_INDEX_NEW as u32;
#[allow(missing_docs)]
const INDEX_MODIFIED = raw::GIT_STATUS_INDEX_MODIFIED as u32;
#[allow(missing_docs)]
const INDEX_DELETED = raw::GIT_STATUS_INDEX_DELETED as u32;
#[allow(missing_docs)]
const INDEX_RENAMED = raw::GIT_STATUS_INDEX_RENAMED as u32;
#[allow(missing_docs)]
const INDEX_TYPECHANGE = raw::GIT_STATUS_INDEX_TYPECHANGE as u32;
#[allow(missing_docs)]
const WT_NEW = raw::GIT_STATUS_WT_NEW as u32;
#[allow(missing_docs)]
const WT_MODIFIED = raw::GIT_STATUS_WT_MODIFIED as u32;
#[allow(missing_docs)]
const WT_DELETED = raw::GIT_STATUS_WT_DELETED as u32;
#[allow(missing_docs)]
const WT_TYPECHANGE = raw::GIT_STATUS_WT_TYPECHANGE as u32;
#[allow(missing_docs)]
const WT_RENAMED = raw::GIT_STATUS_WT_RENAMED as u32;
#[allow(missing_docs)]
const IGNORED = raw::GIT_STATUS_IGNORED as u32;
#[allow(missing_docs)]
const CONFLICTED = raw::GIT_STATUS_CONFLICTED as u32;
}
}
impl Status {
is_bit_set!(is_index_new, Status::INDEX_NEW);
is_bit_set!(is_index_modified, Status::INDEX_MODIFIED);
is_bit_set!(is_index_deleted, Status::INDEX_DELETED);
is_bit_set!(is_index_renamed, Status::INDEX_RENAMED);
is_bit_set!(is_index_typechange, Status::INDEX_TYPECHANGE);
is_bit_set!(is_wt_new, Status::WT_NEW);
is_bit_set!(is_wt_modified, Status::WT_MODIFIED);
is_bit_set!(is_wt_deleted, Status::WT_DELETED);
is_bit_set!(is_wt_typechange, Status::WT_TYPECHANGE);
is_bit_set!(is_wt_renamed, Status::WT_RENAMED);
is_bit_set!(is_ignored, Status::IGNORED);
is_bit_set!(is_conflicted, Status::CONFLICTED);
}
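// Illustrative sketch (not part of the original source): splitting a `Status`
// value into its staged (`INDEX_*`) and unstaged (`WT_*`) halves using the
// accessors generated above. The function name is hypothetical.
#[allow(dead_code)]
fn summarize_status(status: Status) -> (&'static str, &'static str) {
    let staged = if status.is_index_new() || status.is_index_modified() || status.is_index_deleted() {
        "staged changes"
    } else {
        "index clean"
    };
    let unstaged = if status.is_wt_new() || status.is_wt_modified() || status.is_wt_deleted() {
        "unstaged changes"
    } else {
        "worktree clean"
    };
    (staged, unstaged)
}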
bitflags! {
/// Mode options for RepositoryInitOptions
pub struct RepositoryInitMode: u32 {
/// Use permissions configured by umask - the default
const SHARED_UMASK = raw::GIT_REPOSITORY_INIT_SHARED_UMASK as u32;
/// Use `--shared=group` behavior, chmod'ing the new repo to be
/// group writable and \"g+sx\" for sticky group assignment
const SHARED_GROUP = raw::GIT_REPOSITORY_INIT_SHARED_GROUP as u32;
/// Use `--shared=all` behavior, adding world readability.
const SHARED_ALL = raw::GIT_REPOSITORY_INIT_SHARED_ALL as u32;
}
}
impl RepositoryInitMode {
is_bit_set!(is_shared_umask, RepositoryInitMode::SHARED_UMASK);
is_bit_set!(is_shared_group, RepositoryInitMode::SHARED_GROUP);
is_bit_set!(is_shared_all, RepositoryInitMode::SHARED_ALL);
}
/// What type of change is described by a `DiffDelta`?
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Delta {
/// No changes
Unmodified,
/// Entry does not exist in old version
Added,
/// Entry does not exist in new version
Deleted,
/// Entry content changed between old and new
Modified,
/// Entry was renamed between old and new
Renamed,
/// Entry was copied from another old entry
Copied,
/// Entry is ignored item in workdir
Ignored,
/// Entry is untracked item in workdir
Untracked,
/// Type of entry changed between old and new
Typechange,
/// Entry is unreadable
Unreadable,
/// Entry in the index is conflicted
Conflicted,
}
bitflags! {
/// Return codes for submodule status.
///
/// A combination of these flags will be returned to describe the status of a
/// submodule. Depending on the "ignore" property of the submodule, some of
/// the flags may never be returned because they indicate changes that are
/// supposed to be ignored.
///
/// Submodule info is contained in 4 places: the HEAD tree, the index, config
/// files (both .git/config and .gitmodules), and the working directory. Any
/// or all of those places might be missing information about the submodule
/// depending on what state the repo is in. We consider all four places to
/// build the combination of status flags.
///
/// There are four values that are not really status, but give basic info
/// about what sources of submodule data are available. These will be
/// returned even if ignore is set to "ALL".
///
/// * IN_HEAD - superproject head contains submodule
/// * IN_INDEX - superproject index contains submodule
/// * IN_CONFIG - superproject gitmodules has submodule
/// * IN_WD - superproject workdir has submodule
///
/// The following values will be returned so long as ignore is not "ALL".
///
/// * INDEX_ADDED - in index, not in head
/// * INDEX_DELETED - in head, not in index
/// * INDEX_MODIFIED - index and head don't match
/// * WD_UNINITIALIZED - workdir contains empty directory
/// * WD_ADDED - in workdir, not index
/// * WD_DELETED - in index, not workdir
/// * WD_MODIFIED - index and workdir head don't match
///
/// The following can only be returned if ignore is "NONE" or "UNTRACKED".
///
/// * WD_INDEX_MODIFIED - submodule workdir index is dirty
/// * WD_WD_MODIFIED - submodule workdir has modified files
///
/// Lastly, the following will only be returned for ignore "NONE".
///
/// * WD_UNTRACKED - wd contains untracked files
pub struct SubmoduleStatus: u32 {
#[allow(missing_docs)]
const IN_HEAD = raw::GIT_SUBMODULE_STATUS_IN_HEAD as u32;
#[allow(missing_docs)]
const IN_INDEX = raw::GIT_SUBMODULE_STATUS_IN_INDEX as u32;
#[allow(missing_docs)]
const IN_CONFIG = raw::GIT_SUBMODULE_STATUS_IN_CONFIG as u32;
#[allow(missing_docs)]
const IN_WD = raw::GIT_SUBMODULE_STATUS_IN_WD as u32;
#[allow(missing_docs)]
const INDEX_ADDED = raw::GIT_SUBMODULE_STATUS_INDEX_ADDED as u32;
#[allow(missing_docs)]
const INDEX_DELETED = raw::GIT_SUBMODULE_STATUS_INDEX_DELETED as u32;
#[allow(missing_docs)]
const INDEX_MODIFIED = raw::GIT_SUBMODULE_STATUS_INDEX_MODIFIED as u32;
#[allow(missing_docs)]
const WD_UNINITIALIZED =
raw::GIT_SUBMODULE_STATUS_WD_UNINITIALIZED as u32;
#[allow(missing_docs)]
const WD_ADDED = raw::GIT_SUBMODULE_STATUS_WD_ADDED as u32;
#[allow(missing_docs)]
const WD_DELETED = raw::GIT_SUBMODULE_STATUS_WD_DELETED as u32;
#[allow(missing_docs)]
const WD_MODIFIED = raw::GIT_SUBMODULE_STATUS_WD_MODIFIED as u32;
#[allow(missing_docs)]
const WD_INDEX_MODIFIED =
raw::GIT_SUBMODULE_STATUS_WD_INDEX_MODIFIED as u32;
#[allow(missing_docs)]
const WD_WD_MODIFIED = raw::GIT_SUBMODULE_STATUS_WD_WD_MODIFIED as u32;
#[allow(missing_docs)]
const WD_UNTRACKED = raw::GIT_SUBMODULE_STATUS_WD_UNTRACKED as u32;
}
}
impl SubmoduleStatus {
is_bit_set!(is_in_head, SubmoduleStatus::IN_HEAD);
is_bit_set!(is_in_index, SubmoduleStatus::IN_INDEX);
is_bit_set!(is_in_config, SubmoduleStatus::IN_CONFIG);
is_bit_set!(is_in_wd, SubmoduleStatus::IN_WD);
is_bit_set!(is_index_added, SubmoduleStatus::INDEX_ADDED);
is_bit_set!(is_index_deleted, SubmoduleStatus::INDEX_DELETED);
is_bit_set!(is_index_modified, SubmoduleStatus::INDEX_MODIFIED);
is_bit_set!(is_wd_uninitialized, SubmoduleStatus::WD_UNINITIALIZED);
is_bit_set!(is_wd_added, SubmoduleStatus::WD_ADDED);
is_bit_set!(is_wd_deleted, SubmoduleStatus::WD_DELETED);
is_bit_set!(is_wd_modified, SubmoduleStatus::WD_MODIFIED);
is_bit_set!(is_wd_index_modified, SubmoduleStatus::WD_INDEX_MODIFIED);
is_bit_set!(is_wd_wd_modified, SubmoduleStatus::WD_WD_MODIFIED);
is_bit_set!(is_wd_untracked, SubmoduleStatus::WD_UNTRACKED);
}
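// Illustrative sketch (not part of the original source): collapsing the
// submodule status bits documented above into a coarse summary. The function
// name is hypothetical; `status` is assumed to have been obtained with an
// ignore setting other than "ALL" so the workdir bits are populated.
#[allow(dead_code)]
fn describe_submodule(status: SubmoduleStatus) -> &'static str {
    if !status.is_in_wd() {
        "not checked out"
    } else if status.is_wd_uninitialized() {
        "present but uninitialized"
    } else if status.is_wd_modified() || status.is_wd_wd_modified() || status.is_wd_untracked() {
        "checked out with local changes"
    } else {
        "checked out and clean"
    }
}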
/// Submodule ignore values
///
/// These values represent settings for the `submodule.$name.ignore`
/// configuration value which says how deeply to look at the working
/// directory when getting the submodule status.
pub enum SubmoduleIgnore {
/// Use the submodule's configuration
Unspecified,
/// Any change or untracked file is considered dirty
None,
/// Only dirty if tracked files have changed
Untracked,
/// Only dirty if HEAD has moved
Dirty,
/// Never dirty
All,
}
bitflags! {
/// Options controlling how pathspec matching is performed.
pub struct PathspecFlags: u32 {
/// Use the default pathspec matching configuration.
const DEFAULT = raw::GIT_PATHSPEC_DEFAULT as u32;
/// Force matching to ignore case, otherwise matching will use native
/// case sensitivity of the platform filesystem.
const IGNORE_CASE = raw::GIT_PATHSPEC_IGNORE_CASE as u32;
/// Force case sensitive matches, otherwise match will use the native
/// case sensitivity of the platform filesystem.
const USE_CASE = raw::GIT_PATHSPEC_USE_CASE as u32;
/// Disable glob patterns and just use simple string comparison for
/// matching.
const NO_GLOB = raw::GIT_PATHSPEC_NO_GLOB as u32;
/// Means that match functions return the error code `NotFound` if no
/// matches are found. By default, finding no matches is a success.
const NO_MATCH_ERROR = raw::GIT_PATHSPEC_NO_MATCH_ERROR as u32;
/// Means that the list returned should track which patterns matched
/// which files so that at the end of the match we can identify patterns
/// that did not match any files.
const FIND_FAILURES = raw::GIT_PATHSPEC_FIND_FAILURES as u32;
/// Means that the list returned does not need to keep the actual
/// matching filenames. Use this to just test if there were any matches
/// at all or in combination with `FIND_FAILURES` to validate a
/// pathspec.
const FAILURES_ONLY = raw::GIT_PATHSPEC_FAILURES_ONLY as u32;
}
}
impl PathspecFlags {
is_bit_set!(is_default, PathspecFlags::DEFAULT);
is_bit_set!(is_ignore_case, PathspecFlags::IGNORE_CASE);
is_bit_set!(is_use_case, PathspecFlags::USE_CASE);
is_bit_set!(is_no_glob, PathspecFlags::NO_GLOB);
is_bit_set!(is_no_match_error, PathspecFlags::NO_MATCH_ERROR);
is_bit_set!(is_find_failures, PathspecFlags::FIND_FAILURES);
is_bit_set!(is_failures_only, PathspecFlags::FAILURES_ONLY);
}
impl Default for PathspecFlags {
fn default() -> Self {
PathspecFlags::DEFAULT
}
}
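// Illustrative sketch (not part of the original source): pathspec flags are
// plain bitflags, so they combine with `|` and the generated accessors test
// individual bits. The function name is hypothetical.
#[allow(dead_code)]
fn case_insensitive_with_failures() -> PathspecFlags {
    let flags = PathspecFlags::IGNORE_CASE | PathspecFlags::FIND_FAILURES;
    debug_assert!(flags.is_ignore_case() && flags.is_find_failures());
    flags
}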
bitflags! {
/// Types of notifications emitted from checkouts.
pub struct CheckoutNotificationType: u32 {
/// Notification about a conflict.
const CONFLICT = raw::GIT_CHECKOUT_NOTIFY_CONFLICT as u32;
/// Notification about a dirty file.
const DIRTY = raw::GIT_CHECKOUT_NOTIFY_DIRTY as u32;
/// Notification about an updated file.
const UPDATED = raw::GIT_CHECKOUT_NOTIFY_UPDATED as u32;
/// Notification about an untracked file.
const UNTRACKED = raw::GIT_CHECKOUT_NOTIFY_UNTRACKED as u32;
/// Notification about an ignored file.
const IGNORED = raw::GIT_CHECKOUT_NOTIFY_IGNORED as u32;
}
}
impl CheckoutNotificationType {
is_bit_set!(is_conflict, CheckoutNotificationType::CONFLICT);
is_bit_set!(is_dirty, CheckoutNotificationType::DIRTY);
is_bit_set!(is_updated, CheckoutNotificationType::UPDATED);
is_bit_set!(is_untracked, CheckoutNotificationType::UNTRACKED);
is_bit_set!(is_ignored, CheckoutNotificationType::IGNORED);
}
/// Possible output formats for diff data
#[derive(Copy, Clone)]
pub enum DiffFormat {
/// full git diff
Patch,
/// just the headers of the patch
PatchHeader,
/// like git diff --raw
Raw,
/// like git diff --name-only
NameOnly,
/// like git diff --name-status
NameStatus,
}
bitflags! {
/// Formatting options for diff stats
pub struct DiffStatsFormat: raw::git_diff_stats_format_t {
/// Don't generate any stats
const NONE = raw::GIT_DIFF_STATS_NONE;
/// Equivalent of `--stat` in git
const FULL = raw::GIT_DIFF_STATS_FULL;
/// Equivalent of `--shortstat` in git
const SHORT = raw::GIT_DIFF_STATS_SHORT;
/// Equivalent of `--numstat` in git
const NUMBER = raw::GIT_DIFF_STATS_NUMBER;
/// Extended header information such as creations, renames and mode
/// changes, equivalent of `--summary` in git
const INCLUDE_SUMMARY = raw::GIT_DIFF_STATS_INCLUDE_SUMMARY;
}
}
impl DiffStatsFormat {
is_bit_set!(is_none, DiffStatsFormat::NONE);
is_bit_set!(is_full, DiffStatsFormat::FULL);
is_bit_set!(is_short, DiffStatsFormat::SHORT);
is_bit_set!(is_number, DiffStatsFormat::NUMBER);
is_bit_set!(is_include_summary, DiffStatsFormat::INCLUDE_SUMMARY);
}
/// Automatic tag following options.
pub enum AutotagOption {
/// Use the setting from the remote's configuration
Unspecified,
/// Ask the server for tags pointing to objects we're already downloading
Auto,
/// Don't ask for any tags beyond the refspecs
None,
/// Ask for all the tags
All,
}
/// Configuration for how pruning is done on a fetch
pub enum FetchPrune {
/// Use the setting from the configuration
Unspecified,
/// Force pruning on
On,
/// Force pruning off
Off,
}
#[allow(missing_docs)]
#[derive(Debug)]
pub enum StashApplyProgress {
/// None
None,
/// Loading the stashed data from the object database
LoadingStash,
/// The stored index is being analyzed
AnalyzeIndex,
/// The modified files are being analyzed
AnalyzeModified,
/// The untracked and ignored files are being analyzed
AnalyzeUntracked,
/// The untracked files are being written to disk
CheckoutUntracked,
/// The modified files are being written to disk
CheckoutModified,
/// The stash was applied successfully
Done,
}
bitflags! {
#[allow(missing_docs)]
pub struct StashApplyFlags: u32 {
#[allow(missing_docs)]
const DEFAULT = raw::GIT_STASH_APPLY_DEFAULT as u32;
/// Try to reinstate not only the working tree's changes,
/// but also the index's changes.
const REINSTATE_INDEX = raw::GIT_STASH_APPLY_REINSTATE_INDEX as u32;
}
}
impl StashApplyFlags {
is_bit_set!(is_default, StashApplyFlags::DEFAULT);
is_bit_set!(is_reinstate_index, StashApplyFlags::REINSTATE_INDEX);
}
impl Default for StashApplyFlags {
fn default() -> Self {
StashApplyFlags::DEFAULT
}
}
bitflags! {
#[allow(missing_docs)]
pub struct StashFlags: u32 {
#[allow(missing_docs)]
const DEFAULT = raw::GIT_STASH_DEFAULT as u32;
/// All changes already added to the index are left intact in
/// the working directory
const KEEP_INDEX = raw::GIT_STASH_KEEP_INDEX as u32;
/// All untracked files are also stashed and then cleaned up
/// from the working directory
const INCLUDE_UNTRACKED = raw::GIT_STASH_INCLUDE_UNTRACKED as u32;
/// All ignored files are also stashed and then cleaned up from
/// the working directory
const INCLUDE_IGNORED = raw::GIT_STASH_INCLUDE_IGNORED as u32;
}
}
impl StashFlags {
is_bit_set!(is_default, StashFlags::DEFAULT);
is_bit_set!(is_keep_index, StashFlags::KEEP_INDEX);
is_bit_set!(is_include_untracked, StashFlags::INCLUDE_UNTRACKED);
is_bit_set!(is_include_ignored, StashFlags::INCLUDE_IGNORED);
}
impl Default for StashFlags {
fn default() -> Self {
StashFlags::DEFAULT
}
}
#[cfg(test)]
mod tests {
use super::ObjectType;
#[test]
fn convert() {
assert_eq!(ObjectType::Blob.str(), "blob");
assert_eq!(ObjectType::from_str("blob"), Some(ObjectType::Blob));
assert!(ObjectType::Blob.is_loose());
}
}
| {
if c.is_null() {
None
} else {
Some(CStr::from_ptr(c).to_bytes())
}
} |
package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Eospac(Package):
"""A collection of C routines that can be used to access the Sesame data
library.
"""
homepage = "http://laws.lanl.gov/projects/data/eos.html"
list_url = "http://laws.lanl.gov/projects/data/eos/eospacReleases.php"
version('6.4.0', sha256='15a953beac735c68431afe86ffe33323d540d0fbbbec03ba79438dd29736051d',
url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0_612ea8c9b8ffa6d9175d9118955571d9107f1e3c.tgz")
version('6.4.0beta.4', sha256='0ebfd8badff575ea77444aa978629dbdca3135a0b5eb373b8daba058773d4635',
url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.4_aff6429bb6868de31a980278bafa13487c2ce83f.tgz")
version('6.4.0beta.3', sha256='9f387ca5356519494c6f3f27adb0c165cf9f9e15e3355a67bf940a4a92eebdab',
url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.3_90ff265f62aa1780bfcd0a62dad807b6be6ed461.tgz")
version('6.4.0beta.2', sha256='f9db46cd6c62a6f83960d802350f3e37675921af102969b293c02eb797558a53',
url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.2_69196eadbc77506561eef711f19d2f03b4ab0ffa.tgz")
version('6.4.0beta.1', sha256='14c5c804e5f628f41e8ed80bcee5a80adeb6c6f3d130715421ca99a30c7eb7e2',
url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.1_r20171213193219.tgz")
version('6.3.1', sha256='aa1112c4251c9c3c2883a7ab2c7f2abff2c339f29dbbf8421ef88b0c9df904f8', preferred=True,
url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.3.1_r20161202150449.tgz")
# This patch allows the use of spack's compiler wrapper 'flang'
patch('flang.patch', when='@:6.4.0beta.2%clang')
def install(self, spec, prefix):
with working_dir('Source'):
make('install',
'CC={0}'.format(spack_cc),
'CXX={0}'.format(spack_cxx),
'F77={0}'.format(spack_f77),
'F90={0}'.format(spack_fc),
'prefix={0}'.format(prefix),
'INSTALLED_LIBRARY_DIR={0}'.format(prefix.lib), | 'INSTALLED_BIN_DIR={0}'.format(prefix.bin))
# fix conflict with linux's getopt for 6.4.0beta.2
if spec.satisfies('@6.4.0beta.2'):
with working_dir(prefix.bin):
move('getopt', 'driver_getopt') | 'INSTALLED_INCLUDE_DIR={0}'.format(prefix.include),
'INSTALLED_EXAMPLE_DIR={0}'.format(prefix.example), |
sqlalchemy_datastore.py | import pandas
from ..schema.schema_base import *
from .datastore_base import DataStore
from .odo_datastore import OdoDataStore
from ..config import config
from functools import lru_cache, partial
from sqlalchemy import Table, MetaData, select
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import Select, and_
from sqlalchemy import sql
import io
import tempfile
import time
import os
import datetime
import ciso8601
import odo
metadatas = {}
def get_engine_metadata(engine):
if engine in metadatas:
return metadatas[engine]
else:
metadata = MetaData()
metadata.bind = engine
metadatas[engine] = metadata
return metadata
def get_reflected_metadata(engine, schema_name=None):
metadata = MetaData()
metadata.reflect(bind=engine, schema=schema_name)
metadata.bind = engine
return metadata
########################################################################
for col_type in [dt, delta, num, bool_]:
col_type._storage_target_registry['sqlalchemy'] = col_type._storage_target_registry['pandas'].copy()
@cat.register_check('sqlalchemy')
def _(col):
return col.dtype == 'object'
@cat.register_transform('sqlalchemy')
def _(col):
return col.astype('object')
@id_.register_check('sqlalchemy')
def _(col):
return col.dtype == 'object'
@id_.register_transform('sqlalchemy')
def _(col):
return col.astype('object')
########################################################################
@cat.register_metadata('sqlalchemy')
def _(self):
return sql.schema.Column(self.name, sql.sqltypes.Text, nullable=True)
@id_.register_metadata('sqlalchemy')
def _(self):
return sql.schema.Column(self.name, sql.sqltypes.Integer, nullable=True)
@dt.register_metadata('sqlalchemy')
def _(self):
return sql.schema.Column(self.name, sql.sqltypes.DateTime(timezone=True), nullable=True)
@delta.register_metadata('sqlalchemy')
def _(self):
return sql.schema.Column(self.name, sql.sqltypes.Interval, nullable=True)
@big_dt.register_metadata('sqlalchemy')
def _(self):
return sql.schema.Column(self.name, sql.sqltypes.DateTime(timezone=True), nullable=True)
@num.register_metadata('sqlalchemy')
def _(self):
return sql.schema.Column(self.name, sql.sqltypes.Float, nullable=True)
@bool_.register_metadata('sqlalchemy')
def _(self):
return sql.schema.Column(self.name, sql.sqltypes.Boolean, nullable=True)
########################################################################
@lru_cache()
def schema_as_table(schema, engine):
|
sa_type_2_col_type = {
sql.sqltypes.Integer: num,
sql.sqltypes.String: cat,
sql.sqltypes.Date: dt,
sql.sqltypes.DateTime: dt,
sql.sqltypes.Interval: delta,
sql.sqltypes.Numeric: num,
sql.sqltypes.Boolean: bool_
}
def table_as_schema(table):
schema_cols = []
for sa_col in table.c:
for sa_type, col_type in sa_type_2_col_type.items():
if isinstance(sa_col.type, sa_type):
if isinstance(sa_col.type, sql.sqltypes.Integer) and (sa_col.primary_key or sa_col.foreign_keys):
schema_cols.append(id_(sa_col.name))
else:
schema_cols.append(col_type(sa_col.name))
break
options = {}
if table.schema is not None:
options['db_schema'] = table.schema
s = Schema(table.name, schema_cols, options=options)
return s
########################################################################
def fast_sql_to_df(table, schema):
engine = table.bind
if engine.dialect.name == 'mysql':
return fast_mysql_to_df(table, schema)
elif engine.dialect.name == 'postgresql':
return fast_postgresql_to_df(table, schema)
ods = OdoDataStore(schema, table)
df = ods.load()
df = df[schema.col_names()]
return df
def fast_mysql_to_df(table, schema):
f = tempfile.NamedTemporaryFile('w', suffix='.csv', dir=config.data_dir+'tmp')
try:
f.close()
table_name = str(table)
if not isinstance(table, Table):
table_name = '({})'.format(table_name)
# converting to csv
sql = """SELECT {cols} FROM {table} INTO OUTFILE '{filename}'
FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"'
ESCAPED BY '\\\\'
LINES TERMINATED BY '\n'""".format(
cols=', '.join('`'+colname+'`' for colname in schema.col_names()),
filename=f.name,
table=table_name)
table.bind.execute(sql)
# reading csv
df = pandas.read_csv(f.name, header=None, names=schema.col_names(), na_values=['\\N'])
finally:
os.remove(f.name)
for col in schema.cols:
if isinstance(col, dt):
# converting datetime column
df[col.name] = pandas.to_datetime(df[col.name], format="%Y-%m-%d %H:%M:%S", errors='coerce')
if isinstance(col, big_dt):
# converting big_dt column
strptime = datetime.datetime.strptime
parse_func = (lambda x: strptime(x, "%Y-%m-%d %H:%M:%S"))
df[col.name] = df[col.name].map(parse_func, na_action='ignore')
return df
def fast_postgresql_to_df(table, schema):
engine = table.bind
conn = engine.raw_connection()
with conn.cursor() as cur:
with io.StringIO() as f:
table_name = str(table)
if not isinstance(table, Table):
table_name = '({})'.format(table_name)
sql = "COPY {table_name} TO STDOUT WITH (FORMAT CSV, HEADER TRUE)".format(
table_name=table_name)
cur.copy_expert(sql, f)
f.seek(0)
df = pandas.read_csv(f)
for col in schema.cols:
if isinstance(col, dt):
# converting datetime column
df[col.name] = pandas.to_datetime(df[col.name], format="%Y-%m-%d %H:%M:%S", errors='coerce')
if isinstance(col, big_dt):
# converting big_dt column
strptime = datetime.datetime.strptime
parse_func = (lambda x: strptime(x, "%Y-%m-%d %H:%M:%S"))
df[col.name] = df[col.name].map(parse_func, na_action='ignore')
return df
def fast_postgresql_to_csv(table, file_path):
engine = table.bind
conn = engine.raw_connection()
with conn.cursor() as cur:
with open(file_path, 'w') as f:
table_name = str(table)
if not isinstance(table, Table):
table_name = '({})'.format(table_name)
sql = "COPY {table_name} TO STDOUT WITH (FORMAT CSV, HEADER TRUE)".format(
table_name=table_name)
cur.copy_expert(sql, f)
def fast_df_to_sql(df, table, schema):
ods = OdoDataStore(schema, table, storage_target_type='sqlalchemy')
ods.store(df)
class SATableDataStore(DataStore):
def __init__(self, schema, engine, where_clauses=None):
super().__init__(schema)
self.engine = engine
self.table = schema_as_table(self.schema, self.engine)
self.where_clauses = where_clauses
def storage_target(self):
return 'sqlalchemy'
def _load(self):
query = self.table
if self.where_clauses is not None:
query = query.select()
for where_clause in self.where_clauses:
query = query.where(where_clause)
df = fast_sql_to_df(query, self.schema)
return df
def to_csv(self, file_path):
if self.engine.dialect.name != 'postgresql':
raise NotImplementedError('converting directly to csv not supported for non-postgres databases')
query = self.table
if self.where_clauses is not None:
query = query.select()
for where_clause in self.where_clauses:
query = query.where(where_clause)
fast_postgresql_to_csv(query, file_path)
def _store(self, df):
if self.where_clauses is not None:
raise NotImplementedError('Cannot store to a query (where_clauses must be left blank)')
df = df.copy()
fast_df_to_sql(df, self.table, self.schema)
def _update(self, df):
if self.where_clauses is not None:
raise NotImplementedError('Cannot update to a query (where_clauses must be left blank)')
df = df.copy()
with self.engine.connect() as conn:
temp_schema = Schema.rename(self.schema, 'temp_'+self.schema.name)
temp_schema.options['temporary'] = True
temp_table = schema_as_table(temp_schema, self.engine)
print('storing new df in temp table')
fast_df_to_sql(df, temp_table, temp_schema)
print('updating table from matching rows')
index = self.schema.options['index']
update = self.table.update(
values={
col_name: temp_table.c[col_name] for col_name in self.schema.col_names()
},
whereclause=self.table.c[index] == temp_table.c[index]
)
update_res = conn.execute(update)
print('inserting new rows into table')
exists_query = self.table.select().where(self.table.c[index] == temp_table.c[index]).exists()
insert = self.table.insert().from_select(
temp_schema.col_names(),
temp_table.select().where(~exists_query))
ins_res = conn.execute(insert)
def delete(self):
if self.where_clauses is not None:
raise NotImplementedError('Cannot delete a query (where_clauses must be left blank)')
self.table.drop(self.engine)
class SAJoinDataStore(DataStore):
def __init__(self, root_schema, engine, has_schemas=None, belongs_to_schemas=None, root_conditions=None, where_clauses=None):
self.engine = engine
self.root_schema = root_schema
self.root_table = schema_as_table(self.root_schema, self.engine)
self.has_schemas, self.has_join_conditions = self._parse_schema_list(has_schemas)
self.has_tables = [schema_as_table(h_schema, self.engine) for h_schema in self.has_schemas]
self.belongs_to_schemas, self.belongs_to_join_conditions = self._parse_schema_list(belongs_to_schemas)
self.belongs_to_tables = [schema_as_table(b_schema, self.engine) for b_schema in self.belongs_to_schemas]
self.root_conditions = root_conditions
self.where_clauses = where_clauses
schema = Schema.union([self.root_schema] + self.has_schemas + self.belongs_to_schemas, with_prefix=True, schema_name=self.root_schema.name+'_join')
super().__init__(schema)
def _parse_schema_list(self, schema_list=None):
if schema_list is None:
schema_list = []
schemas = []
join_conditions = {}
for schema in schema_list:
if isinstance(schema, tuple):
schema, j_c = schema
join_conditions[schema] = j_c
schemas.append(schema)
return schemas, join_conditions
def storage_target(self):
return 'sqlalchemy'
def _load(self):
root = self.root_table
if self.root_conditions is not None:
root = root.select().where(and_(*self.root_conditions)).alias()
join_clause = root
select_clause = []
root_col_prefix = self.root_schema.options['prefix']
for col in root.c:
select_clause.append(col.label("{}.{}".format(root_col_prefix, col.name)))
for h_table, h_schema in zip(self.has_tables, self.has_schemas):
col_prefix = h_schema.options['prefix']
h_join_conditions = [root.c.id == h_table.c['{}_id'.format(root_col_prefix)]]
for join_condition in self.has_join_conditions.get(h_schema, []):
h_join_conditions.append(join_condition)
join_clause = join_clause.outerjoin(h_table, and_(*h_join_conditions))
for col in h_table.c:
select_clause.append(col.label("{}.{}".format(col_prefix, col.name)))
for b_table, b_schema in zip(self.belongs_to_tables, self.belongs_to_schemas):
col_prefix = b_schema.options['prefix']
b_join_conditions = [root.c['{}_id'.format(col_prefix)] == b_table.c.id]
for join_condition in self.belongs_to_join_conditions.get(b_schema, []):
b_join_conditions.append(join_condition)
join_clause = join_clause.outerjoin(b_table, and_(*b_join_conditions))
for col in b_table.c:
select_clause.append(col.label("{}.{}".format(col_prefix, col.name)))
temp_schema = Schema.rename(self.schema, 'temp_'+self.schema.name)
temp_table = schema_as_table(temp_schema, self.engine)
try:
temp_table.create(self.engine)
query = select(select_clause).select_from(join_clause)
if self.where_clauses is not None:
query = query.where(and_(*self.where_clauses))
insert = temp_table.insert().from_select(temp_schema.col_names(), query)
start = time.time()
print('executing join into temp table')
self.engine.execute(insert)
joined = time.time()
print('loading rows from temp table')
df = fast_sql_to_df(temp_table, temp_schema)
loaded = time.time()
finally:
temp_table.drop(self.engine)
print('type checking and sorting')
print('took', joined - start, 'seconds to perform the join')
print('took', loaded - joined, 'seconds to load the results')
return df
class SAQueryDataStore(DataStore):
def __init__(self, schema, engine, query):
self.engine = engine
self.query = query
self.schema = schema
def _load(self):
df = pandas.read_sql(self.query, self.engine)
return df
| if schema.options.get('temporary', False):
prefixes = ['TEMPORARY']
else:
prefixes = []
db_schema = schema.options.get('db_schema', None)
metadata = get_engine_metadata(engine)
return Table(schema.name, metadata, *[col.metadata('sqlalchemy') for col in schema.cols], schema=db_schema, prefixes=prefixes) |
qly-dia-diem.module_20190830000119.ts | import { NgModule } from '@angular/core';
import { ThemeModule } from '../../@theme/theme.module';
import { Ng2SmartTableModule } from 'ng2-smart-table';
import { SharedModule } from '../../shared/share.module';
import { RouterModule } from '@angular/router';
import { ButtonViewModule } from '../../shared/modules/button-view/button-view.module';
import { DialogModule } from '../../shared/modules/dialog/dialog.module';
import { QlyDiaDiemComponent, ButtonViewComponent } from './qly-dia-diem.component';
import { AddDiaDiemComponent } from './add-dia-diem/add-dia-diem.component';
@NgModule({
imports: [ThemeModule, SharedModule, Ng2SmartTableModule, RouterModule, ButtonViewModule, DialogModule], | export class QlyDiaDiemModule {} | exports: [QlyDiaDiemComponent, ButtonViewComponent],
declarations: [QlyDiaDiemComponent, ButtonViewComponent, AddDiaDiemComponent],
entryComponents: [QlyDiaDiemComponent, ButtonViewComponent, AddDiaDiemComponent],
}) |
index.ts | import { HTTP_INTERCEPTORS } from '@angular/common/http';
import { AuthInterceptor } from './auth-interceptor.service';
/** Http interceptor providers in outside-in order */
export const httpInterceptorProviders = [
{ provide: HTTP_INTERCEPTORS, useClass: AuthInterceptor, multi: true },
]; | /* "Barrel" of Http Interceptors */ |
|
usbc_client_ctrl.rs | //! A generic USB client layer managing control requests
//!
//! This layer responds to control requests and handles the state machine for
//! implementing them.
//!
//! Right now, the stack looks like this:
//!
//! ```
//! Client
//! |             ^
//! |             |
//! v             |
//! ClientCtrl    |
//! |             |
//! v             |
//! UsbController
//! ```
use super::descriptors::Buffer64;
use super::descriptors::Descriptor;
use super::descriptors::DescriptorBuffer;
use super::descriptors::DescriptorType;
use super::descriptors::DeviceBuffer;
use super::descriptors::HIDDescriptor;
use super::descriptors::LanguagesDescriptor;
use super::descriptors::Recipient;
use super::descriptors::ReportDescriptor;
use super::descriptors::SetupData;
use super::descriptors::StandardRequest;
use super::descriptors::StringDescriptor;
use super::descriptors::TransferDirection;
use core::cell::Cell;
use core::cmp::min;
use kernel::hil;
use kernel::hil::usb::TransferType;
const DESCRIPTOR_BUFLEN: usize = 128;
const N_ENDPOINTS: usize = 3;
/// Handler for USB control endpoint requests.
pub struct ClientCtrl<'a, 'b, U: 'a> {
/// The USB hardware controller.
controller: &'a U,
/// State of each endpoint.
state: [Cell<State>; N_ENDPOINTS],
/// A 64-byte buffer for the control endpoint to be passed to the USB
/// driver.
pub ctrl_buffer: Buffer64,
/// Storage for composing responses to device descriptor requests.
descriptor_storage: [Cell<u8>; DESCRIPTOR_BUFLEN],
/// Buffer containing the byte-packed representation of the device
/// descriptor. This is expected to be created and passed from the user of
/// `ClientCtrl`.
device_descriptor_buffer: DeviceBuffer,
/// Buffer containing the byte-serialized representation of the configuration
/// descriptor and all other descriptors for this device.
other_descriptor_buffer: DescriptorBuffer,
/// An optional HID descriptor for the configuration. This can be requested
/// separately. It must also be included in `other_descriptor_buffer` if it exists.
hid_descriptor: Option<&'b HIDDescriptor<'b>>,
/// An optional report descriptor for the configuration. This can be
/// requested separately. It must also be included in
/// `other_descriptor_buffer` if it exists.
report_descriptor: Option<&'b ReportDescriptor<'b>>,
/// Supported language (only one for now).
language: &'b [u16; 1],
/// USB strings to provide human readable descriptions of certain descriptor attributes.
strings: &'b [&'b str],
}
/// States for the individual endpoints.
#[derive(Copy, Clone)]
enum State {
Init,
/// We are doing a Control In transfer of some data in
/// self.descriptor_storage, with the given extent remaining to send.
CtrlIn(usize, usize),
/// We will accept data from the host.
CtrlOut,
SetAddress,
}
impl Default for State {
fn default() -> Self {
State::Init
}
}
impl<'a, 'b, U: hil::usb::UsbController<'a>> ClientCtrl<'a, 'b, U> {
pub fn new(
controller: &'a U,
device_descriptor_buffer: DeviceBuffer,
other_descriptor_buffer: DescriptorBuffer,
hid_descriptor: Option<&'b HIDDescriptor<'b>>,
report_descriptor: Option<&'b ReportDescriptor<'b>>,
language: &'b [u16; 1],
strings: &'b [&'b str],
) -> Self {
ClientCtrl {
controller: controller,
state: Default::default(),
// For the moment, the Default trait is not implemented for arrays
// of length > 32, and the Cell type is not Copy, so we have to
// initialize each element manually.
#[rustfmt::skip]
descriptor_storage: [
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
Cell::default(), Cell::default(), Cell::default(), Cell::default(),
],
ctrl_buffer: Buffer64::default(),
device_descriptor_buffer,
other_descriptor_buffer,
hid_descriptor,
report_descriptor,
language,
strings,
}
}
#[inline]
pub fn controller(&self) -> &'a U {
self.controller
}
#[inline]
fn descriptor_buf(&'a self) -> &'a [Cell<u8>] {
&self.descriptor_storage
}
pub fn enable(&'a self) {
// Set up the default control endpoint
self.controller
.endpoint_set_ctrl_buffer(&self.ctrl_buffer.buf);
self.controller
.enable_as_device(hil::usb::DeviceSpeed::Full); // must be Full for Bulk transfers
self.controller
.endpoint_out_enable(TransferType::Control, 0);
}
pub fn attach(&'a self) {
self.controller.attach();
}
/// Handle a Control Setup transaction
pub fn ctrl_setup(&'a self, endpoint: usize) -> hil::usb::CtrlSetupResult {
if endpoint != 0 {
// For now we only support the default Control endpoint
return hil::usb::CtrlSetupResult::ErrInvalidDeviceIndex;
}
SetupData::get(&self.ctrl_buffer.buf).map_or(
hil::usb::CtrlSetupResult::ErrNoParse,
|setup_data| {
let transfer_direction = setup_data.request_type.transfer_direction();
let recipient = setup_data.request_type.recipient();
setup_data.get_standard_request().map_or_else(
|| {
// XX: CtrlSetupResult::ErrNonstandardRequest
// For now, promiscuously accept vendor data and even supply
// a few debugging bytes when host does a read
match transfer_direction {
TransferDirection::HostToDevice => {
self.state[endpoint].set(State::CtrlOut);
hil::usb::CtrlSetupResult::Ok
}
TransferDirection::DeviceToHost => {
// Arrange to send some crap back
let buf = self.descriptor_buf();
buf[0].set(0xa);
buf[1].set(0xb);
buf[2].set(0xc);
self.state[endpoint].set(State::CtrlIn(0, 3));
hil::usb::CtrlSetupResult::Ok
}
}
},
|request| match recipient {
Recipient::Device => self.handle_standard_device_request(endpoint, request),
Recipient::Interface => {
self.handle_standard_interface_request(endpoint, request)
}
_ => hil::usb::CtrlSetupResult::ErrGeneric,
},
)
},
)
}
fn handle_standard_device_request(
&'a self,
endpoint: usize,
request: StandardRequest,
) -> hil::usb::CtrlSetupResult {
match request {
StandardRequest::GetDescriptor {
descriptor_type,
descriptor_index,
lang_id,
requested_length,
} => {
match descriptor_type {
DescriptorType::Device => match descriptor_index {
0 => {
let buf = self.descriptor_buf();
let len = self.device_descriptor_buffer.write_to(buf);
let end = min(len, requested_length as usize);
self.state[endpoint].set(State::CtrlIn(0, end));
hil::usb::CtrlSetupResult::Ok
}
_ => hil::usb::CtrlSetupResult::ErrInvalidDeviceIndex,
},
DescriptorType::Configuration => match descriptor_index {
0 => {
let buf = self.descriptor_buf();
let len = self.other_descriptor_buffer.write_to(buf);
let end = min(len, requested_length as usize);
self.state[endpoint].set(State::CtrlIn(0, end));
hil::usb::CtrlSetupResult::Ok
}
_ => hil::usb::CtrlSetupResult::ErrInvalidConfigurationIndex,
},
DescriptorType::String => {
if let Some(len) = match descriptor_index {
0 => {
let buf = self.descriptor_buf();
let d = LanguagesDescriptor {
langs: self.language,
};
let len = d.write_to(buf);
Some(len)
}
i if i > 0
&& (i as usize) <= self.strings.len()
&& lang_id == self.language[0] =>
{
let buf = self.descriptor_buf();
let d = StringDescriptor {
string: self.strings[i as usize - 1],
};
let len = d.write_to(buf);
Some(len)
}
_ => None,
} {
let end = min(len, requested_length as usize);
self.state[endpoint].set(State::CtrlIn(0, end));
hil::usb::CtrlSetupResult::Ok
} else {
hil::usb::CtrlSetupResult::ErrInvalidStringIndex
}
}
DescriptorType::DeviceQualifier => {
// We are full-speed only, so we must
// respond with a request error
hil::usb::CtrlSetupResult::ErrNoDeviceQualifier
}
_ => hil::usb::CtrlSetupResult::ErrUnrecognizedDescriptorType,
} // match descriptor_type
}
StandardRequest::SetAddress { device_address } => {
// Load the address we've been assigned ...
self.controller.set_address(device_address);
// ... and when this request gets to the Status stage we will actually enable the
// address.
self.state[endpoint].set(State::SetAddress);
hil::usb::CtrlSetupResult::OkSetAddress
}
StandardRequest::SetConfiguration { .. } => {
// We have been assigned a particular configuration: fine!
hil::usb::CtrlSetupResult::Ok
}
_ => hil::usb::CtrlSetupResult::ErrUnrecognizedRequestType,
}
}
fn handle_standard_interface_request(
&'a self,
endpoint: usize,
request: StandardRequest,
) -> hil::usb::CtrlSetupResult |
/// Handle a Control In transaction
pub fn ctrl_in(&'a self, endpoint: usize) -> hil::usb::CtrlInResult {
match self.state[endpoint].get() {
State::CtrlIn(start, end) => {
let len = end.saturating_sub(start);
if len > 0 {
let packet_bytes = min(self.ctrl_buffer.buf.len(), len);
let packet = &self.descriptor_storage[start..start + packet_bytes];
let buf = &self.ctrl_buffer.buf;
// Copy a packet into the endpoint buffer
for (i, b) in packet.iter().enumerate() {
buf[i].set(b.get());
}
let start = start + packet_bytes;
let len = end.saturating_sub(start);
let transfer_complete = len == 0;
self.state[endpoint].set(State::CtrlIn(start, end));
hil::usb::CtrlInResult::Packet(packet_bytes, transfer_complete)
} else {
hil::usb::CtrlInResult::Packet(0, true)
}
}
_ => hil::usb::CtrlInResult::Error,
}
}
/// Handle a Control Out transaction
pub fn ctrl_out(&'a self, endpoint: usize, _packet_bytes: u32) -> hil::usb::CtrlOutResult {
match self.state[endpoint].get() {
State::CtrlOut => {
// Gamely accept the data
hil::usb::CtrlOutResult::Ok
}
_ => {
// Bad state
hil::usb::CtrlOutResult::Halted
}
}
}
pub fn ctrl_status(&'a self, _endpoint: usize) {
// Entered Status stage
}
/// Handle the completion of a Control transfer
pub fn ctrl_status_complete(&'a self, endpoint: usize) {
// Control Read: IN request acknowledged
// Control Write: status sent
match self.state[endpoint].get() {
State::SetAddress => {
self.controller.enable_address();
}
_ => {}
};
self.state[endpoint].set(State::Init);
}
}
| {
match request {
StandardRequest::GetDescriptor {
descriptor_type,
// TODO: use the descriptor index
descriptor_index: _,
// TODO: use the language ID?
lang_id: _,
requested_length,
} => match descriptor_type {
DescriptorType::HID => {
if let Some(desc) = self.hid_descriptor {
let buf = self.descriptor_buf();
let len = desc.write_to(buf);
let end = min(len, requested_length as usize);
self.state[endpoint].set(State::CtrlIn(0, end));
hil::usb::CtrlSetupResult::Ok
} else {
hil::usb::CtrlSetupResult::ErrGeneric
}
}
DescriptorType::Report => {
if let Some(desc) = self.report_descriptor {
let buf = self.descriptor_buf();
let len = desc.write_to(buf);
let end = min(len, requested_length as usize);
self.state[endpoint].set(State::CtrlIn(0, end));
hil::usb::CtrlSetupResult::Ok
} else {
hil::usb::CtrlSetupResult::ErrGeneric
}
}
_ => hil::usb::CtrlSetupResult::ErrGeneric,
},
_ => hil::usb::CtrlSetupResult::ErrGeneric,
}
} |