file_name (string, 3-137 chars) | prefix (string, 0-918k chars) | suffix (string, 0-962k chars) | middle (string, 0-812k chars)
---|---|---|---|
test_newsrec_utils.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import pytest
try:
from recommenders.models.deeprec.deeprec_utils import download_deeprec_resources
from recommenders.models.newsrec.newsrec_utils import prepare_hparams, load_yaml
except ImportError:
pass # skip this import if we are in cpu environment
@pytest.mark.parametrize(
"must_exist_attributes", ["wordEmb_file", "wordDict_file", "userDict_file"]
)
@pytest.mark.gpu
def test_prepare_hparams(must_exist_attributes, deeprec_resource_path):
wordEmb_file = os.path.join(deeprec_resource_path, "mind", "utils", "embedding.npy")
userDict_file = os.path.join(
deeprec_resource_path, "mind", "utils", "uid2index.pkl"
)
wordDict_file = os.path.join(
deeprec_resource_path, "mind", "utils", "word_dict.pkl"
)
yaml_file = os.path.join(deeprec_resource_path, "mind", "utils", r"nrms.yaml")
if not os.path.exists(yaml_file):
download_deeprec_resources(
r"https://recodatasets.z20.web.core.windows.net/newsrec/",
            os.path.join(deeprec_resource_path, "mind", "utils"),
            "MINDdemo_utils.zip",
        )
    hparams = prepare_hparams(
        yaml_file,
wordEmb_file=wordEmb_file,
wordDict_file=wordDict_file,
userDict_file=userDict_file,
epochs=1,
)
assert hasattr(hparams, must_exist_attributes)
@pytest.mark.gpu
def test_load_yaml_file(deeprec_resource_path):
yaml_file = os.path.join(deeprec_resource_path, "mind", "utils", r"nrms.yaml")
if not os.path.exists(yaml_file):
download_deeprec_resources(
"https://recodatasets.z20.web.core.windows.net/newsrec/",
os.path.join(deeprec_resource_path, "mind", "utils"),
"MINDdemo_utils.zip",
)
config = load_yaml(yaml_file)
    assert config is not None
setup-screen.tsx | import React from "react"
import { TextStyle, View, ViewStyle } from "react-native"
import { useNavigation } from "@react-navigation/native"
import { observer } from "mobx-react-lite"
import { Button, Header, Text, Screen, Wallpaper } from "../../components"
import { color, spacing } from "../../theme"
import { Api } from "../../services/api"
import { save } from "../../utils/storage"
import InputSpinner from "react-native-input-spinner"
const FULL: ViewStyle = { flex: 1 }
const CONTAINER: ViewStyle = {
alignItems: "center",
backgroundColor: color.transparent,
paddingHorizontal: spacing[4],
}
const LITERSINPUT: ViewStyle = {
marginVertical: spacing[4],
}
const DEMO: ViewStyle = {
paddingVertical: spacing[4],
paddingHorizontal: spacing[4],
backgroundColor: "#5D2555",
}
const SPINNERNUMBERCONTAINER: ViewStyle = {
height: "20%",
}
const BOLD: TextStyle = { fontWeight: "bold" }
const DEMO_TEXT: TextStyle = {
...BOLD,
fontSize: 13,
letterSpacing: 2,
}
const LABELS: TextStyle = {
...BOLD,
fontSize: 17,
lineHeight: 15,
textAlign: "center",
letterSpacing: 1.5,
color: "#FFFFFF",
}
const TITLE: TextStyle = {
...BOLD,
fontSize: 28,
lineHeight: 38,
textAlign: "center",
marginBottom: spacing[5],
}
const HEADER_TITLE: TextStyle = {
...BOLD,
fontSize: 12,
textAlign: "center",
letterSpacing: 1.5,
}
export const SetupScreen = observer(function Setup() {
const navigation = useNavigation()
const goBack = () => {
console.log("Go back --->>")
// navigation.goBack()
}
  const demoReactotron = React.useMemo(
    () => async () => {
      console.tron.log("Your Friendly tron log message")
      console.tron.logImportant("I am important")
      console.tron.display({
name: "DISPLAY",
value: {
numbers: 1,
strings: "strings",
booleans: true,
arrays: [1, 2, 3],
objects: {
deeper: {
deeper: {
yay: "👾",
},
},
},
functionNames: function hello() {
/* dummy function */
},
},
preview: "More control with display()",
important: true,
image: {
uri:
"https://avatars2.githubusercontent.com/u/3902527?s=200&u=a0d16b13ed719f35d95ca0f4440f5d07c32c349a&v=4",
},
})
// make an API call for the demo
// Don't do API like this, use store's API
const demo = new Api()
demo.setup()
demo.getUser("1")
// Let's do some async storage stuff
await save("Cool Name", "Boaty McBoatface")
},
[],
)
return (
<View style={FULL}>
<Wallpaper />
<Header leftIcon="back" onLeftPress={goBack} titleStyle={HEADER_TITLE} />
<Screen style={CONTAINER} preset="fixed" backgroundColor={color.transparent}>
<Text style={TITLE} preset="header" tx="setupScreen.title" />
<Text style={LABELS} tx="setupScreen.gasCapacity" />
<View style={SPINNERNUMBERCONTAINER}>
<InputSpinner
style={LITERSINPUT}
inputStyle={LABELS}
min={1}
colorLeft={"#f04048"}
colorRight={"#40c5f4"}
rounded={false}
onChange={(num) => {
console.log(num)
}}
onDecrease={() => {
console.log("Drecrement -->")
}}
/>
</View>
<Text style={LABELS} tx="setupScreen.gaugeCapacity" />
<View style={SPINNERNUMBERCONTAINER}>
<InputSpinner
style={LITERSINPUT}
inputStyle={LABELS}
min={1}
colorLeft={"#f04048"}
colorRight={"#40c5f4"}
rounded={false}
onChange={(num) => {
console.log(num)
}}
onDecrease={() => {
console.log("Drecrement -->")
}}
/>
</View>
<Button
style={DEMO}
textStyle={DEMO_TEXT}
tx="setupScreen.accept"
onPress={demoReactotron}
/>
</Screen>
</View>
)
})
middleware.py | import logging
from typing import Set
import falcon
from common.consts import HTTP_WRITE_METHODS
from common.falcon_utils import auth_token
from common.util import is_public
from ui import BackendController
class ContentTypeValidator:
def process_resource(self, req: falcon.Request, _resp: falcon.Response, resource, _params):
if req.method in HTTP_WRITE_METHODS:
content_type = getattr(resource, 'content_type', 'application/x-www-form-urlencoded')
if content_type and content_type not in req.content_type:
raise falcon.HTTPUnsupportedMediaType(description="This API only supports requests encoded as '" + content_type + "'")
class LoginValidator:
    def __init__(self, backend: BackendController, login_path: str, public_paths: Set[str] = None):
self.login_path = login_path
self.public_paths = public_paths if public_paths else set()
self.public_paths.add(login_path)
self._backend = backend
def process_resource(self, req: falcon.Request, resp: falcon.Response, _resource, _params):
if is_public(req.path, self.public_paths):
logging.debug("This is a public resource which does not need a valid token")
return
token = auth_token(req)
if not token:
raise falcon.HTTPSeeOther(self.login_path)
        resp.auth_user = self._backend.user_info(auth_token=token)
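# Example wiring (a sketch; the BackendController instance is an assumption
# and falcon.App is the falcon 3.x application class):
#
#   app = falcon.App(middleware=[
#       ContentTypeValidator(),
#       LoginValidator(backend, login_path="/login", public_paths={"/health"}),
#   ])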
bootstrap.go | package core
import (
"context"
"errors"
"fmt"
"io"
"math/rand"
"sync"
"time"
math2 "github.com/ipfs/go-ipfs/thirdparty/math2"
lgbl "gx/ipfs/QmZ4zF1mBrt8C2mSCM4ZYE4aAnv78f7GvrzufJC4G5tecK/go-libp2p-loggables"
peer "gx/ipfs/QmQsErDt8Qgw1XrsXf2BpEzDgGWtB1YLsTAARBup5b6B9W/go-libp2p-peer"
goprocess "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess"
procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context"
periodicproc "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/periodic"
config "gx/ipfs/QmYVqYJTVjetcf1guieEgWpK1PZtHPytP624vKzTF1P3r2/go-ipfs-config"
inet "gx/ipfs/QmZNJyx9GGCX4GeuHnLB8fxaxMLs4MjTjHokxfQcCd6Nve/go-libp2p-net"
pstore "gx/ipfs/Qmda4cPRvSRyox3SqgJN6DfSZGU5TtHufPTp9uXjFj71X6/go-libp2p-peerstore"
host "gx/ipfs/QmeMYW7Nj8jnnEfs9qhm7SxKkoDPUWXu3MsxX6BFwz34tf/go-libp2p-host"
)
// ErrNotEnoughBootstrapPeers signals that we do not have enough bootstrap
// peers to bootstrap correctly.
var ErrNotEnoughBootstrapPeers = errors.New("not enough bootstrap peers to bootstrap")
// BootstrapConfig specifies parameters used in an IpfsNode's network
// bootstrapping process.
type BootstrapConfig struct {
// MinPeerThreshold governs whether to bootstrap more connections. If the
// node has less open connections than this number, it will open connections
// to the bootstrap nodes. From there, the routing system should be able
// to use the connections to the bootstrap nodes to connect to even more
// peers. Routing systems like the IpfsDHT do so in their own Bootstrap
// process, which issues random queries to find more peers.
MinPeerThreshold int
// Period governs the periodic interval at which the node will
// attempt to bootstrap. The bootstrap process is not very expensive, so
// this threshold can afford to be small (<=30s).
Period time.Duration
// ConnectionTimeout determines how long to wait for a bootstrap
// connection attempt before cancelling it.
ConnectionTimeout time.Duration
// BootstrapPeers is a function that returns a set of bootstrap peers
// for the bootstrap process to use. This makes it possible for clients
// to control the peers the process uses at any moment.
BootstrapPeers func() []pstore.PeerInfo
}
// DefaultBootstrapConfig specifies default sane parameters for bootstrapping.
var DefaultBootstrapConfig = BootstrapConfig{
MinPeerThreshold: 4,
Period: 30 * time.Second,
ConnectionTimeout: (30 * time.Second) / 3, // Period / 3
}
func BootstrapConfigWithPeers(pis []pstore.PeerInfo) BootstrapConfig {
cfg := DefaultBootstrapConfig
cfg.BootstrapPeers = func() []pstore.PeerInfo {
return pis
}
return cfg
}
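// Example use (a minimal sketch; the IpfsNode and peer infos are assumed to
// come from the caller's setup code):
//
//	cfg := BootstrapConfigWithPeers(pis)
//	cfg.MinPeerThreshold = 8 // dial harder than the default of 4
//	closer, err := Bootstrap(node, cfg)
//	// ... and closer.Close() on shutdown.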
// Bootstrap kicks off IpfsNode bootstrapping. This function will periodically
// check the number of open connections and -- if there are too few -- initiate
// connections to well-known bootstrap peers. It also kicks off subsystem
// bootstrapping (i.e. routing).
func Bootstrap(n *IpfsNode, cfg BootstrapConfig) (io.Closer, error) {
// make a signal to wait for one bootstrap round to complete.
doneWithRound := make(chan struct{})
// the periodic bootstrap function -- the connection supervisor
periodic := func(worker goprocess.Process) {
ctx := procctx.OnClosingContext(worker)
defer log.EventBegin(ctx, "periodicBootstrap", n.Identity).Done()
if err := bootstrapRound(ctx, n.PeerHost, cfg); err != nil {
log.Event(ctx, "bootstrapError", n.Identity, lgbl.Error(err))
log.Debugf("%s bootstrap error: %s", n.Identity, err)
}
<-doneWithRound
}
// kick off the node's periodic bootstrapping
proc := periodicproc.Tick(cfg.Period, periodic)
proc.Go(periodic) // run one right now.
// kick off Routing.Bootstrap
if n.Routing != nil {
ctx := procctx.OnClosingContext(proc)
if err := n.Routing.Bootstrap(ctx); err != nil {
proc.Close()
return nil, err
}
}
doneWithRound <- struct{}{}
close(doneWithRound) // it no longer blocks periodic
return proc, nil
}
func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) error {
ctx, cancel := context.WithTimeout(ctx, cfg.ConnectionTimeout)
defer cancel()
id := host.ID()
// get bootstrap peers from config. retrieving them here makes
// sure we remain observant of changes to client configuration.
peers := cfg.BootstrapPeers()
if len(peers) == 0 {
log.Error("no bootstrap nodes configured: go-ipfs may have difficulty connecting to the network")
}
// determine how many bootstrap connections to open
connected := host.Network().Peers()
if len(connected) >= cfg.MinPeerThreshold {
log.Event(ctx, "bootstrapSkip", id)
log.Debugf("%s core bootstrap skipped -- connected to %d (> %d) nodes",
id, len(connected), cfg.MinPeerThreshold)
return nil
}
numToDial := cfg.MinPeerThreshold - len(connected)
// filter out bootstrap nodes we are already connected to
var notConnected []pstore.PeerInfo
for _, p := range peers {
if host.Network().Connectedness(p.ID) != inet.Connected {
notConnected = append(notConnected, p)
}
}
// if connected to all bootstrap peer candidates, exit
if len(notConnected) < 1 {
log.Debugf("%s no more bootstrap peers to create %d connections", id, numToDial)
return ErrNotEnoughBootstrapPeers
}
// connect to a random subset of bootstrap candidates
randSubset := randomSubsetOfPeers(notConnected, numToDial)
defer log.EventBegin(ctx, "bootstrapStart", id).Done()
log.Debugf("%s bootstrapping to %d nodes: %s", id, numToDial, randSubset)
return bootstrapConnect(ctx, host, randSubset)
}
func bootstrapConnect(ctx context.Context, ph host.Host, peers []pstore.PeerInfo) error {
if len(peers) < 1 {
return ErrNotEnoughBootstrapPeers
}
errs := make(chan error, len(peers))
var wg sync.WaitGroup
for _, p := range peers {
// performed asynchronously because when performed synchronously, if
// one `Connect` call hangs, subsequent calls are more likely to
// fail/abort due to an expiring context.
// Also, performed asynchronously for dial speed.
wg.Add(1)
go func(p pstore.PeerInfo) {
defer wg.Done()
defer log.EventBegin(ctx, "bootstrapDial", ph.ID(), p.ID).Done()
log.Debugf("%s bootstrapping to %s", ph.ID(), p.ID)
ph.Peerstore().AddAddrs(p.ID, p.Addrs, pstore.PermanentAddrTTL)
if err := ph.Connect(ctx, p); err != nil {
log.Event(ctx, "bootstrapDialFailed", p.ID)
log.Debugf("failed to bootstrap with %v: %s", p.ID, err)
errs <- err
return
}
log.Event(ctx, "bootstrapDialSuccess", p.ID)
log.Infof("bootstrapped with %v", p.ID)
}(p)
}
wg.Wait()
// our failure condition is when no connection attempt succeeded.
// So drain the errs channel, counting the results.
close(errs)
count := 0
var err error
for err = range errs {
if err != nil {
count++
}
}
if count == len(peers) {
return fmt.Errorf("failed to bootstrap. %s", err)
}
return nil
}
func toPeerInfos(bpeers []config.BootstrapPeer) []pstore.PeerInfo {
pinfos := make(map[peer.ID]*pstore.PeerInfo)
for _, bootstrap := range bpeers {
pinfo, ok := pinfos[bootstrap.ID()]
if !ok {
pinfo = new(pstore.PeerInfo)
pinfos[bootstrap.ID()] = pinfo
pinfo.ID = bootstrap.ID()
}
pinfo.Addrs = append(pinfo.Addrs, bootstrap.Transport())
}
var peers []pstore.PeerInfo
for _, pinfo := range pinfos {
peers = append(peers, *pinfo)
}
return peers
}
func randomSubsetOfPeers(in []pstore.PeerInfo, max int) []pstore.PeerInfo {
n := math2.IntMin(max, len(in))
var out []pstore.PeerInfo
for _, val := range rand.Perm(len(in)) {
out = append(out, in[val])
if len(out) >= n {
break
}
}
return out
}
redundant_field_names.rs | use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::meets_msrv;
use rustc_ast::ast::{Expr, ExprKind};
use rustc_errors::Applicability;
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_middle::lint::in_external_macro;
use rustc_semver::RustcVersion;
use rustc_session::{declare_tool_lint, impl_lint_pass};
const REDUNDANT_FIELD_NAMES_MSRV: RustcVersion = RustcVersion::new(1, 17, 0);
declare_clippy_lint! {
/// **What it does:** Checks for fields in struct literals where shorthands
/// could be used.
///
/// **Why is this bad?** If the field and variable names are the same,
/// the field name is redundant.
///
/// **Known problems:** None.
///
/// **Example:**
/// ```rust
/// let bar: u8 = 123;
///
/// struct Foo {
/// bar: u8,
/// }
///
/// let foo = Foo { bar: bar };
/// ```
/// the last line can be simplified to
/// ```ignore
/// let foo = Foo { bar };
/// ```
pub REDUNDANT_FIELD_NAMES,
style,
"checks for fields in struct literals where shorthands could be used"
}
pub struct RedundantFieldNames {
msrv: Option<RustcVersion>,
}
impl RedundantFieldNames {
#[must_use]
pub fn new(msrv: Option<RustcVersion>) -> Self {
Self { msrv }
}
}
impl_lint_pass!(RedundantFieldNames => [REDUNDANT_FIELD_NAMES]);
impl EarlyLintPass for RedundantFieldNames {
fn | (&mut self, cx: &EarlyContext<'_>, expr: &Expr) {
if !meets_msrv(self.msrv.as_ref(), &REDUNDANT_FIELD_NAMES_MSRV) {
return;
}
if in_external_macro(cx.sess, expr.span) {
return;
}
if let ExprKind::Struct(_, ref fields, _) = expr.kind {
for field in fields {
if field.is_shorthand {
continue;
}
if let ExprKind::Path(None, path) = &field.expr.kind {
if path.segments.len() == 1
&& path.segments[0].ident == field.ident
&& path.segments[0].args.is_none()
{
span_lint_and_sugg(
cx,
REDUNDANT_FIELD_NAMES,
field.span,
"redundant field names in struct initialization",
"replace it with",
field.ident.to_string(),
Applicability::MachineApplicable,
);
}
}
}
}
}
extract_msrv_attr!(EarlyContext);
}
phone-number.pipe.ts | import { Pipe, PipeTransform } from '@angular/core';
@Pipe({
name: 'phoneNumber'
})
export class PhoneNumberPipe implements PipeTransform {
transform(val: string, args?: any): any {
if (!val)
return val;
val = val.replace('(', '');
val = val.replace(')', '');
val = val.replace('-', '');
val = val.replace(' ', '');
let numdigits = val.length;
if (numdigits >= 6) {
let firstpart = val.substr(0, 6);
let secondpart = val.substr(6);
val = firstpart + '-' + secondpart;
} if (numdigits >= 3) {
let firstpart = val.substr(0, 3);
let secondpart = val.substr(3);
val = firstpart + ') ' + secondpart;
} if (numdigits > 0) {
val = '(' + val;
}
return val;
}
}
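// Usage sketch in a template (pipe name as declared above):
//   {{ '5551234567' | phoneNumber }}   <!-- renders "(555) 123-4567" -->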
DocumentEditorDemo.tsx | /** @jsx jsx */
import { getInitialPropsValue } from '@keystone-next/fields-document/src/DocumentEditor/component-blocks/initial-values';
import { FormValueContent } from '@keystone-next/fields-document/src/DocumentEditor/component-blocks/form';
import { useKeyDownRef } from '@keystone-next/fields-document/src/DocumentEditor/soft-breaks';
import React, { ReactNode, useContext, useEffect, useMemo, useState } from 'react';
import { Toolbar } from '@keystone-next/fields-document/src/DocumentEditor/Toolbar';
import { DocumentFeatures } from '@keystone-next/fields-document/views';
import {
createDocumentEditor,
DocumentEditorEditable,
DocumentEditorProvider,
Editor,
} from '@keystone-next/fields-document/src/DocumentEditor';
import {
ComponentBlock,
fields,
InferRenderersForComponentBlocks,
} from '@keystone-next/fields-document/component-blocks';
import { Global, jsx } from '@emotion/react';
import { componentBlocks as componentBlocksInExampleProject } from '../../../examples-staging/basic/admin/fieldViews/Content';
import { initialContent } from '../../lib/initialDocumentDemoContent';
import { Code } from '../primitives/Code';
const headingLevels = ['1', '2', '3', '4', '5', '6'] as const;
const marks = [
'bold',
'code',
'italic',
'keyboard',
'strikethrough',
'subscript',
'superscript',
'underline',
] as const;
const documentFeaturesProp = fields.object({
inlineMarks: fields.multiselect({
options: marks.map(value => ({ label: value[0].toUpperCase() + value.slice(1), value })),
defaultValue: marks,
label: 'Inline Marks',
}),
blocks: fields.multiselect({
label: 'Block Types',
options: [
{ label: 'Blockquote', value: 'blockquote' },
{ label: 'Code Block', value: 'code' },
{ label: 'Ordered List', value: 'ordered' },
{ label: 'Unordered List', value: 'unordered' },
...headingLevels.map(value => ({ value, label: `H${value}` })),
] as const,
defaultValue: ['blockquote', 'code', 'ordered', 'unordered', ...headingLevels],
}),
alignment: fields.multiselect({
options: [
{ label: 'Center', value: 'center' },
{ label: 'End', value: 'end' },
] as const,
defaultValue: ['center', 'end'],
label: 'Alignment',
}),
links: fields.checkbox({
label: 'Links',
defaultValue: true,
}),
dividers: fields.checkbox({
label: 'Dividers',
defaultValue: true,
}),
softBreaks: fields.checkbox({ label: 'Soft Breaks', defaultValue: true }),
layouts: fields.checkbox({ label: 'Layouts', defaultValue: true }),
useShorthand: fields.checkbox({ label: 'Use shorthand in code example', defaultValue: true }),
});
type DocumentFeaturesFormValue = Parameters<
InferRenderersForComponentBlocks<
Record<'documentFeatures', ComponentBlock<typeof documentFeaturesProp['value']>>
>['documentFeatures']
>[0];
const emptyObj = {};
const componentBlocks = {
notice: componentBlocksInExampleProject.notice,
hero: componentBlocksInExampleProject.hero,
quote: componentBlocksInExampleProject.quote,
};
type DocumentFieldConfig = Parameters<typeof import('@keystone-next/fields-document').document>[0];
function documentFeaturesCodeExample(config: DocumentFieldConfig | DocumentFeatures) {
return `import { config, createSchema, list } from '@keystone-next/keystone/schema';
import { document } from '@keystone-next/fields-document';
export default config({
lists: createSchema({
ListName: list({
fields: {
fieldName: document({
${JSON.stringify(
config,
(_, val) =>
// false is an invalid value for all the inputs
val === false
? undefined
: // every value in an array on a new line looks real bad, especially for layouts
Array.isArray(val)
? Array.isArray(val[0])
? // this case is for layouts
val.map(x => `[${x.join(', ')}]`)
: // this case is for headingLevels
  `[${val.join(', ')}]`
: val,
2
)
.replace(/"/g, '')
.replace(/^{/, '')
.replace(/{$/, '')
.trim()
.split('\n')
.map(x => ' '.repeat(10) + x)
.join('\n')}
/* ... */
}),
/* ... */
},
}),
/* ... */
}),
/* ... */
});
`;
}
function documentFeaturesToShorthand(documentFeatures: DocumentFeatures): DocumentFieldConfig {
return {
formatting: objToShorthand({
alignment: objToShorthand({
center: boolToTrueOrUndefined(documentFeatures.formatting.alignment.center),
end: boolToTrueOrUndefined(documentFeatures.formatting.alignment.end),
}),
inlineMarks: objToShorthand(
fromEntriesButTypedWell(
marks.map(x => [x, boolToTrueOrUndefined(documentFeatures.formatting.inlineMarks[x])])
)
),
headingLevels:
documentFeatures.formatting.headingLevels.length === 6
? true
: documentFeatures.formatting.headingLevels.length === 0
? undefined
: documentFeatures.formatting.headingLevels,
blockTypes: objToShorthand({
code: boolToTrueOrUndefined(documentFeatures.formatting.blockTypes.code),
blockquote: boolToTrueOrUndefined(documentFeatures.formatting.blockTypes.blockquote),
}),
listTypes: objToShorthand({
ordered: boolToTrueOrUndefined(documentFeatures.formatting.listTypes.ordered),
unordered: boolToTrueOrUndefined(documentFeatures.formatting.listTypes.unordered),
}),
softBreaks: boolToTrueOrUndefined(documentFeatures.formatting.softBreaks),
}),
links: boolToTrueOrUndefined(documentFeatures.links),
layouts: documentFeatures.layouts.length === 0 ? undefined : documentFeatures.layouts,
dividers: boolToTrueOrUndefined(documentFeatures.dividers),
};
}
function objToShorthand<
Obj extends Record<string, undefined | true | readonly any[] | Record<string, any>>
>(obj: Obj): Obj | true | undefined {
const values = Object.values(obj);
let state: typeof values[number] = values[0]!;
for (const val of values) {
if (val !== state || (val !== undefined && val !== true)) {
return obj;
}
}
return state as any;
}
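// Collapse rule illustrated (a sketch): objToShorthand({ a: true, b: true })
// returns true, objToShorthand({ a: undefined, b: undefined }) returns
// undefined, and any mixed or nested object is returned unchanged.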
function boolToTrueOrUndefined(bool: boolean): true | undefined {
return bool ? true : undefined;
}
const fromEntriesButTypedWell: <Key extends string | number | symbol, Val>(
iterable: Iterable<readonly [Key, Val]>
) => Record<Key, Val> = Object.fromEntries;
function documentFeaturesFormToValue(formValue: DocumentFeaturesFormValue): DocumentFeatures {
return {
formatting: {
alignment: {
center: formValue.alignment.includes('center'),
end: formValue.alignment.includes('end'),
},
blockTypes: {
blockquote: formValue.blocks.includes('blockquote'),
code: formValue.blocks.includes('code'),
},
inlineMarks: fromEntriesButTypedWell(
marks.map(mark => {
return [mark, formValue.inlineMarks.includes(mark)];
})
),
headingLevels: formValue.blocks
.map(x => parseInt(x))
.filter(num => !isNaN(num))
.sort() as any,
listTypes: {
ordered: formValue.blocks.includes('ordered'),
unordered: formValue.blocks.includes('unordered'),
},
softBreaks: formValue.softBreaks,
},
links: formValue.links,
layouts: formValue.layouts
? [
[1, 1],
[1, 1, 1],
[2, 1],
[1, 2],
[1, 2, 1],
]
: [],
dividers: formValue.dividers,
};
}
const DocumentFeaturesContext = React.createContext<{
documentFeatures: DocumentFeatures;
formValue: DocumentFeaturesFormValue;
setFormValue: (value: DocumentFeaturesFormValue) => void;
}>({} as any);
export function DocumentFeaturesProvider({ children }: { children: ReactNode }) {
const [formValue, setFormValue] = useState<DocumentFeaturesFormValue>(() =>
getInitialPropsValue(documentFeaturesProp, {})
);
return (
<DocumentFeaturesContext.Provider
value={useMemo(
() => ({
documentFeatures: documentFeaturesFormToValue(formValue),
formValue,
setFormValue,
}),
[formValue]
)}
>
{children}
</DocumentFeaturesContext.Provider>
);
}
export function DocumentFeaturesFormAndCode() {
const { documentFeatures, formValue, setFormValue } = useContext(DocumentFeaturesContext);
return (
<div>
<FormValueContent
prop={documentFeaturesProp}
forceValidation={false}
path={[]}
stringifiedPropPathToAutoFocus=""
value={formValue}
onChange={setFormValue}
/>
<pre>
<Code className="language-tsx">
{useMemo(
() =>
documentFeaturesCodeExample(
formValue.useShorthand
? documentFeaturesToShorthand(documentFeatures)
: documentFeatures
),
[documentFeatures, formValue]
)}
</Code>
</pre>
</div>
);
}
export const DocumentEditorDemo = () => {
const [value, setValue] = useState(initialContent as any);
const { documentFeatures } = useContext(DocumentFeaturesContext);
const isShiftPressedRef = useKeyDownRef('Shift');
const editor = useMemo(
() => createDocumentEditor(documentFeatures, componentBlocks, emptyObj, isShiftPressedRef),
[documentFeatures]
);
// this is why we're creating the editor ourselves and not using the DocumentEditor component
useEffect(() => {
// we want to force normalize when the document features change so
// that no invalid things exist after a user changes something
Editor.normalize(editor, { force: true });
}, [documentFeatures]);
return (
<div
css={{
// the editor mostly expects things not be the default styles
// and tailwind messes that up, so these values are from Chrome's default styles
'blockquote, p, pre': {
marginTop: '1em',
marginBottom: '1em',
},
'h1,h2,h3,h4,h5,h6': { fontWeight: 'bold', margin: 0 },
h1: { fontSize: 'var(--font-xxlarge)' },
h2: { fontSize: 'var(--font-large)' },
h3: { fontSize: 'var(--font-medium)' },
h5: { fontSize: 'var(--font-xsmall)' },
h6: { fontSize: 'var(--font-xxsmall)' },
'ul, ol': {
paddingLeft: 40,
},
}}
>
<Global
styles={{
body: {
textRendering: 'optimizeLegibility',
WebkitFontSmoothing: 'antialiased',
MozOsxFontSmoothing: 'grayscale',
},
}}
/>
<div
css={{
marginTop: 'var(--space-xlarge)',
marginBottom: 'var(--space-xlarge)',
borderBottom: `1px var(--border) solid`,
}}
>
<DocumentEditorProvider
value={value}
onChange={setValue}
editor={editor}
componentBlocks={componentBlocks}
documentFeatures={documentFeatures}
relationships={emptyObj}
>
{useMemo(
() => (
<Toolbar documentFeatures={documentFeatures} />
),
[documentFeatures]
)}
<DocumentEditorEditable />
</DocumentEditorProvider>
</div>
<details css={{ marginBottom: 'var(--space-xlarge)' }}>
<summary>View the Document Structure</summary>
<pre>{JSON.stringify(value, null, 2)}</pre>
</details>
</div>
);
};
memory.go | // Copyright 2021 Ilia Frenkel. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE.txt file.
package store
import (
"fmt"
"math/rand"
"sort"
"strings"
"sync"
)
// MemDB is an in-memory storage that implements the store.Interface.
// Because it's transient storage you will lose all the data once the
// process exits. It's not completely useless though. You can use it when
// temporary sharing is needed or as a cache for another storage.
type MemDB struct {
pastes map[int64]Paste
users map[string]User
sync.RWMutex
}
// NewMemDB initialises and returns an instance of MemDB.
func NewMemDB() *MemDB {
var s MemDB
s.pastes = make(map[int64]Paste)
s.users = make(map[string]User)
return &s
}
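// Example use (a minimal sketch; Paste and its Privacy field are defined
// elsewhere in this package):
//
//	db := NewMemDB()
//	id, _ := db.Create(Paste{Privacy: "public"})
//	p, _ := db.Get(id)
//	_ = p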
// Totals returns total count of pastes and users.
func (m *MemDB) Totals() (pastes, users int64) {
m.RLock()
defer m.RUnlock()
return int64(len(m.pastes)), int64(len(m.users))
}
// Create creates and stores a new paste returning its ID.
func (m *MemDB) Create(p Paste) (id int64, err error) {
m.Lock()
defer m.Unlock()
p.ID = rand.Int63() // #nosec
m.pastes[p.ID] = p
return p.ID, nil
}
// Delete deletes a paste by ID.
func (m *MemDB) Delete(id int64) error {
m.Lock()
defer m.Unlock()
delete(m.pastes, id)
return nil
}
// Find return a sorted list of pastes for a given request.
func (m *MemDB) Find(req FindRequest) (pastes []Paste, err error) {
pastes = []Paste{}
m.RLock()
// Find all the pastes for a user.
for _, p := range m.pastes {
if filterPaste(req, p) {
pastes = append(pastes, p)
}
}
m.RUnlock()
// Sort
sortPastes(req, pastes)
// Slice with skip and limit
return limitPastes(req, pastes), nil
}
func filterPaste(req FindRequest, paste Paste) bool {
if req.UserID == "" {
if req.Privacy != "" && paste.Privacy == req.Privacy {
return true
}
} else if paste.User.ID == req.UserID {
if req.Privacy == "" {
return true
} else if paste.Privacy == req.Privacy {
return true
}
}
return false
}
func sortPastes(req FindRequest, pastes []Paste) {
sort.Slice(pastes, func(i, j int) bool {
switch req.Sort {
case "+created", "-created":
if strings.HasPrefix(req.Sort, "-") {
return pastes[i].CreatedAt.After(pastes[j].CreatedAt)
}
return pastes[i].CreatedAt.Before(pastes[j].CreatedAt)
case "+expires", "-expires":
if strings.HasPrefix(req.Sort, "-") {
return pastes[i].Expires.After(pastes[j].Expires)
}
return pastes[i].Expires.Before(pastes[j].Expires)
case "+views", "-views":
if strings.HasPrefix(req.Sort, "-") {
return pastes[i].Views > pastes[j].Views
}
return pastes[i].Views <= pastes[j].Views
		default:
			return pastes[i].CreatedAt.Before(pastes[j].CreatedAt)
		}
	})
}
func limitPastes(req FindRequest, pastes []Paste) []Paste {
// Slice with skip and limit
skip := req.Skip
if skip > len(pastes) {
skip = len(pastes)
}
end := skip + req.Limit
if end > len(pastes) {
end = len(pastes)
}
return pastes[skip:end]
}
// Count returns a number of pastes for a user.
func (m *MemDB) Count(req FindRequest) int64 {
m.RLock()
defer m.RUnlock()
// Count all the pastes for a user
var cnt int64
for _, p := range m.pastes {
if req.UserID == "" {
if req.Privacy != "" && p.Privacy == req.Privacy {
cnt++
}
} else if p.User.ID == req.UserID {
if req.Privacy == "" {
cnt++
} else if p.Privacy == req.Privacy {
cnt++
}
}
}
return cnt
}
// Get returns a paste by ID.
func (m *MemDB) Get(id int64) (Paste, error) {
m.RLock()
defer m.RUnlock()
return m.pastes[id], nil
}
// SaveUser creates a new or updates an existing user.
func (m *MemDB) SaveUser(usr User) (id string, err error) {
m.Lock()
defer m.Unlock()
m.users[usr.ID] = usr
return usr.ID, nil
}
// User returns a user by ID.
func (m *MemDB) User(id string) (User, error) {
m.RLock()
defer m.RUnlock()
var usr User
var ok bool
if usr, ok = m.users[id]; !ok {
return User{}, fmt.Errorf("MemDB.User: user not found")
}
return usr, nil
}
// Update updates existing paste.
func (m *MemDB) Update(p Paste) (Paste, error) {
m.RLock()
if _, ok := m.pastes[p.ID]; !ok {
m.RUnlock()
return Paste{}, nil
}
m.RUnlock()
m.Lock()
defer m.Unlock()
m.pastes[p.ID] = p
return p, nil
}
count_opsize.py | import argparse
import os
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from mmcv.runner import (HOOKS, DistSamplerSeedHook, EpochBasedRunner,
OptimizerHook, build_optimizer)
from mmdet.apis import multi_gpu_test_search, single_gpu_test_search
from mmdet.core import wrap_fp16_model
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
import numpy as np
from torch.autograd import Variable
import collections
import sys
import time
import copy
from mmdet.core import encode_mask_results, tensor2imgs
import logging
sys.setrecursionlimit(10000)
import torch.distributed as dist
import functools
import random
from mmdet.models.necks.spos_opsc import OPS
PRIMITIVES = ['TDM_dcn', 'BUM_dcn', 'PCONV_dcn', 'FSM_dcn']
def countop(paths, channel):
    opsize = 0
    fp = 0
    for path in paths:
        op = OPS[path](channel, channel, True, True)
        opsize += op.size
        fp += op.fp
    #print(opsize)
    return opsize, fp
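# Example (a sketch): countop(['TDM_dcn', 'BUM_dcn'], 64) instantiates each op
# from OPS and sums its `size` and `fp` attributes, as countop does above.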
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('log',
help='train log file path',
default='./work_dirs/faster_rcnn_r50_sposfpn3_uniform_dcn_p4st12_c64_256_1x_coco/epoch_12_ea_prun_0_20210104_075032.log')
args = parser.parse_args()
return args
def main():
args = parse_args()
print(args)
name = args.log
print(os.getcwd())
print(name)
#name = '/data/liangtingting/projects/panas_super/work_dirs/faster_rcnn_r50_sposfpn3_uniform_dcn_p4st12_c64_256_1x_coco/epoch_12_ea_prun_0_20210104_075032.log'
op_name = os.path.splitext(name)[0] + '.txt'
print(op_name)
f = open(name, 'r')
wf = open(op_name,'w')
for line in f:
if '[' in line and 'AP' in line:
st = line.index('(')
ed = line.index(')')
paths = str(line[st+1:ed])
paths = paths.split(', ')
op_paths = [int(i) for i in paths]
channel = op_paths[-1]
cand = [PRIMITIVES[i] for i in op_paths[:-1]]
opsize, fp = countop(cand, channel)
ap = line.index('AP')
map = line[ap+3:ap+15]
wf.write(str(cand) + ' ' + str(channel) + ' ' + map + ' ' + str(opsize) + ' ' + str(fp) + '\n')
print(cand, channel, map, opsize, fp)
if 'top 50 result' in line:
break
if __name__ == '__main__':
    main()
generate-seeds.py | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
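# Examples (a sketch of the behaviour above):
#   parse_spec('1.2.3.4', 13881)            -> (IPv4-mapped 16-byte address, 13881)
#   parse_spec('[2001:db8::1]:8333', 13881) -> (16-byte IPv6 address, 8333)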
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BET_CHAINPARAMSSEEDS_H\n')
g.write('#define BET_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bet network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 13881)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 14881)
g.write('#endif // BET_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
register.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
superherov1alpha1 "github.com/spotahome/kooper/test/integration/operator/apis/superhero/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
)
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
var parameterCodec = runtime.NewParameterCodec(scheme)
func init() {
v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
AddToScheme(scheme)
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
superherov1alpha1.AddToScheme(scheme)
}
config.rs | use std::collections::HashMap;
use clap::Values;
pub const ALLOWED_VALUES: &[&str] = &["all", "g_tdce", "l_tdce", "lvn", "orph", "solo_lvn", "to_ssa", "from_ssa"];
pub enum LVNChoice {
Solo,
Bool(bool)
}
impl LVNChoice {
pub fn run_lvn(&self) -> bool {
match self {
LVNChoice::Solo => true,
LVNChoice::Bool(b) => *b
}
}
pub fn run_solo(&self) -> bool {
match self {
LVNChoice::Solo => true,
LVNChoice::Bool(_) => false
}
}
pub fn run_normal(&self) -> bool { | LVNChoice::Solo => false,
LVNChoice::Bool(b) => *b
}
}
}
pub struct ConfigOptions {
pub orphan_block: bool,
pub l_tdce: bool,
pub g_tdce: bool,
pub to_ssa: bool,
pub from_ssa: bool,
pub lvn: LVNChoice
}
impl ConfigOptions {
fn config_map(options: Values) -> HashMap<&str, bool> {
let mut hash = HashMap::<&str, bool>::new();
for opt in options {
hash.insert(opt, true);
}
if hash.contains_key("all") {
            for &key in ALLOWED_VALUES {
                hash.insert(key, true);
            }
} else {
            for &key in ALLOWED_VALUES {
                if !hash.contains_key(key) {
                    hash.insert(key, false);
                }
            }
}
hash
}
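    // Expansion rule illustrated (a sketch): passing `all` switches every key
    // in ALLOWED_VALUES on; any key the user did not pass defaults to false.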
pub fn new(options: Values) -> ConfigOptions {
let map = ConfigOptions::config_map(options);
let mut lvn = LVNChoice::Bool(map["lvn"]);
if map["solo_lvn"] && !map["lvn"] {
lvn = LVNChoice::Solo
}
ConfigOptions {
orphan_block: map["orph"],
l_tdce: map["l_tdce"],
g_tdce: map["g_tdce"],
lvn,
to_ssa: map["to_ssa"],
from_ssa: map["from_ssa"]
}
}
}
group.py | from dataclasses import dataclass, field
from typing import List
from mitre_attack import INTRUSION_SET
from mitre_attack.data.types.object import Object
@dataclass(frozen=True)
class Group(Object):
type: str = field(default=INTRUSION_SET, init=False)
name: str
aliases: List[str] = field(default_factory=list)
    contributors: List[str] = field(default_factory=list)
contacts.js | function getCookie(name) {
var cookieValue = null;
if (document.cookie && document.cookie != '') {
var cookies = document.cookie.split(';');
for (var i = 0; i < cookies.length; i++) {
var cookie = jQuery.trim(cookies[i]);
if (cookie.substring(0, name.length + 1) == (name + '=')) {
cookieValue = decodeURIComponent(cookie.substring(name.length + 1));
break;
}
}
}
return cookieValue;
}
$('#id_account').change(function(){
var csrftoken = getCookie('csrftoken');
var Account= $("#id_account").val()
$.get("/opportunities/contacts/", {"Account":Account, "csrfmiddlewaretoken": csrftoken}, function(data){
$("#id_contacts").html("")
$.each(data, function (index, value) {
$("#id_contacts").append("<option value="+index+">"+value+"</option>")
});
})
});
$("#comment_form").submit(function(e){
e.preventDefault()
var formData = new FormData($("#comment_form")[0]);
$.ajax({
url : "/opportunities/comment_add/",
type : "POST",
data : formData,
cache: false,
contentType: false,
processData: false,
success: function(data){
if(data.error){
$("#CommentError").html(data.error).show()
}
else {
$("#comments_div").prepend("<li class='list-group-item list-row' id='comment"+data.com_id+"'>"+
"<div class='pull-right right-container'>"+
"<div class='list-row-buttons btn-group pull-right'>"+
"<button class='btn btn-link btn-sm dropdown-toggle' data-toggle='dropdown' type='button'><span class='caret'></span></button>"+
"<ul class='dropdown-menu pull-right'>"+
"<li><a class='action' onclick='edit_comment("+data.com_id+")'>Edit</a></li>"+
"<li><a class='action' onclick='remove_comment("+data.com_id+")''>Remove</a></li></ul></div></div>"+
"<div class='stream-head-container'> "+data.com_user+" Commented</div>"+
"<div class='stream-post-container' id='comment_name"+data.com_id+"'>"+data.comment+"</div>"+
"<div class='stream-date-container"+data.com_id+"'>"+data.comment_time+"</div></div><div class='stream-date-container' id='comment_file_div"+data.com_id+"'><div id='new_comment"+data.com_id+"'</div></div></li>"
)
$("#id_comments").val("")
alert("Comment Submitted")
}
}
});
});
function edit_comment(x){
$('#myModal_comment').modal('show');
comment = $("#comment_name"+x).text()
$("#commentid").val(x)
$("#id_editcomment").val(comment)
}
$("#comment_edit").click(function(e){
alert("Heyyyyyyyyyyy")
e.preventDefault()
var formData = new FormData($("#comment_edit_form")[0]);
$.ajax({
url : "/opportunities/comment_edit/",
type : "POST",
data : formData,
cache: false,
contentType: false,
processData: false,
success:function(data){
if(data.error) {
alert(data.error)
} else {
$("#comment_name"+data.commentid).text(data.comment)
$('#myModal_comment').modal('hide');
$("#id_editcomment").val("")
}
}
})
});
function remove_comment(x){
var csrftoken = getCookie('csrftoken');
var warn = confirm("Are You Sure, you Want to Delete this Comment!?")
if (warn == true){
$.post('/opportunities/comment_remove/', {"comment_id":x, "csrfmiddlewaretoken": csrftoken, }, function(data){
if(data.error){
alert(data.error)
} else {
$("#comment"+data.oid).remove()
}
})
}
}
function editFun(x) {
var csrftoken = getCookie('csrftoken');
alert("Heloooooooooooooo")
$.ajax({
type: "POST",
url: "/opportunities/editdetails/",
data: {
csrfmiddlewaretoken: csrftoken,
tid: x
},
success: function(data) {
$("#viewdiv").hide()
$("#editdiv").show()
$("#id_name").val(data.name)
$("#id_stage").val(data.stage)
$("#id_amount").val(data.amount)
$("#id_account").val(data.account)
$("#id_probability").val(data.probability)
$("#id_close_date").val(data.close_date)
$("#hiddenval").val(data.eid)
$("#id_lead_source").val(data.sources)
$("#id_description").val(data.description)
contacts = data.contacts.replace("b'", "")
contacts = contacts.replace(/\\n/g, '')
contacts = contacts.replace(/\\t/g, '')
contacts = contacts.replace(/'/g, '')
contacts = contacts.replace(/}/g, '')
$("#id_contacts").html(contacts)
}
})
$('#id_account').change(function() {
var Account = $("#id_account").val()
var csrftoken = getCookie('csrftoken');
$.get("/opportunities/contacts/", {
"Account": Account,
"csrfmiddlewaretoken": csrftoken
}, function(data) {
$("#id_contacts").html("")
$.each(data, function(index, value) {
// console.log(index, value)
$("#id_contacts").append("<option value=" + index + ">" + value + "</option>")
});
})
});
}
rest.py | import asyncio
import inspect
import json
import logging
from asyncio import Queue, CancelledError
from sanic import Blueprint, response
from sanic.request import Request
from sanic.response import HTTPResponse, ResponseStream
from typing import Text, Dict, Any, Optional, Callable, Awaitable, NoReturn, Union
import rasa.utils.endpoints
from rasa.core.channels.channel import (
InputChannel,
CollectingOutputChannel,
UserMessage,
)
logger = logging.getLogger(__name__)
class RestInput(InputChannel):
"""A custom http input channel.
This implementation is the basis for a custom implementation of a chat
frontend. You can customize this to send messages to Rasa and
retrieve responses from the assistant."""
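    # Example request this channel accepts (field names taken from the
    # extractor methods below; the /webhooks/rest prefix is an assumption
    # based on how Rasa mounts credentialed channels):
    #
    #   POST /webhooks/rest/webhook
    #   {"sender": "user-123", "message": "hello", "input_channel": "rest"}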
@classmethod
def name(cls) -> Text:
return "rest"
@staticmethod
async def on_message_wrapper(
on_new_message: Callable[[UserMessage], Awaitable[Any]],
text: Text,
queue: Queue,
sender_id: Text,
input_channel: Text,
metadata: Optional[Dict[Text, Any]],
) -> None:
collector = QueueOutputChannel(queue)
message = UserMessage(
text, collector, sender_id, input_channel=input_channel, metadata=metadata
)
await on_new_message(message)
await queue.put("DONE")
async def _extract_sender(self, req: Request) -> Optional[Text]:
return req.json.get("sender", None)
# noinspection PyMethodMayBeStatic
def _extract_message(self, req: Request) -> Optional[Text]:
return req.json.get("message", None)
def _extract_input_channel(self, req: Request) -> Text:
return req.json.get("input_channel") or self.name()
def stream_response(
self,
on_new_message: Callable[[UserMessage], Awaitable[None]],
text: Text,
sender_id: Text,
input_channel: Text,
metadata: Optional[Dict[Text, Any]],
) -> Callable[[Any], Awaitable[None]]:
async def stream(resp: Any) -> None:
q = Queue()
task = asyncio.ensure_future(
self.on_message_wrapper(
on_new_message, text, q, sender_id, input_channel, metadata
)
)
while True:
result = await q.get()
if result == "DONE":
break
else:
await resp.write(json.dumps(result) + "\n")
await task
return stream
def blueprint(
self, on_new_message: Callable[[UserMessage], Awaitable[None]]
) -> Blueprint:
custom_webhook = Blueprint(
"custom_webhook_{}".format(type(self).__name__),
inspect.getmodule(self).__name__,
)
# noinspection PyUnusedLocal
@custom_webhook.route("/", methods=["GET"])
async def health(request: Request) -> HTTPResponse:
return response.json({"status": "ok"})
@custom_webhook.route("/webhook", methods=["POST"])
async def receive(request: Request) -> Union[ResponseStream, HTTPResponse]:
sender_id = await self._extract_sender(request)
text = self._extract_message(request)
should_use_stream = rasa.utils.endpoints.bool_arg(
request, "stream", default=False
)
input_channel = self._extract_input_channel(request)
metadata = self.get_metadata(request)
if should_use_stream:
return response.stream(
self.stream_response(
on_new_message, text, sender_id, input_channel, metadata
),
content_type="text/event-stream",
)
else:
collector = CollectingOutputChannel()
# noinspection PyBroadException
try:
await on_new_message(
UserMessage(
text,
collector,
sender_id,
input_channel=input_channel,
metadata=metadata,
)
)
except CancelledError:
logger.error(
f"Message handling timed out for " f"user message '{text}'."
)
except Exception:
logger.exception(
f"An exception occured while handling "
f"user message '{text}'."
)
return response.json(collector.messages)
return custom_webhook
class QueueOutputChannel(CollectingOutputChannel):
"""Output channel that collects send messages in a list
(doesn't send them anywhere, just collects them)."""
@classmethod
def name(cls) -> Text:
return "queue"
# noinspection PyMissingConstructor
def __init__(self, message_queue: Optional[Queue] = None) -> None:
super().__init__()
self.messages = Queue() if not message_queue else message_queue
    def latest_output(self) -> NoReturn:
        raise NotImplementedError("A queue doesn't allow peeking at messages.")

    async def _persist_message(self, message: Dict[Text, Any]) -> None:
        await self.messages.put(message)
tripcolor.py | import numpy as np
from matplotlib.collections import PolyCollection, TriMesh
from matplotlib.colors import Normalize
from matplotlib.tri.triangulation import Triangulation
def tripcolor(ax, *args, alpha=1.0, norm=None, cmap=None, vmin=None,
vmax=None, shading='flat', facecolors=None, **kwargs):
| """
Create a pseudocolor plot of an unstructured triangular grid.
The triangulation can be specified in one of two ways; either::
tripcolor(triangulation, ...)
where triangulation is a :class:`matplotlib.tri.Triangulation`
object, or
::
tripcolor(x, y, ...)
tripcolor(x, y, triangles, ...)
tripcolor(x, y, triangles=triangles, ...)
tripcolor(x, y, mask=mask, ...)
tripcolor(x, y, triangles, mask=mask, ...)
in which case a Triangulation object will be created. See
    :class:`~matplotlib.tri.Triangulation` for an explanation of these
possibilities.
The next argument must be *C*, the array of color values, either
one per point in the triangulation if color values are defined at
points, or one per triangle in the triangulation if color values
are defined at triangles. If there are the same number of points
and triangles in the triangulation it is assumed that color
values are defined at points; to force the use of color values at
triangles use the kwarg ``facecolors=C`` instead of just ``C``.
*shading* may be 'flat' (the default) or 'gouraud'. If *shading*
is 'flat' and C values are defined at points, the color values
used for each triangle are from the mean C of the triangle's
three points. If *shading* is 'gouraud' then color values must be
defined at points.
The remaining kwargs are the same as for
:meth:`~matplotlib.axes.Axes.pcolor`.
"""
if shading not in ['flat', 'gouraud']:
raise ValueError("shading must be one of ['flat', 'gouraud'] "
"not {0}".format(shading))
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
# C is the colors array defined at either points or faces (i.e. triangles).
# If facecolors is None, C are defined at points.
# If facecolors is not None, C are defined at faces.
if facecolors is not None:
C = facecolors
else:
C = np.asarray(args[0])
# If there are a different number of points and triangles in the
# triangulation, can omit facecolors kwarg as it is obvious from
# length of C whether it refers to points or faces.
# Do not do this for gouraud shading.
if (facecolors is None and len(C) == len(tri.triangles) and
len(C) != len(tri.x) and shading != 'gouraud'):
facecolors = C
# Check length of C is OK.
if ((facecolors is None and len(C) != len(tri.x)) or
(facecolors is not None and len(C) != len(tri.triangles))):
raise ValueError('Length of color values array must be the same '
'as either the number of triangulation points '
'or triangles')
# Handling of linewidths, shading, edgecolors and antialiased as
# in Axes.pcolor
linewidths = (0.25,)
if 'linewidth' in kwargs:
kwargs['linewidths'] = kwargs.pop('linewidth')
kwargs.setdefault('linewidths', linewidths)
edgecolors = 'none'
if 'edgecolor' in kwargs:
kwargs['edgecolors'] = kwargs.pop('edgecolor')
ec = kwargs.setdefault('edgecolors', edgecolors)
if 'antialiased' in kwargs:
kwargs['antialiaseds'] = kwargs.pop('antialiased')
if 'antialiaseds' not in kwargs and ec.lower() == "none":
kwargs['antialiaseds'] = False
if shading == 'gouraud':
if facecolors is not None:
raise ValueError('Gouraud shading does not support the use '
'of facecolors kwarg')
if len(C) != len(tri.x):
raise ValueError('For gouraud shading, the length of color '
'values array must be the same as the '
'number of triangulation points')
collection = TriMesh(tri, **kwargs)
else:
# Vertices of triangles.
maskedTris = tri.get_masked_triangles()
verts = np.stack((tri.x[maskedTris], tri.y[maskedTris]), axis=-1)
# Color values.
if facecolors is None:
# One color per triangle, the mean of the 3 vertex color values.
C = C[maskedTris].mean(axis=1)
elif tri.mask is not None:
# Remove color values of masked triangles.
C = C.compress(1-tri.mask)
collection = PolyCollection(verts, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None and not isinstance(norm, Normalize):
raise ValueError("'norm' must be an instance of 'Normalize'")
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
ax.grid(False)
minx = tri.x.min()
maxx = tri.x.max()
miny = tri.y.min()
maxy = tri.y.max()
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
ax.add_collection(collection)
    return collection
code-generator.js | const { URL } = require('url');
const Handlebars = require('handlebars');
const { declareInjections } = require('@cardstack/di');
const DEFAULT_SOCKET_IO_PORT = 3100;
const DEFAULT_SOCKET_IO_PATH = '/';
const template = Handlebars.compile(`
define("@cardstack/live-queries/environment", ["exports"], function (exports) {
"use strict"; | exports.{{property.name}} = "{{property.value}}";
{{/each}}
});
`);
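// Sketch of the compiled output for a single property
// { name: 'host', value: 'http://example.test:3100/' } (placeholder value):
//
//   define("@cardstack/live-queries/environment", ["exports"], function (exports) {
//     "use strict";
//     Object.defineProperty(exports, "__esModule", { value: true });
//     exports.host = "http://example.test:3100/";
//   });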
module.exports = declareInjections({
plugins: 'hub:plugins',
publicURL: 'config:public-url'
},
class LiveQueryCodeGenerator {
async generateCode() {
let configured = await this.plugins.active();
let pluginConfig = configured.describe('@cardstack/live-queries');
let port = pluginConfig.attributes['socket-port'] || DEFAULT_SOCKET_IO_PORT;
let socketPath = pluginConfig.attributes['socket-path'] || DEFAULT_SOCKET_IO_PATH;
let socketIoUrl = new URL(this.publicURL.url);
socketIoUrl.port = port;
socketIoUrl.pathname = '';
return template({ properties: [
{
name: 'host',
value: socketIoUrl.toString()
},
{
name: 'path',
value: socketPath
}
]});
}
});
train_simple_model.py | import pickle
from airflow import DAG
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils import timezone
from sklearn.ensemble import RandomForestClassifier
default_args = {
'owner': 'ODDS',
}
dag = DAG(
'train_simple_model',
schedule_interval='*/15 * * * *',
default_args=default_args,
start_date=timezone.datetime(2020, 8, 1),
catchup=False
)
start = DummyOperator(task_id='start', dag=dag)
def train_func():
clf = RandomForestClassifier(random_state=0)
X = [[ 1, 2, 3],
[11, 12, 13]]
y = [0, 1]
clf.fit(X, y)
MODEL_PATH = '/Users/zkan/Projects/dataength/' \
'automating-your-data-pipeline-with-apache-airflow/' \
'machine-learning-pipeline/airflow/dags'
with open(f'{MODEL_PATH}/models/clf.model', 'wb') as outfile:
pickle.dump(clf, outfile)
train = PythonOperator(
task_id='train',
python_callable=train_func,
dag=dag,
)
end = DummyOperator(task_id='end', dag=dag)
start >> train >> end
save.js | ;(function() {
"use strict"
analyticsHooks.on("save:sign-up", function(data) {
analytics.track("Triggered sign up form via save button")
})
analyticsHooks.on("save:save-artwork", function(data) {
analytics.track("Saved Artwork", {
entity_slug: data.entity_slug,
entity_id: data.entity_id,
context_page: data.context_page,
context_module: data.context_module,
}) | analyticsHooks.on("save:remove-artwork", function(data) {
analytics.track("Removed Artwork", {
entity_slug: data.entity_slug,
entity_id: data.entity_id,
context_page: data.context_page,
context_module: data.context_module,
})
})
})()
|
Family56.tsx | import * as React from "react";
import { IEmojiProps } from "../../styled";
const SvgFamily56 = (props: IEmojiProps) => (
<svg viewBox="0 0 72 72" width="1em" height="1em" {...props}>
<g fill="#debb90">
<circle cx={47} cy={11} r={3} />
<path d="M52 23.6l1.9 38.2c.1 1.1-.7 1.8-1.5 1.8s-1.4-.6-1.5-1.3L48.2 39c-.1-.6-.5-1.1-1.2-1.1s-1.1.5-1.2 1.1l-2.7 23.3c-.1.7-.7 1.3-1.5 1.3s-1.6-.7-1.5-1.8L42 23.6" />
<path d="M56 42.9c-3.3 0-5.5-18.3-9-18.3s-7.2 18.3-11 18.3c0 0 2.1-5.7 2.5-10.6.2-2.5.3-9.3.3-9.3.1-2.8 2.4-5 5.2-5h6c2.8 0 5.1 2.2 5.2 5l.8 19.9z" />
</g>
<g fill="#c19a65">
<circle cx={25} cy={11} r={3} />
<path d="M20 23.6l-1.9 38.2c-.1 1.1.7 1.8 1.5 1.8s1.4-.6 1.5-1.3L23.8 39c.1-.6.5-1.1 1.2-1.1s1.1.5 1.2 1.1l2.7 23.3c.1.7.7 1.3 1.5 1.3s1.6-.7 1.5-1.8L30 23.6" />
<path d="M16.8 23c.1-2.8 2.4-5 5.2-5h6c2.8 0 5.1 2.2 5.2 5 0 0 .1 6.7.3 9.3.5 4.9 2.5 10.6 2.5 10.6-3.8 0-7.5-18.3-11-18.3s-5.7 18.3-9 18.3l.8-19.9z" />
</g>
<g fill="none" stroke="#000" strokeWidth={2}>
<circle cx={47} cy={11} r={3} strokeMiterlimit={10} />
<path
strokeLinecap="round"
strokeLinejoin="round"
d="M36 41.9s2.1-4.7 2.5-9.6c.2-2.5.3-9.3.3-9.3.1-2.8 2.4-5 5.2-5h6c2.8 0 5.1 2.2 5.2 5l.8 18.9"
/>
<path
strokeLinecap="round"
strokeLinejoin="round"
d="M52 23.6l1.9 38.2c.1 1.1-.7 1.8-1.5 1.8s-1.4-.6-1.5-1.3L48.2 39c-.1-.6-.5-1.1-1.2-1.1s-1.1.5-1.2 1.1l-2.7 23.3c-.1.7-.7 1.3-1.5 1.3s-1.6-.7-1.5-1.8L42 23.6"
/>
<circle cx={25} cy={11} r={3} strokeMiterlimit={10} />
<path
strokeLinecap="round"
strokeLinejoin="round"
d="M16 41.9l.8-18.9c.1-2.8 2.4-5 5.2-5h6c2.8 0 5.1 2.2 5.2 5 0 0 .1 6.7.3 9.3.4 4.9 2.5 9.6 2.5 9.6" | strokeLinejoin="round"
d="M20 23.6l-1.9 38.2c-.1 1.1.7 1.8 1.5 1.8s1.4-.6 1.5-1.3L23.8 39c.1-.6.5-1.1 1.2-1.1s1.1.5 1.2 1.1l2.7 23.3c.1.7.7 1.3 1.5 1.3s1.6-.7 1.5-1.8L30 23.6"
/>
</g>
</svg>
);
export default SvgFamily56; | />
<path
strokeLinecap="round" |
extensions_request_builder.go | package extensions
import (
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f "github.com/microsoft/kiota-abstractions-go"
ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be "github.com/microsoftgraph/msgraph-beta-sdk-go/models"
i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459 "github.com/microsoftgraph/msgraph-beta-sdk-go/models/odataerrors"
i78d6b1da4e2e06d5f1503096e312e42238f16ffad357aa4e9a320791417ccc4f "github.com/microsoftgraph/msgraph-beta-sdk-go/me/tasks/alltasks/item/extensions/count"
)
// ExtensionsRequestBuilder provides operations to manage the extensions property of the microsoft.graph.baseTask entity.
type ExtensionsRequestBuilder struct {
// Path parameters for the request
pathParameters map[string]string
// The request adapter to use to execute the requests.
requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter
// Url template to use to build the URL for the current request builder
urlTemplate string
}
// ExtensionsRequestBuilderGetQueryParameters the collection of open extensions defined for the task.
type ExtensionsRequestBuilderGetQueryParameters struct {
// Include count of items
Count *bool `uriparametername:"%24count"`
// Expand related entities
Expand []string `uriparametername:"%24expand"`
// Filter items by property values
Filter *string `uriparametername:"%24filter"`
// Order items by property values
Orderby []string `uriparametername:"%24orderby"`
// Search items by search phrases
Search *string `uriparametername:"%24search"`
// Select properties to be returned
Select []string `uriparametername:"%24select"`
// Skip the first n items
Skip *int32 `uriparametername:"%24skip"`
// Show only the first n items
Top *int32 `uriparametername:"%24top"`
}
// ExtensionsRequestBuilderGetRequestConfiguration configuration for the request such as headers, query parameters, and middleware options.
type ExtensionsRequestBuilderGetRequestConfiguration struct {
// Request headers
Headers map[string]string
// Request options
Options []i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestOption
// Request query parameters
QueryParameters *ExtensionsRequestBuilderGetQueryParameters
}
// ExtensionsRequestBuilderPostRequestConfiguration configuration for the request such as headers, query parameters, and middleware options.
type ExtensionsRequestBuilderPostRequestConfiguration struct {
// Request headers
Headers map[string]string
// Request options
Options []i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestOption
}
// NewExtensionsRequestBuilderInternal instantiates a new ExtensionsRequestBuilder and sets the default values.
func NewExtensionsRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*ExtensionsRequestBuilder) {
m := &ExtensionsRequestBuilder{
}
m.urlTemplate = "{+baseurl}/me/tasks/alltasks/{baseTask%2Did}/extensions{?%24top,%24skip,%24search,%24filter,%24count,%24orderby,%24select,%24expand}";
urlTplParams := make(map[string]string)
for idx, item := range pathParameters {
urlTplParams[idx] = item
}
m.pathParameters = urlTplParams;
m.requestAdapter = requestAdapter;
return m
}
// NewExtensionsRequestBuilder instantiates a new ExtensionsRequestBuilder and sets the default values.
func NewExtensionsRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*ExtensionsRequestBuilder) |
// Count the count property
func (m *ExtensionsRequestBuilder) Count()(*i78d6b1da4e2e06d5f1503096e312e42238f16ffad357aa4e9a320791417ccc4f.CountRequestBuilder) {
return i78d6b1da4e2e06d5f1503096e312e42238f16ffad357aa4e9a320791417ccc4f.NewCountRequestBuilderInternal(m.pathParameters, m.requestAdapter);
}
// CreateGetRequestInformation the collection of open extensions defined for the task.
func (m *ExtensionsRequestBuilder) CreateGetRequestInformation()(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
return m.CreateGetRequestInformationWithRequestConfiguration(nil);
}
// CreateGetRequestInformationWithRequestConfiguration the collection of open extensions defined for the task.
func (m *ExtensionsRequestBuilder) CreateGetRequestInformationWithRequestConfiguration(requestConfiguration *ExtensionsRequestBuilderGetRequestConfiguration)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
requestInfo := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.GET
if requestConfiguration != nil {
if requestConfiguration.QueryParameters != nil {
requestInfo.AddQueryParameters(*(requestConfiguration.QueryParameters))
}
requestInfo.AddRequestHeaders(requestConfiguration.Headers)
requestInfo.AddRequestOptions(requestConfiguration.Options)
}
return requestInfo, nil
}
// CreatePostRequestInformation create new navigation property to extensions for me
func (m *ExtensionsRequestBuilder) CreatePostRequestInformation(body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.Extensionable)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
return m.CreatePostRequestInformationWithRequestConfiguration(body, nil);
}
// CreatePostRequestInformationWithRequestConfiguration create new navigation property to extensions for me
func (m *ExtensionsRequestBuilder) CreatePostRequestInformationWithRequestConfiguration(body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.Extensionable, requestConfiguration *ExtensionsRequestBuilderPostRequestConfiguration)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
requestInfo := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.POST
requestInfo.SetContentFromParsable(m.requestAdapter, "application/json", body)
if requestConfiguration != nil {
requestInfo.AddRequestHeaders(requestConfiguration.Headers)
requestInfo.AddRequestOptions(requestConfiguration.Options)
}
return requestInfo, nil
}
// Get the collection of open extensions defined for the task.
func (m *ExtensionsRequestBuilder) Get()(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.ExtensionCollectionResponseable, error) {
return m.GetWithRequestConfigurationAndResponseHandler(nil, nil);
}
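// Usage sketch (illustrative only; assumes an authenticated Kiota request adapter):
//   builder := NewExtensionsRequestBuilder("https://graph.microsoft.com/beta/me/tasks/alltasks/{id}/extensions", adapter)
//   extensions, err := builder.Get()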
// GetWithRequestConfigurationAndResponseHandler the collection of open extensions defined for the task.
func (m *ExtensionsRequestBuilder) GetWithRequestConfigurationAndResponseHandler(requestConfiguration *ExtensionsRequestBuilderGetRequestConfiguration, responseHandler i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ResponseHandler)(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.ExtensionCollectionResponseable, error) {
requestInfo, err := m.CreateGetRequestInformationWithRequestConfiguration(requestConfiguration);
if err != nil {
return nil, err
}
errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {
"4XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
"5XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
}
res, err := m.requestAdapter.SendAsync(requestInfo, ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CreateExtensionCollectionResponseFromDiscriminatorValue, responseHandler, errorMapping)
if err != nil {
return nil, err
}
return res.(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.ExtensionCollectionResponseable), nil
}
// Post create new navigation property to extensions for me
func (m *ExtensionsRequestBuilder) Post(body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.Extensionable)(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.Extensionable, error) {
return m.PostWithRequestConfigurationAndResponseHandler(body, nil, nil);
}
// PostWithRequestConfigurationAndResponseHandler create new navigation property to extensions for me
func (m *ExtensionsRequestBuilder) PostWithRequestConfigurationAndResponseHandler(body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.Extensionable, requestConfiguration *ExtensionsRequestBuilderPostRequestConfiguration, responseHandler i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ResponseHandler)(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.Extensionable, error) {
requestInfo, err := m.CreatePostRequestInformationWithRequestConfiguration(body, requestConfiguration);
if err != nil {
return nil, err
}
errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {
"4XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
"5XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
}
res, err := m.requestAdapter.SendAsync(requestInfo, ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CreateExtensionFromDiscriminatorValue, responseHandler, errorMapping)
if err != nil {
return nil, err
}
return res.(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.Extensionable), nil
}
| {
urlParams := make(map[string]string)
urlParams["request-raw-url"] = rawUrl
return NewExtensionsRequestBuilderInternal(urlParams, requestAdapter)
} |
main.py | import argparse
import datetime
import glob
import logging
import os
import time
import torch
from logging_helper import init_logger
from models import Discriminator, BartSystem
from train import train
from transformer_base import add_generic_args, generic_train
class Config():
# data_path = './data/chatbot/'
# log_dir = 'runs/exp'
save_path = './save'
# pretrained_embed_path = './embedding/'
device = torch.device('cuda' if True and torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
discriminator_method = 'Multi' # 'Multi' or 'Cond'
load_pretrained_embed = False
min_freq = 3
max_length = 1024 # max_source_length
# embed_size = 256
d_model = 256
h = 4
num_styles = 2
num_classes = num_styles + 1 if discriminator_method == 'Multi' else 2 | L2 = 0
iter_D = 10
iter_F = 5
F_pretrain_iter = 1
log_steps = 5
eval_steps = 25
learned_pos_embed = True
dropout = 0
drop_rate_config = [(1, 0)]
temperature_config = [(1, 0)]
slf_factor = 0.25
cyc_factor = 0.5
adv_factor = 1
inp_shuffle_len = 0
inp_unk_drop_fac = 0
inp_rand_drop_fac = 0
inp_drop_prob = 0
### Bart system
output_dir='feedback_sum'
do_predict=True
max_source_length=1024
max_target_length=56
data_dir="feedback"
def get_n_params(model):
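# Total parameter count: for each tensor, multiply its dimensions, then sum.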
pp=0
for p in list(model.parameters()):
nn=1
for s in list(p.size()):
nn = nn*s
pp += nn
return pp
def main():
config = Config()
parser = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
parser = BartSystem.add_model_specific_args(parser, os.getcwd())
args = parser.parse_args()
# Some values from the Config class need to be copied to args to work.
setattr(config, "num_train_epochs", args.num_train_epochs)
setattr(config, "save_path", args.output_dir)
setattr(args, "learning_rate", config.lr_F)
# Create output directory.
timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
setattr(config, "save_folder", os.path.join(config.save_path, timestamp))
os.makedirs(os.path.join(config.save_folder, 'ckpts'))
init_logger(config.save_folder)
logger = logging.getLogger(__name__)
model_F = BartSystem(args).to(config.device)
# Don't use the trainer to fit the model
args.do_train = False
# trainer = generic_train(model_F, args)
if args.output_dir:
try:
checkpoints = list(sorted(glob.glob(os.path.join(args.output_dir, "checkpointepoch=*.ckpt"), recursive=True)))
if checkpoints[-1]:
BartSystem.load_from_checkpoint(checkpoints[-1])
logger.info("Load checkpoint sucessfully!")
except Exception:
logger.info("Failed to load checkpoint!")
# train_iters, dev_iters, test_iters, vocab = load_dataset(config)
train_iters, dev_iters, test_iters = model_F.train_dataloader(), model_F.val_dataloader(), model_F.test_dataloader()
model_D = Discriminator(config, model_F.tokenizer).to(config.device)
logger.info(config.discriminator_method)
# import pdb
# pdb.set_trace()
logger.info(model_D)
train(config, model_F, model_D, train_iters, dev_iters, test_iters)
if __name__ == '__main__':
main() | num_layers = 4
# batch_size = 64
lr_F = 5e-6
lr_D = 1e-4 |
server.go | package omg
import (
"github.com/valyala/fasthttp"
"sort"
"bytes"
)
type Server struct {
Port string
Routers []RouteItem
mw MiddleWareManager
app *AppContext
srv *fasthttp.Server
}
func (s *Server) Start(port string, protocol ...string) {
s.Port = port;
// Sort the routes.
s.sortRegister();
s.srv = &fasthttp.Server{
Handler: s.handlerRequest,
Name: "Omg",
}
protocolLen := len(protocol);
if protocolLen == 0 {
protocol = append(protocol, ProtocolHttp);
}
if UtilsExistsInSlice(protocol, "http") {
s.srv.ListenAndServe(":" + s.Port);
}
// https
}
func (s *Server) Close() error {
return s.srv.Shutdown();
}
func (s *Server) Get(path string, handler Handler) *Server {
s.putToRouterItem(path, MethodGet, handler);
return s;
}
func (s *Server) Post(path string, handler Handler) *Server {
s.putToRouterItem(path, MethodPost, handler);
return s;
}
func (s *Server) Handle(path string, handler Handler, methods ...Method) *Server {
for _, method := range methods {
s.putToRouterItem(path, method, handler);
}
return s;
}
func (s *Server) Use(mw MWWrapper) *Server {
s.mw.Use(mw(s.app));
return s;
}
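// Usage sketch (handler signature inferred from mw.Exec; illustrative only):
//   app := New()
//   app.Get("/ping", func(ctx *Context) (string, error) { return "pong", nil })
//   app.Start("8080")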
func (s *Server) findRouter(path string) *RouteItem {
for _, routerItem := range s.Routers {
if routerItem.Path == path {
return &routerItem;
}
}
newRouter := GetRouteItem(path);
s.Routers = append(s.Routers, newRouter);
return &s.Routers[len(s.Routers) - 1];
}
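// handlerRequest adapts the fasthttp context into omg's Request/Response, runs the middleware chain, and writes status, headers, and body back to the client.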
func (s *Server) handlerRequest(fsh *fasthttp.RequestCtx) {
path := string(fsh.Path());
method := GetMethod(string(fsh.Method()));
newReq := Request {
Url: path,
Method: method,
OriginReq: &fsh.Request,
};
newRes := Response {
Headers: make(map[string][]string),
OriginRes: &fsh.Response,
};
ctx := &Context {
Req: newReq,
Res: newRes,
Status: 200,
app: s.app,
ctxPlugin: make(map[string]interface{}),
};
handler := s.findHandlerByPathAnd(path, method);
result, err := s.mw.Exec(ctx, handler);
contentType := UtilsGetContentType(ctx.Res.Type, ctx.Req.Url);
respHead := &fsh.Response.Header;
respHead.Set("Content-Type", contentType);
if len(ctx.Res.Headers) > 0 {
for key, value := range ctx.Res.Headers {
for index, valueLine := range value {
if index == 0 {
respHead.Set(key, valueLine);
} else {
respHead.Add(key, valueLine);
}
}
}
}
if err != nil {
ctx.Status = 503;
result = err.Error();
ctx.Body = nil;
}
fsh.Response.SetStatusCode(ctx.Status);
if ctx.Body != nil {
fsh.Response.SetBodyStream(bytes.NewReader(ctx.Body), len(ctx.Body));
} else {
fsh.Response.SetBodyString(result);
}
}
func (s *Server) putToRouterItem(path string, method Method, handler Handler) {
currentRouters := s.findRouter(path);
currentRouters.Register = append(currentRouters.Register, RouteRegister{ method, handler });
}
func (s *Server) findHandlerByPathAnd(path string, method Method) Handler {
var matchedRouter RouteItem;
for _, router := range s.Routers {
match := router.MatchReg.MatchString(path);
if match {
matchedRouter = router;
break;
}
}
if len(matchedRouter.Register) > 0 {
for _, register := range matchedRouter.Register {
if register.Method == method {
return register.Handler;
}
}
}
// No route matched: fall back to the default 404 handler.
return Default404;
}
// Sort the registered routes so more specific paths match first.
func (s *Server) sortRegister() {
sort.Slice(s.Routers, func(i, j int) bool {
pre := s.Routers[i];
next := s.Routers[j];
if pre.Level == next.Level {
if next.PurePath == pre.PurePath {
return len(pre.Path) - len(next.Path) < 0;
}
return len(next.PurePath) - len(pre.PurePath) < 0;
}
return next.Level - pre.Level < 0;
});
}
func New() *Server {
server := &Serv | {
mw: MiddleWareManager{},
app: &AppContext{},
}
return server;
} | er |
ops.rs | pub use target_arch::*;
#[cfg(not(target_arch = "bpf"))]
mod target_arch {
use {
crate::{encryption::elgamal::ElGamalCiphertext, zk_token_elgamal::pod},
curve25519_dalek::{constants::RISTRETTO_BASEPOINT_COMPRESSED, scalar::Scalar},
std::convert::TryInto,
};
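// 2^32: scale factor for recombining a 64-bit amount from its 32-bit halves.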
pub const TWO_32: u64 = 4294967296;
// On input two scalars x0, x1 and two ciphertexts ct0, ct1,
// returns `Some(x0*ct0 + x1*ct1)` or `None` if the input was invalid
fn add_ciphertexts(
scalar_0: Scalar,
ct_0: &pod::ElGamalCiphertext,
scalar_1: Scalar,
ct_1: &pod::ElGamalCiphertext,
) -> Option<pod::ElGamalCiphertext> {
let ct_0: ElGamalCiphertext = (*ct_0).try_into().ok()?;
let ct_1: ElGamalCiphertext = (*ct_1).try_into().ok()?;
let ct_sum = ct_0 * scalar_0 + ct_1 * scalar_1;
Some(pod::ElGamalCiphertext::from(ct_sum))
}
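// Homomorphically recombine a split 64-bit amount: ct_lo + 2^32 * ct_hi.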
pub(crate) fn combine_lo_hi(
ct_lo: &pod::ElGamalCiphertext,
ct_hi: &pod::ElGamalCiphertext,
) -> Option<pod::ElGamalCiphertext> {
add_ciphertexts(Scalar::one(), ct_lo, Scalar::from(TWO_32), ct_hi)
}
pub fn add(
ct_0: &pod::ElGamalCiphertext,
ct_1: &pod::ElGamalCiphertext,
) -> Option<pod::ElGamalCiphertext> {
add_ciphertexts(Scalar::one(), ct_0, Scalar::one(), ct_1)
}
pub fn add_with_lo_hi(
ct_0: &pod::ElGamalCiphertext,
ct_1_lo: &pod::ElGamalCiphertext,
ct_1_hi: &pod::ElGamalCiphertext,
) -> Option<pod::ElGamalCiphertext> {
let ct_1 = combine_lo_hi(ct_1_lo, ct_1_hi)?;
add_ciphertexts(Scalar::one(), ct_0, Scalar::one(), &ct_1)
}
pub fn subtract(
ct_0: &pod::ElGamalCiphertext,
ct_1: &pod::ElGamalCiphertext,
) -> Option<pod::ElGamalCiphertext> {
add_ciphertexts(Scalar::one(), ct_0, -Scalar::one(), ct_1)
}
pub fn subtract_with_lo_hi(
ct_0: &pod::ElGamalCiphertext,
ct_1_lo: &pod::ElGamalCiphertext,
ct_1_hi: &pod::ElGamalCiphertext,
) -> Option<pod::ElGamalCiphertext> {
let ct_1 = combine_lo_hi(ct_1_lo, ct_1_hi)?;
add_ciphertexts(Scalar::one(), ct_0, -Scalar::one(), &ct_1)
}
pub fn add_to(ct: &pod::ElGamalCiphertext, amount: u64) -> Option<pod::ElGamalCiphertext> {
let mut amount_as_ct = [0_u8; 64];
amount_as_ct[..32].copy_from_slice(RISTRETTO_BASEPOINT_COMPRESSED.as_bytes());
add_ciphertexts(
Scalar::one(),
ct,
Scalar::from(amount),
&pod::ElGamalCiphertext(amount_as_ct),
)
}
pub fn subtract_from(
ct: &pod::ElGamalCiphertext,
amount: u64,
) -> Option<pod::ElGamalCiphertext> {
let mut amount_as_ct = [0_u8; 64];
amount_as_ct[..32].copy_from_slice(RISTRETTO_BASEPOINT_COMPRESSED.as_bytes());
add_ciphertexts(
Scalar::one(),
ct,
-Scalar::from(amount),
&pod::ElGamalCiphertext(amount_as_ct),
)
}
}
#[cfg(target_arch = "bpf")]
#[allow(unused_variables)]
mod target_arch {
use {super::*, crate::zk_token_elgamal::pod, bytemuck::Zeroable};
fn op(
op: u64,
ct_0: &pod::ElGamalCiphertext,
ct_1: &pod::ElGamalCiphertext,
) -> Option<pod::ElGamalCiphertext> {
let mut ct_result = pod::ElGamalCiphertext::zeroed();
let result = unsafe {
sol_zk_token_elgamal_op(
op,
&ct_0.0 as *const u8,
&ct_1.0 as *const u8,
&mut ct_result.0 as *mut u8,
)
};
if result == 0 | else {
None
}
}
fn op_with_lo_hi(
op: u64,
ct_0: &pod::ElGamalCiphertext,
ct_1_lo: &pod::ElGamalCiphertext,
ct_1_hi: &pod::ElGamalCiphertext,
) -> Option<pod::ElGamalCiphertext> {
let mut ct_result = pod::ElGamalCiphertext::zeroed();
let result = unsafe {
sol_zk_token_elgamal_op_with_lo_hi(
op,
&ct_0.0 as *const u8,
&ct_1_lo.0 as *const u8,
&ct_1_hi.0 as *const u8,
&mut ct_result.0 as *mut u8,
)
};
if result == 0 {
Some(ct_result)
} else {
None
}
}
fn op_with_scalar(
op: u64,
ct: &pod::ElGamalCiphertext,
scalar: u64,
) -> Option<pod::ElGamalCiphertext> {
let mut ct_result = pod::ElGamalCiphertext::zeroed();
let result = unsafe {
sol_zk_token_elgamal_op_with_scalar(
op,
&ct.0 as *const u8,
scalar,
&mut ct_result.0 as *mut u8,
)
};
if result == 0 {
Some(ct_result)
} else {
None
}
}
pub fn add(
ct_0: &pod::ElGamalCiphertext,
ct_1: &pod::ElGamalCiphertext,
) -> Option<pod::ElGamalCiphertext> {
op(OP_ADD, ct_0, ct_1)
}
pub fn add_with_lo_hi(
ct_0: &pod::ElGamalCiphertext,
ct_1_lo: &pod::ElGamalCiphertext,
ct_1_hi: &pod::ElGamalCiphertext,
) -> Option<pod::ElGamalCiphertext> {
op_with_lo_hi(OP_ADD, ct_0, ct_1_lo, ct_1_hi)
}
pub fn subtract(
ct_0: &pod::ElGamalCiphertext,
ct_1: &pod::ElGamalCiphertext,
) -> Option<pod::ElGamalCiphertext> {
op(OP_SUB, ct_0, ct_1)
}
pub fn subtract_with_lo_hi(
ct_0: &pod::ElGamalCiphertext,
ct_1_lo: &pod::ElGamalCiphertext,
ct_1_hi: &pod::ElGamalCiphertext,
) -> Option<pod::ElGamalCiphertext> {
op_with_lo_hi(OP_SUB, ct_0, ct_1_lo, ct_1_hi)
}
pub fn add_to(ct: &pod::ElGamalCiphertext, amount: u64) -> Option<pod::ElGamalCiphertext> {
op_with_scalar(OP_ADD, ct, amount)
}
pub fn subtract_from(
ct: &pod::ElGamalCiphertext,
amount: u64,
) -> Option<pod::ElGamalCiphertext> {
op_with_scalar(OP_SUB, ct, amount)
}
}
pub const OP_ADD: u64 = 0;
pub const OP_SUB: u64 = 1;
extern "C" {
pub fn sol_zk_token_elgamal_op(
op: u64,
ct_0: *const u8,
ct_1: *const u8,
ct_result: *mut u8,
) -> u64;
pub fn sol_zk_token_elgamal_op_with_lo_hi(
op: u64,
ct_0: *const u8,
ct_1_lo: *const u8,
ct_1_hi: *const u8,
ct_result: *mut u8,
) -> u64;
pub fn sol_zk_token_elgamal_op_with_scalar(
op: u64,
ct: *const u8,
scalar: u64,
ct_result: *mut u8,
) -> u64;
}
#[cfg(test)]
mod tests {
use {
crate::{
encryption::{
elgamal::{ElGamalCiphertext, ElGamalKeypair},
pedersen::{Pedersen, PedersenOpening},
},
zk_token_elgamal::{ops, pod},
},
bytemuck::Zeroable,
curve25519_dalek::scalar::Scalar,
rand::rngs::OsRng,
std::convert::TryInto,
};
#[test]
fn test_zero_ct() {
let spendable_balance = pod::ElGamalCiphertext::zeroed();
let spendable_ct: ElGamalCiphertext = spendable_balance.try_into().unwrap();
// spendable_ct should be an encryption of 0 for any public key when
// `PedersenOpening::default()` is used
let public = ElGamalKeypair::default().public;
let balance: u64 = 0;
assert_eq!(
spendable_ct,
public.encrypt_with(balance, &PedersenOpening::default())
);
// homomorphic addition should work as with any other ciphertext
let open = PedersenOpening::random(&mut OsRng);
let transfer_amount_ct = public.encrypt_with(55_u64, &open);
let transfer_amount_pod: pod::ElGamalCiphertext = transfer_amount_ct.into();
let sum = ops::add(&spendable_balance, &transfer_amount_pod).unwrap();
let expected: pod::ElGamalCiphertext = public.encrypt_with(55_u64, &open).into();
assert_eq!(expected, sum);
}
#[test]
fn test_add_to() {
let spendable_balance = pod::ElGamalCiphertext::zeroed();
let added_ct = ops::add_to(&spendable_balance, 55).unwrap();
let public = ElGamalKeypair::default().public;
let expected: pod::ElGamalCiphertext = public
.encrypt_with(55_u64, &PedersenOpening::default())
.into();
assert_eq!(expected, added_ct);
}
#[test]
fn test_subtract_from() {
let amount = 77_u64;
let public = ElGamalKeypair::default().public;
let open = PedersenOpening::random(&mut OsRng);
let encrypted_amount: pod::ElGamalCiphertext = public.encrypt_with(amount, &open).into();
let subtracted_ct = ops::subtract_from(&encrypted_amount, 55).unwrap();
let expected: pod::ElGamalCiphertext = public.encrypt_with(22_u64, &open).into();
assert_eq!(expected, subtracted_ct);
}
/// Split a u64 number into two u32 numbers
fn split_u64_into_u32(amt: u64) -> (u32, u32) {
let lo = amt as u32;
let hi = (amt >> 32) as u32;
(lo, hi)
}
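// e.g. split_u64_into_u32((1 << 32) + 7) == (7, 1)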
#[test]
fn test_transfer_arithmetic() {
// transfer amount
let transfer_amount: u64 = 55;
let (amount_lo, amount_hi) = split_u64_into_u32(transfer_amount);
// generate public keys
let source_pk = ElGamalKeypair::default().public;
let dest_pk = ElGamalKeypair::default().public;
let auditor_pk = ElGamalKeypair::default().public;
// commitments associated with TransferRangeProof
let (comm_lo, open_lo) = Pedersen::new(amount_lo);
let (comm_hi, open_hi) = Pedersen::new(amount_hi);
let comm_lo: pod::PedersenCommitment = comm_lo.into();
let comm_hi: pod::PedersenCommitment = comm_hi.into();
// decryption handles associated with TransferValidityProof
let handle_source_lo: pod::PedersenDecryptHandle =
source_pk.decrypt_handle(&open_lo).into();
let handle_dest_lo: pod::PedersenDecryptHandle = dest_pk.decrypt_handle(&open_lo).into();
let _handle_auditor_lo: pod::PedersenDecryptHandle =
auditor_pk.decrypt_handle(&open_lo).into();
let handle_source_hi: pod::PedersenDecryptHandle =
source_pk.decrypt_handle(&open_hi).into();
let handle_dest_hi: pod::PedersenDecryptHandle = dest_pk.decrypt_handle(&open_hi).into();
let _handle_auditor_hi: pod::PedersenDecryptHandle =
auditor_pk.decrypt_handle(&open_hi).into();
// source spendable and recipient pending
let source_open = PedersenOpening::random(&mut OsRng);
let dest_open = PedersenOpening::random(&mut OsRng);
let source_spendable_ct: pod::ElGamalCiphertext =
source_pk.encrypt_with(77_u64, &source_open).into();
let dest_pending_ct: pod::ElGamalCiphertext =
dest_pk.encrypt_with(77_u64, &dest_open).into();
// program arithmetic for the source account
// 1. Combine commitments and handles
let source_lo_ct: pod::ElGamalCiphertext = (comm_lo, handle_source_lo).into();
let source_hi_ct: pod::ElGamalCiphertext = (comm_hi, handle_source_hi).into();
// 2. Combine lo and hi ciphertexts
let source_combined_ct = ops::combine_lo_hi(&source_lo_ct, &source_hi_ct).unwrap();
// 3. Subtract from available balance
let final_source_spendable =
ops::subtract(&source_spendable_ct, &source_combined_ct).unwrap();
// test
let final_source_open =
source_open - (open_lo.clone() + open_hi.clone() * Scalar::from(ops::TWO_32));
let expected_source: pod::ElGamalCiphertext =
source_pk.encrypt_with(22_u64, &final_source_open).into();
assert_eq!(expected_source, final_source_spendable);
// same for the destination account
// 1. Combine commitments and handles
let dest_lo_ct: pod::ElGamalCiphertext = (comm_lo, handle_dest_lo).into();
let dest_hi_ct: pod::ElGamalCiphertext = (comm_hi, handle_dest_hi).into();
// 2. Combine lo and hi ciphertexts
let dest_combined_ct = ops::combine_lo_hi(&dest_lo_ct, &dest_hi_ct).unwrap();
// 3. Add to pending balance
let final_dest_pending = ops::add(&dest_pending_ct, &dest_combined_ct).unwrap();
let final_dest_open = dest_open + (open_lo + open_hi * Scalar::from(ops::TWO_32));
let expected_dest_ct: pod::ElGamalCiphertext =
dest_pk.encrypt_with(132_u64, &final_dest_open).into();
assert_eq!(expected_dest_ct, final_dest_pending);
}
}
| {
Some(ct_result)
} |
getManagedInstance.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20200801preview
import (
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func LookupManagedInstance(ctx *pulumi.Context, args *LookupManagedInstanceArgs, opts ...pulumi.InvokeOption) (*LookupManagedInstanceResult, error) |
type LookupManagedInstanceArgs struct {
ManagedInstanceName string `pulumi:"managedInstanceName"`
ResourceGroupName string `pulumi:"resourceGroupName"`
}
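// Usage sketch (inside a Pulumi program; names are illustrative):
//   res, err := LookupManagedInstance(ctx, &LookupManagedInstanceArgs{
//       ManagedInstanceName: "my-instance",
//       ResourceGroupName:   "my-rg",
//   })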
// An Azure SQL managed instance.
type LookupManagedInstanceResult struct {
AdministratorLogin *string `pulumi:"administratorLogin"`
Collation *string `pulumi:"collation"`
DnsZone string `pulumi:"dnsZone"`
FullyQualifiedDomainName string `pulumi:"fullyQualifiedDomainName"`
Id string `pulumi:"id"`
Identity *ResourceIdentityResponse `pulumi:"identity"`
InstancePoolId *string `pulumi:"instancePoolId"`
LicenseType *string `pulumi:"licenseType"`
Location string `pulumi:"location"`
MaintenanceConfigurationId *string `pulumi:"maintenanceConfigurationId"`
MinimalTlsVersion *string `pulumi:"minimalTlsVersion"`
Name string `pulumi:"name"`
PrivateEndpointConnections []ManagedInstancePecPropertyResponse `pulumi:"privateEndpointConnections"`
ProvisioningState string `pulumi:"provisioningState"`
ProxyOverride *string `pulumi:"proxyOverride"`
PublicDataEndpointEnabled *bool `pulumi:"publicDataEndpointEnabled"`
Sku *SkuResponse `pulumi:"sku"`
State string `pulumi:"state"`
StorageAccountType *string `pulumi:"storageAccountType"`
StorageSizeInGB *int `pulumi:"storageSizeInGB"`
SubnetId *string `pulumi:"subnetId"`
Tags map[string]string `pulumi:"tags"`
TimezoneId *string `pulumi:"timezoneId"`
Type string `pulumi:"type"`
VCores *int `pulumi:"vCores"`
ZoneRedundant *bool `pulumi:"zoneRedundant"`
}
| {
var rv LookupManagedInstanceResult
err := ctx.Invoke("azure-native:sql/v20200801preview:getManagedInstance", args, &rv, opts...)
if err != nil {
return nil, err
}
return &rv, nil
} |
tests.py | import unittest
from typing import NoReturn
import marshmallow
import urllib3
import vaa
import deal
import pytest
class TestPreDeal:
@pytest.mark.parametrize('correct,incorrect', [(1, -1), (2, -2), (3, -3), (5, -5), (7, -7), (11, -11)])
def test_pre_contract_fulfilled(self, correct, incorrect):
func = deal.pre(lambda x: x > 0)(lambda x: x)
assert func(correct) == correct
with pytest.raises(deal.PreContractError):
func(incorrect)
@pytest.mark.parametrize('correct,incorrect_min,incorrect_max',
[(1, -1, 20), (2, -2, 21), (3, -3, 22), (5, -5, 23), (7, -7, 24), (9, -11, 25)])
def test_chain_all_contracts_fulfilled(self, correct, incorrect_min, incorrect_max):
func = deal.pre(lambda x: x < 10)(lambda x: x)
func = deal.pre(lambda x: x > 0)(func)
assert func(correct) == correct
with pytest.raises(deal.PreContractError):
func(incorrect_min)
with pytest.raises(deal.PreContractError):
func(incorrect_max)
def test_correct_exceptions_raised_on_contract_fail(self):
func = deal.pre(lambda x: x > 0)(lambda x: x)
with pytest.raises(deal.PreContractError):
func(-2)
func = deal.pre(lambda x: x > 0, message='TEST')(lambda x: x)
try:
func(-2)
except AssertionError as e:
assert e.args[0] == 'TEST'
func = deal.pre(lambda x: x > 0, exception=NameError)(lambda x: x)
with pytest.raises(NameError):
func(-2)
func = deal.pre(lambda x: x > 0, exception=NameError('TEST'))(lambda x: x)
with pytest.raises(NameError):
func(-2)
try:
func(-2)
except NameError as e:
assert e.args[0] == 'TEST'
func = deal.pre(lambda x: x > 0, message='TEST', exception=NameError)(lambda x: x)
with pytest.raises(NameError):
func(-2)
try:
func(-2)
except NameError as e:
assert e.args[0] == 'TEST'
def test_raise_error_with_param_on_contract_failure(self):
func = deal.pre(lambda x: x > 0 or 'TEST')(lambda x: x)
assert func(4) == 4
with pytest.raises(deal.PreContractError):
func(-2)
try:
func(-2)
except deal.PreContractError as e:
assert e.args[0] == 'TEST'
def test_method_decoration_name_is_correct(self):
@deal.pre(lambda x: x > 0)
def some_function(x):
return x
assert some_function.__name__ == 'some_function'
def test_class_method_decorator_raises_error_on_contract_fail(self):
class Class:
y = 7
@deal.pre(lambda self, x: x > 0)
def method(self, x):
return x * 2
@deal.pre(lambda self, x: x > 0)
def method2(self, y):
return self.y
assert Class().method(2) == 4
assert Class().method2(2) == 7
with pytest.raises(deal.PreContractError):
Class().method(-2)
with pytest.raises(deal.PreContractError):
Class().method2(-2)
# ignored test
def _test_validator(self, validator):
func = deal.pre(validator)(lambda x: x)
assert func(4) == 4
with pytest.raises(deal.PreContractError):
func(-2)
try:
func(-2)
except deal.PreContractError as e:
assert e.args[0] == 'TEST'
class TestPostDeal:
def test_return_value_fulfils_contract(self):
func = deal.post(lambda x: x > 0)(lambda x: -x)
assert func(-4) == 4
with pytest.raises(deal.PostContractError):
func(4)
class TestInvDeal:
def test_setting_object_attribute_fulfills_contract(self):
@deal.inv(lambda obj: obj.x > 0)
class A:
x = 2
a = A()
a.x = 4
with pytest.raises(deal.InvContractError):
a.x = -2
def test_setting_wrong_args_by_method_raises_error(self):
@deal.inv(lambda obj: obj.x > 0)
class A:
x = 2
def f(self, x):
self.x = x
a = A()
a.f(4)
with pytest.raises(deal.InvContractError):
a.f(-2)
def test_chain_contracts_both_fulfill(self):
@deal.inv(lambda obj: obj.x > 0)
@deal.inv(lambda obj: obj.x < 10)
class A:
x = 2
a = A()
a.x = 4
with pytest.raises(deal.InvContractError):
a.x = -2
with pytest.raises(deal.InvContractError):
a.x = 20
def test_patched_invariants_instance(self):
class A:
x = 2
PatchedA = deal.inv(lambda obj: obj.x > 0)(A) # noQA
a = PatchedA()
assert isinstance(a, PatchedA)
assert isinstance(a, A)
PatchedA2 = deal.inv(lambda obj: obj.x > 0)(PatchedA) # noQA
a = PatchedA2()
assert isinstance(a, PatchedA)
assert isinstance(a, PatchedA2)
assert isinstance(a, A)
assert a.__class__.__name__.count('Invarianted') == 1
class MarshmallowSchemeTests(unittest.TestCase):
def setUp(self):
class _Scheme(marshmallow.Schema):
name = marshmallow.fields.Str()
self.Scheme = vaa.marshmallow(_Scheme)
def test_scheme_string_validation_args_correct(self):
@deal.pre(self.Scheme)
def func(name):
return name * 2
assert func('Chris') == 'ChrisChris'
with pytest.raises(deal.PreContractError):
func(123)
try:
func(123)
except deal.PreContractError as e:
assert e.args[0] == {'name': ['Not a valid string.']}
def test_method_chain_decorator_with_scheme_is_fulfilled(self):
@deal.pre(self.Scheme)
@deal.pre(lambda name: name != 'Oleg')
def func(name):
return name * 2
assert func('Chris') == 'ChrisChris'
with pytest.raises(deal.PreContractError):
func(123)
with pytest.raises(deal.PreContractError):
func('Oleg')
def test_scheme_contract_is_satisfied_when_setting_arg(self):
@deal.inv(self.Scheme)
class User:
name = ''
user = User()
user.name = 'Chris'
with pytest.raises(deal.InvContractError):
user.name = 123
try:
user.name = 123
except deal.InvContractError as e:
assert e.args[0] == {'name': ['Not a valid string.']}
def test_scheme_contract_is_satisfied_within_chain(self):
@deal.inv(lambda user: user.name != 'Oleg')
@deal.inv(self.Scheme)
@deal.inv(lambda user: user.name != 'Chris')
class User:
name = ''
user = User()
user.name = 'Gram'
user = User()
with pytest.raises(deal.InvContractError):
user.name = 'Oleg'
user = User()
with pytest.raises(deal.InvContractError):
user.name = 123
user = User()
with pytest.raises(deal.InvContractError):
user.name = 'Chris'
def test_scheme_contract_is_satisfied_when_passing_args(self):
@deal.pre(self.Scheme)
def func(name):
return name * 2
assert func('Chris') == 'ChrisChris'
assert func(name='Chris') == 'ChrisChris'
@deal.pre(self.Scheme)
def func(**kwargs):
return kwargs['name'] * 3
assert func(name='Chris') == 'ChrisChrisChris'
@deal.pre(self.Scheme)
def func(name='Max'):
return name * 2
assert func() == 'MaxMax'
class TestDefaultScheme(MarshmallowSchemeTests):
def setUp(self):
class MyScheme(deal.Scheme):
def is_valid(self):
if not isinstance(self.data['name'], str):
self.errors = {'name': ['Not a valid string.']}
return False
return True
self.Scheme = MyScheme
class TestRaises:
def test_raises_expects_function_to_raise_error(self):
func = deal.raises(ZeroDivisionError)(lambda x: 1 / x)
with pytest.raises(ZeroDivisionError):
func(0)
func(2)
func = deal.raises(KeyError)(lambda x: 1 / x)
with pytest.raises(deal.RaisesContractError):
func(0)
def test_raises_doesnt_override_another_contract(self):
@deal.raises(ZeroDivisionError)
@deal.offline
def func(do, number):
if do:
http = urllib3.PoolManager()
http.request('GET', 'http://httpbin.org/robots.txt')
1 / number
func(False, 1)
with pytest.raises(deal.OfflineContractError):
func(True, 1)
with pytest.raises(ZeroDivisionError):
func(False, 0)
class TestOffline:
def test_network_request_in_offline_raises_exception(self):
@deal.offline
def func(do):
if do:
http = urllib3.PoolManager()
http.request('GET', 'http://httpbin.org/robots.txt')
func(False)
with pytest.raises(deal.OfflineContractError):
func(True)
def test_network_request_in_offline_and_raises_specified_exception(self):
@deal.offline(exception=KeyError)
def func(do):
if do:
http = urllib3.PoolManager()
http.request('GET', 'http://httpbin.org/robots.txt')
func(False)
with pytest.raises(KeyError):
func(True)
class TestSilent:
def | (self):
@deal.silent
def func(msg):
if msg:
print(msg)
func(None)
with pytest.raises(deal.SilentContractError):
func('bad')
class TestChain:
def test_chained_contract_decorator(self):
@deal.chain(deal.silent, deal.offline)
def func(msg, do):
if msg:
print(msg)
if do:
http = urllib3.PoolManager()
http.request('GET', 'http://httpbin.org/robots.txt')
func(False, False)
with pytest.raises(deal.SilentContractError):
func(True, False)
with pytest.raises(deal.OfflineContractError):
func(False, True)
class TestState:
def setUp(self):
deal.reset()
def tearDown(self):
deal.reset()
def test_contract_state_switch_custom_param(self):
func = deal.pre(lambda x: x > 0, debug=True)(lambda x: x * 2)
deal.switch(debug=False)
func(-2)
deal.switch(debug=True)
with pytest.raises(deal.PreContractError):
func(-2)
def test_contract_state_switch_default_param(self):
func = deal.pre(lambda x: x > 0)(lambda x: x * 2)
deal.switch(main=False)
func(-2)
deal.switch(main=True)
with pytest.raises(deal.PreContractError):
func(-2)
class TestEnsure:
def test_parameters_and_result_fulfill_contract(self):
@deal.ensure(lambda a, b, result: a > 0 and b > 0 and result != 'same number')
def func(a, b):
if a == b:
return 'same number'
else:
return 'different numbers'
assert func(1, 2) == 'different numbers'
with pytest.raises(deal.PostContractError):
func(0, 1)
with pytest.raises(deal.PostContractError):
func(1, 0)
with pytest.raises(deal.PostContractError):
func(1, 1)
class CaseTest(unittest.TestCase):
def setUp(self):
@deal.raises(ZeroDivisionError)
@deal.pre(lambda a, b: a > 0 and b > 0)
def div(a: int, b: int) -> float:
assert isinstance(a, int)
assert isinstance(b, int)
assert a > 0
assert b > 0
return a / b
self.func = div
def test_count(self):
for count in (1, 10, 20, 50):
cases = deal.cases(self.func, count=count)
assert len(list(cases)) == count
def test_params_detected(self):
for case in deal.cases(self.func, count=10):
assert set(case.kwargs) == {'a', 'b'}
def test_params_type(self):
for case in deal.cases(self.func, count=10):
assert type(case.kwargs['a']) is int
assert type(case.kwargs['b']) is int
def test_params_ok_with_excs(self):
results = []
for case in deal.cases(self.func, count=20):
result = case()
results.append(result)
assert any(r is not NoReturn for r in results), 'exception occurred on every run'
assert any(r is NoReturn for r in results), 'no exception occurred'
def test_return_type_checks(self):
def div(a: int, b: int):
return 1
for case in deal.cases(div, count=20):
case()
def div(a: int, b: int) -> str:
return 1
with pytest.raises(TypeError):
case = next(iter(deal.cases(div, count=20)))
case()
def test_explicit_kwargs(self):
def div(a: int, b: int):
assert b == 4
for case in deal.cases(div, kwargs=dict(b=4), count=20):
case()
if __name__ == '__main__':
pytest.main(['tests.py'])
| test_silent_contract_not_allow_print |
seo.py | """
Add a keyword and a description field which are helpful for SEO optimization.
"""
from __future__ import absolute_import, unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from feincms import extensions
class Extension(extensions.Extension):
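# Adds meta_keywords and meta_description fields to the model and exposes them in the admin.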
def handle_model(self): | self.model.add_to_class('meta_description', models.TextField(
_('meta description'),
blank=True,
help_text=_('This text is displayed on the search results page. '
'It is however not used for the SEO ranking. '
'Text longer than 140 characters is truncated.')))
def handle_modeladmin(self, modeladmin):
modeladmin.extend_list(
'search_fields',
['meta_keywords', 'meta_description'],
)
modeladmin.add_extension_options(_('Search engine optimization'), {
'fields': ('meta_keywords', 'meta_description'),
'classes': ('collapse',),
}) | self.model.add_to_class('meta_keywords', models.TextField(
_('meta keywords'),
blank=True,
help_text=_('Keywords are ignored by most search engines.'))) |
iterators.py | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import datetime
from typing import Awaitable, TYPE_CHECKING, TypeVar, Optional, Any, Callable, Union, List, AsyncIterator
from .errors import NoMoreItems
from .utils import snowflake_time, time_snowflake, maybe_coroutine
from .object import Object
from .audit_logs import AuditLogEntry
__all__ = (
'ReactionIterator',
'HistoryIterator',
'AuditLogIterator',
'GuildIterator',
'MemberIterator',
)
if TYPE_CHECKING:
from .types.audit_log import (
AuditLog as AuditLogPayload,
)
from .types.guild import (
Guild as GuildPayload,
)
from .types.message import (
Message as MessagePayload,
)
from .types.user import (
PartialUser as PartialUserPayload,
)
from .types.threads import (
Thread as ThreadPayload,
)
from .member import Member
from .user import User
from .message import Message
from .audit_logs import AuditLogEntry
from .guild import Guild
from .threads import Thread
from .abc import Snowflake
T = TypeVar('T')
OT = TypeVar('OT')
_Func = Callable[[T], Union[OT, Awaitable[OT]]]
OLDEST_OBJECT = Object(id=0)
class _AsyncIterator(AsyncIterator[T]):
__slots__ = ()
async def next(self) -> T:
raise NotImplementedError
def get(self, **attrs: Any) -> Awaitable[Optional[T]]:
def predicate(elem: T):
for attr, val in attrs.items():
nested = attr.split('__')
obj = elem
for attribute in nested: | return False
return True
return self.find(predicate)
async def find(self, predicate: _Func[T, bool]) -> Optional[T]:
while True:
try:
elem = await self.next()
except NoMoreItems:
return None
ret = await maybe_coroutine(predicate, elem)
if ret:
return elem
def chunk(self, max_size: int) -> _ChunkedAsyncIterator[T]:
if max_size <= 0:
raise ValueError('async iterator chunk sizes must be greater than 0.')
return _ChunkedAsyncIterator(self, max_size)
def map(self, func: _Func[T, OT]) -> _MappedAsyncIterator[OT]:
return _MappedAsyncIterator(self, func)
def filter(self, predicate: _Func[T, bool]) -> _FilteredAsyncIterator[T]:
return _FilteredAsyncIterator(self, predicate)
async def flatten(self) -> List[T]:
return [element async for element in self]
async def __anext__(self) -> T:
try:
return await self.next()
except NoMoreItems:
raise StopAsyncIteration()
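# Usage sketch: these iterators back discord.py's async-iterable APIs, e.g.
#   async for message in channel.history(limit=200): ...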
def _identity(x):
return x
class _ChunkedAsyncIterator(_AsyncIterator[List[T]]):
def __init__(self, iterator, max_size):
self.iterator = iterator
self.max_size = max_size
async def next(self) -> List[T]:
ret: List[T] = []
n = 0
while n < self.max_size:
try:
item = await self.iterator.next()
except NoMoreItems:
if ret:
return ret
raise
else:
ret.append(item)
n += 1
return ret
class _MappedAsyncIterator(_AsyncIterator[T]):
def __init__(self, iterator, func):
self.iterator = iterator
self.func = func
async def next(self) -> T:
# this raises NoMoreItems and will propagate appropriately
item = await self.iterator.next()
return await maybe_coroutine(self.func, item)
class _FilteredAsyncIterator(_AsyncIterator[T]):
def __init__(self, iterator, predicate):
self.iterator = iterator
if predicate is None:
predicate = _identity
self.predicate = predicate
async def next(self) -> T:
getter = self.iterator.next
pred = self.predicate
while True:
# propagate NoMoreItems similar to _MappedAsyncIterator
item = await getter()
ret = await maybe_coroutine(pred, item)
if ret:
return item
class ReactionIterator(_AsyncIterator[Union['User', 'Member']]):
def __init__(self, message, emoji, limit=100, after=None):
self.message = message
self.limit = limit
self.after = after
state = message._state
self.getter = state.http.get_reaction_users
self.state = state
self.emoji = emoji
self.guild = message.guild
self.channel_id = message.channel.id
self.users = asyncio.Queue()
async def next(self) -> Union[User, Member]:
if self.users.empty():
await self.fill_users()
try:
return self.users.get_nowait()
except asyncio.QueueEmpty:
raise NoMoreItems()
async def fill_users(self):
# this is a hack because >circular imports<
from .user import User
if self.limit > 0:
retrieve = self.limit if self.limit <= 100 else 100
after = self.after.id if self.after else None
data: List[PartialUserPayload] = await self.getter(
self.channel_id, self.message.id, self.emoji, retrieve, after=after
)
if data:
self.limit -= retrieve
self.after = Object(id=int(data[-1]['id']))
if self.guild is None or isinstance(self.guild, Object):
for element in reversed(data):
await self.users.put(User(state=self.state, data=element))
else:
for element in reversed(data):
member_id = int(element['id'])
member = self.guild.get_member(member_id)
if member is not None:
await self.users.put(member)
else:
await self.users.put(User(state=self.state, data=element))
class HistoryIterator(_AsyncIterator['Message']):
"""Iterator for receiving a channel's message history.
The messages endpoint has two behaviours we care about here:
If ``before`` is specified, the messages endpoint returns the `limit`
newest messages before ``before``, sorted with newest first. For filling over
100 messages, update the ``before`` parameter to the oldest message received.
Messages will be returned in order by time.
If ``after`` is specified, it returns the ``limit`` oldest messages after
``after``, sorted with newest first. For filling over 100 messages, update the
``after`` parameter to the newest message received. If messages are not
reversed, they will be out of order (99-0, 199-100, and so on).
Note that if both ``before`` and ``after`` are specified, ``before`` is ignored by the
messages endpoint.
Parameters
-----------
messageable: :class:`abc.Messageable`
Messageable class to retrieve message history from.
limit: :class:`int`
Maximum number of messages to retrieve
before: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]
Message before which all messages must be.
after: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]
Message after which all messages must be.
around: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]
Message around which all messages must be. Limit max 101. Note that if
limit is an even number, this will return at most limit+1 messages.
oldest_first: Optional[:class:`bool`]
If set to ``True``, return messages in oldest->newest order. Defaults to
``True`` if `after` is specified, otherwise ``False``.
"""
def __init__(self, messageable, limit, before=None, after=None, around=None, oldest_first=None):
if isinstance(before, datetime.datetime):
before = Object(id=time_snowflake(before, high=False))
if isinstance(after, datetime.datetime):
after = Object(id=time_snowflake(after, high=True))
if isinstance(around, datetime.datetime):
around = Object(id=time_snowflake(around))
if oldest_first is None:
self.reverse = after is not None
else:
self.reverse = oldest_first
self.messageable = messageable
self.limit = limit
self.before = before
self.after = after or OLDEST_OBJECT
self.around = around
self._filter = None # message dict -> bool
self.state = self.messageable._state
self.logs_from = self.state.http.logs_from
self.messages = asyncio.Queue()
if self.around:
if self.limit is None:
raise ValueError('history does not support around with limit=None')
if self.limit > 101:
raise ValueError("history max limit 101 when specifying around parameter")
elif self.limit == 101:
self.limit = 100 # Thanks discord
self._retrieve_messages = self._retrieve_messages_around_strategy # type: ignore
if self.before and self.after:
self._filter = lambda m: self.after.id < int(m['id']) < self.before.id
elif self.before:
self._filter = lambda m: int(m['id']) < self.before.id
elif self.after:
self._filter = lambda m: self.after.id < int(m['id'])
else:
if self.reverse:
self._retrieve_messages = self._retrieve_messages_after_strategy # type: ignore
if self.before:
self._filter = lambda m: int(m['id']) < self.before.id
else:
self._retrieve_messages = self._retrieve_messages_before_strategy # type: ignore
if self.after and self.after != OLDEST_OBJECT:
self._filter = lambda m: int(m['id']) > self.after.id
async def next(self) -> Message:
if self.messages.empty():
await self.fill_messages()
try:
return self.messages.get_nowait()
except asyncio.QueueEmpty:
raise NoMoreItems()
def _get_retrieve(self):
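# Clamp this page's fetch size to the API maximum of 100; returns True while more should be fetched.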
l = self.limit
if l is None or l > 100:
r = 100
else:
r = l
self.retrieve = r
return r > 0
async def fill_messages(self):
if not hasattr(self, 'channel'):
# do the required setup
channel = await self.messageable._get_channel()
self.channel = channel
if self._get_retrieve():
data = await self._retrieve_messages(self.retrieve)
if len(data) < 100:
self.limit = 0 # terminate the infinite loop
if self.reverse:
data = reversed(data)
if self._filter:
data = filter(self._filter, data)
channel = self.channel
for element in data:
await self.messages.put(self.state.create_message(channel=channel, data=element))
async def _retrieve_messages(self, retrieve) -> List[Message]:
"""Retrieve messages and update next parameters."""
raise NotImplementedError
async def _retrieve_messages_before_strategy(self, retrieve):
"""Retrieve messages using before parameter."""
before = self.before.id if self.before else None
data: List[MessagePayload] = await self.logs_from(self.channel.id, retrieve, before=before)
if len(data):
if self.limit is not None:
self.limit -= retrieve
self.before = Object(id=int(data[-1]['id']))
return data
async def _retrieve_messages_after_strategy(self, retrieve):
"""Retrieve messages using after parameter."""
after = self.after.id if self.after else None
data: List[MessagePayload] = await self.logs_from(self.channel.id, retrieve, after=after)
if len(data):
if self.limit is not None:
self.limit -= retrieve
self.after = Object(id=int(data[0]['id']))
return data
async def _retrieve_messages_around_strategy(self, retrieve):
"""Retrieve messages using around parameter."""
if self.around:
around = self.around.id if self.around else None
data: List[MessagePayload] = await self.logs_from(self.channel.id, retrieve, around=around)
self.around = None
return data
return []
class AuditLogIterator(_AsyncIterator['AuditLogEntry']):
def __init__(self, guild, limit=None, before=None, after=None, oldest_first=None, user_id=None, action_type=None):
if isinstance(before, datetime.datetime):
before = Object(id=time_snowflake(before, high=False))
if isinstance(after, datetime.datetime):
after = Object(id=time_snowflake(after, high=True))
if oldest_first is None:
self.reverse = after is not None
else:
self.reverse = oldest_first
self.guild = guild
self.loop = guild._state.loop
self.request = guild._state.http.get_audit_logs
self.limit = limit
self.before = before
self.user_id = user_id
self.action_type = action_type
self.after = OLDEST_OBJECT
self._users = {}
self._state = guild._state
self._filter = None # entry dict -> bool
self.entries = asyncio.Queue()
if self.reverse:
self._strategy = self._after_strategy
if self.before:
self._filter = lambda m: int(m['id']) < self.before.id
else:
self._strategy = self._before_strategy
if self.after and self.after != OLDEST_OBJECT:
self._filter = lambda m: int(m['id']) > self.after.id
async def _before_strategy(self, retrieve):
before = self.before.id if self.before else None
data: AuditLogPayload = await self.request(
self.guild.id, limit=retrieve, user_id=self.user_id, action_type=self.action_type, before=before
)
entries = data.get('audit_log_entries', [])
if len(data) and entries:
if self.limit is not None:
self.limit -= retrieve
self.before = Object(id=int(entries[-1]['id']))
return data.get('users', []), entries
async def _after_strategy(self, retrieve):
after = self.after.id if self.after else None
data: AuditLogPayload = await self.request(
self.guild.id, limit=retrieve, user_id=self.user_id, action_type=self.action_type, after=after
)
entries = data.get('audit_log_entries', [])
if len(data) and entries:
if self.limit is not None:
self.limit -= retrieve
self.after = Object(id=int(entries[0]['id']))
return data.get('users', []), entries
async def next(self) -> AuditLogEntry:
if self.entries.empty():
await self._fill()
try:
return self.entries.get_nowait()
except asyncio.QueueEmpty:
raise NoMoreItems()
def _get_retrieve(self):
l = self.limit
if l is None or l > 100:
r = 100
else:
r = l
self.retrieve = r
return r > 0
async def _fill(self):
from .user import User
if self._get_retrieve():
users, data = await self._strategy(self.retrieve)
if len(data) < 100:
self.limit = 0 # terminate the infinite loop
if self.reverse:
data = reversed(data)
if self._filter:
data = filter(self._filter, data)
for user in users:
u = User(data=user, state=self._state)
self._users[u.id] = u
for element in data:
# TODO: remove this if statement later
if element['action_type'] is None:
continue
await self.entries.put(AuditLogEntry(data=element, users=self._users, guild=self.guild))
class GuildIterator(_AsyncIterator['Guild']):
"""Iterator for receiving the client's guilds.
The guilds endpoint has the same two behaviours as described
in :class:`HistoryIterator`:
If ``before`` is specified, the guilds endpoint returns the ``limit``
newest guilds before ``before``, sorted with newest first. For filling over
100 guilds, update the ``before`` parameter to the oldest guild received.
Guilds will be returned in order by time.
If `after` is specified, it returns the ``limit`` oldest guilds after ``after``,
sorted with newest first. For filling over 100 guilds, update the ``after``
parameter to the newest guild received. If guilds are not reversed, they
will be out of order (99-0, 199-100, and so on).
Note that if both ``before`` and ``after`` are specified, ``before`` is ignored by the
guilds endpoint.
Parameters
-----------
bot: :class:`discord.Client`
The client to retrieve the guilds from.
limit: :class:`int`
Maximum number of guilds to retrieve.
before: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]
Object before which all guilds must be.
after: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]
Object after which all guilds must be.
"""
def __init__(self, bot, limit, before=None, after=None):
if isinstance(before, datetime.datetime):
before = Object(id=time_snowflake(before, high=False))
if isinstance(after, datetime.datetime):
after = Object(id=time_snowflake(after, high=True))
self.bot = bot
self.limit = limit
self.before = before
self.after = after
self._filter = None
self.state = self.bot._connection
self.get_guilds = self.bot.http.get_guilds
self.guilds = asyncio.Queue()
if self.before and self.after:
self._retrieve_guilds = self._retrieve_guilds_before_strategy # type: ignore
self._filter = lambda m: int(m['id']) > self.after.id
elif self.after:
self._retrieve_guilds = self._retrieve_guilds_after_strategy # type: ignore
else:
self._retrieve_guilds = self._retrieve_guilds_before_strategy # type: ignore
async def next(self) -> Guild:
if self.guilds.empty():
await self.fill_guilds()
try:
return self.guilds.get_nowait()
except asyncio.QueueEmpty:
raise NoMoreItems()
def _get_retrieve(self):
l = self.limit
if l is None or l > 100:
r = 100
else:
r = l
self.retrieve = r
return r > 0
def create_guild(self, data):
from .guild import Guild
return Guild(state=self.state, data=data)
async def fill_guilds(self):
if self._get_retrieve():
data = await self._retrieve_guilds(self.retrieve)
if self.limit is None or len(data) < 100:
self.limit = 0
if self._filter:
data = filter(self._filter, data)
for element in data:
await self.guilds.put(self.create_guild(element))
async def _retrieve_guilds(self, retrieve) -> List[Guild]:
"""Retrieve guilds and update next parameters."""
raise NotImplementedError
async def _retrieve_guilds_before_strategy(self, retrieve):
"""Retrieve guilds using before parameter."""
before = self.before.id if self.before else None
data: List[GuildPayload] = await self.get_guilds(retrieve, before=before)
if len(data):
if self.limit is not None:
self.limit -= retrieve
self.before = Object(id=int(data[-1]['id']))
return data
async def _retrieve_guilds_after_strategy(self, retrieve):
"""Retrieve guilds using after parameter."""
after = self.after.id if self.after else None
data: List[GuildPayload] = await self.get_guilds(retrieve, after=after)
if len(data):
if self.limit is not None:
self.limit -= retrieve
self.after = Object(id=int(data[0]['id']))
return data
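# Hedged usage sketch (``client`` and ``snowflake`` are illustrative names,
# not part of this module): fetch up to 200 of the client's guilds created
# after a given snowflake.
#
#   async for guild in GuildIterator(client, limit=200, after=snowflake):
#       print(guild.name)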
class MemberIterator(_AsyncIterator['Member']):
def __init__(self, guild, limit=1000, after=None):
if isinstance(after, datetime.datetime):
after = Object(id=time_snowflake(after, high=True))
self.guild = guild
self.limit = limit
self.after = after or OLDEST_OBJECT
self.state = self.guild._state
self.get_members = self.state.http.get_members
self.members = asyncio.Queue()
async def next(self) -> Member:
if self.members.empty():
await self.fill_members()
try:
return self.members.get_nowait()
except asyncio.QueueEmpty:
raise NoMoreItems()
def _get_retrieve(self):
l = self.limit
if l is None or l > 1000:
r = 1000
else:
r = l
self.retrieve = r
return r > 0
async def fill_members(self):
if self._get_retrieve():
after = self.after.id if self.after else None
data = await self.get_members(self.guild.id, self.retrieve, after)
if not data:
# no data, terminate
return
if len(data) < 1000:
self.limit = 0 # terminate loop
self.after = Object(id=int(data[-1]['user']['id']))
for element in reversed(data):
await self.members.put(self.create_member(element))
def create_member(self, data):
from .member import Member
return Member(data=data, guild=self.guild, state=self.state)
class ArchivedThreadIterator(_AsyncIterator['Thread']):
def __init__(
self,
channel_id: int,
guild: Guild,
limit: Optional[int],
joined: bool,
private: bool,
before: Optional[Union[Snowflake, datetime.datetime]] = None,
):
self.channel_id = channel_id
self.guild = guild
self.limit = limit
self.joined = joined
self.private = private
self.http = guild._state.http
if joined and not private:
raise ValueError('Cannot iterate over joined public archived threads')
self.before: Optional[str]
if before is None:
self.before = None
elif isinstance(before, datetime.datetime):
if joined:
self.before = str(time_snowflake(before, high=False))
else:
self.before = before.isoformat()
else:
if joined:
self.before = str(before.id)
else:
self.before = snowflake_time(before.id).isoformat()
self.update_before: Callable[[ThreadPayload], str] = self.get_archive_timestamp
if joined:
self.endpoint = self.http.get_joined_private_archived_threads
self.update_before = self.get_thread_id
elif private:
self.endpoint = self.http.get_private_archived_threads
else:
self.endpoint = self.http.get_public_archived_threads
self.queue: asyncio.Queue[Thread] = asyncio.Queue()
self.has_more: bool = True
async def next(self) -> Thread:
if self.queue.empty():
await self.fill_queue()
try:
return self.queue.get_nowait()
except asyncio.QueueEmpty:
raise NoMoreItems()
@staticmethod
def get_archive_timestamp(data: ThreadPayload) -> str:
return data['thread_metadata']['archive_timestamp']
@staticmethod
def get_thread_id(data: ThreadPayload) -> str:
return data['id'] # type: ignore
async def fill_queue(self) -> None:
if not self.has_more:
raise NoMoreItems()
limit = 50 if self.limit is None else max(self.limit, 50)
data = await self.endpoint(self.channel_id, before=self.before, limit=limit)
# This stuff is obviously WIP because 'members' is always empty
threads: List[ThreadPayload] = data.get('threads', [])
for d in reversed(threads):
self.queue.put_nowait(self.create_thread(d))
self.has_more = data.get('has_more', False)
if self.limit is not None:
self.limit -= len(threads)
if self.limit <= 0:
self.has_more = False
if self.has_more:
self.before = self.update_before(threads[-1])
def create_thread(self, data: ThreadPayload) -> Thread:
from .threads import Thread
return Thread(guild=self.guild, state=self.guild._state, data=data) | obj = getattr(obj, attribute)
if obj != val: |
setup.py | import os
from setuptools import setup, find_packages
def read(fname):
|
def get_install_requires():
install_requires = [
'tornado',
'sqlalchemy',
'six',
'requests',
'Pillow',
'dateparser',
'prompt_toolkit>=2.0.9',
]
try:
from collections import OrderedDict
except ImportError:
install_requires.append('ordereddict')
return install_requires
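# Hedged note: an alternative to the import probe above is a PEP 508
# environment marker, which modern setuptools also understands, e.g.
# 'ordereddict; python_version < "2.7"'.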
setup(
name='jet_bridge',
version=__import__('jet_bridge').VERSION,
description='',
long_description=read('README.md'),
long_description_content_type='text/markdown',
author='Denis Kildishev',
author_email='[email protected]',
url='https://github.com/jet-admin/jet-bridge',
packages=find_packages(),
license='MIT',
classifiers=[
],
zip_safe=False,
include_package_data=True,
install_requires=get_install_requires(),
entry_points={
'console_scripts': [
'jet_bridge = jet_bridge.__main__:main',
],
},
)
| path = os.path.join(os.path.dirname(__file__), fname)
try:
file = open(path, encoding='utf-8')
except TypeError:
file = open(path)
return file.read() |
convert.rs | use std::io;
use noodles_vcf as vcf;
use super::Record;
use crate::header::StringMaps;
impl Record {
/// Converts a VCF record to a BCF record.
///
/// # Examples
///
/// ```
/// use noodles_bcf as bcf;
/// use noodles_vcf::{self as vcf, record::Position};
///
/// let raw_header = "##fileformat=VCFv4.3\n##contig=<ID=sq0>\n#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n";
/// let header: vcf::Header = raw_header.parse()?;
    /// let string_maps: bcf::header::StringMaps = raw_header.parse()?;
///
/// let record = bcf::Record::default();
///
/// let actual = record.try_into_vcf_record(&header, &string_maps)?;
/// let expected = vcf::Record::builder()
/// .set_chromosome("sq0".parse()?)
/// .set_position(Position::try_from(1)?)
/// .set_reference_bases("A".parse()?)
/// .build()?;
///
/// assert_eq!(actual, expected);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub fn try_into_vcf_record(
&self,
header: &vcf::Header,
string_maps: &StringMaps,
) -> io::Result<vcf::Record> |
}
| {
let chromosome = string_maps
.contigs()
.get_index(self.chromosome_id())
.ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "invalid chrom"))
.and_then(|chrom| {
chrom
.parse()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))
})?;
let filters = self
.filters()
.try_into_vcf_record_filters(string_maps.strings())?;
let info = self
.info()
.try_into_vcf_record_info(header, string_maps.strings())?;
let genotypes = self
.genotypes()
.try_into_vcf_record_genotypes(header, string_maps.strings())?;
let mut builder = vcf::Record::builder()
.set_chromosome(chromosome)
.set_position(self.position())
.set_ids(self.ids().clone())
.set_reference_bases(self.reference_bases().clone())
.set_alternate_bases(self.alternate_bases().clone())
.set_info(info)
.set_genotypes(genotypes);
if let Some(quality_score) = self.quality_score() {
builder = builder.set_quality_score(quality_score);
}
if let Some(filters) = filters {
builder = builder.set_filters(filters);
}
builder
.build()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))
} |
tm_models.py | """
This file is part of the OpenProtein project.
For license information, please see the LICENSE file in the root directory.
"""
import sys
from enum import Enum
import glob
import pickle
import numpy as np
import torch
import torch.autograd as autograd
import torch.nn as nn
import openprotein
from experiments.tmhmm3.tm_util import label_list_to_topology
from experiments.tmhmm3.tm_util import get_predicted_type_from_labels
from experiments.tmhmm3.tm_util import remapped_labels_hmm_to_orginal_labels
from experiments.tmhmm3.tm_util import is_topologies_equal
from experiments.tmhmm3.tm_util import original_labels_to_fasta
from pytorchcrf.torchcrf import CRF
from util import write_out, get_experiment_id
# seed random generator for reproducibility
torch.manual_seed(1)
class TMHMM3(openprotein.BaseModel):
def __init__(self,
embedding,
hidden_size,
use_gpu,
model_mode,
use_marg_prob,
type_predictor_model,
profile_path):
super(TMHMM3, self).__init__(embedding, use_gpu)
# initialize model variables
num_tags = 5
num_labels = 5
self.max_signal_length = 67
if model_mode == TMHMM3Mode.LSTM_CRF_HMM:
num_tags += 2 * 40 + self.max_signal_length
elif model_mode == TMHMM3Mode.LSTM_CRF_MARG:
num_tags = num_tags * 4 # 4 different types
# num_labels = num_tags # 4 different types
self.hidden_size = hidden_size
self.use_gpu = use_gpu
self.use_marg_prob = use_marg_prob
self.model_mode = model_mode
self.embedding = embedding
self.profile_path = profile_path
self.bi_lstm = nn.LSTM(self.get_embedding_size(),
self.hidden_size,
num_layers=1,
bidirectional=True)
self.hidden_to_labels = nn.Linear(self.hidden_size * 2, num_labels) # * 2 for bidirectional
self.hidden_layer = None
crf_start_mask = torch.ones(num_tags).byte()
crf_end_mask = torch.ones(num_tags).byte()
if model_mode == TMHMM3Mode.LSTM_CRF_HMM:
allowed_transitions = [
(3, 3), (4, 4),
(3, 5), (4, 45)]
for i in range(5, 45 - 1):
allowed_transitions.append((i, i + 1))
if 8 < i < 43:
allowed_transitions.append((8, i))
allowed_transitions.append((44, 4))
for i in range(45, 85 - 1):
allowed_transitions.append((i, i + 1))
if 48 < i < 83:
allowed_transitions.append((48, i))
allowed_transitions.append((84, 3))
for i in range(85, 151):
allowed_transitions.append((i, i + 1))
allowed_transitions.append((2, i))
allowed_transitions.append((2, 151))
allowed_transitions.append((2, 4))
allowed_transitions.append((151, 4))
crf_start_mask[2] = 0
crf_start_mask[3] = 0
crf_start_mask[4] = 0
crf_end_mask[3] = 0
crf_end_mask[4] = 0
elif model_mode == TMHMM3Mode.LSTM_CRF_MARG:
allowed_transitions = [
(0, 0), (1, 1), (3, 3), (4, 4), (3, 0), (0, 4), (4, 1), (1, 3),
(5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (8, 5), (5, 9), (9, 6), (6, 8), (7, 9),
(12, 12), (14, 14), (12, 14),
(18, 18),
]
crf_start_mask[3] = 0
crf_start_mask[4] = 0
crf_start_mask[7] = 0
crf_start_mask[8] = 0
crf_start_mask[9] = 0
crf_start_mask[12] = 0
crf_start_mask[18] = 0
crf_end_mask[3] = 0
crf_end_mask[4] = 0
crf_end_mask[8] = 0
crf_end_mask[9] = 0
crf_end_mask[14] = 0
crf_end_mask[18] = 0
else:
allowed_transitions = [
(0, 0), (1, 1), (2, 2), (3, 3), (4, 4),
(3, 0), (0, 4), (4, 1), (1, 3), (2, 4)]
crf_start_mask[2] = 0
crf_start_mask[3] = 0
crf_start_mask[4] = 0
crf_end_mask[3] = 0
crf_end_mask[4] = 0
self.allowed_transitions = allowed_transitions
self.crf_model = CRF(num_tags)
self.type_classifier = type_predictor_model
self.type_tm_classier = None
self.type_sp_classier = None
crf_transitions_mask = torch.ones((num_tags, num_tags)).byte()
self.label_01loss_values = []
self.type_01loss_values = []
self.topology_01loss_values = []
# if on GPU, move state to GPU memory
if self.use_gpu:
self.crf_model = self.crf_model.cuda()
self.bi_lstm = self.bi_lstm.cuda()
self.hidden_to_labels = self.hidden_to_labels.cuda()
crf_transitions_mask = crf_transitions_mask.cuda()
crf_start_mask = crf_start_mask.cuda()
crf_end_mask = crf_end_mask.cuda()
# compute mask matrix from allow transitions list
for i in range(num_tags):
for k in range(num_tags):
if (i, k) in self.allowed_transitions:
crf_transitions_mask[i][k] = 0
# generate masked transition parameters
crf_start_transitions, crf_end_transitions, crf_transitions = \
generate_masked_crf_transitions(
self.crf_model, (crf_start_mask, crf_transitions_mask, crf_end_mask)
)
# initialize CRF
initialize_crf_parameters(self.crf_model,
start_transitions=crf_start_transitions,
end_transitions=crf_end_transitions,
transitions=crf_transitions)
def get_embedding_size(self):
if self.embedding == "BLOSUM62":
            return 24  # BLOSUM matrix has size 24
elif self.embedding == "PROFILE":
return 51 # protein profiles have size 51
def flatten_parameters(self):
self.bi_lstm.flatten_parameters()
def encode_amino_acid(self, letter):
if self.embedding == "BLOSUM62":
# blosum encoding
if not globals().get('blosum_encoder'):
blosum = \
"""4,-1,-2,-2,0,-1,-1,0,-2,-1,-1,-1,-1,-2,-1,1,0,-3,-2,0,-2,-1,0,-4
-1,5,0,-2,-3,1,0,-2,0,-3,-2,2,-1,-3,-2,-1,-1,-3,-2,-3,-1,0,-1,-4
-2,0,6,1,-3,0,0,0,1,-3,-3,0,-2,-3,-2,1,0,-4,-2,-3,3,0,-1,-4
-2,-2,1,6,-3,0,2,-1,-1,-3,-4,-1,-3,-3,-1,0,-1,-4,-3,-3,4,1,-1,-4
0,-3,-3,-3,9,-3,-4,-3,-3,-1,-1,-3,-1,-2,-3,-1,-1,-2,-2,-1,-3,-3,-2,-4
-1,1,0,0,-3,5,2,-2,0,-3,-2,1,0,-3,-1,0,-1,-2,-1,-2,0,3,-1,-4
-1,0,0,2,-4,2,5,-2,0,-3,-3,1,-2,-3,-1,0,-1,-3,-2,-2,1,4,-1,-4
0,-2,0,-1,-3,-2,-2,6,-2,-4,-4,-2,-3,-3,-2,0,-2,-2,-3,-3,-1,-2,-1,-4
-2,0,1,-1,-3,0,0,-2,8,-3,-3,-1,-2,-1,-2,-1,-2,-2,2,-3,0,0,-1,-4
-1,-3,-3,-3,-1,-3,-3,-4,-3,4,2,-3,1,0,-3,-2,-1,-3,-1,3,-3,-3,-1,-4
-1,-2,-3,-4,-1,-2,-3,-4,-3,2,4,-2,2,0,-3,-2,-1,-2,-1,1,-4,-3,-1,-4
-1,2,0,-1,-3,1,1,-2,-1,-3,-2,5,-1,-3,-1,0,-1,-3,-2,-2,0,1,-1,-4
-1,-1,-2,-3,-1,0,-2,-3,-2,1,2,-1,5,0,-2,-1,-1,-1,-1,1,-3,-1,-1,-4
-2,-3,-3,-3,-2,-3,-3,-3,-1,0,0,-3,0,6,-4,-2,-2,1,3,-1,-3,-3,-1,-4
-1,-2,-2,-1,-3,-1,-1,-2,-2,-3,-3,-1,-2,-4,7,-1,-1,-4,-3,-2,-2,-1,-2,-4
1,-1,1,0,-1,0,0,0,-1,-2,-2,0,-1,-2,-1,4,1,-3,-2,-2,0,0,0,-4
0,-1,0,-1,-1,-1,-1,-2,-2,-1,-1,-1,-1,-2,-1,1,5,-2,-2,0,-1,-1,0,-4
-3,-3,-4,-4,-2,-2,-3,-2,-2,-3,-2,-3,-1,1,-4,-3,-2,11,2,-3,-4,-3,-2,-4
-2,-2,-2,-3,-2,-1,-2,-3,2,-1,-1,-2,-1,3,-3,-2,-2,2,7,-1,-3,-2,-1,-4
0,-3,-3,-3,-1,-2,-2,-3,-3,3,1,-2,1,-1,-2,-2,0,-3,-1,4,-3,-2,-1,-4
-2,-1,3,4,-3,0,1,-1,0,-3,-4,0,-3,-3,-2,0,-1,-4,-3,-3,4,1,-1,-4
-1,0,0,1,-3,3,4,-2,0,-3,-3,1,-1,-3,-1,0,-1,-3,-2,-2,1,4,-1,-4
0,-1,-1,-1,-2,-1,-1,-1,-1,-1,-1,-1,-1,-1,-2,0,0,-2,-1,-1,-1,-1,-1,-4
-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,1""" \
.replace('\n', ',')
blosum_matrix = np.fromstring(blosum, sep=",").reshape(24, 24)
blosum_key = "A,R,N,D,C,Q,E,G,H,I,L,K,M,F,P,S,T,W,Y,V,B,Z,X,U".split(",")
key_map = {}
for idx, value in enumerate(blosum_key):
key_map[value] = list([int(v) for v in blosum_matrix[idx].astype('int')])
globals().__setitem__("blosum_encoder", key_map)
return globals().get('blosum_encoder')[letter]
elif self.embedding == "ONEHOT":
# one hot encoding
one_hot_key = "A,R,N,D,C,Q,E,G,H,I,L,K,M,F,P,S,T,W,Y,V,B,Z,X,U".split(",")
arr = []
for idx, k in enumerate(one_hot_key):
if k == letter:
arr.append(1)
else:
arr.append(0)
return arr
elif self.embedding == "PYTORCH":
key_id = "A,R,N,D,C,Q,E,G,H,I,L,K,M,F,P,S,T,W,Y,V,B,Z,X,U".split(",")
for idx, k in enumerate(key_id):
if k == letter:
return idx
def embed(self, prot_aa_list):
embed_list = []
for aa_list in prot_aa_list:
if self.embedding == "PYTORCH":
tensor = torch.LongTensor(tensor)
elif self.embedding == "PROFILE":
if not globals().get('profile_encoder'):
print("Load profiles...")
files = glob.glob(self.profile_path.strip("/") + "/*")
profile_dict = {}
for profile_file in files:
profile = pickle.load(open(profile_file, "rb")).popitem()[1]
profile_dict[profile["seq"]] = torch.from_numpy(profile["profile"]).float()
globals().__setitem__("profile_encoder", profile_dict)
print("Loaded profiles")
tensor = globals().get('profile_encoder')[aa_list]
else:
tensor = list([self.encode_amino_acid(aa) for aa in aa_list])
tensor = torch.FloatTensor(tensor)
if self.use_gpu:
tensor = tensor.cuda()
embed_list.append(tensor)
return embed_list
def init_hidden(self, minibatch_size):
# number of layers (* 2 since bidirectional), minibatch_size, hidden size
initial_hidden_state = torch.zeros(1 * 2, minibatch_size, self.hidden_size)
initial_cell_state = torch.zeros(1 * 2, minibatch_size, self.hidden_size)
if self.use_gpu:
initial_hidden_state = initial_hidden_state.cuda()
initial_cell_state = initial_cell_state.cuda()
self.hidden_layer = (autograd.Variable(initial_hidden_state),
autograd.Variable(initial_cell_state))
def _get_network_emissions(self, input_sequences):
batch_sizes = torch.LongTensor(list([i.size(0) for i in input_sequences]))
pad_seq_embed = torch.nn.utils.rnn.pad_sequence(input_sequences)
minibatch_size = len(input_sequences)
self.init_hidden(minibatch_size)
bi_lstm_out, self.hidden_layer = self.bi_lstm(pad_seq_embed, self.hidden_layer)
emissions = self.hidden_to_labels(bi_lstm_out)
if self.model_mode == TMHMM3Mode.LSTM_CRF_HMM:
inout_select = torch.LongTensor([0])
outin_select = torch.LongTensor([1])
signal_select = torch.LongTensor([2])
if self.use_gpu:
inout_select = inout_select.cuda()
outin_select = outin_select.cuda()
signal_select = signal_select.cuda()
inout = torch.index_select(emissions, 2, autograd.Variable(inout_select))
outin = torch.index_select(emissions, 2, autograd.Variable(outin_select))
signal = torch.index_select(emissions, 2, autograd.Variable(signal_select))
emissions = torch.cat((emissions, inout.expand(-1, len(batch_sizes), 40),
outin.expand(-1, len(batch_sizes), 40),
signal.expand(-1, len(batch_sizes), self.max_signal_length)), 2)
elif self.model_mode == TMHMM3Mode.LSTM_CRF_MARG:
emissions = emissions.repeat(1, 1, 4)
return emissions, batch_sizes
def batch_sizes_to_mask(self, batch_sizes):
mask = torch.autograd.Variable(torch.t(torch.ByteTensor(
[[1] * int(batch_size) + [0] * (int(batch_sizes[0])
- int(batch_size)) for batch_size in batch_sizes]
)))
if self.use_gpu:
mask = mask.cuda()
return mask
def compute_loss(self, training_minibatch):
_, labels_list, remapped_labels_list_crf_hmm, remapped_labels_list_crf_marg, \
_prot_type_list, _prot_topology_list, _prot_name_list, original_aa_string, \
_original_label_string = training_minibatch
minibatch_size = len(labels_list)
if self.model_mode == TMHMM3Mode.LSTM_CRF_MARG:
labels_to_use = remapped_labels_list_crf_marg
elif self.model_mode == TMHMM3Mode.LSTM_CRF_HMM:
labels_to_use = remapped_labels_list_crf_hmm
else:
labels_to_use = labels_list
input_sequences = [autograd.Variable(x) for x in self.embed(original_aa_string)]
actual_labels = torch.nn.utils.rnn.pad_sequence([autograd.Variable(l)
for l in labels_to_use])
emissions, batch_sizes = self._get_network_emissions(input_sequences)
if self.model_mode == TMHMM3Mode.LSTM:
prediction = emissions.transpose(0, 1).contiguous().view(-1, emissions.size(-1))
target = actual_labels.transpose(0, 1).contiguous().view(-1, 1)
            losses = -torch.gather(nn.functional.log_softmax(prediction, dim=1),
dim=1, index=target).view(*actual_labels
.transpose(0, 1).size())
mask_expand = torch.range(0, batch_sizes.data.max() - 1).long() \
.unsqueeze(0).expand(batch_sizes.size(0), batch_sizes.data.max())
if self.use_gpu:
mask_expand = mask_expand.cuda()
batch_sizes = batch_sizes.cuda()
mask = mask_expand < batch_sizes.unsqueeze(1).expand_as(mask_expand)
loss = (losses * mask.float()).sum() / batch_sizes.float().sum()
else:
mask = (self.batch_sizes_to_mask(batch_sizes))
loss = -1 * self.crf_model(emissions, actual_labels, mask=mask) / minibatch_size
if float(loss) > 100000: # if loss is this large, an invalid tx must have been found
for idx, batch_size in enumerate(batch_sizes):
last_label = None
for i in range(batch_size):
label = int(actual_labels[i][idx])
write_out(str(label) + ",", end='')
if last_label is not None and (last_label, label) \
not in self.allowed_transitions:
write_out("Error: invalid transition found")
write_out((last_label, label))
sys.exit(1)
last_label = label
write_out(" ")
return loss
def forward(self, input_sequences, forced_types=None):
emissions, batch_sizes = self._get_network_emissions(input_sequences)
if self.model_mode == TMHMM3Mode.LSTM:
output = torch.nn.functional.log_softmax(emissions, dim=2)
_, predicted_labels = output[:, :, 0:5].max(dim=2)
predicted_labels = list(
[list(map(int, x[:batch_sizes[idx]])) for idx, x in enumerate(predicted_labels
.transpose(0, 1))])
predicted_labels = list(
torch.cuda.LongTensor(l) if self.use_gpu else torch.LongTensor(l)
for l in predicted_labels)
predicted_topologies = list(map(label_list_to_topology, predicted_labels))
predicted_types = torch.LongTensor(list(map(get_predicted_type_from_labels,
predicted_labels)))
else:
mask = self.batch_sizes_to_mask(batch_sizes)
labels_predicted = list(torch.cuda.LongTensor(l) if self.use_gpu
else torch.LongTensor(l) for l in
self.crf_model.decode(emissions, mask=mask))
if self.model_mode == TMHMM3Mode.LSTM_CRF_HMM:
predicted_labels = list(map(remapped_labels_hmm_to_orginal_labels,
labels_predicted))
predicted_types = torch.LongTensor(list(map(get_predicted_type_from_labels,
predicted_labels)))
elif self.model_mode == TMHMM3Mode.LSTM_CRF_MARG:
alpha = self.crf_model._compute_log_alpha(emissions, mask, run_backwards=False)
z_value = alpha[alpha.size(0) - 1] + self.crf_model.end_transitions
types = z_value.view((-1, 4, 5))
types = logsumexp(types, dim=2)
_, predicted_types = torch.max(types, dim=1)
predicted_labels = list([l % 5 for l in labels_predicted]) # remap
else:
predicted_labels = labels_predicted
predicted_types = torch.LongTensor(list(map(get_predicted_type_from_labels,
predicted_labels)))
if self.use_gpu:
predicted_types = predicted_types.cuda()
predicted_topologies = list(map(label_list_to_topology, predicted_labels))
# if all O's, change to all I's (by convention)
for idx, labels in enumerate(predicted_labels):
if torch.eq(labels, 4).all():
predicted_labels[idx] = labels - 1
return predicted_labels, predicted_types if forced_types \
is None else forced_types, predicted_topologies
def evaluate_model(self, data_loader):
validation_loss_tracker = []
validation_type_loss_tracker = []
validation_topology_loss_tracker = []
confusion_matrix = np.zeros((5, 5), dtype=np.int64)
protein_names = []
protein_aa_strings = []
protein_label_actual = []
protein_label_prediction = [] | validation_loss_tracker.append(self.compute_loss(minibatch).detach())
_, _, _, _, prot_type_list, prot_topology_list, \
prot_name_list, original_aa_string, original_label_string = minibatch
input_sequences = [x for x in self.embed(original_aa_string)]
predicted_labels, predicted_types, predicted_topologies = self(input_sequences)
protein_names.extend(prot_name_list)
protein_aa_strings.extend(original_aa_string)
protein_label_actual.extend(original_label_string)
# if we're using an external type predictor
if self.type_classifier is not None:
predicted_labels_type_classifer, \
predicted_types_type_classifier, \
predicted_topologies_type_classifier = self.type_classifier(input_sequences)
for idx, actual_type in enumerate(prot_type_list):
predicted_type = predicted_types[idx]
predicted_topology = predicted_topologies[idx]
predicted_labels_for_protein = predicted_labels[idx]
if self.type_classifier is not None:
if predicted_type != predicted_types_type_classifier[idx]:
# we must always use the type predicted by the type predictor if available
predicted_type = predicted_types_type_classifier[idx]
predicted_topology = predicted_topologies_type_classifier[idx]
predicted_labels_for_protein = predicted_labels_type_classifer[idx]
prediction_topology_match = is_topologies_equal(prot_topology_list[idx],
predicted_topology, 5)
if actual_type == predicted_type:
validation_type_loss_tracker.append(0)
# if we guessed the type right for SP+GLOB or GLOB,
# count the topology as correct
if actual_type == 2 or actual_type == 3 or prediction_topology_match:
validation_topology_loss_tracker.append(0)
confusion_matrix[actual_type][4] += 1
else:
validation_topology_loss_tracker.append(1)
confusion_matrix[actual_type][predicted_type] += 1
# if the type was correctly guessed to be 2 or 3 by the type classifier,
# use its topology prediction
if (actual_type in (2, 3)) and self.type_classifier is not None:
protein_label_prediction.append(predicted_labels_type_classifer[idx])
else:
protein_label_prediction.append(predicted_labels_for_protein)
else:
confusion_matrix[actual_type][predicted_type] += 1
validation_type_loss_tracker.append(1)
validation_topology_loss_tracker.append(1)
protein_label_prediction.append(predicted_labels_for_protein)
write_out(confusion_matrix)
_loss = float(torch.stack(validation_loss_tracker).mean())
type_loss = float(torch.FloatTensor(validation_type_loss_tracker).mean().detach())
topology_loss = float(torch.FloatTensor(validation_topology_loss_tracker).mean().detach())
self.type_01loss_values.append(type_loss)
self.topology_01loss_values.append(topology_loss)
if get_experiment_id() is not None and "TYPE" in get_experiment_id():
# optimize for type
validation_loss = type_loss
else:
# optimize for topology
validation_loss = topology_loss
data = {}
data['type_01loss_values'] = self.type_01loss_values
data['topology_01loss_values'] = self.topology_01loss_values
data['confusion_matrix'] = confusion_matrix.tolist()
return validation_loss, data, (
protein_names, protein_aa_strings, protein_label_actual, protein_label_prediction)
def post_process_prediction_data(prediction_data):
data = []
for (name, aa_string, actual, prediction) in zip(*prediction_data):
data.append("\n".join([">" + name,
aa_string,
actual,
original_labels_to_fasta(prediction)]))
return "\n".join(data)
def logsumexp(data, dim):
return data.max(dim)[0] + torch.log(torch.sum(
torch.exp(data - data.max(dim)[0].unsqueeze(dim)), dim))
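# Hedged sanity check (illustrative, not executed during training): the
# logsumexp above should agree with torch.logsumexp on arbitrary input.
#
#   x = torch.randn(3, 5)
#   assert torch.allclose(logsumexp(x, dim=1), torch.logsumexp(x, dim=1))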
def initialize_crf_parameters(crf_model,
start_transitions=None,
end_transitions=None,
transitions=None) -> None:
"""Initialize the transition parameters.
The parameters will be initialized randomly from a uniform distribution
between -0.1 and 0.1, unless given explicitly as an argument.
"""
if start_transitions is None:
nn.init.uniform(crf_model.start_transitions, -0.1, 0.1)
else:
crf_model.start_transitions.data = start_transitions
if end_transitions is None:
nn.init.uniform(crf_model.end_transitions, -0.1, 0.1)
else:
crf_model.end_transitions.data = end_transitions
if transitions is None:
nn.init.uniform(crf_model.transitions, -0.1, 0.1)
else:
crf_model.transitions.data = transitions
def generate_masked_crf_transitions(crf_model, transition_mask):
start_transitions_mask, transitions_mask, end_transition_mask = transition_mask
start_transitions = crf_model.start_transitions.data.clone()
end_transitions = crf_model.end_transitions.data.clone()
transitions = crf_model.transitions.data.clone()
if start_transitions_mask is not None:
start_transitions.masked_fill_(start_transitions_mask, -100000000)
if end_transition_mask is not None:
end_transitions.masked_fill_(end_transition_mask, -100000000)
if transitions_mask is not None:
transitions.masked_fill_(transitions_mask, -100000000)
return start_transitions, end_transitions, transitions
class TMHMM3Mode(Enum):
LSTM = 1
LSTM_CRF = 2
LSTM_CRF_HMM = 3
LSTM_CRF_MARG = 4 | for _, minibatch in enumerate(data_loader, 0): |
order.py | class Order():
def __init__(self, pair, direction, amount, price):
|
def __str__(self):
return f"{self.direction} {self.amount} {self.pair} @ {self.price}"
def __repr__(self):
return f"Order('{self.pair}', '{self.direction}', {self.amount}, {self.price})"
def __eq__(self, other):
return self.pair == other.pair and \
self.direction == other.direction and \
self.amount == other.amount and \
self.price == other.price
def __lt__(self, other):
return (self.pair, self.direction, self.amount, self.price) < \
(other.pair, other.direction, other.amount, other.price)
def __hash__(self):
return hash((self.pair, self.direction, self.amount, self.price))
| if direction.upper() not in ['BUY', 'SELL']:
raise ValueError("{} is not a valid direction".format(direction))
self.pair = pair
self.direction = direction
self.amount = float(amount)
self.price = float(price)
self.type_ = None |
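        # Hedged usage sketch (values are illustrative): construction
        # validates the direction and coerces numeric fields to float.
        #   order = Order('BTC/USD', 'BUY', 0.5, 20000)
        #   str(order)  # 'BUY 0.5 BTC/USD @ 20000.0'
        #   Order('BTC/USD', 'HOLD', 1, 1)  # raises ValueError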
controlpanel.py | # -*- coding: utf-8 -*-
from plone.app.registry.browser import controlpanel
from plone.protect.interfaces import IDisableCSRFProtection
from collective.solr.interfaces import ISolrSchema, _
from plone.restapi.controlpanels import RegistryConfigletPanel
from Products.CMFPlone.utils import safe_unicode
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from Products.PythonScripts.PythonScript import PythonScript
from zope.component import adapter
from zope.interface import alsoProvides
from zope.interface import Interface
@adapter(Interface, Interface)
class SolrControlpanelAdapter(RegistryConfigletPanel):
|
class SolrControlPanelForm(controlpanel.RegistryEditForm):
id = "SolrControlPanel"
label = _("label_solr_settings", default="Solr settings")
schema = ISolrSchema
schema_prefix = "collective.solr"
boost_script_id = "solr_boost_index_values"
def getContent(self):
content = super(SolrControlPanelForm, self).getContent()
portal = self.context
if self.boost_script_id in portal:
boost_script = safe_unicode(portal[self.boost_script_id].read())
# strip script metadata for display
content.boost_script = "\n".join(
[
line
for line in boost_script.splitlines()
if not line.startswith("##")
]
)
alsoProvides(self.request, IDisableCSRFProtection)
return content
def applyChanges(self, data):
changes = super(SolrControlPanelForm, self).applyChanges(data)
boost_script = data.get("boost_script", "")
if "##parameters=data\n" not in boost_script:
boost_script = "##parameters=data\n" + boost_script
portal = self.context
if self.boost_script_id not in self.context:
# "special" documents get boosted during indexing...
portal[self.boost_script_id] = PythonScript(self.boost_script_id)
# since we create a PythonScript in ZODB we need to
# disable CSRF protection
alsoProvides(self.request, IDisableCSRFProtection)
portal[self.boost_script_id].write(boost_script)
return changes
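    # Hedged note on the round trip above: getContent() strips "##" metadata
    # lines for display, while applyChanges() re-prepends the PythonScript
    # parameter declaration, so a stored boost script always begins with
    # "##parameters=data".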
class SolrControlPanel(controlpanel.ControlPanelFormWrapper):
form = SolrControlPanelForm
index = ViewPageTemplateFile("controlpanel.pt")
| schema = ISolrSchema
configlet_id = "SolrSettings"
configlet_category_id = "Products"
schema_prefix = "collective.solr" |
dc.py | # -*- coding: utf-8 -*-
'''
feedgen.ext.dc
~~~~~~~~~~~~~~~~~~~
Extends the FeedGenerator to add Dublin Core Elements to the feeds.
Descriptions partly taken from
http://dublincore.org/documents/dcmi-terms/#elements-coverage
:copyright: 2013-2017, Lars Kiesow <[email protected]>
:license: FreeBSD and LGPL, see license.* for more details.
'''
from feedgen.ext.base import BaseExtension
from feedgen.util import xml_elem
class DcBaseExtension(BaseExtension):
'''Dublin Core Elements extension for podcasts.
'''
def __init__(self):
# http://dublincore.org/documents/usageguide/elements.shtml
# http://dublincore.org/documents/dces/
# http://dublincore.org/documents/dcmi-terms/
self._dcelem_contributor = None
self._dcelem_coverage = None
self._dcelem_creator = None
self._dcelem_date = None
self._dcelem_description = None
self._dcelem_format = None
self._dcelem_identifier = None
self._dcelem_language = None
self._dcelem_publisher = None
self._dcelem_relation = None
self._dcelem_rights = None
self._dcelem_source = None
self._dcelem_subject = None
self._dcelem_title = None
self._dcelem_type = None
def extend_ns(self):
return {'dc': 'http://purl.org/dc/elements/1.1/'}
def _extend_xml(self, xml_element):
'''Extend xml_element with set DC fields.
:param xml_element: etree element
'''
DCELEMENTS_NS = 'http://purl.org/dc/elements/1.1/'
for elem in ['contributor', 'coverage', 'creator', 'date',
'description', 'language', 'publisher', 'relation',
'rights', 'source', 'subject', 'title', 'type', 'format',
'identifier']:
if hasattr(self, '_dcelem_%s' % elem):
for val in getattr(self, '_dcelem_%s' % elem) or []:
node = xml_elem('{%s}%s' % (DCELEMENTS_NS, elem),
xml_element)
node.text = val
def extend_atom(self, atom_feed):
'''Extend an Atom feed with the set DC fields.
:param atom_feed: The feed root element
:returns: The feed root element
'''
self._extend_xml(atom_feed)
return atom_feed
def extend_rss(self, rss_feed):
'''Extend a RSS feed with the set DC fields.
:param rss_feed: The feed root element
:returns: The feed root element.
'''
channel = rss_feed[0]
self._extend_xml(channel)
return rss_feed
def dc_contributor(self, contributor=None, replace=False):
'''Get or set the dc:contributor which is an entity responsible for
making contributions to the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-contributor
:param contributor: Contributor or list of contributors.
        :param replace: Replace already set contributors (default: False).
:returns: List of contributors.
'''
if contributor is not None:
if not isinstance(contributor, list):
contributor = [contributor]
if replace or not self._dcelem_contributor:
self._dcelem_contributor = []
self._dcelem_contributor += contributor
return self._dcelem_contributor
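    # Hedged usage sketch of the get-or-set pattern shared by the dc_*
    # methods in this class (values are illustrative):
    #
    #   ext = DcBaseExtension()
    #   ext.dc_contributor('Alice')                # -> ['Alice']
    #   ext.dc_contributor('Bob')                  # -> ['Alice', 'Bob']
    #   ext.dc_contributor('Carol', replace=True)  # -> ['Carol']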
def dc_coverage(self, coverage=None, replace=True):
        '''Get or set the dc:coverage which indicates the spatial or temporal
topic of the resource, the spatial applicability of the resource, or
the jurisdiction under which the resource is relevant.
Spatial topic and spatial applicability may be a named place or a
location specified by its geographic coordinates. Temporal topic may be
a named period, date, or date range. A jurisdiction may be a named
administrative entity or a geographic place to which the resource
applies. Recommended best practice is to use a controlled vocabulary
such as the Thesaurus of Geographic Names [TGN]. Where appropriate,
named places or time periods can be used in preference to numeric
identifiers such as sets of coordinates or date ranges.
References:
[TGN] http://www.getty.edu/research/tools/vocabulary/tgn/index.html
:param coverage: Coverage of the feed.
:param replace: Replace already set coverage (default: True).
:returns: Coverage of the feed.
'''
if coverage is not None:
if not isinstance(coverage, list):
coverage = [coverage]
if replace or not self._dcelem_coverage:
self._dcelem_coverage = []
            self._dcelem_coverage += coverage
return self._dcelem_coverage
def dc_creator(self, creator=None, replace=False):
'''Get or set the dc:creator which is an entity primarily responsible
for making the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-creator
:param creator: Creator or list of creators.
        :param replace: Replace already set creators (default: False).
:returns: List of creators.
'''
if creator is not None:
if not isinstance(creator, list):
creator = [creator]
if replace or not self._dcelem_creator:
self._dcelem_creator = []
self._dcelem_creator += creator
return self._dcelem_creator
def dc_date(self, date=None, replace=True):
'''Get or set the dc:date which describes a point or period of time
associated with an event in the lifecycle of the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-date
:param date: Date or list of dates.
        :param replace: Replace already set dates (default: True).
:returns: List of dates.
'''
if date is not None:
if not isinstance(date, list):
date = [date]
if replace or not self._dcelem_date:
self._dcelem_date = []
self._dcelem_date += date
return self._dcelem_date
def dc_description(self, description=None, replace=True):
'''Get or set the dc:description which is an account of the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-description
:param description: Description or list of descriptions.
        :param replace: Replace already set descriptions (default: True).
:returns: List of descriptions.
'''
if description is not None:
if not isinstance(description, list):
description = [description]
if replace or not self._dcelem_description:
self._dcelem_description = []
self._dcelem_description += description
return self._dcelem_description
def dc_format(self, format=None, replace=True):
'''Get or set the dc:format which describes the file format, physical
medium, or dimensions of the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-format
:param format: Format of the resource or list of formats.
        :param replace: Replace already set format (default: True).
:returns: Format of the resource.
'''
if format is not None:
if not isinstance(format, list):
format = [format]
if replace or not self._dcelem_format:
self._dcelem_format = []
self._dcelem_format += format
return self._dcelem_format
def dc_identifier(self, identifier=None, replace=True):
'''Get or set the dc:identifier which should be an unambiguous
reference to the resource within a given context.
        For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-identifier
:param identifier: Identifier of the resource or list of identifiers.
        :param replace: Replace already set identifier (default: True).
:returns: Identifiers of the resource.
'''
if identifier is not None:
if not isinstance(identifier, list):
identifier = [identifier]
if replace or not self._dcelem_identifier:
self._dcelem_identifier = []
self._dcelem_identifier += identifier
return self._dcelem_identifier
def dc_language(self, language=None, replace=True):
'''Get or set the dc:language which describes a language of the
resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-language
:param language: Language or list of languages.
        :param replace: Replace already set languages (default: True).
:returns: List of languages.
'''
if language is not None:
if not isinstance(language, list):
language = [language]
if replace or not self._dcelem_language:
self._dcelem_language = []
self._dcelem_language += language
return self._dcelem_language
def dc_publisher(self, publisher=None, replace=False):
'''Get or set the dc:publisher which is an entity responsible for
making the resource available.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-publisher
:param publisher: Publisher or list of publishers.
        :param replace: Replace already set publishers (default: False).
:returns: List of publishers.
'''
if publisher is not None:
if not isinstance(publisher, list):
publisher = [publisher] |
def dc_relation(self, relation=None, replace=False):
'''Get or set the dc:relation which describes a related resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-relation
:param relation: Relation or list of relations.
        :param replace: Replace already set relations (default: False).
:returns: List of relations.
'''
if relation is not None:
if not isinstance(relation, list):
relation = [relation]
if replace or not self._dcelem_relation:
self._dcelem_relation = []
self._dcelem_relation += relation
return self._dcelem_relation
def dc_rights(self, rights=None, replace=False):
'''Get or set the dc:rights which may contain information about rights
held in and over the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-rights
:param rights: Rights information or list of rights information.
        :param replace: Replace already set rights (default: False).
:returns: List of rights information.
'''
if rights is not None:
if not isinstance(rights, list):
rights = [rights]
if replace or not self._dcelem_rights:
self._dcelem_rights = []
self._dcelem_rights += rights
return self._dcelem_rights
def dc_source(self, source=None, replace=False):
'''Get or set the dc:source which is a related resource from which the
described resource is derived.
The described resource may be derived from the related resource in
whole or in part. Recommended best practice is to identify the related
resource by means of a string conforming to a formal identification
system.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-source
:param source: Source or list of sources.
        :param replace: Replace already set sources (default: False).
:returns: List of sources.
'''
if source is not None:
if not isinstance(source, list):
source = [source]
if replace or not self._dcelem_source:
self._dcelem_source = []
self._dcelem_source += source
return self._dcelem_source
def dc_subject(self, subject=None, replace=False):
'''Get or set the dc:subject which describes the topic of the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-subject
:param subject: Subject or list of subjects.
        :param replace: Replace already set subjects (default: False).
:returns: List of subjects.
'''
if subject is not None:
if not isinstance(subject, list):
subject = [subject]
if replace or not self._dcelem_subject:
self._dcelem_subject = []
self._dcelem_subject += subject
return self._dcelem_subject
def dc_title(self, title=None, replace=True):
'''Get or set the dc:title which is a name given to the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-title
:param title: Title or list of titles.
        :param replace: Replace already set titles (default: True).
:returns: List of titles.
'''
if title is not None:
if not isinstance(title, list):
title = [title]
if replace or not self._dcelem_title:
self._dcelem_title = []
self._dcelem_title += title
return self._dcelem_title
def dc_type(self, type=None, replace=False):
'''Get or set the dc:type which describes the nature or genre of the
resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-type
:param type: Type or list of types.
        :param replace: Replace already set types (default: False).
:returns: List of types.
'''
if type is not None:
if not isinstance(type, list):
type = [type]
if replace or not self._dcelem_type:
self._dcelem_type = []
self._dcelem_type += type
return self._dcelem_type
class DcExtension(DcBaseExtension):
'''Dublin Core Elements extension for podcasts.
'''
class DcEntryExtension(DcBaseExtension):
'''Dublin Core Elements extension for podcasts.
'''
def extend_atom(self, entry):
'''Add dc elements to an atom item. Alters the item itself.
:param entry: An atom entry element.
:returns: The entry element.
'''
self._extend_xml(entry)
return entry
def extend_rss(self, item):
'''Add dc elements to a RSS item. Alters the item itself.
:param item: A RSS item element.
:returns: The item element.
'''
self._extend_xml(item)
return item | if replace or not self._dcelem_publisher:
self._dcelem_publisher = []
self._dcelem_publisher += publisher
return self._dcelem_publisher |
iterator.rs | use crate::prelude::*;
use crate::series::unstable::{ArrayBox, UnstableSeries};
use crate::utils::CustomIterTools;
use arrow::array::ArrayRef;
use std::convert::TryFrom;
use std::marker::PhantomData;
use std::pin::Pin;
use std::ptr::NonNull;
#[cfg(feature = "private")]
pub struct AmortizedListIter<'a, I: Iterator<Item = Option<ArrayBox>>> {
len: usize,
series_container: Pin<Box<Series>>,
inner: NonNull<ArrayRef>,
lifetime: PhantomData<&'a ArrayRef>,
iter: I,
}
impl<'a, I: Iterator<Item = Option<ArrayBox>>> Iterator for AmortizedListIter<'a, I> {
type Item = Option<UnstableSeries<'a>>;
fn next(&mut self) -> Option<Self::Item> {
self.iter.next().map(|opt_val| {
opt_val.map(|array_ref| {
unsafe { *self.inner.as_mut() = array_ref.into() };
// Safety
// we cannot control the lifetime of an iterators `next` method.
// but as long as self is alive the reference to the series container is valid
let refer = &*self.series_container;
unsafe {
let s = std::mem::transmute::<&Series, &'a Series>(refer);
UnstableSeries::new_with_chunk(s, self.inner.as_ref())
}
})
})
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.len, Some(self.len))
}
}
// # Safety
// we correctly implemented size_hint
#[cfg(feature = "private")]
unsafe impl<'a, I: Iterator<Item = Option<ArrayBox>>> TrustedLen for AmortizedListIter<'a, I> {}
impl ListChunked {
    /// This is an iterator over a ListChunked that saves allocations.
/// A Series is:
/// 1. Arc<ChunkedArray>
/// ChunkedArray is:
/// 2. Vec< 3. ArrayRef>
///
/// The ArrayRef we indicated with 3. will be updated during iteration.
/// The Series will be pinned in memory, saving an allocation for
/// 1. Arc<..>
/// 2. Vec<...>
///
/// # Warning
/// Though memory safe in the sense that it will not read unowned memory, UB, or memory leaks
    /// this function still needs precautions. The returned `UnstableSeries` should never be
    /// cloned or kept beyond a single iteration, as every call to `next` on the iterator
    /// will change the contents of that Series.
#[cfg(feature = "private")]
pub fn | (&self) -> AmortizedListIter<impl Iterator<Item = Option<ArrayBox>> + '_> {
// we create the series container from the inner array
// so that the container has the proper dtype.
let arr = self.downcast_iter().next().unwrap();
let inner_values = arr.values();
let series_container = Box::pin(Series::try_from(("", inner_values.clone())).unwrap());
let ptr = &series_container.chunks()[0] as *const ArrayRef as *mut ArrayRef;
AmortizedListIter {
len: self.len(),
series_container,
inner: NonNull::new(ptr).unwrap(),
lifetime: PhantomData,
iter: self.downcast_iter().flat_map(|arr| arr.iter()),
}
}
/// Apply a closure `F` elementwise.
#[cfg(feature = "private")]
#[must_use]
pub fn apply_amortized<'a, F>(&'a self, mut f: F) -> Self
where
F: FnMut(UnstableSeries<'a>) -> Series,
{
if self.is_empty() {
return self.clone();
}
let mut fast_explode = true;
let mut ca: ListChunked = self
.amortized_iter()
.map(|opt_v| {
opt_v.map(|v| {
let out = f(v);
if out.is_empty() {
fast_explode = false;
}
out
})
})
.collect_trusted();
ca.rename(self.name());
if fast_explode {
ca.set_fast_explode();
}
ca
}
pub fn try_apply_amortized<'a, F>(&'a self, mut f: F) -> Result<Self>
where
F: FnMut(UnstableSeries<'a>) -> Result<Series>,
{
if self.is_empty() {
return Ok(self.clone());
}
let mut fast_explode = true;
let mut ca: ListChunked = self
.amortized_iter()
.map(|opt_v| {
opt_v
.map(|v| {
let out = f(v);
if let Ok(out) = &out {
if out.is_empty() {
fast_explode = false
}
};
out
})
.transpose()
})
.collect::<Result<_>>()?;
ca.rename(self.name());
if fast_explode {
ca.set_fast_explode();
}
Ok(ca)
}
}
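// Hedged usage sketch: `list_lengths` is an illustrative free function, not
// part of this crate; it measures each sub-list while reusing the single
// pinned Series container provided by `amortized_iter`.
#[cfg(feature = "private")]
pub fn list_lengths(ca: &ListChunked) -> Vec<Option<usize>> {
    ca.amortized_iter()
        .map(|opt_s| opt_s.map(|s| s.as_ref().len()))
        .collect()
}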
#[cfg(test)]
mod test {
use super::*;
use crate::chunked_array::builder::get_list_builder;
#[test]
fn test_iter_list() {
let mut builder = get_list_builder(&DataType::Int32, 10, 10, "");
builder.append_series(&Series::new("", &[1, 2, 3]));
builder.append_series(&Series::new("", &[3, 2, 1]));
builder.append_series(&Series::new("", &[1, 1]));
let ca = builder.finish();
ca.amortized_iter()
.zip(ca.into_iter())
.for_each(|(s1, s2)| {
assert!(s1.unwrap().as_ref().series_equal(&s2.unwrap()));
});
}
}
| amortized_iter |
__init__.py | # __init__.py
# Copyright (C) 2008, 2009 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
# flake8: noqa
# @PydevCodeAnalysisIgnore
from git.exc import * # @NoMove @IgnorePep8
import inspect
import os
import sys
import os.path as osp
from typing import Optional
from git.types import PathLike
__version__ = "git"
# { Initialization
def _init_externals() -> None:
"""Initialize external projects by putting them into the path"""
if __version__ == "git" and "PYOXIDIZER" not in os.environ:
sys.path.insert(1, osp.join(osp.dirname(__file__), "ext", "gitdb"))
try:
import gitdb
except ImportError as e:
raise ImportError("'gitdb' could not be found in your PYTHONPATH") from e
# END verify import
# } END initialization
#################
_init_externals()
#################
# { Imports
try:
from git.config import GitConfigParser # @NoMove @IgnorePep8
from git.objects import * # @NoMove @IgnorePep8
from git.refs import * # @NoMove @IgnorePep8
from git.diff import * # @NoMove @IgnorePep8
from git.db import * # @NoMove @IgnorePep8
from git.cmd import Git # @NoMove @IgnorePep8
from git.repo import Repo # @NoMove @IgnorePep8
from git.remote import * # @NoMove @IgnorePep8
from git.index import * # @NoMove @IgnorePep8
from git.util import ( # @NoMove @IgnorePep8
LockFile,
BlockingLockFile,
Stats,
Actor,
rmtree,
)
except GitError as exc:
raise ImportError("%s: %s" % (exc.__class__.__name__, exc)) from exc
# } END imports
__all__ = [name for name, obj in locals().items() if not (name.startswith("_") or inspect.ismodule(obj))] |
# { Initialize git executable path
GIT_OK = None
def refresh(path: Optional[PathLike] = None) -> None:
"""Convenience method for setting the git executable path."""
global GIT_OK
GIT_OK = False
if not Git.refresh(path=path):
return
if not FetchInfo.refresh():
return
GIT_OK = True
# } END initialize git executable path
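# Hedged usage sketch (the path is illustrative): point GitPython at a
# specific git binary before first use and re-check GIT_OK afterwards.
#
#   import git
#   git.refresh(path="/usr/local/bin/git")
#   assert git.GIT_OK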
#################
try:
refresh()
except Exception as exc:
raise ImportError("Failed to initialize: {0}".format(exc)) from exc
################# | |
prepdata.py | #!/usr/bin/env python
import os
import sys
import random
import cv2
import numpy as np
import xgboost as xgb
from sklearn import preprocessing
from sklearn.decomposition import PCA, NMF
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from keras.preprocessing.image import ImageDataGenerator
from selam.utils import img
def sample_negative(img, rect, n=1, size=(100, 100)):
""" Sample n negative samples randomly
@param rect: [x1, y1, x2, y2]
@param n: number of negative samples
@param size: size of negative window
"""
samples = []
maxHeight, maxWidth = img.shape[:-1]
width = abs(rect[0] - rect[2])
height = abs(rect[1] - rect[3])
while len(samples) != n:
tmpX = int(random.random() * (maxWidth - width))
tmpY = int(random.random() * (maxHeight - height))
isNotOverlapX = tmpX + width < rect[0] or tmpX > rect[2]
isNotOverlapY = tmpY + height < rect[1] or tmpY > rect[3]
# Only accepts sample that does not overlap with ground truth
if isNotOverlapX and isNotOverlapY:
samples.append(cv2.resize(
img[tmpY: tmpY + height, tmpX: tmpX + width], size))
return samples
def get_roi(img, rect, size=(100, 100)):
""" Return extracted bounding box given 4 corners of a rectangle
size: size of training image
@return roi, [x1, y1, x2, y2]
"""
xpos = rect[0::2]
ypos = rect[1::2]
y = [int(min(ypos)), int(max(ypos))]
x = [int(min(xpos)), int(max(xpos))]
roi = img[y[0]:y[1], x[0]:x[1]]
return cv2.resize(roi, size), [x[0], y[0], x[1], y[1]]
def get_jpgs(dirpath, skip=0, resize=None):
""" Returns all images located in given dirpath
skip : number of frames skip to reduce computation time
resize: scale factor for resize
"""
filenames = os.listdir(dirpath)
# Only attempt to parse and sort files that end with .jpg
filenames = [filename for filename in filenames
if filename.endswith(".jpg") or filename.endswith(".png")]
filenames.sort(key=lambda x: int(x.split('.', 1)[0]))
frames = [cv2.imread('{}/{}'.format(dirpath, filename))
for filename in filenames]
out = frames[0::skip] if skip > 0 else frames
print('Read {} images from {}'.format(len(out), dirpath))
if resize:
        # integer division so cv2.resize receives an integer size
        new_size = (out[0].shape[1] // resize, out[0].shape[0] // resize)
        return [cv2.resize(x, new_size) for x in out]
return out
def extract_training(dataset_path, annotation):
""" Returns a list of labelled images as positive training data
Uses default size of 100 x 100 as training patch
@return positive samples, negative samples
"""
positives = []
negatives = []
imgs = get_jpgs(dataset_path)
with open(annotation) as ann:
for i, label in zip(imgs, ann):
            rect = list(map(float, label.rstrip().split(',')))
if rect[0] > 0:
roi, coord = get_roi(i, rect)
negatives.extend(sample_negative(i, coord))
positives.append(roi)
print("{} positive samples".format(len(positives)))
print("{} negative samples".format(len(negatives)))
return positives, negatives
def augment_data(imgs, augment_dir, prefix, n=20):
""" Augment imgs with various transformations
@param augment_dir: directory to save augmented images
@param prefix: prefix of filename
@param n: number of transformations per image
"""
n_samples = len(imgs)
datagen = ImageDataGenerator(
rotation_range=90,
width_shift_range=0.2,
height_shift_range=0.2,
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
for i in imgs:
selected = cv2.cvtColor(i, cv2.COLOR_BGR2RGB)
selected = selected.reshape((1, ) + selected.shape)
for x, batch in enumerate(datagen.flow(selected, batch_size=1,
save_to_dir=augment_dir,
save_prefix=prefix,
save_format='jpeg')):
if x > n:
break
def kfold(x, y, eval_size=0.10):
""" Split dataset into training set and validation set
@param eval_size: percentage of data used for evaluation
@return X_train, X_valid, Y_train, Y_valid
"""
return train_test_split(x, y, test_size=eval_size, random_state=0)
def | (X):
""" Z-score standardization by subtracting mean and divided by standard
deviation of dataset
"""
scaler = preprocessing.StandardScaler().fit(X)
return scaler.transform(X)
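# Hedged sanity check for std_zscore (illustrative values): each column of
# the standardized output should have mean ~0 and standard deviation ~1.
#
#   X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
#   Z = std_zscore(X)
#   assert np.allclose(Z.mean(axis=0), 0) and np.allclose(Z.std(axis=0), 1)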
def std_minmax(X):
scaler = preprocessing.MinMaxScaler().fit(X)
return scaler.transform(X)
def reduce_pca(X, h, w, n=15, display=True):
""" Performs PCA decomposition using n components """
pca = PCA(n_components=n, svd_solver='randomized',
whiten=True).fit(X)
eigenfaces = pca.components_.reshape((n, h, w, -1))
if display:
for i in eigenfaces:
cv2.imshow('PC', np.uint8(img.normUnity(np.mean(i, axis=2)) * 255))
cv2.waitKey(0)
return pca.transform(X)
def reduce_nmf(X, h, w, n=15, display=False):
""" Performs Non-negative matrix factorization using n components """
model = NMF(n_components=n, init='random', random_state=0).fit(X)
components = model.components_.reshape((n, h, w, -1))
if display:
for i in components:
cv2.imshow('PC', np.uint8(img.normUnity(np.mean(i, axis=2)) * 255))
cv2.waitKey(0)
return model.transform(X)
def classify_svm(X_train, Y_train):
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]}
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf.fit(X_train, Y_train)
return clf
def classify_rf(X_train, Y_train):
param_grid = {'n_estimators': [50, 200, 700],
'max_features': ['auto', 'sqrt', 'log2']}
clf = GridSearchCV(RandomForestClassifier(n_estimators=500, oob_score=True), param_grid)
clf.fit(X_train, Y_train)
return clf
def classify_gp(X, Y):
# Using same lengthscale for all features
kernel = 1.0 * RBF([1.0])
gpc_rbf = GaussianProcessClassifier(kernel=kernel).fit(X, Y)
return gpc_rbf
def classify_xgb(X, Y):
xgb_model = xgb.XGBClassifier()
parameters = {'nthread':[4], #when use hyperthread, xgboost may become slower
'objective':['binary:logistic'],
'learning_rate': [0.05], #so called `eta` value
'max_depth': [6],
'min_child_weight': [11],
'silent': [1],
'subsample': [0.8],
'colsample_bytree': [0.7],
'n_estimators': [5], #number of trees, change it to 1000 for better results
'missing':[-999],
'seed': [1337]}
clf = GridSearchCV(xgb_model, parameters)
clf.fit(X, Y)
return clf
if __name__ == '__main__':
if len(sys.argv) < 4:
print("Usage: python extract_region.py <dataset directory> <annotation file> <prefix> \n")
exit()
positives, negatives = extract_training(sys.argv[1], sys.argv[2])
| std_zscore |
buffer.rs | //! Buffers to safely work with audio samples.
use num_traits::Float;
use std::slice;
use std::iter::Zip;
/// `AudioBuffer` contains references to the audio buffers for all input and output channels.
///
/// To create an `AudioBuffer` in a host, use a [`HostBuffer`](../host/struct.HostBuffer.html).
pub struct AudioBuffer<'a, T: 'a + Float> {
inputs: &'a [*const T],
outputs: &'a mut [*mut T],
samples: usize,
}
impl<'a, T: 'a + Float> AudioBuffer<'a, T> {
/// Create an `AudioBuffer` from raw pointers.
/// Only really useful for interacting with the VST API.
#[inline]
pub unsafe fn from_raw(
input_count: usize,
output_count: usize,
inputs_raw: *const *const T,
outputs_raw: *mut *mut T,
samples: usize,
) -> Self {
Self {
inputs: slice::from_raw_parts(inputs_raw, input_count),
outputs: slice::from_raw_parts_mut(outputs_raw, output_count),
samples,
}
}
/// The number of input channels that this buffer was created for
#[inline]
pub fn input_count(&self) -> usize {
self.inputs.len()
}
/// The number of output channels that this buffer was created for
#[inline]
pub fn output_count(&self) -> usize {
self.outputs.len()
}
/// The number of samples in this buffer (same for all channels)
#[inline]
pub fn samples(&self) -> usize {
self.samples
}
/// The raw inputs to pass to processReplacing
#[inline]
pub(crate) fn raw_inputs(&self) -> &[*const T] {
self.inputs
}
/// The raw outputs to pass to processReplacing
#[inline]
pub(crate) fn raw_outputs(&mut self) -> &mut [*mut T] {
&mut self.outputs
}
/// Split this buffer into separate inputs and outputs.
#[inline]
pub fn split<'b>(&'b mut self) -> (Inputs<'b, T>, Outputs<'b, T>)
where
'a: 'b,
{
(
Inputs {
bufs: self.inputs,
samples: self.samples,
},
Outputs {
bufs: self.outputs,
samples: self.samples,
},
)
}
/// Zip together buffers.
#[inline]
pub fn zip<'b>(&'b mut self) -> Zip<InputIterator<'b, T>, OutputIterator<'b, T>>
where
'a: 'b,
{
let (inputs, outputs) = self.split();
inputs.into_iter().zip(outputs)
}
}
use std::ops::{Index, IndexMut};
/// Wrapper type to access the buffers for the input channels of an `AudioBuffer` in a safe way.
/// Behaves like a slice.
#[derive(Copy, Clone)]
pub struct Inputs<'a, T: 'a> {
bufs: &'a [*const T],
samples: usize,
}
impl<'a, T> Inputs<'a, T> {
/// Number of channels
pub fn len(&self) -> usize {
self.bufs.len()
}
/// Returns true if the buffer is empty
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Access channel at the given index, unchecked
pub fn get(&self, i: usize) -> &'a [T] {
unsafe { slice::from_raw_parts(self.bufs[i], self.samples) }
}
/// Split borrowing at the given index, like for slices
pub fn split_at(&self, i: usize) -> (Inputs<'a, T>, Inputs<'a, T>) {
let (l, r) = self.bufs.split_at(i);
(
Inputs {
bufs: l,
samples: self.samples,
},
Inputs {
bufs: r,
samples: self.samples,
},
)
}
}
impl<'a, T> Index<usize> for Inputs<'a, T> {
type Output = [T];
fn index(&self, i: usize) -> &Self::Output {
self.get(i)
}
}
/// Iterator over buffers for input channels of an `AudioBuffer`.
pub struct InputIterator<'a, T: 'a> {
data: Inputs<'a, T>,
i: usize,
}
impl<'a, T> Iterator for InputIterator<'a, T> {
type Item = &'a [T];
fn next(&mut self) -> Option<Self::Item> {
if self.i < self.data.len() {
let val = self.data.get(self.i);
self.i += 1;
Some(val)
} else {
None
}
}
}
impl<'a, T: Sized> IntoIterator for Inputs<'a, T> {
type Item = &'a [T];
type IntoIter = InputIterator<'a, T>;
fn into_iter(self) -> Self::IntoIter {
InputIterator { data: self, i: 0 }
}
}
/// Wrapper type to access the buffers for the output channels of an `AudioBuffer` in a safe way.
/// Behaves like a slice.
#[derive(Copy, Clone)]
pub struct Outputs<'a, T: 'a> {
bufs: &'a [*mut T],
samples: usize,
}
impl<'a, T> Outputs<'a, T> {
/// Number of channels
pub fn len(&self) -> usize {
self.bufs.len()
}
/// Returns true if the buffer is empty
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Access channel at the given index, unchecked
pub fn get(&self, i: usize) -> &'a [T] {
unsafe { slice::from_raw_parts(self.bufs[i], self.samples) }
}
/// Mutably access channel at the given index, unchecked
pub fn get_mut(&self, i: usize) -> &'a mut [T] {
unsafe { slice::from_raw_parts_mut(self.bufs[i], self.samples) }
}
/// Split borrowing at the given index, like for slices
pub fn split_at_mut(&mut self, i: usize) -> (Outputs<'a, T>, Outputs<'a, T>) {
let (l, r) = self.bufs.split_at(i);
(
Outputs {
bufs: l,
samples: self.samples,
},
Outputs {
bufs: r,
samples: self.samples,
},
)
}
}
impl<'a, T> Index<usize> for Outputs<'a, T> {
type Output = [T];
fn index(&self, i: usize) -> &Self::Output {
self.get(i)
}
}
impl<'a, T> IndexMut<usize> for Outputs<'a, T> {
fn index_mut(&mut self, i: usize) -> &mut Self::Output |
}
/// Iterator over buffers for output channels of an `AudioBuffer`.
pub struct OutputIterator<'a, T: 'a> {
data: Outputs<'a, T>,
i: usize,
}
impl<'a, T> Iterator for OutputIterator<'a, T> {
type Item = &'a mut [T];
fn next(&mut self) -> Option<Self::Item> {
if self.i < self.data.len() {
let val = self.data.get_mut(self.i);
self.i += 1;
Some(val)
} else {
None
}
}
}
impl<'a, T: Sized> IntoIterator for Outputs<'a, T> {
type Item = &'a mut [T];
type IntoIter = OutputIterator<'a, T>;
fn into_iter(self) -> Self::IntoIter {
OutputIterator { data: self, i: 0 }
}
}
use event::{Event, MidiEvent, SysExEvent};
/// This is used as a placeholder to pre-allocate space for a fixed number of midi events in the
/// re-usable `SendEventBuffer`. Since `SysExEvent` is larger than `MidiEvent`, either kind of
/// event can be stored in a `SysExEvent`.
pub type PlaceholderEvent = api::SysExEvent;
/// This trait is used by `SendEventBuffer::send_events` to accept iterators over midi events
pub trait WriteIntoPlaceholder {
/// writes an event into the given placeholder memory location
fn write_into(&self, out: &mut PlaceholderEvent);
}
impl<'a, T: WriteIntoPlaceholder> WriteIntoPlaceholder for &'a T {
fn write_into(&self, out: &mut PlaceholderEvent) {
(*self).write_into(out);
}
}
impl WriteIntoPlaceholder for MidiEvent {
fn write_into(&self, out: &mut PlaceholderEvent) {
let out = unsafe { &mut *(out as *mut _ as *mut _) };
*out = api::MidiEvent {
event_type: api::EventType::Midi,
byte_size: mem::size_of::<api::MidiEvent>() as i32,
delta_frames: self.delta_frames,
flags: if self.live { api::MidiEventFlags::REALTIME_EVENT.bits() } else { 0 },
note_length: self.note_length.unwrap_or(0),
note_offset: self.note_offset.unwrap_or(0),
midi_data: self.data,
_midi_reserved: 0,
detune: self.detune,
note_off_velocity: self.note_off_velocity,
_reserved1: 0,
_reserved2: 0,
};
}
}
impl<'a> WriteIntoPlaceholder for SysExEvent<'a> {
fn write_into(&self, out: &mut PlaceholderEvent) {
*out = PlaceholderEvent {
event_type: api::EventType::SysEx,
byte_size: mem::size_of::<PlaceholderEvent>() as i32,
delta_frames: self.delta_frames,
_flags: 0,
data_size: self.payload.len() as i32,
_reserved1: 0,
system_data: self.payload.as_ptr() as *const u8 as *mut u8,
_reserved2: 0,
};
}
}
impl<'a> WriteIntoPlaceholder for Event<'a> {
fn write_into(&self, out: &mut PlaceholderEvent) {
match *self {
Event::Midi(ref ev) => {
ev.write_into(out);
}
Event::SysEx(ref ev) => {
ev.write_into(out);
}
Event::Deprecated(e) => {
let out = unsafe { &mut *(out as *mut _ as *mut _) };
*out = e;
}
};
}
}
use api;
use std::mem;
use host::Host;
use plugin::Plugin;
/// This buffer is used for sending midi events through the VST interface.
/// The purpose of this is to convert outgoing midi events from `event::Event` to `api::Events`.
/// It only allocates memory in new() and reuses the memory between calls.
pub struct SendEventBuffer {
buf: Vec<u8>,
api_events: Vec<PlaceholderEvent>, // using SysExEvent to store both because it's larger than MidiEvent
}
impl Default for SendEventBuffer {
fn default() -> Self {
SendEventBuffer::new(1024)
}
}
impl SendEventBuffer {
/// Creates a buffer for sending up to the given number of midi events per frame
#[inline(always)]
pub fn new(capacity: usize) -> Self {
let header_size = mem::size_of::<api::Events>() - (mem::size_of::<*mut api::Event>() * 2);
let body_size = mem::size_of::<*mut api::Event>() * capacity;
let mut buf = vec![0u8; header_size + body_size];
let api_events = vec![unsafe { mem::zeroed::<PlaceholderEvent>() }; capacity];
{
let ptrs = {
let e = Self::buf_as_api_events(&mut buf);
e.num_events = capacity as i32;
e.events_raw_mut()
};
for (ptr, event) in ptrs.iter_mut().zip(&api_events) {
let (ptr, event): (&mut *const PlaceholderEvent, &PlaceholderEvent) = (ptr, event);
*ptr = event;
}
}
Self { buf, api_events }
}
/// Sends events to the host. See the `fwd_midi` example.
///
/// # Example
/// ```no_run
/// # use vst::plugin::{Info, Plugin, HostCallback};
/// # use vst::buffer::{AudioBuffer, SendEventBuffer};
/// # use vst::host::Host;
/// # use vst::event::*;
/// # struct ExamplePlugin { host: HostCallback, send_buffer: SendEventBuffer }
/// # impl Plugin for ExamplePlugin {
/// # fn get_info(&self) -> Info { Default::default() }
/// #
/// fn process(&mut self, buffer: &mut AudioBuffer<f32>){
/// let events: Vec<MidiEvent> = vec![
/// // ...
/// ];
/// self.send_buffer.send_events(&events, &mut self.host);
/// }
/// # }
/// ```
#[inline(always)]
pub fn send_events<T: IntoIterator<Item = U>, U: WriteIntoPlaceholder>(&mut self, events: T, host: &mut Host) {
self.store_events(events);
host.process_events(self.events());
}
/// Sends events from the host to a plugin.
#[inline(always)]
pub fn send_events_to_plugin<T: IntoIterator<Item = U>, U: WriteIntoPlaceholder>(&mut self, events: T, plugin: &mut Plugin) {
self.store_events(events);
plugin.process_events(self.events());
}
#[inline(always)]
fn store_events<T: IntoIterator<Item = U>, U: WriteIntoPlaceholder>(&mut self, events: T) {
let count = events
.into_iter()
.zip(self.api_events.iter_mut())
.map(|(ev, out)| ev.write_into(out))
.count();
self.set_num_events(count);
}
#[inline(always)]
fn events(&self) -> &api::Events {
#[allow(clippy::cast_ptr_alignment)]
unsafe {
&*(self.buf.as_ptr() as *const api::Events)
}
}
#[inline(always)]
fn buf_as_api_events(buf: &mut [u8]) -> &mut api::Events {
#[allow(clippy::cast_ptr_alignment)]
unsafe {
&mut *(buf.as_mut_ptr() as *mut api::Events)
}
}
#[inline(always)]
fn set_num_events(&mut self, events_len: usize) {
use std::cmp::min;
let e = Self::buf_as_api_events(&mut self.buf);
e.num_events = min(self.api_events.len(), events_len) as i32;
}
}
#[cfg(test)]
mod tests {
use buffer::AudioBuffer;
/// Size of buffers used in tests.
const SIZE: usize = 1024;
/// Test that creating and zipping buffers works.
///
/// This test creates a channel for 2 inputs and 2 outputs.
/// The input channels are simply values
/// from 0 to `SIZE-1` (e.g. [0, 1, 2, 3, 4, .. , SIZE - 1])
/// and the output channels are just 0.
/// This test assures that when the buffers are zipped together,
/// the input values do not change.
#[test]
fn buffer_zip() {
let in1: Vec<f32> = (0..SIZE).map(|x| x as f32).collect();
let in2 = in1.clone();
let mut out1 = vec![0.0; SIZE];
let mut out2 = out1.clone();
let inputs = vec![in1.as_ptr(), in2.as_ptr()];
let mut outputs = vec![out1.as_mut_ptr(), out2.as_mut_ptr()];
let mut buffer = unsafe {
AudioBuffer::from_raw(2, 2, inputs.as_ptr(), outputs.as_mut_ptr(), SIZE)
};
for (input, output) in buffer.zip() {
            input.into_iter().zip(output.into_iter()).fold(0, |acc, (input, output)| {
                assert_eq!(*input - acc as f32, 0.0);
                assert_eq!(*output, 0.0);
                acc + 1
            });
}
}
/// Test that creating buffers from raw pointers works.
#[test]
fn from_raw() {
let in1: Vec<f32> = (0..SIZE).map(|x| x as f32).collect();
let in2 = in1.clone();
let mut out1 = vec![0.0; SIZE];
let mut out2 = out1.clone();
let inputs = vec![in1.as_ptr(), in2.as_ptr()];
let mut outputs = vec![out1.as_mut_ptr(), out2.as_mut_ptr()];
let mut buffer =
unsafe { AudioBuffer::from_raw(2, 2, inputs.as_ptr(), outputs.as_mut_ptr(), SIZE) };
for (input, output) in buffer.zip() {
            input.into_iter().zip(output.into_iter()).fold(0, |acc, (input, output)| {
                assert_eq!(*input - acc as f32, 0.0);
                assert_eq!(*output, 0.0);
                acc + 1
            });
}
}
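    /// Test (editor's addition) that `split` exposes both channel groups and
    /// that writes through `Outputs` reach the backing storage.
    #[test]
    fn buffer_split() {
        let in1: Vec<f32> = (0..SIZE).map(|x| x as f32).collect();
        let in2 = in1.clone();
        let mut out1 = vec![0.0; SIZE];
        let mut out2 = out1.clone();
        let inputs = vec![in1.as_ptr(), in2.as_ptr()];
        let mut outputs = vec![out1.as_mut_ptr(), out2.as_mut_ptr()];
        let mut buffer =
            unsafe { AudioBuffer::from_raw(2, 2, inputs.as_ptr(), outputs.as_mut_ptr(), SIZE) };
        {
            let (ins, outs) = buffer.split();
            assert_eq!(ins.len(), 2);
            assert_eq!(outs.len(), 2);
            // Copy channel 0 of the input into channel 0 of the output.
            outs.get_mut(0).copy_from_slice(ins.get(0));
        }
        assert_eq!(out1, in1);
    }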
}
| {
self.get_mut(i)
} |
public_api.ts | export * from './lib/identity-platform.service'; | export * from './lib/identity-platform.http-interceptor'; |
|
OpenCageGeocoderServiceSpec.js | import { OpenCageGeocoderService } from "../../Source/Cesium.js";
import { Resource } from "../../Source/Cesium.js";
import { when } from "../../Source/Cesium.js";
describe("Core/OpenCageGeocoderService", function () {
const endpoint = "https://api.opencagedata.com/geocode/v1/";
const apiKey = "c2a490d593b14612aefa6ec2e6b77c47";
it("constructor throws without url", function () {
expect(function () {
return new OpenCageGeocoderService(undefined);
}).toThrowDeveloperError();
});
it("constructor throws without API Key", function () {
expect(function () {
return new OpenCageGeocoderService(endpoint, undefined);
}).toThrowDeveloperError();
});
it("returns geocoder results", function () {
const service = new OpenCageGeocoderService(endpoint, apiKey);
const query = "-22.6792,+14.5272";
const data = {
results: [
{
bounds: {
northeast: {
lat: -22.6790826,
lng: 14.5269016,
},
southwest: {
lat: -22.6792826,
lng: 14.5267016,
},
},
formatted: "Beryl's Restaurant, Woermann St, Swakopmund, Namibia",
geometry: {
lat: -22.6795394,
lng: 14.5276006,
},
},
],
};
spyOn(Resource.prototype, "fetchJson").and.returnValue(when.resolve(data));
return service.geocode(query).then(function (results) {
expect(results.length).toEqual(1);
expect(results[0].displayName).toEqual(data.results[0].formatted);
expect(results[0].destination).toBeDefined();
});
});
it("returns no geocoder results if OpenCage has no results", function () {
const service = new OpenCageGeocoderService(endpoint, apiKey);
const query = ""; | spyOn(Resource.prototype, "fetchJson").and.returnValue(when.resolve(data));
return service.geocode(query).then(function (results) {
expect(results.length).toEqual(0);
});
});
}); | const data = { results: [] }; |
roundtrip_semantic.rs | extern crate elements_miniscript as miniscript;
use miniscript::{policy, DummyKey};
use std::str::FromStr;
type DummyPolicy = policy::Semantic<DummyKey>;
fn do_test(data: &[u8]) {
let data_str = String::from_utf8_lossy(data);
if let Ok(pol) = DummyPolicy::from_str(&data_str) {
let output = pol.to_string();
assert_eq!(data_str.to_lowercase(), output.to_lowercase());
} | }
#[cfg(feature = "afl")]
extern crate afl;
#[cfg(feature = "afl")]
fn main() {
afl::read_stdio_bytes(|data| {
do_test(&data);
});
}
#[cfg(feature = "honggfuzz")]
#[macro_use]
extern crate honggfuzz;
#[cfg(feature = "honggfuzz")]
fn main() {
loop {
fuzz!(|data| {
do_test(data);
});
}
} | |
main.go | //
// Copyright (c) 2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package main
import (
"bytes"
"errors"
"fmt"
"github.com/edgexfoundry/app-functions-sdk-go/pkg/transforms"
"image"
_ "image/jpeg"
_ "image/png"
"os"
"github.com/edgexfoundry/go-mod-core-contracts/models"
"github.com/edgexfoundry/app-functions-sdk-go/appcontext"
"github.com/edgexfoundry/app-functions-sdk-go/appsdk"
)
const (
serviceKey = "sampleCborFilter"
)
var counter int = 0
func main() {
// 1) First thing to do is to create an instance of the EdgeX SDK and initialize it.
edgexSdk := &appsdk.AppFunctionsSDK{ServiceKey: serviceKey}
if err := edgexSdk.Initialize(); err != nil {
message := fmt.Sprintf("SDK initialization failed: %v\n", err)
if edgexSdk.LoggingClient != nil {
edgexSdk.LoggingClient.Error(message)
} else {
fmt.Println(message)
}
os.Exit(-1)
}
	// 2) This shows how to access the application's specific configuration settings.
valueDescriptors, err := edgexSdk.GetAppSettingStrings("ValueDescriptors")
if err != nil {
edgexSdk.LoggingClient.Error(err.Error())
os.Exit(-1)
}
edgexSdk.LoggingClient.Info(fmt.Sprintf("Filtering for ValueDescriptors %v", valueDescriptors))
// 3) This is our pipeline configuration, the collection of functions to
// execute every time an event is triggered.
edgexSdk.SetFunctionsPipeline(
transforms.NewFilter(valueDescriptors).FilterByValueDescriptor,
processImages,
)
// 4) Lastly, we'll go ahead and tell the SDK to "start" and begin listening for events
// to trigger the pipeline.
err = edgexSdk.MakeItRun()
if err != nil {
edgexSdk.LoggingClient.Error("MakeItRun returned error: ", err.Error())
os.Exit(-1)
}
// Do any required cleanup here
os.Exit(0)
}
func | (edgexcontext *appcontext.Context, params ...interface{}) (bool, interface{}) {
if len(params) < 1 {
// We didn't receive a result
return false, nil
}
event, ok := params[0].(models.Event)
if !ok {
return false, errors.New("processImages didn't receive expect models.Event type")
}
for _, reading := range event.Readings {
// For this to work the image/jpeg & image/png packages must be imported to register their decoder
imageData, imageType, err := image.Decode(bytes.NewReader(reading.BinaryValue))
if err != nil {
return false, errors.New("unable to decode image: " + err.Error())
}
		// Since this is an example, we will just print out some stats from the images received
fmt.Printf("Received Image from Device: %s, ReadingName: %s, Image Type: %s, Image Size: %s, Color in middle: %v\n",
reading.Device, reading.Name, imageType, imageData.Bounds().Size().String(),
imageData.At(imageData.Bounds().Size().X/2, imageData.Bounds().Size().Y/2))
}
return false, nil
}
| processImages |
unitary.py | r"""
Unitary Groups `GU(n,q)` and `SU(n,q)`
These are `n \times n` unitary matrices with entries in
`GF(q^2)`.
EXAMPLES::
sage: G = SU(3,5)
sage: G.order()
378000
sage: G
Special Unitary Group of degree 3 over Finite Field in a of size 5^2
sage: G.gens()
(
[ a 0 0] [4*a 4 1]
[ 0 2*a + 2 0] [ 4 4 0]
[ 0 0 3*a], [ 1 0 0]
)
sage: G.base_ring()
Finite Field in a of size 5^2
AUTHORS:
- David Joyner (2006-03): initial version, modified from
special_linear (by W. Stein)
- David Joyner (2006-05): minor additions (examples, _latex_, __str__,
gens)
- William Stein (2006-12): rewrite
- Volker Braun (2013-1) port to new Parent, libGAP, extreme refactoring.
"""
#*********************************************************************************
# Copyright (C) 2006 David Joyner and William Stein
# Copyright (C) 2013 Volker Braun <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*********************************************************************************
from sage.rings.all import ZZ, is_FiniteField, GF
from sage.misc.latex import latex
from sage.groups.matrix_gps.named_group import (
normalize_args_vectorspace, NamedMatrixGroup_generic, NamedMatrixGroup_gap )
def finite_field_sqrt(ring):
"""
Helper function.
INPUT:
A ring.
OUTPUT:
Integer q such that ``ring`` is the finite field with `q^2` elements.
EXAMPLES::
sage: from sage.groups.matrix_gps.unitary import finite_field_sqrt
sage: finite_field_sqrt(GF(4, 'a'))
2
"""
if not is_FiniteField(ring):
raise ValueError('not a finite field')
q, rem = ring.cardinality().sqrtrem()
if rem != 0:
        raise ValueError('cardinality not a square')
return q
###############################################################################
# General Unitary Group
###############################################################################
def GU(n, R, var='a'):
r"""
Return the general unitary group.
The general unitary group `GU( d, R )` consists of all `d \times
    d` matrices that preserve a nondegenerate sesquilinear form over
the ring `R`.
.. note::
For a finite field the matrices that preserve a sesquilinear
form over `F_q` live over `F_{q^2}`. So ``GU(n,q)`` for
integer ``q`` constructs the matrix group over the base ring
``GF(q^2)``.
.. note::
This group is also available via ``groups.matrix.GU()``.
INPUT:
- ``n`` -- a positive integer.
- ``R`` -- ring or an integer. If an integer is specified, the
corresponding finite field is used.
- ``var`` -- variable used to represent generator of the finite
field, if needed.
OUTPUT:
Return the general unitary group.
EXAMPLES::
sage: G = GU(3, 7); G
General Unitary Group of degree 3 over Finite Field in a of size 7^2
sage: G.gens()
(
[ a 0 0] [6*a 6 1]
[ 0 1 0] [ 6 6 0]
[ 0 0 5*a], [ 1 0 0]
)
sage: GU(2,QQ)
General Unitary Group of degree 2 over Rational Field
sage: G = GU(3, 5, var='beta')
sage: G.base_ring()
Finite Field in beta of size 5^2
sage: G.gens()
(
[ beta 0 0] [4*beta 4 1]
[ 0 1 0] [ 4 4 0]
[ 0 0 3*beta], [ 1 0 0]
)
TESTS::
sage: groups.matrix.GU(2, 3)
General Unitary Group of degree 2 over Finite Field in a of size 3^2
"""
degree, ring = normalize_args_vectorspace(n, R, var=var)
if is_FiniteField(ring):
q = ring.cardinality()
ring = GF(q ** 2, name=var)
name = 'General Unitary Group of degree {0} over {1}'.format(degree, ring)
ltx = r'\text{{GU}}_{{{0}}}({1})'.format(degree, latex(ring))
if is_FiniteField(ring):
cmd = 'GU({0}, {1})'.format(degree, q)
return UnitaryMatrixGroup_gap(degree, ring, False, name, ltx, cmd)
else:
return UnitaryMatrixGroup_generic(degree, ring, False, name, ltx)
###############################################################################
# Special Unitary Group
###############################################################################
def SU(n, R, var='a'):
"""
The special unitary group `SU( d, R )` consists of all `d \times d`
matrices that preserve a nondegenerate sequilinear form over the
ring `R` and have determinant one.
.. note::
For a finite field the matrices that preserve a sesquilinear
form over `F_q` live over `F_{q^2}`. So ``SU(n,q)`` for
integer ``q`` constructs the matrix group over the base ring
``GF(q^2)``.
.. note::
This group is also available via ``groups.matrix.SU()``.
INPUT:
|
- ``var`` -- variable used to represent generator of the finite
field, if needed.
OUTPUT:
Return the special unitary group.
EXAMPLES::
sage: SU(3,5)
Special Unitary Group of degree 3 over Finite Field in a of size 5^2
sage: SU(3, GF(5))
Special Unitary Group of degree 3 over Finite Field in a of size 5^2
sage: SU(3,QQ)
Special Unitary Group of degree 3 over Rational Field
TESTS::
sage: groups.matrix.SU(2, 3)
Special Unitary Group of degree 2 over Finite Field in a of size 3^2
"""
degree, ring = normalize_args_vectorspace(n, R, var=var)
if is_FiniteField(ring):
q = ring.cardinality()
ring = GF(q ** 2, name=var)
name = 'Special Unitary Group of degree {0} over {1}'.format(degree, ring)
ltx = r'\text{{SU}}_{{{0}}}({1})'.format(degree, latex(ring))
if is_FiniteField(ring):
cmd = 'SU({0}, {1})'.format(degree, q)
return UnitaryMatrixGroup_gap(degree, ring, True, name, ltx, cmd)
else:
return UnitaryMatrixGroup_generic(degree, ring, True, name, ltx)
########################################################################
# Unitary Group class
########################################################################
class UnitaryMatrixGroup_generic(NamedMatrixGroup_generic):
r"""
General Unitary Group over arbitrary rings.
EXAMPLES::
sage: G = GU(3, GF(7)); G
General Unitary Group of degree 3 over Finite Field in a of size 7^2
sage: latex(G)
\text{GU}_{3}(\Bold{F}_{7^{2}})
sage: G = SU(3, GF(5)); G
Special Unitary Group of degree 3 over Finite Field in a of size 5^2
sage: latex(G)
\text{SU}_{3}(\Bold{F}_{5^{2}})
"""
def _check_matrix(self, x, *args):
"""a
Check whether the matrix ``x`` is unitary.
See :meth:`~sage.groups.matrix_gps.matrix_group._check_matrix`
for details.
EXAMPLES::
sage: G = GU(2, GF(5))
sage: G._check_matrix(G.an_element().matrix())
sage: G = SU(2, GF(5))
sage: G._check_matrix(G.an_element().matrix())
"""
if self._special and x.determinant() != 1:
raise TypeError('matrix must have determinant one')
if not x.is_unitary():
raise TypeError('matrix must be unitary')
class UnitaryMatrixGroup_gap(UnitaryMatrixGroup_generic, NamedMatrixGroup_gap):
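    r"""
    Unitary groups over finite fields, implemented via GAP.

    (Editor's note: this subclass adds no behaviour of its own; everything is
    inherited from the generic and GAP parent classes.)
    """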
pass | - ``n`` -- a positive integer.
- ``R`` -- ring or an integer. If an integer is specified, the
corresponding finite field is used. |
extract.go | package provider
import (
"context"
"errors"
"io/fs"
"os"
"os/signal"
"path/filepath"
"sync"
"github.com/alexmullins/zip"
"github.com/Checkmarx/kics/pkg/model"
"github.com/Checkmarx/kics/pkg/terraformer"
"github.com/Checkmarx/kics/pkg/utils"
"github.com/rs/zerolog/log"
"github.com/hashicorp/go-getter"
)
const (
channelLength = 2
)
// ExtractedPath is a struct that contains the paths to scan and the
// extraction map of the sources
// Path is the slice of paths to scan
// ExtractionMap is a map that correlates the temporary path to the given path
type ExtractedPath struct {
Path []string
ExtractionMap map[string]model.ExtractedPathObject
}
type getterStruct struct {
ctx context.Context
cancel context.CancelFunc
mode getter.ClientMode
pwd string
opts []getter.ClientOption
destination string
source string
}
// GetTerraformerSources uses Terraformer to download runtime resources from the AWS provider
// into Terraform files.
// After the files are downloaded, kics scans them as normal local files
func GetTerraformerSources(source []string, destinationPath string) (ExtractedPath, error) {
extrStruct := ExtractedPath{
Path: []string{},
ExtractionMap: make(map[string]model.ExtractedPathObject),
}
for _, path := range source {
exportedPath, err := terraformer.Import(path, destinationPath)
if err != nil {
log.Error().Msgf("failed to import %s: %s", path, err)
}
extrStruct.ExtractionMap[exportedPath] = model.ExtractedPathObject{
Path: exportedPath,
LocalPath: true,
}
extrStruct.Path = append(extrStruct.Path, exportedPath)
}
return extrStruct, nil
}
// GetSources goes through the source slice and determines the source type (ex: zip, git, local).
// It then extracts the files to be scanned. If the given source is not local, a temp dir
// will be created where the files will be stored.
func GetSources(source []string) (ExtractedPath, error) {
extrStruct := ExtractedPath{
Path: []string{},
ExtractionMap: make(map[string]model.ExtractedPathObject),
}
for _, path := range source {
destination := filepath.Join(os.TempDir(), "kics-extract-"+utils.NextRandom())
mode := getter.ClientModeAny
pwd, err := os.Getwd()
if err != nil {
log.Fatal().Msgf("Error getting wd: %s", err)
}
opts := []getter.ClientOption{}
opts = append(opts, getter.WithInsecure())
ctx, cancel := context.WithCancel(context.Background())
goGetter := getterStruct{
ctx: ctx,
cancel: cancel,
mode: mode,
pwd: pwd,
opts: opts,
destination: destination,
source: path,
}
getterDst, err := getPaths(&goGetter)
if err != nil {
log.Error().Msgf("%s", err)
return ExtractedPath{}, err
}
tempDst, local := checkSymLink(getterDst, path)
extrStruct.ExtractionMap[getterDst] = model.ExtractedPathObject{
Path: path,
LocalPath: local,
}
extrStruct.Path = append(extrStruct.Path, tempDst)
}
return extrStruct, nil
}
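// Usage sketch (editor's addition; the example sources are hypothetical):
//
//	extracted, err := GetSources([]string{"./local-dir", "https://github.com/org/repo"})
//	if err == nil {
//		for _, scanPath := range extracted.Path {
//			// scan scanPath; extracted.ExtractionMap maps each temporary
//			// path back to the original source it was extracted from
//		}
//	}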
func getPaths(g *getterStruct) (string, error) {
if isEncrypted(g.source) {
err := errors.New("zip encrypted files are not supported")
log.Err(err)
return "", err
}
// Build the client
client := &getter.Client{
Ctx: g.ctx,
Src: g.source,
Dst: g.destination,
Pwd: g.pwd,
Mode: g.mode,
Options: g.opts,
}
wg := sync.WaitGroup{}
wg.Add(1)
errChan := make(chan error, channelLength)
go func() {
defer wg.Done()
defer g.cancel()
if err := client.Get(); err != nil {
errChan <- err
}
}()
c := make(chan os.Signal, channelLength)
signal.Notify(c, os.Interrupt)
select {
case <-c:
signal.Reset(os.Interrupt)
g.cancel()
wg.Wait()
case <-g.ctx.Done():
wg.Wait()
case err := <-errChan:
wg.Wait()
return "", err
}
return g.destination, nil
}
// check if the dst is a symbolic link
func | (getterDst, pathFile string) (string, bool) {
var local bool
_, err := os.Stat(pathFile)
	if err == nil { // check whether the file exists locally
local = true
}
info, err := os.Lstat(getterDst)
if err != nil {
log.Error().Msgf("failed lstat for %s: %v", getterDst, err)
}
fileInfo := getFileInfo(info, getterDst, pathFile)
if info.Mode()&os.ModeSymlink != 0 { // if it's a symbolic Link
path, err := os.Readlink(getterDst) // get location of symbolic Link
if err != nil {
log.Error().Msgf("failed Readlink for %s: %v", getterDst, err)
}
getterDst = path // change path to local path
} else if !fileInfo.IsDir() { // symbolic links are not created for single files
		if local { // the file exists locally
getterDst = pathFile
}
}
return getterDst, local
}
func getFileInfo(info fs.FileInfo, dst, pathFile string) fs.FileInfo {
var extension = filepath.Ext(pathFile)
var path string
if extension == "" {
path = filepath.Join(dst, filepath.Base(pathFile[0:len(pathFile)-len(extension)])) // for single file
} else {
path = filepath.Join(dst, filepath.Base(pathFile)) // for directories
}
fileInfo, err := os.Lstat(path)
if err != nil {
fileInfo = info
}
return fileInfo
}
func isEncrypted(sourceFile string) bool {
if filepath.Ext(sourceFile) != ".zip" {
return false
}
zipFile, err := zip.OpenReader(sourceFile)
if err != nil {
log.Error().Msgf("failed to open %s: %v", sourceFile, err)
return false
}
defer zipFile.Close()
for _, file := range zipFile.File {
if file.IsEncrypted() {
log.Error().Msgf("file %s is encrypted", sourceFile)
return true
}
}
return false
}
| checkSymLink |
typing_speed_per_char.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from io import StringIO
from collections import deque
from typetest.utils import validate_input_file_path
@validate_input_file_path
def plot(input_file, size=10000, filter_func=lambda c: True):
| """Reads last `size` lines of `input_file` and groups them by characters.
Removes lowest and highest 10% and boxplots the data.
filter_func: function taking a `char` returning `True` if char should be
plotted, `False` otherwise. By default plots all characters.
"""
with open(input_file) as f:
q = deque(f, maxlen=size)
data_frame = pd.read_csv(
StringIO("".join(q)),
header=None,
names=["char", "duration", "wpm", "timestamp"],
)
grouped_data_frames = filter(
lambda t: filter_func(t[1]["char"].iloc[0]),
data_frame.groupby("char"),
)
typing_speeds_in_wpm = []
chars = []
means = []
for char, df in grouped_data_frames:
if filter_func(char):
q1 = df["wpm"].quantile(0.1) # noqa
q3 = df["wpm"].quantile(0.9) # noqa
typing_speed_in_wpm = df.query("@q1 <= wpm <= @q3")["wpm"]
chars.append(char)
typing_speeds_in_wpm.append(typing_speed_in_wpm)
mean = typing_speed_in_wpm.mean()
means.append(mean if mean > 0 else 0)
fig, ax = plt.subplots()
ax.boxplot(typing_speeds_in_wpm, labels=chars)
mean = round(sum(means) / len(means))
ax.axhline(y=mean, color="r", linestyle="-", label=f"mean {mean} wpm")
ax.set_title(f"typing speed per character of last {size} characters")
ax.set_xlabel("characters")
ax.set_ylabel("typing speed [wpm]")
ax.legend()
ticks = plt.yticks()[0]
plt.yticks(np.arange(0, ticks[-1], 10))
plt.show() |
|
einhorn.go | package bind
import (
"fmt"
"log"
"net"
"os"
"strconv"
"syscall"
)
const tooBigErr = "bind: einhorn@%d not found (einhorn only passed %d fds)"
const bindErr = "bind: could not bind einhorn@%d: not running under einhorn"
const einhornErr = "bind: einhorn environment initialization error"
const ackErr = "bind: error ACKing to einhorn: %v"
var einhornNumFds int
func envInt(val string) (int, error) {
return strconv.Atoi(os.Getenv(val))
}
// Unfortunately this can't be a normal init function, because their execution
// order is undefined, and we need to run before the init() in bind.go.
func einhornInit() {
mpid, err := envInt("EINHORN_MASTER_PID")
if err != nil || mpid != os.Getppid() {
return
}
einhornNumFds, err = envInt("EINHORN_FD_COUNT")
if err != nil {
einhornNumFds = 0
return
}
// Prevent einhorn's fds from leaking to our children
for i := 0; i < einhornNumFds; i++ {
fd := int(einhornFd(i).Fd())
syscall.CloseOnExec(fd)
}
}
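// Environment sketch (editor's addition; the values are made up): a worker
// started under einhorn typically sees variables such as
//
//	EINHORN_MASTER_PID=1234
//	EINHORN_FD_COUNT=2
//	EINHORN_FD_0=6
//	EINHORN_FD_1=7
//	EINHORN_SOCK_PATH=/tmp/einhorn.sock
//
// which einhornInit, einhornFd and einhornAck consume.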
func usingEinhorn() bool {
return einhornNumFds > 0
}
func einhornFd(n int) *os.File |
func einhornBind(n int) (net.Listener, error) {
if !usingEinhorn() {
return nil, fmt.Errorf(bindErr, n)
}
if n >= einhornNumFds || n < 0 {
return nil, fmt.Errorf(tooBigErr, n, einhornNumFds)
}
f := einhornFd(n)
return net.FileListener(f)
}
// Fun story: this is actually YAML, not JSON.
const ackMsg = `{"command":"worker:ack","pid":%d}` + "\n"
func einhornAck() {
if !usingEinhorn() {
return
}
log.Print("bind: ACKing to einhorn")
ctl, err := net.Dial("unix", os.Getenv("EINHORN_SOCK_PATH"))
if err != nil {
log.Fatalf(ackErr, err)
}
defer ctl.Close()
_, err = fmt.Fprintf(ctl, ackMsg, os.Getpid())
if err != nil {
log.Fatalf(ackErr, err)
}
}
| {
name := fmt.Sprintf("EINHORN_FD_%d", n)
fno, err := envInt(name)
if err != nil {
log.Fatal(einhornErr)
}
return os.NewFile(uintptr(fno), name)
} |
bert_tokenization_jp.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
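# Example (editor's addition): the checkpoint name determines the allowed
# casing flag, e.g.
#   validate_case_matches_checkpoint(
#       do_lower_case=True,
#       init_checkpoint="uncased_L-12_H-768_A-12/bert_model.ckpt")  # passes
#   validate_case_matches_checkpoint(
#       do_lower_case=False,
#       init_checkpoint="uncased_L-12_H-768_A-12/bert_model.ckpt")  # ValueError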
def convert_to_unicode(text):  # handles Japanese text as well
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
    if six.PY3:  # True when the current runtime is Python 3
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
        # errors='ignore' selects the decode error-handling scheme; with 'strict', a bad byte sequence raises UnicodeError.
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
                break  # readline() returns '' only at EOF, so break (not 'continue') is correct here
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
        output.append(vocab[item])  # note: vocab maps str -> id; this raises KeyError if item is missing
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
    return tokens  # split the input text on whitespace to build the list of tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
# refer to https://github.com/cl-tohoku/bert-japanese/blob/master/tokenization.py
def __init__(self, vocab_file, do_lower_case=True, mecab_dict_path=None):
self.vocab = load_vocab(vocab_file)
        # self.vocab maps token str -> id, loaded from a plain-text vocabulary file with one token per line
        self.inv_vocab = {v: k for k, v in self.vocab.items()}  # id -> str, the inverse vocabulary
self.basic_tokenizer = MecabBasicTokenizer(do_lower_case=do_lower_case, mecab_dict_path=mecab_dict_path)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) # keep using existing method (no change)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
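    # Usage sketch (editor's addition; the vocab path and sentence are
    # illustrative only):
    #   tokenizer = FullTokenizer("vocab.txt", do_lower_case=True)
    #   tokens = tokenizer.tokenize("吾輩は猫である。")
    #   ids = tokenizer.convert_tokens_to_ids(tokens)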
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
def vocab_size(self):
return len(self.vocab)
class MecabBasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True, never_split=None, mecab_dict_path=None,
preserve_spaces=False):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
if never_split is None:
never_split = []
self.never_split = never_split
self.preserve_spaces = preserve_spaces
import MeCab # TODO
import ipadic
CHASEN_ARGS = r' -F "%m\t%f[7]\t%f[6]\t%F-[0,1,2,3]\t%f[4]\t%f[5]\n"'
CHASEN_ARGS += r' -U "%m\t%m\t%m\t%F-[0,1,2,3]\t\t\n"'
if mecab_dict_path is not None:
self.mecab = MeCab.Tagger(ipadic.MECAB_ARGS + CHASEN_ARGS + ' -d {}'.format(mecab_dict_path))
else:
self.mecab = MeCab.Tagger(ipadic.MECAB_ARGS + CHASEN_ARGS)
    def tokenize(self, text, never_split=None, with_info=False):
"""Tokenizes a piece of text."""
never_split = self.never_split + (never_split if never_split is not None else [])
text = unicodedata.normalize('NFKC', text)
tokens = []
token_infos = []
cursor = 0
for line in self.mecab.parse(text).split('\n'):
if line == 'EOS':
if self.preserve_spaces and len(text[cursor:]) > 0:
tokens.append(text[cursor:])
token_infos.append(None)
break
eles = line.split('\t')
token = eles[0]
token_info = '\t'.join(eles[1:])
token_start = text.index(token, cursor)
token_end = token_start + len(token)
if self.preserve_spaces and cursor < token_start:
tokens.append(text[cursor:token_start])
token_infos.append(None)
if self.do_lower_case and token not in never_split:
token = token.lower()
tokens.append(token)
token_infos.append(token_info)
cursor = token_end
assert len(tokens) == le | if with_info:
return tokens, token_infos
else:
return tokens
def tokenize_old(self, text): # useless method for English bert tokenizer only
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
# 类似于从"Montréal, über, 12.89, Mère, Françoise, noël, 889"
# 到:Montreal, uber, 12.89, Mere, Francoise, noel, 889
text = unicodedata.normalize("NFD", text) # 'Montréal, über, 12.89, Mère, Françoise, noël, 889'
# -》 'Montréal, über, 12.89, Mère, Françoise, noël, 889' 分离开了字母和逻辑重音
# e ̀
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
                output.append(char) # every CJK character becomes its own token, which is not ideal! TODO (not used now)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
        self.vocab = vocab  # OrderedDict mapping token str -> id
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs": # [Zs] Separator, Space
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"): # [Cc] Other, Control; [Cf] Other, Format
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
# [Pc] Punctuation, Connector
# [Pd] Punctuation, Dash
# [Pe] Punctuation, Close
# [Pf] Punctuation, Final quote (may behave like Ps or Pe depending on usage)
# [Pi] Punctuation, Initial quote (may behave like Ps or Pe depending on usage)
# [Po] Punctuation, Other
# [Ps] Punctuation, Open
return True
return False
| n(token_infos)
|
strategy_combinations.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Strategy combinations for combinations.combine()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import tf2
from tensorflow.python.distribute import central_storage_strategy
from tensorflow.python.distribute import cluster_resolver
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy as mirrored_lib
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import one_device_strategy as one_device_lib
from tensorflow.python.distribute import parameter_server_strategy_v2
from tensorflow.python.distribute import sharded_variable
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute import tpu_strategy as tpu_lib
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import context
from tensorflow.python.eager import remote
from tensorflow.python.platform import flags
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.training.server_lib import ClusterSpec
from tensorflow.python.util.tf_export import tf_export
_TF_INTERNAL_API_PREFIX = "__internal__.distribute.combinations."
_did_connect_to_cluster = False
_topology = None
CollectiveAllReduceExtended = (
collective_all_reduce_strategy.CollectiveAllReduceExtended)
def _version_chooser(tf1_cls, tf2_cls):
def creator(*args, **kwargs):
if tf2.enabled():
return tf2_cls(*args, **kwargs)
return tf1_cls(*args, **kwargs)
return creator
MirroredStrategy = _version_chooser(mirrored_lib.MirroredStrategyV1,
mirrored_lib.MirroredStrategy)
CentralStorageStrategy = _version_chooser(
central_storage_strategy.CentralStorageStrategyV1,
central_storage_strategy.CentralStorageStrategy)
OneDeviceStrategy = _version_chooser(one_device_lib.OneDeviceStrategyV1,
one_device_lib.OneDeviceStrategy)
# Only V2 CollectiveAllReduceStrategy combinations are supported.
CollectiveAllReduceStrategy = (
collective_all_reduce_strategy.CollectiveAllReduceStrategy)
# pylint: disable=missing-docstring
def _get_tpu_strategy_creator(steps_per_run,
use_single_core=False,
enable_packed_variable=False,
**kwargs):
def _create_tpu_strategy():
FLAGS = flags.FLAGS # pylint: disable=invalid-name
global _did_connect_to_cluster
global _topology
try:
# Attempt to locally discover the TPU. This will fail for Cloud TPU, in
# which case we fall back to the values passed as flags.
resolver = tpu_cluster_resolver.TPUClusterResolver()
did_automatically_resolve = True
except ValueError:
did_automatically_resolve = False
# These flags will be defined by tpu_test_wrapper.py.
resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=hasattr(FLAGS, "tpu") and FLAGS.tpu or "",
zone=hasattr(FLAGS, "zone") and FLAGS.zone or None,
project=hasattr(FLAGS, "project") and FLAGS.project or None,
)
# Only connect once per process, rather than per test method.
if not _did_connect_to_cluster:
if getattr(FLAGS, "tpu", "") or did_automatically_resolve:
remote.connect_to_cluster(resolver)
_did_connect_to_cluster = True
_topology = tpu_strategy_util.initialize_tpu_system(resolver)
device_assignment = None
if use_single_core:
device_assignment = device_assignment_lib.DeviceAssignment(
_topology,
core_assignment=device_assignment_lib.SINGLE_CORE_ASSIGNMENT)
# Steps per run is only supported in TF 1.x
if tf2.enabled():
strategy = tpu_lib.TPUStrategy(resolver, device_assignment, **kwargs)
else:
strategy = tpu_lib.TPUStrategyV1(resolver, steps_per_run,
device_assignment, **kwargs)
strategy._enable_packed_variable_in_eager_mode = enable_packed_variable # pylint: disable=protected-access
return strategy
return _create_tpu_strategy
def _mirrored_strategy_with_collective_key_base(devices):
mirrored_lib.MirroredStrategyV1._collective_key_base += 100000
mirrored_lib.MirroredStrategy._collective_key_base += 100000
return MirroredStrategy(devices)
def _mirrored_strategy_with_no_merge_call(devices):
mirrored_lib.MirroredStrategyV1._collective_key_base += 100000
mirrored_lib.MirroredStrategy._collective_key_base += 100000
out = MirroredStrategy(devices)
# Stub out merge call usage.
out.extended._use_merge_call = lambda: False # pylint: disable=protected-access
return out
def _get_multi_worker_mirrored_creator(required_gpus, use_merge_call=True):
def _create_multi_worker_mirrored():
tf_config = cluster_resolver.TFConfigClusterResolver()
master = tf_config.master()
if tf_config.rpc_layer:
# Strip off the rpc_layer suffix.
master = master[len("%s://" % tf_config.rpc_layer):]
resolver = cluster_resolver.SimpleClusterResolver(
cluster_spec=tf_config.cluster_spec(),
task_type=tf_config.task_type,
task_id=tf_config.task_id,
master=master,
environment=tf_config.environment,
num_accelerators={"GPU": required_gpus},
rpc_layer=tf_config.rpc_layer or "grpc",
)
    # Disable health check. We don't have a reliable way to shut down the strategy
# (and thus the health check) at the end of a test. Turning on health check
# causes some flakiness since we re-create part of the server when creating
# a strategy, and our tests are capable of handling failures.
CollectiveAllReduceExtended._enable_check_health = False # pylint: disable=protected-access
# Always create the strategy in eager mode so that it starts the server and
# configures the eager context. The eager context can no longer be
# configured after initialization.
with context.eager_mode():
strategy = CollectiveAllReduceStrategy(cluster_resolver=resolver)
if not use_merge_call:
strategy.extended._use_merge_call = lambda: False # pylint: disable=protected-access
# TODO(b/152320929): Wait for the cluster before proceeding, otherwise
# collectives may hang if any worker launches collectives before the chief
# creates the strategy.
try:
multi_process_runner.get_barrier().wait()
except ValueError:
# If the creator is called in the main process,
# multi_process_runner.get_barrier() raises ValueError, which is safe to
# ignore.
pass
return strategy
return _create_multi_worker_mirrored
def _get_ps_strategy_creator(num_workers, num_ps, required_gpus=0):
def _create_parameter_server():
cluster_def = multi_worker_test_base.create_in_process_cluster(
num_workers=num_workers, num_ps=num_ps, rpc_layer="grpc")
resolver = cluster_resolver.SimpleClusterResolver(
ClusterSpec(cluster_def),
num_accelerators={"GPU": required_gpus},
rpc_layer="grpc")
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
resolver,
variable_partitioner=sharded_variable.FixedShardsPartitioner(2))
return strategy
return _create_parameter_server
def _deferred_pool_runner(has_chief, num_workers, initializer=None):
"""Returns a callable that returns the pool runner.
It creates the pool runner only upon first invocation. This avoids creating it
when this file is imported.
Args:
has_chief: whether there should be a chief.
num_workers: the number of workers excluding the chief.
initializer: initializer of each process.
Returns:
A callable that returns the runner.
"""
container = []
def get_or_create():
if not container:
cluster_spec = multi_worker_test_base.create_cluster_spec(
has_chief=has_chief,
num_workers=num_workers,
num_ps=0,
has_eval=False)
runner = multi_process_runner.MultiProcessPoolRunner(
cluster_spec, initializer=initializer)
container.append(runner)
return container[0]
return get_or_create
# We need to create the strategy in the initializer to start the server before
# any test runs.
_two_worker_pool = _deferred_pool_runner(
has_chief=True,
num_workers=1,
initializer=_get_multi_worker_mirrored_creator(required_gpus=0))
_four_worker_pool = _deferred_pool_runner(
has_chief=True,
num_workers=3,
initializer=_get_multi_worker_mirrored_creator(required_gpus=0))
# pylint: disable=g-long-lambda
default_strategy = combinations.NamedDistribution(
"Default",
distribution_strategy_context._get_default_strategy, # pylint: disable=protected-access
required_gpus=None)
one_device_strategy = combinations.NamedDistribution(
"OneDeviceCPU", lambda: OneDeviceStrategy("/cpu:0"), required_gpus=None)
one_device_strategy_gpu = combinations.NamedDistribution(
"OneDeviceGPU", lambda: OneDeviceStrategy("/gpu:0"), required_gpus=1)
one_device_strategy_on_worker_1 = combinations.NamedDistribution(
"OneDeviceOnWorker1CPU",
lambda: OneDeviceStrategy("/job:worker/replica:0/task:1/cpu:0"),
required_gpus=None)
one_device_strategy_gpu_on_worker_1 = combinations.NamedDistribution(
"OneDeviceOnWorker1GPU",
lambda: OneDeviceStrategy("/job:worker/replica:0/task:1/gpu:0"),
required_gpus=1)
tpu_strategy = combinations.NamedDistribution(
"TPU", _get_tpu_strategy_creator(steps_per_run=2), required_tpu=True)
tpu_strategy_packed_var = combinations.NamedDistribution(
"TPUPackedVar",
_get_tpu_strategy_creator(steps_per_run=2, enable_packed_variable=True),
required_tpu=True)
tpu_strategy_one_step = combinations.NamedDistribution(
"TPUOneStep", _get_tpu_strategy_creator(steps_per_run=1), required_tpu=True)
tpu_strategy_one_core = combinations.NamedDistribution(
"TPUOneCore",
_get_tpu_strategy_creator(steps_per_run=2, use_single_core=True),
required_tpu=True)
tpu_strategy_one_step_one_core = combinations.NamedDistribution(
"TPUOneStepOneCore",
_get_tpu_strategy_creator(steps_per_run=1, use_single_core=True),
required_tpu=True)
cloud_tpu_strategy = combinations.NamedDistribution(
"CloudTPU",
_get_tpu_strategy_creator(steps_per_run=2),
required_tpu=True,
use_cloud_tpu=True)
mirrored_strategy_with_one_cpu = combinations.NamedDistribution(
"Mirrored1CPU",
lambda: _mirrored_strategy_with_collective_key_base(["/cpu:0"]))
mirrored_strategy_with_one_gpu = combinations.NamedDistribution(
"Mirrored1GPU",
lambda: _mirrored_strategy_with_collective_key_base(["/gpu:0"]),
required_gpus=1)
mirrored_strategy_with_gpu_and_cpu = combinations.NamedDistribution(
"MirroredCPUAndGPU",
lambda: _mirrored_strategy_with_collective_key_base(["/gpu:0", "/cpu:0"]),
required_gpus=1)
mirrored_strategy_with_two_gpus = combinations.NamedDistribution(
"Mirrored2GPUs",
lambda: _mirrored_strategy_with_collective_key_base(["/gpu:0", "/gpu:1"]),
required_gpus=2)
mirrored_strategy_with_two_gpus_no_merge_call = combinations.NamedDistribution(
"Mirrored2GPUsNoMergeCall",
lambda: _mirrored_strategy_with_no_merge_call(["/gpu:0", "/gpu:1"]),
required_physical_gpus=2)
# Should call set_virtual_cpus_to_at_least(3) in your test's setUp methods.
mirrored_strategy_with_cpu_1_and_2 = combinations.NamedDistribution(
"Mirrored2CPU",
lambda: _mirrored_strategy_with_collective_key_base(["/cpu:1", "/cpu:2"]))
mirrored_strategy_with_cpu_1_and_2.__doc__ = (
"""Mirrored strategy with 2 virtual CPUs.
    Logical devices should be set up before use.
""")
central_storage_strategy_with_two_gpus = combinations.NamedDistribution(
"CentralStorage2GPUs",
lambda: CentralStorageStrategy(["/gpu:0", "/gpu:1"]),
required_gpus=2)
central_storage_strategy_with_gpu_and_cpu = combinations.NamedDistribution(
"CentralStorageCPUAndGPU",
lambda: CentralStorageStrategy(["/gpu:0", "/cpu:0"]),
required_gpus=1)
# chief + 1 worker, with CPU.
multi_worker_mirrored_2x1_cpu = combinations.NamedDistribution(
"MultiWorkerMirrored2x1CPU",
_get_multi_worker_mirrored_creator(required_gpus=0),
has_chief=True,
num_workers=1,
pool_runner_fn=_two_worker_pool,
no_xla=True,
)
# chief + 1 worker, with 1 GPU each.
multi_worker_mirrored_2x1_gpu = combinations.NamedDistribution(
"MultiWorkerMirrored2x1GPU",
_get_multi_worker_mirrored_creator(required_gpus=1),
has_chief=True,
num_workers=1,
required_gpus=1,
pool_runner_fn=_two_worker_pool,
no_xla=True,
)
# chief + 1 worker, with 2 GPU each.
multi_worker_mirrored_2x2_gpu = combinations.NamedDistribution(
"MultiWorkerMirrored2x2GPU",
_get_multi_worker_mirrored_creator(required_gpus=2),
has_chief=True,
num_workers=1,
required_gpus=2,
pool_runner_fn=_two_worker_pool,
no_xla=True,
)
multi_worker_mirrored_2x2_gpu_no_merge_call = combinations.NamedDistribution(
"MultiWorkerMirrored2x2GPUNoMergeCall",
_get_multi_worker_mirrored_creator(
required_gpus=2, use_merge_call=False),
has_chief=True,
num_workers=1,
required_physical_gpus=2,
pool_runner_fn=_two_worker_pool,
no_xla=True,
)
# chief + 3 workers, with CPU.
multi_worker_mirrored_4x1_cpu = combinations.NamedDistribution(
"MultiWorkerMirrored4x1CPU",
_get_multi_worker_mirrored_creator(required_gpus=0),
has_chief=True,
num_workers=3,
pool_runner_fn=_four_worker_pool,
no_xla=True,
)
parameter_server_strategy_3worker_2ps_cpu = combinations.NamedDistribution(
"ParameterServer3Worker2PSCPU",
_get_ps_strategy_creator(num_workers=3, num_ps=2),
)
parameter_server_strategy_1worker_2ps_cpu = combinations.NamedDistribution(
"ParameterServer1Worker2PSCPU",
_get_ps_strategy_creator(num_workers=1, num_ps=2),
)
parameter_server_strategy_3worker_2ps_1gpu = combinations.NamedDistribution(
"ParameterServer3Worker2PS1GPU",
_get_ps_strategy_creator(num_workers=3, num_ps=2, required_gpus=1),
required_gpus=1,
)
parameter_server_strategy_1worker_2ps_1gpu = combinations.NamedDistribution(
"ParameterServer1Worker2PS1GPU",
_get_ps_strategy_creator(num_workers=1, num_ps=2, required_gpus=1),
required_gpus=1,
)
graph_and_eager_modes = ["graph", "eager"]
# TODO(crccw): remove after tf-nightly picks up the new API.
def set_virtual_cpus_to_at_least(num_virtual_cpus):
|
strategies_minus_tpu = [
default_strategy,
one_device_strategy,
one_device_strategy_gpu,
mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus,
central_storage_strategy_with_gpu_and_cpu,
]
strategies_minus_default_and_tpu = [
one_device_strategy,
one_device_strategy_gpu,
mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus,
]
tpu_strategies = [
tpu_strategy, # steps_per_run=2
tpu_strategy_one_step,
tpu_strategy_packed_var,
cloud_tpu_strategy,
]
all_strategies_minus_default = strategies_minus_default_and_tpu + tpu_strategies
all_strategies = strategies_minus_tpu + tpu_strategies
two_replica_strategies = [
mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus,
multi_worker_mirrored_2x1_cpu,
multi_worker_mirrored_2x1_gpu,
tpu_strategy, # steps_per_run=2
tpu_strategy_one_step,
central_storage_strategy_with_gpu_and_cpu,
]
four_replica_strategies = [
multi_worker_mirrored_2x2_gpu,
multi_worker_mirrored_4x1_cpu,
]
# TODO(b/159831907): replace with two_replica_strategies after the tests using
# it work with MWMS.
multidevice_strategies = [
mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus,
tpu_strategy, # steps_per_run=2
tpu_strategy_one_step
]
multiworker_strategies = [
multi_worker_mirrored_2x1_cpu, multi_worker_mirrored_2x1_gpu,
multi_worker_mirrored_2x2_gpu
]
def strategy_minus_tpu_combinations():
return combinations.combine(
distribution=strategies_minus_tpu, mode=["graph", "eager"])
def tpu_strategy_combinations():
return combinations.combine(distribution=tpu_strategies, mode=["graph"])
def all_strategy_combinations():
return strategy_minus_tpu_combinations() + tpu_strategy_combinations()
def all_strategy_minus_default_and_tpu_combinations():
return combinations.combine(
distribution=[
one_device_strategy, one_device_strategy_gpu,
mirrored_strategy_with_gpu_and_cpu, mirrored_strategy_with_two_gpus
],
mode=["graph", "eager"])
def all_strategy_combinations_minus_default():
return (all_strategy_minus_default_and_tpu_combinations() +
tpu_strategy_combinations())
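# Usage sketch (assuming the standard combinations.generate decorator from
# this module): the combination lists above are typically consumed as
# parameterized test cases, e.g.
#
#   @combinations.generate(all_strategy_combinations())
#   def test_reduce(self, distribution, mode):
#     with distribution.scope():
#       ...  # build and run the replicated computation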
tf_export(
_TF_INTERNAL_API_PREFIX + "central_storage_strategy_with_gpu_and_cpu",
v1=[]).export_constant(__name__,
"central_storage_strategy_with_gpu_and_cpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "central_storage_strategy_with_two_gpus",
v1=[]).export_constant(__name__, "central_storage_strategy_with_two_gpus")
tf_export(
_TF_INTERNAL_API_PREFIX + "cloud_tpu_strategy",
v1=[]).export_constant(__name__, "cloud_tpu_strategy")
tf_export(
_TF_INTERNAL_API_PREFIX + "default_strategy",
v1=[]).export_constant(__name__, "default_strategy")
tf_export(
_TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_cpu_1_and_2",
v1=[]).export_constant(__name__, "mirrored_strategy_with_cpu_1_and_2")
tf_export(
_TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_gpu_and_cpu",
v1=[]).export_constant(__name__, "mirrored_strategy_with_gpu_and_cpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_one_cpu",
v1=[]).export_constant(__name__, "mirrored_strategy_with_one_cpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_one_gpu",
v1=[]).export_constant(__name__, "mirrored_strategy_with_one_gpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_two_gpus",
v1=[]).export_constant(__name__, "mirrored_strategy_with_two_gpus")
tf_export(
_TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_two_gpus_no_merge_call",
v1=[]).export_constant(__name__,
"mirrored_strategy_with_two_gpus_no_merge_call")
tf_export(
_TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x1_cpu",
v1=[]).export_constant(__name__, "multi_worker_mirrored_2x1_cpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x1_gpu",
v1=[]).export_constant(__name__, "multi_worker_mirrored_2x1_gpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x2_gpu",
v1=[]).export_constant(__name__, "multi_worker_mirrored_2x2_gpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x2_gpu_no_merge_call",
v1=[]).export_constant(__name__,
"multi_worker_mirrored_2x2_gpu_no_merge_call")
tf_export(
_TF_INTERNAL_API_PREFIX + "one_device_strategy",
v1=[]).export_constant(__name__, "one_device_strategy")
tf_export(
_TF_INTERNAL_API_PREFIX + "one_device_strategy_gpu",
v1=[]).export_constant(__name__, "one_device_strategy_gpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "tpu_strategy",
v1=[]).export_constant(__name__, "tpu_strategy")
tf_export(
_TF_INTERNAL_API_PREFIX + "parameter_server_strategy_3worker_2ps_cpu",
v1=[]).export_constant(__name__,
"parameter_server_strategy_3worker_2ps_cpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "parameter_server_strategy_1worker_2ps_cpu",
v1=[]).export_constant(__name__,
"parameter_server_strategy_1worker_2ps_cpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "parameter_server_strategy_3worker_2ps_1gpu",
v1=[]).export_constant(__name__,
"parameter_server_strategy_3worker_2ps_1gpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "parameter_server_strategy_1worker_2ps_1gpu",
v1=[]).export_constant(__name__,
"parameter_server_strategy_1worker_2ps_1gpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "tpu_strategy_one_core",
v1=[]).export_constant(__name__, "tpu_strategy_one_core")
tf_export(
_TF_INTERNAL_API_PREFIX + "tpu_strategy_packed_var",
v1=[]).export_constant(__name__, "tpu_strategy_packed_var")
| test_util.set_logical_devices_to_at_least("CPU", num_virtual_cpus) |
parser_test.go | package dir_restorer
import (
"strings"
"testing"
)
func TestParser(t *testing.T) |
//
//func TestFindTables(t *testing.T) {
// schema, err := ioutil.ReadFile("/home/wolf/schema.sql")
// if err != nil {
// t.Errorf("error reading file: %s", err)
// }
// t.Logf("%s", strings.Join(FindTables(schema), ", "))
//}
//
//func TestColumns(t *testing.T) {
// schema, err := ioutil.ReadFile("/home/wolf/schema.sql")
// if err != nil {
// t.Errorf("error reading file: %s", err)
// }
// for _, table := range FindTables(schema) {
// columns := FindTableColumns(schema, table)
// t.Logf("%s: %s", table, strings.Join(columns, ","))
// }
//}
| {
in := []byte(
"--\n" +
"-- Table structure for table `chart_mogul_import`\n" +
"--\n" +
"\n" +
"DROP TABLE IF EXISTS `chart_mogul_import`;\n" +
"/*!40101 SET @saved_cs_client = @@character_set_client */;\n" +
"/*!40101 SET character_set_client = utf8 */;\n" +
"CREATE TABLE `chart_mogul_import` (\n" +
" `import_id` int(10) unsigned NOT NULL AUTO_INCREMENT,\n" +
" `date_start` datetime DEFAULT NULL,\n" +
" `date_end` datetime DEFAULT NULL,\n" +
" `status` tinyint(3) DEFAULT NULL,\n" +
" `error` longtext,\n" +
" PRIMARY KEY (`import_id`)\n" +
") ENGINE=InnoDB AUTO_INCREMENT=8 DEFAULT CHARSET=latin1;\n" +
"/*!40101 SET character_set_client = @saved_cs_client */;\n" +
"\n" +
"\n" +
"--\n" +
"-- Table structure for table `cb_tasks`\n" +
"--\n" +
"\n" +
"DROP TABLE IF EXISTS `cb_tasks`;\n" +
"/*!40101 SET @saved_cs_client = @@character_set_client */;\n" +
"/*!40101 SET character_set_client = utf8 */;\n" +
"CREATE TABLE `cb_tasks` (\n" +
" `id` int(10) unsigned NOT NULL AUTO_INCREMENT,\n" +
" `date` date NOT NULL,\n" +
" `date_done` date DEFAULT NULL,\n" +
" `name` varchar(60) NOT NULL,\n" +
" `campaign_id` int(10) unsigned NOT NULL,\n" +
" `worker` varchar(60) NOT NULL DEFAULT '',\n" +
" `done` enum('N','Y') NOT NULL DEFAULT 'N',\n" +
" `type` tinyint(3) unsigned NOT NULL,\n" +
" PRIMARY KEY (`id`),\n" +
" UNIQUE KEY `idx_type_campaign_id` (`type`,`campaign_id`),\n" +
" KEY `done` (`done`),\n" +
" KEY `campaign_id` (`campaign_id`)\n" +
") ENGINE=InnoDB AUTO_INCREMENT=1298566 DEFAULT CHARSET=utf8;\n" +
"/*!40101 SET character_set_client = @saved_cs_client */;\n" +
" ",
)
t.Logf("%s", strings.Join(FindTableColumns(in, "cb_tasks"), ", "))
t.Logf("%s", strings.Join(FindTableColumns(in, "chart_mogul_import"), ", "))
t.Logf("%s", FindTableCreate(in, "cb_tasks"))
} |
evaluation_filters.py | from collections import namedtuple
from django.forms import TypedChoiceField
from django.template import Library
from django.utils.translation import ugettext_lazy as _
from evap.evaluation.models import BASE_UNIPOLAR_CHOICES
from evap.rewards.tools import can_reward_points_be_used_by
from evap.student.forms import HeadingField
# the names displayed for contributors
STATE_NAMES = {
'new': _('new'),
'prepared': _('prepared'),
'editor_approved': _('editor approved'),
'approved': _('approved'),
'in_evaluation': _('in evaluation'),
'evaluated': _('evaluated'),
'reviewed': _('reviewed'),
'published': _('published'),
}
# the descriptions used in tooltips for contributors
STATE_DESCRIPTIONS = {
'new': _('The evaluation was newly created and will be prepared by the evaluation team.'),
'prepared': _('The evaluation was prepared by the evaluation team and is now available for editors.'),
'editor_approved': _('The evaluation was approved by an editor and will now be checked by the evaluation team.'),
'approved': _('All preparations are finished. The evaluation will begin once the defined start date is reached.'),
'in_evaluation': _('The evaluation is currently running until the defined end date is reached.'),
'evaluated': _('The evaluation has finished and will now be reviewed by the evaluation team.'),
'reviewed': _('The evaluation has finished and was reviewed by the evaluation team. You will receive an email when its results are published.'),
'published': _('The results for this evaluation have been published.'),
}
# values for approval states shown to staff
StateValues = namedtuple('StateValues', ('order', 'icon', 'filter', 'description'))
APPROVAL_STATES = {
'new': StateValues(0, 'fas fa-circle icon-yellow', 'new', _('In preparation')),
'prepared': StateValues(2, 'far fa-square icon-gray', 'prepared', _('Awaiting editor review')),
'editor_approved': StateValues(1, 'far fa-check-square icon-yellow', 'editor_approved', _('Approved by editor, awaiting manager review')),
'approved': StateValues(3, 'far fa-check-square icon-green', 'approved', _('Approved by manager')),
}
register = Library()
@register.filter(name='zip')
def _zip(a, b):
return zip(a, b)
@register.filter()
def zip_choices(counts, choices):
return zip(counts, choices.names, choices.colors, choices.values)
@register.filter
def | (evaluation):
if evaluation.state in ['new', 'prepared', 'editor_approved', 'approved']:
return evaluation.days_until_evaluation
if evaluation.state == "in_evaluation":
return 100000 + evaluation.days_left_for_evaluation
return 200000 + evaluation.days_left_for_evaluation
# from http://www.jongales.com/blog/2009/10/19/percentage-django-template-tag/
@register.filter
def percentage(fraction, population):
try:
return "{0:.0f}%".format(int(float(fraction) / float(population) * 100))
except ValueError:
return None
except ZeroDivisionError:
return None
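# Template usage sketch (hypothetical context variables):
#   {{ evaluation.num_voters|percentage:evaluation.num_participants }}
# renders e.g. "75%"; on invalid input or a zero population the filter
# returns None instead of raising.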
@register.filter
def percentage_one_decimal(fraction, population):
try:
return "{0:.1f}%".format((float(fraction) / float(population)) * 100)
except ValueError:
return None
except ZeroDivisionError:
return None
@register.filter
def to_colors(choices):
if not choices:
# When displaying the course distribution, there are no associated voting choices.
# In that case, we just use the colors of a unipolar scale.
return BASE_UNIPOLAR_CHOICES['colors']
return choices.colors
@register.filter
def statename(state):
return STATE_NAMES.get(state)
@register.filter
def statedescription(state):
return STATE_DESCRIPTIONS.get(state)
@register.filter
def approval_state_values(state):
if state in APPROVAL_STATES:
return APPROVAL_STATES[state]
if state in ['in_evaluation', 'evaluated', 'reviewed', 'published']:
return APPROVAL_STATES['approved']
return None
@register.filter
def approval_state_icon(state):
if state in APPROVAL_STATES:
return APPROVAL_STATES[state].icon
if state in ['in_evaluation', 'evaluated', 'reviewed', 'published']:
return APPROVAL_STATES['approved'].icon
return None
@register.filter
def can_results_page_be_seen_by(evaluation, user):
return evaluation.can_results_page_be_seen_by(user)
@register.filter(name='can_reward_points_be_used_by')
def _can_reward_points_be_used_by(user):
return can_reward_points_be_used_by(user)
@register.filter
def is_choice_field(field):
return isinstance(field.field, TypedChoiceField)
@register.filter
def is_heading_field(field):
return isinstance(field.field, HeadingField)
@register.filter
def is_user_editor_or_delegate(evaluation, user):
return evaluation.is_user_editor_or_delegate(user)
@register.filter
def is_user_responsible_or_contributor_or_delegate(evaluation, user):
return evaluation.is_user_responsible_or_contributor_or_delegate(user)
@register.filter
def message_class(level):
return {
'debug': 'info',
'info': 'info',
'success': 'success',
'warning': 'warning',
'error': 'danger',
}.get(level, 'info')
@register.filter
def hours_and_minutes(time_left_for_evaluation):
hours = time_left_for_evaluation.seconds // 3600
minutes = (time_left_for_evaluation.seconds // 60) % 60
return "{:02}:{:02}".format(hours, minutes)
@register.filter
def has_nonresponsible_editor(evaluation):
return evaluation.contributions.filter(can_edit=True).exclude(contributor__in=evaluation.course.responsibles.all()).exists()
| ordering_index |
read-rows-acceptance-tests.ts | /*!
* Copyright 2017 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as assert from 'assert';
const testcases = require('../../system-test/read-rows-acceptance-test.json')
.tests;
import {PassThrough} from 'stream';
import {Table} from '../src/table.js';
import {Row} from '../src/row.js';
import * as ProtoBuf from 'protobufjs';
import * as path from 'path';
const protosRoot = path.resolve(__dirname, '../protos');
function | (filename, root) {
filename.root = path.resolve(filename.root) + '/';
root.resolvePath = function(originPath, importPath, alreadyNormalized) {
return ProtoBuf.util.path.resolve(
filename.root,
importPath,
alreadyNormalized
);
};
return filename.file;
}
const root = new ProtoBuf.Root();
root.loadSync(
applyProtoRoot(
{
root: protosRoot,
file: 'google/bigtable/v2/bigtable.proto',
},
root
),
{keepCase: false}
);
const ReadRowsResponse = root.lookupType('google.bigtable.v2.ReadRowsResponse');
const CellChunk = root.lookupType(
'google.bigtable.v2.ReadRowsResponse.CellChunk'
);
describe('Read Row Acceptance tests', function() {
testcases.forEach(function(test) {
it(test.name, done => {
const table = new Table({id: 'xyz'}, 'my-table');
const results: any[] = [];
const rawResults = test.results || [];
const errorCount = rawResults.filter(result => result.error).length;
rawResults
.filter(result => !result.error)
.forEach(result => {
const existingRow = results.find(filter => filter.key === result.rk);
const row = existingRow || {key: result.rk, data: {}};
const data = row.data;
if (typeof existingRow === 'undefined') {
results.push(row);
}
const family = data[result.fm] || {};
data[result.fm] = family;
const qualifier = family[result.qual] || [];
family[result.qual] = qualifier;
const resultLabels: any[] = [];
if (result.label !== '') {
resultLabels.push(result.label);
}
qualifier.push({
value: result.value,
timestamp: '' + result.ts,
labels: resultLabels,
});
});
table.bigtable = {};
table.bigtable.request = function() {
const stream = new PassThrough({
objectMode: true,
});
/* tslint:disable-next-line */
(stream as any).abort = function() {};
setImmediate(function() {
test.chunks_base64
.map(chunk => {
const cellChunk = CellChunk.decode(Buffer.from(chunk, 'base64')); //.decode64(chunk);
let readRowsResponse: any = {chunks: [cellChunk]};
readRowsResponse = ReadRowsResponse.create(readRowsResponse);
readRowsResponse = ReadRowsResponse.toObject(readRowsResponse, {
defaults: true,
longs: String,
oneofs: true,
});
return readRowsResponse;
})
.forEach(readRowsResponse => stream.push(readRowsResponse));
stream.push(null);
});
return stream;
};
const tableRows = results.map(rawRow => {
const row = new Row(table, rawRow.key);
row.data = rawRow.data;
return row;
});
const errors: any[] = [];
const rows: any[] = [];
table
.createReadStream({})
.on('error', err => {
errors.push(err);
verify();
})
.on('data', row => {
rows.push(row);
})
.on('end', () => {
verify();
});
function verify() {
assert.strictEqual(errors.length, errorCount, ' error count mismatch');
assert.strictEqual(rows.length, results.length, 'row count mismatch');
assert.deepStrictEqual(rows, tableRows, 'row mismatch');
done();
}
});
});
});
| applyProtoRoot |
admin.py | from django.contrib import admin
from .models import Article, Category, User
from django import forms
from pagedown.widgets import AdminPagedownWidget
class ArticleForm(forms.ModelForm):
text = forms.CharField(widget=AdminPagedownWidget())
class Meta:
model = Article
fields = '__all__'
class | (admin.ModelAdmin):
    form = ArticleForm  # attach the Markdown editor form defined above
    list_display = ('title', 'publish_time', 'last_modify_time', 'id')
class CategoryAdmin(admin.ModelAdmin):
list_display = ('name', 'created_time', 'last_modify_time', 'id')
class UserAdmin(admin.ModelAdmin):
list_display = ('username', 'nickname', 'created_time', 'id')
admin.site.register(Article, ArticleAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(User, UserAdmin)
| ArticleAdmin |
AMDGPU.py | # Copyright (c) 2021 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from m5.params import *
from m5.objects.PciDevice import PciDevice
from m5.objects.PciDevice import PciMemBar, PciMemUpperBar, PciLegacyIoBar
# PCI device model for an AMD Vega 10 based GPU. The PCI codes and BARs
# correspond to a Vega Frontier Edition hardware device. None of the PCI
# related values in this class should be changed.
#
# This class requires a ROM binary and an MMIO trace to initialize the
# device registers and memory. It is intended only to be used in full-system
# simulation under Linux where the amdgpu driver is modprobed.
class AMDGPUDevice(PciDevice):
type = 'AMDGPUDevice'
cxx_header = "dev/amdgpu/amdgpu_device.hh"
cxx_class = 'gem5::AMDGPUDevice'
# IDs for AMD Vega 10
VendorID = 0x1002
DeviceID = 0x6863
    # Command 0x3 never gets sent, indicating the IO and Mem BARs are enabled.
    # Hard-code the command here and deal with unassigned BARs on the C++ side. |	ClassCode = 0x03
SubClassCode = 0x00
ProgIF = 0x00
# Use max possible BAR size for Vega 10. We can override with driver param
BAR0 = PciMemBar(size='16GiB')
BAR1 = PciMemUpperBar()
BAR2 = PciMemBar(size='2MiB')
BAR3 = PciMemUpperBar()
BAR4 = PciLegacyIoBar(addr=0xf000, size='256B')
BAR5 = PciMemBar(size='512KiB')
InterruptLine = 14
InterruptPin = 2
ExpansionROM = 0
rom_binary = Param.String("ROM binary dumped from hardware")
trace_file = Param.String("MMIO trace collected on hardware")
checkpoint_before_mmios = Param.Bool(False, "Take a checkpoint before the"
" device begins sending MMIOs") | Command = 0x3
Status = 0x0280
Revision = 0x0 |
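# A minimal instantiation sketch (hypothetical file names; the surrounding
# full-system PCI topology is omitted):
#
#   gpu = AMDGPUDevice()
#   gpu.rom_binary = "vega10.rom"         # ROM dumped from real hardware
#   gpu.trace_file = "vega10_mmio.trace"  # MMIO trace collected on hardware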
trampoline.rs | use crate::cache::TrampolineCache;
use cranelift_codegen::{
binemit::{NullTrapSink, Reloc, RelocSink},
cursor::{Cursor, FuncCursor},
ir::{self, InstBuilder},
isa, Context,
};
use hashbrown::HashMap;
use std::{iter, mem, ptr::NonNull};
use wasmer_runtime_core::{
backend::sys::{Memory, Protect},
module::{ExportIndex, ModuleInfo},
types::{FuncSig, SigIndex, Type},
vm,
};
struct NullRelocSink {}
impl RelocSink for NullRelocSink {
fn reloc_ebb(&mut self, _: u32, _: Reloc, _: u32) {}
fn reloc_external(&mut self, _: u32, _: Reloc, _: &ir::ExternalName, _: i64) {}
fn reloc_jt(&mut self, _: u32, _: Reloc, _: ir::JumpTable) {}
}
pub type Trampoline = unsafe extern "C" fn(*mut vm::Ctx, NonNull<vm::Func>, *const u64, *mut u64);
pub struct Trampolines {
memory: Memory,
offsets: HashMap<SigIndex, usize>,
}
impl Trampolines {
pub fn from_trampoline_cache(cache: TrampolineCache) -> Self |
pub fn to_trampoline_cache(&self) -> TrampolineCache {
let mut code = vec![0; self.memory.size()];
unsafe {
code.copy_from_slice(self.memory.as_slice());
}
TrampolineCache {
code,
offsets: self.offsets.clone(),
}
}
pub fn new(isa: &isa::TargetIsa, module: &ModuleInfo) -> Self {
let func_index_iter = module
.exports
.values()
.filter_map(|export| match export {
ExportIndex::Func(func_index) => Some(func_index),
_ => None,
})
.chain(module.start_func.iter());
let mut compiled_functions = Vec::new();
let mut ctx = Context::new();
let mut total_size = 0;
for exported_func_index in func_index_iter {
let sig_index = module.func_assoc[*exported_func_index];
let func_sig = &module.signatures[sig_index];
let trampoline_func = generate_func(&func_sig);
ctx.func = trampoline_func;
let mut code_buf = Vec::new();
ctx.compile_and_emit(
isa,
&mut code_buf,
&mut NullRelocSink {},
&mut NullTrapSink {},
)
.expect("unable to compile trampolines");
ctx.clear();
total_size += round_up(code_buf.len(), mem::size_of::<usize>());
compiled_functions.push((sig_index, code_buf));
}
let mut memory = Memory::with_size(total_size).unwrap();
unsafe {
memory.protect(.., Protect::ReadWrite).unwrap();
}
// "\xCC" disassembles to "int3", which will immediately cause
// an interrupt.
for i in unsafe { memory.as_slice_mut() } {
*i = 0xCC;
}
let mut previous_end = 0;
let mut trampolines = HashMap::with_capacity(compiled_functions.len());
for (sig_index, compiled) in compiled_functions.iter() {
let new_end = previous_end + round_up(compiled.len(), mem::size_of::<usize>());
unsafe {
memory.as_slice_mut()[previous_end..previous_end + compiled.len()]
.copy_from_slice(&compiled[..]);
}
trampolines.insert(*sig_index, previous_end);
previous_end = new_end;
}
unsafe {
memory.protect(.., Protect::ReadExec).unwrap();
}
Self {
memory,
offsets: trampolines,
}
}
pub fn lookup(&self, sig_index: SigIndex) -> Option<Trampoline> {
let offset = *self.offsets.get(&sig_index)?;
let ptr = unsafe { self.memory.as_ptr().add(offset) };
unsafe { Some(mem::transmute(ptr)) }
}
}
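// Usage sketch (hypothetical caller): a looked-up trampoline is invoked as
//
//     let trampoline = trampolines.lookup(sig_index).unwrap();
//     unsafe { trampoline(vmctx, func_ptr, args.as_ptr(), rets.as_mut_ptr()) };
//
// where `args` and `rets` are buffers of u64 slots, matching the loads and
// stores emitted by `generate_func` below.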
/// This function generates a trampoline for the specific signature
/// passed into it.
fn generate_func(func_sig: &FuncSig) -> ir::Function {
let trampoline_sig = generate_trampoline_signature();
let mut func =
ir::Function::with_name_signature(ir::ExternalName::testcase("trampln"), trampoline_sig);
let export_sig_ref = func.import_signature(generate_export_signature(func_sig));
let entry_ebb = func.dfg.make_ebb();
let vmctx_ptr = func.dfg.append_ebb_param(entry_ebb, ir::types::I64);
let func_ptr = func.dfg.append_ebb_param(entry_ebb, ir::types::I64);
let args_ptr = func.dfg.append_ebb_param(entry_ebb, ir::types::I64);
let returns_ptr = func.dfg.append_ebb_param(entry_ebb, ir::types::I64);
func.layout.append_ebb(entry_ebb);
let mut pos = FuncCursor::new(&mut func).at_first_insertion_point(entry_ebb);
let mut args_vec = Vec::with_capacity(func_sig.params().len() + 1);
args_vec.push(vmctx_ptr);
for (index, wasm_ty) in func_sig.params().iter().enumerate() {
let mem_flags = ir::MemFlags::trusted();
let val = pos.ins().load(
wasm_ty_to_clif(*wasm_ty),
mem_flags,
args_ptr,
(index * mem::size_of::<u64>()) as i32,
);
args_vec.push(val);
}
let call_inst = pos.ins().call_indirect(export_sig_ref, func_ptr, &args_vec);
let return_values = pos.func.dfg.inst_results(call_inst).to_vec();
for (index, return_val) in return_values.iter().enumerate() {
let mem_flags = ir::MemFlags::trusted();
pos.ins().store(
mem_flags,
*return_val,
returns_ptr,
(index * mem::size_of::<u64>()) as i32,
);
}
pos.ins().return_(&[]);
func
}
fn wasm_ty_to_clif(ty: Type) -> ir::types::Type {
match ty {
Type::I32 => ir::types::I32,
Type::I64 => ir::types::I64,
Type::F32 => ir::types::F32,
Type::F64 => ir::types::F64,
Type::V128 => ir::types::I32X4,
}
}
fn generate_trampoline_signature() -> ir::Signature {
let isa = super::get_isa();
let call_convention = isa.default_call_conv();
let mut sig = ir::Signature::new(call_convention);
let ptr_param = ir::AbiParam {
value_type: ir::types::I64,
purpose: ir::ArgumentPurpose::Normal,
extension: ir::ArgumentExtension::None,
location: ir::ArgumentLoc::Unassigned,
};
sig.params = vec![ptr_param, ptr_param, ptr_param, ptr_param];
sig
}
fn generate_export_signature(func_sig: &FuncSig) -> ir::Signature {
let isa = super::get_isa();
let call_convention = isa.default_call_conv();
let mut export_clif_sig = ir::Signature::new(call_convention);
let func_sig_iter = func_sig.params().iter().map(|wasm_ty| ir::AbiParam {
value_type: wasm_ty_to_clif(*wasm_ty),
purpose: ir::ArgumentPurpose::Normal,
extension: ir::ArgumentExtension::None,
location: ir::ArgumentLoc::Unassigned,
});
export_clif_sig.params = iter::once(ir::AbiParam {
value_type: ir::types::I64,
purpose: ir::ArgumentPurpose::VMContext,
extension: ir::ArgumentExtension::None,
location: ir::ArgumentLoc::Unassigned,
})
.chain(func_sig_iter)
.collect();
export_clif_sig.returns = func_sig
.returns()
.iter()
.map(|wasm_ty| ir::AbiParam {
value_type: wasm_ty_to_clif(*wasm_ty),
purpose: ir::ArgumentPurpose::Normal,
extension: ir::ArgumentExtension::None,
location: ir::ArgumentLoc::Unassigned,
})
.collect();
export_clif_sig
}
#[inline]
fn round_up(n: usize, multiple: usize) -> usize {
(n + multiple - 1) & !(multiple - 1)
}
| {
// pub struct TrampolineCache {
// #[serde(with = "serde_bytes")]
// code: Vec<u8>,
// offsets: HashMap<SigIndex, usize>,
// }
let mut memory = Memory::with_size(cache.code.len()).unwrap();
unsafe {
memory.protect(.., Protect::ReadWrite).unwrap();
// Copy over the compiled code.
memory.as_slice_mut()[..cache.code.len()].copy_from_slice(cache.code.as_slice());
memory.protect(.., Protect::ReadExec).unwrap();
}
Self {
memory,
offsets: cache.offsets,
}
} |
role.dedicated.hauler.ts | import { RoleHauler } from "roleDefinitions/role.hauler";
export class | extends RoleHauler {
public runDedicated(creep: Creep, dedication: string) {
if (creep.memory.working && creep.store.getUsedCapacity() === 0) {
creep.memory.working = false;
creep.say('🏗️ pickup');
}
if (!creep.memory.working && creep.store.getFreeCapacity() === 0) {
creep.memory.working = true;
creep.say('💦');
}
if (creep.memory.working) {
this.depositDedicatedHaul(creep);
}
else {
this.withdrawFromDedication(creep, dedication);
}
}
protected depositDedicatedHaul(creep: Creep) {
const storage: StructureStorage | undefined = this.checkStorageForDeposit(creep.room);
if (storage) {
const resourceType: MineralConstant | DepositConstant | undefined = creep.room.memory.mine?.type;
if (resourceType) {
this.depositMoveSpecified(creep, storage, resourceType);
}
}
}
protected withdrawFromDedication(creep: Creep, dedication: string) {
const container: Structure | null = Game.getObjectById(dedication);
if (container) {
const resourceType: MineralConstant | DepositConstant | undefined = creep.room.memory.mine?.type;
if (resourceType) {
this.withdrawMoveSpecified(creep, container, resourceType);
}
}
}
}
| RoleDedicatedHauler |
push.ts | async function arrayPush() { |
array.push();
array.push(1);
const newLength = array.push(1, 2, 3);
} | "use speedyjs";
const array = [1, 2]; |
OrphanagesMap.tsx | import React, { useEffect, useState } from 'react';
import { Link } from 'react-router-dom';
import { FiPlus, FiArrowRight } from 'react-icons/fi';
import { Map, TileLayer, Marker, Popup } from 'react-leaflet';
import 'leaflet/dist/leaflet.css';
import mapMarkerImg from '../images/map-marker.svg';
import mapIcon from '../utils/mapIcon';
import '../styles/pages/orphanages-map.css';
import api from '../services/api';
interface Orphanage {
id: number;
latitude: number;
longitude: number;
name: string;
}
function | () {
const [orphanages, setOrphanages] = useState<Orphanage[]>([]);
useEffect(() => {
api.get('/orphanages').then((response) => {
setOrphanages(response.data);
});
}, []);
return (
<div id="page-map">
<aside>
<header>
<img src={mapMarkerImg} alt="Happy" />
<h2>Escolha um orfanato no mapa</h2>
<p>Muitas crianças estão esperando a sua visita :)</p>
</header>
<footer>
<strong>Conchal</strong>
<span>São Paulo</span>
</footer>
</aside>
<Map
center={[-22.3376285, -47.1700979]}
zoom={14.5}
style={{ width: '100%', height: '100%' }}
>
{/* <TileLayer url="https://a.tile.openstreetmap.org/{z}/{x}/{y}.png" /> */}
<TileLayer
url={`https://api.mapbox.com/styles/v1/mapbox/streets-v11/tiles/256/{z}/{x}/{y}@2x?access_token=${process.env.REACT_APP_MAPB0X_TOKEN}`}
/>
{orphanages.map((orphanage) => {
return (
<Marker
key={orphanage.id}
icon={mapIcon}
position={[orphanage.latitude, orphanage.longitude]}
>
<Popup
closeButton={false}
minWidth={240}
maxWidth={240}
className="map-popup"
>
{orphanage.name}
<Link to={`/orphanages/${orphanage.id}`}>
<FiArrowRight size={20} color="#fff" />
</Link>
</Popup>
</Marker>
);
})}
</Map>
<Link to="/orphanages/create" className="create-orphanage">
<FiPlus size={32} color="#FFF" />
</Link>
</div>
);
}
export default OrphanagesMap;
| OrphanagesMap |
lib.rs | //! Low-level Rust lexer.
//!
//! The idea with `librustc_lexer` is to make a reusable library,
//! by separating out pure lexing and rustc-specific concerns, like spans,
//! error reporting and interning. So, rustc_lexer operates directly on `&str`,
//! produces simple tokens which are a pair of type-tag and a bit of original text,
//! and does not report errors, instead storing them as flags on the token.
//!
//! Tokens produced by this lexer are not yet ready for parsing the Rust syntax.
//! For that see [`librustc_parse::lexer`], which converts this basic token stream
//! into wide tokens used by the actual parser.
//!
//! The purpose of this crate is to convert raw sources into a labeled sequence
//! of well-known token types, so building an actual Rust token stream will
//! be easier.
//!
//! The main entity of this crate is the [`TokenKind`] enum which represents common
//! lexeme types.
//!
//! [`librustc_parse::lexer`]: ../rustc_parse/lexer/index.html
// We want to be able to build this crate with a stable compiler, so no
// `#![feature]` attributes should be added.
mod cursor;
pub mod unescape;
#[cfg(test)]
mod tests;
use self::LiteralKind::*;
use self::TokenKind::*;
use crate::cursor::{Cursor, EOF_CHAR};
use std::convert::TryFrom;
/// Parsed token.
/// It doesn't contain information about data that has been parsed,
/// only the type of the token and its size.
pub struct Token {
pub kind: TokenKind,
pub len: usize,
}
impl Token {
fn new(kind: TokenKind, len: usize) -> Token {
Token { kind, len }
}
}
/// Enum representing common lexeme types.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum TokenKind {
// Multi-char tokens:
/// "// comment"
LineComment,
/// `/* block comment */`
///
/// Block comments can be recursive, so the sequence like `/* /* */`
/// will not be considered terminated and will result in a parsing error.
BlockComment { terminated: bool },
/// Any whitespace characters sequence.
Whitespace,
/// "ident" or "continue"
/// At this step keywords are also considered identifiers.
Ident,
/// "r#ident"
RawIdent,
/// "12_u8", "1.0e-40", "b"123"". See `LiteralKind` for more details.
Literal { kind: LiteralKind, suffix_start: usize },
/// "'a"
Lifetime { starts_with_number: bool },
// One-char tokens:
/// ";"
Semi,
/// ","
Comma,
/// "."
Dot,
/// "("
OpenParen,
/// ")"
CloseParen,
/// "{"
OpenBrace,
/// "}"
CloseBrace,
/// "["
OpenBracket,
/// "]"
CloseBracket,
/// "@"
At,
/// "#"
Pound,
/// "~"
Tilde,
/// "?"
Question,
/// ":"
Colon,
/// "$"
Dollar,
/// "="
Eq,
/// "!"
Not,
/// "<" | /// "-"
Minus,
/// "&"
And,
/// "|"
Or,
/// "+"
Plus,
/// "*"
Star,
/// "/"
Slash,
/// "^"
Caret,
/// "%"
Percent,
/// Unknown token, not expected by the lexer, e.g. "№"
Unknown,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum LiteralKind {
/// "12_u8", "0o100", "0b120i99"
Int { base: Base, empty_int: bool },
/// "12.34f32", "0b100.100"
Float { base: Base, empty_exponent: bool },
/// "'a'", "'\\'", "'''", "';"
Char { terminated: bool },
/// "b'a'", "b'\\'", "b'''", "b';"
Byte { terminated: bool },
/// ""abc"", ""abc"
Str { terminated: bool },
/// "b"abc"", "b"abc"
ByteStr { terminated: bool },
/// "r"abc"", "r#"abc"#", "r####"ab"###"c"####", "r#"a"
RawStr { n_hashes: u16, err: Option<RawStrError> },
/// "br"abc"", "br#"abc"#", "br####"ab"###"c"####", "br#"a"
RawByteStr { n_hashes: u16, err: Option<RawStrError> },
}
/// Error produced validating a raw string. Represents cases like:
/// - `r##~"abcde"##`: `InvalidStarter`
/// - `r###"abcde"##`: `NoTerminator { expected: 3, found: 2, possible_terminator_offset: Some(11)`
/// - Too many `#`s (>65535): `TooManyDelimiters`
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum RawStrError {
/// Non `#` characters exist between `r` and `"` eg. `r#~"..`
InvalidStarter { bad_char: char },
/// The string was never terminated. `possible_terminator_offset` is the number of characters after `r` or `br` where they
/// may have intended to terminate it.
NoTerminator { expected: usize, found: usize, possible_terminator_offset: Option<usize> },
/// More than 65535 `#`s exist.
TooManyDelimiters { found: usize },
}
/// Base of numeric literal encoding according to its prefix.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum Base {
/// Literal starts with "0b".
Binary,
/// Literal starts with "0o".
Octal,
/// Literal starts with "0x".
Hexadecimal,
/// Literal doesn't contain a prefix.
Decimal,
}
/// `rustc` allows files to have a shebang, e.g. "#!/usr/bin/rustrun",
/// but a shebang isn't part of Rust syntax.
pub fn strip_shebang(input: &str) -> Option<usize> {
// Shebang must start with `#!` literally, without any preceding whitespace.
// For simplicity we consider any line starting with `#!` a shebang,
// regardless of restrictions put on shebangs by specific platforms.
if let Some(input_tail) = input.strip_prefix("#!") {
// Ok, this is a shebang but if the next non-whitespace token is `[` or maybe
// a doc comment (due to `TokenKind::(Line,Block)Comment` ambiguity at lexer level),
// then it may be valid Rust code, so consider it Rust code.
let next_non_whitespace_token = tokenize(input_tail).map(|tok| tok.kind).find(|tok|
!matches!(tok, TokenKind::Whitespace | TokenKind::LineComment | TokenKind::BlockComment { .. })
);
if next_non_whitespace_token != Some(TokenKind::OpenBracket) {
// No other choice than to consider this a shebang.
return Some(2 + input_tail.lines().next().unwrap_or_default().len());
}
}
None
}
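// E.g. strip_shebang("#!/bin/bash\nfn main() {}") returns Some(11), the
// length of the shebang line, while strip_shebang("#![allow(unused)]")
// returns None because the token after `#!` is `[`, an inner attribute.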
/// Parses the first token from the provided input string.
pub fn first_token(input: &str) -> Token {
debug_assert!(!input.is_empty());
Cursor::new(input).advance_token()
}
/// Creates an iterator that produces tokens from the input string.
pub fn tokenize(mut input: &str) -> impl Iterator<Item = Token> + '_ {
std::iter::from_fn(move || {
if input.is_empty() {
return None;
}
let token = first_token(input);
input = &input[token.len..];
Some(token)
})
}
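// Usage sketch: tokenize("let x = 5;").map(|t| t.kind) yields
// Ident, Whitespace, Ident, Whitespace, Eq, Whitespace,
// Literal { kind: Int { .. }, .. }, Semi.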
/// True if `c` is considered a whitespace according to Rust language definition.
/// See [Rust language reference](https://doc.rust-lang.org/reference/whitespace.html)
/// for definitions of these classes.
pub fn is_whitespace(c: char) -> bool {
// This is Pattern_White_Space.
//
// Note that this set is stable (ie, it doesn't change with different
// Unicode versions), so it's ok to just hard-code the values.
match c {
// Usual ASCII suspects
| '\u{0009}' // \t
| '\u{000A}' // \n
| '\u{000B}' // vertical tab
| '\u{000C}' // form feed
| '\u{000D}' // \r
| '\u{0020}' // space
// NEXT LINE from latin1
| '\u{0085}'
// Bidi markers
| '\u{200E}' // LEFT-TO-RIGHT MARK
| '\u{200F}' // RIGHT-TO-LEFT MARK
// Dedicated whitespace characters from Unicode
| '\u{2028}' // LINE SEPARATOR
| '\u{2029}' // PARAGRAPH SEPARATOR
=> true,
_ => false,
}
}
/// True if `c` is valid as a first character of an identifier.
/// See [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html) for
/// a formal definition of valid identifier name.
pub fn is_id_start(c: char) -> bool {
// This is XID_Start OR '_' (which formally is not a XID_Start).
// We also add fast-path for ascii idents
('a' <= c && c <= 'z')
|| ('A' <= c && c <= 'Z')
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_start(c))
}
/// True if `c` is valid as a non-first character of an identifier.
/// See [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html) for
/// a formal definition of valid identifier name.
pub fn is_id_continue(c: char) -> bool {
// This is exactly XID_Continue.
// We also add fast-path for ascii idents
('a' <= c && c <= 'z')
|| ('A' <= c && c <= 'Z')
|| ('0' <= c && c <= '9')
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_continue(c))
}
/// The passed string is lexically an identifier.
pub fn is_ident(string: &str) -> bool {
let mut chars = string.chars();
if let Some(start) = chars.next() {
is_id_start(start) && chars.all(is_id_continue)
} else {
false
}
}
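// E.g. is_ident("_foo") == true, is_ident("1foo") == false, and
// is_ident("") == false, since an empty string has no starting character.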
impl Cursor<'_> {
/// Parses a token from the input string.
fn advance_token(&mut self) -> Token {
let first_char = self.bump().unwrap();
let token_kind = match first_char {
// Slash, comment or block comment.
'/' => match self.first() {
'/' => self.line_comment(),
'*' => self.block_comment(),
_ => Slash,
},
// Whitespace sequence.
c if is_whitespace(c) => self.whitespace(),
// Raw identifier, raw string literal or identifier.
'r' => match (self.first(), self.second()) {
('#', c1) if is_id_start(c1) => self.raw_ident(),
('#', _) | ('"', _) => {
let (n_hashes, err) = self.raw_double_quoted_string(1);
let suffix_start = self.len_consumed();
if err.is_none() {
self.eat_literal_suffix();
}
let kind = RawStr { n_hashes, err };
Literal { kind, suffix_start }
}
_ => self.ident(),
},
// Byte literal, byte string literal, raw byte string literal or identifier.
'b' => match (self.first(), self.second()) {
('\'', _) => {
self.bump();
let terminated = self.single_quoted_string();
let suffix_start = self.len_consumed();
if terminated {
self.eat_literal_suffix();
}
let kind = Byte { terminated };
Literal { kind, suffix_start }
}
('"', _) => {
self.bump();
let terminated = self.double_quoted_string();
let suffix_start = self.len_consumed();
if terminated {
self.eat_literal_suffix();
}
let kind = ByteStr { terminated };
Literal { kind, suffix_start }
}
('r', '"') | ('r', '#') => {
self.bump();
let (n_hashes, err) = self.raw_double_quoted_string(2);
let suffix_start = self.len_consumed();
if err.is_none() {
self.eat_literal_suffix();
}
let kind = RawByteStr { n_hashes, err };
Literal { kind, suffix_start }
}
_ => self.ident(),
},
// Identifier (this should be checked after other variant that can
// start as identifier).
c if is_id_start(c) => self.ident(),
// Numeric literal.
c @ '0'..='9' => {
let literal_kind = self.number(c);
let suffix_start = self.len_consumed();
self.eat_literal_suffix();
TokenKind::Literal { kind: literal_kind, suffix_start }
}
// One-symbol tokens.
';' => Semi,
',' => Comma,
'.' => Dot,
'(' => OpenParen,
')' => CloseParen,
'{' => OpenBrace,
'}' => CloseBrace,
'[' => OpenBracket,
']' => CloseBracket,
'@' => At,
'#' => Pound,
'~' => Tilde,
'?' => Question,
':' => Colon,
'$' => Dollar,
'=' => Eq,
'!' => Not,
'<' => Lt,
'>' => Gt,
'-' => Minus,
'&' => And,
'|' => Or,
'+' => Plus,
'*' => Star,
'^' => Caret,
'%' => Percent,
// Lifetime or character literal.
'\'' => self.lifetime_or_char(),
// String literal.
'"' => {
let terminated = self.double_quoted_string();
let suffix_start = self.len_consumed();
if terminated {
self.eat_literal_suffix();
}
let kind = Str { terminated };
Literal { kind, suffix_start }
}
_ => Unknown,
};
Token::new(token_kind, self.len_consumed())
}
fn line_comment(&mut self) -> TokenKind {
debug_assert!(self.prev() == '/' && self.first() == '/');
self.bump();
self.eat_while(|c| c != '\n');
LineComment
}
fn block_comment(&mut self) -> TokenKind {
debug_assert!(self.prev() == '/' && self.first() == '*');
self.bump();
let mut depth = 1usize;
while let Some(c) = self.bump() {
match c {
'/' if self.first() == '*' => {
self.bump();
depth += 1;
}
'*' if self.first() == '/' => {
self.bump();
depth -= 1;
if depth == 0 {
// This block comment is closed, so for a construction like "/* */ */"
// there will be a successfully parsed block comment "/* */"
// and " */" will be processed separately.
break;
}
}
_ => (),
}
}
BlockComment { terminated: depth == 0 }
}
fn whitespace(&mut self) -> TokenKind {
debug_assert!(is_whitespace(self.prev()));
self.eat_while(is_whitespace);
Whitespace
}
fn raw_ident(&mut self) -> TokenKind {
debug_assert!(self.prev() == 'r' && self.first() == '#' && is_id_start(self.second()));
// Eat "#" symbol.
self.bump();
// Eat the identifier part of RawIdent.
self.eat_identifier();
RawIdent
}
fn ident(&mut self) -> TokenKind {
debug_assert!(is_id_start(self.prev()));
// Start is already eaten, eat the rest of identifier.
self.eat_while(is_id_continue);
Ident
}
fn number(&mut self, first_digit: char) -> LiteralKind {
debug_assert!('0' <= self.prev() && self.prev() <= '9');
let mut base = Base::Decimal;
if first_digit == '0' {
// Attempt to parse encoding base.
let has_digits = match self.first() {
'b' => {
base = Base::Binary;
self.bump();
self.eat_decimal_digits()
}
'o' => {
base = Base::Octal;
self.bump();
self.eat_decimal_digits()
}
'x' => {
base = Base::Hexadecimal;
self.bump();
self.eat_hexadecimal_digits()
}
// Not a base prefix.
'0'..='9' | '_' | '.' | 'e' | 'E' => {
self.eat_decimal_digits();
true
}
// Just a 0.
_ => return Int { base, empty_int: false },
};
// Base prefix was provided, but there were no digits
// after it, e.g. "0x".
if !has_digits {
return Int { base, empty_int: true };
}
} else {
// No base prefix, parse number in the usual way.
self.eat_decimal_digits();
};
match self.first() {
// Don't be greedy if this is actually an
// integer literal followed by field/method access or a range pattern
// (`0..2` and `12.foo()`)
'.' if self.second() != '.' && !is_id_start(self.second()) => {
// might have stuff after the ., and if it does, it needs to start
// with a number
self.bump();
let mut empty_exponent = false;
if self.first().is_digit(10) {
self.eat_decimal_digits();
match self.first() {
'e' | 'E' => {
self.bump();
empty_exponent = !self.eat_float_exponent();
}
_ => (),
}
}
Float { base, empty_exponent }
}
'e' | 'E' => {
self.bump();
let empty_exponent = !self.eat_float_exponent();
Float { base, empty_exponent }
}
_ => Int { base, empty_int: false },
}
}
fn lifetime_or_char(&mut self) -> TokenKind {
debug_assert!(self.prev() == '\'');
let can_be_a_lifetime = if self.second() == '\'' {
// It's surely not a lifetime.
false
} else {
// If the first symbol is valid for identifier, it can be a lifetime.
// Also check if it's a number for a better error reporting (so '0 will
// be reported as invalid lifetime and not as unterminated char literal).
is_id_start(self.first()) || self.first().is_digit(10)
};
if !can_be_a_lifetime {
let terminated = self.single_quoted_string();
let suffix_start = self.len_consumed();
if terminated {
self.eat_literal_suffix();
}
let kind = Char { terminated };
return Literal { kind, suffix_start };
}
// Either a lifetime or a character literal with
// length greater than 1.
let starts_with_number = self.first().is_digit(10);
// Skip the literal contents.
// First symbol can be a number (which isn't a valid identifier start),
// so skip it without any checks.
self.bump();
self.eat_while(is_id_continue);
// Check if after skipping literal contents we've met a closing
// single quote (which means that user attempted to create a
// string with single quotes).
if self.first() == '\'' {
self.bump();
let kind = Char { terminated: true };
Literal { kind, suffix_start: self.len_consumed() }
} else {
Lifetime { starts_with_number }
}
}
fn single_quoted_string(&mut self) -> bool {
debug_assert!(self.prev() == '\'');
// Check if it's a one-symbol literal.
if self.second() == '\'' && self.first() != '\\' {
self.bump();
self.bump();
return true;
}
// Literal has more than one symbol.
// Parse until either quotes are terminated or error is detected.
loop {
match self.first() {
// Quotes are terminated, finish parsing.
'\'' => {
self.bump();
return true;
}
// Probably beginning of the comment, which we don't want to include
// to the error report.
'/' => break,
// Newline without following '\'' means unclosed quote, stop parsing.
'\n' if self.second() != '\'' => break,
// End of file, stop parsing.
EOF_CHAR if self.is_eof() => break,
// Escaped slash is considered one character, so bump twice.
'\\' => {
self.bump();
self.bump();
}
// Skip the character.
_ => {
self.bump();
}
}
}
// String was not terminated.
false
}
/// Eats double-quoted string and returns true
/// if string is terminated.
fn double_quoted_string(&mut self) -> bool {
debug_assert!(self.prev() == '"');
while let Some(c) = self.bump() {
match c {
'"' => {
return true;
}
'\\' if self.first() == '\\' || self.first() == '"' => {
// Bump again to skip escaped character.
self.bump();
}
_ => (),
}
}
// End of file reached.
false
}
/// Eats the double-quoted string and returns `n_hashes` and an error if encountered.
fn raw_double_quoted_string(&mut self, prefix_len: usize) -> (u16, Option<RawStrError>) {
// Wrap the actual function to handle the error with too many hashes.
// This way, it eats the whole raw string.
let (n_hashes, err) = self.raw_string_unvalidated(prefix_len);
// Only up to 65535 `#`s are allowed in raw strings
match u16::try_from(n_hashes) {
Ok(num) => (num, err),
// We lie about the number of hashes here :P
Err(_) => (0, Some(RawStrError::TooManyDelimiters { found: n_hashes })),
}
}
fn raw_string_unvalidated(&mut self, prefix_len: usize) -> (usize, Option<RawStrError>) {
debug_assert!(self.prev() == 'r');
let start_pos = self.len_consumed();
let mut possible_terminator_offset = None;
let mut max_hashes = 0;
// Count opening '#' symbols.
let n_start_hashes = self.eat_while(|c| c == '#');
// Check that string is started.
match self.bump() {
Some('"') => (),
c => {
let c = c.unwrap_or(EOF_CHAR);
return (n_start_hashes, Some(RawStrError::InvalidStarter { bad_char: c }));
}
}
// Skip the string contents and on each '#' character met, check if this is
// a raw string termination.
loop {
self.eat_while(|c| c != '"');
if self.is_eof() {
return (
n_start_hashes,
Some(RawStrError::NoTerminator {
expected: n_start_hashes,
found: max_hashes,
possible_terminator_offset,
}),
);
}
// Eat closing double quote.
self.bump();
// Check that amount of closing '#' symbols
// is equal to the amount of opening ones.
// Note that this will not consume extra trailing `#` characters:
// `r###"abcde"####` is lexed as a `RawStr { n_hashes: 3 }`
// followed by a `#` token.
let mut hashes_left = n_start_hashes;
let is_closing_hash = |c| {
if c == '#' && hashes_left != 0 {
hashes_left -= 1;
true
} else {
false
}
};
let n_end_hashes = self.eat_while(is_closing_hash);
if n_end_hashes == n_start_hashes {
return (n_start_hashes, None);
} else if n_end_hashes > max_hashes {
// Keep track of possible terminators to give a hint about
// where there might be a missing terminator
possible_terminator_offset =
Some(self.len_consumed() - start_pos - n_end_hashes + prefix_len);
max_hashes = n_end_hashes;
}
}
}
fn eat_decimal_digits(&mut self) -> bool {
let mut has_digits = false;
loop {
match self.first() {
'_' => {
self.bump();
}
'0'..='9' => {
has_digits = true;
self.bump();
}
_ => break,
}
}
has_digits
}
fn eat_hexadecimal_digits(&mut self) -> bool {
let mut has_digits = false;
loop {
match self.first() {
'_' => {
self.bump();
}
'0'..='9' | 'a'..='f' | 'A'..='F' => {
has_digits = true;
self.bump();
}
_ => break,
}
}
has_digits
}
/// Eats the float exponent. Returns true if at least one digit was met,
/// and returns false otherwise.
fn eat_float_exponent(&mut self) -> bool {
debug_assert!(self.prev() == 'e' || self.prev() == 'E');
if self.first() == '-' || self.first() == '+' {
self.bump();
}
self.eat_decimal_digits()
}
// Eats the suffix of the literal, e.g. "_u8".
fn eat_literal_suffix(&mut self) {
self.eat_identifier();
}
// Eats the identifier.
fn eat_identifier(&mut self) {
if !is_id_start(self.first()) {
return;
}
self.bump();
self.eat_while(is_id_continue);
}
/// Eats symbols while predicate returns true or until the end of file is reached.
/// Returns amount of eaten symbols.
fn eat_while<F>(&mut self, mut predicate: F) -> usize
where
F: FnMut(char) -> bool,
{
let mut eaten: usize = 0;
while predicate(self.first()) && !self.is_eof() {
eaten += 1;
self.bump();
}
eaten
}
} | Lt,
/// ">"
Gt, |
test_locate_trace.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `locate_trace` module."""
import unittest
import numpy as np
from hotsoss import locate_trace as lt
def test_simulate_frame():
"""Test the simulate_frame function"""
# CLEAR and plot test
assert lt.simulate_frame(plot=True).shape == (256, 2048)
# F277W test
assert lt.simulate_frame(filt='F277W').shape == (256, 2048)
def test_isolate_signal():
"""Test isolate_signal function"""
# Make frame for testing
frame = lt.simulate_frame()
# CLEAR and plot
assert len(lt.isolate_signal(500, frame, plot=True)) == 2
# F277W and radius
assert len(lt.isolate_signal(500, frame, filt='F277W', radius=20)) == 2
def test_order_masks():
"""Test order_masks function"""
# Make frame for testing
frame = lt.simulate_frame()
# Test plot and defaults
assert len(lt.order_masks(frame, plot=True)) == 2
# Test save and subarray
assert len(lt.order_masks(frame, subarray='SUBSTRIP96', save=True)) == 2
def test_trace_polynomial():
"""Test trace_polynomial function"""
# No order specified
assert len(lt.trace_polynomial(order=None, evaluate=False)) == 2
# Single order
assert len(lt.trace_polynomial(order=1, evaluate=False)) == 5
# Test evaluate
assert len(lt.trace_polynomial(order=1, evaluate=True)) == 2048
def test_trace_wavelengths():
"""Test trace_wavelengths function"""
# No order specified
assert len(lt.trace_wavelengths(order=None)) == 2
# Single order
assert len(lt.trace_wavelengths(order=1)) == 2048
def | ():
"""Test wavelength_bins works"""
# Default values for two orders
assert len(lt.wavelength_bins()) == 2
# Generate
assert len(lt.wavelength_bins(save=True)) == 2
| test_wavelength_bins |
handler_mock.go | package testhelpers
import (
"context"
"io/ioutil"
"net/http"
"net/http/cookiejar"
"testing"
"time"
"github.com/ory/kratos/internal"
"github.com/bxcodec/faker/v3"
"github.com/google/uuid"
"github.com/julienschmidt/httprouter"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/ory/kratos/driver/config"
"github.com/ory/kratos/identity"
"github.com/ory/kratos/session"
"github.com/ory/kratos/x"
)
type mockDeps interface {
identity.PrivilegedPoolProvider
session.ManagementProvider
session.PersistenceProvider
config.Provider
}
func MockSetSession(t *testing.T, reg mockDeps, conf *config.Config) httprouter.Handle {
return func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
i := identity.NewIdentity(config.DefaultIdentityTraitsSchemaID)
require.NoError(t, reg.PrivilegedIdentityPool().CreateIdentity(context.Background(), i))
require.NoError(t, reg.SessionManager().CreateAndIssueCookie(context.Background(), w, r, session.NewActiveSession(i, conf, time.Now().UTC())))
w.WriteHeader(http.StatusOK)
}
}
func MockGetSession(t *testing.T, reg mockDeps) httprouter.Handle {
return func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
_, err := reg.SessionManager().FetchFromRequest(r.Context(), r)
if r.URL.Query().Get("has") == "yes" {
require.NoError(t, err)
} else {
require.Error(t, err)
}
w.WriteHeader(http.StatusNoContent)
}
}
func MockMakeAuthenticatedRequest(t *testing.T, reg mockDeps, conf *config.Config, router *httprouter.Router, req *http.Request) ([]byte, *http.Response) {
set := "/" + uuid.New().String() + "/set"
router.GET(set, MockSetSession(t, reg, conf))
client := NewClientWithCookies(t)
MockHydrateCookieClient(t, client, "http://"+req.URL.Host+set)
res, err := client.Do(req)
require.NoError(t, errors.WithStack(err))
body, err := ioutil.ReadAll(res.Body)
require.NoError(t, errors.WithStack(err))
require.NoError(t, res.Body.Close())
return body, res
}
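// Usage sketch (hypothetical test): mount the handler under test on router,
// then build a request against the test server, e.g.
//
//	req, _ := http.NewRequest("GET", ts.URL+"/sessions/whoami", nil)
//	body, res := MockMakeAuthenticatedRequest(t, reg, conf, router, req)
//
// The helper first hits a generated /<uuid>/set route to obtain a session
// cookie, then replays req through the same cookie jar.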
func | (t *testing.T) *http.Client {
cj, err := cookiejar.New(&cookiejar.Options{})
require.NoError(t, err)
return &http.Client{Jar: cj}
}
func MockHydrateCookieClient(t *testing.T, c *http.Client, u string) {
res, err := c.Get(u)
require.NoError(t, err)
defer res.Body.Close()
assert.EqualValues(t, http.StatusOK, res.StatusCode)
var found bool
for _, c := range res.Cookies() {
if c.Name == session.DefaultSessionCookieName {
found = true
}
}
require.True(t, found)
}
func MockSessionCreateHandlerWithIdentity(t *testing.T, reg mockDeps, i *identity.Identity) (httprouter.Handle, *session.Session) {
var sess session.Session
require.NoError(t, faker.FakeData(&sess))
// require AuthenticatedAt to be time.Now() as we always compare it to the current time
sess.AuthenticatedAt = time.Now().UTC()
sess.IssuedAt = time.Now().UTC()
sess.ExpiresAt = time.Now().UTC().Add(time.Hour * 24)
sess.Active = true
if reg.Config(context.Background()).Source().String(config.ViperKeyDefaultIdentitySchemaURL) == internal.UnsetDefaultIdentitySchema {
reg.Config(context.Background()).MustSet(config.ViperKeyDefaultIdentitySchemaURL, "file://./stub/fake-session.schema.json")
}
require.NoError(t, reg.PrivilegedIdentityPool().CreateIdentity(context.Background(), i))
inserted, err := reg.PrivilegedIdentityPool().GetIdentityConfidential(context.Background(), i.ID)
require.NoError(t, err)
sess.Identity = inserted
require.NoError(t, reg.SessionPersister().CreateSession(context.Background(), &sess))
require.Len(t, inserted.Credentials, len(i.Credentials))
return func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
require.NoError(t, reg.SessionManager().IssueCookie(context.Background(), w, r, &sess))
}, &sess
}
func MockSessionCreateHandler(t *testing.T, reg mockDeps) (httprouter.Handle, *session.Session) {
return MockSessionCreateHandlerWithIdentity(t, reg, &identity.Identity{
ID: x.NewUUID(), Traits: identity.Traits(`{"baz":"bar","foo":true,"bar":2.5}`)})
}
| NewClientWithCookies |
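To see how these helpers compose, here is a hedged usage sketch in the same package: `reg` and `conf` are assumptions (in the real kratos test suite they come from the package's test bootstrap, which this file does not show), and the function name is hypothetical.

package testhelpers

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/julienschmidt/httprouter"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/ory/kratos/driver/config"
)

// SketchSessionRoundTrip wires the mock handlers into a router: /set issues a
// session cookie, the shared cookie jar replays it, and /get?has=yes asserts
// that FetchFromRequest resolves the session from that cookie.
func SketchSessionRoundTrip(t *testing.T, reg mockDeps, conf *config.Config) {
	router := httprouter.New()
	router.GET("/set", MockSetSession(t, reg, conf))
	router.GET("/get", MockGetSession(t, reg))

	ts := httptest.NewServer(router)
	defer ts.Close()

	// NewClientWithCookies carries the cookie between the two requests.
	client := NewClientWithCookies(t)
	MockHydrateCookieClient(t, client, ts.URL+"/set")

	res, err := client.Get(ts.URL + "/get?has=yes")
	require.NoError(t, err)
	require.NoError(t, res.Body.Close())
	assert.EqualValues(t, http.StatusNoContent, res.StatusCode)
}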
system.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use fuchsia_runtime::utc_time;
use fuchsia_zircon as zx;
use log::warn;
use crate::logging::*;
use crate::not_implemented;
use crate::syscalls::decls::SyscallDecl;
use crate::syscalls::*;
use crate::types::*;
pub fn sys_uname(
ctx: &SyscallContext<'_>,
name: UserRef<utsname_t>,
) -> Result<SyscallResult, Errno> {
fn | (fixed: &mut [u8; 65], init: &'static str) {
let init_bytes = init.as_bytes();
let len = init.len();
fixed[..len].copy_from_slice(init_bytes)
}
let mut result = utsname_t {
sysname: [0; 65],
nodename: [0; 65],
release: [0; 65],
version: [0; 65],
machine: [0; 65],
};
init_array(&mut result.sysname, "Linux");
init_array(&mut result.nodename, "local");
init_array(&mut result.release, "5.7.17-starnix");
init_array(&mut result.version, "starnix");
init_array(&mut result.machine, "x86_64");
ctx.task.mm.write_object(name, &result)?;
return Ok(SUCCESS);
}
pub fn sys_getrandom(
ctx: &SyscallContext<'_>,
buf_addr: UserAddress,
size: usize,
_flags: i32,
) -> Result<SyscallResult, Errno> {
let mut buf = vec![0; size];
let size = zx::cprng_draw(&mut buf).map_err(impossible_error)?;
ctx.task.mm.write_memory(buf_addr, &buf[0..size])?;
Ok(size.into())
}
const NANOS_PER_SECOND: i64 = 1000 * 1000 * 1000;
pub fn sys_clock_gettime(
ctx: &SyscallContext<'_>,
which_clock: u32,
tp_addr: UserRef<timespec>,
) -> Result<SyscallResult, Errno> {
let time = match which_clock {
CLOCK_REALTIME => utc_time(),
CLOCK_MONOTONIC => zx::Time::get_monotonic(),
_ => return Err(EINVAL),
};
let nanos = time.into_nanos();
let tv = timespec { tv_sec: nanos / NANOS_PER_SECOND, tv_nsec: nanos % NANOS_PER_SECOND };
return ctx.task.mm.write_object(tp_addr, &tv).map(|_| SUCCESS);
}
pub fn sys_gettimeofday(
ctx: &SyscallContext<'_>,
user_tv: UserRef<timeval>,
user_tz: UserRef<timezone>,
) -> Result<SyscallResult, Errno> {
if !user_tv.is_null() {
let now = utc_time().into_nanos();
let tv =
timeval { tv_sec: now / NANOS_PER_SECOND, tv_usec: (now % NANOS_PER_SECOND) / 1000 };
ctx.task.mm.write_object(user_tv, &tv)?;
}
if !user_tz.is_null() {
not_implemented!("gettimeofday does not implement tz argument");
}
return Ok(SUCCESS);
}
fn get_duration_from_timespec(ts: timespec) -> Result<zx::Duration, Errno> {
if ts.tv_nsec >= NANOS_PER_SECOND {
return Err(EINVAL);
}
return Ok(zx::Duration::from_seconds(ts.tv_sec) + zx::Duration::from_nanos(ts.tv_nsec));
}
pub fn sys_nanosleep(
ctx: &SyscallContext<'_>,
user_request: UserRef<timespec>,
_user_remaining: UserRef<timespec>,
) -> Result<SyscallResult, Errno> {
let mut request = timespec::default();
ctx.task.mm.read_object(user_request, &mut request)?;
let time = get_duration_from_timespec(request)?;
// TODO: We should be waiting on an object that can wake us up if we get a signal.
time.sleep();
Ok(SUCCESS)
}
pub fn sys_unknown(_ctx: &SyscallContext<'_>, syscall_number: u64) -> Result<SyscallResult, Errno> {
warn!(target: "unknown_syscall", "UNKNOWN syscall({}): {}", syscall_number, SyscallDecl::from_number(syscall_number).name);
// TODO: We should send SIGSYS once we have signals.
Err(ENOSYS)
}
| init_array |
strings.go | // Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package model
import "unicode"
var (
alphaNumericRange = []*unicode.RangeTable{unicode.L, unicode.Digit}
)
// IsAlphaNumeric returns whether a character is either a digit or a letter
func IsAlphaNumeric(r rune) bool {
return unicode.IsOneOf(alphaNumericRange, r)
}
// IsPrintable returns whether the string contains only Unicode printable characters
func IsPrintable(s string) bool {
for _, c := range s {
if !unicode.IsOneOf(unicode.PrintRanges, c) {
return false
}
}
return true
}
// IsPrintableASCII returns whether the string contains only ASCII letters, digits, and the separators '/', ':' and '-'
func | (s string) bool {
for _, c := range s {
if (c < 'A' || c > 'Z') && (c < 'a' || c > 'z') && c != '/' && c != ':' && c != '-' && (c < '0' || c > '9') {
return false
}
}
return true
}
| IsPrintableASCII |
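A small behavioral sketch (a hypothetical test in the same package) makes the accepted set explicit, since it is narrower than the function name suggests:

package model

import "testing"

// TestIsPrintableASCIISketch documents that only letters, digits and the
// separators '/', ':' and '-' pass; anything else (even a space) fails.
func TestIsPrintableASCIISketch(t *testing.T) {
	if !IsPrintableASCII("/usr/bin:abc-123") {
		t.Fatal("expected letters, digits, '/', ':' and '-' to pass")
	}
	if IsPrintableASCII("hello world") { // space is outside the accepted set
		t.Fatal("expected space to be rejected")
	}
}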
p011.py | # Execution time : 0.003847 seconds
# Solution Explanation
# A simple brute-force approach is enough
import time
width = 40
from functools import reduce
def solution():
v = list()
v.append([0]*23)
v.append([0]*23)
v.append([0]*23)
for line in open('input_p011.in','r'):
v.append(list(map(int,line.split())))
v[-1].extend([0,0,0])
v.append([0]*23)
v.append([0]*23)
v.append([0]*23)
ans = 0
for it1 in range(3,23):
for it2 in range(20):
ans = max(ans,reduce(lambda a,b: a*b,[v[it1][it2+k] for k in range(4)]))
ans = max(ans,reduce(lambda a,b: a*b,[v[it1+k][it2] for k in range(4)]))
ans = max(ans,reduce(lambda a,b: a*b,[v[it1-k][it2+k] for k in range(4)]))
ans = max(ans,reduce(lambda a,b: a*b,[v[it1+k][it2+k] for k in range(4)]))
return ans
if __name__=="__main__":
start_ = time.time() | print(' Answer -> %s '.center(width,'-') % ( solution() ))
print(' %f seconds '.center(width,'-') % ( time.time() - start_)) |
|
app.e2e-spec.ts | import { AppPage } from './app.po';
import { browser, logging } from 'protractor';
describe('workspace-project App', () => {
let page: AppPage;
beforeEach(() => {
page = new AppPage();
});
it('should display welcome message', () => {
page.navigateTo();
expect(page.getTitleText()).toEqual('Welcome to objectDetection!');
}); |
afterEach(async () => {
// Assert that there are no errors emitted from the browser
const logs = await browser.manage().logs().get(logging.Type.BROWSER);
expect(logs).not.toContain(jasmine.objectContaining({
level: logging.Level.SEVERE,
} as logging.Entry));
});
}); | |
lockman.go | // Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lockman
import (
"context"
"fmt"
)
type ILockedClass interface {
Keyword() string
}
type ILockedObject interface {
ILockedClass
GetId() string
}
type ILockManager interface {
LockKey(ctx context.Context, key string)
UnlockKey(ctx context.Context, key string)
}
func getClassKey(manager ILockedClass, projectId string) string {
// assert(getattr(cls, '_resource_name_', None) is not None)
// return '%s-%s' % (cls._resource_name_, user_cred.tenant_id)
return fmt.Sprintf("%s-%s", manager.Keyword(), projectId)
}
func getObjectKey(model ILockedObject) string {
// assert(getattr(obj, '_resource_name_', None) is not None)
// assert(getattr(obj, 'id', None) is not None)
// return '%s-%s' % (obj._resource_name_, obj.id)
return getRawObjectKey(model.Keyword(), model.GetId())
}
func getRawObjectKey(resName string, resId string) string {
return fmt.Sprintf("%s-%s", resName, resId)
}
func getJointObjectKey(model ILockedObject, model2 ILockedObject) string {
// def _get_joint_object_key(self, obj1, obj2, user_cred):
// return '%s-%s' % (self._get_object_key(obj1, user_cred),
// self._get_object_key(obj2, user_cred))
return fmt.Sprintf("%s-%s", getObjectKey(model), getObjectKey(model2))
}
var _lockman ILockManager
func Init(man ILockManager) {
_lockman = man
}
func LockClass(ctx context.Context, manager ILockedClass, projectId string) {
key := getClassKey(manager, projectId)
_lockman.LockKey(ctx, key)
}
func ReleaseClass(ctx context.Context, manager ILockedClass, projectId string) {
key := getClassKey(manager, projectId)
_lockman.UnlockKey(ctx, key)
}
func LockObject(ctx context.Context, model ILockedObject) {
key := getObjectKey(model)
_lockman.LockKey(ctx, key)
}
func ReleaseObject(ctx context.Context, model ILockedObject) {
key := getObjectKey(model)
_lockman.UnlockKey(ctx, key)
}
func LockRawObject(ctx context.Context, resName string, resId string) |
func ReleaseRawObject(ctx context.Context, resName string, resId string) {
key := getRawObjectKey(resName, resId)
_lockman.UnlockKey(ctx, key)
}
func LockJointObject(ctx context.Context, model ILockedObject, model2 ILockedObject) {
key := getJointObjectKey(model, model2)
_lockman.LockKey(ctx, key)
}
func ReleaseJointObject(ctx context.Context, model ILockedObject, model2 ILockedObject) {
key := getJointObjectKey(model, model2)
_lockman.UnlockKey(ctx, key)
}
| {
key := getRawObjectKey(resName, resId)
_lockman.LockKey(ctx, key)
} |
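A hedged usage sketch: the in-process manager below is an assumption invented for illustration (the project's real ILockManager implementations live in sibling packages), but the Init / Lock / deferred-Release pattern mirrors the functions defined above.

package lockman

import (
	"context"
	"sync"
)

// naiveLockManager is a hypothetical ILockManager backed by a map of mutexes;
// it exists only to make the sketch self-contained.
type naiveLockManager struct {
	mu    sync.Mutex
	locks map[string]*sync.Mutex
}

func newNaiveLockManager() *naiveLockManager {
	return &naiveLockManager{locks: map[string]*sync.Mutex{}}
}

func (m *naiveLockManager) LockKey(ctx context.Context, key string) {
	m.mu.Lock()
	l, ok := m.locks[key]
	if !ok {
		l = &sync.Mutex{}
		m.locks[key] = l
	}
	m.mu.Unlock()
	l.Lock() // blocks until the current holder of this key releases it
}

func (m *naiveLockManager) UnlockKey(ctx context.Context, key string) {
	m.mu.Lock()
	l := m.locks[key]
	m.mu.Unlock()
	l.Unlock()
}

// updateObject serializes mutations of one resource: concurrent callers derive
// the same "<keyword>-<id>" key, so the second blocks in LockObject.
func updateObject(ctx context.Context, obj ILockedObject) {
	LockObject(ctx, obj)
	defer ReleaseObject(ctx, obj)
	// ... mutate obj while holding the lock ...
}

func exampleInit() {
	Init(newNaiveLockManager()) // install the manager once at startup
}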
setup-users-and-groups.py | import argparse
import crypt
import json
import os
import pwd
import random
import re
import string
import subprocess
import sys
import traceback | from itertools import product
import yaml
class ACL:
@staticmethod
def get_file_acl(path):
if not os.path.exists(path):
raise IOError("The directory or file '{0}' does not exist".format(path))
cmd_result = execute_command(['getfacl', '-p', path])
if cmd_result['returncode'] != 0:
raise Exception("Failed to get ACL of file or directory '{0}': {1}".format(path, cmd_result['output']))
raw_acl = cmd_result['output'].splitlines()
owner = re.match(r'# owner: (.+)', raw_acl[1]).group(1)
group = re.match(r'# group: (.+)', raw_acl[2]).group(1)
acl = {'users': [], 'groups': [], 'other': None}
for a in raw_acl[3:]:
match_acl = re.match(r'user::([rwx-]+)', a)
if match_acl:
acl['users'].append({'name': '', 'permissions': match_acl.group(1)})
# explicitly add owner (e.g. webserver), so sub directories created
# by different user will still be readable by the original owner
acl['owner'] = {'name': owner, 'permissions': match_acl.group(1)}
continue
match_acl = re.match(r'user:([^:]+):([rwx-]+)', a)
if match_acl:
acl['users'].append({'name': match_acl.group(1), 'permissions': match_acl.group(2)})
continue
match_acl = re.match(r'group::([rwx-]+)', a)
if match_acl:
acl['groups'].append({'name': '', 'permissions': match_acl.group(1)})
acl['group'] = {'name': group, 'permissions': match_acl.group(1)}
continue
match_acl = re.match(r'group:([^:]+):([rwx-]+)', a)
if match_acl:
acl['groups'].append({'name': match_acl.group(1), 'permissions': match_acl.group(2)})
continue
match_acl = re.match(r'other::([rwx-]+)', a)
if match_acl:
acl['other'] = match_acl.group(1)
continue
return acl
@staticmethod
def file_acl_differs(path, new_acl):
old_acl = ACL.get_file_acl(path)
return json.dumps(old_acl, sort_keys=True) != json.dumps(new_acl, sort_keys=True)
@staticmethod
def set_file_acl(path, new_acl, force=False):
def format_acl_spec(prefix, name, permissions):
acl_spec = list()
acl_spec.append("{0}:{1}:{2}".format(prefix, name, permissions))
if os.path.isdir(path):
acl_spec.append("d:{0}:{1}:{2}".format(prefix, name, permissions))
return ','.join(acl_spec)
old_acl = ACL.get_file_acl(path)
if force or json.dumps(old_acl, sort_keys=True) != json.dumps(new_acl, sort_keys=True):
print("Setting ACLs of '{0}...".format(path))
# modify ACLs
setfacl_cmd = ['setfacl', '-R', '-m']
acl_spec = list()
for uacl in new_acl['users']:
acl_spec.append(format_acl_spec('u', uacl['name'], uacl['permissions']))
# explicitly add owner (e.g. webserver), so sub directories created
# by different user will still be readable by the original owner
acl_spec.append(format_acl_spec('u', new_acl['owner']['name'], new_acl['owner']['permissions']))
for gacl in new_acl['groups']:
acl_spec.append(format_acl_spec('g', gacl['name'], gacl['permissions']))
acl_spec.append(format_acl_spec('g', new_acl['group']['name'], new_acl['group']['permissions']))
acl_spec.append(format_acl_spec('o', '', new_acl['other']))
setfacl_cmd.append(','.join(acl_spec))
setfacl_cmd.append(path)
cmd_result = execute_command(setfacl_cmd)
if cmd_result['returncode'] != 0:
raise Exception("Failed to set ACL of file or directory '{0}': {1}".format(path, cmd_result['output']))
# remove ACLs
setfacl_cmd = ['setfacl', '-R', '-x']
acl_spec = list()
users_to_remove = list(
set([x['name'] for x in old_acl['users']]) - set([x['name'] for x in new_acl['users']]))
groups_to_remove = list(
set([x['name'] for x in old_acl['groups']]) - set([x['name'] for x in new_acl['groups']]))
for u in users_to_remove:
acl_spec.append(format_acl_spec('u', u, ''))
for g in groups_to_remove:
acl_spec.append(format_acl_spec('g', g, ''))
if acl_spec:
setfacl_cmd.append(','.join(acl_spec))
setfacl_cmd.append(path)
cmd_result = execute_command(setfacl_cmd)
if cmd_result['returncode'] != 0:
raise Exception(
"Failed to remove ACL from file or directory '{0}': {1}".format(path, cmd_result['output']))
def get_arg(config, arg, dtype, default=None, required=False):
if required and not arg in config:
raise ValueError("Missing key '{0}'".format(arg))
if not arg in config:
return default
if type(config[arg]) is not dtype:
raise ValueError("'{0}' must be of type '{1}', got '{2}'".format(arg, str(dtype), str(config[arg])))
return config[arg]
def execute_command(cmd):
try:
return {'returncode': 0,
'output': subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)}
except subprocess.CalledProcessError as e:
return {'returncode': e.returncode, 'output': e.output}
def recursive_chown(path, uid, gid):
os.chown(path, uid, gid)
for item in os.listdir(path):
itempath = os.path.join(path, item)
if os.path.isfile(itempath):
os.chown(itempath, uid, gid)
elif os.path.isdir(itempath):
os.chown(itempath, uid, gid)
recursive_chown(itempath, uid, gid)
def main():
# parse arguments
parser = argparse.ArgumentParser(
prog='setup-users-and-groups',
description='According to a configuration file this script creates Linux users/groups and grants permissions on resources.',
add_help=True)
parser.add_argument('-f', '--force', dest='force',
action='store_true', default=False, help="Force the setting the ACLs.")
parser.add_argument('-c', '--create-dir', dest='create_dir',
action='store_true', default=False, help="Create a directory for a path that does not exists.")
parser.add_argument('configuration_file', help="File that defines what to do.")
args = parser.parse_args(sys.argv[1:])
try:
# load configuration either from file or from stdin
if args.configuration_file == '-':
inp = sys.stdin.read()
config = yaml.load(inp) or dict()
else:
if not os.path.exists(args.configuration_file):
raise IOError("The configuration file '{0}' does not exist".format(args.configuration_file))
with open(file=args.configuration_file, mode='r', encoding='utf8') as f:
config = yaml.load(f.read())
# parse configuration values
groups = get_arg(config, "groups", dict, dict())
users = get_arg(config, "users", dict, dict())
defaults = get_arg(config, "defaults", dict, None) or dict()
defaults = {
'owner_permissions': get_arg(defaults, "owner_permissions", str, None),
'owner_group_permissions': get_arg(defaults, "owner_group_permissions", str, None),
'user_permissions': get_arg(defaults, "user_permissions", str, 'rwx'),
'group_permissions': get_arg(defaults, "group_permissions", str, 'rwx'),
}
acls = dict()
# create groups
for group, gdef in groups.items():
if type(gdef) != dict:
raise ValueError("The group definition of '{0}' must be of type dict".format(group))
gid = get_arg(gdef, 'gid', int, None)
permissions = get_arg(gdef, 'permissions', list, list())
# add group if it doesn't already exists
if execute_command(['getent', 'group', group])['returncode'] == 0:
print("Group '{0}' already exists, skipping...".format(group))
else:
print("Creating group '{0}'...".format(group))
groupadd_cmd = ['groupadd']
if gid:
groupadd_cmd += ['-g', str(gid)]
groupadd_cmd.append(group)
cmd_result = execute_command(groupadd_cmd)
if cmd_result['returncode'] != 0:
raise Exception("Failed to create group '{0}': {1}".format(group, cmd_result['output']))
# parse permissions
for perm in permissions:
path = get_arg(perm, "path", str, None, required=True)
if not os.path.exists(path):
if args.create_dir:
os.makedirs(path, 0o750)
else:
raise IOError("The directory or file '{0}' does not exist".format(path))
path_permissions = get_arg(perm, 'permissions', str, defaults['group_permissions'])
new_acl = {'name': group, 'permissions': path_permissions}
if path in acls:
acls[path]['groups'].append(new_acl)
else:
user_group_default = {'name': '', 'permissions': defaults['group_permissions']}
acls[path] = {'users': [user_group_default], 'groups': [user_group_default, new_acl],
'other': '---'}
# create users
for user, udef in users.items():
if type(udef) != dict:
raise ValueError("The user definition of '{0}' must be of type dict".format(user))
uid = get_arg(udef, 'uid', int, None)
groups = get_arg(udef, 'groups', list, None)
home = get_arg(udef, 'home', str, None)
random_string = ''.join(
random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(64))
hashed_password = crypt.crypt(get_arg(udef, 'password', str, random_string),
crypt.mksalt(crypt.METHOD_SHA512))
ssh_public_key = get_arg(udef, 'ssh_public_key', str, '')
permissions = get_arg(udef, 'permissions', list, list())
# add user if it doesn't already exists
if execute_command(['getent', 'passwd', user])['returncode'] == 0:
print("User '{0}' already exists, skipping...".format(user))
else:
print("Creating user '{0}'...".format(user))
useradd_cmd = ['useradd', '-m', '-p', hashed_password, '-U', '-s', '/bin/bash']
if uid:
useradd_cmd += ['-u', str(uid)]
if groups:
useradd_cmd += ['-G', ','.join(groups)]
if home:
useradd_cmd += ['-d', home]
useradd_cmd.append(user)
cmd_result = execute_command(useradd_cmd)
if cmd_result['returncode'] != 0:
raise Exception("Failed to create user '{0}': {1}".format(user, cmd_result['output']))
# set SSH public key
user_info = pwd.getpwnam(user)
ak_file = os.path.join(user_info.pw_dir, '.ssh/authorized_keys')
authorized_key_string = "## !!! DO NOT EDIT THIS FILE !!!\n## This file is generated automatically. Any changes will eventually be lost.\n## If you would like to add an SSH public key, contact your administrator.\n" + ssh_public_key
os.makedirs(os.path.dirname(ak_file), 0o750, True)
with open(file=ak_file, mode='w', encoding='utf8') as f:
f.write(authorized_key_string)
os.chmod(ak_file, 0o400)
recursive_chown(user_info.pw_dir, user_info.pw_uid, user_info.pw_gid)
# parse permissions
for perm in permissions:
path = get_arg(perm, "path", str, None, required=True)
if not os.path.exists(path):
if args.create_dir:
os.makedirs(path, 0o750)
else:
raise IOError("The directory or file '{0}' does not exist".format(path))
path_permissions = get_arg(perm, 'permissions', str, defaults['user_permissions'])
new_acl = {'name': user, 'permissions': path_permissions}
if path in acls:
acls[path]['users'].append(new_acl)
else:
user_group_default = {'name': '', 'permissions': defaults['user_permissions']}
acls[path] = {'users': [user_group_default, new_acl], 'groups': [user_group_default],
'other': '---'}
# set ACLs
paths = list(acls.keys())
paths.sort()
# find prefix paths and append their permissions; otherwise longer paths would overwrite the shorter paths' permissions
for p1, p2 in product(paths, paths):
if p1 != p2 and p2.startswith(p1):
acls[p2]['users'] += acls[p1]['users']
acls[p2]['groups'] += acls[p1]['groups']
for path in paths:
old_acl = ACL.get_file_acl(path)
acls[path]['owner'] = {'name': old_acl['owner']['name'], 'permissions': defaults['owner_permissions'] or old_acl['owner']['permissions']}
acls[path]['group'] = {'name': old_acl['group']['name'], 'permissions': defaults['owner_group_permissions'] or old_acl['group']['permissions']}
ACL.set_file_acl(path, acls[path], args.force)
except Exception as e:
sys.stderr.write(str(e) + '\n\n')
traceback.print_exc(5)
exit(1)
if __name__ == '__main__':
main() | |
td0_policy.py | from deepdab.ai import *
class TDZeroPolicy(TabularPolicy):
def | (self, board_size, learning_rate=0.0, gamma=0.0, epsilon=0.0, initial_state_value=0.0, table_file_path=None):
super(TDZeroPolicy, self).__init__(board_size=board_size, epsilon=epsilon,
initial_state_value=initial_state_value, table_file_path=table_file_path)
self._learning_rate = learning_rate
self._gamma = gamma
def update_value(self, reward, initial_state, selected_state):
initial_state_string = self._find_state_string(initial_state)
selected_state_string = self._find_state_string(selected_state)
initial_state_value = self._value_table[initial_state_string]
selected_state_value = self._value_table[selected_state_string]
self._value_table[initial_state_string] = initial_state_value + self._learning_rate * (reward + (self._gamma * selected_state_value) - initial_state_value)
| __init__ |
720.js | const function720 = function (t, e, i) {
"use strict";
var n = this && this.__extends || function () {
var t = Object.setPrototypeOf || { __proto__: [] } instanceof Array && function (t, e) {
t.__proto__ = e
} || function (t, e) {
for (var i in e) e.hasOwnProperty(i) && (t[i] = e[i])
};
return function (e, i) {
function | () {
this.constructor = e
}
t(e, i), e.prototype = null === i ? Object.create(i) : (n.prototype = i.prototype, new n)
}
}();
Object.defineProperty(e, "__esModule", { value: !0 });
var o = i(11), r = function (t) {
function e(e) {
var i = t.call(this) || this;
return i.organizeSceneMain = e, i
}
return n(e, t), e.prototype._start = function () {
this.organizeSceneMain.prefinalize(), this._completedEnd()
}, e
}(o.TaskBase);
e.PreFinalizeTask = r
} | n |
excludes_request_builder.go | package excludes
import (
ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9 "github.com/microsoft/kiota/abstractions/go"
i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization"
i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87 "github.com/microsoftgraph/msgraph-sdk-go/models/microsoft/graph"
)
// ExcludesRequestBuilder builds and executes requests for operations under \policies\permissionGrantPolicies\{permissionGrantPolicy-id}\excludes
type ExcludesRequestBuilder struct {
// Path parameters for the request
pathParameters map[string]string;
// The request adapter to use to execute the requests.
requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter;
// Url template to use to build the URL for the current request builder
urlTemplate string;
}
// ExcludesRequestBuilderGetOptions options for Get
type ExcludesRequestBuilderGetOptions struct {
// Request headers
H map[string]string;
// Request options
O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption;
// Request query parameters
Q *ExcludesRequestBuilderGetQueryParameters;
// Response handler to use in place of the default response handling provided by the core service
ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler;
}
// ExcludesRequestBuilderGetQueryParameters condition sets which are excluded in this permission grant policy. Automatically expanded on GET.
type ExcludesRequestBuilderGetQueryParameters struct {
// Include count of items
Count *bool;
// Expand related entities
Expand []string;
// Filter items by property values
Filter *string;
// Order items by property values
Orderby []string;
// Search items by search phrases
Search *string;
// Select properties to be returned
Select_escaped []string;
// Skip the first n items
Skip *int32;
// Show only the first n items
Top *int32;
}
// ExcludesRequestBuilderPostOptions options for Post
type ExcludesRequestBuilderPostOptions struct {
//
Body *i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.PermissionGrantConditionSet;
// Request headers
H map[string]string;
// Request options
O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption;
// Response handler to use in place of the default response handling provided by the core service
ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler;
}
// NewExcludesRequestBuilderInternal instantiates a new ExcludesRequestBuilder and sets the default values.
func NewExcludesRequestBuilderInternal(pathParameters map[string]string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*ExcludesRequestBuilder) {
m := &ExcludesRequestBuilder{
}
m.urlTemplate = "{+baseurl}/policies/permissionGrantPolicies/{permissionGrantPolicy_id}/excludes{?top,skip,search,filter,count,orderby,select,expand}";
urlTplParams := make(map[string]string)
for idx, item := range pathParameters {
urlTplParams[idx] = item
}
m.pathParameters = pathParameters;
m.requestAdapter = requestAdapter;
return m
}
// NewExcludesRequestBuilder instantiates a new ExcludesRequestBuilder and sets the default values.
func | (rawUrl string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*ExcludesRequestBuilder) {
urlParams := make(map[string]string)
urlParams["request-raw-url"] = rawUrl
return NewExcludesRequestBuilderInternal(urlParams, requestAdapter)
}
// CreateGetRequestInformation condition sets which are excluded in this permission grant policy. Automatically expanded on GET.
func (m *ExcludesRequestBuilder) CreateGetRequestInformation(options *ExcludesRequestBuilderGetOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) {
requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.GET
if options != nil && options.Q != nil {
requestInfo.AddQueryParameters(*(options.Q))
}
if options != nil && options.H != nil {
requestInfo.Headers = options.H
}
if options != nil && len(options.O) != 0 {
err := requestInfo.AddRequestOptions(options.O...)
if err != nil {
return nil, err
}
}
return requestInfo, nil
}
// CreatePostRequestInformation condition sets which are excluded in this permission grant policy. Automatically expanded on GET.
func (m *ExcludesRequestBuilder) CreatePostRequestInformation(options *ExcludesRequestBuilderPostOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) {
requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.POST
requestInfo.SetContentFromParsable(m.requestAdapter, "application/json", options.Body)
if options != nil && options.H != nil {
requestInfo.Headers = options.H
}
if options != nil && len(options.O) != 0 {
err := requestInfo.AddRequestOptions(options.O...)
if err != nil {
return nil, err
}
}
return requestInfo, nil
}
// Get condition sets which are excluded in this permission grant policy. Automatically expanded on GET.
func (m *ExcludesRequestBuilder) Get(options *ExcludesRequestBuilderGetOptions)(*ExcludesResponse, error) {
requestInfo, err := m.CreateGetRequestInformation(options);
if err != nil {
return nil, err
}
res, err := m.requestAdapter.SendAsync(*requestInfo, func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return NewExcludesResponse() }, nil)
if err != nil {
return nil, err
}
return res.(*ExcludesResponse), nil
}
// Post condition sets which are excluded in this permission grant policy. Automatically expanded on GET.
func (m *ExcludesRequestBuilder) Post(options *ExcludesRequestBuilderPostOptions)(*i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.PermissionGrantConditionSet, error) {
requestInfo, err := m.CreatePostRequestInformation(options);
if err != nil {
return nil, err
}
res, err := m.requestAdapter.SendAsync(*requestInfo, func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.NewPermissionGrantConditionSet() }, nil)
if err != nil {
return nil, err
}
return res.(*i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.PermissionGrantConditionSet), nil
}
| NewExcludesRequestBuilder |
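A hedged sketch of driving this builder directly: the Graph base URL is an assumption, and `adapter` is taken as given from the msgraph-sdk-go authentication setup (not shown in this file).

package excludes

import (
	ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9 "github.com/microsoft/kiota/abstractions/go"
)

// listFirstExcludes issues a GET against the excludes collection, capping the
// page size via the $top query parameter.
func listFirstExcludes(adapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter, policyId string) (*ExcludesResponse, error) {
	builder := NewExcludesRequestBuilder(
		"https://graph.microsoft.com/v1.0/policies/permissionGrantPolicies/"+policyId+"/excludes",
		adapter)
	top := int32(5)
	return builder.Get(&ExcludesRequestBuilderGetOptions{
		Q: &ExcludesRequestBuilderGetQueryParameters{Top: &top},
	})
}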
delimiters.js | "use strict";
Object.defineProperty(exports, '__esModule', {value: true});
exports.HDW1 = MathJax._.output.common.fonts.tex.delimiters.HDW1;
exports.HDW2 = MathJax._.output.common.fonts.tex.delimiters.HDW2;
exports.HDW3 = MathJax._.output.common.fonts.tex.delimiters.HDW3;
exports.VSIZES = MathJax._.output.common.fonts.tex.delimiters.VSIZES; | exports.delimiters = MathJax._.output.common.fonts.tex.delimiters.delimiters; |
|
main_multi.py |
import argparse
import os
import queue
import multiprocessing as mp
# import SharedArray as sa
import numpy as np
from copy import deepcopy
from time import time
from pprint import pprint
from utils.data_manipulators import *
from evolution.operators import *
from to.probabilistic_model import ProbabilisticModel
from to.mixture_model import MixtureModel
from evolution.chromosome import *
class EAProcess(mp.Process):
def __init__(self, dims, psize, gen, problem, shared_queue,
shared_array, t_lock, list_lock, return_list, transfer_interval=2):
super(EAProcess, self).__init__()
self.dims = dims
self.psize = psize
print('hi')
self.gen = gen
self.problem = problem
self.shared_queue = shared_queue
self.shared_array = shared_array
# self.shared_lock = shared_lock
self.t_lock = t_lock
self.list_lock = list_lock
self.transfer_interval = transfer_interval
self.reinitialize()
self.return_list = return_list
def reinitialize(self):
self.fitness_hist = np.zeros((self.gen, self.psize))
self.fitness_time = np.zeros((self.gen))
init_func = lambda n: np.round(np.random.rand(n))
self.pop = get_pop_init(self.psize, self.dims, init_func)
def _ea(self):
start = time()
for i in range(self.psize): self.pop[i].fitness_calc(self.problem)
self.bestfitness = np.max(self.pop).fitness
self.fitness = Chromosome.fitness_to_numpy(self.pop)
self.fitness_hist[0, :] = self.fitness
self.fitness_time[0] = time() - start
for i in range(1, self.gen):
start = time()
if i%self.transfer_interval == 0 and i//self.transfer_interval == 1:
print('transfer start')
self.t_lock.release()
if i%self.transfer_interval == 0:
recieved_pops = None
try:
while True:
if recieved_pops is None:
recieved_pops = list(self.shared_queue.get(block=True))
else:
recieved_pops += list(self.shared_queue.get(block=False))
except queue.Empty:
print('Queue is empty now')
print('recieved_pops: ', len(recieved_pops))
self.pop = total_selection_pop(np.concatenate((self.pop, recieved_pops)), self.psize)
offsprings = total_crossover(self.pop)
for j in range(self.psize): offsprings[j].mutation(1/self.dims)
# Fitness Calculation
cfitness = np.zeros(self.psize)
for j in range(self.psize):
cfitness[j] = offsprings[j].fitness_calc(self.problem)
self.pop, self.fitness = total_selection(np.concatenate((self.pop, offsprings)),
np.concatenate((self.fitness, cfitness)), self.psize)
self.fitness_hist[i, :] = self.fitness
if self.fitness[0] > self.bestfitness:
self.bestfitness = self.fitness[0]
print('Generation %d best fitness = %f' % (i, self.bestfitness))
self.list_lock.acquire()
self.shared_array[:] = Chromosome.genes_to_list(self.pop)
self.list_lock.release()
self.fitness_time[i] = time() - start
print('Shared Array is now available')
self.return_list.append([self.fitness_time, self.fitness_hist])
def run(self):
# When target array is prepared it will be unlocked
print ('called run method in process: %s' %self.name)
self._ea()
return
class | (mp.Process):
def __init__(self, dims, problem, mutation_strength,
sample_size, sub_sample_size, src_models,
shared_queue, shared_array, t_lock,
list_lock, transfer_interval=2):
super(TransferProcess, self).__init__()
self.dims = dims
self.problem = problem
self.src_models = src_models
self.mutation_strength = mutation_strength
self.sample_size = sample_size
self.sub_sample_size = sub_sample_size
self.shared_queue = shared_queue
self.shared_array = shared_array
# self.shared_lock = shared_lock
self.t_lock = t_lock
self.list_lock = list_lock
self.transfer_interval = transfer_interval
self.reinitialize()
def reinitialize(self):
# self.fitness_hist = np.zeros((self.gen, self.psize))
# self.fitness_time = np.zeros((self.gen))
dims_s2 = len(self.src_models)+1
self.second_specie = StrategyChromosome(dims_s2)
def _transfer_ea(self):
prev_samples = None
genes_differ = None
target_model = ProbabilisticModel(modelType='umd')
self.list_lock.acquire()
target_array = np.array(self.shared_array[:])
self.list_lock.release()
target_model.buildModel(target_array)
_, sampled_offsprings, prev_samples = \
self.second_specie.fitness_calc(self.problem, self.src_models, target_model, self.sample_size,
self.sub_sample_size, mutation_vec=genes_differ, prev_samples=deepcopy(prev_samples),
efficient_version=True)
self.shared_queue.put(sampled_offsprings)
while True:
offspring = deepcopy(self.second_specie)
genes_differ = offspring.mutation(self.mutation_strength, 0, 1)
target_model = ProbabilisticModel(modelType='umd')
self.list_lock.acquire()
target_array = np.array(self.shared_array[:])
self.list_lock.release()
target_model.buildModel(target_array)
_, sampled_offsprings, prev_samples_tmp = \
offspring.fitness_calc(self.problem, self.src_models, target_model, self.sample_size,
self.sub_sample_size, mutation_vec=genes_differ, prev_samples=deepcopy(prev_samples),
efficient_version=True)
self.shared_queue.put(sampled_offsprings)
self.second_specie, self.mutation_strength, is_off_selected = selection_adoption(self.second_specie, offspring, self.mutation_strength)
if is_off_selected:
prev_samples = prev_samples_tmp
# second_species_gen_num += 1
# while True:
def run(self):
self.t_lock.acquire()
print ('called run method in process: %s' %self.name)
self._transfer_ea()
return
def get_args():
parser = argparse.ArgumentParser(description='CoOperative CoEvolution Transfer Optimization Algorithm for Solving Multi-location Inventory Planning with Lateral Transshipments')
parser.add_argument('--stop_condition', default=True,
type=bool, nargs='?',
help="Stop after i number of iteraction if fitness didn't changed")
parser.add_argument('--reps', default=1,
type=int, nargs='?',
help='Number of repetition')
parser.add_argument('--delta', default=2,
type=int, nargs='?',
help='Step for switching between transfer optimization and evolutionary operations')
# parser.add_argument('--buildmodel', default=True,
# type=bool, nargs='?',
# help='Should we build source models?')
parser.add_argument('--src_version', default='v1',
type=str, nargs='?',
help='What version of source models should be used?')
parser.add_argument('--s1_psize', default=50,
type=int, nargs='?',
help='Population size for the first species?')
# parser.add_argument('--s2_psize', default=20,
# type=int, nargs='?',
# help='Population size for the second species?')
parser.add_argument('--sample_size', default=50,
type=int, nargs='?',
help='Number of samples generated from each AlphaChromosome?')
parser.add_argument('--sub_sample_size', default=50,
type=int, nargs='?',
help='How many samples should we take from sample_size number of samples generated?')
# parser.add_argument('-v', dest='version', default='v1',
# type=str, nargs='?',
# help='What version should be executed?')
parser.add_argument('--mutation_strength', default=1,
type=int, nargs='?',
help='The same step-size which we use in evolution strategy')
parser.add_argument('--injection_type', default='elite',
type=str, nargs='?',
help='What method do you want to use for injection of species 2 to species 1?')
parser.add_argument('--to_repititon_num', default=1,
type=int, nargs='?',
help='How many times should we repeat the transfer step in the evolution strategy?')
parser.add_argument('--selection_version', default='v1',
type=str, nargs='?',
help='What selection version should we use in evolution strategy E(1 + 1)?')
parser.add_argument('-c', default=2,
type=int, nargs='?',
help='Parameter of E(1 + 1) algorithm selection')
parser.add_argument('--efficient_version', default=False,
type=bool, nargs='?',
help='Use the efficient version of the evolution strategy?')
parser.add_argument('--transfer_repeat_num', default=None,
type=int, nargs='?',
help=''' Number of times transfer optimization should be run.
if it is None, it will be repeated in every delta iteration''')
# parser.add_argument('-q', dest='matrix_num', default='a',
# type=str, nargs='?',
# help='T^0_H matrix selector for section b')
return parser.parse_args()
def main_multi(args):
# constants
models_path = 'models'
source_models_path = os.path.join(models_path, 'knapsack_source_models')
knapsack_problem_path = 'problems/knapsack'
dims = 1000
psize = args.s1_psize
mutation_strength = args.mutation_strength
reps = args.reps
transfer_interval = args.delta
sub_sample_size = args.sub_sample_size
sample_size = args.sample_size
gen = 100
# Loading Target Problem
target_problem = Tools.load_from_file(os.path.join(knapsack_problem_path, 'KP_uc_ak'))
# Loading Source Models
src_models = Tools.load_from_file(source_models_path + '_{}'.format(args.src_version))
main_m = mp.Manager()
return_list = main_m.list()
for i in range(reps):
# Shared Variables
m = mp.Manager()
shared_queue = m.Queue()
shared_array = m.list([[0 for j in range(dims)] for i in range(psize)])
# prep_lock = m.Lock() # This lock is used for starting transfer learning
# prep_lock.acquire()
list_lock = m.Lock() # \\ for synchronozing read & write of the list
# q_lock = m.Lock() # \\ for synchronozing put & get of the queue
transfer_lock = m.Lock() # \\ will synchronize the transfer_interval for EAProcess
transfer_lock.acquire()
ea_process = EAProcess(dims, psize, gen, target_problem, shared_queue,
shared_array, transfer_lock, list_lock, return_list,
transfer_interval=transfer_interval)
tr_process = TransferProcess(dims, target_problem, mutation_strength,
sample_size, sub_sample_size, src_models,
shared_queue, shared_array, transfer_lock,
list_lock, transfer_interval=transfer_interval)
ea_process.start()
tr_process.start()
ea_process.join()
tr_process.terminate()
tr_process.join()
Tools.save_to_file(args.save_path, return_list[:])
if __name__ == '__main__':
args = get_args()
main_multi(args)
| TransferProcess |
styled.js | import styled from "styled-components"
import media from 'styled-media-query'
import AniLink from "gatsby-plugin-transition-link/AniLink"
export const ProfileWrapper = styled.section`
color: var(--texts);
display: flex;
flex-direction: column;
${media.lessThan('large')`
flex-direction: row;
`}
`
export const ProfileLink = styled(AniLink)`
color: var(--texts);
text-decoration: none;
transition: color 0.5s; | `}
&:hover {
color: var(--highlight);
}
`
export const ProfileAuthor = styled.h1`
font-size: 1.6rem;
margin: 0.5rem auto 1.5rem;
${media.lessThan('large')`
font-size: 1.2rem;
margin: 0 0 0 10px;
`}
`
export const ProfilePosition = styled.small`
display: block;
font-size: 1.2rem;
font-weight: 300;
${media.lessThan('large')`
font-size: 0.8rem;
margin-top: 0.2rem;
`}
`
export const ProfileDescription = styled.p`
font-size: 1rem;
font-weight: 300;
line-height: 1.4;
${media.lessThan('large')`
display: none;
`}
` |
${media.lessThan('large')`
text-align: left; |
create-server.js | module.exports = createServer
// requires
var Sequelize = require('sequelize')
var express = require('express')
var bodyParser = require('body-parser')
var https = require('https')
var fs = require('fs')
var wc = require('../')
var createApp = require('./create-app')
/**
* Creates the web credits HTTPS server.
* @param {Object} argv configuration overrides (currency, database, wallet, key, cert, port)
*/
function createServer(argv) {
// vars
var sequelize
var config = wc.getConfig()
var defaultCurrency = 'https://w3id.org/cc#bit'
var defaultDatabase = 'webcredits'
var defaultWallet = 'https://localhost/wallet/test#this'
config.currency = argv.currency || config.currency || defaultCurrency | config.key = argv.key || null
config.cert = argv.cert || null
var port = argv.port
// run main
sequelize = wc.setupDB(config)
var app = express()
var wcApp = createApp(null, sequelize, config)
app.use('/', wcApp)
var defaultPort = 11077
port = port || defaultPort
console.log(config)
var key
try {
key = fs.readFileSync(config.key)
} catch (e) {
throw new Error('Can\'t find SSL key in ' + config.key)
}
var cert
try {
cert = fs.readFileSync(config.cert)
} catch (e) {
throw new Error('Can\'t find SSL cert in ' + config.cert)
}
var credentials = {
key: key,
cert: cert,
requestCert: true
}
var server = https.createServer(credentials, app)
return server
} | config.database = argv.database || config.database || defaultDatabase
config.wallet = argv.wallet || config.wallet || defaultWallet |
func2subr.py | #!/usr/bin/env python
"""
Rules for building C/API module with f2py2e.
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2004/11/26 11:13:06 $
Pearu Peterson
"""
__version__ = "$Revision: 1.16 $"[10:-1]
f2py_version='See `f2py -v`'
import pprint
import copy
import sys
errmess=sys.stderr.write
outmess=sys.stdout.write
show=pprint.pprint
from auxfuncs import *
def var2fixfortran(vars,a,fa=None,f90mode=None):
if fa is None:
fa = a
if a not in vars:
show(vars)
outmess('var2fixfortran: No definition for argument "%s".\n'%a)
return ''
if 'typespec' not in vars[a]:
show(vars[a])
outmess('var2fixfortran: No typespec for argument "%s".\n'%a)
return ''
vardef=vars[a]['typespec']
if vardef=='type' and 'typename' in vars[a]:
vardef='%s(%s)'%(vardef,vars[a]['typename'])
selector={}
lk = ''
if 'kindselector' in vars[a]:
selector=vars[a]['kindselector']
lk = 'kind'
elif 'charselector' in vars[a]:
selector=vars[a]['charselector']
lk = 'len'
if '*' in selector:
if f90mode:
if selector['*'] in ['*',':','(*)']:
vardef='%s(len=*)'%(vardef)
else:
vardef='%s(%s=%s)'%(vardef,lk,selector['*'])
else:
if selector['*'] in ['*',':']:
vardef='%s*(%s)'%(vardef,selector['*'])
else:
vardef='%s*%s'%(vardef,selector['*'])
else:
if 'len' in selector:
vardef='%s(len=%s'%(vardef,selector['len'])
if 'kind' in selector:
vardef='%s,kind=%s)'%(vardef,selector['kind'])
else:
vardef='%s)'%(vardef)
elif 'kind' in selector:
vardef='%s(kind=%s)'%(vardef,selector['kind'])
vardef='%s %s'%(vardef,fa)
if 'dimension' in vars[a]:
vardef='%s(%s)'%(vardef,','.join(vars[a]['dimension']))
return vardef
def createfuncwrapper(rout,signature=0):
assert isfunction(rout)
extra_args = []
vars = rout['vars']
for a in rout['args']:
v = rout['vars'][a]
for i,d in enumerate(v.get('dimension',[])):
if d==':':
dn = 'f2py_%s_d%s' % (a, i)
dv = dict(typespec='integer', intent=['hide'])
dv['='] = 'shape(%s, %s)' % (a, i)
extra_args.append(dn)
vars[dn] = dv
v['dimension'][i] = dn
rout['args'].extend(extra_args)
need_interface = bool(extra_args)
ret = ['']
def add(line,ret=ret):
ret[0] = '%s\n %s'%(ret[0],line)
name = rout['name']
fortranname = getfortranname(rout)
f90mode = ismoduleroutine(rout)
newname = '%sf2pywrap'%(name)
if newname not in vars:
vars[newname] = vars[name]
args = [newname]+rout['args'][1:]
else:
args = [newname]+rout['args']
l = var2fixfortran(vars,name,newname,f90mode)
return_char_star = 0
if l[:13]=='character*(*)':
return_char_star = 1
if f90mode: l = 'character(len=10)'+l[13:]
else: l = 'character*10'+l[13:]
charselect = vars[name]['charselector']
if charselect.get('*','')=='(*)':
charselect['*'] = '10'
sargs = ', '.join(args)
if f90mode:
add('subroutine f2pywrap_%s_%s (%s)'%(rout['modulename'],name,sargs))
if not signature:
add('use %s, only : %s'%(rout['modulename'],fortranname))
else:
add('subroutine f2pywrap%s (%s)'%(name,sargs))
if not need_interface:
add('external %s'%(fortranname))
l = l + ', '+fortranname
if need_interface:
for line in rout['saved_interface'].split('\n'):
if line.lstrip().startswith('use '):
add(line)
args = args[1:]
dumped_args = []
for a in args:
if isexternal(vars[a]):
add('external %s'%(a))
dumped_args.append(a)
for a in args:
if a in dumped_args: continue
if isscalar(vars[a]):
add(var2fixfortran(vars,a,f90mode=f90mode))
dumped_args.append(a)
for a in args:
if a in dumped_args: continue
if isintent_in(vars[a]):
add(var2fixfortran(vars,a,f90mode=f90mode))
dumped_args.append(a)
for a in args:
if a in dumped_args: continue
add(var2fixfortran(vars,a,f90mode=f90mode))
add(l)
if need_interface:
if f90mode:
# f90 module already defines needed interface
pass
else:
add('interface')
add(rout['saved_interface'].lstrip())
add('end interface')
sargs = ', '.join([a for a in args if a not in extra_args])
if not signature:
if islogicalfunction(rout):
add('%s = .not.(.not.%s(%s))'%(newname,fortranname,sargs))
else:
add('%s = %s(%s)'%(newname,fortranname,sargs))
if f90mode:
add('end subroutine f2pywrap_%s_%s'%(rout['modulename'],name))
else:
add('end')
#print '**'*10
#print ret[0]
#print '**'*10
return ret[0]
def createsubrwrapper(rout,signature=0):
assert issubroutine(rout) | vars = rout['vars']
for a in rout['args']:
v = rout['vars'][a]
for i,d in enumerate(v.get('dimension',[])):
if d==':':
dn = 'f2py_%s_d%s' % (a, i)
dv = dict(typespec='integer', intent=['hide'])
dv['='] = 'shape(%s, %s)' % (a, i)
extra_args.append(dn)
vars[dn] = dv
v['dimension'][i] = dn
rout['args'].extend(extra_args)
need_interface = bool(extra_args)
ret = ['']
def add(line,ret=ret):
ret[0] = '%s\n %s'%(ret[0],line)
name = rout['name']
fortranname = getfortranname(rout)
f90mode = ismoduleroutine(rout)
args = rout['args']
sargs = ', '.join(args)
if f90mode:
add('subroutine f2pywrap_%s_%s (%s)'%(rout['modulename'],name,sargs))
if not signature:
add('use %s, only : %s'%(rout['modulename'],fortranname))
else:
add('subroutine f2pywrap%s (%s)'%(name,sargs))
if not need_interface:
add('external %s'%(fortranname))
if need_interface:
for line in rout['saved_interface'].split('\n'):
if line.lstrip().startswith('use '):
add(line)
dumped_args = []
for a in args:
if isexternal(vars[a]):
add('external %s'%(a))
dumped_args.append(a)
for a in args:
if a in dumped_args: continue
if isscalar(vars[a]):
add(var2fixfortran(vars,a,f90mode=f90mode))
dumped_args.append(a)
for a in args:
if a in dumped_args: continue
add(var2fixfortran(vars,a,f90mode=f90mode))
if need_interface:
if f90mode:
# f90 module already defines needed interface
pass
else:
add('interface')
add(rout['saved_interface'].lstrip())
add('end interface')
sargs = ', '.join([a for a in args if a not in extra_args])
if not signature:
add('call %s(%s)'%(fortranname,sargs))
if f90mode:
add('end subroutine f2pywrap_%s_%s'%(rout['modulename'],name))
else:
add('end')
#print '**'*10
#print ret[0]
#print '**'*10
return ret[0]
def assubr(rout):
if isfunction_wrap(rout):
fortranname = getfortranname(rout)
name = rout['name']
outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n'%(name,fortranname))
rout = copy.copy(rout)
fname = name
rname = fname
if 'result' in rout:
rname = rout['result']
rout['vars'][fname]=rout['vars'][rname]
fvar = rout['vars'][fname]
if not isintent_out(fvar):
if 'intent' not in fvar:
fvar['intent']=[]
fvar['intent'].append('out')
flag=1
for i in fvar['intent']:
if i.startswith('out='):
flag = 0
break
if flag:
fvar['intent'].append('out=%s' % (rname))
rout['args'][:] = [fname] + rout['args']
return rout,createfuncwrapper(rout)
if issubroutine_wrap(rout):
fortranname = getfortranname(rout)
name = rout['name']
outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n'%(name,fortranname))
rout = copy.copy(rout)
return rout,createsubrwrapper(rout)
return rout,'' |
extra_args = [] |
boards.go | package api
import (
"encoding/json"
"io/ioutil"
"net/http"
"github.com/InfiniteDevelopment/go-get-news/api/util"
"github.com/InfiniteDevelopment/go-get-news/datastore"
"github.com/InfiniteDevelopment/go-get-news/model"
"github.com/julienschmidt/httprouter"
)
// GetBoards returns all boards
func GetBoards(ds *datastore.Datastore) httprouter.Handle |
// GetBoardPosts returns all the posts on a certain board
func GetBoardPosts(ds *datastore.Datastore) httprouter.Handle {
return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
posts, err := ds.GetBoardPostsByName(ps.ByName("board"), 0)
if err != nil {
util.JSONError(w, err, http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(posts)
}
}
type newBoardData struct {
Name string
Summary string
}
// CreateBoard creates a new board
func CreateBoard(ds *datastore.Datastore) httprouter.Handle {
return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
user, err := util.Authenticate(r, ds)
if err != nil {
util.JSONError(w, err, http.StatusUnauthorized)
return
}
body, err := ioutil.ReadAll(r.Body)
if err != nil {
util.JSONError(w, err, http.StatusBadRequest)
return
}
var nbd newBoardData
err = json.Unmarshal(body, &nbd)
if err != nil {
util.JSONError(w, err, http.StatusBadRequest)
return
}
b := &model.Board{
Name: nbd.Name,
Summary: nbd.Summary,
Creator: user.Username,
}
err = ds.CreateBoard(b)
if err != nil {
util.JSONError(w, err, http.StatusBadRequest)
return
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(map[string]string{"success": "board submitted for approval"})
}
}
| {
return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
boards, err := ds.GetBoards(0)
if err != nil {
util.JSONError(w, err, http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(boards)
}
} |
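A wiring sketch for these handlers; the route paths and listen address are assumptions, but the `:board` segment matches the ps.ByName("board") lookup in GetBoardPosts.

package api

import (
	"net/http"

	"github.com/InfiniteDevelopment/go-get-news/datastore"
	"github.com/julienschmidt/httprouter"
)

// NewRouter mounts the three handlers on illustrative paths.
func NewRouter(ds *datastore.Datastore) *httprouter.Router {
	r := httprouter.New()
	r.GET("/boards", GetBoards(ds))
	r.GET("/boards/:board/posts", GetBoardPosts(ds)) // :board feeds ps.ByName("board")
	r.POST("/boards", CreateBoard(ds))
	return r
}

// Serve starts the API server on an assumed address.
func Serve(ds *datastore.Datastore) error {
	return http.ListenAndServe(":8080", NewRouter(ds))
}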
typer.go | /*
Copyright 2021 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package semantics
import (
"vitess.io/vitess/go/sqltypes"
querypb "vitess.io/vitess/go/vt/proto/query"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vtgate/engine"
)
// typer is responsible for setting the type for expressions
// it does its work after visiting the children (up), since the children's types are often needed to type a node.
type typer struct {
exprTypes map[sqlparser.Expr]querypb.Type
}
func newTyper() *typer |
func (t *typer) up(cursor *sqlparser.Cursor) error {
switch node := cursor.Node().(type) {
case *sqlparser.Literal:
switch node.Type {
case sqlparser.IntVal:
t.exprTypes[node] = sqltypes.Int32
case sqlparser.StrVal:
t.exprTypes[node] = sqltypes.VarChar
case sqlparser.FloatVal:
t.exprTypes[node] = sqltypes.Decimal
}
case *sqlparser.FuncExpr:
code, ok := engine.SupportedAggregates[node.Name.Lowered()]
if ok {
typ, ok := engine.OpcodeType[code]
if ok {
t.exprTypes[node] = typ
}
}
}
return nil
}
func (t *typer) setTypeFor(node *sqlparser.ColName, typ querypb.Type) {
t.exprTypes[node] = typ
}
| {
return &typer{
exprTypes: map[sqlparser.Expr]querypb.Type{},
}
} |
feature-gate-unsized_tuple_coercion.rs | fn main() { | let _ : &(Send,) = &((),);
//~^ ERROR unsized tuple coercion is not stable enough
} |
|
printFibo.js | function printFibo(n, a, b) {
if (n === 0) return "";
if(n===1) {
console.log(`${a}`)
} else if(n ===2) {
console.log(`${a}, ${b}`);
} else {
let seq = `${a}, ${b}`;
let nextA = a;
let nextB = b;
for(let i=0; i<n-2; i++) {
let nextVal = nextA + nextB;
nextA = nextB;
nextB = nextVal;
seq += `, ${nextVal}`;
}
console.log(seq); | printFibo(20,12,1); | }
}
|
things.go | // Copyright (c) Mainflux
// SPDX-License-Identifier: Apache-2.0
package mocks
import (
"context"
"github.com/golang/protobuf/ptypes/empty"
"github.com/mainflux/mainflux"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
var errUnauthorized = status.Error(codes.PermissionDenied, "missing or invalid credentials provided")
var _ mainflux.ThingsServiceClient = (*thingsServiceMock)(nil)
type thingsServiceMock struct{}
// NewThingsService returns mock implementation of things service
func NewThingsService() mainflux.ThingsServiceClient |
func (svc thingsServiceMock) CanAccessByKey(ctx context.Context, in *mainflux.AccessByKeyReq, opts ...grpc.CallOption) (*mainflux.ThingID, error) {
token := in.GetToken()
if token == "invalid" {
return nil, errUnauthorized
}
if token == "" {
return nil, errUnauthorized
}
return &mainflux.ThingID{Value: token}, nil
}
func (svc thingsServiceMock) CanAccessByID(context.Context, *mainflux.AccessByIDReq, ...grpc.CallOption) (*empty.Empty, error) {
panic("not implemented")
}
func (svc thingsServiceMock) IsChannelOwner(context.Context, *mainflux.ChannelOwnerReq, ...grpc.CallOption) (*empty.Empty, error) {
panic("not implemented")
}
func (svc thingsServiceMock) Identify(context.Context, *mainflux.Token, ...grpc.CallOption) (*mainflux.ThingID, error) {
panic("not implemented")
}
| {
return thingsServiceMock{}
} |
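A hedged sketch of the mock's contract in a test; the Token and Value field names are inferred from the GetToken/ThingID usages above and should be treated as assumptions about the generated protobuf types.

package mocks

import (
	"context"
	"testing"

	"github.com/mainflux/mainflux"
)

// TestCanAccessByKeySketch: a valid token is echoed back as the thing ID,
// while empty or "invalid" tokens yield the errUnauthorized gRPC error.
func TestCanAccessByKeySketch(t *testing.T) {
	svc := NewThingsService()

	id, err := svc.CanAccessByKey(context.Background(), &mainflux.AccessByKeyReq{Token: "token"})
	if err != nil || id.GetValue() != "token" {
		t.Fatalf("expected token echoed as thing ID, got id=%v err=%v", id, err)
	}

	if _, err := svc.CanAccessByKey(context.Background(), &mainflux.AccessByKeyReq{Token: "invalid"}); err == nil {
		t.Fatal("expected permission denied for invalid token")
	}
}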
server.go | // Copyright (c) 2015-2016 The btcsuite developers
// Copyright (c) 2019 The payt DAG developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
// Package rpcserver implements the RPC API and is used by the main package to
// start gRPC services.
//
// Full documentation of the API implemented by this package is maintained in a
// language-agnostic document:
//
// https://github.com/payt-dag/paytwallet/blob/master/rpc/documentation/api.md
//
// Any API changes must be performed according to the steps listed here:
//
// https://github.com/payt-dag/paytwallet/blob/master/rpc/documentation/serverchanges.md
package rpcserver
import (
"bytes"
"errors"
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"github.com/payt-dag/payt/chaincfg/chainhash"
"github.com/payt-dag/payt/rpcclient"
"github.com/payt-dag/payt/paytutil"
"github.com/payt-dag/payt/paytutil/hdkeychain"
"github.com/payt-dag/payt/txscript"
"github.com/payt-dag/payt/wire"
"github.com/payt-dag/paytwallet/chain"
"github.com/payt-dag/paytwallet/internal/cfgutil"
"github.com/payt-dag/paytwallet/internal/zero"
"github.com/payt-dag/paytwallet/netparams"
pb "github.com/payt-dag/paytwallet/rpc/walletrpc"
"github.com/payt-dag/paytwallet/waddrmgr"
"github.com/payt-dag/paytwallet/wallet"
"github.com/payt-dag/paytwallet/walletdb"
)
// Public API version constants
const (
semverString = "2.0.1"
semverMajor = 2
semverMinor = 0
semverPatch = 1
)
// translateError creates a new gRPC error with an appropriate error code for
// recognized errors.
//
// This function is by no means complete and should be expanded based on other
// known errors. Any RPC handler not returning a gRPC error (with grpc.Errorf)
// should return this result instead.
func translateError(err error) error {
code := errorCode(err)
return grpc.Errorf(code, "%s", err.Error())
}
func errorCode(err error) codes.Code |
// versionServer provides RPC clients with the ability to query the RPC server
// version.
type versionServer struct {
}
// walletServer provides wallet services for RPC clients.
type walletServer struct {
wallet *wallet.Wallet
}
// loaderServer provides RPC clients with the ability to load and close wallets,
// as well as establishing a RPC connection to a payt consensus server.
type loaderServer struct {
loader *wallet.Loader
activeNet *netparams.Params
rpcClient *chain.RPCClient
mu sync.Mutex
}
// StartVersionService creates an implementation of the VersionService and
// registers it with the gRPC server.
func StartVersionService(server *grpc.Server) {
pb.RegisterVersionServiceServer(server, &versionServer{})
}
func (*versionServer) Version(ctx context.Context, req *pb.VersionRequest) (*pb.VersionResponse, error) {
return &pb.VersionResponse{
VersionString: semverString,
Major: semverMajor,
Minor: semverMinor,
Patch: semverPatch,
}, nil
}
// StartWalletService creates an implementation of the WalletService and
// registers it with the gRPC server.
func StartWalletService(server *grpc.Server, wallet *wallet.Wallet) {
service := &walletServer{wallet}
pb.RegisterWalletServiceServer(server, service)
}
func (s *walletServer) Ping(ctx context.Context, req *pb.PingRequest) (*pb.PingResponse, error) {
return &pb.PingResponse{}, nil
}
func (s *walletServer) Network(ctx context.Context, req *pb.NetworkRequest) (
*pb.NetworkResponse, error) {
return &pb.NetworkResponse{ActiveNetwork: uint32(s.wallet.ChainParams().Net)}, nil
}
func (s *walletServer) AccountNumber(ctx context.Context, req *pb.AccountNumberRequest) (
*pb.AccountNumberResponse, error) {
accountNum, err := s.wallet.AccountNumber(waddrmgr.KeyScopeBIP0044, req.AccountName)
if err != nil {
return nil, translateError(err)
}
return &pb.AccountNumberResponse{AccountNumber: accountNum}, nil
}
func (s *walletServer) Accounts(ctx context.Context, req *pb.AccountsRequest) (
*pb.AccountsResponse, error) {
resp, err := s.wallet.Accounts(waddrmgr.KeyScopeBIP0044)
if err != nil {
return nil, translateError(err)
}
accounts := make([]*pb.AccountsResponse_Account, len(resp.Accounts))
for i := range resp.Accounts {
a := &resp.Accounts[i]
accounts[i] = &pb.AccountsResponse_Account{
AccountNumber: a.AccountNumber,
AccountName: a.AccountName,
TotalBalance: int64(a.TotalBalance),
ExternalKeyCount: a.ExternalKeyCount,
InternalKeyCount: a.InternalKeyCount,
ImportedKeyCount: a.ImportedKeyCount,
}
}
return &pb.AccountsResponse{
Accounts: accounts,
CurrentBlockHash: resp.CurrentBlockHash[:],
CurrentBlockHeight: resp.CurrentBlockHeight,
}, nil
}
func (s *walletServer) RenameAccount(ctx context.Context, req *pb.RenameAccountRequest) (
*pb.RenameAccountResponse, error) {
err := s.wallet.RenameAccount(waddrmgr.KeyScopeBIP0044, req.AccountNumber, req.NewName)
if err != nil {
return nil, translateError(err)
}
return &pb.RenameAccountResponse{}, nil
}
func (s *walletServer) NextAccount(ctx context.Context, req *pb.NextAccountRequest) (
*pb.NextAccountResponse, error) {
defer zero.Bytes(req.Passphrase)
if req.AccountName == "" {
return nil, grpc.Errorf(codes.InvalidArgument, "account name may not be empty")
}
lock := make(chan time.Time, 1)
defer func() {
lock <- time.Time{} // send matters, not the value
}()
err := s.wallet.Unlock(req.Passphrase, lock)
if err != nil {
return nil, translateError(err)
}
account, err := s.wallet.NextAccount(waddrmgr.KeyScopeBIP0044, req.AccountName)
if err != nil {
return nil, translateError(err)
}
return &pb.NextAccountResponse{AccountNumber: account}, nil
}
func (s *walletServer) NextAddress(ctx context.Context, req *pb.NextAddressRequest) (
*pb.NextAddressResponse, error) {
var (
addr paytutil.Address
err error
)
switch req.Kind {
case pb.NextAddressRequest_BIP0044_EXTERNAL:
addr, err = s.wallet.NewAddress(req.Account, waddrmgr.KeyScopeBIP0044)
case pb.NextAddressRequest_BIP0044_INTERNAL:
addr, err = s.wallet.NewChangeAddress(req.Account, waddrmgr.KeyScopeBIP0044)
default:
return nil, grpc.Errorf(codes.InvalidArgument, "kind=%v", req.Kind)
}
if err != nil {
return nil, translateError(err)
}
return &pb.NextAddressResponse{Address: addr.EncodeAddress()}, nil
}
func (s *walletServer) ImportPrivateKey(ctx context.Context, req *pb.ImportPrivateKeyRequest) (
*pb.ImportPrivateKeyResponse, error) {
defer zero.Bytes(req.Passphrase)
wif, err := paytutil.DecodeWIF(req.PrivateKeyWif)
if err != nil {
return nil, grpc.Errorf(codes.InvalidArgument,
"Invalid WIF-encoded private key: %v", err)
}
lock := make(chan time.Time, 1)
defer func() {
lock <- time.Time{} // send matters, not the value
}()
err = s.wallet.Unlock(req.Passphrase, lock)
if err != nil {
return nil, translateError(err)
}
// At the moment, only the special-cased import account can be used to
// import keys.
if req.Account != waddrmgr.ImportedAddrAccount {
return nil, grpc.Errorf(codes.InvalidArgument,
"Only the imported account accepts private key imports")
}
_, err = s.wallet.ImportPrivateKey(waddrmgr.KeyScopeBIP0044, wif, nil, req.Rescan)
if err != nil {
return nil, translateError(err)
}
return &pb.ImportPrivateKeyResponse{}, nil
}
func (s *walletServer) Balance(ctx context.Context, req *pb.BalanceRequest) (
*pb.BalanceResponse, error) {
account := req.AccountNumber
reqConfs := req.RequiredConfirmations
bals, err := s.wallet.CalculateAccountBalances(account, reqConfs)
if err != nil {
return nil, translateError(err)
}
// TODO: Spendable currently includes multisig outputs that may not
// actually be spendable without additional keys.
resp := &pb.BalanceResponse{
Total: int64(bals.Total),
Spendable: int64(bals.Spendable),
ImmatureReward: int64(bals.ImmatureReward),
}
return resp, nil
}
// confirmed checks whether a transaction at height txHeight has met minconf
// confirmations for a blockchain at height curHeight.
func confirmed(minconf, txHeight, curHeight int32) bool {
return confirms(txHeight, curHeight) >= minconf
}
// confirms returns the number of confirmations for a transaction in a block at
// height txHeight (or -1 for an unconfirmed tx) given the chain height
// curHeight.
func confirms(txHeight, curHeight int32) int32 {
switch {
case txHeight == -1, txHeight > curHeight:
return 0
default:
return curHeight - txHeight + 1
}
}
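// For example, a transaction mined at height 100 with the chain at height 102
// has confirms(100, 102) == 3, so confirmed(3, 100, 102) is true while
// confirmed(4, 100, 102) is false; an unconfirmed transaction (txHeight == -1)
// always reports 0 confirmations.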
func (s *walletServer) FundTransaction(ctx context.Context, req *pb.FundTransactionRequest) (
*pb.FundTransactionResponse, error) {
policy := wallet.OutputSelectionPolicy{
Account: req.Account,
RequiredConfirmations: req.RequiredConfirmations,
}
unspentOutputs, err := s.wallet.UnspentOutputs(policy)
if err != nil {
return nil, translateError(err)
}
selectedOutputs := make([]*pb.FundTransactionResponse_PreviousOutput, 0, len(unspentOutputs))
var totalAmount paytutil.Amount
for _, output := range unspentOutputs {
selectedOutputs = append(selectedOutputs, &pb.FundTransactionResponse_PreviousOutput{
TransactionHash: output.OutPoint.Hash[:],
OutputIndex: output.OutPoint.Index,
Amount: output.Output.Value,
PkScript: output.Output.PkScript,
ReceiveTime: output.ReceiveTime.Unix(),
FromCoinbase: output.OutputKind == wallet.OutputKindCoinbase,
})
totalAmount += paytutil.Amount(output.Output.Value)
if req.TargetAmount != 0 && totalAmount > paytutil.Amount(req.TargetAmount) {
break
}
}
var changeScript []byte
if req.IncludeChangeScript && totalAmount > paytutil.Amount(req.TargetAmount) {
changeAddr, err := s.wallet.NewChangeAddress(req.Account, waddrmgr.KeyScopeBIP0044)
if err != nil {
return nil, translateError(err)
}
changeScript, err = txscript.PayToAddrScript(changeAddr)
if err != nil {
return nil, translateError(err)
}
}
return &pb.FundTransactionResponse{
SelectedOutputs: selectedOutputs,
TotalAmount: int64(totalAmount),
ChangePkScript: changeScript,
}, nil
}
func marshalGetTransactionsResult(wresp *wallet.GetTransactionsResult) (
*pb.GetTransactionsResponse, error) {
resp := &pb.GetTransactionsResponse{
MinedTransactions: marshalBlocks(wresp.MinedTransactions),
UnminedTransactions: marshalTransactionDetails(wresp.UnminedTransactions),
}
return resp, nil
}
// BUGS:
// - MinimumRecentTransactions is ignored.
// - Wrong error codes when a block height or hash is not recognized
func (s *walletServer) GetTransactions(ctx context.Context, req *pb.GetTransactionsRequest) (
resp *pb.GetTransactionsResponse, err error) {
var startBlock, endBlock *wallet.BlockIdentifier
if req.StartingBlockHash != nil && req.StartingBlockHeight != 0 {
return nil, grpc.Errorf(codes.InvalidArgument,
"starting block hash and height may not be specified simultaneously")
} else if req.StartingBlockHash != nil {
startBlockHash, err := chainhash.NewHash(req.StartingBlockHash)
if err != nil {
return nil, grpc.Errorf(codes.InvalidArgument, "%s", err.Error())
}
startBlock = wallet.NewBlockIdentifierFromHash(startBlockHash)
} else if req.StartingBlockHeight != 0 {
startBlock = wallet.NewBlockIdentifierFromHeight(req.StartingBlockHeight)
}
if req.EndingBlockHash != nil && req.EndingBlockHeight != 0 {
return nil, grpc.Errorf(codes.InvalidArgument,
"ending block hash and height may not be specified simultaneously")
} else if req.EndingBlockHash != nil {
endBlockHash, err := chainhash.NewHash(req.EndingBlockHash)
if err != nil {
return nil, grpc.Errorf(codes.InvalidArgument, "%s", err.Error())
}
endBlock = wallet.NewBlockIdentifierFromHash(endBlockHash)
} else if req.EndingBlockHeight != 0 {
endBlock = wallet.NewBlockIdentifierFromHeight(req.EndingBlockHeight)
}
var minRecentTxs int
if req.MinimumRecentTransactions != 0 {
if endBlock != nil {
return nil, grpc.Errorf(codes.InvalidArgument,
"ending block and minimum number of recent transactions "+
"may not be specified simultaneously")
}
minRecentTxs = int(req.MinimumRecentTransactions)
if minRecentTxs < 0 {
return nil, grpc.Errorf(codes.InvalidArgument,
"minimum number of recent transactions may not be negative")
}
}
_ = minRecentTxs
gtr, err := s.wallet.GetTransactions(startBlock, endBlock, ctx.Done())
if err != nil {
return nil, translateError(err)
}
return marshalGetTransactionsResult(gtr)
}
func (s *walletServer) ChangePassphrase(ctx context.Context, req *pb.ChangePassphraseRequest) (
*pb.ChangePassphraseResponse, error) {
defer func() {
zero.Bytes(req.OldPassphrase)
zero.Bytes(req.NewPassphrase)
}()
var err error
switch req.Key {
case pb.ChangePassphraseRequest_PRIVATE:
err = s.wallet.ChangePrivatePassphrase(req.OldPassphrase, req.NewPassphrase)
case pb.ChangePassphraseRequest_PUBLIC:
err = s.wallet.ChangePublicPassphrase(req.OldPassphrase, req.NewPassphrase)
default:
return nil, grpc.Errorf(codes.InvalidArgument, "Unknown key type (%d)", req.Key)
}
if err != nil {
return nil, translateError(err)
}
return &pb.ChangePassphraseResponse{}, nil
}
// BUGS:
// - InputIndexes request field is ignored.
func (s *walletServer) SignTransaction(ctx context.Context, req *pb.SignTransactionRequest) (
*pb.SignTransactionResponse, error) {
defer zero.Bytes(req.Passphrase)
var tx wire.MsgTx
err := tx.Deserialize(bytes.NewReader(req.SerializedTransaction))
if err != nil {
return nil, grpc.Errorf(codes.InvalidArgument,
"Bytes do not represent a valid raw transaction: %v", err)
}
lock := make(chan time.Time, 1)
defer func() {
lock <- time.Time{} // send matters, not the value
}()
err = s.wallet.Unlock(req.Passphrase, lock)
if err != nil {
return nil, translateError(err)
}
invalidSigs, err := s.wallet.SignTransaction(&tx, txscript.SigHashAll, nil, nil, nil)
if err != nil {
return nil, translateError(err)
}
invalidInputIndexes := make([]uint32, len(invalidSigs))
for i, e := range invalidSigs {
invalidInputIndexes[i] = e.InputIndex
}
var serializedTransaction bytes.Buffer
serializedTransaction.Grow(tx.SerializeSize())
err = tx.Serialize(&serializedTransaction)
if err != nil {
return nil, translateError(err)
}
resp := &pb.SignTransactionResponse{
Transaction: serializedTransaction.Bytes(),
UnsignedInputIndexes: invalidInputIndexes,
}
return resp, nil
}
// BUGS:
// - The transaction is not inspected to be relevant before publishing using
// sendrawtransaction, so connection errors to payt could result in the tx
// never being added to the wallet database.
// - Once the above bug is fixed, wallet will require a way to purge invalid
// transactions from the database when they are rejected by the network, other
// than double spending them.
func (s *walletServer) PublishTransaction(ctx context.Context, req *pb.PublishTransactionRequest) (
*pb.PublishTransactionResponse, error) {
var msgTx wire.MsgTx
err := msgTx.Deserialize(bytes.NewReader(req.SignedTransaction))
if err != nil {
return nil, grpc.Errorf(codes.InvalidArgument,
"Bytes do not represent a valid raw transaction: %v", err)
}
err = s.wallet.PublishTransaction(&msgTx)
if err != nil {
return nil, translateError(err)
}
return &pb.PublishTransactionResponse{}, nil
}
func marshalTransactionInputs(v []wallet.TransactionSummaryInput) []*pb.TransactionDetails_Input {
inputs := make([]*pb.TransactionDetails_Input, len(v))
for i := range v {
input := &v[i]
inputs[i] = &pb.TransactionDetails_Input{
Index: input.Index,
PreviousAccount: input.PreviousAccount,
PreviousAmount: int64(input.PreviousAmount),
}
}
return inputs
}
func marshalTransactionOutputs(v []wallet.TransactionSummaryOutput) []*pb.TransactionDetails_Output {
outputs := make([]*pb.TransactionDetails_Output, len(v))
for i := range v {
output := &v[i]
outputs[i] = &pb.TransactionDetails_Output{
Index: output.Index,
Account: output.Account,
Internal: output.Internal,
}
}
return outputs
}
func marshalTransactionDetails(v []wallet.TransactionSummary) []*pb.TransactionDetails {
txs := make([]*pb.TransactionDetails, len(v))
for i := range v {
tx := &v[i]
txs[i] = &pb.TransactionDetails{
Hash: tx.Hash[:],
Transaction: tx.Transaction,
Debits: marshalTransactionInputs(tx.MyInputs),
Credits: marshalTransactionOutputs(tx.MyOutputs),
Fee: int64(tx.Fee),
Timestamp: tx.Timestamp,
}
}
return txs
}
func marshalBlocks(v []wallet.Block) []*pb.BlockDetails {
blocks := make([]*pb.BlockDetails, len(v))
for i := range v {
block := &v[i]
blocks[i] = &pb.BlockDetails{
Hash: block.Hash[:],
Height: block.Height,
Timestamp: block.Timestamp,
Transactions: marshalTransactionDetails(block.Transactions),
}
}
return blocks
}
func marshalHashes(v []*chainhash.Hash) [][]byte {
hashes := make([][]byte, len(v))
for i, hash := range v {
hashes[i] = hash[:]
}
return hashes
}
func marshalAccountBalances(v []wallet.AccountBalance) []*pb.AccountBalance {
balances := make([]*pb.AccountBalance, len(v))
for i := range v {
balance := &v[i]
balances[i] = &pb.AccountBalance{
Account: balance.Account,
TotalBalance: int64(balance.TotalBalance),
}
}
return balances
}
func (s *walletServer) TransactionNotifications(req *pb.TransactionNotificationsRequest,
svr pb.WalletService_TransactionNotificationsServer) error {
n := s.wallet.NtfnServer.TransactionNotifications()
defer n.Done()
ctxDone := svr.Context().Done()
for {
select {
case v := <-n.C:
resp := pb.TransactionNotificationsResponse{
AttachedBlocks: marshalBlocks(v.AttachedBlocks),
DetachedBlocks: marshalHashes(v.DetachedBlocks),
UnminedTransactions: marshalTransactionDetails(v.UnminedTransactions),
UnminedTransactionHashes: marshalHashes(v.UnminedTransactionHashes),
}
err := svr.Send(&resp)
if err != nil {
return translateError(err)
}
case <-ctxDone:
return nil
}
}
}
func (s *walletServer) SpentnessNotifications(req *pb.SpentnessNotificationsRequest,
svr pb.WalletService_SpentnessNotificationsServer) error {
if req.NoNotifyUnspent && req.NoNotifySpent {
return grpc.Errorf(codes.InvalidArgument,
"no_notify_unspent and no_notify_spent may not both be true")
}
n := s.wallet.NtfnServer.AccountSpentnessNotifications(req.Account)
defer n.Done()
ctxDone := svr.Context().Done()
for {
select {
case v := <-n.C:
spenderHash, spenderIndex, spent := v.Spender()
if (spent && req.NoNotifySpent) || (!spent && req.NoNotifyUnspent) {
continue
}
index := v.Index()
resp := pb.SpentnessNotificationsResponse{
TransactionHash: v.Hash()[:],
OutputIndex: index,
}
if spent {
resp.Spender = &pb.SpentnessNotificationsResponse_Spender{
TransactionHash: spenderHash[:],
InputIndex: spenderIndex,
}
}
err := svr.Send(&resp)
if err != nil {
return translateError(err)
}
case <-ctxDone:
return nil
}
}
}
func (s *walletServer) AccountNotifications(req *pb.AccountNotificationsRequest,
svr pb.WalletService_AccountNotificationsServer) error {
n := s.wallet.NtfnServer.AccountNotifications()
defer n.Done()
ctxDone := svr.Context().Done()
for {
select {
case v := <-n.C:
resp := pb.AccountNotificationsResponse{
AccountNumber: v.AccountNumber,
AccountName: v.AccountName,
ExternalKeyCount: v.ExternalKeyCount,
InternalKeyCount: v.InternalKeyCount,
ImportedKeyCount: v.ImportedKeyCount,
}
err := svr.Send(&resp)
if err != nil {
return translateError(err)
}
case <-ctxDone:
return nil
}
}
}
// StartWalletLoaderService creates an implementation of the WalletLoaderService
// and registers it with the gRPC server.
func StartWalletLoaderService(server *grpc.Server, loader *wallet.Loader,
activeNet *netparams.Params) {
service := &loaderServer{loader: loader, activeNet: activeNet}
pb.RegisterWalletLoaderServiceServer(server, service)
}
func (s *loaderServer) CreateWallet(ctx context.Context, req *pb.CreateWalletRequest) (
*pb.CreateWalletResponse, error) {
defer func() {
zero.Bytes(req.PrivatePassphrase)
zero.Bytes(req.Seed)
}()
// Use an insecure public passphrase when the request's public passphrase is empty.
pubPassphrase := req.PublicPassphrase
if len(pubPassphrase) == 0 {
pubPassphrase = []byte(wallet.InsecurePubPassphrase)
}
wallet, err := s.loader.CreateNewWallet(
pubPassphrase, req.PrivatePassphrase, req.Seed, time.Now(),
)
if err != nil {
return nil, translateError(err)
}
s.mu.Lock()
if s.rpcClient != nil {
wallet.SynchronizeRPC(s.rpcClient)
}
s.mu.Unlock()
return &pb.CreateWalletResponse{}, nil
}
func (s *loaderServer) OpenWallet(ctx context.Context, req *pb.OpenWalletRequest) (
*pb.OpenWalletResponse, error) {
// Use an insecure public passphrase when the request's public passphrase is empty.
pubPassphrase := req.PublicPassphrase
if len(pubPassphrase) == 0 {
pubPassphrase = []byte(wallet.InsecurePubPassphrase)
}
wallet, err := s.loader.OpenExistingWallet(pubPassphrase, false)
if err != nil {
return nil, translateError(err)
}
s.mu.Lock()
if s.rpcClient != nil {
wallet.SynchronizeRPC(s.rpcClient)
}
s.mu.Unlock()
return &pb.OpenWalletResponse{}, nil
}
func (s *loaderServer) WalletExists(ctx context.Context, req *pb.WalletExistsRequest) (
*pb.WalletExistsResponse, error) {
exists, err := s.loader.WalletExists()
if err != nil {
return nil, translateError(err)
}
return &pb.WalletExistsResponse{Exists: exists}, nil
}
func (s *loaderServer) CloseWallet(ctx context.Context, req *pb.CloseWalletRequest) (
*pb.CloseWalletResponse, error) {
err := s.loader.UnloadWallet()
if err == wallet.ErrNotLoaded {
return nil, grpc.Errorf(codes.FailedPrecondition, "wallet is not loaded")
}
if err != nil {
return nil, translateError(err)
}
return &pb.CloseWalletResponse{}, nil
}
func (s *loaderServer) StartConsensusRpc(ctx context.Context, req *pb.StartConsensusRpcRequest) (
*pb.StartConsensusRpcResponse, error) {
defer zero.Bytes(req.Password)
defer s.mu.Unlock()
s.mu.Lock()
if s.rpcClient != nil {
return nil, grpc.Errorf(codes.FailedPrecondition, "RPC client already created")
}
networkAddress, err := cfgutil.NormalizeAddress(req.NetworkAddress,
s.activeNet.RPCClientPort)
if err != nil {
return nil, grpc.Errorf(codes.InvalidArgument,
"Network address is ill-formed: %v", err)
}
// Error if the wallet is already syncing with the network.
wallet, walletLoaded := s.loader.LoadedWallet()
if walletLoaded && wallet.SynchronizingToNetwork() {
return nil, grpc.Errorf(codes.FailedPrecondition,
"wallet is loaded and already synchronizing")
}
rpcClient, err := chain.NewRPCClient(s.activeNet.Params, networkAddress, req.Username,
string(req.Password), req.Certificate, len(req.Certificate) == 0, 1)
if err != nil {
return nil, translateError(err)
}
err = rpcClient.Start()
if err != nil {
if err == rpcclient.ErrInvalidAuth {
return nil, grpc.Errorf(codes.InvalidArgument,
"Invalid RPC credentials: %v", err)
}
return nil, grpc.Errorf(codes.NotFound,
"Connection to RPC server failed: %v", err)
}
s.rpcClient = rpcClient
if walletLoaded {
wallet.SynchronizeRPC(rpcClient)
}
return &pb.StartConsensusRpcResponse{}, nil
}
| {
// waddrmgr.IsError is convenient, but not granular enough when the
// underlying error has to be checked. Unwrap the underlying error
// if it exists.
if e, ok := err.(waddrmgr.ManagerError); ok {
// For these waddrmgr error codes, the underlying error isn't
// needed to determine the grpc error code.
switch e.ErrorCode {
case waddrmgr.ErrWrongPassphrase: // public and private
return codes.InvalidArgument
case waddrmgr.ErrAccountNotFound:
return codes.NotFound
case waddrmgr.ErrInvalidAccount: // reserved account
return codes.InvalidArgument
case waddrmgr.ErrDuplicateAccount:
return codes.AlreadyExists
}
err = e.Err
}
switch err {
case wallet.ErrLoaded:
return codes.FailedPrecondition
case walletdb.ErrDbNotOpen:
return codes.Aborted
case walletdb.ErrDbExists:
return codes.AlreadyExists
case walletdb.ErrDbDoesNotExist:
return codes.NotFound
case hdkeychain.ErrInvalidSeedLen:
return codes.InvalidArgument
default:
return codes.Unknown
}
} |
goveralls_test.go | package main
import (
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"code.google.com/p/go-uuid/uuid"
)
func myImportPath() string {
cmd := exec.Command("go", "list")
b, err := cmd.CombinedOutput()
if err != nil {
panic(err)
}
return strings.TrimSpace(string(b))
}
func TestUsage(t *testing.T) {
tmp := prepareTest(t)
defer os.RemoveAll(tmp)
cmd := exec.Command("goveralls", "-h")
b, err := cmd.CombinedOutput()
if err == nil {
t.Fatal("Expected exit code 1 bot 0")
}
s := strings.Split(string(b), "\n")[0]
if !strings.HasPrefix(s, "Usage: goveralls ") {
t.Fatalf("Expected %v, but %v", "Usage: ", s)
}
}
func TestGoveralls(t *testing.T) {
tmp := prepareTest(t)
p := myImportPath()
defer os.RemoveAll(tmp)
runCmd(t, "go", "get", p+"/tester")
runCmd(t, "go", "get", "github.com/axw/gocov/gocov")
b := runCmd(t, "./goveralls", "-package="+p+"/tester", "")
lines := strings.Split(strings.TrimSpace(string(b)), "\n")
s := lines[len(lines)-1]
if s != "Succeeded" {
t.Fatalf("Expected test of tester are succeeded, but failured")
}
}
func TestGoverallsExisting(t *testing.T) |
func prepareTest(t *testing.T) (tmpPath string) {
tmp := os.TempDir()
tmp = filepath.Join(tmp, uuid.New())
os.Setenv("GOPATH", tmp)
path := os.Getenv("PATH")
path = tmp + "/bin:" + path
os.Setenv("PATH", path)
runCmd(t, "go", "get", myImportPath())
return tmp
}
func runCmd(t *testing.T, cmd string, args ...string) []byte {
b, err := exec.Command(cmd, args...).CombinedOutput()
if err != nil {
t.Fatalf("Expected %v, but %v: %v", nil, err, string(b))
}
return b
}
| {
p := myImportPath()
t.Logf("My import path is %q", p)
tmp := prepareTest(t)
defer os.RemoveAll(tmp)
runCmd(t, "go", "get", p+"/tester")
runCmd(t, "go", "get", "github.com/axw/gocov/gocov")
b := runCmd(t, "goveralls", "-gocovdata=tester/cov.json",
"-package="+p+"/tester", "")
lines := strings.Split(strings.TrimSpace(string(b)), "\n")
s := lines[len(lines)-1]
if s != "Succeeded" {
t.Fatalf("Expected test of tester are succeeded, but failured")
}
} |
simple.rs | //! A very simple event manager, that just supports log outputs, but no multiprocessing
use crate::{
events::{
BrokerEventResult, Event, EventFirer, EventManager, EventManagerId, EventProcessor,
EventRestarter, HasEventManagerId,
},
inputs::Input,
monitors::Monitor,
Error,
};
use alloc::{string::ToString, vec::Vec};
#[cfg(feature = "std")]
use core::sync::atomic::{compiler_fence, Ordering};
#[cfg(feature = "std")]
use serde::{de::DeserializeOwned, Serialize};
#[cfg(all(feature = "std", any(windows, not(feature = "fork"))))]
use crate::bolts::os::startable_self;
#[cfg(all(feature = "std", feature = "fork", unix))]
use crate::bolts::os::{fork, ForkResult};
#[cfg(feature = "std")]
use crate::{
bolts::{shmem::ShMemProvider, staterestore::StateRestorer},
corpus::Corpus,
state::{HasCorpus, HasSolutions},
};
use super::ProgressReporter;
/// The llmp connection from the actual fuzzer to the process supervising it
const _ENV_FUZZER_SENDER: &str = "_AFL_ENV_FUZZER_SENDER";
const _ENV_FUZZER_RECEIVER: &str = "_AFL_ENV_FUZZER_RECEIVER";
/// The llmp (2 way) connection from a fuzzer to the broker (broadcasting all other fuzzer messages)
const _ENV_FUZZER_BROKER_CLIENT_INITIAL: &str = "_AFL_ENV_FUZZER_BROKER_CLIENT";
/// A simple, single-threaded event manager that just logs
#[derive(Clone, Debug)]
pub struct SimpleEventManager<I, MT>
where
I: Input,
MT: Monitor, //CE: CustomEvent<I, OT>,
{
/// The monitor
monitor: MT,
/// The events that happened since the last handle_in_broker
events: Vec<Event<I>>,
}
impl<I, MT> EventFirer<I> for SimpleEventManager<I, MT>
where
I: Input,
MT: Monitor, //CE: CustomEvent<I, OT>,
{
fn fire<S>(&mut self, _state: &mut S, event: Event<I>) -> Result<(), Error> {
match Self::handle_in_broker(&mut self.monitor, &event)? {
BrokerEventResult::Forward => self.events.push(event),
BrokerEventResult::Handled => (),
};
Ok(())
}
}
impl<I, MT, S> EventRestarter<S> for SimpleEventManager<I, MT>
where
I: Input,
MT: Monitor, //CE: CustomEvent<I, OT>,
{
}
impl<E, I, MT, S, Z> EventProcessor<E, I, S, Z> for SimpleEventManager<I, MT>
where
I: Input,
MT: Monitor, //CE: CustomEvent<I, OT>,
{
fn process(
&mut self,
_fuzzer: &mut Z,
state: &mut S,
_executor: &mut E,
) -> Result<usize, Error> {
let count = self.events.len();
while !self.events.is_empty() {
let event = self.events.pop().unwrap();
self.handle_in_client(state, event)?;
}
Ok(count)
}
}
impl<E, I, MT, S, Z> EventManager<E, I, S, Z> for SimpleEventManager<I, MT>
where
I: Input,
MT: Monitor, //CE: CustomEvent<I, OT>,
{
}
impl<I, MT> ProgressReporter<I> for SimpleEventManager<I, MT>
where
I: Input,
MT: Monitor, //CE: CustomEvent<I, OT>,
{
}
impl<I, MT> HasEventManagerId for SimpleEventManager<I, MT>
where
I: Input,
MT: Monitor,
{
fn mgr_id(&self) -> EventManagerId {
EventManagerId { id: 0 }
}
}
impl<I, MT> SimpleEventManager<I, MT>
where
I: Input,
MT: Monitor, //TODO CE: CustomEvent,
{
/// Creates a new [`SimpleEventManager`].
pub fn new(monitor: MT) -> Self {
Self {
monitor,
events: vec![],
}
}
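// A minimal usage sketch (illustrative; assumes the crate's `SimpleMonitor`,
// which wraps a print closure — adapt to whatever Monitor you use):
//   let monitor = SimpleMonitor::new(|s| println!("{}", s));
//   let mut mgr = SimpleEventManager::new(monitor);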
// Handle arriving events in the broker
#[allow(clippy::unnecessary_wraps)]
fn handle_in_broker(monitor: &mut MT, event: &Event<I>) -> Result<BrokerEventResult, Error> {
match event {
Event::NewTestcase {
input: _,
client_config: _,
exit_kind: _,
corpus_size,
observers_buf: _,
time,
executions,
} => {
monitor
.client_stats_mut_for(0)
.update_corpus_size(*corpus_size as u64);
monitor
.client_stats_mut_for(0)
.update_executions(*executions as u64, *time);
monitor.display(event.name().to_string(), 0);
Ok(BrokerEventResult::Handled)
}
Event::UpdateExecStats {
time,
executions,
phantom: _,
} => {
// TODO: The monitor buffer should be added on client add.
let client = monitor.client_stats_mut_for(0);
client.update_executions(*executions as u64, *time);
monitor.display(event.name().to_string(), 0);
Ok(BrokerEventResult::Handled)
}
Event::UpdateUserStats {
name,
value,
phantom: _,
} => {
monitor
.client_stats_mut_for(0)
.update_user_stats(name.clone(), value.clone());
monitor.display(event.name().to_string(), 0);
Ok(BrokerEventResult::Handled)
}
#[cfg(feature = "introspection")]
Event::UpdatePerfMonitor {
time,
executions,
introspection_monitor,
phantom: _,
} => {
// TODO: The monitor buffer should be added on client add.
let client = &mut monitor.client_stats_mut()[0];
client.update_executions(*executions as u64, *time);
client.update_introspection_monitor((**introspection_monitor).clone());
monitor.display(event.name().to_string(), 0);
Ok(BrokerEventResult::Handled)
}
Event::Objective { objective_size } => {
monitor
.client_stats_mut_for(0)
.update_objective_size(*objective_size as u64);
monitor.display(event.name().to_string(), 0);
Ok(BrokerEventResult::Handled)
}
Event::Log {
severity_level,
message,
phantom: _,
} => {
let (_, _) = (message, severity_level);
#[cfg(feature = "std")]
println!("[LOG {}]: {}", severity_level, message);
Ok(BrokerEventResult::Handled)
} //_ => Ok(BrokerEventResult::Forward),
}
}
// Handle arriving events in the client
#[allow(clippy::needless_pass_by_value, clippy::unused_self)]
fn handle_in_client<S>(&mut self, _state: &mut S, event: Event<I>) -> Result<(), Error> {
Err(Error::Unknown(format!(
"Received illegal message that message should not have arrived: {:?}.",
event
)))
}
}
/// Provides a `builder` which can be used to build a [`SimpleRestartingEventManager`], which is a combination of a
/// `restarter` and `runner`, that can be used on systems both with and without `fork` support. The
/// `restarter` will start a new process each time the child crashes or times out.
#[cfg(feature = "std")]
#[allow(clippy::default_trait_access)]
#[derive(Debug, Clone)]
pub struct SimpleRestartingEventManager<I, MT, SP>
where
I: Input,
SP: ShMemProvider,
MT: Monitor, //CE: CustomEvent<I, OT>,
{
/// The actual simple event mgr
simple_event_mgr: SimpleEventManager<I, MT>,
/// [`StateRestorer`] for restarts
staterestorer: StateRestorer<SP>,
}
#[cfg(feature = "std")]
impl<I, MT, SP> EventFirer<I> for SimpleRestartingEventManager<I, MT, SP>
where
I: Input, | MT: Monitor, //CE: CustomEvent<I, OT>,
{
fn fire<S2>(&mut self, _state: &mut S2, event: Event<I>) -> Result<(), Error> {
self.simple_event_mgr.fire(_state, event)
}
}
#[cfg(feature = "std")]
impl<I, MT, S, SP> EventRestarter<S> for SimpleRestartingEventManager<I, MT, SP>
where
I: Input,
S: Serialize,
SP: ShMemProvider,
MT: Monitor, //CE: CustomEvent<I, OT>,
{
/// Reset the single page (we reuse it over and over from pos 0), then send the current state to the next runner.
fn on_restart(&mut self, state: &mut S) -> Result<(), Error> {
// First, reset the page to 0 so the next iteration can read from the beginning of this page
self.staterestorer.reset();
self.staterestorer.save(state)
}
}
#[cfg(feature = "std")]
impl<E, I, S, SP, MT, Z> EventProcessor<E, I, S, Z> for SimpleRestartingEventManager<I, MT, SP>
where
I: Input,
S: Serialize,
SP: ShMemProvider,
MT: Monitor, //CE: CustomEvent<I, OT>,
{
fn process(&mut self, fuzzer: &mut Z, state: &mut S, executor: &mut E) -> Result<usize, Error> {
self.simple_event_mgr.process(fuzzer, state, executor)
}
}
#[cfg(feature = "std")]
impl<E, I, S, SP, MT, Z> EventManager<E, I, S, Z> for SimpleRestartingEventManager<I, MT, SP>
where
I: Input,
S: Serialize,
SP: ShMemProvider,
MT: Monitor, //CE: CustomEvent<I, OT>,
{
}
#[cfg(feature = "std")]
impl<I, MT, SP> ProgressReporter<I> for SimpleRestartingEventManager<I, MT, SP>
where
I: Input,
SP: ShMemProvider,
MT: Monitor, //CE: CustomEvent<I, OT>,
{
}
#[cfg(feature = "std")]
impl<I, MT, SP> HasEventManagerId for SimpleRestartingEventManager<I, MT, SP>
where
I: Input,
SP: ShMemProvider,
MT: Monitor,
{
fn mgr_id(&self) -> EventManagerId {
self.simple_event_mgr.mgr_id()
}
}
#[cfg(feature = "std")]
#[allow(clippy::type_complexity, clippy::too_many_lines)]
impl<'a, I, MT, SP> SimpleRestartingEventManager<I, MT, SP>
where
I: Input,
SP: ShMemProvider,
MT: Monitor, //TODO CE: CustomEvent,
{
/// Creates a new [`SimpleRestartingEventManager`] for an already-launched fuzzer process.
fn new_launched(monitor: MT, staterestorer: StateRestorer<SP>) -> Self {
Self {
staterestorer,
simple_event_mgr: SimpleEventManager::new(monitor),
}
}
/// Launch the simple restarting manager.
/// This [`EventManager`] is simple and single threaded,
/// but can still use shared maps to recover from crashes and timeouts.
#[allow(clippy::similar_names)]
pub fn launch<S>(mut monitor: MT, shmem_provider: &mut SP) -> Result<(Option<S>, Self), Error>
where
S: DeserializeOwned + Serialize + HasCorpus<I> + HasSolutions<I>,
{
// We start ourselves as a child process to do the actual fuzzing
let mut staterestorer = if std::env::var(_ENV_FUZZER_SENDER).is_err() {
// First, create a place to store state in, for restarts.
let staterestorer: StateRestorer<SP> =
StateRestorer::new(shmem_provider.new_shmem(256 * 1024 * 1024)?);
//let staterestorer = { LlmpSender::new(shmem_provider.clone(), 0, false)? };
staterestorer.write_to_env(_ENV_FUZZER_SENDER)?;
let mut ctr: u64 = 0;
// Client->parent loop
loop {
dbg!("Spawning next client (id {})", ctr);
// On Unix, we fork
#[cfg(all(unix, feature = "fork"))]
let child_status = {
shmem_provider.pre_fork()?;
match unsafe { fork() }? {
ForkResult::Parent(handle) => {
shmem_provider.post_fork(false)?;
handle.status()
}
ForkResult::Child => {
shmem_provider.post_fork(true)?;
break staterestorer;
}
}
};
// On Windows (or in any case without forks), we spawn ourselves again
#[cfg(any(windows, not(feature = "fork")))]
let child_status = startable_self()?.status()?;
#[cfg(all(unix, not(feature = "fork")))]
let child_status = child_status.code().unwrap_or_default();
compiler_fence(Ordering::SeqCst);
#[allow(clippy::manual_assert)]
if !staterestorer.has_content() {
#[cfg(unix)]
if child_status == 137 {
// Out of Memory, see https://tldp.org/LDP/abs/html/exitcodes.html
// and https://github.com/AFLplusplus/LibAFL/issues/32 for discussion.
panic!("Fuzzer-respawner: The fuzzed target crashed with an out of memory error! Fix your harness, or switch to another executor (for example, a forkserver).");
}
// Storing state in the last round did not work
panic!("Fuzzer-respawner: Storing state in crashed fuzzer instance did not work, no point to spawn the next client! This can happen if the child calls `exit()`, in that case make sure it uses `abort()`, if it got killed unrecoverable (OOM), or if there is a bug in the fuzzer itself. (Child exited with: {})", child_status);
}
ctr = ctr.wrapping_add(1);
}
} else {
// We are the newly started fuzzing instance (e.g. on Windows); first, connect to our own restore map.
// We get here *only on Windows*, if we were started by a restarting fuzzer.
// A staterestorer and a receiver for single communication
StateRestorer::from_env(shmem_provider, _ENV_FUZZER_SENDER)?
};
// If we're restarting, deserialize the old state.
let (state, mgr) = match staterestorer.restore::<S>()? {
None => {
println!("First run. Let's set it all up");
// Mgr to send and receive msgs from/to all other fuzzer instances
(
None,
SimpleRestartingEventManager::new_launched(monitor, staterestorer),
)
}
// Restoring from a previous run, deserialize state and corpus.
Some(state) => {
println!("Subsequent run. Loaded previous state.");
// We reset the staterestorer, the next staterestorer and receiver (after crash) will reuse the page from the initial message.
staterestorer.reset();
// load the corpus size into monitor to still display the correct numbers after restart.
let client_stats = monitor.client_stats_mut_for(0);
client_stats.update_corpus_size(state.corpus().count().try_into()?);
client_stats.update_objective_size(state.solutions().count().try_into()?);
(
Some(state),
SimpleRestartingEventManager::new_launched(monitor, staterestorer),
)
}
};
/* TODO: Not sure if this is needed
// We commit an empty NO_RESTART message to this buf, against infinite loops,
// in case something crashes in the fuzzer.
staterestorer.send_buf(_LLMP_TAG_NO_RESTART, []);
*/
Ok((state, mgr))
}
} | SP: ShMemProvider, |
0006_course_mode_targets.py | from django.db import migrations, models
class Migration(migrations.Migration):
| dependencies = [
('course_modes', '0007_coursemode_bulk_sku'),
('bulk_email', '0005_move_target_data'),
]
operations = [
migrations.CreateModel(
name='CourseModeTarget',
fields=[
('target_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='bulk_email.Target', on_delete=models.CASCADE)),
('track', models.ForeignKey(to='course_modes.CourseMode', on_delete=models.CASCADE)),
],
bases=('bulk_email.target',),
),
migrations.AlterField(
model_name='target',
name='target_type',
field=models.CharField(max_length=64, choices=[('myself', 'Myself'), ('staff', 'Staff and instructors'), ('learners', 'All students'), ('cohort', 'Specific cohort'), ('track', 'Specific course mode')]),
),
] |
|
config.js | const errorDescription = {
UNKNOWN_CALL_FAILURE: 'UnknownCallFailure',
LOCUS_RATE_LIMITED_INCOMING: 'LocusRateLimitedIncoming',
LOCUS_RATE_LIMITED_OUTGOING: 'LocusRateLimitedOutgoing',
LOCUS_UNAVAILABLE: 'LocusUnavailable',
LOCUS_CONFLICT: 'LocusConflict',
TIMEOUT: 'Timeout',
LOCUS_INVALID_SEQUENCE_HASH: 'LocusInvalidSequenceHash',
UPDATE_MEDIA_FAILED: 'UpdateMediaFailed',
FAILED_TO_CONNECT_MEDIA: 'FailedToConnectMedia',
MEDIA_ENGINE_LOST: 'MediaEngineLost',
MEDIA_CONNECTION_LOST: 'MediaConnectionLost',
ICE_FAILURE: 'IceFailure',
MEDIA_ENGINE_HANG: 'MediaEngineHang',
ICE_SERVER_REJECTED: 'IceServerRejected',
CALL_FULL: 'CallFull',
ROOM_TOO_LARGE: 'RoomTooLarge',
GUEST_ALREADY_ADDED: 'GuestAlreadyAdded',
LOCUS_USER_NOT_AUTHORISED: 'LocusUserNotAuthorised',
CLOUDBERRY_UNAVAILABLE: 'CloudberryUnavailable',
ROOM_TOO_LARGE_FREE_ACCOUNT: 'RoomTooLarge_FreeAccount',
MEETING_INACTIVE: 'MeetingInactive',
MEETING_LOCKED: 'MeetingLocked',
MEETING_TERMINATING: 'MeetingTerminating',
MODERATOR_PIN_OR_GUEST_REQUIRED: 'Moderator_Pin_Or_Guest_Required',
MODERATOR_PIN_OR_GUEST_PIN_REQUIRED: 'Moderator_Pin_Or_Guest_PIN_Required',
MODERATOR_REQUIRED: 'Moderator_Required',
USER_NOT_MEMBER_OF_ROOM: 'UserNotMemberOfRoom',
NEW_LOCUS_ERROR: 'NewLocusError',
NET_WORK_UNAVAILABLE: 'NetworkUnavailable',
MEETING_UNAVAILABLE: 'MeetingUnavailable',
MEETING_ID_INVALID: 'MeetingIDInvalid',
MEETING_SITE_INVALID: 'MeetingSiteInvalid',
LOCUS_INVALID_JOINTIME: 'LocusInvalidJoinTime',
LOBBY_EXPIRED: 'LobbyExpired',
MEDIA_CONNECTION_LOST_PAIRED: 'MediaConnectionLostPaired',
PHONE_NUMBER_NOT_A_NUMBER: 'PhoneNumberNotANumber',
PHONE_NUMBER_TOO_LONG: 'PhoneNumberTooLong',
INVALID_DIALABLE_KEY: 'InvalidDialableKey',
ONE_ON_ONE_TO_SELF_NOT_ALLOWED: 'OneOnOneToSelfNotAllowed',
REMOVED_PARTICIPANT: 'RemovedParticipant',
MEETING_LINK_NOT_FOUND: 'MeetingLinkNotFound',
PHONE_NUMBER_TOO_SHORT_AFTER_IDD: 'PhoneNumberTooShortAfterIdd',
INVALID_INVITEE_ADDRESS: 'InvalidInviteeAddress',
PMR_USER_ACCOUNT_LOCKED_OUT: 'PMRUserAccountLockedOut',
GUEST_FORBIDDEN: 'GuestForbidden',
PMR_ACCOUNT_SUSPENDED: 'PMRAccountSuspended',
EMPTY_PHONE_NUMBER_OR_COUNTRY_CODE: 'EmptyPhoneNumberOrCountryCode',
CONVERSATION_NOT_FOUND: 'ConversationNotFound',
SIP_CALLEE_BUSY: 'SIPCalleeBusy',
SIP_CALLEE_NOT_FOUND: 'SIPCalleeNotFound',
START_RECORDING_FAILED: 'StartRecordingFailed',
RECORDING_IN_PROGRESS_FAILED: 'RecordingInProgressFailed'
};
const errorCategory = {
SIGNALING: 'signaling',
MEDIA: 'media',
OTHER: 'other',
EXPECTED: 'expected'
};
const errorFailureType = {
CALL_INITIATION_FAILURE: 'CallInitiationFailure',
MEDIA_CONNECTION_FAILURE: 'MediaConnectionFailure',
EXPECTED_FAILURE: 'ExpectedFailure',
ACCESS_RIGHTS: 'AccessRights'
};
export const eventType = {
// media quality events every 60 seconds
MEDIA_QUALITY: 'client.mediaquality.event',
CALL_INITIATED: 'client.call.initiated',
MERCURY_CONNECTION_LOST: 'client.mercury.connection.lost',
MERCURY_CONNECTION_RESTORED: 'client.mercury.connection.restored',
MOVE_MEDIA: 'client.call.move-media',
LOCAL_SDP_GENERATED: 'client.media-engine.local-sdp-generated',
REMOTE_SDP_RECEIVED: 'client.media-engine.remote-sdp-received',
LOCAL_JOIN_REQUEST: 'client.locus.join.request',
LOCUS_JOIN_RESPONSE: 'client.locus.join.response',
ALERT_DISPLAYED: 'client.alert.displayed',
// when ICE negotiation starts
ICE_START: 'client.ice.start',
ICE_END: 'client.ice.end',
ICE_DISCONNECT: 'client.ice.disconnect',
// Fired when the media engine reports receiving a new media stream. Media events MUST have the mediaType property.
RECEIVING_MEDIA_START: 'client.media.rx.start',
// Fired when the media engine reports the end of receiving a media stream.
// Media events MUST have the mediaType property.
RECEIVING_MEDIA_STOP: 'client.media.rx.stop',
// Fired when the media engine reports sending a new media stream. Media events MUST have the mediaType property.
SENDING_MEDIA_START: 'client.media.tx.start',
// Fired when the media engine reports it stopped sending a media stream.
// Media events MUST have the mediaType property.
SENDING_MEDIA_STOP: 'client.media.tx.stop',
MEDIA_RENDER_START: 'client.media.render.start',
MEDIA_RENDER_STOP: 'client.media.render.stop',
// static media event when outside of the normal scenario
// call-analyzer assumes that a client is capable of receiving audio, video, and share
// fired on change, or at beginning of a call
// every media type is required, so must be indicated with boolean
MEDIA_CAPABILITIES: 'client.media.capabilities',
// Sent when the client notices that a media session has been lost
MEDIA_RECONNECTING: 'client.media.reconnecting',
// Sent when the client recovers a media session that was lost
MEDIA_RECOVERED: 'client.media.recovered',
CALL_ABORTED: 'client.call.aborted',
// Fired when the "please enter your PIN" or similar prompt is displayed
// to the user, to authenticate them into the meeting
PIN_PROMPT: 'client.pin.prompt',
// Fired when PIN entry has been completed
PIN_COLLECTED: 'client.pin.collected',
// Fired when the client displays the native lobby
LOBBY_ENTERED: 'client.lobby.entered',
// Fired when the client leaves the native lobby
LOBBY_EXITED: 'client.lobby.exited',
// Fired when the user of the client starts a share (e.g. click 'Share' button).
// This should be sent from all clients that support sending a share.
SHARE_INITIATED: 'client.share.initiated',
// Fired when the user stops sharing (usually when they click the 'Stop' button for share)
SHARE_STOPPED: 'client.share.stopped',
// When the client receives a successful response from locus indicating that it has the floor for content sharing.
LOCAL_SHARE_FLOOR_GRANTED: 'client.share.floor-granted.local',
// Fired when the client changes its local UI/layout to a content sharing view,
// because it is expecting to display share media.
SHARE_LAYOUT_DISPLAYED: 'client.share.layout.displayed',
MUTED: 'client.muted',
UNMUTED: 'client.unmuted',
LEAVE: 'client.call.leave',
REMOTE_ENDED: 'client.call.remote-ended',
REMOTE_STARTED: 'client.call.remote-started',
MEDIA_REQUEST: 'client.locus.media.request',
MEDIA_RESPONSE: 'client.locus.media.response',
PSTN_AUDIO_ATTEMPT_START: 'client.pstnaudio.attempt.start',
PSTN_AUDIO_ATTEMPT_FINISH: 'client.pstnaudio.attempt.finish',
PSTN_AUDIO_ATTEMPT_SKIP: 'client.pstnaudio.attempt.skip'
};
export const error = {
name: {
MEDIA_ENGINE: 'media-engine',
ICE_FAILED: 'ice.failed',
LOCUS_RESPONSE: 'locus.response',
LOCUS_LEAVE: 'locus.leave'
},
notFatalErrorList: [3003, 3004, 4004, 4005, 4006, 4015],
errors: {
// https://sqbu-github.cisco.com/WebExSquared/event-dictionary/wiki/Error-codes-for-metric-events
// [errorDescription, errorFailureType, errorCategory]
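// For example, errors[1003] maps to
// ['LocusUnavailable', 'CallInitiationFailure', 'signaling'].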
1000: [errorDescription.UNKNOWN_CALL_FAILURE, errorFailureType.CALL_INITIATION_FAILURE, errorCategory.SIGNALING],
1001: [errorDescription.LOCUS_RATE_LIMITED_INCOMING,
errorFailureType.CALL_INITIATION_FAILURE,
errorCategory.SIGNALING
],
1002: [errorDescription.LOCUS_RATE_LIMITED_OUTGOING,
errorFailureType.CALL_INITIATION_FAILURE,
errorCategory.SIGNALING
],
1003: [errorDescription.LOCUS_UNAVAILABLE, errorFailureType.CALL_INITIATION_FAILURE, errorCategory.SIGNALING],
1004: [errorDescription.LOCUS_CONFLICT, errorFailureType.CALL_INITIATION_FAILURE, errorCategory.SIGNALING],
1005: [errorDescription.TIMEOUT, errorFailureType.CALL_INITIATION_FAILURE, errorCategory.SIGNALING],
1006: [errorDescription.LOCUS_INVALID_SEQUENCE_HASH,
errorFailureType.CALL_INITIATION_FAILURE,
errorCategory.SIGNALING
],
1007: [errorDescription.UPDATE_MEDIA_FAILED, errorFailureType.CALL_INITIATION_FAILURE, errorCategory.SIGNALING],
2001: [errorDescription.FAILED_TO_CONNECT_MEDIA,
errorFailureType.MEDIA_CONNECTION_FAILURE,
errorCategory.SIGNALING
],
2002: [errorDescription.MEDIA_ENGINE_LOST, errorFailureType.MEDIA_CONNECTION_FAILURE, errorCategory.SIGNALING],
2003: [errorDescription.MEDIA_CONNECTION_LOST,
errorFailureType.MEDIA_CONNECTION_FAILURE,
errorCategory.SIGNALING
],
2004: [errorDescription.ICE_FAILURE, errorFailureType.MEDIA_CONNECTION_FAILURE, errorCategory.SIGNALING],
2005: [errorDescription.MEDIA_ENGINE_HANG, errorFailureType.MEDIA_CONNECTION_FAILURE, errorCategory.SIGNALING],
2006: [errorDescription.ICE_SERVER_REJECTED, errorFailureType.MEDIA_CONNECTION_FAILURE, errorCategory.SIGNALING],
3001: [errorDescription.CALL_FULL, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
3002: [errorDescription.ROOM_TOO_LARGE, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
3004: [errorDescription.GUEST_ALREADY_ADDED, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
3005: [errorDescription.LOCUS_USER_NOT_AUTHORISED, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
3006: [errorDescription.CLOUDBERRY_UNAVAILABLE, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED], | 4001: [errorDescription.MEETING_INACTIVE, errorFailureType.CALL_INITIATION_FAILURE, errorCategory.EXPECTED],
4002: [errorDescription.MEETING_LOCKED, errorFailureType.CALL_INITIATION_FAILURE, errorCategory.EXPECTED],
4003: [errorDescription.MEETING_TERMINATING, errorFailureType.CALL_INITIATION_FAILURE, errorCategory.EXPECTED],
4004: [errorDescription.MODERATOR_PIN_OR_GUEST_REQUIRED, errorFailureType.ACCESS_RIGHTS, errorCategory.EXPECTED],
4005: [errorDescription.MODERATOR_PIN_OR_GUEST_PIN_REQUIRED,
errorFailureType.ACCESS_RIGHTS,
errorCategory.EXPECTED
],
4006: [errorDescription.MODERATOR_REQUIRED, errorFailureType.ACCESS_RIGHTS, errorCategory.EXPECTED],
4007: [errorDescription.USER_NOT_MEMBER_OF_ROOM, errorFailureType.ACCESS_RIGHTS, errorCategory.EXPECTED],
4008: [errorDescription.NEW_LOCUS_ERROR, errorFailureType.CALL_INITIATION_FAILURE, errorCategory.EXPECTED],
4009: [errorDescription.NET_WORK_UNAVAILABLE, errorFailureType.CALL_INITIATION_FAILURE, errorCategory.EXPECTED],
4010: [errorDescription.MEETING_UNAVAILABLE, errorFailureType.CALL_INITIATION_FAILURE, errorCategory.EXPECTED],
4011: [errorDescription.MEETING_ID_INVALID, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
4012: [errorDescription.MEETING_SITE_INVALID, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
4013: [errorDescription.LOCUS_INVALID_JOINTIME, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
4014: [errorDescription.LOBBY_EXPIRED, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
4015: [errorDescription.MEDIA_CONNECTION_LOST_PAIRED,
errorFailureType.MEDIA_CONNECTION_FAILURE,
errorCategory.EXPECTED
],
4016: [errorDescription.PHONE_NUMBER_NOT_A_NUMBER, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
4017: [errorDescription.PHONE_NUMBER_TOO_LONG, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
4018: [errorDescription.INVALID_DIALABLE_KEY, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
4019: [errorDescription.ONE_ON_ONE_TO_SELF_NOT_ALLOWED,
errorFailureType.EXPECTED_FAILURE,
errorCategory.EXPECTED
],
4020: [errorDescription.REMOVED_PARTICIPANT, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
4021: [errorDescription.MEETING_LINK_NOT_FOUND, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
4022: [errorDescription.PHONE_NUMBER_TOO_SHORT_AFTER_IDD,
errorFailureType.EXPECTED_FAILURE,
errorCategory.EXPECTED
],
4023: [errorDescription.INVALID_INVITEE_ADDRESS, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
4024: [errorDescription.PMR_USER_ACCOUNT_LOCKED_OUT, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
4025: [errorDescription.GUEST_FORBIDDEN, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
4026: [errorDescription.PMR_ACCOUNT_SUSPENDED, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
4027: [errorDescription.EMPTY_PHONE_NUMBER_OR_COUNTRY_CODE,
errorFailureType.EXPECTED_FAILURE,
errorCategory.EXPECTED
],
4028: [errorDescription.CONVERSATION_NOT_FOUND, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
4029: [errorDescription.START_RECORDING_FAILED, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
4030: [errorDescription.RECORDING_IN_PROGRESS_FAILED, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
5000: [errorDescription.SIP_CALLEE_BUSY, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED],
5001: [errorDescription.SIP_CALLEE_NOT_FOUND, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED]
}
};
export const trigger = {
USER_INTERACTION: 'user-interaction',
MERCURY_EVENT: 'mercury-event',
LOCI_UPDATE: 'loci-update',
MEDIA_ENGINE_EVENT: 'media-engine-event',
TIMEOUT: 'timeout',
SIGNALING: 'signaling',
OTHER: 'other'
};
export const pstnAudioType = {
DIAL_IN: 'dial-in',
DIAL_OUT: 'dial-out'
};
export const displayLocation = {
TOAST: 'toast',
ROOM_LIST: 'room-list',
CALL_PANE: 'call-pane',
CALL_VIEW: 'call-view',
OTHER: 'other'
};
export const mediaType = {
AUDIO: 'audio',
VIDEO: 'video',
SHARE: 'share',
WHITEBOARD: 'whiteboard'
};
export const reconnection = {
RECOVERED_BY_NEW: 'new', // always set to new due to /media request, no retries with ice restart
RECOVERED_BY_RETRY: 'retry'
};
export const errorCodes = {
// ordered by error code values
USER_CREATE_FAILED: 1400006,
USER_ALREADY_PARTICIPANT: 1403001,
CONVO_ALREADY_EXISTS: 1403010,
ALREADY_ANNOUNCEMENT_SPACE: 1403014,
NOT_ANNOUNCEMENT_SPACE: 1403015,
USER_NOT_MODERATOR_IN_ANNOUNCEMENT_SPACE: 1403016,
TEMP_ID_ALREADY_EXISTS: 1409001,
PARENT_ACTIVITY_ID_NOT_FOUND_OR_INVALID: 14000015
};
export const statusCodes = {
// ordered by status codes
NETWORK_OR_CORS: 0,
BAD_REQUEST: 400,
FORBIDDEN: 403,
NOT_FOUND: 404,
CONFLICT: 409
};
export const errorObjects = {
category: {
media: 'media',
expected: 'expected'
},
name: {
mediaEngine: 'media-engine'
}
};
export const UNKNOWN = 'unknown';
export const OS_NAME = {
WINDOWS: 'windows',
MAC: 'mac',
IOS: 'ios',
ANDROID: 'android',
CHROME: 'chrome',
LINUX: 'linux',
OTHERS: 'others'
};
export const CLIENT_NAME = 'webex-js-sdk'; | 3007: [errorDescription.ROOM_TOO_LARGE_FREE_ACCOUNT, errorFailureType.EXPECTED_FAILURE, errorCategory.EXPECTED], |
session.go | package session
import (
"fmt"
"github.com/devfeel/dotweb/logger"
"net/http"
"net/url"
"strconv"
"time"
"github.com/devfeel/dotweb/framework/crypto"
)
const (
DefaultSessionGCLifeTime = 60 // second
DefaultSessionMaxLifeTime = 20 * 60 // second
DefaultSessionCookieName = "dotweb_sessionId"
DefaultSessionLength = 20
SessionMode_Runtime = "runtime"
SessionMode_Redis = "redis"
LogTarget_Session = "dotweb_session"
)
type (
SessionStore interface {
SessionRead(sessionId string) (*SessionState, error)
SessionExist(sessionId string) bool
SessionUpdate(state *SessionState) error
SessionRemove(sessionId string) error
SessionCount() int // returns the number of active sessions
SessionGC() int // collects expired sessions and returns the number of removed states
}
// session config info
StoreConfig struct {
StoreName string
Maxlifetime int64 // session lifetime, in seconds
CookieName string // custom cookie name in which the session id is stored
ServerIP string // if using redis, the connection string, like "redis://:password@host:6379/0"
BackupServerUrl string // if using redis and ServerIP is down, use this backup server, like "redis://:password@host:6379/0"
StoreKeyPre string // if using redis, a custom redis key prefix; the default is dotweb:session:
}
SessionManager struct {
GCLifetime int64 `json:"gclifetime"`
appLog logger.AppLog
store SessionStore
storeConfig *StoreConfig
}
)
// GetSessionStore creates a new session store from the store config
func GetSessionStore(config *StoreConfig) SessionStore |
// NewDefaultRuntimeConfig creates a new store config with defaults using the runtime store
func NewDefaultRuntimeConfig() *StoreConfig {
return NewStoreConfig(SessionMode_Runtime, DefaultSessionMaxLifeTime, "", "")
}
// NewDefaultRedisConfig creates a new store config with defaults using the redis store
func NewDefaultRedisConfig(serverIp string) *StoreConfig {
return NewStoreConfig(SessionMode_Redis, DefaultSessionMaxLifeTime, serverIp, "")
}
// NewRedisConfig creates a new store config using the redis store.
// serverIp and storeKeyPre must be set.
func NewRedisConfig(serverIp string, storeKeyPre string) *StoreConfig {
return NewStoreConfig(SessionMode_Redis, DefaultSessionMaxLifeTime, serverIp, storeKeyPre)
}
// NewStoreConfig creates a new store config
func NewStoreConfig(storeName string, maxlifetime int64, serverIp string, storeKeyPre string) *StoreConfig {
return &StoreConfig{
StoreName: storeName,
Maxlifetime: maxlifetime,
ServerIP: serverIp,
StoreKeyPre: storeKeyPre,
}
}
// NewDefaultSessionManager creates a new session manager with the default config
func NewDefaultSessionManager(appLog logger.AppLog, config *StoreConfig) (*SessionManager, error) {
return NewSessionManager(DefaultSessionGCLifeTime, appLog, config)
}
// NewSessionManager creates a new session manager
func NewSessionManager(gcLifetime int64, appLog logger.AppLog, config *StoreConfig) (*SessionManager, error) {
if gcLifetime <= 0 {
gcLifetime = DefaultSessionGCLifeTime
}
if config.CookieName == "" {
config.CookieName = DefaultSessionCookieName
}
manager := &SessionManager{
store: GetSessionStore(config),
appLog: appLog,
GCLifetime: gcLifetime,
storeConfig: config,
}
// enable GC
go func() {
time.AfterFunc(time.Duration(manager.GCLifetime)*time.Second, func() { manager.GC() })
}()
return manager, nil
}
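// Usage sketch (illustrative; appLog is assumed to be an existing
// logger.AppLog instance):
//   cfg := NewDefaultRuntimeConfig()
//   manager, err := NewDefaultSessionManager(appLog, cfg)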
// NewSessionID creates a new session id with DefaultSessionLength characters
func (manager *SessionManager) NewSessionID() string {
val := cryptos.GetRandString(DefaultSessionLength)
return val
}
// StoreConfig returns the store config
func (manager *SessionManager) StoreConfig() *StoreConfig {
return manager.storeConfig
}
// GetClientSessionID gets the session id from the client.
// By default it is read from a cookie.
func (manager *SessionManager) GetClientSessionID(req *http.Request) (string, error) {
cookie, err := req.Cookie(manager.storeConfig.CookieName)
if err != nil {
return "", err
}
if cookie.Value == "" {
return "", nil
}
// TODO: check client validity
// check ip & agent
return url.QueryUnescape(cookie.Value)
}
func (manager *SessionManager) GetSessionState(sessionId string) (session *SessionState, err error) {
session, err = manager.store.SessionRead(sessionId)
if err != nil {
session = NewSessionState(manager.store, sessionId, make(map[interface{}]interface{}))
}
return session, nil
}
// GC collects expired session data and reschedules itself
func (manager *SessionManager) GC() {
num := manager.store.SessionGC()
if num > 0 {
manager.appLog.Debug("SessionManger.GC => "+strconv.Itoa(num), LogTarget_Session)
}
time.AfterFunc(time.Duration(manager.GCLifetime)*time.Second, func() { manager.GC() })
}
| {
switch config.StoreName {
case SessionMode_Runtime:
return NewRuntimeStore(config)
case SessionMode_Redis:
store, err := NewRedisStore(config)
if err != nil {
panic(fmt.Sprintf("redis session [%v] ping error -> %v", config.StoreName, err.Error()))
} else {
return store
}
default:
panic("not support session store -> " + config.StoreName)
}
return nil
} |
SCALAR2VECTOR.js | function SCALAR2VECTOR() {
SCALAR2VECTOR.prototype.define = function SCALAR2VECTOR() {
this.nout = -1;
var model = scicos_model();
model.sim = list(new ScilabString(["scalar2vector"]), new ScilabDouble([4]));
model.out = new ScilabDouble([this.nout]);
model.in = new ScilabDouble([1]);
model.blocktype = new ScilabString(["c"]);
model.dep_ut = new ScilabBoolean([true, false]);
var exprs = new ScilabString([this.nout]);
var gr_i = new ScilabString(["xstringb(orig(1),orig(2),\"SCALAR2VECTOR\",sz(1),sz(2));"]);
this.x = new standard_define(new ScilabDouble([3, 2]), model, exprs, gr_i);
return new BasicBlock(this.x);
} | }
} | SCALAR2VECTOR.prototype.details = function SCALAR2VECTOR() {
return this.x; |
method-self-arg.rs | // run-pass
// Test method calls with self as an argument
static mut COUNT: usize = 1;
#[derive(Copy, Clone)]
struct Foo;
impl Foo {
fn foo(self, x: &Foo) {
unsafe { COUNT *= 2; }
// Test internal call.
Foo::bar(&self);
Foo::bar(x);
Foo::baz(self);
Foo::baz(*x);
Foo::qux(Box::new(self));
Foo::qux(Box::new(*x));
}
fn bar(&self) {
unsafe { COUNT *= 3; }
}
fn baz(self) {
unsafe { COUNT *= 5; }
}
fn qux(self: Box<Foo>) {
unsafe { COUNT *= 7; }
}
}
fn main() | {
let x = Foo;
// Test external call.
Foo::bar(&x);
Foo::baz(x);
Foo::qux(Box::new(x));
x.foo(&x);
unsafe { assert_eq!(COUNT, 2*3*3*3*5*5*5*7*7*7); }
} |
|
file_system.rs | use azure_core::Etag;
use bytes::Bytes;
use chrono::{DateTime, Utc};
use std::convert::TryFrom;
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct FileSystem {
pub name: String,
#[serde(with = "azure_core::parsing::rfc2822_time_format")]
pub last_modified: DateTime<Utc>,
pub etag: Etag,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct FileSystemList {
#[serde(rename = "filesystems")]
pub file_systems: Vec<FileSystem>,
}
impl TryFrom<Bytes> for FileSystemList {
type Error = crate::Error;
fn try_from(response: Bytes) -> Result<Self, Self::Error> {
Ok(serde_json::from_slice::<FileSystemList>(response.as_ref())?)
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Path {
pub content_length: i64,
pub etag: Etag,
pub group: String,
pub is_directory: bool,
#[serde(with = "azure_core::parsing::rfc2822_time_format")]
pub last_modified: DateTime<Utc>,
pub name: String,
pub owner: String,
pub permissions: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct PathList {
pub paths: Vec<Path>,
}
impl TryFrom<Bytes> for PathList {
type Error = crate::Error;
fn try_from(response: Bytes) -> Result<Self, Self::Error> {
Ok(serde_json::from_slice::<PathList>(response.as_ref())?)
}
}
validate-regulators-config.py | #!/usr/bin/env python3
import argparse
import json
import jsonschema
import os
import sys
r"""
Validates the phosphor-regulators configuration file. Checks it against a JSON
schema as well as doing some extra checks that can't be encoded in the schema.
"""
def handle_validation_error():
sys.exit("Validation failed.")
def get_values(json_element, key, result = None):
r"""
Finds all occurrences of a key within the specified JSON element and its
children. Returns the associated values.
To search the entire configuration file, pass the root JSON element
json_element: JSON element within the config file.
key: key name.
result: list of values found with the specified key.
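Example: get_values(config_json, 'id') returns every "id" value found at any nesting depth.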
"""
if result is None:
result = []
if type(json_element) is dict:
for json_key in json_element:
if json_key == key:
result.append(json_element[json_key])
elif type(json_element[json_key]) in (list, dict):
get_values(json_element[json_key], key, result)
elif type(json_element) is list:
for item in json_element:
if type(item) in (list, dict):
get_values(item, key, result)
return result
def get_rule_ids(config_json):
r"""
Get all rule IDs in the configuration file.
config_json: Configuration file JSON
"""
rule_ids = []
for rule in config_json.get('rules', {}):
rule_ids.append(rule['id'])
return rule_ids
def get_device_ids(config_json):
r"""
Get all device IDs in the configuration file.
config_json: Configuration file JSON
"""
device_ids = []
for chassis in config_json.get('chassis', {}):
for device in chassis.get('devices', {}):
device_ids.append(device['id'])
return device_ids
def check_number_of_elements_in_masks(config_json):
r"""
Check if the number of bit masks in the 'masks' property matches the number
of byte values in the 'values' property.
config_json: Configuration file JSON
"""
i2c_write_bytes = get_values(config_json, 'i2c_write_bytes')
i2c_compare_bytes = get_values(config_json, 'i2c_compare_bytes')
for object in i2c_write_bytes:
if 'masks' in object:
if len(object.get('masks', [])) != len(object.get('values', [])):
sys.stderr.write("Error: Invalid i2c_write_bytes action.\n"+\
"The masks array must have the same size as the values array. "+\
"masks: "+str(object.get('masks', []))+\
", values: "+str(object.get('values', []))+'.\n')
handle_validation_error()
for object in i2c_compare_bytes:
if 'masks' in object:
if len(object.get('masks', [])) != len(object.get('values', [])):
sys.stderr.write("Error: Invalid i2c_compare_bytes action.\n"+\
"The masks array must have the same size as the values array. "+\
"masks: "+str(object.get('masks', []))+\
", values: "+str(object.get('values', []))+'.\n')
handle_validation_error()
def check_rule_id_exists(config_json):
r"""
Check if a rule_id property specifies a rule ID that does not exist.
config_json: Configuration file JSON
"""
rule_ids = get_values(config_json, 'rule_id')
valid_rule_ids = get_rule_ids(config_json)
for rule_id in rule_ids:
if rule_id not in valid_rule_ids:
sys.stderr.write("Error: Rule ID does not exist.\n"+\
"Found rule_id value that specifies invalid rule ID "+\
rule_id+'\n')
handle_validation_error()
def check_device_id_exists(config_json):
r"""
Check if a device_id property specifies a device ID that does not exist.
config_json: Configuration file JSON
"""
device_ids = get_values(config_json, 'device_id')
valid_device_ids = get_device_ids(config_json)
for device_id in device_ids:
if device_id not in valid_device_ids:
sys.stderr.write("Error: Device ID does not exist.\n"+\
"Found device_id value that specifies invalid device ID "+\
device_id+'\n')
handle_validation_error()
def check_set_device_value_exists(config_json):
r"""
Check if a set_device action specifies a device ID that does not exist.
config_json: Configuration file JSON
"""
device_ids = get_values(config_json, 'set_device')
valid_device_ids = get_device_ids(config_json)
for device_id in device_ids:
if device_id not in valid_device_ids:
sys.stderr.write("Error: Device ID does not exist.\n"+\
"Found set_device action that specifies invalid device ID "+\
device_id+'\n')
handle_validation_error()
def check_run_rule_value_exists(config_json):
r"""
Check if any run_rule actions specify a rule ID that does not exist.
config_json: Configuration file JSON
"""
rule_ids = get_values(config_json, 'run_rule')
valid_rule_ids = get_rule_ids(config_json)
for rule_id in rule_ids:
if rule_id not in valid_rule_ids:
sys.stderr.write("Error: Rule ID does not exist.\n"+\
"Found run_rule action that specifies invalid rule ID "+\
rule_id+'\n')
handle_validation_error()
def check_infinite_loops_in_rule(config_json, rule_json, call_stack=[]):
r"""
Check if a 'run_rule' action in the specified rule causes an
infinite loop.
config_json: Configuration file JSON.
rule_json: A rule in the JSON config file.
call_stack: Current call stack of rules.
"""
call_stack.append(rule_json['id'])
for action in rule_json.get('actions', {}):
if 'run_rule' in action:
run_rule_id = action['run_rule']
if run_rule_id in call_stack:
call_stack.append(run_rule_id)
sys.stderr.write(\
"Infinite loop caused by run_rule actions.\n"+\
str(call_stack)+'\n')
handle_validation_error()
else:
for rule in config_json.get('rules', {}):
if rule['id'] == run_rule_id:
check_infinite_loops_in_rule(\
config_json, rule, call_stack)
call_stack.pop()
def check_infinite_loops(config_json):
r"""
Check whether any rule in the config file is called recursively, causing an
infinite loop.
config_json: Configuration file JSON
"""
for rule in config_json.get('rules', {}):
check_infinite_loops_in_rule(config_json, rule)
def check_duplicate_object_id(config_json):
r"""
Check that there aren't any JSON objects with the same 'id' property value.
config_json: Configuration file JSON
"""
json_ids = get_values(config_json, 'id')
unique_ids = set()
for id in json_ids:
if id in unique_ids:
sys.stderr.write("Error: Duplicate ID.\n"+\
"Found multiple objects with the ID "+id+'\n')
handle_validation_error()
else:
unique_ids.add(id)
def check_duplicate_rule_id(config_json):
r"""
Check that there aren't any "rule" elements with the same 'id' field.
config_json: Configuration file JSON
"""
rule_ids = []
for rule in config_json.get('rules', {}):
rule_id = rule['id']
if rule_id in rule_ids:
sys.stderr.write("Error: Duplicate rule ID.\n"+\
"Found multiple rules with the ID "+rule_id+'\n')
handle_validation_error()
else:
rule_ids.append(rule_id)
def check_duplicate_chassis_number(config_json):
r"""
Check that there aren't any "chassis" elements with the same 'number' field.
config_json: Configuration file JSON
"""
numbers = []
for chassis in config_json.get('chassis', {}):
number = chassis['number']
if number in numbers:
sys.stderr.write("Error: Duplicate chassis number.\n"+\
"Found multiple chassis with the number "+str(number)+'\n')
handle_validation_error()
else:
numbers.append(number)
def check_duplicate_device_id(config_json):
r"""
Check that there aren't any "devices" with the same 'id' field.
config_json: Configuration file JSON
"""
device_ids = []
for chassis in config_json.get('chassis', {}):
for device in chassis.get('devices', {}):
device_id = device['id']
if device_id in device_ids:
sys.stderr.write("Error: Duplicate device ID.\n"+\
"Found multiple devices with the ID "+device_id+'\n')
handle_validation_error()
else:
device_ids.append(device_id)
def check_duplicate_rail_id(config_json):
r"""
Check that there aren't any "rails" with the same 'id' field.
config_json: Configuration file JSON
"""
rail_ids = []
for chassis in config_json.get('chassis', {}):
for device in chassis.get('devices', {}):
for rail in device.get('rails', {}):
rail_id = rail['id']
if rail_id in rail_ids:
sys.stderr.write("Error: Duplicate rail ID.\n"+\
"Found multiple rails with the ID "+rail_id+'\n')
handle_validation_error()
else:
rail_ids.append(rail_id)
def check_for_duplicates(config_json):
r"""
Check for duplicate rule IDs, chassis numbers, device IDs, rail IDs, and object IDs.
"""
check_duplicate_rule_id(config_json)
check_duplicate_chassis_number(config_json)
check_duplicate_device_id(config_json)
check_duplicate_rail_id(config_json)
check_duplicate_object_id(config_json)
def validate_schema(config, schema):
r"""
Validates the specified config file using the specified
schema file.
config: Path of the file containing the config JSON
schema: Path of the file containing the schema JSON
"""
with open(config) as config_handle:
config_json = json.load(config_handle)
with open(schema) as schema_handle:
schema_json = json.load(schema_handle)
try:
jsonschema.validate(config_json, schema_json)
except jsonschema.ValidationError as e:
print(e)
handle_validation_error()
return config_json
def validate_JSON_format(file):
r"""
Check whether the specified file contains syntactically valid JSON.
file: Path of the file to check.
"""
with open(file) as json_data:
try:
json.load(json_data)
except ValueError:
return False
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='phosphor-regulators configuration file validator')
parser.add_argument('-s', '--schema-file', dest='schema_file',
help='The phosphor-regulators schema file')
parser.add_argument('-c', '--configuration-file', dest='configuration_file',
help='The phosphor-regulators configuration file')
args = parser.parse_args()
if not args.schema_file:
parser.print_help()
sys.exit("Error: Schema file is required.")
if not os.path.exists(args.schema_file):
parser.print_help()
sys.exit("Error: Schema file does not exist.")
if not os.access(args.schema_file, os.R_OK):
parser.print_help()
sys.exit("Error: Schema file is not readable.")
if not validate_JSON_format(args.schema_file):
parser.print_help()
sys.exit("Error: Schema file is not in the JSON format.")
if not args.configuration_file:
parser.print_help()
sys.exit("Error: Configuration file is required.")
if not os.path.exists(args.configuration_file):
parser.print_help()
sys.exit("Error: Configuration file does not exist.")
if not os.access(args.configuration_file, os.R_OK):
parser.print_help()
sys.exit("Error: Configuration file is not readable.")
if not validate_JSON_format(args.configuration_file):
parser.print_help()
sys.exit("Error: Configuration file is not in the JSON format.")
config_json = validate_schema(args.configuration_file, args.schema_file)
check_for_duplicates(config_json)
check_infinite_loops(config_json)
check_run_rule_value_exists(config_json)
check_set_device_value_exists(config_json)
check_rule_id_exists(config_json)
check_device_id_exists(config_json)
check_number_of_elements_in_masks(config_json)
ProfileAvatar.tsx | import React from 'react'
import { NoProfileAvatarIcon } from '@pancakeswap-libs/uikit'
import { Profile } from 'state/types'
import styled from 'styled-components'
export interface ProfileAvatarProps {
profile: Profile
}
const TeamAvatar = styled.img`
border: 1px solid ${({ theme }) => theme.card.background};
border-radius: 50%;
bottom: 0px;
position: absolute;
right: 0px;
min-width: 20px;
min-height: 20px;
width: 37.5%;
height: 37.5%;
z-index: 5;
${({ theme }) => theme.mediaQueries.sm} {
border-width: 2px;
}
`
const AvatarWrapper = styled.div<{ bg: string }>`
background: url('${({ bg }) => bg}');
background-repeat: no-repeat;
background-size: cover;
border-radius: 50%;
position: relative;
width: 100%;
height: 100%;
& > img {
border-radius: 50%;
}
`
// TODO: replace with no profile avatar icon
const AvatarInactive = styled(NoProfileAvatarIcon)`
width: 100%;
height: 100%;
`
const ProfileAvatar: React.FC<ProfileAvatarProps> = ({ profile }) => {
return (
<AvatarWrapper bg={`/images/nfts/${profile.nft?.images?.md}`}>
{!profile.isActive && <AvatarInactive />}
<TeamAvatar src={`/images/teams/${profile.team.images.alt}`} alt={profile.team.name} />
</AvatarWrapper>
)
}
export default ProfileAvatar
vlanrange.go | package aci
import (
"bytes"
"fmt"
)
func nameVR(from, to string) string {
return "from-[vlan-" + from + "]-to-[vlan-" + to + "]"
}
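// e.g. nameVR("100", "199") returns "from-[vlan-100]-to-[vlan-199]".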
// VlanRangeAdd creates a new VLAN range for a VLAN pool.
func (c *Client) VlanRangeAdd(vlanpoolName, vlanpoolMode, from, to string) error {
pool := nameVP(vlanpoolName, vlanpoolMode)
rang := nameVR(from, to)
api := "/api/node/mo/uni/infra/" + pool + "/" + rang + ".json"
j := fmt.Sprintf(`{"fvnsEncapBlk":{"attributes":{"dn":"uni/infra/%s/%s","from":"vlan-%s","to":"vlan-%s","rn":"%s","status":"created"}}}`,
pool, rang, from, to, rang)
url := c.getURL(api)
c.debugf("VlanRangeAdd: url=%s json=%s", url, j)
body, errPost := c.post(url, contentTypeJSON, bytes.NewBufferString(j))
if errPost != nil {
return errPost
}
c.debugf("VlanRangeAdd: reply: %s", string(body))
return parseJSONError(body)
}
// VlanRangeDel deletes an existing VLAN range from a VLAN pool.
func (c *Client) VlanRangeDel(vlanpoolName, vlanpoolMode, from, to string) error {
pool := nameVP(vlanpoolName, vlanpoolMode)
rang := nameVR(from, to)
api := "/api/node/mo/uni/infra/" + pool + ".json"
j := fmt.Sprintf(`{"fvnsVlanInstP":{"attributes":{"dn":"uni/infra/%s","status":"modified"},"children":[{"fvnsEncapBlk":{"attributes":{"dn":"uni/infra/%s/%s","status":"deleted"}}}]}}`,
pool, pool, rang)
url := c.getURL(api)
c.debugf("VlanRangeAdd: url=%s json=%s", url, j)
body, errPost := c.post(url, contentTypeJSON, bytes.NewBufferString(j))
if errPost != nil {
return errPost
}
c.debugf("VlanRangeDel: reply: %s", string(body))
return parseJSONError(body)
}
// VlanRangeList retrieves the list of VLAN ranges from a VLAN pool.
func (c *Client) VlanRangeList(vlanpoolName, vlanpoolMode string) ([]map[string]interface{}, error) {
pool := nameVP(vlanpoolName, vlanpoolMode)
key := "fvnsEncapBlk"
api := "/api/node/mo/uni/infra/" + pool + ".json?query-target=children&target-subtree-class=" + key
url := c.getURL(api)
c.debugf("VlanRangeList: url=%s", url)
body, errGet := c.get(url)
if errGet != nil {
return nil, errGet
}
c.debugf("VlanRangeList: reply: %s", string(body))
return jsonImdataAttributes(c, body, key, "VlanRangeList")
}
|
event_expansion.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
metav1 "github.com/lavalamp/client-go-flat/apimachinery/pkg/apis/meta/v1"
"github.com/lavalamp/client-go-flat/apimachinery/pkg/fields"
"github.com/lavalamp/client-go-flat/apimachinery/pkg/runtime"
"github.com/lavalamp/client-go-flat/apimachinery/pkg/types"
"github.com/lavalamp/client-go-flat/pkg/api"
"github.com/lavalamp/client-go-flat/pkg/api/v1"
)
// The EventExpansion interface allows manually adding extra methods to the EventInterface.
type EventExpansion interface {
// CreateWithEventNamespace is the same as Create, except that it sends the request to the event.Namespace.
CreateWithEventNamespace(event *v1.Event) (*v1.Event, error)
// UpdateWithEventNamespace is the same as Update, except that it sends the request to the event.Namespace.
UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error)
PatchWithEventNamespace(event *v1.Event, data []byte) (*v1.Event, error)
// Search finds events about the specified object
Search(objOrRef runtime.Object) (*v1.EventList, error)
// Returns the appropriate field selector based on the API version being used to communicate with the server.
// The returned field selector can be used with List and Watch to filter desired events.
GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector
}
// CreateWithEventNamespace makes a new event. Returns the copy of the event the server returns,
// or an error. The namespace to create the event within is deduced from the
// event; it must either match this event client's namespace, or this event
// client must have been created with the "" namespace.
func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) {
if e.ns != "" && event.Namespace != e.ns {
return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns)
}
result := &v1.Event{}
err := e.client.Post().
NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0).
Resource("events").
Body(event).
Do().
Into(result)
return result, err
}
// UpdateWithEventNamespace modifies an existing event. It returns the copy of the event that the server returns,
// or an error. The namespace and key to update the event within is deduced from the event. The
// namespace must either match this event client's namespace, or this event client must have been
// created with the "" namespace. Update also requires the ResourceVersion to be set in the event
// object.
func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) {
result := &v1.Event{}
err := e.client.Put().
NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0).
Resource("events").
Name(event.Name).
Body(event).
Do().
Into(result)
return result, err
}
// PatchWithEventNamespace modifies an existing event. It returns the copy of
// the event that the server returns, or an error. The namespace and name of the
// target event is deduced from the incompleteEvent. The namespace must either
// match this event client's namespace, or this event client must have been
// created with the "" namespace.
func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte) (*v1.Event, error) {
if e.ns != "" && incompleteEvent.Namespace != e.ns {
return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.ns)
}
result := &v1.Event{}
err := e.client.Patch(types.StrategicMergePatchType).
NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0).
Resource("events").
Name(incompleteEvent.Name).
Body(data).
Do().
Into(result)
return result, err
}
// Search finds events about the specified object. The namespace of the
// object must match this event's client namespace unless the event client
// was made with the "" namespace.
func (e *events) Search(objOrRef runtime.Object) (*v1.EventList, error) {
ref, err := api.GetReference(objOrRef)
if err != nil {
return nil, err
}
if e.ns != "" && ref.Namespace != e.ns {
return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns)
}
stringRefKind := string(ref.Kind)
var refKind *string
if stringRefKind != "" {
refKind = &stringRefKind
}
stringRefUID := string(ref.UID)
var refUID *string
if stringRefUID != "" {
refUID = &stringRefUID
}
fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID)
return e.List(metav1.ListOptions{FieldSelector: fieldSelector.String()})
}
// Returns the appropriate field selector based on the API version being used to communicate with the server.
// The returned field selector can be used with List and Watch to filter desired events.
func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector {
apiVersion := e.client.APIVersion().String()
field := fields.Set{}
if involvedObjectName != nil {
field[GetInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName
}
if involvedObjectNamespace != nil {
field["involvedObject.namespace"] = *involvedObjectNamespace
}
if involvedObjectKind != nil {
field["involvedObject.kind"] = *involvedObjectKind
}
if involvedObjectUID != nil {
field["involvedObject.uid"] = *involvedObjectUID
}
return field.AsSelector()
}
// GetInvolvedObjectNameFieldLabel returns the appropriate field label to use for the name of the involved object as per the given API version.
func GetInvolvedObjectNameFieldLabel(version string) string {
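// Every supported API version currently uses the same label, so the version argument is ignored.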
return "involvedObject.name"
}
// TODO: This is a temporary arrangement and will be removed once all clients are moved to use the clientset.
type EventSinkImpl struct {
Interface EventInterface
}
func (e *EventSinkImpl) Create(event *v1.Event) (*v1.Event, error) {
return e.Interface.CreateWithEventNamespace(event)
}
func (e *EventSinkImpl) Update(event *v1.Event) (*v1.Event, error) {
return e.Interface.UpdateWithEventNamespace(event)
}
func (e *EventSinkImpl) Patch(event *v1.Event, data []byte) (*v1.Event, error) {
return e.Interface.PatchWithEventNamespace(event, data)
}
test_vimeo.py | from test.unit_tests.providers import common
from test.unit_tests.providers.common import ProviderTestCase
from totalimpact.providers.provider import Provider, ProviderContentMalformedError
from test.utils import http
import os
import collections
from nose.tools import assert_equals, assert_items_equal, raises, nottest
datadir = os.path.join(os.path.split(__file__)[0], "../../../extras/sample_provider_pages/vimeo")
SAMPLE_EXTRACT_METRICS_PAGE = os.path.join(datadir, "metrics")
SAMPLE_EXTRACT_BIBLIO_PAGE = os.path.join(datadir, "biblio")
class TestVimeo(ProviderTestCase):
provider_name = "vimeo"
testitem_aliases = ("url", "http://vimeo.com/48605764")
testitem_metrics = ("url", "http://vimeo.com/48605764")
testitem_biblio = ("url", "http://vimeo.com/48605764")
def setUp(self):
ProviderTestCase.setUp(self)
def test_is_relevant_alias(self):
# ensure that it matches an appropriate ids
assert_equals(self.provider.is_relevant_alias(self.testitem_aliases), True)
assert_equals(self.provider.is_relevant_alias(("url", "NOT A VIMEO ID")), False)
def test_extract_metrics_success(self):
f = open(SAMPLE_EXTRACT_METRICS_PAGE, "r")
metrics_dict = self.provider._extract_metrics(f.read(), id=self.testitem_metrics[1])
print metrics_dict
assert_equals(metrics_dict["vimeo:plays"], 83)
def test_extract_biblio_success(self):
f = open(SAMPLE_EXTRACT_BIBLIO_PAGE, "r")
biblio_dict = self.provider._extract_biblio(f.read(), self.testitem_biblio[1])
print biblio_dict
expected = {'repository': 'Vimeo', 'title': 'Wheat Rust Inoculation Protocol Video', 'url': 'http://vimeo.com/48605764', 'year': '2012', 'authors': 'Huang Lab', 'published_date': '2012-08-31 12:20:16'}
assert_equals(biblio_dict, expected)
def test_provenance_url(self):
provenance_url = self.provider.provenance_url("github:forks", [self.testitem_aliases])
assert_equals(provenance_url, 'http://vimeo.com/48605764')
@http
def test_metrics(self):
metrics_dict = self.provider.metrics([self.testitem_metrics])
print metrics_dict
expected = {'vimeo:plays': (83, 'http://vimeo.com/48605764')}
for key in expected:
assert metrics_dict[key][0] >= expected[key][0], [key, metrics_dict[key], expected[key]]
assert metrics_dict[key][1] == expected[key][1], [key, metrics_dict[key], expected[key]]
@http
def test_biblio(self):
biblio_dict = self.provider.biblio([self.testitem_biblio])
print biblio_dict
expected = {'repository': 'Vimeo', 'title': u'Wheat Rust Inoculation Protocol Video', 'url': u'http://vimeo.com/48605764', 'year': u'2012', 'authors': u'Huang Lab', 'published_date': u'2012-08-31 12:20:16'}
assert_items_equal(biblio_dict.keys(), expected.keys())
for key in ['year', 'published_date', 'title', 'url']:
assert_equals(biblio_dict[key], expected[key])
Schemas.ts | import { Factory } from "../../../CodeGenerator";
import { UnSupportError } from "../../../Exception";
import * as ConverterContext from "../ConverterContext";
import * as Guard from "../Guard";
import * as InferredType from "../InferredType";
import * as Name from "../Name";
import { Store } from "../store";
import * as ToTypeNode from "../toTypeNode";
import { OpenApi } from "../types";
import * as Reference from "./Reference";
import * as Schema from "./Schema";
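// Maps a type-less schema that carries a boolean "nullable" flag to the union "any | null";
// any other schema shape falls through and yields undefined.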
const createNullableTypeNode = (factory: Factory.Type, schema: OpenApi.Schema) => {
if (!schema.type && typeof schema.nullable === "boolean") {
const typeNode = factory.TypeNode.create({
type: "any",
});
return factory.UnionTypeNode.create({
typeNodes: [
typeNode,
factory.TypeNode.create({
type: "null",
}),
],
});
}
};
export const generateNamespace = (
entryPoint: string,
currentPoint: string,
store: Store.Type,
factory: Factory.Type,
schemas: Record<string, OpenApi.Schema | OpenApi.Reference>,
context: ToTypeNode.Context,
convertContext: ConverterContext.Types,
): void => {
const basePath = "components/schemas";
store.addComponent("schemas", {
kind: "namespace",
name: Name.Components.Schemas,
});
Object.entries(schemas).forEach(([name, targetSchema]) => {
if (Guard.isReference(targetSchema)) {
const schema = targetSchema;
const reference = Reference.generate<OpenApi.Schema>(entryPoint, currentPoint, schema);
if (reference.type === "local") {
const { maybeResolvedName } = context.resolveReferencePath(currentPoint, reference.path);
store.addStatement(`${basePath}/${name}`, {
kind: "typeAlias",
name: convertContext.escapeDeclarationText(name),
value: factory.TypeAliasDeclaration.create({
export: true,
name: convertContext.escapeDeclarationText(name),
type: factory.TypeReferenceNode.create({
name: convertContext.escapeDeclarationText(maybeResolvedName),
}),
}),
});
return;
}
Schema.addSchema(
entryPoint,
reference.referencePoint,
store,
factory,
reference.path,
reference.name,
reference.data,
context,
convertContext,
);
if (store.hasStatement(`${basePath}/${name}`, ["interface", "typeAlias"])) {
return;
}
return store.addStatement(`${basePath}/${name}`, {
kind: "typeAlias",
name: convertContext.escapeDeclarationText(name),
value: factory.TypeAliasDeclaration.create({
export: true,
name: convertContext.escapeDeclarationText(name),
comment: reference.data.description,
type: factory.TypeReferenceNode.create({
name: convertContext.escapeDeclarationText(context.resolveReferencePath(currentPoint, reference.path).name),
}),
}),
});
}
const schema = InferredType.getInferredType(targetSchema);
if (!schema) {
const typeNode = createNullableTypeNode(factory, targetSchema);
if (!typeNode) {
throw new UnSupportError("schema.type not specified \n" + JSON.stringify(targetSchema));
}
return typeNode;
}
const path = `${basePath}/${name}`;
if (Guard.isAllOfSchema(schema)) {
return store.addStatement(path, {
kind: "typeAlias",
name: convertContext.escapeDeclarationText(name),
value: Schema.generateMultiTypeAlias(entryPoint, currentPoint, factory, name, schema.allOf, context, "allOf", convertContext),
});
}
if (Guard.isOneOfSchema(schema)) {
return store.addStatement(path, {
kind: "typeAlias",
name: convertContext.escapeDeclarationText(name),
value: Schema.generateMultiTypeAlias(entryPoint, currentPoint, factory, name, schema.oneOf, context, "oneOf", convertContext),
});
}
if (Guard.isAnyOfSchema(schema)) {
return store.addStatement(path, {
kind: "typeAlias",
name: convertContext.escapeDeclarationText(name),
value: Schema.generateMultiTypeAlias(entryPoint, currentPoint, factory, name, schema.anyOf, context, "anyOf", convertContext),
});
}
if (Guard.isArraySchema(schema)) {
return store.addStatement(path, {
kind: "typeAlias",
name: convertContext.escapeDeclarationText(name),
value: Schema.generateArrayTypeAlias(entryPoint, currentPoint, factory, name, schema, context, convertContext),
});
}
if (Guard.isObjectSchema(schema)) {
return store.addStatement(path, {
kind: "interface",
name: convertContext.escapeDeclarationText(name),
value: Schema.generateInterface(entryPoint, currentPoint, factory, name, schema, context, convertContext),
});
}
if (Guard.isPrimitiveSchema(schema)) {
return store.addStatement(path, {
kind: "typeAlias",
name,
value: Schema.generateTypeAlias(entryPoint, currentPoint, factory, name, schema, convertContext),
});
}
throw new UnSupportError("schema.type = Array[] not supported. " + JSON.stringify(schema));
});
};
setup.py | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# Always prefer setuptools over distutils
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
HERE = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Get version info
ABOUT = {}
with open(path.join(HERE, "datadog_checks", "couch", "__about__.py")) as f:
exec(f.read(), ABOUT)
def get_requirements(fpath):
with open(path.join(HERE, fpath), encoding='utf-8') as f:
return f.readlines()
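# Note: get_requirements is not referenced below; the run-time dependency is pinned via CHECKS_BASE_REQ instead.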
CHECKS_BASE_REQ = 'datadog_checks_base'
setup(
name='datadog-couch',
version=ABOUT["__version__"],
description='The CouchDB check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent couch check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='[email protected]',
# License
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# The package we're going to ship
packages=['datadog_checks.couch'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
# Extra files to ship with the wheel package
include_package_data=True,
)
repair_test.go | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wal
import (
"fmt"
"io"
"io/ioutil"
"os"
"testing"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/wal/walpb"
)
type corruptFunc func(string, int64) error
// TestRepairTruncate ensures a truncated file can be repaired
func TestRepairTruncate(t *testing.T) {
corruptf := func(p string, offset int64) error {
f, err := openLast(p)
if err != nil {
return err
}
defer f.Close()
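// Cut off the last four bytes so the final record can no longer be decoded.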
return f.Truncate(offset - 4)
}
testRepair(t, makeEnts(10), corruptf, 9)
}
func testRepair(t *testing.T, ents [][]raftpb.Entry, corrupt corruptFunc, expectedEnts int) {
p, err := ioutil.TempDir(os.TempDir(), "waltest")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(p)
// create WAL
w, err := Create(p, nil)
defer func() {
if err = w.Close(); err != nil {
t.Fatal(err)
}
}()
if err != nil {
t.Fatal(err)
}
for _, es := range ents {
if err = w.Save(raftpb.HardState{}, es); err != nil {
t.Fatal(err)
}
}
offset, err := w.tail().Seek(0, os.SEEK_CUR)
if err != nil {
t.Fatal(err)
}
w.Close()
err = corrupt(p, offset)
if err != nil {
t.Fatal(err)
}
// verify we broke the wal
w, err = Open(p, walpb.Snapshot{})
if err != nil {
t.Fatal(err)
}
_, _, _, err = w.ReadAll()
if err != io.ErrUnexpectedEOF {
t.Fatalf("err = %v, want error %v", err, io.ErrUnexpectedEOF)
}
w.Close()
// repair the wal
if ok := Repair(p); !ok {
t.Fatalf("fix = %t, want %t", ok, true)
}
// read it back
w, err = Open(p, walpb.Snapshot{})
if err != nil {
t.Fatal(err)
}
_, _, walEnts, err := w.ReadAll()
if err != nil {
t.Fatal(err)
}
if len(walEnts) != expectedEnts {
t.Fatalf("len(ents) = %d, want %d", len(walEnts), expectedEnts)
}
// write some more entries to repaired log
for i := 1; i <= 10; i++ {
es := []raftpb.Entry{{Index: uint64(expectedEnts + i)}}
if err = w.Save(raftpb.HardState{}, es); err != nil {
t.Fatal(err)
}
}
w.Close()
// read back entries following repair, ensure it's all there
w, err = Open(p, walpb.Snapshot{})
if err != nil {
t.Fatal(err)
}
_, _, walEnts, err = w.ReadAll()
if err != nil {
t.Fatal(err)
}
if len(walEnts) != expectedEnts+10 {
t.Fatalf("len(ents) = %d, want %d", len(walEnts), expectedEnts+10)
}
}
func makeEnts(ents int) (ret [][]raftpb.Entry) {
for i := 1; i <= ents; i++ {
ret = append(ret, []raftpb.Entry{{Index: uint64(i)}})
}
return ret
}
// TestRepairWriteTearLast repairs the WAL in case the last record is a torn write
// that straddled two sectors.
func TestRepairWriteTearLast(t *testing.T) {
corruptf := func(p string, offset int64) error {
f, err := openLast(p)
if err != nil {
return err
}
defer f.Close()
// 512 bytes perfectly aligns the last record, so use 1024
if offset < 1024 {
return fmt.Errorf("got offset %d, expected >1024", offset)
}
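// Shrinking and then re-extending the file zero-fills the tail, simulating a torn write.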
if terr := f.Truncate(1024); terr != nil {
return terr
}
if terr := f.Truncate(offset); terr != nil {
return terr
}
return nil
}
testRepair(t, makeEnts(50), corruptf, 40)
}
// TestRepairWriteTearMiddle repairs the WAL when there is write tearing
// in the middle of a record.
func TestRepairWriteTearMiddle(t *testing.T) {
corruptf := func(p string, offset int64) error {
f, err := openLast(p)
if err != nil {
return err
}
defer f.Close()
// corrupt middle of 2nd record
_, werr := f.WriteAt(make([]byte, 512), 4096+512)
return werr
}
ents := makeEnts(5)
// 4096 bytes of data so a middle sector is easy to corrupt
dat := make([]byte, 4096)
for i := range dat {
dat[i] = byte(i)
}
for i := range ents {
ents[i][0].Data = dat
}
testRepair(t, ents, corruptf, 1)
}
app.py | import os
from flask import Flask, escape, request, jsonify
from marshmallow import ValidationError
from flask_pymongo import PyMongo
from src.auth.auth_exception import UserExistsException, UserNotFoundException, AccessDeniedException
from src.auth.controllers.auth import auth_blueprint
import src.settings
from src.secret.controllers.secret import secret_blueprint
app = Flask(__name__)
app.config["MONGO_URI"] = os.environ.get('MONGO_URL', 'mongodb://localhost:27017/db')
print(os.environ.get('MONGO_URL'))
mongo = PyMongo(app)
# set default version to v1
version = os.environ.get('API_VERSION', 'v1')
prefix = f"/api/{version}"
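# e.g. prefix == "/api/v1" when API_VERSION is unset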
@app.errorhandler(ValidationError)
def validation_error_handler(err):
errors = err.messages
return jsonify(errors), 400
@app.errorhandler(UserExistsException)
def user_exists_error_handler(e):
return jsonify({"error": e.msg}), 400
@app.errorhandler(AccessDeniedException)
def access_denied_error_handler(e):
return jsonify({"error": e.msg}), 401
@app.errorhandler(UserNotFoundException)
def user_not_found_error_handler(e):
return jsonify({"error": e.msg}), 404
app.register_blueprint(auth_blueprint, url_prefix=f'{prefix}/auth')
app.register_blueprint(secret_blueprint, url_prefix=f'{prefix}/secret')
@app.route(f'{prefix}/ping', methods=['GET'])
def ping():
"""
Check if server is alive
:return: "pong"
"""
return "pong" | from flask_pymongo import PyMongo
from src.auth.auth_exception import UserExistsException, UserNotFoundException, AccessDeniedException
from src.auth.controllers.auth import auth_blueprint |
device.routes.js | /*
* Copyright © 2016-2018 The BeiDouApp Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* eslint-disable import/no-unresolved, import/default */
import devicesTemplate from './devices.tpl.html';
/* eslint-enable import/no-unresolved, import/default */
/*@ngInject*/
export default function DeviceRoutes($stateProvider, types) {
$stateProvider
.state('home.devices', {
url: '/devices',
params: {'topIndex': 0},
module: 'private',
auth: ['TENANT_ADMIN', 'CUSTOMER_USER'],
views: {
"content@home": {
templateUrl: devicesTemplate,
controller: 'DeviceController',
controllerAs: 'vm'
}
},
data: {
devicesType: 'tenant',
searchEnabled: true,
searchByEntitySubtype: true,
searchEntityType: types.entityType.device,
pageTitle: 'device.devices'
},
ncyBreadcrumb: {
label: '{"icon": "devices_other", "label": "device.devices"}'
}
})
.state('home.customers.devices', {
url: '/:customerId/devices',
params: {'topIndex': 0},
module: 'private',
auth: ['TENANT_ADMIN'],
views: {
"content@home": {
templateUrl: devicesTemplate,
controllerAs: 'vm',
controller: 'DeviceController'
}
},
data: {
devicesType: 'customer',
searchEnabled: true,
searchByEntitySubtype: true,
searchEntityType: types.entityType.device,
pageTitle: 'customer.devices'
},
ncyBreadcrumb: {
label: '{"icon": "devices_other", "label": "{{ vm.customerDevicesTitle }}", "translate": "false"}'
}
});
}
directory_object_item_request_builder.go | package item
import (
ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9 "github.com/microsoft/kiota/abstractions/go"
i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87 "github.com/microsoftgraph/msgraph-sdk-go/models/microsoft/graph"
i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b "github.com/microsoftgraph/msgraph-sdk-go/models/microsoft/graph/odataerrors"
)
// DirectoryObjectItemRequestBuilder provides operations to manage the transitiveMemberOf property of the microsoft.graph.orgContact entity.
type DirectoryObjectItemRequestBuilder struct {
// Path parameters for the request
pathParameters map[string]string;
// The request adapter to use to execute the requests.
requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter;
// Url template to use to build the URL for the current request builder
urlTemplate string;
}
// DirectoryObjectItemRequestBuilderGetOptions options for Get
type DirectoryObjectItemRequestBuilderGetOptions struct {
// Request headers
H map[string]string;
// Request options
O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption;
// Request query parameters
Q *DirectoryObjectItemRequestBuilderGetQueryParameters;
// Response handler to use in place of the default response handling provided by the core service
ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler;
}
// DirectoryObjectItemRequestBuilderGetQueryParameters get transitiveMemberOf from contacts
type DirectoryObjectItemRequestBuilderGetQueryParameters struct {
// Expand related entities
Expand []string;
// Select properties to be returned
Select []string;
}
// NewDirectoryObjectItemRequestBuilderInternal instantiates a new DirectoryObjectItemRequestBuilder and sets the default values.
func NewDirectoryObjectItemRequestBuilderInternal(pathParameters map[string]string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*DirectoryObjectItemRequestBuilder) {
m := &DirectoryObjectItemRequestBuilder{
}
m.urlTemplate = "{+baseurl}/contacts/{orgContact_id}/transitiveMemberOf/{directoryObject_id}{?select,expand}";
urlTplParams := make(map[string]string)
for idx, item := range pathParameters {
urlTplParams[idx] = item
}
m.pathParameters = urlTplParams;
m.requestAdapter = requestAdapter;
return m
}
// NewDirectoryObjectItemRequestBuilder instantiates a new DirectoryObjectItemRequestBuilder and sets the default values.
func NewDirectoryObjectItemRequestBuilder(rawUrl string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*DirectoryObjectItemRequestBuilder) {
urlParams := make(map[string]string)
urlParams["request-raw-url"] = rawUrl
return NewDirectoryObjectItemRequestBuilderInternal(urlParams, requestAdapter)
}
// CreateGetRequestInformation get transitiveMemberOf from contacts
func (m *DirectoryObjectItemRequestBuilder) CreateGetRequestInformation(options *DirectoryObjectItemRequestBuilderGetOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) {
requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.GET
if options != nil && options.Q != nil {
requestInfo.AddQueryParameters(*(options.Q))
}
if options != nil && options.H != nil {
requestInfo.Headers = options.H
}
if options != nil && len(options.O) != 0 {
err := requestInfo.AddRequestOptions(options.O...)
if err != nil {
return nil, err
}
}
return requestInfo, nil
}
// Get get transitiveMemberOf from contacts
func (m *DirectoryObjectItemRequestBuilder) Get(options *DirectoryObjectItemRequestBuilderGetOptions)(i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.DirectoryObjectable, error) {
requestInfo, err := m.CreateGetRequestInformation(options);
if err != nil {
return nil, err
}
errorMapping := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ErrorMappings {
"4XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue,
"5XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue,
}
res, err := m.requestAdapter.SendAsync(requestInfo, i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.CreateDirectoryObjectFromDiscriminatorValue, nil, errorMapping)
if err != nil {
return nil, err
}
return res.(i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.DirectoryObjectable), nil
}
updater.py | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
"""
This module updates the userbot based on Upstream revision
"""
from os import remove, execle, path, makedirs, getenv, environ
from shutil import rmtree
import asyncio
import sys
from git import Repo
from git.exc import GitCommandError, InvalidGitRepositoryError, NoSuchPathError
from userbot import CMD_HELP, bot, HEROKU_APIKEY, HEROKU_APPNAME, UPSTREAM_REPO_URL
from userbot.events import register
requirements_path = path.join(
path.dirname(path.dirname(path.dirname(__file__))), 'requirements.txt')
async def gen_chlog(repo, diff):
ch_log = ''
d_form = "%d/%m/%y"
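# Each entry is rendered as: •[dd/mm/yy]: commit summary <author>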
for c in repo.iter_commits(diff):
ch_log += f'•[{c.committed_datetime.strftime(d_form)}]: {c.summary} <{c.author}>\n'
return ch_log
async def update_requirements():
reqs = str(requirements_path)
try:
process = await asyncio.create_subprocess_shell(
' '.join([sys.executable, "-m", "pip", "install", "-r", reqs]),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
await process.communicate()
return process.returncode
except Exception as e:
return repr(e)
@register(outgoing=True, pattern="^.update(?: |$)(.*)")
async def upstream(ups):
"For .update command, check if the bot is up to date, update if specified"
await ups.edit("`Checking for updates, please wait....`")
conf = ups.pattern_match.group(1)
off_repo = UPSTREAM_REPO_URL
force_update = False
try:
txt = "`Oops.. Updater cannot continue due to "
txt += "some problems occurred`\n\n**LOGTRACE:**\n"
repo = Repo()
except NoSuchPathError as error:
await ups.edit(f'{txt}\n`directory {error} is not found`')
repo.__del__()
return
except GitCommandError as error:
await ups.edit(f'{txt}\n`Early failure! {error}`')
repo.__del__()
return
except InvalidGitRepositoryError as error:
if conf != "now":
await ups.edit(
f"`Unfortunately, the directory {error} does not seem to be a git repository.\
\nBut we can fix that by force updating the userbot using .update now.`"
)
return
repo = Repo.init()
origin = repo.create_remote('upstream', off_repo)
origin.fetch()
force_update = True
repo.create_head('master', origin.refs.master)
repo.heads.master.set_tracking_branch(origin.refs.master)
repo.heads.master.checkout(True)
ac_br = repo.active_branch.name
if ac_br != 'master':
await ups.edit(
f'**[UPDATER]:**` Looks like you are using your own custom branch ({ac_br}). '
'in that case, Updater is unable to identify '
'which branch is to be merged. '
'please checkout to any official branch`')
repo.__del__()
return
try:
repo.create_remote('upstream', off_repo)
except BaseException:
pass
ups_rem = repo.remote('upstream')
ups_rem.fetch(ac_br)
changelog = await gen_chlog(repo, f'HEAD..upstream/{ac_br}')
if not changelog and not force_update:
await ups.edit(
f'\n`Your BOT is` **up-to-date** `with` **{ac_br}**\n')
repo.__del__()
return
if conf != "now" and not force_update:
changelog_str = f'**New UPDATE available for [{ac_br}]:\n\nCHANGELOG:**\n`{changelog}`'
if len(changelog_str) > 4096:
await ups.edit("`Changelog is too big, view the file to see it.`")
file = open("output.txt", "w+")
file.write(changelog_str)
file.close()
await ups.client.send_file(
ups.chat_id,
"output.txt",
reply_to=ups.id,
)
remove("output.txt")
else:
await ups.edit(changelog_str)
await ups.respond('`do \".update now\" to update`')
return
if force_update:
await ups.edit(
'`Force-Syncing to latest stable userbot code, please wait...`')
else:
await ups.edit('`Updating userbot, please wait....`')
# We're in a Heroku dyno; handle its memez.
if HEROKU_APIKEY is not None:
import heroku3
heroku = heroku3.from_key(HEROKU_APIKEY)
heroku_app = None
heroku_applications = heroku.apps()
if not HEROKU_APPNAME:
await ups.edit(
'`[HEROKU MEMEZ] Please set up the HEROKU_APPNAME variable to be able to update userbot.`'
)
repo.__del__()
return
for app in heroku_applications:
if app.name == HEROKU_APPNAME:
heroku_app = app
break
if heroku_app is None:
await ups.edit(
f'{txt}\n`Invalid Heroku credentials for updating userbot dyno.`'
)
repo.__del__()
return
await ups.edit('`[HEROKU MEMEZ]\
\nUserbot dyno build in progress, please wait for it to complete.`'
)
ups_rem.fetch(ac_br)
repo.git.reset("--hard", "FETCH_HEAD")
heroku_git_url = heroku_app.git_url.replace(
"https://", "https://api:" + HEROKU_APIKEY + "@")
if "heroku" in repo.remotes:
remote = repo.remote("heroku")
remote.set_url(heroku_git_url)
else:
remote = repo.create_remote("heroku", heroku_git_url)
try:
remote.push(refspec="HEAD:refs/heads/master", force=True)
except GitCommandError as error:
await ups.edit(f'{txt}\n`Here is the error log:\n{error}`')
repo.__del__()
return
await ups.edit('`Successfully Updated!\n'
'Restarting, please wait...`')
else:
# Classic Updater, pretty straightforward.
try:
ups_rem.pull(ac_br)
except GitCommandError:
repo.git.reset("--hard", "FETCH_HEAD")
reqs_upgrade = await update_requirements()
await ups.edit('`Successfully Updated!\n'
'Bot is restarting... Wait for a second!`')
# Spin a new instance of bot
args = [sys.executable, "-m", "userbot"]
execle(sys.executable, *args, environ)
return
CMD_HELP.update({
'update':
".update\
\nUsage: Checks if the main userbot repository has any updates and shows a changelog if so.\
\n\n.update now\
\nUsage: Updates your userbot, if there are any updates in the main userbot repository."
})