prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k) |
---|---|
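The rows that follow are prompt/completion pairs in a fill-in-the-middle (FIM) format: each prompt carries a file name plus the file contents with a `<|fim▁hole|>` marker where a span was removed, and the completion column holds the removed span. As a minimal illustration only — assuming this dump corresponds to a Hugging Face dataset with exactly these two string columns, and using a hypothetical dataset path — a row can be turned back into the original file like this:

```python
# Minimal sketch, not a reference implementation. Assumptions: the dump maps to a
# Hugging Face dataset with "prompt" and "completion" string columns, and the
# dataset path below is hypothetical.
from datasets import load_dataset

FIM_BEGIN = "<|fim▁begin|>"
FIM_HOLE = "<|fim▁hole|>"
FIM_END = "<|fim▁end|>"

def reconstruct_file(prompt: str, completion: str) -> str:
    """Re-insert the held-out completion into the FIM-formatted prompt."""
    body = prompt.split(FIM_BEGIN, 1)[-1]         # drop the <|file_name|> preamble
    body = body.replace(FIM_END, "")              # strip the trailing end marker
    prefix, _, suffix = body.partition(FIM_HOLE)  # split around the removed span
    return prefix + completion + suffix

ds = load_dataset("user/fim-code-pairs", split="train")  # hypothetical path
row = ds[0]
print(reconstruct_file(row["prompt"], row["completion"])[:300])
```

The sentinel strings are copied from the rows below; if a tokenizer uses different FIM sentinel spellings, they would need to be adjusted.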
<|file_name|>alert.js<|end_file_name|><|fim▁begin|>angular.module("ui.bootstrap.alert", []).directive('alert', function () {
return {
restrict:'EA',
templateUrl:'template/alert/alert.html',
transclude:true,
replace:true,
scope: {
type: '=',<|fim▁hole|> close: '&'
},
link: function(scope, iElement, iAttrs, controller) {
scope.closeable = "close" in iAttrs;
}
};
});<|fim▁end|> | |
<|file_name|>version.go<|end_file_name|><|fim▁begin|>// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.<|fim▁hole|>
// Heapster version. Update this whenever making a new release.
// The version is of the format Major.Minor.Patch
// Increment major number for new feature additions and behavioral changes.
// Increment minor number for bug fixes and performance enhancements.
// Increment patch number for critical fixes to existing releases.
const HeapsterVersion = "0.15.0"<|fim▁end|> | // See the License for the specific language governing permissions and
// limitations under the License.
package version |
<|file_name|>addunit.go<|end_file_name|><|fim▁begin|>// Copyright 2012, 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package main
import (
"errors"
"fmt"
"launchpad.net/gnuflag"
<|fim▁hole|> "launchpad.net/juju-core/cmd"
"launchpad.net/juju-core/juju"
"launchpad.net/juju-core/state/api/params"
"launchpad.net/juju-core/state/statecmd"
)
// UnitCommandBase provides support for commands which deploy units. It handles the parsing
// and validation of --to and --num-units arguments.
type UnitCommandBase struct {
ToMachineSpec string
NumUnits int
}
func (c *UnitCommandBase) SetFlags(f *gnuflag.FlagSet) {
f.IntVar(&c.NumUnits, "num-units", 1, "")
f.StringVar(&c.ToMachineSpec, "to", "", "the machine or container to deploy the unit in, bypasses constraints")
}
func (c *UnitCommandBase) Init(args []string) error {
if c.NumUnits < 1 {
return errors.New("--num-units must be a positive integer")
}
if c.ToMachineSpec != "" {
if c.NumUnits > 1 {
return errors.New("cannot use --num-units > 1 with --to")
}
if !cmd.IsMachineOrNewContainer(c.ToMachineSpec) {
return fmt.Errorf("invalid --to parameter %q", c.ToMachineSpec)
}
}
return nil
}
// AddUnitCommand is responsible for adding additional units to a service.
type AddUnitCommand struct {
cmd.EnvCommandBase
UnitCommandBase
ServiceName string
}
const addUnitDoc = `
Service units can be added to a specific machine using the --to argument.
Examples:
juju add-unit mysql --to 23 (Add unit to machine 23)
juju add-unit mysql --to 24/lxc/3 (Add unit to lxc container 3 on host machine 24)
juju add-unit mysql --to lxc:25 (Add unit to a new lxc container on host machine 25)
`
func (c *AddUnitCommand) Info() *cmd.Info {
return &cmd.Info{
Name: "add-unit",
Args: "<service name>",
Purpose: "add a service unit",
Doc: addUnitDoc,
}
}
func (c *AddUnitCommand) SetFlags(f *gnuflag.FlagSet) {
c.EnvCommandBase.SetFlags(f)
c.UnitCommandBase.SetFlags(f)
f.IntVar(&c.NumUnits, "n", 1, "number of service units to add")
}
func (c *AddUnitCommand) Init(args []string) error {
switch len(args) {
case 1:
c.ServiceName = args[0]
case 0:
return errors.New("no service specified")
}
if err := cmd.CheckEmpty(args[1:]); err != nil {
return err
}
return c.UnitCommandBase.Init(args)
}
// Run connects to the environment specified on the command line
// and calls conn.AddUnits.
func (c *AddUnitCommand) Run(_ *cmd.Context) error {
conn, err := juju.NewConnFromName(c.EnvName)
if err != nil {
return err
}
defer conn.Close()
params := params.AddServiceUnits{
ServiceName: c.ServiceName,
NumUnits: c.NumUnits,
ToMachineSpec: c.ToMachineSpec,
}
_, err = statecmd.AddServiceUnits(conn.State, params)
return err
}<|fim▁end|> | |
<|file_name|>quantum.py<|end_file_name|><|fim▁begin|># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
from devstack import cfg
from devstack import component as comp
from devstack import log as logging
from devstack import shell as sh
from devstack import utils
from devstack.components import db
LOG = logging.getLogger("devstack.components.quantum")
# Openvswitch special settings
VSWITCH_PLUGIN = 'openvswitch'
V_PROVIDER = "quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin"
# Config files (some only modified if running as openvswitch)
PLUGIN_CONF = "plugins.ini"
QUANTUM_CONF = 'quantum.conf'
PLUGIN_LOC = ['etc']<|fim▁hole|>AGENT_CONF = 'ovs_quantum_plugin.ini'
AGENT_LOC = ["etc", "quantum", "plugins", "openvswitch"]
AGENT_BIN_LOC = ["quantum", "plugins", "openvswitch", 'agent']
CONFIG_FILES = [PLUGIN_CONF, AGENT_CONF]
# This db will be dropped and created
DB_NAME = 'ovs_quantum'
# Openvswitch bridge setup/teardown/name commands
OVS_BRIDGE_DEL = ['ovs-vsctl', '--no-wait', '--', '--if-exists', 'del-br', '%OVS_BRIDGE%']
OVS_BRIDGE_ADD = ['ovs-vsctl', '--no-wait', 'add-br', '%OVS_BRIDGE%']
OVS_BRIDGE_EXTERN_ID = ['ovs-vsctl', '--no-wait', 'br-set-external-id', '%OVS_BRIDGE%', 'bridge-id', '%OVS_EXTERNAL_ID%']
OVS_BRIDGE_CMDS = [OVS_BRIDGE_DEL, OVS_BRIDGE_ADD, OVS_BRIDGE_EXTERN_ID]
# Subdirs of the downloaded source
CONFIG_DIR = 'etc'
BIN_DIR = 'bin'
# What to start (only if openvswitch enabled)
APP_Q_SERVER = 'quantum-server'
APP_Q_AGENT = 'ovs_quantum_agent.py'
APP_OPTIONS = {
APP_Q_SERVER: ["%QUANTUM_CONFIG_FILE%"],
APP_Q_AGENT: ["%OVS_CONFIG_FILE%", "-v"],
}
class QuantumUninstaller(comp.PkgUninstallComponent):
def __init__(self, *args, **kargs):
comp.PkgUninstallComponent.__init__(self, *args, **kargs)
class QuantumInstaller(comp.PkgInstallComponent):
def __init__(self, *args, **kargs):
comp.PkgInstallComponent.__init__(self, *args, **kargs)
self.q_vswitch_agent = False
self.q_vswitch_service = False
plugin = self.cfg.getdefaulted("quantum", "q_plugin", VSWITCH_PLUGIN)
if plugin == VSWITCH_PLUGIN:
self.q_vswitch_agent = True
self.q_vswitch_service = True
def _get_download_locations(self):
places = list()
places.append({
'uri': ("git", "quantum_repo"),
'branch': ("git", "quantum_branch"),
})
return places
def known_options(self):
return set(['no-ovs-db-init', 'no-ovs-bridge-init'])
def _get_config_files(self):
return list(CONFIG_FILES)
def _get_target_config_name(self, config_fn):
if config_fn == PLUGIN_CONF:
tgt_loc = [self.app_dir] + PLUGIN_LOC + [config_fn]
return sh.joinpths(*tgt_loc)
elif config_fn == AGENT_CONF:
tgt_loc = [self.app_dir] + AGENT_LOC + [config_fn]
return sh.joinpths(*tgt_loc)
else:
return comp.PkgInstallComponent._get_target_config_name(self, config_fn)
def _config_adjust(self, contents, config_fn):
if config_fn == PLUGIN_CONF and self.q_vswitch_service:
# Need to fix the "Quantum plugin provider module"
newcontents = contents
with io.BytesIO(contents) as stream:
config = cfg.IgnoreMissingConfigParser()
config.readfp(stream)
provider = config.get("PLUGIN", "provider")
if provider != V_PROVIDER:
config.set("PLUGIN", "provider", V_PROVIDER)
with io.BytesIO() as outputstream:
config.write(outputstream)
outputstream.flush()
newcontents = cfg.add_header(config_fn, outputstream.getvalue())
return newcontents
elif config_fn == AGENT_CONF and self.q_vswitch_agent:
# Need to adjust the sql connection
newcontents = contents
with io.BytesIO(contents) as stream:
config = cfg.IgnoreMissingConfigParser()
config.readfp(stream)
db_dsn = config.get("DATABASE", "sql_connection")
if db_dsn:
generated_dsn = db.fetch_dbdsn(self.cfg, self.pw_gen, DB_NAME)
if generated_dsn != db_dsn:
config.set("DATABASE", "sql_connection", generated_dsn)
with io.BytesIO() as outputstream:
config.write(outputstream)
outputstream.flush()
newcontents = cfg.add_header(config_fn, outputstream.getvalue())
return newcontents
else:
return comp.PkgInstallComponent._config_adjust(self, contents, config_fn)
def _setup_bridge(self):
if not self.q_vswitch_agent or \
'no-ovs-bridge-init' in self.options:
return
bridge = self.cfg.getdefaulted("quantum", "ovs_bridge", 'br-int')
LOG.info("Fixing up ovs bridge named %s.", bridge)
external_id = self.cfg.getdefaulted("quantum", 'ovs_bridge_external_name', bridge)
params = dict()
params['OVS_BRIDGE'] = bridge
params['OVS_EXTERNAL_ID'] = external_id
cmds = list()
for cmd_templ in OVS_BRIDGE_CMDS:
cmds.append({
'cmd': cmd_templ,
'run_as_root': True,
})
utils.execute_template(*cmds, params=params)
def post_install(self):
comp.PkgInstallComponent.post_install(self)
self._setup_db()
self._setup_bridge()
def _setup_db(self):
if not self.q_vswitch_service or \
'no-ovs-db-init' in self.options:
return
LOG.info("Fixing up database named %s.", DB_NAME)
db.drop_db(self.cfg, self.pw_gen, self.distro, DB_NAME)
db.create_db(self.cfg, self.pw_gen, self.distro, DB_NAME)
def _get_source_config(self, config_fn):
if config_fn == PLUGIN_CONF:
srcloc = [self.app_dir] + PLUGIN_LOC + [config_fn]
srcfn = sh.joinpths(*srcloc)
contents = sh.load_file(srcfn)
return (srcfn, contents)
elif config_fn == AGENT_CONF:
srcloc = [self.app_dir] + AGENT_LOC + [config_fn]
srcfn = sh.joinpths(*srcloc)
contents = sh.load_file(srcfn)
return (srcfn, contents)
else:
return comp.PkgInstallComponent._get_source_config(self, config_fn)
class QuantumRuntime(comp.ProgramRuntime):
def __init__(self, *args, **kargs):
comp.ProgramRuntime.__init__(self, *args, **kargs)
self.q_vswitch_agent = False
self.q_vswitch_service = False
plugin = self.cfg.getdefaulted("quantum", "q_plugin", VSWITCH_PLUGIN)
if plugin == VSWITCH_PLUGIN:
# Default to on if not specified
self.q_vswitch_agent = True
self.q_vswitch_service = True
def _get_apps_to_start(self):
app_list = comp.ProgramRuntime._get_apps_to_start(self)
if self.q_vswitch_service:
app_list.append({
'name': APP_Q_SERVER,
'path': sh.joinpths(self.app_dir, BIN_DIR, APP_Q_SERVER),
})
if self.q_vswitch_agent:
full_pth = [self.app_dir] + AGENT_BIN_LOC + [APP_Q_AGENT]
app_list.append({
'name': APP_Q_AGENT,
'path': sh.joinpths(*full_pth)
})
return app_list
def _get_app_options(self, app_name):
return APP_OPTIONS.get(app_name)
def _get_param_map(self, app_name):
param_dict = comp.ProgramRuntime._get_param_map(self, app_name)
if app_name == APP_Q_AGENT:
tgt_loc = [self.app_dir] + AGENT_LOC + [AGENT_CONF]
param_dict['OVS_CONFIG_FILE'] = sh.joinpths(*tgt_loc)
elif app_name == APP_Q_SERVER:
param_dict['QUANTUM_CONFIG_FILE'] = sh.joinpths(self.app_dir, CONFIG_DIR, QUANTUM_CONF)
return param_dict<|fim▁end|> | |
<|file_name|>IdleController.js<|end_file_name|><|fim▁begin|>'use strict';
angular.module('upsConsole')
.controller('IdleController', function(Keepalive, Idle, $rootScope, $scope, $log, appConfig, Auth) {
var self = this;
self.config = appConfig;
/**
* idle service, keepalive, auth token refresh
*/
Idle.watch();
self.idleCountdown = appConfig.idleWarningDuration + 1;
$rootScope.$on('KeepaliveResponse', function() {
Auth.keycloak.updateToken(45).success(function(refreshed) {
if (refreshed) {
$log.debug('token was successfully refreshed');
} else {
$log.debug('token is still valid');
}
}).error(function() {
$log.debug('failed to refresh the token, or the session has expired');
});
});
$rootScope.$on('IdleStart', function() {
$log.debug('idleStart');
});<|fim▁hole|> });
});
$rootScope.$on('IdleEnd', function() {
$log.debug('idleEnd');
$scope.$apply(function() {
self.idleCountdown = appConfig.idleWarningDuration + 1;
});
});
$rootScope.$on('IdleTimeout', function() {
$log.debug('idleTimeout');
Auth.logout();
});
})
.config( function( KeepaliveProvider, IdleProvider, appConfigProvider ) {
var appConfig = appConfigProvider.$get();
IdleProvider.idle( appConfig.idleDuration );
IdleProvider.timeout( appConfig.idleWarningDuration );
KeepaliveProvider.interval( appConfig.keepaliveInterval );
});<|fim▁end|> | $rootScope.$on('IdleWarn', function() {
$log.debug('idleWarn');
$scope.$apply(function() {
self.idleCountdown = self.idleCountdown - 1; |
<|file_name|>Composing.ts<|end_file_name|><|fim▁begin|>import * as Behaviour from './Behaviour';
import * as ComposeApis from '../../behaviour/composing/ComposeApis';
import { ComposeSchema } from '../../behaviour/composing/ComposeSchema';
import { ComposingBehaviour } from '../../behaviour/composing/ComposingTypes';<|fim▁hole|>const Composing = Behaviour.create({
fields: ComposeSchema,
name: 'composing',
apis: ComposeApis
}) as ComposingBehaviour;
export {
Composing
};<|fim▁end|> | |
<|file_name|>mpf.rs<|end_file_name|><|fim▁begin|>use libc::{c_double, c_int, c_long, c_ulong, c_void};
use std::mem::uninitialized;
use std::cmp;
use std::cmp::Ordering::{self, Greater, Less, Equal};
use std::ops::{Div, Mul, Add, Sub, Neg};
use super::mpz::mp_bitcnt_t;
type mp_exp_t = c_long;
#[repr(C)]
pub struct mpf_struct {
_mp_prec: c_int,
_mp_size: c_int,
_mp_exp: mp_exp_t,
_mp_d: *mut c_void
}
pub type mpf_srcptr = *const mpf_struct;
pub type mpf_ptr = *mut mpf_struct;
#[link(name = "gmp")]
extern "C" {
fn __gmpf_init2(x: mpf_ptr, prec: mp_bitcnt_t);
fn __gmpf_init_set(rop: mpf_ptr, op: mpf_srcptr);
fn __gmpf_clear(x: mpf_ptr);
fn __gmpf_get_prec(op: mpf_srcptr) -> mp_bitcnt_t;
fn __gmpf_set_prec(rop: mpf_ptr, prec: mp_bitcnt_t);
fn __gmpf_set(rop: mpf_ptr, op: mpf_srcptr);
fn __gmpf_cmp(op1: mpf_srcptr, op2: mpf_srcptr) -> c_int;
fn __gmpf_cmp_d(op1: mpf_srcptr, op2: c_double) -> c_int;
fn __gmpf_cmp_ui(op1: mpf_srcptr, op2: c_ulong) -> c_int;
fn __gmpf_reldiff(rop: mpf_ptr, op1: mpf_srcptr, op2: mpf_srcptr);
fn __gmpf_add(rop: mpf_ptr, op1: mpf_srcptr, op2: mpf_srcptr);
fn __gmpf_sub(rop: mpf_ptr, op1: mpf_srcptr, op2: mpf_srcptr);
fn __gmpf_mul(rop: mpf_ptr, op1: mpf_srcptr, op2: mpf_srcptr);
fn __gmpf_div(rop: mpf_ptr, op1: mpf_srcptr, op2: mpf_srcptr);
fn __gmpf_neg(rop: mpf_ptr, op: mpf_srcptr);
fn __gmpf_abs(rop: mpf_ptr, op: mpf_srcptr);
fn __gmpf_ceil(rop: mpf_ptr, op: mpf_srcptr);
fn __gmpf_floor(rop: mpf_ptr, op: mpf_srcptr);
fn __gmpf_trunc(rop: mpf_ptr, op: mpf_srcptr);
}
pub struct Mpf {
pub mpf: mpf_struct,
}
unsafe impl Send for Mpf { }
impl Drop for Mpf {
fn drop(&mut self) { unsafe { __gmpf_clear(&mut self.mpf) } }
}
impl Mpf {
pub fn new(precision: usize) -> Mpf {
unsafe {
let mut mpf = uninitialized();
__gmpf_init2(&mut mpf, precision as c_ulong);
Mpf { mpf: mpf }
}
}
pub fn set(&mut self, other: &Mpf) {
unsafe { __gmpf_set(&mut self.mpf, &other.mpf) }
}
pub fn get_prec(&self) -> usize {
unsafe { __gmpf_get_prec(&self.mpf) as usize }
}
pub fn set_prec(&mut self, precision: usize) {
unsafe { __gmpf_set_prec(&mut self.mpf, precision as c_ulong) }
}
pub fn abs(&self) -> Mpf {
unsafe {
let mut res = Mpf::new(self.get_prec());
__gmpf_abs(&mut res.mpf, &self.mpf);
res
}
}
pub fn ceil(&self) -> Mpf {
unsafe {
let mut res = Mpf::new(self.get_prec());
__gmpf_ceil(&mut res.mpf, &self.mpf);
res
}
}
pub fn floor(&self) -> Mpf {
unsafe {
let mut res = Mpf::new(self.get_prec());
__gmpf_floor(&mut res.mpf, &self.mpf);
res
}
}
pub fn trunc(&self) -> Mpf {
unsafe {
let mut res = Mpf::new(self.get_prec());
__gmpf_trunc(&mut res.mpf, &self.mpf);
res
}
}
pub fn reldiff(&self, other: &Mpf) -> Mpf {
unsafe {
let mut res = Mpf::new(cmp::max(self.get_prec(), other.get_prec()));
__gmpf_reldiff(&mut res.mpf, &self.mpf, &other.mpf);
res
}
}
}
impl Clone for Mpf {
fn clone(&self) -> Mpf {
unsafe {
let mut mpf = uninitialized();
__gmpf_init_set(&mut mpf, &self.mpf);
Mpf { mpf: mpf }
}
}
}
impl Eq for Mpf { }
impl PartialEq for Mpf {
fn eq(&self, other: &Mpf) -> bool {
unsafe { __gmpf_cmp(&self.mpf, &other.mpf) == 0 }
}
}
impl Ord for Mpf {
fn cmp(&self, other: &Mpf) -> Ordering {
let cmp = unsafe { __gmpf_cmp(&self.mpf, &other.mpf) };
if cmp == 0 {
Equal
} else if cmp > 0 {
Greater
} else {
Less
}
}
}
impl PartialOrd for Mpf {
fn partial_cmp(&self, other: &Mpf) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<'a, 'b> Add<&'a Mpf> for &'b Mpf {
type Output = Mpf;
fn add(self, other: &Mpf) -> Mpf {
unsafe {
let mut res = Mpf::new(cmp::max(self.get_prec(), other.get_prec()));
__gmpf_add(&mut res.mpf, &self.mpf, &other.mpf);
res
}
}
}
impl<'a> Add<&'a Mpf> for Mpf {
type Output = Mpf;
#[inline]
fn add(mut self, other: &Mpf) -> Mpf {
unsafe {
__gmpf_add(&mut self.mpf, &self.mpf, &other.mpf);
self
}
}
}
impl<'a, 'b> Sub<&'a Mpf> for &'b Mpf {
type Output = Mpf;
fn sub(self, other: &Mpf) -> Mpf {
unsafe {
let mut res = Mpf::new(cmp::max(self.get_prec(), other.get_prec()));
__gmpf_sub(&mut res.mpf, &self.mpf, &other.mpf);
res
}
}
}
impl<'a> Sub<&'a Mpf> for Mpf {
type Output = Mpf;
#[inline]
fn sub(mut self, other: &Mpf) -> Mpf {
unsafe {
__gmpf_sub(&mut self.mpf, &self.mpf, &other.mpf);
self
}
}<|fim▁hole|> type Output = Mpf;
fn mul(self, other: &Mpf) -> Mpf {
unsafe {
let mut res = Mpf::new(cmp::max(self.get_prec(), other.get_prec()));
__gmpf_mul(&mut res.mpf, &self.mpf, &other.mpf);
res
}
}
}
impl<'a> Mul<&'a Mpf> for Mpf {
type Output = Mpf;
#[inline]
fn mul(mut self, other: &Mpf) -> Mpf {
unsafe {
__gmpf_mul(&mut self.mpf, &self.mpf, &other.mpf);
self
}
}
}
impl<'a, 'b> Div<&'a Mpf> for &'b Mpf {
type Output = Mpf;
fn div(self, other: &Mpf) -> Mpf {
unsafe {
if __gmpf_cmp_ui(&self.mpf, 0) == 0 {
panic!("divide by zero")
}
let mut res = Mpf::new(cmp::max(self.get_prec(), other.get_prec()));
__gmpf_div(&mut res.mpf, &self.mpf, &other.mpf);
res
}
}
}
impl<'a> Div<&'a Mpf> for Mpf {
type Output = Mpf;
#[inline]
fn div(mut self, other: &Mpf) -> Mpf {
unsafe {
if __gmpf_cmp_ui(&self.mpf, 0) == 0 {
panic!("divide by zero")
}
__gmpf_div(&mut self.mpf, &self.mpf, &other.mpf);
self
}
}
}
impl<'b> Neg for &'b Mpf {
type Output = Mpf;
fn neg(self) -> Mpf {
unsafe {
let mut res = Mpf::new(self.get_prec());
__gmpf_neg(&mut res.mpf, &self.mpf);
res
}
}
}
impl Neg for Mpf {
type Output = Mpf;
#[inline]
fn neg(mut self) -> Mpf {
unsafe {
__gmpf_neg(&mut self.mpf, &self.mpf);
self
}
}
}
gen_overloads!(Mpf);<|fim▁end|> | }
impl<'a, 'b> Mul<&'a Mpf> for &'b Mpf { |
<|file_name|>test_compute_utils.py<|end_file_name|><|fim▁begin|># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#<|fim▁hole|># not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For misc util methods used with compute.
"""
from nova import db
from nova import flags
from nova import context
from nova import test
from nova import log as logging
from nova import utils
import nova.image.fake
from nova.compute import utils as compute_utils
from nova.compute import instance_types
from nova.notifier import test_notifier
from nova.tests import fake_network
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
flags.DECLARE('stub_network', 'nova.compute.manager')
class UsageInfoTestCase(test.TestCase):
def setUp(self):
def fake_get_nw_info(cls, ctxt, instance):
self.assertTrue(ctxt.is_admin)
return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
spectacular=True)
super(UsageInfoTestCase, self).setUp()
self.stubs.Set(nova.network.API, 'get_instance_nw_info',
fake_get_nw_info)
self.flags(connection_type='fake',
stub_network=True,
notification_driver='nova.notifier.test_notifier',
network_manager='nova.network.manager.FlatManager')
self.compute = utils.import_object(FLAGS.compute_manager)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
test_notifier.NOTIFICATIONS = []
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
self.stubs.Set(nova.image.fake._FakeImageService, 'show', fake_show)
def _create_instance(self, params={}):
"""Create a test instance"""
inst = {}
inst['image_ref'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst.update(params)
return db.instance_create(self.context, inst)['id']
def test_notify_usage_exists(self):
"""Ensure 'exists' notification generates apropriate usage data."""
instance_id = self._create_instance()
instance = db.instance_get(self.context, instance_id)
compute_utils.notify_usage_exists(instance)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
msg = test_notifier.NOTIFICATIONS[0]
self.assertEquals(msg['priority'], 'INFO')
self.assertEquals(msg['event_type'], 'compute.instance.exists')
payload = msg['payload']
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
self.assertEquals(payload['instance_id'], instance.uuid)
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
for attr in ('display_name', 'created_at', 'launched_at',
'state', 'state_description',
'bandwidth', 'audit_period_beginning',
'audit_period_ending'):
self.assertTrue(attr in payload,
msg="Key %s not in payload" % attr)
image_ref_url = "%s/images/1" % utils.generate_glance_url()
self.assertEquals(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context, instance['uuid'])<|fim▁end|> | # Licensed under the Apache License, Version 2.0 (the "License"); you may |
<|file_name|>scope.cc<|end_file_name|><|fim▁begin|>// Copyright 2010 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ------------------------------------------------------------------------
#include <time.h>
#include <assert.h>
#include "engine/globals.h"
#include "public/logging.h"
#include "engine/memory.h"
#include "engine/utils.h"
#include "engine/opcode.h"
#include "engine/map.h"
#include "engine/scope.h"
#include "engine/type.h"
#include "engine/node.h"
#include "engine/proc.h"
namespace sawzall {
Scope* Scope::New(Proc* proc) {
Scope* s = NEWP(proc, Scope);
return s;
}
bool Scope::Insert(Object* obj) {
assert(obj != NULL);
if (obj->is_anonymous() || Lookup(obj->name()) == NULL) {
// object doesn't exist yet in this scope => insert it
list_.Append(obj);
obj->set_scope(this);
return true;
} else {
// object exists already
return false;
}
}
void Scope::InsertOrDie(Object* obj) {
if (!Insert(obj))
FatalError("identifier '%s' already declared in this scope", obj->name());
}
bool Scope::InsertOrOverload(Intrinsic* fun) {
assert(fun != NULL);
if (Insert(fun))
return true;
Object* obj = Lookup(fun->name());
Intrinsic* existing = obj->AsIntrinsic();
if (existing != NULL && existing->add_overload(fun)) {
fun->object()->set_scope(this);
return true;
}
return false;
}
void Scope::InsertOrOverloadOrDie(Intrinsic* fun) {
if (!InsertOrOverload(fun))
FatalError("identifier '%s' already declared in this scope", fun->name());
}
Object* Scope::Lookup(szl_string name) const {
return Lookup(name, strlen(name));
}
static bool SamePossiblyDottedName(szl_string dotted_name, szl_string name,
int length) {
const char* p;
const char* q;
for (p = dotted_name, q = name; *p != '\0' && q < name + length;
p++, q++) {
if (*p != *q) {
// Possible mismatch, check for the exception case.
if (*p == '.' && *q == '_')
continue; // Was '.' vs '_', treat it as a match
else
return false; // Not '.' vs '_', really was a mismatch
}
}
return (*p == '\0' && q == name + length);
}
Object* Scope::Lookup(szl_string name, int length) const {
assert(name != NULL);
for (int i = 0; i < list_.length(); i++) {
Object* obj = list_[i];
if (!obj->is_anonymous()) {
if (memcmp(obj->name(), name, length) == 0 && obj->name()[length] == '\0')
return obj;
// Temporarily find dotted names (package-qualified names using dot as
// the separator) when given a name that matches except for using
// underscores where the first name uses dots.
if (obj->AsTypeName() != NULL && obj->type()->is_tuple() &&
obj->type()->as_tuple()->is_message() &&
SamePossiblyDottedName(obj->name(), name, length)) {
return obj;
}
}
}
return NULL;
}
Object* Scope::LookupOrDie(szl_string name) const {
Object* obj = Lookup(name);
if (obj == NULL)
FatalError("identifier '%s' not found in this scope", name);
return obj;
}
Field* Scope::LookupByTag(int tag) const {
assert(tag > 0); // tags must be > 0, 0 indicates no tag
for (int i = 0; i < list_.length(); i++) {
Field* field = list_[i]->AsField();
if (field != NULL && field->tag() == tag)
return field;
}
return NULL;
}
void Scope::Clone(CloneMap* cmap, Scope* src, Scope* dst) {
// Scope entries are just for lookup, so we never clone them; instead
// we rely on their having already been cloned where originally written.
for (int i = 0; i < src->num_entries(); i++) {
// Block scope entries can be VarDecl, TypeName, QuantVarDecl<|fim▁hole|> if (obj->AsVarDecl() != NULL) {
VarDecl* vardecl = cmap->Find(obj->AsVarDecl());
assert(vardecl != NULL);
dst->InsertOrDie(vardecl);
} else if (obj->AsTypeName() != NULL) {
TypeName* tname = cmap->Find(obj->AsTypeName());
assert(tname != NULL);
dst->InsertOrDie(tname);
} else {
ShouldNotReachHere();
}
}
}
void Scope::Print() const {
if (is_empty()) {
F.print("{}\n");
} else {
F.print("{\n");
for (int i = 0; i < num_entries(); i++) {
Object* obj = entry_at(i);
F.print(" %s: %T;", obj->display_name(), obj->type());
// print more detail, if possible
VarDecl* var = obj->AsVarDecl();
if (var != NULL) {
const char* kind = "";
if (var->is_local())
kind = "local";
else if (var->is_param())
kind = "parameter";
else if (var->is_static())
kind = "static";
else
ShouldNotReachHere();
F.print(" # %s, offset = %d", kind, var->offset());
}
F.print("\n");
}
F.print("}\n");
}
}
// Simulate multiple inheritance.
// These should be in the header but that introduces too many dependencies.
bool Scope::Insert(BadExpr* x) { return Insert(x->object()); }
bool Scope::Insert(Field* x) { return Insert(x->object()); }
bool Scope::Insert(Intrinsic* x) { return Insert(x->object()); }
bool Scope::Insert(Literal* x) { return Insert(x->object()); }
bool Scope::Insert(TypeName* x) { return Insert(x->object()); }
bool Scope::Insert(VarDecl* x) { return Insert(x->object()); }
void Scope::InsertOrDie(BadExpr* x) { InsertOrDie(x->object()); }
void Scope::InsertOrDie(Field* x) { InsertOrDie(x->object()); }
void Scope::InsertOrDie(Intrinsic* x) { InsertOrDie(x->object()); }
void Scope::InsertOrDie(Literal* x) { InsertOrDie(x->object()); }
void Scope::InsertOrDie(TypeName* x) { InsertOrDie(x->object()); }
void Scope::InsertOrDie(VarDecl* x) { InsertOrDie(x->object()); }
} // namespace sawzall<|fim▁end|> | Object* obj = src->entry_at(i); |
<|file_name|>test_translate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import unittest
from openerp.tools.translate import quote, unquote, xml_translate
class TranslationToolsTestCase(unittest.TestCase):
def test_quote_unquote(self):
def test_string(str):
quoted = quote(str)
#print "\n1:", repr(str)
#print "2:", repr(quoted)
unquoted = unquote("".join(quoted.split('"\n"')))
#print "3:", repr(unquoted)
self.assertEquals(str, unquoted)
<|fim▁hole|> """)
# The ones with 1+ backslashes directly followed by
# a newline or literal N can fail... we would need a
# state-machine parser to handle these, but this would
# be much slower so it's better to avoid them at the moment
self.assertRaises(AssertionError, quote, """test \nall kinds\n\no\r
\\\\nope\n\n"
""")
def test_translate_xml_base(self):
""" Test xml_translate() without formatting elements. """
terms = []
source = """<form string="Form stuff">
<h1>Blah blah blah</h1>
Put some more text here
<field name="foo"/>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah blah blah', 'Put some more text here'])
def test_translate_xml_inline1(self):
""" Test xml_translate() with formatting elements. """
terms = []
source = """<form string="Form stuff">
<h1>Blah <i>blah</i> blah</h1>
Put some <b>more text</b> here
<field name="foo"/>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah <i>blah</i> blah', 'Put some <b>more text</b> here'])
def test_translate_xml_inline2(self):
""" Test xml_translate() with formatting elements embedding other elements. """
terms = []
source = """<form string="Form stuff">
<b><h1>Blah <i>blah</i> blah</h1></b>
Put <em>some <b>more text</b></em> here
<field name="foo"/>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah <i>blah</i> blah', 'Put <em>some <b>more text</b></em> here'])
def test_translate_xml_inline3(self):
""" Test xml_translate() with formatting elements without actual text. """
terms = []
source = """<form string="Form stuff">
<div>
<span class="before"/>
<h1>Blah blah blah</h1>
<span class="after">
<i class="hack"/>
</span>
</div>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah blah blah'])
def test_translate_xml_t(self):
""" Test xml_translate() with t-* attributes. """
terms = []
source = """<t t-name="stuff">
stuff before
<span t-field="o.name"/>
stuff after
</t>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['stuff before', 'stuff after'])
def test_translate_xml_off(self):
""" Test xml_translate() with attribute translate="off". """
terms = []
source = """<div>
stuff before
<div translation="off">Do not translate this</div>
stuff after
</div>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['stuff before', 'stuff after'])
def test_translate_xml_attribute(self):
""" Test xml_translate() with <attribute> elements. """
terms = []
source = """<field name="foo" position="attributes">
<attribute name="string">Translate this</attribute>
<attribute name="option">Do not translate this</attribute>
</field>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Translate this'])
def test_translate_xml_a(self):
""" Test xml_translate() with <a> elements. """
terms = []
source = """<t t-name="stuff">
<ul class="nav navbar-nav">
<li>
<a class="oe_menu_leaf" href="/web#menu_id=42&action=54">
<span class="oe_menu_text">Blah</span>
</a>
</li>
<li class="dropdown" id="menu_more_container" style="display: none;">
<a class="dropdown-toggle" data-toggle="dropdown" href="#">More <b class="caret"/></a>
<ul class="dropdown-menu" id="menu_more"/>
</li>
</ul>
</t>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['<span class="oe_menu_text">Blah</span>', 'More <b class="caret"/>'])<|fim▁end|> | test_string("""test \nall kinds\n \n o\r
\\\\ nope\n\n" |
<|file_name|>WifiPeerListListener.java<|end_file_name|><|fim▁begin|><|fim▁hole|>
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import android.net.wifi.p2p.WifiP2pDeviceList;
import android.net.wifi.p2p.WifiP2pManager.PeerListListener;
import android.util.Log;
/*
* @author NickHuang
*/
public class WifiPeerListListener implements PeerListListener{
private static final String TAG = "WifiPeerListListener";
private static final boolean D = true;
private WifiConnector mConnector;
private ExecutorService mExecutor;
public WifiPeerListListener(WifiConnector mConnector){
this.mConnector = mConnector;
mExecutor = Executors.newCachedThreadPool();
}
@Override
public void onPeersAvailable(WifiP2pDeviceList peers) {
// TODO Auto-generated method stub
mExecutor.submit(new UpdateAvailableWifiDevices(peers));
}
private class UpdateAvailableWifiDevices implements Runnable{
private WifiP2pDeviceList wifiP2pDeviceList;
public UpdateAvailableWifiDevices(WifiP2pDeviceList wifiP2pDeviceList){
this.wifiP2pDeviceList = wifiP2pDeviceList;
}
@Override
public void run() {
// TODO Auto-generated method stub
if(D) Log.d(TAG, "Peers available. Count = " + wifiP2pDeviceList.getDeviceList().size());
mConnector.clearPeersWifiP2pDevices();
/*
for(WifiP2pDevice device : wifiP2pDeviceList.getDeviceList()){
Log.d(TAG, "Available device: " + device.deviceName);
mConnector.addWifiP2pDevice(device);
}
*/
mConnector.addAllPeersWifiP2pDevices(wifiP2pDeviceList.getDeviceList());
mConnector.sendPeersAvaliableBroadcast();
}
}
}<|fim▁end|> | package hrylab.xjtu.wifip2papp.wifidirect; |
<|file_name|>static-methods-in-traits.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
mod a {
pub trait Foo {
fn foo() -> Self;
}
impl Foo for isize {
fn foo() -> isize {
3
}
}
<|fim▁hole|> impl Foo for usize {
fn foo() -> usize {
5
}
}
}
pub fn main() {
let x: isize = a::Foo::foo();
let y: usize = a::Foo::foo();
assert_eq!(x, 3);
assert_eq!(y, 5);
}<|fim▁end|> | |
<|file_name|>overlay.js<|end_file_name|><|fim▁begin|>/**
* @module popoff/overlay
*
* Because overlay-component is hopelessly out of date.
 * This is a modern rewrite.
*/
const Emitter = require('events').EventEmitter;
const inherits = require('inherits');
const extend = require('xtend/mutable');
module.exports = Overlay;
/**
* Initialize a new `Overlay`.
*
* @param {Object} options
* @api public
*/
function Overlay(options) {
if (!(this instanceof Overlay)) return new Overlay(options);
Emitter.call(this);
extend(this, options);
if (!this.container) {
this.container = document.body || document.documentElement;
}
//create overlay element
this.element = document.createElement('div');
this.element.classList.add('popoff-overlay');
if (this.closable) {
this.element.addEventListener('click', e => {
this.hide();
});
this.element.classList.add('popoff-closable');
}
}
inherits(Overlay, Emitter);
//close overlay by click
Overlay.prototype.closable = true;
/**
* Show the overlay.
*
* Emits "show" event.
*
* @return {Overlay}
* @api public
*/
Overlay.prototype.show = function () {
this.emit('show');
this.container.appendChild(this.element);
//class removed in a timeout to save animation
setTimeout( () => {
this.element.classList.add('popoff-visible');
this.emit('afterShow');
}, 10);
return this;
};
/**
* Hide the overlay.
*
* Emits "hide" event.
*
* @return {Overlay}
* @api public
*/
Overlay.prototype.hide = function () {
this.emit('hide');
this.element.classList.remove('popoff-visible');
this.element.addEventListener('transitionend', end);
this.element.addEventListener('webkitTransitionEnd', end);
this.element.addEventListener('otransitionend', end);
this.element.addEventListener('oTransitionEnd', end);
this.element.addEventListener('msTransitionEnd', end);
var to = setTimeout(end, 1000);
var that = this;
function end () {
that.element.removeEventListener('transitionend', end);
that.element.removeEventListener('webkitTransitionEnd', end);
that.element.removeEventListener('otransitionend', end);
that.element.removeEventListener('oTransitionEnd', end);
that.element.removeEventListener('msTransitionEnd', end);
clearInterval(to);
that.container.removeChild(that.element);<|fim▁hole|>};<|fim▁end|> | that.emit('afterHide');
}
return this; |
<|file_name|>special.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.tools.special Special functions.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import numpy as np
# Import the relevant PTS classes and modules
from ...magic.core.frame import Frame
from ..basics.remote import Remote, connected_remotes
from . import time
from . import filesystem as fs
from .logging import log
# -----------------------------------------------------------------
def remote_convolution(image, kernel, host_id):
"""
This function ...
:param image:
:param kernel:
:param host_id:
"""
# Check whether we are already connected to the specified remote host
if host_id in connected_remotes and connected_remotes[host_id] is not None:
remote = connected_remotes[host_id]
else:
# Debugging
log.debug("Logging in to remote host ...")
# Create a remote instance for the specified host ID
remote = Remote()
remote.setup(host_id)
# Debugging
log.debug("Creating temporary directory remotely ...")
# Create a temporary directory to do the convolution
remote_home_directory = remote.home_directory
remote_temp_path = fs.join(remote_home_directory, time.unique_name("convolution"))
remote.create_directory(remote_temp_path)
# Debugging
#log.debug("Uploading the kernel to the remote directory ...")
# Upload the kernel FITS file to the remote directory
#remote_kernel_path = fs.join(remote_temp_path, "kernel.fits")
#remote.upload(kernel_path, remote_temp_path, new_name="kernel.fits", compress=True, show_output=True)
# Debugging
log.debug("Creating a local temporary directory ...")
# Create a temporary directory locally to contain the frames
local_temp_path = fs.join(fs.home(), time.unique_name("convolution"))
fs.create_directory(local_temp_path)
# Debugging
log.debug("Saving the image frames to the temporary directory ...")
# Save the frames
local_frame_paths = []
constant_frames = []
for frame_name in image.frames:
frame_path = fs.join(local_temp_path, frame_name + ".fits")
# Only upload and convolve non-constant frames
if not image.frames[frame_name].is_constant():
image.frames[frame_name].save(frame_path)
local_frame_paths.append(frame_path)
else:
log.debug("The " + frame_name + " frame is constant, so this won't be uploaded and convolved")
constant_frames.append(frame_name)
# Debugging
log.debug("Saving the kernel to the temporary directory ...")
local_kernel_path = fs.join(local_temp_path, "kernel.fits")
kernel.save(local_kernel_path)
# Debugging
log.debug("Uploading the image frames to the remote directory ...")
# Upload the frames
remote_frame_paths = []
for local_frame_path in local_frame_paths:
# Determine the name of the local frame file
frame_file_name = fs.name(local_frame_path)
# Debugging
log.debug("Uploading the " + fs.strip_extension(frame_file_name) + " frame ...")
# Upload the frame file
remote_frame_path = fs.join(remote_temp_path, frame_file_name)
remote.upload(local_frame_path, remote_temp_path, new_name=frame_file_name, compress=True, show_output=True)
remote_frame_paths.append(remote_frame_path)
# Debugging
log.debug("Uploading the kernel to the remote directory ...")
# Upload the kernel
remote_kernel_path = fs.join(remote_temp_path, "kernel.fits")
remote.upload(local_kernel_path, remote_temp_path, new_name="kernel.fits", compress=True, show_output=True)
# Debugging
log.debug("Creating a python script to perform the convolution remotely ...")
# Create a python script that does the convolution
#script_file = tempfile.NamedTemporaryFile()
#local_script_path = script_file.name
local_script_path = fs.join(local_temp_path, "convolve.py")
script_file = open(local_script_path, 'w')
script_file.write("#!/usr/bin/env python\n")
script_file.write("# -*- coding: utf8 -*-\n")
script_file.write("\n")
script_file.write("# Import astronomical modules\n")
script_file.write("from astropy.units import Unit\n")
script_file.write("\n")
script_file.write("# Import the relevant PTS classes and modules\n")
script_file.write("from pts.magic.core.frame import Frame\n")
script_file.write("from pts.magic.core.image import Image\n")
script_file.write("from pts.magic.core.kernel import ConvolutionKernel\n")
script_file.write("from pts.core.tools.logging import log\n")
script_file.write("\n")
script_file.write("# Inform the user\n")
script_file.write("log.info('Opening the kernel frame ...')\n")
script_file.write("\n")
script_file.write("# Open the kernel\n")
script_file.write("kernel = ConvolutionKernel.from_file('" + remote_kernel_path + "')\n")
script_file.write("\n")
for remote_frame_path in remote_frame_paths:
frame_name = fs.strip_extension(fs.name(remote_frame_path))
script_file.write("# Inform the user\n")
script_file.write("log.info('Opening the " + frame_name + " frame ...')\n")
script_file.write("\n")
script_file.write("# Open the frame\n")
script_file.write("frame = Frame.from_file('" + remote_frame_path + "')\n")
script_file.write("\n")
script_file.write("# Inform the user\n")
script_file.write("log.info('Convolving the " + frame_name + " frame ...')\n")
script_file.write("\n")
script_file.write("# Do the convolution and save the result\n")
script_file.write("frame.convolve(kernel, allow_huge=True)\n")
script_file.write("frame.save('" + remote_frame_path + "')\n") # overwrite the frame
script_file.write("\n")
#script_file.write("# Save the image\n")
#script_file.write("image.save(" + remote_image_path + ")\n")
# Write to disk
#script_file.flush()
script_file.close()
# Debugging
log.debug("Uploading the python script ...")
# Upload the script file
remote_script_path = fs.join(remote_temp_path, "convolve.py")
remote.upload(local_script_path, remote_temp_path, new_name="convolve.py", show_output=True)
# Close the local script (it is automatically removed)
#script_file.close()
# Debugging
log.debug("Executing the script remotely ...")
# Execute the script file remotely
remote.execute("python " + remote_script_path, output=False, show_output=True)
# Debugging
log.debug("Downloading the results ...")
# Download the resulting FITS file (the convolved image)
#local_result_path = self.full_output_path("convolved.fits")
#remote.download(remote_image_path, fs.directory_of(local_result_path), new_name="convolved.fits", compress=True)
for remote_frame_path in remote_frame_paths:
# Determine the name of the local frame file
frame_file_name = fs.name(remote_frame_path)
# Debugging
log.debug("Downloading the " + fs.strip_extension(frame_file_name) + " frame ...")
# Download
remote.download(remote_frame_path, local_temp_path, new_name=frame_file_name, compress=True, show_output=True)
# Remove the temporary directory on the remote's filesystem
remote.remove_directory(remote_temp_path)
# Load the result
#self.image = Image.from_file(local_result_path)
for frame_name in image.frames.keys():
if frame_name in constant_frames: continue # Skip constant frames, these are not convolved
local_frame_path = fs.join(local_temp_path, frame_name + ".fits")
image.frames[frame_name] = Frame.from_file(local_frame_path)
# Remove the local temporary directory
fs.remove_directory(local_temp_path)
# -----------------------------------------------------------------
def remote_convolution_frame(frame, kernel_path, host_id):
"""
This function ...
:param frame:
:param kernel_path:
:param host_id:
:return:
"""
# Check whether the frame is constant. If it is, we don't have to convolve!
if frame.is_constant(): return frame.copy()
# Check whether we are already connected to the specified remote host
if host_id in connected_remotes and connected_remotes[host_id] is not None:
remote = connected_remotes[host_id]
else:
# Debugging
log.debug("Logging in to remote host ...")
# Create a remote instance for the specified host ID
remote = Remote()
remote.setup(host_id)
# Debugging
log.debug("Creating temporary directory remotely ...")
# Create a temporary directory to do the convolution
remote_home_directory = remote.home_directory
remote_temp_path = fs.join(remote_home_directory, time.unique_name("convolution"))
remote.create_directory(remote_temp_path)
# Debugging
log.debug("Creating local temporary directory ...")
# Create a temporary directory locally to contain the frames
local_temp_path = fs.join(fs.home(), time.unique_name("convolution"))
fs.create_directory(local_temp_path)
# Debugging
log.debug("Writing the frame to the temporary directory ...")
# Write the frame
local_frame_path = fs.join(local_temp_path, frame.name + ".fits")
frame.save(local_frame_path)
# Debugging
#log.debug("Writing the kernel to the temporary directory ...")
# Write the kernel
#local_kernel_path = fs.join(local_temp_path, "kernel.fits")
#kernel.save(local_kernel_path)
# Debugging
log.debug("Uploading the frame to the remote directory ...")
# Upload the frame file
remote_frame_path = fs.join(remote_temp_path, frame.name)
remote.upload(local_frame_path, remote_temp_path, new_name=frame.name, compress=True, show_output=True)
# Debugging
#log.debug("Uploading the kernel to the remote directory ...")
# Upload the kernel FITS file to the remote directory
#remote_kernel_path = fs.join(remote_temp_path, "kernel.fits")
#remote.upload(local_kernel_path, remote_temp_path, new_name="kernel.fits", compress=True, show_output=True)
# Debugging
log.debug("Uploading the kernel to the remote directory ...")
# Upload the kernel FITS file to the remote directory
remote_kernel_path = fs.join(remote_temp_path, "kernel.fits")
remote.upload(kernel_path, remote_temp_path, new_name="kernel.fits", compress=True, show_output=True)
# Debugging
log.debug("Creating a python script to perform the convolution remotely ...")
# Create the script
local_script_path = fs.join(local_temp_path, "convolve.py")
script_file = open(local_script_path, 'w')
script_file.write("#!/usr/bin/env python\n")
script_file.write("# -*- coding: utf8 -*-\n")
script_file.write("\n")
script_file.write("# Import the relevant PTS classes and modules\n")
script_file.write("from pts.magic.core.frame import Frame\n")
script_file.write("from pts.core.tools.logging import log\n")
script_file.write("\n")
script_file.write("# Inform the user\n")
script_file.write("log.info('Opening the kernel frame ...')\n")
script_file.write("\n")
script_file.write("# Open the kernel frame\n")
script_file.write("kernel = Frame.from_file('" + remote_kernel_path + "')\n")
script_file.write("\n")
script_file.write("# Inform the user\n")
script_file.write("log.info('Opening the frame ...')\n")
script_file.write("\n")
script_file.write("# Open the frame\n")
script_file.write("frame = Frame.from_file('" + remote_frame_path + "')\n")
script_file.write("\n")
script_file.write("# Inform the user\n")
script_file.write("log.info('Convolving the frame ...')\n")
script_file.write("\n")
script_file.write("# Do the convolution and save the result\n")
script_file.write("convolved = frame.convolved(kernel, allow_huge=True)\n")
script_file.write("convolved.save('" + remote_frame_path + "')\n") # overwrite the frame
# Write to disk
script_file.close()
# Debugging
log.debug("Uploading the python script ...")
# Upload the script file
remote_script_path = fs.join(remote_temp_path, "convolve.py")
remote.upload(local_script_path, remote_temp_path, new_name="convolve.py", show_output=True)
# Debugging<|fim▁hole|>
# Debugging
log.debug("Downloading the result ...")
# Determine the name of the local frame file
frame_file_name = fs.name(remote_frame_path)
# Debugging
log.debug("Downloading the " + fs.strip_extension(frame_file_name) + " frame ...")
# Download
remote.download(remote_frame_path, local_temp_path, new_name=frame_file_name, compress=True, show_output=True)
# Remove the temporary directory on the remote's filesystem
remote.remove_directory(remote_temp_path)
# Load the convolved frame
convolved = Frame.from_file(local_frame_path)
# Remove the local temporary directory
fs.remove_directory(local_temp_path)
# Return the convolved frame
return convolved
# -----------------------------------------------------------------
def remote_filter_convolution_no_pts(host_id, datacube_path, wavelengths, filters):
"""
This function ...
:param host_id:
:param datacube_path:
:param wavelengths:
:param filters:
:return:
"""
# Check whether we are already connected to the specified remote host
if host_id in connected_remotes and connected_remotes[host_id] is not None:
remote = connected_remotes[host_id]
else:
# Debugging
log.debug("Logging in to remote host ...")
# Create a remote instance for the specified host ID
remote = Remote()
remote.setup(host_id)
# Debugging
log.debug("Creating temporary directory remotely ...")
# Create a temporary directory to do the convolution
remote_home_directory = remote.home_directory
remote_temp_path = fs.join(remote_home_directory, time.unique_name("filter-convolution"))
remote.create_directory(remote_temp_path)
# Debugging
log.debug("Creating local temporary directory ...")
# Create a temporary directory locally to contain the frames
local_temp_path = fs.join(fs.home(), time.unique_name("filter-convolution"))
fs.create_directory(local_temp_path)
integrated_transmissions = dict()
# Loop over the filters
for fltr in filters:
# Get the transmission data
fltr_wavelengths = fltr._Wavelengths
fltr_transmission = fltr._Transmission
fltr_integrated_transmission = fltr._IntegratedTransmission
integrated_transmissions[fltr.name] = fltr_integrated_transmission
# Save the transmission data
path = fs.join(local_temp_path, "transmission__" + str(fltr) + ".dat")
np.savetxt(path, (fltr_wavelengths, fltr_transmission))
#print(integrated_transmissions)
#print(local_temp_path)
integrated_path = fs.join(local_temp_path, "integrated_transmissions.txt")
with open(integrated_path, 'w') as integrated_trans_file:
for fltr_name in integrated_transmissions:
integrated_trans_file.write(fltr_name + ": " + str(integrated_transmissions[fltr_name]) + "\n")
# NOT FINISHED ...
# -----------------------------------------------------------------
def remote_filter_convolution(host_id, datacube_path, wavelengths, filters, keep_output=False):
"""
This function ...
:param host_id:
:param datacube_path:
:param wavelengths:
:param filters:
:param keep_output:
:return:
"""
# Check whether we are already connected to the specified remote host
if host_id in connected_remotes and connected_remotes[host_id] is not None:
remote = connected_remotes[host_id]
else:
# Debugging
log.debug("Logging in to remote host ...")
# Create a remote instance for the specified host ID
remote = Remote()
remote.setup(host_id)
# Debugging
log.debug("Creating temporary directory remotely ...")
# Create a temporary directory to do the convolution
remote_home_directory = remote.home_directory
remote_temp_path = fs.join(remote_home_directory, time.unique_name("filter-convolution"))
remote.create_directory(remote_temp_path)
# Debugging
log.debug("Creating local temporary directory ...")
# Create a temporary directory locally to contain the frames
local_temp_path = fs.join(fs.home(), time.unique_name("filter-convolution"))
fs.create_directory(local_temp_path)
# Debugging
log.debug("Uploading the datacube to the temporary remote directory ...")
# Upload the frame file
datacube_name = fs.name(datacube_path)
remote_datacube_path = fs.join(remote_temp_path, datacube_name)
remote.upload(datacube_path, remote_temp_path, compress=True, show_output=True)
# Debugging
log.debug("Writing the wavelengths to the temporary local directory ...")
local_wavelengths_path = fs.join(local_temp_path, "wavelengths.txt")
np.savetxt(local_wavelengths_path, wavelengths)
# Debugging
log.debug("Uploading the wavelengths file to the remote directory ...")
# Upload the kernel FITS file to the remote directory
remote_wavelengths_path = fs.join(remote_temp_path, "wavelengths.txt")
remote.upload(local_wavelengths_path, remote_temp_path, compress=True, show_output=True)
# Debugging
log.debug("Creating a python script to perform the filter convolution remotely ...")
# Create the script
local_script_path = fs.join(local_temp_path, "make_images.py")
script_file = open(local_script_path, 'w')
script_file.write("#!/usr/bin/env python\n")
script_file.write("# -*- coding: utf8 -*-\n")
script_file.write("\n")
script_file.write("# Import standard modules\n")
script_file.write("import numpy as np\n")
script_file.write("\n")
script_file.write("# Import the relevant PTS classes and modules\n")
script_file.write("from pts.magic.core.image import Image\n")
script_file.write("from pts.magic.core.frame import Frame\n")
script_file.write("from pts.core.basics.filter import Filter\n")
script_file.write("from pts.core.tools.logging import log\n")
script_file.write("from pts.core.tools import filesystem as fs\n")
script_file.write("\n")
script_file.write("# Inform the user\n")
script_file.write("log.info('Loading the datacube ...')\n")
script_file.write("\n")
script_file.write("# Open the datacube as an Image\n")
script_file.write("datacube = Image.from_file('" + remote_datacube_path + "', always_call_first_primary=False)\n")
script_file.write("\n")
script_file.write("# Inform the user\n")
script_file.write("log.info('Loading the wavelengths ...')\n")
script_file.write("\n")
script_file.write("# Load the wavelengths from the text file\n")
script_file.write("wavelengths = np.loadtxt('" + remote_wavelengths_path + "')\n")
script_file.write("\n")
script_file.write("# Convert the frames from neutral surface brightness to wavelength surface brightness\n")
script_file.write("for l in range(len(wavelengths)):\n")
script_file.write("\n")
script_file.write(" # Get the wavelength\n")
script_file.write(" wavelength = wavelengths[l]\n")
script_file.write("\n")
script_file.write(" # Determine the name of the frame in the datacube\n")
script_file.write(" frame_name = 'frame' + str(l)\n")
script_file.write("\n")
script_file.write(" # Divide this frame by the wavelength in micron\n")
script_file.write(" datacube.frames[frame_name] /= wavelength\n")
script_file.write("\n")
script_file.write(" # Set the new unit\n")
script_file.write(" datacube.frames[frame_name].unit = 'W / (m2 * arcsec2 * micron)'\n")
script_file.write("\n")
script_file.write("# Convert the datacube to a numpy array where wavelength is the third dimension\n")
script_file.write("fluxdensities = datacube.asarray()\n")
script_file.write("\n")
script_file.write("# Inform the user\n")
script_file.write("log.info('Creating the filters ...')\n")
script_file.write("\n")
script_file.write("filters = dict()\n")
script_file.write("\n")
for filter_name in filters:
fltr = filters[filter_name]
script_file.write("# Inform the user\n")
script_file.write("log.info('Creating the " + str(fltr) + " filter')\n")
script_file.write("\n")
script_file.write("fltr = Filter.from_string('" + str(fltr) + "')\n")
script_file.write("filters['" + filter_name + "'] = fltr\n")
script_file.write("\n")
script_file.write("# Inform the user\n")
script_file.write("log.info('Performing the filter convolutions ...')\n")
script_file.write("\n")
script_file.write("# Loop over the filters, perform the convolution\n")
script_file.write("for filter_name in filters:\n")
script_file.write("\n")
script_file.write(" log.info('Making the observed image for the ' + str(fltr) + ' filter ...')\n")
script_file.write(" fltr = filters[filter_name]\n")
script_file.write(" data = fltr.convolve(wavelengths, fluxdensities)\n")
script_file.write(" frame = Frame(data)\n")
script_file.write(" frame.unit = 'W/(m2 * arcsec2 * micron)'\n")
script_file.write(" path = fs.join('" + remote_temp_path + "', filter_name + '.fits')\n")
script_file.write(" frame.save(path)\n")
# Write to disk
script_file.close()
# Debugging
log.debug("Uploading the python script ...")
# Upload the script file
remote_script_path = fs.join(remote_temp_path, "make_images.py")
remote.upload(local_script_path, remote_temp_path, new_name="make_images.py", show_output=True)
# Debugging
log.debug("Executing the script remotely ...")
# Execute the script file remotely
remote.execute("python " + remote_script_path, output=False, show_output=True)
# Remove the datacube in the remote directory
remote.remove_file(remote_datacube_path)
# Debugging
log.debug("Downloading the convolved frames ...")
# Download
local_downloaded_temp_path = fs.join(fs.home(), fs.name(remote_temp_path))
fs.create_directory(local_downloaded_temp_path)
remote.download(remote_temp_path, local_downloaded_temp_path, compress=True, show_output=True)
# Remove the temporary directory on the remote's filesystem
remote.remove_directory(remote_temp_path)
# Remove the local temporary directory
fs.remove_directory(local_temp_path)
# Create a dictionary to contain the frames
frames = dict()
# Loop over the filters, load the frame
for filter_name in filters:
# Determine the path to the resulting FITS file
path = fs.join(local_downloaded_temp_path, filter_name + ".fits")
# Check whether the frame exists
if not fs.is_file(path): raise RuntimeError("The image for filter " + str(filters[filter_name]) + " is missing")
# Load the FITS file
frame = Frame.from_file(path)
# Add the frame to the dictionary
frames[filter_name] = frame
# Remove the downloaded temporary directory
if not keep_output: fs.remove_directory(local_downloaded_temp_path)
# Return the dictionary of frames
return frames
# -----------------------------------------------------------------<|fim▁end|> | log.debug("Executing the script remotely ...")
# Execute the script file remotely
remote.execute("python " + remote_script_path, output=False, show_output=True) |
<|file_name|>config.go<|end_file_name|><|fim▁begin|>package main
import (
"io/ioutil"
"github.com/BurntSushi/toml"
)
// ServerConfig defining configuration for pop, imap
type ServerConfig struct {
POP pop
IMAP imapClient
DB db
HTTP httpClient
}
type pop struct {
Port int
TLS bool
Cert string
Key string
}
type imapClient struct {
Server string
Port int
AddressFmt string `toml:"address_fmt"`
Folder string
}
type db struct {
Type string
DBname string
User string
Pass string
Host string
Port int
}
type httpClient struct {
Port int
}
// MustReadServerConfig from path
func MustReadServerConfig(path string) *ServerConfig {
config, err := ReadServerConfig(path)
if err != nil {<|fim▁hole|> }
return config
}
// ReadServerConfig from path
func ReadServerConfig(path string) (*ServerConfig, error) {
var config = ServerConfig{}
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
err = toml.Unmarshal(data, &config)
if err != nil {
return nil, err
}
return &config, nil
}<|fim▁end|> | panic("unable to read config: " + err.Error()) |
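// Illustrative example (not from this repository): a config.toml that ReadServerConfig would
// accept with the struct definitions above. Keys are matched to fields by BurntSushi/toml's
// default case-insensitive rules; only AddressFmt needs the explicit "address_fmt" key from its
// struct tag. All values are made-up placeholders.
//
//	[POP]
//	Port = 995
//	TLS = true
//	Cert = "/etc/ssl/pop.pem"
//	Key = "/etc/ssl/pop.key"
//
//	[IMAP]
//	Server = "imap.example.org"
//	Port = 993
//	address_fmt = "%[email protected]"
//	Folder = "INBOX"
//
//	[HTTP]
//	Port = 8080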
<|file_name|>test_base_output.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# This tool helps you to rebase package to the latest version
# Copyright (C) 2013-2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors: Petr Hracek <[email protected]>
# Tomas Hozza <[email protected]>
import six
from rebasehelper.base_output import OutputLogger
class TestBaseOutput(object):
"""
Class is used for testing OutputTool
"""<|fim▁hole|> 'logs': ['logfile1.log', 'logfile2.log']}
new_rpm_data = {'rpm': ['rpm-0.2.0.x86_64.rpm', ' rpm-devel-0.2.0.x86_64.rpm'],
'srpm': 'rpm-0.2.0.src.rpm',
'logs': ['logfile3.log', 'logfile4.log']}
patches_data = {'deleted': ['del_patch1.patch', 'del_patch2.patch'],
'modified': ['mod_patch1.patch', 'mod_patch2.patch']}
info_data = {'Information text': 'some information text'}
info_data2 = {'Next Information': 'some another information text'}
def setup(self):
OutputLogger.set_info_text('Information text', 'some information text')
OutputLogger.set_info_text('Next Information', 'some another information text')
OutputLogger.set_patch_output('Patches:', self.patches_data)
OutputLogger.set_build_data('old', self.old_rpm_data)
OutputLogger.set_build_data('new', self.new_rpm_data)
def test_base_output_global(self):
expect_dict = self.info_data
expect_dict.update(self.info_data2)
build_dict = {'old': self.old_rpm_data,
'new': self.new_rpm_data}
expected_result = {'build': build_dict,
'patch': self.patches_data,
'information': expect_dict}
        assert OutputLogger.get_summary_info() == expected_result['information']
        assert OutputLogger.get_patches() == expected_result['patch']
        assert OutputLogger.get_build('old') == expected_result['build']['old']
        assert OutputLogger.get_build('new') == expected_result['build']['new']
def test_base_output_info(self):
"""
Test Output logger info
:return:
"""
info_results = OutputLogger.get_summary_info()
expect_dict = self.info_data
expect_dict.update(self.info_data2)
assert info_results == expect_dict
def test_base_output_patches(self):
"""
Test Output logger patches
:return:
"""
patch_results = OutputLogger.get_patches()
expected_patches = self.patches_data
assert patch_results == expected_patches
def test_base_output_builds_old(self):
"""
Test Output logger old builds
:return:
"""
build_results = OutputLogger.get_build('old')
assert build_results == self.old_rpm_data
def test_base_output_builds_new(self):
"""
Test Output logger new builds
:return:
"""
build_results = OutputLogger.get_build('new')
assert build_results == self.new_rpm_data<|fim▁end|> | old_rpm_data = {'rpm': ['rpm-0.1.0.x86_64.rpm', ' rpm-devel-0.1.0.x86_64.rpm'],
'srpm': 'rpm-0.1.0.src.rpm', |
<|file_name|>exportWCON.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Mon Aug 15 20:55:19 2016
@author: ajaver
"""
import json
import os
from collections import OrderedDict
import zipfile
import numpy as np
import pandas as pd
import tables
from tierpsy.helper.misc import print_flush
from tierpsy.analysis.feat_create.obtainFeaturesHelper import WormStats
from tierpsy.helper.params import read_unit_conversions, read_ventral_side, read_fps
def getWCONMetaData(fname, READ_FEATURES=False, provenance_step='FEAT_CREATE'):
def _order_metadata(metadata_dict):
ordered_fields = ['strain', 'timestamp', 'gene', 'chromosome', 'allele',
'strain_description', 'sex', 'stage', 'ventral_side', 'media', 'arena', 'food',
'habituation', 'who', 'protocol', 'lab', 'software']
extra_fields = metadata_dict.keys() - set(ordered_fields)
ordered_fields += sorted(extra_fields)
ordered_metadata = OrderedDict()
for field in ordered_fields:
if field in metadata_dict:
ordered_metadata[field] = metadata_dict[field]
return ordered_metadata
with tables.File(fname, 'r') as fid:
if not '/experiment_info' in fid:
experiment_info = {}
else:
experiment_info = fid.get_node('/experiment_info').read()
experiment_info = json.loads(experiment_info.decode('utf-8'))
provenance_tracking = fid.get_node('/provenance_tracking/' + provenance_step).read()
provenance_tracking = json.loads(provenance_tracking.decode('utf-8'))
commit_hash = provenance_tracking['commit_hash']
if 'tierpsy' in commit_hash:
tierpsy_version = commit_hash['tierpsy']
else:
tierpsy_version = commit_hash['MWTracker']
MWTracker_ver = {"name":"tierpsy (https://github.com/ver228/tierpsy-tracker)",
"version": tierpsy_version,
"featureID":"@OMG"}
if not READ_FEATURES:
experiment_info["software"] = MWTracker_ver
else:
#add open_worm_analysis_toolbox info and save as a list of "softwares"
open_worm_ver = {"name":"open_worm_analysis_toolbox (https://github.com/openworm/open-worm-analysis-toolbox)",
"version":commit_hash['open_worm_analysis_toolbox'],
"featureID":""}
experiment_info["software"] = [MWTracker_ver, open_worm_ver]
return _order_metadata(experiment_info)
def __reformatForJson(A):
if isinstance(A, (int, float)):
return A
good = ~np.isnan(A) & (A != 0)
dd = A[good]
if dd.size > 0:
dd = np.abs(np.floor(np.log10(np.abs(dd)))-2)
precision = max(2, int(np.min(dd)))
A = np.round(A.astype(np.float64), precision)
A = np.where(np.isnan(A), None, A)
#wcon specification require to return a single number if it is only one element list
if A.size == 1:
return A[0]
else:
return A.tolist()
def __addOMGFeat(fid, worm_feat_time, worm_id):
worm_features = OrderedDict()
#add time series features
for col_name, col_dat in worm_feat_time.iteritems():
if not col_name in ['worm_index', 'timestamp']:
worm_features[col_name] = col_dat.values
worm_path = '/features_events/worm_%i' % worm_id
worm_node = fid.get_node(worm_path)
#add event features
for feature_name in worm_node._v_children:
feature_path = worm_path + '/' + feature_name
worm_features[feature_name] = fid.get_node(feature_path)[:]
return worm_features
def _get_ventral_side(features_file):
ventral_side = read_ventral_side(features_file)
if not ventral_side or ventral_side == 'unknown':
ventral_type = '?'
else:
#we will merge the ventral and dorsal contours so the ventral contour is clockwise
ventral_type='CW'
return ventral_type
def _getData(features_file, READ_FEATURES=False, IS_FOR_WCON=True):
if IS_FOR_WCON:
lab_prefix = '@OMG '
else:
lab_prefix = ''
with pd.HDFStore(features_file, 'r') as fid:
if not '/features_timeseries' in fid:
return {} #empty file nothing to do here
features_timeseries = fid['/features_timeseries']
feat_time_group_by_worm = features_timeseries.groupby('worm_index');
ventral_side = _get_ventral_side(features_file)
with tables.File(features_file, 'r') as fid:
#fps used to adjust timestamp to real time
fps = read_fps(features_file)
#get pointers to some useful data
skeletons = fid.get_node('/coordinates/skeletons')
dorsal_contours = fid.get_node('/coordinates/dorsal_contours')
ventral_contours = fid.get_node('/coordinates/ventral_contours')
#let's append the data of each individual worm as a element in a list
all_worms_feats = []
#group by iterator will return sorted worm indexes
for worm_id, worm_feat_time in feat_time_group_by_worm:
worm_id = int(worm_id)
#read worm skeletons data
worm_skel = skeletons[worm_feat_time.index]
worm_dor_cnt = dorsal_contours[worm_feat_time.index]
worm_ven_cnt = ventral_contours[worm_feat_time.index]
#start ordered dictionary with the basic features
worm_basic = OrderedDict()
worm_basic['id'] = str(worm_id)
worm_basic['head'] = 'L'
worm_basic['ventral'] = ventral_side
worm_basic['ptail'] = worm_ven_cnt.shape[1]-1 #index starting with 0
worm_basic['t'] = worm_feat_time['timestamp'].values/fps #convert from frames to seconds
worm_basic['x'] = worm_skel[:, :, 0]
worm_basic['y'] = worm_skel[:, :, 1]
contour = np.hstack((worm_ven_cnt, worm_dor_cnt[:, ::-1, :]))
worm_basic['px'] = contour[:, :, 0]
worm_basic['py'] = contour[:, :, 1]
if READ_FEATURES:
worm_features = __addOMGFeat(fid, worm_feat_time, worm_id)
for feat in worm_features:
worm_basic[lab_prefix + feat] = worm_features[feat]
if IS_FOR_WCON:
for x in worm_basic:
if not x in ['id', 'head', 'ventral', 'ptail']:
worm_basic[x] = __reformatForJson(worm_basic[x])
#append features
all_worms_feats.append(worm_basic)
return all_worms_feats
def _getUnits(features_file, READ_FEATURES=False):
fps_out, microns_per_pixel_out, _ = read_unit_conversions(features_file)
xy_units = microns_per_pixel_out[1]
time_units = fps_out[2]
units = OrderedDict()
units["size"] = "mm" #size of the plate
units['t'] = time_units #frames or seconds
for field in ['x', 'y', 'px', 'py']:
units[field] = xy_units #(pixels or micrometers)
if READ_FEATURES:
#TODO how to change microns to pixels when required
ws = WormStats()
for field, unit in ws.features_info['units'].iteritems():
units['@OMG ' + field] = unit
return units
def exportWCONdict(features_file, READ_FEATURES=False):
metadata = getWCONMetaData(features_file, READ_FEATURES)
data = _getData(features_file, READ_FEATURES)
units = _getUnits(features_file, READ_FEATURES)
#units = {x:units[x].replace('degrees', '1') for x in units}
#units = {x:units[x].replace('radians', '1') for x in units}
wcon_dict = OrderedDict()
wcon_dict['metadata'] = metadata
wcon_dict['units'] = units
wcon_dict['data'] = data
return wcon_dict
def getWCOName(features_file):
return features_file.replace('_features.hdf5', '.wcon.zip')
def exportWCON(features_file, READ_FEATURES=False):
base_name = os.path.basename(features_file).replace('_features.hdf5', '')
print_flush("{} Exporting data to WCON...".format(base_name))
wcon_dict = exportWCONdict(features_file, READ_FEATURES)
wcon_file = getWCOName(features_file)
#with gzip.open(wcon_file, 'wt') as fid:
# json.dump(wcon_dict, fid, allow_nan=False)
with zipfile.ZipFile(wcon_file, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
zip_name = os.path.basename(wcon_file).replace('.zip', '')
wcon_txt = json.dumps(wcon_dict, allow_nan=False, separators=(',', ':'))
zf.writestr(zip_name, wcon_txt)
print_flush("{} Finised to export to WCON.".format(base_name))
if __name__ == '__main__':<|fim▁hole|> wcon_file = getWCOName(features_file)
wcon_dict = exportWCONdict(features_file)
wcon_txt = json.dumps(wcon_dict, allow_nan=False, indent=4)
#%%
with zipfile.ZipFile(wcon_file, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
zip_name = os.path.basename(wcon_file).replace('.zip', '')
zf.writestr(zip_name, wcon_txt)
#%%
# import wcon
# wc = wcon.WCONWorms()
# wc = wc.load_from_file(JSON_path, validate_against_schema = False)<|fim▁end|> |
features_file = '/Users/ajaver/OneDrive - Imperial College London/Local_Videos/single_worm/global_sample_v3/883 RC301 on food R_2011_03_07__11_10_27___8___1_features.hdf5'
#exportWCON(features_file)
|
<|file_name|>mrp_bom.py<|end_file_name|><|fim▁begin|># -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2015 Trustcode - www.trustcode.com.br #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from openerp import models, api, fields
class mrp_bom(models.Model):
_inherit = 'mrp.bom'
def _bom_explode(self, cr, uid, bom, product, factor, properties=None,
level=0, routing_id=False, previous_products=None,
master_bom=None, context=None):
res = super(mrp_bom, self)._bom_explode(
cr, uid, bom, product, factor,
properties=properties, level=level,
routing_id=routing_id,
previous_products=previous_products,
master_bom=master_bom, context=context
)
results = res[0] # product_lines
results2 = res[1] # workcenter_lines
indice = 0
for bom_line_id in bom.bom_line_ids:
line = results[indice]
line['largura'] = bom_line_id.largura
line['comprimento'] = bom_line_id.comprimento
line['unidades'] = bom_line_id.unidades
indice += 1
return results, results2
class mrp_bom_line(models.Model):
_inherit = 'mrp.bom.line'
largura = fields.Float(string="Largura", digits=(16, 6))
comprimento = fields.Float(string="Comprimento", digits=(16, 6))
unidades = fields.Float(string="Unidades", digits=(16, 6))
@api.onchange('largura', 'comprimento', 'unidades')
def compute_quantity(self):
self.product_qty = (self.largura or 1) * \
(self.comprimento or 1) * (self.unidades or 1)
class mrp_production_product_line(models.Model):
_inherit = 'mrp.production.product.line'
largura = fields.Float(string="Largura", digits=(16, 6))
comprimento = fields.Float(string="Comprimento", digits=(16, 6))
unidades = fields.Float(string="Unidades", digits=(16, 6))
class stock_move(models.Model):
_inherit = 'stock.move'
<|fim▁hole|> largura = fields.Float(string="Largura", digits=(16, 6))
comprimento = fields.Float(string="Comprimento", digits=(16, 6))
unidades = fields.Float(string="Unidades", digits=(16, 6))
class mrp_production(models.Model):
_inherit = 'mrp.production'
def _make_production_consume_line(self, cr, uid, line, context=None):
move_id = super(mrp_production, self)\
._make_production_consume_line(
cr, uid, line, context=context)
self.pool['stock.move'].write(cr, uid, move_id,
{'unidades': line.unidades,
'comprimento': line.comprimento,
'largura': line.largura})
return move_id<|fim▁end|> | |
<|file_name|>memozo.py<|end_file_name|><|fim▁begin|>import os
import functools
import codecs
import pickle<|fim▁hole|>class Memozo(object):
def __init__(self, path='./'):
self.base_path = path
memozo_file = os.path.join(self.base_path, utils.MEMOZO_FILE_NAME)
if not os.path.exists(memozo_file):
with codecs.open(memozo_file, 'w', encoding=utils.ENCODING) as f:
f.write('datetime\thash\tfile name\tfunction name\tparameters\n')
f.write('--------\t----\t---------\t-------------\t----------\n')
def __call__(self, name=None, ext='file'):
def wrapper(func):
_name = func.__name__ if name is None else name
@functools.wraps(func)
def _wrapper(*args, **kwargs):
bound_args = utils.get_bound_args(func, *args, **kwargs)
args_str = utils.get_args_str(bound_args)
sha1 = utils.get_hash(_name, func.__name__, args_str)
file_path = os.path.join(self.base_path, "{}_{}.{}".format(_name, sha1, ext))
if utils.log_exisits(self.base_path, _name, func.__name__, args_str) and os.path.exists(file_path):
with open(file_path, 'r') as f:
obj = f.readlines()
return obj
obj = func(*args, **kwargs)
with open(file_path, 'w') as f:
f.writelines(obj)
utils.write(self.base_path, _name, func.__name__, args_str)
return obj
return _wrapper
return wrapper
def codecs(self, name=None, ext='file', encoding=None):
def wrapper(func):
_name = func.__name__ if name is None else name
@functools.wraps(func)
def _wrapper(*args, **kwargs):
bound_args = utils.get_bound_args(func, *args, **kwargs)
args_str = utils.get_args_str(bound_args)
sha1 = utils.get_hash(_name, func.__name__, args_str)
file_path = os.path.join(self.base_path, "{}_{}.{}".format(_name, sha1, ext))
if utils.log_exisits(self.base_path, _name, func.__name__, args_str) and os.path.exists(file_path):
with codecs.open(file_path, 'r', encoding) as f:
obj = f.readlines()
return obj
obj = func(*args, **kwargs)
with codecs.open(file_path, 'w', encoding) as f:
f.writelines(obj)
utils.write(self.base_path, _name, func.__name__, args_str)
return obj
return _wrapper
return wrapper
def generator(self, name=None, ext='file', line_type='str', delimiter='\t'):
def wrapper(func):
_name = func.__name__ if name is None else name
@functools.wraps(func)
def _wrapper(*args, **kwargs):
# get cached data path
bound_args = utils.get_bound_args(func, *args, **kwargs)
args_str = utils.get_args_str(bound_args)
sha1 = utils.get_hash(_name, func.__name__, args_str)
file_path = os.path.join(self.base_path, "{}_{}.{}".format(_name, sha1, ext))
# if cached data exists, return generator using cached data
if utils.log_exisits(self.base_path, _name, func.__name__, args_str) and os.path.exists(file_path):
def gen_cached_data():
with codecs.open(file_path, 'r', utils.ENCODING) as f:
for line in f:
if line_type == 'tuple':
line = line.split(delimiter)
yield line
return gen_cached_data()
gen = func(*args, **kwargs)
# if no cached data exists, generator not only yield value but save value at each iteration
def generator_with_cache(gen, file_path):
with codecs.open(file_path, 'w', utils.ENCODING) as f:
for e in gen:
if line_type == 'str':
f.write(e)
elif line_type == 'tuple':
f.write(delimiter.join(e) + '\n')
yield e
utils.write(self.base_path, _name, func.__name__, args_str)
return generator_with_cache(gen, file_path)
return _wrapper
return wrapper
def pickle(self, name=None, ext='pickle', protocol=None):
def wrapper(func):
_name = func.__name__ if name is None else name
@functools.wraps(func)
def _wrapper(*args, **kwargs):
bound_args = utils.get_bound_args(func, *args, **kwargs)
args_str = utils.get_args_str(bound_args)
sha1 = utils.get_hash(_name, func.__name__, args_str)
file_path = os.path.join(self.base_path, "{}_{}.{}".format(_name, sha1, ext))
if utils.log_exisits(self.base_path, _name, func.__name__, args_str) and os.path.exists(file_path):
with open(file_path, 'rb') as f:
obj = pickle.load(f)
return obj
obj = func(*args, **kwargs)
with open(file_path, 'wb') as f:
pickle.dump(obj, f, protocol=protocol)
utils.write(self.base_path, _name, func.__name__, args_str)
return obj
return _wrapper
return wrapper<|fim▁end|> |
from . import utils
|
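# Usage sketch (illustrative, not part of memozo.py; the import path and the cache directory are
# assumptions): cache an expensive function with the pickle decorator. The first call computes the
# result and stores a pickle next to the memozo log file; a repeated call with the same arguments
# loads the cached result instead of recomputing it.
#
#     from memozo import Memozo
#
#     m = Memozo('./cache')          # './cache' must already exist
#
#     @m.pickle(name='tokens')
#     def tokenize(path):
#         with open(path) as f:
#             return [line.split() for line in f]
#
#     tokens = tokenize('corpus.txt')   # computed, then written to ./cache
#     tokens = tokenize('corpus.txt')   # served from the cached pickle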
<|file_name|>fileimport.py<|end_file_name|><|fim▁begin|># Copyright (C) 2018-2019 Matthias Klumpp <[email protected]>
#
# Licensed under the GNU Lesser General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the license, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of<|fim▁hole|># MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import logging as log
from glob import glob
from laniakea import LkModule
from laniakea.dud import Dud
from laniakea.utils import get_dir_shorthand_for_uuid, random_string
from laniakea.db import session_scope, Job, JobResult, JobKind, SourcePackage
from laniakea.msgstream import EventEmitter
from .rubiconfig import RubiConfig
from .utils import safe_rename
def accept_upload(conf, dud, event_emitter):
'''
Accept the upload and move its data to the right places.
'''
job_success = dud.get('X-Spark-Success') == 'Yes'
job_id = dud.get('X-Spark-Job')
# mark job as accepted and done
with session_scope() as session:
job = session.query(Job).filter(Job.uuid == job_id).one_or_none()
if not job:
log.error('Unable to mark job \'{}\' as done: The Job was not found.'.format(job_id))
# this is a weird situation, there is no proper way to handle it as this indicates a bug
# in the Laniakea setup or some other oddity.
# The least harmful thing to do is to just leave the upload alone and try again later.
return
job.result = JobResult.SUCCESS if job_success else JobResult.FAILURE
job.latest_log_excerpt = None
# move the log file and Firehose reports to the log storage
log_target_dir = os.path.join(conf.log_storage_dir, get_dir_shorthand_for_uuid(job_id))
firehose_target_dir = os.path.join(log_target_dir, 'firehose')
for fname in dud.get_files():
if fname.endswith('.log'):
os.makedirs(log_target_dir, exist_ok=True)
# move the logfile to its destination and ensure it is named correctly
target_fname = os.path.join(log_target_dir, job_id + '.log')
safe_rename(fname, target_fname)
elif fname.endswith('.firehose.xml'):
os.makedirs(firehose_target_dir, exist_ok=True)
# move the firehose report to its own directory and rename it
fh_target_fname = os.path.join(firehose_target_dir, job_id + '.firehose.xml')
safe_rename(fname, fh_target_fname)
# handle different job data
if job.module == LkModule.ISOTOPE:
from .import_isotope import handle_isotope_upload
handle_isotope_upload(session,
success=job_success,
conf=conf,
dud=dud,
job=job,
event_emitter=event_emitter)
elif job.kind == JobKind.PACKAGE_BUILD:
# the package has been imported by Dak, so we just announce this
# event to the world
spkg = session.query(SourcePackage) \
.filter(SourcePackage.source_uuid == job.trigger) \
.filter(SourcePackage.version == job.version) \
.one_or_none()
if spkg:
suite_target_name = '?'
if job.data:
suite_target_name = job.data.get('suite', '?')
event_data = {'pkgname': spkg.name,
'version': job.version,
'architecture': job.architecture,
'suite': suite_target_name,
'job_id': job_id}
if job_success:
event_emitter.submit_event_for_mod(LkModule.ARCHIVE, 'package-build-success', event_data)
else:
event_emitter.submit_event_for_mod(LkModule.ARCHIVE, 'package-build-failed', event_data)
else:
event_emitter.submit_event('upload-accepted', {'job_id': job_id, 'job_failed': not job_success})
# remove the upload description file from incoming
os.remove(dud.get_dud_file())
log.info("Upload {} accepted.", dud.get_filename())
def reject_upload(conf, dud, reason='Unknown', event_emitter=None):
'''
If a file has issues, we reject it and put it into the rejected queue.
'''
os.makedirs(conf.rejected_dir, exist_ok=True)
# move the files referenced by the .dud file
random_suffix = random_string(4)
for fname in dud.get_files():
target_fname = os.path.join(conf.rejected_dir, os.path.basename(fname))
if os.path.isfile(target_fname):
target_fname = target_fname + '+' + random_suffix
# move the file to the rejected dir
safe_rename(fname, target_fname)
# move the .dud file itself
target_fname = os.path.join(conf.rejected_dir, dud.get_filename())
if os.path.isfile(target_fname):
target_fname = target_fname + '+' + random_suffix
safe_rename(dud.get_dud_file(), target_fname)
# also store the reject reason for future reference
with open(target_fname + '.reason', 'w') as f:
f.write(reason + '\n')
log.info('Upload {} rejected.', dud.get_filename())
if event_emitter:
event_emitter.submit_event('upload-rejected', {'dud_filename': dud.get_filename(), 'reason': reason})
def import_files_from(conf, incoming_dir):
'''
Import files from an untrusted incoming source.
IMPORTANT: We assume that the uploader can not edit their files post-upload.
If they could, we would be vulnerable to timing attacks here.
'''
emitter = EventEmitter(LkModule.RUBICON)
for dud_file in glob(os.path.join(incoming_dir, '*.dud')):
dud = Dud(dud_file)
try:
dud.validate(keyrings=conf.trusted_gpg_keyrings)
except Exception as e:
reason = 'Signature validation failed: {}'.format(str(e))
reject_upload(conf, dud, reason, emitter)
continue
# if we are here, the file is good to go
accept_upload(conf, dud, emitter)
def import_files(options):
conf = RubiConfig()
if not options.incoming_dir:
print('No incoming directory set. Can not process any files.')
sys.exit(1)
import_files_from(conf, options.incoming_dir)<|fim▁end|> | |
<|file_name|>latest_blessed_model_strategy_test.py<|end_file_name|><|fim▁begin|># Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0<|fim▁hole|>#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for LatestBlessedModelStrategy."""
import tensorflow as tf
from tfx import types
from tfx.components.model_validator import constants as model_validator
from tfx.dsl.input_resolution.strategies import latest_blessed_model_strategy
from tfx.orchestration import metadata
from tfx.types import standard_artifacts
from tfx.utils import test_case_utils
from ml_metadata.proto import metadata_store_pb2
class LatestBlessedModelStrategyTest(test_case_utils.TfxTest):
def setUp(self):
super().setUp()
self._connection_config = metadata_store_pb2.ConnectionConfig()
self._connection_config.sqlite.SetInParent()
self._metadata = self.enter_context(
metadata.Metadata(connection_config=self._connection_config))
self._store = self._metadata.store
def _set_model_blessing_bit(self, artifact: types.Artifact, model_id: int,
is_blessed: int):
artifact.mlmd_artifact.custom_properties[
model_validator.ARTIFACT_PROPERTY_BLESSED_KEY].int_value = is_blessed
artifact.mlmd_artifact.custom_properties[
model_validator
.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY].int_value = model_id
def testStrategy(self):
# Model with id 1, will be blessed.
model_one = standard_artifacts.Model()
model_one.uri = 'model_one'
model_one.id = 1
# Model with id 2, will be blessed.
model_two = standard_artifacts.Model()
model_two.uri = 'model_two'
model_two.id = 2
# Model with id 3, will not be blessed.
model_three = standard_artifacts.Model()
model_three.uri = 'model_three'
model_three.id = 3
model_blessing_one = standard_artifacts.ModelBlessing()
self._set_model_blessing_bit(model_blessing_one, model_one.id, 1)
model_blessing_two = standard_artifacts.ModelBlessing()
self._set_model_blessing_bit(model_blessing_two, model_two.id, 1)
strategy = latest_blessed_model_strategy.LatestBlessedModelStrategy()
result = strategy.resolve_artifacts(
self._store, {
'model': [model_one, model_two, model_three],
'model_blessing': [model_blessing_one, model_blessing_two]
})
self.assertIsNotNone(result)
self.assertEqual([a.uri for a in result['model']], ['model_two'])
if __name__ == '__main__':
tf.test.main()<|fim▁end|> | |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 Corey Farwell
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io::{Write, Read};
use std::process::{Command, Stdio};
pub struct DotBuilder {<|fim▁hole|>
impl DotBuilder {
pub fn new_digraph(name: &str) -> Self {
DotBuilder{buf: format!("digraph \"{}\" {}", name, "{")}
}
pub fn set_ratio(&mut self, ratio: &str) {
self.buf.push_str(&format!("ratio={};", ratio))
}
pub fn set_node_attrs(&mut self, node: &str, attrs: &str) {
self.buf.push_str(&format!("\"{}\" [{}];", node, attrs));
}
pub fn add_edge(&mut self, from: &str, to: &str) {
self.buf.push_str(&format!("\"{}\" -> \"{}\";", from, to));
}
pub fn finish(&mut self) {
self.buf.push_str("}");
}
pub fn png_bytes(&self) -> Vec<u8> {
let child = Command::new("dot").arg("-Tpng")
.stdin(Stdio::piped()).stdout(Stdio::piped())
.spawn().unwrap();
child.stdin.unwrap().write_all(self.buf.as_bytes()).unwrap();
let mut ret = vec![];
child.stdout.unwrap().read_to_end(&mut ret).unwrap();
ret
}
}<|fim▁end|> | buf: String,
} |
<|file_name|>issue-4446.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//<|fim▁hole|>// except according to those terms.
// pretty-expanded FIXME #23616
#![feature(old_io)]
use std::old_io::println;
use std::sync::mpsc::channel;
use std::thread;
pub fn main() {
let (tx, rx) = channel();
tx.send("hello, world").unwrap();
thread::spawn(move|| {
println(rx.recv().unwrap());
}).join().ok().unwrap();
}<|fim▁end|> | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed |
<|file_name|>xSlidingWindowMaximum.py<|end_file_name|><|fim▁begin|>'''
Given an array nums, there is a sliding window of size k which is moving from the very left of the array to the very right. You can only see the k numbers in the window. Each time the sliding window moves right by one position.
For example,
Given nums = [1,3,-1,-3,5,3,6,7], and k = 3.
<|fim▁hole|>Window position Max
--------------- -----
[1 3 -1] -3 5 3 6 7 3
1 [3 -1 -3] 5 3 6 7 3
1 3 [-1 -3 5] 3 6 7 5
1 3 -1 [-3 5 3] 6 7 5
1 3 -1 -3 [5 3 6] 7 6
1 3 -1 -3 5 [3 6 7] 7
Therefore, return the max sliding window as [3,3,5,5,6,7].
Note:
You may assume k is always valid, i.e., 1 ≤ k ≤ the input array's size for a non-empty array.
Follow up:
Could you solve it in linear time?
Hint:
How about using a data structure such as deque (double-ended queue)?
The queue size need not be the same as the window’s size.
Remove redundant elements and the queue should store only elements that need to be considered.
Tags: Heap
Similar Problems: (H) Minimum Window Substring, (E) Min Stack, (H) Longest Substring with At Most Two Distinct Characters
@author: Chauncey
'''
import sys
import collections
class Solution:
# @param {integer[]} nums
# @param {integer} k
# @return {integer[]}
def maxSlidingWindow(self, nums, k):
res = []
l = len(nums)
if l == 0: return res
dq = collections.deque()
for i in xrange(l):
while dq and nums[dq[-1]] <= nums[i]:
dq.pop()
dq.append(i)
if i >= k - 1:
res.append(nums[dq[0]])
if dq[0] == i - k + 1:
dq.popleft()
return res
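# Note added for clarity (not part of the original solution): the deque holds indices whose values
# are strictly decreasing, so dq[0] is always the index of the current window's maximum.
# For nums = [1, 3, -1, ...] and k = 3 the first window evolves as:
#   i = 0: dq = [0]                   (value 1)
#   i = 1: 1 <= 3, pop 0 -> dq = [1]  (value 3)
#   i = 2: dq = [1, 2]                (values 3, -1) -> window max = nums[dq[0]] = 3
# Each index is appended and popped at most once, so the pass is O(n) time and O(k) extra space.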
if __name__ == '__main__':
solution = Solution();
print solution.maxSlidingWindow([1,3,-1,-3,5,3,6,7], 3)<|fim▁end|> | |
<|file_name|>merge_measures_blockbox.py<|end_file_name|><|fim▁begin|>from django.core.management.base import BaseCommand
from lizard_blockbox import import_helpers
class Command(BaseCommand):
args = ""
help = "Merge the measure shapes to get one json."
<|fim▁hole|><|fim▁end|> | def handle(self, *args, **kwargs):
import_helpers.merge_measures_blockbox(self.stdout) |
<|file_name|>mixup.py<|end_file_name|><|fim▁begin|># Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mixup: Beyond Empirical Risk Minimization.
Adaption to SSL of MixUp: https://arxiv.org/abs/1710.09412
"""
import functools<|fim▁hole|>
import tensorflow as tf
from absl import app
from absl import flags
from libml import data, utils, models
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class Mixup(models.MultiModel):
def augment(self, x, l, beta, **kwargs):
del kwargs
mix = tf.distributions.Beta(beta, beta).sample([tf.shape(x)[0], 1, 1, 1])
mix = tf.maximum(mix, 1 - mix)
xmix = x * mix + x[::-1] * (1 - mix)
lmix = l * mix[:, :, 0, 0] + l[::-1] * (1 - mix[:, :, 0, 0])
return xmix, lmix
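    # Note added for clarity: this is the mixup rule referenced in the module docstring,
    #   x_tilde = m * x_i + (1 - m) * x_j,   y_tilde = m * y_i + (1 - m) * y_j,
    # with m drawn per example from Beta(beta, beta) and x_j taken from the reversed batch
    # (x[::-1]). Taking max(m, 1 - m) keeps m >= 0.5, so every mixed sample stays closer to
    # its first component and the label mixing stays consistent with that pairing.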
def model(self, batch, lr, wd, ema, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
wd *= lr
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
def get_logits(x):
logits = classifier(x, training=True)
return logits
x, labels_x = self.augment(xt_in, tf.one_hot(l_in, self.nclass), **kwargs)
logits_x = get_logits(x)
post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
y, labels_y = self.augment(y_in, tf.nn.softmax(get_logits(y_in)), **kwargs)
labels_y = tf.stop_gradient(labels_y)
logits_y = get_logits(y)
loss_xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_x, logits=logits_x)
loss_xe = tf.reduce_mean(loss_xe)
loss_xeu = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_y, logits=logits_y)
loss_xeu = tf.reduce_mean(loss_xeu)
tf.summary.scalar('losses/xe', loss_xe)
tf.summary.scalar('losses/xeu', loss_xeu)
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
post_ops.append(ema_op)
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss_xe + loss_xeu, colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = Mixup(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
beta=FLAGS.beta,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('beta', 0.5, 'Mixup beta distribution.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)<|fim▁end|> | import os |
<|file_name|>activation.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use dom::bindings::codegen::InheritTypes::{EventCast, EventTargetCast};
use dom::bindings::js::{JSRef, Temporary, OptionalRootable, Rootable};
use dom::element::{Element, ActivationElementHelpers};
use dom::event::{Event, EventHelpers, EventBubbles, EventCancelable};
use dom::eventtarget::EventTarget;
use dom::mouseevent::MouseEvent;
use dom::node::window_from_node;
use std::borrow::ToOwned;
/// Trait for elements with defined activation behavior
pub trait Activatable : Copy {
fn as_element(&self) -> Temporary<Element>;
// Is this particular instance of the element activatable?
fn is_instance_activatable(&self) -> bool;
// https://html.spec.whatwg.org/multipage/#run-pre-click-activation-steps
fn pre_click_activation(&self);
// https://html.spec.whatwg.org/multipage/#run-canceled-activation-steps
fn canceled_activation(&self);
// https://html.spec.whatwg.org/multipage/#run-post-click-activation-steps
fn activation_behavior(&self, event: JSRef<Event>, target: JSRef<EventTarget>);
// https://html.spec.whatwg.org/multipage/#implicit-submission
fn implicit_submission(&self, ctrlKey: bool, shiftKey: bool, altKey: bool, metaKey: bool);
// https://html.spec.whatwg.org/multipage/#run-synthetic-click-activation-steps
fn synthetic_click_activation(&self, ctrlKey: bool, shiftKey: bool, altKey: bool, metaKey: bool) {
let element = self.as_element().root();
// Step 1
if element.r().click_in_progress() {
return;
}
// Step 2
element.r().set_click_in_progress(true);
// Step 3
self.pre_click_activation();
// Step 4
// https://html.spec.whatwg.org/multipage/#fire-a-synthetic-mouse-event
let win = window_from_node(element.r()).root();
let target: JSRef<EventTarget> = EventTargetCast::from_ref(element.r());
let mouse = MouseEvent::new(win.r(), "click".to_owned(),
EventBubbles::DoesNotBubble, EventCancelable::NotCancelable, Some(win.r()), 1,
0, 0, 0, 0, ctrlKey, shiftKey, altKey, metaKey,
0, None).root();
let event: JSRef<Event> = EventCast::from_ref(mouse.r());
event.fire(target);
// Step 5
if event.DefaultPrevented() {<|fim▁hole|> } else {
// post click activation
self.activation_behavior(event, target);
}
// Step 6
element.r().set_click_in_progress(false);
}
}<|fim▁end|> | self.canceled_activation(); |
<|file_name|>tokenizer.rs<|end_file_name|><|fim▁begin|>use std::collections::hashmap::HashMap;
use std::char::is_digit;
use super::token;
use super::token::XPathToken;
pub struct XPathTokenizer {
xpath: XPathString,
start: uint,
prefer_recognition_of_operator_names: bool,
}
pub type TokenResult = Result<XPathToken, TokenizerErr>;
#[deriving(Show,PartialEq,Clone)]
pub enum TokenizerErr {
MissingLocalName,
MismatchedQuoteCharacters,
UnableToCreateToken,
}
struct XPathString {
xpath: Vec<char>,
}
impl XPathString {
fn new(xpath: &str) -> XPathString {
XPathString {
xpath: xpath.chars().collect(),
}
}
fn len(& self) -> uint {
self.xpath.len()
}
fn str_at_is(& self, offset: uint, needle: &[char]) -> bool {
let s_len = needle.len();
if self.xpath.len() < offset + s_len { return false; }
let xpath_chars = self.xpath.slice(offset, offset + s_len);
needle == xpath_chars
}
fn valid_ncname_start_char(& self, offset: uint) -> bool {
let c = self.xpath[offset];
if c >= 'A' && c <= 'Z' { return true }
if c == '_' { return true }
if c >= 'a' && c <= 'z' { return true }
// TODO: All non-ASCII codepoints
return false;
}
fn valid_ncname_follow_char(& self, offset: uint) -> bool {
let c = self.xpath[offset];
if self.valid_ncname_start_char(offset) { return true }
if c == '-' { return true }
if c == '.' { return true }
if c >= '0' && c <= '9' { return true }
// TODO: All non-ASCII codepoints
return false;
}
fn while_valid_string(& self, offset: uint) -> uint {
let mut offset = offset;
if offset < self.xpath.len() && self.valid_ncname_start_char(offset) {
offset += 1;
while offset < self.xpath.len() && self.valid_ncname_follow_char(offset) {
offset += 1;
}
}
return offset;
}
fn while_valid_number(& self, offset: uint) -> uint {
let mut offset = offset;
while offset < self.xpath.len() && is_number_char(self.xpath[offset]) {
offset += 1;
}
return offset;
}
fn while_not_character(& self, offset: uint, end_char: char) -> uint {
let mut offset = offset;
while offset < self.xpath.len() && self.xpath[offset] != end_char {
offset += 1;
}
return offset;
}
fn substr(& self, start: uint, end: uint) -> String {
String::from_chars(self.xpath.slice(start, end))
}
fn safe_substr(& self, start: uint, end: uint) -> Option<String> {
if self.xpath.len() >= end {<|fim▁hole|> } else {
None
}
}
fn char_at(&self, offset: uint) -> char {
self.xpath[offset]
}
fn char_at_is(&self, offset: uint, c: char) -> bool {
let has_one_more = self.xpath.len() >= offset + 1;
has_one_more && self.xpath[offset] == c
}
fn char_at_is_not(&self, offset: uint, c: char) -> bool {
let has_one_more = self.xpath.len() >= offset + 1;
! has_one_more || self.xpath[offset] != c
}
fn char_at_is_not_digit(& self, offset: uint) -> bool {
let has_more_chars = self.xpath.len() >= offset + 1;
! has_more_chars || ! is_digit(self.xpath[offset])
}
fn is_xml_space(&self, offset: uint) -> bool {
let c = self.xpath[offset];
return
c == ' ' ||
c == '\t' ||
c == '\n' ||
c == '\r';
}
fn end_of_whitespace(& self, offset: uint) -> uint {
let mut offset = offset;
while offset < self.xpath.len() && self.is_xml_space(offset) {
offset += 1;
}
offset
}
}
static QUOTE_CHARS: [char, .. 2] = ['\'', '\"'];
impl XPathTokenizer {
pub fn new(xpath: & str) -> XPathTokenizer {
XPathTokenizer {
xpath: XPathString::new(xpath),
start: 0,
prefer_recognition_of_operator_names: false,
}
}
pub fn has_more_tokens(& self) -> bool {
self.xpath.len() > self.start
}
fn two_char_tokens(& self) -> HashMap<String, XPathToken> {
let mut m = HashMap::new();
m.insert("<=".to_string(), token::LessThanOrEqual);
m.insert(">=".to_string(), token::GreaterThanOrEqual);
m.insert("!=".to_string(), token::NotEqual);
m.insert("::".to_string(), token::DoubleColon);
m.insert("//".to_string(), token::DoubleSlash);
m.insert("..".to_string(), token::ParentNode);
m
}
fn single_char_tokens(&self) -> HashMap<char, XPathToken> {
let mut m = HashMap::new();
m.insert('/', token::Slash);
m.insert('(', token::LeftParen);
m.insert(')', token::RightParen);
m.insert('[', token::LeftBracket);
m.insert(']', token::RightBracket);
m.insert('@', token::AtSign);
m.insert('$', token::DollarSign);
m.insert('+', token::PlusSign);
m.insert('-', token::MinusSign);
m.insert('|', token::Pipe);
m.insert('=', token::Equal);
m.insert('<', token::LessThan);
m.insert('>', token::GreaterThan);
m
}
fn named_operators(& self) -> Vec<(& 'static str, XPathToken)> {
vec!(("and", token::And),
("or", token::Or),
("mod", token::Remainder),
("div", token::Divide),
("*", token::Multiply))
}
fn tokenize_literal(& mut self, quote_char: char) -> TokenResult {
let mut offset = self.start;
offset += 1; // Skip over the starting quote
let start_of_string = offset;
offset = self.xpath.while_not_character(offset, quote_char);
let end_of_string = offset;
if self.xpath.char_at_is_not(offset, quote_char) {
return Err(MismatchedQuoteCharacters);
}
offset += 1; // Skip over ending quote
self.start = offset;
return Ok(token::Literal(self.xpath.substr(start_of_string, end_of_string)));
}
fn raw_next_token(& mut self) -> TokenResult {
match self.xpath.safe_substr(self.start, self.start + 2) {
Some(first_two) => {
match self.two_char_tokens().find(&first_two) {
Some(token) => {
self.start += 2;
return Ok(token.clone());
}
_ => {}
}
},
_ => {}
}
let c = self.xpath.char_at(self.start);
match self.single_char_tokens().find(&c) {
Some(token) => {
self.start += 1;
return Ok(token.clone());
}
_ => {}
}
for quote_char in QUOTE_CHARS.iter() {
if *quote_char == c {
return self.tokenize_literal(*quote_char);
}
}
if '.' == c {
if self.xpath.char_at_is_not_digit(self.start + 1) {
// Ugly. Should we use START / FOLLOW constructs?
self.start += 1;
return Ok(token::CurrentNode);
}
}
if is_number_char(c) {
let mut offset = self.start;
let current_start = self.start;
offset = self.xpath.while_valid_number(offset);
self.start = offset;
let substr = self.xpath.substr(current_start, offset);
match from_str(substr.as_slice()) {
Some(value) => Ok(token::Number(value)),
None => fail!("Not really a number!")
}
} else {
let mut offset = self.start;
let current_start = self.start;
if self.prefer_recognition_of_operator_names {
for &(ref name, ref token) in self.named_operators().iter() {
let name_chars: Vec<char> = name.chars().collect();
let name_chars_slice = name_chars.as_slice();
if self.xpath.str_at_is(offset, name_chars_slice) {
self.start += name_chars.len();
return Ok(token.clone());
}
}
}
if self.xpath.char_at_is(offset, '*') {
self.start = offset + 1;
return Ok(token::String("*".to_string()));
}
offset = self.xpath.while_valid_string(offset);
if self.xpath.char_at_is(offset, ':') && self.xpath.char_at_is_not(offset + 1, ':') {
let prefix = self.xpath.substr(current_start, offset);
offset += 1;
let current_start = offset;
offset = self.xpath.while_valid_string(offset);
if current_start == offset {
return Err(MissingLocalName);
}
let name = self.xpath.substr(current_start, offset);
self.start = offset;
return Ok(token::PrefixedName(prefix, name));
} else {
self.start = offset;
return Ok(token::String(self.xpath.substr(current_start, offset)));
}
}
}
fn consume_whitespace(& mut self) {
self.start = self.xpath.end_of_whitespace(self.start);
}
fn next_token(& mut self) -> TokenResult {
self.consume_whitespace();
let old_start = self.start;
let token = self.raw_next_token();
if token.is_err() { return token; }
let token = token.unwrap();
if old_start == self.start {
return Err(UnableToCreateToken);
}
self.consume_whitespace();
if ! (token.precedes_node_test() ||
token.precedes_expression() ||
token.is_operator()) {
// See http://www.w3.org/TR/xpath/#exprlex
self.prefer_recognition_of_operator_names = true;
} else {
self.prefer_recognition_of_operator_names = false;
}
return Ok(token);
}
}
impl Iterator<TokenResult> for XPathTokenizer {
fn next(&mut self) -> Option<TokenResult> {
if self.has_more_tokens() {
Some(self.next_token())
} else {
None
}
}
}
fn is_number_char(c: char) -> bool {
return is_digit(c) || '.' == c;
}
pub struct XPathTokenDisambiguator<T, I> {
source: ::std::iter::Peekable<T, I>,
}
impl<T, I: Iterator<T>> XPathTokenDisambiguator<T, I> {
pub fn new(source: I) -> XPathTokenDisambiguator<T, I> {
XPathTokenDisambiguator{
source: source.peekable(),
}
}
}
static node_test_names : [&'static str, .. 4] =
[ "comment", "text", "processing-instruction", "node" ];
impl<I: Iterator<TokenResult>> Iterator<TokenResult> for XPathTokenDisambiguator<TokenResult, I> {
fn next(&mut self) -> Option<TokenResult> {
let token = self.source.next();
let next = self.source.peek();
match (token, next) {
(Some(Ok(token::String(val))), Some(&Ok(token::LeftParen))) => {
if node_test_names.contains(&val.as_slice()) {
Some(Ok(token::NodeTest(val)))
} else {
Some(Ok(token::Function(val)))
}
},
(Some(Ok(token::String(val))), Some(&Ok(token::DoubleColon))) => {
Some(Ok(token::Axis(val)))
},
(token, _) => token,
}
}
}
pub struct XPathTokenDeabbreviator<I> {
source: I,
buffer: Vec<XPathToken>,
}
impl<I> XPathTokenDeabbreviator<I> {
pub fn new(source: I) -> XPathTokenDeabbreviator<I> {
XPathTokenDeabbreviator {
source: source,
buffer: vec!(),
}
}
fn push(&mut self, token: XPathToken) {
self.buffer.push(token);
}
fn expand_token(&mut self, token: XPathToken) {
match token {
token::AtSign => {
self.push(token::String("attribute".to_string()));
self.push(token::DoubleColon);
}
token::DoubleSlash => {
self.push(token::Slash);
self.push(token::String("descendant-or-self".to_string()));
self.push(token::DoubleColon);
self.push(token::String("node".to_string()));
self.push(token::LeftParen);
self.push(token::RightParen);
self.push(token::Slash);
}
token::CurrentNode => {
self.push(token::String("self".to_string()));
self.push(token::DoubleColon);
self.push(token::String("node".to_string()));
self.push(token::LeftParen);
self.push(token::RightParen);
}
token::ParentNode => {
self.push(token::String("parent".to_string()));
self.push(token::DoubleColon);
self.push(token::String("node".to_string()));
self.push(token::LeftParen);
self.push(token::RightParen);
}
_ => {
self.push(token);
}
}
}
}
impl<I: Iterator<TokenResult>> Iterator<TokenResult> for XPathTokenDeabbreviator<I> {
fn next(&mut self) -> Option<TokenResult> {
if self.buffer.is_empty() {
let token = self.source.next();
match token {
None => return token,
Some(Err(_)) => return token,
Some(Ok(token)) => self.expand_token(token),
}
}
match self.buffer.remove(0) {
Some(t) => Some(Ok(t)),
None => fail!("No tokens left to return"), // Can't happen, we always add one
}
}
}<|fim▁end|> | Some(self.substr(start, end)) |
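// Illustrative note (not part of the original module): for the abbreviated XPath "//para/@id"
// the tokenizer yields roughly
//     [DoubleSlash, String("para"), Slash, AtSign, String("id")]
// and XPathTokenDeabbreviator rewrites that stream to the verbose form
//     [Slash, String("descendant-or-self"), DoubleColon, String("node"), LeftParen, RightParen,
//      Slash, String("para"), Slash, String("attribute"), DoubleColon, String("id")]
// so the later parsing stages only ever see unabbreviated axis syntax.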
<|file_name|>bench_native.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT<|fim▁hole|>// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(plugin, test)]
#![plugin(regex_macros)]
extern crate rand;
extern crate regex;
extern crate test;
mod bench;<|fim▁end|> | // file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or |
<|file_name|>0001_initial.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-04 09:48
from __future__ import unicode_literals
<|fim▁hole|>
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='DateMixin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='File',
fields=[
('datemixin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='api.DateMixin')),
('file_id', models.CharField(default=api.utils.generate_uid, max_length=20)),
('name', models.CharField(max_length=255)),
('_file', models.FileField(upload_to='files')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to=settings.AUTH_USER_MODEL)),
],
bases=('api.datemixin',),
),
]<|fim▁end|> | import api.utils
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion |
<|file_name|>user.ts<|end_file_name|><|fim▁begin|><|fim▁hole|> loginName?: string;
loginPassword?: string;
userId?: number;
}<|fim▁end|> | export class User { |
<|file_name|>mock_api.py<|end_file_name|><|fim▁begin|>from contextlib import contextmanager
import json
import mock
# Mock tastypi API.
class ProjectData(object):
def get(self):
return dict()
def mock_version(repo):
class MockVersion(object):
def __init__(self, x=None):
pass
def put(self, x=None):
return x
def get(self, **kwargs):
# SCIENTIST DOG
version = json.loads("""
{
"active": false,
"built": false,
"id": "12095",
"identifier": "remotes/origin/zip_importing",
"resource_uri": "/api/v1/version/12095/",
"slug": "zip_importing",
"uploaded": false,
"verbose_name": "zip_importing"
}""")
project = json.loads("""
{
"absolute_url": "/projects/docs/",
"analytics_code": "",
"copyright": "",
"default_branch": "",
"default_version": "latest",
"description": "Make docs.readthedocs.org work :D",
"django_packages_url": "",
"documentation_type": "sphinx",
"id": "2599",
"modified_date": "2012-03-12T19:59:09.130773",
"name": "docs",
"project_url": "",
"pub_date": "2012-02-19T18:10:56.582780",
"repo": "git://github.com/rtfd/readthedocs.org",<|fim▁hole|> "requirements_file": "",
"resource_uri": "/api/v1/project/2599/",
"slug": "docs",
"subdomain": "http://docs.readthedocs.org/",
"suffix": ".rst",
"theme": "default",
"use_virtualenv": false,
"users": [
"/api/v1/user/1/"
],
"version": ""
}""")
version['project'] = project
project['repo'] = repo
if 'slug' in kwargs:
return {'objects': [version], 'project': project}
else:
return version
return MockVersion
class MockApi(object):
def __init__(self, repo):
self.version = mock_version(repo)
def project(self, x):
return ProjectData()
@contextmanager
def mock_api(repo):
api_mock = MockApi(repo)
    # apply all API patches together; chaining them with 'and' would only enter the last one
    with mock.patch('readthedocs.restapi.client.api', api_mock), \
            mock.patch('readthedocs.api.client.api', api_mock), \
            mock.patch('readthedocs.projects.tasks.api_v2', api_mock), \
            mock.patch('readthedocs.projects.tasks.api_v1', api_mock):
yield api_mock<|fim▁end|> | "repo_type": "git", |
<|file_name|>messages.py<|end_file_name|><|fim▁begin|>"""ACME protocol messages."""
import collections
import six
from acme import challenges
from acme import errors
from acme import fields
from acme import jose
from acme import util
OLD_ERROR_PREFIX = "urn:acme:error:"
ERROR_PREFIX = "urn:ietf:params:acme:error:"
ERROR_CODES = {
'badCSR': 'The CSR is unacceptable (e.g., due to a short key)',
'badNonce': 'The client sent an unacceptable anti-replay nonce',
'connection': ('The server could not connect to the client to verify the'
' domain'),
'dnssec': 'The server could not validate a DNSSEC signed domain',
# deprecate invalidEmail
'invalidEmail': 'The provided email for a registration was invalid',
'invalidContact': 'The provided contact URI was invalid',
'malformed': 'The request message was malformed',
'rateLimited': 'There were too many requests of a given type',
'serverInternal': 'The server experienced an internal error',
'tls': 'The server experienced a TLS error during domain verification',
'unauthorized': 'The client lacks sufficient authorization',
'unknownHost': 'The server could not resolve a domain name',
}
ERROR_TYPE_DESCRIPTIONS = dict(
(ERROR_PREFIX + name, desc) for name, desc in ERROR_CODES.items())
ERROR_TYPE_DESCRIPTIONS.update(dict( # add errors with old prefix, deprecate me
(OLD_ERROR_PREFIX + name, desc) for name, desc in ERROR_CODES.items()))
def is_acme_error(err):
"""Check if argument is an ACME error."""
if isinstance(err, Error) and (err.typ is not None):
return (ERROR_PREFIX in err.typ) or (OLD_ERROR_PREFIX in err.typ)
else:
return False
@six.python_2_unicode_compatible
class Error(jose.JSONObjectWithFields, errors.Error):
"""ACME error.
https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00
:ivar unicode typ:
:ivar unicode title:
:ivar unicode detail:
"""
typ = jose.Field('type', omitempty=True, default='about:blank')
title = jose.Field('title', omitempty=True)
detail = jose.Field('detail', omitempty=True)
@classmethod
def with_code(cls, code, **kwargs):
"""Create an Error instance with an ACME Error code.
:unicode code: An ACME error code, like 'dnssec'.
:kwargs: kwargs to pass to Error.
"""
if code not in ERROR_CODES:
raise ValueError("The supplied code: %s is not a known ACME error"
" code" % code)
typ = ERROR_PREFIX + code
return cls(typ=typ, **kwargs)
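    # Minimal usage sketch (illustrative only; the detail text is made up):
    #
    #   err = Error.with_code('rateLimited', detail='too many new-cert requests')
    #   assert is_acme_error(err)
    #   assert err.code == 'rateLimited'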
@property
def description(self):
"""Hardcoded error description based on its type.
:returns: Description if standard ACME error or ``None``.
:rtype: unicode
"""
return ERROR_TYPE_DESCRIPTIONS.get(self.typ)
@property
def code(self):
"""ACME error code.
Basically self.typ without the ERROR_PREFIX.
:returns: error code if standard ACME code or ``None``.
:rtype: unicode
"""
code = str(self.typ).split(':')[-1]
if code in ERROR_CODES:
return code
def __str__(self):
return b' :: '.join(
part.encode('ascii', 'backslashreplace') for part in
(self.typ, self.description, self.detail, self.title)
if part is not None).decode()
class _Constant(jose.JSONDeSerializable, collections.Hashable): # type: ignore
"""ACME constant."""
__slots__ = ('name',)
POSSIBLE_NAMES = NotImplemented
def __init__(self, name):
self.POSSIBLE_NAMES[name] = self
self.name = name
def to_partial_json(self):
return self.name
@classmethod
def from_json(cls, value):
if value not in cls.POSSIBLE_NAMES:
raise jose.DeserializationError(
'{0} not recognized'.format(cls.__name__))
return cls.POSSIBLE_NAMES[value]
def __repr__(self):
return '{0}({1})'.format(self.__class__.__name__, self.name)
def __eq__(self, other):
return isinstance(other, type(self)) and other.name == self.name
def __hash__(self):
return hash((self.__class__, self.name))
def __ne__(self, other):
return not self == other
class Status(_Constant):
"""ACME "status" field."""
POSSIBLE_NAMES = {} # type: dict
STATUS_UNKNOWN = Status('unknown')
STATUS_PENDING = Status('pending')
STATUS_PROCESSING = Status('processing')
STATUS_VALID = Status('valid')
STATUS_INVALID = Status('invalid')
STATUS_REVOKED = Status('revoked')
class IdentifierType(_Constant):
"""ACME identifier type."""
POSSIBLE_NAMES = {} # type: dict
IDENTIFIER_FQDN = IdentifierType('dns') # IdentifierDNS in Boulder
class Identifier(jose.JSONObjectWithFields):
"""ACME identifier.
:ivar IdentifierType typ:
:ivar unicode value:
"""
typ = jose.Field('type', decoder=IdentifierType.from_json)
value = jose.Field('value')
class Directory(jose.JSONDeSerializable):
"""Directory."""
_REGISTERED_TYPES = {} # type: dict
class Meta(jose.JSONObjectWithFields):
"""Directory Meta."""
terms_of_service = jose.Field('terms-of-service', omitempty=True)
website = jose.Field('website', omitempty=True)
caa_identities = jose.Field('caa-identities', omitempty=True)
@classmethod
def _canon_key(cls, key):
return getattr(key, 'resource_type', key)
@classmethod
def register(cls, resource_body_cls):
"""Register resource."""
resource_type = resource_body_cls.resource_type
assert resource_type not in cls._REGISTERED_TYPES
cls._REGISTERED_TYPES[resource_type] = resource_body_cls
return resource_body_cls
def __init__(self, jobj):
canon_jobj = util.map_keys(jobj, self._canon_key)
# TODO: check that everything is an absolute URL; acme-spec is
# not clear on that
self._jobj = canon_jobj
def __getattr__(self, name):
try:
return self[name.replace('_', '-')]
except KeyError as error:
raise AttributeError(str(error) + ': ' + name)
def __getitem__(self, name):
try:
return self._jobj[self._canon_key(name)]
except KeyError:
raise KeyError('Directory field not found')
def to_partial_json(self):
return self._jobj
@classmethod
def from_json(cls, jobj):
jobj['meta'] = cls.Meta.from_json(jobj.pop('meta', {}))
return cls(jobj)
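    # Access sketch (the URL is illustrative): registered keys use dashes, and
    # attribute access maps underscores back to dashes, so a directory built from
    # {'new-reg': 'https://acme.example/acme/new-reg'} satisfies
    #
    #   directory['new-reg'] == directory.new_reg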
class Resource(jose.JSONObjectWithFields):
"""ACME Resource.
:ivar acme.messages.ResourceBody body: Resource body.
"""
body = jose.Field('body')
class ResourceWithURI(Resource):
"""ACME Resource with URI.
:ivar unicode uri: Location of the resource.
"""
uri = jose.Field('uri') # no ChallengeResource.uri
class ResourceBody(jose.JSONObjectWithFields):
"""ACME Resource Body."""<|fim▁hole|>
:ivar acme.jose.jwk.JWK key: Public key.
:ivar tuple contact: Contact information following ACME spec,
`tuple` of `unicode`.
:ivar unicode agreement:
"""
# on new-reg key server ignores 'key' and populates it based on
# JWS.signature.combined.jwk
key = jose.Field('key', omitempty=True, decoder=jose.JWK.from_json)
contact = jose.Field('contact', omitempty=True, default=())
agreement = jose.Field('agreement', omitempty=True)
status = jose.Field('status', omitempty=True)
phone_prefix = 'tel:'
email_prefix = 'mailto:'
@classmethod
def from_data(cls, phone=None, email=None, **kwargs):
"""Create registration resource from contact details."""
details = list(kwargs.pop('contact', ()))
if phone is not None:
details.append(cls.phone_prefix + phone)
if email is not None:
details.append(cls.email_prefix + email)
kwargs['contact'] = tuple(details)
return cls(**kwargs)
def _filter_contact(self, prefix):
return tuple(
detail[len(prefix):] for detail in self.contact
if detail.startswith(prefix))
@property
def phones(self):
"""All phones found in the ``contact`` field."""
return self._filter_contact(self.phone_prefix)
@property
def emails(self):
"""All emails found in the ``contact`` field."""
return self._filter_contact(self.email_prefix)
@Directory.register
class NewRegistration(Registration):
"""New registration."""
resource_type = 'new-reg'
resource = fields.Resource(resource_type)
class UpdateRegistration(Registration):
"""Update registration."""
resource_type = 'reg'
resource = fields.Resource(resource_type)
class RegistrationResource(ResourceWithURI):
"""Registration Resource.
:ivar acme.messages.Registration body:
:ivar unicode new_authzr_uri: Deprecated. Do not use.
:ivar unicode terms_of_service: URL for the CA TOS.
"""
body = jose.Field('body', decoder=Registration.from_json)
new_authzr_uri = jose.Field('new_authzr_uri', omitempty=True)
terms_of_service = jose.Field('terms_of_service', omitempty=True)
class ChallengeBody(ResourceBody):
"""Challenge Resource Body.
.. todo::
Confusingly, this has a similar name to `.challenges.Challenge`,
as well as `.achallenges.AnnotatedChallenge`. Please use names
such as ``challb`` to distinguish instances of this class from
``achall``.
:ivar acme.challenges.Challenge: Wrapped challenge.
Conveniently, all challenge fields are proxied, i.e. you can
call ``challb.x`` to get ``challb.chall.x`` contents.
:ivar acme.messages.Status status:
:ivar datetime.datetime validated:
:ivar messages.Error error:
"""
__slots__ = ('chall',)
uri = jose.Field('uri')
status = jose.Field('status', decoder=Status.from_json,
omitempty=True, default=STATUS_PENDING)
validated = fields.RFC3339Field('validated', omitempty=True)
error = jose.Field('error', decoder=Error.from_json,
omitempty=True, default=None)
def to_partial_json(self):
jobj = super(ChallengeBody, self).to_partial_json()
jobj.update(self.chall.to_partial_json())
return jobj
@classmethod
def fields_from_json(cls, jobj):
jobj_fields = super(ChallengeBody, cls).fields_from_json(jobj)
jobj_fields['chall'] = challenges.Challenge.from_json(jobj)
return jobj_fields
def __getattr__(self, name):
return getattr(self.chall, name)
class ChallengeResource(Resource):
"""Challenge Resource.
:ivar acme.messages.ChallengeBody body:
:ivar unicode authzr_uri: URI found in the 'up' ``Link`` header.
"""
body = jose.Field('body', decoder=ChallengeBody.from_json)
authzr_uri = jose.Field('authzr_uri')
@property
def uri(self): # pylint: disable=missing-docstring,no-self-argument
# bug? 'method already defined line None'
# pylint: disable=function-redefined
return self.body.uri # pylint: disable=no-member
class Authorization(ResourceBody):
"""Authorization Resource Body.
:ivar acme.messages.Identifier identifier:
:ivar list challenges: `list` of `.ChallengeBody`
:ivar tuple combinations: Challenge combinations (`tuple` of `tuple`
of `int`, as opposed to `list` of `list` from the spec).
:ivar acme.messages.Status status:
:ivar datetime.datetime expires:
"""
identifier = jose.Field('identifier', decoder=Identifier.from_json)
challenges = jose.Field('challenges', omitempty=True)
combinations = jose.Field('combinations', omitempty=True)
status = jose.Field('status', omitempty=True, decoder=Status.from_json)
# TODO: 'expires' is allowed for Authorization Resources in
# general, but for Key Authorization '[t]he "expires" field MUST
# be absent'... then acme-spec gives example with 'expires'
# present... That's confusing!
expires = fields.RFC3339Field('expires', omitempty=True)
@challenges.decoder
def challenges(value): # pylint: disable=missing-docstring,no-self-argument
return tuple(ChallengeBody.from_json(chall) for chall in value)
@property
def resolved_combinations(self):
"""Combinations with challenges instead of indices."""
return tuple(tuple(self.challenges[idx] for idx in combo)
for combo in self.combinations)
@Directory.register
class NewAuthorization(Authorization):
"""New authorization."""
resource_type = 'new-authz'
resource = fields.Resource(resource_type)
class AuthorizationResource(ResourceWithURI):
"""Authorization Resource.
:ivar acme.messages.Authorization body:
:ivar unicode new_cert_uri: Deprecated. Do not use.
"""
body = jose.Field('body', decoder=Authorization.from_json)
new_cert_uri = jose.Field('new_cert_uri', omitempty=True)
@Directory.register
class CertificateRequest(jose.JSONObjectWithFields):
"""ACME new-cert request.
:ivar acme.jose.util.ComparableX509 csr:
`OpenSSL.crypto.X509Req` wrapped in `.ComparableX509`
"""
resource_type = 'new-cert'
resource = fields.Resource(resource_type)
csr = jose.Field('csr', decoder=jose.decode_csr, encoder=jose.encode_csr)
class CertificateResource(ResourceWithURI):
"""Certificate Resource.
:ivar acme.jose.util.ComparableX509 body:
`OpenSSL.crypto.X509` wrapped in `.ComparableX509`
:ivar unicode cert_chain_uri: URI found in the 'up' ``Link`` header
:ivar tuple authzrs: `tuple` of `AuthorizationResource`.
"""
cert_chain_uri = jose.Field('cert_chain_uri')
authzrs = jose.Field('authzrs')
@Directory.register
class Revocation(jose.JSONObjectWithFields):
"""Revocation message.
:ivar .ComparableX509 certificate: `OpenSSL.crypto.X509` wrapped in
`.ComparableX509`
"""
resource_type = 'revoke-cert'
resource = fields.Resource(resource_type)
certificate = jose.Field(
'certificate', decoder=jose.decode_cert, encoder=jose.encode_cert)
reason = jose.Field('reason')<|fim▁end|> |
class Registration(ResourceBody):
"""Registration Resource Body. |
<|file_name|>abstract.py<|end_file_name|><|fim▁begin|>import sys
#from OpenGL.GLUT import *
#from OpenGL.GLU import *
#from OpenGL.GL import *
class abstract:
params = {}
windowId = None
terminated = False
def initParams(self):
return self
def __init__(self):
self.initParams().init()
return
def init(self):
return
def mouse(self, button, state, x, y):
return
def mouseMotion(self, x, y):
return
def keyboard(self, asciiCode, x, y):
return
def keyboardSpecial(self, key, x, y):
return<|fim▁hole|> def timer(self, value):
return
def render(self):
return
def reshape(self, width, height):
return
def run(self):
return self
def destroy(self):
del self
return
def select(self):
return self.activate()
def activate(self):
return self
def redisplay(self):
return self
def hide(self):
return self
def show(self):
return self
def title(self, title):
return self
def setPosition(self, x, y):
return self
def setResolution(self, width, height):
return self<|fim▁end|> |
def idle(self):
return
|
<|file_name|>generator.rs<|end_file_name|><|fim▁begin|>// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the<|fim▁hole|>// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::{U256, H2048, Bytes};
use header::BlockNumber;
use transaction::SignedTransaction;
use super::fork::Fork;
use super::bloom::Bloom;
use super::complete::{BlockFinalizer, CompleteBlock, Complete};
use super::block::Block;
use super::transaction::Transaction;
/// Chain iterator interface.
pub trait ChainIterator: Iterator + Sized {
/// Should be called to create a fork of current iterator.
/// Blocks generated by fork will have lower difficulty than current chain.
fn fork(&self, fork_number: usize) -> Fork<Self> where Self: Clone;
/// Should be called to make every consecutive block have given bloom.
fn with_bloom(&mut self, bloom: H2048) -> Bloom<Self>;
/// Should be called to make every consecutive block have given transaction.
fn with_transaction(&mut self, transaction: SignedTransaction) -> Transaction<Self>;
/// Should be called to complete block. Without complete, block may have incorrect hash.
fn complete<'a>(&'a mut self, finalizer: &'a mut BlockFinalizer) -> Complete<'a, Self>;
/// Completes and generates block.
fn generate<'a>(&'a mut self, finalizer: &'a mut BlockFinalizer) -> Option<Bytes> where Self::Item: CompleteBlock;
}
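// Usage sketch -- this mirrors the tests at the bottom of this file rather than
// introducing any new API:
//
//     let mut canon_chain = ChainGenerator::default();
//     let mut finalizer = BlockFinalizer::default();
//     let genesis_rlp = canon_chain.generate(&mut finalizer).unwrap();
//     let mut fork_chain = canon_chain.fork(1);
//     let fork_rlp = fork_chain.generate(&mut finalizer.fork());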
impl<I> ChainIterator for I where I: Iterator + Sized {
fn fork(&self, fork_number: usize) -> Fork<Self> where I: Clone {
Fork {
iter: self.clone(),
fork_number: fork_number
}
}
fn with_bloom(&mut self, bloom: H2048) -> Bloom<Self> {
Bloom {
iter: self,
bloom: bloom
}
}
fn with_transaction(&mut self, transaction: SignedTransaction) -> Transaction<Self> {
Transaction {
iter: self,
transaction: transaction,
}
}
fn complete<'a>(&'a mut self, finalizer: &'a mut BlockFinalizer) -> Complete<'a, Self> {
Complete {
iter: self,
finalizer: finalizer
}
}
fn generate<'a>(&'a mut self, finalizer: &'a mut BlockFinalizer) -> Option<Bytes> where <I as Iterator>::Item: CompleteBlock {
self.complete(finalizer).next()
}
}
/// Blockchain generator.
#[derive(Clone)]
pub struct ChainGenerator {
/// Next block number.
number: BlockNumber,
/// Next block difficulty.
difficulty: U256,
}
impl ChainGenerator {
fn prepare_block(&self) -> Block {
let mut block = Block::default();
block.header.set_number(self.number);
block.header.set_difficulty(self.difficulty);
block
}
}
impl Default for ChainGenerator {
fn default() -> Self {
ChainGenerator {
number: 0,
difficulty: 1000.into(),
}
}
}
impl Iterator for ChainGenerator {
type Item = Block;
fn next(&mut self) -> Option<Self::Item> {
let block = self.prepare_block();
self.number += 1;
Some(block)
}
}
#[cfg(test)]
mod tests {
use util::hash::{H256, H2048};
use util::sha3::Hashable;
use views::BlockView;
use blockchain::generator::{ChainIterator, ChainGenerator, BlockFinalizer};
#[test]
fn canon_chain_generator() {
let mut canon_chain = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
let genesis_rlp = canon_chain.generate(&mut finalizer).unwrap();
let genesis = BlockView::new(&genesis_rlp);
assert_eq!(genesis.header_view().parent_hash(), H256::default());
assert_eq!(genesis.header_view().number(), 0);
let b1_rlp = canon_chain.generate(&mut finalizer).unwrap();
let b1 = BlockView::new(&b1_rlp);
assert_eq!(b1.header_view().parent_hash(), genesis.header_view().sha3());
assert_eq!(b1.header_view().number(), 1);
let mut fork_chain = canon_chain.fork(1);
let b2_rlp_fork = fork_chain.generate(&mut finalizer.fork()).unwrap();
let b2_fork = BlockView::new(&b2_rlp_fork);
assert_eq!(b2_fork.header_view().parent_hash(), b1.header_view().sha3());
assert_eq!(b2_fork.header_view().number(), 2);
let b2_rlp = canon_chain.generate(&mut finalizer).unwrap();
let b2 = BlockView::new(&b2_rlp);
assert_eq!(b2.header_view().parent_hash(), b1.header_view().sha3());
assert_eq!(b2.header_view().number(), 2);
assert!(b2.header_view().difficulty() > b2_fork.header_view().difficulty());
}
#[test]
fn with_bloom_generator() {
let bloom = H2048([0x1; 256]);
let mut gen = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
let block0_rlp = gen.with_bloom(bloom).generate(&mut finalizer).unwrap();
let block1_rlp = gen.generate(&mut finalizer).unwrap();
let block0 = BlockView::new(&block0_rlp);
let block1 = BlockView::new(&block1_rlp);
assert_eq!(block0.header_view().number(), 0);
assert_eq!(block0.header_view().parent_hash(), H256::default());
assert_eq!(block1.header_view().number(), 1);
assert_eq!(block1.header_view().parent_hash(), block0.header_view().sha3());
}
#[test]
fn generate_1000_blocks() {
let generator = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
let blocks: Vec<_> = generator.take(1000).complete(&mut finalizer).collect();
assert_eq!(blocks.len(), 1000);
}
}<|fim▁end|> | // GNU General Public License for more details.
|
<|file_name|>mapapi.shape.js<|end_file_name|><|fim▁begin|>/**
* License and Terms of Use
*
* Copyright (c) 2011 SignpostMarv
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
(function(window, undefined){
var
Array = window['Array'],
EventTarget = window['EventTarget'],
mapapi = window['mapapi'],
gridPoint = mapapi['gridPoint'],
bounds = mapapi['bounds'],
ctype_digit = mapapi['utils']['ctype_digit']
;
if(mapapi == undefined){
throw 'mapapi.js is not loaded.';
}else if(EventTarget == undefined){
throw 'EventTarget is not loaded';
}
function extend(a,b){
a.prototype = new b;
a.prototype['constructor'] = a;
}
function shape(options){
EventTarget['call'](this);
this['opts'] = {};
for(var i in this['defaultOpts']){
this['opts'][i] = this['defaultOpts'][i];
}
if(options != undefined){
this['options'](options);
}
}
extend(shape, EventTarget);
shape.prototype['defaultOpts'] = {'fillStyle':'rgba(255,255,255,0.5)', 'strokeStyle':'rgb(255,255,255)', 'lineWidth':0};
shape.prototype['options'] = function(options){
options = options || {};
for(var i in options){
			this['opts'][i] = options[i];
}
}
shape.prototype['withinShape'] = function(pos){
if(pos instanceof gridPoint){
return true;
}
return false;
}
shape.prototype['coords'] = function(value){
if(value != undefined){
this['options']({'coords':value});
}
var
coords = this['opts']['coords']
;
return coords != undefined ? coords : [];
}
shape.prototype['clickable'] = function(value){
if(value != undefined){
this['options']({'clickable':!!value});
}
var
clickable = this['opts']['clickable'];
;
return clickable != undefined ? clickable : false;
}
shape.prototype['strokeStyle'] = function(value){
if(typeof value == 'string'){
this['options']({'strokeStyle':value});
}
return this['opts']['strokeStyle'];
}
shape.prototype['lineWidth'] = function(value){
if(typeof value == 'number'){
this['options']({'lineWidth':Math.max(0,value)});
}
return Math.max(0, this['opts']['lineWidth']);
}
shape.prototype['intersects'] = function(value){
if(value instanceof bounds && this['bounds'] instanceof bounds){
return this['bounds']['intersects'](value);
}
return false;
}
mapapi['shape'] = shape;
function shapeManager(){
Array['call'](this);
}
extend(shapeManager, Array);
shapeManager.prototype['push'] = function(){
for(var i=0;i<arguments['length'];++i){
if(!(arguments[i] instanceof shape)){
throw 'Arguments of mapapi.shapeManager::push() should be instances of mapapi.shape';
}
}
Array.prototype['push']['apply'](this, arguments);
}
shapeManager.prototype['intersects'] = function(value){
if(value instanceof bounds){
var
shpmngr = new this['constructor']
;
for(var i=0;i<this['length'];++i){
if(this[i]['intersects'](value)){
shpmngr['push'](this[i]);
}
}
return shpmngr;
}else{
throw 'Intersection argument must be an instance of mapapi.bounds';
}
}
shapeManager.prototype['click'] = function(value){
var
value = gridPoint['fuzzy'](value),
ret
;
for(var i=0;i<this['length'];++i){
if(this[i]['clickable']() && this[i]['withinShape'](value)){
ret = this[i]['fire']('click',{'pos':value});
if(ret != undefined && ret == false){
break;
}
}
}
}
mapapi['shapeManager'] = shapeManager;
function poly(options){
shape['call'](this, options);
}
extend(poly, shape);
poly.prototype['options'] = function(options){
var
options = options || {},
coords = options['coords'],
fillStyle = options['fillStyle'],
strokeStyle = options['strokeStyle'],
lineWidth = options['lineWidth']
;
if(options['coords'] != undefined){
if(coords instanceof Array){
for(var i=0;i<coords['length'];++i){
coords[i] = gridPoint['fuzzy'](coords[i]);
}
var
swx = coords[0]['x'],
swy = coords[0]['y'],
nex = coords[0]['x'],
ney = coords[0]['y']
;
for(var i=1;i<coords['length'];++i){
swx = (coords[i]['x'] < swx) ? coords[i]['x'] : swx;
swy = (coords[i]['y'] < swy) ? coords[i]['y'] : swy;
nex = (coords[i]['x'] > nex) ? coords[i]['x'] : nex;
ney = (coords[i]['y'] > ney) ? coords[i]['y'] : ney;
}
this['bounds'] = new bounds(new gridPoint(swx, swy), new gridPoint(nex, ney));
this['opts']['coords'] = coords;
this['fire']('changedcoords');
}else{
throw 'coords must be array';
}
}
if(typeof fillStyle == 'string'){
var diff = this['opts']['fillStyle'] != fillStyle;
this['opts']['fillStyle'] = fillStyle;
if(diff){
this['fire']('changedfillstyle');
}<|fim▁hole|> if(diff){
this['fire']('changedstrokestyle');
}
}
if(typeof lineWidth == 'number'){
var diff = this['opts']['lineWidth'] != Math.max(0,lineWidth);
this['opts']['lineWidth'] = Math.max(0,lineWidth);
if(diff){
this['fire']('changedlinewidth');
}
}
if(options['clickable'] != undefined){
this['opts']['clickable'] = !!options['clickable'];
}
}
poly.prototype['fillStyle'] = function(value){
if(value != undefined){
this['options']({'fillStyle':value});
}
return this['opts']['fillStyle'];
}
shape['polygon'] = poly;
function rectangle(options){
poly['call'](this, options);
}
extend(rectangle, poly);
rectangle.prototype['options'] = function(options){
var
options = options || {},
coords = options['coords']
;
if(coords != undefined){
if(coords instanceof Array){
if(coords['length'] == 2){
for(var i=0;i<coords['length'];++i){
coords[i] = gridPoint['fuzzy'](coords[i]);
}
var
sw = coords[0],
ne = coords[1],
foo,bar
;
if(ne['y'] > sw['y']){
foo = new gridPoint(ne['x'], sw['y']);
bar = new gridPoint(sw['x'], ne['y']);
ne = foo;
sw = bar;
}
if(sw['x'] > ne['x']){
foo = new gridPoint(ne['x'], sw['y']);
bar = new gridPoint(sw['x'], ne['y']);
sw = foo;
ne = bar;
}
options['coords'] = [sw, ne];
}else{
throw 'When supplying mapapi.shape.rectangle::options with an Array for the coordinates, there should only be two entries';
}
}else{
throw 'something other than array was given to mapapi.shape.rectangle::options';
}
}
poly.prototype['options']['call'](this, options);
}
rectangle.prototype['withinShape'] = function(value){
if(value == undefined){
throw 'Must specify an instance of mapapi.gridPoint';
}else if(!(this['bounds'] instanceof bounds)){
throw 'Coordinates not set';
}
value = gridPoint['fuzzy'](value);
return this['bounds']['isWithin'](value);
}
shape['rectangle'] = rectangle;
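	// Usage sketch (coordinates and styles are illustrative; gridPoint.fuzzy is
	// assumed to accept plain {x, y} objects, as it does elsewhere in this file):
	//
	//   var box = new mapapi.shape.rectangle({
	//       'coords'    : [{'x': 0, 'y': 0}, {'x': 10, 'y': 10}],
	//       'fillStyle' : 'rgba(0, 0, 0, 0.25)',
	//       'clickable' : true
	//   });
	//   box.withinShape({'x': 5, 'y': 5}); // true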
function square(options){
rectangle['call'](this, options);
}
extend(square, rectangle);
square.prototype['options'] = function(options){
options = options || {};
var
coords = options['coords']
;
if(coords instanceof Array && coords['length'] <= 2){
var
sw = coords[0],
ne = coords[1]
;
if(Math.abs(ne['x'] - sw['x']) != Math.abs(ne['y'] - sw['y'])){
throw 'coordinates should form a square';
}
}
rectangle.prototype['options']['call'](this, options);
}
shape['square'] = square;
function line(options){
shape['call'](this, options);
}
extend(line, shape);
line.prototype['defaultOpts'] = {'strokeStyle':'rgb(255,255,255)', 'lineWidth':1};
line.prototype['options'] = function(options){
var
options = options || {},
coords = options['coords'],
strokeStyle = options['strokeStyle'],
lineWidth = options['lineWidth']
;
if(options['coords'] != undefined){
if(coords instanceof Array){
if(coords['length'] >= 2){
for(var i=0;i<coords['length'];++i){
coords[i] = gridPoint['fuzzy'](coords[i]);
}
this['opts']['coords'] = coords;
this['fire']('changedcoords');
}else{
throw 'mapapi.shape.line requires two or more coordinates';
}
}else{
throw 'mapapi.shape.line requires coordinates be passed as an array';
}
}
if(typeof strokeStyle == 'string'){
var diff = this['opts']['strokeStyle'] != strokeStyle;
this['opts']['strokeStyle'] = strokeStyle;
if(diff){
this['fire']('changedstrokestyle');
}
}
if(ctype_digit(lineWidth)){
lineWidth = Math.max(0,lineWidth * 1);
var diff = this['opts']['lineWidth'] != lineWidth;
this['opts']['lineWidth'] = lineWidth;
if(diff){
this['fire']('changedlinewidth');
}
}
if(options['clickable'] != undefined){
this['opts']['clickable'] = !!options['clickable'];
}
}
line.prototype['intersects'] = function(value){
if(value instanceof bounds){
var
coords = this['coords']()
;
for(var i=0;i<coords['length'];++i){
if(value['isWithin'](coords[i])){
return true;
}
}
}
return false;
}
shape['line'] = line;
function circle(options){
shape['call'](this, options);
}
extend(circle, shape);
circle.prototype['options'] = function(options){
var
opts = this['opts'],
options = options || {},
coords = options['coords'],
			radius = options['radius'],
			fillStyle = options['fillStyle'],
strokeStyle = options['strokeStyle'],
lineWidth = options['lineWidth'],
diffPos=false,diffRadius=false,diff
;
if(coords != undefined){
coords[0] = gridPoint['fuzzy'](coords[0]);
			diffPos = opts['coords'] == undefined || !coords[0]['equals'](opts['coords'][0]);
opts['coords'] = [coords[0]];
}
if(radius != undefined){
if(typeof radius != 'number'){
throw 'radius should be specified as a number';
}else if(radius <= 0){
throw 'radius should be greater than zero';
}
diffRadius = radius != opts['radius'];
opts['radius'] = radius;
}
if(diffPos || diffRadius){
this['fire']('changedcoords');
}
if(typeof fillStyle == 'string'){
var diff = this['opts']['fillStyle'] != fillStyle;
this['opts']['fillStyle'] = fillStyle;
if(diff){
this['fire']('changedfillstyle');
}
}
if(typeof strokeStyle == 'string'){
var diff = this['opts']['strokeStyle'] != strokeStyle;
this['opts']['strokeStyle'] = strokeStyle;
if(diff){
this['fire']('changedstrokestyle');
}
}
if(typeof lineWidth == 'number'){
var diff = this['opts']['lineWidth'] != Math.max(0,lineWidth);
this['opts']['lineWidth'] = Math.max(0,lineWidth);
if(diff){
this['fire']('changedlinewidth');
}
}
if(options['clickable'] != undefined){
this['opts']['clickable'] = !!options['clickable'];
}
}
circle.prototype['radius'] = function(value){
if(value != undefined){
this['options']({'radius':value});
}
return this['opts']['radius'];
}
circle.prototype['fillStyle'] = function(value){
if(value != undefined){
this['options']({'fillStyle':value});
}
return this['opts']['fillStyle'];
}
circle.prototype['withinShape'] = function(pos){
pos = gridPoint['fuzzy'](pos);
return (this['coords']()[0] instanceof gridPoint && typeof this['radius']() == 'number') && (this['coords']()[0]['distance'](pos) <= this['radius']());
}
circle.prototype['intersects'] = function(value){
if(value instanceof bounds && this['coords']()[0] instanceof gridPoint){
if(value['isWithin'](this['coords']()[0])){
return true;
}else if(typeof this['radius']() == 'number'){
var
sw = value['sw'],
ne = value['ne'],
distanceTests = [sw,ne,{'x':sw['x'], 'y':ne['y']}, {'x':ne['x'], 'y':sw['y']}]
;
for(var i=0;i<distanceTests.length;++i){
if(this['withinShape'](distanceTests[i])){
return true;
}
}
}
}
return false;
}
shape['circle'] = circle;
})(window);<|fim▁end|> | }
if(typeof strokeStyle == 'string'){
var diff = this['opts']['strokeStyle'] != strokeStyle;
this['opts']['strokeStyle'] = strokeStyle; |
<|file_name|>hmac_plugin.py<|end_file_name|><|fim▁begin|># Copyright (C) 2014 Google Inc.
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
import logging
import requests
from base64 import b64decode, b64encode
from bottle import request, abort
from future.moves.urllib.parse import urlparse
from ycmd import hmac_utils
from ycmd.utils import ToBytes
from ycmd.bottle_utils import SetResponseHeader
_HMAC_HEADER = 'x-ycm-hmac'
_HOST_HEADER = 'host'
# This class implements the Bottle plugin API:
# http://bottlepy.org/docs/dev/plugindev.html
#
# We want to ensure that every request coming in has a valid HMAC set in the
# x-ycm-hmac header and that every response coming out sets such a valid header.
# This is to prevent security issues with possible remote code execution.
# The x-ycm-hmac value is encoded as base64 during transport instead of sent raw
# because https://tools.ietf.org/html/rfc5987 says header values must be in the
# ISO-8859-1 character set.
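#
# A client-side sketch of the matching computation (the method, path and body
# values are placeholders; the helpers are the same ones used below):
#
#   request_hmac = hmac_utils.CreateRequestHmac(
#       ToBytes( 'POST' ), ToBytes( '/event_notification' ), ToBytes( body ),
#       ToBytes( hmac_secret ) )
#   headers = { _HMAC_HEADER: b64encode( request_hmac ) }
#
# RequestAuthenticated() below recomputes the same value and compares it with
# hmac_utils.SecureBytesEqual so the comparison stays constant-time.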
class HmacPlugin( object ):
name = 'hmac'
api = 2
def __init__( self, hmac_secret ):
self._hmac_secret = hmac_secret
self._logger = logging.getLogger( __name__ )
def __call__( self, callback ):
def wrapper( *args, **kwargs ):
if not HostHeaderCorrect( request ):
self._logger.info( 'Dropping request with bad Host header.' )
abort( requests.codes.unauthorized,
'Unauthorized, received bad Host header.' )
return
body = ToBytes( request.body.read() )
if not RequestAuthenticated( request.method, request.path, body,
self._hmac_secret ):
self._logger.info( 'Dropping request with bad HMAC.' )
abort( requests.codes.unauthorized, 'Unauthorized, received bad HMAC.' )
return
body = callback( *args, **kwargs )
SetHmacHeader( body, self._hmac_secret )
return body
return wrapper
<|fim▁hole|>def HostHeaderCorrect( request ):
host = urlparse( 'http://' + request.headers[ _HOST_HEADER ] ).hostname
return host == '127.0.0.1' or host == 'localhost'
def RequestAuthenticated( method, path, body, hmac_secret ):
if _HMAC_HEADER not in request.headers:
return False
return hmac_utils.SecureBytesEqual(
hmac_utils.CreateRequestHmac(
ToBytes( method ),
ToBytes( path ),
ToBytes( body ),
ToBytes( hmac_secret ) ),
ToBytes( b64decode( request.headers[ _HMAC_HEADER ] ) ) )
def SetHmacHeader( body, hmac_secret ):
value = b64encode( hmac_utils.CreateHmac( ToBytes( body ),
ToBytes( hmac_secret ) ) )
SetResponseHeader( _HMAC_HEADER, value )<|fim▁end|> | |
<|file_name|>test_pipeline_files.py<|end_file_name|><|fim▁begin|>import os
import random
import time
import hashlib
import warnings
from tempfile import mkdtemp
from shutil import rmtree
from six.moves.urllib.parse import urlparse
from six import BytesIO
from twisted.trial import unittest
from twisted.internet import defer
from scrapy.pipelines.files import FilesPipeline, FSFilesStore, S3FilesStore, GCSFilesStore
from scrapy.item import Item, Field
from scrapy.http import Request, Response
from scrapy.settings import Settings
from scrapy.utils.python import to_bytes
from scrapy.utils.test import assert_aws_environ, get_s3_content_and_delete
from scrapy.utils.test import assert_gcs_environ, get_gcs_content_and_delete
from scrapy.utils.boto import is_botocore
from tests import mock
def _mocked_download_func(request, info):
response = request.meta.get('response')
return response() if callable(response) else response
class FilesPipelineTestCase(unittest.TestCase):
def setUp(self):
self.tempdir = mkdtemp()
self.pipeline = FilesPipeline.from_settings(Settings({'FILES_STORE': self.tempdir}))
self.pipeline.download_func = _mocked_download_func
self.pipeline.open_spider(None)
def tearDown(self):
rmtree(self.tempdir)
def test_file_path(self):
file_path = self.pipeline.file_path
self.assertEqual(file_path(Request("https://dev.mydeco.com/mydeco.pdf")),
'full/c9b564df929f4bc635bdd19fde4f3d4847c757c5.pdf')
self.assertEqual(file_path(Request("http://www.maddiebrown.co.uk///catalogue-items//image_54642_12175_95307.txt")),
'full/4ce274dd83db0368bafd7e406f382ae088e39219.txt')
self.assertEqual(file_path(Request("https://dev.mydeco.com/two/dirs/with%20spaces%2Bsigns.doc")),
'full/94ccc495a17b9ac5d40e3eabf3afcb8c2c9b9e1a.doc')
self.assertEqual(file_path(Request("http://www.dfsonline.co.uk/get_prod_image.php?img=status_0907_mdm.jpg")),
'full/4507be485f38b0da8a0be9eb2e1dfab8a19223f2.jpg')
self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532/")),
'full/97ee6f8a46cbbb418ea91502fd24176865cf39b2')
self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532")),
'full/244e0dd7d96a3b7b01f54eded250c9e272577aa1')
self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532"),
response=Response("http://www.dorma.co.uk/images/product_details/2532"),
info=object()),
'full/244e0dd7d96a3b7b01f54eded250c9e272577aa1')
def test_fs_store(self):
assert isinstance(self.pipeline.store, FSFilesStore)
self.assertEqual(self.pipeline.store.basedir, self.tempdir)
path = 'some/image/key.jpg'
fullpath = os.path.join(self.tempdir, 'some', 'image', 'key.jpg')
self.assertEqual(self.pipeline.store._get_filesystem_path(path), fullpath)
@defer.inlineCallbacks
def test_file_not_expired(self):
item_url = "http://example.com/file.pdf"
item = _create_item_with_files(item_url)
patchers = [
mock.patch.object(FilesPipeline, 'inc_stats', return_value=True),
mock.patch.object(FSFilesStore, 'stat_file', return_value={
'checksum': 'abc', 'last_modified': time.time()}),
mock.patch.object(FilesPipeline, 'get_media_requests',
return_value=[_prepare_request_object(item_url)])
]
for p in patchers:
p.start()
result = yield self.pipeline.process_item(item, None)
self.assertEqual(result['files'][0]['checksum'], 'abc')
for p in patchers:
p.stop()
@defer.inlineCallbacks
def test_file_expired(self):
item_url = "http://example.com/file2.pdf"
item = _create_item_with_files(item_url)
patchers = [
mock.patch.object(FSFilesStore, 'stat_file', return_value={
'checksum': 'abc',
'last_modified': time.time() - (self.pipeline.expires * 60 * 60 * 24 * 2)}),
mock.patch.object(FilesPipeline, 'get_media_requests',
return_value=[_prepare_request_object(item_url)]),
mock.patch.object(FilesPipeline, 'inc_stats', return_value=True)
]
for p in patchers:
p.start()
result = yield self.pipeline.process_item(item, None)<|fim▁hole|> p.stop()
class FilesPipelineTestCaseFields(unittest.TestCase):
def test_item_fields_default(self):
class TestItem(Item):
name = Field()
file_urls = Field()
files = Field()
for cls in TestItem, dict:
url = 'http://www.example.com/files/1.txt'
item = cls({'name': 'item1', 'file_urls': [url]})
pipeline = FilesPipeline.from_settings(Settings({'FILES_STORE': 's3://example/files/'}))
requests = list(pipeline.get_media_requests(item, None))
self.assertEqual(requests[0].url, url)
results = [(True, {'url': url})]
pipeline.item_completed(results, item, None)
self.assertEqual(item['files'], [results[0][1]])
def test_item_fields_override_settings(self):
class TestItem(Item):
name = Field()
files = Field()
stored_file = Field()
for cls in TestItem, dict:
url = 'http://www.example.com/files/1.txt'
item = cls({'name': 'item1', 'files': [url]})
pipeline = FilesPipeline.from_settings(Settings({
'FILES_STORE': 's3://example/files/',
'FILES_URLS_FIELD': 'files',
'FILES_RESULT_FIELD': 'stored_file'
}))
requests = list(pipeline.get_media_requests(item, None))
self.assertEqual(requests[0].url, url)
results = [(True, {'url': url})]
pipeline.item_completed(results, item, None)
self.assertEqual(item['stored_file'], [results[0][1]])
class FilesPipelineTestCaseCustomSettings(unittest.TestCase):
default_cls_settings = {
"EXPIRES": 90,
"FILES_URLS_FIELD": "file_urls",
"FILES_RESULT_FIELD": "files"
}
file_cls_attr_settings_map = {
("EXPIRES", "FILES_EXPIRES", "expires"),
("FILES_URLS_FIELD", "FILES_URLS_FIELD", "files_urls_field"),
("FILES_RESULT_FIELD", "FILES_RESULT_FIELD", "files_result_field")
}
def setUp(self):
self.tempdir = mkdtemp()
def tearDown(self):
rmtree(self.tempdir)
def _generate_fake_settings(self, prefix=None):
def random_string():
return "".join([chr(random.randint(97, 123)) for _ in range(10)])
settings = {
"FILES_EXPIRES": random.randint(100, 1000),
"FILES_URLS_FIELD": random_string(),
"FILES_RESULT_FIELD": random_string(),
"FILES_STORE": self.tempdir
}
if not prefix:
return settings
return {prefix.upper() + "_" + k if k != "FILES_STORE" else k: v for k, v in settings.items()}
def _generate_fake_pipeline(self):
class UserDefinedFilePipeline(FilesPipeline):
EXPIRES = 1001
FILES_URLS_FIELD = "alfa"
FILES_RESULT_FIELD = "beta"
return UserDefinedFilePipeline
def test_different_settings_for_different_instances(self):
"""
If there are different instances with different settings they should keep
different settings.
"""
custom_settings = self._generate_fake_settings()
another_pipeline = FilesPipeline.from_settings(Settings(custom_settings))
one_pipeline = FilesPipeline(self.tempdir)
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
default_value = self.default_cls_settings[pipe_attr]
self.assertEqual(getattr(one_pipeline, pipe_attr), default_value)
custom_value = custom_settings[settings_attr]
self.assertNotEqual(default_value, custom_value)
self.assertEqual(getattr(another_pipeline, pipe_ins_attr), custom_value)
def test_subclass_attributes_preserved_if_no_settings(self):
"""
If subclasses override class attributes and there are no special settings those values should be kept.
"""
pipe_cls = self._generate_fake_pipeline()
pipe = pipe_cls.from_settings(Settings({"FILES_STORE": self.tempdir}))
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
custom_value = getattr(pipe, pipe_ins_attr)
self.assertNotEqual(custom_value, self.default_cls_settings[pipe_attr])
self.assertEqual(getattr(pipe, pipe_ins_attr), getattr(pipe, pipe_attr))
def test_subclass_attrs_preserved_custom_settings(self):
"""
        If file settings are defined but are not defined for the subclass,
        the file settings should be preserved.
"""
pipeline_cls = self._generate_fake_pipeline()
settings = self._generate_fake_settings()
pipeline = pipeline_cls.from_settings(Settings(settings))
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
value = getattr(pipeline, pipe_ins_attr)
setting_value = settings.get(settings_attr)
self.assertNotEqual(value, self.default_cls_settings[pipe_attr])
self.assertEqual(value, setting_value)
def test_no_custom_settings_for_subclasses(self):
"""
If there are no settings for subclass and no subclass attributes, pipeline should use
attributes of base class.
"""
class UserDefinedFilesPipeline(FilesPipeline):
pass
user_pipeline = UserDefinedFilesPipeline.from_settings(Settings({"FILES_STORE": self.tempdir}))
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
# Values from settings for custom pipeline should be set on pipeline instance.
custom_value = self.default_cls_settings.get(pipe_attr.upper())
self.assertEqual(getattr(user_pipeline, pipe_ins_attr), custom_value)
def test_custom_settings_for_subclasses(self):
"""
If there are custom settings for subclass and NO class attributes, pipeline should use custom
settings.
"""
class UserDefinedFilesPipeline(FilesPipeline):
pass
prefix = UserDefinedFilesPipeline.__name__.upper()
settings = self._generate_fake_settings(prefix=prefix)
user_pipeline = UserDefinedFilesPipeline.from_settings(Settings(settings))
for pipe_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
# Values from settings for custom pipeline should be set on pipeline instance.
custom_value = settings.get(prefix + "_" + settings_attr)
self.assertNotEqual(custom_value, self.default_cls_settings[pipe_attr])
self.assertEqual(getattr(user_pipeline, pipe_inst_attr), custom_value)
def test_custom_settings_and_class_attrs_for_subclasses(self):
"""
If there are custom settings for subclass AND class attributes
setting keys are preferred and override attributes.
"""
pipeline_cls = self._generate_fake_pipeline()
prefix = pipeline_cls.__name__.upper()
settings = self._generate_fake_settings(prefix=prefix)
user_pipeline = pipeline_cls.from_settings(Settings(settings))
for pipe_cls_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
custom_value = settings.get(prefix + "_" + settings_attr)
self.assertNotEqual(custom_value, self.default_cls_settings[pipe_cls_attr])
self.assertEqual(getattr(user_pipeline, pipe_inst_attr), custom_value)
def test_cls_attrs_with_DEFAULT_prefix(self):
class UserDefinedFilesPipeline(FilesPipeline):
DEFAULT_FILES_RESULT_FIELD = "this"
DEFAULT_FILES_URLS_FIELD = "that"
pipeline = UserDefinedFilesPipeline.from_settings(Settings({"FILES_STORE": self.tempdir}))
self.assertEqual(pipeline.files_result_field, "this")
self.assertEqual(pipeline.files_urls_field, "that")
def test_user_defined_subclass_default_key_names(self):
"""Test situation when user defines subclass of FilesPipeline,
but uses attribute names for default pipeline (without prefixing
them with pipeline class name).
"""
settings = self._generate_fake_settings()
class UserPipe(FilesPipeline):
pass
pipeline_cls = UserPipe.from_settings(Settings(settings))
for pipe_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
expected_value = settings.get(settings_attr)
self.assertEqual(getattr(pipeline_cls, pipe_inst_attr),
expected_value)
class TestS3FilesStore(unittest.TestCase):
@defer.inlineCallbacks
def test_persist(self):
assert_aws_environ()
uri = os.environ.get('S3_TEST_FILE_URI')
if not uri:
raise unittest.SkipTest("No S3 URI available for testing")
data = b"TestS3FilesStore: \xe2\x98\x83"
buf = BytesIO(data)
meta = {'foo': 'bar'}
path = ''
store = S3FilesStore(uri)
yield store.persist_file(
path, buf, info=None, meta=meta,
headers={'Content-Type': 'image/png'})
s = yield store.stat_file(path, info=None)
self.assertIn('last_modified', s)
self.assertIn('checksum', s)
self.assertEqual(s['checksum'], '3187896a9657a28163abb31667df64c8')
u = urlparse(uri)
content, key = get_s3_content_and_delete(
u.hostname, u.path[1:], with_key=True)
self.assertEqual(content, data)
if is_botocore():
self.assertEqual(key['Metadata'], {'foo': 'bar'})
self.assertEqual(
key['CacheControl'], S3FilesStore.HEADERS['Cache-Control'])
self.assertEqual(key['ContentType'], 'image/png')
else:
self.assertEqual(key.metadata, {'foo': 'bar'})
self.assertEqual(
key.cache_control, S3FilesStore.HEADERS['Cache-Control'])
self.assertEqual(key.content_type, 'image/png')
class TestGCSFilesStore(unittest.TestCase):
@defer.inlineCallbacks
def test_persist(self):
assert_gcs_environ()
uri = os.environ.get('GCS_TEST_FILE_URI')
if not uri:
raise unittest.SkipTest("No GCS URI available for testing")
data = b"TestGCSFilesStore: \xe2\x98\x83"
buf = BytesIO(data)
meta = {'foo': 'bar'}
path = 'full/filename'
store = GCSFilesStore(uri)
store.POLICY = 'authenticatedRead'
expected_policy = {'role': 'READER', 'entity': 'allAuthenticatedUsers'}
yield store.persist_file(path, buf, info=None, meta=meta, headers=None)
s = yield store.stat_file(path, info=None)
self.assertIn('last_modified', s)
self.assertIn('checksum', s)
self.assertEqual(s['checksum'], 'zc2oVgXkbQr2EQdSdw3OPA==')
u = urlparse(uri)
content, acl, blob = get_gcs_content_and_delete(u.hostname, u.path[1:]+path)
self.assertEqual(content, data)
self.assertEqual(blob.metadata, {'foo': 'bar'})
self.assertEqual(blob.cache_control, GCSFilesStore.CACHE_CONTROL)
self.assertEqual(blob.content_type, 'application/octet-stream')
self.assertIn(expected_policy, acl)
class ItemWithFiles(Item):
file_urls = Field()
files = Field()
def _create_item_with_files(*files):
item = ItemWithFiles()
item['file_urls'] = files
return item
def _prepare_request_object(item_url):
return Request(
item_url,
meta={'response': Response(item_url, status=200, body=b'data')})
if __name__ == "__main__":
unittest.main()<|fim▁end|> | self.assertNotEqual(result['files'][0]['checksum'], 'abc')
for p in patchers: |
<|file_name|>cheese.py<|end_file_name|><|fim▁begin|>"""
Contains CheesePreprocessor
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from ...preprocessors.base import Preprocessor
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class CheesePreprocessor(Preprocessor):
"""
Adds a cheese tag to the resources object
"""
def __init__(self, **kw):
"""
Public constructor
"""
super(CheesePreprocessor, self).__init__(**kw)
def preprocess(self, nb, resources):
"""
Sphinx preprocessing to apply on each notebook.
Parameters
----------
nb : NotebookNode
Notebook being converted
resources : dictionary<|fim▁hole|> Additional resources used in the conversion process. Allows
preprocessors to pass variables into the Jinja engine.
"""
resources['cheese'] = 'real'
return nb, resources<|fim▁end|> | |
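# Usage sketch (standalone call, illustrative only):
#
#   nb, resources = CheesePreprocessor().preprocess(nb, {})
#   assert resources['cheese'] == 'real'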
<|file_name|>jquery.zclip.js<|end_file_name|><|fim▁begin|>/*
* zClip :: jQuery ZeroClipboard v1.1.1
* http://steamdev.com/zclip
*
* Copyright 2011, SteamDev
* Released under the MIT license.
* http://www.opensource.org/licenses/mit-license.php
*
* Date: Wed Jun 01, 2011
*/
(function ($) {
$.fn.zclip = function (params) {
if (typeof params == "object" && !params.length) {
var settings = $.extend({
path: 'ZeroClipboard.swf',
copy: null,
beforeCopy: null,
afterCopy: null,
clickAfter: true,
setHandCursor: true,
setCSSEffects: true
}, params);
return this.each(function () {
var o = $(this);
if (o.is(':visible') && (typeof settings.copy == 'string' || $.isFunction(settings.copy))) {
ZeroClipboard.setMoviePath(settings.path);
var clip = new ZeroClipboard.Client();
if($.isFunction(settings.copy)){
o.bind('zClip_copy',settings.copy);
}
if($.isFunction(settings.beforeCopy)){
o.bind('zClip_beforeCopy',settings.beforeCopy);
}
if($.isFunction(settings.afterCopy)){
o.bind('zClip_afterCopy',settings.afterCopy);
}
clip.setHandCursor(settings.setHandCursor);
clip.setCSSEffects(settings.setCSSEffects);
clip.addEventListener('mouseOver', function (client) {
o.trigger('mouseenter');
});
clip.addEventListener('mouseOut', function (client) {
o.trigger('mouseleave');
});
clip.addEventListener('mouseDown', function (client) {
o.trigger('mousedown');
if(!$.isFunction(settings.copy)){
clip.setText(settings.copy);
} else {
clip.setText(o.triggerHandler('zClip_copy'));
}
if ($.isFunction(settings.beforeCopy)) {
o.trigger('zClip_beforeCopy');
}
});
clip.addEventListener('complete', function (client, text) {
if ($.isFunction(settings.afterCopy)) {
o.trigger('zClip_afterCopy');
} else {
if (text.length > 500) {
text = text.substr(0, 500) + "...\n\n(" + (text.length - 500) + " characters not shown)";
}
o.removeClass('hover');
alert("Copied text to clipboard:\n\n " + text);
}
if (settings.clickAfter) {
o.trigger('click');
}
});
clip.glue(o[0], o.parent()[0]);
$(window).bind('load resize',function(){clip.reposition();});
}
});
} else if (typeof params == "string") {
return this.each(function () {
var o = $(this);
params = params.toLowerCase();
var zclipId = o.data('zclipId');
var clipElm = $('#' + zclipId + '.zclip');
if (params == "remove") {
clipElm.remove();
o.removeClass('active hover');
} else if (params == "hide") {
clipElm.hide();
o.removeClass('active hover');
} else if (params == "show") {
clipElm.show();
}
});
}
}
})(jQuery);
// ZeroClipboard
// Simple Set Clipboard System
// Author: Joseph Huckaby
var ZeroClipboard = {
version: "1.0.7",
clients: {},
// registered upload clients on page, indexed by id
moviePath: 'ZeroClipboard.swf',
// URL to movie
nextId: 1,
// ID of next movie
$: function (thingy) {
// simple DOM lookup utility function
if (typeof(thingy) == 'string') thingy = document.getElementById(thingy);
if (!thingy.addClass) {
// extend element with a few useful methods
thingy.hide = function () {
this.style.display = 'none';
};
thingy.show = function () {
this.style.display = '';
};
thingy.addClass = function (name) {
this.removeClass(name);
this.className += ' ' + name;
};
thingy.removeClass = function (name) {
var classes = this.className.split(/\s+/);
var idx = -1;
for (var k = 0; k < classes.length; k++) {
if (classes[k] == name) {
idx = k;
k = classes.length;
}
}
if (idx > -1) {
classes.splice(idx, 1);
this.className = classes.join(' ');
}
return this;
};
thingy.hasClass = function (name) {
return !!this.className.match(new RegExp("\\s*" + name + "\\s*"));
};
}
return thingy;
},
setMoviePath: function (path) {
// set path to ZeroClipboard.swf
this.moviePath = path;
},
dispatch: function (id, eventName, args) {
// receive event from flash movie, send to client
var client = this.clients[id];
if (client) {
client.receiveEvent(eventName, args);
}
},
register: function (id, client) {
// register new client to receive events
this.clients[id] = client;
},
getDOMObjectPosition: function (obj, stopObj) {
// get absolute coordinates for dom element
var info = {
left: 0,
top: 0,
width: obj.width ? obj.width : obj.offsetWidth,
height: obj.height ? obj.height : obj.offsetHeight
};
if (obj && (obj != stopObj)) {
info.left += obj.offsetLeft;
info.top += obj.offsetTop;
}
return info;
},
Client: function (elem) {
// constructor for new simple upload client
this.handlers = {};
// unique ID
this.id = ZeroClipboard.nextId++;
this.movieId = 'ZeroClipboardMovie_' + this.id;
// register client with singleton to receive flash events
ZeroClipboard.register(this.id, this);
// create movie
if (elem) this.glue(elem);
}
};
ZeroClipboard.Client.prototype = {
id: 0,
// unique ID for us
ready: false,
// whether movie is ready to receive events or not
movie: null,
// reference to movie object
clipText: '',
// text to copy to clipboard
handCursorEnabled: true,
// whether to show hand cursor, or default pointer cursor
cssEffects: true,
// enable CSS mouse effects on dom container
handlers: null,
// user event handlers
glue: function (elem, appendElem, stylesToAdd) {
// glue to DOM element
// elem can be ID or actual DOM element object
this.domElement = ZeroClipboard.$(elem);
// float just above object, or zIndex 99 if dom element isn't set
var zIndex = 99;
if (this.domElement.style.zIndex) {
<|fim▁hole|>
if (typeof(appendElem) == 'string') {
appendElem = ZeroClipboard.$(appendElem);
} else if (typeof(appendElem) == 'undefined') {
appendElem = document.getElementsByTagName('body')[0];
}
// find X/Y position of domElement
var box = ZeroClipboard.getDOMObjectPosition(this.domElement, appendElem);
// create floating DIV above element
this.div = document.createElement('div');
this.div.className = "zclip";
this.div.id = "zclip-" + this.movieId;
$(this.domElement).data('zclipId', 'zclip-' + this.movieId);
var style = this.div.style;
style.position = 'absolute';
style.left = '' + box.left + 'px';
style.top = '' + box.top + 'px';
style.width = '' + box.width + 'px';
style.height = '' + box.height + 'px';
style.zIndex = zIndex;
if (typeof(stylesToAdd) == 'object') {
for (addedStyle in stylesToAdd) {
style[addedStyle] = stylesToAdd[addedStyle];
}
}
// style.backgroundColor = '#f00'; // debug
appendElem.appendChild(this.div);
this.div.innerHTML = this.getHTML(box.width, box.height);
},
getHTML: function (width, height) {
// return HTML for movie
var html = '';
var flashvars = 'id=' + this.id + '&width=' + width + '&height=' + height;
if (navigator.userAgent.match(/MSIE/)) {
// IE gets an OBJECT tag
var protocol = location.href.match(/^https/i) ? 'https://' : 'http://';
html += '<object classid="clsid:d27cdb6e-ae6d-11cf-96b8-444553540000" codebase="' + protocol + 'download.macromedia.com/pub/shockwave/cabs/flash/swflash.cab#version=9,0,0,0" width="' + width + '" height="' + height + '" id="' + this.movieId + '" align="middle"><param name="allowScriptAccess" value="always" /><param name="allowFullScreen" value="false" /><param name="movie" value="' + ZeroClipboard.moviePath + '" /><param name="loop" value="false" /><param name="menu" value="false" /><param name="quality" value="best" /><param name="bgcolor" value="#ffffff" /><param name="flashvars" value="' + flashvars + '"/><param name="wmode" value="transparent"/></object>';
} else {
// all other browsers get an EMBED tag
html += '<embed id="' + this.movieId + '" src="' + ZeroClipboard.moviePath + '" loop="false" menu="false" quality="best" bgcolor="#ffffff" width="' + width + '" height="' + height + '" name="' + this.movieId + '" align="middle" allowScriptAccess="always" allowFullScreen="false" type="application/x-shockwave-flash" pluginspage="http://www.macromedia.com/go/getflashplayer" flashvars="' + flashvars + '" wmode="transparent" />';
}
return html;
},
hide: function () {
// temporarily hide floater offscreen
if (this.div) {
this.div.style.left = '-2000px';
}
},
show: function () {
// show ourselves after a call to hide()
this.reposition();
},
destroy: function () {
// destroy control and floater
if (this.domElement && this.div) {
this.hide();
this.div.innerHTML = '';
var body = document.getElementsByTagName('body')[0];
try {
body.removeChild(this.div);
} catch (e) {;
}
this.domElement = null;
this.div = null;
}
},
reposition: function (elem) {
// reposition our floating div, optionally to new container
// warning: container CANNOT change size, only position
if (elem) {
this.domElement = ZeroClipboard.$(elem);
if (!this.domElement) this.hide();
}
if (this.domElement && this.div) {
var box = ZeroClipboard.getDOMObjectPosition(this.domElement);
var style = this.div.style;
style.left = '' + box.left + 'px';
style.top = '' + box.top + 'px';
}
},
setText: function (newText) {
// set text to be copied to clipboard
this.clipText = newText;
if (this.ready) {
this.movie.setText(newText);
}
},
addEventListener: function (eventName, func) {
// add user event listener for event
// event types: load, queueStart, fileStart, fileComplete, queueComplete, progress, error, cancel
eventName = eventName.toString().toLowerCase().replace(/^on/, '');
if (!this.handlers[eventName]) {
this.handlers[eventName] = [];
}
this.handlers[eventName].push(func);
},
setHandCursor: function (enabled) {
// enable hand cursor (true), or default arrow cursor (false)
this.handCursorEnabled = enabled;
if (this.ready) {
this.movie.setHandCursor(enabled);
}
},
setCSSEffects: function (enabled) {
// enable or disable CSS effects on DOM container
this.cssEffects = !! enabled;
},
receiveEvent: function (eventName, args) {
// receive event from flash
eventName = eventName.toString().toLowerCase().replace(/^on/, '');
// special behavior for certain events
switch (eventName) {
case 'load':
// movie claims it is ready, but in IE this isn't always the case...
// bug fix: Cannot extend EMBED DOM elements in Firefox, must use traditional function
this.movie = document.getElementById(this.movieId);
if (!this.movie) {
var self = this;
setTimeout(function () {
self.receiveEvent('load', null);
}, 1);
return;
}
// firefox on pc needs a "kick" in order to set these in certain cases
if (!this.ready && navigator.userAgent.match(/Firefox/) && navigator.userAgent.match(/Windows/)) {
var self = this;
setTimeout(function () {
self.receiveEvent('load', null);
}, 100);
this.ready = true;
return;
}
this.ready = true;
try {
this.movie.setText(this.clipText);
} catch (e) {}
try {
this.movie.setHandCursor(this.handCursorEnabled);
} catch (e) {}
break;
case 'mouseover':
if (this.domElement && this.cssEffects) {
this.domElement.addClass('hover');
if (this.recoverActive) {
this.domElement.addClass('active');
}
}
break;
case 'mouseout':
if (this.domElement && this.cssEffects) {
this.recoverActive = false;
if (this.domElement.hasClass('active')) {
this.domElement.removeClass('active');
this.recoverActive = true;
}
this.domElement.removeClass('hover');
}
break;
case 'mousedown':
if (this.domElement && this.cssEffects) {
this.domElement.addClass('active');
}
break;
case 'mouseup':
if (this.domElement && this.cssEffects) {
this.domElement.removeClass('active');
this.recoverActive = false;
}
break;
} // switch eventName
if (this.handlers[eventName]) {
for (var idx = 0, len = this.handlers[eventName].length; idx < len; idx++) {
var func = this.handlers[eventName][idx];
if (typeof(func) == 'function') {
// actual function reference
func(this, args);
} else if ((typeof(func) == 'object') && (func.length == 2)) {
// PHP style object + method, i.e. [myObject, 'myMethod']
func[0][func[1]](this, args);
} else if (typeof(func) == 'string') {
// name of function
window[func](this, args);
}
} // foreach event handler defined
} // user defined handler for event
}
};<|fim▁end|> | zIndex = parseInt(this.domElement.style.zIndex, 10) + 1;
}
|
<|file_name|>NodeListTest.java<|end_file_name|><|fim▁begin|>package ru.job4j.pro.collections.list;
import org.junit.Test;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
/**
* NodeListTest class.
*
* @author Vladimir Ivanov
* @version 0.1
* @since 30.08.2017
*/
public class NodeListTest {
/**
* Test list is not cycled.
*/
@Test
public void whenListIsNotCycledThenGetFalse() {
Node<Integer> first = new Node<>(1);
Node<Integer> two = new Node<>(2);
Node<Integer> third = new Node<>(3);
Node<Integer> four = new Node<>(4);
Node<Integer> five = new Node<>(5);
first.next = two;
two.next = third;
third.next = four;
four.next = five;
NodeList<Integer> list = new NodeList<>(first);
boolean result = list.hasCycle();
assertThat(result, is(false));
}<|fim▁hole|> /**
* Test list is cycled.
*/
@Test
public void whenListIsCycledThenGetTrue() {
Node<Integer> first = new Node<>(1);
Node<Integer> two = new Node<>(2);
Node<Integer> third = new Node<>(3);
Node<Integer> four = new Node<>(4);
first.next = two;
two.next = third;
third.next = four;
four.next = first;
NodeList<Integer> list = new NodeList<>(first);
boolean result = list.hasCycle();
assertThat(result, is(true));
}
/**
* Test list is cycled.
*/
@Test
public void whenBigListIsCycledThenGetTrue() {
Node<Integer> node = new Node<>(0);
Node<Integer> cycleFrom = null;
Node<Integer> cycleTo = null;
NodeList<Integer> list = new NodeList<>(node);
for (int value = 1; value < 10000000; value++) {
node.next = new Node<>(value);
node = node.next;
if (value == 900000) {
cycleTo = node;
} else if (value == 9990000) {
cycleFrom = node;
}
}
cycleFrom.next = cycleTo;
boolean result = list.hasCycle();
assertThat(result, is(true));
}
}<|fim▁end|> | |
<|file_name|>map.py<|end_file_name|><|fim▁begin|>import json
import math
__author__ = 'apostol3'
class Map:
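    """Track/map container: walls is a list of polylines (each a list of
    (x, y) points), headline is a single list of (x, y) points, cars holds
    (x, y, angle) tuples and finish holds up to two (x, y) points."""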
def __init__(self, w, h):
self.max_time = 120
self.size = (w, h)
self.walls = []
self.headline = []
self.cars = []
self.finish = []
self.objects = []
self.car_size = (1.8/2, 4.6/2)
def start_new_wall(self):
self.walls.append([])
def append_wall_point(self, x, y):
if x > self.size[0] or y > self.size[1]:
self.start_new_wall()
return
self.walls[-1].append((x, y))
def append_headline_point(self, x, y):
if x > self.size[0] or y > self.size[1]:
return
self.headline.append((x, y))
def create_car(self, x, y):
self.cars.append((x, y, 3 * math.pi / 2))
def append_finish_point(self, x, y):
if x > self.size[0] or y > self.size[1]:
self.finish.clear()
if len(self.finish) < 2:
self.finish.append((x, y))
else:
self.finish = [(x, y)]
@staticmethod
def open_from_file(file):
f = open(file, 'r')
doc = json.load(f)
f.close()
size = doc['size']
map = Map(*size)<|fim▁hole|> map.cars = doc['cars']
return map
def save_to_file(self, file):
filename = open(file, 'w')
doc = {'size': self.size, 'max_time': self.max_time, 'finish': self.finish,
'walls': self.walls, 'headline': self.headline, 'cars': self.cars}
if len(doc['walls']) != 0 and len(doc['walls'][-1]) == 0:
doc['walls'].pop()
out_inf = json.dumps(doc, indent=4)
filename.write(out_inf)
filename.close()<|fim▁end|> | map.max_time = doc['max_time']
map.walls = doc['walls']
map.finish = doc['finish']
map.headline = doc['headline'] |
<|file_name|>util.py<|end_file_name|><|fim▁begin|>import json
from dateutil import parser as datetime_parser<|fim▁hole|>
from occam.app import get_redis
from occam.runtime import OCCAM_SERVER_CONFIG_KEY
def get_servers():
redis = get_redis()
servers = json.loads(redis.get(OCCAM_SERVER_CONFIG_KEY))
return servers.items()
def iterate_servers():
redis = get_redis()
servers = json.loads(redis.get(OCCAM_SERVER_CONFIG_KEY))
for server_name, server_location in servers.iteritems():
yield server_name, server_location
def sorted_by_time_element(l, element_getter=None):
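    # Sort the items by the datetime parsed from each element (or from the
    # value returned by element_getter, when one is provided).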
if not element_getter:
element_getter = lambda x: x
key_getter = lambda x: datetime_parser.parse(element_getter(x))
return sorted(l, key=key_getter)<|fim▁end|> | |
<|file_name|>admin.py<|end_file_name|><|fim▁begin|><|fim▁hole|># 2015 by Pablo Martín <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from testing.unusual_fields.models import UnusualModel
class UnusualModelAdmin(admin.ModelAdmin):
pass
class ResourceAdmin(admin.ModelAdmin):
pass
admin.site.register(UnusualModel, UnusualModelAdmin)<|fim▁end|> | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2013 by Yaco Sistemas <[email protected]> |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.shortcuts import render
def index(request):<|fim▁hole|><|fim▁end|> | return render(request, 'first.html') |
<|file_name|>AWSInterface.java<|end_file_name|><|fim▁begin|>package edu.purdue.eaps.weatherpipe;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.UnsupportedEncodingException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Date;
import java.util.List;
import java.util.TimeZone;
import java.lang.System;
import java.lang.Runtime;
import org.apache.commons.io.FileUtils;
import org.apache.log4j.PropertyConfigurator;
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.profile.ProfileCredentialsProvider;
import com.amazonaws.event.ProgressEvent;
import com.amazonaws.event.ProgressListener;
import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce;
import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClient;
import com.amazonaws.services.elasticmapreduce.model.Cluster;
import com.amazonaws.services.elasticmapreduce.model.DescribeClusterRequest;
import com.amazonaws.services.elasticmapreduce.model.DescribeClusterResult;
import com.amazonaws.services.elasticmapreduce.model.HadoopJarStepConfig;
import com.amazonaws.services.elasticmapreduce.model.JobFlowInstancesConfig;
import com.amazonaws.services.elasticmapreduce.model.RunJobFlowRequest;
import com.amazonaws.services.elasticmapreduce.model.RunJobFlowResult;
import com.amazonaws.services.elasticmapreduce.model.StepConfig;
import com.amazonaws.services.elasticmapreduce.model.TerminateJobFlowsRequest;
import com.amazonaws.services.identitymanagement.AmazonIdentityManagementClient;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.CreateBucketRequest;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.HeadBucketRequest;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.transfer.Download;
import com.amazonaws.services.s3.transfer.MultipleFileDownload;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.Upload;
public class AWSInterface extends MapReduceInterface {
private String jobBucketNamePrefix = "weatherpipe";
private AmazonElasticMapReduce emrClient;
private AmazonS3 s3client;
private TransferManager transMan;
private Region region;
private String jobSetupDirName;
private String jobLogDirName;
//private String defaultInstance = "c3.xlarge";
private String jobBucketName;
private String jobID;
private int bytesTransfered = 0;
public AWSInterface(String job, String bucket){
String weatherPipeBinaryPath = WeatherPipe.class.getProtectionDomain().getCodeSource().getLocation().getPath();
String log4jConfPath = weatherPipeBinaryPath.substring(0, weatherPipeBinaryPath.lastIndexOf("/")) + "/log4j.properties";
PropertyConfigurator.configure(log4jConfPath);
jobBucketName = bucket;
AwsBootstrap(job);
}
private void AwsBootstrap(String job) {
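        // Set up the S3 and EMR clients from the default credentials profile,
        // derive a bucket name from a SHA-256 hash of the IAM user id when none
        // was supplied, pick a timestamp-based job ID when none was given, and
        // create the local job, job_setup and logs directories.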
AWSCredentials credentials;
ClientConfiguration conf;
String userID;
MessageDigest md = null;
byte[] shaHash;
StringBuffer hexSha;
DateFormat df;
TimeZone tz;
String isoDate;
File jobDir;
File jobSetupDir;
File jobLogDir;
int i;
conf = new ClientConfiguration();
// 2 minute timeout
conf.setConnectionTimeout(120000);
credentials = new ProfileCredentialsProvider("default").getCredentials();
// TODO: add better credential searching later
region = Region.getRegion(Regions.US_EAST_1);
s3client = new AmazonS3Client(credentials, conf);
s3client.setRegion(region);
<|fim▁hole|>
emrClient = new AmazonElasticMapReduceClient(credentials, conf);
emrClient.setRegion(region);
if(jobBucketName == null) {
userID = new AmazonIdentityManagementClient(credentials).getUser().getUser().getUserId();
try {
md = MessageDigest.getInstance("SHA-256");
md.update(userID.getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (NoSuchAlgorithmException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
shaHash = md.digest();
hexSha = new StringBuffer();
for(byte b : shaHash) {
hexSha.append(String.format("%02X", b));
}
jobBucketName = jobBucketNamePrefix + "." + hexSha;
if(jobBucketName.length() > 63) {
jobBucketName = jobBucketName.substring(0,62);
}
}
jobBucketName = jobBucketName.toLowerCase();
if(job == null) {
tz = TimeZone.getTimeZone("UTC");
df = new SimpleDateFormat("yyyy-MM-dd'T'HH.mm");
df.setTimeZone(tz);
isoDate = df.format(new Date());
jobID = isoDate + "." + Calendar.getInstance().get(Calendar.MILLISECOND);
// UUID Code if date isn't good
// jobID = UUID.randomUUID().toString();
} else {
jobID = job;
}
jobDirName = "WeatherPipeJob" + jobID;
jobDir = new File(jobDirName);
i = 0;
while(jobDir.exists()) {
i++;
jobDirName = jobDirName + "-" + i;
jobDir = new File(jobDirName);
}
jobDir.mkdir();
jobSetupDirName = jobDirName + "/" + "job_setup";
jobSetupDir = new File(jobSetupDirName);
jobSetupDir.mkdir();
jobLogDirName = jobDirName + "/" + "logs";
jobLogDir = new File(jobLogDirName);
jobLogDir.mkdir();
}
private void UploadFileToS3(String jobBucketName, String key, File file) {
Upload upload;
PutObjectRequest request;
request = new PutObjectRequest(
jobBucketName, key, file);
bytesTransfered = 0;
// Subscribe to the event and provide event handler.
request.setGeneralProgressListener(new ProgressListener() {
@Override
public void progressChanged(ProgressEvent progressEvent) {
bytesTransfered += progressEvent.getBytesTransferred();
}
});
System.out.println();
upload = transMan.upload(request);
while(!upload.isDone()) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
continue;
}
System.out.print("\rTransfered: " + bytesTransfered/1024 + "K / " + file.length()/1024 + "K");
}
// If we got an error the count could be off
System.out.print("\rTransfered: " + bytesTransfered/1024 + "K / " + bytesTransfered/1024 + "K");
System.out.println();
System.out.println("Transfer Complete");
}
public String FindOrCreateWeatherPipeJobDirectory() {
String bucketLocation = null;
try {
if(!(s3client.doesBucketExist(jobBucketName))) {
// Note that CreateBucketRequest does not specify region. So bucket is
// created in the region specified in the client.
s3client.createBucket(new CreateBucketRequest(
jobBucketName));
} else {
s3client.headBucket(new HeadBucketRequest(jobBucketName));
}
bucketLocation = "s3n://" + jobBucketName + "/";
} catch (AmazonServiceException ase) {
if(ase.getStatusCode() == 403) {
System.out.println("You do not have propper permissions to access " + jobBucketName +
". S3 uses a global name space, please make sure you are using a unique bucket name.");
System.exit(1);
} else {
System.out.println("Caught an AmazonServiceException, which " +
"means your request made it " +
"to Amazon S3, but was rejected with an error response" +
" for some reason.");
System.out.println("Error Message: " + ase.getMessage());
System.out.println("HTTP Status Code: " + ase.getStatusCode());
System.out.println("AWS Error Code: " + ase.getErrorCode());
System.out.println("Error Type: " + ase.getErrorType());
System.out.println("Request ID: " + ase.getRequestId());
}
System.exit(1);
} catch (AmazonClientException ace) {
System.out.println("Caught an AmazonClientException, which " +
"means the client encountered " +
"an internal error while trying to " +
"communicate with S3, " +
"such as not being able to access the network.");
System.out.println("Error Message: " + ace.getMessage());
System.exit(1);
}
return bucketLocation;
}
public String UploadInputFileList(ArrayList<String> fileList, String dataDirName) {
String key = jobID + "_input";
String uploadFileString = "";
PrintWriter inputFile = null;
File file = new File(jobSetupDirName + "/" + key);
for (String s : fileList) uploadFileString += dataDirName + " " + s + "\n";
try {
inputFile = new PrintWriter(file);
inputFile.print(uploadFileString);
inputFile.close();
} catch (FileNotFoundException e) {
// TODO Auto-generated catch block
e.printStackTrace();
System.exit(1);
}
UploadFileToS3(jobBucketName, key, file);
return "s3n://" + jobBucketName + "/" + key;
}
public String UploadMPJarFile(String fileLocation) {
String key = jobID + "WeatherPipeMapreduce.jar";
File jarFile = new File(fileLocation);
UploadFileToS3(jobBucketName, key, jarFile);
try {
FileUtils.copyFile(new File(fileLocation), new File(jobSetupDirName + "/" + key));
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
System.exit(1);
}
return "s3n://" + jobBucketName + "/" + key;
}
public void CreateMRJob(String jobInputLocation, String jobJarLocation, int numInstances, String instanceType) {
// Modified from https://mpouttuclarke.wordpress.com/2011/06/24/how-to-run-an-elastic-mapreduce-job-using-the-java-sdk/
// first run aws emr create-default-roles
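        // Build the EMR job flow configuration, submit it, poll the cluster
        // state until it terminates, then fetch the logs and (on success) the
        // reducer output, reporting normalized hours, approximate cost and runtime.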
String hadoopVersion = "2.4.0";
String flowName = "WeatherPipe_" + jobID;
String logS3Location = "s3n://" + jobBucketName + "/" + jobID + ".log";
String outS3Location = "s3n://" + jobBucketName + "/" + jobID + "_output";
String[] arguments = new String[] {jobInputLocation, outS3Location};
List<String> jobArguments = Arrays.asList(arguments);
DescribeClusterRequest describeClusterRequest = new DescribeClusterRequest();
DescribeClusterResult describeClusterResult;
File rawOutputFile = new File(jobDirName + "/" + jobID + "_raw_map_reduce_output");
File localLogDir = new File(jobLogDirName);
int normalized_hours;
double cost;
long startTimeOfProgram, endTimeOfProgram, elapsedTime;
final String resultId;
String line, lastStateMsg;
StringBuilder jobOutputBuild;
int i;
Download download;
int fileLength;
BufferedReader lineRead;
MultipleFileDownload logDirDownload;
startTimeOfProgram = System.currentTimeMillis();
if(instanceType == null) {
instanceType = "c3.xlarge";
System.out.println("Instance type is set to default: " + instanceType);
System.out.println();
}
try {
// Configure instances to use
JobFlowInstancesConfig instances = new JobFlowInstancesConfig();
System.out.println("Using EMR Hadoop v" + hadoopVersion);
instances.setHadoopVersion(hadoopVersion);
System.out.println("Using instance count: " + numInstances);
instances.setInstanceCount(numInstances);
System.out.println("Using master instance type: " + instanceType);
instances.setMasterInstanceType("c3.xlarge");
// do these need to be different??
System.out.println("Using slave instance type: " + instanceType);
instances.setSlaveInstanceType(instanceType);
// Configure the job flow
System.out.println("Configuring flow: " + flowName);
RunJobFlowRequest request = new RunJobFlowRequest(flowName, instances);
System.out.println("\tusing log URI: " + logS3Location);
request.setLogUri(logS3Location);
request.setServiceRole("EMR_DefaultRole");
request.setAmiVersion("3.1.0");
// this may change for some people
request.setJobFlowRole("EMR_EC2_DefaultRole");
System.out.println("\tusing jar URI: " + jobJarLocation);
HadoopJarStepConfig jarConfig = new HadoopJarStepConfig(jobJarLocation);
System.out.println("\tusing args: " + jobArguments);
jarConfig.setArgs(jobArguments);
StepConfig stepConfig =
new StepConfig(jobJarLocation.substring(jobJarLocation.indexOf('/') + 1),
jarConfig);
request.setSteps(Arrays.asList(new StepConfig[] { stepConfig }));
System.out.println("Configured hadoop jar succesfully!\n");
//Run the job flow
RunJobFlowResult result = emrClient.runJobFlow(request);
System.out.println("Trying to run job flow!\n");
describeClusterRequest.setClusterId(result.getJobFlowId());
resultId = result.getJobFlowId();
//Check the status of the running job
String lastState = "";
Runtime.getRuntime().addShutdownHook(new Thread() {public void run()
{ List<String> jobIds = new ArrayList<String>();
jobIds.add(resultId);
TerminateJobFlowsRequest tjfr = new TerminateJobFlowsRequest(jobIds);
emrClient.terminateJobFlows(tjfr);
System.out.println();
System.out.println("Amazon EMR job shutdown");
}});
while (true)
{
describeClusterResult = emrClient.describeCluster(describeClusterRequest);
Cluster cluster = describeClusterResult.getCluster();
lastState = cluster.getStatus().getState();
lastStateMsg = "\rCurrent State of Cluster: " + lastState;
System.out.print(lastStateMsg + " ");
if(!lastState.startsWith("TERMINATED")) {
lastStateMsg = lastStateMsg + " ";
for(i = 0; i < 10; i++) {
lastStateMsg = lastStateMsg + ".";
System.out.print(lastStateMsg);
Thread.sleep(1000);
}
continue;
} else {
lastStateMsg = lastStateMsg + " ";
System.out.print(lastStateMsg);
}
// it reaches here when the emr has "terminated"
normalized_hours = cluster.getNormalizedInstanceHours();
cost = normalized_hours * 0.011;
endTimeOfProgram = System.currentTimeMillis(); // returns milliseconds
elapsedTime = (endTimeOfProgram - startTimeOfProgram)/(1000);
logDirDownload = transMan.downloadDirectory(jobBucketName, jobID + ".log", localLogDir);
while(!logDirDownload.isDone()) {
Thread.sleep(1000);
}
System.out.println();
if(!lastState.endsWith("ERRORS")) {
bytesTransfered = 0;
fileLength = (int)s3client.getObjectMetadata(jobBucketName, jobID + "_output" + "/part-r-00000").getContentLength();
GetObjectRequest fileRequest = new GetObjectRequest(jobBucketName, jobID + "_output" + "/part-r-00000");
fileRequest.setGeneralProgressListener(new ProgressListener() {
@Override
public void progressChanged(ProgressEvent progressEvent) {
bytesTransfered += progressEvent.getBytesTransferred();
}
});
                    download = transMan.download(fileRequest, rawOutputFile);  // reuse the request that carries the progress listener
System.out.println("Downloading Output");
while(!download.isDone()) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
continue;
}
// System.out.print("\rTransfered: " + bytesTransfered/1024 + "K / " + fileLength/1024 + "K ");
}
/* Printing this stuff isn't working
// If we got an error the count could be off
System.out.print("\rTransfered: " + bytesTransfered/1024 + "K / " + bytesTransfered/1024 + "K ");
System.out.println();
*/
System.out.println("Transfer Complete");
System.out.println("The job has ended and output has been downloaded to " + jobDirName);
System.out.printf("Normalized instance hours: %d\n", normalized_hours);
System.out.printf("Approximate cost of this run: $%2.02f\n", cost);
System.out.println("The job took " + elapsedTime + " seconds to finish" );
lineRead = new BufferedReader(new FileReader(rawOutputFile));
jobOutputBuild = new StringBuilder("");
while((line = lineRead.readLine()) != null) {
if(line.startsWith("Run#")) {
jobOutputBuild = new StringBuilder("");
jobOutputBuild.append(line.split("\t")[1]);
} else {
jobOutputBuild.append("\n");
jobOutputBuild.append(line);
}
}
jobOutput = jobOutputBuild.toString();
break;
}
jobOutput = "FAILED";
System.out.println("The job has ended with errors, please check the log in " + localLogDir);
System.out.printf("Normalized instance hours: %d\n", normalized_hours);
System.out.printf("Approximate cost of this run = $%2.02f\n", cost);
System.out.println("The job took " + elapsedTime + " seconds to finish" );
break;
}
} catch (AmazonServiceException ase) {
System.out.println("Caught Exception: " + ase.getMessage());
System.out.println("Reponse Status Code: " + ase.getStatusCode());
System.out.println("Error Code: " + ase.getErrorCode());
System.out.println("Request ID: " + ase.getRequestId());
} catch (InterruptedException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
public void addJobBucketName (String jobBucketName){
this.jobBucketName = jobBucketName;
}
protected void close() {
transMan.shutdownNow();
}
}<|fim▁end|> | transMan = new TransferManager(s3client); |
<|file_name|>form.element.view.hidden.js<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2011-2013 Lp digital system
*
* This file is part of BackBee.
*
* BackBee is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* BackBee is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with BackBee. If not, see <http://www.gnu.org/licenses/>.
*/
define(['Core', 'Core/Renderer', 'BackBone'], function (Core, Renderer, Backbone) {
'use strict';
var HiddenView = Backbone.View.extend({
initialize: function (template, formTag, element) {
this.el = formTag;
this.template = template;
this.element = element;
this.bindEvents();
},
bindEvents: function () {
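            // On form submit, compare the input's current value with the stored
            // value and toggle the "updated" marker text for this element.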
var self = this;
Core.Mediator.subscribe('before:form:submit', function (form) {
if (form.attr('id') === self.el) {
var element = form.find('.element_' + self.element.getKey()),
input = element.find('input[name="' + self.element.getKey() + '"]'),
span = element.find('span.updated'),
oldValue = self.element.value;
if (input.val() !== oldValue) {
span.text('updated');
} else {
span.text('');
}
}
});
},
/**
* Render the template into the DOM with the Renderer
* @returns {String} html
*/
render: function () {
return Renderer.render(this.template, {element: this.element});<|fim▁hole|>
return HiddenView;
});<|fim▁end|> | }
}); |
<|file_name|>summarize.py<|end_file_name|><|fim▁begin|>import numpy as np
import re
import sys
import operator
import matplotlib.pyplot as plt
def parse_transposes(fn):
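    # Scan a transpose benchmark log, collecting the "N x M" matrix sizes and
    # the "Throughput: ... GB" values into two parallel lists.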
size = re.compile('(\d+) x (\d+)')
tp = re.compile('Throughput: ([\d\.]+) GB')<|fim▁hole|> for l in f:
s = size.search(l)
if s:
sizes.append((int(s.group(1)), int(s.group(2))))
else:
t = tp.search(l)
if t:
tps.append(float(t.group(1)))
return sizes, tps
def top_n(kv, n=5):
return sorted(kv, reverse=True, key=operator.itemgetter(0))[:n]
if __name__ == '__main__':
sizes, tps = parse_transposes(sys.argv[1])
np.savez(sys.argv[1], sizes=sizes, tps=tps)
print("Median throughput: %s GB/s" % np.median(tps))
print("Max throughputs:")
for tp, size in top_n(zip(tps, sizes)):
print(" %s GB/s, at dimension %s" % (tp, size))
fig = plt.figure()
ax = fig.add_subplot(111)
n, bins, patches = ax.hist(tps, 50, label=sys.argv[1])
ax.set_xlabel('GB/s')
ax.set_title("Skinny Matrix Transpose Throughput")
ax.legend()
plt.show()<|fim▁end|> | sizes = []
tps = []
with open(fn, 'r') as f: |
<|file_name|>main.js<|end_file_name|><|fim▁begin|>var dropzoneOverlay = document.querySelector('.dropzone-overlay');
function getDataTransferFiles(event) {
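  // Normalize a drag/drop or file-input event into a plain array of files,
  // keeping at most the first entry.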
var dataTransferItemsList = [];
if (event.dataTransfer) {
var dt = event.dataTransfer;
if (dt.files && dt.files.length) {
dataTransferItemsList = dt.files;
} else if (dt.items && dt.items.length) {
// During the drag even the dataTransfer.files is null
// but Chrome implements some drag store, which is accesible via dataTransfer.items
dataTransferItemsList = dt.items;
}
} else if (event.target && event.target.files) {
dataTransferItemsList = event.target.files;
}
if (dataTransferItemsList.length > 0) {<|fim▁hole|> dataTransferItemsList = [dataTransferItemsList[0]];
}
// Convert from DataTransferItemsList to the native Array
return Array.prototype.slice.call(dataTransferItemsList);
}
function showDragFocus() {
dropzoneOverlay.className = 'dropzone-overlay active';
}
function hideDragFocus() {
dropzoneOverlay.className = 'dropzone-overlay';
}
function onFileDragEnter(ev) {
ev.preventDefault();
showDragFocus();
}
function onFileDragOver(ev) {
ev.preventDefault();
}
function onFileDrop(ev) {
ev.preventDefault();
hideDragFocus();
var fileList = getDataTransferFiles(ev);
updateStickerImage(fileList[0]);
return null;
}
function onFileDragLeave(ev) {
ev.preventDefault();
  console.log(ev.target);
if (ev.target !== document.body) {
return;
}
hideDragFocus();
}
function drawImage(canvas, imageBitmap) {
var ctx = canvas.getContext('2d');
  ctx.drawImage(imageBitmap, 0, 0);
}
function updateStickerImage(file) {
var reader = new FileReader();
reader.onload = function(ev) {
var dataURL = ev.target.result;
document.querySelectorAll('.sticker-img').forEach(function(img) {
img.style = 'background-image: url(' + dataURL + ')';
});
}
reader.readAsDataURL(file);
}
document.body.ondragenter = onFileDragEnter;
document.body.ondragover = onFileDragOver;
document.body.ondragleave = onFileDragLeave;
document.body.ondrop = onFileDrop;<|fim▁end|> | |
<|file_name|>pep8.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012-2013 Paul Tagliamonte <[email protected]>
# Copyright (c) 2013 Leo Cavaille <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a<|fim▁hole|># and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from debile.slave.wrappers.pep8 import parse_pep8
from debile.slave.utils import cd
from debile.utils.commands import run_command
def pep8(dsc, analysis):
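    # Unpack the source package, run pep8 over the extracted tree and attach
    # each parsed issue to the analysis; `failed` reflects pep8's exit status.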
run_command(["dpkg-source", "-x", dsc, "source-pep8"])
with cd('source-pep8'):
out, _, ret = run_command(['pep8', '.'])
failed = ret != 0
for issue in parse_pep8(out.splitlines()):
analysis.results.append(issue)
return (analysis, out, failed, None, None)
def version():
out, _, ret = run_command(['pep8', '--version'])
if ret != 0:
raise Exception("pep8 is not installed")
return ('pep8', out.strip())<|fim▁end|> | # copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, |
<|file_name|>rnn_char_windowing.py<|end_file_name|><|fim▁begin|># coding: utf-8
# # Simple Character-level Language Model using vanilla RNN
# 2017-04-21 jkang
# Python3.5
# TensorFlow1.0.1
#
# - Different window sizes were applied, e.g. n_window = 3 (three-character window)
# - input: 'hello_world_good_morning_see_you_hello_grea'
# - output: 'ello_world_good_morning_see_you_hello_great'
#
# ### Reference:
# - https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py
# - https://github.com/aymericdamien/TensorFlow-Examples
# - https://hunkim.github.io/ml/
#
# ### Comment:
# - Trains at the character level rather than the word level
# - Uses only one example for training
#   : that single example is windowed into multiple samples (each new sample has length window_size)
# - Uses BasicRNNCell as the cell type (see the first reference)
# - Uses dynamic_rnn (said to be more time/compute-efficient than the older tf.nn.rnn)
# - Uses AdamOptimizer
# In[1]:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import random
# Input/Ouput data
char_raw = 'hello_world_good_morning_see_you_hello_great'
char_list = sorted(list(set(char_raw)))
char_to_idx = {c: i for i, c in enumerate(char_list)}
idx_to_char = {i: c for i, c in enumerate(char_list)}
char_data = [char_to_idx[c] for c in char_raw]
char_data_one_hot = tf.one_hot(char_data, depth=len(
char_list), on_value=1., off_value=0., axis=1, dtype=tf.float32)
char_input = char_data_one_hot[:-1, :] # 'hello_world_good_morning_see_you_hello_grea'
char_output = char_data_one_hot[1:, :] # 'ello_world_good_morning_see_you_hello_great'
with tf.Session() as sess:
char_input = char_input.eval()
char_output = char_output.eval()
# In[2]:
# Learning parameters
learning_rate = 0.001
max_iter = 1000
# Network Parameters
n_input_dim = char_input.shape[1]
n_input_len = char_input.shape[0]
n_output_dim = char_output.shape[1]
n_output_len = char_output.shape[0]
n_hidden = 100
n_window = 2 # number of characters in one window (like a mini-batch)
# TensorFlow graph
# (batch_size) x (time_step) x (input_dimension)
x_data = tf.placeholder(tf.float32, [None, None, n_input_dim])
# (batch_size) x (time_step) x (output_dimension)
y_data = tf.placeholder(tf.float32, [None, None, n_output_dim])
# Parameters
weights = {
'out': tf.Variable(tf.truncated_normal([n_hidden, n_output_dim]))
}
biases = {
'out': tf.Variable(tf.truncated_normal([n_output_dim]))
}
# In[3]:
def make_window_batch(x, y, window_size):
'''
This function will generate samples based on window_size from (x, y)
Although (x, y) is one example, it will create multiple examples with the length of window_size
x: (time_step) x (input_dim)
y: (time_step) x (output_dim)
x_out: (total_batch) x (batch_size) x (window_size) x (input_dim)
y_out: (total_batch) x (batch_size) x (window_size) x (output_dim)
total_batch x batch_size <= examples
'''
# (batch_size) x (window_size) x (dim)
# n_examples is calculated by sliding one character with window_size
n_examples = x.shape[0] - window_size + 1 # n_examples = batch_size
x_batch = np.empty((n_examples, window_size, x.shape[1]))
y_batch = np.empty((n_examples, window_size, y.shape[1]))
for i in range(n_examples):
x_batch[i, :, :] = x[i:i + window_size, :]
y_batch[i, :, :] = y[i:i + window_size, :]
z = list(zip(x_batch, y_batch))
random.shuffle(z)
x_batch, y_batch = zip(*z)
x_batch = np.array(x_batch)
y_batch = np.array(y_batch)
# (total_batch) x (batch_size) x (window_size) x (dim)
# total_batch is set to 1 (no mini-batch)
x_new = x_batch.reshape((n_examples, window_size, x_batch.shape[2]))
y_new = y_batch.reshape((n_examples, window_size, y_batch.shape[2]))
return x_new, y_new, n_examples
# In[4]:
def RNN(x, weights, biases):
cell = tf.contrib.rnn.BasicRNNCell(n_hidden) # Make RNNCell
outputs, states = tf.nn.dynamic_rnn(cell, x, time_major=False, dtype=tf.float32)
'''
**Notes on tf.nn.dynamic_rnn**
- 'x' can have shape (batch)x(time)x(input_dim), if time_major=False or
(time)x(batch)x(input_dim), if time_major=True
- 'outputs' can have the same shape as 'x'
(batch)x(time)x(input_dim), if time_major=False or
(time)x(batch)x(input_dim), if time_major=True
- 'states' is the final state, determined by batch and hidden_dim
'''
# outputs[-1] is outputs for the last example in the mini-batch
return tf.matmul(outputs[-1], weights['out']) + biases['out']
def softmax(x):
rowmax = np.max(x, axis=1)
    x -= rowmax.reshape((x.shape[0], 1))  # for numerical stability
x = np.exp(x)
sum_x = np.sum(x, axis=1).reshape((x.shape[0],1))
return x / sum_x
pred = RNN(x_data, weights, biases)
cost = tf.reduce_mean(tf.squared_difference(pred, y_data))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# In[5]:
# Learning
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(max_iter):
loss = 0<|fim▁hole|> y_train = y_batch[ibatch, :, :].reshape((1,-1,n_output_dim))
x_test = char_input.reshape((1, n_input_len, n_input_dim))
y_test = char_output.reshape((1, n_input_len, n_input_dim))
c, _ = sess.run([cost, optimizer], feed_dict={
x_data: x_train, y_data: y_train})
p = sess.run(pred, feed_dict={x_data: x_test, y_data: y_test})
loss += c
mean_mse = loss / n_examples
if i == (max_iter-1):
pred_act = softmax(p)
if (i+1) % 100 == 0:
pred_out = np.argmax(p, axis=1)
accuracy = np.sum(char_data[1:] == pred_out)/n_output_len*100
print('Epoch:{:>4}/{},'.format(i+1,max_iter),
'Cost:{:.4f},'.format(mean_mse),
'Acc:{:>.1f},'.format(accuracy),
'Predict:', ''.join([idx_to_char[i] for i in pred_out]))
# In[6]:
# Probability plot
fig, ax = plt.subplots()
fig.set_size_inches(15,20)
plt.title('Input Sequence', y=1.08, fontsize=20)
plt.xlabel('Probability of Next Character(y) Given Current One(x)'+
'\n[window_size={}, accuracy={:.1f}]'.format(n_window, accuracy),
fontsize=20, y=1.5)
plt.ylabel('Character List', fontsize=20)
plot = plt.imshow(pred_act.T, cmap=plt.get_cmap('plasma'))
fig.colorbar(plot, fraction=0.015, pad=0.04)
plt.xticks(np.arange(len(char_data)-1), list(char_raw)[:-1], fontsize=15)
plt.yticks(np.arange(len(char_list)), [idx_to_char[i] for i in range(len(char_list))], fontsize=15)
ax.xaxis.tick_top()
# Annotate
for i, idx in zip(range(len(pred_out)), pred_out):
annotation = idx_to_char[idx]
ax.annotate(annotation, xy=(i-0.2, idx+0.2), fontsize=12)
plt.show()
# f.savefig('result_' + idx + '.png')<|fim▁end|> | x_batch, y_batch, n_examples = make_window_batch(char_input, char_output, n_window)
for ibatch in range(x_batch.shape[0]):
x_train = x_batch[ibatch, :, :].reshape((1,-1,n_input_dim)) |
<|file_name|>Transfers.java<|end_file_name|><|fim▁begin|>//#############################################################################
//# #
//# Copyright (C) <2014> <IMS MAXIMS> #
//# #
//# This program is free software: you can redistribute it and/or modify #
//# it under the terms of the GNU Affero General Public License as #
//# published by the Free Software Foundation, either version 3 of the #
//# License, or (at your option) any later version. #
//# #
//# This program is distributed in the hope that it will be useful, #
//# but WITHOUT ANY WARRANTY; without even the implied warranty of #
//# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
//# GNU Affero General Public License for more details. #
//# #
//# You should have received a copy of the GNU Affero General Public License #
//# along with this program. If not, see <http://www.gnu.org/licenses/>. #
//# #
//#############################################################################
//#EOH
// This code was generated by Barbara Worwood using IMS Development Environment (version 1.80 build 5007.25751)
// Copyright (C) 1995-2014 IMS MAXIMS. All rights reserved.
// WARNING: DO NOT MODIFY the content of this file
package ims.nursing.vo;
/**
* Linked to nursing.assessment.Transfers business object (ID: 1015100016).
*/
public class Transfers extends ims.nursing.assessment.vo.TransfersRefVo implements ims.vo.ImsCloneable, Comparable
{
private static final long serialVersionUID = 1L;
public Transfers()
{
}
public Transfers(Integer id, int version)
{
super(id, version);
}
public Transfers(ims.nursing.vo.beans.TransfersBean bean)
{
this.id = bean.getId();
this.version = bean.getVersion();
this.patienttransfers = bean.getPatientTransfers() == null ? null : ims.nursing.vo.lookups.Transfers.buildLookup(bean.getPatientTransfers());
this.assistancerequired = bean.getAssistanceRequired() == null ? null : ims.nursing.vo.lookups.Ability.buildLookup(bean.getAssistanceRequired());
}
public void populate(ims.vo.ValueObjectBeanMap map, ims.nursing.vo.beans.TransfersBean bean)
{
this.id = bean.getId();
this.version = bean.getVersion();
this.patienttransfers = bean.getPatientTransfers() == null ? null : ims.nursing.vo.lookups.Transfers.buildLookup(bean.getPatientTransfers());
this.assistancerequired = bean.getAssistanceRequired() == null ? null : ims.nursing.vo.lookups.Ability.buildLookup(bean.getAssistanceRequired());
}
public ims.vo.ValueObjectBean getBean()
{
return this.getBean(new ims.vo.ValueObjectBeanMap());
}
public ims.vo.ValueObjectBean getBean(ims.vo.ValueObjectBeanMap map)
{
ims.nursing.vo.beans.TransfersBean bean = null;
if(map != null)
bean = (ims.nursing.vo.beans.TransfersBean)map.getValueObjectBean(this);
if (bean == null)
{
bean = new ims.nursing.vo.beans.TransfersBean();
map.addValueObjectBean(this, bean);
bean.populate(map, this);
}
return bean;
}
public Object getFieldValueByFieldName(String fieldName)
{
if(fieldName == null)
throw new ims.framework.exceptions.CodingRuntimeException("Invalid field name");
fieldName = fieldName.toUpperCase();
if(fieldName.equals("PATIENTTRANSFERS"))
return getPatientTransfers();
if(fieldName.equals("ASSISTANCEREQUIRED"))
return getAssistanceRequired();
return super.getFieldValueByFieldName(fieldName);
}
public boolean getPatientTransfersIsNotNull()
{
return this.patienttransfers != null;
}
public ims.nursing.vo.lookups.Transfers getPatientTransfers()
{
return this.patienttransfers;
}
public void setPatientTransfers(ims.nursing.vo.lookups.Transfers value)
{
this.isValidated = false;
this.patienttransfers = value;
}
public boolean getAssistanceRequiredIsNotNull()
{
return this.assistancerequired != null;
}
public ims.nursing.vo.lookups.Ability getAssistanceRequired()
{
return this.assistancerequired;
}
public void setAssistanceRequired(ims.nursing.vo.lookups.Ability value)
{
this.isValidated = false;
this.assistancerequired = value;
}
public boolean isValidated()
{
if(this.isBusy)
return true;
this.isBusy = true;
if(!this.isValidated)
{
this.isBusy = false;
return false;
}
this.isBusy = false;
return true;
}
public String[] validate()
{
return validate(null);
}
public String[] validate(String[] existingErrors)
{
if(this.isBusy)
return null;
this.isBusy = true;
java.util.ArrayList<String> listOfErrors = new java.util.ArrayList<String>();
if(existingErrors != null)
{
for(int x = 0; x < existingErrors.length; x++)
{
listOfErrors.add(existingErrors[x]);
}
}
int errorCount = listOfErrors.size();
if(errorCount == 0)
{
this.isBusy = false;
this.isValidated = true;
return null;
}
String[] result = new String[errorCount];
for(int x = 0; x < errorCount; x++)
result[x] = (String)listOfErrors.get(x);
this.isBusy = false;
this.isValidated = false;
return result;
}
public void clearIDAndVersion()
{
this.id = null;
this.version = 0;
}
public Object clone()
{
if(this.isBusy)
return this;
this.isBusy = true;
Transfers clone = new Transfers(this.id, this.version);
if(this.patienttransfers == null)
clone.patienttransfers = null;
else
clone.patienttransfers = (ims.nursing.vo.lookups.Transfers)this.patienttransfers.clone();
if(this.assistancerequired == null)
clone.assistancerequired = null;
else
clone.assistancerequired = (ims.nursing.vo.lookups.Ability)this.assistancerequired.clone();
clone.isValidated = this.isValidated;
this.isBusy = false;
return clone;
}
public int compareTo(Object obj)
{
return compareTo(obj, true);
}
public int compareTo(Object obj, boolean caseInsensitive)
{
if (obj == null)
{
return -1;
}
if(caseInsensitive); // this is to avoid eclipse warning only.
if (!(Transfers.class.isAssignableFrom(obj.getClass())))
{
throw new ClassCastException("A Transfers object cannot be compared an Object of type " + obj.getClass().getName());
}
Transfers compareObj = (Transfers)obj;
int retVal = 0;
if (retVal == 0)
{
if(this.getID_Transfers() == null && compareObj.getID_Transfers() != null)
return -1;
if(this.getID_Transfers() != null && compareObj.getID_Transfers() == null)
return 1;
if(this.getID_Transfers() != null && compareObj.getID_Transfers() != null)
retVal = this.getID_Transfers().compareTo(compareObj.getID_Transfers());
}
return retVal;
}
public synchronized static int generateValueObjectUniqueID()
{
return ims.vo.ValueObject.generateUniqueID();
}
public int countFieldsWithValue()<|fim▁hole|> if(this.assistancerequired != null)
count++;
return count;
}
public int countValueObjectFields()
{
return 2;
}
protected ims.nursing.vo.lookups.Transfers patienttransfers;
protected ims.nursing.vo.lookups.Ability assistancerequired;
private boolean isValidated = false;
private boolean isBusy = false;
}<|fim▁end|> | {
int count = 0;
if(this.patienttransfers != null)
count++; |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
class HTTPError(Exception):
'''
Represents an HTTP Exception when response status code >= 300.
:ivar int status:
the status code of the response
    :ivar str message:
        the error message (passed on to the Exception base class)
    :ivar list respheader:
        the returned headers, as a list of (name, value) pairs
    :ivar bytes respbody:
        the body of the response
'''
def __init__(self, status, message, respheader, respbody):
self.status = status
self.respheader = respheader
self.respbody = respbody
Exception.__init__(self, message)
class HTTPResponse(object):
'''
Represents a response from an HTTP request.
:ivar int status:
the status code of the response
:ivar str message:
the message
:ivar dict headers:
the returned headers
:ivar bytes body:
the body of the response
'''
def __init__(self, status, message, headers, body):
self.status = status
self.message = message
self.headers = headers
self.body = body
class HTTPRequest(object):
'''
Represents an HTTP Request.
:ivar str host:
the host name to connect to
:ivar str method:
the method to use to connect (string such as GET, POST, PUT, etc.)
:ivar str path:
the uri fragment
:ivar dict query:
query parameters
:ivar dict headers:
header values
:ivar bytes body:
the body of the request.
'''
def __init__(self):
self.host = ''
self.method = ''
self.path = ''
        self.query = {}  # dict of query parameters (name -> value)
self.headers = {} # list of (header name, header value)<|fim▁hole|><|fim▁end|> | self.body = '' |
<|file_name|>0027_merge.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [<|fim▁hole|> ]
operations = [
]<|fim▁end|> | ('ccx', '0026_auto_20170831_0420'),
('ccx', '0026_auto_20170831_0554'), |
<|file_name|>include.js<|end_file_name|><|fim▁begin|>function TableSort(id) {
this.tbl = document.getElementById(id);
this.lastSortedTh = null;
if (this.tbl && this.tbl.nodeName == "TABLE") {
var headings = this.tbl.tHead.rows[0].cells;
for (var i=0; headings[i]; i++) {
if (headings[i].className.match(/asc|dsc/)) {
this.lastSortedTh = headings[i];
}
}
this.makeSortable();
}
}
TableSort.prototype.makeSortable = function () {
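    // Turn every column heading not marked "notSortable" into a link that
    // sorts that column when clicked.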
var headings = this.tbl.tHead.rows[0].cells;
for (var i=0; headings[i]; i++) {
if(!headings[i].className.match(/notSortable/)) {
headings[i].cIdx = i;
var a = document.createElement("a");
a.href = "#";
a.innerHTML = headings[i].innerHTML;
a.onclick = function (that) {
return function () {
that.sortCol(this);
return false;
}
}(this);
headings[i].innerHTML = "";
headings[i].appendChild(a);
}
}
}
TableSort.prototype.sortCol = function (el) {
/*
* Get cell data for column that is to be sorted from HTML table
*/
var rows = this.tbl.rows;
var alpha = [], numeric = [];
var aIdx = 0, nIdx = 0;
var th = el.parentNode;
var cellIndex = th.cIdx;
for (var i=1; rows[i]; i++) {
var cell = rows[i].cells[cellIndex];
var content = cell.textContent ? cell.textContent : cell.innerText;
/*
* Split data into two separate arrays, one for numeric content and <|fim▁hole|> * order of content is determined in order to actually reorder the HTML
* table's rows.
*/
if(content && String(content).length >= 0) {
var num = content.replace(/(\$|\,|\s)/g, "");
if (parseFloat(num) == num) {
numeric[nIdx++] = {
value: Number(num),
row: rows[i]
}
} else {
alpha[aIdx++] = {
value: content,
row: rows[i]
}
}
} else {
alpha[aIdx++] = {
value: "",
row: rows[i]
}
}
}
/*
* Sort according to direction (ascending or descending)
*/
var col = [], top, bottom;
if (th.className.match("asc")) {
top = bubbleSort(alpha, -1);
bottom = bubbleSort(numeric, -1);
th.className = th.className.replace(/asc/, "dsc");
} else {
top = bubbleSort(numeric, 1);
bottom = bubbleSort(alpha, 1);
if (th.className.match("dsc")) {
th.className = th.className.replace(/dsc/, "asc");
} else {
th.className += "asc";
}
}
/*
     * Clear asc/dsc class names from the last sorted column's th if it isn't the
* same as the one that was just clicked
*/
if (this.lastSortedTh && th != this.lastSortedTh) {
this.lastSortedTh.className = this.lastSortedTh.className.replace(/dsc|asc/g, "");
}
this.lastSortedTh = th;
/*
* Reorder HTML table based on new order of data found in the col array
*/
col = top.concat(bottom);
var tBody = this.tbl.tBodies[0];
for (var i=0; col[i]; i++) {
tBody.appendChild(col[i].row);
}
}
function bubbleSort(arr, dir) {
// Pre-calculate directional information
var start, end;
if (dir === 1) {
start = 0;
end = arr.length;
} else if (dir === -1) {
start = arr.length-1;
end = -1;
}
// Bubble sort: http://en.wikipedia.org/wiki/Bubble_sort
var unsorted = true;
while (unsorted) {
unsorted = false;
for (var i=start; i!=end; i=i+dir) {
if (arr[i+dir] && arr[i].value > arr[i+dir].value) {
var a = arr[i];
var b = arr[i+dir];
var c = a;
arr[i] = b;
arr[i+dir] = c;
unsorted = true;
}
}
}
return arr;
}<|fim▁end|> | * one for everything else (alphabetic). Store both the actual data
* that will be used for comparison by the sort algorithm (thus the need
* to parseFloat() the numeric data) as well as a reference to the
* element's parent row. The row reference will be used after the new |
<|file_name|>item.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
""" This class is a base class for nearly all configuration
elements like service, hosts or contacts.
"""
import time
import cPickle # for hashing compute
# Try to import md5 function
try:
from hashlib import md5
except ImportError:
from md5 import md5
from copy import copy
from shinken.graph import Graph
from shinken.commandcall import CommandCall
from shinken.property import StringProp, ListProp, BoolProp
from shinken.brok import Brok
from shinken.util import strip_and_uniq
from shinken.acknowledge import Acknowledge
from shinken.comment import Comment
from shinken.complexexpression import ComplexExpressionFactory
from shinken.log import logger
class Item(object):
properties = {
'imported_from': StringProp(default='unknown'),
'use': ListProp(default=''),
'name': StringProp(default=''),
# TODO: find why we can't uncomment this line below.
#'register': BoolProp(default='1'),
}
running_properties = {
# All errors and warning raised during the configuration parsing
# and that will raised real warning/errors during the is_correct
'configuration_warnings': ListProp(default=[]),
'configuration_errors': ListProp(default=[]),
'hash': StringProp(default=''),
# We save all template we asked us to load from
'tags': ListProp(default=set(), fill_brok=['full_status']),
}
macros = {
}
def __init__(self, params={}):
# We have our own id of My Class type :)
# use set attr for going into the slots
# instead of __dict__ :)
cls = self.__class__
self.id = cls.id
cls.id += 1
self.customs = {} # for custom variables
self.plus = {} # for value with a +
self.init_running_properties()
# [0] = + -> new key-plus
# [0] = _ -> new custom entry in UPPER case
for key in params:
if len(params[key]) >= 1 and params[key][0] == '+':
# Special case: a _MACRO can be a plus. so add to plus
# but upper the key for the macro name
if key[0] == "_":
self.plus[key.upper()] = params[key][1:] # we remove the +
else:
self.plus[key] = params[key][1:] # we remove the +
elif key[0] == "_":
custom_name = key.upper()
self.customs[custom_name] = params[key]
else:
setattr(self, key, params[key])
def init_running_properties(self):
for prop, entry in self.__class__.running_properties.items():
# Copy is slow, so we check type
# Type with __iter__ are list or dict, or tuple.
# Item need it's own list, so we copy
val = entry.default
if hasattr(val, '__iter__'):
setattr(self, prop, copy(val))
else:
setattr(self, prop, val)
# each instance to have his own running prop!
def copy(self):
""" Return a copy of the item, but give him a new id """
cls = self.__class__
i = cls({}) # Dummy item but with it's own running properties
for prop in cls.properties:
if hasattr(self, prop):
val = getattr(self, prop)
setattr(i, prop, val)
# Also copy the customs tab
i.customs = copy(self.customs)
return i
def clean(self):
""" Clean useless things not requested once item has been fully initialized&configured.
Like temporary attributes such as "imported_from", etc.. """
for name in ('imported_from', 'use', 'plus', 'templates',):
try:
delattr(self, name)
except AttributeError:
pass
def __str__(self):
return str(self.__dict__) + '\n'
def is_tpl(self):
""" Return if the elements is a template """
try:
return self.register == '0'
except Exception, exp:
return False
# If a prop is absent and is not required, put the default value
def fill_default(self):
""" Fill missing properties if they are missing """
cls = self.__class__
for prop, entry in cls.properties.items():
if not hasattr(self, prop) and entry.has_default:
setattr(self, prop, entry.default)
# We load every useful parameter so no need to access global conf later
# Must be called after a change in a global conf parameter
def load_global_conf(cls, conf):
""" Used to put global values in the sub Class like
hosts or services """
# conf have properties, if 'enable_notifications':
# { [...] 'class_inherit': [(Host, None), (Service, None),
# (Contact, None)]}
# get the name and put the value if None, put the Name
# (not None) if not (not clear?)
for prop, entry in conf.properties.items():
# If we have a class_inherit, and the arbiter really send us it
# if 'class_inherit' in entry and hasattr(conf, prop):
if hasattr(conf, prop):
for (cls_dest, change_name) in entry.class_inherit:
if cls_dest == cls: # ok, we've got something to get
value = getattr(conf, prop)
if change_name is None:
setattr(cls, prop, value)
else:
setattr(cls, change_name, value)
# Make this method a classmethod
load_global_conf = classmethod(load_global_conf)
# Use to make python properties
def pythonize(self):
cls = self.__class__
for prop, tab in cls.properties.items():
try:
new_val = tab.pythonize(getattr(self, prop))
setattr(self, prop, new_val)
except AttributeError, exp:
#print exp
                pass  # Will be caught at the is_correct moment
except KeyError, exp:
#print "Missing prop value", exp
err = "the property '%s' of '%s' do not have value" % (prop, self.get_name())
self.configuration_errors.append(err)
except ValueError, exp:
err = "incorrect type for property '%s' of '%s'" % (prop, self.get_name())
self.configuration_errors.append(err)
# Compute a hash of this element values. Should be launched
# When we got all our values, but not linked with other objects
def compute_hash(self):
# ID will always changed between runs, so we remove it
# for hash compute
i = self.id
del self.id
m = md5()
tmp = cPickle.dumps(self, cPickle.HIGHEST_PROTOCOL)
m.update(tmp)
self.hash = m.digest()
# and put again our id
self.id = i
def get_templates(self):
if hasattr(self, 'use') and self.use != '':
return self.use.split(',')
else:
return []
    # We fill in properties from templates (inheritance) if needed
def get_property_by_inheritance(self, items, prop):
# If I have the prop, I take mine but I check if I must
# add a plus property
if hasattr(self, prop):
value = getattr(self, prop)
# Maybe this value is 'null'. If so, we should NOT inherit
# and just delete this entry, and hope of course.
# Keep "null" values, because in "inheritance chaining" they must
# be passed from one level to the next.
#if value == 'null':
# delattr(self, prop)
# return None
# Manage the additive inheritance for the property,
# if property is in plus, add or replace it
# Template should keep the '+' at the beginning of the chain
if self.has_plus(prop):
value = self.get_plus_and_delete(prop) + ',' + value
if self.is_tpl():
value = '+' + value
return value
# Ok, I do not have prop, Maybe my templates do?
# Same story for plus
for i in self.templates:
value = i.get_property_by_inheritance(items, prop)
if value is not None:
# If our template give us a '+' value, we should continue to loop
still_loop = False
if value.startswith('+'):
# Templates should keep their + inherited from their parents
if not self.is_tpl():
value = value[1:]
still_loop = True
# Maybe in the previous loop, we set a value, use it too
if hasattr(self, prop):
# If the current value is strong, it will simplify the problem
if value.startswith('+'):
# In this case we can remove the + from our current
# tpl because our value will be final
value = ','.join([getattr(self, prop), value[1:]])
else: # If not, se should keep the + sign of need
value = ','.join([getattr(self, prop), value])
# Ok, we can set it
setattr(self, prop, value)
# If we only got some '+' values, we must still loop
# for a final value without it
if not still_loop:
# And set my own value at the end if needed
if self.has_plus(prop):
value = ','.join([getattr(self, prop), self.get_plus_and_delete(prop)])
# Template should keep their '+'
if self.is_tpl() and not value.startswith('+'):
value = '+' + value
setattr(self, prop, value)
return value
# Maybe templates only give us + values, so we didn't quit, but we already got a
# self.prop value after all
template_with_only_plus = hasattr(self, prop)
# I do not have a final value for the prop, and neither do my templates... Maybe a plus?
# warning: if all my templates gave me '+' values, do not forget to
# add the already set self.prop value
if self.has_plus(prop):
if template_with_only_plus:
value = ','.join([getattr(self, prop), self.get_plus_and_delete(prop)])
else:
value = self.get_plus_and_delete(prop)
# Template should keep their '+' chain
# We must say it's a '+' value, so our child will know that it must
# still loop
if self.is_tpl() and not value.startswith('+'):
value = '+' + value
setattr(self, prop, value)
return value
# Ok so in the end, we give the value we got if we have one, or None
# Not even a plus... so None :)
return getattr(self, prop, None)
# We fill custom properties from template ones if needed
def get_customs_properties_by_inheritance(self, items):
for i in self.templates:
tpl_cv = i.get_customs_properties_by_inheritance(items)
if tpl_cv:
for prop in tpl_cv:
if prop not in self.customs:
value = tpl_cv[prop]
else:
value = self.customs[prop]
if self.has_plus(prop):
value = self.get_plus_and_delete(prop) + ',' + value
self.customs[prop] = value
for prop in self.customs:
value = self.customs[prop]
if self.has_plus(prop):
value = self.get_plus_and_delete(prop) + ',' + value
self.customs[prop] = value
# We can get custom properties in plus, we need to get all
# entries and put them into customs
cust_in_plus = self.get_all_plus_and_delete()
for prop in cust_in_plus:
self.customs[prop] = cust_in_plus[prop]
return self.customs
def has_plus(self, prop):
try:
self.plus[prop]
except:
return False
return True
def get_all_plus_and_delete(self):
res = {}
props = self.plus.keys() # we delete entries, so no for ... in ...
for prop in props:
res[prop] = self.get_plus_and_delete(prop)
return res
def get_plus_and_delete(self, prop):
val = self.plus[prop]
del self.plus[prop]
return val
# Check if required props are set;
# templates are always correct
def is_correct(self):
state = True
properties = self.__class__.properties
# Raise all previously seen errors, like unknown contacts and co
if self.configuration_errors != []:
state = False
for err in self.configuration_errors:
logger.error("[item::%s] %s" % (self.get_name(), err))
for prop, entry in properties.items():
if not hasattr(self, prop) and entry.required:
logger.warning("[item::%s] %s property is missing" % (self.get_name(), prop))
state = False
return state
# This function is used by services and hosts
# to transform Nagios2 parameters to Nagios3
# ones, like normal_check_interval to
# check_interval. There is an old_properties table
# in the classes that lists such renamings.
def old_properties_names_to_new(self):
old_properties = self.__class__.old_properties
for old_name, new_name in old_properties.items():
# Ok, if we got old_name and NO new name,
# we switch the name
if hasattr(self, old_name) and not hasattr(self, new_name):
value = getattr(self, old_name)
setattr(self, new_name, value)
# The arbiter is asking us for our raw values, before any explode or linking
def get_raw_import_values(self):
r = {}
properties = self.__class__.properties.keys()
# Register is not by default in the properties
if not 'register' in properties:
properties.append('register')
for prop in properties:
if hasattr(self, prop):
v = getattr(self, prop)
#print prop, ":", v
r[prop] = v
return r
def add_downtime(self, downtime):
self.downtimes.append(downtime)
def del_downtime(self, downtime_id):
d_to_del = None
for dt in self.downtimes:
if dt.id == downtime_id:
d_to_del = dt
dt.can_be_deleted = True
if d_to_del is not None:
self.downtimes.remove(d_to_del)
def add_comment(self, comment):
self.comments.append(comment)
def del_comment(self, comment_id):
c_to_del = None
for c in self.comments:
if c.id == comment_id:
c_to_del = c
c.can_be_deleted = True
if c_to_del is not None:
self.comments.remove(c_to_del)
def acknowledge_problem(self, sticky, notify, persistent, author, comment, end_time=0):
if self.state != self.ok_up:
if notify:
self.create_notifications('ACKNOWLEDGEMENT')
self.problem_has_been_acknowledged = True
if sticky == 2:
sticky = True
else:
sticky = False
a = Acknowledge(self, sticky, notify, persistent, author, comment, end_time=end_time)
self.acknowledgement = a
if self.my_type == 'host':
comment_type = 1
else:
comment_type = 2
c = Comment(self, persistent, author, comment,
comment_type, 4, 0, False, 0)
self.add_comment(c)
self.broks.append(self.get_update_status_brok())
# Look if we have an ack with an expire date that is too old and should
# be deleted
def check_for_expire_acknowledge(self):
if self.acknowledgement and self.acknowledgement.end_time != 0 and self.acknowledgement.end_time < time.time():
self.unacknowledge_problem()
# Delete the acknowledgement object and reset the flag
# but do not remove the associated comment.
def unacknowledge_problem(self):
if self.problem_has_been_acknowledged:
logger.debug("[item::%s] deleting acknowledge of %s" % (self.get_name(), self.get_dbg_name()))
self.problem_has_been_acknowledged = False
# Should not be deleted, a None is Good
self.acknowledgement = None
# del self.acknowledgement
# find comments of non-persistent ack-comments and delete them too
for c in self.comments:
if c.entry_type == 4 and not c.persistent:<|fim▁hole|> self.broks.append(self.get_update_status_brok())
# Check if we have an acknowledgement and if this is marked as sticky.
# This is needed when a non-ok state changes
def unacknowledge_problem_if_not_sticky(self):
if hasattr(self, 'acknowledgement') and self.acknowledgement is not None:
if not self.acknowledgement.sticky:
self.unacknowledge_problem()
# Will flatten some parameters tagged by the 'conf_send_preparation'
# property because they are too "linked" to be send like that (like realms)
def prepare_for_conf_sending(self):
cls = self.__class__
for prop, entry in cls.properties.items():
# Does this property need preparation for sending?
if entry.conf_send_preparation is not None:
f = entry.conf_send_preparation
if f is not None:
val = f(getattr(self, prop))
setattr(self, prop, val)
if hasattr(cls, 'running_properties'):
for prop, entry in cls.running_properties.items():
# Does this property need preparation for sending?
if entry.conf_send_preparation is not None:
f = entry.conf_send_preparation
if f is not None:
val = f(getattr(self, prop))
setattr(self, prop, val)
# Get the property for an object, with good value
# and brok_transformation applied if needed
def get_property_value_for_brok(self, prop, tab):
entry = tab[prop]
# Get the current value, or the default if need
value = getattr(self, prop, entry.default)
# Apply brok_transformation if needed
# Look if we must preprocess the value first
pre_op = entry.brok_transformation
if pre_op is not None:
value = pre_op(self, value)
return value
# Fill data with info of item by looking at brok_type
# in props of properties or running_properties
def fill_data_brok_from(self, data, brok_type):
cls = self.__class__
# Now config properties
for prop, entry in cls.properties.items():
# Is this property intended for broking?
if brok_type in entry.fill_brok:
data[prop] = self.get_property_value_for_brok(prop, cls.properties)
# Maybe the class does not have running_properties
if hasattr(cls, 'running_properties'):
# We've got prop in running_properties too
for prop, entry in cls.running_properties.items():
#if 'fill_brok' in cls.running_properties[prop]:
if brok_type in entry.fill_brok:
data[prop] = self.get_property_value_for_brok(prop, cls.running_properties)
# Get a brok with initial status
def get_initial_status_brok(self):
cls = self.__class__
my_type = cls.my_type
data = {'id': self.id}
self.fill_data_brok_from(data, 'full_status')
b = Brok('initial_' + my_type + '_status', data)
return b
# Get a brok with update item status
def get_update_status_brok(self):
cls = self.__class__
my_type = cls.my_type
data = {'id': self.id}
self.fill_data_brok_from(data, 'full_status')
b = Brok('update_' + my_type + '_status', data)
return b
# Get a brok with check_result
def get_check_result_brok(self):
cls = self.__class__
my_type = cls.my_type
data = {}
self.fill_data_brok_from(data, 'check_result')
b = Brok(my_type + '_check_result', data)
return b
# Get brok about the new schedule (next_check)
def get_next_schedule_brok(self):
cls = self.__class__
my_type = cls.my_type
data = {}
self.fill_data_brok_from(data, 'next_schedule')
b = Brok(my_type + '_next_schedule', data)
return b
# Link one command property to a class (for globals like oc*p_command)
def linkify_one_command_with_commands(self, commands, prop):
if hasattr(self, prop):
command = getattr(self, prop).strip()
if command != '':
if hasattr(self, 'poller_tag'):
cmdCall = CommandCall(commands, command,
poller_tag=self.poller_tag)
elif hasattr(self, 'reactionner_tag'):
cmdCall = CommandCall(commands, command,
reactionner_tag=self.reactionner_tag)
else:
cmdCall = CommandCall(commands, command)
setattr(self, prop, cmdCall)
else:
setattr(self, prop, None)
# We look at the 'trigger' prop and we create a trigger for it
def explode_trigger_string_into_triggers(self, triggers):
src = getattr(self, 'trigger', '')
if src:
# Change on the fly the characters
src = src.replace(r'\n', '\n').replace(r'\t', '\t')
t = triggers.create_trigger(src, 'inner-trigger-' + self.__class__.my_type + '' + str(self.id))
if t:
# Maybe the trigger factory gives me an already existing trigger,
# so my name can be dropped
self.triggers.append(t.get_name())
# Link with triggers. Can be with a "in source" trigger, or a file name
def linkify_with_triggers(self, triggers):
# Get our trigger string and trigger names in the same list
self.triggers.extend(self.trigger_name)
#print "I am linking my triggers", self.get_full_name(), self.triggers
new_triggers = []
for tname in self.triggers:
t = triggers.find_by_name(tname)
if t:
new_triggers.append(t)
else:
self.configuration_errors.append('the %s %s has an unknown trigger_name "%s"' % (self.__class__.my_type, self.get_full_name(), tname))
self.triggers = new_triggers
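# Illustrative sketch (not part of the original Shinken source): a minimal helper
# showing how the additive '+' inheritance notation handled by
# get_property_by_inheritance is expected to combine an inherited value with an
# item's own value. The helper name and exact behavior are assumptions added for
# documentation only.
def _example_merge_plus_value(own_value, inherited_value):
    """Merge an own '+foo' style value with an inherited comma-separated value."""
    if own_value.startswith('+'):
        # Additive inheritance: keep the inherited chain and append our own part
        return ','.join([inherited_value, own_value[1:]])
    # Plain value: the item's own definition simply overrides the template
    return own_value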
class Items(object):
def __init__(self, items):
self.items = {}
self.configuration_warnings = []
self.configuration_errors = []
for i in items:
self.items[i.id] = i
self.templates = {}
# We should keep a graph of templates relations
self.templates_graph = Graph()
def __iter__(self):
return self.items.itervalues()
def __len__(self):
return len(self.items)
def __delitem__(self, key):
try:
del self.items[key]
except KeyError: # we don't want it, we do not have it. All is perfect
pass
def __setitem__(self, key, value):
self.items[key] = value
def __getitem__(self, key):
return self.items[key]
def __contains__(self, key):
return key in self.items
def compute_hash(self):
for i in self:
i.compute_hash()
# We create the reversed list so search will be faster
# We also create a twins list with the ids of twins (not the original,
# just the other, later duplicates)
def create_reversed_list(self):
self.reversed_list = {}
self.twins = []
name_property = self.__class__.name_property
for id in self.items:
if hasattr(self.items[id], name_property):
name = getattr(self.items[id], name_property)
if name not in self.reversed_list:
self.reversed_list[name] = id
else:
self.twins.append(id)
def find_id_by_name(self, name):
if hasattr(self, 'reversed_list'):
if name in self.reversed_list:
return self.reversed_list[name]
else:
return None
else: # ok, an early ask, with no reversed list from now...
name_property = self.__class__.name_property
for i in self:
if hasattr(i, name_property):
i_name = getattr(i, name_property)
if i_name == name:
return i.id
return None
def find_by_name(self, name):
id = self.find_id_by_name(name)
if id is not None:
return self.items[id]
else:
return None
# prepare_for_conf_sending to flatten some properties
def prepare_for_sending(self):
for i in self:
i.prepare_for_conf_sending()
# It's used to change old Nagios2 names to
# Nagios3 ones
def old_properties_names_to_new(self):
for i in self:
i.old_properties_names_to_new()
def pythonize(self):
for id in self.items:
self.items[id].pythonize()
def create_tpl_list(self):
for id in self.items:
i = self.items[id]
if i.is_tpl():
self.templates[id] = i
def find_tpl_by_name(self, name):
for i in self.templates.values():
if hasattr(i, 'name') and i.name == name:
return i
return None
# We will link all templates, and create the template
# graph too
def linkify_templates(self):
# First we create a list of all templates
self.create_tpl_list()
for i in self:
tpls = i.get_templates()
new_tpls = []
for tpl in tpls:
tpl = tpl.strip()
# We save this template in the 'tags' set
i.tags.add(tpl)
# Then we link it
t = self.find_tpl_by_name(tpl)
# If it's ok, add the template and update the
# template graph too
if t is not None:
# add the template object to us
new_tpls.append(t)
else: # not found? not good!
err = "the template '%s' defined for '%s' is unknown" % (tpl, i.get_name())
i.configuration_warnings.append(err)
i.templates = new_tpls
# Now we will create the template graph, so
# we look only for templates here. First we should declare our nodes
for tpl in self.templates.values():
self.templates_graph.add_node(tpl)
# And then really create our edge
for tpl in self.templates.values():
for father in tpl.templates:
self.templates_graph.add_edge(father, tpl)
def is_correct(self):
# we are ok at the beginning. Hope we are still ok at the end...
r = True
# Some class do not have twins, because they do not have names
# like servicedependencies
twins = getattr(self, 'twins', None)
if twins is not None:
# Ok, look for twins (having any is bad!)
for id in twins:
i = self.items[id]
logger.error("[items] %s.%s is duplicated from %s" %\
(i.__class__.my_type, i.get_name(), getattr(i, 'imported_from', "unknown source")))
r = False
# Then look if we have some errors in the conf
# Just print warnings, but raise errors
for err in self.configuration_warnings:
logger.warning("[items] %s" % err)
for err in self.configuration_errors:
logger.error("[items] %s" % err)
r = False
# Then look for individual ok
for i in self:
# Alias and display_name hook
prop_name = getattr(self.__class__, 'name_property', None)
if prop_name and not hasattr(i, 'alias') and hasattr(i, prop_name):
setattr(i, 'alias', getattr(i, prop_name))
if prop_name and getattr(i, 'display_name', '') == '' and hasattr(i, prop_name):
setattr(i, 'display_name', getattr(i, prop_name))
# Now other checks
if not i.is_correct():
n = getattr(i, 'imported_from', "unknown source")
logger.error("[items] In %s is incorrect ; from %s" % (i.get_name(), n))
r = False
return r
def remove_templates(self):
""" Remove useless templates (& properties) of our items ; otherwise we could get errors on config.is_correct() """
tpls = [i for i in self if i.is_tpl()]
for i in tpls:
del self[i.id]
del self.templates
del self.templates_graph
def clean(self):
""" Request to remove the unnecessary attributes/others from our items """
for i in self:
i.clean()
Item.clean(self)
# If a prop is absent and is not required, put the default value
def fill_default(self):
for i in self:
i.fill_default()
def __str__(self):
s = ''
cls = self.__class__
for id in self.items:
s = s + str(cls) + ':' + str(id) + str(self.items[id]) + '\n'
return s
# Inheritance for just a property
def apply_partial_inheritance(self, prop):
for i in self:
i.get_property_by_inheritance(self, prop)
if not i.is_tpl():
# If a "null" attribute was inherited, delete it
try:
if getattr(i, prop) == 'null':
delattr(i, prop)
except:
pass
def apply_inheritance(self):
# We check for all Class properties if the host has it
# if not, it checks all host templates for a value
cls = self.inner_class
for prop in cls.properties:
self.apply_partial_inheritance(prop)
for i in self:
i.get_customs_properties_by_inheritance(self)
# We remove twins
# Remember: item ids respect the order of the conf. So if an item
# is defined multiple times,
# we want to keep the first one.
# Services are also managed here but they are special:
# we remove twin services with the same host_name/service_description.
# Remember: host services are taken into account before hostgroup services.
# Ids of host services are lower than the hostgroup ones, so the latter are
# in self.twins_services
# and we can remove them.
def remove_twins(self):
for id in self.twins:
i = self.items[id]
type = i.__class__.my_type
logger.warning("[items] %s.%s is already defined '%s'" % (type, i.get_name(), getattr(i, 'imported_from', "unknown source")))
del self[id] # bye bye
# do not remove twins, we should look in it, but just void it
self.twins = []
#del self.twins #no more need
# We've got a contacts property with comma-separated contact names
# and we want to have a list of Contacts
def linkify_with_contacts(self, contacts):
for i in self:
if hasattr(i, 'contacts'):
contacts_tab = i.contacts.split(',')
contacts_tab = strip_and_uniq(contacts_tab)
new_contacts = []
for c_name in contacts_tab:
if c_name != '':
c = contacts.find_by_name(c_name)
if c is not None:
new_contacts.append(c)
# Else: Add in the errors tab.
# will be raised at is_correct
else:
err = "the contact '%s' defined for '%s' is unknown" % (c_name, i.get_name())
i.configuration_errors.append(err)
# Get the list, but first make elements uniq
i.contacts = list(set(new_contacts))
# Make link between an object and its escalations
def linkify_with_escalations(self, escalations):
for i in self:
if hasattr(i, 'escalations'):
escalations_tab = i.escalations.split(',')
escalations_tab = strip_and_uniq(escalations_tab)
new_escalations = []
for es_name in [e for e in escalations_tab if e != '']:
es = escalations.find_by_name(es_name)
if es is not None:
new_escalations.append(es)
else: # Escalation not found, not good!
err = "the escalation '%s' defined for '%s' is unknown" % (es_name, i.get_name())
i.configuration_errors.append(err)
i.escalations = new_escalations
# Make link between an item and its resultmodulations
def linkify_with_resultmodulations(self, resultmodulations):
for i in self:
if hasattr(i, 'resultmodulations'):
resultmodulations_tab = i.resultmodulations.split(',')
resultmodulations_tab = strip_and_uniq(resultmodulations_tab)
new_resultmodulations = []
for rm_name in resultmodulations_tab:
rm = resultmodulations.find_by_name(rm_name)
if rm is not None:
new_resultmodulations.append(rm)
else:
err = "the result modulation '%s' defined on the %s '%s' do not exist" % (rm_name, i.__class__.my_type, i.get_name())
i.configuration_errors.append(err)
continue
i.resultmodulations = new_resultmodulations
# Make link between an item and its business_impact_modulations
def linkify_with_business_impact_modulations(self, business_impact_modulations):
for i in self:
if hasattr(i, 'business_impact_modulations'):
business_impact_modulations_tab = i.business_impact_modulations.split(',')
business_impact_modulations_tab = strip_and_uniq(business_impact_modulations_tab)
new_business_impact_modulations = []
for rm_name in business_impact_modulations_tab:
rm = business_impact_modulations.find_by_name(rm_name)
if rm is not None:
new_business_impact_modulations.append(rm)
else:
err = "the business impact modulation '%s' defined on the %s '%s' do not exist" % (rm_name, i.__class__.my_type, i.get_name())
i.configuration_errors.append(err)
continue
i.business_impact_modulations = new_business_impact_modulations
# If we've got a contact_groups property, we search for all
# these groups and ask them for their contacts, then add them
# all into our contacts property
def explode_contact_groups_into_contacts(self, contactgroups):
for i in self:
if hasattr(i, 'contact_groups'):
cgnames = i.contact_groups.split(',')
cgnames = strip_and_uniq(cgnames)
for cgname in cgnames:
cg = contactgroups.find_by_name(cgname)
if cg is None:
err = "The contact group '%s' defined on the %s '%s' do not exist" % (cgname, i.__class__.my_type, i.get_name())
i.configuration_errors.append(err)
continue
cnames = contactgroups.get_members_by_name(cgname)
# We add contacts into our contacts
if cnames != []:
if hasattr(i, 'contacts'):
i.contacts += ',' + cnames
else:
i.contacts = cnames
# Link a timeperiod property (prop)
def linkify_with_timeperiods(self, timeperiods, prop):
for i in self:
if hasattr(i, prop):
tpname = getattr(i, prop).strip()
# some default values are '', so set None
if tpname == '':
setattr(i, prop, None)
continue
# Ok, get a real name, search for it
tp = timeperiods.find_by_name(tpname)
# If not found, it's an error
if tp is None:
err = "The %s of the %s '%s' named '%s' is unknown!" % (prop, i.__class__.my_type, i.get_name(), tpname)
i.configuration_errors.append(err)
continue
# Got a real one, just set it :)
setattr(i, prop, tp)
# Link one command property
def linkify_one_command_with_commands(self, commands, prop):
for i in self:
if hasattr(i, prop):
command = getattr(i, prop).strip()
if command != '':
if hasattr(i, 'poller_tag'):
cmdCall = CommandCall(commands, command,
poller_tag=i.poller_tag)
elif hasattr(i, 'reactionner_tag'):
cmdCall = CommandCall(commands, command,
reactionner_tag=i.reactionner_tag)
else:
cmdCall = CommandCall(commands, command)
# TODO: catch None?
setattr(i, prop, cmdCall)
else:
setattr(i, prop, None)
# Link a command list (commands with , between) in real CommandCalls
def linkify_command_list_with_commands(self, commands, prop):
for i in self:
if hasattr(i, prop):
coms = getattr(i, prop).split(',')
coms = strip_and_uniq(coms)
com_list = []
for com in coms:
if com != '':
if hasattr(i, 'poller_tag'):
cmdCall = CommandCall(commands, com,
poller_tag=i.poller_tag)
elif hasattr(i, 'reactionner_tag'):
cmdCall = CommandCall(commands, com,
reactionner_tag=i.reactionner_tag)
else:
cmdCall = CommandCall(commands, com)
# TODO: catch None?
com_list.append(cmdCall)
else: # TODO: catch?
pass
setattr(i, prop, com_list)
# Link with triggers. Can be with a "in source" trigger, or a file name
def linkify_with_triggers(self, triggers):
for i in self:
i.linkify_with_triggers(triggers)
# We've got a checkmodulations property with a list of names
# and we want a list of real CheckModulation objects
def linkify_with_checkmodulations(self, checkmodulations):
for i in self:
if not hasattr(i, 'checkmodulations'):
continue
new_checkmodulations = []
for cw_name in i.checkmodulations:
cw = checkmodulations.find_by_name(cw_name)
if cw is not None:
new_checkmodulations.append(cw)
else:
err = "The checkmodulations of the %s '%s' named '%s' is unknown!" % (i.__class__.my_type, i.get_name(), cw_name)
i.configuration_errors.append(err)
# Get the list, but first make elements uniq
i.checkmodulations = new_checkmodulations
# We've got list of macro modulations as list of names, and
# we want real objects
def linkify_with_macromodulations(self, macromodulations):
for i in self:
if not hasattr(i, 'macromodulations'):
continue
new_macromodulations = []
for cw_name in i.macromodulations:
cw = macromodulations.find_by_name(cw_name)
if cw is not None:
new_macromodulations.append(cw)
else:
err = "The macromodulations of the %s '%s' named '%s' is unknown!" % (i.__class__.my_type, i.get_name(), cw_name)
i.configuration_errors.append(err)
# Get the list, but first make elements uniq
i.macromodulations = new_macromodulations
def evaluate_hostgroup_expression(self, expr, hosts, hostgroups, look_in='hostgroups'):
#print "\n"*10, "looking for expression", expr
if look_in=='hostgroups':
f = ComplexExpressionFactory(look_in, hostgroups, hosts)
else: # templates
f = ComplexExpressionFactory(look_in, hosts, hosts)
expr_tree = f.eval_cor_pattern(expr)
#print "RES of ComplexExpressionFactory"
#print expr_tree
#print "Try to resolve the Tree"
set_res = expr_tree.resolve_elements()
#print "R2d2 final is", set_res
# HOOK DBG
return list(set_res)
# If we've got a hostgroup_name property, we search for all
# these groups and ask them for their hosts, then add them
# all into our host_name property
def explode_host_groups_into_hosts(self, hosts, hostgroups):
for i in self:
hnames_list = []
if hasattr(i, 'hostgroup_name'):
hnames_list.extend(self.evaluate_hostgroup_expression(i.hostgroup_name, hosts, hostgroups))
# Maybe there is no host in the groups, and we do not have any
# host_name either, so tag it as a template so it is not looked at
if hnames_list == [] and not hasattr(i, 'host_name'):
i.register = '0'
if hasattr(i, 'host_name'):
hst = i.host_name.split(',')
for h in hst:
h = h.strip()
# If the host starts with a !, it is to be removed from
# the list we got from the hostgroup
if h.startswith('!'):
hst_to_remove = h[1:].strip()
try:
hnames_list.remove(hst_to_remove)
# was not in it
except ValueError:
pass
# Else it's a host to add, but maybe it's ALL
elif h == '*':
for newhost in set(h.host_name for h in hosts.items.values() \
if getattr(h, 'host_name', '') != '' and not h.is_tpl()):
hnames_list.append(newhost)
#print "DBG in item.explode_host_groups_into_hosts , added '%s' to group '%s'" % (newhost, i)
else:
hnames_list.append(h)
i.host_name = ','.join(list(set(hnames_list)))
# Ok, even with all of it, there is still no host, put it as a template
if i.host_name == '':
i.register = '0'
# Take our trigger strings and create true objects with it
def explode_trigger_string_into_triggers(self, triggers):
for i in self:
i.explode_trigger_string_into_triggers(triggers)<|fim▁end|> | self.del_comment(c.id) |
<|file_name|>stats_views_dataverses.py<|end_file_name|><|fim▁begin|>from .stats_view_base import StatsViewSwagger, StatsViewSwaggerKeyRequired
from .stats_util_dataverses import StatsMakerDataverses
class DataverseCountByMonthView(StatsViewSwaggerKeyRequired):
"""API View - Dataverse counts by Month."""
# Define the swagger attributes
# Note: api_path must match the path in urls.py
#
api_path = '/dataverses/count/monthly'
summary = ('Number of published Dataverses by'
' the month they were created*. (*'
' Not month published)')
description = ('Returns a list of counts and'
' cumulative counts of all Dataverses added in a month')
description_200 = 'A list of Dataverse counts by month'
param_names = StatsViewSwagger.PARAM_DV_API_KEY +\
StatsViewSwagger.BASIC_DATE_PARAMS +\
StatsViewSwagger.PUBLISH_PARAMS +\
StatsViewSwagger.PRETTY_JSON_PARAM +\
StatsViewSwagger.PARAM_AS_CSV
tags = [StatsViewSwagger.TAG_DATAVERSES]
def get_stats_result(self, request):
"""Return the StatsResult object for this statistic"""
stats_datasets = StatsMakerDataverses(**request.GET.dict())
pub_state = self.get_pub_state(request)
if pub_state == self.PUB_STATE_ALL:
stats_result = stats_datasets.get_dataverse_counts_by_month()
elif pub_state == self.PUB_STATE_UNPUBLISHED:
stats_result = stats_datasets.get_dataverse_counts_by_month_unpublished()
else:
stats_result = stats_datasets.get_dataverse_counts_by_month_published()
return stats_result
class DataverseTotalCounts(StatsViewSwaggerKeyRequired):
"""API View - Total count of all Dataverses"""
# Define the swagger attributes
# Note: api_path must match the path in urls.py
#
api_path = '/dataverses/count'
summary = ('Simple count of published Dataverses')
description = ('Returns number of published Dataverses')
description_200 = 'Number of published Dataverses'
param_names = StatsViewSwagger.PARAM_DV_API_KEY + StatsViewSwagger.PUBLISH_PARAMS + StatsViewSwagger.PRETTY_JSON_PARAM
tags = [StatsViewSwagger.TAG_DATAVERSES]
result_name = StatsViewSwagger.RESULT_NAME_TOTAL_COUNT
def get_stats_result(self, request):
"""Return the StatsResult object for this statistic"""
stats_datasets = StatsMakerDataverses(**request.GET.dict())
pub_state = self.get_pub_state(request)
if pub_state == self.PUB_STATE_ALL:
stats_result = stats_datasets.get_dataverse_count()
elif pub_state == self.PUB_STATE_UNPUBLISHED:
stats_result = stats_datasets.get_dataverse_count_unpublished()
else:
stats_result = stats_datasets.get_dataverse_count_published()
return stats_result
class DataverseAffiliationCounts(StatsViewSwaggerKeyRequired):
"""API View - Number of Dataverses by Affiliation"""
# Define the swagger attributes
# Note: api_path must match the path in urls.py
#
api_path = '/dataverses/count/by-affiliation'
summary = ('Number of Dataverses by Affiliation')
description = ('Number of Dataverses by Affiliation.')
description_200 = 'Number of published Dataverses by Affiliation.'
param_names = StatsViewSwagger.PARAM_DV_API_KEY\
+ StatsViewSwagger.PUBLISH_PARAMS\
+ StatsViewSwagger.PRETTY_JSON_PARAM\
+ StatsViewSwagger.PARAM_AS_CSV
result_name = StatsViewSwagger.RESULT_NAME_AFFILIATION_COUNTS
tags = [StatsViewSwagger.TAG_DATAVERSES]
def get_stats_result(self, request):
"""Return the StatsResult object for this statistic"""
stats_datasets = StatsMakerDataverses(**request.GET.dict())
pub_state = self.get_pub_state(request)
if pub_state == self.PUB_STATE_ALL:
stats_result = stats_datasets.get_dataverse_affiliation_counts()
elif pub_state == self.PUB_STATE_UNPUBLISHED:
stats_result = stats_datasets.get_dataverse_affiliation_counts_unpublished()
else:
stats_result = stats_datasets.get_dataverse_affiliation_counts_published()
return stats_result
class DataverseTypeCounts(StatsViewSwaggerKeyRequired):
# Define the swagger attributes
# Note: api_path must match the path in urls.py
#
api_path = '/dataverses/count/by-type'
summary = ('Number of Dataverses by Type')
description = ('Number of Dataverses by Type.')
description_200 = 'Number of published Dataverses by Type.'
param_names = StatsViewSwagger.PARAM_DV_API_KEY + StatsViewSwagger.PUBLISH_PARAMS +\
StatsViewSwagger.PRETTY_JSON_PARAM +\
StatsViewSwagger.DV_TYPE_UNCATEGORIZED_PARAM +\
StatsViewSwagger.PARAM_AS_CSV
result_name = StatsViewSwagger.RESULT_NAME_DATAVERSE_TYPE_COUNTS
tags = [StatsViewSwagger.TAG_DATAVERSES]
def is_show_uncategorized(self, request):
"""Return the result of the "?show_uncategorized" query string param"""
show_uncategorized = request.GET.get('show_uncategorized', False)<|fim▁hole|> if show_uncategorized is True or show_uncategorized == 'true':
return True
return False
def get_stats_result(self, request):
"""Return the StatsResult object for this statistic"""
stats_datasets = StatsMakerDataverses(**request.GET.dict())
if self.is_show_uncategorized(request):
exclude_uncategorized = False
else:
exclude_uncategorized = True
pub_state = self.get_pub_state(request)
if pub_state == self.PUB_STATE_ALL:
stats_result = stats_datasets.get_dataverse_counts_by_type(exclude_uncategorized)
elif pub_state == self.PUB_STATE_UNPUBLISHED:
stats_result = stats_datasets.get_dataverse_counts_by_type_unpublished(exclude_uncategorized)
else:
stats_result = stats_datasets.get_dataverse_counts_by_type_published(exclude_uncategorized)
return stats_result<|fim▁end|> | |
<|file_name|>nb.py<|end_file_name|><|fim▁begin|>import numpy as np
import util
from datetime import datetime
from scipy.stats import norm
import better_exceptions
from scipy.stats import multivariate_normal as mvn
class NaiveBayers(object):
def __init__(self):
# Per-class Gaussian parameters (mean and variance)
self.gaussians = dict()
# Class priors
self.priors = dict()
def fit(self, X, Y, smoothing=10e-3):
N, D = X.shape
# The class labels (digits 0-9)
labels = set(Y)
for c in labels:
# select the rows of X that belong to the current class
current_x = X[Y == c]
# Compute mean and variance. Store in the dictionary by class key
self.gaussians[c] = {
'mean': current_x.mean(axis=0),
'var': np.var(current_x.T) + smoothing,
}
# Calculate the class prior: samples in the current class divided by all samples
self.priors[c] = float(len(Y[Y == c])) / len(Y)
def score(self, X, Y):
# Get the predictions
P = self.predict(X)
# Return the fraction of correct predictions
return np.mean(P == Y)
def predict(self, X):
# N - number of samples, D - number of features
N, D = X.shape
# Number of classes (10)
K = len(self.gaussians)
# Fill by Zeros
P = np.zeros((N, K))
# for each class and mean/covariance
for c, g in self.gaussians.items():
mean, var = g['mean'], g['var']
log = np.log(self.priors[c])
<|fim▁hole|> P[:, c] = mvn.logpdf(X, mean=mean, cov=var) + log
return np.argmax(P, axis=1)
if __name__ == '__main__':
# Get train data
X, Y = util.get_data(40000)
Ntrain = len(Y) // 2
Xtest, Ytest = util.get_test_data(40000)
Xtrain, Ytrain = X[:Ntrain], Y[:Ntrain]
# Xtest, Ytest = X[Ntrain:], Y[Ntrain:]
model = NaiveBayers()
t0 = datetime.now()
model.fit(Xtrain, Ytrain)
print("Training time: ", (datetime.now() - t0))
t0 = datetime.now()
print("Training accuracy: ", model.score(Xtrain, Ytrain))
print("Time to compute train accuracy: ", (datetime.now() - t0), "Train size: ", len(Ytrain))
t0 = datetime.now()
print("Test accuracy: ", model.score(Xtest, Ytest))
print("Time to compute test accuracy: ", (datetime.now() - t0), "Test size: ", len(Ytest))<|fim▁end|> | # Calculate Log of the probability density function, all at once |
<|file_name|>p2p-feefilter.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
'''
FeeFilterTest -- test processing of feefilter messages
'''
def hashToHex(hash):
return format(hash, '064x')
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
for x in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
return True
time.sleep(1)
return False
# TestNode: bare-bones "peer". Used to track which invs are received from a node
# and to send the node feefilter messages.
class TestNode(SingleNodeConnCB):
def __init__(self):
SingleNodeConnCB.__init__(self)
self.txinvs = []
def on_inv(self, conn, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
with mininode_lock:<|fim▁hole|> self.txinvs = []
def send_filter(self, feerate):
self.send_message(msg_feefilter(feerate))
self.sync_with_ping()
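# Hedged helper sketch (an assumption, not part of the original test): feefilter
# rates are expressed in satoshis per kilobyte, while settxfee takes BTC per
# kilobyte, so a conversion like the one below is implied by the values used in
# run_test (e.g. Decimal("0.00020000") BTC/kB corresponds to 20000 sat/kB).
def btc_per_kb_to_feefilter_rate(btc_per_kb):
    # 1 BTC = 100,000,000 satoshis
    return int(btc_per_kb * 100000000)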
class FeeFilterTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
def setup_network(self):
# Node1 will be used to generate txs which should be relayed from Node0
# to our test node
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-logtimemicros"]))
connect_nodes(self.nodes[0], 1)
def run_test(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
sync_blocks(self.nodes)
node0.generate(21)
sync_blocks(self.nodes)
# Setup the p2p connections and start up the network thread.
test_node = TestNode()
connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)
test_node.add_connection(connection)
NetworkThread().start()
test_node.wait_for_verack()
# Test that invs are received for all txs at feerate of 20 sat/byte
node1.settxfee(Decimal("0.00020000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, test_node))
test_node.clear_invs()
# Set a filter of 15 sat/byte
test_node.send_filter(15000)
# Test that txs are still being received (paying 20 sat/byte)
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, test_node))
test_node.clear_invs()
# Change tx fee rate to 10 sat/byte and test they are no longer received
node1.settxfee(Decimal("0.00010000"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
sync_mempools(self.nodes) # must be sure node 0 has received all txs
# Send one transaction from node0 that should be received, so that we
# we can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.00020000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert(allInvsMatch(txids, test_node))
test_node.clear_invs()
# Remove fee filter and check that txs are received again
test_node.send_filter(0)
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, test_node))
test_node.clear_invs()
if __name__ == '__main__':
FeeFilterTest().main()<|fim▁end|> | |
<|file_name|>ketabir.py<|end_file_name|><|fim▁begin|>"""All things that are specifically related to the ketab.ir website"""
from collections import defaultdict
from logging import getLogger
from typing import Optional
from langid import classify
from regex import compile as regex_compile
from requests import RequestException
from mechanicalsoup import StatefulBrowser
from lib.commons import first_last, dict_to_sfn_cit_ref, request, USER_AGENT,\
LANG
ISBN_SEARCH = regex_compile(r'ISBN: </b> ([-\d]++)').search
DATE_SEARCH = regex_compile(
r'تاریخ نشر:</b>(?<year>\d\d)/(?<month>\d\d)/(?<day>\d\d)').search
PUBLISHER_SEARCH = regex_compile(
r'Publisher_ctl00_NameLabel" class="linkk">(.*?)</span>').search
VOLUME_SEARCH = regex_compile(r'\bجلد (\d+)').search
TITLE_SEARCH = regex_compile(r'BookTitle" class="h4">([^<]++)').search
AUTHORS_FINDALL = regex_compile(
r'rptAuthor_ctl\d\d_NameLabel" class="linkk">([^>:]++):([^<]++)<').findall
LOCATION_SEARCH = regex_compile(r'محل نشر:</b>([^<]++)<').search
def ketabir_scr(url: str, date_format='%Y-%m-%d') -> tuple:
"""Return the response namedtuple."""
dictionary = url2dictionary(url)
dictionary['date_format'] = date_format
if 'language' not in dictionary:
# Assume that language is either fa or en.
# Todo: give warning about this assumption?
dictionary['language'] = \
classify(dictionary['title'])[0]
return dict_to_sfn_cit_ref(dictionary)
def isbn2url(isbn: str) -> Optional[str]:
"""Return the ketab.ir book-url for the given isbn."""
browser = StatefulBrowser(user_agent=USER_AGENT)
browser.open('http://www.ketab.ir/Search.aspx')
browser.select_form()
browser['ctl00$ContentPlaceHolder1$TxtIsbn'] = isbn
browser.submit_selected()
first_link = browser.get_current_page().select_one('.HyperLink2')
if first_link is None:
return
return browser.absolute_url(first_link['href'])
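# Illustrative sketch (an assumption, not part of the original module): how the
# helpers above are typically combined to resolve an ISBN into a citation tuple.
def _example_isbn_lookup(isbn: str, date_format: str = '%Y-%m-%d'):
    """Return the citation tuple for an ISBN, or None if ketab.ir has no entry."""
    url = isbn2url(isbn)
    if url is None:
        return None
    return ketabir_scr(url, date_format)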
def url2dictionary(ketabir_url: str) -> Optional[dict]:
try:
# Try to see if ketabir is available,
# ottobib should continue its work in isbn.py if it is not.<|fim▁hole|>
return
html = r.content.decode('utf-8')
d = defaultdict(lambda: None, cite_type='book')
d['title'] = TITLE_SEARCH(html)[1]
# initiating name lists:
others = []
authors = []
editors = []
translators = []
# building lists:
for role, name in AUTHORS_FINDALL(html):
if role == 'نويسنده':
authors.append(first_last(name))
elif role == 'مترجم':
translators.append(first_last(name))
elif role == 'ويراستار':
editors.append(first_last(name))
else:
others.append(('', f'{name} ({role})'))
if authors:
d['authors'] = authors
if others:
d['others'] = others
if editors:
d['editors'] = editors
if translators:
d['translators'] = translators
m = PUBLISHER_SEARCH(html)
if m:
d['publisher'] = m[1]
m = DATE_SEARCH(html)
if m:
if LANG != 'fa':
d['month'] = m['month']
d['year'] = '۱۳' + m['year']
else:
d['month'] = m['month']
d['year'] = '۱۳' + m['year']
m = ISBN_SEARCH(html)
if m:
d['isbn'] = m[1]
m = VOLUME_SEARCH(html)
if m:
d['volume'] = m[1]
m = LOCATION_SEARCH(html)
if m:
d['publisher-location'] = m[1]
return d
logger = getLogger(__name__)<|fim▁end|> | r = request(ketabir_url)
except RequestException: |
<|file_name|>controlbuf.go<|end_file_name|><|fim▁begin|>/*
*
* Copyright 2014 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package transport
import (
"bytes"
"fmt"
"runtime"
"sync"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
)
var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
e.SetMaxDynamicTableSizeLimit(v)
}
type itemNode struct {
it interface{}
next *itemNode
}
type itemList struct {
head *itemNode
tail *itemNode
}
func (il *itemList) enqueue(i interface{}) {
n := &itemNode{it: i}
if il.tail == nil {
il.head, il.tail = n, n
return
}
il.tail.next = n
il.tail = n
}
// peek returns the first item in the list without removing it from the
// list.
func (il *itemList) peek() interface{} {
return il.head.it
}
func (il *itemList) dequeue() interface{} {
if il.head == nil {
return nil
}
i := il.head.it
il.head = il.head.next
if il.head == nil {
il.tail = nil
}
return i
}
func (il *itemList) dequeueAll() *itemNode {
h := il.head
il.head, il.tail = nil, nil
return h
}
func (il *itemList) isEmpty() bool {
return il.head == nil
}
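// Illustrative sketch (not part of the upstream gRPC source): itemList is used
// as a plain FIFO by the control buffer. The example function below only
// demonstrates the enqueue/dequeue order and is an assumption added for
// documentation purposes.
func exampleItemListOrder() []interface{} {
	il := &itemList{}
	il.enqueue("first")
	il.enqueue("second")
	var out []interface{}
	for !il.isEmpty() {
		out = append(out, il.dequeue()) // yields "first", then "second"
	}
	return out
}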
// The following defines various control items which could flow through
// the control buffer of transport. They represent different aspects of
// control tasks, e.g., flow control, settings, streaming resetting, etc.
// registerStream is used to register an incoming stream with loopy writer.
type registerStream struct {
streamID uint32
wq *writeQuota
}
// headerFrame is also used to register stream on the client-side.
type headerFrame struct {
streamID uint32
hf []hpack.HeaderField
endStream bool // Valid on server side.
initStream func(uint32) (bool, error) // Used only on the client side.
onWrite func()
wq *writeQuota // write quota for the stream created.
cleanup *cleanupStream // Valid on the server side.
onOrphaned func(error) // Valid on client-side
}
type cleanupStream struct {
streamID uint32
idPtr *uint32
rst bool
rstCode http2.ErrCode
onWrite func()
}
type dataFrame struct {
streamID uint32
endStream bool
h []byte
d []byte
// onEachWrite is called every time
// a part of d is written out.
onEachWrite func()
}
type incomingWindowUpdate struct {
streamID uint32
increment uint32
}
type outgoingWindowUpdate struct {
streamID uint32
increment uint32
}
type incomingSettings struct {
ss []http2.Setting
}
type outgoingSettings struct {
ss []http2.Setting
}
type settingsAck struct {
}
type incomingGoAway struct {
}
type goAway struct {
code http2.ErrCode
debugData []byte
headsUp bool
closeConn bool
}
type ping struct {
ack bool
data [8]byte
}
type outFlowControlSizeRequest struct {
resp chan uint32
}
type outStreamState int
const (
active outStreamState = iota
empty
waitingOnStreamQuota
)
type outStream struct {
id uint32
state outStreamState
itl *itemList
bytesOutStanding int
wq *writeQuota
next *outStream
prev *outStream
}
func (s *outStream) deleteSelf() {
if s.prev != nil {
s.prev.next = s.next
}
if s.next != nil {
s.next.prev = s.prev
}
s.next, s.prev = nil, nil
}
type outStreamList struct {
// Following are sentinel objects that mark the
// beginning and end of the list. They do not
// contain any item lists. All valid objects are
// inserted in between them.
// This is needed so that an outStream object can
// deleteSelf() in O(1) time without knowing which
// list it belongs to.
head *outStream
tail *outStream
}
func newOutStreamList() *outStreamList {
head, tail := new(outStream), new(outStream)
head.next = tail
tail.prev = head
return &outStreamList{
head: head,
tail: tail,
}
}
func (l *outStreamList) enqueue(s *outStream) {
e := l.tail.prev
e.next = s
s.prev = e
s.next = l.tail
l.tail.prev = s
}
// remove from the beginning of the list.
func (l *outStreamList) dequeue() *outStream {
b := l.head.next
if b == l.tail {
return nil
}<|fim▁hole|>}
// controlBuffer is a way to pass information to loopy.
// Information is passed as specific struct types called control frames.
// A control frame not only represents data, messages or headers to be sent out
// but can also be used to instruct loopy to update its internal state.
// It shouldn't be confused with an HTTP2 frame, although some of the control frames
// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
type controlBuffer struct {
ch chan struct{}
done <-chan struct{}
mu sync.Mutex
consumerWaiting bool
list *itemList
err error
}
func newControlBuffer(done <-chan struct{}) *controlBuffer {
return &controlBuffer{
ch: make(chan struct{}, 1),
list: &itemList{},
done: done,
}
}
func (c *controlBuffer) put(it interface{}) error {
_, err := c.executeAndPut(nil, it)
return err
}
func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) {
var wakeUp bool
c.mu.Lock()
if c.err != nil {
c.mu.Unlock()
return false, c.err
}
if f != nil {
if !f(it) { // f wasn't successful
c.mu.Unlock()
return false, nil
}
}
if c.consumerWaiting {
wakeUp = true
c.consumerWaiting = false
}
c.list.enqueue(it)
c.mu.Unlock()
if wakeUp {
select {
case c.ch <- struct{}{}:
default:
}
}
return true, nil
}
// Note argument f should never be nil.
func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
c.mu.Lock()
if c.err != nil {
c.mu.Unlock()
return false, c.err
}
if !f(it) { // f wasn't successful
c.mu.Unlock()
return false, nil
}
c.mu.Unlock()
return true, nil
}
func (c *controlBuffer) get(block bool) (interface{}, error) {
for {
c.mu.Lock()
if c.err != nil {
c.mu.Unlock()
return nil, c.err
}
if !c.list.isEmpty() {
h := c.list.dequeue()
c.mu.Unlock()
return h, nil
}
if !block {
c.mu.Unlock()
return nil, nil
}
c.consumerWaiting = true
c.mu.Unlock()
select {
case <-c.ch:
case <-c.done:
c.finish()
return nil, ErrConnClosing
}
}
}
func (c *controlBuffer) finish() {
c.mu.Lock()
if c.err != nil {
c.mu.Unlock()
return
}
c.err = ErrConnClosing
// There may be headers for streams in the control buffer.
// These streams need to be cleaned out since the transport
// is still not aware of these yet.
for head := c.list.dequeueAll(); head != nil; head = head.next {
hdr, ok := head.it.(*headerFrame)
if !ok {
continue
}
if hdr.onOrphaned != nil { // It will be nil on the server-side.
hdr.onOrphaned(ErrConnClosing)
}
}
c.mu.Unlock()
}
type side int
const (
clientSide side = iota
serverSide
)
// Loopy receives frames from the control buffer.
// Each frame is handled individually; most of the work done by loopy goes
// into handling data frames. Loopy maintains a queue of active streams, and each
// stream maintains a queue of data frames; as loopy receives data frames
// it gets added to the queue of the relevant stream.
// Loopy goes over this list of active streams by processing one node every iteration,
// thereby closely resemebling to a round-robin scheduling over all streams. While
// processing a stream, loopy writes out data bytes from this stream capped by the min
// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
type loopyWriter struct {
side side
cbuf *controlBuffer
sendQuota uint32
oiws uint32 // outbound initial window size.
// estdStreams is map of all established streams that are not cleaned-up yet.
// On client-side, this is all streams whose headers were sent out.
// On server-side, this is all streams whose headers were received.
estdStreams map[uint32]*outStream // Established streams.
// activeStreams is a linked-list of all streams that have data to send and some
// stream-level flow control quota.
// Each of these streams internally have a list of data items(and perhaps trailers
// on the server-side) to be sent out.
activeStreams *outStreamList
framer *framer
hBuf *bytes.Buffer // The buffer for HPACK encoding.
hEnc *hpack.Encoder // HPACK encoder.
bdpEst *bdpEstimator
draining bool
// Side-specific handlers
ssGoAwayHandler func(*goAway) (bool, error)
}
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
var buf bytes.Buffer
l := &loopyWriter{
side: s,
cbuf: cbuf,
sendQuota: defaultWindowSize,
oiws: defaultWindowSize,
estdStreams: make(map[uint32]*outStream),
activeStreams: newOutStreamList(),
framer: fr,
hBuf: &buf,
hEnc: hpack.NewEncoder(&buf),
bdpEst: bdpEst,
}
return l
}
const minBatchSize = 1000
// run should be run in a separate goroutine.
// It reads control frames from controlBuf and processes them by:
// 1. Updating loopy's internal state, or/and
// 2. Writing out HTTP2 frames on the wire.
//
// Loopy keeps all active streams with data to send in a linked-list.
// All streams in the activeStreams linked-list must have both:
// 1. Data to send, and
// 2. Stream level flow control quota available.
//
// In each iteration of run loop, other than processing the incoming control
// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
// This results in writing of HTTP2 frames into an underlying write buffer.
// When there's no more control frames to read from controlBuf, loopy flushes the write buffer.
// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
// if the batch size is too low to give stream goroutines a chance to fill it up.
func (l *loopyWriter) run() (err error) {
defer func() {
if err == ErrConnClosing {
// Don't log ErrConnClosing as error since it happens
// 1. When the connection is closed by some other known issue.
// 2. User closed the connection.
// 3. A graceful close of connection.
infof("transport: loopyWriter.run returning. %v", err)
err = nil
}
}()
for {
it, err := l.cbuf.get(true)
if err != nil {
return err
}
if err = l.handle(it); err != nil {
return err
}
if _, err = l.processData(); err != nil {
return err
}
gosched := true
hasdata:
for {
it, err := l.cbuf.get(false)
if err != nil {
return err
}
if it != nil {
if err = l.handle(it); err != nil {
return err
}
if _, err = l.processData(); err != nil {
return err
}
continue hasdata
}
isEmpty, err := l.processData()
if err != nil {
return err
}
if !isEmpty {
continue hasdata
}
if gosched {
gosched = false
if l.framer.writer.offset < minBatchSize {
runtime.Gosched()
continue hasdata
}
}
l.framer.writer.Flush()
break hasdata
}
}
}
func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
}
func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
// Otherwise update the quota.
if w.streamID == 0 {
l.sendQuota += w.increment
return nil
}
// Find the stream and update it.
if str, ok := l.estdStreams[w.streamID]; ok {
str.bytesOutStanding -= int(w.increment)
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
str.state = active
l.activeStreams.enqueue(str)
return nil
}
}
return nil
}
func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
return l.framer.fr.WriteSettings(s.ss...)
}
func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
if err := l.applySettings(s.ss); err != nil {
return err
}
return l.framer.fr.WriteSettingsAck()
}
func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
str := &outStream{
id: h.streamID,
state: empty,
itl: &itemList{},
wq: h.wq,
}
l.estdStreams[h.streamID] = str
return nil
}
func (l *loopyWriter) headerHandler(h *headerFrame) error {
if l.side == serverSide {
str, ok := l.estdStreams[h.streamID]
if !ok {
warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
return nil
}
// Case 1.A: Server is responding back with headers.
if !h.endStream {
return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
}
// else: Case 1.B: Server wants to close stream.
if str.state != empty { // either active or waiting on stream quota.
// add it to str's list of items.
str.itl.enqueue(h)
return nil
}
if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
return err
}
return l.cleanupStreamHandler(h.cleanup)
}
// Case 2: Client wants to originate stream.
str := &outStream{
id: h.streamID,
state: empty,
itl: &itemList{},
wq: h.wq,
}
str.itl.enqueue(h)
return l.originateStream(str)
}
func (l *loopyWriter) originateStream(str *outStream) error {
hdr := str.itl.dequeue().(*headerFrame)
sendPing, err := hdr.initStream(str.id)
if err != nil {
if err == ErrConnClosing {
return err
}
// Other errors(errStreamDrain) need not close transport.
return nil
}
if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
return err
}
l.estdStreams[str.id] = str
if sendPing {
return l.pingHandler(&ping{data: [8]byte{}})
}
return nil
}
func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
if onWrite != nil {
onWrite()
}
l.hBuf.Reset()
for _, f := range hf {
if err := l.hEnc.WriteField(f); err != nil {
warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err)
}
}
var (
err error
endHeaders, first bool
)
first = true
for !endHeaders {
size := l.hBuf.Len()
if size > http2MaxFrameLen {
size = http2MaxFrameLen
} else {
endHeaders = true
}
if first {
first = false
err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
StreamID: streamID,
BlockFragment: l.hBuf.Next(size),
EndStream: endStream,
EndHeaders: endHeaders,
})
} else {
err = l.framer.fr.WriteContinuation(
streamID,
endHeaders,
l.hBuf.Next(size),
)
}
if err != nil {
return err
}
}
return nil
}
func (l *loopyWriter) preprocessData(df *dataFrame) error {
str, ok := l.estdStreams[df.streamID]
if !ok {
return nil
}
// If we got data for a stream it means that
// stream was originated and the headers were sent out.
str.itl.enqueue(df)
if str.state == empty {
str.state = active
l.activeStreams.enqueue(str)
}
return nil
}
func (l *loopyWriter) pingHandler(p *ping) error {
if !p.ack {
l.bdpEst.timesnap(p.data)
}
return l.framer.fr.WritePing(p.ack, p.data)
}
func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
o.resp <- l.sendQuota
return nil
}
func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
c.onWrite()
if str, ok := l.estdStreams[c.streamID]; ok {
// On the server side it could be a trailers-only response or
// a RST_STREAM before stream initialization thus the stream might
// not be established yet.
delete(l.estdStreams, c.streamID)
str.deleteSelf()
}
if c.rst { // If RST_STREAM needs to be sent.
if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
return err
}
}
if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
return ErrConnClosing
}
return nil
}
func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
if l.side == clientSide {
l.draining = true
if len(l.estdStreams) == 0 {
return ErrConnClosing
}
}
return nil
}
func (l *loopyWriter) goAwayHandler(g *goAway) error {
// Handling of outgoing GoAway is very specific to side.
if l.ssGoAwayHandler != nil {
draining, err := l.ssGoAwayHandler(g)
if err != nil {
return err
}
l.draining = draining
}
return nil
}
func (l *loopyWriter) handle(i interface{}) error {
switch i := i.(type) {
case *incomingWindowUpdate:
return l.incomingWindowUpdateHandler(i)
case *outgoingWindowUpdate:
return l.outgoingWindowUpdateHandler(i)
case *incomingSettings:
return l.incomingSettingsHandler(i)
case *outgoingSettings:
return l.outgoingSettingsHandler(i)
case *headerFrame:
return l.headerHandler(i)
case *registerStream:
return l.registerStreamHandler(i)
case *cleanupStream:
return l.cleanupStreamHandler(i)
case *incomingGoAway:
return l.incomingGoAwayHandler(i)
case *dataFrame:
return l.preprocessData(i)
case *ping:
return l.pingHandler(i)
case *goAway:
return l.goAwayHandler(i)
case *outFlowControlSizeRequest:
return l.outFlowControlSizeRequestHandler(i)
default:
return fmt.Errorf("transport: unknown control message type %T", i)
}
}
func (l *loopyWriter) applySettings(ss []http2.Setting) error {
for _, s := range ss {
switch s.ID {
case http2.SettingInitialWindowSize:
o := l.oiws
l.oiws = s.Val
if o < l.oiws {
// If the new limit is greater make all depleted streams active.
for _, stream := range l.estdStreams {
if stream.state == waitingOnStreamQuota {
stream.state = active
l.activeStreams.enqueue(stream)
}
}
}
case http2.SettingHeaderTableSize:
updateHeaderTblSize(l.hEnc, s.Val)
}
}
return nil
}
// processData removes the first stream from active streams, writes out at most 16KB
// of its data, and then puts it at the end of activeStreams if there's still more data
// to be sent and the stream still has some stream-level flow control quota left.
func (l *loopyWriter) processData() (bool, error) {
if l.sendQuota == 0 {
return true, nil
}
str := l.activeStreams.dequeue() // Remove the first stream.
if str == nil {
return true, nil
}
	dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item on this stream.
	// A data item is represented by a dataFrame, since it later translates into
	// multiple HTTP2 data frames.
	// Every dataFrame has two buffers: h, which holds the grpc-message header, and d, which holds the actual data.
	// As an optimization to keep wire traffic low, data from d is copied into h so that a frame can be as big as the
	// maximum possible HTTP2 frame size.
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
// Client sends out empty data frame with endStream = true
if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
return false, err
}
str.itl.dequeue() // remove the empty data item from stream
if str.itl.isEmpty() {
str.state = empty
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
return false, err
}
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
				return false, err
}
} else {
l.activeStreams.enqueue(str)
}
return false, nil
}
var (
idx int
buf []byte
)
if len(dataItem.h) != 0 { // data header has not been written out yet.
buf = dataItem.h
} else {
idx = 1
buf = dataItem.d
}
size := http2MaxFrameLen
if len(buf) < size {
size = len(buf)
}
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
str.state = waitingOnStreamQuota
return false, nil
} else if strQuota < size {
size = strQuota
}
if l.sendQuota < uint32(size) { // connection-level flow control.
size = int(l.sendQuota)
}
	// Now that outgoing flow controls are checked, we can replenish str's write quota.
str.wq.replenish(size)
var endStream bool
// If this is the last data message on this stream and all of it can be written in this iteration.
if dataItem.endStream && size == len(buf) {
		// buf contains either the data, or it contains the header and the data is empty.
if idx == 1 || len(dataItem.d) == 0 {
endStream = true
}
}
if dataItem.onEachWrite != nil {
dataItem.onEachWrite()
}
if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
return false, err
}
buf = buf[size:]
str.bytesOutStanding += size
l.sendQuota -= uint32(size)
if idx == 0 {
dataItem.h = buf
} else {
dataItem.d = buf
}
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
str.itl.dequeue()
}
if str.itl.isEmpty() {
str.state = empty
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
return false, err
}
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
return false, err
}
} else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
str.state = waitingOnStreamQuota
} else { // Otherwise add it back to the list of active streams.
l.activeStreams.enqueue(str)
}
return false, nil
}<|fim▁end|> | b.deleteSelf()
return b |
<|file_name|>creation.py<|end_file_name|><|fim▁begin|>from django.conf import settings
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
from django.utils.functional import cached_property
class PostGISCreation(DatabaseCreation):
geom_index_type = 'GIST'
geom_index_ops = 'GIST_GEOMETRY_OPS'<|fim▁hole|> template_postgis = getattr(settings, 'POSTGIS_TEMPLATE', 'template_postgis')
cursor = self.connection.cursor()
cursor.execute('SELECT 1 FROM pg_database WHERE datname = %s LIMIT 1;', (template_postgis,))
if cursor.fetchone():
return template_postgis
return None
def sql_indexes_for_field(self, model, f, style):
"Return any spatial index creation SQL for the field."
from django.contrib.gis.db.models.fields import GeometryField
output = super(PostGISCreation, self).sql_indexes_for_field(model, f, style)
if isinstance(f, GeometryField):
gqn = self.connection.ops.geo_quote_name
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
if f.geography or self.connection.ops.geometry:
# Geography and Geometry (PostGIS 2.0+) columns are
# created normally.
pass
else:
# Geometry columns are created by `AddGeometryColumn`
# stored procedure.
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('AddGeometryColumn') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ', ' +
style.SQL_FIELD(str(f.srid)) + ', ' +
style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
style.SQL_KEYWORD(str(f.dim)) + ');')
if not f.null:
# Add a NOT NULL constraint to the field
output.append(style.SQL_KEYWORD('ALTER TABLE ') +
style.SQL_TABLE(qn(db_table)) +
style.SQL_KEYWORD(' ALTER ') +
style.SQL_FIELD(qn(f.column)) +
style.SQL_KEYWORD(' SET NOT NULL') + ';')
if f.spatial_index:
# Spatial indexes created the same way for both Geometry and
# Geography columns.
# PostGIS 2.0 does not support GIST_GEOMETRY_OPS. So, on 1.5
# we use GIST_GEOMETRY_OPS, on 2.0 we use either "nd" ops
# which are fast on multidimensional cases, or just plain
# gist index for the 2d case.
if f.geography:
index_ops = ''
elif self.connection.ops.geometry:
if f.dim > 2:
index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops_nd)
else:
index_ops = ''
else:
index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops)
output.append(style.SQL_KEYWORD('CREATE INDEX ') +
style.SQL_TABLE(qn('%s_%s_id' % (db_table, f.column))) +
style.SQL_KEYWORD(' ON ') +
style.SQL_TABLE(qn(db_table)) +
style.SQL_KEYWORD(' USING ') +
style.SQL_COLTYPE(self.geom_index_type) + ' ( ' +
style.SQL_FIELD(qn(f.column)) + index_ops + ' );')
return output
def sql_table_creation_suffix(self):
if self.template_postgis is not None:
return ' TEMPLATE %s' % (
self.connection.ops.quote_name(self.template_postgis),)
return ''
def _create_test_db(self, verbosity, autoclobber):
test_database_name = super(PostGISCreation, self)._create_test_db(verbosity, autoclobber)
if self.template_postgis is None:
# Connect to the test database in order to create the postgis extension
self.connection.close()
self.connection.settings_dict["NAME"] = test_database_name
cursor = self.connection.cursor()
cursor.execute("CREATE EXTENSION postgis")
cursor.connection.commit()
return test_database_name<|fim▁end|> | geom_index_ops_nd = 'GIST_GEOMETRY_OPS_ND'
@cached_property
def template_postgis(self): |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright(C) 2014 Bezleputh
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
<|fim▁hole|>
from .module import RegionsjobModule
__all__ = ['RegionsjobModule']<|fim▁end|> | |
<|file_name|>networks.py<|end_file_name|><|fim▁begin|># coding=utf-8
# Copyright 2022 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python2 python3
"""Generator and discriminator for a progressive GAN model.
See https://arxiv.org/abs/1710.10196 for details about the model.
See https://github.com/tkarras/progressive_growing_of_gans for the original
theano implementation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow.compat.v1 as tf
from tensorflow_gan.examples.progressive_gan import layers
class ResolutionSchedule(object):
"""Image resolution upscaling schedule."""
def __init__(self, start_resolutions=(4, 4), scale_base=2, num_resolutions=4):
"""Initializer.
Args:
      start_resolutions: A tuple of integers of HxW format for start image
resolutions. Defaults to (4, 4).
scale_base: An integer of resolution base multiplier. Defaults to 2.
num_resolutions: An integer of how many progressive resolutions (including
`start_resolutions`). Defaults to 4.
"""
self._start_resolutions = start_resolutions
self._scale_base = scale_base
self._num_resolutions = num_resolutions
@property
def start_resolutions(self):
return tuple(self._start_resolutions)
@property
def scale_base(self):
return self._scale_base
@property
def num_resolutions(self):
return self._num_resolutions
@property
def final_resolutions(self):
"""Returns the final resolutions."""
return tuple([
r * self._scale_base**(self._num_resolutions - 1)
for r in self._start_resolutions
])
def scale_factor(self, block_id):
"""Returns the scale factor for network block `block_id`."""
if block_id < 1 or block_id > self._num_resolutions:
raise ValueError('`block_id` must be in [1, {}]'.format(
self._num_resolutions))
return self._scale_base**(self._num_resolutions - block_id)
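# Illustrative sketch (not part of the original file): with the defaults above,
# ResolutionSchedule(start_resolutions=(4, 4), scale_base=2, num_resolutions=4) gives
#   final_resolutions  == (32, 32)   # 4 * 2**(4 - 1)
#   scale_factor(1)    == 8          # block 1 output must be upscaled 8x
#   scale_factor(4)    == 1          # the last block already runs at full resolution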
def block_name(block_id):
"""Returns the scope name for the network block `block_id`."""
return 'progressive_gan_block_{}'.format(block_id)
def min_total_num_images(stable_stage_num_images, transition_stage_num_images,
num_blocks):
"""Returns the minimum total number of images.
Computes the minimum total number of images required to reach the desired
`resolution`.
Args:
stable_stage_num_images: Number of images in the stable stage.
transition_stage_num_images: Number of images in the transition stage.
num_blocks: Number of network blocks.
Returns:
An integer of the minimum total number of images.
"""
return (num_blocks * stable_stage_num_images +
(num_blocks - 1) * transition_stage_num_images)
def compute_progress(current_image_id, stable_stage_num_images,
transition_stage_num_images, num_blocks):
"""Computes the training progress.
The training alternates between stable phase and transition phase.
The `progress` indicates the training progress, i.e. the training is at
- a stable phase p if progress = p
- a transition stage between p and p + 1 if progress = p + fraction
where p = 0,1,2.,...
Note the max value of progress is `num_blocks` - 1.
In terms of LOD (of the original implementation):
progress = `num_blocks` - 1 - LOD
Args:
    current_image_id: A scalar integer `Tensor` of the current image id, counted
      from 0.
stable_stage_num_images: An integer representing the number of images in
each stable stage.
transition_stage_num_images: An integer representing the number of images in
each transition stage.
num_blocks: Number of network blocks.
Returns:
A scalar float `Tensor` of the training progress.
"""
# Note when current_image_id >= min_total_num_images - 1 (which means we
# are already at the highest resolution), we want to keep progress constant.
# Therefore, cap current_image_id here.
capped_current_image_id = tf.minimum(
current_image_id,
min_total_num_images(stable_stage_num_images, transition_stage_num_images,
num_blocks) - 1)
stage_num_images = stable_stage_num_images + transition_stage_num_images
progress_integer = tf.math.floordiv(capped_current_image_id, stage_num_images)
progress_fraction = tf.maximum(
0.0,
tf.cast(
tf.math.mod(capped_current_image_id, stage_num_images) -
stable_stage_num_images,
dtype=tf.float32) /
tf.cast(transition_stage_num_images, dtype=tf.float32))
return tf.cast(progress_integer, dtype=tf.float32) + progress_fraction
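# Illustrative worked example (not part of the original file): with
# stable_stage_num_images=600, transition_stage_num_images=400 and num_blocks=3,
# stage_num_images is 1000 and min_total_num_images is 3*600 + 2*400 = 2600, so
#   current_image_id=1500  -> progress = 1 + max(0, (500 - 600) / 400) = 1.0  (stable)
#   current_image_id=1800  -> progress = 1 + max(0, (800 - 600) / 400) = 1.5  (transition)
#   current_image_id>=2599 -> the id is capped, and progress stays at 2.0 = num_blocks - 1.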
def _generator_alpha(block_id, progress):
"""Returns the block output parameter for the generator network.
The generator has N blocks with `block_id` = 1,2,...,N. Each block
block_id outputs a fake data output(block_id). The generator output is a
linear combination of all block outputs, i.e.
SUM_block_id(output(block_id) * alpha(block_id, progress)) where
alpha(block_id, progress) = _generator_alpha(block_id, progress). Note it
  guarantees that SUM_block_id(alpha(block_id, progress)) = 1 for any progress.
With a fixed block_id, the plot of alpha(block_id, progress) against progress
is a 'triangle' with its peak at (block_id - 1, 1).
Args:
block_id: An integer of generator block id.
progress: A scalar float `Tensor` of training progress.
Returns:
A scalar float `Tensor` of block output parameter.
"""
return tf.maximum(0.0,
tf.minimum(progress - (block_id - 2), block_id - progress))
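# Illustrative worked example (not part of the original file): for 3 blocks at
# progress = 1.5 the generator output weights are
#   _generator_alpha(1, 1.5) = max(0, min(1.5 + 1, 1 - 1.5)) = 0.0
#   _generator_alpha(2, 1.5) = max(0, min(1.5 - 0, 2 - 1.5)) = 0.5
#   _generator_alpha(3, 1.5) = max(0, min(1.5 - 1, 3 - 1.5)) = 0.5
# which sum to 1, as the docstring above guarantees.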
def _discriminator_alpha(block_id, progress):
"""Returns the block input parameter for discriminator network.
The discriminator has N blocks with `block_id` = 1,2,...,N. Each block
block_id accepts an
- input(block_id) transformed from the real data and
- the output of block block_id + 1, i.e. output(block_id + 1)
The final input is a linear combination of them,
i.e. alpha * input(block_id) + (1 - alpha) * output(block_id + 1)
where alpha = _discriminator_alpha(block_id, progress).
  With a fixed block_id, alpha(block_id, progress) stays at 1
  when progress <= block_id - 1, then linearly decays to 0 when
block_id - 1 < progress <= block_id, and finally stays at 0
when progress > block_id.
Args:
block_id: An integer of generator block id.
progress: A scalar float `Tensor` of training progress.
Returns:
A scalar float `Tensor` of block input parameter.
"""<|fim▁hole|>def blend_images(x, progress, resolution_schedule, num_blocks):
"""Blends images of different resolutions according to `progress`.
When training `progress` is at a stable stage for resolution r, returns
image `x` downscaled to resolution r and then upscaled to `final_resolutions`,
call it x'(r).
Otherwise when training `progress` is at a transition stage from resolution
r to 2r, returns a linear combination of x'(r) and x'(2r).
Args:
x: An image `Tensor` of NHWC format with resolution `final_resolutions`.
progress: A scalar float `Tensor` of training progress.
resolution_schedule: An object of `ResolutionSchedule`.
num_blocks: An integer of number of blocks.
Returns:
An image `Tensor` which is a blend of images of different resolutions.
"""
x_blend = []
for block_id in range(1, num_blocks + 1):
alpha = _generator_alpha(block_id, progress)
scale = resolution_schedule.scale_factor(block_id)
x_blend.append(alpha * layers.upscale(layers.downscale(x, scale), scale))
return tf.add_n(x_blend)
def num_filters(block_id, fmap_base=4096, fmap_decay=1.0, fmap_max=256):
"""Computes number of filters of block `block_id`."""
return int(min(fmap_base / math.pow(2.0, block_id * fmap_decay), fmap_max))
def generator(z,
progress,
num_filters_fn,
resolution_schedule,
num_blocks=None,
kernel_size=3,
colors=3,
to_rgb_activation=None,
scope='progressive_gan_generator',
reuse=None):
"""Generator network for the progressive GAN model.
Args:
z: A `Tensor` of latent vector. The first dimension must be batch size.
progress: A scalar float `Tensor` of training progress.
num_filters_fn: A function that maps `block_id` to # of filters for the
block.
resolution_schedule: An object of `ResolutionSchedule`.
num_blocks: An integer of number of blocks. None means maximum number of
blocks, i.e. `resolution.schedule.num_resolutions`. Defaults to None.
kernel_size: An integer of convolution kernel size.
colors: Number of output color channels. Defaults to 3.
to_rgb_activation: Activation function applied when output rgb.
scope: A string or variable scope.
reuse: Whether to reuse `scope`. Defaults to None which means to inherit the
reuse option of the parent scope.
Returns:
A `Tensor` of model output and a dictionary of model end points.
"""
if num_blocks is None:
num_blocks = resolution_schedule.num_resolutions
start_h, start_w = resolution_schedule.start_resolutions
final_h, final_w = resolution_schedule.final_resolutions
def _conv2d(scope, x, kernel_size, filters, padding='SAME'):
return layers.custom_conv2d(
x=x,
filters=filters,
kernel_size=kernel_size,
padding=padding,
activation=lambda x: layers.pixel_norm(tf.nn.leaky_relu(x)),
he_initializer_slope=0.0,
scope=scope)
def _to_rgb(x):
return layers.custom_conv2d(
x=x,
filters=colors,
kernel_size=1,
padding='SAME',
activation=to_rgb_activation,
scope='to_rgb')
end_points = {}
with tf.variable_scope(scope, reuse=reuse):
with tf.name_scope('input'):
x = tf.layers.flatten(z)
end_points['latent_vector'] = x
with tf.variable_scope(block_name(1)):
x = tf.expand_dims(tf.expand_dims(x, 1), 1)
x = layers.pixel_norm(x)
# Pad the 1 x 1 image to 2 * (start_h - 1) x 2 * (start_w - 1)
# with zeros for the next conv.
x = tf.pad(
tensor=x,
paddings=[[0] * 2, [start_h - 1] * 2, [start_w - 1] * 2, [0] * 2])
# The output is start_h x start_w x num_filters_fn(1).
x = _conv2d('conv0', x, (start_h, start_w), num_filters_fn(1), 'VALID')
x = _conv2d('conv1', x, kernel_size, num_filters_fn(1))
lods = [x]
for block_id in range(2, num_blocks + 1):
with tf.variable_scope(block_name(block_id)):
x = layers.upscale(x, resolution_schedule.scale_base)
x = _conv2d('conv0', x, kernel_size, num_filters_fn(block_id))
x = _conv2d('conv1', x, kernel_size, num_filters_fn(block_id))
lods.append(x)
outputs = []
for block_id in range(1, num_blocks + 1):
with tf.variable_scope(block_name(block_id)):
lod = _to_rgb(lods[block_id - 1])
scale = resolution_schedule.scale_factor(block_id)
lod = layers.upscale(lod, scale)
end_points['upscaled_rgb_{}'.format(block_id)] = lod
# alpha_i is used to replace lod_select. Note sum(alpha_i) is
        # guaranteed to be 1.
alpha = _generator_alpha(block_id, progress)
end_points['alpha_{}'.format(block_id)] = alpha
outputs.append(lod * alpha)
predictions = tf.add_n(outputs)
batch_size = tf.compat.dimension_value(z.shape[0])
predictions.set_shape([batch_size, final_h, final_w, colors])
end_points['predictions'] = predictions
return predictions, end_points
def discriminator(x,
progress,
num_filters_fn,
resolution_schedule,
num_blocks=None,
kernel_size=3,
scope='progressive_gan_discriminator',
reuse=None):
"""Discriminator network for the progressive GAN model.
Args:
x: A `Tensor`of NHWC format representing images of size `resolution`.
progress: A scalar float `Tensor` of training progress.
num_filters_fn: A function that maps `block_id` to # of filters for the
block.
resolution_schedule: An object of `ResolutionSchedule`.
num_blocks: An integer of number of blocks. None means maximum number of
blocks, i.e. `resolution.schedule.num_resolutions`. Defaults to None.
kernel_size: An integer of convolution kernel size.
scope: A string or variable scope.
reuse: Whether to reuse `scope`. Defaults to None which means to inherit the
reuse option of the parent scope.
Returns:
A `Tensor` of model output and a dictionary of model end points.
"""
if num_blocks is None:
num_blocks = resolution_schedule.num_resolutions
def _conv2d(scope, x, kernel_size, filters, padding='SAME'):
return layers.custom_conv2d(
x=x,
filters=filters,
kernel_size=kernel_size,
padding=padding,
activation=tf.nn.leaky_relu,
he_initializer_slope=0.0,
scope=scope)
def _from_rgb(x, block_id):
return _conv2d('from_rgb', x, 1, num_filters_fn(block_id))
end_points = {}
with tf.variable_scope(scope, reuse=reuse):
x0 = x
end_points['rgb'] = x0
lods = []
for block_id in range(num_blocks, 0, -1):
with tf.variable_scope(block_name(block_id)):
scale = resolution_schedule.scale_factor(block_id)
lod = layers.downscale(x0, scale)
end_points['downscaled_rgb_{}'.format(block_id)] = lod
lod = _from_rgb(lod, block_id)
# alpha_i is used to replace lod_select.
alpha = _discriminator_alpha(block_id, progress)
end_points['alpha_{}'.format(block_id)] = alpha
lods.append((lod, alpha))
lods_iter = iter(lods)
x, _ = next(lods_iter)
for block_id in range(num_blocks, 1, -1):
with tf.variable_scope(block_name(block_id)):
x = _conv2d('conv0', x, kernel_size, num_filters_fn(block_id))
x = _conv2d('conv1', x, kernel_size, num_filters_fn(block_id - 1))
x = layers.downscale(x, resolution_schedule.scale_base)
lod, alpha = next(lods_iter)
x = alpha * lod + (1.0 - alpha) * x
with tf.variable_scope(block_name(1)):
x = layers.scalar_concat(x, layers.minibatch_mean_stddev(x))
x = _conv2d('conv0', x, kernel_size, num_filters_fn(1))
x = _conv2d('conv1', x, resolution_schedule.start_resolutions,
num_filters_fn(0), 'VALID')
end_points['last_conv'] = x
logits = layers.custom_dense(x=x, units=1, scope='logits')
end_points['logits'] = logits
return logits, end_points<|fim▁end|> | return tf.clip_by_value(block_id - progress, 0.0, 1.0)
|
<|file_name|>encoding.py<|end_file_name|><|fim▁begin|>from .spec import BASIC_PROPS_SET, encode_basic_properties
def encode_message(frame, headers, body, frame_size):
"""Encode message headers and body as a sequence of frames."""
for f in frame.encode():
yield f
props, headers = split_headers(headers, BASIC_PROPS_SET)
if headers:
props['headers'] = headers
yield encode_basic_properties(len(body), props)
for chunk in encode_body(body, frame_size):
yield chunk
def split_headers(user_headers, properties_set):
"""Split bitfield properties from named headers."""
props = {}
headers = {}
for key, value in user_headers.iteritems():
if key in properties_set:
props[key] = value
else:
headers[key] = value
return props, headers
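# Illustrative sketch (not part of the original module): split_headers separates
# spec-defined basic properties from application-level headers, e.g.
#
#   props, headers = split_headers(
#       {'content_type': 'application/json', 'x-request-id': '42'}, BASIC_PROPS_SET)
#   # props   -> {'content_type': 'application/json'}  (assuming that key is in BASIC_PROPS_SET)
#   # headers -> {'x-request-id': '42'}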
def encode_body(body, frame_size):
"""Generate a sequence of chunks for body where each chunk is less than frame_size"""
limit = frame_size - 7 - 1 # spec is broken...<|fim▁hole|> payload, body = body[:limit], body[limit:]
yield (0x03, payload)<|fim▁end|> | while body: |
<|file_name|>HeatMap.py<|end_file_name|><|fim▁begin|>import redis
class BetaRedis(redis.StrictRedis):<|fim▁hole|> return self.execute_command('GEOADD', name, *values)
def geopos(self, name, *values):
return self.execute_command('GEOPOS', name, *values)
class RedisHeatMap:
REDIS_KEY = 'heatmap'
REDIS_KEY_GEO = REDIS_KEY + '_GEO'
REDIS_KEY_HASH = REDIS_KEY + '_HASH'
def __init__(self, host='localhost', port=6379, db=0):
self.r = BetaRedis(host=host, port=port, db=db)
self.r.flushdb()
def gen(self, data, distance=200000, min_sum=1):
for point in data:
try:
res = self.r.georadius(self.REDIS_KEY_GEO, point['lng'], point['lat'], distance, 'm')
if not res:
self.r.geoadd(self.REDIS_KEY_GEO, point['lng'], point['lat'], point['key'])
self.r.hset(self.REDIS_KEY_HASH, point['key'], 1)
else:
self.r.hincrby(self.REDIS_KEY_HASH, res[0])
except redis.exceptions.ResponseError as e:
pass
for key in self.r.hscan_iter(self.REDIS_KEY_HASH):
lng, lat = map(lambda x: x.decode(), self.r.geopos(self.REDIS_KEY_GEO, key[0].decode())[0])
if int(key[1]) >= min_sum:
yield {'key': key[0].decode(), 'lat': lat, 'lng': lng, 'sum': int(key[1])}<|fim▁end|> | def georadius(self, name, *values):
return self.execute_command('GEORADIUS', name, *values)
def geoadd(self, name, *values): |
<|file_name|>dn.py<|end_file_name|><|fim▁begin|>#
# Copyright 2015 Fasih
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class DN(object):
def __init__(self, dn):
self._dn = dn.replace(',dn', '')
self._cn = []
self._displayName = []
self._givenName = []
self._homePhone = []
self._homePostalAddress = []
self._mail = []
self._mobile = []
self._o = []
self._objectClass = []
self._sn = []
self._telephoneNumber = []
self._title = []
@property
def dn(self): return self._dn
@property
def cn(self): return self._cn
@cn.setter
def cn(self, v):
self._cn.append(v)
@property
def displayName(self): return self._displayName
@displayName.setter
def displayName(self, v):
self._displayName.append(v)
@property
def givenName(self): return self._givenName
@givenName.setter
def givenName(self, v):
self._givenName.append(v)
@property
def homePhone(self): return self._homePhone
@homePhone.setter
def homePhone(self, v):
self._homePhone.append(v)
@property
def homePostalAddress(self): return self._homePostalAddress
@homePostalAddress.setter
def homePostalAddress(self, v):
self._homePostalAddress.append(v)
@property
def mail(self): return self._mail<|fim▁hole|> @mail.setter
def mail(self, v):
self._mail.append(v)
@property
def mobile(self): return self._mobile
@mobile.setter
def mobile(self, v):
self._mobile.append(v)
@property
def o(self): return self._o
@o.setter
def o(self, v):
self._o.append(v)
@property
def objectClass(self): return self._objectClass
@objectClass.setter
def objectClass(self, v):
self._objectClass.append(v)
@property
def sn(self): return self._sn
@sn.setter
def sn(self, v):
self._sn.append(v)
@property
def telephoneNumber(self): return self._telephoneNumber
@telephoneNumber.setter
def telephoneNumber(self, v):
self._telephoneNumber.append(v)
@property
def title(self): return self._title
@title.setter
def title(self, v):
self._title.append(v)
def csv(self):
items = []
items.append(self.displayName)
items.append(self.givenName)
items.append(self.sn)
items.append(self.title)
items.append(['Home'])
items.append(self.homePhone)
items.append(['Mobile'])
items.append(self.mobile)
items.append(['Mobile'])
items.append(self.telephoneNumber)
items.append(['Home'])
items.append(self.homePostalAddress)
items.append(self.mail)
items.append(self.o)
return ','.join([' ::: '.join([x.replace(',', ' ') for x in i]) for i in items])
def __str__(self):
s = 'DN<dn=%s' % self._dn
if self.cn != []: s += ', cn=%s' % self.cn
if self.displayName != []: s += ', displayName=%s' % self.displayName
if self.givenName != []: s += ', givenName=%s' % self.givenName
if self.homePhone != []: s += ', homePhone=%s' % self.homePhone
if self.homePostalAddress != []: s += ', homePostalAddress=%s' % self.homePostalAddress
if self.mail != []: s += ', mail=%s' % self.mail
if self.mobile != []: s += ', mobile=%s' % self.mobile
if self.o != []: s += ', o=%s' % self.o
if self.objectClass != []: s += ', objectClass=%s' % self.objectClass
if self.sn != []: s += ', sn=%s' % self.sn
if self.telephoneNumber != []: s += ', telephoneNumber=%s' % self.telephoneNumber
if self.title != []: s += ', title=%s' % self.title
return s + '>'<|fim▁end|> | |
<|file_name|>ComponentWrapper.java<|end_file_name|><|fim▁begin|>/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package optas.gui.wizard;
/**
*<|fim▁hole|> * @author chris
*/
public class ComponentWrapper {
public String componentName;
public String componentContext;
public boolean contextComponent;
public ComponentWrapper(String componentName, String componentContext, boolean contextComponent) {
this.componentContext = componentContext;
this.componentName = componentName;
this.contextComponent = contextComponent;
}
@Override
public String toString() {
if (contextComponent) {
return componentName;
}
return /*componentContext + "." + */ componentName;
}
}<|fim▁end|> | |
<|file_name|>ForceLocalePreference.java<|end_file_name|><|fim▁begin|>/*
* Geopaparazzi - Digital field mapping on Android based devices
* Copyright (C) 2016 HydroloGIS (www.hydrologis.com)
*
* This program is free software: you can redistribute it and/or modify<|fim▁hole|> * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package eu.geopaparazzi.core.preferences;
import android.content.Context;
import android.content.res.TypedArray;
import android.preference.DialogPreference;
import android.util.AttributeSet;
import android.view.View;
import android.view.ViewGroup.LayoutParams;
import android.widget.ArrayAdapter;
import android.widget.LinearLayout;
import android.widget.Spinner;
import eu.geopaparazzi.core.R;
import eu.geopaparazzi.library.locale.LocaleUtils;
/**
* A custom preference to force a particular locale, even if the OS is on another.
*
* @author Andrea Antonello (www.hydrologis.com)
*/
public class ForceLocalePreference extends DialogPreference {
public static final String PREFS_KEY_FORCELOCALE = "PREFS_KEY_FORCELOCALE";//NON-NLS
private Context context;
private Spinner localesSpinner;
/**
* @param ctxt the context to use.
* @param attrs attributes.
*/
public ForceLocalePreference(Context ctxt, AttributeSet attrs) {
super(ctxt, attrs);
this.context = ctxt;
setPositiveButtonText(ctxt.getString(android.R.string.ok));
setNegativeButtonText(ctxt.getString(android.R.string.cancel));
}
@Override
protected View onCreateDialogView() {
LinearLayout mainLayout = new LinearLayout(context);
LinearLayout.LayoutParams layoutParams = new LinearLayout.LayoutParams(LayoutParams.MATCH_PARENT,
LayoutParams.MATCH_PARENT);
mainLayout.setLayoutParams(layoutParams);
mainLayout.setOrientation(LinearLayout.VERTICAL);
mainLayout.setPadding(25, 25, 25, 25);
localesSpinner = new Spinner(context);
localesSpinner.setLayoutParams(new LinearLayout.LayoutParams(LayoutParams.MATCH_PARENT, LayoutParams.WRAP_CONTENT));
localesSpinner.setPadding(15, 5, 15, 5);
final String[] localesArray = context.getResources().getStringArray(R.array.locales);
ArrayAdapter<String> adapter = new ArrayAdapter<>(context, android.R.layout.simple_spinner_item, localesArray);
adapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
localesSpinner.setAdapter(adapter);
final String currentLocale = LocaleUtils.getCurrentLocale(context);
if (currentLocale != null) {
for (int i = 0; i < localesArray.length; i++) {
if (localesArray[i].equals(currentLocale.trim())) {
localesSpinner.setSelection(i);
break;
}
}
}
mainLayout.addView(localesSpinner);
return mainLayout;
}
@Override
protected void onBindDialogView(View v) {
super.onBindDialogView(v);
}
@Override
protected void onDialogClosed(boolean positiveResult) {
super.onDialogClosed(positiveResult);
if (positiveResult) {
String selectedLocale = localesSpinner.getSelectedItem().toString();
LocaleUtils.changeLang(context, selectedLocale);
}
}
@Override
protected Object onGetDefaultValue(TypedArray a, int index) {
return (a.getString(index));
}
@Override
protected void onSetInitialValue(boolean restoreValue, Object defaultValue) {
}
}<|fim▁end|> | * it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* |
<|file_name|>LightboxGallery.stories.tsx<|end_file_name|><|fim▁begin|>// Copyright 2020 Signal Messenger, LLC
// SPDX-License-Identifier: AGPL-3.0-only
import * as React from 'react';
import { storiesOf } from '@storybook/react';
import { action } from '@storybook/addon-actions';
import { number } from '@storybook/addon-knobs';
import { LightboxGallery, Props } from './LightboxGallery';
import { setup as setupI18n } from '../../js/modules/i18n';
import enMessages from '../../_locales/en/messages.json';
import { IMAGE_JPEG, VIDEO_MP4 } from '../types/MIME';
const i18n = setupI18n('en', enMessages);
const story = storiesOf('Components/LightboxGallery', module);
const createProps = (overrideProps: Partial<Props> = {}): Props => ({
close: action('close'),
i18n,
media: overrideProps.media || [],
onSave: action('onSave'),
selectedIndex: number('selectedIndex', overrideProps.selectedIndex || 0),
});
story.add('Image and Video', () => {
const props = createProps({
media: [
{
attachment: {
contentType: IMAGE_JPEG,
fileName: 'tina-rolf-269345-unsplash.jpg',<|fim▁hole|> },
contentType: IMAGE_JPEG,
index: 0,
message: {
attachments: [],
id: 'image-msg',
received_at: Date.now(),
},
objectURL: '/fixtures/tina-rolf-269345-unsplash.jpg',
},
{
attachment: {
contentType: VIDEO_MP4,
fileName: 'pixabay-Soap-Bubble-7141.mp4',
url: '/fixtures/pixabay-Soap-Bubble-7141.mp4',
},
contentType: VIDEO_MP4,
index: 1,
message: {
attachments: [],
id: 'video-msg',
received_at: Date.now(),
},
objectURL: '/fixtures/pixabay-Soap-Bubble-7141.mp4',
},
],
});
return <LightboxGallery {...props} />;
});
story.add('Missing Media', () => {
const props = createProps({
media: [
{
attachment: {
contentType: IMAGE_JPEG,
fileName: 'tina-rolf-269345-unsplash.jpg',
url: '/fixtures/tina-rolf-269345-unsplash.jpg',
},
contentType: IMAGE_JPEG,
index: 0,
message: {
attachments: [],
id: 'image-msg',
received_at: Date.now(),
},
objectURL: undefined,
},
],
});
return <LightboxGallery {...props} />;
});<|fim▁end|> | url: '/fixtures/tina-rolf-269345-unsplash.jpg',
caption:
'Still from The Lighthouse, starring Robert Pattinson and Willem Defoe.', |
<|file_name|>people.tsx<|end_file_name|><|fim▁begin|>/// <reference path="../../../typings/index.d.ts" />
import * as React from 'react';
import * as ReactDOM from 'react-dom';
import { UserList } from '../components/users';
export class People extends React.Component<{}, {}> {
render () {
return (
<div className="row">
<div id="content">
<UserList /><|fim▁hole|> </div>
);
}
}<|fim▁end|> | </div> |
<|file_name|>Bacon.js<|end_file_name|><|fim▁begin|>version https://git-lfs.github.com/spec/v1<|fim▁hole|>size 89408<|fim▁end|> | oid sha256:5a4f668a21f7ea9a0b8ab69c0e5fec6461ab0f73f7836acd640fe43ea9919fcf |
<|file_name|>settings.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Scrapy settings for saymedia project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'saymedia'
SPIDER_MODULES = ['saymedia.spiders']
NEWSPIDER_MODULE = 'saymedia.spiders'
ROBOTSTXT_OBEY = True
DOWNLOADER_MIDDLEWARES = {
'saymedia.middleware.ErrorConverterMiddleware': 1,<|fim▁hole|> # 'saymedia.middleware.MysqlDownloaderMiddleware': 1,
'saymedia.middleware.OriginHostMiddleware': 2,
'saymedia.middleware.TimerDownloaderMiddleware': 998,
}
SPIDER_REPORTS = {
'xml': 'saymedia.reports.XmlReport',
'firebase': 'saymedia.reports.FirebaseReport',
}
SPIDER_MIDDLEWARES = {
'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': None,
}
ITEM_PIPELINES = {
'saymedia.pipelines.DatabaseWriterPipeline': 0,
}
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'SEO Spider (+http://www.saymedia.com)'
DATABASE = {
'USER': 'YOUR_DATABASE_USER',
'PASS': 'YOUR_DATABASE_PASS',
}
FIREBASE_URL = "YOUR_FIREBASE_URL"
try:
# Only used in development environments
from .local_settings import *
except ImportError:
pass<|fim▁end|> | |
<|file_name|>headless.rs<|end_file_name|><|fim▁begin|>use crate::errors::*;
use crate::math::prelude::Vector2;
use super::super::events::Event;
use super::Visitor;
pub struct HeadlessVisitor {}
impl Visitor for HeadlessVisitor {
#[inline]
fn show(&self) {}
#[inline]
fn hide(&self) {}
#[inline]
fn position(&self) -> Vector2<i32> {
(0, 0).into()
}
<|fim▁hole|> (0, 0).into()
}
#[inline]
fn device_pixel_ratio(&self) -> f32 {
1.0
}
#[inline]
fn resize(&self, _: Vector2<u32>) {}
#[inline]
fn poll_events(&mut self, _: &mut Vec<Event>) {}
#[inline]
fn is_current(&self) -> bool {
true
}
#[inline]
fn make_current(&self) -> Result<()> {
Ok(())
}
#[inline]
fn swap_buffers(&self) -> Result<()> {
Ok(())
}
}<|fim▁end|> | #[inline]
fn dimensions(&self) -> Vector2<u32> { |
<|file_name|>obfuscated_file_util.cc<|end_file_name|><|fim▁begin|>// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "webkit/browser/fileapi/obfuscated_file_util.h"
#include <queue>
#include <string>
#include <vector>
#include "base/file_util.h"
#include "base/format_macros.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/strings/sys_string_conversions.h"
#include "base/strings/utf_string_conversions.h"
#include "base/time/time.h"
#include "url/gurl.h"
#include "webkit/browser/fileapi/file_observers.h"
#include "webkit/browser/fileapi/file_system_context.h"
#include "webkit/browser/fileapi/file_system_operation_context.h"
#include "webkit/browser/fileapi/file_system_url.h"
#include "webkit/browser/fileapi/native_file_util.h"
#include "webkit/browser/fileapi/sandbox_file_system_backend.h"
#include "webkit/browser/fileapi/sandbox_isolated_origin_database.h"
#include "webkit/browser/fileapi/sandbox_origin_database.h"
#include "webkit/browser/fileapi/sandbox_prioritized_origin_database.h"
#include "webkit/browser/fileapi/timed_task_helper.h"
#include "webkit/browser/quota/quota_manager.h"
#include "webkit/common/database/database_identifier.h"
#include "webkit/common/fileapi/file_system_util.h"
// Example of various paths:
// void ObfuscatedFileUtil::DoSomething(const FileSystemURL& url) {
// base::FilePath virtual_path = url.path();
// base::FilePath local_path = GetLocalFilePath(url);
//
// NativeFileUtil::DoSomething(local_path);
// file_util::DoAnother(local_path);
// }
namespace fileapi {
namespace {
typedef SandboxDirectoryDatabase::FileId FileId;
typedef SandboxDirectoryDatabase::FileInfo FileInfo;
void InitFileInfo(
SandboxDirectoryDatabase::FileInfo* file_info,
SandboxDirectoryDatabase::FileId parent_id,
const base::FilePath::StringType& file_name) {
DCHECK(file_info);
file_info->parent_id = parent_id;
file_info->name = file_name;
}
// Costs computed as per crbug.com/86114, based on the LevelDB implementation of
// path storage under Linux. It's not clear if that will differ on Windows, on
// which base::FilePath uses wide chars [since they're converted to UTF-8 for
// storage anyway], but as long as the cost is high enough that one can't cheat
// on quota by storing data in paths, it doesn't need to be all that accurate.
const int64 kPathCreationQuotaCost = 146; // Bytes per inode, basically.
const int64 kPathByteQuotaCost = 2; // Bytes per byte of path length in UTF-8.
int64 UsageForPath(size_t length) {
return kPathCreationQuotaCost +
static_cast<int64>(length) * kPathByteQuotaCost;
}
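// Illustrative note (not part of the original Chromium source): with the constants
// above, UsageForPath charges a file whose name is 10 characters long (e.g.
// "photo1.jpg") 146 + 10 * 2 = 166 bytes of quota, regardless of the file's
// content size.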
bool AllocateQuota(FileSystemOperationContext* context, int64 growth) {
if (context->allowed_bytes_growth() == quota::QuotaManager::kNoLimit)
return true;
int64 new_quota = context->allowed_bytes_growth() - growth;
if (growth > 0 && new_quota < 0)
return false;
context->set_allowed_bytes_growth(new_quota);
return true;
}
void UpdateUsage(
FileSystemOperationContext* context,
const FileSystemURL& url,
int64 growth) {
context->update_observers()->Notify(
&FileUpdateObserver::OnUpdate, MakeTuple(url, growth));
}
void TouchDirectory(SandboxDirectoryDatabase* db, FileId dir_id) {
DCHECK(db);
if (!db->UpdateModificationTime(dir_id, base::Time::Now()))
NOTREACHED();
}
enum IsolatedOriginStatus {
kIsolatedOriginMatch,
kIsolatedOriginDontMatch,
kIsolatedOriginStatusMax,
};
} // namespace
class ObfuscatedFileEnumerator
: public FileSystemFileUtil::AbstractFileEnumerator {
public:
ObfuscatedFileEnumerator(
SandboxDirectoryDatabase* db,
FileSystemOperationContext* context,
ObfuscatedFileUtil* obfuscated_file_util,
const FileSystemURL& root_url,
bool recursive)
: db_(db),
context_(context),
obfuscated_file_util_(obfuscated_file_util),
root_url_(root_url),
recursive_(recursive),
current_file_id_(0) {
base::FilePath root_virtual_path = root_url.path();
FileId file_id;
if (!db_->GetFileWithPath(root_virtual_path, &file_id))
return;
FileRecord record = { file_id, root_virtual_path };
recurse_queue_.push(record);
}
virtual ~ObfuscatedFileEnumerator() {}
virtual base::FilePath Next() OVERRIDE {
ProcessRecurseQueue();
if (display_stack_.empty())
return base::FilePath();
current_file_id_ = display_stack_.back();
display_stack_.pop_back();
FileInfo file_info;
base::FilePath platform_file_path;
base::File::Error error =
obfuscated_file_util_->GetFileInfoInternal(
db_, context_, root_url_, current_file_id_,
&file_info, ¤t_platform_file_info_, &platform_file_path);
if (error != base::File::FILE_OK)
return Next();
base::FilePath virtual_path =
current_parent_virtual_path_.Append(file_info.name);
if (recursive_ && file_info.is_directory()) {
FileRecord record = { current_file_id_, virtual_path };
recurse_queue_.push(record);
}
return virtual_path;
}
virtual int64 Size() OVERRIDE {
return current_platform_file_info_.size;
}
virtual base::Time LastModifiedTime() OVERRIDE {
return current_platform_file_info_.last_modified;
}
virtual bool IsDirectory() OVERRIDE {
return current_platform_file_info_.is_directory;
}
private:
typedef SandboxDirectoryDatabase::FileId FileId;
typedef SandboxDirectoryDatabase::FileInfo FileInfo;
struct FileRecord {
FileId file_id;
base::FilePath virtual_path;
};
void ProcessRecurseQueue() {
while (display_stack_.empty() && !recurse_queue_.empty()) {
FileRecord entry = recurse_queue_.front();
recurse_queue_.pop();
if (!db_->ListChildren(entry.file_id, &display_stack_)) {
display_stack_.clear();
return;
}
current_parent_virtual_path_ = entry.virtual_path;
}
}
SandboxDirectoryDatabase* db_;
FileSystemOperationContext* context_;
ObfuscatedFileUtil* obfuscated_file_util_;
FileSystemURL root_url_;
bool recursive_;
std::queue<FileRecord> recurse_queue_;
std::vector<FileId> display_stack_;
base::FilePath current_parent_virtual_path_;
FileId current_file_id_;
base::File::Info current_platform_file_info_;
};
class ObfuscatedOriginEnumerator
: public ObfuscatedFileUtil::AbstractOriginEnumerator {
public:
typedef SandboxOriginDatabase::OriginRecord OriginRecord;
ObfuscatedOriginEnumerator(
SandboxOriginDatabaseInterface* origin_database,
const base::FilePath& base_file_path)
: base_file_path_(base_file_path) {
if (origin_database)
origin_database->ListAllOrigins(&origins_);
}
virtual ~ObfuscatedOriginEnumerator() {}
// Returns the next origin. Returns empty if there are no more origins.
virtual GURL Next() OVERRIDE {
OriginRecord record;
if (!origins_.empty()) {
record = origins_.back();
origins_.pop_back();
}
current_ = record;
return webkit_database::GetOriginFromIdentifier(record.origin);
}
// Returns the current origin's information.
virtual bool HasTypeDirectory(const std::string& type_string) const OVERRIDE {
if (current_.path.empty())
return false;
if (type_string.empty()) {
NOTREACHED();
return false;
}
base::FilePath path =
base_file_path_.Append(current_.path).AppendASCII(type_string);
return base::DirectoryExists(path);
}
private:
std::vector<OriginRecord> origins_;
OriginRecord current_;
base::FilePath base_file_path_;
};
ObfuscatedFileUtil::ObfuscatedFileUtil(
quota::SpecialStoragePolicy* special_storage_policy,
const base::FilePath& file_system_directory,
leveldb::Env* env_override,
base::SequencedTaskRunner* file_task_runner,
const GetTypeStringForURLCallback& get_type_string_for_url,
const std::set<std::string>& known_type_strings,
SandboxFileSystemBackendDelegate* sandbox_delegate)
: special_storage_policy_(special_storage_policy),
file_system_directory_(file_system_directory),
env_override_(env_override),
db_flush_delay_seconds_(10 * 60), // 10 mins.
file_task_runner_(file_task_runner),
get_type_string_for_url_(get_type_string_for_url),
known_type_strings_(known_type_strings),
sandbox_delegate_(sandbox_delegate) {
}
ObfuscatedFileUtil::~ObfuscatedFileUtil() {
DropDatabases();
}
base::File ObfuscatedFileUtil::CreateOrOpen(
FileSystemOperationContext* context,
const FileSystemURL& url, int file_flags) {
base::File file = CreateOrOpenInternal(context, url, file_flags);
if (file.IsValid() && file_flags & base::File::FLAG_WRITE &&
context->quota_limit_type() == quota::kQuotaLimitTypeUnlimited &&
sandbox_delegate_) {
sandbox_delegate_->StickyInvalidateUsageCache(url.origin(), url.type());
}
return file.Pass();
}
base::File::Error ObfuscatedFileUtil::EnsureFileExists(
FileSystemOperationContext* context,
const FileSystemURL& url,
bool* created) {
SandboxDirectoryDatabase* db = GetDirectoryDatabase(url, true);
if (!db)
return base::File::FILE_ERROR_FAILED;
FileId file_id;
if (db->GetFileWithPath(url.path(), &file_id)) {
FileInfo file_info;
if (!db->GetFileInfo(file_id, &file_info)) {
NOTREACHED();
return base::File::FILE_ERROR_FAILED;
}
if (file_info.is_directory())
return base::File::FILE_ERROR_NOT_A_FILE;
if (created)
*created = false;
return base::File::FILE_OK;
}
FileId parent_id;
if (!db->GetFileWithPath(VirtualPath::DirName(url.path()), &parent_id))
return base::File::FILE_ERROR_NOT_FOUND;
FileInfo file_info;
InitFileInfo(&file_info, parent_id,
VirtualPath::BaseName(url.path()).value());
int64 growth = UsageForPath(file_info.name.size());
if (!AllocateQuota(context, growth))
return base::File::FILE_ERROR_NO_SPACE;
base::File::Error error = CreateFile(context, base::FilePath(), url,
&file_info);
if (created && base::File::FILE_OK == error) {
*created = true;
UpdateUsage(context, url, growth);
context->change_observers()->Notify(
&FileChangeObserver::OnCreateFile, MakeTuple(url));
}
return error;
}
base::File::Error ObfuscatedFileUtil::CreateDirectory(
FileSystemOperationContext* context,
const FileSystemURL& url,
bool exclusive,
bool recursive) {
SandboxDirectoryDatabase* db = GetDirectoryDatabase(url, true);
if (!db)
return base::File::FILE_ERROR_FAILED;
FileId file_id;
if (db->GetFileWithPath(url.path(), &file_id)) {
FileInfo file_info;
if (exclusive)
return base::File::FILE_ERROR_EXISTS;
if (!db->GetFileInfo(file_id, &file_info)) {
NOTREACHED();
return base::File::FILE_ERROR_FAILED;
}
if (!file_info.is_directory())
return base::File::FILE_ERROR_NOT_A_DIRECTORY;
return base::File::FILE_OK;
}
std::vector<base::FilePath::StringType> components;
VirtualPath::GetComponents(url.path(), &components);
FileId parent_id = 0;
size_t index;
for (index = 0; index < components.size(); ++index) {
base::FilePath::StringType name = components[index];
if (name == FILE_PATH_LITERAL("/"))
continue;
if (!db->GetChildWithName(parent_id, name, &parent_id))
break;
}
if (!db->IsDirectory(parent_id))
return base::File::FILE_ERROR_NOT_A_DIRECTORY;
if (!recursive && components.size() - index > 1)
return base::File::FILE_ERROR_NOT_FOUND;
bool first = true;
for (; index < components.size(); ++index) {
FileInfo file_info;
file_info.name = components[index];
if (file_info.name == FILE_PATH_LITERAL("/"))
continue;
file_info.modification_time = base::Time::Now();
file_info.parent_id = parent_id;
int64 growth = UsageForPath(file_info.name.size());
if (!AllocateQuota(context, growth))
return base::File::FILE_ERROR_NO_SPACE;
base::File::Error error = db->AddFileInfo(file_info, &parent_id);
if (error != base::File::FILE_OK)
return error;
UpdateUsage(context, url, growth);
context->change_observers()->Notify(
&FileChangeObserver::OnCreateDirectory, MakeTuple(url));
if (first) {
first = false;
TouchDirectory(db, file_info.parent_id);
}
}
return base::File::FILE_OK;
}
base::File::Error ObfuscatedFileUtil::GetFileInfo(
FileSystemOperationContext* context,
const FileSystemURL& url,
base::File::Info* file_info,
base::FilePath* platform_file_path) {
SandboxDirectoryDatabase* db = GetDirectoryDatabase(url, false);
if (!db)
return base::File::FILE_ERROR_NOT_FOUND;
FileId file_id;
if (!db->GetFileWithPath(url.path(), &file_id))
return base::File::FILE_ERROR_NOT_FOUND;
FileInfo local_info;
return GetFileInfoInternal(db, context, url,
file_id, &local_info,
file_info, platform_file_path);
}
scoped_ptr<FileSystemFileUtil::AbstractFileEnumerator>
ObfuscatedFileUtil::CreateFileEnumerator(
FileSystemOperationContext* context,
const FileSystemURL& root_url) {
return CreateFileEnumerator(context, root_url, false /* recursive */);
}
base::File::Error ObfuscatedFileUtil::GetLocalFilePath(
FileSystemOperationContext* context,
const FileSystemURL& url,
base::FilePath* local_path) {
SandboxDirectoryDatabase* db = GetDirectoryDatabase(url, false);
if (!db)
return base::File::FILE_ERROR_NOT_FOUND;
FileId file_id;
if (!db->GetFileWithPath(url.path(), &file_id))
return base::File::FILE_ERROR_NOT_FOUND;
FileInfo file_info;
if (!db->GetFileInfo(file_id, &file_info) || file_info.is_directory()) {
NOTREACHED();
// Directories have no local file path.
return base::File::FILE_ERROR_NOT_FOUND;
}
*local_path = DataPathToLocalPath(url, file_info.data_path);
if (local_path->empty())
return base::File::FILE_ERROR_NOT_FOUND;
return base::File::FILE_OK;
}
base::File::Error ObfuscatedFileUtil::Touch(
FileSystemOperationContext* context,
const FileSystemURL& url,
const base::Time& last_access_time,
const base::Time& last_modified_time) {
SandboxDirectoryDatabase* db = GetDirectoryDatabase(url, false);
if (!db)
return base::File::FILE_ERROR_NOT_FOUND;
FileId file_id;
if (!db->GetFileWithPath(url.path(), &file_id))
return base::File::FILE_ERROR_NOT_FOUND;
FileInfo file_info;
if (!db->GetFileInfo(file_id, &file_info)) {
NOTREACHED();
return base::File::FILE_ERROR_FAILED;
}
if (file_info.is_directory()) {
if (!db->UpdateModificationTime(file_id, last_modified_time))
return base::File::FILE_ERROR_FAILED;
return base::File::FILE_OK;
}
return NativeFileUtil::Touch(
DataPathToLocalPath(url, file_info.data_path),
last_access_time, last_modified_time);
}
base::File::Error ObfuscatedFileUtil::Truncate(
FileSystemOperationContext* context,
const FileSystemURL& url,
int64 length) {
base::File::Info file_info;
base::FilePath local_path;
base::File::Error error =
GetFileInfo(context, url, &file_info, &local_path);
if (error != base::File::FILE_OK)
return error;
int64 growth = length - file_info.size;
if (!AllocateQuota(context, growth))
return base::File::FILE_ERROR_NO_SPACE;
error = NativeFileUtil::Truncate(local_path, length);
if (error == base::File::FILE_OK) {
UpdateUsage(context, url, growth);
context->change_observers()->Notify(
&FileChangeObserver::OnModifyFile, MakeTuple(url));
}
return error;
}
base::File::Error ObfuscatedFileUtil::CopyOrMoveFile(
FileSystemOperationContext* context,
const FileSystemURL& src_url,
const FileSystemURL& dest_url,
CopyOrMoveOption option,
bool copy) {
// Cross-filesystem copies and moves should be handled via CopyInForeignFile.
DCHECK(src_url.origin() == dest_url.origin());
DCHECK(src_url.type() == dest_url.type());
SandboxDirectoryDatabase* db = GetDirectoryDatabase(src_url, true);
if (!db)
return base::File::FILE_ERROR_FAILED;
FileId src_file_id;
if (!db->GetFileWithPath(src_url.path(), &src_file_id))
return base::File::FILE_ERROR_NOT_FOUND;
FileId dest_file_id;
bool overwrite = db->GetFileWithPath(dest_url.path(),
&dest_file_id);
FileInfo src_file_info;
base::File::Info src_platform_file_info;
base::FilePath src_local_path;
base::File::Error error = GetFileInfoInternal(
db, context, src_url, src_file_id,
&src_file_info, &src_platform_file_info, &src_local_path);
if (error != base::File::FILE_OK)
return error;
if (src_file_info.is_directory())
return base::File::FILE_ERROR_NOT_A_FILE;
FileInfo dest_file_info;
base::File::Info dest_platform_file_info; // overwrite case only
base::FilePath dest_local_path; // overwrite case only
if (overwrite) {
base::File::Error error = GetFileInfoInternal(
db, context, dest_url, dest_file_id,
&dest_file_info, &dest_platform_file_info, &dest_local_path);
if (error == base::File::FILE_ERROR_NOT_FOUND)
overwrite = false; // fallback to non-overwrite case
else if (error != base::File::FILE_OK)
return error;
else if (dest_file_info.is_directory())
return base::File::FILE_ERROR_INVALID_OPERATION;
}
if (!overwrite) {
FileId dest_parent_id;
if (!db->GetFileWithPath(VirtualPath::DirName(dest_url.path()),
&dest_parent_id)) {
return base::File::FILE_ERROR_NOT_FOUND;
}
dest_file_info = src_file_info;
dest_file_info.parent_id = dest_parent_id;
dest_file_info.name =
VirtualPath::BaseName(dest_url.path()).value();
}
int64 growth = 0;
if (copy)
growth += src_platform_file_info.size;
else
growth -= UsageForPath(src_file_info.name.size());
if (overwrite)
growth -= dest_platform_file_info.size;
else
growth += UsageForPath(dest_file_info.name.size());
if (!AllocateQuota(context, growth))
return base::File::FILE_ERROR_NO_SPACE;
/*
* Copy-with-overwrite
* Just overwrite data file
* Copy-without-overwrite
* Copy backing file
* Create new metadata pointing to new backing file.
* Move-with-overwrite
* transaction:
* Remove source entry.
* Point target entry to source entry's backing file.
* Delete target entry's old backing file
* Move-without-overwrite
* Just update metadata
*/
error = base::File::FILE_ERROR_FAILED;
if (copy) {
if (overwrite) {
error = NativeFileUtil::CopyOrMoveFile(
src_local_path,
dest_local_path,
option,
fileapi::NativeFileUtil::CopyOrMoveModeForDestination(
dest_url, true /* copy */));
} else { // non-overwrite
error = CreateFile(context, src_local_path, dest_url, &dest_file_info);
}
} else {
if (overwrite) {
if (db->OverwritingMoveFile(src_file_id, dest_file_id)) {
if (base::File::FILE_OK !=
NativeFileUtil::DeleteFile(dest_local_path))
LOG(WARNING) << "Leaked a backing file.";
error = base::File::FILE_OK;
} else {
error = base::File::FILE_ERROR_FAILED;
}
} else { // non-overwrite
if (db->UpdateFileInfo(src_file_id, dest_file_info))
error = base::File::FILE_OK;
else
error = base::File::FILE_ERROR_FAILED;
}
}
if (error != base::File::FILE_OK)
return error;
if (overwrite) {
context->change_observers()->Notify(
&FileChangeObserver::OnModifyFile,
MakeTuple(dest_url));
} else {
context->change_observers()->Notify(
&FileChangeObserver::OnCreateFileFrom,
MakeTuple(dest_url, src_url));
}
if (!copy) {
context->change_observers()->Notify(
&FileChangeObserver::OnRemoveFile, MakeTuple(src_url));
TouchDirectory(db, src_file_info.parent_id);
}
TouchDirectory(db, dest_file_info.parent_id);
UpdateUsage(context, dest_url, growth);
return error;
}
base::File::Error ObfuscatedFileUtil::CopyInForeignFile(
FileSystemOperationContext* context,
const base::FilePath& src_file_path,
const FileSystemURL& dest_url) {
SandboxDirectoryDatabase* db = GetDirectoryDatabase(dest_url, true);
if (!db)
return base::File::FILE_ERROR_FAILED;
base::File::Info src_platform_file_info;
if (!base::GetFileInfo(src_file_path, &src_platform_file_info))
return base::File::FILE_ERROR_NOT_FOUND;
FileId dest_file_id;
bool overwrite = db->GetFileWithPath(dest_url.path(),
&dest_file_id);
FileInfo dest_file_info;
base::File::Info dest_platform_file_info; // overwrite case only
if (overwrite) {
base::FilePath dest_local_path;
base::File::Error error = GetFileInfoInternal(
db, context, dest_url, dest_file_id,
&dest_file_info, &dest_platform_file_info, &dest_local_path);
if (error == base::File::FILE_ERROR_NOT_FOUND)
overwrite = false; // fallback to non-overwrite case
else if (error != base::File::FILE_OK)
return error;
else if (dest_file_info.is_directory())
return base::File::FILE_ERROR_INVALID_OPERATION;
}
if (!overwrite) {
FileId dest_parent_id;
if (!db->GetFileWithPath(VirtualPath::DirName(dest_url.path()),
&dest_parent_id)) {
return base::File::FILE_ERROR_NOT_FOUND;
}
if (!dest_file_info.is_directory())
return base::File::FILE_ERROR_FAILED;
InitFileInfo(&dest_file_info, dest_parent_id,
VirtualPath::BaseName(dest_url.path()).value());
}
int64 growth = src_platform_file_info.size;
if (overwrite)
growth -= dest_platform_file_info.size;
else
growth += UsageForPath(dest_file_info.name.size());
if (!AllocateQuota(context, growth))
return base::File::FILE_ERROR_NO_SPACE;
base::File::Error error;
if (overwrite) {
base::FilePath dest_local_path =
DataPathToLocalPath(dest_url, dest_file_info.data_path);
error = NativeFileUtil::CopyOrMoveFile(
src_file_path, dest_local_path,
FileSystemOperation::OPTION_NONE,
fileapi::NativeFileUtil::CopyOrMoveModeForDestination(dest_url,
true /* copy */));
} else {
error = CreateFile(context, src_file_path, dest_url, &dest_file_info);
}
if (error != base::File::FILE_OK)
return error;
if (overwrite) {
context->change_observers()->Notify(
&FileChangeObserver::OnModifyFile, MakeTuple(dest_url));
} else {
context->change_observers()->Notify(
&FileChangeObserver::OnCreateFile, MakeTuple(dest_url));
}
UpdateUsage(context, dest_url, growth);
TouchDirectory(db, dest_file_info.parent_id);
return base::File::FILE_OK;
}
base::File::Error ObfuscatedFileUtil::DeleteFile(
FileSystemOperationContext* context,
const FileSystemURL& url) {
SandboxDirectoryDatabase* db = GetDirectoryDatabase(url, true);
if (!db)
return base::File::FILE_ERROR_FAILED;
FileId file_id;
if (!db->GetFileWithPath(url.path(), &file_id))
return base::File::FILE_ERROR_NOT_FOUND;
FileInfo file_info;
base::File::Info platform_file_info;
base::FilePath local_path;
base::File::Error error = GetFileInfoInternal(
db, context, url, file_id, &file_info, &platform_file_info, &local_path);
if (error != base::File::FILE_ERROR_NOT_FOUND &&
error != base::File::FILE_OK)
return error;
if (file_info.is_directory())
return base::File::FILE_ERROR_NOT_A_FILE;
int64 growth = -UsageForPath(file_info.name.size()) - platform_file_info.size;
AllocateQuota(context, growth);
if (!db->RemoveFileInfo(file_id)) {
NOTREACHED();
return base::File::FILE_ERROR_FAILED;
}
UpdateUsage(context, url, growth);
TouchDirectory(db, file_info.parent_id);
context->change_observers()->Notify(
&FileChangeObserver::OnRemoveFile, MakeTuple(url));
if (error == base::File::FILE_ERROR_NOT_FOUND)
return base::File::FILE_OK;
error = NativeFileUtil::DeleteFile(local_path);
if (base::File::FILE_OK != error)
LOG(WARNING) << "Leaked a backing file.";
return base::File::FILE_OK;
}
base::File::Error ObfuscatedFileUtil::DeleteDirectory(
FileSystemOperationContext* context,
const FileSystemURL& url) {
SandboxDirectoryDatabase* db = GetDirectoryDatabase(url, true);
if (!db)
return base::File::FILE_ERROR_FAILED;
FileId file_id;
if (!db->GetFileWithPath(url.path(), &file_id))
return base::File::FILE_ERROR_NOT_FOUND;
FileInfo file_info;
if (!db->GetFileInfo(file_id, &file_info)) {
NOTREACHED();
return base::File::FILE_ERROR_FAILED;
}
if (!file_info.is_directory())
return base::File::FILE_ERROR_NOT_A_DIRECTORY;
if (!db->RemoveFileInfo(file_id))
return base::File::FILE_ERROR_NOT_EMPTY;
int64 growth = -UsageForPath(file_info.name.size());
AllocateQuota(context, growth);
UpdateUsage(context, url, growth);
TouchDirectory(db, file_info.parent_id);
context->change_observers()->Notify(
&FileChangeObserver::OnRemoveDirectory, MakeTuple(url));
return base::File::FILE_OK;
}
webkit_blob::ScopedFile ObfuscatedFileUtil::CreateSnapshotFile(
FileSystemOperationContext* context,
const FileSystemURL& url,
base::File::Error* error,
base::File::Info* file_info,
base::FilePath* platform_path) {
// We're just returning the local file information.
*error = GetFileInfo(context, url, file_info, platform_path);
if (*error == base::File::FILE_OK && file_info->is_directory) {
*file_info = base::File::Info();
*error = base::File::FILE_ERROR_NOT_A_FILE;
}
return webkit_blob::ScopedFile();
}
scoped_ptr<FileSystemFileUtil::AbstractFileEnumerator>
ObfuscatedFileUtil::CreateFileEnumerator(
FileSystemOperationContext* context,
const FileSystemURL& root_url,
bool recursive) {
SandboxDirectoryDatabase* db = GetDirectoryDatabase(root_url, false);
if (!db) {
return scoped_ptr<AbstractFileEnumerator>(new EmptyFileEnumerator());
}
return scoped_ptr<AbstractFileEnumerator>(
new ObfuscatedFileEnumerator(db, context, this, root_url, recursive));
}
bool ObfuscatedFileUtil::IsDirectoryEmpty(
FileSystemOperationContext* context,
const FileSystemURL& url) {
SandboxDirectoryDatabase* db = GetDirectoryDatabase(url, false);
if (!db)
return true; // Not a great answer, but it's what others do.
FileId file_id;
if (!db->GetFileWithPath(url.path(), &file_id))
return true; // Ditto.
FileInfo file_info;
if (!db->GetFileInfo(file_id, &file_info)) {
DCHECK(!file_id);
// It's the root directory and the database hasn't been initialized yet.
return true;
}
if (!file_info.is_directory())
return true;
std::vector<FileId> children;
// TODO(ericu): This could easily be made faster with help from the database.
if (!db->ListChildren(file_id, &children))
return true;
return children.empty();
}
base::FilePath ObfuscatedFileUtil::GetDirectoryForOriginAndType(
const GURL& origin,
const std::string& type_string,
bool create,
base::File::Error* error_code) {
base::FilePath origin_dir = GetDirectoryForOrigin(origin, create, error_code);
if (origin_dir.empty())
return base::FilePath();
if (type_string.empty())
return origin_dir;
base::FilePath path = origin_dir.AppendASCII(type_string);
base::File::Error error = base::File::FILE_OK;
if (!base::DirectoryExists(path) &&
(!create || !base::CreateDirectory(path))) {
error = create ?
base::File::FILE_ERROR_FAILED :
base::File::FILE_ERROR_NOT_FOUND;
}
if (error_code)
*error_code = error;
return path;
}
bool ObfuscatedFileUtil::DeleteDirectoryForOriginAndType(
const GURL& origin,
const std::string& type_string) {
base::File::Error error = base::File::FILE_OK;
base::FilePath origin_type_path = GetDirectoryForOriginAndType(
origin, type_string, false, &error);
if (origin_type_path.empty())
return true;
if (error != base::File::FILE_ERROR_NOT_FOUND) {
// TODO(dmikurube): Consider the return value of DestroyDirectoryDatabase.
    // We ignore its error for now since 1) it doesn't affect the final result,
    // and
// 2) it always returns false in Windows because of LevelDB's
// implementation.
// Information about failure would be useful for debugging.
if (!type_string.empty())
DestroyDirectoryDatabase(origin, type_string);
if (!base::DeleteFile(origin_type_path, true /* recursive */))
return false;
}
base::FilePath origin_path = VirtualPath::DirName(origin_type_path);
DCHECK_EQ(origin_path.value(),
GetDirectoryForOrigin(origin, false, NULL).value());
if (!type_string.empty()) {
// At this point we are sure we had successfully deleted the origin/type
// directory (i.e. we're ready to just return true).
// See if we have other directories in this origin directory.
for (std::set<std::string>::iterator iter = known_type_strings_.begin();
iter != known_type_strings_.end();
++iter) {
if (*iter == type_string)
continue;
if (base::DirectoryExists(origin_path.AppendASCII(*iter))) {
// Other type's directory exists; just return true here.
return true;
}
}
}
  // No other directories seem to exist. Try deleting the entire origin directory.
InitOriginDatabase(origin, false);
if (origin_database_) {
origin_database_->RemovePathForOrigin(
webkit_database::GetIdentifierFromOrigin(origin));
}
if (!base::DeleteFile(origin_path, true /* recursive */))
return false;
return true;
}
ObfuscatedFileUtil::AbstractOriginEnumerator*
ObfuscatedFileUtil::CreateOriginEnumerator() {
std::vector<SandboxOriginDatabase::OriginRecord> origins;
InitOriginDatabase(GURL(), false);
return new ObfuscatedOriginEnumerator(
origin_database_.get(), file_system_directory_);
}
bool ObfuscatedFileUtil::DestroyDirectoryDatabase(
const GURL& origin,
const std::string& type_string) {
std::string key = GetDirectoryDatabaseKey(origin, type_string);
if (key.empty())
return true;
DirectoryMap::iterator iter = directories_.find(key);
if (iter != directories_.end()) {
SandboxDirectoryDatabase* database = iter->second;
directories_.erase(iter);
delete database;
}
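  // With the in-memory handle gone, destroy the on-disk database only if the
  // origin/type directory actually exists.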
base::File::Error error = base::File::FILE_OK;
base::FilePath path = GetDirectoryForOriginAndType(
origin, type_string, false, &error);
if (path.empty() || error == base::File::FILE_ERROR_NOT_FOUND)
return true;
return SandboxDirectoryDatabase::DestroyDatabase(path, env_override_);
}
// static
int64 ObfuscatedFileUtil::ComputeFilePathCost(const base::FilePath& path) {
return UsageForPath(VirtualPath::BaseName(path).value().size());
}
void ObfuscatedFileUtil::MaybePrepopulateDatabase(
const std::vector<std::string>& type_strings_to_prepopulate) {
SandboxPrioritizedOriginDatabase database(file_system_directory_,
env_override_);
std::string origin_string = database.GetPrimaryOrigin();
if (origin_string.empty() || !database.HasOriginPath(origin_string))
return;
const GURL origin = webkit_database::GetOriginFromIdentifier(origin_string);
// Prepopulate the directory database(s) if and only if this instance
// has primary origin and the directory database is already there.
for (size_t i = 0; i < type_strings_to_prepopulate.size(); ++i) {
const std::string type_string = type_strings_to_prepopulate[i];
// Only handles known types.
if (!ContainsKey(known_type_strings_, type_string))
continue;
base::File::Error error = base::File::FILE_ERROR_FAILED;
base::FilePath path = GetDirectoryForOriginAndType(
origin, type_string, false, &error);
if (error != base::File::FILE_OK)
continue;
scoped_ptr<SandboxDirectoryDatabase> db(
new SandboxDirectoryDatabase(path, env_override_));
if (db->Init(SandboxDirectoryDatabase::FAIL_ON_CORRUPTION)) {
directories_[GetDirectoryDatabaseKey(origin, type_string)] = db.release();
MarkUsed();
      // Don't populate more than one database, as doing so may hurt
      // performance.
break;
}
}
}
base::FilePath ObfuscatedFileUtil::GetDirectoryForURL(
const FileSystemURL& url,
bool create,
base::File::Error* error_code) {
return GetDirectoryForOriginAndType(
url.origin(), CallGetTypeStringForURL(url), create, error_code);
}
std::string ObfuscatedFileUtil::CallGetTypeStringForURL(
const FileSystemURL& url) {
DCHECK(!get_type_string_for_url_.is_null());
return get_type_string_for_url_.Run(url);
}
base::File::Error ObfuscatedFileUtil::GetFileInfoInternal(
SandboxDirectoryDatabase* db,
FileSystemOperationContext* context,
const FileSystemURL& url,
FileId file_id,
FileInfo* local_info,
base::File::Info* file_info,
base::FilePath* platform_file_path) {
DCHECK(db);
DCHECK(context);
DCHECK(file_info);
DCHECK(platform_file_path);
if (!db->GetFileInfo(file_id, local_info)) {
NOTREACHED();
return base::File::FILE_ERROR_FAILED;
}
if (local_info->is_directory()) {
file_info->size = 0;
file_info->is_directory = true;
file_info->is_symbolic_link = false;
file_info->last_modified = local_info->modification_time;
*platform_file_path = base::FilePath();
// We don't fill in ctime or atime.
return base::File::FILE_OK;
}
if (local_info->data_path.empty())
return base::File::FILE_ERROR_INVALID_OPERATION;
base::FilePath local_path = DataPathToLocalPath(url, local_info->data_path);
base::File::Error error = NativeFileUtil::GetFileInfo(
local_path, file_info);
  // We should not follow symbolic links in the sandboxed file system.
if (base::IsLink(local_path)) {
LOG(WARNING) << "Found a symbolic file.";
error = base::File::FILE_ERROR_NOT_FOUND;
}
if (error == base::File::FILE_OK) {
*platform_file_path = local_path;
} else if (error == base::File::FILE_ERROR_NOT_FOUND) {
LOG(WARNING) << "Lost a backing file.";
InvalidateUsageCache(context, url.origin(), url.type());
if (!db->RemoveFileInfo(file_id))
return base::File::FILE_ERROR_FAILED;
}
return error;
}
base::File ObfuscatedFileUtil::CreateAndOpenFile(
FileSystemOperationContext* context,
const FileSystemURL& dest_url,
FileInfo* dest_file_info, int file_flags) {
SandboxDirectoryDatabase* db = GetDirectoryDatabase(dest_url, true);
base::FilePath root, dest_local_path;
base::File::Error error = GenerateNewLocalPath(db, context, dest_url, &root,
&dest_local_path);
if (error != base::File::FILE_OK)
return base::File(error);
if (base::PathExists(dest_local_path)) {
if (!base::DeleteFile(dest_local_path, true /* recursive */))
return base::File(base::File::FILE_ERROR_FAILED);
LOG(WARNING) << "A stray file detected";
InvalidateUsageCache(context, dest_url.origin(), dest_url.type());
}
base::File file = NativeFileUtil::CreateOrOpen(dest_local_path, file_flags);
if (!file.IsValid())
return file.Pass();
if (!file.created()) {
file.Close();
base::DeleteFile(dest_local_path, false /* recursive */);
return base::File(base::File::FILE_ERROR_FAILED);
}
error = CommitCreateFile(root, dest_local_path, db, dest_file_info);
if (error != base::File::FILE_OK) {
file.Close();
base::DeleteFile(dest_local_path, false /* recursive */);
return base::File(error);
}
return file.Pass();
}
base::File::Error ObfuscatedFileUtil::CreateFile(
FileSystemOperationContext* context,
const base::FilePath& src_file_path,
const FileSystemURL& dest_url,
FileInfo* dest_file_info) {
SandboxDirectoryDatabase* db = GetDirectoryDatabase(dest_url, true);
base::FilePath root, dest_local_path;
base::File::Error error = GenerateNewLocalPath(db, context, dest_url, &root,
&dest_local_path);
if (error != base::File::FILE_OK)
return error;
bool created = false;
if (src_file_path.empty()) {
if (base::PathExists(dest_local_path)) {
if (!base::DeleteFile(dest_local_path, true /* recursive */))
return base::File::FILE_ERROR_FAILED;
LOG(WARNING) << "A stray file detected";
InvalidateUsageCache(context, dest_url.origin(), dest_url.type());
}
error = NativeFileUtil::EnsureFileExists(dest_local_path, &created);
} else {
error = NativeFileUtil::CopyOrMoveFile(
src_file_path, dest_local_path,
FileSystemOperation::OPTION_NONE,
fileapi::NativeFileUtil::CopyOrMoveModeForDestination(dest_url,
true /* copy */));
created = true;
}
if (error != base::File::FILE_OK)
return error;
if (!created)
return base::File::FILE_ERROR_FAILED;
return CommitCreateFile(root, dest_local_path, db, dest_file_info);
}
base::File::Error ObfuscatedFileUtil::CommitCreateFile(
const base::FilePath& root,
const base::FilePath& local_path,
SandboxDirectoryDatabase* db,
FileInfo* dest_file_info) {
// This removes the root, including the trailing slash, leaving a relative
// path.
dest_file_info->data_path = base::FilePath(
local_path.value().substr(root.value().length() + 1));
FileId file_id;
base::File::Error error = db->AddFileInfo(*dest_file_info, &file_id);
if (error != base::File::FILE_OK)
return error;
TouchDirectory(db, dest_file_info->parent_id);
return base::File::FILE_OK;
}
base::FilePath ObfuscatedFileUtil::DataPathToLocalPath(
const FileSystemURL& url, const base::FilePath& data_path) {
base::File::Error error = base::File::FILE_OK;
base::FilePath root = GetDirectoryForURL(url, false, &error);
if (error != base::File::FILE_OK)
return base::FilePath();
return root.Append(data_path);
}
std::string ObfuscatedFileUtil::GetDirectoryDatabaseKey(
const GURL& origin, const std::string& type_string) {
if (type_string.empty()) {
LOG(WARNING) << "Unknown filesystem type requested:" << type_string;
return std::string();
}
// For isolated origin we just use a type string as a key.
return webkit_database::GetIdentifierFromOrigin(origin) +
type_string;
}
// TODO(ericu): How to do the whole validation-without-creation thing?
// We may not have quota even to create the database.
// Ah, in that case don't even get here?
// Still doesn't answer the quota issue, though.
SandboxDirectoryDatabase* ObfuscatedFileUtil::GetDirectoryDatabase(
const FileSystemURL& url, bool create) {
std::string key = GetDirectoryDatabaseKey(
url.origin(), CallGetTypeStringForURL(url));
if (key.empty())
return NULL;
DirectoryMap::iterator iter = directories_.find(key);
if (iter != directories_.end()) {
MarkUsed();
return iter->second;
}
base::File::Error error = base::File::FILE_OK;
base::FilePath path = GetDirectoryForURL(url, create, &error);
if (error != base::File::FILE_OK) {
LOG(WARNING) << "Failed to get origin+type directory: "
<< url.DebugString() << " error:" << error;
return NULL;
}
MarkUsed();
SandboxDirectoryDatabase* database =
new SandboxDirectoryDatabase(path, env_override_);
directories_[key] = database;
return database;
}
base::FilePath ObfuscatedFileUtil::GetDirectoryForOrigin(
const GURL& origin, bool create, base::File::Error* error_code) {
if (!InitOriginDatabase(origin, create)) {
if (error_code) {
*error_code = create ?
base::File::FILE_ERROR_FAILED :
base::File::FILE_ERROR_NOT_FOUND;
}
return base::FilePath();
}
base::FilePath directory_name;
std::string id = webkit_database::GetIdentifierFromOrigin(origin);
bool exists_in_db = origin_database_->HasOriginPath(id);
if (!exists_in_db && !create) {
if (error_code)
*error_code = base::File::FILE_ERROR_NOT_FOUND;
return base::FilePath();
}
if (!origin_database_->GetPathForOrigin(id, &directory_name)) {
if (error_code)
*error_code = base::File::FILE_ERROR_FAILED;
return base::FilePath();
}
base::FilePath path = file_system_directory_.Append(directory_name);
bool exists_in_fs = base::DirectoryExists(path);
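  // A directory that exists on disk without a database entry is stale;
  // remove it so it can be recreated consistently below.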
if (!exists_in_db && exists_in_fs) {
if (!base::DeleteFile(path, true)) {
if (error_code)
*error_code = base::File::FILE_ERROR_FAILED;
return base::FilePath();
}
exists_in_fs = false;
}
if (!exists_in_fs) {
if (!create || !base::CreateDirectory(path)) {
if (error_code)
*error_code = create ?
base::File::FILE_ERROR_FAILED :
base::File::FILE_ERROR_NOT_FOUND;
return base::FilePath();
}
}
if (error_code)
*error_code = base::File::FILE_OK;
return path;
}
void ObfuscatedFileUtil::InvalidateUsageCache(
FileSystemOperationContext* context,
const GURL& origin,
FileSystemType type) {
if (sandbox_delegate_)
sandbox_delegate_->InvalidateUsageCache(origin, type);
}
void ObfuscatedFileUtil::MarkUsed() {
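  // (Re)arm the idle timer so that cached databases are dropped after
  // |db_flush_delay_seconds_| without activity.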
if (!timer_)
timer_.reset(new TimedTaskHelper(file_task_runner_.get()));
if (timer_->IsRunning()) {
timer_->Reset();
} else {
timer_->Start(FROM_HERE,
base::TimeDelta::FromSeconds(db_flush_delay_seconds_),
base::Bind(&ObfuscatedFileUtil::DropDatabases,
base::Unretained(this)));
}
}
void ObfuscatedFileUtil::DropDatabases() {
origin_database_.reset();
STLDeleteContainerPairSecondPointers(
directories_.begin(), directories_.end());
directories_.clear();
timer_.reset();
}
bool ObfuscatedFileUtil::InitOriginDatabase(const GURL& origin_hint,
bool create) {
if (origin_database_)
return true;
if (!create && !base::DirectoryExists(file_system_directory_))
return false;
if (!base::CreateDirectory(file_system_directory_)) {
LOG(WARNING) << "Failed to create FileSystem directory: " <<
file_system_directory_.value();
return false;
}
SandboxPrioritizedOriginDatabase* prioritized_origin_database =
new SandboxPrioritizedOriginDatabase(file_system_directory_,
env_override_);
origin_database_.reset(prioritized_origin_database);
if (origin_hint.is_empty() || !HasIsolatedStorage(origin_hint))
return true;
const std::string isolated_origin_string =
webkit_database::GetIdentifierFromOrigin(origin_hint);
// TODO(kinuko): Deprecate this after a few release cycles, e.g. around M33.
base::FilePath isolated_origin_dir = file_system_directory_.Append(
SandboxIsolatedOriginDatabase::kObsoleteOriginDirectory);
if (base::DirectoryExists(isolated_origin_dir) &&
prioritized_origin_database->GetSandboxOriginDatabase()) {
SandboxIsolatedOriginDatabase::MigrateBackFromObsoleteOriginDatabase(
isolated_origin_string,
file_system_directory_,
prioritized_origin_database->GetSandboxOriginDatabase());
}
prioritized_origin_database->InitializePrimaryOrigin(
isolated_origin_string);
return true;
}
base::File::Error ObfuscatedFileUtil::GenerateNewLocalPath(
SandboxDirectoryDatabase* db,
FileSystemOperationContext* context,
const FileSystemURL& url,
base::FilePath* root,
base::FilePath* local_path) {
DCHECK(local_path);
int64 number;
if (!db || !db->GetNextInteger(&number))
return base::File::FILE_ERROR_FAILED;
base::File::Error error = base::File::FILE_OK;
*root = GetDirectoryForURL(url, false, &error);
if (error != base::File::FILE_OK)
return error;
// We use the third- and fourth-to-last digits as the directory.
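  // For example, sequence number 12345 maps to directory "23" and
  // file name "00012345".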
int64 directory_number = number % 10000 / 100;
base::FilePath new_local_path = root->AppendASCII(
base::StringPrintf("%02" PRId64, directory_number));
error = NativeFileUtil::CreateDirectory(
new_local_path, false /* exclusive */, false /* recursive */);
if (error != base::File::FILE_OK)
return error;
*local_path =
new_local_path.AppendASCII(base::StringPrintf("%08" PRId64, number));
return base::File::FILE_OK;
}
base::File ObfuscatedFileUtil::CreateOrOpenInternal(
FileSystemOperationContext* context,
const FileSystemURL& url, int file_flags) {
DCHECK(!(file_flags & (base::File::FLAG_DELETE_ON_CLOSE |
base::File::FLAG_HIDDEN | base::File::FLAG_EXCLUSIVE_READ |
base::File::FLAG_EXCLUSIVE_WRITE)));
SandboxDirectoryDatabase* db = GetDirectoryDatabase(url, true);
if (!db)
return base::File(base::File::FILE_ERROR_FAILED);
FileId file_id;
if (!db->GetFileWithPath(url.path(), &file_id)) {
// The file doesn't exist.
if (!(file_flags & (base::File::FLAG_CREATE |
base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_OPEN_ALWAYS))) {
return base::File(base::File::FILE_ERROR_NOT_FOUND);
}
FileId parent_id;
if (!db->GetFileWithPath(VirtualPath::DirName(url.path()), &parent_id))
return base::File(base::File::FILE_ERROR_NOT_FOUND);
FileInfo file_info;
InitFileInfo(&file_info, parent_id,
VirtualPath::BaseName(url.path()).value());
int64 growth = UsageForPath(file_info.name.size());
if (!AllocateQuota(context, growth))
return base::File(base::File::FILE_ERROR_NO_SPACE);
base::File file = CreateAndOpenFile(context, url, &file_info, file_flags);
if (file.IsValid()) {
UpdateUsage(context, url, growth);
context->change_observers()->Notify(
&FileChangeObserver::OnCreateFile, MakeTuple(url));
}
return file.Pass();
}
if (file_flags & base::File::FLAG_CREATE)
return base::File(base::File::FILE_ERROR_EXISTS);
base::File::Info platform_file_info;
base::FilePath local_path;
FileInfo file_info;
base::File::Error error = GetFileInfoInternal(
db, context, url, file_id, &file_info, &platform_file_info, &local_path);
if (error != base::File::FILE_OK)
return base::File(error);
if (file_info.is_directory())
return base::File(base::File::FILE_ERROR_NOT_A_FILE);
int64 delta = 0;
if (file_flags & (base::File::FLAG_CREATE_ALWAYS |
base::File::FLAG_OPEN_TRUNCATED)) {
// The file exists and we're truncating.
delta = -platform_file_info.size;
AllocateQuota(context, delta);
}
base::File file = NativeFileUtil::CreateOrOpen(local_path, file_flags);
if (!file.IsValid()) {
error = file.error_details();
if (error == base::File::FILE_ERROR_NOT_FOUND) {
// TODO(tzik): Also invalidate on-memory usage cache in UsageTracker.
// TODO(tzik): Delete database entry after ensuring the file lost.
InvalidateUsageCache(context, url.origin(), url.type());
LOG(WARNING) << "Lost a backing file.";
return base::File(base::File::FILE_ERROR_FAILED);
}
return file.Pass();
}
// If truncating we need to update the usage.<|fim▁hole|> if (delta) {
UpdateUsage(context, url, delta);
context->change_observers()->Notify(
&FileChangeObserver::OnModifyFile, MakeTuple(url));
}
return file.Pass();
}
bool ObfuscatedFileUtil::HasIsolatedStorage(const GURL& origin) {
return special_storage_policy_.get() &&
special_storage_policy_->HasIsolatedStorage(origin);
}
} // namespace fileapi<|fim▁end|> | |
<|file_name|>test_cortex.py<|end_file_name|><|fim▁begin|>import time
from cortex import *
class MyDataHandler:
def __init__(self):
self.alldata = []
def MyErrorHandler(self, iLevel, msg):
print("ERROR: ")
print(iLevel, msg.contents)
return 0
def MyDataHandler(self, Frame):
print("got called")
try:
print("Received multi-cast frame no %d\n"%(Frame.contents.iFrame))
print "Bodies: ", Frame.contents.nBodies
print "BodyData: ", Frame.contents.BodyData[0].szName
print "Number of Markers of Body[0]: ", Frame.contents.BodyData[0].nMarkers
for i in range(Frame.contents.BodyData[0].nMarkers):
print "MarkerX ", Frame.contents.BodyData[0].Markers[i][0]
print "MarkerY ", Frame.contents.BodyData[0].Markers[i][1]
print "MarkerZ ", Frame.contents.BodyData[0].Markers[i][2]
print "BodyMarker[2].x: ", Frame.contents.BodyData[0].Markers[3][0]
print "Unidentified markers: ", Frame.contents.nUnidentifiedMarkers
print "Delay: ", Frame.contents.fDelay
print "", Frame.contents.UnidentifiedMarkers[0][0]
self.alldata.append(Frame.contents.UnidentifiedMarkers[0][0])
except:
print("Frame empty")
return 0
if __name__ == "__main__":
my_obj = MyDataHandler()
<|fim▁hole|>
if Cortex_Initialize() != 0:
print("ERROR: unable to initialize")
Cortex_Exit()
exit(0)
pBodyDefs = Cortex_GetBodyDefs()
if pBodyDefs == None:
print("Failed to get body defs")
else:
print("Got body defs")
print("bodydefs: ", pBodyDefs.contents.nBodyDefs)
print "Marker names: "
print "", pBodyDefs.contents.BodyDefs[0].szName
for i in range(pBodyDefs.contents.BodyDefs[0].nMarkers):
print "Marker: ", pBodyDefs.contents.BodyDefs[0].szMarkerNames[i]
Cortex_FreeBodyDefs(pBodyDefs)
pBodyDefs = None
pResponse = c_void_p
nBytes = c_int
retval = Cortex_Request("GetContextFrameRate", pResponse, nBytes)
if retval != 0:
print("ERROR, GetContextFrameRate")
#contextFrameRate = cast(pResponse, POINTER(c_float))
#print("ContextFrameRate = %3.1f Hz", contextFrameRate)
print("*** Starting live mode ***")
retval = Cortex_Request("LiveMode", pResponse, nBytes)
time.sleep(1.0)
retval = Cortex_Request("Pause", pResponse, nBytes)
print("*** Paused live mode ***")
print("****** Cortex_Exit ******")
retval = Cortex_Exit();
print my_obj.alldata<|fim▁end|> | Cortex_SetErrorMsgHandlerFunc(my_obj.MyErrorHandler)
Cortex_SetDataHandlerFunc(my_obj.MyDataHandler) |
<|file_name|>ns.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pyplot as plt
def readmesh(fname):
"""
input
-----
fname: string
gmsh file name
output
------
V: array
vertices
E: array
element ids<|fim▁hole|>
mesh = gmsh.Mesh()
mesh.read_msh(fname)
return mesh.Verts[:, :2], mesh.Elmts[2][1]
def identify_boundary(V):
"""
input
-----
V: array
vertices
output
------
d: dictionary
inflow, outflow, wall, cylinder
(unsorted)
"""
d = {}
II = np.where(np.abs(V[:, 0]) < 1e-13)
d["inflow"] = II
II = np.where(np.abs(V[:, 0] - 22.0) < 1e-13)
d["outflow"] = II
II = np.where(np.abs(V[:, 1]) < 1e-13)
J = np.where(np.abs(V[:, 1] - 4.1) < 1e-13)
d["wall"] = np.vstack((II, J)).ravel()
II = np.where(
np.abs(2 * np.sqrt((V[:, 0] - 2.0) ** 2 + (V[:, 1] - 2.0) ** 2) - 1.0) < 1e-13
)
d["cylinder"] = II
return d
if __name__ == "__main__":
V, E = readmesh("ns.msh")
d = identify_boundary(V)
plt.ion()
plt.triplot(V[:, 0], V[:, 1], E)
plt.axis("scaled")
II = d["inflow"]
plt.plot(V[II, 0], V[II, 1], "ro", markersize=10)
II = d["outflow"]
plt.plot(V[II, 0], V[II, 1], "bo", markersize=10)
II = d["wall"]
plt.plot(V[II, 0], V[II, 1], "gs", markersize=10)
II = d["cylinder"]
plt.plot(V[II, 0], V[II, 1], "m*", markersize=10)
plt.show()<|fim▁end|> | """
import gmsh |
<|file_name|>apachelogs_test.py<|end_file_name|><|fim▁begin|>''' Simple test for apachelogs '''
import unittest
from apachelogs import ApacheLogFile
class apachelogs_test(unittest.TestCase):
def test_foo(self):
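        # Parse the first line of the sample log and check every parsed field.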
log = ApacheLogFile('test.log')
line = iter(log).next()
self.assertEquals(line.ip, '127.0.0.1')
self.assertEquals(line.ident, '-')
self.assertEquals(line.http_user, 'frank')
self.assertEquals(line.time, '5/Oct/2000:13:55:36 -0700')
self.assertEquals(line.request_line, 'GET /apache_pb.gif?foo=bar&baz=zip HTTP/1.0')<|fim▁hole|> self.assertEquals(line.referrer, 'http://www.example.com/start.html')
self.assertEquals(line.user_agent, 'Mozilla/4.08 [en] (Win98; I ;Nav)')
log.close()
def setUp(self):
pass
if __name__ == '__main__':
unittest.main()<|fim▁end|> | self.assertEquals(line.http_response_code, '200')
self.assertEquals(line.http_response_size, '2326') |
<|file_name|>personalised-recommendations.js<|end_file_name|><|fim▁begin|>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* Load top 5 recommendations */
function loadTopRecommendations() {
// Get category, costRating and crowdRating from user input radio buttons
const chosenCategory = document.querySelector('input[name="recommendation-category"]:checked').value;
const preferredCost = document.querySelector('input[name="price"]:checked').value;
const preferredCrowd = document.querySelector('input[name="crowd"]:checked').value;
const url = "recommender?category=" + chosenCategory +"&cost-rating="
+ preferredCost +"&crowd-rating=" + preferredCrowd;
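    // Request the top recommendations from the servlet and render them once the JSON arrives.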
fetch(url).then(response => response.json()).then((recommendations) => {
displayRecommendation(recommendations);
});
}
/* Update HTML to display recommendation */
function displayRecommendation(recommendations) {
const topRecommendationsList = document.getElementById("top-recommendations-list");
topRecommendationsList.innerHTML = "";
for (var i = 0; i < recommendations.length; i++) {
const recommendationBox = document.createElement("div");<|fim▁hole|> recommendationBox.className = "recommendation-box";
// if highest recommendation, label with 'Most Recommended' in the HTML
if (i == 0) {
recommendationBox.innerHTML = "<p class=\"top-recommendation\">Most Recommended</p>";
}
const recommendation = recommendations[i];
const nameHTML = "<h3><b>#" + (i + 1) + " " + recommendation.name + "</b></h3>";
const locationHTML = "<p>latitiude: " + recommendation.lat + ", longitude: " + recommendation.lng + "</p>";
const ratingHTML = "<p>crowd: " + recommendation.crowdRating + "/5, price: " + recommendation.costRating + "/5</p>";
const descriptionHTML = "<p>" + recommendation.description + "</p>";
recommendationBox.innerHTML += nameHTML + locationHTML + ratingHTML + descriptionHTML;
topRecommendationsList.append(recommendationBox);
}
}<|fim▁end|> | |
<|file_name|>Repairing.py<|end_file_name|><|fim▁begin|>#This file is distributed under the terms of the GNU General Public license.
#Copyright (C) 2011 Jekin Trivedi <[email protected]> (See the file COPYING for details).
from atlas import *
from physics import *
from physics import Quaternion
from physics import Vector3D
import server
import sys
class Repairing(server.Task):
"""A very simple Repair system for Repairing structures."""
materials = ["wood"]
def consume_materials (self) :
""" A method which gets the material to be consumed from the inventory & returns the consume operation """
for item in self.character.contains:
if item.type[0] == str(self.materials[0]):
set = Operation("set", Entity(item.id, status = -1), to = item)
return set
else :
print "No Wood in inventory"
return 0
def repair_operation(self, op):
""" The repair op is FROM the the character,
TO the structure that is getting Repaired which we
term the target. """
if len(op) < 1:
sys.stderr.write("Repair task has no target in repair op")
<|fim▁hole|> def tick_operation(self, op):
""" This method is called repeatedly, each time a Repair turn occurs.
In this example the interval is fixed, but it can be varied. """
# print "Repair.tick"
res=Oplist()
current_status = 0
if self.target() is None:
# print "Target is no more"
self.irrelevant()
return
if self.character.stamina <= 0:
# print "I am exhausted"
self.irrelevant()
return
if square_distance(self.character.location, self.target().location) > self.target().location.bbox.square_bounding_radius():
self.progress = current_status
self.rate = 0
return self.next_tick(1.75)
        # Some entities do not have a status attribute. If it is missing we assume the entity is unharmed and stop the task
if hasattr ( self.target(), 'status' ) :
current_status = self.target().status
else:
set = Operation("set", Entity(self.self.target(), status = 1),
to = self.target)
res.append(set)
current_status = 1.0
self.irrelevant()
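        # Each tick repairs the target by 0.1 status and consumes one unit of wood until the status reaches 1.0.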
if current_status < 0.9:
set=Operation("set", Entity(self.target().id, status=current_status+0.1), to=self.target())
res.append(set)
consume = self.consume_materials ()
if consume :
res.append(consume)
else :
self.irrelevant()
else:
set = Operation("set", Entity(self.target().id, status = 1),
to = self.target())
res.append(set)
self.irrelevant()
self.progress = current_status
self.rate = 0.1 / 1.75
res.append(self.next_tick(1.75))
return res<|fim▁end|> | # FIXME Use weak references, once we have them
self.target = server.world.get_object_ref(op[0].id)
self.tool = op.to
|
<|file_name|>field_slider.js<|end_file_name|><|fim▁begin|>(function($) {
"use strict";
// Return true for float value, false otherwise
function is_float (mixed_var) {
return +mixed_var === mixed_var && (!(isFinite(mixed_var))) || Boolean((mixed_var % 1));
}
// Return number of integers after the decimal point.
function decimalCount(res){
var q = res.toString().split('.');
return q[1].length;
}
function loadSelect(myClass, min, max, res, step){
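        // Fill the given select element with options running from min to max in increments of the slider resolution.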
//var j = step + ((decCount ) - (step )); // 18;
for( var i = min; i <= max; i=i+res ){
//var step = 2;
//if (j === (step + ((decCount ) - (step )))) {
var n = i;
if (is_float(res)){
var decCount = decimalCount(res);
n = i.toFixed(decCount);
}
$(myClass).append(
'<option value="' + n + '">' + n + '</option>'
);
//j = 0;<|fim▁hole|> }
$(document).ready(function() {
$('div.redux-slider-container').each(function() {
var start, toClass, defClassOne, defClassTwo, connectVal;
var DISPLAY_NONE = 0;
var DISPLAY_LABEL = 1;
var DISPLAY_TEXT = 2;
var DISPLAY_SELECT = 3;
var mainID = $(this).data('id');
var minVal = $(this).data('min');
var maxVal = $(this).data('max');
var stepVal = $(this).data('step');
var handles = $(this).data('handles');
var defValOne = $(this).data('default-one');
var defValTwo = $(this).data('default-two');
var resVal = $(this).data('resolution');
var displayValue = parseInt(($(this).data('display')));
var rtlVal = Boolean($(this).data('rtl'));
var floatMark = ($(this).data('float-mark'));
var rtl;
if (rtlVal === true) {
rtl = 'rtl';
} else {
rtl = 'ltr';
}
// range array
var range = [minVal, maxVal];
// Set default values for dual slides.
var startTwo = [defValOne, defValTwo];
// Set default value for single slide
var startOne = [defValOne];
var inputOne, inputTwo;
if (displayValue == DISPLAY_TEXT) {
defClassOne = $('.redux-slider-input-one-' + mainID);
defClassTwo = $('.redux-slider-input-two-' + mainID);
inputOne = defClassOne;
inputTwo = defClassTwo;
} else if (displayValue == DISPLAY_SELECT) {
defClassOne = $('.redux-slider-select-one-' + mainID);
defClassTwo = $('.redux-slider-select-two-' + mainID);
loadSelect(defClassOne, minVal, maxVal, resVal, stepVal);
if (handles === 2) {
loadSelect(defClassTwo, minVal, maxVal, resVal, stepVal);
}
} else if (displayValue == DISPLAY_LABEL) {
defClassOne = $('#redux-slider-label-one-' + mainID);
defClassTwo = $('#redux-slider-label-two-' + mainID);
} else if (displayValue == DISPLAY_NONE) {
defClassOne = $('.redux-slider-value-one-' + mainID);
defClassTwo = $('.redux-slider-value-two-' + mainID);
}
var classOne, classTwo;
if (displayValue == DISPLAY_LABEL) {
var x = [defClassOne, 'html'];
var y = [defClassTwo, 'html'];
classOne = [x];
classTwo = [x, y];
} else {
classOne = [defClassOne];
classTwo = [defClassOne, defClassTwo];
}
if (handles === 2) {
start = startTwo;
toClass = classTwo;
connectVal = true;
} else {
start = startOne;
toClass = classOne;
connectVal = 'lower';
}
var slider = $(this).noUiSlider({
range: range,
start: start,
handles: handles,
step: stepVal,
connect: connectVal,
behaviour: "tap-drag",
direction: rtl,
serialization: {
resolution: resVal,
to: toClass,
mark: floatMark,
},
slide: function() {
if (displayValue == DISPLAY_LABEL) {
if (handles === 2) {
var inpSliderVal = slider.val();
$('input.redux-slider-value-one-' + mainID).attr('value', inpSliderVal[0]);
$('input.redux-slider-value-two-' + mainID).attr('value', inpSliderVal[1]);
} else {
$('input.redux-slider-value-one-' + mainID).attr('value', slider.val());
}
}
if (displayValue == DISPLAY_SELECT) {
$('.redux-slider-select-one').select2('val', slider.val()[0]);
if (handles === 2) {
$('.redux-slider-select-two').select2('val', slider.val()[1]);
}
}
// Uncomment when selectize is live
// var selectize = select[0].selectize;
// selectize.setValue(slider.val()[0]);
redux_change(jQuery(this).parents('.redux-field-container:first').find('input'));
},
});
if (displayValue === DISPLAY_TEXT) {
inputOne.keydown(function( e ) {
var sliderOne = slider.val();
var value = parseInt( sliderOne[0] );
switch ( e.which ) {
case 38:
slider.val([value + 1, null] );
break;
case 40:
slider.val([value - 1, null]);
break;
case 13:
e.preventDefault();
break;
}
});
if (handles === 2) {
inputTwo.keydown(function( e ) {
var sliderTwo = slider.val();
var value = parseInt(sliderTwo[1]);
switch ( e.which ) {
case 38:
slider.val([null, value + 1] );
break;
case 40:
slider.val([null, value - 1] );
break;
case 13:
e.preventDefault();
break;
}
});
}
}
});
$('select.redux-slider-select-one, select.redux-slider-select-two').select2({
width: 'resolve',
triggerChange: true,
allowClear: true
});
// select = $('.slider-select').selectize({
// create: true,
// sortField: 'text'
// });
});
})(jQuery);<|fim▁end|> | //}
//j++;
} |
<|file_name|>hashivault_approle_role_get.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from ansible.module_utils.hashivault import hashivault_argspec
from ansible.module_utils.hashivault import hashivault_auth_client
from ansible.module_utils.hashivault import hashivault_init
from ansible.module_utils.hashivault import hashiwrapper
ANSIBLE_METADATA = {'status': ['stableinterface'], 'supported_by': 'community', 'version': '1.1'}
DOCUMENTATION = '''
---
module: hashivault_approle_role_get
version_added: "3.8.0"
short_description: Hashicorp Vault approle role get module
description:
- Module to get a approle role from Hashicorp Vault.
options:
name:
description:
- role name.
mount_point:
description:
- mount point for role
default: approle
extends_documentation_fragment: hashivault
'''
EXAMPLES = '''
---
- hosts: localhost
tasks:
- hashivault_approle_role_get:
name: 'ashley'
register: 'vault_approle_role_get'
- debug: msg="Role is {{vault_approle_role_get.role}}"
'''
def main():
argspec = hashivault_argspec()
argspec['name'] = dict(required=True, type='str')
argspec['mount_point'] = dict(required=False, type='str', default='approle')
module = hashivault_init(argspec)
result = hashivault_approle_role_get(module.params)
if result.get('failed'):
module.fail_json(**result)
else:
module.exit_json(**result)
<|fim▁hole|>def hashivault_approle_role_get(params):
name = params.get('name')
client = hashivault_auth_client(params)
result = client.get_role(name, mount_point=params.get('mount_point'))
return {'role': result}
if __name__ == '__main__':
main()<|fim▁end|> |
@hashiwrapper |
<|file_name|>tests.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals
from django.contrib.gis.db.models import F, Collect, Count, Extent, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.geos import GEOSGeometry, MultiPoint, Point
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import override_settings
from django.utils import timezone
from ..utils import no_oracle
from .models import (
Article, Author, Book, City, DirectoryEntry, Event, Location, Parcel,
)
@skipUnlessDBFeature("gis_enabled")
class RelatedGeoModelTest(TestCase):
fixtures = ['initial']
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.order_by('id')
qs2 = City.objects.order_by('id').select_related()
qs3 = City.objects.order_by('id').select_related('location')
# Reference data for what's in the fixtures.
cities = (
('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat), c.location.point)
@skipUnlessDBFeature("has_transform_method")
def test03_transform_related(self):
"Testing the `transform` GeoQuerySet method on related geographic models."
# All the transformations are to state plane coordinate systems using
# US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
tol = 0
def check_pnt(ref, pnt):
self.assertAlmostEqual(ref.x, pnt.x, tol)
self.assertAlmostEqual(ref.y, pnt.y, tol)
self.assertEqual(ref.srid, pnt.srid)
# Each city transformed to the SRID of their state plane coordinate system.
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
)
for name, srid, wkt in transformed:
# Doing this implicitly sets `select_related` select the location.
# TODO: Fix why this breaks on Oracle.
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
@skipUnlessDBFeature("supports_extent_aggr")
def test_related_extent_aggregate(self):
"Testing the `Extent` aggregate on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Extent('location__point'))
# One for all locations, one that excludes New Mexico (Roswell).
all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
e1 = City.objects.aggregate(Extent('location__point'))['location__point__extent']
e2 = City.objects.exclude(state='NM').aggregate(Extent('location__point'))['location__point__extent']
e3 = aggs['location__point__extent']
# The tolerance value is to four decimal places because of differences
# between the Oracle and PostGIS spatial backends on the extent calculation.
tol = 4
for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
for ref_val, e_val in zip(ref, e):
self.assertAlmostEqual(ref_val, e_val, tol)
@skipUnlessDBFeature("supports_extent_aggr")
def test_related_extent_annotate(self):
"""
Test annotation with Extent GeoAggregate.
"""
cities = City.objects.annotate(points_extent=Extent('location__point')).order_by('name')
tol = 4
self.assertAlmostEqual(
cities[0].points_extent,
(-97.516111, 33.058333, -97.516111, 33.058333),
tol
)
@skipUnlessDBFeature("has_unionagg_method")
def test_related_union_aggregate(self):
"Testing the `Union` aggregate on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Union('location__point'))
# These are the points that are components of the aggregate geographic
# union that is returned. Each point # corresponds to City PK.
p1 = Point(-104.528056, 33.387222)
p2 = Point(-97.516111, 33.058333)
p3 = Point(-79.460734, 40.18476)
p4 = Point(-96.801611, 32.782057)
p5 = Point(-95.363151, 29.763374)
# The second union aggregate is for a union
# query that includes limiting information in the WHERE clause (in other
# words a `.filter()` precedes the call to `.aggregate(Union()`).
ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
ref_u2 = MultiPoint(p2, p3, srid=4326)
u1 = City.objects.aggregate(Union('location__point'))['location__point__union']
u2 = City.objects.exclude(
name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth'),
).aggregate(Union('location__point'))['location__point__union']
u3 = aggs['location__point__union']
self.assertEqual(type(u1), MultiPoint)
self.assertEqual(type(u3), MultiPoint)
# Ordering of points in the result of the union is not defined and
# implementation-dependent (DB backend, GEOS version)
self.assertSetEqual({p.ewkt for p in ref_u1}, {p.ewkt for p in u1})
self.assertSetEqual({p.ewkt for p in ref_u2}, {p.ewkt for p in u2})
self.assertSetEqual({p.ewkt for p in ref_u1}, {p.ewkt for p in u3})
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
"Testing F() expressions on GeometryFields."
# Constructing a dummy parcel border and getting the City instance for
# assigning the FK.
b1 = GEOSGeometry(
'POLYGON((-97.501205 33.052520,-97.501205 33.052576,'
'-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))',<|fim▁hole|>
# First parcel has incorrect center point that is equal to the City;
# it also has a second border that is different from the first as a
# 100ft buffer around the City.
c1 = pcity.location.point
c2 = c1.transform(2276, clone=True)
b2 = c2.buffer(100)
Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
# Now creating a second Parcel where the borders are the same, just
# in different coordinate systems. The center points are also the
# same (but in different coordinate systems), and this time they
# actually correspond to the centroid of the border.
c1 = b1.centroid
c2 = c1.transform(2276, clone=True)
Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
# Should return the second Parcel, which has the center within the
# border.
qs = Parcel.objects.filter(center1__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
if connection.features.supports_transform:
# This time center2 is in a different coordinate system and needs
# to be wrapped in transformation SQL.
qs = Parcel.objects.filter(center2__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
# Should return the first Parcel, which has the center point equal
# to the point in the City ForeignKey.
qs = Parcel.objects.filter(center1=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
if connection.features.supports_transform:
# This time the city column should be wrapped in transformation SQL.
qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
def test07_values(self):
"Testing values() and values_list() and GeoQuerySets."
gqs = Location.objects.all()
gvqs = Location.objects.values()
gvlqs = Location.objects.values_list()
# Incrementing through each of the models, dictionaries, and tuples
# returned by the different types of GeoQuerySets.
for m, d, t in zip(gqs, gvqs, gvlqs):
# The values should be Geometry objects and not raw strings returned
# by the spatial database.
self.assertIsInstance(d['point'], Geometry)
self.assertIsInstance(t[1], Geometry)
self.assertEqual(m.point, d['point'])
self.assertEqual(m.point, t[1])
@override_settings(USE_TZ=True)
def test_07b_values(self):
"Testing values() and values_list() with aware datetime. See #21565."
Event.objects.create(name="foo", when=timezone.now())
list(Event.objects.values_list('when'))
def test08_defer_only(self):
"Testing defer() and only() on Geographic models."
qs = Location.objects.all()
def_qs = Location.objects.defer('point')
for loc, def_loc in zip(qs, def_qs):
self.assertEqual(loc.point, def_loc.point)
def test09_pk_relations(self):
"Ensuring correct primary key column is selected across relations. See #10757."
# The expected ID values -- notice the last two location IDs
# are out of order. Dallas and Houston have location IDs that differ
# from their PKs -- this is done to ensure that the related location
# ID column is selected instead of ID column for the city.
city_ids = (1, 2, 3, 4, 5)
loc_ids = (1, 2, 3, 5, 4)
ids_qs = City.objects.order_by('id').values('id', 'location__id')
for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
self.assertEqual(val_dict['id'], c_id)
self.assertEqual(val_dict['location__id'], l_id)
# TODO: fix on Oracle -- qs2 returns an empty result for an unknown reason
@no_oracle
def test10_combine(self):
"Testing the combination of two GeoQuerySets. See #10807."
buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
qs1 = City.objects.filter(location__point__within=buf1)
qs2 = City.objects.filter(location__point__within=buf2)
combined = qs1 | qs2
names = [c.name for c in combined]
self.assertEqual(2, len(names))
self.assertIn('Aurora', names)
self.assertIn('Kecksburg', names)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test12a_count(self):
"Testing `Count` aggregate use with the `GeoManager` on geo-fields."
# The City, 'Fort Worth' uses the same location as Dallas.
dallas = City.objects.get(name='Dallas')
# Count annotation should be 2 for the Dallas location now.
loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
self.assertEqual(2, loc.num_cities)
def test12b_count(self):
"Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
# Should only be one author (Trevor Paglen) returned by this query, and
# the annotation should have 3 for the number of books, see #11087.
# Also testing with a values(), see #11489.
qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
self.assertEqual(1, len(qs))
self.assertEqual(3, qs[0].num_books)
self.assertEqual(1, len(vqs))
self.assertEqual(3, vqs[0]['num_books'])
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test13c_count(self):
"Testing `Count` aggregate with `.values()`. See #15305."
qs = Location.objects.filter(id=5).annotate(num_cities=Count('city')).values('id', 'point', 'num_cities')
self.assertEqual(1, len(qs))
self.assertEqual(2, qs[0]['num_cities'])
self.assertIsInstance(qs[0]['point'], GEOSGeometry)
# TODO: The phantom model does appear on Oracle.
@no_oracle
def test13_select_related_null_fk(self):
"Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
Book.objects.create(title='Without Author')
b = Book.objects.select_related('author').get(title='Without Author')
# Should be `None`, and not a 'dummy' model.
self.assertIsNone(b.author)
@skipUnlessDBFeature("supports_collect_aggr")
def test_collect(self):
"""
Testing the `Collect` aggregate.
"""
# Reference query:
# SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
# "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
# WHERE "relatedapp_city"."state" = 'TX';
ref_geom = GEOSGeometry(
'MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,'
'-95.363151 29.763374,-96.801611 32.782057)'
)
coll = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
# Even though Dallas and Ft. Worth share same point, Collect doesn't
# consolidate -- that's why 4 points in MultiPoint.
self.assertEqual(4, len(coll))
self.assertTrue(ref_geom.equals(coll))
def test15_invalid_select_related(self):
"Testing doing select_related on the related name manager of a unique FK. See #13934."
qs = Article.objects.select_related('author__article')
# This triggers TypeError when `get_default_columns` has no `local_only`
# keyword. The TypeError is swallowed if QuerySet is actually
# evaluated as list generation swallows TypeError in CPython.
str(qs.query)
def test16_annotated_date_queryset(self):
"Ensure annotated date querysets work if spatial backend is used. See #14648."
birth_years = [dt.year for dt in
list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))]
birth_years.sort()
self.assertEqual([1950, 1974], birth_years)
# TODO: Related tests for KML, GML, and distance lookups.<|fim▁end|> | srid=4326
)
pcity = City.objects.get(name='Aurora') |
<|file_name|>Root.prod.js<|end_file_name|><|fim▁begin|>import React, { PropTypes } from 'react';
import { Provider } from 'react-redux';
import Routers from './Routers';
/**
* Component is exported for conditional usage in Root.js
*/
const Root = ({ store }) => (
/**
* Provider is a component provided to us by the 'react-redux' bindings that
* wraps our app - thus making the Redux store/state available to our 'connect()'
* calls in component hierarchy below.
*/
<Provider store={store}>
<div>
{Routers}
</div>
</Provider>
);
Root.propTypes = {
store: PropTypes.object.isRequired // eslint-disable-line react/forbid-prop-types<|fim▁hole|>
module.exports = Root;<|fim▁end|> | }; |
<|file_name|>one_D_helmholtz.py<|end_file_name|><|fim▁begin|>from numpy import array, zeros, ones, sqrt, ravel, mod, random, inner, conjugate
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix, bmat, eye
from scipy import rand, mat, real, imag, linspace, hstack, vstack, exp, cos, sin, pi
from pyamg.util.linalg import norm
import pyamg
from scipy.optimize import fminbound, fmin
__all__ = ['one_D_helmholtz', 'min_wave']
def min_wave(A, omega, x, tol=1e-5, maxiter=25):
'''
parameters
----------
A {matrix}
1D Helmholtz Operator
omega {scalar}
Wavenumber used to discretize Helmholtz problem
x {array}
1D mesh for the problem
tol {scalar}
minimization tolerance
    maxiter {integer}
        maximum iterations for the minimization algorithm
returns
-------
    Applies a minimization algorithm to find the numerically lowest energy
    wavenumber shift for the matrix A, i.e., the shift that minimizes
    ||A c|| / ||c|| for c = cos((omega+shift)x)
'''
x = ravel(x)
# Define scalar objective function, ignoring the
# boundaries by only considering A*c at [1:-1]
def obj_fcn(alpha):
c = cos((omega+alpha)*x)
Ac = (A*c)[1:-1]
return norm(Ac)/norm(c[1:-1])
(xopt, fval, ierr, numfunc) = fminbound(obj_fcn, -0.99*omega, \
0.99*omega, xtol=tol, maxfun=maxiter, full_output=True, disp=0)
#print "Minimizer = %1.4f, Function Value at Min = %1.4e\nError Flag = %d,\
# Number of function evals = %d" % (xopt, fval, ierr, numfunc)
<|fim▁hole|>def one_D_helmholtz(h, omega=1.0, nplane_waves=2):
'''
parameters
----------
h {int}
Number of grid spacings for 1-D Helmholtz
omega {float}
Defines Helmholtz wave number
nplane_waves {int}
Defines the number of planewaves used for the near null-space modes, B.
1: B = [ exp(ikx) ]
2: B = [ real(exp(ikx)), complex(exp(ikx)) ]
returns
-------
dictionary containing:
A {matrix-like}
LHS of linear system for Helmholtz problem,
-laplace(u) - omega^2 u = f
mesh_h {float}
mesh size
vertices {array-like}
[X, Y]
elements {None}
None, just using 1-D finite-differencing
'''
# Ensure Repeatability of "random" initial guess
random.seed(10)
# Mesh Spacing
mesh_h = 1.0/(float(h)-1.0)
# Construct Real Operator
reA = pyamg.gallery.poisson( (h,), format='csr')
reA = reA - mesh_h*mesh_h*omega*omega*\
eye(reA.shape[0], reA.shape[1], format='csr')
dimen = reA.shape[0]
# Construct Imaginary Operator
imA = csr_matrix( coo_matrix( (array([2.0*mesh_h*omega]), \
(array([0]), array([0]))), shape=reA.shape) )
# Enforce Radiation Boundary Conditions at first grid point
reA.data[1] = -2.0
# In order to maintain symmetry scale the first equation by 1/2
reA.data[0] = 0.5*reA.data[0]
reA.data[1] = 0.5*reA.data[1]
imA.data[0] = 0.5*imA.data[0]
# Create complex-valued system
complexA = reA + 1.0j*imA
# For this case, the CG (continuous Galerkin) case is the default elements and vertices
# because there is no DG mesh to speak of
elements = None
vertices = hstack((linspace(-1.0,1.0,h).reshape(-1,1), zeros((h,1))))
# Near null-space modes are 1-D Plane waves: [exp(ikx), i exp(ikx)]
B = zeros( (dimen, nplane_waves), dtype=complex )
shift = min_wave(complexA, omega, vertices[:,0], tol=1e-9, maxiter=15)
if nplane_waves == 1:
B[:,0] = exp(1.0j*(omega+shift)*vertices[:,0])
elif nplane_waves == 2:
B[:,0] = cos((omega+shift)*vertices[:,0])
B[:,1] = sin((omega+shift)*vertices[:,0])
return {'A' : complexA, 'B' : B, 'mesh_h' : mesh_h, \
'elements' : elements, 'vertices' : vertices}<|fim▁end|> | return xopt
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | #__all__ = [ 'search', 'ham_distance', 'lev_distance', 'distance', 'distance_matrix' ] |
<|file_name|>manage.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":<|fim▁hole|> from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)<|fim▁end|> | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kitchen_sink.settings")
|
<|file_name|>require-dot-notation.js<|end_file_name|><|fim▁begin|>/**
* Requires member expressions to use dot notation when possible
*
* Types: `Boolean` or `Object`
*
* Values:
* - `true`
* - `"except_snake_case"` (*deprecated* use `"allExcept": ["snake_case"]`) allow quoted snake cased identifiers
* - `Object`:
* - `'allExcept'` array of exceptions:
* - `'keywords'` allow quoted identifiers made of reserved words
* - `'snake_case'` allow quoted snake cased identifiers
*
* N.B.: keywords are always allowed with es3 enabled (http://jscs.info/overview.html#es3)
*
* JSHint: [`sub`](http://www.jshint.com/docs/options/#sub)
*
* #### Example
*
* ```js
* "requireDotNotation": true
* ```
*
* ##### Valid
*
* ```js
* var a = b[c];
* var a = b.c;
* var a = b[c.d];
* var a = b[1];
* var a = b.while; // reserved words can be property names in ES5
* ```
*
* ##### Invalid
*
* ```js
* var a = b['c'];
* var a = b['snake_cased'];
* var a = b['_camelCased'];
* var a = b['camelCased_'];
* ```
*
* #### Example for allExcept snake_case
*
* ```js
* "requireDotNotation": { "allExcept": [ "snake_case" ] }
* ```
*
* ##### Valid
* ```
* var a = b[c];
* var a = b.c;
* var a = b['snake_cased'];
* var a = b['camelCased_butWithSnakes'];
* ```
*
* #### Example for allExcept keywords
*
* ```js
* "requireDotNotation": { "allExcept": [ "keywords" ] }
* ```
*
* ##### Valid
*
* ```
* var a = b['yield']; // reserved word in ES5
* var a = b['let'];
* ```
*
* ##### Invalid
*
* ```
* var a = b['await']; // reserved word in ES6
* ```
*
* #### Example for allExcept keywords with esnext
*
* ```js
* "requireDotNotation": { "allExcept": [ "keywords" ] }
* "esnext": true
* ```
*<|fim▁hole|> *
* ```
* var a = b['await']; // reserved word in ES6
* ```
*
* #### Example for `"es3": true`
*
* ```js
* "requireDotNotation": true,
* "es3": true
* ```
*
* ##### Valid
*
* ```js
* var a = b[c];
* var a = b.c;
* var a = b[c.d];
* var a = b[1];
* var a = b['while']; // reserved word in ES3
* ```
*
* ##### Invalid
*
* ```js
* var a = b['c'];
* ```
*/
var assert = require('assert');
var utils = require('../utils');
var reservedWords = require('reserved-words');
module.exports = function() {};
module.exports.prototype = {
configure: function(options) {
if (typeof options !== 'object') {
assert(
options === true || options === 'except_snake_case',
this.getOptionName() + ' option requires either a true value or an object'
);
var _options = {};
if (options === 'except_snake_case') {
_options.allExcept = ['snake_case'];
}
return this.configure(_options);
}
assert(
!options.allExcept || Array.isArray(options.allExcept),
'allExcept value of ' + this.getOptionName() + ' option requires an array with exceptions'
);
if (Array.isArray(options.allExcept)) {
this._exceptSnakeCase = options.allExcept.indexOf('snake_case') > -1;
this._exceptKeywords = options.allExcept.indexOf('keywords') > -1;
}
},
getOptionName: function() {
return 'requireDotNotation';
},
check: function(file, errors) {
var exceptSnakeCase = this._exceptSnakeCase;
var exceptKeywords = this._exceptKeywords;
var dialect = file.getDialect();
file.iterateNodesByType('MemberExpression', function(node) {
if (!node.computed || node.property.type !== 'Literal') {
return;
}
var value = node.property.value;
if (// allow numbers, nulls, and anything else
typeof value !== 'string' ||
// allow invalid identifiers
!utils.isValidIdentifierName(value) ||
// allow quoted snake cased identifiers if allExcept: ['snake_case']
(exceptSnakeCase && utils.isSnakeCased(utils.trimUnderscores(value))) ||
// allow quoted reserved words if allExcept: ['keywords']
((dialect === 'es3' || exceptKeywords) && reservedWords.check(value, dialect, true))
) {
return;
}
errors.add(
'Use dot notation instead of brackets for member expressions',
node.property.loc.start
);
});
}
};<|fim▁end|> | * ##### Valid |
<|file_name|>plugin.py<|end_file_name|><|fim▁begin|>"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
import rest_framework_custom as rfc
from storageadmin.util import handle_exception
from storageadmin.models import (Plugin, InstalledPlugin)
from storageadmin.serializers import PluginSerializer
import time
import logging
logger = logging.getLogger(__name__)
<|fim▁hole|>class PluginView(rfc.GenericView):
serializer_class = PluginSerializer
def get_queryset(self, *args, **kwargs):
return Plugin.objects.all()
#if 'available_plugins' in request.session:
# if request.session['available_plugins'] == None:
# request.session['available_plugins'] = ['backup']
#else:
# request.session['available_plugins'] = ['backup']
#if 'installed_plugins' in request.session:
# if request.session['installed_plugins'] == None:
# request.session['installed_plugins'] = []
#else:
# request.session['installed_plugins'] = []
#data = {
# 'installed': request.session['installed_plugins'],
# 'available': request.session['available_plugins']
# }
#return Response(data)<|fim▁end|> | |
<|file_name|>rmo.rs<|end_file_name|><|fim▁begin|>// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//! Ref/Mut/Owned types.<|fim▁hole|><|fim▁end|> |
pub use lrs_base::rmo::*; |
<|file_name|>jquery.pagination.js<|end_file_name|><|fim▁begin|>/**
* jQuery EasyUI 1.5.1
*
* Copyright (c) 2009-2016 www.jeasyui.com. All rights reserved.
*
* Licensed under the freeware license: http://www.jeasyui.com/license_freeware.php
* To use it on other terms please contact us: [email protected]
*
*/
(function($){
function _1(_2){
var _3=$.data(_2,"pagination");
var _4=_3.options;
var bb=_3.bb={};
var _5=$(_2).addClass("pagination").html("<table cellspacing=\"0\" cellpadding=\"0\" border=\"0\"><tr></tr></table>");
var tr=_5.find("tr");
var aa=$.extend([],_4.layout);
if(!_4.showPageList){
_6(aa,"list");
}
if(!_4.showRefresh){
_6(aa,"refresh");
}
if(aa[0]=="sep"){
aa.shift();
}
if(aa[aa.length-1]=="sep"){
aa.pop();
}
for(var _7=0;_7<aa.length;_7++){
var _8=aa[_7];
if(_8=="list"){
var ps=$("<select class=\"pagination-page-list\"></select>");
ps.bind("change",function(){
_4.pageSize=parseInt($(this).val());
_4.onChangePageSize.call(_2,_4.pageSize);
_10(_2,_4.pageNumber);
});
for(var i=0;i<_4.pageList.length;i++){
$("<option></option>").text(_4.pageList[i]).appendTo(ps);
}
$("<td></td>").append(ps).appendTo(tr);
}else{
if(_8=="sep"){
$("<td><div class=\"pagination-btn-separator\"></div></td>").appendTo(tr);
}else{
if(_8=="first"){
bb.first=_9("first");
}else{
if(_8=="prev"){
bb.prev=_9("prev");
}else{
if(_8=="next"){
bb.next=_9("next");
}else{
if(_8=="last"){
bb.last=_9("last");
}else{
if(_8=="manual"){
$("<span style=\"padding-left:6px;\"></span>").html(_4.beforePageText).appendTo(tr).wrap("<td></td>");
bb.num=$("<input class=\"pagination-num\" type=\"text\" value=\"1\" size=\"2\">").appendTo(tr).wrap("<td></td>");
bb.num.unbind(".pagination").bind("keydown.pagination",function(e){
if(e.keyCode==13){
var _a=parseInt($(this).val())||1;
_10(_2,_a);
return false;
}
});
bb.after=$("<span style=\"padding-right:6px;\"></span>").appendTo(tr).wrap("<td></td>");
}else{
if(_8=="refresh"){
bb.refresh=_9("refresh");
}else{
if(_8=="links"){
$("<td class=\"pagination-links\"></td>").appendTo(tr);
}
}
}
}
}
}
}
}
}
}
if(_4.buttons){
$("<td><div class=\"pagination-btn-separator\"></div></td>").appendTo(tr);
if($.isArray(_4.buttons)){
for(var i=0;i<_4.buttons.length;i++){
var _b=_4.buttons[i];
if(_b=="-"){
$("<td><div class=\"pagination-btn-separator\"></div></td>").appendTo(tr);
}else{
var td=$("<td></td>").appendTo(tr);
var a=$("<a href=\"javascript:;\"></a>").appendTo(td);
a[0].onclick=eval(_b.handler||function(){
});
a.linkbutton($.extend({},_b,{plain:true}));
}
}
}else{
var td=$("<td></td>").appendTo(tr);
$(_4.buttons).appendTo(td).show();
}
}
$("<div class=\"pagination-info\"></div>").appendTo(_5);
$("<div style=\"clear:both;\"></div>").appendTo(_5);
function _9(_c){
var _d=_4.nav[_c];
var a=$("<a href=\"javascript:;\"></a>").appendTo(tr);
a.wrap("<td></td>");
a.linkbutton({iconCls:_d.iconCls,plain:true}).unbind(".pagination").bind("click.pagination",function(){
_d.handler.call(_2);
});
return a;
};
function _6(aa,_e){
var _f=$.inArray(_e,aa);
if(_f>=0){
aa.splice(_f,1);
}
return aa;
};
};
function _10(_11,_12){
var _13=$.data(_11,"pagination").options;
_14(_11,{pageNumber:_12});
_13.onSelectPage.call(_11,_13.pageNumber,_13.pageSize);
};
function _14(_15,_16){
var _17=$.data(_15,"pagination");
var _18=_17.options;
var bb=_17.bb;
$.extend(_18,_16||{});
var ps=$(_15).find("select.pagination-page-list");
if(ps.length){
ps.val(_18.pageSize+"");
_18.pageSize=parseInt(ps.val());
}
var _19=Math.ceil(_18.total/_18.pageSize)||1;
if(_18.pageNumber<1){
_18.pageNumber=1;
}
if(_18.pageNumber>_19){
_18.pageNumber=_19;
}
if(_18.total==0){
_18.pageNumber=0;
_19=0;
}
if(bb.num){
bb.num.val(_18.pageNumber);
}
if(bb.after){
bb.after.html(_18.afterPageText.replace(/{pages}/,_19));
}
var td=$(_15).find("td.pagination-links");
if(td.length){
td.empty();
var _1a=_18.pageNumber-Math.floor(_18.links/2);
if(_1a<1){
_1a=1;
}
var _1b=_1a+_18.links-1;
if(_1b>_19){
_1b=_19;
}
_1a=_1b-_18.links+1;
if(_1a<1){
_1a=1;
}
for(var i=_1a;i<=_1b;i++){
var a=$("<a class=\"pagination-link\" href=\"javascript:;\"></a>").appendTo(td);
a.linkbutton({plain:true,text:i});
if(i==_18.pageNumber){
a.linkbutton("select");
}else{
a.unbind(".pagination").bind("click.pagination",{pageNumber:i},function(e){
_10(_15,e.data.pageNumber);
});
}
}
}
var _1c=_18.displayMsg;
_1c=_1c.replace(/{from}/,_18.total==0?0:_18.pageSize*(_18.pageNumber-1)+1);<|fim▁hole|>_1c=_1c.replace(/{to}/,Math.min(_18.pageSize*(_18.pageNumber),_18.total));
_1c=_1c.replace(/{total}/,_18.total);
$(_15).find("div.pagination-info").html(_1c);
if(bb.first){
bb.first.linkbutton({disabled:((!_18.total)||_18.pageNumber==1)});
}
if(bb.prev){
bb.prev.linkbutton({disabled:((!_18.total)||_18.pageNumber==1)});
}
if(bb.next){
bb.next.linkbutton({disabled:(_18.pageNumber==_19)});
}
if(bb.last){
bb.last.linkbutton({disabled:(_18.pageNumber==_19)});
}
_1d(_15,_18.loading);
};
function _1d(_1e,_1f){
var _20=$.data(_1e,"pagination");
var _21=_20.options;
_21.loading=_1f;
if(_21.showRefresh&&_20.bb.refresh){
_20.bb.refresh.linkbutton({iconCls:(_21.loading?"pagination-loading":"pagination-load")});
}
};
$.fn.pagination=function(_22,_23){
if(typeof _22=="string"){
return $.fn.pagination.methods[_22](this,_23);
}
_22=_22||{};
return this.each(function(){
var _24;
var _25=$.data(this,"pagination");
if(_25){
_24=$.extend(_25.options,_22);
}else{
_24=$.extend({},$.fn.pagination.defaults,$.fn.pagination.parseOptions(this),_22);
$.data(this,"pagination",{options:_24});
}
_1(this);
_14(this);
});
};
$.fn.pagination.methods={options:function(jq){
return $.data(jq[0],"pagination").options;
},loading:function(jq){
return jq.each(function(){
_1d(this,true);
});
},loaded:function(jq){
return jq.each(function(){
_1d(this,false);
});
},refresh:function(jq,_26){
return jq.each(function(){
_14(this,_26);
});
},select:function(jq,_27){
return jq.each(function(){
_10(this,_27);
});
}};
$.fn.pagination.parseOptions=function(_28){
var t=$(_28);
return $.extend({},$.parser.parseOptions(_28,[{total:"number",pageSize:"number",pageNumber:"number",links:"number"},{loading:"boolean",showPageList:"boolean",showRefresh:"boolean"}]),{pageList:(t.attr("pageList")?eval(t.attr("pageList")):undefined)});
};
$.fn.pagination.defaults={total:1,pageSize:10,pageNumber:1,pageList:[10,20,30,50],loading:false,buttons:null,showPageList:true,showRefresh:true,links:10,layout:["list","sep","first","prev","sep","manual","sep","next","last","sep","refresh"],onSelectPage:function(_29,_2a){
},onBeforeRefresh:function(_2b,_2c){
},onRefresh:function(_2d,_2e){
},onChangePageSize:function(_2f){
},beforePageText:"Page",afterPageText:"of {pages}",displayMsg:"Displaying {from} to {to} of {total} items",nav:{first:{iconCls:"pagination-first",handler:function(){
var _30=$(this).pagination("options");
if(_30.pageNumber>1){
$(this).pagination("select",1);
}
}},prev:{iconCls:"pagination-prev",handler:function(){
var _31=$(this).pagination("options");
if(_31.pageNumber>1){
$(this).pagination("select",_31.pageNumber-1);
}
}},next:{iconCls:"pagination-next",handler:function(){
var _32=$(this).pagination("options");
var _33=Math.ceil(_32.total/_32.pageSize);
if(_32.pageNumber<_33){
$(this).pagination("select",_32.pageNumber+1);
}
}},last:{iconCls:"pagination-last",handler:function(){
var _34=$(this).pagination("options");
var _35=Math.ceil(_34.total/_34.pageSize);
if(_34.pageNumber<_35){
$(this).pagination("select",_35);
}
}},refresh:{iconCls:"pagination-refresh",handler:function(){
var _36=$(this).pagination("options");
if(_36.onBeforeRefresh.call(this,_36.pageNumber,_36.pageSize)!=false){
$(this).pagination("select",_36.pageNumber);
_36.onRefresh.call(this,_36.pageNumber,_36.pageSize);
}
}}}};
})(jQuery);<|fim▁end|> | |
<|file_name|>ActivitiesService.java<|end_file_name|><|fim▁begin|>package net.inpercima.runandfun.service;
import static net.inpercima.runandfun.runkeeper.constants.RunkeeperConstants.ACTIVITIES_MEDIA;
import static net.inpercima.runandfun.runkeeper.constants.RunkeeperConstants.ACTIVITIES_URL_PAGE_SIZE_ONE;
import static net.inpercima.runandfun.runkeeper.constants.RunkeeperConstants.ACTIVITIES_URL_SPECIFIED_PAGE_SIZE_NO_EARLIER_THAN;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import javax.inject.Inject;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.RangeQueryBuilder;
import org.springframework.data.domain.Pageable;
import org.springframework.data.elasticsearch.core.ElasticsearchRestTemplate;
import org.springframework.data.elasticsearch.core.SearchHits;
import org.springframework.data.elasticsearch.core.mapping.IndexCoordinates;
import org.springframework.data.elasticsearch.core.query.NativeSearchQueryBuilder;
import org.springframework.stereotype.Service;
import lombok.NoArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import net.inpercima.restapi.service.RestApiService;
import net.inpercima.runandfun.app.model.AppActivity;
import net.inpercima.runandfun.runkeeper.model.RunkeeperActivities;
import net.inpercima.runandfun.runkeeper.model.RunkeeperActivityItem;
/**
* @author Marcel Jänicke
* @author Sebastian Peters
* @since 26.01.2015
*/
@NoArgsConstructor
@Service
@Slf4j
public class ActivitiesService {
// initial release in 2008 according to http://en.wikipedia.org/wiki/RunKeeper
private static final LocalDate INITIAL_RELEASE_OF_RUNKEEPER = LocalDate.of(2008, 01, 01);
@Inject
private AuthService authService;
@Inject
private RestApiService restApiService;
@Inject
private ActivityRepository repository;
@Inject
private ElasticsearchRestTemplate elasticsearchRestTemplate;
public int indexActivities(final String accessToken) {<|fim▁hole|> listActivities(accessToken, calculateFetchDate()).stream().filter(item -> !repository.existsById(item.getId()))
.forEach(item -> addActivity(item, username, activities));
log.info("new activities: {}", activities.size());
if (!activities.isEmpty()) {
repository.saveAll(activities);
}
return activities.size();
}
/**
* List activities live from Runkeeper for a given access token and start date. The full
* size is determined on every call, but only activities from the given date onward are
* collected, up to a maximum of the full size.
*
* @param accessToken
* @param from
* @return list of activity items
*/
private List<RunkeeperActivityItem> listActivities(final String accessToken, final LocalDate from) {
log.debug("list activities for token {} until {}", accessToken, from);
// get one item only to get full size
int pageSize = restApiService
.getForObject(ACTIVITIES_URL_PAGE_SIZE_ONE, ACTIVITIES_MEDIA, accessToken, RunkeeperActivities.class)
.getBody().getSize();
// list new activities from given date with max. full size
return restApiService.getForObject(
String.format(ACTIVITIES_URL_SPECIFIED_PAGE_SIZE_NO_EARLIER_THAN, pageSize,
from.format(DateTimeFormatter.ISO_LOCAL_DATE)),
ACTIVITIES_MEDIA, accessToken, RunkeeperActivities.class).getBody().getItemsAsList();
}
private LocalDate calculateFetchDate() {
final AppActivity activity = getLastActivity();
return activity == null ? INITIAL_RELEASE_OF_RUNKEEPER : activity.getDate().toLocalDate();
}
private void addActivity(final RunkeeperActivityItem item, final String username,
final Collection<AppActivity> activities) {
final AppActivity activity = new AppActivity(item.getId(), username, item.getType(), item.getDate(),
item.getDistance(), item.getDuration());
log.debug("prepare {}", activity);
activities.add(activity);
}
/**
* Get last activity from app repository.
*
* @return last activity
*/
public AppActivity getLastActivity() {
return repository.findTopByOrderByDateDesc();
}
/**
* Count activities from app repository.
*
* @return count
*/
public Long countActivities() {
return repository.count();
}
/**
* List activites from app repository.
*
* @param pageable
* @param types
* @param minDate
* @param maxDate
* @param minDistance
* @param maxDistance
* @param query
* @return
*/
public SearchHits<AppActivity> listActivities(final Pageable pageable, final String types, final LocalDate minDate,
final LocalDate maxDate, final Float minDistance, final Float maxDistance, final String query) {
final BoolQueryBuilder queryBuilder = QueryBuilders.boolQuery();
if (!Strings.isNullOrEmpty(types)) {
final BoolQueryBuilder typesQuery = QueryBuilders.boolQuery();
for (final String type : Splitter.on(',').split(types)) {
typesQuery.should(QueryBuilders.termQuery(AppActivity.FIELD_TYPE, type));
}
queryBuilder.must(typesQuery);
}
if (minDate != null || maxDate != null) {
addDateQuery(queryBuilder, minDate, maxDate);
}
if (minDistance != null || maxDistance != null) {
addDistanceQuery(queryBuilder, minDistance, maxDistance);
}
if (!Strings.isNullOrEmpty(query)) {
addFulltextQuery(queryBuilder, query);
}
if (!queryBuilder.hasClauses()) {
queryBuilder.must(QueryBuilders.matchAllQuery());
}
log.info("{}", queryBuilder);
return elasticsearchRestTemplate.search(
new NativeSearchQueryBuilder().withPageable(pageable).withQuery(queryBuilder).build(),
AppActivity.class, IndexCoordinates.of("activity"));
}
private static void addFulltextQuery(final BoolQueryBuilder queryBuilder, final String query) {
queryBuilder.must(QueryBuilders.termQuery("_all", query.trim()));
}
private static void addDateQuery(final BoolQueryBuilder queryBuilder, final LocalDate minDate,
final LocalDate maxDate) {
final RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(AppActivity.FIELD_DATE);
DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyyMMdd'T'HHmmss'Z'");
if (minDate != null) {
LocalDateTime minDateTime = minDate.atStartOfDay();
rangeQuery.gte(minDateTime.format(formatter));
}
if (maxDate != null) {
LocalDateTime maxDateTime = maxDate.atStartOfDay();
rangeQuery.lte(maxDateTime.format(formatter));
}
queryBuilder.must(rangeQuery);
}
private static void addDistanceQuery(final BoolQueryBuilder queryBuilder, final Float minDistance,
final Float maxDistance) {
final RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(AppActivity.FIELD_DISTANCE);
if (minDistance != null) {
rangeQuery.gte(minDistance);
}
if (maxDistance != null) {
rangeQuery.lte(maxDistance);
}
queryBuilder.must(rangeQuery);
}
}<|fim▁end|> | final Collection<AppActivity> activities = new ArrayList<>();
final String username = authService.getAppState(accessToken).getUsername(); |
<|file_name|>aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_async.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateModelDeploymentMonitoringJob
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1beta1_generated_JobService_UpdateModelDeploymentMonitoringJob_async]<|fim▁hole|>from google.cloud import aiplatform_v1beta1
async def sample_update_model_deployment_monitoring_job():
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob()
model_deployment_monitoring_job.display_name = "display_name_value"
model_deployment_monitoring_job.endpoint = "endpoint_value"
request = aiplatform_v1beta1.UpdateModelDeploymentMonitoringJobRequest(
model_deployment_monitoring_job=model_deployment_monitoring_job,
)
# Make the request
operation = client.update_model_deployment_monitoring_job(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END aiplatform_v1beta1_generated_JobService_UpdateModelDeploymentMonitoringJob_async]<|fim▁end|> | |
<|file_name|>resolver.js<|end_file_name|><|fim▁begin|>define("resolver",
[],
function() {
"use strict";
/*
* This module defines a subclass of Ember.DefaultResolver that adds two
* important features:
*
* 1) The resolver makes the container aware of es6 modules via the AMD
* output. The loader's _seen is consulted so that classes can be
* resolved directly via the module loader, without needing a manual
* `import`.
* 2) is able to provide injections to classes that implement `extend`
* (as is typical with Ember).
*/
function classFactory(klass) {
return {
create: function (injections) {
if (typeof klass.extend === 'function') {
return klass.extend(injections);
} else {
return klass;
}
}
};
}
var underscore = Ember.String.underscore;
var classify = Ember.String.classify;
var get = Ember.get;
function parseName(fullName) {
var nameParts = fullName.split(":"),
type = nameParts[0], fullNameWithoutType = nameParts[1],
name = fullNameWithoutType,
namespace = get(this, 'namespace'),
root = namespace;
return {
fullName: fullName,
type: type,
fullNameWithoutType: fullNameWithoutType,
name: name,
root: root,
resolveMethodName: "resolve" + classify(type)
};
}
function chooseModuleName(seen, moduleName) {
var underscoredModuleName = Ember.String.underscore(moduleName);
if (moduleName !== underscoredModuleName && seen[moduleName] && seen[underscoredModuleName]) {
throw new TypeError("Ambigous module names: `" + moduleName + "` and `" + underscoredModuleName + "`");
}
if (seen[moduleName]) {
return moduleName;
} else if (seen[underscoredModuleName]) {
return underscoredModuleName;
} else {
return moduleName;
}
}
function resolveOther(parsedName) {
var prefix = this.namespace.modulePrefix;
Ember.assert('module prefix must be defined', prefix);
var pluralizedType = parsedName.type + 's';
var name = parsedName.fullNameWithoutType;
var moduleName = prefix + '/' + pluralizedType + '/' + name;
// allow treat all dashed and all underscored as the same thing
// supports components with dashes and other stuff with underscores.
var normalizedModuleName = chooseModuleName(requirejs._eak_seen, moduleName);
if (requirejs._eak_seen[normalizedModuleName]) {
var module = require(normalizedModuleName, null, null, true /* force sync */);
if (module === undefined) {
throw new Error("Module: '" + name + "' was found but returned undefined. Did you forget to `export default`?");
}
if (Ember.ENV.LOG_MODULE_RESOLVER) {
Ember.Logger.info('hit', moduleName);
}
return module;
} else {
if (Ember.ENV.LOG_MODULE_RESOLVER) {
Ember.Logger.info('miss', moduleName);
}
return this._super(parsedName);<|fim▁hole|>
function resolveTemplate(parsedName) {
return Ember.TEMPLATES[parsedName.name] || Ember.TEMPLATES[Ember.String.underscore(parsedName.name)];
}
// Ember.DefaultResolver docs:
// https://github.com/emberjs/ember.js/blob/master/packages/ember-application/lib/system/resolver.js
var Resolver = Ember.DefaultResolver.extend({
resolveTemplate: resolveTemplate,
resolveOther: resolveOther,
parseName: parseName,
normalize: function(fullName) {
// replace `.` with `/` in order to make nested controllers work in the following cases
// 1. `needs: ['posts/post']`
// 2. `{{render "posts/post"}}`
// 3. `this.render('posts/post')` from Route
return Ember.String.dasherize(fullName.replace(/\./g, '/'));
}
});
return Resolver;
});<|fim▁end|> | }
} |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>"""Schedule models.
Much of this module is derived from the work of Eldarion on the
`Symposion <https://github.com/pinax/symposion>`_ project.
Copyright (c) 2010-2014, Eldarion, Inc. and contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Eldarion, Inc. nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from bisect import bisect_left
from itertools import tee
from cached_property import cached_property
from sqlalchemy import func
from pygotham.core import db
__all__ = ('Day', 'Room', 'Slot', 'Presentation')
def pairwise(iterable):
"""Return values from ``iterable`` two at a time.
Recipe from
https://docs.python.org/3/library/itertools.html#itertools-recipes.
"""
a, b = tee(iterable)
next(b, None)
return zip(a, b)
rooms_slots = db.Table(
'rooms_slots',
db.Column('slot_id', db.Integer, db.ForeignKey('slots.id')),
db.Column('room_id', db.Integer, db.ForeignKey('rooms.id')),
)
class Day(db.Model):
"""Day of talks."""
__tablename__ = 'days'
id = db.Column(db.Integer, primary_key=True)
date = db.Column(db.Date)
event_id = db.Column(
db.Integer, db.ForeignKey('events.id'), nullable=False)
event = db.relationship(
'Event', backref=db.backref('days', lazy='dynamic'))
def __str__(self):
"""Return a printable representation."""
return self.date.strftime('%B %d, %Y')
@cached_property
def rooms(self):<|fim▁hole|> def __iter__(self):
"""Iterate over the schedule for the day."""
if not self.rooms:
raise StopIteration
def rowspan(start, end):
"""Find the rowspan for an entry in the schedule table.
This uses a binary search for the given end time from a
sorted list of start times in order to find the index of the
first start time that occurs after the given end time. This
method is used to prevent issues that can occur with
overlapping start and end times being included in the same
list.
"""
return bisect_left(times, end) - times.index(start)
times = sorted({slot.start for slot in self.slots})
# While we typically only care about the start times here, the
# list is iterated over two items at a time. Without adding a
# final element, the last time slot would be omitted. Any value
# could be used here as bisect_left only assumes the list is
# sorted, but using a meaningful value feels better.
times.append(self.slots[-1].end)
slots = db.session.query(
Slot.id,
Slot.content_override,
Slot.kind,
Slot.start,
Slot.end,
func.count(rooms_slots.c.slot_id).label('room_count'),
func.min(Room.order).label('order'),
).join(rooms_slots, Room).filter(Slot.day == self).order_by(
func.count(rooms_slots.c.slot_id), func.min(Room.order)
).group_by(
Slot.id, Slot.content_override, Slot.kind, Slot.start, Slot.end
).all()
for time, next_time in pairwise(times):
row = {'time': time, 'slots': []}
for slot in slots:
if slot.start == time:
slot.rowspan = rowspan(slot.start, slot.end)
slot.colspan = slot.room_count
if not slot.content_override:
slot.presentation = Presentation.query.filter(
Presentation.slot_id == slot.id).first()
row['slots'].append(slot)
if row['slots'] or next_time is None:
yield row
class Room(db.Model):
"""Room of talks."""
__tablename__ = 'rooms'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), nullable=False)
order = db.Column(db.Integer, nullable=False)
def __str__(self):
"""Return a printable representation."""
return self.name
class Slot(db.Model):
"""Time slot."""
__tablename__ = 'slots'
id = db.Column(db.Integer, primary_key=True)
kind = db.Column(
db.Enum(
'break', 'meal', 'keynote', 'talk', 'tutorial', name='slotkind'),
nullable=False,
)
content_override = db.Column(db.Text)
start = db.Column(db.Time, nullable=False)
end = db.Column(db.Time, nullable=False)
day_id = db.Column(db.Integer, db.ForeignKey('days.id'), nullable=False)
day = db.relationship('Day', backref=db.backref('slots', lazy='dynamic'))
rooms = db.relationship(
'Room',
secondary=rooms_slots,
backref=db.backref('slots', lazy='dynamic'),
order_by=Room.order,
)
def __str__(self):
"""Return a printable representation."""
start = self.start.strftime('%I:%M %p')
end = self.end.strftime('%I:%M %p')
rooms = ', '.join(map(str, self.rooms))
return '{} - {} on {}, {}'.format(start, end, self.day, rooms)
@cached_property
def duration(self):
"""Return the duration as a :class:`~datetime.timedelta`."""
return self.end - self.start
class Presentation(db.Model):
"""Presentation of a talk."""
__tablename__ = 'presentations'
id = db.Column(db.Integer, primary_key=True)
slot_id = db.Column(db.Integer, db.ForeignKey('slots.id'), nullable=False)
slot = db.relationship(
'Slot', backref=db.backref('presentation', uselist=False))
talk_id = db.Column(db.Integer, db.ForeignKey('talks.id'), nullable=False)
talk = db.relationship(
'Talk', backref=db.backref('presentation', uselist=False))
def __str__(self):
"""Return a printable representation."""
return str(self.talk)
def is_in_all_rooms(self):
"""Return whether the instance is in all rooms."""
return self.slot.number_of_rooms == 4
@cached_property
def number_of_rooms(self):
"""Return the number of rooms for the instance."""
return len(self.slot.rooms)<|fim▁end|> | """Return the rooms for the day."""
return Room.query.join(rooms_slots, Slot).filter(
Slot.day == self).order_by(Room.order).all()
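The models.py row above documents its scheduling helpers: `pairwise` walks a sequence of start times two at a time, and the nested `rowspan` in `Day.__iter__` uses `bisect_left` over the sorted start times (plus a sentinel end value) to work out how many table rows a slot spans. A small self-contained sketch of those two pieces, illustrative only and using made-up string times instead of the `datetime.time` values the real code reads from the database.

# Stand-alone illustration of pairwise() and the bisect-based rowspan() idea.
from bisect import bisect_left
from itertools import tee

def pairwise(iterable):
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)

times = ['09:00', '10:00', '11:00', '12:00']   # sorted slot start times
times.append('13:00')                          # sentinel end value, as in Day.__iter__

def rowspan(start, end):
    # index of the first start time at or after `end`, minus the row holding `start`
    return bisect_left(times, end) - times.index(start)

print(list(pairwise(times)))       # [('09:00', '10:00'), ('10:00', '11:00'), ...]
print(rowspan('09:00', '11:00'))   # 2 -> a 09:00-11:00 slot spans two schedule rows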
|
<|file_name|>history_traversal.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use script_thread::{CommonScriptMsg, MainThreadScriptMsg, ScriptChan};
use std::sync::mpsc::Sender;
#[derive(JSTraceable)]
pub struct HistoryTraversalTaskSource(pub Sender<MainThreadScriptMsg>);
impl ScriptChan for HistoryTraversalTaskSource {
fn send(&self, msg: CommonScriptMsg) -> Result<(), ()> {
self.0.send(MainThreadScriptMsg::Common(msg)).map_err(|_| ())
}
<|fim▁hole|> fn clone(&self) -> Box<ScriptChan + Send> {
box HistoryTraversalTaskSource((&self.0).clone())
}
}<|fim▁end|> | |
<|file_name|>_configuration_profile_preferences_operations.py<|end_file_name|><|fim▁begin|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ConfigurationProfilePreferencesOperations(object):
"""ConfigurationProfilePreferencesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~automanage_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def create_or_update(
self,
configuration_profile_preference_name, # type: str
resource_group_name, # type: str
parameters, # type: "models.ConfigurationProfilePreference"
**kwargs # type: Any
):
# type: (...) -> "models.ConfigurationProfilePreference"
"""Creates a configuration profile preference.
:param configuration_profile_preference_name: Name of the configuration profile preference.
:type configuration_profile_preference_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param parameters: Parameters supplied to create or update configuration profile preference.
:type parameters: ~automanage_client.models.ConfigurationProfilePreference
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConfigurationProfilePreference, or the result of cls(response)
:rtype: ~automanage_client.models.ConfigurationProfilePreference
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ConfigurationProfilePreference"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-30-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'configurationProfilePreferenceName': self._serialize.url("configuration_profile_preference_name", configuration_profile_preference_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ConfigurationProfilePreference')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConfigurationProfilePreference', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ConfigurationProfilePreference', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automanage/configurationProfilePreferences/{configurationProfilePreferenceName}'} # type: ignore
def get(
self,
configuration_profile_preference_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ConfigurationProfilePreference"
"""Get information about a configuration profile preference.
:param configuration_profile_preference_name: The configuration profile preference name.
:type configuration_profile_preference_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConfigurationProfilePreference, or the result of cls(response)
:rtype: ~automanage_client.models.ConfigurationProfilePreference
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ConfigurationProfilePreference"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-30-preview"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'configurationProfilePreferenceName': self._serialize.url("configuration_profile_preference_name", configuration_profile_preference_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConfigurationProfilePreference', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automanage/configurationProfilePreferences/{configurationProfilePreferenceName}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
configuration_profile_preference_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete a configuration profile preference.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param configuration_profile_preference_name: Name of the configuration profile preference.
:type configuration_profile_preference_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-30-preview"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'configurationProfilePreferenceName': self._serialize.url("configuration_profile_preference_name", configuration_profile_preference_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automanage/configurationProfilePreferences/{configurationProfilePreferenceName}'} # type: ignore
def update(
self,
configuration_profile_preference_name, # type: str
resource_group_name, # type: str
parameters, # type: "models.ConfigurationProfilePreferenceUpdate"
**kwargs # type: Any
):
# type: (...) -> "models.ConfigurationProfilePreference"
"""Updates a configuration profile preference.
:param configuration_profile_preference_name: Name of the configuration profile preference.
:type configuration_profile_preference_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param parameters: Parameters supplied to create or update configuration profile preference.
:type parameters: ~automanage_client.models.ConfigurationProfilePreferenceUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConfigurationProfilePreference, or the result of cls(response)
:rtype: ~automanage_client.models.ConfigurationProfilePreference
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ConfigurationProfilePreference"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-30-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'configurationProfilePreferenceName': self._serialize.url("configuration_profile_preference_name", configuration_profile_preference_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ConfigurationProfilePreferenceUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConfigurationProfilePreference', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automanage/configurationProfilePreferences/{configurationProfilePreferenceName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ConfigurationProfilePreferenceList"]<|fim▁hole|>
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ConfigurationProfilePreferenceList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~automanage_client.models.ConfigurationProfilePreferenceList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ConfigurationProfilePreferenceList"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-30-preview"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ConfigurationProfilePreferenceList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automanage/configurationProfilePreferences'} # type: ignore
def list_by_subscription(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ConfigurationProfilePreferenceList"]
"""Retrieve a list of configuration profile preferences within a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ConfigurationProfilePreferenceList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~automanage_client.models.ConfigurationProfilePreferenceList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ConfigurationProfilePreferenceList"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-30-preview"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ConfigurationProfilePreferenceList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Automanage/configurationProfilePreferences'} # type: ignore<|fim▁end|> | """Retrieve a list of configuration profile preferences within a given resource group. |
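The generated operations class above maps each method onto an ARM REST call and wraps the list operations in `ItemPaged` iterators. A hedged usage sketch follows; the client class name (`AutomanageClient`) and the operations-group attribute (`configuration_profile_preferences`) are assumptions about the enclosing generated package, not something defined in this file.

# Hypothetical client usage; the names flagged above are assumptions.
from azure.identity import DefaultAzureCredential
from azure.mgmt.automanage import AutomanageClient  # assumed package and client class

client = AutomanageClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
)

# list_by_resource_group returns an ItemPaged iterator, matching the code above.
for pref in client.configuration_profile_preferences.list_by_resource_group(
    resource_group_name="my-rg",
):
    print(pref.name)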
<|file_name|>builder.rs<|end_file_name|><|fim▁begin|>use llvm_sys::*;
use llvm_sys::prelude::*;
use llvm_sys::core as llvm;
use super::*;
macro_rules! build_op_str {
($op_name: ident, $fn: path, $($argn: ident: $argv: path),*) => {
impl Builder {
pub fn $op_name(&mut self, $($argn: $argv),*, name: &str) -> LLVMValueRef {
let c_name = CString::new(name).unwrap();
unsafe {
$fn(self.ptr, $($argn),*, c_name.as_ptr())
}
}
}
}
}
macro_rules! build_op {
($op_name: ident, $fn: path, $($argn: ident: $argv: path),*) => {
impl Builder {
pub fn $op_name(&mut self, $($argn: $argv),*) -> LLVMValueRef {
unsafe {
$fn(self.ptr, $($argn),*)
}
}
}
}
}
#[derive(Debug)]
pub struct Builder {
pub ptr: LLVMBuilderRef
}
impl_llvm_ref!(Builder, LLVMBuilderRef);
// http://llvm.org/docs/doxygen/html/group__LLVMCCoreInstructionBuilder.html
//TODO: Get/Set Volatile
//TODO: Get/Set Ordering
//TODO: Almost everything from LLVMBuildAdd and upwards
build_op_str!(build_add, llvm::LLVMBuildAdd, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_sub, llvm::LLVMBuildSub, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_mul, llvm::LLVMBuildMul, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_fadd, llvm::LLVMBuildFAdd, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_fsub, llvm::LLVMBuildFSub, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_fmul, llvm::LLVMBuildFMul, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_fdiv, llvm::LLVMBuildFDiv, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_nswadd, llvm::LLVMBuildNSWAdd, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_nswsub, llvm::LLVMBuildNSWSub, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_nswmul, llvm::LLVMBuildNSWMul, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_nuwadd, llvm::LLVMBuildNUWAdd, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_nuwsub, llvm::LLVMBuildNUWSub, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_nuwmul, llvm::LLVMBuildNUWMul, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_udiv, llvm::LLVMBuildUDiv, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_sdiv, llvm::LLVMBuildSDiv, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_exact_sdiv, llvm::LLVMBuildExactSDiv, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_urem, llvm::LLVMBuildURem, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_srem, llvm::LLVMBuildSRem, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_frem, llvm::LLVMBuildFRem, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_shl, llvm::LLVMBuildShl, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_lshr, llvm::LLVMBuildLShr, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_ashr, llvm::LLVMBuildAShr, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_and, llvm::LLVMBuildAnd, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_or, llvm::LLVMBuildOr, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_xor, llvm::LLVMBuildXor, lhs: LLVMValueRef, rhs: LLVMValueRef);
// TODO: LLVMBuildBinOp
build_op_str!(build_neg, llvm::LLVMBuildNeg, v: LLVMValueRef);
build_op_str!(build_fneg, llvm::LLVMBuildFNeg, v: LLVMValueRef);
build_op_str!(build_nswneg, llvm::LLVMBuildNSWNeg, v: LLVMValueRef);
build_op_str!(build_nuwneg, llvm::LLVMBuildNUWNeg, v: LLVMValueRef);
build_op_str!(build_not, llvm::LLVMBuildNot, v: LLVMValueRef);
build_op_str!(build_malloc, llvm::LLVMBuildMalloc, typ: LLVMTypeRef);
build_op_str!(build_array_malloc, llvm::LLVMBuildArrayMalloc, typ: LLVMTypeRef,
val: LLVMValueRef);
build_op_str!(build_alloca, llvm::LLVMBuildAlloca, ty: LLVMTypeRef);
build_op_str!(build_array_alloca, llvm::LLVMBuildArrayAlloca, ty: LLVMTypeRef,
val: LLVMValueRef);
build_op!(build_free, llvm::LLVMBuildFree, pval: LLVMValueRef);
build_op_str!(build_load, llvm::LLVMBuildLoad, ptr: LLVMValueRef);
build_op!(build_store, llvm::LLVMBuildStore, val: LLVMValueRef, pval: LLVMValueRef);
build_op_str!(build_trunc, llvm::LLVMBuildTrunc, val: LLVMValueRef, dest_ty: LLVMTypeRef);
build_op_str!(build_zext, llvm::LLVMBuildZExt, val: LLVMValueRef, dest_ty: LLVMTypeRef);
build_op_str!(build_sext, llvm::LLVMBuildSExt, val: LLVMValueRef, dest_ty: LLVMTypeRef);
build_op_str!(build_fp_to_ui, llvm::LLVMBuildFPToUI, val: LLVMValueRef, dest_ty: LLVMTypeRef);
build_op_str!(build_fp_to_si, llvm::LLVMBuildFPToSI, val: LLVMValueRef, dest_ty: LLVMTypeRef);
build_op_str!(build_ui_to_fp, llvm::LLVMBuildUIToFP, val: LLVMValueRef, dest_ty: LLVMTypeRef);
build_op_str!(build_si_to_fp, llvm::LLVMBuildSIToFP, val: LLVMValueRef, dest_ty: LLVMTypeRef);
build_op_str!(build_fp_trunc, llvm::LLVMBuildFPTrunc, val: LLVMValueRef, dest_ty: LLVMTypeRef);
build_op_str!(build_fp_ext, llvm::LLVMBuildFPExt, val: LLVMValueRef, dest_ty: LLVMTypeRef);
build_op_str!(build_ptr_to_int, llvm::LLVMBuildPtrToInt, val: LLVMValueRef, dest_ty: LLVMTypeRef);
build_op_str!(build_int_to_ptr, llvm::LLVMBuildIntToPtr, val: LLVMValueRef, dest_ty: LLVMTypeRef);
build_op_str!(build_bit_cast, llvm::LLVMBuildBitCast, val: LLVMValueRef, dest_ty: LLVMTypeRef);
build_op_str!(build_addr_space_cast, llvm::LLVMBuildAddrSpaceCast, val: LLVMValueRef,
dest_ty: LLVMTypeRef);
build_op_str!(build_zext_or_bit_cast, llvm::LLVMBuildZExtOrBitCast, val: LLVMValueRef,
dest_ty: LLVMTypeRef);
build_op_str!(build_sext_or_bit_cast, llvm::LLVMBuildSExtOrBitCast, val: LLVMValueRef,
dest_ty: LLVMTypeRef);
build_op_str!(build_trunc_or_bit_cast, llvm::LLVMBuildTruncOrBitCast, val: LLVMValueRef,
dest_ty: LLVMTypeRef);
// TODO: improve LLVMOpcode
build_op_str!(build_cast, llvm::LLVMBuildCast, op: LLVMOpcode,
val: LLVMValueRef,
dest_ty: LLVMTypeRef);
build_op_str!(build_pointer_cast, llvm::LLVMBuildPointerCast, val: LLVMValueRef, dest_ty: LLVMTypeRef);
build_op_str!(build_int_cast, llvm::LLVMBuildIntCast, val: LLVMValueRef, dest_ty: LLVMTypeRef);
build_op_str!(build_fpcast, llvm::LLVMBuildFPCast, val: LLVMValueRef, dest_ty: LLVMTypeRef);
build_op_str!(build_icmp, llvm::LLVMBuildICmp, op: LLVMIntPredicate,
lhs: LLVMValueRef,
rhs: LLVMValueRef);
build_op_str!(build_fcmp, llvm::LLVMBuildFCmp, op: LLVMRealPredicate,
lhs: LLVMValueRef,<|fim▁hole|>build_op_str!(build_select, llvm::LLVMBuildSelect, i: LLVMValueRef,
the: LLVMValueRef,
els: LLVMValueRef);
build_op_str!(build_vaarg, llvm::LLVMBuildVAArg, list: LLVMValueRef, ty: LLVMTypeRef);
build_op_str!(build_extract_element, llvm::LLVMBuildExtractElement, vec_val: LLVMValueRef,
index: LLVMValueRef);
build_op_str!(build_insert_element, llvm::LLVMBuildInsertElement, vec_val: LLVMValueRef,
eltval: LLVMValueRef,
index: LLVMValueRef);
build_op_str!(build_shuffle_vector, llvm::LLVMBuildShuffleVector, v1: LLVMValueRef,
v2: LLVMValueRef,
mask: LLVMValueRef);
// TODO: Both these indices are C `unsigned`; change u32 to libc::c_uint
build_op_str!(build_extract_value, llvm::LLVMBuildExtractValue, aggval: LLVMValueRef,
index: u32);
build_op_str!(build_insert_value, llvm::LLVMBuildInsertValue, aggval: LLVMValueRef,
eltval: LLVMValueRef,
index: u32);
// TODO: LLVMBuildAtomicRMW
build_op_str!(build_is_null, llvm::LLVMBuildIsNull, val: LLVMValueRef);
build_op_str!(build_is_not_null, llvm::LLVMBuildIsNotNull, val: LLVMValueRef);
build_op_str!(build_ptr_diff, llvm::LLVMBuildPtrDiff, lhs: LLVMValueRef, rhs: LLVMValueRef);
build_op_str!(build_fence, llvm::LLVMBuildFence, ordering: LLVMAtomicOrdering,
singlethread: LLVMBool);
build_op!(build_ret, llvm::LLVMBuildRet, ret_val: LLVMValueRef);
build_op!(build_ret_void, llvm::LLVMBuildRetVoid,); // TODO: Fix the trailing comma
build_op!(build_br, llvm::LLVMBuildBr, dest: LLVMBasicBlockRef);
build_op!(build_cond_br, llvm::LLVMBuildCondBr, cond: LLVMValueRef,
then: LLVMBasicBlockRef,
else_: LLVMBasicBlockRef);
impl Builder {
pub fn position_at_end(&mut self, basic_block: LLVMBasicBlockRef) {
unsafe {
llvm::LLVMPositionBuilderAtEnd(self.ptr, basic_block);
}
}
pub fn build_call(&mut self, func: Function, mut args: Vec<LLVMValueRef>,
name: &str) -> LLVMValueRef {
let c_name = CString::new(name).unwrap();
unsafe {
llvm::LLVMBuildCall(
self.ptr,
func.ptr,
args.as_mut_ptr(),
args.len() as u32,
c_name.as_ptr()
)
}
}
pub fn build_global_string(&self, s: &str, name: &str) -> LLVMValueRef {
let c_s = CString::new(s).unwrap();
let c_name = CString::new(name).unwrap();
unsafe {
llvm::LLVMBuildGlobalString(self.ptr, c_s.as_ptr(), c_name.as_ptr())
}
}
pub fn build_global_string_ptr(&self, s: &str, name: &str) -> LLVMValueRef {
let c_s = CString::new(s).unwrap();
let c_name = CString::new(name).unwrap();
unsafe {
llvm::LLVMBuildGlobalStringPtr(self.ptr, c_s.as_ptr(), c_name.as_ptr())
}
}
pub fn build_in_bounds_gep(&self, ptr: LLVMValueRef, mut indices: Vec<LLVMValueRef>,
name: &str) -> LLVMValueRef {
let c_name = CString::new(name).unwrap();
unsafe {
llvm::LLVMBuildInBoundsGEP(self.ptr, ptr, indices.as_mut_ptr(),
indices.len() as u32, c_name.as_ptr())
}
}
pub fn build_gep(&self, ptr: LLVMValueRef, mut indices: Vec<LLVMValueRef>,
name: &str) -> LLVMValueRef {
let c_name = CString::new(name).unwrap();
unsafe {
llvm::LLVMBuildGEP(self.ptr, ptr, indices.as_mut_ptr(),
indices.len() as u32, c_name.as_ptr())
}
}
}
impl Drop for Builder {
fn drop(&mut self) {
unsafe {
llvm::LLVMDisposeBuilder(self.ptr);
}
}
}<|fim▁end|> | rhs: LLVMValueRef);
build_op_str!(build_phi, llvm::LLVMBuildPhi, ty: LLVMTypeRef);
//build_call is manually defined in impl Builder |
<|file_name|>reconnecting.js<|end_file_name|><|fim▁begin|>module.exports = client => {
console.log(`Reconnecting... [at ${new Date()}]`);
<|fim▁hole|><|fim▁end|> | }; |
<|file_name|>utils.py<|end_file_name|><|fim▁begin|>"""Module containing helper functions that are used by other parts of worker."""
import datetime
import getpass
import json
import logging
import signal
import re
from contextlib import contextmanager
from os import path as os_path, walk, getcwd, chdir, environ as os_environ, killpg, getpgid
from queue import Queue, Empty
from shlex import split
from subprocess import Popen, PIPE, check_output, CalledProcessError, TimeoutExpired
from threading import Thread
from traceback import format_exc
from urllib.parse import unquote, urlparse, parse_qs
import tenacity
import requests
from requests.adapters import HTTPAdapter
from requests.exceptions import HTTPError
from requests.packages.urllib3.util.retry import Retry
from selinon import StoragePool
from sqlalchemy.exc import SQLAlchemyError
from f8a_worker.enums import EcosystemBackend
from f8a_worker.errors import (TaskError,
NotABugTaskError,
F8AConfigurationException)
from f8a_worker.models import (Analysis,
Ecosystem,
Package,
Version)
from f8a_worker.defaults import configuration
logger = logging.getLogger(__name__)
def get_latest_analysis(ecosystem, package, version, db_session=None):
"""Get latest analysis for the given EPV."""
if not db_session:
storage = StoragePool.get_connected_storage("BayesianPostgres")
db_session = storage.session
try:
return db_session.query(Analysis). \
filter(Ecosystem.name == ecosystem). \
filter(Package.name == package). \
filter(Version.identifier == version). \
order_by(Analysis.started_at.desc()). \
first()
except SQLAlchemyError:
db_session.rollback()
raise
@contextmanager
def cwd(target):
"""Manage cwd in a pushd/popd fashion."""
curdir = getcwd()
chdir(target)
try:
yield
finally:
chdir(curdir)
@contextmanager
def username():
"""Workaround for failing getpass.getuser().
http://blog.dscpl.com.au/2015/12/unknown-user-when-running-docker.html
"""
user = ''
try:
user = getpass.getuser()
except KeyError:
os_environ['LOGNAME'] = 'f8aworker'
try:
yield
finally:
if not user:
del os_environ['LOGNAME']
def assert_not_none(name, value):
"""Assert value is not None."""
if value is None:
raise ValueError('Parameter %r is None' % name)
class TimedCommand(object):
"""Execute arbitrary shell command in a timeout-able manner."""
def __init__(self, command):
"""Initialize command."""
# parse with shlex if not execve friendly
if isinstance(command, str):
command = split(command)
self.command = command
def run(self, timeout=None, is_json=False, **kwargs):
"""Run the self.command and wait up to given time period for results.
:param timeout: how long to wait, in seconds, for the command to finish
before terminating it
:param is_json: hint whether output of the command is a JSON
:return: triplet (return code, stdout, stderr), stdout will be a
dictionary if `is_json` is True
"""
logger.debug("running command '%s'; timeout '%s'", self.command, timeout)
# this gets executed in a separate thread
def target(**kwargs):
try:
self.process = Popen(self.command, universal_newlines=True, **kwargs)
self.output, self.error = self.process.communicate()
self.status = self.process.returncode
except Exception:
self.output = {} if is_json else []
self.error = format_exc()
self.status = -1
# default stdout and stderr
if 'stdout' not in kwargs:
kwargs['stdout'] = PIPE
if 'stderr' not in kwargs:
kwargs['stderr'] = PIPE
if 'update_env' in kwargs:
# make sure we update environment, not override it
kwargs['env'] = dict(os_environ, **kwargs['update_env'])
kwargs.pop('update_env')
# thread
thread = Thread(target=target, kwargs=kwargs)
thread.start()
thread.join(timeout)
# timeout reached, terminate the thread
if thread.is_alive():
logger.error('Command {cmd} timed out after {t} seconds'.format(cmd=self.command,
t=timeout))
# this is tricky - we need to make sure we kill the process with all its subprocesses;
# using just kill might create zombie process waiting for subprocesses to finish
# and leaving us hanging on thread.join()
# TODO: we should do the same for get_command_output!
killpg(getpgid(self.process.pid), signal.SIGKILL)
thread.join()
if not self.error:
self.error = 'Killed by timeout after {t} seconds'.format(t=timeout)
if self.output:
if is_json:
self.output = json.loads(self.output)
else:
self.output = [f for f in self.output.split('\n') if f]
return self.status, self.output, self.error
@staticmethod
def get_command_output(args, graceful=True, is_json=False, timeout=300, **kwargs):
"""Wrap the function to get command output with implicit timeout of 5 minutes."""
kwargs['timeout'] = 10800
return get_command_output(args, graceful, is_json, **kwargs)
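# Illustrative usage sketch (not part of the original module); the echoed text
# and the 10-second timeout are arbitrary example values.
status, output, error = TimedCommand('echo hello').run(timeout=10)
# status is the process return code (0 here), output holds the captured stdout
# lines (['hello']), and error carries stderr or a timeout message.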
def get_command_output(args, graceful=True, is_json=False, **kwargs):
"""Improved version of subprocess.check_output.
:param graceful: bool, if False, raise Exception when command fails
:param is_json: bool, if True, return decoded json
:return: list of strings, output which command emitted
"""
logger.debug("running command %s", args)
try:
# Using universal_newlines mostly for the side-effect of decoding
# the output as UTF-8 text on Python 3.x
out = check_output(args, universal_newlines=True, **kwargs)
except (CalledProcessError, TimeoutExpired) as ex:
# TODO: we may want to use subprocess.Popen to be able to also print stderr here
# (while not mixing it with stdout that is returned if the subprocess succeeds)
if isinstance(ex, TimeoutExpired):
logger.warning("command %s timed out:\n%s", args, ex.output)
else:
logger.warning("command %s ended with %s\n%s", args, ex.returncode, ex.output)
if not graceful:
logger.error(ex)
# we don't know whether this is a bug or the command was simply called
# with invalid/unsupported input. Caller needs to catch the exception
# and decide.
raise TaskError("Error during running command %s: %r" % (args, ex.output))
else:
logger.debug("Ignoring because graceful flag is set")
return []
else:
if is_json:
# FIXME: some error handling here would be great
return json.loads(out)
else:
return [f for f in out.split('\n') if f] # py2 & 3 compat
def get_all_files_from(target, path_filter=None, file_filter=None):
"""Enumerate all files in target directory, can be filtered with custom delegates."""
for root, dirs, files in walk(target):
for file in files:
joined = os_path.abspath(os_path.join(root, file))
# filter the list early on
if path_filter and not path_filter(joined):
continue
if file_filter and not file_filter(file):
continue
yield joined
def hidden_path_filter(item):
"""Filter out hidden files or files in hidden directories."""<|fim▁hole|>
def json_serial(obj):
"""Return time obj formatted according to ISO."""
if isinstance(obj, datetime.datetime):
return obj.isoformat()
raise TypeError('Type {t} not serializable'.format(t=type(obj)))
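# Illustrative usage sketch (not part of the original module): json_serial is
# meant to be passed as the `default` hook of json.dumps so datetimes are
# serialized as ISO 8601 strings; the payload below is an arbitrary example.
example_payload = json.dumps({'at': datetime.datetime(2020, 1, 1)}, default=json_serial)
# example_payload == '{"at": "2020-01-01T00:00:00"}'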
def in_path(directory, path):
"""Check whether directory is in path.
:param directory: str
:param path: str
:return: True if directory is in path
"""
return any(directory == x for x in path.split(os_path.sep))
def skip_git_files(path):
"""Git skipping closure of in_path."""
return not in_path('.git', path)
class ThreadPool(object):
"""Implementation of thread pool."""
def __init__(self, target, num_workers=10, timeout=3):
"""Initialize `ThreadPool`.
:param target: Function that accepts exactly one argument
:param num_workers: int, number of worker threads to spawn
:param timeout: int, maximum number of seconds workers wait for new task
"""
self.target = target
self.num_workers = num_workers
self.timeout = timeout
self.queue = Queue()
self._threads = [Thread(target=self._work) for i in range(0, num_workers)]
def add_task(self, arg):
"""Enqueue a new task.
:param arg: argument for the `target` that was passed to constructor
"""
self.queue.put(arg)
def start(self):
"""Start processing by all threads."""
[t.start() for t in self._threads]
def join(self):
"""Join all threads."""
[t.join() for t in self._threads]
self.queue.join()
def _work(self):
while True:
try:
arg = self.queue.get(block=True, timeout=self.timeout)
except Empty:
break
try:
self.target(arg)
finally:
self.queue.task_done()
def __enter__(self):
"""Enter context manager."""
self.start()
return self
def __exit__(self, *_args, **_kwargs):
"""Exit context manager."""
self.join()
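# Illustrative usage sketch (not part of the original module); the target
# function and the task arguments below are arbitrary examples.
collected = []
with ThreadPool(target=collected.append, num_workers=2, timeout=1) as pool:
    for item in ('a', 'b', 'c'):
        pool.add_task(item)
# After the block exits, the workers have drained the queue and `collected`
# contains all three items (in arbitrary order).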
def compute_digest(target, function='sha256', raise_on_error=False):
"""Compute digest of a provided file.
:param target: str, file path
:param function: str, prefix name of the hashing function
:param raise_on_error: bool, raise an error when computation wasn't successful if set to True
:returns str or None, computed digest
`function` requires an executable with matching name on the system (sha256sum, sha1sum etc.)
"""
function += 'sum'
# returns e.g.:
# 65ecde5d025fcf57ceaa32230e2ff884ab204065b86e0e34e609313c7bdc7b47 /etc/passwd
data = TimedCommand.get_command_output([function, target], graceful=not raise_on_error)
try:
return data[0].split(' ')[0].strip()
except IndexError as exc:
logger.error("unable to compute digest of %r, likely it doesn't exist or is a directory",
target)
if raise_on_error:
raise RuntimeError("can't compute digest of %s" % target) from exc
class MavenCoordinates(object):
"""Represents Maven coordinates.
https://maven.apache.org/pom.html#Maven_Coordinates
"""
_default_packaging = 'jar'
def __init__(self, groupId, artifactId, version='',
classifier='', packaging=None):
"""Initialize attributes."""
self.groupId = groupId
self.artifactId = artifactId
self.classifier = classifier
self.packaging = packaging or MavenCoordinates._default_packaging
self.version = version
def is_valid(self):
"""Check if the current coordinates are valid."""
return self.groupId and self.artifactId and self.version and self.packaging
def to_str(self, omit_version=False):
"""Return string representation of the coordinates."""
mvnstr = "{g}:{a}".format(g=self.groupId, a=self.artifactId)
pack = self.packaging
if pack == MavenCoordinates._default_packaging:
pack = ''
if pack:
mvnstr += ":{p}".format(p=pack)
if self.classifier:
if not pack:
mvnstr += ':'
mvnstr += ":{c}".format(c=self.classifier)
if not self.version or omit_version:
if self.classifier or pack:
mvnstr += ':'
else:
mvnstr += ":{v}".format(v=self.version)
return mvnstr
def to_repo_url(self, ga_only=False):
"""Return relative path to the artifact in Maven repository."""
if ga_only:
return "{g}/{a}".format(g=self.groupId.replace('.', '/'),
a=self.artifactId)
dir_path = "{g}/{a}/{v}/".format(g=self.groupId.replace('.', '/'),
a=self.artifactId,
v=self.version)
classifier = "-{c}".format(c=self.classifier) if self.classifier else ''
filename = "{a}-{v}{c}.{e}".format(a=self.artifactId,
v=self.version,
c=classifier,
e=self.packaging)
return dir_path + filename
@staticmethod
def _parse_string(coordinates_str):
"""Parse string representation into a dictionary."""
a = {'groupId': '',
'artifactId': '',
'packaging': MavenCoordinates._default_packaging,
'classifier': '',
'version': ''}
ncolons = coordinates_str.count(':')
if ncolons == 1:
a['groupId'], a['artifactId'] = coordinates_str.split(':')
elif ncolons == 2:
a['groupId'], a['artifactId'], a['version'] = coordinates_str.split(':')
elif ncolons == 3:
a['groupId'], a['artifactId'], a['packaging'], a['version'] = coordinates_str.split(':')
elif ncolons == 4:
a['groupId'], a['artifactId'], a['packaging'], a['classifier'], a['version'] = \
coordinates_str.split(':')
else:
raise ValueError('Invalid Maven coordinates %s', coordinates_str)
return a
def __repr__(self):
"""Represent as string."""
return self.to_str()
def __eq__(self, other):
"""Implement == operator."""
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
"""Implement != operator."""
return not self.__eq__(other)
@classmethod
def normalize_str(cls, coordinates_str):
"""Normalize string representation."""
return cls.from_str(coordinates_str).to_str()
@classmethod
def from_str(cls, coordinates_str):
"""Create instance from string."""
coordinates = MavenCoordinates._parse_string(coordinates_str)
return cls(**coordinates)
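# Illustrative usage sketch (not part of the original module); the coordinates
# below are arbitrary examples.
_example_coords = MavenCoordinates.from_str('org.example:my-lib:1.2.3')
assert _example_coords.groupId == 'org.example' and _example_coords.version == '1.2.3'
assert _example_coords.to_repo_url() == 'org/example/my-lib/1.2.3/my-lib-1.2.3.jar'
# The default 'jar' packaging is omitted again when normalizing:
assert MavenCoordinates.normalize_str('org.example:my-lib:jar:1.2.3') == 'org.example:my-lib:1.2.3'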
def parse_gh_repo(potential_url):
"""Cover the following variety of URL forms for Github repo referencing.
1) www.github.com/foo/bar
2) (same as above, but with ".git" in the end)
3) (same as the two above, but without "www.")
# all of the three above, but starting with "http://", "https://", "git://" or "git+https://"
4) [email protected]:foo/bar
5) (same as above, but with ".git" in the end)
6) (same as the two above but with "ssh://" in front or with "git+ssh" instead of "git")
We return repository name in form `<username>/<reponame>` or `None` if this does not
seem to be a Github repo (or if someone invented yet another form that we can't parse yet...)
Notably, the Github repo *must* have exactly username and reponame, nothing else and nothing
more. E.g. `github.com/<username>/<reponame>/<something>` is *not* recognized.
"""
# TODO: reduce cyclomatic complexity
if not potential_url:
return None
repo_name = None
# transform 4-6 to a URL-like string, so that we can handle it together with 1-3
if '@' in potential_url:
split = potential_url.split('@')
if len(split) == 2 and split[1].startswith('github.com:'):
potential_url = 'http://' + split[1].replace('github.com:', 'github.com/')
# make it parsable by urlparse if it doesn't contain scheme
if not potential_url.startswith(('http://', 'https://', 'git://', 'git+https://')):
potential_url = 'http://' + potential_url
# urlparse should handle it now
parsed = urlparse(potential_url)
if parsed.netloc in ['github.com', 'www.github.com'] and \
parsed.scheme in ['http', 'https', 'git', 'git+https']:
repo_name = parsed.path
if repo_name.endswith('.git'):
repo_name = repo_name[:-len('.git')]
if repo_name:
repo_name = repo_name.strip('/')
if len(repo_name.split('/')) > 2:
temp_list = repo_name.split('/')
repo_name = temp_list[0] + '/' + temp_list[1]
if repo_name.count('/') != 1:
return None
return repo_name
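# Illustrative usage sketch (not part of the original module); the URLs below
# are arbitrary examples of the forms described in the docstring above.
assert parse_gh_repo('[email protected]:foo/bar.git') == 'foo/bar'
assert parse_gh_repo('https://www.github.com/foo/bar') == 'foo/bar'
assert parse_gh_repo('https://gitlab.com/foo/bar') is None  # not a GitHub repo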
def url2git_repo(url):
"""Convert URL to git repo URL and force use HTTPS."""
if url.startswith('git+'):
return url[len('git+'):]
if url.startswith('git@'):
url = url[len('git@'):]
url = url.split(':')
if len(url) != 2:
raise ValueError("Unable to parse git repo URL '%s'" % str(url))
return 'https://{}/{}'.format(url[0], url[1])
if not url.startswith(('http://', 'https://', 'git://')):
return 'http://' + url
return url
def case_sensitivity_transform(ecosystem, name):
"""Transform package name to lowercase for ecosystem that are not case sensitive.
:param ecosystem: name of ecosystem in which the package is sits
:param name: name of ecosystem
:return: transformed package name base on ecosystem package case sensitivity
"""
if Ecosystem.by_name(StoragePool.get_connected_storage('BayesianPostgres').session,
ecosystem).is_backed_by(EcosystemBackend.pypi):
return name.lower()
return name
def get_session_retry(retries=3, backoff_factor=0.2, status_forcelist=(404, 500, 502, 504),
session=None):
"""Set HTTP Adapter with retries to session."""
session = session or requests.Session()
retry = Retry(total=retries, read=retries, connect=retries,
backoff_factor=backoff_factor, status_forcelist=status_forcelist)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
return session
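# Illustrative usage sketch (not part of the original module); the retry
# settings and the URL below are arbitrary examples.
_retry_session = get_session_retry(retries=5, backoff_factor=0.5)
# _retry_session.get('https://api.example.com/status')  # retried for the codes in status_forcelist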
def normalize_package_name(ecosystem_backend, name):
"""Normalize package name.
:param ecosystem_backend: str, ecosystem backend
:param name: str, package name
:return: str, normalized package name for supported ecosystem backend,
the same package name otherwise
"""
normalized_name = name
if ecosystem_backend == 'pypi':
# https://www.python.org/dev/peps/pep-0503/#normalized-names
normalized_name = re.sub(r'[-_.]+', '-', name).lower()
elif ecosystem_backend == 'maven':
# https://maven.apache.org/pom.html#Maven_Coordinates
normalized_name = MavenCoordinates.normalize_str(name)
elif ecosystem_backend == 'npm':
normalized_name = name
elif ecosystem_backend == 'go':
# go package name is the host+path part of a URL, thus it can be URL encoded
normalized_name = unquote(name)
return normalized_name
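# Illustrative usage sketch (not part of the original module); the package
# names below are arbitrary examples.
assert normalize_package_name('pypi', 'Flask_RESTful') == 'flask-restful'
assert normalize_package_name('go', 'github.com%2Ffoo%2Fbar') == 'github.com/foo/bar'
assert normalize_package_name('npm', 'Left-Pad') == 'Left-Pad'  # npm names pass through unchanged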
def get_user_email(user_profile):
"""Return default email if user_profile doesn't contain any."""
default_email = '[email protected]'
if user_profile is not None:
return user_profile.get('email', default_email)
else:
return default_email
@tenacity.retry(stop=tenacity.stop_after_attempt(3),
wait=tenacity.wait_exponential(multiplier=2, min=10, max=60))
def get_response(url):
"""Wrap requests which tries to get response.
:param url: URL where to do the request
:param sleep_time: sleep time between retries
:param retry_count: number of retries
:return: content of response's json
"""
try:
response = requests.get(url, headers=get_header())
# If status code is 404 or 204 then don't retry
if response.status_code in [404, 204]:
return {}
response.raise_for_status()
response = response.json()
return response
except HTTPError as err:
message = "Failed to get results from {url} with {err}".format(url=url, err=err)
logger.error(message)
raise NotABugTaskError(message) from err
def add_maven_coords_to_set(coordinates_str, gav_set):
"""Add Maven coordinates to the gav_set set."""
artifact_coords = MavenCoordinates.from_str(coordinates_str)
gav_set.add("{ecosystem}:{group_id}:{artifact_id}:{version}".format(
ecosystem="maven",
group_id=artifact_coords.groupId,
artifact_id=artifact_coords.artifactId,
version=artifact_coords.version
))
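# Illustrative usage sketch (not part of the original module); the coordinates
# below are an arbitrary example.
_gav_set = set()
add_maven_coords_to_set('org.example:my-lib:1.2.3', _gav_set)
assert _gav_set == {'maven:org.example:my-lib:1.2.3'}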
def peek(iterable):
"""Peeks the iterable to check if it's empty."""
try:
first = next(iterable)
except StopIteration:
return None
return first
@tenacity.retry(stop=tenacity.stop_after_attempt(3),
wait=tenacity.wait_exponential(multiplier=2, min=10, max=60))
def get_gh_contributors(url):
"""Get number of contributors from Git URL.
:param url: URL where to do the request
:return: length of contributor's list
"""
try:
response = requests.get("{}?per_page=1".format(url),
headers=get_header())
# If status code is 404 or 204 then don't retry
if response.status_code == 404:
return -1
if response.status_code == 204:
return 0
response.raise_for_status()
contributors_count = int(parse_qs(response.links['last']['url'])['page'][0]) \
if response.links else 1
return contributors_count
except HTTPError as err:
raise NotABugTaskError(err) from err
def store_data_to_s3(arguments, s3, result):
"""Store data to S3 bucket."""
try:
s3.store_data(arguments, result)
except Exception as e:
logger.error(e)
@tenacity.retry(stop=tenacity.stop_after_attempt(4),
wait=tenacity.wait_exponential(multiplier=3, min=10, max=60))
def get_gh_query_response(repo_name, status, type, start_date, end_date, event):
"""Get details of PRs and Issues from given Github repo.
:param repo_name: Github repo name
:param status: status of issue Ex. open/closed
:param type: type of issue to set in search query Ex. pr/issue
:param start_date: date since which data has to be collected
:param end_date: date up to which data has to be collected
:param event: event which needs to be considered Ex. created/closed
:return: count of issue/pr based on criteria
"""
try:
"""
Create the search query for the given criteria.
page and per_page are set to 1 because the search API returns the total count of
entities matching the criteria, so we don't need to collect data from all pages.
"""
url = "{GITHUB_API}search/issues?" \
"page=1" \
"&per_page=1" \
"&q=repo:{repo_name}" \
"+is:{type}" \
"+{event}:{start_date}..{end_date}"\
.format(GITHUB_API=configuration.GITHUB_API,
repo_name=repo_name,
start_date=start_date,
end_date=end_date,
type=type,
event=event)
# Both open and closed items are counted when no status is given; narrow the query otherwise
if status:
url = '{url}+is:{status}'.format(url=url, status=status)
response = requests.get(url, headers=get_header())
response.raise_for_status()
resp = response.json()
return resp.get('total_count', 0)
except Exception as e:
logger.error(e)
raise
@tenacity.retry(stop=tenacity.stop_after_attempt(2),
wait=tenacity.wait_exponential(multiplier=1, min=4, max=10))
def execute_gh_queries(repo_name, start_date, end_date):
"""Get details of Github PR/Issues based on given date range.
:param repo_name: Github repo name
:param start_date: date since which data has to be collected
:param end_date: date up to which data has to be collected
:return: count of issue/pr based on criteria
"""
try:
# Get PR details based on date range provided
pr_opened = get_gh_query_response(repo_name, '',
'pr', start_date, end_date, 'created')
pr_closed = get_gh_query_response(repo_name, 'closed',
'pr', start_date, end_date, 'closed')
# Get Issue details based on date range provided
issues_opened = get_gh_query_response(repo_name,
'', 'issue', start_date, end_date, 'created')
issues_closed = get_gh_query_response(repo_name,
'closed', 'issue', start_date, end_date, 'closed')
return pr_opened, pr_closed, issues_opened, issues_closed
except Exception as e:
logger.error(e)
raise
def get_gh_pr_issue_counts(repo_name):
"""Get details of Github PR/Issues for given repo.
:param repo_name: Github repo name
:return: Dict having Issue/PR details
"""
today = datetime.date.today()
# Date range covering the last 30 days (treated as the previous month)
last_month_end_date = today
last_month_start_date = today - datetime.timedelta(days=30)
# Get PR/Issue counts for previous month
try:
pr_opened_last_month, \
pr_closed_last_month, \
issues_opened_last_month, \
issues_closed_last_month = execute_gh_queries(repo_name,
last_month_start_date,
last_month_end_date)
except Exception as e:
logger.error(e)
pr_opened_last_month = \
pr_closed_last_month = \
issues_opened_last_month = \
issues_closed_last_month = -1
# Date range covering the last 365 days (treated as the previous year)
last_year_start_date = today - datetime.timedelta(days=365)
last_year_end_date = today
# Get PR/Issue counts for previous year
try:
pr_opened_last_year, \
pr_closed_last_year, \
issues_opened_last_year, \
issues_closed_last_year = execute_gh_queries(repo_name,
last_year_start_date,
last_year_end_date)
except Exception as e:
logger.error(e)
pr_opened_last_year = \
pr_closed_last_year = \
issues_opened_last_year = \
issues_closed_last_year = -1
# Assemble the output in the format required by the data importer
result = {
"updated_pull_requests": {
"year": {"opened": pr_opened_last_year, "closed": pr_closed_last_year},
"month": {"opened": pr_opened_last_month, "closed": pr_closed_last_month}
},
"updated_issues": {
"year": {"opened": issues_opened_last_year, "closed": issues_closed_last_year},
"month": {"opened": issues_opened_last_month, "closed": issues_closed_last_month}
}
}
return result
def get_header():
"""Get random Github token from env variables."""
headers = {
'Accept': 'application/vnd.github.mercy-preview+json, ' # for topics
'application/vnd.github.v3+json' # recommended by GitHub for License API
}
try:
_, header = configuration.select_random_github_token()
headers.update(header)
except F8AConfigurationException as e:
logger.error(e)
headers.update({})
return headers<|fim▁end|> | return not any(sub.startswith('.') for sub in item.split(os_path.sep)) |