text (string, lengths 4 to 1.02M) | meta (dict)
---|---
import asyncio
import discord
import os
from datetime import datetime
from discord.ext import commands
from Cogs import DisplayName
# This is the Torment module. It spams the target with pings for a while
class Torment:
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings):
self.bot = bot
self.waitBetween = 1 # number of seconds to wait before sending another message
self.settings = settings
self.toTorment = False
@commands.command(pass_context=True, hidden=True)
async def tormentdelay(self, ctx, delay : int = None):
"""Sets the delay in seconds between messages (owner only)."""
channel = ctx.message.channel
author = ctx.message.author
server = ctx.message.guild
# Only allow owner to change server stats
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
return
elif isOwner == False:
return
if delay == None:
if self.waitBetween == 1:
await ctx.message.author.send('Current torment delay is *1 second.*')
else:
await ctx.message.author.send('Current torment delay is *{} seconds.*'.format(self.waitBetween))
return
try:
delay = int(delay)
except Exception:
await ctx.message.author.send('Delay must be an int.')
return
if delay < 1:
await ctx.message.author.send('Delay must be at least *1 second*.')
return
self.waitBetween = delay
if self.waitBetween == 1:
await ctx.message.author.send('Current torment delay is now *1 second.*')
else:
await ctx.message.author.send('Current torment delay is now *{} seconds.*'.format(self.waitBetween))
@commands.command(pass_context=True, hidden=True)
async def canceltorment(self, ctx):
"""Cancels tormenting if it's in progress - must be false when next torment attempt starts to work (owner only)."""
channel = ctx.message.channel
author = ctx.message.author
server = ctx.message.guild
# Only allow owner to change server stats
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
return
elif isOwner == False:
return
if not self.toTorment:
await ctx.message.author.send('Not currently tormenting.')
return
# Cancel it!
self.toTorment = False
await ctx.message.author.send('Tormenting cancelled.')
@commands.command(pass_context=True, hidden=True)
async def torment(self, ctx, *, member = None, times : int = None):
"""Deals some vigilante justice (owner only)."""
channel = ctx.message.channel
author = ctx.message.author
server = ctx.message.guild
message = ctx.message
# Only allow owner to change server stats
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
return
elif isOwner == False:
return
usage = 'Usage: `{}torment [role/member] [times]`'.format(ctx.prefix)
isRole = False
if member == None:
await ctx.channel.send(usage)
return
# Check for formatting issues
if times == None:
# Either times wasn't set - or it's the last section
if type(member) is str:
# It's a string - the hope continues
roleCheck = DisplayName.checkRoleForInt(member, server)
if roleCheck and roleCheck["Role"]:
isRole = True
member = roleCheck["Role"]
times = roleCheck["Int"]
else:
# Role is invalid - check for member instead
nameCheck = DisplayName.checkNameForInt(member, server)
if not nameCheck:
await ctx.channel.send(usage)
return
if not nameCheck["Member"]:
msg = 'I couldn\'t find that user or role (*{}*) on the server.'.format(member)
await ctx.channel.send(msg)
return
member = nameCheck["Member"]
times = nameCheck["Int"]
# Set the torment flag
self.toTorment = True
if times == None:
# Still no times - roll back to default
times = 25
if times > 100:
times = 100
if times == 0:
await ctx.channel.send('Oooooh - I bet they feel *sooooo* tormented...')
return
if times < 0:
await ctx.channel.send('I just uh... *un-tormented* them. Yeah.')
return
# Delete original torment message
await message.delete()
for i in range(0, times):
# Do this over time
try:
await channel.send('*{}*'.format(member.mention))
except Exception:
pass
for j in range(0, self.waitBetween):
# Wait for 1 second, then check if we should cancel - then wait some more
await asyncio.sleep(1)
if not self.toTorment:
return
@commands.command(pass_context=True, hidden=True)
async def stealthtorment(self, ctx, *, member = None, times : int = None):
"""Deals some sneaky vigilante justice (owner only)."""
channel = ctx.message.channel
author = ctx.message.author
server = ctx.message.guild
message = ctx.message
# Only allow owner
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
return
elif isOwner == False:
return
usage = 'Usage: `{}stealthtorment [role/member] [times]`'.format(ctx.prefix)
isRole = False
if member == None:
await ctx.channel.send(usage)
return
# Check for formatting issues
if times == None:
# Either times wasn't set - or it's the last section
if type(member) is str:
# It's a string - the hope continues
roleCheck = DisplayName.checkRoleForInt(member, server)
if roleCheck and roleCheck["Role"]:
isRole = True
member = roleCheck["Role"]
times = roleCheck["Int"]
else:
# Role is invalid - check for member instead
nameCheck = DisplayName.checkNameForInt(member, server)
if not nameCheck:
await ctx.channel.send(usage)
return
if not nameCheck["Member"]:
msg = 'I couldn\'t find that user or role (*{}*) on the server.'.format(member)
await ctx.channel.send(msg)
return
member = nameCheck["Member"]
times = nameCheck["Int"]
# Set the torment flag
self.toTorment = True
if times == None:
# Still no times - roll back to default
times = 25
if times > 100:
times = 100
if times == 0:
await ctx.channel.send('Oooooh - I bet they feel *sooooo* tormented...')
return
if times < 0:
await ctx.channel.send('I just uh... *un-tormented* them. Yeah.')
return
# Delete original torment message
await message.delete()
for i in range(0, times):
# Do this over time
try:
tmessage = await ctx.channel.send('*{}*'.format(member.mention))
await tmessage.delete()
except Exception:
pass
for j in range(0, self.waitBetween):
# Wait for 1 second, then check if we should cancel - then wait some more
await asyncio.sleep(1)
if not self.toTorment:
return
@commands.command(pass_context=True, hidden=True)
async def servertorment(self, ctx, *, member = None, times : int = None):
"""Deals some vigilante justice in all channels (owner only)."""
channel = ctx.message.channel
author = ctx.message.author
server = ctx.message.guild
message = ctx.message
# Only allow owner
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
return
elif isOwner == False:
return
usage = 'Usage: `{}servertorment [role/member] [times]`'.format(ctx.prefix)
isRole = False
if member == None:
await ctx.channel.send(usage)
return
# Check for formatting issues
if times == None:
# Either times wasn't set - or it's the last section
if type(member) is str:
# It's a string - the hope continues
roleCheck = DisplayName.checkRoleForInt(member, server)
if roleCheck and roleCheck["Role"]:
isRole = True
member = roleCheck["Role"]
times = roleCheck["Int"]
else:
# Role is invalid - check for member instead
nameCheck = DisplayName.checkNameForInt(member, server)
if not nameCheck:
await ctx.channel.send(usage)
return
if not nameCheck["Member"]:
msg = 'I couldn\'t find that user or role (*{}*) on the server.'.format(member)
await ctx.channel.send(msg)
return
member = nameCheck["Member"]
times = nameCheck["Int"]
# Set the torment flag
self.toTorment = True
if times == None:
# Still no times - roll back to default
times = 25
if times > 100:
times = 100
if times == 0:
await ctx.channel.send('Oooooh - I bet they feel *sooooo* tormented...')
return
if times < 0:
await ctx.channel.send('I just uh... *un-tormented* them. Yeah.')
return
# Delete original torment message
await message.delete()
for i in range(0, times):
# Do this over time
for channel in server.channels:
# Get user's permissions
if channel.permissions_for(member).read_messages and type(channel) is discord.TextChannel:
# Only ping where they can read
try:
await channel.send('*{}*'.format(member.mention))
except Exception:
pass
for j in range(0, self.waitBetween):
# Wait for 1 second, then check if we should cancel - then wait some more
await asyncio.sleep(1)
if not self.toTorment:
return
@commands.command(pass_context=True, hidden=True)
async def stealthservertorment(self, ctx, *, member = None, times : int = None):
"""Deals some sneaky vigilante justice in all channels (owner only)."""
channel = ctx.message.channel
author = ctx.message.author
server = ctx.message.guild
message = ctx.message
# Only allow owner
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
return
elif isOwner == False:
return
usage = 'Usage: `{}stealthservertorment [role/member] [times]`'.format(ctx.prefix)
isRole = False
if member == None:
await ctx.channel.send(usage)
return
# Check for formatting issues
if times == None:
# Either times wasn't set - or it's the last section
if type(member) is str:
# It's a string - the hope continues
roleCheck = DisplayName.checkRoleForInt(member, server)
if roleCheck and roleCheck["Role"]:
isRole = True
member = roleCheck["Role"]
times = roleCheck["Int"]
else:
# Role is invalid - check for member instead
nameCheck = DisplayName.checkNameForInt(member, server)
if not nameCheck:
await ctx.channel.send(usage)
return
if not nameCheck["Member"]:
msg = 'I couldn\'t find that user or role (*{}*) on the server.'.format(member)
await ctx.channel.send(msg)
return
member = nameCheck["Member"]
times = nameCheck["Int"]
# Set the torment flag
self.toTorment = True
if times == None:
# Still no times - roll back to default
times = 25
if times > 100:
times = 100
if times == 0:
await ctx.channel.send('Oooooh - I bet they feel *sooooo* tormented...')
return
if times < 0:
await ctx.channel.send('I just uh... *un-tormented* them. Yeah.')
return
# Delete original torment message
await message.delete()
for i in range(0, times):
# Do this over time
for channel in server.channels:
# Get user's permissions
if channel.permissions_for(member).read_messages and type(channel) is discord.TextChannel:
# Only ping where they can read
try:
tmessage = await channel.send('*{}*'.format(member.mention))
await tmessage.delete()
except Exception:
pass
for j in range(0, self.waitBetween):
# Wait for 1 second, then check if we should cancel - then wait some more
await asyncio.sleep(1)
if not self.toTorment:
return
| {
"content_hash": "e5f6f31107cc3266ae667396d8cbc607",
"timestamp": "",
"source": "github",
"line_count": 412,
"max_line_length": 117,
"avg_line_length": 28.686893203883496,
"alnum_prop": 0.6380404433539216,
"repo_name": "TheMasterGhost/CorpBot",
"id": "1bdaacf730795b872322c0b3ca8cd3d758f0c2f4",
"size": "11819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Cogs/Torment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3746"
},
{
"name": "Python",
"bytes": "810947"
},
{
"name": "Shell",
"bytes": "4328"
}
],
"symlink_target": ""
} |
"""
Command line argument parser for neon deep learning library
This is a wrapper around the configargparse ArgumentParser class.
It adds in the default neon command line arguments and allows
additional arguments to be added using the argparse library
methods. Lower priority defaults can also be read from a configuration file
(specified by the -c command line argument).
"""
import configargparse
import logging
from logging.handlers import RotatingFileHandler
import numpy as np
import os
import inspect
from neon import __version__ as neon_version
from neon.backends import gen_backend
from neon.backends.backend import Backend
from neon.backends.util.check_gpu import get_compute_capability, get_device_count
from neon.callbacks.callbacks import Callbacks
logger = logging.getLogger(__name__)
def extract_valid_args(args, func, startidx=0):
"""
Given a namespace of argparser args, extract those applicable to func.
Arguments:
args (Namespace): a namespace of args from argparse
func (Function): a function to inspect, to determine valid args
startidx (int): Start index
Returns:
dict of (arg, value) pairs from args that are valid for func
"""
func_args = inspect.getargspec(func).args[startidx:]
return dict((k, v) for k, v in list(vars(args).items()) if k in func_args)
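# Example (sketch): forward only the parsed options that gen_backend() accepts,
# ignoring the rest of the namespace:
#     be_args = extract_valid_args(args, gen_backend)
#     gen_backend(**be_args)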
class NeonArgparser(configargparse.ArgumentParser):
"""
Setup the command line arg parser and parse the
arguments in sys.arg (or from configuration file). Use the parsed
options to configure the logging module.
Arguments:
desc (String) : Docstring from the calling function. This will be used
for the description of the command receiving the
arguments.
"""
def __init__(self, *args, **kwargs):
self._PARSED = False
self.work_dir = os.path.join(os.path.expanduser('~'), 'nervana')
if 'default_config_files' not in kwargs:
kwargs['default_config_files'] = [os.path.join(self.work_dir,
'neon.cfg')]
if 'add_config_file_help' not in kwargs:
# turn off the auto-generated config help for config files since it
# referenced unsettable config options like --version
kwargs['add_config_file_help'] = False
self.defaults = kwargs.pop('default_overrides', dict())
super(NeonArgparser, self).__init__(*args, **kwargs)
# ensure that default values are displayed via --help
self.formatter_class = configargparse.ArgumentDefaultsHelpFormatter
self.setup_default_args()
def setup_default_args(self):
"""
Setup the default arguments used by neon
"""
self.add_argument('--version', action='version', version=neon_version)
self.add_argument('-c', '--config', is_config_file=True,
help='Read values for these arguments from the '
'configuration file specified here first.')
self.add_argument('-v', '--verbose', action='count',
default=self.defaults.get('verbose', 1),
help="verbosity level. Add multiple v's to "
"further increase verbosity")
# we store the negation of no_progress_bar in args.progress_bar during
# parsing
self.add_argument('--no_progress_bar',
action="store_true",
help="suppress running display of progress bar and "
"training loss")
# runtime specific options
rt_grp = self.add_argument_group('runtime')
rt_grp.add_argument('-w', '--data_dir',
default=os.path.join(self.work_dir, 'data'),
help='working directory in which to cache '
'downloaded and preprocessed datasets')
rt_grp.add_argument('-e', '--epochs', type=int,
default=self.defaults.get('epochs', 10),
help='number of complete passes over the dataset to run')
rt_grp.add_argument('-s', '--save_path', type=str,
default=self.defaults.get('save_path'),
help='file path to save model snapshots')
rt_grp.add_argument('--serialize', nargs='?', type=int,
default=self.defaults.get('serialize', 0),
const=1, metavar='N',
help='serialize model every N epochs')
rt_grp.add_argument('--model_file', help='load model from pkl file')
rt_grp.add_argument('-l', '--log', dest='logfile', nargs='?',
const=os.path.join(self.work_dir, 'neon_log.txt'),
help='log file')
rt_grp.add_argument('-o', '--output_file',
default=self.defaults.get('output_file', None),
help='hdf5 data file for metrics computed during '
'the run, optional. Can be used by nvis for '
'visualization.')
rt_grp.add_argument('-eval', '--eval_freq', type=int,
default=self.defaults.get('eval_freq', None),
help='frequency (in epochs) to test the eval set.')
rt_grp.add_argument('-H', '--history', type=int,
default=self.defaults.get('history', 1),
help='number of checkpoint files to retain')
rt_grp.add_argument('--log_token', type=str,
default='',
help='access token for data logging in real time')
be_grp = self.add_argument_group('backend')
be_grp.add_argument('-b', '--backend', choices=Backend.backend_choices(),
default='gpu' if get_compute_capability() >= 3.0
else 'cpu',
help='backend type. Multi-GPU support is a premium '
'feature available exclusively through the '
'Nervana cloud. Please contact '
'[email protected] for details.')
be_grp.add_argument('-i', '--device_id', type=int,
default=self.defaults.get('device_id', 0),
help='gpu device id (only used with GPU backend)')
be_grp.add_argument('-m', '--max_devices', type=int,
default=self.defaults.get('max_devices', get_device_count()),
help='max number of GPUs (only used with mgpu backend)')
be_grp.add_argument('-r', '--rng_seed', type=int,
default=self.defaults.get('rng_seed', None),
metavar='SEED',
help='random number generator seed')
be_grp.add_argument('-u', '--rounding',
const=True,
type=int,
nargs='?',
metavar='BITS',
default=self.defaults.get('rounding', False),
help='use stochastic rounding [will round to BITS number '
'of bits if specified]')
be_grp.add_argument('-d', '--datatype', choices=['f16', 'f32', 'f64'],
default=self.defaults.get('datatype', 'f32'),
metavar='default datatype',
help='default floating point '
'precision for backend [f64 for cpu only]')
be_grp.add_argument('-z', '--batch_size', type=int,
default=self.defaults.get('batch_size', 128),
help='batch size')
be_grp.add_argument('--caffe', action='store_true',
help='match caffe when computing conv and pool layer output '
'sizes and dropout implementation')
be_grp.add_argument('--deterministic', action='store_true',
help='Use deterministic kernels where applicable')
return
def add_yaml_arg(self):
"""
Add the yaml file argument, this is needed for scripts that
parse the model config from yaml files
"""
# yaml configuration file
self.add_argument('yaml_file',
type=configargparse.FileType('r'),
help='neon model specification file')
def add_argument(self, *args, **kwargs):
"""
Method by which command line arguments are added to the parser. Passed
straight through to parent add_argument method.
Arguments:
*args:
**kwargs:
"""
if self._PARSED:
logger.warn('Adding arguments after arguments were parsed - '
'may need to rerun parse_args')
# reset so warning only comes once
self._PARSED = False
super(NeonArgparser, self).add_argument(*args, **kwargs)
return
# we never use this alias from ConfigArgParse, but defining this here
# prevents documentation indent warnings
def add(self):
""" Ignored. """
pass
# we never use this alias from ConfigArgParse, but defining this here
# prevents documentation indent warnings
def add_arg(self):
""" Ignored. """
pass
def parse_args(self, gen_be=True):
"""
Parse the command line arguments and setup neon
runtime environment accordingly
Arguments:
gen_be (bool): if False, the arg parser will not
generate the backend
Returns:
namespace: contains the parsed arguments as attributes
"""
args = super(NeonArgparser, self).parse_args()
err_msg = None # used for relaying exception to logger
# set up the logging
# max thresh is 50 (critical only), min is 10 (debug or higher)
try:
log_thresh = max(10, 40 - args.verbose * 10)
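# e.g. verbose count 1 -> 30 (WARNING), 2 -> 20 (INFO), 3 or more -> 10 (DEBUG)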
except (AttributeError, TypeError):
# if defaults are not set or not -v given
# for latter will get type error
log_thresh = 30
args.log_thresh = log_thresh
# logging formatter
fmtr = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# get the parent logger for neon
main_logger = logging.getLogger('neon')
main_logger.setLevel(log_thresh)
# setup a console stderr log handler
stderrlog = logging.StreamHandler()
stderrlog.setFormatter(fmtr)
# expand any user directories in paths
for path in ['data_dir', 'save_path', 'model_file', 'output_file',
'logfile']:
if getattr(args, path):
setattr(args, path, os.path.expanduser(getattr(args, path)))
if args.logfile:
# add log to file as well
filelog = RotatingFileHandler(filename=args.logfile, mode='w',
maxBytes=10000000, backupCount=5)
filelog.setFormatter(fmtr)
filelog.setLevel(log_thresh)
main_logger.addHandler(filelog)
# if a log file is specified and progress bar displayed,
# log only errors to console.
if args.no_progress_bar:
stderrlog.setLevel(log_thresh)
else:
stderrlog.setLevel(logging.ERROR)
else:
stderrlog.setLevel(log_thresh)
# add this handler instead
main_logger.propagate = False
main_logger.addHandler(stderrlog)
# need to write out the full "float" prefix - otherwise numpy reads the
# suffix as a byte count, not bits (np.dtype('f16') would be float128)
args.datatype = 'float' + args.datatype[1:]
args.datatype = np.dtype(args.datatype).type
# invert no_progress_bar meaning and store in args.progress_bar
args.progress_bar = not args.no_progress_bar
if args.backend == 'cpu' and args.rounding > 0:
err_msg = 'CPU backend does not support stochastic rounding'
logger.exception(err_msg)
raise NotImplementedError(err_msg)
# done up front to avoid losing data due to incorrect path
if args.save_path:
savedir = os.path.dirname(os.path.abspath(args.save_path))
if not os.access(savedir, os.R_OK | os.W_OK):
try:
os.makedirs(savedir)
except OSError:
err_msg = 'Can not create save_path %s' % (savedir)
if os.path.exists(args.save_path):
logger.warning('save file %s exists, attempting to overwrite' % args.save_path)
if not os.access(args.save_path, os.R_OK | os.W_OK):
err_msg = 'Can not write to save_path file %s' % args.save_path
if err_msg:
logger.exception(err_msg)
raise IOError(err_msg)
if (args.serialize > 0) and (args.save_path is None):
args.save_path = "neon_model.pkl"
logger.warn('No path given for model serialization, using default "%s"',
args.save_path)
if (args.save_path is not None) and (args.serialize == 0):
args.serialize = 1
logger.warn('No schedule given for model serialization, using default %d',
args.serialize)
if args.model_file:
err_msg = None
if not os.path.exists(args.model_file):
err_msg = 'Model file %s not present' % args.model_file
if not os.access(args.model_file, os.R_OK):
err_msg = 'No read access for model file %s' % args.model_file
if err_msg:
logger.exception(err_msg)
raise IOError(err_msg)
if args.caffe:
args.compat_mode = 'caffe'
else:
args.compat_mode = None
if args.deterministic:
logger.warn('--deterministic flag is deprecated. Specify random seed for '
'deterministic behavior.')
# extended parsers may need to generate backend after argparsing
if gen_be:
# generate the backend
gen_backend(backend=args.backend,
rng_seed=args.rng_seed,
device_id=args.device_id,
batch_size=args.batch_size,
datatype=args.datatype,
max_devices=args.max_devices,
compat_mode=args.compat_mode)
# display what command line / config options were set (and from where)
logger.info(self.format_values())
self._PARSED = True
self.args = args
args.callback_args = extract_valid_args(args, Callbacks.__init__, startidx=1)
return args
| {
"content_hash": "341385da33a66f38206976abd949c7ec",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 95,
"avg_line_length": 44.10086455331412,
"alnum_prop": 0.5477357380905705,
"repo_name": "Jokeren/neon",
"id": "f1efb95d29d8b31b4d15bb8238b98ec490a3b2d6",
"size": "16049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neon/util/argparser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6736"
},
{
"name": "C++",
"bytes": "135410"
},
{
"name": "CSS",
"bytes": "1472272"
},
{
"name": "Cuda",
"bytes": "14937"
},
{
"name": "Makefile",
"bytes": "12228"
},
{
"name": "Perl",
"bytes": "130963"
},
{
"name": "Python",
"bytes": "1943953"
}
],
"symlink_target": ""
} |
"""The fsl module provides classes for interfacing with the `FSL
<http://www.fmrib.ox.ac.uk/fsl/index.html>`_ command line tools. This
was written to work with FSL version 4.1.4.
These are the base tools for working with FSL.
Preprocessing tools are found in fsl/preprocess.py
Model tools are found in fsl/model.py
DTI tools are found in fsl/dti.py
XXX Make this doc current!
Currently these tools are supported:
* BET v2.1: brain extraction
* FAST v4.1: segmentation and bias correction
* FLIRT v5.5: linear registration
* MCFLIRT: motion correction
* FNIRT v1.0: non-linear warp
Examples
--------
See the docstrings of the individual classes for examples.
"""
from glob import glob
import os
import warnings
from nipype.utils.filemanip import fname_presuffix
from nipype.interfaces.base import CommandLine, traits, CommandLineInputSpec
from nipype.utils.misc import isdefined
warn = warnings.warn
warnings.filterwarnings('always', category=UserWarning)
class Info(object):
"""Handle fsl output type and version information.
version refers to the version of fsl on the system
output type refers to the type of file fsl defaults to writing
eg, NIFTI, NIFTI_GZ
"""
ftypes = {'NIFTI': '.nii',
'NIFTI_PAIR': '.img',
'NIFTI_GZ': '.nii.gz',
'NIFTI_PAIR_GZ': '.img.gz'}
@staticmethod
def version():
"""Check for fsl version on system
Parameters
----------
None
Returns
-------
version : str
Version number as string or None if FSL not found
"""
# find which fsl being used....and get version from
# /path/to/fsl/etc/fslversion
clout = CommandLine(command='which', args='fsl').run()
if clout.runtime.returncode != 0:
return None
out = clout.runtime.stdout
basedir = os.path.split(os.path.split(out)[0])[0]
clout = CommandLine(command='cat',
args='%s/etc/fslversion' % (basedir)).run()
out = clout.runtime.stdout
return out.strip('\n')
@classmethod
def output_type_to_ext(cls, output_type):
"""Get the file extension for the given output type.
Parameters
----------
output_type : {'NIFTI', 'NIFTI_GZ', 'NIFTI_PAIR', 'NIFTI_PAIR_GZ'}
String specifying the output type.
Returns
-------
extension : str
The file extension for the output type.
"""
try:
return cls.ftypes[output_type]
except KeyError:
msg = 'Invalid FSLOUTPUTTYPE: %s' % output_type
raise KeyError(msg)
@classmethod
def output_type(cls):
"""Get the global FSL output file type FSLOUTPUTTYPE.
This returns the value of the environment variable
FSLOUTPUTTYPE. An exception is raised if it is not defined.
Returns
-------
fsl_ftype : string
Represents the current environment setting of FSLOUTPUTTYPE
"""
try:
return os.environ['FSLOUTPUTTYPE']
except KeyError:
raise Exception('FSL environment variables not set')
@staticmethod
def standard_image(img_name=None):
'''Grab an image from the standard location.
Returns a list of standard images if called without arguments.
Could be made more fancy to allow for more relocatability'''
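# e.g. Info.standard_image('MNI152_T1_2mm.nii.gz') would return
# $FSLDIR/data/standard/MNI152_T1_2mm.nii.gz (assuming that template ships
# with the local FSL install)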
try:
fsldir = os.environ['FSLDIR']
except KeyError:
raise Exception('FSL environment variables not set')
stdpath = os.path.join(fsldir, 'data','standard')
if img_name is None:
return [filename.replace(stdpath+'/','') for filename in glob(os.path.join(stdpath,'*nii*'))]
return os.path.join(stdpath, img_name)
class FSLCommandInputSpec(CommandLineInputSpec):
"""
Base Input Specification for all FSL Commands
All command support specifying FSLOUTPUTTYPE dynamically
via output_type.
Example
-------
fsl.ExtractRoi(tmin=42, tsize=1, output_type='NIFTI')
"""
output_type = traits.Enum('NIFTI', Info.ftypes.keys(),
desc='FSL output type')
class FSLCommand(CommandLine):
"""Base support for FSL commands.
"""
input_spec = FSLCommandInputSpec
_output_type = None
def __init__(self, **inputs):
super(FSLCommand, self).__init__(**inputs)
self.inputs.on_trait_change(self._output_update, 'output_type')
if self._output_type is None:
self._output_type = Info.output_type()
if not isdefined(self.inputs.output_type):
self.inputs.output_type = self._output_type
else:
self._output_update()
def _output_update(self):
self._output_type = self.inputs.output_type
self.inputs.environ.update({'FSLOUTPUTTYPE': self.inputs.output_type})
@classmethod
def set_default_output_type(cls, output_type):
"""Set the default output type for FSL classes.
This method is used to set the default output type for all FSL
subclasses. However, setting this will not update the output
type for any existing instances. For these, assign the
<instance>.inputs.output_type.
"""
if output_type in Info.ftypes:
cls._output_type = output_type
else:
raise AttributeError('Invalid FSL output_type: %s' % output_type)
def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, ext=None):
"""Generate a filename based on the given parameters.
The filename will take the form: cwd/basename<suffix><ext>.
If change_ext is True, it will use the extensions specified in
<instance>.inputs.output_type.
Parameters
----------
basename : str
Filename to base the new filename on.
cwd : str
Path to prefix to the new filename. (default is os.getcwd())
suffix : str
Suffix to add to the `basename`. (default is '')
change_ext : bool
Flag to change the filename extension to the FSL output type.
(default True)
Returns
-------
fname : str
New filename based on given parameters.
"""
if basename == '':
msg = 'Unable to generate filename for command %s. ' % self.cmd
msg += 'basename is not set!'
raise ValueError(msg)
if cwd is None:
cwd = os.getcwd()
if ext is None:
ext = Info.output_type_to_ext(self.inputs.output_type)
if change_ext:
if suffix:
suffix = ''.join((suffix, ext))
else:
suffix = ext
fname = fname_presuffix(basename, suffix = suffix,
use_ext = False, newpath = cwd)
return fname
def check_fsl():
ver = Info.version()
if ver:
return 0
else:
return 1
def no_fsl():
"""Checks if FSL is NOT installed
used with skipif to skip tests that will
fail if FSL is not installed"""
if Info.version() == None:
return True
else:
return False
| {
"content_hash": "643cf695005662a6f471d6d168e6d4e5",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 105,
"avg_line_length": 29.80081300813008,
"alnum_prop": 0.599099713545219,
"repo_name": "satra/NiPypeold",
"id": "b8a227b01105c4971e891ab4bc1dd3776acd28aa",
"size": "7445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nipype/interfaces/fsl/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "931"
},
{
"name": "Objective-C",
"bytes": "4736"
},
{
"name": "Python",
"bytes": "1389618"
},
{
"name": "Tcl",
"bytes": "43377"
}
],
"symlink_target": ""
} |
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from __future__ import print_function
import six
import socket
try:
import ssl
from ssl import SSLError
if hasattr(ssl, "match_hostname"):
from ssl import match_hostname
else:
from backports.ssl_match_hostname import match_hostname
HAVE_SSL = True
except ImportError:
# dummy class of SSLError for ssl none-support environment.
class SSLError(Exception):
pass
HAVE_SSL = False
from six.moves.urllib.parse import urlparse
if six.PY3:
from base64 import encodebytes as base64encode
else:
from base64 import encodestring as base64encode
import os
import errno
import struct
import uuid
import hashlib
import threading
import logging
# websocket modules
from ._exceptions import *
from ._abnf import *
from ._utils import NoLock, validate_utf8
"""
websocket python client.
=========================
This version supports only hybi-13.
Please see http://tools.ietf.org/html/rfc6455 for protocol.
"""
# websocket supported version.
VERSION = 13
DEFAULT_SOCKET_OPTION = [(socket.SOL_TCP, socket.TCP_NODELAY, 1),]
if hasattr(socket, "SO_KEEPALIVE"):
DEFAULT_SOCKET_OPTION.append((socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1))
if hasattr(socket, "TCP_KEEPIDLE"):
DEFAULT_SOCKET_OPTION.append((socket.SOL_TCP, socket.TCP_KEEPIDLE, 30))
if hasattr(socket, "TCP_KEEPINTVL"):
DEFAULT_SOCKET_OPTION.append((socket.SOL_TCP, socket.TCP_KEEPINTVL, 10))
if hasattr(socket, "TCP_KEEPCNT"):
DEFAULT_SOCKET_OPTION.append((socket.SOL_TCP, socket.TCP_KEEPCNT, 3))
logger = logging.getLogger()
default_timeout = None
traceEnabled = False
def enableTrace(tracable):
"""
turn on/off traceability.
tracable: boolean value. if set True, traceability is enabled.
"""
global traceEnabled
traceEnabled = tracable
if tracable:
if not logger.handlers:
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
def _dump(title, message):
if traceEnabled:
logger.debug("--- " + title + " ---")
logger.debug(message)
logger.debug("-----------------------")
def setdefaulttimeout(timeout):
"""
Set the global timeout setting to connect.
timeout: default socket timeout time. This value is second.
"""
global default_timeout
default_timeout = timeout
def getdefaulttimeout():
"""
Return the global timeout setting(second) to connect.
"""
return default_timeout
def _parse_url(url):
"""
parse url and the result is tuple of
(hostname, port, resource path and the flag of secure mode)
url: url string.
"""
if ":" not in url:
raise ValueError("url is invalid")
scheme, url = url.split(":", 1)
parsed = urlparse(url, scheme="ws")
if parsed.hostname:
hostname = parsed.hostname
else:
raise ValueError("hostname is invalid")
port = 0
if parsed.port:
port = parsed.port
is_secure = False
if scheme == "ws":
if not port:
port = 80
elif scheme == "wss":
is_secure = True
if not port:
port = 443
else:
raise ValueError("scheme %s is invalid" % scheme)
if parsed.path:
resource = parsed.path
else:
resource = "/"
if parsed.query:
resource += "?" + parsed.query
return (hostname, port, resource, is_secure)
DEFAULT_NO_PROXY_HOST = ["localhost", "127.0.0.1"]
def _is_no_proxy_host(hostname, no_proxy):
if not no_proxy:
v = os.environ.get("no_proxy", "").replace(" ", "")
no_proxy = v.split(",")
if not no_proxy:
no_proxy = DEFAULT_NO_PROXY_HOST
return hostname in no_proxy
def _get_proxy_info(hostname, is_secure, **options):
"""
try to retrieve proxy host and port from environment if not provided in options.
result is (proxy_host, proxy_port, proxy_auth).
proxy_auth is tuple of username and password of proxy authentication information.
hostname: websocket server name.
is_secure: is the connection secure? (wss)
looks for "https_proxy" in env before falling back to "http_proxy"
options: "http_proxy_host" - http proxy host name.
"http_proxy_port" - http proxy port.
"http_no_proxy" - host names, which doesn't use proxy.
"http_proxy_auth" - http proxy auth infomation. tuple of username and password.
defualt is None
"""
if _is_no_proxy_host(hostname, options.get("http_no_proxy", None)):
return None, 0, None
http_proxy_host = options.get("http_proxy_host", None)
if http_proxy_host:
return http_proxy_host, options.get("http_proxy_port", 0), options.get("http_proxy_auth", None)
env_keys = ["http_proxy"]
if is_secure:
env_keys.insert(0, "https_proxy")
for key in env_keys:
value = os.environ.get(key, None)
if value:
proxy = urlparse(value)
auth = (proxy.username, proxy.password) if proxy.username else None
return proxy.hostname, proxy.port, auth
return None, 0, None
def _extract_err_message(exception):
message = getattr(exception, 'strerror', '')
if not message:
message = getattr(exception, 'message', '')
return message
def create_connection(url, timeout=None, **options):
"""
connect to url and return websocket object.
Connect to url and return the WebSocket object.
Passing optional timeout parameter will set the timeout on the socket.
If no timeout is supplied, the global default timeout setting returned by getdefaulttimeout() is used.
You can customize using 'options'.
If you set "header" list object, you can set your own custom header.
>>> conn = create_connection("ws://echo.websocket.org/",
... header=["User-Agent: MyProgram",
... "x-custom: header"])
timeout: socket timeout time. This value is integer.
if you set None for this value, it means "use default_timeout value"
options: "header" -> custom http header list.
"cookie" -> cookie value.
"http_proxy_host" - http proxy host name.
"http_proxy_port" - http proxy port. If not set, set to 80.
"http_no_proxy" - host names, which doesn't use proxy.
"http_proxy_auth" - http proxy auth infomation. tuple of username and password.
defualt is None
"enable_multithread" -> enable lock for multithread.
"sockopt" -> socket options
"sslopt" -> ssl option
"subprotocols" - array of available sub protocols. default is None.
"""
sockopt = options.get("sockopt", [])
sslopt = options.get("sslopt", {})
fire_cont_frame = options.get("fire_cont_frame", False)
enable_multithread = options.get("enable_multithread", False)
websock = WebSocket(sockopt=sockopt, sslopt=sslopt,
fire_cont_frame = fire_cont_frame, enable_multithread=enable_multithread)
websock.settimeout(timeout if timeout is not None else default_timeout)
websock.connect(url, **options)
return websock
_MAX_INTEGER = (1 << 32) -1
_AVAILABLE_KEY_CHARS = list(range(0x21, 0x2f + 1)) + list(range(0x3a, 0x7e + 1))
_MAX_CHAR_BYTE = (1<<8) -1
# ref. Websocket gets an update, and it breaks stuff.
# http://axod.blogspot.com/2010/06/websocket-gets-update-and-it-breaks.html
def _create_sec_websocket_key():
uid = uuid.uuid4()
return base64encode(uid.bytes).decode('utf-8').strip()
_HEADERS_TO_CHECK = {
"upgrade": "websocket",
"connection": "upgrade",
}
class _FrameBuffer(object):
_HEADER_MASK_INDEX = 5
_HEADER_LENGTH_INDEX = 6
def __init__(self):
self.clear()
def clear(self):
self.header = None
self.length = None
self.mask = None
def has_received_header(self):
return self.header is None
def recv_header(self, recv_fn):
header = recv_fn(2)
b1 = header[0]
if six.PY2:
b1 = ord(b1)
fin = b1 >> 7 & 1
rsv1 = b1 >> 6 & 1
rsv2 = b1 >> 5 & 1
rsv3 = b1 >> 4 & 1
opcode = b1 & 0xf
b2 = header[1]
if six.PY2:
b2 = ord(b2)
has_mask = b2 >> 7 & 1
length_bits = b2 & 0x7f
self.header = (fin, rsv1, rsv2, rsv3, opcode, has_mask, length_bits)
def has_mask(self):
if not self.header:
return False
return self.header[_FrameBuffer._HEADER_MASK_INDEX]
def has_received_length(self):
return self.length is None
def recv_length(self, recv_fn):
bits = self.header[_FrameBuffer._HEADER_LENGTH_INDEX]
length_bits = bits & 0x7f
if length_bits == 0x7e:
v = recv_fn(2)
self.length = struct.unpack("!H", v)[0]
elif length_bits == 0x7f:
v = recv_fn(8)
self.length = struct.unpack("!Q", v)[0]
else:
self.length = length_bits
def has_received_mask(self):
return self.mask is None
def recv_mask(self, recv_fn):
self.mask = recv_fn(4) if self.has_mask() else ""
class WebSocket(object):
"""
Low level WebSocket interface.
This class is based on
The WebSocket protocol draft-hixie-thewebsocketprotocol-76
http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
We can connect to the websocket server and send/receive data.
The following example is an echo client.
>>> import websocket
>>> ws = websocket.WebSocket()
>>> ws.connect("ws://echo.websocket.org")
>>> ws.send("Hello, Server")
>>> ws.recv()
'Hello, Server'
>>> ws.close()
get_mask_key: a callable to produce new mask keys, see the set_mask_key
function's docstring for more details
sockopt: values for socket.setsockopt.
sockopt must be tuple and each element is argument of sock.setscokopt.
sslopt: dict object for ssl socket option.
fire_cont_frame: fire recv event for each cont frame. default is False
enable_multithread: if set to True, lock send method.
"""
def __init__(self, get_mask_key=None, sockopt=None, sslopt=None,
fire_cont_frame=False, enable_multithread=False):
"""
Initialize WebSocket object.
"""
if sockopt is None:
sockopt = []
if sslopt is None:
sslopt = {}
self.connected = False
self.sock = None
self._timeout = None
self.sockopt = sockopt
self.sslopt = sslopt
self.get_mask_key = get_mask_key
self.fire_cont_frame = fire_cont_frame
# Buffers over the packets from the layer beneath until the desired amount
# of bytes is received.
self._recv_buffer = []
# These buffer over the build-up of a single frame.
self._frame_buffer = _FrameBuffer()
self._cont_data = None
self._recving_frames = None
if enable_multithread:
self.lock = threading.Lock()
else:
self.lock = NoLock()
self.subprotocol = None
def fileno(self):
return self.sock.fileno()
def set_mask_key(self, func):
"""
set function to create mask key. You can customize the mask key generator.
Mainly, this is for testing purposes.
func: callable object. the func must take 1 argument as integer.
The argument means length of mask key.
This func must return a string(byte array),
whose length is the specified argument.
"""
self.get_mask_key = func
def gettimeout(self):
"""
Get the websocket timeout(second).
"""
return self._timeout
def settimeout(self, timeout):
"""
Set the timeout to the websocket.
timeout: timeout time(second).
"""
self._timeout = timeout
if self.sock:
self.sock.settimeout(timeout)
timeout = property(gettimeout, settimeout)
def connect(self, url, **options):
"""
Connect to url. url is websocket url scheme. ie. ws://host:port/resource
You can customize using 'options'.
If you set "header" list object, you can set your own custom header.
>>> ws = WebSocket()
>>> ws.connect("ws://echo.websocket.org/",
... header=["User-Agent: MyProgram",
... "x-custom: header"])
timeout: socket timeout time. This value is integer.
if you set None for this value,
it means "use default_timeout value"
options: "header" -> custom http header list.
"cookie" -> cookie value.
"http_proxy_host" - http proxy host name.
"http_proxy_port" - http proxy port. If not set, set to 80.
"http_no_proxy" - host names, which doesn't use proxy.
"http_proxy_auth" - http proxy auth infomation. tuple of username and password.
defualt is None
"subprotocols" - array of available sub protocols. default is None.
"""
hostname, port, resource, is_secure = _parse_url(url)
proxy_host, proxy_port, proxy_auth = _get_proxy_info(hostname, is_secure, **options)
if not proxy_host:
addrinfo_list = socket.getaddrinfo(hostname, port, 0, 0, socket.SOL_TCP)
else:
proxy_port = proxy_port and proxy_port or 80
addrinfo_list = socket.getaddrinfo(proxy_host, proxy_port, 0, 0, socket.SOL_TCP)
if not addrinfo_list:
raise WebSocketException("Host not found.: " + hostname + ":" + str(port))
err = None
for addrinfo in addrinfo_list:
family = addrinfo[0]
self.sock = socket.socket(family)
self.sock.settimeout(self.timeout)
for opts in DEFAULT_SOCKET_OPTION:
self.sock.setsockopt(*opts)
for opts in self.sockopt:
self.sock.setsockopt(*opts)
address = addrinfo[4]
try:
self.sock.connect(address)
except socket.error as error:
error.remote_ip = str(address[0])
if error.errno in (errno.ECONNREFUSED, ):
err = error
continue
else:
raise
else:
break
else:
raise err
if proxy_host:
self._tunnel(hostname, port, proxy_auth)
if is_secure:
if HAVE_SSL:
sslopt = dict(cert_reqs=ssl.CERT_REQUIRED)
certPath = os.path.join(
os.path.dirname(__file__), "cacert.pem")
if os.path.isfile(certPath):
sslopt['ca_certs'] = certPath
sslopt.update(self.sslopt)
check_hostname = sslopt.pop('check_hostname', True)
self.sock = ssl.wrap_socket(self.sock, **sslopt)
if (sslopt["cert_reqs"] != ssl.CERT_NONE
and check_hostname):
match_hostname(self.sock.getpeercert(), hostname)
else:
raise WebSocketException("SSL not available.")
self._handshake(hostname, port, resource, **options)
def _tunnel(self, host, port, auth):
logger.debug("Connecting proxy...")
connect_header = "CONNECT %s:%d HTTP/1.0\r\n" % (host, port)
# TODO: support digest auth.
if auth and auth[0]:
auth_str = auth[0]
if auth[1]:
auth_str += ":" + auth[1]
encoded_str = base64encode(auth_str.encode()).strip().decode()
connect_header += "Proxy-Authorization: Basic %s\r\n" % encoded_str
connect_header += "\r\n"
_dump("request header", connect_header)
self._send(connect_header)
status, resp_headers = self._read_headers()
if status != 200:
raise WebSocketException("failed CONNECT via proxy")
def _get_resp_headers(self, success_status = 101):
status, resp_headers = self._read_headers()
if status != success_status:
self.close()
raise WebSocketException("Handshake status %d" % status)
return resp_headers
def _get_handshake_headers(self, resource, host, port, options):
headers = []
headers.append("GET %s HTTP/1.1" % resource)
headers.append("Upgrade: websocket")
headers.append("Connection: Upgrade")
if port == 80:
hostport = host
else:
hostport = "%s:%d" % (host, port)
headers.append("Host: %s" % hostport)
if "origin" in options:
headers.append("Origin: %s" % options["origin"])
else:
headers.append("Origin: http://%s" % hostport)
key = _create_sec_websocket_key()
headers.append("Sec-WebSocket-Key: %s" % key)
headers.append("Sec-WebSocket-Version: %s" % VERSION)
subprotocols = options.get("subprotocols")
if subprotocols:
headers.append("Sec-WebSocket-Protocol: %s" % ",".join(subprotocols))
if "header" in options:
headers.extend(options["header"])
cookie = options.get("cookie", None)
if cookie:
headers.append("Cookie: %s" % cookie)
headers.append("")
headers.append("")
return headers, key
def _handshake(self, host, port, resource, **options):
headers, key = self._get_handshake_headers(resource, host, port, options)
header_str = "\r\n".join(headers)
self._send(header_str)
_dump("request header", header_str)
resp_headers = self._get_resp_headers()
success = self._validate_header(resp_headers, key, options.get("subprotocols"))
if not success:
self.close()
raise WebSocketException("Invalid WebSocket Header")
self.connected = True
def _validate_header(self, headers, key, subprotocols):
for k, v in _HEADERS_TO_CHECK.items():
r = headers.get(k, None)
if not r:
return False
r = r.lower()
if v != r:
return False
if subprotocols:
subproto = headers.get("sec-websocket-protocol", None)
if not subproto or subproto not in subprotocols:
logger.error("Invalid subprotocol: " + str(subprotocols))
return False
self.subprotocol = subproto
result = headers.get("sec-websocket-accept", None)
if not result:
return False
result = result.lower()
if isinstance(result, six.text_type):
result = result.encode('utf-8')
value = (key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11").encode('utf-8')
hashed = base64encode(hashlib.sha1(value).digest()).strip().lower()
return hashed == result
def _read_headers(self):
status = None
headers = {}
if traceEnabled:
logger.debug("--- response header ---")
while True:
line = self._recv_line()
line = line.decode('utf-8')
if line == "\r\n" or line == "\n":
break
line = line.strip()
if traceEnabled:
logger.debug(line)
if not status:
status_info = line.split(" ", 2)
status = int(status_info[1])
else:
kv = line.split(":", 1)
if len(kv) == 2:
key, value = kv
headers[key.lower()] = value.strip().lower()
else:
raise WebSocketException("Invalid header")
if traceEnabled:
logger.debug("-----------------------")
return status, headers
def send(self, payload, opcode=ABNF.OPCODE_TEXT):
"""
Send the data as string.
payload: Payload must be utf-8 string or unicode,
if the opcode is OPCODE_TEXT.
Otherwise, it must be string(byte array)
opcode: operation code to send. Please see OPCODE_XXX.
"""
frame = ABNF.create_frame(payload, opcode)
return self.send_frame(frame)
def send_frame(self, frame):
"""
Send the data frame.
frame: frame data created by ABNF.create_frame
>>> ws = create_connection("ws://echo.websocket.org/")
>>> frame = ABNF.create_frame("Hello", ABNF.OPCODE_TEXT)
>>> ws.send_frame(frame)
>>> cont_frame = ABNF.create_frame("My name is ", ABNF.OPCODE_CONT, 0)
>>> ws.send_frame(frame)
>>> cont_frame = ABNF.create_frame("Foo Bar", ABNF.OPCODE_CONT, 1)
>>> ws.send_frame(frame)
"""
if self.get_mask_key:
frame.get_mask_key = self.get_mask_key
data = frame.format()
length = len(data)
if traceEnabled:
logger.debug("send: " + repr(data))
with self.lock:
while data:
l = self._send(data)
data = data[l:]
return length
def send_binary(self, payload):
return self.send(payload, ABNF.OPCODE_BINARY)
def ping(self, payload=""):
"""
send ping data.
payload: data payload to send server.
"""
if isinstance(payload, six.text_type):
payload = payload.encode("utf-8")
self.send(payload, ABNF.OPCODE_PING)
def pong(self, payload):
"""
send pong data.
payload: data payload to send server.
"""
if isinstance(payload, six.text_type):
payload = payload.encode("utf-8")
self.send(payload, ABNF.OPCODE_PONG)
def recv(self):
"""
Receive string data(byte array) from the server.
return value: string(byte array) value.
"""
opcode, data = self.recv_data()
if six.PY3 and opcode == ABNF.OPCODE_TEXT:
return data.decode("utf-8")
elif opcode == ABNF.OPCODE_TEXT or opcode == ABNF.OPCODE_BINARY:
return data
else:
return ''
def recv_data(self, control_frame=False):
"""
Receive data with operation code.
control_frame: a boolean flag indicating whether to return control frame
data, defaults to False
return value: tuple of operation code and string(byte array) value.
"""
opcode, frame = self.recv_data_frame(control_frame)
return opcode, frame.data
def recv_data_frame(self, control_frame=False):
"""
Receive data with operation code.
control_frame: a boolean flag indicating whether to return control frame
data, defaults to False
return value: tuple of operation code and string(byte array) value.
"""
while True:
frame = self.recv_frame()
if not frame:
# handle error:
# 'NoneType' object has no attribute 'opcode'
raise WebSocketProtocolException("Not a valid frame %s" % frame)
elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY, ABNF.OPCODE_CONT):
if not self._recving_frames and frame.opcode == ABNF.OPCODE_CONT:
raise WebSocketProtocolException("Illegal frame")
if self._recving_frames and frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
raise WebSocketProtocolException("Illegal frame")
if self._cont_data:
self._cont_data[1] += frame.data
else:
if frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
self._recving_frames = frame.opcode
self._cont_data = [frame.opcode, frame.data]
if frame.fin:
self._recving_frames = None
if frame.fin or self.fire_cont_frame:
data = self._cont_data
self._cont_data = None
frame.data = data[1]
if not self.fire_cont_frame and data[0] == ABNF.OPCODE_TEXT and not validate_utf8(frame.data):
raise WebSocketPayloadException("cannot decode: " + repr(frame.data))
return [data[0], frame]
elif frame.opcode == ABNF.OPCODE_CLOSE:
self.send_close()
return (frame.opcode, frame)
elif frame.opcode == ABNF.OPCODE_PING:
if len(frame.data) < 126:
self.pong(frame.data)
else:
raise WebSocketProtocolException("Ping message is too long")
if control_frame:
return (frame.opcode, frame)
elif frame.opcode == ABNF.OPCODE_PONG:
if control_frame:
return (frame.opcode, frame)
def recv_frame(self):
"""
Receive data as frame from server.
return value: ABNF frame object.
"""
frame_buffer = self._frame_buffer
# Header
if frame_buffer.has_received_header():
frame_buffer.recv_header(self._recv_strict)
(fin, rsv1, rsv2, rsv3, opcode, has_mask, _) = frame_buffer.header
# Frame length
if frame_buffer.has_received_length():
frame_buffer.recv_length(self._recv_strict)
length = frame_buffer.length
# Mask
if frame_buffer.has_received_mask():
frame_buffer.recv_mask(self._recv_strict)
mask = frame_buffer.mask
# Payload
payload = self._recv_strict(length)
if has_mask:
payload = ABNF.mask(mask, payload)
# Reset for next frame
frame_buffer.clear()
frame = ABNF(fin, rsv1, rsv2, rsv3, opcode, has_mask, payload)
frame.validate()
return frame
def send_close(self, status=STATUS_NORMAL, reason=six.b("")):
"""
send close data to the server.
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string or bytes.
"""
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
self.connected = False
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
def close(self, status=STATUS_NORMAL, reason=six.b("")):
"""
Close Websocket object
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string.
"""
if self.connected:
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
try:
self.connected = False
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
timeout = self.sock.gettimeout()
self.sock.settimeout(3)
try:
frame = self.recv_frame()
if logger.isEnabledFor(logging.ERROR):
recv_status = struct.unpack("!H", frame.data)[0]
if recv_status != STATUS_NORMAL:
logger.error("close status: " + repr(recv_status))
except:
pass
self.sock.settimeout(timeout)
self.sock.shutdown(socket.SHUT_RDWR)
except:
pass
self.shutdown()
def abort(self):
"""
Low-level asynchronous abort, wakes up other threads that are waiting in recv_*
"""
if self.connected:
self.sock.shutdown(socket.SHUT_RDWR)
def shutdown(self):
"close socket, immediately."
if self.sock:
self.sock.close()
self.sock = None
self.connected = False
def _send(self, data):
if isinstance(data, six.text_type):
data = data.encode('utf-8')
if not self.sock:
raise WebSocketConnectionClosedException("socket is already closed.")
try:
return self.sock.send(data)
except socket.timeout as e:
message = _extract_err_message(e)
raise WebSocketTimeoutException(message)
except Exception as e:
message = _extract_err_message(e)
if message and "timed out" in message:
raise WebSocketTimeoutException(message)
else:
raise
def _recv(self, bufsize):
if not self.sock:
raise WebSocketConnectionClosedException("socket is already closed.")
try:
bytes = self.sock.recv(bufsize)
except socket.timeout as e:
message = _extract_err_message(e)
raise WebSocketTimeoutException(message)
except SSLError as e:
message = _extract_err_message(e)
if message == "The read operation timed out":
raise WebSocketTimeoutException(message)
else:
raise
if not bytes:
self.sock.close()
self.sock = None
self.connected = False
raise WebSocketConnectionClosedException()
return bytes
def _recv_strict(self, bufsize):
shortage = bufsize - sum(len(x) for x in self._recv_buffer)
while shortage > 0:
bytes = self._recv(shortage)
self._recv_buffer.append(bytes)
shortage -= len(bytes)
unified = six.b("").join(self._recv_buffer)
if shortage == 0:
self._recv_buffer = []
return unified
else:
self._recv_buffer = [unified[bufsize:]]
return unified[:bufsize]
def _recv_line(self):
line = []
while True:
c = self._recv(1)
line.append(c)
if c == six.b("\n"):
break
return six.b("").join(line)
if __name__ == "__main__":
enableTrace(True)
ws = create_connection("ws://echo.websocket.org/")
print("Sending 'Hello, World'...")
ws.send("Hello, World")
print("Sent")
print("Receiving...")
result = ws.recv()
print("Received '%s'" % result)
ws.close()
| {
"content_hash": "6470a3167b855e8cf70c69b471540b95",
"timestamp": "",
"source": "github",
"line_count": 984,
"max_line_length": 114,
"avg_line_length": 32.083333333333336,
"alnum_prop": 0.5698447893569845,
"repo_name": "duncanhawthorne/robot-robot",
"id": "1408e1d7909d930e035f17de0d7446f88198bfdf",
"size": "31570",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "libs/websocket/_core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "129717"
},
{
"name": "Python",
"bytes": "3610613"
}
],
"symlink_target": ""
} |
from django import template
from reversion.models import Version
register = template.Library()
@register.inclusion_tag("includes/last_modified.html")
def last_modified(obj):
"""Get all versions for specific object, display:
"Created on ASD by DSA."
"Last modified on ASD by DSA."
"""
versions = Version.objects.get_for_object(obj).select_related(
"revision", "revision__user"
)
try:
last, *_, created = versions
except ValueError: # either len(versions) == 0 or len(versions) == 1
try:
created = versions[0]
last = None
except IndexError: # len(versions) == 0
created = None
last = None
return {
"created": created,
"last_modified": last,
}
| {
"content_hash": "4bbc5b90ed4587cc66b1661f66d4ce01",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 73,
"avg_line_length": 25.322580645161292,
"alnum_prop": 0.5961783439490446,
"repo_name": "pbanaszkiewicz/amy",
"id": "4a4653321adc034815f989fe5f0c1c09e139bc3c",
"size": "785",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "amy/workshops/templatetags/revisions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5850"
},
{
"name": "Dockerfile",
"bytes": "1034"
},
{
"name": "HTML",
"bytes": "313293"
},
{
"name": "JavaScript",
"bytes": "39427"
},
{
"name": "Makefile",
"bytes": "1780"
},
{
"name": "Python",
"bytes": "2707815"
}
],
"symlink_target": ""
} |
import numpy
from fgivenx.parallel import parallel_apply
from fgivenx.io import CacheException, Cache
def compute_samples(f, x, samples, **kwargs):
r""" Apply f(x,theta) to x array and theta in samples.
Parameters
----------
f: function
list of functions :math:`f(x;\theta)` with dependent variable
:math:`x`, parameterised by :math:`\theta`.
x: 1D array-like
x values to evaluate :math:`f(x;\theta)` at.
samples: 2D array-like
list of theta samples to evaluate :math:`f(x;\theta)` at.
`shape = (nfunc, nsamples, npars)`
parallel, tqdm_kwargs: optional
see docstring for :func:`fgivenx.parallel.parallel_apply`
cache: str, optional
File root for saving previous calculations for re-use
default None
Returns
-------
2D numpy.array:
samples at each x. `shape=(len(x),len(samples),)`
"""
parallel = kwargs.pop('parallel', False)
cache = kwargs.pop('cache', '')
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
if cache:
cache = Cache(cache + '_fsamples')
try:
return cache.check(x, samples)
except CacheException as e:
print(e)
fsamples = []
for fi, s in zip(f, samples):
if len(s) > 0:
fsamps = parallel_apply(fi, s, precurry=(x,), parallel=parallel,
tqdm_kwargs=tqdm_kwargs)
fsamps = numpy.array(fsamps).transpose().copy()
fsamples.append(fsamps)
fsamples = numpy.concatenate(fsamples, axis=1)
if cache:
cache.save(x, samples, fsamples)
return fsamples
def samples_from_getdist_chains(params, file_root, latex=False, **kwargs):
""" Extract samples and weights from getdist chains.
Parameters
----------
params: list(str)
Names of parameters to be supplied to second argument of f(x|theta).
    file_root: str
Root name for getdist chains files. This variable automatically
defines:
- chains_file = file_root.txt
- paramnames_file = file_root.paramnames
        but can be overridden by chains_file or paramnames_file.
latex: bool, optional
Also return an array of latex strings for those paramnames.
Any additional keyword arguments are forwarded onto getdist, e.g:
samples_from_getdist_chains(params, file_root,
settings={'ignore_rows':0.5})
Returns
-------
samples: numpy.array
2D Array of samples. `shape=(len(samples), len(params))`
weights: numpy.array
        Array of sample weights. `shape = (len(samples),)`
latex: list(str), optional
list of latex strings for each parameter
(if latex is provided as an argument)
"""
import getdist
samples = getdist.loadMCSamples(file_root, **kwargs)
weights = samples.weights
indices = [samples.index[p] for p in params]
samps = samples.samples[:, indices]
if latex:
latex = [samples.parLabel(p) for p in params]
return samps, weights, latex
else:
return samps, weights
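if __name__ == "__main__":
    # Minimal illustrative sketch (not part of the original module): evaluate a
    # single linear model f(x; theta) = theta[0]*x + theta[1] on a few
    # fabricated parameter samples.  The model and the numbers are assumptions
    # chosen only to show the calling convention of compute_samples.
    def _demo_f(x, theta):
        return theta[0] * x + theta[1]

    demo_x = numpy.linspace(0, 1, 5)
    demo_thetas = numpy.array([[1.0, 0.0], [2.0, -1.0], [0.5, 0.3]])
    demo_fsamps = compute_samples([_demo_f], demo_x, [demo_thetas])
    print(demo_fsamps.shape)  # expected (len(demo_x), n_samples) == (5, 3)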
| {
"content_hash": "0beb20c3ca193212bc37d5b5c25e8451",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 76,
"avg_line_length": 29.53211009174312,
"alnum_prop": 0.6095060577819198,
"repo_name": "williamjameshandley/fgivenx",
"id": "345daad0de884f576f0a65453cb7de64b9e03b87",
"size": "3219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fgivenx/samples.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67429"
},
{
"name": "TeX",
"bytes": "6533"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from models import Media
admin.site.register(Media)
| {
"content_hash": "0edc2888a84a73bfba83773fe7a63e07",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 32,
"avg_line_length": 21.25,
"alnum_prop": 0.8352941176470589,
"repo_name": "jamesmfriedman/django-primer",
"id": "c35444da7d1e3b747f58d241546ef7b01cee0ad9",
"size": "85",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "primer/media/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "210640"
},
{
"name": "JavaScript",
"bytes": "76063"
},
{
"name": "PHP",
"bytes": "232"
},
{
"name": "Python",
"bytes": "137085"
},
{
"name": "Shell",
"bytes": "4521"
}
],
"symlink_target": ""
} |
"""Syncronizes cell Zookeeper with LDAP data.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import time
import click
from treadmill import cli
from treadmill import context
from treadmill import plugin_manager
from treadmill import zknamespace as z
from treadmill import zkutils
_LOGGER = logging.getLogger(__name__)
_DEFAULT_INTERVAL = 60
def _run_sync(cellsync_plugins, once, interval):
"""Sync Zookeeper with LDAP, runs with lock held.
"""
while True:
# Sync app groups
if not cellsync_plugins:
cellsync_plugins = plugin_manager.names('treadmill.cellsync')
for name in cellsync_plugins:
try:
plugin = plugin_manager.load('treadmill.cellsync', name)
plugin()
except Exception: # pylint: disable=W0703
_LOGGER.exception('Error processing sync plugin: %s', name)
if once:
return
time.sleep(interval)
def init():
"""Return top level command handler.
"""
@click.command()
@click.option('--no-lock', is_flag=True, default=False,
help='Run without lock.')
@click.option('--sync-plugins', default=None, type=cli.LIST,
help='List of plugins to run.')
@click.option('--once', is_flag=True, default=False,
help='Run once.')
@click.option('--interval', type=int, default=_DEFAULT_INTERVAL,
help='Time interval between runs (seconds).')
def top(no_lock, sync_plugins, once, interval):
"""Sync LDAP data with Zookeeper data.
"""
if not no_lock:
_LOGGER.info('Waiting for leader lock.')
lock = zkutils.make_lock(context.GLOBAL.zk.conn,
z.path.election(__name__))
with lock:
_run_sync(sync_plugins, once, interval)
else:
_LOGGER.info('Running without lock.')
_run_sync(sync_plugins, once, interval)
return top
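# Illustrative invocation (hedged sketch; the plugin names below are
# placeholders, and how the click command returned by init() is wired into the
# treadmill CLI is outside this module):
#
#     cellsync --once --interval 30 --sync-plugins my_plugin_a,my_plugin_b
#
# runs each listed 'treadmill.cellsync' plugin a single time while holding the
# leader election lock; pass --no-lock to skip the lock.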
| {
"content_hash": "f6c3980a77884ef798dba1268c0f1add",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 75,
"avg_line_length": 30.070422535211268,
"alnum_prop": 0.6018735362997658,
"repo_name": "Morgan-Stanley/treadmill",
"id": "e770f5f3fe2255dd6b6479e0eaedd4468a0c3d48",
"size": "2135",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python/treadmill/sproc/cellsync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3750"
},
{
"name": "Python",
"bytes": "3372983"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "51646"
}
],
"symlink_target": ""
} |
import logging
import emission.analysis.configs.config_utils as eacc
def get_config(user_id, time_query):
# right now, we are not doing any server side overrides, so we pick the
# last user defined configuration for this user
SYNC_CONFIG_KEY = "config/sync_config"
return eacc.get_last_entry(user_id, time_query, SYNC_CONFIG_KEY)
| {
"content_hash": "dfc4475f78493021e73dc4b8a84a0133",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 75,
"avg_line_length": 43.375,
"alnum_prop": 0.7435158501440923,
"repo_name": "yw374cornell/e-mission-server",
"id": "2eec2ac0bb06c60d988f8fa4951b405d9672e99f",
"size": "347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emission/analysis/configs/sync_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "445"
},
{
"name": "CSS",
"bytes": "717871"
},
{
"name": "HTML",
"bytes": "114875"
},
{
"name": "JavaScript",
"bytes": "7620696"
},
{
"name": "Jupyter Notebook",
"bytes": "97095629"
},
{
"name": "Python",
"bytes": "1584848"
},
{
"name": "Shell",
"bytes": "2299"
},
{
"name": "Smarty",
"bytes": "3456"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class XhoverformatValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="xhoverformat", parent_name="isosurface", **kwargs):
super(XhoverformatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
| {
"content_hash": "b1f1de93d98ff970d8cdc31771c8e85f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 87,
"avg_line_length": 37.72727272727273,
"alnum_prop": 0.6385542168674698,
"repo_name": "plotly/plotly.py",
"id": "e9c582a63b65c79a1e9a9be41dde1d6f4585d98e",
"size": "415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/isosurface/_xhoverformat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import enum
import numpy as np
from itertools import cycle
from src.util.debug_util import assert_type_in_container
from base_grid import BaseGrid, BaseFace
class TriangleGrid(BaseGrid):
"""
    Base class for grids whose faces are equilateral triangles
"""
BAND_TYPE = enum.Enum('BAND_TYPE', 'HORIZON UPPER_RIGHT LOWER_RIGHT')
def __init__(self, vertices, triangle_faces, n_face, n_div, upper_direction,
is_face_assertion_enabled=True):
"""
:type vertices: np.ndarray
        :param vertices: array of vertex coordinates
        :type triangle_faces: list(TriangleFace) or tuple(TriangleFace)
        :param triangle_faces: collection of TriangleFace objects
        :type n_face: int or long
        :param n_face: number of faces
        :type n_div: int or long
        :param n_div: number of subdivisions per face
        :type is_face_assertion_enabled: bool
        :param is_face_assertion_enabled: whether to run assertion checks on the members
        :type upper_direction: (float, float, float)
        :param upper_direction: unit vector giving the grid's upward direction
"""
# assertion
if is_face_assertion_enabled:
assert_type_in_container(triangle_faces, TriangleFace)
super(TriangleGrid, self).__init__(vertices, triangle_faces, n_face,
n_div, upper_direction,
is_face_assertion_enabled=False)
def divide_face(self, n_div, epsilon=np.finfo(float).eps):
"""
        Return a Grid3d object whose faces are divided into the given number of subdivisions.
        :type n_div: int
        :param n_div: number of subdivisions
        :type epsilon: float
        :param epsilon: tolerance used when comparing floating-point coordinates for equality
        :rtype : IcosahedronGrid
        :return : the subdivided Grid3d object
"""
new_vertices = np.empty(shape=(0, 3))
new_grid_faces = []
for grid_face in self.grid_faces:
            # The three corner vertices of the grid face
top_vertex = self.vertices[grid_face.top_vertex_idx()]
left_vertex = self.vertices[grid_face.left_vertex_idx()]
right_vertex = self.vertices[grid_face.right_vertex_idx()]
left_vector = left_vertex - top_vertex
right_vector = right_vertex - top_vertex
            # Create the new face with its vertex information cleared for now
new_face = TriangleFace(grid_face.face_id,
left_face_id=grid_face.left_face_id,
right_face_id=grid_face.right_face_id,
bottom_face_id=grid_face.bottom_face_id,
n_div=n_div)
for sum_length in xrange(n_div + 1):
for i in xrange(sum_length + 1):
alpha = sum_length - i
beta = i
new_vertex = left_vector * float(
alpha) / n_div + right_vector * float(
beta) / n_div + top_vertex
                    # Duplicate check
check_duplicate = (
np.abs(new_vertex - new_vertices) < epsilon).all(axis=1)
if len(new_vertices) > 0 and check_duplicate.any():
v_idx = int(np.argwhere(check_duplicate)[0])
else:
v_idx = len(new_vertices)
new_vertices = np.vstack((new_vertices, new_vertex))
                    # Register the new vertex information
new_face.set_vertex_idx(v_idx, alpha, beta)
new_grid_faces.append(new_face)
return TriangleGrid(new_vertices, new_grid_faces, self.n_face,
n_div, self.upper_direction)
def traverse_band(self, band_type, center_face_id):
"""
        Scan the vertices of the grid in a band and return their indices in scan order.
        The vertex indices correspond to self.vertices.
        :type band_type: BaseGrid.BAND_TYPE
        :param band_type: scan direction of the band
        :type center_face_id: int or long
        :param center_face_id: ID of the face at the centre of the band
        :rtype: list(list)
        :return: vertex indices sorted in scan order
"""
if band_type == TriangleGrid.BAND_TYPE.HORIZON:
direction_loop = cycle(
(BaseFace.UNI_SCAN_DIRECTION.HORIZON,
BaseFace.UNI_SCAN_DIRECTION.HORIZON_REVERSED))
next_fid_func_loop = cycle((lambda face: face.right_face_id,
lambda face: face.left_face_id))
elif band_type == TriangleGrid.BAND_TYPE.UPPER_RIGHT:
direction_loop = cycle(
(BaseFace.UNI_SCAN_DIRECTION.UPPER_LEFT_REVERSED,
BaseFace.UNI_SCAN_DIRECTION.UPPER_LEFT,
BaseFace.UNI_SCAN_DIRECTION.UPPER_LEFT_REVERSED,
BaseFace.UNI_SCAN_DIRECTION.UPPER_LEFT,
BaseFace.UNI_SCAN_DIRECTION.HORIZON_REVERSED,
BaseFace.UNI_SCAN_DIRECTION.UPPER_RIGHT,
BaseFace.UNI_SCAN_DIRECTION.UPPER_RIGHT_REVERSED,
BaseFace.UNI_SCAN_DIRECTION.UPPER_LEFT,
BaseFace.UNI_SCAN_DIRECTION.UPPER_LEFT_REVERSED,
BaseFace.UNI_SCAN_DIRECTION.UPPER_LEFT))
next_fid_func_loop = cycle((lambda face: face.right_face_id,
lambda face: face.bottom_face_id,
lambda face: face.right_face_id,
lambda face: face.bottom_face_id,
lambda face: face.left_face_id,
lambda face: face.left_face_id,
lambda face: face.bottom_face_id,
lambda face: face.bottom_face_id,
lambda face: face.right_face_id,
lambda face: face.bottom_face_id))
elif band_type == TriangleGrid.BAND_TYPE.LOWER_RIGHT:
direction_loop = cycle(
(BaseFace.UNI_SCAN_DIRECTION.UPPER_RIGHT_REVERSED,
BaseFace.UNI_SCAN_DIRECTION.UPPER_RIGHT,
BaseFace.UNI_SCAN_DIRECTION.UPPER_RIGHT_REVERSED,
BaseFace.UNI_SCAN_DIRECTION.UPPER_RIGHT,
BaseFace.UNI_SCAN_DIRECTION.UPPER_LEFT_REVERSED,
BaseFace.UNI_SCAN_DIRECTION.UPPER_LEFT,
BaseFace.UNI_SCAN_DIRECTION.UPPER_RIGHT_REVERSED,
BaseFace.UNI_SCAN_DIRECTION.UPPER_RIGHT,
BaseFace.UNI_SCAN_DIRECTION.UPPER_RIGHT_REVERSED,
BaseFace.UNI_SCAN_DIRECTION.UPPER_RIGHT))
next_fid_func_loop = cycle((lambda face: face.bottom_face_id,
lambda face: face.left_face_id,
lambda face: face.bottom_face_id,
lambda face: face.left_face_id,
lambda face: face.right_face_id,
lambda face: face.bottom_face_id,
lambda face: face.bottom_face_id,
lambda face: face.left_face_id,
lambda face: face.bottom_face_id,
lambda face: face.left_face_id))
else:
raise NotImplementedError
result = [[] for _ in xrange(self.n_div + 1)]
face_id = center_face_id
while True:
direction = direction_loop.next()
face = self.find_face_from_id(face_id)
traversed_rows = face.traverse(direction)
for result_row, traversed_row in zip(result, traversed_rows):
                # Skip the first element to avoid duplicates
result_row += traversed_row[1:]
face_id = next_fid_func_loop.next()(face)
if face_id == center_face_id:
break
return result
class TriangleFace(BaseFace):
"""
    Face class used by TriangleGrid
"""
def __init__(self, face_id, left_face_id, right_face_id, bottom_face_id,
n_div=1, vidx_table=None):
"""
:type face_id: int or long
        :param face_id: ID that uniquely identifies the face
        :type left_face_id: int or long
        :param left_face_id: face_id of the face adjacent to the left edge
        :type right_face_id: int or long
        :param right_face_id: face_id of the face adjacent to the right edge
        :type bottom_face_id: int or long
        :param bottom_face_id: face_id of the face adjacent to the bottom edge
        :type n_div: int or long
        :param n_div: number of subdivisions of the face
        :type vidx_table: dict((int or long, int or long), int or long)
        :param vidx_table: mapping from vertex coordinates (alpha, beta) to vertex indices
"""
super(TriangleFace, self).__init__(face_id, n_div, vidx_table)
self.left_face_id = left_face_id
self.right_face_id = right_face_id
self.bottom_face_id = bottom_face_id
def set_vertex_idx(self, idx, alpha, beta):
"""
        Register a vertex index.
        :type idx: int or long
        :param idx: index of the vertex to register
        :type alpha: int or long
        :param alpha: alpha coordinate
        :type beta: int or long
        :param beta: beta coordinate
"""
assert isinstance(idx, (int, long)) and idx >= 0
assert isinstance(alpha, (int, long)) and 0 <= alpha <= self.n_div
assert isinstance(beta, (int, long)) and 0 <= beta <= self.n_div
self.vidx_table[(alpha, beta)] = idx
def get_vertex_idx(self, alpha, beta):
"""
        Get a vertex index from its coordinates.
        :type alpha: int or long
        :param alpha: alpha coordinate
        :type beta: int or long
        :param beta: beta coordinate
        :rtype: int or long
        :return: vertex index
"""
return self.vidx_table[(alpha, beta)]
def get_coordinates(self, vertex_idx):
"""
        Get the coordinates corresponding to a vertex index.
        :type vertex_idx: int or long
        :param vertex_idx: vertex index
        :rtype: tuple(int, int)
        :return: coordinates of the vertex within the face
"""
return [k for k, v in self.vidx_table.items() if v == vertex_idx]
def vidx_table_as_copy(self):
"""
        Return a copy of this face's vidx_table.
        :rtype: dict((int or long, int or long), int or long)
        :return: mapping from vertex coordinates (alpha, beta) to vertex indices
"""
return dict(self.vidx_table)
def top_vertex_idx(self):
"""
        Get the index of the vertex at alpha=0, beta=0 in the face.
        :rtype: int
        :return: vertex index
"""
return self.get_vertex_idx(0, 0)
def left_vertex_idx(self):
"""
        Get the index of the vertex at alpha=n_div, beta=0 in the face.
        :rtype: int
        :return: vertex index
"""
return self.get_vertex_idx(self.n_div, 0)
def right_vertex_idx(self):
"""
        Get the index of the vertex at alpha=0, beta=n_div in the face.
        :rtype: int
        :return: vertex index
"""
return self.get_vertex_idx(0, self.n_div)
def traverse(self, direction):
"""
        Scan the vertex indices of a single face in the given direction and return them as a nested list.
        :type direction: icosahedronface.direction
        :param direction: scan direction
        :rtype: list(list(int))
        :return: nested list of vertex indices
"""
if direction == TriangleFace.UNI_SCAN_DIRECTION.HORIZON:
coordinates = self.__horizon_row_coordinates
is_reversed = False
elif direction == TriangleFace.UNI_SCAN_DIRECTION.UPPER_RIGHT:
coordinates = self.__upper_right_row_coordinates
is_reversed = False
elif direction == TriangleFace.UNI_SCAN_DIRECTION.UPPER_LEFT:
coordinates = self.__upper_left_row_coordinates
is_reversed = False
elif direction == TriangleFace.UNI_SCAN_DIRECTION.HORIZON_REVERSED:
coordinates = self.__horizon_row_coordinates
is_reversed = True
elif direction == TriangleFace.UNI_SCAN_DIRECTION.UPPER_RIGHT_REVERSED:
coordinates = self.__upper_right_row_coordinates
is_reversed = True
elif direction == TriangleFace.UNI_SCAN_DIRECTION.UPPER_LEFT_REVERSED:
coordinates = self.__upper_left_row_coordinates
is_reversed = True
else:
raise KeyError
rows = xrange(self.n_div, -1, -1) if is_reversed \
else xrange(self.n_div + 1)
return [[self.get_vertex_idx(alpha, beta)
for alpha, beta in zip(*coordinates(row, is_reversed))]
for row in rows]
def __horizon_row_coordinates(self, row, is_reversed):
"""
        Return coordinate arrays ordered for traversing the vertices of a given row horizontally across the face.
        :type row: int
        :param row: the row currently under consideration
        :type is_reversed: bool
        :param is_reversed: whether the face is upside down relative to the
                            grid's reference up vector upper_direction
        :rtype: list(list(int), list(int))
        :return: coordinate arrays for alpha and beta
"""
alpha = xrange(row, -1, -1)
beta = xrange(row + 1)
if is_reversed:
alpha = reversed(list(alpha))
beta = reversed(list(beta))
return alpha, beta
def __upper_right_row_coordinates(self, row, is_reversed):
"""
        Return coordinate arrays ordered for traversing the vertices of a given row toward the upper right of the face.
        :type row: int
        :param row: the row currently under consideration
        :type is_reversed: bool
        :param is_reversed: whether the face is upside down relative to the
                            grid's reference up vector upper_direction
        :rtype: list(list(int), list(int))
        :return: coordinate arrays for alpha and beta
"""
alpha = [self.n_div - row for i in xrange(row + 1)]
beta = xrange(row, -1, -1)
if is_reversed:
alpha = reversed(alpha)
beta = reversed(list(beta))
return alpha, beta
def __upper_left_row_coordinates(self, row, is_reversed):
"""
        Return coordinate arrays ordered for traversing the vertices of a given row toward the upper left of the face.
        :type row: int
        :param row: the row currently under consideration
        :type is_reversed: bool
        :param is_reversed: whether the face is upside down relative to the
                            grid's reference up vector upper_direction
        :rtype: list(list(int), list(int))
        :return: coordinate arrays for alpha and beta
"""
alpha = xrange(row + 1)
beta = [self.n_div - row for _ in xrange(row + 1)]
if is_reversed:
alpha = reversed(list(alpha))
beta = reversed(beta)
return alpha, beta
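if __name__ == "__main__":
    # Minimal illustrative sketch (not part of the original module): build one
    # undivided TriangleFace, register its three corner vertices and read them
    # back.  The face/vertex ids are arbitrary placeholders, and this assumes
    # BaseFace initialises an empty vidx_table when none is supplied.
    demo_face = TriangleFace(face_id=0, left_face_id=1, right_face_id=2,
                             bottom_face_id=3, n_div=1)
    demo_face.set_vertex_idx(0, 0, 0)  # top vertex    (alpha=0, beta=0)
    demo_face.set_vertex_idx(1, 1, 0)  # left vertex   (alpha=n_div, beta=0)
    demo_face.set_vertex_idx(2, 0, 1)  # right vertex  (alpha=0, beta=n_div)
    print("corner indices: %d %d %d" % (demo_face.top_vertex_idx(),
                                        demo_face.left_vertex_idx(),
                                        demo_face.right_vertex_idx()))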
| {
"content_hash": "7d7cde7f7951eeefc017e48bb3b3377f",
"timestamp": "",
"source": "github",
"line_count": 439,
"max_line_length": 80,
"avg_line_length": 32.17767653758542,
"alnum_prop": 0.5364575959224126,
"repo_name": "kanairen/RegularIcosahedronDict",
"id": "93d111815676c73c86367362043323c3aec55707",
"size": "15805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/obj/grid/triangle_grid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "97663"
}
],
"symlink_target": ""
} |
import logging
import math
import time
from django.utils import timezone
from modularodm import Q
from oauthlib.oauth2 import OAuth2Error
from dateutil.relativedelta import relativedelta
from framework.celery_tasks import app as celery_app
from scripts import utils as scripts_utils
from website.app import init_app
from addons.box.models import Provider as Box
from addons.googledrive.models import GoogleDriveProvider
from addons.mendeley.models import Mendeley
from website.oauth.models import ExternalAccount
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
PROVIDER_CLASSES = (Box, GoogleDriveProvider, Mendeley, )
def look_up_provider(addon_short_name):
for Provider in PROVIDER_CLASSES:
if Provider.short_name == addon_short_name:
return Provider
return None
def get_targets(delta, addon_short_name):
# NOTE: expires_at is the access_token's expiration date,
# NOT the refresh token's
return ExternalAccount.find(
Q('expires_at', 'lt', timezone.now() - delta) &
Q('date_last_refreshed', 'lt', timezone.now() - delta) &
Q('provider', 'eq', addon_short_name)
)
def main(delta, Provider, rate_limit, dry_run):
allowance = rate_limit[0]
last_call = time.time()
for record in get_targets(delta, Provider.short_name):
if Provider(record).has_expired_credentials:
logger.info(
'Found expired record {}, skipping'.format(record.__repr__())
)
continue
logger.info(
'Refreshing tokens on record {0}; expires at {1}'.format(
record.__repr__(),
record.expires_at.strftime('%c')
)
)
if not dry_run:
if allowance < 1:
try:
time.sleep(rate_limit[1] - (time.time() - last_call))
except (ValueError, IOError):
pass # Value/IOError indicates negative sleep time in Py 3.5/2.7, respectively
allowance = rate_limit[0]
allowance -= 1
last_call = time.time()
success = False
try:
success = Provider(record).refresh_oauth_key(force=True)
except OAuth2Error as e:
logger.error(e)
else:
logger.info(
'Status of record {}: {}'.format(
record.__repr__(),
'SUCCESS' if success else 'FAILURE')
)
@celery_app.task(name='scripts.refresh_addon_tokens')
def run_main(addons=None, rate_limit=(5, 1), dry_run=True):
"""
:param dict addons: of form {'<addon_short_name>': int(<refresh_token validity duration in days>)}
:param tuple rate_limit: of form (<requests>, <seconds>). Default is five per second
"""
init_app(set_backends=True, routes=False)
if not dry_run:
scripts_utils.add_file_logger(logger, __file__)
for addon in addons:
days = math.ceil(int(addons[addon])*0.75)
delta = relativedelta(days=days)
Provider = look_up_provider(addon)
if not Provider:
logger.error('Unable to find Provider class for addon {}'.format(addon))
else:
main(delta, Provider, rate_limit, dry_run=dry_run)
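# Illustrative call (hedged sketch; the addon short name and the 60-day token
# lifetime are placeholders, not values taken from this script):
#
#     run_main(addons={'box': 60}, rate_limit=(5, 1), dry_run=True)
#
# looks for Box accounts not refreshed in the last ceil(60 * 0.75) = 45 days,
# throttles to at most 5 refresh attempts per second, and only logs what it
# would do because dry_run is True.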
| {
"content_hash": "17fe9b4a698f155434e16012776e5aa6",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 102,
"avg_line_length": 34.708333333333336,
"alnum_prop": 0.6071428571428571,
"repo_name": "acshi/osf.io",
"id": "9c1cc8623a1a85fd6dcfd1d4466dabb06470454c",
"size": "3373",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "scripts/refresh_addon_tokens.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "176516"
},
{
"name": "HTML",
"bytes": "181969"
},
{
"name": "JavaScript",
"bytes": "2017102"
},
{
"name": "Mako",
"bytes": "756427"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "8555915"
},
{
"name": "Shell",
"bytes": "436"
}
],
"symlink_target": ""
} |
"""
The figure module provides the top-level
:class:`~matplotlib.artist.Artist`, the :class:`Figure`, which
contains all the plot elements. The following classes are defined
:class:`SubplotParams`
control the default spacing of the subplots
:class:`Figure`
top level container for all plot elements
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
from operator import itemgetter
import numpy as np
from matplotlib import rcParams
from matplotlib import docstring
from matplotlib import __version__ as _mpl_version
import matplotlib.artist as martist
from matplotlib.artist import Artist, allow_rasterization
import matplotlib.cbook as cbook
from matplotlib.cbook import Stack, iterable
from matplotlib import _image
from matplotlib.image import FigureImage
import matplotlib.colorbar as cbar
from matplotlib.axes import Axes, SubplotBase, subplot_class_factory
from matplotlib.blocking_input import BlockingMouseInput, BlockingKeyMouseInput
from matplotlib.legend import Legend
from matplotlib.patches import Rectangle
from matplotlib.projections import (get_projection_names,
process_projection_requirements)
from matplotlib.text import Text, _process_text_args
from matplotlib.transforms import (Affine2D, Bbox, BboxTransformTo,
TransformedBbox)
from matplotlib.backend_bases import NonGuiException
docstring.interpd.update(projection_names=get_projection_names())
class AxesStack(Stack):
"""
Specialization of the Stack to handle all tracking of Axes in a Figure.
This stack stores ``key, (ind, axes)`` pairs, where:
* **key** should be a hash of the args and kwargs
used in generating the Axes.
* **ind** is a serial number for tracking the order
in which axes were added.
The AxesStack is a callable, where ``ax_stack()`` returns
the current axes. Alternatively the :meth:`current_key_axes` will
return the current key and associated axes.
"""
def __init__(self):
Stack.__init__(self)
self._ind = 0
def as_list(self):
"""
Return a list of the Axes instances that have been added to the figure
"""
ia_list = [a for k, a in self._elements]
ia_list.sort()
return [a for i, a in ia_list]
def get(self, key):
"""
Return the Axes instance that was added with *key*.
If it is not present, return None.
"""
item = dict(self._elements).get(key)
if item is None:
return None
return item[1]
def _entry_from_axes(self, e):
ind, k = dict([(a, (ind, k)) for (k, (ind, a)) in self._elements])[e]
return (k, (ind, e))
def remove(self, a):
"""Remove the axes from the stack."""
Stack.remove(self, self._entry_from_axes(a))
def bubble(self, a):
"""
Move the given axes, which must already exist in the
stack, to the top.
"""
return Stack.bubble(self, self._entry_from_axes(a))
def add(self, key, a):
"""
Add Axes *a*, with key *key*, to the stack, and return the stack.
If *a* is already on the stack, don't add it again, but
return *None*.
"""
# All the error checking may be unnecessary; but this method
# is called so seldom that the overhead is negligible.
if not isinstance(a, Axes):
raise ValueError("second argument, %s, is not an Axes" % a)
try:
hash(key)
except TypeError:
raise ValueError("first argument, %s, is not a valid key" % key)
a_existing = self.get(key)
if a_existing is not None:
Stack.remove(self, (key, a_existing))
warnings.warn(
"key %s already existed; Axes is being replaced" % key)
# I don't think the above should ever happen.
if a in self:
return None
self._ind += 1
return Stack.push(self, (key, (self._ind, a)))
def current_key_axes(self):
"""
Return a tuple of ``(key, axes)`` for the active axes.
If no axes exists on the stack, then returns ``(None, None)``.
"""
if not len(self._elements):
return self._default, self._default
else:
key, (index, axes) = self._elements[self._pos]
return key, axes
def __call__(self):
return self.current_key_axes()[1]
def __contains__(self, a):
return a in self.as_list()
class SubplotParams:
"""
A class to hold the parameters for a subplot
"""
def __init__(self, left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None):
"""
All dimensions are fraction of the figure width or height.
All values default to their rc params
The following attributes are available
*left* : 0.125
The left side of the subplots of the figure
*right* : 0.9
The right side of the subplots of the figure
*bottom* : 0.1
The bottom of the subplots of the figure
*top* : 0.9
The top of the subplots of the figure
*wspace* : 0.2
The amount of width reserved for blank space between subplots
*hspace* : 0.2
The amount of height reserved for white space between subplots
"""
self.validate = True
self.update(left, bottom, right, top, wspace, hspace)
def update(self, left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None):
"""
Update the current values. If any kwarg is None, default to
the current value, if set, otherwise to rc
"""
thisleft = getattr(self, 'left', None)
thisright = getattr(self, 'right', None)
thistop = getattr(self, 'top', None)
thisbottom = getattr(self, 'bottom', None)
thiswspace = getattr(self, 'wspace', None)
thishspace = getattr(self, 'hspace', None)
self._update_this('left', left)
self._update_this('right', right)
self._update_this('bottom', bottom)
self._update_this('top', top)
self._update_this('wspace', wspace)
self._update_this('hspace', hspace)
def reset():
self.left = thisleft
self.right = thisright
self.top = thistop
self.bottom = thisbottom
self.wspace = thiswspace
self.hspace = thishspace
if self.validate:
if self.left >= self.right:
reset()
raise ValueError('left cannot be >= right')
if self.bottom >= self.top:
reset()
raise ValueError('bottom cannot be >= top')
def _update_this(self, s, val):
if val is None:
val = getattr(self, s, None)
if val is None:
key = 'figure.subplot.' + s
val = rcParams[key]
setattr(self, s, val)
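# Illustrative usage (comment added for exposition, not part of the original
# module): subplot spacing can be fixed at figure-creation time, e.g.
#
#     fig = Figure(subplotpars=SubplotParams(left=0.2, wspace=0.4))
#
# and adjusted later through ``fig.subplots_adjust(left=..., wspace=...)``,
# which updates the same SubplotParams instance.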
class Figure(Artist):
"""
The Figure instance supports callbacks through a *callbacks*
attribute which is a :class:`matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'dpi_changed', and
the callback will be called with ``func(fig)`` where fig is the
:class:`Figure` instance.
*patch*
The figure patch is drawn by a
:class:`matplotlib.patches.Rectangle` instance
*suppressComposite*
For multiple figure images, the figure will make composite
images depending on the renderer option_image_nocomposite
function. If suppressComposite is True|False, this will
override the renderer.
"""
def __str__(self):
return "Figure(%gx%g)" % tuple(self.bbox.size)
def __init__(self,
figsize=None, # defaults to rc figure.figsize
dpi=None, # defaults to rc figure.dpi
facecolor=None, # defaults to rc figure.facecolor
edgecolor=None, # defaults to rc figure.edgecolor
linewidth=0.0, # the default linewidth of the frame
frameon=None, # whether or not to draw the figure frame
subplotpars=None, # default to rc
tight_layout=None, # default to rc figure.autolayout
):
"""
*figsize*
w,h tuple in inches
*dpi*
Dots per inch
*facecolor*
The figure patch facecolor; defaults to rc ``figure.facecolor``
*edgecolor*
The figure patch edge color; defaults to rc ``figure.edgecolor``
*linewidth*
The figure patch edge linewidth; the default linewidth of the frame
*frameon*
If *False*, suppress drawing the figure frame
*subplotpars*
A :class:`SubplotParams` instance, defaults to rc
*tight_layout*
If *False* use *subplotpars*; if *True* adjust subplot
parameters using :meth:`tight_layout` with default padding.
When providing a dict containing the keys `pad`, `w_pad`, `h_pad`
and `rect`, the default :meth:`tight_layout` paddings will be
overridden.
Defaults to rc ``figure.autolayout``.
"""
Artist.__init__(self)
self.callbacks = cbook.CallbackRegistry()
if figsize is None:
figsize = rcParams['figure.figsize']
if dpi is None:
dpi = rcParams['figure.dpi']
if facecolor is None:
facecolor = rcParams['figure.facecolor']
if edgecolor is None:
edgecolor = rcParams['figure.edgecolor']
if frameon is None:
frameon = rcParams['figure.frameon']
self.dpi_scale_trans = Affine2D()
self.dpi = dpi
self.bbox_inches = Bbox.from_bounds(0, 0, *figsize)
self.bbox = TransformedBbox(self.bbox_inches, self.dpi_scale_trans)
self.frameon = frameon
self.transFigure = BboxTransformTo(self.bbox)
# the figurePatch name is deprecated
self.patch = self.figurePatch = Rectangle(
xy=(0, 0), width=1, height=1,
facecolor=facecolor, edgecolor=edgecolor,
linewidth=linewidth)
self._set_artist_props(self.patch)
self.patch.set_aa(False)
self._hold = rcParams['axes.hold']
self.canvas = None
self._suptitle = None
if subplotpars is None:
subplotpars = SubplotParams()
self.subplotpars = subplotpars
self.set_tight_layout(tight_layout)
self._axstack = AxesStack() # track all figure axes and current axes
self.clf()
self._cachedRenderer = None
# TODO: I'd like to dynamically add the _repr_html_ method
# to the figure in the right context, but then IPython doesn't
# use it, for some reason.
def _repr_html_(self):
# We can't use "isinstance" here, because then we'd end up importing
        # webagg unconditionally.
if (self.canvas is not None and
'WebAgg' in self.canvas.__class__.__name__):
from matplotlib.backends import backend_webagg
return backend_webagg.ipython_inline_display(self)
def show(self, warn=True):
"""
If using a GUI backend with pyplot, display the figure window.
If the figure was not created using
:func:`~matplotlib.pyplot.figure`, it will lack a
:class:`~matplotlib.backend_bases.FigureManagerBase`, and
will raise an AttributeError.
For non-GUI backends, this does nothing, in which case
a warning will be issued if *warn* is True (default).
"""
try:
manager = getattr(self.canvas, 'manager')
except AttributeError as err:
raise AttributeError("%s\n"
"Figure.show works only "
"for figures managed by pyplot, normally "
"created by pyplot.figure()." % err)
if manager is not None:
try:
manager.show()
return
except NonGuiException:
pass
if warn:
import warnings
warnings.warn(
"matplotlib is currently using a non-GUI backend, "
"so cannot show the figure")
def _get_axes(self):
return self._axstack.as_list()
axes = property(fget=_get_axes, doc="Read-only: list of axes in Figure")
def _get_dpi(self):
return self._dpi
def _set_dpi(self, dpi):
self._dpi = dpi
self.dpi_scale_trans.clear().scale(dpi, dpi)
self.callbacks.process('dpi_changed', self)
dpi = property(_get_dpi, _set_dpi)
def get_tight_layout(self):
"""
        Return the Boolean flag, True to use :meth:`tight_layout` when drawing.
"""
return self._tight
def set_tight_layout(self, tight):
"""
Set whether :meth:`tight_layout` is used upon drawing.
If None, the rcParams['figure.autolayout'] value will be set.
When providing a dict containing the keys `pad`, `w_pad`, `h_pad`
and `rect`, the default :meth:`tight_layout` paddings will be
overridden.
ACCEPTS: [True | False | dict | None ]
"""
if tight is None:
tight = rcParams['figure.autolayout']
self._tight = bool(tight)
self._tight_parameters = tight if isinstance(tight, dict) else {}
def autofmt_xdate(self, bottom=0.2, rotation=30, ha='right'):
"""
Date ticklabels often overlap, so it is useful to rotate them
and right align them. Also, a common use case is a number of
subplots with shared xaxes where the x-axis is date data. The
ticklabels are often long, and it helps to rotate them on the
bottom subplot and turn them off on other subplots, as well as
turn off xlabels.
*bottom*
The bottom of the subplots for :meth:`subplots_adjust`
*rotation*
The rotation of the xtick labels
*ha*
The horizontal alignment of the xticklabels
"""
allsubplots = np.alltrue([hasattr(ax, 'is_last_row') for ax
in self.axes])
if len(self.axes) == 1:
for label in self.axes[0].get_xticklabels():
label.set_ha(ha)
label.set_rotation(rotation)
else:
if allsubplots:
for ax in self.get_axes():
if ax.is_last_row():
for label in ax.get_xticklabels():
label.set_ha(ha)
label.set_rotation(rotation)
else:
for label in ax.get_xticklabels():
label.set_visible(False)
ax.set_xlabel('')
if allsubplots:
self.subplots_adjust(bottom=bottom)
def get_children(self):
'get a list of artists contained in the figure'
children = [self.patch]
children.extend(self.artists)
children.extend(self.axes)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.images)
children.extend(self.legends)
return children
def contains(self, mouseevent):
"""
Test whether the mouse event occurred on the figure.
Returns True,{}
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
# inside = mouseevent.x >= 0 and mouseevent.y >= 0
inside = self.bbox.contains(mouseevent.x, mouseevent.y)
return inside, {}
def get_window_extent(self, *args, **kwargs):
'get the figure bounding box in display space; kwargs are void'
return self.bbox
def suptitle(self, t, **kwargs):
"""
Add a centered title to the figure.
kwargs are :class:`matplotlib.text.Text` properties. Using figure
coordinates, the defaults are:
*x* : 0.5
The x location of the text in figure coords
*y* : 0.98
The y location of the text in figure coords
*horizontalalignment* : 'center'
The horizontal alignment of the text
*verticalalignment* : 'top'
The vertical alignment of the text
A :class:`matplotlib.text.Text` instance is returned.
Example::
fig.suptitle('this is the figure title', fontsize=12)
"""
x = kwargs.pop('x', 0.5)
y = kwargs.pop('y', 0.98)
if ('horizontalalignment' not in kwargs) and ('ha' not in kwargs):
kwargs['horizontalalignment'] = 'center'
if ('verticalalignment' not in kwargs) and ('va' not in kwargs):
kwargs['verticalalignment'] = 'top'
sup = self.text(x, y, t, **kwargs)
if self._suptitle is not None:
self._suptitle.set_text(t)
self._suptitle.set_position((x, y))
self._suptitle.update_from(sup)
sup.remove()
else:
self._suptitle = sup
return self._suptitle
def set_canvas(self, canvas):
"""
        Set the canvas that contains the figure
ACCEPTS: a FigureCanvas instance
"""
self.canvas = canvas
def hold(self, b=None):
"""
Set the hold state. If hold is None (default), toggle the
hold state. Else set the hold state to boolean value b.
e.g.::
hold() # toggle hold
hold(True) # hold is on
hold(False) # hold is off
"""
if b is None:
self._hold = not self._hold
else:
self._hold = b
def figimage(self, X,
xo=0,
yo=0,
alpha=None,
norm=None,
cmap=None,
vmin=None,
vmax=None,
origin=None,
**kwargs):
"""
Adds a non-resampled image to the figure.
call signatures::
figimage(X, **kwargs)
adds a non-resampled array *X* to the figure.
::
figimage(X, xo, yo)
with pixel offsets *xo*, *yo*,
*X* must be a float array:
* If *X* is MxN, assume luminance (grayscale)
* If *X* is MxNx3, assume RGB
* If *X* is MxNx4, assume RGBA
Optional keyword arguments:
========= =========================================================
Keyword Description
========= =========================================================
xo or yo An integer, the *x* and *y* image offset in pixels
cmap a :class:`matplotlib.colors.Colormap` instance, e.g.,
cm.jet. If *None*, default to the rc ``image.cmap``
value
norm a :class:`matplotlib.colors.Normalize` instance. The
default is normalization(). This scales luminance -> 0-1
vmin|vmax are used to scale a luminance image to 0-1. If either
is *None*, the min and max of the luminance values will
be used. Note if you pass a norm instance, the settings
for *vmin* and *vmax* will be ignored.
alpha the alpha blending value, default is *None*
origin [ 'upper' | 'lower' ] Indicates where the [0,0] index of
the array is in the upper left or lower left corner of
the axes. Defaults to the rc image.origin value
========= =========================================================
figimage complements the axes image
(:meth:`~matplotlib.axes.Axes.imshow`) which will be resampled
to fit the current axes. If you want a resampled image to
fill the entire figure, you can define an
:class:`~matplotlib.axes.Axes` with size [0,1,0,1].
An :class:`matplotlib.image.FigureImage` instance is returned.
.. plot:: mpl_examples/pylab_examples/figimage_demo.py
Additional kwargs are Artist kwargs passed on to
:class:`~matplotlib.image.FigureImage`
"""
if not self._hold:
self.clf()
im = FigureImage(self, cmap, norm, xo, yo, origin, **kwargs)
im.set_array(X)
im.set_alpha(alpha)
if norm is None:
im.set_clim(vmin, vmax)
self.images.append(im)
im._remove_method = lambda h: self.images.remove(h)
return im
def set_size_inches(self, *args, **kwargs):
"""
set_size_inches(w,h, forward=False)
Set the figure size in inches (1in == 2.54cm)
Usage::
fig.set_size_inches(w,h) # OR
fig.set_size_inches((w,h) )
optional kwarg *forward=True* will cause the canvas size to be
automatically updated; e.g., you can resize the figure window
from the shell
ACCEPTS: a w,h tuple with w,h in inches
See Also
--------
matplotlib.Figure.get_size_inches
"""
forward = kwargs.get('forward', False)
if len(args) == 1:
w, h = args[0]
else:
w, h = args
dpival = self.dpi
self.bbox_inches.p1 = w, h
if forward:
dpival = self.dpi
canvasw = w * dpival
canvash = h * dpival
manager = getattr(self.canvas, 'manager', None)
if manager is not None:
manager.resize(int(canvasw), int(canvash))
def get_size_inches(self):
"""
Returns the current size of the figure in inches (1in == 2.54cm)
        as a numpy array.
Returns
-------
size : ndarray
The size of the figure in inches
See Also
--------
matplotlib.Figure.set_size_inches
"""
return np.array(self.bbox_inches.p1)
def get_edgecolor(self):
'Get the edge color of the Figure rectangle'
return self.patch.get_edgecolor()
def get_facecolor(self):
'Get the face color of the Figure rectangle'
return self.patch.get_facecolor()
def get_figwidth(self):
'Return the figwidth as a float'
return self.bbox_inches.width
def get_figheight(self):
'Return the figheight as a float'
return self.bbox_inches.height
def get_dpi(self):
'Return the dpi as a float'
return self.dpi
def get_frameon(self):
'get the boolean indicating frameon'
return self.frameon
def set_edgecolor(self, color):
"""
Set the edge color of the Figure rectangle
ACCEPTS: any matplotlib color - see help(colors)
"""
self.patch.set_edgecolor(color)
def set_facecolor(self, color):
"""
Set the face color of the Figure rectangle
ACCEPTS: any matplotlib color - see help(colors)
"""
self.patch.set_facecolor(color)
def set_dpi(self, val):
"""
Set the dots-per-inch of the figure
ACCEPTS: float
"""
self.dpi = val
def set_figwidth(self, val):
"""
Set the width of the figure in inches
ACCEPTS: float
"""
self.bbox_inches.x1 = val
def set_figheight(self, val):
"""
Set the height of the figure in inches
ACCEPTS: float
"""
self.bbox_inches.y1 = val
def set_frameon(self, b):
"""
Set whether the figure frame (background) is displayed or invisible
ACCEPTS: boolean
"""
self.frameon = b
def delaxes(self, a):
'remove a from the figure and update the current axes'
self._axstack.remove(a)
for func in self._axobservers:
func(self)
def _make_key(self, *args, **kwargs):
'make a hashable key out of args and kwargs'
def fixitems(items):
#items may have arrays and lists in them, so convert them
# to tuples for the key
ret = []
for k, v in items:
# some objects can define __getitem__ without being
# iterable and in those cases the conversion to tuples
# will fail. So instead of using the iterable(v) function
# we simply try and convert to a tuple, and proceed if not.
try:
v = tuple(v)
except Exception:
pass
ret.append((k, v))
return tuple(ret)
def fixlist(args):
ret = []
for a in args:
if iterable(a):
a = tuple(a)
ret.append(a)
return tuple(ret)
key = fixlist(args), fixitems(six.iteritems(kwargs))
return key
@docstring.dedent_interpd
def add_axes(self, *args, **kwargs):
"""
Add an axes at position *rect* [*left*, *bottom*, *width*,
*height*] where all quantities are in fractions of figure
width and height. kwargs are legal
:class:`~matplotlib.axes.Axes` kwargs plus *projection* which
sets the projection type of the axes. (For backward
compatibility, ``polar=True`` may also be provided, which is
equivalent to ``projection='polar'``). Valid values for
*projection* are: %(projection_names)s. Some of these
projections support additional kwargs, which may be provided
to :meth:`add_axes`. Typical usage::
rect = l,b,w,h
fig.add_axes(rect)
fig.add_axes(rect, frameon=False, axisbg='g')
fig.add_axes(rect, polar=True)
fig.add_axes(rect, projection='polar')
fig.add_axes(ax)
If the figure already has an axes with the same parameters,
then it will simply make that axes current and return it. If
you do not want this behavior, e.g., you want to force the
creation of a new Axes, you must use a unique set of args and
kwargs. The axes :attr:`~matplotlib.axes.Axes.label`
attribute has been exposed for this purpose. e.g., if you want
two axes that are otherwise identical to be added to the
figure, make sure you give them unique labels::
fig.add_axes(rect, label='axes1')
fig.add_axes(rect, label='axes2')
In rare circumstances, add_axes may be called with a single
argument, an Axes instance already created in the present
figure but not in the figure's list of axes. For example,
if an axes has been removed with :meth:`delaxes`, it can
be restored with::
fig.add_axes(ax)
In all cases, the :class:`~matplotlib.axes.Axes` instance
will be returned.
In addition to *projection*, the following kwargs are supported:
%(Axes)s
"""
if not len(args):
return
# shortcut the projection "key" modifications later on, if an axes
# with the exact args/kwargs exists, return it immediately.
key = self._make_key(*args, **kwargs)
ax = self._axstack.get(key)
if ax is not None:
self.sca(ax)
return ax
if isinstance(args[0], Axes):
a = args[0]
assert(a.get_figure() is self)
else:
rect = args[0]
projection_class, kwargs, key = process_projection_requirements(
self, *args, **kwargs)
# check that an axes of this type doesn't already exist, if it
# does, set it as active and return it
ax = self._axstack.get(key)
if ax is not None and isinstance(ax, projection_class):
self.sca(ax)
return ax
# create the new axes using the axes class given
a = projection_class(self, rect, **kwargs)
self._axstack.add(key, a)
self.sca(a)
return a
@docstring.dedent_interpd
def add_subplot(self, *args, **kwargs):
"""
Add a subplot. Examples::
fig.add_subplot(111)
# equivalent but more general
fig.add_subplot(1,1,1)
# add subplot with red background
fig.add_subplot(212, axisbg='r')
# add a polar subplot
fig.add_subplot(111, projection='polar')
# add Subplot instance sub
fig.add_subplot(sub)
*kwargs* are legal :class:`~matplotlib.axes.Axes` kwargs plus
*projection*, which chooses a projection type for the axes.
(For backward compatibility, *polar=True* may also be
provided, which is equivalent to *projection='polar'*). Valid
values for *projection* are: %(projection_names)s. Some of
these projections
support additional *kwargs*, which may be provided to
:meth:`add_axes`.
The :class:`~matplotlib.axes.Axes` instance will be returned.
If the figure already has a subplot with key (*args*,
*kwargs*) then it will simply make that subplot current and
return it.
.. seealso:: :meth:`~matplotlib.pyplot.subplot` for an
explanation of the args.
The following kwargs are supported:
%(Axes)s
"""
if not len(args):
return
if len(args) == 1 and isinstance(args[0], int):
args = tuple([int(c) for c in str(args[0])])
if len(args) != 3:
raise ValueError("Integer subplot specification must " +
"be a three digit number. " +
"Not {n:d}".format(n=len(args)))
if isinstance(args[0], SubplotBase):
a = args[0]
assert(a.get_figure() is self)
# make a key for the subplot (which includes the axes object id
# in the hash)
key = self._make_key(*args, **kwargs)
else:
projection_class, kwargs, key = process_projection_requirements(
self, *args, **kwargs)
# try to find the axes with this key in the stack
ax = self._axstack.get(key)
if ax is not None:
if isinstance(ax, projection_class):
# the axes already existed, so set it as active & return
self.sca(ax)
return ax
else:
# Undocumented convenience behavior:
# subplot(111); subplot(111, projection='polar')
# will replace the first with the second.
# Without this, add_subplot would be simpler and
# more similar to add_axes.
self._axstack.remove(ax)
a = subplot_class_factory(projection_class)(self, *args, **kwargs)
self._axstack.add(key, a)
self.sca(a)
return a
def clf(self, keep_observers=False):
"""
Clear the figure.
Set *keep_observers* to True if, for example,
a gui widget is tracking the axes in the figure.
"""
self.suppressComposite = None
self.callbacks = cbook.CallbackRegistry()
for ax in tuple(self.axes): # Iterate over the copy.
ax.cla()
self.delaxes(ax) # removes ax from self._axstack
toolbar = getattr(self.canvas, 'toolbar', None)
if toolbar is not None:
toolbar.update()
self._axstack.clear()
self.artists = []
self.lines = []
self.patches = []
self.texts = []
self.images = []
self.legends = []
if not keep_observers:
self._axobservers = []
self._suptitle = None
def clear(self):
"""
Clear the figure -- synonym for :meth:`clf`.
"""
self.clf()
@allow_rasterization
def draw(self, renderer):
"""
Render the figure using :class:`matplotlib.backend_bases.RendererBase`
instance *renderer*.
"""
# draw the figure bounding box, perhaps none for white figure
if not self.get_visible():
return
renderer.open_group('figure')
if self.get_tight_layout() and self.axes:
try:
self.tight_layout(renderer, **self._tight_parameters)
except ValueError:
pass
# ValueError can occur when resizing a window.
if self.frameon:
self.patch.draw(renderer)
# a list of (zorder, func_to_call, list_of_args)
dsu = []
for a in self.patches:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
for a in self.lines:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
for a in self.artists:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
# override the renderer default if self.suppressComposite
# is not None
not_composite = renderer.option_image_nocomposite()
if self.suppressComposite is not None:
not_composite = self.suppressComposite
if (len(self.images) <= 1 or not_composite or
not cbook.allequal([im.origin for im in self.images])):
for a in self.images:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
else:
# make a composite image blending alpha
# list of (_image.Image, ox, oy)
mag = renderer.get_image_magnification()
ims = [(im.make_image(mag), im.ox, im.oy, im.get_alpha())
for im in self.images]
im = _image.from_images(self.bbox.height * mag,
self.bbox.width * mag,
ims)
im.is_grayscale = False
l, b, w, h = self.bbox.bounds
def draw_composite():
gc = renderer.new_gc()
gc.set_clip_rectangle(self.bbox)
gc.set_clip_path(self.get_clip_path())
renderer.draw_image(gc, l, b, im)
gc.restore()
dsu.append((self.images[0].get_zorder(), self.images[0],
draw_composite, []))
# render the axes
for a in self.axes:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
# render the figure text
for a in self.texts:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
for a in self.legends:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
dsu = [row for row in dsu if not row[1].get_animated()]
dsu.sort(key=itemgetter(0))
for zorder, a, func, args in dsu:
func(*args)
renderer.close_group('figure')
self._cachedRenderer = renderer
self.canvas.draw_event(renderer)
def draw_artist(self, a):
"""
draw :class:`matplotlib.artist.Artist` instance *a* only --
this is available only after the figure is drawn
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
def get_axes(self):
return self.axes
def legend(self, handles, labels, *args, **kwargs):
"""
Place a legend in the figure. Labels are a sequence of
strings, handles is a sequence of
:class:`~matplotlib.lines.Line2D` or
:class:`~matplotlib.patches.Patch` instances, and loc can be a
string or an integer specifying the legend location
USAGE::
legend( (line1, line2, line3),
('label1', 'label2', 'label3'),
'upper right')
The *loc* location codes are::
'best' : 0, (currently not supported for figure legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
*loc* can also be an (x,y) tuple in figure coords, which
specifies the lower left of the legend box. figure coords are
(0,0) is the left, bottom of the figure and 1,1 is the right,
top.
Keyword arguments:
*prop*: [ *None* | FontProperties | dict ]
A :class:`matplotlib.font_manager.FontProperties`
instance. If *prop* is a dictionary, a new instance will be
created with *prop*. If *None*, use rc settings.
*numpoints*: integer
The number of points in the legend line, default is 4
*scatterpoints*: integer
            The number of marker points in the legend entry for a scatter plot, default is 4
*scatteryoffsets*: list of floats
a list of yoffsets for scatter symbols in legend
*markerscale*: [ *None* | scalar ]
The relative size of legend markers vs. original. If *None*, use rc
settings.
*fancybox*: [ *None* | *False* | *True* ]
if *True*, draw a frame with a round fancybox. If *None*, use rc
*shadow*: [ *None* | *False* | *True* ]
If *True*, draw a shadow behind legend. If *None*, use rc settings.
*ncol* : integer
number of columns. default is 1
*mode* : [ "expand" | *None* ]
if mode is "expand", the legend will be horizontally expanded
to fill the axes area (or *bbox_to_anchor*)
*title* : string
the legend title
Padding and spacing between various elements use following keywords
parameters. The dimensions of these values are given as a fraction
of the fontsize. Values from rcParams will be used if None.
================ ====================================================
Keyword Description
================ ====================================================
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
================ ====================================================
.. Note:: Not all kinds of artist are supported by the legend.
See LINK (FIXME) for details.
**Example:**
.. plot:: mpl_examples/pylab_examples/figlegend_demo.py
"""
l = Legend(self, handles, labels, *args, **kwargs)
self.legends.append(l)
l._remove_method = lambda h: self.legends.remove(h)
return l
@docstring.dedent_interpd
def text(self, x, y, s, *args, **kwargs):
"""
Add text to figure.
Call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text to figure at location *x*, *y* (relative 0-1
coords). See :func:`~matplotlib.pyplot.text` for the meaning
of the other arguments.
kwargs control the :class:`~matplotlib.text.Text` properties:
%(Text)s
"""
override = _process_text_args({}, *args, **kwargs)
t = Text(x=x, y=y, text=s)
t.update(override)
self._set_artist_props(t)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
return t
def _set_artist_props(self, a):
if a != self:
a.set_figure(self)
a.set_transform(self.transFigure)
@docstring.dedent_interpd
def gca(self, **kwargs):
"""
Get the current axes, creating one if necessary
The following kwargs are supported for ensuring the returned axes
adheres to the given projection etc., and for axes creation if
the active axes does not exist:
%(Axes)s
"""
ckey, cax = self._axstack.current_key_axes()
        # if there exists an axes on the stack see if it matches
# the desired axes configuration
if cax is not None:
# if no kwargs are given just return the current axes
# this is a convenience for gca() on axes such as polar etc.
if not kwargs:
return cax
# if the user has specified particular projection detail
# then build up a key which can represent this
else:
# we don't want to modify the original kwargs
# so take a copy so that we can do what we like to it
kwargs_copy = kwargs.copy()
projection_class, _, key = process_projection_requirements(
self, **kwargs_copy)
# let the returned axes have any gridspec by removing it from
# the key
ckey = ckey[1:]
key = key[1:]
# if the cax matches this key then return the axes, otherwise
# continue and a new axes will be created
if key == ckey and isinstance(cax, projection_class):
return cax
# no axes found, so create one which spans the figure
return self.add_subplot(1, 1, 1, **kwargs)
def sca(self, a):
'Set the current axes to be a and return a'
self._axstack.bubble(a)
for func in self._axobservers:
func(self)
return a
def _gci(self):
"""
helper for :func:`~matplotlib.pyplot.gci`;
do not use elsewhere.
"""
# Look first for an image in the current Axes:
cax = self._axstack.current_key_axes()[1]
if cax is None:
return None
im = cax._gci()
if im is not None:
return im
# If there is no image in the current Axes, search for
# one in a previously created Axes. Whether this makes
# sense is debatable, but it is the documented behavior.
for ax in reversed(self.axes):
im = ax._gci()
if im is not None:
return im
return None
def __getstate__(self):
state = self.__dict__.copy()
# the axobservers cannot currently be pickled.
# Additionally, the canvas cannot currently be pickled, but this has
# the benefit of meaning that a figure can be detached from one canvas,
# and re-attached to another.
for attr_to_pop in ('_axobservers', 'show',
'canvas', '_cachedRenderer'):
state.pop(attr_to_pop, None)
# add version information to the state
state['__mpl_version__'] = _mpl_version
# check to see if the figure has a manager and whether it is registered
# with pyplot
if getattr(self.canvas, 'manager', None) is not None:
manager = self.canvas.manager
import matplotlib._pylab_helpers
if manager in list(six.itervalues(
matplotlib._pylab_helpers.Gcf.figs)):
state['_restore_to_pylab'] = True
return state
def __setstate__(self, state):
version = state.pop('__mpl_version__')
restore_to_pylab = state.pop('_restore_to_pylab', False)
if version != _mpl_version:
import warnings
warnings.warn("This figure was saved with matplotlib version %s "
"and is unlikely to function correctly." %
(version, ))
self.__dict__ = state
# re-initialise some of the unstored state information
self._axobservers = []
self.canvas = None
if restore_to_pylab:
# lazy import to avoid circularity
import matplotlib.pyplot as plt
import matplotlib._pylab_helpers as pylab_helpers
allnums = plt.get_fignums()
num = max(allnums) + 1 if allnums else 1
mgr = plt._backend_mod.new_figure_manager_given_figure(num, self)
# XXX The following is a copy and paste from pyplot. Consider
# factoring to pylab_helpers
if self.get_label():
mgr.set_window_title(self.get_label())
# make this figure current on button press event
def make_active(event):
pylab_helpers.Gcf.set_active(mgr)
mgr._cidgcf = mgr.canvas.mpl_connect('button_press_event',
make_active)
pylab_helpers.Gcf.set_active(mgr)
self.number = num
plt.draw_if_interactive()
def add_axobserver(self, func):
'whenever the axes state change, ``func(self)`` will be called'
self._axobservers.append(func)
def savefig(self, *args, **kwargs):
"""
Save the current figure.
Call signature::
savefig(fname, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
The output formats available depend on the backend being used.
Arguments:
*fname*:
A string containing a path to a filename, or a Python
file-like object, or possibly some backend-dependent object
such as :class:`~matplotlib.backends.backend_pdf.PdfPages`.
If *format* is *None* and *fname* is a string, the output
format is deduced from the extension of the filename. If
the filename has no extension, the value of the rc parameter
``savefig.format`` is used.
If *fname* is not a string, remember to specify *format* to
ensure that the correct backend is used.
Keyword arguments:
*dpi*: [ *None* | ``scalar > 0`` ]
The resolution in dots per inch. If *None* it will default to
the value ``savefig.dpi`` in the matplotlibrc file.
*facecolor*, *edgecolor*:
the colors of the figure rectangle
*orientation*: [ 'landscape' | 'portrait' ]
not supported on all backends; currently only on postscript output
*papertype*:
One of 'letter', 'legal', 'executive', 'ledger', 'a0' through
'a10', 'b0' through 'b10'. Only supported for postscript
output.
*format*:
One of the file extensions supported by the active
backend. Most backends support png, pdf, ps, eps and svg.
*transparent*:
If *True*, the axes patches will all be transparent; the
figure patch will also be transparent unless facecolor
and/or edgecolor are specified via kwargs.
This is useful, for example, for displaying
a plot on top of a colored background on a web page. The
transparency of these patches will be restored to their
original values upon exit of this function.
*frameon*:
If *True*, the figure patch will be colored, if *False*, the
figure background will be transparent. If not provided, the
rcParam 'savefig.frameon' will be used.
*bbox_inches*:
Bbox in inches. Only the given portion of the figure is
saved. If 'tight', try to figure out the tight bbox of
the figure.
*pad_inches*:
Amount of padding around the figure when bbox_inches is
'tight'.
*bbox_extra_artists*:
A list of extra artists that will be considered when the
tight bbox is calculated.
"""
kwargs.setdefault('dpi', rcParams['savefig.dpi'])
frameon = kwargs.pop('frameon', rcParams['savefig.frameon'])
transparent = kwargs.pop('transparent',
rcParams['savefig.transparent'])
if transparent:
kwargs.setdefault('facecolor', 'none')
kwargs.setdefault('edgecolor', 'none')
original_axes_colors = []
for ax in self.axes:
patch = ax.patch
original_axes_colors.append((patch.get_facecolor(),
patch.get_edgecolor()))
patch.set_facecolor('none')
patch.set_edgecolor('none')
else:
kwargs.setdefault('facecolor', rcParams['savefig.facecolor'])
kwargs.setdefault('edgecolor', rcParams['savefig.edgecolor'])
if frameon:
original_frameon = self.get_frameon()
self.set_frameon(frameon)
self.canvas.print_figure(*args, **kwargs)
if frameon:
self.set_frameon(original_frameon)
if transparent:
for ax, cc in zip(self.axes, original_axes_colors):
ax.patch.set_facecolor(cc[0])
ax.patch.set_edgecolor(cc[1])
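# Usage sketch (illustrative addition, not part of the original module): saving
# with a transparent background and a tight bounding box; the Agg canvas is
# assumed purely for the example.
def _savefig_example():
    from matplotlib.backends.backend_agg import FigureCanvasAgg
    from matplotlib.figure import Figure
    fig = Figure(figsize=(4, 3))
    FigureCanvasAgg(fig)  # attach a canvas so print_figure has a backend
    ax = fig.add_subplot(1, 1, 1)
    ax.plot([0, 1, 2], [1, 0, 1])
    fig.savefig("example.png", transparent=True, bbox_inches="tight")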
@docstring.dedent_interpd
def colorbar(self, mappable, cax=None, ax=None, use_gridspec=True, **kw):
"""
Create a colorbar for a ScalarMappable instance, *mappable*.
Documentation for the pylab thin wrapper:
%(colorbar_doc)s
"""
if ax is None:
ax = self.gca()
# Store the value of gca so that we can set it back later on.
current_ax = self.gca()
if cax is None:
if use_gridspec and isinstance(ax, SubplotBase):
cax, kw = cbar.make_axes_gridspec(ax, **kw)
else:
cax, kw = cbar.make_axes(ax, **kw)
cax.hold(True)
cb = cbar.colorbar_factory(cax, mappable, **kw)
self.sca(current_ax)
return cb
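# Usage sketch (illustrative addition, not part of the original module):
# attaching a colorbar to an image; with use_gridspec=True (the default) the
# space is stolen from the parent subplot rather than from the whole figure.
def _colorbar_example():
    import numpy as np
    from matplotlib.backends.backend_agg import FigureCanvasAgg
    from matplotlib.figure import Figure
    fig = Figure()
    FigureCanvasAgg(fig)
    ax = fig.add_subplot(1, 1, 1)
    im = ax.imshow(np.arange(64).reshape(8, 8))
    cb = fig.colorbar(im, ax=ax)  # returns a Colorbar instance
    fig.savefig("with_colorbar.png")
    return cb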
def subplots_adjust(self, *args, **kwargs):
"""
Call signature::
subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None)
Update the :class:`SubplotParams` with *kwargs* (defaulting to rc when
*None*) and update the subplot locations
"""
self.subplotpars.update(*args, **kwargs)
for ax in self.axes:
if not isinstance(ax, SubplotBase):
# Check if sharing a subplots axis
if (ax._sharex is not None and
isinstance(ax._sharex, SubplotBase)):
ax._sharex.update_params()
ax.set_position(ax._sharex.figbox)
elif (ax._sharey is not None and
isinstance(ax._sharey, SubplotBase)):
ax._sharey.update_params()
ax.set_position(ax._sharey.figbox)
else:
ax.update_params()
ax.set_position(ax.figbox)
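# Usage sketch (illustrative addition, not part of the original module):
# widening the vertical gap and the left margin of two stacked subplots.
def _subplots_adjust_example():
    from matplotlib.figure import Figure
    fig = Figure()
    fig.add_subplot(2, 1, 1)
    fig.add_subplot(2, 1, 2)
    fig.subplots_adjust(hspace=0.4, left=0.15)
    return fig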
def ginput(self, n=1, timeout=30, show_clicks=True, mouse_add=1,
mouse_pop=3, mouse_stop=2):
"""
Call signature::
ginput(self, n=1, timeout=30, show_clicks=True,
mouse_add=1, mouse_pop=3, mouse_stop=2)
Blocking call to interact with the figure.
This will wait for *n* clicks from the user and return a list of the
coordinates of each click.
If *timeout* is zero or negative, does not timeout.
If *n* is zero or negative, accumulate clicks until a middle click
(or potentially both mouse buttons at once) terminates the input.
Right clicking cancels last input.
The buttons used for the various actions (adding points, removing
points, terminating the inputs) can be overridden via the
arguments *mouse_add*, *mouse_pop* and *mouse_stop*, that give
the associated mouse button: 1 for left, 2 for middle, 3 for
right.
The keyboard can also be used to select points in case your mouse
does not have one or more of the buttons. The delete and backspace
keys act like right clicking (i.e., remove last point), the enter key
terminates input and any other key (not already used by the window
manager) selects a point.
"""
blocking_mouse_input = BlockingMouseInput(self,
mouse_add=mouse_add,
mouse_pop=mouse_pop,
mouse_stop=mouse_stop)
return blocking_mouse_input(n=n, timeout=timeout,
show_clicks=show_clicks)
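# Usage sketch (illustrative addition, not part of the original module):
# collecting three clicks. This requires an interactive backend (e.g. TkAgg),
# so it is shown through pyplot, which attaches such a canvas; the call blocks
# until the clicks arrive or the timeout expires.
def _ginput_example():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    points = fig.ginput(n=3, timeout=30)  # list of (x, y) tuples
    return points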
def waitforbuttonpress(self, timeout=-1):
"""
Call signature::
waitforbuttonpress(self, timeout=-1)
Blocking call to interact with the figure.
This will return True if a key was pressed, False if a mouse
button was pressed and None if *timeout* was reached without
either being pressed.
If *timeout* is negative, does not timeout.
"""
blocking_input = BlockingKeyMouseInput(self)
return blocking_input(timeout=timeout)
def get_default_bbox_extra_artists(self):
bbox_artists = [artist for artist in self.get_children()
if artist.get_visible()]
for ax in self.axes:
if ax.get_visible():
bbox_artists.extend(ax.get_default_bbox_extra_artists())
# we don't want the figure's patch to influence the bbox calculation
bbox_artists.remove(self.patch)
return bbox_artists
def get_tightbbox(self, renderer):
"""
Return a (tight) bounding box of the figure in inches.
It only accounts for axes titles, axis labels, and axis
ticklabels. Needs improvement.
"""
bb = []
for ax in self.axes:
if ax.get_visible():
bb.append(ax.get_tightbbox(renderer))
if len(bb) == 0:
return self.bbox_inches
_bbox = Bbox.union([b for b in bb if b.width != 0 or b.height != 0])
bbox_inches = TransformedBbox(_bbox,
Affine2D().scale(1. / self.dpi))
return bbox_inches
def tight_layout(self, renderer=None, pad=1.08, h_pad=None,
w_pad=None, rect=None):
"""
Adjust subplot parameters to give specified padding.
Parameters:
*pad* : float
padding between the figure edge and the edges of subplots,
as a fraction of the font-size.
*h_pad*, *w_pad* : float
padding (height/width) between edges of adjacent subplots.
Defaults to `pad_inches`.
*rect* : if rect is given, it is interpreted as a rectangle
(left, bottom, right, top) in the normalized figure
coordinate that the whole subplots area (including
labels) will fit into. Default is (0, 0, 1, 1).
"""
from .tight_layout import (get_renderer, get_tight_layout_figure,
get_subplotspec_list)
subplotspec_list = get_subplotspec_list(self.axes)
if None in subplotspec_list:
warnings.warn("This figure includes Axes that are not "
"compatible with tight_layout, so its "
"results might be incorrect.")
if renderer is None:
renderer = get_renderer(self)
kwargs = get_tight_layout_figure(self, self.axes, subplotspec_list,
renderer,
pad=pad, h_pad=h_pad, w_pad=w_pad,
rect=rect)
self.subplots_adjust(**kwargs)
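# Usage sketch (illustrative addition, not part of the original module):
# letting tight_layout resolve overlapping labels on a 2x2 grid of subplots.
def _tight_layout_example():
    from matplotlib.backends.backend_agg import FigureCanvasAgg
    from matplotlib.figure import Figure
    fig = Figure()
    FigureCanvasAgg(fig)
    for i in range(1, 5):
        ax = fig.add_subplot(2, 2, i)
        ax.set_xlabel("x label %d" % i)
        ax.set_ylabel("y label %d" % i)
    fig.tight_layout(pad=0.5)
    fig.savefig("grid.png")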
def figaspect(arg):
"""
Create a figure with specified aspect ratio. If *arg* is a number,
use that aspect ratio. If *arg* is an array, figaspect will
determine the width and height for a figure that would fit the array
while preserving its aspect ratio. The figure width and height in inches
are returned. Be sure to create an axes with equal width and height,
e.g.,
Example usage::
# make a figure twice as tall as it is wide
w, h = figaspect(2.)
fig = Figure(figsize=(w,h))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.imshow(A, **kwargs)
# make a figure with the proper aspect for an array
A = rand(5,3)
w, h = figaspect(A)
fig = Figure(figsize=(w,h))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.imshow(A, **kwargs)
Thanks to Fernando Perez for this function
"""
isarray = hasattr(arg, 'shape')
# min/max sizes to respect when autoscaling. If John likes the idea, they
# could become rc parameters, for now they're hardwired.
figsize_min = np.array((4.0, 2.0)) # min length for width/height
figsize_max = np.array((16.0, 16.0)) # max length for width/height
#figsize_min = rcParams['figure.figsize_min']
#figsize_max = rcParams['figure.figsize_max']
# Extract the aspect ratio of the array
if isarray:
nr, nc = arg.shape[:2]
arr_ratio = float(nr) / nc
else:
arr_ratio = float(arg)
# Height of user figure defaults
fig_height = rcParams['figure.figsize'][1]
# New size for the figure, keeping the aspect ratio of the caller
newsize = np.array((fig_height / arr_ratio, fig_height))
# Sanity checks, don't drop either dimension below figsize_min
newsize /= min(1.0, *(newsize / figsize_min))
# Avoid humongous windows as well
newsize /= max(1.0, *(newsize / figsize_max))
# Finally, if we have a really funky aspect ratio, break it but respect
# the min/max dimensions (we don't want figures 10 feet tall!)
newsize = np.clip(newsize, figsize_min, figsize_max)
return newsize
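# Usage sketch (illustrative addition, not part of the original module): sizing
# a figure to a 5x3 array so that imshow keeps roughly square pixels.
def _figaspect_example():
    import numpy as np
    A = np.arange(15).reshape(5, 3)
    w, h = figaspect(A)
    fig = Figure(figsize=(w, h))
    ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    ax.imshow(A)
    return fig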
docstring.interpd.update(Figure=martist.kwdoc(Figure))
| {
"content_hash": "08d9ff2999649658c9e7ef1251af2248",
"timestamp": "",
"source": "github",
"line_count": 1729,
"max_line_length": 79,
"avg_line_length": 33.96124927703875,
"alnum_prop": 0.5584393467191199,
"repo_name": "daodaoliang/neural-network-animation",
"id": "0243efaa34553c35c62015a8cd6574280047c098",
"size": "58719",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "matplotlib/figure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5382"
},
{
"name": "HTML",
"bytes": "4000"
},
{
"name": "JavaScript",
"bytes": "24260"
},
{
"name": "Python",
"bytes": "4443606"
}
],
"symlink_target": ""
} |
import asyncio
import logging
from datetime import datetime
from aiohttp import web
from aiocache import cached
from aiocache.serializers import JsonSerializer
@cached(key="function_key", serializer=JsonSerializer())
async def time():
return {"time": datetime.now().isoformat()}
async def handle(request):
return web.json_response(await time())
# It is also possible to cache the whole route, but for this you will need to
# override `cached.get_from_cache` and regenerate the response since aiohttp
# forbids reusing responses
class CachedOverride(cached):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
async def get_from_cache(self, key):
try:
value = await self.cache.get(key)
if type(value) == web.Response:
return web.Response(
body=value.body,
status=value.status,
reason=value.reason,
headers=value.headers,
)
return value
except Exception:
logging.exception("Couldn't retrieve %s, unexpected error", key)
@CachedOverride(key="route_key", serializer=JsonSerializer())
async def handle2(request):
return web.json_response(await asyncio.sleep(3))
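# Usage sketch (illustrative addition, not part of the original example):
# awaiting the cached coroutine twice; the second call is served from the
# cache because both calls share the key "function_key".
async def cached_demo():
    first = await time()
    second = await time()   # no recomputation, same cached payload
    return first == second  # True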
if __name__ == "__main__":
app = web.Application()
app.router.add_get('/handle', handle)
app.router.add_get('/handle2', handle2)
web.run_app(app)
| {
"content_hash": "72f3e991829fdeefc5c4cf8e9f154823",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 77,
"avg_line_length": 29.1,
"alnum_prop": 0.6364261168384879,
"repo_name": "argaen/aiocache",
"id": "c373f8435a7e979258cb7de4f6c47843c80292ed",
"size": "1455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/frameworks/aiohttp_example.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "615"
},
{
"name": "Python",
"bytes": "242174"
},
{
"name": "Shell",
"bytes": "920"
},
{
"name": "Smarty",
"bytes": "239"
}
],
"symlink_target": ""
} |
import pytest
@pytest.fixture(name="sequence0")
def fixture_sequence_0():
return 'NLYIQWLXDGGPSSGRPPPS'
@pytest.fixture(name="sequence1")
def fixture_sequence_1():
return 'NLYIQWLKDGGPSSGRPPPS'
@pytest.fixture(name="sequence2")
def fixture_sequence_2():
return 'MALPVTALLLPLALLLHAARPSQFRVSPLDRTWNLGETVELKCQVLLSNPTSGCSWLFQPRGA'
@pytest.fixture(name="sequence3")
def fixture_sequence_3():
return 'ggctgacggctgggttaagaagtttactgaggcagttaacgcctttaagggtttggactggatc'\
'cgctgccaagttgtccaagttccttgattggatcaaatccaagatcatcccggagctcagagag'\
'agagcggagtttgttaagaatcttaggcagcttcctcttctcgaggcccagatcaccactttgg'\
'agcactccaaccctaatcaggagacccaagaacagcttttctcgaacgtccaatacctggcaca'\
'ccactgtaggaagaacgctccgctctatgcagcggaagcccggagagttttcgcactagagaaa'
| {
"content_hash": "144639b3013c5c969bd389ca348ad1e2",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 78,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.79875,
"repo_name": "ecolell/amphipathic",
"id": "0abd9ebb8963d1d26d35435f5e38de798a1df5df",
"size": "800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/fixtures/sequences.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "BASIC",
"bytes": "6957"
},
{
"name": "Makefile",
"bytes": "404"
},
{
"name": "Python",
"bytes": "18826"
}
],
"symlink_target": ""
} |
import copy
import time
import uuid
from dogpile.cache import api
from dogpile.cache import proxy
import mock
from oslo_config import cfg
from keystone.common import cache
from keystone import exception
from keystone.tests import unit
CONF = cfg.CONF
NO_VALUE = api.NO_VALUE
def _copy_value(value):
if value is not NO_VALUE:
value = copy.deepcopy(value)
return value
# NOTE(morganfainberg): WARNING - It is not recommended to use the Memory
# backend for dogpile.cache in a real deployment under any circumstances. The
# backend does no cleanup of expired values and therefore will leak memory. The
# backend is not implemented in a way to share data across processes (e.g.
# Keystone in HTTPD). This proxy is a hack to get around the lack of isolation
# of values in memory. Currently it blindly stores and retrieves the values
# from the cache, and modifications to dicts/lists/etc returned can result in
# changes to the cached values. In short, do not use the dogpile.cache.memory
# backend unless you are running tests or expecting odd/strange results.
class CacheIsolatingProxy(proxy.ProxyBackend):
"""Proxy that forces a memory copy of stored values.
The default in-memory cache-region does not perform a copy on values it is
meant to cache. Therefore if the value is modified after set or after get,
the cached value also is modified. This proxy does a copy as the last
thing before storing data.
"""
def get(self, key):
return _copy_value(self.proxied.get(key))
def set(self, key, value):
self.proxied.set(key, _copy_value(value))
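# Usage sketch (illustrative addition, not part of the original test module):
# with the proxy installed, mutating a value read back from the in-memory
# backend does not leak into the cached copy.
def _isolation_example():
    from dogpile.cache import make_region
    region = make_region().configure('dogpile.cache.memory')
    region.wrap(CacheIsolatingProxy)
    region.set('user', {'name': 'alice'})
    fetched = region.get('user')
    fetched['name'] = 'bob'                       # only the local copy changes
    assert region.get('user')['name'] == 'alice'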
class TestProxy(proxy.ProxyBackend):
def get(self, key):
value = _copy_value(self.proxied.get(key))
if value is not NO_VALUE:
if isinstance(value[0], TestProxyValue):
value[0].cached = True
return value
class TestProxyValue(object):
def __init__(self, value):
self.value = value
self.cached = False
class CacheRegionTest(unit.TestCase):
def setUp(self):
super(CacheRegionTest, self).setUp()
self.region = cache.make_region()
cache.configure_cache_region(self.region)
self.region.wrap(TestProxy)
self.test_value = TestProxyValue('Decorator Test')
def _add_test_caching_option(self):
self.config_fixture.register_opt(
cfg.BoolOpt('caching', default=True), group='cache')
def _get_cacheable_function(self):
with mock.patch.object(cache.REGION, 'cache_on_arguments',
self.region.cache_on_arguments):
memoize = cache.get_memoization_decorator(section='cache')
@memoize
def cacheable_function(value):
return value
return cacheable_function
def test_region_built_with_proxy_direct_cache_test(self):
# Verify cache regions are properly built with proxies.
test_value = TestProxyValue('Direct Cache Test')
self.region.set('cache_test', test_value)
cached_value = self.region.get('cache_test')
self.assertTrue(cached_value.cached)
def test_cache_region_no_error_multiple_config(self):
# Verify configuring the CacheRegion again doesn't error.
cache.configure_cache_region(self.region)
cache.configure_cache_region(self.region)
def _get_cache_fallthrough_fn(self, cache_time):
with mock.patch.object(cache.REGION, 'cache_on_arguments',
self.region.cache_on_arguments):
memoize = cache.get_memoization_decorator(
section='cache',
expiration_section='assignment')
class _test_obj(object):
def __init__(self, value):
self.test_value = value
@memoize
def get_test_value(self):
return self.test_value
def _do_test(value):
test_obj = _test_obj(value)
# Ensure the value has been cached
test_obj.get_test_value()
# Get the now cached value
cached_value = test_obj.get_test_value()
self.assertTrue(cached_value.cached)
self.assertEqual(value.value, cached_value.value)
self.assertEqual(cached_value.value, test_obj.test_value.value)
# Change the underlying value on the test object.
test_obj.test_value = TestProxyValue(uuid.uuid4().hex)
self.assertEqual(cached_value.value,
test_obj.get_test_value().value)
# override the system time to ensure the non-cached new value
# is returned
new_time = time.time() + (cache_time * 2)
with mock.patch.object(time, 'time',
return_value=new_time):
overridden_cache_value = test_obj.get_test_value()
self.assertNotEqual(cached_value.value,
overridden_cache_value.value)
self.assertEqual(test_obj.test_value.value,
overridden_cache_value.value)
return _do_test
def test_cache_no_fallthrough_expiration_time_fn(self):
# Since we do not re-configure the cache region, for ease of testing
# this value is set the same as the expiration_time default in the
# [cache] section
cache_time = 600
expiration_time = cache.get_expiration_time_fn('role')
do_test = self._get_cache_fallthrough_fn(cache_time)
# Run the test with the assignment cache_time value
self.config_fixture.config(cache_time=cache_time,
group='role')
test_value = TestProxyValue(uuid.uuid4().hex)
self.assertEqual(cache_time, expiration_time())
do_test(value=test_value)
def test_cache_fallthrough_expiration_time_fn(self):
# Since we do not re-configure the cache region, for ease of testing
# this value is set the same as the expiration_time default in the
# [cache] section
cache_time = 599
expiration_time = cache.get_expiration_time_fn('role')
do_test = self._get_cache_fallthrough_fn(cache_time)
# Run the test with the assignment cache_time value set to None and
# the global value set.
self.config_fixture.config(cache_time=None, group='role')
test_value = TestProxyValue(uuid.uuid4().hex)
self.assertIsNone(expiration_time())
do_test(value=test_value)
def test_should_cache_fn_global_cache_enabled(self):
# Verify should_cache_fn generates a sane function for subsystem and
# functions as expected with caching globally enabled.
cacheable_function = self._get_cacheable_function()
self.config_fixture.config(group='cache', enabled=True)
cacheable_function(self.test_value)
cached_value = cacheable_function(self.test_value)
self.assertTrue(cached_value.cached)
def test_should_cache_fn_global_cache_disabled(self):
# Verify should_cache_fn generates a sane function for subsystem and
# functions as expected with caching globally disabled.
cacheable_function = self._get_cacheable_function()
self.config_fixture.config(group='cache', enabled=False)
cacheable_function(self.test_value)
cached_value = cacheable_function(self.test_value)
self.assertFalse(cached_value.cached)
def test_should_cache_fn_global_cache_disabled_section_cache_enabled(self):
# Verify should_cache_fn generates a sane function for subsystem and
# functions as expected with caching globally disabled and the specific
# section caching enabled.
cacheable_function = self._get_cacheable_function()
self._add_test_caching_option()
self.config_fixture.config(group='cache', enabled=False)
self.config_fixture.config(group='cache', caching=True)
cacheable_function(self.test_value)
cached_value = cacheable_function(self.test_value)
self.assertFalse(cached_value.cached)
def test_should_cache_fn_global_cache_enabled_section_cache_disabled(self):
# Verify should_cache_fn generates a sane function for subsystem and
# functions as expected with caching globally enabled and the specific
# section caching disabled.
cacheable_function = self._get_cacheable_function()
self._add_test_caching_option()
self.config_fixture.config(group='cache', enabled=True)
self.config_fixture.config(group='cache', caching=False)
cacheable_function(self.test_value)
cached_value = cacheable_function(self.test_value)
self.assertFalse(cached_value.cached)
def test_should_cache_fn_global_cache_enabled_section_cache_enabled(self):
# Verify should_cache_fn generates a sane function for subsystem and
# functions as expected with caching globally enabled and the specific
# section caching enabled.
cacheable_function = self._get_cacheable_function()
self._add_test_caching_option()
self.config_fixture.config(group='cache', enabled=True)
self.config_fixture.config(group='cache', caching=True)
cacheable_function(self.test_value)
cached_value = cacheable_function(self.test_value)
self.assertTrue(cached_value.cached)
def test_cache_dictionary_config_builder(self):
"""Validate we build a sane dogpile.cache dictionary config."""
self.config_fixture.config(group='cache',
config_prefix='test_prefix',
backend='some_test_backend',
expiration_time=86400,
backend_argument=['arg1:test',
'arg2:test:test',
'arg3.invalid'])
config_dict = cache.build_cache_config()
self.assertEqual(
CONF.cache.backend, config_dict['test_prefix.backend'])
self.assertEqual(
CONF.cache.expiration_time,
config_dict['test_prefix.expiration_time'])
self.assertEqual('test', config_dict['test_prefix.arguments.arg1'])
self.assertEqual('test:test',
config_dict['test_prefix.arguments.arg2'])
self.assertNotIn('test_prefix.arguments.arg3', config_dict)
def test_cache_debug_proxy(self):
single_value = 'Test Value'
single_key = 'testkey'
multi_values = {'key1': 1, 'key2': 2, 'key3': 3}
self.region.set(single_key, single_value)
self.assertEqual(single_value, self.region.get(single_key))
self.region.delete(single_key)
self.assertEqual(NO_VALUE, self.region.get(single_key))
self.region.set_multi(multi_values)
cached_values = self.region.get_multi(multi_values.keys())
for value in multi_values.values():
self.assertIn(value, cached_values)
self.assertEqual(len(multi_values.values()), len(cached_values))
self.region.delete_multi(multi_values.keys())
for value in self.region.get_multi(multi_values.keys()):
self.assertEqual(NO_VALUE, value)
def test_configure_non_region_object_raises_error(self):
self.assertRaises(exception.ValidationError,
cache.configure_cache_region,
"bogus")
class CacheNoopBackendTest(unit.TestCase):
def setUp(self):
super(CacheNoopBackendTest, self).setUp()
self.region = cache.make_region()
cache.configure_cache_region(self.region)
def config_overrides(self):
super(CacheNoopBackendTest, self).config_overrides()
self.config_fixture.config(group='cache',
backend='keystone.common.cache.noop')
def test_noop_backend(self):
single_value = 'Test Value'
single_key = 'testkey'
multi_values = {'key1': 1, 'key2': 2, 'key3': 3}
self.region.set(single_key, single_value)
self.assertEqual(NO_VALUE, self.region.get(single_key))
self.region.set_multi(multi_values)
cached_values = self.region.get_multi(multi_values.keys())
self.assertEqual(len(cached_values), len(multi_values.values()))
for value in cached_values:
self.assertEqual(NO_VALUE, value)
# Delete should not raise exceptions
self.region.delete(single_key)
self.region.delete_multi(multi_values.keys())
| {
"content_hash": "3df1b27cd1f50e7633d40970d1754af1",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 79,
"avg_line_length": 41.30967741935484,
"alnum_prop": 0.631578947368421,
"repo_name": "idjaw/keystone",
"id": "3c2afe6687b7a861305da6ba7155be56342b5f1e",
"size": "13381",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "keystone/tests/unit/test_cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "4015455"
}
],
"symlink_target": ""
} |
print "sara" | {
"content_hash": "85cc44a593ce3cffade72d4efcf75001",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 12,
"avg_line_length": 12,
"alnum_prop": 0.75,
"repo_name": "CodeCatz/litterbox",
"id": "912e084aa282d8829942934ee66eae44d472f06f",
"size": "12",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sara.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "927158"
},
{
"name": "JavaScript",
"bytes": "796463"
},
{
"name": "Python",
"bytes": "192149"
},
{
"name": "Ruby",
"bytes": "54"
}
],
"symlink_target": ""
} |
"""
Data Manager Frame
Copyright (c) 2014, 2015 Andrew Hawkins
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
"""
Import Declarations
"""
import sqlite3
import wx
from forsteri.interface import sql as isql
"""
Constant Declarations
"""
ID_HIERARCHY = 45
ID_VARIABLE = 46
"""
Panel Class
"""
class ManagerPanel(wx.Panel):
"""
A panel that contains a combo box selection and a list control. This
allows adding, editing, and deleting the values held in the database.
Extends:
wx.Panel
"""
def __init__(self, id, connection, *args, **kwargs):
"""
Initialize the panel.
Args:
parent (wx.Frame): The associated parent for this panel.
id (int): The ID for the panel which defines what will be edited.
This can currently be ID_HIERARCHY or ID_VARIABLE. As the software
expands, so will the possibilities.
connection (sqlite3.Connection): A connection to the database.
Returns:
ManagerPanel
To Do:
Find a better way to handle the IDs.
"""
## Panel
# Initialize by the parent's constructor.
super(ManagerPanel, self).__init__(*args, **kwargs)
# Set the ID for the panel.
self.id = id
# Create the master sizer.
masterSizer = wx.BoxSizer(wx.VERTICAL)
# Create a static connection.
self.connection = connection
## Combo Box
# Get the combo box choices.
choices = self.getChoices()
# Create the label and input box.
self.combo = wx.ComboBox(self, choices=choices,
style=wx.CB_READONLY|wx.CB_SORT)
# Set the initial selection to be the first item.
self.combo.SetSelection(0)
# Bind the combo box selection to a function.
self.combo.Bind(wx.EVT_COMBOBOX, self.updateList)
## List Control
# Create the list control.
self.itemList = wx.ListCtrl(self, size=(-1, 200),
style=wx.BORDER_SUNKEN|wx.LC_REPORT|wx.LC_HRULES|wx.LC_VRULES)
# Add the title column.
self.itemList.InsertColumn(0, "Title", width=500)
# Bind the selection of an item to a function.
self.itemList.Bind(wx.EVT_LIST_ITEM_SELECTED, self.onSelected)
self.itemList.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.onSelected)
self.itemList.Bind(wx.EVT_LEFT_DCLICK, self.onEdit)
## Manipulate Buttons
# Create the manipulate sizer.
manipSizer = wx.BoxSizer(wx.HORIZONTAL)
# Create the buttons.
addButton = wx.Button(self, id=wx.ID_ADD)
self.editButton = wx.Button(self, id=wx.ID_EDIT)
self.deleteButton = wx.Button(self, id=wx.ID_DELETE)
# Add the buttons to the manipulate sizer.
manipSizer.AddMany([addButton, (5, 0), self.editButton, (5, 0),
self.deleteButton])
# Bind button presses to functions.
addButton.Bind(wx.EVT_BUTTON, self.onAdd)
self.editButton.Bind(wx.EVT_BUTTON, self.onEdit)
self.deleteButton.Bind(wx.EVT_BUTTON, self.onDelete)
## Key Bindings
# Create a new id for selecting all.
selectAllId = wx.NewId()
# Bind the id to a function.
self.Bind(wx.EVT_MENU, self.selectAll, id=selectAllId)
# Create a new accelerator table and set it to the frame.
accelT = wx.AcceleratorTable([(wx.ACCEL_CTRL, ord("A"), selectAllId)])
self.SetAcceleratorTable(accelT)
## Frame Operations
# Add everything to the master sizer.
masterSizer.AddSpacer(5)
masterSizer.Add(self.combo, flag=wx.ALIGN_CENTER)
masterSizer.AddSpacer(10)
masterSizer.Add(self.itemList, flag=wx.LEFT|wx.RIGHT|wx.EXPAND,
border=5)
masterSizer.AddSpacer(10)
masterSizer.Add(manipSizer, flag=wx.ALIGN_CENTER)
masterSizer.AddSpacer(5)
# Update the list display.
self.updateList(None)
# Set the sizer for the master panel.
self.SetSizer(masterSizer)
"""
Helper Functions
"""
def getChoices(self):
"""
Get the combo box choices depending on the ID.
Args:
None
Returns:
list of str: The possible choices for the combo box with regard to
the defined ID.
"""
# Pull the possible tiers from the database.
if self.id == ID_HIERARCHY:
return isql.getTiers(self.connection)
else:
return isql.getVariables(self.connection)
"""
Event Handler Functions
"""
def updateList(self, event):
"""
Update the items displayed in the list control.
Args:
event (wx._core.CommandEvent): The triggered event after completing
some action that would alter the items in the list control.
Returns:
None
"""
# Reset the edit and delete buttons.
self.editButton.Enable()
self.deleteButton.SetBackgroundColour(wx.NullColour)
# Get the list for the selected tier.
if self.id == ID_HIERARCHY:
items = isql.getForTier(self.combo.GetStringSelection(),
self.connection)
else:
items = isql.getForVariable(self.combo.GetStringSelection(),
self.connection)
# Sort the items in ascending order.
items.sort()
# Remove all items from the list control.
self.itemList.DeleteAllItems()
# Add the items to the list control.
index = 0
for item in items:
self.itemList.InsertStringItem(index, item)
index += 1
def selectAll(self, event):
"""
What to do when the "Ctrl+A" button combination is entered. Select
all items in the list control.
Args:
event (wx._core.CommandEvent): The triggered event after the button
combo "Ctrl+A" is entered.
Returns:
None
"""
# Iterate through the items in the list selecting each.
for i in range(0, self.itemList.GetItemCount()):
self.itemList.Select(i)
def onSelected(self, event):
"""
What to do when items in the list control are selected. Reset the
color of the buttons if only one item is selected. If multiple items
are selected, disable the edit button and set the delete button to be
yellow.
Args:
event (wx._core.CommandEvent): The triggered event after an item in
the list control is selected.
Returns:
None
"""
# If multiple products are selected, set the edit and delete buttons
# to be yellow.
if self.itemList.GetSelectedItemCount() > 1:
self.editButton.Disable()
self.deleteButton.SetBackgroundColour("Yellow")
# If only one product is selected, set the edit and delete buttons
# to be the default color.
else:
self.editButton.Enable()
self.deleteButton.SetBackgroundColour(wx.NullColour)
def onAdd(self, event):
"""
What to do when the add button is pressed. Open the text entry dialog
and obtain the title of the new item. Write the new item into the
database.
Args:
event (wx._core.CommandEvent): The triggered event when the add
button is pressed.
Returns:
None
"""
# Create the text entry dialog box.
dialog = wx.TextEntryDialog(self, "What is the name of the item?",
"New Item")
# If OK is not pressed, return false.
if dialog.ShowModal() != wx.ID_OK:
return False
# Get the new item value.
newItem = dialog.GetValue()
# If an empty string is input, return false.
if newItem == "":
return False
# Destroy the dialog box.
dialog.Destroy()
# Add the inputted text to the database.
if self.id == ID_HIERARCHY:
isql.addTitle(self.combo.GetStringSelection(), newItem,
self.connection)
else:
isql.addAlias(self.combo.GetStringSelection(), newItem,
self.connection)
# Update the list.
self.updateList(None)
def onEdit(self, event):
"""
What to do when the edit button is pressed. Open the text entry
dialog and fill it with what the current title is. Obtain the altered
title for the item. Write the altered item into the database.
Args:
event (wx._core.CommandEvent): The triggered event when the edit
button is pressed.
Returns:
None
"""
# Get the selected item index from the list.
itemIndex = self.itemList.GetFirstSelected()
# Send an error if nothing is selected.
if itemIndex == -1:
errorDialog = wx.MessageDialog(self, "No item was selected.",
"Error", wx.OK|wx.ICON_ERROR)
errorDialog.ShowModal()
return False
# Get the string value of the old item.
oldItem = self.itemList.GetItemText(itemIndex)
# Create the text entry dialog box.
dialog = wx.TextEntryDialog(self, "What is the name of the item?",
"Edit Item", oldItem)
# If OK is not pressed, return false.
if dialog.ShowModal() != wx.ID_OK:
return False
# Get the new item value.
newItem = dialog.GetValue()
# If an empty string is input or there is no change, return false.
if newItem == "" or newItem == oldItem:
return False
# Destroy the dialog box.
dialog.Destroy()
# Get the selected combo box item.
selection = self.combo.GetStringSelection()
if self.id == ID_HIERARCHY:
# Set the new item in the database.
isql.setTitle(selection, oldItem, newItem, self.connection)
else:
# Set the new item in the database.
isql.setAlias(selection, oldItem, newItem, self.connection)
# Update the list.
self.updateList(None)
def onDelete(self, event):
"""
What to do when the delete button is pressed. Remove the selected item
in the list control from the database.
Args:
event (wx._core.CommandEvent): The triggered event when the delete
button is pressed.
Returns:
None
"""
# Get the first selected item index from the list.
itemIndex = self.itemList.GetFirstSelected()
# Send an error if nothing is selected.
if itemIndex == -1:
errorDialog = wx.MessageDialog(self, "No item was selected.",
"Error", wx.OK|wx.ICON_ERROR)
errorDialog.ShowModal()
return False
# Get the number of selected products.
count = self.itemList.GetSelectedItemCount()
# Create the delete confirmation dialog box.
confirmDialog = wx.MessageDialog(self,
"You are about to delete " + str(count) +
" item(s). Continue?",
"Delete Confirmation", wx.YES_NO)
# If no is selected, return false.
if confirmDialog.ShowModal() != wx.ID_YES:
return False
# Remove all selected items.
selection = self.combo.GetStringSelection()
for i in range(0, self.itemList.GetSelectedItemCount()):
# Get the item text.
item = self.itemList.GetItemText(itemIndex)
if self.id == ID_HIERARCHY:
# Remove the selected item.
isql.removeTitle(selection, item, self.connection)
else:
# Remove the selected item.
isql.removeAlias(selection, item, self.connection)
# Get the next selected item index.
itemIndex = self.itemList.GetNextSelected(itemIndex)
# Update the list.
self.updateList(None)
"""
Notebook Class
"""
class ManagerNotebook(wx.Notebook):
"""
A notebook that holds the possible panels.
Extends:
wx.Notebook
"""
def __init__(self, connection, *args, **kwargs):
"""
Initialize the notebook.
Args:
*args (): Any arguments to be passed directly to the super's
constructor.
**kwargs (): Any keyword arguments to be passed to the super's
constructor.
Returns:
ManagerNotebook
"""
# Initialize by the parent's constructor.
super(ManagerNotebook, self).__init__(*args, **kwargs)
# Create the hierarchy tab.
hierarchy = ManagerPanel(ID_HIERARCHY, connection, self)
# Add the hierarchy page to the notebook.
self.AddPage(hierarchy, "Hierarchy")
# Create the variable tab.
variable = ManagerPanel(ID_VARIABLE, connection, self)
# Add the variable page to the notebook.
self.AddPage(variable, "Input Variable")
# Bind tab selection to functions.
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.onPageChange)
"""
Event Handler Functions
"""
def onPageChange(self, event):
"""
What to do when a new page has been selected.
Args:
event (wx._core.CommandEvent): The triggered event when a new page
is selected.
Returns:
None
"""
# Skip the event.
event.Skip()
"""
Frame Class
"""
class ManagerFrame(wx.Frame):
"""
The frame that contains the notebook and all panels. The "OK", "Apply",
and "Cancel" buttons are housed here.
Extends:
wx.Frame
"""
def __init__(self, *args, **kwargs):
"""
Initialize the frame.
Args:
*args (): Any arguments to be passed directly to the super's
constructor.
**kwargs (): Any keyword arguments to be passed to the super's
constructor.
Returns:
ManagerFrame
"""
"""Initialize the frame."""
# Initialize by the parent's constructor.
super(ManagerFrame, self).__init__(*args, **kwargs)
# Create the master panel.
masterPanel = wx.Panel(self)
# Create the master sizer.
masterSizer = wx.BoxSizer(wx.VERTICAL)
# Open a connection to the database.
self.connection = sqlite3.connect(isql.MASTER)
"""Initialize the notebook panel."""
# Create the notebook.
notebook = ManagerNotebook(self.connection, masterPanel)
"""Initialize the finish buttons."""
# Create the finish sizer.
finishSizer = wx.BoxSizer(wx.HORIZONTAL)
# Create the buttons.
okButton = wx.Button(masterPanel, id=wx.ID_OK)
applyButton = wx.Button(masterPanel, id=wx.ID_APPLY)
cancelButton = wx.Button(masterPanel, id=wx.ID_CANCEL)
# Set the OK button to be the default button.
okButton.SetDefault()
# Add the buttons to the finish sizer.
finishSizer.AddMany([okButton, (5, 0), applyButton, (5, 0),
cancelButton, (5, 0)])
# Bind button presses to functions.
okButton.Bind(wx.EVT_BUTTON, self.onOK)
applyButton.Bind(wx.EVT_BUTTON, self.onApply)
cancelButton.Bind(wx.EVT_BUTTON, self.onCancel)
"""Final frame operations."""
# Add everything to the master sizer.
masterSizer.Add(notebook, 1, wx.ALL|wx.EXPAND, 5)
masterSizer.Add(finishSizer, flag=wx.ALIGN_RIGHT)
masterSizer.AddSpacer(5)
# Set the sizer for the master panel.
masterPanel.SetSizer(masterSizer)
# Bind closing the frame to a function.
self.Bind(wx.EVT_CLOSE, self.onClose)
# Set window properties.
self.SetSize((600, 400))
self.SetTitle("Data Manager")
self.Centre()
self.Show(True)
"""
Event Handler Functions
"""
def onOK(self, event):
"""
What to do when the OK button has been pressed. Commit the changes,
close the database connection, and close the window.
Args:
event (wx._core.CommandEvent): The triggered event when a new page
is selected.
Returns:
None
"""
# Commit and close the database.
self.connection.commit()
self.connection.close()
# Close the window.
self.Close()
def onApply(self, event):
"""
What to do when the Apply button has been pressed. Commit the changes
to the database while keeping the window open.
Args:
event (wx._core.CommandEvent): The triggered event when a new page
is selected.
Returns:
None
"""
# Commit the database.
self.connection.commit()
def onCancel(self, event):
"""
What to do when the Cancel button has been pressed. Close the database
connection without committing and close the window.
Args:
event (wx._core.CommandEvent): The triggered event when a new page
is selected.
Returns:
None
"""
# Close the database.
self.connection.close()
# Close the window.
self.Close()
def onClose(self, event):
"""
What to do when the frame is closed. Close the database connection and
destroy the window.
Args:
event (wx._core.CommandEvent): The triggered event when a new page
is selected.
Returns:
None
"""
# Close the database.
self.connection.close()
# Destroy the window.
self.Destroy()
def main():
"""
When the file is called independently, create and display the manager frame.
"""
app = wx.App()
ManagerFrame(None, title="Data Manager", size=(600, 400),
style=wx.DEFAULT_FRAME_STYLE^wx.RESIZE_BORDER)
app.MainLoop()
if __name__ == '__main__':
main()
| {
"content_hash": "02a4cba9fd049b5022adc9298cc4f016",
"timestamp": "",
"source": "github",
"line_count": 646,
"max_line_length": 79,
"avg_line_length": 29.69969040247678,
"alnum_prop": 0.6000208485353904,
"repo_name": "achawkins/Forsteri",
"id": "c7370e3cbb896168543fb1caf94589e6b9622f6d",
"size": "19205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forsteri/gui/window/data_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "33"
},
{
"name": "Python",
"bytes": "245733"
}
],
"symlink_target": ""
} |
__author__ = 'ghost'
import os
DEBUG = True
APPDIR = os.path.abspath(os.path.dirname(__file__))
STATIC = os.path.join(APPDIR, 'static')
AVATAR = os.path.join(STATIC, 'avatar')
TEMPLATES = os.path.join(APPDIR, 'templates')
SECRET_KEY = 'you-will-never-guess'
| {
"content_hash": "65a3c99cf2ee91a6f9a4f318bdccd16e",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 51,
"avg_line_length": 21.833333333333332,
"alnum_prop": 0.6870229007633588,
"repo_name": "rsj217/flask--scaffold",
"id": "6df44538dcd24b746d453e0024b6cbb34f7bfbed",
"size": "308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "avatar/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10129"
},
{
"name": "Python",
"bytes": "35656"
}
],
"symlink_target": ""
} |
"""Tests for grr.parsers.sqlite_file."""
import os
import StringIO
from grr.lib import flags
from grr.lib import test_lib
from grr.parsers import sqlite_file
class SQLiteFileTest(test_lib.GRRBaseTest):
"""Test parsing of sqlite database files."""
query = "SELECT * FROM moz_places;"
def testErrors(self):
"""Test empty files don't raise errors."""
database_file = sqlite_file.SQLiteFile(StringIO.StringIO())
entries = [x for x in database_file.Query(self.query)]
self.assertEqual(len(entries), 0)
# The places.sqlite contains 92 rows in table moz_places
def testTmpFiles(self):
"""This should force a write to a tmp file."""
filename = os.path.join(self.base_path, "places.sqlite")
file_stream = StringIO.StringIO(open(filename).read())
database_file = sqlite_file.SQLiteFile(file_stream)
entries = [x for x in database_file.Query(self.query)]
self.assertEqual(len(entries), 92)
# Test the tempfile is deleted
self.assertEqual(database_file._delete_file, True)
filename = database_file.name
self.assertTrue(os.path.exists(filename))
del database_file
self.assertFalse(os.path.exists(filename))
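# Usage sketch (illustrative addition, not part of the original tests): the
# parser can be pointed at any readable stream; the path and query below are
# hypothetical.
def _parser_example():
    data = open("/tmp/places.sqlite", "rb").read()
    db = sqlite_file.SQLiteFile(StringIO.StringIO(data))
    return [row for row in db.Query("SELECT url FROM moz_places;")]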
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| {
"content_hash": "cb44dcd80516a65024b60eb6110f9671",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 63,
"avg_line_length": 28.22222222222222,
"alnum_prop": 0.7023622047244095,
"repo_name": "statik/grr",
"id": "528409520fd75a4fae6f8ae5791c6ba6530171b0",
"size": "1292",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "parsers/sqlite_file_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "14896"
},
{
"name": "C",
"bytes": "10598"
},
{
"name": "C++",
"bytes": "303841"
},
{
"name": "CMake",
"bytes": "3228"
},
{
"name": "CSS",
"bytes": "12812"
},
{
"name": "Groff",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "83451"
},
{
"name": "JavaScript",
"bytes": "229046"
},
{
"name": "Makefile",
"bytes": "6232"
},
{
"name": "Protocol Buffer",
"bytes": "202829"
},
{
"name": "Python",
"bytes": "5266989"
},
{
"name": "Ruby",
"bytes": "5103"
},
{
"name": "Shell",
"bytes": "43667"
},
{
"name": "Standard ML",
"bytes": "8172"
}
],
"symlink_target": ""
} |
from SDA import *
import numpy
import unittest
class FlightBoundaryContainerTestCase(unittest.TestCase):
def setUp(self):
self.test_boundary_container = FlightBoundariesContainer(numpy.array([[[-2000, -2000], [-2000, 2000], [2000, 2000], [2000, -2000]], [[-5000, -1000], [-4500, 2000], [2000, 2000], [2000, -2000]]]))
self.test_boundary_container1 = FlightBoundariesContainer(numpy.array([numpy.array([[-2000, -2000], [-1500, 2000], [2000, 4500], [1500, -3000]])]))
self.test_boundary_container2 = FlightBoundariesContainer(numpy.array([numpy.array([[-2000, -2000], [-2000, 2000], [0,1000], [2000, 2000], [2000, -2000]])]))
def test_is_point_in_bounds(self):
self.assertTrue(self.test_boundary_container.is_point_in_bounds(numpy.array([0,0,0])))
self.assertTrue(self.test_boundary_container.is_point_in_bounds(numpy.array([10,0,1])))
self.assertTrue(self.test_boundary_container1.is_point_in_bounds(numpy.array([0,0,0])))
self.assertTrue(self.test_boundary_container1.is_point_in_bounds(numpy.array([100, 100,100])))
self.assertFalse(self.test_boundary_container2.is_point_in_bounds(numpy.array([-8000,0,0])))
| {
"content_hash": "cc390e01861bd24cdbf91c5b8743c218",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 203,
"avg_line_length": 69.70588235294117,
"alnum_prop": 0.6852320675105485,
"repo_name": "FlintHill/SUAS-Competition",
"id": "bc831aa94d09d5c22fda312766c27926dd01f133",
"size": "1185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit_tests/test_flight_boundaries_container.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "164260"
},
{
"name": "HTML",
"bytes": "46489"
},
{
"name": "JavaScript",
"bytes": "105325"
},
{
"name": "PHP",
"bytes": "2701"
},
{
"name": "Python",
"bytes": "538468"
},
{
"name": "Shell",
"bytes": "1913"
}
],
"symlink_target": ""
} |
"""
Domino Public API
Public API endpoints for Custom Metrics # noqa: E501
The version of the OpenAPI document: 5.3.0
Generated by: https://openapi-generator.tech
"""
import copy
import logging
import multiprocessing
import sys
import urllib3
from http import client as http_client
from domino._impl.custommetrics.exceptions import ApiValueError
JSON_SCHEMA_VALIDATION_KEYWORDS = {
'multipleOf', 'maximum', 'exclusiveMaximum',
'minimum', 'exclusiveMinimum', 'maxLength',
'minLength', 'pattern', 'maxItems', 'minItems',
'uniqueItems', 'maxProperties', 'minProperties',
}
class Configuration(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param host: Base url
:param api_key: Dict to store API key(s).
Each entry in the dict specifies an API key.
The dict key is the name of the security scheme in the OAS specification.
The dict value is the API key secret.
:param api_key_prefix: Dict to store API prefix (e.g. Bearer)
The dict key is the name of the security scheme in the OAS specification.
The dict value is an API key prefix when generating the auth data.
:param username: Username for HTTP basic authentication
:param password: Password for HTTP basic authentication
:param discard_unknown_keys: Boolean value indicating whether to discard
unknown properties. A server may send a response that includes additional
properties that are not known by the client in the following scenarios:
1. The OpenAPI document is incomplete, i.e. it does not match the server
implementation.
2. The client was generated using an older version of the OpenAPI document
and the server has been upgraded since then.
If a schema in the OpenAPI document defines the additionalProperties attribute,
then all undeclared properties received by the server are injected into the
additional properties map. In that case, there are undeclared properties, and
nothing to discard.
:param disabled_client_side_validations (string): Comma-separated list of
JSON schema validation keywords to disable JSON schema structural validation
rules. The following keywords may be specified: multipleOf, maximum,
exclusiveMaximum, minimum, exclusiveMinimum, maxLength, minLength, pattern,
maxItems, minItems.
By default, the validation is performed for data generated locally by the client
and data received from the server, independent of any validation performed by
the server side. If the input data does not satisfy the JSON schema validation
rules specified in the OpenAPI document, an exception is raised.
If disabled_client_side_validations is set, structural validation is
disabled. This can be useful to troubleshoot data validation problems, such as
when the OpenAPI document validation rules do not match the actual API data
received by the server.
:param server_index: Index to servers configuration.
:param server_variables: Mapping with string values to replace variables in
templated server configuration. The validation of enums is performed for
variables with defined enum values before.
:param server_operation_index: Mapping from operation ID to an index to server
configuration.
:param server_operation_variables: Mapping from operation ID to a mapping with
string values to replace variables in templated server configuration.
The validation of enums is performed for variables with defined enum values before.
"""
_default = None
def __init__(self, host=None,
api_key=None, api_key_prefix=None,
username=None, password=None,
discard_unknown_keys=False,
disabled_client_side_validations="",
server_index=None, server_variables=None,
server_operation_index=None, server_operation_variables=None,
):
"""Constructor
"""
self._base_path = "http://localhost" if host is None else host
"""Default Base url
"""
self.server_index = 0 if server_index is None and host is None else server_index
self.server_operation_index = server_operation_index or {}
"""Default server index
"""
self.server_variables = server_variables or {}
self.server_operation_variables = server_operation_variables or {}
"""Default server variables
"""
self.temp_folder_path = None
"""Temp file folder for downloading files
"""
# Authentication Settings
self.api_key = {}
if api_key:
self.api_key = api_key
"""dict to store API key(s)
"""
self.api_key_prefix = {}
if api_key_prefix:
self.api_key_prefix = api_key_prefix
"""dict to store API prefix (e.g. Bearer)
"""
self.refresh_api_key_hook = None
"""function hook to refresh API key if expired
"""
self.username = username
"""Username for HTTP basic authentication
"""
self.password = password
"""Password for HTTP basic authentication
"""
self.discard_unknown_keys = discard_unknown_keys
self.disabled_client_side_validations = disabled_client_side_validations
self.logger = {}
"""Logging Settings
"""
self.logger["package_logger"] = logging.getLogger("domino._impl.custommetrics")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
"""Log format
"""
self.logger_stream_handler = None
"""Log stream handler
"""
self.logger_file_handler = None
"""Log file handler
"""
self.logger_file = None
"""Debug file location
"""
self.debug = False
"""Debug switch
"""
self.verify_ssl = True
"""SSL/TLS verification
Set this to false to skip verifying SSL certificate when calling API
from https server.
"""
self.ssl_ca_cert = None
"""Set this to customize the certificate file to verify the peer.
"""
self.cert_file = None
"""client certificate file
"""
self.key_file = None
"""client key file
"""
self.assert_hostname = None
"""Set this to True/False to enable/disable SSL hostname verification.
"""
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
"""urllib3 connection pool's maximum number of connections saved
per pool. urllib3 uses 1 connection as default value, but this is
not the best value when you are making a lot of possibly parallel
requests to the same host, which is often the case here.
cpu_count * 5 is used as default value to increase performance.
"""
self.proxy = None
"""Proxy URL
"""
self.proxy_headers = None
"""Proxy headers
"""
self.safe_chars_for_path_param = ''
"""Safe chars for path_param
"""
self.retries = None
"""Adding retries to override urllib3 default value 3
"""
# Enable client side validation
self.client_side_validation = True
# Options to pass down to the underlying urllib3 socket
self.socket_options = None
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k not in ('logger', 'logger_file_handler'):
setattr(result, k, copy.deepcopy(v, memo))
# shallow copy of loggers
result.logger = copy.copy(self.logger)
# use setters to configure loggers
result.logger_file = self.logger_file
result.debug = self.debug
return result
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
if name == 'disabled_client_side_validations':
s = set(filter(None, value.split(',')))
for v in s:
if v not in JSON_SCHEMA_VALIDATION_KEYWORDS:
raise ApiValueError(
"Invalid keyword: '{0}''".format(v))
self._disabled_client_side_validations = s
@classmethod
def set_default(cls, default):
"""Set default instance of configuration.
It stores default configuration, which can be
returned by get_default_copy method.
:param default: object of Configuration
"""
cls._default = copy.deepcopy(default)
@classmethod
def get_default_copy(cls):
"""Return new instance of configuration.
This method returns newly created, based on default constructor,
object of Configuration class or returns a copy of default
configuration passed by the set_default method.
:return: The configuration object.
"""
if cls._default is not None:
return copy.deepcopy(cls._default)
return Configuration()
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in self.logger.items():
logger.addHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in self.logger.items():
logger.setLevel(logging.DEBUG)
# turn on http_client debug
http_client.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in self.logger.items():
logger.setLevel(logging.WARNING)
# turn off http_client debug
http_client.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier, alias=None):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:param alias: The alternative identifier of apiKey.
:return: The token for api key authentication.
"""
if self.refresh_api_key_hook is not None:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier, self.api_key.get(alias) if alias is not None else None)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
username = ""
if self.username is not None:
username = self.username
password = ""
if self.password is not None:
password = self.password
return urllib3.util.make_headers(
basic_auth=username + ':' + password
).get('authorization')
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
auth = {}
return auth
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: 5.3.0\n"\
"SDK Package Version: 1.0.0".\
format(env=sys.platform, pyversion=sys.version)
def get_host_settings(self):
"""Gets an array of host settings
:return: An array of host settings
"""
return [
{
'url': "",
'description': "No description provided",
}
]
def get_host_from_settings(self, index, variables=None, servers=None):
"""Gets host URL based on the index and variables
:param index: array index of the host settings
:param variables: hash of variable and the corresponding value
:param servers: an array of host settings or None
:return: URL based on host settings
"""
if index is None:
return self._base_path
variables = {} if variables is None else variables
servers = self.get_host_settings() if servers is None else servers
try:
server = servers[index]
except IndexError:
raise ValueError(
"Invalid index {0} when selecting the host settings. "
"Must be less than {1}".format(index, len(servers)))
url = server['url']
# go through variables and replace placeholders
for variable_name, variable in server.get('variables', {}).items():
used_value = variables.get(
variable_name, variable['default_value'])
if 'enum_values' in variable \
and used_value not in variable['enum_values']:
raise ValueError(
"The variable `{0}` in the host URL has invalid value "
"{1}. Must be {2}.".format(
variable_name, variables[variable_name],
variable['enum_values']))
url = url.replace("{" + variable_name + "}", used_value)
return url
@property
def host(self):
"""Return generated host."""
return self.get_host_from_settings(self.server_index, variables=self.server_variables)
@host.setter
def host(self, value):
"""Fix base path."""
self._base_path = value
self.server_index = None
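# Usage sketch (illustrative only; the class name `Configuration` and the
# server entry below are assumptions, not part of this generated module):
# get_host_from_settings() fills {placeholders} in a server URL template with
# either a caller-supplied value or the variable's declared default, and
# rejects values outside enum_values.
#
#     config = Configuration()
#     servers = [{
#         'url': "https://{region}.example.com/api",
#         'variables': {
#             'region': {'default_value': "us-east-1",
#                        'enum_values': ["us-east-1", "eu-west-1"]},
#         },
#     }]
#     config.get_host_from_settings(0, variables={'region': "eu-west-1"},
#                                   servers=servers)
#     # -> "https://eu-west-1.example.com/api"
#
# get_api_key_with_prefix() similarly returns "<prefix> <key>" when a prefix
# (for example "Bearer") is registered for the identifier, and the bare key
# otherwise.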
| {
"content_hash": "1453290da9343acad3f8755e29905390",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 98,
"avg_line_length": 36.34703196347032,
"alnum_prop": 0.6076005025125628,
"repo_name": "dominodatalab/python-domino",
"id": "14083c14a9dc1122d5f70a6a330cbd426cb407c1",
"size": "15937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "domino/_impl/custommetrics/configuration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "518781"
},
{
"name": "Shell",
"bytes": "142"
}
],
"symlink_target": ""
} |
"""Punctuation.
---
layout: post
source: Garner's Modern American Usage
source_url: http://bit.ly/1T4alrY
title: dates
date: 2014-06-10 12:31:19
categories: writing
---
Dates.
"""
from proselint.tools import existence_check, memoize
@memoize
def check(text):
"""Check the text."""
err = "garner.punctuation"
msg = "Misplaced punctuation. It's 'et al.'"
    items = [
        "et. al",
        "et. al."
    ]
    return existence_check(text, items, err, msg, join=True)
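# Minimal usage sketch (illustrative; the exact error-tuple layout returned by
# existence_check is a proselint internal and may differ between versions):
#
#     check("See Smith et. al for details.")   # flags the misplaced period
#     check("See Smith et al. for details.")   # no errors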
| {
"content_hash": "89f738f9c94d016446538e87625a62a9",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 59,
"avg_line_length": 18.035714285714285,
"alnum_prop": 0.6099009900990099,
"repo_name": "amperser/proselint",
"id": "08bd11b8e071338017f28c5892b77fe2601a4acd",
"size": "505",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "proselint/checks/misc/punctuation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2558"
},
{
"name": "HTML",
"bytes": "241413"
},
{
"name": "JavaScript",
"bytes": "249832"
},
{
"name": "Procfile",
"bytes": "83"
},
{
"name": "Python",
"bytes": "333207"
},
{
"name": "Ruby",
"bytes": "364"
},
{
"name": "SCSS",
"bytes": "30668"
},
{
"name": "Shell",
"bytes": "1830"
}
],
"symlink_target": ""
} |
"""
Self test for Hitachi Block Storage Driver
"""
import mock
from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.hitachi import hbsd_basiclib
from cinder.volume.drivers.hitachi import hbsd_common
from cinder.volume.drivers.hitachi import hbsd_iscsi
from cinder.volume.drivers.hitachi import hbsd_snm2
def _exec_hsnm(*args, **kargs):
return HBSDSNM2ISCSIDriverTest.hsnm_vals.get(args)
def _exec_hsnm_init(*args, **kargs):
return HBSDSNM2ISCSIDriverTest.hsnm_vals_init.get(args)
class HBSDSNM2ISCSIDriverTest(test.TestCase):
"""Test HBSDSNM2ISCSIDriver."""
audppool_result = " DP RAID \
Current Utilization Current Over Replication\
Available Current Replication Rotational \
\
Stripe \
Needing Preparation\n\
Pool Tier Mode Level Total Capacity Consumed Capacity \
Percent Provisioning Percent Capacity \
Utilization Percent Type Speed Encryption Status \
\
Reconstruction Progress Size Capacity\n\
30 Disable 1( 1D+1D) 532.0 GB 2.0 GB \
1% 24835% 532.0 GB \
1% SAS 10000rpm N/A Normal \
N/A \
256KB 0.0 GB"
aureplicationlocal_result = "Pair Name LUN Pair \
LUN Status Copy Type Group \
Point-in-Time MU Number\n\
0 10 0 Split( 99%) \
ShadowImage ---:Ungrouped N/A\
"
auluref_result = " Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 0 Enable 5( 3D+1P) SAS"
auhgwwn_result = "Port 00 Host Group Security ON\n Detected WWN\n \
Name Port Name Host Group\n\
HBSD-00 10000000C97BCE7A 001:HBSD-01\n\
Assigned WWN\n Name Port Name \
Host Group\n abcdefg 10000000C97BCE7A \
001:HBSD-01"
autargetini_result = "Port 00 Target Security ON\n\
Target Name \
iSCSI Name\n\
001:HBSD-01 \
iqn"
autargetini_result2 = "Port 00 Target Security ON\n\
Target Name \
iSCSI Name"
autargetmap_result = "Mapping Mode = ON\n\
Port Target H-LUN LUN\n\
00 001:HBSD-01 0 1000"
auiscsi_result = "Port 00\n\
Port Number : 3260\n\
Keep Alive Timer[sec.] : 60\n\
MTU : 1500\n\
Transfer Rate : 1Gbps\n\
Link Status : Link Up\n\
Ether Address : 00:00:87:33:D1:3E\n\
IPv4\n\
IPv4 Address : 192.168.0.1\n\
IPv4 Subnet Mask : 255.255.252.0\n\
IPv4 Default Gateway : 0.0.0.0\n\
IPv6 Status : Disable\n\
Connecting Hosts : 0\n\
Result : Normal\n\
VLAN Status : Disable\n\
VLAN ID : N/A\n\
Header Digest : Enable\n\
Data Digest : Enable\n\
Window Scale : Disable"
autargetdef_result = "Port 00\n\
Authentication Mutual\n\
Target Method CHAP Algorithm \
Authentication\n\
001:T000 None --- ---\n\
User Name : ---\n\
iSCSI Name : iqn-target"
hsnm_vals = {
('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""],
('aureplicationlocal',
'-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'):
[0, "", ""],
('aureplicationlocal',
'-unit None -create -si -pvol 3 -svol 1 -compsplit -pace normal'):
[1, "", ""],
('aureplicationlocal', '-unit None -refer -pvol 1'):
[0, "%s" % aureplicationlocal_result, ""],
('aureplicationlocal', '-unit None -refer -pvol 3'):
[1, "", "DMEC002015"],
('aureplicationlocal', '-unit None -refer -svol 3'):
[1, "", "DMEC002015"],
('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'):
[0, "", ""],
('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 1'):
[1, "", ""],
('auluchgsize', '-unit None -lu 1 -size 256g'):
[0, "", ""],
('auludel', '-unit None -lu 1 -f'): [0, "", ""],
('auludel', '-unit None -lu 3 -f'): [1, "", ""],
('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, "", ""],
('auluadd', '-unit None -lu 1 -dppoolno 30 -size 256g'): [1, "", ""],
('auluref', '-unit None'): [0, "%s" % auluref_result, ""],
('autargetmap', '-unit None -add 0 0 1 1 1'): [0, "", ""],
('autargetmap', '-unit None -add 0 0 0 0 1'): [0, "", ""],
('autargetini', '-unit None -refer'):
[0, "%s" % autargetini_result, ""],
('autargetini', '-unit None -add 0 0 -tno 0 -iname iqn'):
[0, "", ""],
('autargetmap', '-unit None -refer'):
[0, "%s" % autargetmap_result, ""],
('autargetdef',
'-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 -iname iqn.target \
-authmethod None'):
[0, "", ""],
('autargetdef', '-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 \
-iname iqnX.target -authmethod None'):
[1, "", ""],
('autargetopt', '-unit None -set 0 0 -talias HBSD-0.0.0.0 \
-ReportFullPortalList enable'):
[0, "", ""],
('auiscsi', '-unit None -refer'): [0, "%s" % auiscsi_result, ""],
('autargetdef', '-unit None -refer'):
[0, "%s" % autargetdef_result, ""]}
hsnm_vals_init = {
('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""],
('aureplicationlocal',
'-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'):
[0, 0, ""],
('aureplicationlocal', '-unit None -refer -pvol 1'):
[0, "%s" % aureplicationlocal_result, ""],
('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'):
[0, 0, ""],
('auluchgsize', '-unit None -lu 1 -size 256g'):
[0, 0, ""],
('auludel', '-unit None -lu 1 -f'): [0, "", ""],
('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, "", ""],
('auluref', '-unit None'): [0, "%s" % auluref_result, ""],
('autargetmap', '-unit None -add 0 0 1 1 1'): [0, "", ""],
('autargetmap', '-unit None -add 0 0 0 0 1'): [0, "", ""],
('autargetini', '-unit None -refer'):
[0, "%s" % autargetini_result2, ""],
('autargetini', '-unit None -add 0 0 -tno 0 -iname iqn'):
[0, "", ""],
('autargetmap', '-unit None -refer'):
[0, "%s" % autargetmap_result, ""],
('autargetdef',
'-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 -iname iqn.target \
-authmethod None'):
[0, "", ""],
('autargetopt', '-unit None -set 0 0 -talias HBSD-0.0.0.0 \
-ReportFullPortalList enable'):
[0, "", ""],
('auiscsi', '-unit None -refer'): [0, "%s" % auiscsi_result, ""],
('autargetdef', '-unit None -refer'):
[0, "%s" % autargetdef_result, ""],
('auman', '-help'):
[0, "Version 27.50", ""]}
# The following information is passed on to tests, when creating a volume
_VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0',
'provider_location': '1', 'name': 'test',
'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'}
test_volume = {'name': 'test_volume', 'size': 128,
'id': 'test-volume-0',
'provider_location': '1', 'status': 'available'}
test_volume_error = {'name': 'test_volume_error', 'size': 256,
'id': 'test-volume-error',
'provider_location': '3', 'status': 'available'}
test_volume_error1 = {'name': 'test_volume_error', 'size': 128,
'id': 'test-volume-error',
'provider_location': None, 'status': 'available'}
test_volume_error2 = {'name': 'test_volume_error', 'size': 256,
'id': 'test-volume-error',
'provider_location': '1', 'status': 'available'}
test_volume_error3 = {'name': 'test_volume3', 'size': 128,
'id': 'test-volume3',
'volume_metadata': [{'key': 'type',
'value': 'V-VOL'}],
'provider_location': '1', 'status': 'available'}
test_volume_error4 = {'name': 'test_volume4', 'size': 128,
'id': 'test-volume2',
'provider_location': '3', 'status': 'available'}
test_snapshot = {'volume_name': 'test', 'size': 128,
'volume_size': 128, 'name': 'test-snap',
'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME,
'provider_location': '1', 'status': 'available'}
test_snapshot_error2 = {'volume_name': 'test', 'size': 128,
'volume_size': 128, 'name': 'test-snap',
'volume_id': 0, 'id': 'test-snap-0',
'volume': test_volume_error,
'provider_location': None, 'status': 'available'}
def __init__(self, *args, **kwargs):
super(HBSDSNM2ISCSIDriverTest, self).__init__(*args, **kwargs)
@mock.patch.object(utils, 'brick_get_connector_properties',
return_value={'ip': '0.0.0.0',
'initiator': 'iqn'})
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
side_effect=_exec_hsnm_init)
@mock.patch.object(utils, 'execute',
return_value=['', ''])
def setUp(self, args1, arg2, arg3, arg4):
super(HBSDSNM2ISCSIDriverTest, self).setUp()
self._setup_config()
self._setup_driver()
self.driver.check_param()
self.driver.common.create_lock_file()
self.driver.common.command.connect_storage()
self.driver.max_hostgroups = \
self.driver.common.command.get_max_hostgroups()
self.driver.add_hostgroup()
self.driver.output_param_to_log()
self.driver.do_setup_status.set()
def _setup_config(self):
self.configuration = mock.Mock(conf.Configuration)
self.configuration.hitachi_pool_id = 30
self.configuration.hitachi_thin_pool_id = 31
self.configuration.hitachi_target_ports = "00"
self.configuration.hitachi_debug_level = 0
self.configuration.hitachi_serial_number = None
self.configuration.hitachi_unit_name = "None"
self.configuration.hitachi_group_request = True
self.configuration.hitachi_group_range = "0-1"
self.configuration.config_group = "None"
self.configuration.hitachi_ldev_range = "0-100"
self.configuration.hitachi_default_copy_method = 'FULL'
self.configuration.hitachi_copy_check_interval = 1
self.configuration.hitachi_async_copy_check_interval = 1
self.configuration.hitachi_copy_speed = 3
self.configuration.hitachi_auth_method = None
self.configuration.hitachi_auth_user = "HBSD-CHAP-user"
self.configuration.hitachi_auth_password = "HBSD-CHAP-password"
self.configuration.hitachi_add_chap_user = "False"
def _setup_driver(self):
self.driver = hbsd_iscsi.HBSDISCSIDriver(
configuration=self.configuration)
context = None
db = None
self.driver.common = hbsd_common.HBSDCommon(
self.configuration, self.driver, context, db)
# API test cases
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_volume(self, arg1, arg2, arg3):
"""test create_volume."""
ret = self.driver.create_volume(self._VOLUME)
vol = self._VOLUME.copy()
vol['provider_location'] = ret['provider_location']
self.assertEqual(vol['provider_location'], '1')
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_volume_error(self, arg1, arg2, arg3):
"""test create_volume."""
self.assertRaises(exception.HBSDCmdError,
self.driver.create_volume,
self.test_volume_error)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_get_volume_stats(self, arg1, arg2):
"""test get_volume_stats."""
stats = self.driver.get_volume_stats(True)
self.assertEqual(stats['vendor_name'], 'Hitachi')
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_get_volume_stats_error(self, arg1, arg2):
"""test get_volume_stats."""
self.configuration.hitachi_pool_id = 29
stats = self.driver.get_volume_stats(True)
self.assertEqual(stats, {})
self.configuration.hitachi_pool_id = 30
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_extend_volume(self, arg1, arg2):
"""test extend_volume."""
self.driver.extend_volume(self._VOLUME, 256)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_extend_volume_error(self, arg1, arg2):
"""test extend_volume."""
self.assertRaises(exception.HBSDError, self.driver.extend_volume,
self.test_volume_error3, 256)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_delete_volume(self, arg1, arg2):
"""test delete_volume."""
self.driver.delete_volume(self._VOLUME)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_delete_volume_error(self, arg1, arg2):
"""test delete_volume."""
self.assertRaises(exception.HBSDCmdError,
self.driver.delete_volume,
self.test_volume_error4)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
return_value={'dummy_snapshot_meta': 'snapshot_meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
return_value=_VOLUME)
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5):
"""test create_snapshot."""
ret = self.driver.create_volume(self._VOLUME)
ret = self.driver.create_snapshot(self.test_snapshot)
self.assertEqual(ret['provider_location'], '1')
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
return_value={'dummy_snapshot_meta': 'snapshot_meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
return_value=test_volume_error)
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5):
"""test create_snapshot."""
self.assertRaises(exception.HBSDCmdError,
self.driver.create_snapshot,
self.test_snapshot_error2)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_delete_snapshot(self, arg1, arg2):
"""test delete_snapshot."""
self.driver.delete_snapshot(self.test_snapshot)
return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_delete_snapshot_error(self, arg1, arg2):
"""test delete_snapshot."""
self.driver.delete_snapshot(self.test_snapshot_error2)
return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_volume_from_snapshot(self, arg1, arg2, arg3):
"""test create_volume_from_snapshot."""
vol = self.driver.create_volume_from_snapshot(self._VOLUME,
self.test_snapshot)
self.assertIsNotNone(vol)
return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3):
"""test create_volume_from_snapshot."""
self.assertRaises(exception.HBSDError,
self.driver.create_volume_from_snapshot,
self.test_volume_error2, self.test_snapshot)
return
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
return_value=_VOLUME)
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
def test_create_cloned_volume(self, arg1, arg2, arg3, arg4):
"""test create_cloned_volume."""
vol = self.driver.create_cloned_volume(self._VOLUME,
self.test_snapshot)
self.assertIsNotNone(vol)
return
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
return_value=test_volume_error1)
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4):
"""test create_cloned_volume."""
self.assertRaises(exception.HBSDError,
self.driver.create_cloned_volume,
self._VOLUME, self.test_volume_error1)
return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_initialize_connection(self, arg1, arg2):
"""test initialize connection."""
connector = {
'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator':
'iqn'}
rc = self.driver.initialize_connection(self._VOLUME, connector)
self.assertEqual(rc['driver_volume_type'], 'iscsi')
self.assertEqual(rc['data']['target_iqn'], 'iqn-target')
self.assertEqual(rc['data']['target_lun'], 1)
return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_initialize_connection_error(self, arg1, arg2):
"""test initialize connection."""
connector = {
'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator':
'iqnX'}
self.assertRaises(exception.HBSDError,
self.driver.initialize_connection,
self._VOLUME, connector)
return
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_terminate_connection(self, arg1):
"""test terminate connection."""
connector = {
'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator':
'iqn'}
self.driver.terminate_connection(self._VOLUME, connector)
return
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_terminate_connection_error(self, arg1):
"""test terminate connection."""
connector = {'ip': '0.0.0.0'}
self.assertRaises(exception.HBSDError,
self.driver.terminate_connection,
self._VOLUME, connector)
return
| {
"content_hash": "d933a06469acbd3ae084ad3fa8d16f38",
"timestamp": "",
"source": "github",
"line_count": 479,
"max_line_length": 79,
"avg_line_length": 47.05845511482255,
"alnum_prop": 0.5426556053413779,
"repo_name": "jumpstarter-io/cinder",
"id": "757c28d58af6a1d45f68c94ba2f5f96f38adb65d",
"size": "23154",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "cinder/tests/test_hitachi_hbsd_snm2_iscsi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ColoraxisValidator(_plotly_utils.basevalidators.SubplotidValidator):
def __init__(
self, plotly_name="coloraxis", parent_name="scattergeo.marker", **kwargs
):
super(ColoraxisValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
dflt=kwargs.pop("dflt", None),
edit_type=kwargs.pop("edit_type", "calc"),
regex=kwargs.pop("regex", "/^coloraxis([2-9]|[1-9][0-9]+)?$/"),
**kwargs,
)
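# Illustrative check of the subplot-id pattern above (a sketch, not part of the
# generated validator): the regex accepts "coloraxis" and numbered variants
# from "coloraxis2" upwards, but not "coloraxis1".
#
#     import re
#     pattern = re.compile(r"^coloraxis([2-9]|[1-9][0-9]+)?$")
#     [bool(pattern.match(s)) for s in ("coloraxis", "coloraxis1", "coloraxis2")]
#     # -> [True, False, True]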
| {
"content_hash": "81d84f0197a081945d3ee93b16623bbc",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 80,
"avg_line_length": 36.6,
"alnum_prop": 0.5828779599271403,
"repo_name": "plotly/plotly.py",
"id": "cb110116e5327a2fa493364060a5c48ececd8b46",
"size": "549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattergeo/marker/_coloraxis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class RideshareApiConfig(AppConfig):
name = 'rideshare_api'
def ready(self):
from rideshare_api import signals
| {
"content_hash": "96d4546e5f080b50d17642bdc09d8296",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 41,
"avg_line_length": 20.5,
"alnum_prop": 0.7195121951219512,
"repo_name": "RideQuest/rideshare-backend",
"id": "4cc4121ab52e7a5c6806576db2220e3ad755fa36",
"size": "164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rideshare/rideshare_api/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42696"
}
],
"symlink_target": ""
} |
import math
import numpy
# -*- coding: utf-8 -*-
"""
Created on Wed May 31 15:27:40 2017
@author: Kristen
"""
# -*- coding: utf-8 -*-
"""
Created on Tue May 30 09:40:34 2017
@author: Kristen
"""
# finds the distance between two atoms
def distance(position1, position2):
return math.sqrt(math.pow(position1[0] - position2[0], 2) +
math.pow(position1[1] - position2[1], 2) +
math.pow(position1[2] - position2[2], 2))
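# quick worked check of distance() (illustrative): a 3-4-5 triangle in the
# x-y plane
#     distance([0.0, 0.0, 0.0], [3.0, 4.0, 0.0])  # -> 5.0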
# finds if a triplet could have an Si atom between them
def dists(positions, dist):
# if there were not enough close to make a triplet, return none
if len(positions) < 3:
return [""]
# if there is a triplet and they are close enough to have a Si,
# return the triplet, else return blank
if len(positions) == 3:
if distance(positions[1], positions[2]) <= dist:
return positions
else:
return[""]
numbers = []
if len(positions) == 5:
print(1)
    # if there are more than 2 close enough to have a Si between them, find the
    # one that could not, given the other two
for i in range(len(positions)):
numbers.append(0)
for i in range(1, len(positions) - 1):
for j in range(1, len(positions) - i):
# if two positions are not close enough, add a counter to both.
# If they are close enough, remove a counter from both
if distance(positions[i], positions[i + j]) > dist:
numbers[i] += 1
numbers[i + j] += 1
else:
numbers[i] -= 1
numbers[i + j] -= 1
    # remove the one with the most counters
del positions[numbers.index(max(numbers))]
# if these still are not close enough to have a triplet between them,
# return none. If they are close enough, return the new triplet
if distance(positions[1], positions[2]) <= dist:
return positions
else:
return[""]
# finds four-membered rings and returns a list of lists of their locations
def find_four(opositions, far):
rings = [[]]
remov = []
# for each oxygen
for i in range(len(opositions)):
rings.append([""])
rings[i] = [opositions[i]]
# for each oxygen with an x position higher than the current
for j in range(1, len(opositions) - i):
            # if the x position is less than the possible distance between two
            # oxygen atoms (variable inclusion radius)
if abs(opositions[i][0] - opositions[i + j][0]) <= far:
# if the distance between the two oxygens is less than the
# characteristic distance(variable inclusion radius)
if distance(opositions[i], opositions[i + j]) <= far:
rings[i].append(opositions[i + j])
rem = 0
if len(rings[i]) < 4:
rem = 1
elif len(rings[i]) > 4:
while len(rings[i]) != 4:
distances = []
for k in range(len(rings[i])):
tot_len = 0
for l in range(1, len(rings[i]) - k):
tot_len += distance(rings[i][k], rings[i][k + l])
distances.append(tot_len)
del rings[i][distances.index(max(distances))]
if len(rings[i]) == 4:
distances = []
for n in range(len(rings[i]) - 1):
for m in range(1, len(rings[i]) - n):
distances.append(distance(rings[i][n], rings[i][n + m]))
for n in range(2):
del distances[distances.index(max(distances))]
for n in range(4):
for m in range(1, len(distances) - n):
if abs(distances[n] - distances[n + m]) > .03:
rem = 1
if rem == 1:
remov.insert(0, i)
for n in range(len(remov)):
del rings[remov[n]]
return rings
# finds the area of triangle
def triarea(p1, p2, p3):
a = distance(p1, p2)
b = distance(p2, p3)
c = distance(p1, p3)
s = (a + b + c) / 2
return math.sqrt(s * (s - a) * (s - b) * (s - c))
def ringarea(corners):
n = len(corners)
area = 0.0
for i in range(n):
j = (i + 1) % n
area += corners[i][0] * corners[j][1]
area -= corners[j][0] * corners[i][1]
area = abs(area) / 2.0
return float(area)
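# ringarea() is the shoelace formula applied to the x-y coordinates of the
# corners, so for a unit square traversed in order (illustrative check):
#     ringarea([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]])  # -> 1.0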
# finds if the silicon atom is within a 4 membered ring
def rem4(rings, si):
for i in range(len(rings)):
triangles = 0
distances = []
locations = []
for n in range(len(rings[i]) - 1):
for m in range(1, len(rings[i]) - n):
distances.append(distance(rings[i][n], rings[i][n + m]))
locations.append([n, n + m])
locations.append(len(rings[i]))
for n in range(2):
del locations[distances.index(max(distances))]
del distances[distances.index(max(distances))]
for n in range(len(locations)):
triangles += triarea(rings[i][locations[n][0]],
rings[i][locations[n][1]], si)
if ringarea(rings[i]) == triangles:
return"n"
return"y"
# finds the position of a Si given a triplet of oxygen
def si_finder(opositions):
# characteristic distance
dist = 1.6 * math.pow(10, - 1)
    # sets up the translation to happen around a basepoint (the first point in
    # the positions)
trans = [[0, 0, 0], [opositions[1][0] - opositions[0][0],
opositions[1][1] - opositions[0][1],
opositions[1][2] - opositions[0][2]],
[opositions[2][0] - opositions[0][0],
opositions[2][1] - opositions[0][1],
opositions[2][2] - opositions[0][2]]]
# finds vector perpendicular to the plane of the three points
v = numpy.matrix([numpy.linalg.det([[trans[1][1], trans[2][1]],
[trans[1][2], trans[2][2]]]),
numpy.linalg.det([[trans[1][0], trans[2][0]],
[trans[1][2], trans[2][2]]]),
numpy.linalg.det([[trans[1][0], trans[2][0]],
[trans[1][1], trans[2][1]]])])
# sets up first rotation matrix about the x axis
theta = math.atan2(v.item(1), v.item(2))
xmatr = numpy.matrix([[1, 0, 0], [0, math.cos(theta), - math.sin(theta)],
[0, math.sin(theta), math.cos(theta)]])
trans1 = numpy.matrix(trans)
rot1 = numpy.matrix.dot(trans1, xmatr)
v1 = numpy.matrix.dot(v, xmatr)
# second rotation matrix about the y axis
rho = math.atan2(v1.item(0), v1.item(2))
ymatr = numpy.matrix([[math.cos(rho), 0, math.sin(rho)],
[0, 1, 0],
[-math.sin(rho), 0, math.cos(rho)]])
rot2 = numpy.matrix.dot(rot1, ymatr)
# should be in the xy plane now. Have to rotate such that two points
# are on the x axis
alph = math.atan2(rot2.item(4), rot2.item(3))
bet = math.atan2(rot2.item(7), rot2.item(6))
r1 = math.sqrt(math.pow(rot2.item(3), 2) + math.pow(rot2.item(4), 2))
r2 = math.sqrt(math.pow(rot2.item(6), 2) + math.pow(rot2.item(7), 2))
x = r1 / 2
y = r2 * (1 - math.cos(bet - alph)) / (2.0 * math.sin(bet - alph))
z = math.sqrt(abs(math.pow(dist, 2) - math.pow(x, 2) - math.pow(y, 2)))
si_pos = numpy.matrix([x, y, z])
    # rotate back to original position
init = math.atan2(si_pos.item(1), si_pos.item(0))
r = math.sqrt(math.pow(si_pos.item(0), 2) + math.pow(si_pos.item(1), 2))
x = r * math.cos(init + alph)
y = r * math.sin(init + alph)
si_pos = numpy.matrix([x, y, z])
# undo second rotation matrix
iymatr = numpy.linalg.inv(ymatr)
si_pos = numpy.matrix.dot(si_pos, iymatr)
# undo first rotation matrix
ixmatr = numpy.linalg.inv(xmatr)
si_pos = numpy.matrix.dot(si_pos, ixmatr)
# translate back so there is no point at the origin
si_pos = [si_pos.item(0) + opositions[0][0],
si_pos.item(1) + opositions[0][1],
si_pos.item(2) + opositions[0][2]]
return si_pos
# locates all possible triplets
def o_locator(opositions):
# assumed oxygens are ordered by increasing x values
# used to collect all the found oxygens close enough to have a single Si
# between them
found = [[""]]
# for each oxygen
for i in range(len(opositions)):
found[i] = [opositions[i]]
# for each oxygen with an x position higher than the current
for j in range(1, len(opositions) - i):
            # if the x position is less than the possible distance between two
            # oxygen atoms (variable inclusion radius)
if abs(opositions[i][0] - opositions[i + j][0]) <= \
3.45 * math.pow(10, - 1):
# if the distance between the two oxygens is less than the
# characteristic distance(variable inclusion radius)
if distance(opositions[i], opositions[i + j]) <= \
3.45 * math.pow(10, - 1):
found[i].append(opositions[i + j])
found.append([""])
# removes last appended empty list
del found[len(found) - 1]
# remove all those too far apart using dist function (variable inclusion
# radius)
for n in range(len(found)):
found[n] = dists(found[n], .345)
    # create an array for positions to remove
remov = []
# for all atoms with found oxygens
for n in range(len(found)):
# add empties to a list for removal
if found[n] == [""]:
remov.insert(0, n)
# remove those in the remove list
for m in range(len(remov)):
del found[remov[m]]
# return the list of those oxygen that have a possible Si between them
return found
def locate_si(positions, dist):
# assumes presorted positions by x position
doubles = []
# finds all within the given radius and adds those doubles to the list
for i in range(len(positions)):
for j in range(1, len(positions) - i):
if distance(positions[i], positions[i + j]) <= dist:
doubles.append([positions[i], positions[i + j]])
return doubles
def find_o(positions, dist):
opositions = []
for i in range(len(positions)):
# center at origin
pos2 = [positions[i][1][0] - positions[i][0][0], positions[i][1][1] -
positions[i][0][1], positions[i][1][2] - positions[i][0][2]]
# rotate until both points are in the xy plane
theta = numpy.arctan2(pos2[1], pos2[0])
phi = numpy.arctan2(pos2[2], pos2[0])
newx = math.sqrt(math.pow(pos2[0], 2) + math.pow(pos2[2], 2))
newy = newx * math.tan(theta)
        # find the Si position (midpoint between origin and pos 2 in the x - y
        # plane with x making up the difference)
x = newx / 2
y = newy / 2
if math.pow(dist, 2) - math.pow(x, 2) - math.pow(y, 2) > 0:
z = math.sqrt(math.pow(dist, 2) - math.pow(x, 2) - math.pow(y, 2))
else:
z = 0
# current angle above x - y plane
r = math.sqrt(math.pow(x, 2) + math.pow(y, 2) + math.pow(z, 2))
alph = math.asin(z / r)
# when rotated back, it will rotate to angle phi + alph
opos = [r * math.cos(theta) * math.cos(alph + phi),
r * math.sin(theta) * math.cos(alph + phi),
r * math.sin(alph + phi)]
# append to the list
opositions.append([opos[0] + positions[i][0][0], opos[1] +
positions[i][0][1], opos[2] + positions[i][0][2]])
return opositions
def main():
# input center positions
cpfile = input("Centers_to_test.txt")
# convert data in file into floats and append to a position list
with open(cpfile) as f:
content = f.readline()
string = ""
locations = []
for i in range(len(content)):
if content[i] == " ":
locations.append(float(string))
string = ""
else:
string += content[i]
locations.append(float(string))
positions = [[""]]
for i in range(len(locations)):
if i % 3 == 0:
positions[int(i / 3)] = [locations[i]]
positions.append("")
else:
positions[int(i / 3)].append(locations[i])
del positions[len(positions) - 1]
# sort positions for the double finder function
positions = sorted(positions)
# Create a Graph of the Input Data
xypts = []
for i in range(len(positions)):
xypts.append([positions[i][0], positions[i][1]])
# print(xypts)
points = numpy.array(xypts)
from scipy.spatial import Delaunay
tri = Delaunay(points)
print(len(tri.simplices))
# print(tri.simplices)
o_locations = []
for i in range(len(tri.simplices)):
midptx1 = 0.50 * (points[tri.simplices][i][0][0] +
points[tri.simplices][i][1][0])
midpty1 = 0.50 * (points[tri.simplices][i][0][1] +
points[tri.simplices][i][1][1])
o_locations.append([midptx1, midpty1, 0])
midptx2 = (points[tri.simplices][i][1][0] +
points[tri.simplices][i][2][0]) / 2.00
midpty2 = (points[tri.simplices][i][1][1] +
points[tri.simplices][i][2][1]) / 2.00
o_locations.append([midptx2, midpty2, 0])
midptx3 = (points[tri.simplices][i][2][0] +
points[tri.simplices][i][0][0]) / 2.00
midpty3 = (points[tri.simplices][i][2][1] +
points[tri.simplices][i][0][1]) / 2.00
o_locations.append([midptx3, midpty3, 0])
print(len(o_locations))
    o_locations = sorted(o_locations)
# print(o_locations)
remove = []
for i in range(len(o_locations) - 1):
if o_locations[i] == o_locations[i + 1]:
remove.append(i + 1)
remove.sort(reverse=True)
print(len(o_locations))
# print(remove)
for i in range(len(remove)):
del (o_locations[remove[i]])
print(len(o_locations))
print(len(o_locations))
xOpos = []
yOpos = []
for i in range(len(o_locations)):
xOpos.append(o_locations[i][0])
yOpos.append(o_locations[i][1])
# write O positions to an out file
out = open("OfC Positions 120106_008 Python Output.txt", "w")
out.write(str(o_locations))
out.write("nn")
positions = o_locations
# find triplets
triples = o_locator(positions)
print(triples)
# find Si positions
si_locations = []
for j in range(len(triples)):
si_locations.append(si_finder(triples[j]))
delete = []
for i in range(len(delete)):
del si_locations[delete[i]]
#--------------------------Plotting the locations ----------------------------#
xSipos = []
ySipos = []
for i in range(len(si_locations)):
xSipos.append(si_locations[i][0])
ySipos.append(si_locations[i][1])
xOpos = []
yOpos = []
for i in range(len(o_locations)):
xOpos.append(o_locations[i][0])
yOpos.append(o_locations[i][1])
import matplotlib.pyplot as plt
plt.triplot(points[:, 0], points[:, 1], tri.simplices.copy())
    plt.plot(points[:, 0], points[:, 1], 'o', color='#2E9AFE')
    plt.scatter(xOpos, yOpos, label='Center Positions', color='#2E9AFE')
plt.scatter(xOpos, yOpos, label='Oxygen Positions', color='r')
plt.scatter(xSipos, ySipos, label='Silicon Positions', color='g')
# write Si positions to an outfile
out = open("Si Positions Output 170404.txt", "w")
out.write(str(si_locations))
out.write("\n")
plt.xlabel('x (nm)')
plt.ylabel('y (nm)')
plt.title('Center Positions')
plt.legend()
plt.show()
# write O positions to an out file
out = open("OfC Positions 120106_008 Python Output.txt", "w")
out.write(str(o_locations))
out.write("nn")
#--------------------Addition of ring finding---------------------------------#
    # assigns the nearest 3 adjacent rings to each Si
"""import Si_Ring_Classes
si_objects = []
for loc in si_locations:
si = Si_Ring_Classes.Si(loc[0], loc[1], loc[2])
si.find_rings(locations)
si_objects.append(si)
for si in si_objects:
print(si.get_location(), end=" ")
for ring in si.get_rings():
print(ring.get_location(), end=" ")
print()
"""
if __name__ == "__main__":
main()
| {
"content_hash": "bd5ee7fe3550838e4bbc8f620d83dbe8",
"timestamp": "",
"source": "github",
"line_count": 524,
"max_line_length": 79,
"avg_line_length": 32.774809160305345,
"alnum_prop": 0.530860603237452,
"repo_name": "BursonLab/Silica-Coding-Project",
"id": "d26f5e2e66f74624b9541e15fbad1b71493afd21",
"size": "17174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Misc Parts/Input_Center_Positions_w: triplet finder.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2863"
},
{
"name": "Python",
"bytes": "293037"
}
],
"symlink_target": ""
} |
from .. import utils
from ..Node import Node
from ..Remote import Remote
from ..Route import Route
from .TCPServer import TCPServer
from .TCPProtocol import TCPProtocol
from .TCPRPC import TCPRPC
from .TCPCall import TCPCall
from .TCPEvent import TCPEvent
class TCPService(object):
"""TCPService
    An object that provides all Kademlia objects over TCP
Vars:
server: Kademlia TCP Server
protocol: Kademlia TCP Protocol
node: Present Node on TCP
rpc: Kademlia Message Compress Module for TCP
call: Remote Call Service on TCP Protocol
"""
def __init__(self, config, service, loop):
self.loop = loop
self.service = service
self.config = config
self.logger = self.service.logger
self.__logger__ = self.logger.get_logger("TCPService")
self.server = TCPServer(
service = self,
loop = self.loop,
host = self.config["server"]["host"],
port = self.config["server"]["port"]
)
self.rpc = TCPRPC(
service = self,
loop = self.loop
)
self.protocol = TCPProtocol(
service = self,
loop = self.loop
)
self.call = TCPCall(
service = self,
loop = self.loop
)
self.node = Node(
id = utils.dump_node_hex(self.config["node"]["id"]),
remote = Remote(
host = self.config["server"]["host"],
port = self.config["server"]["port"]
)
)
self.event = TCPEvent(
service = self,
loop = self.loop
)
self.queue = self.service.queue
self.storage = self.service.storage
self.route = Route(self, loop, config["kbucket"]["ksize"], self.node)
self.handler = self.service.handler
async def start(self):
await self.server.start_server()
self.__logger__.info("DDCM TCP Service has been started.")
self.__logger__.info("DDCM TCP Service is listening on " + self.config["server"]["host"] + ":" + str(self.config["server"]["port"]))
async def stop(self):
await self.server.stop_server()
self.__logger__.info("DDCM TCP Service has been stopped.")
| {
"content_hash": "3841c232314ad715690648c4e2b73dfd",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 140,
"avg_line_length": 30.773333333333333,
"alnum_prop": 0.5641247833622184,
"repo_name": "SkyZH/ddcm-protocol",
"id": "128070a01d74b905436901609807a75972b11129",
"size": "2308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ddcm/TCPService/TCPService.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "80828"
},
{
"name": "Shell",
"bytes": "68"
}
],
"symlink_target": ""
} |
from django import template
from django.template.loader import render_to_string
from src.basic.tools.templatetags.utils import parse_ttag
register = template.Library()
class RenderTemplateNode(template.Node):
def __init__(self, object_name, template_dir):
self.object_name = object_name
self.template_dir = template_dir.rstrip('/').strip('"').strip("'")
def render(self, context):
try:
obj = template.resolve_variable(self.object_name, context)
template_name = '%s.%s.html' % (obj._meta.app_label, obj._meta.module_name)
template_list = [
'%s/%s' % (self.template_dir, template_name),
'%s/default.html' % self.template_dir
]
context['object'] = obj
return render_to_string(template_list, context)
except AttributeError:
if (type(obj) in (int, unicode, str)):
return obj
return ''
except template.VariableDoesNotExist:
return ''
@register.tag()
def render_template(parser, token):
"""
    Returns the proper template based on the object's content_type. If a
    template doesn't exist it'll fall back to default.html.
Syntax:
{% render_template for [object] in [path to templates] %}
Usage:
{% render_template for entry in "includes/lists" %}
"""
tags = parse_ttag(token, ['for', 'in'])
if len(tags) != 3:
raise template.TemplateSyntaxError, '%r tag has invalid arguments' % tags['tag_name']
return RenderTemplateNode(object_name=tags['for'], template_dir=tags['in'])
| {
"content_hash": "7cb36eba669febd8bb25df517f89b0cf",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 93,
"avg_line_length": 33.326530612244895,
"alnum_prop": 0.6117575015309247,
"repo_name": "hittu123/ruhive",
"id": "a175cc8b33fdaac0dccafaadca4f3645f636f819",
"size": "1633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/basic/tools/templatetags/objectutils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1122"
},
{
"name": "HTML",
"bytes": "147248"
},
{
"name": "JavaScript",
"bytes": "1420"
},
{
"name": "Python",
"bytes": "240403"
}
],
"symlink_target": ""
} |
import pytest
from django.contrib import auth
from server.models import Log
@pytest.mark.django_db
def test_log_get_user(client):
username = 'username'
password = 'thepassword'
user = auth.models.User.objects.create_user(
username,
'[email protected]',
password
)
Log.objects.create(user=user, checksum="1", data="{}")
Log.objects.create(user=None, checksum="2", data="{}")
response = client.get('/api/log/')
assert len(response.data) == 2
client.login(username=username, password=password)
response = client.get('/api/log/')
assert len(response.data) == 1
response = client.get('/api/log/?all=True')
assert len(response.data) == 2
| {
"content_hash": "b42406de95ed54e723ec6c79ef4eb980",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 58,
"avg_line_length": 23.225806451612904,
"alnum_prop": 0.6486111111111111,
"repo_name": "jerith/vmprof-server",
"id": "6f3816ab5a2ba16732c4d6f8f40cb6b8fc3e544b",
"size": "720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/web/test_log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "120995"
},
{
"name": "HTML",
"bytes": "11639"
},
{
"name": "JavaScript",
"bytes": "144684"
},
{
"name": "Python",
"bytes": "52643"
}
],
"symlink_target": ""
} |
from .iq_sync import SyncIqProtocolEntity
from .iq_sync_get import GetSyncIqProtocolEntity
from .iq_sync_result import ResultSyncIqProtocolEntity
from .notification_contact_add import AddContactNotificationProtocolEntity
from .notification_contact_remove import RemoveContactNotificationProtocolEntity
from .notification_contact_update import UpdateContactNotificationProtocolEntity
from .notificiation_contacts_sync import ContactsSyncNotificationProtocolEntity
from .iq_statuses_get import GetStatusesIqProtocolEntity
from .iq_statuses_result import ResultStatusesIqProtocolEntity
| {
"content_hash": "0c15c9c10cf6894394f2e937238e32b3",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 80,
"avg_line_length": 64.77777777777777,
"alnum_prop": 0.8936535162950258,
"repo_name": "biji/yowsup",
"id": "ca6f461e9e69c75482badc6cfc793af7dc01e349",
"size": "583",
"binary": false,
"copies": "28",
"ref": "refs/heads/master",
"path": "yowsup/layers/protocol_contacts/protocolentities/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "222487"
}
],
"symlink_target": ""
} |
import os
import json
import datetime
def check_file_exists(file_path):
    """ Check whether the file at file_path exists. """
    return os.path.isfile(file_path)
def check_date_string(date):
""" Return True if the input string is a valid YYYY-MM-DD date, False
otherwise. """
try:
datetime.datetime.strptime(date, '%Y-%m-%d')
except ValueError:
return False
return True
def read_config(config_file):
""" Read and validate configuration file stored at config_file location. """
if not check_file_exists(config_file):
raise NameError("Error: Cannot read config file at " + config_file)
with open(config_file, "r") as config_f:
config = json.load(config_f)
if not isinstance(config['irc_port'], int):
raise ValueError("Error: Invalid irc_port, must be an integer.")
if not 0 < config['irc_port'] < 65536:
raise ValueError("Error: Invalid irc_port, must be between 1 - 65535.")
if len(config['irc_nickname']) < 3:
raise ValueError("Error: Invalid irc_nickname, must be at least 3 characters long.")
if len(config['irc_channels']) < 1:
print("Warning: no channel set in config/config.json, no channel will be connected.")
return config
| {
"content_hash": "da1a34b5d8de87b684f919a412a7518d",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 93,
"avg_line_length": 28.02127659574468,
"alnum_prop": 0.6461655277145026,
"repo_name": "icydoge/dogexam",
"id": "def2756afe0792be3cda237208685cbc9481afd5",
"size": "1357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dogexam/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24193"
}
],
"symlink_target": ""
} |
"""
parser.py
Created by Thomas Mangin on 2014-07-01.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
# Generic ======================================================================
#
# As long as pack or unpack is needed here it means our API cleanup is not complete
from struct import pack
from struct import unpack
from socket import error as SocketError
from exabgp.protocol.ip import IP
from exabgp.bgp.message.open.asn import ASN
from exabgp.bgp.message.open.routerid import RouterID
from exabgp.bgp.message.open.holdtime import HoldTime
def string (tokeniser):
return tokeniser()
def boolean (tokeniser,default):
value = tokeniser()
boolean = value.lower()
if boolean in ('true','enable','enabled'):
value = True
elif boolean in ('false','disable','disabled'):
value = False
elif boolean in ('unset',):
value = None
else:
tokeniser.rewind(value)
return default
return value
def md5 (tokeniser):
value = tokeniser()
if len(value) > 80:
raise ValueError('MD5 password must be no larger than 80 characters')
if not value:
raise ValueError('value requires the value password as an argument (quoted or unquoted). FreeBSD users should use "kernel" as the argument.')
return value
def ttl (tokeniser):
value = tokeniser()
# XXX: FIXME: Should it be a subclass of int ?
try:
ttl = int(value)
except ValueError:
if value in ('false','disable','disabled'):
return None
raise ValueError('invalid ttl-security "%s"' % value)
if ttl < 0:
raise ValueError('ttl-security can not be negative')
if ttl >= 255:
raise ValueError('ttl must be smaller than 256')
return ttl
def asn (tokeniser,value=None):
value = tokeniser() if value is None else value
try:
if value.count('.'):
high,low = value.split('.',1)
asn = (int(high) << 16) + int(low)
else:
asn = int(value)
return ASN(asn)
except ValueError:
raise ValueError('"%s" is an invalid ASN' % value)
def ip (tokeniser):
value = tokeniser()
try:
return IP.create(value)
except (IndexError,ValueError,SocketError):
raise ValueError('"%s" is an invalid IP address' % value)
def routerid (tokeniser):
return RouterID(tokeniser())
def holdtime (tokeniser):
value = tokeniser()
try:
holdtime = HoldTime(value)
except ValueError:
raise ValueError ('"%s" is an invalid hold-time' % value)
if holdtime < 3 and holdtime != 0:
raise ValueError('holdtime must be zero or at least three seconds')
# XXX: FIXME: add HoldTime.MAX and reference it ( pow -1 )
if holdtime >= pow(2,16):
raise ValueError('holdtime must be smaller than %d' % pow(2,16))
return holdtime
# Attributes ===================================================================
#
# ==================================================================== Attribute
#
from exabgp.bgp.message.update.attribute.attribute import Attribute
def attribute (tokeniser):
start = tokeniser()
if start != '[':
raise ValueError('invalid attribute, does not starts with [')
code = tokeniser().lower()
if not code.startswith('0x'):
raise ValueError('invalid attribute, code is not 0x hexadecimal')
try:
code = int(code[2:],16)
except ValueError:
raise ValueError('invalid attribute, code is not 0x hexadecimal')
flag = tokeniser().lower()
if not flag.startswith('0x'):
raise ValueError('invalid attribute, flag is not 0x hexadecimal')
try:
flag = int(flag[2:],16)
except ValueError:
raise ValueError('invalid attribute, flag is not 0x hexadecimal')
data = tokeniser().lower()
if not data.startswith('0x'):
raise ValueError('invalid attribute, data is not 0x hexadecimal')
if not len(data) % 2:
raise ValueError('invalid attribute, data is not 0x hexadecimal')
data = ''.join(chr(int(data[_:_+2],16)) for _ in range(2,len(data),2))
end = tokeniser()
if end != ']':
raise ValueError('invalid attribute, does not ends with ]')
# XXX: FIXME: class Attribute should have an unpack function which does that
from exabgp.bgp.message.update.attribute.unknown import GenericAttribute
for ((ID,flag),klass) in Attribute.registered_attributes.iteritems():
if code == ID and flag == klass.FLAG:
return klass(data)
return GenericAttribute(code,flag,data)
# ====================================================================== NextHop
#
from exabgp.bgp.message.update.attribute.nexthop import NextHop
def next_hop (tokeniser):
value = tokeniser()
if value.lower() == 'self':
# next-hop self is unsupported yet
raise ValueError('unsupported yet on new format')
else:
return NextHop(value)
# ======================================================================= Origin
#
from exabgp.bgp.message.update.attribute.origin import Origin
def origin (tokeniser):
value = tokeniser().lower()
if value == 'igp':
return Origin(Origin.IGP)
if value == 'egp':
return Origin(Origin.EGP)
if value == 'incomplete':
return Origin(Origin.INCOMPLETE)
raise ValueError('unknown origin %s' % value)
# ========================================================================== MED
#
from exabgp.bgp.message.update.attribute.med import MED
def med (tokeniser):
value = tokeniser()
if not value.isdigit():
raise ValueError('invalid MED %s' % value)
return MED(value)
# ======================================================================= ASPath
#
from exabgp.bgp.message.update.attribute.aspath import ASPath
def aspath (tokeniser):
as_seq = []
as_set = []
value = tokeniser()
inset = False
try:
if value == '[':
while True:
value = tokeniser()
if value == ',':
continue
if value in ('(','['):
inset = True
while True:
value = tokeniser()
if value == ')':
break
as_set.append(asn(tokeniser,value))
if value == ')':
inset = False
continue
if value == ']':
if inset:
inset = False
continue
break
				as_seq.append(asn(tokeniser,value))
else:
as_seq.append(asn(tokeniser,value))
except ValueError:
raise ValueError('could not parse as-path')
return ASPath(as_seq,as_set)
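# Sketch of the token stream aspath() consumes (illustrative): for the
# configuration text `[ 65001 65002 ( 65003 65004 ) ]` the tokeniser yields
# '[', '65001', '65002', '(', '65003', '65004', ')', ']', and the parser
# returns ASPath([65001, 65002], [65003, 65004]) - the parenthesised ASNs form
# the AS_SET, the remaining ones the AS_SEQUENCE.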
# ============================================================== LocalPreference
#
from exabgp.bgp.message.update.attribute.localpref import LocalPreference
def local_preference (tokeniser):
value = tokeniser()
if not value.isdigit():
raise ValueError('invalid local preference %s' % value)
return LocalPreference(value)
# ==================================================================== Community
#
from exabgp.bgp.message.update.attribute.atomicaggregate import AtomicAggregate
def atomic_aggregate (tokeniser):
return AtomicAggregate()
# =================================================================== Aggregator
#
from exabgp.bgp.message.update.attribute.aggregator import Aggregator
def aggregator (tokeniser):
value = tokeniser()
if value != '(':
tokeniser.rewind(value)
return None
try:
asn,address = tokeniser().split(':')
except (ValueError,IndexError):
raise ValueError('invalid aggregator')
value = tokeniser()
if value != ')':
raise ValueError('invalid aggregator')
local_as = ASN(asn)
local_address = RouterID(address)
# XXX: This is buggy it can be an Aggregator4
return Aggregator(local_as,local_address)
# ================================================================= OriginatorID
#
from exabgp.bgp.message.update.attribute.originatorid import OriginatorID
def originator_id (tokeniser):
value = tokeniser()
if not value.isdigit():
raise ValueError('invalid Originator ID %s' % value)
return OriginatorID(value)
# ================================================================== ClusterList
#
from exabgp.bgp.message.update.attribute.clusterlist import ClusterList
from exabgp.bgp.message.update.attribute.clusterlist import ClusterID
def cluster_list (tokeniser):
clusterids = []
value = tokeniser()
try:
if value == '[':
while True:
value = tokeniser()
if value == ']':
break
clusterids.append(ClusterID(value))
else:
clusterids.append(ClusterID(value))
if not clusterids:
raise ValueError('no cluster-id in the cluster list')
return ClusterList(clusterids)
except ValueError:
		raise ValueError('invalid cluster list')
# ==================================================================== Community
#
from exabgp.bgp.message.update.attribute.community import Community
def _community (value):
separator = value.find(':')
if separator > 0:
prefix = value[:separator]
suffix = value[separator+1:]
if not prefix.isdigit() or not suffix.isdigit():
raise ValueError('invalid community %s' % value)
prefix, suffix = int(prefix), int(suffix)
# XXX: FIXME: add a Community.MAX to pow(2,16) -1
if prefix >= pow(2,16):
raise ValueError('invalid community %s (prefix too large)' % value)
# XXX: FIXME: add a Community.MAX to pow(2,16) -1
if suffix >= pow(2,16):
raise ValueError('invalid community %s (suffix too large)' % value)
return Community(pack('!L',(prefix<<16) + suffix))
elif value[:2].lower() == '0x':
value = long(value,16)
# XXX: FIXME: add a Community.MAX to pow(2,16) -1
if value >= pow(2,32):
raise ValueError('invalid community %s (too large)' % value)
return Community(pack('!L',value))
else:
low = value.lower()
if low == 'no-export':
return Community(Community.NO_EXPORT)
elif low == 'no-advertise':
return Community(Community.NO_ADVERTISE)
elif low == 'no-export-subconfed':
return Community(Community.NO_EXPORT_SUBCONFED)
# no-peer is not a correct syntax but I am sure someone will make the mistake :)
elif low == 'nopeer' or low == 'no-peer':
return Community(Community.NO_PEER)
elif value.isdigit():
value = unpack('!L',value)[0]
if value >= pow(2,32):
raise ValueError('invalid community %s (too large)' % value)
return Community(pack('!L',value))
else:
raise ValueError('invalid community name %s' % value)
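# Worked examples for _community() (illustrative):
#     '65000:100'  -> Community(pack('!L', (65000 << 16) + 100))   # 0xFDE80064
#     'no-export'  -> Community(Community.NO_EXPORT)               # the RFC 1997 well-known value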
from exabgp.bgp.message.update.attribute.community import Communities
def community (tokeniser):
communities = Communities()
value = tokeniser()
if value == '[':
while True:
value = tokeniser()
if value == ']':
break
communities.add(_community(value))
else:
communities.add(_community(value))
return communities
# ========================================================== ExtendedCommunities
#
from exabgp.bgp.message.update.attribute.community.extended import ExtendedCommunity
def _extended_community (value):
if value[:2].lower() == '0x':
if not len(value) % 2:
raise ValueError('invalid extended community %s' % value)
try:
raw = ''
for i in range(2,len(value),2):
raw += chr(int(value[i:i+2],16))
except ValueError:
raise ValueError('invalid extended community %s' % value)
if len(raw) != 8:
raise ValueError('invalid extended community %s' % value)
return ExtendedCommunity.unpack(raw)
elif value.count(':'):
_known_community = {
# header and subheader
'target' : chr(0x00)+chr(0x02),
'target4' : chr(0x02)+chr(0x02),
'origin' : chr(0x00)+chr(0x03),
'origin4' : chr(0x02)+chr(0x03),
'l2info' : chr(0x80)+chr(0x0A),
}
_size_community = {
'target' : 2,
'target4' : 2,
'origin' : 2,
'origin4' : 2,
'l2info' : 4,
}
components = value.split(':')
command = 'target' if len(components) == 2 else components.pop(0)
if command not in _known_community:
raise ValueError('invalid extended community %s (only origin,target or l2info are supported) ' % command)
if len(components) != _size_community[command]:
raise ValueError('invalid extended community %s, expecting %d fields ' % (command,len(components)))
header = _known_community[command]
if command == 'l2info':
# encaps, control, mtu, site
return ExtendedCommunity.unpack(header+pack('!BBHH',*[int(_) for _ in components]))
if command in ('target','origin'):
# global admin, local admin
ga,la = components
if '.' in ga or '.' in la:
gc = ga.count('.')
lc = la.count('.')
if gc == 0 and lc == 3:
# ASN first, IP second
return ExtendedCommunity.unpack(header+pack('!HBBBB',int(ga),*[int(_) for _ in la.split('.')]))
if gc == 3 and lc == 0:
# IP first, ASN second
return ExtendedCommunity.unpack(header+pack('!BBBBH',*[int(_) for _ in ga.split('.')]+[int(la)]))
else:
if command == 'target':
if ga.upper().endswith('L'):
return ExtendedCommunity.unpack(_known_community['target4']+pack('!LH',int(ga[:-1]),int(la)))
else:
return ExtendedCommunity.unpack(header+pack('!HI',int(ga),int(la)))
if command == 'origin':
if ga.upper().endswith('L'):
						return ExtendedCommunity.unpack(_known_community['origin4']+pack('!LH',int(ga[:-1]),int(la)))
else:
return ExtendedCommunity.unpack(header+pack('!IH',int(ga),int(la)))
raise ValueError('invalid extended community %s' % command)
else:
raise ValueError('invalid extended community %s - lc+gc' % value)
from exabgp.bgp.message.update.attribute.community import ExtendedCommunities
# This is the same code as community with a different parser, should be factored
def extended_community (tokeniser):
communities = ExtendedCommunities()
value = tokeniser()
if value == '[':
while True:
value = tokeniser()
if value == ']':
break
communities.add(_extended_community(value))
else:
communities.add(_extended_community(value))
return communities
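# Illustrative sketch of the extended-community encodings handled above:
#   _extended_community('target:65000:100')
#       # -> ExtendedCommunity.unpack(chr(0x00)+chr(0x02)+pack('!HI',65000,100))
#   _extended_community('origin:10.0.0.1:20')
#       # -> ExtendedCommunity.unpack(chr(0x00)+chr(0x03)+pack('!BBBBH',10,0,0,1,20))
#   _extended_community('l2info:19:0:1500:111')
#       # -> ExtendedCommunity.unpack(chr(0x80)+chr(0x0A)+pack('!BBHH',19,0,1500,111))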
# ===================================================== Fake Attribute: Watchdog
#
def watchdog (tokeniser):
class Watchdog (str):
ID = Attribute.ID.INTERNAL_WATCHDOG
MULTIPLE = False
watchdog = tokeniser()
if watchdog.lower() in ['announce','withdraw']:
raise ValueError('invalid watchdog name %s' % watchdog)
return Watchdog(watchdog)
# ===================================================== Fake Attribute: Withdraw
#
def withdraw (tokeniser=None):
class Withdrawn (object):
ID = Attribute.ID.INTERNAL_WITHDRAW
MULTIPLE = False
return Withdrawn()
# Qualifiers ===================================================================
#
# =========================================================== RouteDistinguisher
#
from exabgp.bgp.message.update.nlri.qualifier.rd import RouteDistinguisher
def rd (tokeniser):
try:
value = tokeniser()
separator = value.find(':')
if separator > 0:
prefix = value[:separator]
suffix = int(value[separator+1:])
			# XXX: FIXME: we need many more checks here instead of the blanket try/except...
if '.' in prefix:
bytes = [chr(0),chr(1)]
bytes.extend([chr(int(_)) for _ in prefix.split('.')])
bytes.extend([chr(suffix>>8),chr(suffix&0xFF)])
rd = ''.join(bytes)
else:
number = int(prefix)
if number < pow(2,16) and suffix < pow(2,32):
rd = chr(0) + chr(0) + pack('!H',number) + pack('!L',suffix)
elif number < pow(2,32) and suffix < pow(2,16):
rd = chr(0) + chr(2) + pack('!L',number) + pack('!H',suffix)
else:
raise ValueError('invalid route-distinguisher %s' % value)
except ValueError:
raise ValueError('invalid route-distinguisher %s' % value)
return RouteDistinguisher(rd)
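# Sketch of the route-distinguisher wire formats built above:
#   '65000:100'   -> type 0: chr(0)+chr(0) + pack('!H',65000) + pack('!L',100)
#   '1.2.3.4:100' -> type 1: chr(0)+chr(1) + one byte per IP octet + 2-byte suffix
#   '100000:100'  -> type 2: chr(0)+chr(2) + pack('!L',100000) + pack('!H',100)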
# ============================================================== PathInformation
#
from exabgp.bgp.message.update.nlri.qualifier.path import PathInfo
def path_information (tokeniser):
try:
pi = tokeniser()
if pi.isdigit():
return PathInfo(integer=int(pi))
return PathInfo(ip=pi)
except ValueError:
		raise ValueError('invalid path-information')
# ======================================================================= Labels
#
from exabgp.bgp.message.update.nlri.qualifier.labels import Labels
def label (tokeniser):
labels = []
value = tokeniser()
try:
if value == '[':
while True:
value = tokeniser()
if value == ']':
break
labels.append(int(value))
else:
labels.append(int(value))
except ValueError:
raise ValueError('invalid label %s' % value)
return Labels(labels)
| {
"content_hash": "3ecdb0603bf36a03d2ef3d76e97d16a9",
"timestamp": "",
"source": "github",
"line_count": 591,
"max_line_length": 144,
"avg_line_length": 26.927241962774957,
"alnum_prop": 0.6291315822546186,
"repo_name": "lochiiconnectivity/exabgp",
"id": "38fb9ee5a6f33b92bef6d862b19d94b828401c6a",
"size": "15932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/exabgp/configuration/engine/parser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "1516"
},
{
"name": "Python",
"bytes": "1203596"
},
{
"name": "Shell",
"bytes": "17662"
}
],
"symlink_target": ""
} |
from flask.ext.wtf import Form
from wtforms.fields import StringField, SubmitField, TextField
from wtforms.fields.html5 import EmailField
from wtforms.validators import Email, InputRequired, Length
class ContactForm(Form):
name = StringField(
'Name', validators=[
InputRequired(),
Length(1, 500),
])
email = EmailField(
'Email', validators=[
InputRequired(),
Length(1, 500),
Email(),
])
message = TextField('Message', validators=[InputRequired()])
submit = SubmitField('Submit')
class ContactCategoryForm(Form):
name = StringField(
'Name', validators=[
InputRequired(),
Length(1, 250),
])
submit = SubmitField('Add Category')
class EditCategoryNameForm(Form):
name = TextField(
'Name', validators=[
InputRequired(),
Length(1, 250),
])
submit = SubmitField('Update name')
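# Usage sketch in a view (illustrative; `send_contact_email` is a hypothetical
# helper, not part of this module):
#   form = ContactForm()
#   if form.validate_on_submit():
#       send_contact_email(form.name.data, form.email.data, form.message.data)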
| {
"content_hash": "ac321cf0be09764d7ccfb1b46bd681cc",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 64,
"avg_line_length": 25.81578947368421,
"alnum_prop": 0.5922528032619776,
"repo_name": "hack4impact/asylum-connect-catalog",
"id": "109ffd75254bf3be50e5410743752fc9946338ed",
"size": "981",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/contact/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "228573"
},
{
"name": "HTML",
"bytes": "348143"
},
{
"name": "JavaScript",
"bytes": "373099"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "Python",
"bytes": "184367"
}
],
"symlink_target": ""
} |
class Singleton(object):
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Singleton, cls).__new__(cls, *args, **kwargs)
return cls._instance
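# Usage sketch: every instantiation returns the same cached object.
#   class Config(Singleton):   # hypothetical subclass for illustration
#       pass
#   a = Config()
#   b = Config()
#   assert a is b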
| {
"content_hash": "1137962806b8abf0ef94e766ee3a28ab",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 73,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.6213592233009708,
"repo_name": "jeanqasaur/jeeves",
"id": "d135fa434ac2ddffa9ff122f5043e1efcdf35e49",
"size": "206",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "util/Singleton.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "9025"
},
{
"name": "Python",
"bytes": "358080"
}
],
"symlink_target": ""
} |
'''Clip all layers in the map to the specified polygon.
Adapted from Alex Tereshenkov, http://gis.stackexchange.com/a/111712/108
'''
import os
import arcpy
arcpy.env.overwriteOutput = True
mxd = arcpy.GetParameterAsText(0)
clip_layer = arcpy.GetParameterAsText(1)
out_gdb = arcpy.GetParameterAsText(2)
if not mxd:
mxd = arcpy.mapping.MapDocument("CURRENT")
else:
mxd = arcpy.mapping.MapDocument(mxd)
for lyr in arcpy.mapping.ListLayers(mxd):
if lyr.isBroken:
arcpy.AddMessage('"%s"\t skipping broken layer' % lyr)
continue
elif not lyr.isGroupLayer:
arcpy.AddMessage('"%s"\t Clipping...' % lyr)
out_layer = os.path.join(out_gdb, lyr.name)
if lyr.isFeatureLayer:
arcpy.Clip_analysis(lyr, clip_layer, out_layer)
elif lyr.isRasterLayer:
arcpy.Clip_management(lyr, '#', out_layer, clip_layer, '#', 'ClippingGeometry')
else:
arcpy.AddMessage('"%s" skipping, not a Feature or Raster layer')
else:
if not lyr.isGroupLayer:
arcpy.AddMessage('"%s"\t unknown layer type, dont know what to do with it.' % lyr)
print arcpy.GetMessages()
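# Parameter sketch when the script is wired up as a script tool:
#   0 - map document (.mxd); falls back to the CURRENT document if empty
#   1 - polygon feature class used as the clip boundary
#   2 - output geodatabase receiving the clipped layers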
| {
"content_hash": "50c788ef5a37b9bb146ae77fe1d8fd43",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 94,
"avg_line_length": 33.885714285714286,
"alnum_prop": 0.6500843170320405,
"repo_name": "maphew/arcplus",
"id": "94895a124391ce2fc9418b8334ba6b41f8060c6e",
"size": "1186",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ArcToolbox/Scripts/clip_all_layers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7656"
},
{
"name": "Python",
"bytes": "76084"
},
{
"name": "Shell",
"bytes": "3824"
}
],
"symlink_target": ""
} |
'''This is the urlconf for contact urls.'''
from django.conf.urls import *
from external import views
urlpatterns = patterns('',
url(r'^new/$', views.ContactCreate.as_view(), name="contact-new"),
url(r'^(?P<slug>[\w-]+)/$', views.ContactDetail.as_view(), name="contact-detail"),
url(r'^(?P<slug>[\w-]+)/edit$', views.ContactUpdate.as_view(), name="contact-edit"),
url(r'^(?P<slug>[\w-]+)/delete$', views.ContactDelete.as_view(), name="contact-delete"),
url(r'^$', views.ContactList.as_view(), name="contact-list"),
)
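# URL sketch produced by these patterns (assuming the urlconf is included
# under a 'contact/' prefix):
#   contact/new/            -> ContactCreate
#   contact/<slug>/         -> ContactDetail
#   contact/<slug>/edit     -> ContactUpdate
#   contact/<slug>/delete   -> ContactDelete
#   contact/                -> ContactList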
| {
"content_hash": "29efdf43ddbd04beec6b3ff74372ca71",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 89,
"avg_line_length": 40.84615384615385,
"alnum_prop": 0.6459510357815442,
"repo_name": "davebridges/ExperimentDB",
"id": "82dbe8f7dd800b367e1b21fd453f1cf8c001d106",
"size": "531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "external/urls/contact.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "3273"
},
{
"name": "CSS",
"bytes": "171399"
},
{
"name": "JavaScript",
"bytes": "858989"
},
{
"name": "PHP",
"bytes": "21302"
},
{
"name": "Python",
"bytes": "247551"
},
{
"name": "Shell",
"bytes": "6713"
}
],
"symlink_target": ""
} |
""" Jegou S. et al "`The One Hundred Layers Tiramisu:
Fully Convolutional DenseNets for Semantic Segmentation
<https://arxiv.org/abs/1611.09326>`_"
"""
import tensorflow as tf
from . import TFModel
from .densenet import DenseNet
class DenseNetFC(TFModel):
""" DenseNet for semantic segmentation
**Configuration**
inputs : dict
dict with 'images' and 'masks' (see :meth:`~.TFModel._make_inputs`)
body : dict
num_layers : list of int
number of layers in downsampling/upsampling blocks
block : dict
dense block parameters
transition_down : dict
downsampling transition layer parameters
transition_up : dict
upsampling transition layer parameters
"""
@classmethod
def default_config(cls):
""" Define model defaults. See :meth: `~.TFModel.default_config` """
config = TFModel.default_config()
config['common/conv/use_bias'] = False
config['initial_block'] += dict(layout='c', filters=48, kernel_size=3, strides=1)
config['body']['block'] = dict(layout='nacd', dropout_rate=.2, growth_rate=12, bottleneck=False)
config['body']['transition_up'] = dict(layout='t', factor=2, kernel_size=3)
config['body']['transition_down'] = dict(layout='nacdp', kernel_size=1, strides=1,
pool_size=2, pool_strides=2, dropout_rate=.2,
reduction_factor=1)
config['head'].update(dict(layout='c', kernel_size=1))
config['loss'] = 'ce'
return config
def build_config(self, names=None):
""" Define model's architecture configuration. See :meth: `~.TFModel.build_config` """
config = super().build_config(names)
if config.get('head/filters') is None:
config['head/filters'] = self.num_classes('targets')
return config
@classmethod
def body(cls, inputs, name='body', **kwargs):
""" FC DenseNet body
Parameters
----------
inputs : tf.Tensor
input tensor
name : str
scope name
Returns
-------
tf.Tensor
"""
kwargs = cls.fill_params('body', **kwargs)
num_layers, block = cls.pop(['num_layers', 'block'], kwargs)
trans_up, trans_down = cls.pop(['transition_up', 'transition_down'], kwargs)
block = {**kwargs, **block}
trans_up = {**kwargs, **trans_up}
trans_down = {**kwargs, **trans_down}
with tf.variable_scope(name):
x, inputs = inputs, None
encoder_outputs = []
for i, n_layers in enumerate(num_layers[:-1]):
x = DenseNet.block(x, num_layers=n_layers, name='encoder-%d' % i, **block)
encoder_outputs.append(x)
x = cls.transition_down(x, name='transition_down-%d' % i, **trans_down)
x = DenseNet.block(x, num_layers=num_layers[-1], name='encoder-%d' % len(num_layers), **block)
axis = cls.channels_axis(kwargs.get('data_format'))
for i, n_layers in enumerate(num_layers[-2::-1]):
x = cls.transition_up(x, filters=num_layers[-i-1] * block['growth_rate'],
name='transition_up-%d' % i, **trans_up)
x = DenseNet.block(x, num_layers=n_layers, name='decoder-%d' % i, **block)
x = cls.crop(x, encoder_outputs[-i-1], data_format=kwargs.get('data_format'))
x = tf.concat((x, encoder_outputs[-i-1]), axis=axis)
return x
@classmethod
def transition_down(cls, inputs, name='transition_down', **kwargs):
""" A downsampling interconnect layer between two dense blocks
Parameters
----------
inputs : tf.Tensor
input tensor
name : str
scope name
Returns
-------
tf.Tensor
"""
kwargs = cls.fill_params('body/transition_down', **kwargs)
return DenseNet.transition_layer(inputs, name=name, **kwargs)
@classmethod
def transition_up(cls, inputs, name='transition_up', **kwargs):
""" An upsampling interconnect layer between two dense blocks
Parameters
----------
inputs : tf.Tensor
input tensor
name : str
scope name
Returns
-------
tf.Tensor
"""
kwargs = cls.fill_params('body/transition_up', **kwargs)
filters = kwargs.pop('filters', cls.num_channels(inputs, kwargs.get('data_format')))
return cls.upsample(inputs, filters=filters, name=name, **kwargs)
class DenseNetFC56(DenseNetFC):
""" FC DenseNet-56 architecture """
@classmethod
def default_config(cls):
config = DenseNetFC.default_config()
config['body']['num_layers'] = [4] * 6
config['body']['block']['growth_rate'] = 12
return config
class DenseNetFC67(DenseNetFC):
""" FC DenseNet-67 architecture """
@classmethod
def default_config(cls):
config = DenseNetFC.default_config()
config['body']['num_layers'] = [5] * 6
config['body']['block']['growth_rate'] = 16
return config
class DenseNetFC103(DenseNetFC):
""" FC DenseNet-103 architecture """
@classmethod
def default_config(cls):
config = DenseNetFC.default_config()
config['body']['num_layers'] = [4, 5, 7, 10, 12, 15]
config['body']['block']['growth_rate'] = 16
return config
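# Configuration sketch (values are illustrative; the keys mirror default_config above):
#   config['body']['num_layers'] = [4, 5, 7, 10, 12, 15]
#   config['body']['block'] = dict(growth_rate=16, dropout_rate=.2)
#   config['body']['transition_up'] = dict(factor=2, kernel_size=3)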
| {
"content_hash": "63258dbb53dc0b48ed75486ccfece260",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 106,
"avg_line_length": 34.44444444444444,
"alnum_prop": 0.5641577060931899,
"repo_name": "analysiscenter/dataset",
"id": "23ad5e665ab2c753767ee6230468341c6f0e37aa",
"size": "5580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "batchflow/models/tf/densenet_fc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "711078"
}
],
"symlink_target": ""
} |
from .disposable import Disposable
from .booleandisposable import BooleanDisposable
from .compositedisposable import CompositeDisposable
from .singleassignmentdisposable import SingleAssignmentDisposable
from .serialdisposable import SerialDisposable
from .refcountdisposable import RefCountDisposable
from .scheduleddisposable import ScheduledDisposable | {
"content_hash": "b2fb344b077c08a51e3e4eb7667f7673",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 66,
"avg_line_length": 44.375,
"alnum_prop": 0.9014084507042254,
"repo_name": "dbrattli/RxPY",
"id": "376709a1f2a98e041766b18db6a97a593f74e0ea",
"size": "355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rx/disposables/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1334787"
}
],
"symlink_target": ""
} |
"""
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for
matplotlib. They have been rewritten and extended for convenience.
"""
from __future__ import division, absolute_import, print_function
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.core.overrides import array_function_dispatch
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
from numpy.testing import suppress_warnings
if sys.version_info[0] < 3:
from future_builtins import zip
_check_fill_value = np.ma.core._check_fill_value
__all__ = [
'append_fields', 'apply_along_fields', 'assign_fields_by_name',
'drop_fields', 'find_duplicates', 'flatten_descr',
'get_fieldstructure', 'get_names', 'get_names_flat',
'join_by', 'merge_arrays', 'rec_append_fields',
'rec_drop_fields', 'rec_join', 'recursive_fill_fields',
'rename_fields', 'repack_fields', 'require_fields',
'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured',
]
def _recursive_fill_fields_dispatcher(input, output):
return (input, output)
@array_function_dispatch(_recursive_fill_fields_dispatcher)
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names is not None:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def _get_fieldspec(dtype):
"""
Produce a list of name/dtype pairs corresponding to the dtype fields
Similar to dtype.descr, but the second item of each tuple is a dtype, not a
string. As a result, this handles subarray dtypes
Can be passed to the dtype constructor to reconstruct the dtype, noting that
this (deliberately) discards field offsets.
Examples
--------
>>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)])
>>> dt.descr
[(('a', 'A'), '<i8'), ('b', '<f8', (3,))]
>>> _get_fieldspec(dt)
[(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))]
"""
if dtype.names is None:
# .descr returns a nameless field, so we should too
return [('', dtype)]
else:
fields = ((name, dtype.fields[name]) for name in dtype.names)
# keep any titles, if present
return [
(name if len(f) == 2 else (f[2], name), f[0])
for name, f in fields
]
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int))
Traceback (most recent call last):
...
AttributeError: 'numpy.ndarray' object has no attribute 'names'
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
Traceback (most recent call last):
...
AttributeError: 'numpy.ndarray' object has no attribute 'names'
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names is not None:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames)
def get_names_flat(adtype):
"""
    Returns the field names of the input datatype as a tuple. Nested structures
are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
Traceback (most recent call last):
...
AttributeError: 'numpy.ndarray' object has no attribute 'names'
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
Traceback (most recent call last):
...
AttributeError: 'numpy.ndarray' object has no attribute 'names'
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names is not None:
listnames.extend(get_names_flat(current))
return tuple(listnames)
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return (('', ndtype),)
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def _zip_dtype(seqarrays, flatten=False):
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
if current.names is not None and len(current.names) == 1:
# special case - dtypes of 1 field are flattened
newdtype.extend(_get_fieldspec(current))
else:
newdtype.append(('', current))
return np.dtype(newdtype)
def _zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
return _zip_dtype(seqarrays, flatten=flatten).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields indexing lists of their parent fields.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
        Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names is not None:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if (hasattr(element, '__iter__') and
not isinstance(element, basestring)):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def _izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
flatten : {True, False},
        Whether to collapse nested fields.
"""
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
if sys.version_info[0] >= 3:
zip_longest = itertools.zip_longest
else:
zip_longest = itertools.izip_longest
for tup in zip_longest(*seqarrays, fillvalue=fill_value):
yield tuple(zipfunc(tup))
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None,
usemask=None, asrecarray=None):
return seqarrays
@array_function_dispatch(_merge_arrays_dispatcher)
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
array([( 1, 10.), ( 2, 20.), (-1, 30.)],
dtype=[('f0', '<i8'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64),
... np.array([10., 20., 30.])), usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i8'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)],
dtype=[('a', '<i8'), ('f1', '<f8')])
Notes
-----
* Without a mask, the missing value will be filled with something,
depending on what its corresponding type:
* ``-1`` for integers
* ``-1.0`` for floating point numbers
* ``'-'`` for characters
* ``'-1'`` for strings
* ``True`` for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
# Make sure we have named fields
if seqdtype.names is None:
seqdtype = np.dtype([('', seqdtype)])
if not flatten or _zip_dtype((seqarrays,), flatten=True) == seqdtype:
            # Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = _zip_dtype(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(_izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(_izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(_izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def _drop_fields_dispatcher(base, drop_names, usemask=None, asrecarray=None):
return (base,)
@array_function_dispatch(_drop_fields_dispatcher)
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : string or sequence, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])])
>>> rfn.drop_fields(a, 'a')
array([((2., 3),), ((5., 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i8')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))], dtype=[('a', '<i8'), ('b', [('bb', '<i8')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)], dtype=[('a', '<i8')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names]
else:
drop_names = set(drop_names)
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def _keep_fields(base, keep_names, usemask=True, asrecarray=False):
"""
Return a new array keeping only the fields in `keep_names`,
and preserving the order of those fields.
Parameters
----------
base : array
Input array
keep_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to keep. Order of the names will be preserved.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : string or sequence, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
"""
newdtype = [(n, base.dtype[n]) for n in keep_names]
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def _rec_drop_fields_dispatcher(base, drop_names):
return (base,)
@array_function_dispatch(_rec_drop_fields_dispatcher)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def _rename_fields_dispatcher(base, namemapper):
return (base,)
@array_function_dispatch(_rename_fields_dispatcher)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))],
dtype=[('A', '<i8'), ('b', [('ba', '<f8'), ('BB', '<f8', (2,))])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names is not None:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def _append_fields_dispatcher(base, names, data, dtypes=None,
fill_value=None, usemask=None, asrecarray=None):
yield base
for d in data:
yield d
@array_function_dispatch(_append_fields_dispatcher)
def append_fields(base, names, data, dtypes=None,
fill_value=-1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else:
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(
max(len(base), len(data)),
dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype))
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
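# Usage sketch for append_fields (illustrative):
#   >>> a = np.array([(1,), (2,)], dtype=[('x', 'i4')])
#   >>> append_fields(a, 'y', np.array([10., 20.]), usemask=False)
#   # -> ndarray with dtype [('x', '<i4'), ('y', '<f8')]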
def _rec_append_fields_dispatcher(base, names, data, dtypes=None):
yield base
for d in data:
yield d
@array_function_dispatch(_rec_append_fields_dispatcher)
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def _repack_fields_dispatcher(a, align=None, recurse=None):
return (a,)
@array_function_dispatch(_repack_fields_dispatcher)
def repack_fields(a, align=False, recurse=False):
"""
Re-pack the fields of a structured array or dtype in memory.
The memory layout of structured datatypes allows fields at arbitrary
byte offsets. This means the fields can be separated by padding bytes,
their offsets can be non-monotonically increasing, and they can overlap.
This method removes any overlaps and reorders the fields in memory so they
have increasing byte offsets, and adds or removes padding bytes depending
on the `align` option, which behaves like the `align` option to `np.dtype`.
If `align=False`, this method produces a "packed" memory layout in which
each field starts at the byte the previous field ended, and any padding
bytes are removed.
    If `align=True`, this method produces an "aligned" memory layout in which
each field's offset is a multiple of its alignment, and the total itemsize
is a multiple of the largest alignment, by adding padding bytes as needed.
Parameters
----------
a : ndarray or dtype
array or dtype for which to repack the fields.
align : boolean
If true, use an "aligned" memory layout, otherwise use a "packed" layout.
recurse : boolean
If True, also repack nested structures.
Returns
-------
repacked : ndarray or dtype
Copy of `a` with fields repacked, or `a` itself if no repacking was
needed.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> def print_offsets(d):
... print("offsets:", [d.fields[name][1] for name in d.names])
... print("itemsize:", d.itemsize)
...
>>> dt = np.dtype('u1, <i8, <f8', align=True)
>>> dt
dtype({'names':['f0','f1','f2'], 'formats':['u1','<i8','<f8'], 'offsets':[0,8,16], 'itemsize':24}, align=True)
>>> print_offsets(dt)
offsets: [0, 8, 16]
itemsize: 24
>>> packed_dt = rfn.repack_fields(dt)
>>> packed_dt
dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')])
>>> print_offsets(packed_dt)
offsets: [0, 1, 9]
itemsize: 17
"""
if not isinstance(a, np.dtype):
dt = repack_fields(a.dtype, align=align, recurse=recurse)
return a.astype(dt, copy=False)
if a.names is None:
return a
fieldinfo = []
for name in a.names:
tup = a.fields[name]
if recurse:
fmt = repack_fields(tup[0], align=align, recurse=True)
else:
fmt = tup[0]
if len(tup) == 3:
name = (tup[2], name)
fieldinfo.append((name, fmt))
dt = np.dtype(fieldinfo, align=align)
return np.dtype((a.type, dt))
def _get_fields_and_offsets(dt, offset=0):
"""
Returns a flat list of (dtype, count, offset) tuples of all the
scalar fields in the dtype "dt", including nested fields, in left
to right order.
"""
# counts up elements in subarrays, including nested subarrays, and returns
# base dtype and count
def count_elem(dt):
count = 1
while dt.shape != ():
for size in dt.shape:
count *= size
dt = dt.base
return dt, count
fields = []
for name in dt.names:
field = dt.fields[name]
f_dt, f_offset = field[0], field[1]
f_dt, n = count_elem(f_dt)
if f_dt.names is None:
fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset))
else:
subfields = _get_fields_and_offsets(f_dt, f_offset + offset)
size = f_dt.itemsize
for i in range(n):
if i == 0:
# optimization: avoid list comprehension if no subarray
fields.extend(subfields)
else:
fields.extend([(d, c, o + i*size) for d, c, o in subfields])
return fields
def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None,
casting=None):
return (arr,)
@array_function_dispatch(_structured_to_unstructured_dispatcher)
def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
"""
    Converts an n-D structured array into an (n+1)-D unstructured array.
The new array will have a new last dimension equal in size to the
number of field-elements of the input array. If not supplied, the output
datatype is determined from the numpy type promotion rules applied to all
the field datatypes.
Nested fields, as well as each element of any subarray fields, all count
    as single field-elements.
Parameters
----------
arr : ndarray
Structured array or dtype to convert. Cannot contain object datatype.
dtype : dtype, optional
The dtype of the output unstructured array.
copy : bool, optional
See copy argument to `ndarray.astype`. If true, always return a copy.
If false, and `dtype` requirements are satisfied, a view is returned.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
See casting argument of `ndarray.astype`. Controls what kind of data
casting may occur.
Returns
-------
unstructured : ndarray
Unstructured array with one more dimension.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
>>> a
array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]),
(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])],
dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
>>> rfn.structured_to_unstructured(a)
array([[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]])
>>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
>>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1)
array([ 3. , 5.5, 9. , 11. ])
"""
if arr.dtype.names is None:
raise ValueError('arr must be a structured array')
fields = _get_fields_and_offsets(arr.dtype)
n_fields = len(fields)
if n_fields == 0 and dtype is None:
raise ValueError("arr has no fields. Unable to guess dtype")
elif n_fields == 0:
# too many bugs elsewhere for this to work now
raise NotImplementedError("arr with no fields is not supported")
dts, counts, offsets = zip(*fields)
names = ['f{}'.format(n) for n in range(n_fields)]
if dtype is None:
out_dtype = np.result_type(*[dt.base for dt in dts])
else:
out_dtype = dtype
# Use a series of views and casts to convert to an unstructured array:
# first view using flattened fields (doesn't work for object arrays)
# Note: dts may include a shape for subarrays
flattened_fields = np.dtype({'names': names,
'formats': dts,
'offsets': offsets,
'itemsize': arr.dtype.itemsize})
with suppress_warnings() as sup: # until 1.16 (gh-12447)
sup.filter(FutureWarning, "Numpy has detected")
arr = arr.view(flattened_fields)
# next cast to a packed format with all fields converted to new dtype
packed_fields = np.dtype({'names': names,
'formats': [(out_dtype, dt.shape) for dt in dts]})
arr = arr.astype(packed_fields, copy=copy, casting=casting)
# finally is it safe to view the packed fields as the unstructured type
return arr.view((out_dtype, (sum(counts),)))
def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None,
align=None, copy=None, casting=None):
return (arr,)
@array_function_dispatch(_unstructured_to_structured_dispatcher)
def unstructured_to_structured(arr, dtype=None, names=None, align=False,
copy=False, casting='unsafe'):
"""
    Converts an n-D unstructured array into an (n-1)-D structured array.
The last dimension of the input array is converted into a structure, with
number of field-elements equal to the size of the last dimension of the
input array. By default all output fields have the input array's dtype, but
an output structured dtype with an equal number of fields-elements can be
supplied instead.
Nested fields, as well as each element of any subarray fields, all count
towards the number of field-elements.
Parameters
----------
arr : ndarray
Unstructured array or dtype to convert.
dtype : dtype, optional
The structured dtype of the output array
names : list of strings, optional
If dtype is not supplied, this specifies the field names for the output
dtype, in order. The field dtypes will be the same as the input array.
align : boolean, optional
Whether to create an aligned memory layout.
copy : bool, optional
See copy argument to `ndarray.astype`. If true, always return a copy.
If false, and `dtype` requirements are satisfied, a view is returned.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
See casting argument of `ndarray.astype`. Controls what kind of data
casting may occur.
Returns
-------
structured : ndarray
Structured array with fewer dimensions.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
>>> a = np.arange(20).reshape((4,5))
>>> a
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]])
>>> rfn.unstructured_to_structured(a, dt)
array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]),
(10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])],
dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
"""
if arr.shape == ():
raise ValueError('arr must have at least one dimension')
n_elem = arr.shape[-1]
if n_elem == 0:
# too many bugs elsewhere for this to work now
raise NotImplementedError("last axis with size 0 is not supported")
if dtype is None:
if names is None:
names = ['f{}'.format(n) for n in range(n_elem)]
out_dtype = np.dtype([(n, arr.dtype) for n in names], align=align)
fields = _get_fields_and_offsets(out_dtype)
dts, counts, offsets = zip(*fields)
else:
if names is not None:
raise ValueError("don't supply both dtype and names")
# sanity check of the input dtype
fields = _get_fields_and_offsets(dtype)
if len(fields) == 0:
dts, counts, offsets = [], [], []
else:
dts, counts, offsets = zip(*fields)
if n_elem != sum(counts):
raise ValueError('The length of the last dimension of arr must '
'be equal to the number of fields in dtype')
out_dtype = dtype
if align and not out_dtype.isalignedstruct:
raise ValueError("align was True but dtype is not aligned")
names = ['f{}'.format(n) for n in range(len(fields))]
# Use a series of views and casts to convert to a structured array:
# first view as a packed structured array of one dtype
packed_fields = np.dtype({'names': names,
'formats': [(arr.dtype, dt.shape) for dt in dts]})
arr = np.ascontiguousarray(arr).view(packed_fields)
# next cast to an unpacked but flattened format with varied dtypes
flattened_fields = np.dtype({'names': names,
'formats': dts,
'offsets': offsets,
'itemsize': out_dtype.itemsize})
arr = arr.astype(flattened_fields, copy=copy, casting=casting)
# finally view as the final nested dtype and remove the last axis
return arr.view(out_dtype)[..., 0]
def _apply_along_fields_dispatcher(func, arr):
return (arr,)
@array_function_dispatch(_apply_along_fields_dispatcher)
def apply_along_fields(func, arr):
"""
Apply function 'func' as a reduction across fields of a structured array.
This is similar to `apply_along_axis`, but treats the fields of a
structured array as an extra axis. The fields are all first cast to a
common type following the type-promotion rules from `numpy.result_type`
applied to the field's dtypes.
Parameters
----------
func : function
Function to apply on the "field" dimension. This function must
support an `axis` argument, like np.mean, np.sum, etc.
arr : ndarray
Structured array for which to apply func.
Returns
-------
out : ndarray
        Result of the reduction operation
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
>>> rfn.apply_along_fields(np.mean, b)
array([ 2.66666667, 5.33333333, 8.66666667, 11. ])
>>> rfn.apply_along_fields(np.mean, b[['x', 'z']])
array([ 3. , 5.5, 9. , 11. ])
"""
if arr.dtype.names is None:
raise ValueError('arr must be a structured array')
uarr = structured_to_unstructured(arr)
return func(uarr, axis=-1)
# works and avoids axis requirement, but very, very slow:
#return np.apply_along_axis(func, -1, uarr)
def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None):
return dst, src
@array_function_dispatch(_assign_fields_by_name_dispatcher)
def assign_fields_by_name(dst, src, zero_unassigned=True):
"""
Assigns values from one structured array to another by field name.
Normally in numpy >= 1.14, assignment of one structured array to another
copies fields "by position", meaning that the first field from the src is
copied to the first field of the dst, and so on, regardless of field name.
This function instead copies "by field name", such that fields in the dst
are assigned from the identically named field in the src. This applies
recursively for nested structures. This is how structure assignment worked
in numpy >= 1.6 to <= 1.13.
Parameters
----------
dst : ndarray
src : ndarray
The source and destination arrays during assignment.
zero_unassigned : bool, optional
If True, fields in the dst for which there was no matching
field in the src are filled with the value 0 (zero). This
was the behavior of numpy <= 1.13. If False, those fields
are not modified.
"""
if dst.dtype.names is None:
dst[...] = src
return
for name in dst.dtype.names:
if name not in src.dtype.names:
if zero_unassigned:
dst[name] = 0
else:
assign_fields_by_name(dst[name], src[name],
zero_unassigned)
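# Usage sketch for assign_fields_by_name (illustrative):
#   >>> dst = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f8')])
#   >>> src = np.ones(3, dtype=[('b', 'f8'), ('c', 'u1')])
#   >>> assign_fields_by_name(dst, src)
#   # 'b' is copied by name from src; 'a' has no match and is zeroed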
def _require_fields_dispatcher(array, required_dtype):
return (array,)
@array_function_dispatch(_require_fields_dispatcher)
def require_fields(array, required_dtype):
"""
Casts a structured array to a new dtype using assignment by field-name.
This function assigns from the old to the new array by name, so the
value of a field in the output array is the value of the field with the
same name in the source array. This has the effect of creating a new
ndarray containing only the fields "required" by the required_dtype.
If a field name in the required_dtype does not exist in the
input array, that field is created and set to 0 in the output array.
Parameters
----------
a : ndarray
array to cast
required_dtype : dtype
datatype for output array
Returns
-------
out : ndarray
array with the new dtype, with field values copied from the fields in
the input array with the same name
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
>>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')])
array([(1., 1), (1., 1), (1., 1), (1., 1)],
dtype=[('b', '<f4'), ('c', 'u1')])
>>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')])
array([(1., 0), (1., 0), (1., 0), (1., 0)],
dtype=[('b', '<f4'), ('newf', 'u1')])
"""
out = np.empty(array.shape, dtype=required_dtype)
assign_fields_by_name(out, array)
return out
def _stack_arrays_dispatcher(arrays, defaults=None, usemask=None,
asrecarray=None, autoconvert=None):
return arrays
@array_function_dispatch(_stack_arrays_dispatcher)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
    Superposes arrays field by field
Parameters
----------
arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords is
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
autoconvert : {False, True}, optional
        Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0),
(b'b', 20.0, 200.0), (b'c', 30.0, 300.0)],
mask=[(False, False, True), (False, False, True),
(False, False, False), (False, False, False),
(False, False, False)],
fill_value=(b'N/A', 1.e+20, 1.e+20),
dtype=[('A', 'S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = _get_fieldspec(dtype_l)
names = [n for n, d in newdescr]
for dtype_n in ndtype[1:]:
for fname, fdtype in _get_fieldspec(dtype_n):
if fname not in names:
newdescr.append((fname, fdtype))
names.append(fname)
else:
nameidx = names.index(fname)
_, cdtype = newdescr[nameidx]
if autoconvert:
newdescr[nameidx] = (fname, max(fdtype, cdtype))
elif fdtype != cdtype:
raise TypeError("Incompatible type '%s' <> '%s'" %
(cdtype, fdtype))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def _find_duplicates_dispatcher(
a, key=None, ignoremask=None, return_index=None):
return (a,)
@array_function_dispatch(_find_duplicates_dispatcher)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
(masked_array(data=[(1,), (1,), (2,), (2,)],
mask=[(False,), (False,), (False,), (False,)],
fill_value=(999999,),
dtype=[('a', '<i8')]), array([0, 1, 3, 4]))
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
def _join_by_dispatcher(
key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
defaults=None, usemask=None, asrecarray=None):
return (r1, r2)
@array_function_dispatch(_join_by_dispatcher)
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
    The key should be either a string or a sequence of strings corresponding
to the fields used to join the array. An exception is raised if the
`key` field cannot be found in the two input arrays. Neither `r1` nor
`r2` should have any duplicates along `key`: the presence of duplicates
will make the output quite unreliable. Note that duplicates are not
looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
If 'outer', returns the common elements as well as the elements of
        r1 not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1
not in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present
        in r2 but absent from the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present
        in r1 but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for
the two arrays and concatenating the result. This array is then
sorted, and the common entries selected. The output is constructed by
filling the fields with the selected entries. Matching is not
preserved if there are some duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError(
"The 'jointype' argument should be in 'inner', "
"'outer' or 'leftouter' (got '%s' instead)" % jointype
)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
if len(set(key)) != len(key):
dup = next(x for n,x in enumerate(key) if x in key[n+1:])
raise ValueError("duplicate join key %r" % dup)
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %r' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %r' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
# Fixme: nb2 below is never used. Commenting out for pyflakes.
# (nb1, nb2) = (len(r1), len(r2))
nb1 = len(r1)
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
collisions = (set(r1names) & set(r2names)) - set(key)
if collisions and not (r1postfix or r2postfix):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't both be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
# (use order of keys in `r1` for back-compatibility)
key1 = [ n for n in r1names if n in key ]
r1k = _keep_fields(r1, key1)
r2k = _keep_fields(r2, key1)
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = _get_fieldspec(r1k.dtype)
# Add the fields from r1
for fname, fdtype in _get_fieldspec(r1.dtype):
if fname not in key:
ndtype.append((fname, fdtype))
# Add the fields from r2
for fname, fdtype in _get_fieldspec(r2.dtype):
# Have we seen the current name already ?
# we need to rebuild this list every time
names = list(name for name, dtype in ndtype)
try:
nameidx = names.index(fname)
except ValueError:
#... we haven't: just add the description to the current list
ndtype.append((fname, fdtype))
else:
# collision
_, cdtype = ndtype[nameidx]
if fname in key:
# The current field is part of the key: take the largest dtype
ndtype[nameidx] = (fname, max(fdtype, cdtype))
else:
# The current field is not part of the key: add the suffixes,
# and place the new field adjacent to the old one
ndtype[nameidx:nameidx + 1] = [
(fname + r1postfix, cdtype),
(fname + r2postfix, fdtype)
]
# Rebuild a dtype from the new fields
ndtype = np.dtype(ndtype)
# Find the largest nb of common fields :
# r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
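# Illustrative sketch (editor addition, not part of the original source): a
# minimal join_by demo on two assumed toy structured arrays.
def _join_by_example():  # hypothetical helper, for illustration only
    a = np.array([(1, 10.), (2, 20.)], dtype=[('key', int), ('x', float)])
    b = np.array([(1, 100.), (3, 300.)], dtype=[('key', int), ('y', float)])
    # The inner join keeps only key == 1; jointype='outer' would also keep
    # keys 2 and 3, with the missing side masked.
    return join_by('key', a, b, jointype='inner')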
def _rec_join_dispatcher(
key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
defaults=None):
return (r1, r2)
@array_function_dispatch(_rec_join_dispatcher)
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
| {
"content_hash": "0d0f3de645753d0a11766f0f7321480a",
"timestamp": "",
"source": "github",
"line_count": 1608,
"max_line_length": 114,
"avg_line_length": 35.274253731343286,
"alnum_prop": 0.5826413497646374,
"repo_name": "kushalbhola/MyStuff",
"id": "40060b41a7dc2f6c213923e721b9144a4c229843",
"size": "56721",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Practice/PythonApplication/env/Lib/site-packages/numpy/lib/recfunctions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1330"
},
{
"name": "C#",
"bytes": "332967"
},
{
"name": "CSS",
"bytes": "1451"
},
{
"name": "HTML",
"bytes": "7539"
},
{
"name": "Java",
"bytes": "14860"
},
{
"name": "JavaScript",
"bytes": "9843"
},
{
"name": "Jupyter Notebook",
"bytes": "374013"
},
{
"name": "PowerShell",
"bytes": "1448"
},
{
"name": "Python",
"bytes": "6511820"
},
{
"name": "Tcl",
"bytes": "24289"
},
{
"name": "TypeScript",
"bytes": "15697"
}
],
"symlink_target": ""
} |
import pandas as pd
import numpy as np
from scipy import stats
import math
def reduce_to_first_significant_digit(quantity, uncertainty):
first_significant_digit = math.floor(math.log10(abs(uncertainty)))
quantity = round(quantity, -first_significant_digit)
uncertainty = round(uncertainty, -first_significant_digit)
return quantity, uncertainty
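# Illustrative check (editor addition, not part of the original script): the
# uncertainty 0.0342 has its first significant digit in the second decimal
# place, so both values are rounded to two decimals, giving (7.89, 0.03).
def _example_rounding():  # hypothetical helper, for illustration only
    return reduce_to_first_significant_digit(7.8912, 0.0342)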
# Input experimental data and output csv file
path_to_experimental_results = "pKa_results_of_replicate_experiments.csv"
path_to_experimental_pKa_values = "pKa_experimental_values.csv"
# Read experimental results with 3 replicate measurements
df_exp_results = pd.read_csv(path_to_experimental_results)
# Create new dataframe to store pKa value statistics
df_exp_pKa = pd.DataFrame()
df_exp_pKa["Molecule ID"] = np.NaN
df_exp_pKa["pKa1 mean"] = np.NaN
df_exp_pKa["pKa1 SEM"] = np.NaN
df_exp_pKa["pKa2 mean"] = np.NaN
df_exp_pKa["pKa2 SEM"] = np.NaN
df_exp_pKa["pKa3 mean"] = np.NaN
df_exp_pKa["pKa3 SEM"] = np.NaN
df_exp_pKa["Assay Type"] = np.NaN
df_exp_pKa["Experimental Molecule ID"] = np.NaN
df_exp_pKa["canonical isomeric SMILES"] = np.NaN
# Iterate over every 3rd experiment to get molecule IDs
index_range = np.arange(0,df_exp_results.shape[0],3,dtype=int)
for i in index_range:
molecule_ID = df_exp_results.loc[i,"Molecule ID"]
assay_type = df_exp_results.loc[i,"Assay Type"]
exp_molecule_ID = df_exp_results.loc[i,"Experimental Molecule ID"]
smiles = df_exp_results.loc[i,"canonical isomeric SMILES"]
s = pd.Series([molecule_ID, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, assay_type, exp_molecule_ID, smiles], index = df_exp_pKa.columns)
df_exp_pKa = df_exp_pKa.append(s, ignore_index=True)
# Calculate mean and SEM for pKa values of each molecule
for i, row in enumerate(df_exp_pKa.iterrows()):
molecule_ID = row[1]["Molecule ID"]
pKa1_SEM = np.NaN
pKa2_SEM = np.NaN
pKa3_SEM = np.NaN
# Parse pKa values of each replicate experiment for each molecule ID
df_exp_result = df_exp_results.loc[df_exp_results["Molecule ID"] == molecule_ID]
pKa1_array = df_exp_result["pKa1"]
pKa2_array = df_exp_result["pKa2"]
pKa3_array = df_exp_result["pKa3"]
    # Calculate mean of 3 replicates, formatted to 2 decimal places
pKa1_mean = float(format(np.mean(pKa1_array), '.2f'))
pKa2_mean = float(format(np.mean(pKa2_array), '.2f'))
pKa3_mean = float(format(np.mean(pKa3_array), '.2f'))
#pKa2_mean = np.mean(pKa2_array)
#pKa3_mean = np.mean(pKa3_array)
# Calculate standard error of the mean (SEM)
# ddof=0 provides a maximum likelihood estimate of the variance for normally distributed variables
pKa1_SEM = stats.sem(pKa1_array, ddof = 0)
pKa2_SEM = stats.sem(pKa2_array, ddof = 0)
pKa3_SEM = stats.sem(pKa3_array, ddof = 0)
#print(molecule_ID,pKa1_SEM)
    # Reduce SEM values to the 1st significant digit
    # Since pKa experimental data was reported to 2 decimal places,
    # SEM will be reported as 0.01 if the SEM calculated from 3 replicates is lower than 0.01.
minimum_SEM = float(0.01)
if pKa1_SEM == 0:
pKa1_SEM = minimum_SEM
elif (np.isnan(pKa1_SEM) == False):
pKa1_SEM = max(minimum_SEM, reduce_to_first_significant_digit(pKa1_mean, pKa1_SEM)[1])
if pKa2_SEM == 0:
pKa2_SEM = minimum_SEM
elif np.isnan(pKa2_SEM) == False:
pKa2_SEM = max(minimum_SEM, reduce_to_first_significant_digit(pKa2_mean, pKa2_SEM)[1])
if pKa3_SEM == 0:
pKa3_SEM = minimum_SEM
elif np.isnan(pKa3_SEM) == False:
pKa3_SEM = max(minimum_SEM, reduce_to_first_significant_digit(pKa3_mean, pKa3_SEM)[1])
# Write mean and SEM values to df_exp_pKa dataframe
df_exp_pKa.loc[i, "pKa1 mean"] = str(format(pKa1_mean, '.2f'))
df_exp_pKa.loc[i, "pKa2 mean"] = str(format(pKa2_mean, '.2f'))
df_exp_pKa.loc[i, "pKa3 mean"] = str(format(pKa3_mean, '.2f'))
df_exp_pKa.loc[i, "pKa1 SEM"] = str(format(pKa1_SEM, '.2f'))
df_exp_pKa.loc[i, "pKa2 SEM"] = str(format(pKa2_SEM, '.2f'))
df_exp_pKa.loc[i, "pKa3 SEM"] = str(format(pKa3_SEM, '.2f'))
# Replace "nan" strings with empty cells in the dataframe.
for i,row in enumerate(df_exp_pKa.iterrows()):
pKa1_mean = row[1]["pKa1 mean"]
pKa1_SEM = row[1]["pKa1 SEM"]
pKa2_mean = row[1]["pKa2 mean"]
pKa2_SEM = row[1]["pKa2 SEM"]
pKa3_mean = row[1]["pKa3 mean"]
pKa3_SEM = row[1]["pKa3 SEM"]
if pKa1_mean == "nan":
pKa1_mean = ""
if pKa1_SEM == "nan":
pKa1_SEM = ""
if pKa2_mean == "nan":
pKa2_mean = ""
if pKa2_SEM == "nan":
pKa2_SEM = ""
if pKa3_mean == "nan":
pKa3_mean = ""
if pKa3_SEM == "nan":
pKa3_SEM = ""
df_exp_pKa.loc[i, "pKa1 mean"] = pKa1_mean
df_exp_pKa.loc[i, "pKa1 SEM"] = pKa1_SEM
df_exp_pKa.loc[i, "pKa2 mean"] = pKa2_mean
df_exp_pKa.loc[i, "pKa2 SEM"] = pKa2_SEM
df_exp_pKa.loc[i, "pKa3 mean"] = pKa3_mean
df_exp_pKa.loc[i, "pKa3 SEM"] = pKa3_SEM
# Save pKa mean and SEM values in a CSV file.
df_exp_pKa.to_csv(path_to_experimental_pKa_values, index=False)
print("Done.")
| {
"content_hash": "f28279703730db7696e41f01dc333200",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 145,
"avg_line_length": 37.8235294117647,
"alnum_prop": 0.6467729393468118,
"repo_name": "MobleyLab/SAMPL6",
"id": "c864d37dcb72112d0be6546a5d527f8854619825",
"size": "5267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "physical_properties/pKa/experimental_data/calc_pKa_value_statistics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "17449"
},
{
"name": "Python",
"bytes": "764981"
},
{
"name": "Rich Text Format",
"bytes": "13630"
},
{
"name": "Shell",
"bytes": "3163"
},
{
"name": "TeX",
"bytes": "81074"
}
],
"symlink_target": ""
} |
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
pa = pytest.importorskip("pyarrow", minversion="0.17.0")
from pandas.core.arrays._arrow_utils import pyarrow_array_to_numpy_and_mask
arrays = [pd.array([1, 2, 3, None], dtype=dtype) for dtype in tm.ALL_EA_INT_DTYPES]
arrays += [pd.array([0.1, 0.2, 0.3, None], dtype=dtype) for dtype in tm.FLOAT_EA_DTYPES]
arrays += [pd.array([True, False, True, None], dtype="boolean")]
@pytest.fixture(params=arrays, ids=[a.dtype.name for a in arrays])
def data(request):
return request.param
def test_arrow_array(data):
arr = pa.array(data)
expected = pa.array(
data.to_numpy(object, na_value=None),
type=pa.from_numpy_dtype(data.dtype.numpy_dtype),
)
assert arr.equals(expected)
@td.skip_if_no("pyarrow")
def test_arrow_roundtrip(data):
df = pd.DataFrame({"a": data})
table = pa.table(df)
assert table.field("a").type == str(data.dtype.numpy_dtype)
result = table.to_pandas()
assert result["a"].dtype == data.dtype
tm.assert_frame_equal(result, df)
@td.skip_if_no("pyarrow")
def test_arrow_load_from_zero_chunks(data):
# GH-41040
df = pd.DataFrame({"a": data[0:0]})
table = pa.table(df)
assert table.field("a").type == str(data.dtype.numpy_dtype)
table = pa.table(
[pa.chunked_array([], type=table.field("a").type)], schema=table.schema
)
result = table.to_pandas()
assert result["a"].dtype == data.dtype
tm.assert_frame_equal(result, df)
@td.skip_if_no("pyarrow")
def test_arrow_from_arrow_uint():
# https://github.com/pandas-dev/pandas/issues/31896
# possible mismatch in types
dtype = pd.UInt32Dtype()
result = dtype.__from_arrow__(pa.array([1, 2, 3, 4, None], type="int64"))
expected = pd.array([1, 2, 3, 4, None], dtype="UInt32")
tm.assert_extension_array_equal(result, expected)
@td.skip_if_no("pyarrow")
def test_arrow_sliced(data):
# https://github.com/pandas-dev/pandas/issues/38525
df = pd.DataFrame({"a": data})
table = pa.table(df)
result = table.slice(2, None).to_pandas()
expected = df.iloc[2:].reset_index(drop=True)
tm.assert_frame_equal(result, expected)
# no missing values
df2 = df.fillna(data[0])
table = pa.table(df2)
result = table.slice(2, None).to_pandas()
expected = df2.iloc[2:].reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.fixture
def np_dtype_to_arrays(any_real_dtype):
np_dtype = np.dtype(any_real_dtype)
pa_type = pa.from_numpy_dtype(np_dtype)
# None ensures the creation of a bitmask buffer.
pa_array = pa.array([0, 1, 2, None], type=pa_type)
# Since masked Arrow buffer slots are not required to contain a specific
# value, assert only the first three values of the created np.array
np_expected = np.array([0, 1, 2], dtype=np_dtype)
mask_expected = np.array([True, True, True, False])
return np_dtype, pa_array, np_expected, mask_expected
def test_pyarrow_array_to_numpy_and_mask(np_dtype_to_arrays):
"""
Test conversion from pyarrow array to numpy array.
Modifies the pyarrow buffer to contain padding and offset, which are
considered valid buffers by pyarrow.
Also tests empty pyarrow arrays with non empty buffers.
See https://github.com/pandas-dev/pandas/issues/40896
"""
np_dtype, pa_array, np_expected, mask_expected = np_dtype_to_arrays
data, mask = pyarrow_array_to_numpy_and_mask(pa_array, np_dtype)
tm.assert_numpy_array_equal(data[:3], np_expected)
tm.assert_numpy_array_equal(mask, mask_expected)
mask_buffer = pa_array.buffers()[0]
data_buffer = pa_array.buffers()[1]
data_buffer_bytes = pa_array.buffers()[1].to_pybytes()
# Add trailing padding to the buffer.
data_buffer_trail = pa.py_buffer(data_buffer_bytes + b"\x00")
pa_array_trail = pa.Array.from_buffers(
type=pa_array.type,
length=len(pa_array),
buffers=[mask_buffer, data_buffer_trail],
offset=pa_array.offset,
)
pa_array_trail.validate()
data, mask = pyarrow_array_to_numpy_and_mask(pa_array_trail, np_dtype)
tm.assert_numpy_array_equal(data[:3], np_expected)
tm.assert_numpy_array_equal(mask, mask_expected)
# Add offset to the buffer.
offset = b"\x00" * (pa_array.type.bit_width // 8)
data_buffer_offset = pa.py_buffer(offset + data_buffer_bytes)
mask_buffer_offset = pa.py_buffer(b"\x0E")
pa_array_offset = pa.Array.from_buffers(
type=pa_array.type,
length=len(pa_array),
buffers=[mask_buffer_offset, data_buffer_offset],
offset=pa_array.offset + 1,
)
pa_array_offset.validate()
data, mask = pyarrow_array_to_numpy_and_mask(pa_array_offset, np_dtype)
tm.assert_numpy_array_equal(data[:3], np_expected)
tm.assert_numpy_array_equal(mask, mask_expected)
# Empty array
np_expected_empty = np.array([], dtype=np_dtype)
mask_expected_empty = np.array([], dtype=np.bool_)
pa_array_offset = pa.Array.from_buffers(
type=pa_array.type,
length=0,
buffers=[mask_buffer, data_buffer],
offset=pa_array.offset,
)
pa_array_offset.validate()
data, mask = pyarrow_array_to_numpy_and_mask(pa_array_offset, np_dtype)
tm.assert_numpy_array_equal(data[:3], np_expected_empty)
tm.assert_numpy_array_equal(mask, mask_expected_empty)
@td.skip_if_no("pyarrow")
def test_from_arrow_type_error(request, data):
# ensure that __from_arrow__ returns a TypeError when getting a wrong
# array type
if data.dtype != "boolean":
# TODO numeric dtypes cast any incoming array to the correct dtype
# instead of erroring
request.node.add_marker(
pytest.mark.xfail(raises=None, reason="numeric dtypes don't error but cast")
)
arr = pa.array(data).cast("string")
with pytest.raises(TypeError, match=None):
# we don't test the exact error message, only the fact that it raises
# a TypeError is relevant
data.dtype.__from_arrow__(arr)
| {
"content_hash": "c7518734590072a76310df5bbe8bb155",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 88,
"avg_line_length": 34.273743016759774,
"alnum_prop": 0.6616136919315403,
"repo_name": "gfyoung/pandas",
"id": "9f755412dbf39f25af3b8854279d93b4f1393aa7",
"size": "6135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/arrays/masked/test_arrow_compat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4912"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14336547"
},
{
"name": "Shell",
"bytes": "29174"
},
{
"name": "Smarty",
"bytes": "2069"
}
],
"symlink_target": ""
} |
from model.State import State
from model.Turbine import Turbine
from model.Condensor import Condensor
from model.Reheater import Reheater
from model.Pump import Pump
from model.OFeedWater import OFeedWater
import util.plotting as plot
class OFWH(object):
def __init__(self, t0, p0, p_1, t_1, p_2, eta_t, eta_p, p_cond, TL, TH):
self.one = State('Water', P=p_1, T=t_1)
self.one.define()
self.two = State('Water', P=p_2)
self.turb_one = Turbine(self.one, self.two)
self.turb_one.isentropic(eta_t)
self.turb_one.exergyBalance(p0, t0)
self.three = State('Water', P=p_cond)
self.turb_two = Turbine(self.two, self.three)
self.turb_two.isentropic(eta_t)
self.four = State('Water', P=p_cond)
self.condensor = Condensor(p_cond, self.three, self.four)
self.five = State('Water', P=p_2)
self.pump_one = Pump(self.four, self.five)
self.pump_one.isentropic(eta_p)
self.six = State('Water', P=p_2, Q=0)
if self.six.define():
y = ((self.six.properties['h']-self.five.properties['h'])
/(self.two.properties['h']-self.five.properties['h']))
else:
print 'Failed to define state 6'
self.y = y
self.seven = State('Water', P=p_1)
self.pump_two = Pump(self.six, self.seven)
self.pump_two.isentropic(eta_p)
self.pump_two.exergyBalance(t0, p0)
self.turb_two.exergyBalanceY(t0, p0, y)
self.pump_one.exergyBalanceY(t0, p0, y)
self.superHeater = Reheater(self.seven, self.one)
self.eta = (sum([self.turb_one.w, self.turb_two.w, self.pump_one.w, self.pump_two.w])/
sum([self.superHeater.q]))
self.E = self.eta*(1/(1-float(TL)/float(TH)))
self.ofwh = OFeedWater([self.two, self.five], [y, (1-y)], self.six, [1], t0, p0)
def ofwh(p_2):
TL = 300
TH = 650
p_1 = 100*10**5 # Pa
t_1 = 620 # K
p_cond = 10*10**3 # Pa
eta_t = 0.9
eta_p = 0.95
t0 = 300 # K
p0 = 100*10**3 # P
return OFWH(t0, p0, p_1, t_1, p_2, eta_t, eta_p, p_cond, TL, TH)
if __name__ == '__main__':
pLow = 10*10**3
pHigh = 100*10**5
results = []
int_p = []
for x in range(pLow+1, pHigh-1, 10000):
int_p.append(x/1000)
results.append(ofwh(x))
thermal = [res.eta for res in results]
exergetic = [res.E for res in results]
idx = thermal.index(max(thermal))
print 'Max Thermal Efficiency of {} with an Intermediate pressure of {} kPa'.format(
max(thermal), int_p[thermal.index(max(thermal))])
print 'Max Exergetic Efficiency of {} with an Intermediate pressure of {} kPa'.format(
max(exergetic), int_p[exergetic.index(max(exergetic))])
print 'Turbine one: {}'.format(results[idx].turb_one.ef)
print 'Turbine two: {}'.format(results[idx].turb_two.ef)
print 'Pump One: {}'.format(results[idx].pump_one.ef)
print 'Pump Two: {}'.format(results[idx].pump_two.ef)
print 'Y {}'.format(results[idx].y)
print 'CFWH: {}'.format(results[idx].ofwh.ef)
plot.plotData('Thermal Efficiencies of a OFWH Cycle', 'Intermediate Pressure (kPa)',
'Thermal Efficiency', [int_p, thermal])
plot.plotData('Exergetic Efficiencies of a OFWH Cycle', 'Intermediate Pressure (kPa)',
'Exergetic Efficiency', [int_p, exergetic])
plot.plotComponent('OFWH Turbine One', [res.turb_one for res in results], int_p)
plot.plotComponent('OFWH Turbine Two', [res.turb_two for res in results], int_p)
plot.plotComponent('OFWH Pump One', [res.pump_one for res in results], int_p)
plot.plotComponent('OFWH Pump Two', [res.pump_two for res in results], int_p)
plot.plotComponent('OFWH Feed Water', [res.ofwh for res in results], int_p) | {
"content_hash": "8582e65c2a726f05aa3c95ec1d20608e",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 94,
"avg_line_length": 41.24731182795699,
"alnum_prop": 0.605318039624609,
"repo_name": "BrandonTheBuilder/thermawesome",
"id": "cd1db80fdafb36d0bcdf54bd335f38054df10ba8",
"size": "3858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OFWH.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25600"
}
],
"symlink_target": ""
} |
"""Tests for perfkitbenchmarker.benchmark_spec."""
import unittest
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import configs
from perfkitbenchmarker import context
from perfkitbenchmarker import flags
from perfkitbenchmarker import os_types
from perfkitbenchmarker import pkb
from perfkitbenchmarker import providers
from perfkitbenchmarker import spark_service
from perfkitbenchmarker.configs import benchmark_config_spec
from perfkitbenchmarker.providers.aws import aws_emr
from perfkitbenchmarker.providers.gcp import gcp_dataproc
from tests import mock_flags
FLAGS = flags.FLAGS
NAME = 'name'
UID = 'name0'
DATAPROC_CONFIG = """
name:
spark_service:
service_type: managed
machine_type: n1-standard-4
num_workers: 4
"""
EMR_CONFIG = """
name:
spark_service:
service_type: managed
machine_type: m1.large
num_workers: 4
"""
PKB_MANAGED_CONFIG = """
name:
spark_service:
service_type: pkb_managed
num_workers: 5
"""
class _BenchmarkSpecTestCase(unittest.TestCase):
def setUp(self):
self._mocked_flags = mock_flags.MockFlags()
self._mocked_flags.cloud = providers.GCP
self._mocked_flags.os_type = os_types.DEBIAN
self.addCleanup(context.SetThreadBenchmarkSpec, None)
def _CreateBenchmarkSpecFromYaml(self, yaml_string, benchmark_name=NAME):
config = configs.LoadConfig(yaml_string, {}, benchmark_name)
return self._CreateBenchmarkSpecFromConfigDict(config, benchmark_name)
def _CreateBenchmarkSpecFromConfigDict(self, config_dict, benchmark_name):
config_spec = benchmark_config_spec.BenchmarkConfigSpec(
benchmark_name, flag_values=self._mocked_flags, **config_dict)
return benchmark_spec.BenchmarkSpec(config_spec, benchmark_name, UID)
class ConstructSparkServiceTestCase(_BenchmarkSpecTestCase):
def setUp(self):
super(ConstructSparkServiceTestCase, self).setUp()
pkb._InitializeRunUri()
def testDataprocConfig(self):
spec = self._CreateBenchmarkSpecFromYaml(DATAPROC_CONFIG)
spec.ConstructVirtualMachines()
spec.ConstructSparkService()
self.assertTrue(hasattr(spec, 'spark_service'))
self.assertTrue(spec.spark_service is not None)
self.assertEqual(len(spec.vms), 0)
self.assertEqual(spec.config.spark_service.num_workers, 4,
str(spec.config.spark_service.__dict__))
self.assertEqual(spec.config.spark_service.service_type,
spark_service.PROVIDER_MANAGED)
self.assertEqual(spec.config.spark_service.machine_type, 'n1-standard-4',
str(spec.config.spark_service.__dict__))
self.assertTrue(isinstance(spec.spark_service,
gcp_dataproc.GcpDataproc))
def testEMRConfig(self):
self._mocked_flags.cloud = providers.AWS
self._mocked_flags.zones = ['us-west-2']
with mock_flags.PatchFlags(self._mocked_flags):
spec = self._CreateBenchmarkSpecFromYaml(EMR_CONFIG)
spec.ConstructVirtualMachines()
spec.ConstructSparkService()
self.assertTrue(hasattr(spec, 'spark_service'))
self.assertTrue(spec.spark_service is not None)
self.assertEqual(len(spec.vms), 0)
self.assertEqual(spec.config.spark_service.num_workers, 4,
str(spec.config.spark_service.__dict__))
self.assertEqual(spec.config.spark_service.service_type,
spark_service.PROVIDER_MANAGED)
self.assertEqual(spec.config.spark_service.machine_type, 'm1.large',
str(spec.config.spark_service.__dict__))
self.assertTrue(isinstance(spec.spark_service,
aws_emr.AwsEMR))
def testPkbManaged(self):
spec = self._CreateBenchmarkSpecFromYaml(PKB_MANAGED_CONFIG)
self.assertEqual(spec.config.spark_service.num_workers, 5,
str(spec.config.spark_service.__dict__))
self.assertEqual(spec.config.spark_service.service_type,
spark_service.PKB_MANAGED)
spec.ConstructVirtualMachines()
self.assertEqual(len(spec.vms), 0)
spec.ConstructSparkService()
self.assertEqual(spec.spark_service.num_workers, 5)
self.assertTrue(isinstance(spec.spark_service,
spark_service.PkbSparkService))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "da9c845ed94988335e4a1c8fa18664d6",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 77,
"avg_line_length": 35.368852459016395,
"alnum_prop": 0.7049826187717265,
"repo_name": "xiaolihope/PerfKitBenchmarker-1.7.0",
"id": "031fa7d2a28be20c84abf7e8e0c03502a61fdebf",
"size": "4925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/spark_service_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "1727478"
},
{
"name": "Shell",
"bytes": "23457"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="size", parent_name="volume.colorbar.title.font", **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "style"),
**kwargs
)
| {
"content_hash": "cc89db6c7e07eb86d77c3dd8693bd750",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 84,
"avg_line_length": 33.6,
"alnum_prop": 0.5793650793650794,
"repo_name": "plotly/python-api",
"id": "8e12d07ff10bc92a3a85b816acc1945bef41a4fc",
"size": "504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/volume/colorbar/title/font/_size.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
import functools
from oauthlib.common import log
from ..errors import TemporarilyUnavailableError, ServerError
from ..errors import FatalClientError, OAuth2Error
class BaseEndpoint(object):
def __init__(self):
self._available = True
self._catch_errors = False
@property
def available(self):
return self._available
@available.setter
def available(self, available):
self._available = available
@property
def catch_errors(self):
return self._catch_errors
@catch_errors.setter
def catch_errors(self, catch_errors):
self._catch_errors = catch_errors
def catch_errors_and_unavailability(f):
@functools.wraps(f)
def wrapper(endpoint, uri, *args, **kwargs):
if not endpoint.available:
e = TemporarilyUnavailableError()
log.info('Endpoint unavailable, ignoring request %s.' % uri)
return {}, e.json, 503
if endpoint.catch_errors:
try:
return f(endpoint, uri, *args, **kwargs)
except OAuth2Error:
raise
except FatalClientError:
raise
except Exception as e:
error = ServerError()
log.warning('Exception caught while processing request, %s.' % e)
return {}, error.json, 500
else:
return f(endpoint, uri, *args, **kwargs)
return wrapper
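# Illustrative sketch (editor addition, not part of the original module): a toy
# endpoint whose handler is wrapped by catch_errors_and_unavailability. The
# class and method names are assumed examples; with catch_errors set to True an
# unexpected exception is turned into a ServerError 500 response instead of
# propagating to the caller.
class _ExampleEndpoint(BaseEndpoint):  # hypothetical, for illustration only
    @catch_errors_and_unavailability
    def create_example_response(self, uri, *args, **kwargs):
        raise RuntimeError('unexpected failure')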
| {
"content_hash": "c3a36a261fef1bf882fc6fbafa335fa1",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 81,
"avg_line_length": 27.311475409836067,
"alnum_prop": 0.6122448979591837,
"repo_name": "collabspot/muninn",
"id": "984de2f91b2709913524fbdae9550bdc2c655bcf",
"size": "1690",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "lib/oauthlib/oauth2/rfc6749/endpoints/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "131644"
},
{
"name": "JavaScript",
"bytes": "907591"
},
{
"name": "Python",
"bytes": "1343756"
},
{
"name": "Shell",
"bytes": "77"
}
],
"symlink_target": ""
} |
import logging
import pymssql
"""Commander Plugin for Microsoft SQL Server
Dependencies:
pip3 install pymssql
"""
class Rotator:
def __init__(self, login, password, host=None, port=1433, db=None, **kwargs):
self.host = host
self.port = port
self.login = login
self.password = password
self.db = db
def rotate_start_msg(self):
"""Display msg before starting rotation"""
host_msg = 'on default host' if self.host is None else f'on host "{self.host}"'
db_msg = '...' if self.db is None else f' to connect to db "{self.db}"...'
logging.info(
f'Rotating with Microsoft SQL plugin {host_msg} and port "{self.port}" using login "{self.login}"{db_msg}'
)
def revert(self, record, new_password):
"""Revert rotation of a Microsoft SQL password"""
self.rotate(record, new_password, revert=True)
def rotate(self, record, new_password, revert=False):
"""Rotate a Microsoft SQL password"""
if revert:
old_password = new_password
new_password = self.password
else:
old_password = self.password
user = self.login
kwargs = {'user': user, 'password': old_password}
if self.host:
kwargs['server'] = self.host
if self.db:
kwargs['database'] = self.db
connection = ''
result = False
try:
connection = pymssql.connect(**kwargs)
with connection.cursor() as cursor:
host = 'default host' if self.host is None else f'"{self.host}"'
logging.debug(f'Connected to {host}')
sql = f"ALTER LOGIN {user} WITH PASSWORD = '{new_password}';"
cursor.execute(sql)
# connection is not autocommit by default. So you must commit to save your changes.
connection.commit()
result = True
except Exception as e:
logging.error(f'Error during connection to Microsoft SQL server: {e}')
finally:
if connection:
connection.close()
return result
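# Illustrative usage sketch (editor addition, not part of the original plugin):
# the host, credentials and database below are assumed placeholders.
def _example_rotation():  # hypothetical helper, for illustration only
    rotator = Rotator(login='keeper_svc', password='old-password',
                      host='localhost', port=1433, db='master')
    rotator.rotate_start_msg()
    # `record` is not used by rotate(), so None keeps the sketch self-contained.
    return rotator.rotate(None, 'new-password')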
| {
"content_hash": "4797c19de03a8fa7c39d01234c0e7468",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 118,
"avg_line_length": 34.95161290322581,
"alnum_prop": 0.5717581910475311,
"repo_name": "Keeper-Security/Commander",
"id": "99ac18f19fce82663d564d91f3e335c8ec0faf22",
"size": "2422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keepercommander/plugins/mssql/mssql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2274231"
},
{
"name": "Shell",
"bytes": "3388"
}
],
"symlink_target": ""
} |
import os
import uuid
import functools
from os.path import join as opjoin
import racy
import glob
import os.path
from racy.rproject import ConstructibleRacyProject, LibName
from racy.rutils import memoize, run_once
from global_dico import *
from templating import *
class WixProjectError(racy.RacyProjectError):
pass
class WixProject(ConstructibleRacyProject):
var_name = 'WIX_PRJ'
prj = ''
call_prj_deps ={}
qt = False
wx = False
def __init__(self, prj, config=None, **kwargs):
if not isinstance(prj,ConstructibleRacyProject):
msg = 'WIX take a ConstructibleRacyProject as first argument'
raise WixProjectError(self, msg)
opts = prj.opts_source
self.prj = prj
self.graphviz_buffer = ''
super(WixProject, self).__init__(
build_options = opts,
config = config,
**prj.projects_db.prj_args
)
@property
def name (self):
name = super(WixProject, self).name
return LibName.SEP.join( [self.var_name, name])
@run_once
def configure_env(self):
super(WixProject, self).configure_env()
@memoize
def result(self, deps_results=True):
result = []
self.configure_env()
return result
def split_project_path(self, path):
res = []
for i in racy.renv.dirs.code:
if path.startswith(i):
temp = path.replace(i, '')
def_dir = i.split(os.path.sep)[-1]
res = temp.split(os.path.sep)
res[0] = def_dir
return res
def create_prj (self, prj):
        # Collect the name and type of every recursive dependency of the project
prj_deps = []
for i in prj.rec_deps:
prj_deps.append( { 'PRJ_NAME' : i.base_name ,
'PRJ_TYPE' : i.get_lower('TYPE'), })
self.call_prj_deps[prj.base_name] = {
'PRJ_NAME' : prj.base_name,
'PRJ_FULL_NAME' : prj.full_name,
'PRJ_VERSION_NAME' : prj.versioned_name,
'PRJ_TYPE' : prj.get_lower('TYPE'),
'PRJ_TARGET' : prj.target_path,
}
profile_without_rc = self.prj.get('WIX_PROFILE').replace("rc",'')
profile_without_rc = profile_without_rc[1:]
profile_path = os.path.join('Bundles', self.prj.versioned_name,
profile_without_rc)
icon_path = self.prj.get('WIX_ICON')
if icon_path:
icon_path = self.prj.get_path(icon_path)
        # This dictionary contains all variables for the templates
dico = {
'PRJ_INSTALL_DIR' : prj.install_path,
'PRJ_VERSION_NAME' : prj.versioned_name,
'PRJ_ROOT_DIR' : prj.root_path,
'PRJ_NAME' : prj.base_name,
'PRJ_FULL_NAME' : prj.full_name,
'HEADERS' : prj.get_includes(False),
'SOURCES' : prj.get_sources(False),
'OTHERS_FILE' : prj.get_others(),
'PRJ_TYPE' : prj.get_lower('TYPE'),
'RACY_CLEAN_CMD' : racy.get_racy_cmd() +' '+ prj.base_name,
'CALLING_PROJECT' : self.prj.base_name,
'CALLING_PROJECT_VERSION_NAME' : self.prj.versioned_name,
'CALLING_PROJECT_FULL_NAME' : self.prj.full_name,
'CALLING_PROJECT_DEPS' : self.call_prj_deps,
'CALLING_PROJECT_VERSION' : self.prj.version,
'CALLING_PROJECT_PROFILE' : profile_path,
'CALLING_PROJECT_ICON' : icon_path,
'DEPS_INCLUDES' : prj.deps_include_path,
'VERSION' : prj.version,
'ARCH' : self.prj.get_lower('ARCH'),
'DEPS' : prj_deps,
'PROJECT_SPLIT_PATH' : self.split_project_path(prj.root_path),
'uuid' : functools.partial(uuid.uuid5, uuid.NAMESPACE_OID),
}
dico.update(dico_g)
dico_vars = dico
dico_prj = dico_prj_template['dico_create_wix']['yes']
dico_vars = self.gen_file(dico_vars, dico_prj)
racy.print_msg("Create {0} wix file".format(prj.base_name))
def create_extra_dir(self, tuple_dir_targets):
folder,targets = tuple_dir_targets
if not targets == []:
self.call_prj_deps[folder] = {
'PRJ_NAME' : '',
'PRJ_FULL_NAME' : '',
'PRJ_VERSION_NAME' : '',
}
dico = {
'CALLING_PROJECT_VERSION_NAME' : self.prj.versioned_name,
'CALLING_PROJECT' : self.prj.base_name,
'TARGETS': targets,
'uuid' : functools.partial(uuid.uuid5, uuid.NAMESPACE_OID),
'EXTRA_NAME' : folder,
'ARCH' : self.prj.get_lower('ARCH'),
}
dico.update(dico_g)
dico_prj = {
'dirs':
[
('WIX_DIR' ,'${WIX_INSTALL_DIR}/${CALLING_PROJECT}/'),
('ROOT_TMP_DIR', '${TEMPLATING_PLUGIN_PATH}/rc/'),
('TPL_DIR' , '${ROOT_TMP_DIR}/wix/'),
],
'template_prj':
[
('${TPL_DIR}/extra.wxs', '${WIX_DIR}/${EXTRA_NAME}.wxs'),
]
}
self.gen_file(dico, dico_prj)
racy.print_msg("Create "+ folder+ " wix file")
# def create_targets(path,self):
# targets = []
#
# for i in os.listdir(bin_dir):
# if not i.endswith('.exe'):
# targets.append(os.path.join(bin_dir,i))
#
#
# return targets
#
def create_targets(self,path):
targets=[]
l = glob.glob(path+'\\*')
for i in l:
if os.path.isdir(i):
targets.extend(self.create_targets(i))
else:
if not i.endswith('.exe'):
targets.append(i)
return targets
def create_install_targets(self,list_dir):
# list targets = [(dir, list_targets),...]
list_targets = []
install_dir = racy.renv.dirs.install
for tdir in list_dir:
dir_path = opjoin(install_dir,tdir)
if os.path.exists(dir_path):
targets = self.create_targets(dir_path)
list_targets.append((tdir,targets))
return list_targets
def gen_file(self, dico_vars, dico_prj):
# Added vars
if dico_prj.has_key('vars'):
dico_vars = add_vars(dico_prj['vars'], dico_vars)
# Added dirs
if dico_prj.has_key('dirs'):
dico_vars = add_dirs_template(dico_prj['dirs'], dico_vars)
# Added template_prj
if dico_prj.has_key('template_prj'):
add_template_prj(dico_prj['template_prj'], dico_vars)
return dico_vars
def install (self, opts = ['rc', 'deps'] ):
result = self.result(deps_results = 'deps' in opts)
for i in self.prj.rec_deps:
if i.get_lower('TYPE') in ['exec', 'bundle', 'shared']:
self.create_prj(i)
extra_dirs = ['bin','Python','PythonHome','qtplugins']
for i in self.create_install_targets(extra_dirs):
self.create_extra_dir(i)
self.create_prj(self.prj)
return result
| {
"content_hash": "33ef3505bcac1086baf5bb42db875b01",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 81,
"avg_line_length": 30.857142857142858,
"alnum_prop": 0.5034391534391535,
"repo_name": "cfobel/sconspiracy",
"id": "aca8e692384ef085f7c1e2f90740d87540bc4dec",
"size": "7560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/racy/plugins/templating/wix_project.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "13702"
},
{
"name": "DOT",
"bytes": "1182"
},
{
"name": "IDL",
"bytes": "318"
},
{
"name": "Python",
"bytes": "342756"
},
{
"name": "Shell",
"bytes": "219"
},
{
"name": "VimL",
"bytes": "1952"
}
],
"symlink_target": ""
} |
from types import DictType
from utils import flatten
# --------------------------------------------------------------------------------
# Elements
class Element(object):
def __init__(self, tag_name, block_p, explicit_end_p, has_content_p, attrs=None):
self.tag_name = tag_name
self.block_p = block_p
self.explicit_end_p = explicit_end_p
self.has_content_p = has_content_p
self.attrs = attrs or {}
def make_block(tag_name, attrs=None, explicit_end_p=False, has_content_p=True):
return Element(tag_name, True, explicit_end_p, has_content_p, attrs=attrs)
def make_inline(tag_name, attrs=None, explicit_end_p=False, has_content_p=True):
return Element(tag_name, False, explicit_end_p, has_content_p, attrs=attrs)
# --------------------------------------------------------------------------------
# Renderers
def renderer(func):
"Simple decorator to turn a function into a tag renderer."
func.renderer_p = True
return func
def renderer_p(thing):
return getattr(thing, 'renderer_p', False)
@renderer
def raw(params, printer):
for param in flatten(params):
printer.raw(unicode(param))
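# Illustrative sketch (editor addition, not part of the original module): a
# custom renderer defined with the decorator is recognised by renderer_p,
# whereas an undecorated function is not.
@renderer
def _example_renderer(params, printer):  # hypothetical, for illustration only
    raw(params, printer)
# renderer_p(_example_renderer) is True; renderer_p(flatten) is False.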
| {
"content_hash": "b2d47b0863140322576c0115a09818c6",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 85,
"avg_line_length": 33.05555555555556,
"alnum_prop": 0.5756302521008403,
"repo_name": "scusack/tlhl",
"id": "a72dcbe167557f365e1bc60823cd8da431a9dffa",
"size": "1215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tlhl/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24515"
}
],
"symlink_target": ""
} |
"""Module providing views for the site navigation root"""
from Products.Five.browser import BrowserView
from Products.ZCatalog.interfaces import ICatalogBrain
from plone import api
from plone.app.contentlisting.interfaces import IContentListing
from plone.app.contentlisting.interfaces import IContentListingObject
from plone.app.contenttypes.interfaces import INewsItem
from zope.component import getMultiAdapter
from zope.component import getUtility
from dhp.sitecontent.interfaces import IResponsiveImagesTool
IMG = 'data:image/gif;base64,R0lGODlhAQABAIAAAP///wAAACwAAAAAAQABAAACAkQBADs='
class FrontPageView(BrowserView):
""" General purpose frontpage view """
def __call__(self):
self.has_newsitems = len(self.recent_news()) > 0
return self.render()
def render(self):
return self.index()
def can_edit(self):
show = False
if not api.user.is_anonymous():
show = True
return show
def portal_id(self):
portal = api.portal.get()
return portal.id
def recent_news(self):
catalog = api.portal.get_tool(name='portal_catalog')
items = catalog(object_provides=INewsItem.__identifier__,
review_state='published',
sort_on='Date',
sort_order='reverse',
sort_limit=3)[:3]
return IContentListing(items)
def section_preview(self, section):
info = {}
if section.startswith('/'):
target = section
else:
target = '/{0}'.format(section)
item = api.content.get(path=target)
if item:
info['title'] = item.Title()
info['teaser'] = item.Description()
info['url'] = item.absolute_url()
info['image'] = self.image_tag(item)
info['subitems'] = None
            if target in ('/news',):
info['subitems'] = self.recent_news()
return info
def get_image_data(self, uuid):
tool = getUtility(IResponsiveImagesTool)
return tool.create(uuid)
def image_tag(self, item):
data = {}
sizes = ['small', 'medium', 'large']
idx = 0
for size in sizes:
            idx += 1
img = self._get_scaled_img(item, size)
data[size] = '{0} {1}w'.format(img['url'], img['width'])
return data
def _get_scaled_img(self, item, size):
if (
ICatalogBrain.providedBy(item) or
IContentListingObject.providedBy(item)
):
obj = item.getObject()
else:
obj = item
info = {}
if hasattr(obj, 'image'):
scales = getMultiAdapter((obj, self.request), name='images')
            if size == 'small':
                scale = scales.scale('image', width=300, height=300)
            elif size == 'medium':
                scale = scales.scale('image', width=600, height=600)
            else:
                scale = scales.scale('image', width=900, height=900)
if scale is not None:
info['url'] = scale.url
info['width'] = scale.width
info['height'] = scale.height
else:
info['url'] = IMG
info['width'] = '1px'
info['height'] = '1px'
return info
| {
"content_hash": "843faccebb79bbc0b9aef27ff884391a",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 78,
"avg_line_length": 33.277227722772274,
"alnum_prop": 0.5617375781017554,
"repo_name": "a25kk/dhp",
"id": "2c8cb5e255bf182c4490262227e7d288549df680",
"size": "3385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/dhp.sitecontent/dhp/sitecontent/browser/frontpage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1121144"
},
{
"name": "Dockerfile",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "545368"
},
{
"name": "JavaScript",
"bytes": "649113"
},
{
"name": "Makefile",
"bytes": "2549"
},
{
"name": "Python",
"bytes": "32305"
},
{
"name": "Shell",
"bytes": "3097"
}
],
"symlink_target": ""
} |
import os
import sys
import ujson as json
class PluginStructure(object):
"""Provides tools for a plugin architecture."""
def __init__(self, plugin_folder = "./plugins", main_module="__init__"):
super(PluginStructure, self).__init__()
self.plugin_folder = plugin_folder
self.main_module = main_module
self.plugins = {}
def search_all_plugins(self, autoload=False):
possible_plugins = os.listdir(self.plugin_folder)
possible_order = os.path.join(self.plugin_folder, "order.json")
if os.path.exists(possible_order):
            possible_plugins = json.load(open(possible_order, "r"))
for possible_plugin in possible_plugins:
self.search_plugin(possible_plugin, autoload)
return self.plugins
def search_plugin(self, possible_plugin, autoload=False):
location = os.path.join(self.plugin_folder, possible_plugin)
if not os.path.isdir(location) or not self.main_module + ".py" in os.listdir(location):
return None
if autoload:
sys.path.append(os.path.realpath(self.plugin_folder))
try:
sys.modules[__name__ + "." + possible_plugin] = __import__(possible_plugin)
finally:
del sys.path[-1]
return sys.modules[__name__ + "." + possible_plugin]
return True
def load_all(self):
plugins = self.search_all_plugins(autoload=True)
PluginStructure.BaseArchitecture = PluginStructure()
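# Illustrative usage sketch (editor addition, not part of the original module):
# the plugin folder below is an assumed path.
def _example_discovery():  # hypothetical helper, for illustration only
    structure = PluginStructure(plugin_folder="./plugins")
    # With autoload=True every plugin folder containing an __init__.py module
    # is imported and registered in sys.modules.
    structure.search_all_plugins(autoload=True)
    return structure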
| {
"content_hash": "a8c7aa22c598d84394c22219ead35443",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 95,
"avg_line_length": 38.743589743589745,
"alnum_prop": 0.6254136333553938,
"repo_name": "marcoapintoo/Biosignal-Intermediate-Format",
"id": "51c29dbc402c36c0f6d24f4c5285036c5746c13d",
"size": "1529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "biosignalformat/plugins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "43871"
}
],
"symlink_target": ""
} |
import signbank.settings
import os
import shutil
from zipfile import ZipFile
from datetime import datetime, date
import json
import re
from django.utils.translation import override
from signbank.dictionary.models import *
from signbank.dictionary.update import gloss_from_identifier
from django.utils.dateformat import format
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
def get_gloss_data():
glosses = Gloss.objects.all()
gloss_data = {}
for gloss in glosses:
gloss_data[gloss.pk] = gloss.get_fields_dict()
return gloss_data
def create_zip_with_json_files(data_per_file,output_path):
"""Creates a zip file filled with the output of the functions supplied.
    Data values should be lists or dicts; they will be serialised to JSON."""
INDENTATION_CHARS = 4
zip = ZipFile(output_path,'w')
for filename, data in data_per_file.items():
if isinstance(data,list) or isinstance(data,dict):
output = json.dumps(data,indent=INDENTATION_CHARS)
zip.writestr(filename+'.json',output)
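# Illustrative usage sketch (editor addition, not part of the original module):
# the output path below is an assumed placeholder.
def _example_gloss_export(output_path='/tmp/gloss_export.zip'):  # hypothetical
    create_zip_with_json_files({'glosses': get_gloss_data()}, output_path)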
| {
"content_hash": "6a607306683ff13c4dd17f20afbac8bd",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 88,
"avg_line_length": 28.05,
"alnum_prop": 0.7344028520499108,
"repo_name": "Signbank/BSL-signbank",
"id": "84d2f39c6623b918655c8587a90237ad6b172d88",
"size": "1122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "signbank/tools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "1C Enterprise",
"bytes": "1846"
},
{
"name": "CSS",
"bytes": "480831"
},
{
"name": "HTML",
"bytes": "244006"
},
{
"name": "JavaScript",
"bytes": "1011248"
},
{
"name": "PHP",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "986206"
}
],
"symlink_target": ""
} |
def configuration(parent_package='', top_path=None):
import os.path as op
from numpy.distutils.misc_util import Configuration
from sfepy import Config
site_config = Config()
system = site_config.system()
os_flag = {'posix' : 0, 'windows' : 1}[system]
auto_dir = op.dirname(__file__)
auto_name = op.split(auto_dir)[-1]
config = Configuration(auto_name, parent_package, top_path)
inline = 'inline' if system == 'posix' else '__inline'
defines = [('SFEPY_PLATFORM', os_flag),
('inline', inline)]
if '-DDEBUG_FMF' in site_config.debug_flags():
defines.append(('DEBUG_FMF', None))
common_path = '../../common/extmods'
common_src = ['fmfield.c', 'refmaps.c', 'geommech.c', 'common_python.c']
common_src = [op.join(common_path, ii) for ii in common_src]
src = ['bases.pyx', 'lagrange.c']
config.add_extension('bases',
sources=src,
libraries=['sfepy_common'],
depends=common_src,
extra_compile_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir, common_path],
define_macros=defines)
src = ['lobatto_bases.pyx', 'lobatto.c', 'lobatto1d.c']
config.add_extension('lobatto_bases',
sources=src,
libraries=['sfepy_common'],
depends=common_src,
extra_compile_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir, common_path],
define_macros=defines)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| {
"content_hash": "c29f388e670f9c7dbf2a878df92c0805",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 76,
"avg_line_length": 38.5,
"alnum_prop": 0.5407792207792208,
"repo_name": "sfepy/sfepy",
"id": "5f4ab374573cbec59067fc94ca52dffe9a156a9a",
"size": "1948",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sfepy/discrete/fem/extmods/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "471175"
},
{
"name": "GLSL",
"bytes": "8269"
},
{
"name": "MATLAB",
"bytes": "1918"
},
{
"name": "Makefile",
"bytes": "489"
},
{
"name": "PowerShell",
"bytes": "3121"
},
{
"name": "Python",
"bytes": "3553188"
}
],
"symlink_target": ""
} |
REPEATABLE = 'repeatable'
FLAGS = [REPEATABLE]
def _AssertFlagsAreValid(flags):
assert isinstance(flags, list)
for f in flags:
if f not in FLAGS:
raise AssertionError(
'Unrecognized flag for a timeline interaction record: %s' % f)
def GetJavaScriptMarker(label, flags):
"""Computes the marker string of an interaction record.
This marker string can be used with JavaScript API console.time()
and console.timeEnd() to mark the beginning and end of the
interaction record..
Args:
label: The label used to identify the interaction record.
flags: the flags for the interaction record see FLAGS above.
Returns:
The interaction record marker string (e.g., Interaction.Label/flag1,flag2).
Raises:
AssertionError: If one or more of the flags is unrecognized.
"""
_AssertFlagsAreValid(flags)
marker = 'Interaction.%s' % label
if flags:
marker += '/%s' % (','.join(flags))
return marker
| {
"content_hash": "8dcd4773083b7c3b402403963a264b6d",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 79,
"avg_line_length": 27.257142857142856,
"alnum_prop": 0.7054507337526206,
"repo_name": "catapult-project/catapult",
"id": "f2ab6e17f84c459151d4b441f3a6ed1ead09bf0f",
"size": "1175",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "telemetry/telemetry/web_perf/timeline_interaction_record.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1324"
},
{
"name": "C++",
"bytes": "46069"
},
{
"name": "CSS",
"bytes": "23376"
},
{
"name": "Dockerfile",
"bytes": "1541"
},
{
"name": "Go",
"bytes": "114396"
},
{
"name": "HTML",
"bytes": "12394298"
},
{
"name": "JavaScript",
"bytes": "1559584"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "6778695"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
from org.myrobotlab.service import Speech
mouth = Runtime.createAndStart("mouth","Speech")
from org.myrobotlab.service import Servo
servo1 = Runtime.create("servo1","Servo")
servo2 = Runtime.create("servo2","Servo")
servo1.startService()
servo2.startService()
arduino = Runtime.createAndStart("arduino","Arduino")
arduino.connect("COM3")
pin0 = 54
pin1 = 55
left = 300
right = 300
leftstedy = 600
rightstedy = 600
leftval = left - leftstedy
rightval = right - rightstedy
servo1.attach("arduino", 13)
servo2.attach("arduino", 12)
servo1.setSpeed(0.8)
servo2.setSpeed(0.8)
arduino.arduino.enablePin(pin0)
arduino.arduino.enablePin(pin1)
# make friendly sample rate
# arduino.setSampleRate(1000)
arduino.addListener("publishPin", "python", "publishPin")
# my call-back
def publishPin(pin):
if (pin.pin == 54):
pin0 = pin
global left
left = pin0.value
if (left <= leftstedy ):
global left
left = leftstedy
global leftstedy
leftstedy = ((leftstedy * 49) + pin0.value) / 50
global leftval
leftval = left - leftstedy
if (pin.pin == 55):
pin1 = pin
global right
right = pin1.value
if (right <= rightstedy ):
global right
right = rightstedy
global rightstedy
rightstedy = ((rightstedy * 49) + pin1.value) / 50
global rightval
rightval = right - rightstedy
if (leftval >= rightval + 50 ):
# mouth.speak("pin 0")
servo1.moveTo(30)
sleep (4)
elif (rightval >= leftval + 50 ):
# mouth.speak("pin 1")
servo1.moveTo(150)
sleep (4)
else :
servo1.moveTo(90)
# print left
# print leftstedy
# print right
# print rightstedy
print leftval
print rightval
| {
"content_hash": "c8756c1fe073a5703692b590d1e026a5",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 57,
"avg_line_length": 20.04705882352941,
"alnum_prop": 0.6637323943661971,
"repo_name": "MyRobotLab/pyrobotlab",
"id": "3ee04adc66b6b369ae4b84f8cac5a8c6b7ea2b74",
"size": "1704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "home/Markus/Robyn.Inmoov.listening.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1827"
},
{
"name": "C",
"bytes": "126258"
},
{
"name": "C++",
"bytes": "373018"
},
{
"name": "Java",
"bytes": "156911"
},
{
"name": "Processing",
"bytes": "17022"
},
{
"name": "Python",
"bytes": "3309101"
},
{
"name": "Shell",
"bytes": "4635"
},
{
"name": "VBA",
"bytes": "11115"
}
],
"symlink_target": ""
} |
from oslo_utils import uuidutils
from neutron.ipam.drivers.neutrondb_ipam import db_models
# Database operations for Neutron's DB-backed IPAM driver
class IpamSubnetManager(object):
@classmethod
def load_by_neutron_subnet_id(cls, session, neutron_subnet_id):
return session.query(db_models.IpamSubnet).filter_by(
neutron_subnet_id=neutron_subnet_id).first()
def __init__(self, ipam_subnet_id, neutron_subnet_id):
self._ipam_subnet_id = ipam_subnet_id
self._neutron_subnet_id = neutron_subnet_id
@property
def neutron_id(self):
return self._neutron_subnet_id
def create(self, session):
"""Create database models for an IPAM subnet.
This method creates a subnet resource for the IPAM driver and
associates it with its neutron identifier, if specified.
        :param session: database session.
:returns: the idenfier of created IPAM subnet
"""
if not self._ipam_subnet_id:
self._ipam_subnet_id = uuidutils.generate_uuid()
ipam_subnet = db_models.IpamSubnet(
id=self._ipam_subnet_id,
neutron_subnet_id=self._neutron_subnet_id)
session.add(ipam_subnet)
return self._ipam_subnet_id
@classmethod
def delete(cls, session, neutron_subnet_id):
"""Delete IPAM subnet.
IPAM subnet no longer has foreign key to neutron subnet,
so need to perform delete manually
        :param session: database session
:param neutron_subnet_id: neutron subnet id associated with ipam subnet
"""
return session.query(db_models.IpamSubnet).filter_by(
neutron_subnet_id=neutron_subnet_id).delete()
def create_pool(self, session, pool_start, pool_end):
"""Create an allocation pool for the subnet.
This method does not perform any validation on parameters; it simply
persist data on the database.
:param pool_start: string expressing the start of the pool
:param pool_end: string expressing the end of the pool
:return: the newly created pool object.
"""
ip_pool = db_models.IpamAllocationPool(
ipam_subnet_id=self._ipam_subnet_id,
first_ip=pool_start,
last_ip=pool_end)
session.add(ip_pool)
return ip_pool
def delete_allocation_pools(self, session):
"""Remove all allocation pools for the current subnet.
:param session: database session
"""
session.query(db_models.IpamAllocationPool).filter_by(
ipam_subnet_id=self._ipam_subnet_id).delete()
def list_pools(self, session):
"""Return pools for the current subnet."""
return session.query(
db_models.IpamAllocationPool).filter_by(
ipam_subnet_id=self._ipam_subnet_id)
def check_unique_allocation(self, session, ip_address):
"""Validate that the IP address on the subnet is not in use."""
iprequest = session.query(db_models.IpamAllocation).filter_by(
ipam_subnet_id=self._ipam_subnet_id, status='ALLOCATED',
ip_address=ip_address).first()
if iprequest:
return False
return True
def list_allocations(self, session, status='ALLOCATED'):
"""Return current allocations for the subnet.
:param session: database session
:param status: IP allocation status
:returns: a list of IP allocation as instance of
neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAllocation
"""
return session.query(
db_models.IpamAllocation).filter_by(
ipam_subnet_id=self._ipam_subnet_id,
status=status)
def create_allocation(self, session, ip_address,
status='ALLOCATED'):
"""Create an IP allocation entry.
:param session: database session
:param ip_address: the IP address to allocate
:param status: IP allocation status
"""
ip_request = db_models.IpamAllocation(
ip_address=ip_address,
status=status,
ipam_subnet_id=self._ipam_subnet_id)
session.add(ip_request)
def delete_allocation(self, session, ip_address):
"""Remove an IP allocation for this subnet.
:param session: database session
:param ip_address: IP address for which the allocation entry should
be removed.
"""
return session.query(db_models.IpamAllocation).filter_by(
ip_address=ip_address,
ipam_subnet_id=self._ipam_subnet_id).delete(
synchronize_session=False)
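# Illustrative usage sketch (editor addition, not part of the original module):
# `session` is assumed to be an open database session obtained from neutron's
# db api; the subnet id and addresses are placeholders.
def _example_subnet_setup(session):  # hypothetical helper, for illustration only
    manager = IpamSubnetManager(ipam_subnet_id=None,
                                neutron_subnet_id='neutron-subnet-id')
    manager.create(session)
    manager.create_pool(session, '10.0.0.2', '10.0.0.254')
    manager.create_allocation(session, '10.0.0.2')
    return manager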
| {
"content_hash": "6c8035415886f66d45510719ed1adfa9",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 79,
"avg_line_length": 36.09230769230769,
"alnum_prop": 0.6329923273657289,
"repo_name": "sebrandon1/neutron",
"id": "10263062ee3c5b403696a905d7c01ed784ae7546",
"size": "5322",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/ipam/drivers/neutrondb_ipam/db_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "9903006"
},
{
"name": "Shell",
"bytes": "14339"
}
],
"symlink_target": ""
} |
from nose.tools import * # flake8: noqa
from urlparse import urlparse
from framework.auth.core import Auth
from website.models import NodeLog
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import (
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
RetractedRegistrationFactory
)
from tests.utils import assert_logs
node_url_for = lambda n_id: '/{}nodes/{}/'.format(API_BASE, n_id)
class TestNodeLinksList(ApiTestCase):
def setUp(self):
super(TestNodeLinksList, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=False, creator=self.user)
self.pointer_project = ProjectFactory(is_public=False, creator=self.user)
self.project.add_pointer(self.pointer_project, auth=Auth(self.user))
self.private_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.project._id)
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer_project = ProjectFactory(is_public=True, creator=self.user)
self.public_project.add_pointer(self.public_pointer_project, auth=Auth(self.user))
self.public_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.public_project._id)
self.user_two = AuthUserFactory()
def test_return_embedded_public_node_pointers_logged_out(self):
res = self.app.get(self.public_url)
res_json = res.json['data']
assert_equal(len(res_json), 1)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_pointer_project._id)
def test_return_embedded_public_node_pointers_logged_in(self):
res = self.app.get(self.public_url, auth=self.user_two.auth)
res_json = res.json['data']
assert_equal(len(res_json), 1)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_pointer_project._id)
def test_return_private_node_pointers_logged_out(self):
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
def test_return_private_node_pointers_logged_in_contributor(self):
res = self.app.get(self.private_url, auth=self.user.auth)
res_json = res.json['data']
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(len(res_json), 1)
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.pointer_project._id)
def test_return_private_node_pointers_logged_in_non_contributor(self):
res = self.app.get(self.private_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
def test_deleted_links_not_returned(self):
res = self.app.get(self.public_url, expect_errors=True)
res_json = res.json['data']
original_length = len(res_json)
self.public_pointer_project.is_deleted = True
self.public_pointer_project.save()
res = self.app.get(self.public_url)
res_json = res.json['data']
assert_equal(len(res_json), original_length - 1)
def test_cannot_access_retracted_node_links_list(self):
registration = RegistrationFactory(creator=self.user, project=self.public_project)
url = '/{}nodes/{}/node_links/'.format(API_BASE, registration._id)
retraction = RetractedRegistrationFactory(registration=registration, user=registration.creator)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
class TestNodeLinkCreate(ApiTestCase):
def setUp(self):
super(TestNodeLinkCreate, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=False, creator=self.user)
self.pointer_project = ProjectFactory(is_public=False, creator=self.user)
self.private_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.project._id)
self.private_payload = {
'data': {
"type": "node_links",
"relationships": {
'nodes': {
'data': {
'id': self.pointer_project._id,
'type': 'nodes'
}
}
}
}
}
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer_project = ProjectFactory(is_public=True, creator=self.user)
self.public_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.public_project._id)
self.public_payload = {
'data': {
"type": "node_links",
"relationships": {
'nodes': {
'data': {
'id': self.public_pointer_project._id,
'type': 'nodes'
}
}
}
}
}
self.fake_url = '/{}nodes/{}/node_links/'.format(API_BASE, 'fdxlq')
self.fake_payload = {
'data': {
"type": "node_links",
"relationships": {
'nodes': {
'data': {
'id': 'fdxlq',
'type': 'nodes'
}
}
}
}
}
self.point_to_itself_payload = {
'data': {
"type": "node_links",
"relationships": {
'nodes': {
'data': {
'id': self.public_project._id,
'type': 'nodes'
}
}
}
}
}
self.user_two = AuthUserFactory()
self.user_two_project = ProjectFactory(is_public=True, creator=self.user_two)
self.user_two_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.user_two_project._id)
self.user_two_payload = {
'data': {
'type': 'node_links',
'relationships': {
'nodes': {
'data': {
'id': self.user_two_project._id,
'type': 'nodes'
}
}
}
}
}
def test_add_node_link_relationships_is_a_list(self):
data = {
'data': {
'type': 'node_links',
'relationships': [{'target_node_id': self.public_pointer_project._id}]
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "Malformed request.")
def test_create_node_link_invalid_data(self):
res = self.app.post_json_api(self.public_url, "Incorrect data", auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "Malformed request.")
def test_add_node_link_no_relationships(self):
data = {
'data': {
'type': 'node_links',
'attributes': {
'id': self.public_pointer_project._id
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/relationships')
def test_add_node_links_empty_relationships(self):
data = {
'data': {
'type': 'node_links',
'relationships': {}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/relationships')
def test_add_node_links_no_nodes_key_in_relationships(self):
data = {
'data': {
'type': 'node_links',
'relationships': {
'data': {
'id': self.public_pointer_project._id,
'type': 'nodes'
}
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Malformed request.')
def test_add_node_links_no_data_in_relationships(self):
data = {
'data': {
'type': 'node_links',
'relationships': {
'nodes': {
'id': self.public_pointer_project._id,
'type': 'nodes'
}
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Request must include /data.')
def test_add_node_links_no_target_type_in_relationships(self):
data = {
'data': {
'type': 'node_links',
'relationships': {
'nodes': {
'data': {
'id': self.public_pointer_project._id
}
}
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Request must include /type.')
def test_add_node_links_no_target_id_in_relationships(self):
data = {
'data': {
'type': 'node_links',
'relationships': {
'nodes': {
'data': {
'type': 'nodes'
}
}
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/id')
def test_add_node_links_incorrect_target_id_in_relationships(self):
data = {
'data': {
'type': 'node_links',
'relationships': {
'nodes': {
'data': {
'type': 'nodes',
'id': '12345'
}
}
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_add_node_links_incorrect_target_type_in_relationships(self):
data = {
'data': {
'type': 'nodes',
'relationships': {
'nodes': {
'data': {
'type': 'Incorrect!',
'id': self.public_pointer_project._id
}
}
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_creates_node_link_target_not_nested(self):
payload = {
'data': {
'type': 'node_links',
'id': self.pointer_project._id
}
}
res = self.app.post_json_api(self.public_url, payload, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/relationships')
assert_equal(res.json['errors'][0]['detail'], 'Request must include /data/relationships.')
def test_creates_public_node_pointer_logged_out(self):
res = self.app.post_json_api(self.public_url, self.public_payload, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_CREATED, 'public_project')
def test_creates_public_node_pointer_logged_in(self):
res = self.app.post_json_api(self.public_url, self.public_payload, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
res = self.app.post_json_api(self.public_url, self.public_payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.content_type, 'application/vnd.api+json')
res_json = res.json['data']
embedded = res_json['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_pointer_project._id)
def test_creates_private_node_pointer_logged_out(self):
res = self.app.post_json_api(self.private_url, self.private_payload, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
def test_creates_private_node_pointer_logged_in_contributor(self):
res = self.app.post_json_api(self.private_url, self.private_payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
res_json = res.json['data']
embedded = res_json['embeds']['target_node']['data']['id']
assert_equal(embedded, self.pointer_project._id)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_creates_private_node_pointer_logged_in_non_contributor(self):
res = self.app.post_json_api(self.private_url, self.private_payload, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
def test_create_node_pointer_non_contributing_node_to_contributing_node(self):
res = self.app.post_json_api(self.private_url, self.user_two_payload, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_CREATED, 'project')
def test_create_node_pointer_contributing_node_to_non_contributing_node(self):
res = self.app.post_json_api(self.private_url, self.user_two_payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.content_type, 'application/vnd.api+json')
res_json = res.json['data']
embedded = res_json['embeds']['target_node']['data']['id']
assert_equal(embedded, self.user_two_project._id)
def test_create_pointer_non_contributing_node_to_fake_node(self):
res = self.app.post_json_api(self.private_url, self.fake_payload, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
def test_create_pointer_contributing_node_to_fake_node(self):
res = self.app.post_json_api(self.private_url, self.fake_payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_in('detail', res.json['errors'][0])
def test_create_fake_node_pointing_to_contributing_node(self):
res = self.app.post_json_api(self.fake_url, self.private_payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
assert_in('detail', res.json['errors'][0])
res = self.app.post_json_api(self.fake_url, self.private_payload, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 404)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_CREATED, 'public_project')
def test_create_node_pointer_to_itself(self):
res = self.app.post_json_api(self.public_url, self.point_to_itself_payload, auth=self.user.auth)
res_json = res.json['data']
assert_equal(res.status_code, 201)
assert_equal(res.content_type, 'application/vnd.api+json')
embedded = res_json['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_project._id)
def test_create_node_pointer_to_itself_unauthorized(self):
res = self.app.post_json_api(self.public_url, self.point_to_itself_payload, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_CREATED, 'public_project')
def test_create_node_pointer_already_connected(self):
res = self.app.post_json_api(self.public_url, self.public_payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.content_type, 'application/vnd.api+json')
res_json = res.json['data']
embedded = res_json['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_pointer_project._id)
res = self.app.post_json_api(self.public_url, self.public_payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_in('detail', res.json['errors'][0])
def test_cannot_add_link_to_registration(self):
registration = RegistrationFactory(creator=self.user)
url = '/{}nodes/{}/node_links/'.format(API_BASE, registration._id)
payload = {
'data': {
'type': 'node_links',
'relationships': {
'nodes': {
'data': {
'id': self.public_pointer_project._id,
'type': 'nodes'
}
}
}
}
}
res = self.app.post_json_api(url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_create_node_pointer_no_type(self):
payload = {
'data': {
'relationships': {
'nodes': {
'data': {
'id': self.user_two_project._id,
'type': 'nodes'
}
}
}
}
}
res = self.app.post_json_api(self.private_url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'This field may not be null.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/type')
def test_create_node_pointer_incorrect_type(self):
payload = {
'data': {
'type': 'Wrong type.',
'relationships': {
'nodes': {
'data': {
'id': self.user_two_project._id,
'type': 'nodes'
}
}
}
}
}
res = self.app.post_json_api(self.private_url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
assert_equal(res.json['errors'][0]['detail'], 'Resource identifier does not match server endpoint.')
class TestNodeLinksBulkCreate(ApiTestCase):
def setUp(self):
super(TestNodeLinksBulkCreate, self).setUp()
self.user = AuthUserFactory()
self.private_project = ProjectFactory(is_public=False, creator=self.user)
self.private_pointer_project = ProjectFactory(is_public=False, creator=self.user)
self.private_pointer_project_two = ProjectFactory(is_public=False, creator=self.user)
self.private_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.private_project._id)
self.private_payload = {
'data': [{
"type": "node_links",
"relationships": {
'nodes': {
'data': {
"id": self.private_pointer_project._id,
"type": 'nodes'
}
}
}
},
{
"type": "node_links",
"relationships": {
'nodes': {
'data': {
"id": self.private_pointer_project_two._id,
"type": 'nodes'
}
}
}
}]
}
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer_project = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer_project_two = ProjectFactory(is_public=True, creator=self.user)
self.public_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.public_project._id)
self.public_payload = {
'data': [{
"type": "node_links",
"relationships": {
'nodes': {
'data': {
"id": self.public_pointer_project._id,
"type": 'nodes'
}
}
}
},
{
"type": "node_links",
"relationships": {
'nodes': {
'data': {
"id": self.public_pointer_project_two._id,
"type": 'nodes'
}
}
}
}]
}
self.user_two = AuthUserFactory()
self.user_two_project = ProjectFactory(is_public=True, creator=self.user_two)
self.user_two_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.user_two_project._id)
self.user_two_payload = {'data': [{
'type': 'node_links',
'relationships': {
'nodes': {
'data': {
'id': self.user_two_project._id,
'type': 'nodes'
}
}
}
}
]}
def test_bulk_create_node_links_blank_request(self):
res = self.app.post_json_api(self.public_url, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
def test_bulk_creates_pointers_limits(self):
payload = {'data': [self.public_payload['data'][0]] * 101}
res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Bulk operation limit is 100, got 101.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data')
res = self.app.get(self.public_url)
assert_equal(res.json['data'], [])
def test_bulk_creates_project_target_not_nested(self):
payload = {'data': [{'type': 'node_links', 'target_node_id': self.private_pointer_project._id}]}
res = self.app.post_json_api(self.public_url, payload, auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/relationships')
assert_equal(res.json['errors'][0]['detail'], 'Request must include /data/relationships.')
def test_bulk_creates_public_node_pointers_logged_out(self):
res = self.app.post_json_api(self.public_url, self.public_payload, expect_errors=True, bulk=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
res = self.app.get(self.public_url)
assert_equal(res.json['data'], [])
def test_bulk_creates_public_node_pointer_logged_in_non_contrib(self):
res = self.app.post_json_api(self.public_url, self.public_payload,
auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
@assert_logs(NodeLog.POINTER_CREATED, 'public_project')
def test_bulk_creates_public_node_pointer_logged_in_contrib(self):
res = self.app.post_json_api(self.public_url, self.public_payload, auth=self.user.auth, bulk=True)
assert_equal(res.status_code, 201)
assert_equal(res.content_type, 'application/vnd.api+json')
res_json = res.json['data']
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_pointer_project._id)
embedded = res_json[1]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_pointer_project_two._id)
def test_bulk_creates_private_node_pointers_logged_out(self):
res = self.app.post_json_api(self.private_url, self.private_payload, expect_errors=True, bulk=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.json['data'], [])
@assert_logs(NodeLog.POINTER_CREATED, 'private_project', index=-1)
@assert_logs(NodeLog.POINTER_CREATED, 'private_project')
def test_bulk_creates_private_node_pointer_logged_in_contributor(self):
res = self.app.post_json_api(self.private_url, self.private_payload, auth=self.user.auth, bulk=True)
assert_equal(res.status_code, 201)
res_json = res.json['data']
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.private_pointer_project._id)
embedded = res_json[1]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.private_pointer_project_two._id)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_bulk_creates_private_node_pointers_logged_in_non_contributor(self):
res = self.app.post_json_api(self.private_url, self.private_payload,
auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.json['data'], [])
def test_bulk_creates_node_pointers_non_contributing_node_to_contributing_node(self):
res = self.app.post_json_api(self.private_url, self.user_two_payload,
auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_CREATED, 'private_project')
def test_bulk_creates_node_pointers_contributing_node_to_non_contributing_node(self):
res = self.app.post_json_api(self.private_url, self.user_two_payload, auth=self.user.auth, bulk=True)
assert_equal(res.status_code, 201)
assert_equal(res.content_type, 'application/vnd.api+json')
res_json = res.json['data']
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.user_two_project._id)
res = self.app.get(self.private_url, auth=self.user.auth)
res_json = res.json['data']
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.user_two_project._id)
def test_bulk_creates_pointers_non_contributing_node_to_fake_node(self):
fake_payload = {'data': [{'type': 'node_links', 'relationships': {'nodes': {'data': {'id': 'fdxlq', 'type': 'nodes'}}}}]}
res = self.app.post_json_api(self.private_url, fake_payload,
auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
def test_bulk_creates_pointers_contributing_node_to_fake_node(self):
fake_payload = {'data': [{'type': 'node_links', 'relationships': {'nodes': {'data': {'id': 'fdxlq', 'type': 'nodes'}}}}]}
res = self.app.post_json_api(self.private_url, fake_payload,
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_in('detail', res.json['errors'][0])
def test_bulk_creates_fake_nodes_pointing_to_contributing_node(self):
fake_url = '/{}nodes/{}/node_links/'.format(API_BASE, 'fdxlq')
res = self.app.post_json_api(fake_url, self.private_payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 404)
assert_in('detail', res.json['errors'][0])
res = self.app.post_json_api(fake_url, self.private_payload, auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 404)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_CREATED, 'public_project')
def test_bulk_creates_node_pointer_to_itself(self):
point_to_itself_payload = {'data': [{'type': 'node_links', 'relationships': {'nodes': {'data': {'type': 'nodes', 'id': self.public_project._id}}}}]}
res = self.app.post_json_api(self.public_url, point_to_itself_payload, auth=self.user.auth, bulk=True)
assert_equal(res.status_code, 201)
assert_equal(res.content_type, 'application/vnd.api+json')
res_json = res.json['data']
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_project._id)
def test_bulk_creates_node_pointer_to_itself_unauthorized(self):
point_to_itself_payload = {'data': [{'type': 'node_links', 'relationships': {'nodes': {'data': {'type': 'nodes', 'id': self.public_project._id}}}}]}
res = self.app.post_json_api(self.public_url, point_to_itself_payload, bulk=True, auth=self.user_two.auth,
expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_CREATED, 'public_project')
@assert_logs(NodeLog.POINTER_CREATED, 'public_project', index=-1)
def test_bulk_creates_node_pointer_already_connected(self):
res = self.app.post_json_api(self.public_url, self.public_payload, auth=self.user.auth, bulk=True)
assert_equal(res.status_code, 201)
assert_equal(res.content_type, 'application/vnd.api+json')
res_json = res.json['data']
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_pointer_project._id)
embedded_two = res_json[1]['embeds']['target_node']['data']['id']
assert_equal(embedded_two, self.public_pointer_project_two._id)
res = self.app.post_json_api(self.public_url, self.public_payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_in("Target Node '{}' already pointed to by '{}'.".format(self.public_pointer_project._id, self.public_project._id), res.json['errors'][0]['detail'])
def test_bulk_cannot_add_link_to_registration(self):
registration = RegistrationFactory(creator=self.user)
url = '/{}nodes/{}/node_links/'.format(API_BASE, registration._id)
payload = {'data': [{'type': 'node_links', 'relationships': {'nodes': {'data': {'type': 'nodes', 'id': self.public_pointer_project._id}}}}]}
res = self.app.post_json_api(url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 404)
def test_bulk_creates_node_pointer_no_type(self):
payload = {'data': [{'relationships': {'nodes': {'data': {'type': 'nodes', 'id': self.user_two_project._id}}}}]}
res = self.app.post_json_api(self.private_url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'This field may not be null.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/0/type')
def test_bulk_creates_node_pointer_incorrect_type(self):
payload = {'data': [{'type': 'Wrong type.', 'relationships': {'nodes': {'data': {'type': 'nodes', 'id': self.user_two_project._id}}}}]}
res = self.app.post_json_api(self.private_url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 409)
assert_equal(res.json['errors'][0]['detail'], 'Resource identifier does not match server endpoint.')
class TestBulkDeleteNodeLinks(ApiTestCase):
def setUp(self):
super(TestBulkDeleteNodeLinks, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user, is_public=False)
self.pointer_project = ProjectFactory(creator=self.user, is_public=True)
self.pointer_project_two = ProjectFactory(creator=self.user, is_public=True)
self.pointer = self.project.add_pointer(self.pointer_project, auth=Auth(self.user), save=True)
self.pointer_two = self.project.add_pointer(self.pointer_project_two, auth=Auth(self.user), save=True)
self.private_payload = {
"data": [
{"type": "node_links", "id": self.pointer._id},
{"type": "node_links", "id": self.pointer_two._id}
]
}
self.private_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.project._id)
self.user_two = AuthUserFactory()
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer_project = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer_project_two = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer = self.public_project.add_pointer(self.public_pointer_project,
auth=Auth(self.user),
save=True)
self.public_pointer_two = self.public_project.add_pointer(self.public_pointer_project_two,
auth=Auth(self.user),
save=True)
self.public_payload = {
'data': [
{'type': 'node_links', 'id': self.public_pointer._id},
{'type': 'node_links', 'id': self.public_pointer_two._id}
]
}
self.public_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.public_project._id)
def test_bulk_delete_node_links_blank_request(self):
res = self.app.delete_json_api(self.public_url, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
def test_bulk_delete_pointer_limits(self):
res = self.app.delete_json_api(self.public_url, {'data': [self.public_payload['data'][0]] * 101},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Bulk operation limit is 100, got 101.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data')
def test_bulk_delete_dict_inside_data(self):
res = self.app.delete_json_api(self.public_url, {'data': {'id': self.public_project._id, 'type': 'node_links'}},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Expected a list of items but got type "dict".')
def test_bulk_delete_pointers_no_type(self):
payload = {'data': [
{'id': self.public_pointer._id},
{'id': self.public_pointer_two._id}
]}
res = self.app.delete_json_api(self.public_url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['source']['pointer'], "/data/type")
def test_bulk_delete_pointers_incorrect_type(self):
payload = {'data': [
{'id': self.public_pointer._id, 'type': 'Incorrect type.'},
{'id': self.public_pointer_two._id, 'type': 'Incorrect type.'}
]}
res = self.app.delete_json_api(self.public_url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 409)
def test_bulk_delete_pointers_no_id(self):
payload = {'data': [
{'type': 'node_links'},
{'type': 'node_links'}
]}
res = self.app.delete_json_api(self.public_url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['source']['pointer'], "/data/id")
def test_bulk_delete_pointers_no_data(self):
res = self.app.delete_json_api(self.public_url, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Request must contain array of resource identifier objects.')
def test_bulk_delete_pointers_payload_is_empty_dict(self):
res = self.app.delete_json_api(self.public_url, {}, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Request must include /data.')
def test_cannot_delete_if_registration(self):
registration = RegistrationFactory(project=self.public_project)
url = '/{}nodes/{}/node_links/'.format(API_BASE, registration._id)
res = self.app.delete_json_api(url, self.public_payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 404)
def test_bulk_deletes_public_node_pointers_logged_out(self):
res = self.app.delete_json_api(self.public_url, self.public_payload, expect_errors=True, bulk=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
def test_bulk_deletes_public_node_pointers_fails_if_bad_auth(self):
node_count_before = len(self.public_project.nodes_pointer)
res = self.app.delete_json_api(self.public_url, self.public_payload,
auth=self.user_two.auth, expect_errors=True, bulk=True)
        # This could arguably be a 405, but we don't need to go crazy with status codes
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
self.public_project.reload()
assert_equal(node_count_before, len(self.public_project.nodes_pointer))
@assert_logs(NodeLog.POINTER_REMOVED, 'public_project')
@assert_logs(NodeLog.POINTER_REMOVED, 'public_project', index=-1)
def test_bulk_deletes_public_node_pointers_succeeds_as_owner(self):
node_count_before = len(self.public_project.nodes_pointer)
res = self.app.delete_json_api(self.public_url, self.public_payload, auth=self.user.auth, bulk=True)
self.public_project.reload()
assert_equal(res.status_code, 204)
assert_equal(node_count_before - 2, len(self.public_project.nodes_pointer))
self.public_project.reload()
def test_bulk_deletes_private_node_pointers_logged_out(self):
res = self.app.delete_json_api(self.private_url, self.private_payload, expect_errors=True, bulk=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_REMOVED, 'project', index=-1)
@assert_logs(NodeLog.POINTER_REMOVED, 'project')
def test_bulk_deletes_private_node_pointers_logged_in_contributor(self):
res = self.app.delete_json_api(self.private_url, self.private_payload, auth=self.user.auth, bulk=True)
self.project.reload() # Update the model to reflect changes made by post request
assert_equal(res.status_code, 204)
assert_equal(len(self.project.nodes_pointer), 0)
def test_bulk_deletes_private_node_pointers_logged_in_non_contributor(self):
res = self.app.delete_json_api(self.private_url, self.private_payload,
auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_REMOVED, 'public_project', index=-1)
@assert_logs(NodeLog.POINTER_REMOVED, 'public_project')
def test_return_bulk_deleted_public_node_pointer(self):
res = self.app.delete_json_api(self.public_url, self.public_payload, auth=self.user.auth, bulk=True)
self.public_project.reload() # Update the model to reflect changes made by post request
assert_equal(res.status_code, 204)
pointer_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.public_project._id, self.public_pointer._id)
        # check that the deleted pointer cannot be returned
res = self.app.get(pointer_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
@assert_logs(NodeLog.POINTER_REMOVED, 'project', index=-1)
@assert_logs(NodeLog.POINTER_REMOVED, 'project')
def test_return_bulk_deleted_private_node_pointer(self):
res = self.app.delete_json_api(self.private_url, self.private_payload, auth=self.user.auth, bulk=True)
self.project.reload() # Update the model to reflect changes made by post request
assert_equal(res.status_code, 204)
pointer_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.project._id, self.pointer._id)
        # check that the deleted pointer cannot be returned
res = self.app.get(pointer_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
# Regression test for https://openscience.atlassian.net/browse/OSF-4322
def test_bulk_delete_link_that_is_not_linked_to_correct_node(self):
project = ProjectFactory(creator=self.user)
# The node link belongs to a different project
res = self.app.delete_json_api(
self.private_url, self.public_payload,
auth=self.user.auth,
expect_errors=True,
bulk=True
)
assert_equal(res.status_code, 400)
errors = res.json['errors']
assert_equal(len(errors), 1)
assert_equal(errors[0]['detail'], 'Node link does not belong to the requested node.')
| {
"content_hash": "5f40ce599bebfc7d56f23740203d2a35",
"timestamp": "",
"source": "github",
"line_count": 944,
"max_line_length": 163,
"avg_line_length": 46.00741525423729,
"alnum_prop": 0.5768460316363887,
"repo_name": "KAsante95/osf.io",
"id": "b5cd7e75d99df30255216dfeaadf4e0a1545aba8",
"size": "43455",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "api_tests/nodes/views/test_node_links_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "133911"
},
{
"name": "HTML",
"bytes": "68108"
},
{
"name": "JavaScript",
"bytes": "1394022"
},
{
"name": "Mako",
"bytes": "639052"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "4908262"
},
{
"name": "Shell",
"bytes": "2118"
}
],
"symlink_target": ""
} |
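Every create test above builds the same JSON:API envelope for the node_links endpoint by hand. A small helper along these lines (not part of the test module, shown only to make the payload shape explicit) produces the same structure; the bulk tests simply wrap a list of such objects under 'data'.

def node_link_payload(target_id):
    # Body accepted by the node_links create endpoint exercised in the tests above.
    return {
        'data': {
            'type': 'node_links',
            'relationships': {
                'nodes': {
                    'data': {
                        'id': target_id,
                        'type': 'nodes',
                    }
                }
            }
        }
    }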
from service import ServiceBase
import os, sys, time, subprocess, atexit
from signal import SIGTERM
class LinuxService(ServiceBase):
def __init__(self, name, label, stdout='/dev/null', stderr='/dev/null'):
ServiceBase.__init__(self, name, label, stdout, stderr)
self.pidfile = '/tmp/%s.pid' % name
self.config_file = '/etc/%s.conf' % name
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
return
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
return
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file('/dev/null', 'r')
so = file(self.stdout or '/dev/null', 'a+')
se = file(self.stderr or '/dev/null', 'a+')
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
def getpid(self):
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
return pid
def status(self):
pid = self.getpid()
if pid:
return 'Service running with PID %s.' % pid
else:
return 'Service is not running.'
def check_permissions(self):
if not os.geteuid() == 0:
return (False, 'This script must be run with root permissions.')
else:
return (True, '')
def start(self):
"""
Start the daemon
"""
pid = self.getpid()
if pid:
message = "Service already running under PID %s\n"
            sys.stderr.write(message % pid)
return
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""
Stop the daemon
"""
pid = self.getpid()
if not pid:
message = "Service is not running\n"
sys.stderr.write(message)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.5)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
return
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
atexit.register(self.terminate)
args = self.load_configuration()[0]
stdout = open(self.stdout, 'a+')
stderr = open(self.stderr, 'a+')
process = subprocess.Popen(args, stdout=stdout, stderr=stderr)
file(self.pidfile,'w+').write("%s\n" % process.pid)
process.wait()
self.terminate()
def terminate(self):
try:
os.remove(self.pidfile)
except:
pass
def install(self):
env = self.detect_environment()
src_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'service.py')
# make sure this script is executable
self.run_command('chmod', '+x', src_path)
# link this daemon to the service directory
dest_path = env['rc.d-path'] + self.name
os.symlink(src_path, dest_path)
# start the service at boot
install_command = self.get_service_installer_command(env)
result = self.run_command(*install_command)
self.start()
def uninstall(self):
self.stop()
env = self.detect_environment()
# stop the service from autostarting
uninstall_command = self.get_service_uninstaller_command(env)
result = self.run_command(*uninstall_command)
# remove link to the script from the service directory
path = env['rc.d-path'] + self.name
os.remove(path)
def detect_environment(self):
"""
Returns a dictionary of command/path to the required command-line applications.
One key is 'dist' which will either be 'debian' or 'redhat', which is the best
guess as to which Linux distribution the current system is based on.
"""
check_for = [
'chkconfig',
'service',
'update-rc.d',
'rpm',
'dpkg',
]
env = dict()
for cmd in check_for:
result = self.run_command('which', cmd)
if result[0]:
env[cmd] = result[0].replace('\n', '')
if 'rpm' in env:
env['dist'] = 'redhat'
env['rc.d-path'] = '/etc/rc.d/init.d/'
elif 'dpkg' in env:
env['dist'] = 'debian'
env['rc.d-path'] = '/etc/init.d/'
else:
env['dist'] = 'unknown'
env['rc.d-path'] = '/dev/null/'
return env
def get_service_installer_command(self, env):
"""
Returns list of args required to set a service to run on boot.
"""
if env['dist'] == 'redhat':
cmd = env['chkconfig']
return [cmd, self.name, 'on']
else:
cmd = env['update-rc.d']
return [cmd, self.name, 'defaults']
def get_service_uninstaller_command(self, env):
"""
        Returns list of args required to stop a service from running at boot.
"""
if env['dist'] == 'redhat':
cmd = env['chkconfig']
return [cmd, self.name, 'off']
else:
cmd = env['update-rc.d']
return [cmd, self.name, 'remove']
| {
"content_hash": "3a16c8b437c341811b39e0d27d371c56",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 89,
"avg_line_length": 30.32126696832579,
"alnum_prop": 0.5036561707207879,
"repo_name": "mmangura/Base",
"id": "eece4801d7d338ad3aaad86957d27ec52ceef771",
"size": "6701",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "scripts/service/linux.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "13926"
},
{
"name": "CSS",
"bytes": "111307"
},
{
"name": "HTML",
"bytes": "25448092"
},
{
"name": "JavaScript",
"bytes": "251247"
},
{
"name": "Python",
"bytes": "1073647"
},
{
"name": "Shell",
"bytes": "91020"
},
{
"name": "Tcl",
"bytes": "1205166"
},
{
"name": "XML",
"bytes": "1153"
}
],
"symlink_target": ""
} |
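A minimal sketch of driving LinuxService from a command-line entry point. ServiceBase (and the load_configuration/run_command helpers this class relies on) is defined elsewhere, so the import path, constructor arguments, and log paths below are assumptions.

import sys
from linux import LinuxService  # module path assumed from this repo layout

def main():
    svc = LinuxService('mydaemon', 'My Daemon',
                       stdout='/var/log/mydaemon.log',
                       stderr='/var/log/mydaemon.err')
    ok, msg = svc.check_permissions()
    if not ok:
        sys.stderr.write(msg + "\n")
        sys.exit(1)
    command = sys.argv[1] if len(sys.argv) > 1 else 'status'
    if command == 'start':
        svc.start()
    elif command == 'stop':
        svc.stop()
    elif command == 'restart':
        svc.restart()
    else:
        sys.stdout.write(svc.status() + "\n")

if __name__ == '__main__':
    main()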
from __future__ import absolute_import
from .assertrepr_compare import pytest_assertrepr_compare | {
"content_hash": "132528e05e9a7a9117d66c6f01fa9a07",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 57,
"avg_line_length": 48,
"alnum_prop": 0.8333333333333334,
"repo_name": "jschwartz79/springfield",
"id": "110c15ec60852e53fdb5afbffb3f6f8b490703c8",
"size": "96",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/pytest_springfield/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1101"
},
{
"name": "Python",
"bytes": "54873"
}
],
"symlink_target": ""
} |
"""
Pyccuweather
The Python Accuweather API
connector.py
Basic connector object and methods
(c) Chris von Csefalvay, 2015.
"""
import requests
from pyccuweather import errors
from pyccuweather.froots import froot
from pyccuweather.objects import *
import os
class Connection(object):
"""
Represents a connection to the Accuweather API.
:param API_KEY: API key
:param dev: whether the dev mode api (apidev.accuweather.com) or the production api (api.accuweather.com) is used
:param retry: number of retries of failed operations - TODO: implement
:raise errors.MalformattedAPIKeyError: if the API key is not a 32-character string, an error is thrown
"""
def __init__(self, API_KEY: str=None, dev: bool=True, retry: int=3, timeout=None):
# TODO: implement retries
if API_KEY is None:
try:
self.API_KEY = os.environ["ACCUWEATHER_APIKEY"]
except KeyError:
raise errors.NoAPIKeyProvided()
else:
self.API_KEY = API_KEY
try:
assert isinstance(self.API_KEY, str)
            assert len(self.API_KEY) == 32
except AssertionError:
raise errors.MalformattedAPIKeyError()
self.API_ROOT = "http://apidev.accuweather.com" if dev is True else "http://api.accuweather.com"
self.API_VERSION = "v1"
self.retries = retry
self.timeout = timeout
def __str__(self):
return u"Accuweather connector to {0:s}".format(self.API_ROOT)
def wipe_api_key(self):
"""
Wipes API key from a Connection instance
:return: void
"""
self.API_KEY = None
########################################################
# Location resolvers #
########################################################
def loc_geoposition(self, lat: float, lon: float):
"""
Resolves location based on geoposition.
:param lat: latitude
:param lon: longitude
:return: Location object
"""
try:
assert isinstance(lat, (int, float)) and isinstance(lon, (int, float))
except:
raise ValueError
try:
assert abs(lat) <= 90 and abs(lon) <= 180
except:
raise errors.RangeError(lat, lon)
payload = {"q": u"{0:.4f},{1:.4f}".format(lat, lon),
"apikey": self.API_KEY}
resp = requests.get(url=froot("loc_geoposition"),
params=payload).json()
assert len(resp) > 0
if isinstance(resp, list):
return Location(resp[0])
elif isinstance(resp, dict):
return Location(resp)
def loc_string(self, search_string: str, country_code: str=None):
"""
Resolves a search string and an optional country code to a location.
:param search_string: search string
:param country_code: country code to which the search will be limited
:return: a LocationSet of results
"""
if country_code is not None:
try:
                assert len(country_code) == 2
except:
raise errors.InvalidCountryCodeError(country_code)
url = froot("loc_search_country", country_code=country_code)
payload = {"q": search_string,
"apikey": self.API_KEY}
else:
url = froot("loc_search")
payload = {"q": search_string,
"apikey": self.API_KEY}
resp = requests.get(url=url,
params=payload, timeout=self.timeout).json()
_result = list()
if len(resp) > 0:
for each in resp:
loc = Location(lkey=each["Key"],
lat=each["GeoPosition"]["Latitude"],
lon=each["GeoPosition"]["Longitude"],
localized_name=each["LocalizedName"],
english_name=each["EnglishName"],
region=each["Region"],
country=each["Country"],
administrative_area=each["AdministrativeArea"],
timezone=each["TimeZone"]
)
_result.append(loc)
else:
raise errors.NoResultsError(search_string)
return (LocationSet(results=_result,
search_expression=search_string,
country=country_code))
def loc_postcode(self, country_code: str, postcode: str):
"""
Resolves location based on postcode. Only works in selected countries (US, Canada).
:param country_code: Two-letter country code
:param postcode: Postcode
:return: Location object
"""
try:
            assert len(country_code) == 2
except:
raise errors.InvalidCountryCodeError(country_code)
url = froot("loc_postcode", country_code=country_code)
payload = {"q": postcode,
"apikey": self.API_KEY}
resp = requests.get(url=url,
params=payload, timeout=self.timeout).json()
assert len(resp) > 0
if isinstance(resp, list):
return Location(resp[0])
elif isinstance(resp, dict):
return Location(resp)
def loc_ip(self, ip_address:str):
"""
Resolves location based on IP address.
:param ip_address: IP address
:return: Location object
"""
url = froot("loc_ip_address")
payload = {"q": ip_address,
"apikey": self.API_KEY}
resp = requests.get(url=url,
params=payload, timeout=self.timeout).json()
assert len(resp) > 0
if isinstance(resp, list):
return Location(resp[0])
elif isinstance(resp, dict):
return Location(resp)
def loc_lkey(self, lkey:int):
"""
Resolves location by Accuweather location key.
:param lkey: Accuweather location key
:return: Location object
"""
assert isinstance(lkey, int)
url = froot("loc_lkey", location_key=lkey)
payload = {"apikey": self.API_KEY}
resp = requests.get(url=url,
params=payload, timeout=self.timeout).json()
assert len(resp) > 0
if isinstance(resp, list):
return Location(resp[0])
elif isinstance(resp, dict):
return Location(resp)
########################################################
# Current conditions #
########################################################
def get_current_wx(self, lkey:int=None, location:Location=None, current:int=0, details:bool=True):
"""
Get current weather conditions.
:param lkey: Accuweather location key
:param location: Location object
:param current: horizon - current weather, 6 hours or 24 hours
:param details: should details be provided?
:return: raw observations or CurrentObs object
"""
assert current in [0, 6, 24]
assert lkey is not None or location is not None
        if current == 0:
url = froot("currentconditions", location_key=lkey)
else:
url = froot("currentconditions_{current}".format(current=current), location_key=lkey)
payload = {"apikey": self.API_KEY,
"details": "true" if details is True else "false"}
resp = requests.get(url=url,
params=payload, timeout=self.timeout)
return CurrentObs(resp.json())
########################################################
# Forecasts #
########################################################
def get_forecast(self, forecast_type:str, lkey:int, details:bool=True, metric:bool=True):
forecast_types = ["1h", "12h", "24h", "72h", "120h", "240h",
"1d", "5d", "10d", "15d", "25d", "45d"]
assert forecast_type in forecast_types
fkeyid = u"forecast_{0:s}".format(forecast_type)
url = froot(fkeyid, location_key=lkey)
payload = {"apikey": self.API_KEY,
"details": "true" if details == True else "false",
"metric": "true" if metric == True else "false"}
resp = requests.get(url=url,
params=payload, timeout=self.timeout)
        if forecast_type[-1] == "h":
return HourlyForecasts(resp.json())
        elif forecast_type[-1] == "d":
return DailyForecasts(resp.json())
########################################################
# Air quality #
########################################################
def get_airquality(self, lkey:int, current:bool=True):
assert isinstance(lkey, int)
if current:
fkeyid = "airquality_current"
else:
fkeyid = "airquality_yesterday"
url = froot(fkeyid, location_key=lkey)
payload = {"apikey": self.API_KEY}
return requests.get(url=url,
params=payload, timeout=self.timeout)
########################################################
# Climo #
########################################################
def get_actuals(self, lkey:int, start_date:str, end_date:str=None):
# TODO: Return object
# (needs API access)
if end_date:
fkeyid = "climo_actuals_range"
url = froot(fkeyid, location_key=lkey)
payload = {"apikey": self.API_KEY,
"start": start_date,
"end": end_date}
else:
fkeyid = "climo_actuals_date"
url = froot(fkeyid,
date=start_date,
location_key=lkey)
payload = {"apikey": self.API_KEY}
return requests.get(url=url,
params=payload, timeout=self.timeout)
def get_records(self, lkey, start_date, end_date=None):
# TODO: Return object
# (needs API access)
if end_date:
fkeyid = "climo_records_range"
url = froot(fkeyid, location_key=lkey)
payload = {"apikey": self.API_KEY,
"start": start_date,
"end": end_date}
else:
fkeyid = "climo_records_date"
url = froot(fkeyid,
date=start_date,
location_key=lkey)
payload = {"apikey": self.API_KEY}
return requests.get(url=url,
params=payload, timeout=self.timeout)
def get_normals(self, lkey, start_date, end_date=None):
# TODO: Return object
# (needs API access)
if end_date:
fkeyid = "climo_normals_range"
url = froot(fkeyid, location_key=lkey)
payload = {"apikey": self.API_KEY,
"start": start_date,
"end": end_date}
else:
fkeyid = "climo_normals_date"
url = froot(fkeyid,
date=start_date,
location_key=lkey)
payload = {"apikey": self.API_KEY}
return requests.get(url=url,
params=payload, timeout=self.timeout)
########################################################
# Alerts #
########################################################
def get_alerts(self, lkey, forecast_range):
# TODO: Return object
# (needs API access)
assert isinstance(forecast_range, int)
fkeyid = u"alarms_{0:d}d".format(forecast_range)
url = froot(fkeyid, location_key=lkey)
payload = {"apikey": self.API_KEY}
return requests.get(url=url,
params=payload, timeout=self.timeout) | {
"content_hash": "8afd33bc376b8e8e1ad598f0daa322f5",
"timestamp": "",
"source": "github",
"line_count": 373,
"max_line_length": 117,
"avg_line_length": 33.088471849865954,
"alnum_prop": 0.4944093339815265,
"repo_name": "chrisvoncsefalvay/pyccuweather",
"id": "ec1160cd128baf406c1c7be3a7f677e358031fca",
"size": "12358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyccuweather/connector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44244"
}
],
"symlink_target": ""
} |
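A short usage sketch for the connector above. The key is a placeholder, and the attribute names on the returned Location object (lkey below) come from pyccuweather.objects, which is not included here, so treat them as assumptions.

from pyccuweather.connector import Connection

conn = Connection(API_KEY="0" * 32, dev=False)  # placeholder 32-character key; dev=False targets api.accuweather.com
loc = conn.loc_postcode("US", "97201")          # postcode resolution works for selected countries only
# lkey is assumed to be the location-key attribute exposed by Location.
current = conn.get_current_wx(lkey=loc.lkey, details=True)
five_day = conn.get_forecast("5d", loc.lkey)    # daily types return DailyForecasts, hourly types HourlyForecasts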
"""Support for Proliphix NT10e Thermostats."""
from __future__ import annotations
import proliphix
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
ClimateEntityFeature,
HVACAction,
HVACMode,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_HOST,
CONF_PASSWORD,
CONF_USERNAME,
PRECISION_TENTHS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
ATTR_FAN = "fan"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Proliphix thermostats."""
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
host = config.get(CONF_HOST)
pdp = proliphix.PDP(host, username, password)
pdp.update()
add_entities([ProliphixThermostat(pdp)], True)
class ProliphixThermostat(ClimateEntity):
"""Representation a Proliphix thermostat."""
_attr_supported_features = ClimateEntityFeature.TARGET_TEMPERATURE
def __init__(self, pdp):
"""Initialize the thermostat."""
self._pdp = pdp
self._name = None
@property
def should_poll(self):
"""Set up polling needed for thermostat."""
return True
def update(self):
"""Update the data from the thermostat."""
self._pdp.update()
self._name = self._pdp.name
@property
def name(self):
"""Return the name of the thermostat."""
return self._name
@property
def precision(self):
"""Return the precision of the system.
Proliphix temperature values are passed back and forth in the
API as tenths of degrees F (i.e. 690 for 69 degrees).
"""
return PRECISION_TENTHS
@property
def extra_state_attributes(self):
"""Return the device specific state attributes."""
return {ATTR_FAN: self._pdp.fan_state}
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def current_temperature(self):
"""Return the current temperature."""
return self._pdp.cur_temp
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._pdp.setback
@property
def hvac_action(self) -> HVACAction:
"""Return the current state of the thermostat."""
state = self._pdp.hvac_state
if state == 1:
return HVACAction.OFF
if state in (3, 4, 5):
return HVACAction.HEATING
if state in (6, 7):
return HVACAction.COOLING
return HVACAction.IDLE
@property
def hvac_mode(self) -> HVACMode:
"""Return the current state of the thermostat."""
if self._pdp.is_heating:
return HVACMode.HEAT
if self._pdp.is_cooling:
return HVACMode.COOL
return HVACMode.OFF
@property
def hvac_modes(self) -> list[HVACMode]:
"""Return available HVAC modes."""
return []
def set_temperature(self, **kwargs):
"""Set new target temperature."""
if (temperature := kwargs.get(ATTR_TEMPERATURE)) is None:
return
self._pdp.setback = temperature
| {
"content_hash": "2a2afaa476000180fe3d0dc99cc44844",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 75,
"avg_line_length": 27.58695652173913,
"alnum_prop": 0.6448647228789073,
"repo_name": "toddeye/home-assistant",
"id": "0907602ed9dbe24cfe8470feae5e5704e20585d2",
"size": "3807",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/proliphix/climate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
import nltk
sent = "They wind back the clock, while we chase after the wind."
text = nltk.word_tokenize(sent)
print text
print nltk.pos_tag(text)
| {
"content_hash": "7662ab90e4d26181891f450ef4b54936",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 65,
"avg_line_length": 21.142857142857142,
"alnum_prop": 0.7432432432432432,
"repo_name": "MingjunZhou/nltk_book",
"id": "f1fcecd5dee9c527dba0fbd8404a9f71db27c136",
"size": "148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solutions/ch5ex3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "32961"
}
],
"symlink_target": ""
} |
import logging
class HtmlSource:
    def __init__(self, src, manualDependency=""):
self._src = src
self.indentation = 0
self.fileExtension = src.split(".")[-1]
        self.manualDependency = manualDependency
self.hasRendered = False
def setSource(self, src):
self._src = src
self.hasRendered = False
def render(self):
self.hasRendered = True
if self.manualDependency == "":
if self.fileExtension == "css":
return "rel=\"stylesheet\" type=\"text/css\" href=\"" + self._src + "\">"
elif self.fileExtension == "js":
return "<script src=\"" + self._src + "\"></script>"
else:
self.hasRendered = False
raise Exception("This generator is still quite stupid and doesn't know about the " + self.fileExtension + ". "
"Feel free to add your own syntax to the code for your own "
"type of dependency.\nIf you just want to add a dependency the quick and dirty way, add it to "
"manualDependency like this:\nweirdDependency = HtmlDependency(None, '<script src=\"file.js\"></script>')\n"
"Then call dependency.render()")
else:
return self.manualDependency
| {
"content_hash": "56a7f77e394bf0e311b7fd4ee1209543",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 140,
"avg_line_length": 39.114285714285714,
"alnum_prop": 0.539810080350621,
"repo_name": "smckee6192/HTMLGenerator",
"id": "e5f2518b7123c1b575c96a9c510f6061792b7217",
"size": "1369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HtmlSource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6386"
}
],
"symlink_target": ""
} |
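A quick illustration of the class above; the expected output in the comments assumes the corrected <link> markup for CSS files, and the file names are invented for the example.

stylesheet = HtmlSource("css/site.css")
script = HtmlSource("js/app.js")
print(stylesheet.render())  # <link rel="stylesheet" type="text/css" href="css/site.css">
print(script.render())      # <script src="js/app.js"></script>
# Unsupported extensions can still be emitted by supplying the markup manually:
favicon = HtmlSource("favicon.ico", '<link rel="icon" href="favicon.ico">')
print(favicon.render())     # <link rel="icon" href="favicon.ico">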
import sys
import django
from django.conf import settings
settings.configure(
DEBUG=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
ROOT_URLCONF='urls',
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'arcutils',
),
MIDDLEWARE_CLASSES=[],
LDAP={
"default": {
"host": "ldap://ldap-login.oit.pdx.edu",
"username": "",
"password": "",
"search_dn": "ou=people,dc=pdx,dc=edu",
}
}
)
if django.VERSION[:2] >= (1, 7):
from django import setup
else:
setup = lambda: None
from django.test.simple import DjangoTestSuiteRunner
from django.test.utils import setup_test_environment
setup()
setup_test_environment()
test_runner = DjangoTestSuiteRunner(verbosity=1)
failures = test_runner.run_tests(['arcutils', ])
if failures:
sys.exit(failures)
| {
"content_hash": "93e78e69f4e0c5237e9d1c46377e730f",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 52,
"avg_line_length": 22.644444444444446,
"alnum_prop": 0.5966633954857704,
"repo_name": "kfarr2/django-arcutils",
"id": "3cc6cb0e542a046e3035b5a986906489749e0605",
"size": "1041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runtests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32135"
}
],
"symlink_target": ""
} |
from django import template
from django.core.exceptions import ImproperlyConfigured
from django.utils.safestring import mark_safe
from ..processors import AssetRegistry
register = template.Library()
class AssetsNode(template.Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
context.render_context["AMN"] = AssetRegistry()
content = self.nodelist.render(context)
# Now output our tags
extra_tags = "\n".join(context.render_context["AMN"].render(context))
return mark_safe(extra_tags) + content
@register.tag
def assets(parser, token):
nodelist = parser.parse()
return AssetsNode(nodelist)
@register.simple_tag(takes_context=True)
def asset(context, filename=None, *args, **kwargs):
"""
{% asset alias mode=? ... %}
{% asset file.js ... %}
{% asset name depends depends... %}
alias = short name for asset
file = static relative filename
mode = asset mode [inferred from filename extension]
args == dependencies [aliases or files]
"""
alias = kwargs.get("alias")
mode = kwargs.get("mode")
if alias is None and filename is None:
raise template.TemplateSyntaxError("asset tag requires at least one of name or alias")
if filename is None and mode is None:
raise template.TemplateSyntaxError("asset tag reqires mode when using an alias")
if alias == filename:
raise template.TemplateSyntaxError("Attempt to alias asset to itself.")
for ctx in reversed(context.render_context.dicts):
try:
registry = ctx['AMN']
except KeyError:
continue
else:
break
else:
raise ImproperlyConfigured("Must use {% assets %} tag before any {% asset %} tags.")
registry.add_asset(filename, alias, mode, args)
return ""
| {
"content_hash": "aa516d7d490f88662c08356dd6f57ce2",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 94,
"avg_line_length": 28.861538461538462,
"alnum_prop": 0.6572494669509595,
"repo_name": "funkybob/django-amn",
"id": "d08fee873672eac34b7b4ed85155f91542ed11d6",
"size": "1876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/damn/templatetags/damn.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "17093"
}
],
"symlink_target": ""
} |
import numpy as np
d = 64 # dimension
nb = 100000 # database size
nq = 10000 # nb of queries
np.random.seed(1234) # make reproducible
xb = np.random.random((nb, d)).astype('float32')
xb[:, 0] += np.arange(nb) / 1000.
xq = np.random.random((nq, d)).astype('float32')
xq[:, 0] += np.arange(nq) / 1000.
import faiss # make faiss available
ngpus = faiss.get_num_gpus()
print("number of GPUs:", ngpus)
cpu_index = faiss.IndexFlatL2(d)
gpu_index = faiss.index_cpu_to_all_gpus( # build the index
cpu_index
)
gpu_index.add(xb) # add vectors to the index
print(gpu_index.ntotal)
k = 4 # we want to see 4 nearest neighbors
D, I = gpu_index.search(xq, k) # actual search
print(I[:5]) # neighbors of the 5 first queries
print(I[-5:]) # neighbors of the 5 last queries
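# Extra sanity check, added for illustration (not in the original tutorial file):
# searching the first database vectors for themselves should return each vector
# as its own nearest neighbour in the first result column.
D_check, I_check = gpu_index.search(xb[:5], k)
print(I_check[:, 0])                       # expected: [0 1 2 3 4]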
| {
"content_hash": "71f7ca5f58aac340ce7836633c9ab69b",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 67,
"avg_line_length": 31.433333333333334,
"alnum_prop": 0.5535524920466596,
"repo_name": "facebookresearch/faiss",
"id": "c458587ce905eba468c16ea4bf0df47aefaf5cce",
"size": "1121",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tutorial/python/5-Multiple-GPUs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1461"
},
{
"name": "C",
"bytes": "90054"
},
{
"name": "C++",
"bytes": "2704465"
},
{
"name": "CMake",
"bytes": "37442"
},
{
"name": "Cuda",
"bytes": "816558"
},
{
"name": "Dockerfile",
"bytes": "1084"
},
{
"name": "Python",
"bytes": "778607"
},
{
"name": "Roff",
"bytes": "1617"
},
{
"name": "Shell",
"bytes": "27297"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class CommonConfig(AppConfig):
name = 'common'
verbose_name = _("Common")
| {
"content_hash": "638970dd9de21f63d8df8d94f51963bc",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 55,
"avg_line_length": 24.857142857142858,
"alnum_prop": 0.735632183908046,
"repo_name": "DylanMcCall/stuartmccall.ca",
"id": "139a67502223a03751ad2200c41d5e84155a7b77",
"size": "174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31555"
},
{
"name": "HTML",
"bytes": "22095"
},
{
"name": "JavaScript",
"bytes": "73522"
},
{
"name": "Python",
"bytes": "47287"
}
],
"symlink_target": ""
} |
import operator
import platform
from django.db import transaction
from django.db.backends.base.features import BaseDatabaseFeatures
from django.db.utils import OperationalError
from django.utils.functional import cached_property
from .base import Database
class DatabaseFeatures(BaseDatabaseFeatures):
# SQLite can read from a cursor since SQLite 3.6.5, subject to the caveat
# that statements within a connection aren't isolated from each other. See
# https://sqlite.org/isolation.html.
can_use_chunked_reads = True
test_db_allows_multiple_connections = False
supports_unspecified_pk = True
supports_timezones = False
max_query_params = 999
supports_mixed_date_datetime_comparisons = False
supports_transactions = True
atomic_transactions = False
can_rollback_ddl = True
can_create_inline_fk = False
supports_paramstyle_pyformat = False
can_clone_databases = True
supports_temporal_subtraction = True
ignores_table_name_case = True
supports_cast_with_precision = False
time_cast_precision = 3
can_release_savepoints = True
# Is "ALTER TABLE ... RENAME COLUMN" supported?
can_alter_table_rename_column = Database.sqlite_version_info >= (3, 25, 0)
supports_parentheses_in_compound = False
# Deferred constraint checks can be emulated on SQLite < 3.20 but not in a
# reasonably performant way.
supports_pragma_foreign_key_check = Database.sqlite_version_info >= (3, 20, 0)
can_defer_constraint_checks = supports_pragma_foreign_key_check
supports_functions_in_partial_indexes = Database.sqlite_version_info >= (3, 15, 0)
supports_over_clause = Database.sqlite_version_info >= (3, 25, 0)
supports_frame_range_fixed_distance = Database.sqlite_version_info >= (3, 28, 0)
supports_aggregate_filter_clause = Database.sqlite_version_info >= (3, 30, 1)
supports_order_by_nulls_modifier = Database.sqlite_version_info >= (3, 30, 0)
order_by_nulls_first = True
supports_json_field_contains = False
test_collations = {
'ci': 'nocase',
'cs': 'binary',
'non_default': 'nocase',
}
@cached_property
def django_test_skips(self):
skips = {
'SQLite stores values rounded to 15 significant digits.': {
'model_fields.test_decimalfield.DecimalFieldTests.test_fetch_from_db_without_float_rounding',
},
'SQLite naively remakes the table on field alteration.': {
'schema.tests.SchemaTests.test_unique_no_unnecessary_fk_drops',
'schema.tests.SchemaTests.test_unique_and_reverse_m2m',
'schema.tests.SchemaTests.test_alter_field_default_doesnt_perform_queries',
'schema.tests.SchemaTests.test_rename_column_renames_deferred_sql_references',
},
"SQLite doesn't have a constraint.": {
'model_fields.test_integerfield.PositiveIntegerFieldTests.test_negative_values',
},
"SQLite doesn't support negative precision for ROUND().": {
'db_functions.math.test_round.RoundTests.test_null_with_negative_precision',
'db_functions.math.test_round.RoundTests.test_decimal_with_negative_precision',
'db_functions.math.test_round.RoundTests.test_float_with_negative_precision',
'db_functions.math.test_round.RoundTests.test_integer_with_negative_precision',
},
}
if Database.sqlite_version_info < (3, 27):
skips.update({
'Nondeterministic failure on SQLite < 3.27.': {
'expressions_window.tests.WindowFunctionTests.test_subquery_row_range_rank',
},
})
if self.connection.is_in_memory_db():
skips.update({
"the sqlite backend's close() method is a no-op when using an "
"in-memory database": {
'servers.test_liveserverthread.LiveServerThreadTest.test_closes_connections',
'servers.tests.LiveServerTestCloseConnectionTest.test_closes_connections',
},
})
return skips
@cached_property
def supports_atomic_references_rename(self):
# SQLite 3.28.0 bundled with MacOS 10.15 does not support renaming
# references atomically.
if platform.mac_ver()[0].startswith('10.15.') and Database.sqlite_version_info == (3, 28, 0):
return False
return Database.sqlite_version_info >= (3, 26, 0)
@cached_property
def introspected_field_types(self):
return {
**super().introspected_field_types,
'BigAutoField': 'AutoField',
'DurationField': 'BigIntegerField',
'GenericIPAddressField': 'CharField',
'SmallAutoField': 'AutoField',
}
@cached_property
def supports_json_field(self):
with self.connection.cursor() as cursor:
try:
with transaction.atomic(self.connection.alias):
cursor.execute('SELECT JSON(\'{"a": "b"}\')')
except OperationalError:
return False
return True
can_introspect_json_field = property(operator.attrgetter('supports_json_field'))
has_json_object_function = property(operator.attrgetter('supports_json_field'))
@cached_property
def can_return_columns_from_insert(self):
return Database.sqlite_version_info >= (3, 35)
can_return_rows_from_bulk_insert = property(operator.attrgetter('can_return_columns_from_insert'))
| {
"content_hash": "c75fbf7213668f43825cbc005c2e4e4c",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 109,
"avg_line_length": 44.46825396825397,
"alnum_prop": 0.647867213992504,
"repo_name": "ghickman/django",
"id": "ff3e3f47a9faa68a5de7cbcd61bd47011bdaadf6",
"size": "5603",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "django/db/backends/sqlite3/features.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52334"
},
{
"name": "HTML",
"bytes": "170436"
},
{
"name": "JavaScript",
"bytes": "255321"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11414242"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
"""Test bitcoind shutdown."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
self.wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
| {
"content_hash": "f4952870f643267d66eb1aa8927c5068",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 103,
"avg_line_length": 38.03225806451613,
"alnum_prop": 0.6759966072943172,
"repo_name": "alecalve/bitcoin",
"id": "a76e0f1b50afc9a81b9fb580c6cd1a85b557ada9",
"size": "1393",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/functional/feature_shutdown.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "695632"
},
{
"name": "C++",
"bytes": "6008562"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "197255"
},
{
"name": "Makefile",
"bytes": "117105"
},
{
"name": "Objective-C",
"bytes": "123749"
},
{
"name": "Objective-C++",
"bytes": "6594"
},
{
"name": "Python",
"bytes": "1469100"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "88183"
}
],
"symlink_target": ""
} |
import os
import sys
def show_usage():
print 'usage:\nsign.py [-k] apk\n-k\tkeep original apk\nexample: sign.py comiz.apk'
if __name__ == '__main__':
#print sys.argv
argc = len(sys.argv)
if argc != 2 and argc != 3:
show_usage()
exit()
keypath = '/mnt/DATA/proj/eclipse/Android/zzlab.keystore'
keyname = 'zzlab.keystore'
zipalign = '/opt/android-sdk/tools/zipalign'
keep = False
if sys.argv[1] == '-k':
keep = True
originapk = sys.argv[argc-1]
pair = os.path.splitext(originapk)
signedapk = pair[0]+'_signed'+pair[1]
if not os.path.exists(originapk):
print 'Error: No such file.'
exit()
if os.path.exists(signedapk):
os.remove(signedapk)
cmd = 'jarsigner -verbose -keystore "%s" -signedjar "%s" "%s" %s' % (keypath, 'tmpapk', originapk, keyname)
print cmd
if os.system(cmd) != 0:
print 'failed'
exit()
cmd = '%s -v 4 "%s" "%s"' % (zipalign, 'tmpapk', signedapk)
print cmd
if os.system(cmd) != 0:
print 'failed'
exit()
os.remove('tmpapk')
if not keep:
os.remove(originapk)
print 'ok'
| {
"content_hash": "12e6d1b19b4f288f91f9bddbfae7b7c4",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 111,
"avg_line_length": 23.979591836734695,
"alnum_prop": 0.563404255319149,
"repo_name": "chrisju/script",
"id": "4b23a2cce4b3733d478adf92c510560367b71f1a",
"size": "1195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sign.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "73218"
},
{
"name": "Shell",
"bytes": "2670"
}
],
"symlink_target": ""
} |
from typing import Optional, Text
from zerver.lib.test_classes import WebhookTestCase
class GoogleCodeInTests(WebhookTestCase):
STREAM_NAME = 'gci'
URL_TEMPLATE = "/api/v1/external/gci?&api_key={api_key}&stream={stream}"
FIXTURE_DIR_NAME = 'gci'
def test_abandon_event_message(self) -> None:
expected_subject = u'student-yqqtag'
expected_message = u'**student-yqqtag** abandoned the task [Sails unspread it stopped at kearney](https://codein.withgoogle.com/dashboard/task-instances/6296903092273152/).'
self.send_and_test_stream_message('task_abandoned_by_student',
expected_subject, expected_message)
def test_comment_event_message(self) -> None:
expected_subject = u'student-yqqtag'
expected_message = u'**student-yqqtag** commented on the task [Sails unspread it stopped at kearney](https://codein.withgoogle.com/dashboard/task-instances/6296903092273152/).'
self.send_and_test_stream_message('student_commented_on_task',
expected_subject, expected_message)
def test_submit_event_message(self) -> None:
expected_subject = u'student-yqqtag'
expected_message = u'**student-yqqtag** submitted the task [Sails unspread it stopped at kearney](https://codein.withgoogle.com/dashboard/task-instances/6296903092273152/).'
self.send_and_test_stream_message('task_submitted_by_student',
expected_subject, expected_message)
def test_claim_event_message(self) -> None:
expected_subject = u'student-yqqtag'
expected_message = u'**student-yqqtag** claimed the task [Sails unspread it stopped at kearney](https://codein.withgoogle.com/dashboard/task-instances/6296903092273152/).'
self.send_and_test_stream_message('task_claimed_by_student',
expected_subject, expected_message)
def test_approve_event_message(self) -> None:
expected_subject = u'student-yqqtag'
expected_message = u'**eeshangarg** approved the task [Sails unspread it stopped at kearney](https://codein.withgoogle.com/dashboard/task-instances/6296903092273152/).'
self.send_and_test_stream_message('task_approved_by_mentor',
expected_subject, expected_message)
def test_needswork_event_message(self) -> None:
expected_subject = u'student-yqqtag'
expected_message = u'**eeshangarg** submitted the task [Sails unspread it stopped at kearney](https://codein.withgoogle.com/dashboard/task-instances/5136918324969472/) for more work.'
self.send_and_test_stream_message('task_submitted_by_mentor_for_more_work',
expected_subject, expected_message)
| {
"content_hash": "aa04ae8d1510716fdeec5135638ac678",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 191,
"avg_line_length": 64.45454545454545,
"alnum_prop": 0.6650211565585331,
"repo_name": "mahim97/zulip",
"id": "22495da90dd79599fc06e41a5afe48ad3a188f50",
"size": "2860",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/webhooks/gci/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "299188"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "GCC Machine Description",
"bytes": "142"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "542463"
},
{
"name": "JavaScript",
"bytes": "1605569"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86990"
},
{
"name": "Python",
"bytes": "3510480"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "37821"
}
],
"symlink_target": ""
} |
# Copyright 2014 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import faint
import os
def erase_columns(x0, x1):
"""Reduces the image width by removing all pixel columns from x0
to x1 (inclusive).
"""
x0, x1 = min(x0, x1), max(x0, x1)
if x0 < 0:
raise ValueError("Negative column specified.")
image = faint.get_active_image()
full = image.get_bitmap()
w, h = full.get_size()
w2 = w - (x1 - x0 + 1)
if w2 <= 0:
raise ValueError("Can't erase all columns.")
right = full.subbitmap(x1 + 1, 0, w - x1 - 1, h)
image.blit((x0,0),right)
image.set_rect(0,0, w2, h)
def erase_rows(y0, y1):
"""Reduces the image height by removing all pixel rows from y0
to y1 (inclusive).
"""
y0, y1 = min(y0, y1), max(y0, y1)
if y0 < 0:
raise ValueError("Negative row specified.")
image = faint.get_active_image()
full = image.get_bitmap()
w, h = full.get_size()
h2 = h - (y1 - y0 + 1)
if h2 <= 0:
raise ValueError("Can't erase all rows.")
bottom = full.subbitmap(0, y1 + 1, w, h - y1 - 1)
image.blit((0, y0),bottom)
image.set_rect(0,0, w, h2)
def erase_selection():
"""Removes the columns or rows indicated by the raster selection in
the active image and shrinks the image.
The raster selection must extend either across all rows or columns
of the image.
"""
image = faint.get_active_image()
x, y, w, h = image.get_selection()
img_w, img_h = image.get_size()
if h == img_h:
erase_columns(x, x + w)
image.set_selection(0,0,0,0)
image.command_name = "Erase Columns"
elif w == img_w:
erase_rows(y, y + h)
image.set_selection(0,0,0,0)
image.command_name = "Erase Rows"
def snowman():
"""Adds a text object with the snowman-character to test unicode
support.
"""
s = faint.get_settings()
s.fontsize = 48
preferred_font = "DejaVu Sans"
if preferred_font in faint.list_fonts():
s.font = preferred_font
return faint.Text((20,20,100,100), '\u2603', s)
def scroll_traverse():
"""Scrolls through the image column by column, using
scroll_page_down and scroll_page_right, eventually wrapping back
to 0,0"""
active = faint.get_active_image()
max = active.get_max_scroll()
current = active.get_scroll_pos()
if current[1] >= max[1]:
if current[0] >= max[0]:
active.set_scroll_pos(0,0)
else:
active.set_scroll_pos(current[0], 0)
active.scroll_page_right()
else:
active.scroll_page_down()
def _open_relative(image, replace, get_new_index):
if image is None:
image = faint.get_active_image()
path = image.get_filename()
if path is None:
return
d, f = os.path.split(path)
files = sorted(os.listdir(d))
i = files.index(f)
new_index = get_new_index(i)
if new_index < len(files):
new_image = faint.app.open(os.path.join(d, files[new_index]))
if replace and new_image is not None:
faint.app.close(image)
def open_next(image=None, replace=False):
_open_relative(image, replace, lambda i: i + 1)
def open_prev(image=None, replace=False):
_open_relative(image, replace, lambda i: i - 1)
def open_first(image=None, replace=False):
_open_relative(image, replace, lambda i: 0)
def open_last(image=None, replace=False):
_open_relative(image, replace, lambda i: -1)
| {
"content_hash": "e8f083af5bc201dc118c8df613ce073c",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 71,
"avg_line_length": 27.52980132450331,
"alnum_prop": 0.6006735626653837,
"repo_name": "lukas-ke/faint-graphics-editor",
"id": "cb258401bd08c892842145bddda724982d8203fb",
"size": "4206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/faint/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "49581"
},
{
"name": "C++",
"bytes": "3170874"
},
{
"name": "Emacs Lisp",
"bytes": "13474"
},
{
"name": "HTML",
"bytes": "26096"
},
{
"name": "NSIS",
"bytes": "2088"
},
{
"name": "Python",
"bytes": "537915"
}
],
"symlink_target": ""
} |
"""Here's our handy setup script for the ipviking django authentication"""
from setuptools import setup, find_packages
setup(
name = 'ipviking_django',
version = '0.1',
description = 'An example of using the IPViking API as authentication in a Django app.',
author = 'Marcus Hoffman',
url = 'https://github.com/norsecorp/ipviking-django',
license = 'BSD',
packages = find_packages(),
include_package_data = True,
package_data = {'':['README.md']},
install_requires = ['ipviking_api_python', 'django'],
tests_require = [],
classifiers = ['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet',
'Topic :: Network Security',
'Topic :: Software Development :: Libraries :: Python Modules'])
| {
"content_hash": "37c736496876a55ef9ff0f2f0484cbe9",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 94,
"avg_line_length": 44,
"alnum_prop": 0.5634469696969697,
"repo_name": "norsecorp/ipviking-django",
"id": "b0ff503f1143360a87b016e31cfc31cfd8181f44",
"size": "1056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "17591"
}
],
"symlink_target": ""
} |
from pyxb.bundles.opengis.gml_3_3.raw.ce import *
| {
"content_hash": "f80dcd927249cef47563504bfe5ecea2",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 49,
"avg_line_length": 50,
"alnum_prop": 0.76,
"repo_name": "jonfoster/pyxb2",
"id": "f4a66a8111072dafd7db2c79e0732b4d4797cde7",
"size": "50",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pyxb/bundles/opengis/gml_3_3/ce.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6286"
},
{
"name": "Python",
"bytes": "1854695"
},
{
"name": "Shell",
"bytes": "37524"
}
],
"symlink_target": ""
} |
from flamebroiler import Trie
import resource
import timeit
def find_words():
letters = [chr(i) for i in range(97,123)]
for first in letters:
for second in letters:
print first + second
matches = words.suffixes(first + second)
print resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
words = Trie()
for w in open("/usr/share/dict/words"):
word = w.strip().lower()
words[word] = word
print resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
print timeit.Timer("find_words()", "from __main__ import find_words; gc.enable()").repeat(10, 1)
print resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
| {
"content_hash": "eb23f9b90cfd32b6b02cfe92f9a8a060",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 96,
"avg_line_length": 29.40909090909091,
"alnum_prop": 0.6893353941267388,
"repo_name": "apendleton/flamebroiler",
"id": "d9dbef429a36593f6997d747725c5f8abf8c60cd",
"size": "647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/py/words_trie_bench.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "17572"
},
{
"name": "Makefile",
"bytes": "851"
},
{
"name": "Python",
"bytes": "7666"
}
],
"symlink_target": ""
} |
"""awscfncli2 version."""
import pkg_resources
__version__ = pkg_resources.require('awscfncli2')[0].version
| {
"content_hash": "c1a4abacf566ad497a699650ca218e0d",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 60,
"avg_line_length": 22,
"alnum_prop": 0.7181818181818181,
"repo_name": "Kotaimen/awscfncli",
"id": "36e7b2d7e1d97976f11774c07aab93576adacf7a",
"size": "110",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "awscfncli2/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "926"
},
{
"name": "Python",
"bytes": "130137"
},
{
"name": "Shell",
"bytes": "597"
}
],
"symlink_target": ""
} |
import errno
import logging
import os
import uuid
import struct
import time
import base64
import socket
from ceph_deploy.cliutil import priority
from ceph_deploy import conf, hosts, exc
from ceph_deploy.util import arg_validators, ssh, net
from ceph_deploy.misc import mon_hosts
from ceph_deploy.lib import remoto
from ceph_deploy.connection import get_local_connection
LOG = logging.getLogger(__name__)
def generate_auth_key():
key = os.urandom(16)
header = struct.pack(
'<hiih',
1, # le16 type: CEPH_CRYPTO_AES
int(time.time()), # le32 created: seconds
0, # le32 created: nanoseconds,
len(key), # le16: len(key)
)
return base64.b64encode(header + key).decode('utf-8')
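# A small companion sketch, added for illustration only (it is not part of the
# original ceph-deploy module): unpack a key produced by generate_auth_key()
# above. The '<hiih' layout mirrors the 12-byte header packed there.
def _unpack_auth_key(encoded):
    raw = base64.b64decode(encoded)
    key_type, created_sec, created_nsec, key_len = struct.unpack('<hiih', raw[:12])
    return key_type, created_sec, created_nsec, raw[12:12 + key_len]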
def ssh_copy_keys(hostname, username=None):
LOG.info('making sure passwordless SSH succeeds')
if ssh.can_connect_passwordless(hostname):
return
LOG.warning('could not connect via SSH')
# Create the key if it doesn't exist:
id_rsa_pub_file = os.path.expanduser(u'~/.ssh/id_rsa.pub')
id_rsa_file = id_rsa_pub_file.split('.pub')[0]
if not os.path.exists(id_rsa_file):
LOG.info('creating a passwordless id_rsa.pub key file')
with get_local_connection(LOG) as conn:
remoto.process.run(
conn,
[
'ssh-keygen',
'-t',
'rsa',
'-N',
"",
'-f',
id_rsa_file,
]
)
# Get the contents of id_rsa.pub and push it to the host
LOG.info('will connect again with password prompt')
distro = hosts.get(hostname, username, detect_sudo=False)
auth_keys_path = '.ssh/authorized_keys'
if not distro.conn.remote_module.path_exists(auth_keys_path):
distro.conn.logger.warning(
'.ssh/authorized_keys does not exist, will skip adding keys'
)
else:
LOG.info('adding public keys to authorized_keys')
with open(os.path.expanduser('~/.ssh/id_rsa.pub'), 'r') as id_rsa:
contents = id_rsa.read()
distro.conn.remote_module.append_to_file(
auth_keys_path,
contents
)
distro.conn.exit()
def validate_host_ip(ips, subnets):
"""
Make sure that, for a given host, every subnet specified contains at least one
of the host's IPs.
"""
# Make sure we prune ``None`` arguments
subnets = [s for s in subnets if s is not None]
validate_one_subnet = len(subnets) == 1
def ip_in_one_subnet(ips, subnet):
""" ensure an ip exists in at least one subnet """
for ip in ips:
if net.ip_in_subnet(ip, subnet):
return True
return False
for subnet in subnets:
if ip_in_one_subnet(ips, subnet):
if validate_one_subnet:
return
else: # keep going to make sure the other subnets are ok
continue
else:
msg = "subnet (%s) is not valid for any of the ips found %s" % (subnet, str(ips))
raise RuntimeError(msg)
def get_public_network_ip(ips, public_subnet):
"""
Given a public subnet, choose the one IP from the remote host that exists
within the subnet range.
"""
for ip in ips:
if net.ip_in_subnet(ip, public_subnet):
return ip
msg = "IPs (%s) are not valid for any of subnet specified %s" % (str(ips), str(public_subnet))
raise RuntimeError(msg)
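# Doctest-style illustration of the two helpers above (not from the original
# source; the addresses and subnets are made up):
#
#   >>> validate_host_ip(['10.0.0.5', '192.168.1.2'], ['10.0.0.0/24'])   # no error
#   >>> get_public_network_ip(['10.0.0.5', '192.168.1.2'], '192.168.1.0/24')
#   '192.168.1.2'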
def new(args):
if args.ceph_conf:
raise RuntimeError('will not create a Ceph conf file if attempting to re-use one with the `--ceph-conf` flag')
LOG.debug('Creating new cluster named %s', args.cluster)
cfg = conf.ceph.CephConf()
cfg.add_section('global')
fsid = args.fsid or uuid.uuid4()
cfg.set('global', 'fsid', str(fsid))
# if networks were passed in, lets set them in the
# global section
if args.public_network:
cfg.set('global', 'public network', str(args.public_network))
if args.cluster_network:
cfg.set('global', 'cluster network', str(args.cluster_network))
mon_initial_members = []
mon_host = []
for (name, host) in mon_hosts(args.mon):
# Try to ensure we can ssh in properly before anything else
if args.ssh_copykey:
ssh_copy_keys(host, args.username)
# Now get the non-local IPs from the remote node
distro = hosts.get(host, username=args.username)
remote_ips = net.ip_addresses(distro.conn)
# custom cluster names on sysvinit hosts won't work
if distro.init == 'sysvinit' and args.cluster != 'ceph':
LOG.error('custom cluster names are not supported on sysvinit hosts')
raise exc.ClusterNameError(
'host %s does not support custom cluster names' % host
)
distro.conn.exit()
# Validate subnets if we received any
if args.public_network or args.cluster_network:
validate_host_ip(remote_ips, [args.public_network, args.cluster_network])
# Pick the IP that matches the public cluster (if we were told to do
# so) otherwise pick the first, non-local IP
LOG.debug('Resolving host %s', host)
if args.public_network:
ip = get_public_network_ip(remote_ips, args.public_network)
else:
ip = net.get_nonlocal_ip(host)
LOG.debug('Monitor %s at %s', name, ip)
mon_initial_members.append(name)
try:
socket.inet_pton(socket.AF_INET6, ip)
mon_host.append("[" + ip + "]")
LOG.info('Monitors are IPv6, binding Messenger traffic on IPv6')
cfg.set('global', 'ms bind ipv6', 'true')
except socket.error:
mon_host.append(ip)
LOG.debug('Monitor initial members are %s', mon_initial_members)
LOG.debug('Monitor addrs are %s', mon_host)
cfg.set('global', 'mon initial members', ', '.join(mon_initial_members))
# no spaces here, see http://tracker.newdream.net/issues/3145
cfg.set('global', 'mon host', ','.join(mon_host))
# override undesirable defaults, needed until bobtail
# http://tracker.ceph.com/issues/6788
cfg.set('global', 'auth cluster required', 'cephx')
cfg.set('global', 'auth service required', 'cephx')
cfg.set('global', 'auth client required', 'cephx')
path = '{name}.conf'.format(
name=args.cluster,
)
new_mon_keyring(args)
LOG.debug('Writing initial config to %s...', path)
tmp = '%s.tmp' % path
with open(tmp, 'w') as f:
cfg.write(f)
try:
os.rename(tmp, path)
except OSError as e:
if e.errno == errno.EEXIST:
raise exc.ClusterExistsError(path)
else:
raise
def new_mon_keyring(args):
LOG.debug('Creating a random mon key...')
mon_keyring = '[mon.]\nkey = %s\ncaps mon = allow *\n' % generate_auth_key()
keypath = '{name}.mon.keyring'.format(
name=args.cluster,
)
oldmask = os.umask(0o77)
LOG.debug('Writing monitor keyring to %s...', keypath)
try:
tmp = '%s.tmp' % keypath
with open(tmp, 'w', 0o600) as f:
f.write(mon_keyring)
try:
os.rename(tmp, keypath)
except OSError as e:
if e.errno == errno.EEXIST:
raise exc.ClusterExistsError(keypath)
else:
raise
finally:
os.umask(oldmask)
@priority(10)
def make(parser):
"""
Start deploying a new cluster, and write a CLUSTER.conf and keyring for it.
"""
parser.add_argument(
'mon',
metavar='MON',
nargs='+',
help='initial monitor hostname, fqdn, or hostname:fqdn pair',
type=arg_validators.Hostname(),
)
parser.add_argument(
'--no-ssh-copykey',
dest='ssh_copykey',
action='store_false',
default=True,
help='do not attempt to copy SSH keys',
)
parser.add_argument(
'--fsid',
dest='fsid',
help='provide an alternate FSID for ceph.conf generation',
)
parser.add_argument(
'--cluster-network',
help='specify the (internal) cluster network',
type=arg_validators.Subnet(),
)
parser.add_argument(
'--public-network',
help='specify the public network for a cluster',
type=arg_validators.Subnet(),
)
parser.set_defaults(
func=new,
)
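# Example invocation sketch (not taken from the original source; the hostnames
# and subnet are invented):
#
#   ceph-deploy new --public-network 10.0.0.0/24 mon1 mon2 mon3
#
# which writes <cluster>.conf and <cluster>.mon.keyring into the working directory.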
| {
"content_hash": "b630fdaaddd8575c87fe06550dbdee92",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 110,
"avg_line_length": 31.1268115942029,
"alnum_prop": 0.5860784541962519,
"repo_name": "codenrhoden/ceph-deploy",
"id": "842117bdbc310139ae2f42c4b01cb6af4d02a494",
"size": "8591",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ceph_deploy/new.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "392696"
},
{
"name": "Shell",
"bytes": "8614"
}
],
"symlink_target": ""
} |
import os
from oslo_config import cfg
CONF = cfg.CONF
CONF.import_opt('policy_file', 'cinder.policy', group='oslo_policy')
CONF.import_opt('volume_driver', 'cinder.volume.manager')
CONF.import_opt('backup_driver', 'cinder.backup.manager')
CONF.import_opt('api_class', 'cinder.keymgr', group='key_manager')
CONF.import_opt('fixed_key', 'cinder.keymgr.conf_key_mgr', group='key_manager')
CONF.import_opt('scheduler_driver', 'cinder.scheduler.manager')
def_vol_type = 'fake_vol_type'
def set_defaults(conf):
conf.set_default('default_volume_type', def_vol_type)
conf.set_default('volume_driver',
'cinder.tests.fake_driver.FakeLoggingVolumeDriver')
conf.set_default('iscsi_helper', 'fake')
conf.set_default('rpc_backend', 'cinder.openstack.common.rpc.impl_fake')
conf.set_default('connection', 'sqlite://', group='database')
conf.set_default('sqlite_synchronous', False, group='database')
conf.set_default('policy_file', 'cinder.tests.unit/policy.json',
group='oslo_policy')
conf.set_default('backup_driver', 'cinder.tests.unit.backup.fake_service')
conf.set_default('api_class',
'cinder.keymgr.conf_key_mgr.ConfKeyManager',
group='key_manager')
conf.set_default('fixed_key', default='0' * 64, group='key_manager')
conf.set_default('scheduler_driver',
'cinder.scheduler.filter_scheduler.FilterScheduler')
conf.set_default('state_path', os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..')))
conf.set_default('policy_dirs', [], group='oslo_policy')
# This is where we don't authenticate
conf.set_default('auth_strategy', 'noauth')
conf.set_default('auth_uri', 'fake', 'keystone_authtoken')
| {
"content_hash": "50ef5442d09726fcb287ee386944799b",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 79,
"avg_line_length": 44.775,
"alnum_prop": 0.6610831937465104,
"repo_name": "cloudbase/cinder",
"id": "e442bb8e68e42739c5eb359adffdafd08ac011c6",
"size": "2523",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/conf_fixture.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17586629"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
} |
from ggrc import db
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declared_attr
from ggrc.models.mixins import deferred, Base
from ggrc.models.reflection import PublishOnly
class ObjectFile(Base, db.Model):
__tablename__ = 'object_files'
file_id = db.Column(db.String, nullable=False)
parent_folder_id = db.Column(db.String, nullable=True)
fileable_id = db.Column(db.Integer, nullable=False)
fileable_type = db.Column(db.String, nullable=False)
@property
def fileable_attr(self):
return '{0}_fileable'.format(self.fileable_type)
@property
def fileable(self):
return getattr(self, self.fileable_attr)
@fileable.setter
def fileable(self, value):
self.fileable_id = value.id if value is not None else None
self.fileable_type = value.__class__.__name__ if value is not None \
else None
return setattr(self, self.fileable_attr, value)
@staticmethod
def _extra_table_args(cls):
return (
#db.UniqueConstraint('file_id', 'fileable_id', 'fileable_type'),
)
_publish_attrs = [
'file_id',
'parent_folder_id',
'fileable',
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(ObjectFile, cls).eager_query()
return query.options()
def _display_name(self):
return self.fileable.display_name + ' <-> gdrive file ' + self.file_id
class Fileable(object):
@classmethod
def late_init_fileable(cls):
def make_object_files(cls):
joinstr = 'and_(foreign(ObjectFile.fileable_id) == {type}.id, '\
'foreign(ObjectFile.fileable_type) == "{type}")'
joinstr = joinstr.format(type=cls.__name__)
return db.relationship(
'ObjectFile',
primaryjoin=joinstr,
backref='{0}_fileable'.format(cls.__name__),
cascade='all, delete-orphan',
)
cls.object_files = make_object_files(cls)
_publish_attrs = [
'object_files',
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(Fileable, cls).eager_query()
return query.options(
orm.subqueryload('object_files'))
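# Hypothetical adoption sketch (not part of the original module): a model class
# gains the object_files relationship by mixing in Fileable and running the
# late initializer once the class is defined, e.g.
#
#   class Document(Base, Fileable, db.Model):
#       __tablename__ = 'documents'
#
#   Document.late_init_fileable()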
| {
"content_hash": "6621afd2d3b004deb17a1be54db3280a",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 72,
"avg_line_length": 28.493506493506494,
"alnum_prop": 0.6572470373746582,
"repo_name": "NejcZupec/ggrc-core",
"id": "e79217d575a93ca26bd033ff780def021b89f8c4",
"size": "2307",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/ggrc_gdrive_integration/models/object_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "163629"
},
{
"name": "Cucumber",
"bytes": "136321"
},
{
"name": "HTML",
"bytes": "1057522"
},
{
"name": "JavaScript",
"bytes": "1494189"
},
{
"name": "Makefile",
"bytes": "6161"
},
{
"name": "Mako",
"bytes": "2178"
},
{
"name": "Python",
"bytes": "2151120"
},
{
"name": "Shell",
"bytes": "29929"
}
],
"symlink_target": ""
} |
import numpy as np
def softmax(df):
if len(df.shape) == 1:
df[df > 20] = 20
df[df < -20] = -20
ppositive = 1 / (1 + np.exp(-df))
ppositive[ppositive > 0.999999] = 1
ppositive[ppositive < 0.0000001] = 0
return np.transpose(np.array((1 - ppositive, ppositive)))
else:
# Compute the Softmax like it is described here:
# http://www.iro.umontreal.ca/~bengioy/dlbook/numerical.html
tmp = df - np.max(df, axis=1).reshape((-1, 1))
tmp = np.exp(tmp)
return tmp / np.sum(tmp, axis=1).reshape((-1, 1))
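# A tiny usage check, added for illustration only (not in the upstream file):
# each row of the stable 2-D branch above should sum to one.
def _softmax_example():
    probs = softmax(np.array([[1.0, 2.0, 3.0], [10.0, 0.0, -10.0]]))
    assert np.allclose(probs.sum(axis=1), 1.0)
    return probs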
def convert_multioutput_multiclass_to_multilabel(probas):
if isinstance(probas, np.ndarray) and len(probas.shape) > 2:
raise ValueError('New unsupported sklearn output!')
if isinstance(probas, list):
multioutput_probas = np.ndarray((probas[0].shape[0], len(probas)))
for i, output in enumerate(probas):
# Only copy the probability of something having class 1
multioutput_probas[:, i] = output[:, 1]
if output.shape[1] > 2:
raise ValueError('Multioutput-Multiclass supported by '
'scikit-learn, but not by auto-sklearn!')
probas = multioutput_probas
return probas
| {
"content_hash": "15bf276d01c856d87145eea95b8faf57",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 74,
"avg_line_length": 40.0625,
"alnum_prop": 0.5904836193447738,
"repo_name": "hmendozap/auto-sklearn",
"id": "d0b2dbf47a65c78218af877a63de3b6e22d0382b",
"size": "1282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autosklearn/pipeline/implementations/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6722"
},
{
"name": "Makefile",
"bytes": "6791"
},
{
"name": "Python",
"bytes": "1207634"
},
{
"name": "Shell",
"bytes": "851"
}
],
"symlink_target": ""
} |
class OpenIDMiddleware(object):
"""This middleware initializes some settings to make the
python-openid library compatible with Google App Engine
"""
def process_view(self, request, view_func, view_args, view_kwargs):
from openid import fetchers
import openidgae.fetcher
fetchers.setDefaultFetcher(openidgae.fetcher.UrlfetchFetcher())
# Switch logger to use logging package instead of stderr
from openid import oidutil
def myLoggingFunction(message, level=0):
import logging
logging.info(message)
oidutil.log = myLoggingFunction
def process_response(self, request, response):
# Yahoo wants to be able to verify the location of a Relying
# Party's OpenID 2.0 endpoints using Yadis
# http://developer.yahoo.com/openid/faq.html
# so we need to publish our XRDS file on our realm URL. The Realm
# URL is specified in OpenIDStartSubmit as the protocol and domain
# name of the URL, so we check if this request is for the root
# document there and add the appropriate header if it is.
if request.path == '/':
import django.core.urlresolvers
response['X-XRDS-Location'] = ''.join((
'http', ('', 's')[request.is_secure()], '://',
request.META['HTTP_HOST'],
django.core.urlresolvers.reverse('openidgae.views.RelyingPartyXRDS')
))
return response
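# For illustration only (not in the original middleware): a request for
# https://example.com/ would come back with a header along the lines of
#   X-XRDS-Location: https://example.com/<path from reverse('openidgae.views.RelyingPartyXRDS')>
# so Yahoo's relying-party discovery can locate the XRDS document on the realm URL.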
| {
"content_hash": "818190d36e4bffe5098c404c0b72c096",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 78,
"avg_line_length": 43.03125,
"alnum_prop": 0.6957153231663036,
"repo_name": "lvbeck/niubi",
"id": "532fc062cd1c9609535890b348f0b4fa9d8b1ae3",
"size": "2201",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "openidgae/middleware.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "11144"
},
{
"name": "Python",
"bytes": "1172311"
}
],
"symlink_target": ""
} |
from unittest import TestCase
import tempfile, os
from backup_monkey import SplunkLogging
class SplunkLoggingTest(TestCase):
parsed = None
values = {}
keys = ['body', 'severity', 'src_account', 'src_role', 'src_region', 'src_volume', 'src_snapshot', 'src_tags', 'subject', 'type', 'category']
log_file = tempfile.mkstemp()[1]
@classmethod
def setUpClass(cls):
values = dict([(k, k) for k in cls.keys])
SplunkLogging.set_path(cls.log_file)
SplunkLogging.write(**values)
with open(cls.log_file) as f:
for line in f:
cls.parsed = dict((k.split(' ')[-1], v.strip()) for k,v in [tuple(kv.split('=')) for kv in line.split(',')])
f.close()
def test_write(self):
assert len(self.parsed.keys()) == len(SplunkLogging.keys)
def test_reset_invalid_values(self):
assert self.parsed['type'] == 'unknown'
assert self.parsed['severity'] == 'unknown'
def test_values(self):
for k in filter(lambda v: v not in ['type', 'severity'], self.keys):
assert self.parsed[k] == k
def test_no_line_breaks(self):
SplunkLogging.write(subject='subject\r\n', body='body\ngoes\rhere')
with open(self.log_file) as f:
for line in f:
parsed = dict((k.split(' ')[-1], v) for k,v in [tuple(kv.split('=')) for kv in line.split(',')])
f.close()
assert parsed['subject'] == 'subject'
assert parsed['body'] == 'body goes here'
@classmethod
def tearDownClass(cls):
os.remove(cls.log_file)
SplunkLogging.reset_path()
| {
"content_hash": "1dd65d722ec90a56a2ce26a7925eaa10",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 143,
"avg_line_length": 33.51111111111111,
"alnum_prop": 0.6326259946949602,
"repo_name": "orionhealth/backup-monkey",
"id": "1ce3472a6df898106b32983d8113e9a198143676",
"size": "1508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_splunk_logging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "46301"
},
{
"name": "Shell",
"bytes": "51"
}
],
"symlink_target": ""
} |
"""
Annotation Type Hints
=====================
Defines the annotation type hints, the module exposes many aliases from
:mod:`typing` and :mod:`numpy.typing` to avoid having to handle multiple
imports.
"""
from __future__ import annotations
import numpy as np
import numpy.typing as npt
import re
from types import ModuleType
from typing import (
Any,
Callable,
Dict,
Generator,
Iterable,
Iterator,
List,
Mapping,
NewType,
Optional,
Union,
Sequence,
TextIO,
Tuple,
TYPE_CHECKING,
Type,
TypeVar,
cast,
overload,
)
try:
from typing import (
Literal,
Protocol,
SupportsIndex,
TypedDict,
runtime_checkable,
)
# TODO: Drop "typing_extensions" when "Google Colab" uses Python >= 3.8.
# Remove exclusion in ".pre-commit-config.yaml" file for "pyupgrade".
except ImportError: # pragma: no cover
from typing_extensions import ( # type: ignore[assignment]
Literal,
Protocol,
SupportsIndex,
TypedDict,
runtime_checkable,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = [
"Any",
"Callable",
"Dict",
"Generator",
"Iterable",
"Iterator",
"List",
"Mapping",
"ModuleType",
"Optional",
"Union",
"Sequence",
"SupportsIndex",
"TextIO",
"Tuple",
"Type",
"TypedDict",
"TypeVar",
"RegexFlag",
"DTypeBoolean",
"DTypeInteger",
"DTypeFloating",
"DTypeNumber",
"DTypeComplex",
"DType",
"Integer",
"Floating",
"Number",
"Complex",
"Boolean",
"Literal",
"Dataclass",
"NestedSequence",
"ArrayLike",
"IntegerOrArrayLike",
"FloatingOrArrayLike",
"NumberOrArrayLike",
"ComplexOrArrayLike",
"BooleanOrArrayLike",
"ScalarType",
"StrOrArrayLike",
"NDArray",
"IntegerOrNDArray",
"FloatingOrNDArray",
"NumberOrNDArray",
"ComplexOrNDArray",
"BooleanOrNDArray",
"StrOrNDArray",
"TypeInterpolator",
"TypeExtrapolator",
"TypeLUTSequenceItem",
"LiteralWarning",
"cast",
]
Any = Any
Callable = Callable
Dict = Dict
Generator = Generator
Iterable = Iterable
Iterator = Iterator
List = List
Mapping = Mapping
ModuleType = ModuleType
Optional = Optional
Union = Union
Sequence = Sequence
SupportsIndex = SupportsIndex
TextIO = TextIO
Tuple = Tuple
Type = Type
TypedDict = TypedDict
TypeVar = TypeVar
RegexFlag = NewType("RegexFlag", re.RegexFlag)
DTypeInteger = Union[
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
]
DTypeFloating = Union[np.float16, np.float32, np.float64]
DTypeNumber = Union[DTypeInteger, DTypeFloating]
DTypeComplex = Union[np.csingle, np.cdouble]
DTypeBoolean = np.bool_
DType = Union[DTypeBoolean, DTypeNumber, DTypeComplex]
Integer = int
Floating = float
Number = Union[Integer, Floating]
Complex = complex
Boolean = bool
# TODO: Use "typing.Literal" when minimal Python version is raised to 3.8.
Literal = Literal
# TODO: Revisit to use Protocol.
Dataclass = Any
# TODO: Drop mocking when minimal "Numpy" version is 1.21.x.
_T_co = TypeVar("_T_co", covariant=True)
class NestedSequence(Protocol[_T_co]):
"""A protocol for representing nested sequences.
Warning
-------
`NestedSequence` currently does not work in combination with typevars,
*e.g.* ``def func(a: NestedSequence[T]) -> T: ...``.
See Also
--------
collections.abc.Sequence
ABCs for read-only and mutable :term:`sequences`.
Examples
--------
>>> from __future__ import annotations
>>> from typing import TYPE_CHECKING
>>> import numpy as np
>>> def get_dtype(seq: NestedSequence[float]) -> np.dtype[np.float64]:
... return np.asarray(seq).dtype
>>> a = get_dtype([1.0])
>>> b = get_dtype([[1.0]])
>>> c = get_dtype([[[1.0]]])
>>> d = get_dtype([[[[1.0]]]])
>>> if TYPE_CHECKING:
... reveal_locals()
... # note: Revealed local types are:
... # note: a: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
... # note: b: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
... # note: c: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
... # note: d: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
...
"""
def __len__(self) -> int:
"""Implement ``len(self)``."""
raise NotImplementedError
@overload
def __getitem__(
self, index: int
) -> _T_co | NestedSequence[_T_co]: # noqa: D105
...
@overload
def __getitem__(self, index: slice) -> NestedSequence[_T_co]: # noqa: D105
...
def __getitem__(self, index):
"""Implement ``self[x]``."""
raise NotImplementedError
def __contains__(self, x: object) -> bool:
"""Implement ``x in self``."""
raise NotImplementedError
def __iter__(self) -> Iterator[_T_co | NestedSequence[_T_co]]:
"""Implement ``iter(self)``."""
raise NotImplementedError
def __reversed__(self) -> Iterator[_T_co | NestedSequence[_T_co]]:
"""Implement ``reversed(self)``."""
raise NotImplementedError
def count(self, value: Any) -> int:
"""Return the number of occurrences of `value`."""
raise NotImplementedError
def index(self, value: Any) -> int:
"""Return the first index of `value`."""
raise NotImplementedError
ArrayLike = npt.ArrayLike
IntegerOrArrayLike = Union[Integer, ArrayLike]
FloatingOrArrayLike = Union[Floating, ArrayLike]
NumberOrArrayLike = Union[Number, ArrayLike]
ComplexOrArrayLike = Union[Complex, ArrayLike]
BooleanOrArrayLike = Union[Boolean, ArrayLike]
StrOrArrayLike = Union[str, ArrayLike]
ScalarType = TypeVar("ScalarType", bound=np.generic, covariant=True)
# TODO: Use "numpy.typing.NDArray" when minimal Numpy version is raised to
# 1.21.
if TYPE_CHECKING: # pragma: no cover
NDArray = np.ndarray[Any, np.dtype[ScalarType]]
else:
NDArray = np.ndarray
# TODO: Drop when minimal Python is raised to 3.9.
if TYPE_CHECKING: # pragma: no cover
IntegerOrNDArray = Union[Integer, NDArray[DTypeInteger]]
FloatingOrNDArray = Union[Floating, NDArray[DTypeFloating]]
NumberOrNDArray = Union[
Number, NDArray[Union[DTypeInteger, DTypeFloating]]
]
ComplexOrNDArray = Union[Complex, NDArray[DTypeComplex]]
BooleanOrNDArray = Union[Boolean, NDArray[DTypeBoolean]]
StrOrNDArray = Union[str, NDArray[np.str_]]
else:
IntegerOrNDArray = Union[Integer, NDArray]
FloatingOrNDArray = Union[Floating, NDArray]
NumberOrNDArray = Union[Number, NDArray]
ComplexOrNDArray = Union[Complex, NDArray]
BooleanOrNDArray = Union[Boolean, NDArray]
StrOrNDArray = Union[str, NDArray]
class TypeInterpolator(Protocol): # noqa: D101
x: NDArray
y: NDArray
def __init__(self, *args: Any, **kwargs: Any) -> None: # noqa: D102
... # pragma: no cover
def __call__(
self, x: FloatingOrArrayLike
) -> FloatingOrNDArray: # noqa: D102
... # pragma: no cover
class TypeExtrapolator(Protocol): # noqa: D101
interpolator: TypeInterpolator
def __init__(self, *args: Any, **kwargs: Any) -> None: # noqa: D102
... # pragma: no cover
def __call__(
self, x: FloatingOrArrayLike
) -> FloatingOrNDArray: # noqa: D102
... # pragma: no cover
@runtime_checkable
class TypeLUTSequenceItem(Protocol): # noqa: D101
def apply(self, RGB: ArrayLike, **kwargs: Any) -> NDArray: # noqa: D102
... # pragma: no cover
LiteralWarning = Literal[
"default", "error", "ignore", "always", "module", "once"
]
cast = cast
def arraylike( # type: ignore[empty-body]
a: ArrayLike | NestedSequence[ArrayLike],
) -> NDArray:
...
def number_or_arraylike( # type: ignore[empty-body]
a: NumberOrArrayLike | NestedSequence[ArrayLike],
) -> NDArray:
...
a: DTypeFloating = np.float64(1)
b: float = 1
c: Floating = 1
d: ArrayLike = [c, c]
e: FloatingOrArrayLike = d
s_a: Sequence[DTypeFloating] = [a, a]
s_b: Sequence[float] = [b, b]
s_c: Sequence[Floating] = [c, c]
arraylike(a)
arraylike(b)
arraylike(c)
arraylike(d)
arraylike([d, d])
arraylike(e)
arraylike([e, e])
arraylike(s_a)
arraylike(s_b)
arraylike(s_c)
number_or_arraylike(a)
number_or_arraylike(b)
number_or_arraylike(c)
number_or_arraylike(d)
number_or_arraylike([d, d])
number_or_arraylike(e)
number_or_arraylike([e, e])
number_or_arraylike(s_a)
number_or_arraylike(s_b)
number_or_arraylike(s_c)
np.atleast_1d(a)
np.atleast_1d(b)
np.atleast_1d(c)
np.atleast_1d(arraylike(d))
np.atleast_1d(arraylike([d, d]))
np.atleast_1d(arraylike(e))
np.atleast_1d(arraylike([e, e]))
np.atleast_1d(s_a)
np.atleast_1d(s_b)
np.atleast_1d(s_c)
del a, b, c, d, e, s_a, s_b, s_c
| {
"content_hash": "71960e054f62e78e273d4523110de5f2",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 79,
"avg_line_length": 23.642487046632123,
"alnum_prop": 0.6351084812623274,
"repo_name": "colour-science/colour",
"id": "3d110746e1c3622542df3ceedb821cb01733d16d",
"size": "9126",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "colour/hints/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7967270"
},
{
"name": "TeX",
"bytes": "163213"
},
{
"name": "Visual Basic 6.0",
"bytes": "1170"
}
],
"symlink_target": ""
} |
import csb.test as test
from csb.io import MemoryStream
from csb.bio.io.mrc import DensityMapReader, DensityMapWriter, DensityMapFormatError, HeaderInfo, ByteOrder
@test.unit
class TestDensityMapReader(test.Case):
def setUp(self):
super(TestDensityMapReader, self).setUp()
self.file = self.config.getTestFile('1C3W_10.mrc')
self.reader = DensityMapReader(self.file)
self.rawheader = None
with open(self.file, 'rb') as stream:
self.rawheader = self.reader._rawheader(stream)
def testReadRawHeader(self):
self.assertEqual(len(self.rawheader), DensityMapReader.HEADER_SIZE)
def testReadHeader(self):
density = self.reader.read_header()
self.assertEqual(density.data, None)
self.assertEqual(density.header, self.rawheader)
self.assertEqual(density.origin, [-36.0, -36.0, -36.0])
self.assertEqual(density.shape, (72, 72, 72))
self.assertEqual(density.spacing, (1.0, 1.0, 1.0))
def testRead(self):
density = self.reader.read()
self.assertIsNotNone(density.data)
self.assertEqual(density.header, self.rawheader)
self.assertEqual(density.origin, [-36.0, -36.0, -36.0])
self.assertEqual(density.shape, (72, 72, 72))
self.assertEqual(density.spacing, (1.0, 1.0, 1.0))
@test.unit
class TestDensityMapWriter(test.Case):
def setUp(self):
super(TestDensityMapWriter, self).setUp()
self.file = self.config.getTestFile('1C3W_10.mrc')
self.writer = DensityMapWriter()
self.reader = DensityMapReader(self.file)
self.density = self.reader.read()
def testWriteDensity(self):
with self.config.getTempStream(mode='b') as temp:
with open(self.file, 'rb') as source:
self.writer.write(temp, self.density)
temp.flush()
if temp.content() != source.read():
self.fail('binary strings differ')
def testReconstructHeader(self):
raw = self.density.header
self.density.header = None
new = self.writer.reconstruct_header(self.density)
original = self.reader._inspect(raw, ByteOrder.NATIVE)
generated = self.reader._inspect(new, ByteOrder.NATIVE)
for o, g in zip(original, generated):
self.assertAlmostEqual(o, g, places=4)
if __name__ == '__main__':
test.Console()
| {
"content_hash": "bec64d574837fdc7da79af2d6626d382",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 107,
"avg_line_length": 32.096385542168676,
"alnum_prop": 0.5844594594594594,
"repo_name": "csb-toolbox/CSB",
"id": "24bda3c70c505c8a0ff558a1d88472307f88ff8b",
"size": "2664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csb/test/cases/bio/io/mrc/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14987"
},
{
"name": "Python",
"bytes": "1475360"
}
],
"symlink_target": ""
} |
from flask import redirect, render_template, url_for, request, flash
from time import time
from pyfsw import app, db
from pyfsw import Player, ShopCategory, ShopItem, ShopOrder, ShopHistory
from pyfsw import login_required, current_user
@app.route('/shop/offer')
def route_shop():
categories = db.session.query(ShopCategory).all()
user = current_user()
return render_template(
'shop/browse.htm',
categories=categories, user=user
)
@app.route('/shop/order/<int:id>', methods=['GET'])
@login_required
def route_shop_order(id):
item = db.session.query(ShopItem).filter(ShopItem.id == id).first()
if not item or not item.enabled:
return redirect(url_for('route_shop'))
characters = current_user().players
return render_template('shop/order.htm', item=item, characters=characters)
@app.route('/shop/order/<int:id>', methods=['POST'])
@login_required
def route_shop_order_post(id):
item = request.form.get('item', 0, type=int)
character = request.form.get('character', 0, type=int)
error = False
if item == 0 or character == 0:
return redirect(url_for('route_shop'))
item = db.session().query(ShopItem).filter(ShopItem.id == item).first()
if not item:
flash('Failed to find the selected item.', 'error')
error = True
character = db.session().query(Player).filter(Player.id == character).first()
if not character:
flash('Failed to find the selected character.', 'error')
error = True
account = current_user()
if character not in account.players:
flash('You cannot order an item for a character that does not belong to your account.', 'error')
error = True
# Skip the balance check if the item lookup above already failed
if not error and account.points < item.price:
flash('You do not have enough premium points to purchase this item.', 'error')
error = True
if not error:
order = ShopOrder()
order.name = item.name
order.type = item.type
order.key = item.key
order.value = item.value
order.price = item.price
order.ordered = int(time())
order.character_id = character.id
account.points -= item.price
db.session().add(order)
db.session().commit()
flash('The item has been ordered and should soon be delivered to your character.', 'success')
return redirect(url_for('route_shop'))
@app.route('/shop/history', methods=['GET'])
@login_required
def route_shop_history():
user = current_user()
history = ShopHistory.query.filter(ShopHistory.account_id == user.id).all()
return render_template('shop/history.htm', history=history)
| {
"content_hash": "c1dfde4d05fba0ba241c8d23bfe4b8c0",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 95,
"avg_line_length": 29.25609756097561,
"alnum_prop": 0.7123801583993331,
"repo_name": "diath/pyfsw",
"id": "5fe718fe44d5d06772ebc891d649202c25f0b10c",
"size": "2399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyfsw/views/shop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2445"
},
{
"name": "HTML",
"bytes": "113675"
},
{
"name": "JavaScript",
"bytes": "317"
},
{
"name": "Lua",
"bytes": "3328"
},
{
"name": "Nginx",
"bytes": "555"
},
{
"name": "Python",
"bytes": "117922"
}
],
"symlink_target": ""
} |
from setuptools import find_packages, setup
DESCRIPTION = 'App Engine backends for Django-nonrel'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except IOError:
pass
setup(name='djangoappengine',
version='1.4.0',
packages=find_packages(exclude=['docs']),
install_requires=['djangotoolbox'],
author='Waldemar Kornewald',
author_email='[email protected]',
url='https://github.com/django-nonrel/djangoappengine',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license='3-clause BSD',
platforms=['any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
],
)
| {
"content_hash": "55b8942b080c254c089f3cf8f741d9ac",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 81,
"avg_line_length": 32.911764705882355,
"alnum_prop": 0.6309204647006256,
"repo_name": "jerod2000/djangoappengine",
"id": "8a6234072f35147ed717f43325152a55e3b07529",
"size": "1119",
"binary": false,
"copies": "1",
"ref": "refs/heads/appengine-1.4",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""AbstractCoroutine provider tests."""
import asyncio
from dependency_injector import providers, errors
from pytest import mark, raises
from .common import example
def test_inheritance():
assert isinstance(providers.AbstractCoroutine(example), providers.Coroutine)
@mark.asyncio
@mark.filterwarnings("ignore")
async def test_call_overridden_by_coroutine():
@asyncio.coroutine
def abstract_example():
raise RuntimeError("Should not be raised")
provider = providers.AbstractCoroutine(abstract_example)
provider.override(providers.Coroutine(example))
result = await provider(1, 2, 3, 4)
assert result == (1, 2, 3, 4)
@mark.asyncio
@mark.filterwarnings("ignore")
async def test_call_overridden_by_delegated_coroutine():
@asyncio.coroutine
def abstract_example():
raise RuntimeError("Should not be raised")
provider = providers.AbstractCoroutine(abstract_example)
provider.override(providers.DelegatedCoroutine(example))
result = await provider(1, 2, 3, 4)
assert result == (1, 2, 3, 4)
def test_call_not_overridden():
provider = providers.AbstractCoroutine(example)
with raises(errors.Error):
provider(1, 2, 3, 4)
def test_override_by_not_coroutine():
provider = providers.AbstractCoroutine(example)
with raises(errors.Error):
provider.override(providers.Factory(object))
def test_provide_not_implemented():
provider = providers.AbstractCoroutine(example)
with raises(NotImplementedError):
provider._provide((1, 2, 3, 4), dict())
def test_repr():
provider = providers.AbstractCoroutine(example)
assert repr(provider) == (
"<dependency_injector.providers."
"AbstractCoroutine({0}) at {1}>".format(repr(example), hex(id(provider)))
)
| {
"content_hash": "ff0e8104527b0adb24ed2793cf0886e9",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 81,
"avg_line_length": 27.181818181818183,
"alnum_prop": 0.7090301003344481,
"repo_name": "ets-labs/python-dependency-injector",
"id": "4f09844282c2e2143c38b4b99f285a134ce77064",
"size": "1794",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/unit/providers/coroutines/test_abstract_coroutine_py35.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cython",
"bytes": "201812"
},
{
"name": "Makefile",
"bytes": "1942"
},
{
"name": "Python",
"bytes": "492977"
}
],
"symlink_target": ""
} |
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from autobahn.wamp.types import CallOptions, RegisterOptions
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession
class Component(ApplicationSession):
"""
Application component that produces progressive results.
"""
def onConnect(self):
self.join("realm1")
def onJoin(self, details):
@inlineCallbacks
def longop(n, details = None):
if details.progress:
for i in range(n):
details.progress(i)
yield sleep(1)
else:
yield sleep(1 * n)
returnValue(n)
self.register(longop, 'com.myapp.longop', RegisterOptions(details_arg = 'details'))
| {
"content_hash": "658dafb4bdde333176dd7d46a8002c18",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 89,
"avg_line_length": 25.838709677419356,
"alnum_prop": 0.6716604244694132,
"repo_name": "eugenejen/AutobahnPython",
"id": "777130688d33012ac9e0c483be25a490e6bfb026",
"size": "1571",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/twisted/wamp/basic/rpc/progress/backend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""
Filename: test_random.py
Author: Daisuke Oyama
Tests for markov/random.py
"""
import numpy as np
from numpy.testing import (
assert_array_equal, assert_raises, assert_array_almost_equal_nulp
)
from nose.tools import eq_, ok_, raises
from quantecon.markov import (
random_markov_chain, random_stochastic_matrix, random_discrete_dp
)
def test_random_markov_chain_dense():
sparse = False
n, k = 5, 3
mc_dicts = [{'P': random_markov_chain(n, sparse=sparse).P, 'k': n},
{'P': random_markov_chain(n, k, sparse=sparse).P, 'k': k}]
for mc_dict in mc_dicts:
P = mc_dict['P']
assert_array_equal(P.shape, (n, n))
assert_array_equal((P > 0).sum(axis=1), [mc_dict['k']]*n)
def test_random_markov_chain_sparse():
sparse = True
n, k = 5, 3
mc_dicts = [{'P': random_markov_chain(n, sparse=sparse).P, 'k': n},
{'P': random_markov_chain(n, k, sparse=sparse).P, 'k': k}]
for mc_dict in mc_dicts:
P = mc_dict['P']
assert_array_equal(P.shape, (n, n))
assert_array_equal(P.getnnz(axis=1), [mc_dict['k']]*n)
def test_random_markov_chain_value_error():
# n <= 0
assert_raises(ValueError, random_markov_chain, 0)
# k = 0
assert_raises(ValueError, random_markov_chain, 2, 0)
# k > n
assert_raises(ValueError, random_markov_chain, 2, 3)
def test_random_stochastic_matrix_dense():
sparse = False
n, k = 5, 3
Ps = [random_stochastic_matrix(n, sparse=sparse),
random_stochastic_matrix(n, k, sparse=sparse)]
for P in Ps:
ok_(np.all(P >= 0))
assert_array_almost_equal_nulp(P.sum(axis=1), np.ones(n))
def test_random_stochastic_matrix_sparse():
sparse = True
n, k = 5, 3
Ps = [random_stochastic_matrix(n, sparse=sparse),
random_stochastic_matrix(n, k, sparse=sparse)]
for P in Ps:
ok_(np.all(P.data >= 0))
assert_array_almost_equal_nulp(P.sum(axis=1), np.ones(n))
def test_random_stochastic_matrix_dense_vs_sparse():
n, k = 10, 5
seed = 1234
P_dense = random_stochastic_matrix(n, sparse=False, random_state=seed)
P_sparse = random_stochastic_matrix(n, sparse=True, random_state=seed)
assert_array_equal(P_dense, P_sparse.toarray())
P_dense = random_stochastic_matrix(n, k, sparse=False, random_state=seed)
P_sparse = random_stochastic_matrix(n, k, sparse=True, random_state=seed)
assert_array_equal(P_dense, P_sparse.toarray())
class TestRandomDiscreteDP:
def setUp(self):
self.num_states, self.num_actions = 5, 4
self.num_sa = self.num_states * self.num_actions
self.k = 3
seed = 1234
self.ddp = \
random_discrete_dp(self.num_states, self.num_actions, k=self.k,
sparse=False, sa_pair=False, random_state=seed)
labels = ['dense', 'sparse']
self.ddps_sa = {}
for label in labels:
is_sparse = (label == 'sparse')
self.ddps_sa[label] = \
random_discrete_dp(self.num_states, self.num_actions, k=self.k,
sparse=is_sparse, sa_pair=True,
random_state=seed)
def test_shape(self):
n, m, L = self.num_states, self.num_actions, self.num_sa
eq_(self.ddp.R.shape, (n, m))
eq_(self.ddp.Q.shape, (n, m, n))
for ddp in self.ddps_sa.values():
eq_(ddp.R.shape, (L,))
eq_(ddp.Q.shape, (L, n))
def test_nonzero(self):
n, m, L, k = self.num_states, self.num_actions, self.num_sa, self.k
assert_array_equal((self.ddp.Q > 0).sum(axis=-1), np.ones((n, m))*k)
assert_array_equal((self.ddps_sa['dense'].Q > 0).sum(axis=-1),
np.ones(L)*k)
assert_array_equal(self.ddps_sa['sparse'].Q.getnnz(axis=-1),
np.ones(L)*k)
def test_equal_reward(self):
assert_array_equal(self.ddp.R.ravel(), self.ddps_sa['dense'].R)
assert_array_equal(self.ddps_sa['dense'].R, self.ddps_sa['sparse'].R)
def test_equal_probability(self):
assert_array_equal(self.ddp.Q.ravel(), self.ddps_sa['dense'].Q.ravel())
assert_array_equal(self.ddps_sa['dense'].Q,
self.ddps_sa['sparse'].Q.toarray())
def test_equal_beta(self):
for ddp in self.ddps_sa.values():
eq_(ddp.beta, self.ddp.beta)
if __name__ == '__main__':
import sys
import nose
argv = sys.argv[:]
argv.append('--verbose')
argv.append('--nocapture')
nose.main(argv=argv, defaultTest=__file__)
| {
"content_hash": "50faac0481d8f2f5688ff6e09ca49591",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 79,
"avg_line_length": 32.22222222222222,
"alnum_prop": 0.5788793103448275,
"repo_name": "gxxjjj/QuantEcon.py",
"id": "dee4c701371e30a8ddae262047fbae48fc4c03af",
"size": "4640",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "quantecon/markov/tests/test_random.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "236"
},
{
"name": "Makefile",
"bytes": "331"
},
{
"name": "Python",
"bytes": "491178"
},
{
"name": "Shell",
"bytes": "219"
}
],
"symlink_target": ""
} |
import mock
import six.moves.urllib.parse as urlparse
import webob
from nova.api.openstack import common
from nova.api.openstack.compute import flavors as flavors_v21
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
def fake_get_limit_and_marker(request, max_limit=1):
params = common.get_pagination_params(request)
limit = params.get('limit', max_limit)
limit = min(max_limit, limit)
marker = params.get('marker')
return limit, marker
def return_flavor_not_found(context, flavor_id, read_deleted=None):
raise exception.FlavorNotFound(flavor_id=flavor_id)
class FlavorsTestV21(test.TestCase):
_prefix = "/v2/fake"
Controller = flavors_v21.FlavorsController
fake_request = fakes.HTTPRequestV21
_rspv = "v2/fake"
_fake = "/fake"
def setUp(self):
super(FlavorsTestV21, self).setUp()
fakes.stub_out_networking(self)
fakes.stub_out_flavor_get_all(self)
fakes.stub_out_flavor_get_by_flavor_id(self)
self.controller = self.Controller()
def _set_expected_body(self, expected, flavor):
# NOTE(oomichi): On v2.1 API, some extensions of v2.0 are merged
# as core features and we can get the following parameters as the
# default.
expected['OS-FLV-EXT-DATA:ephemeral'] = flavor.ephemeral_gb
expected['OS-FLV-DISABLED:disabled'] = flavor.disabled
expected['swap'] = flavor.swap
@mock.patch('nova.objects.Flavor.get_by_flavor_id',
side_effect=return_flavor_not_found)
def test_get_flavor_by_invalid_id(self, mock_get):
req = self.fake_request.blank(self._prefix + '/flavors/asdf')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, 'asdf')
def test_get_flavor_by_id(self):
req = self.fake_request.blank(self._prefix + '/flavors/1')
flavor = self.controller.show(req, '1')
expected = {
"flavor": {
"id": fakes.FLAVORS['1'].flavorid,
"name": fakes.FLAVORS['1'].name,
"ram": fakes.FLAVORS['1'].memory_mb,
"disk": fakes.FLAVORS['1'].root_gb,
"vcpus": fakes.FLAVORS['1'].vcpus,
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
}
self._set_expected_body(expected['flavor'], fakes.FLAVORS['1'])
self.assertEqual(flavor, expected)
def test_get_flavor_with_custom_link_prefix(self):
self.flags(osapi_compute_link_prefix='http://zoo.com:42',
osapi_glance_link_prefix='http://circus.com:34')
req = self.fake_request.blank(self._prefix + '/flavors/1')
flavor = self.controller.show(req, '1')
expected = {
"flavor": {
"id": fakes.FLAVORS['1'].flavorid,
"name": fakes.FLAVORS['1'].name,
"ram": fakes.FLAVORS['1'].memory_mb,
"disk": fakes.FLAVORS['1'].root_gb,
"vcpus": fakes.FLAVORS['1'].vcpus,
"links": [
{
"rel": "self",
"href": "http://zoo.com:42/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://zoo.com:42" + self._fake +
"/flavors/1",
},
],
},
}
self._set_expected_body(expected['flavor'], fakes.FLAVORS['1'])
self.assertEqual(expected, flavor)
def test_get_flavor_list(self):
req = self.fake_request.blank(self._prefix + '/flavors')
flavor = self.controller.index(req)
expected = {
"flavors": [
{
"id": fakes.FLAVORS['1'].flavorid,
"name": fakes.FLAVORS['1'].name,
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
{
"id": fakes.FLAVORS['2'].flavorid,
"name": fakes.FLAVORS['2'].name,
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
def test_get_flavor_list_with_marker(self):
self.maxDiff = None
url = self._prefix + '/flavors?limit=1&marker=1'
req = self.fake_request.blank(url)
flavor = self.controller.index(req)
expected = {
"flavors": [
{
"id": fakes.FLAVORS['2'].flavorid,
"name": fakes.FLAVORS['2'].name,
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
'flavors_links': [
{'href': 'http://localhost/' + self._rspv +
'/flavors?limit=1&marker=2',
'rel': 'next'}
]
}
self.assertThat(flavor, matchers.DictMatches(expected))
def test_get_flavor_list_with_invalid_marker(self):
req = self.fake_request.blank(self._prefix + '/flavors?marker=99999')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_flavor_detail_with_limit(self):
url = self._prefix + '/flavors/detail?limit=1'
req = self.fake_request.blank(url)
response = self.controller.detail(req)
response_list = response["flavors"]
response_links = response["flavors_links"]
expected_flavors = [
{
"id": fakes.FLAVORS['1'].flavorid,
"name": fakes.FLAVORS['1'].name,
"ram": fakes.FLAVORS['1'].memory_mb,
"disk": fakes.FLAVORS['1'].root_gb,
"vcpus": fakes.FLAVORS['1'].vcpus,
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
]
self._set_expected_body(expected_flavors[0], fakes.FLAVORS['1'])
self.assertEqual(response_list, expected_flavors)
self.assertEqual(response_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/' + self._rspv + '/flavors/detail', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
self.assertThat({'limit': ['1'], 'marker': ['1']},
matchers.DictMatches(params))
def test_get_flavor_with_limit(self):
req = self.fake_request.blank(self._prefix + '/flavors?limit=2')
response = self.controller.index(req)
response_list = response["flavors"]
response_links = response["flavors_links"]
expected_flavors = [
{
"id": fakes.FLAVORS['1'].flavorid,
"name": fakes.FLAVORS['1'].name,
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
{
"id": fakes.FLAVORS['2'].flavorid,
"name": fakes.FLAVORS['2'].name,
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
}
]
self.assertEqual(response_list, expected_flavors)
self.assertEqual(response_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/' + self._rspv + '/flavors', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
self.assertThat({'limit': ['2'], 'marker': ['2']},
matchers.DictMatches(params))
def test_get_flavor_with_default_limit(self):
self.stubs.Set(common, "get_limit_and_marker",
fake_get_limit_and_marker)
self.flags(osapi_max_limit=1)
req = fakes.HTTPRequest.blank('/v2/fake/flavors?limit=2')
response = self.controller.index(req)
response_list = response["flavors"]
response_links = response["flavors_links"]
expected_flavors = [
{
"id": fakes.FLAVORS['1'].flavorid,
"name": fakes.FLAVORS['1'].name,
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/1",
}
]
}
]
self.assertEqual(response_list, expected_flavors)
self.assertEqual(response_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/v2/fake/flavors', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
self.assertThat({'limit': ['2'], 'marker': ['1']},
matchers.DictMatches(params))
def test_get_flavor_list_detail(self):
req = self.fake_request.blank(self._prefix + '/flavors/detail')
flavor = self.controller.detail(req)
expected = {
"flavors": [
{
"id": fakes.FLAVORS['1'].flavorid,
"name": fakes.FLAVORS['1'].name,
"ram": fakes.FLAVORS['1'].memory_mb,
"disk": fakes.FLAVORS['1'].root_gb,
"vcpus": fakes.FLAVORS['1'].vcpus,
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
{
"id": fakes.FLAVORS['2'].flavorid,
"name": fakes.FLAVORS['2'].name,
"ram": fakes.FLAVORS['2'].memory_mb,
"disk": fakes.FLAVORS['2'].root_gb,
"vcpus": fakes.FLAVORS['2'].vcpus,
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
self._set_expected_body(expected['flavors'][0], fakes.FLAVORS['1'])
self._set_expected_body(expected['flavors'][1], fakes.FLAVORS['2'])
self.assertEqual(expected, flavor)
@mock.patch('nova.objects.FlavorList.get_all',
return_value=objects.FlavorList())
def test_get_empty_flavor_list(self, mock_get):
req = self.fake_request.blank(self._prefix + '/flavors')
flavors = self.controller.index(req)
expected = {'flavors': []}
self.assertEqual(flavors, expected)
def test_get_flavor_list_filter_min_ram(self):
# Flavor lists may be filtered by minRam.
req = self.fake_request.blank(self._prefix + '/flavors?minRam=512')
flavor = self.controller.index(req)
expected = {
"flavors": [
{
"id": fakes.FLAVORS['2'].flavorid,
"name": fakes.FLAVORS['2'].name,
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
def test_get_flavor_list_filter_invalid_min_ram(self):
# Ensure you cannot list flavors with invalid minRam param.
req = self.fake_request.blank(self._prefix + '/flavors?minRam=NaN')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_flavor_list_filter_min_disk(self):
# Flavor lists may be filtered by minDisk.
req = self.fake_request.blank(self._prefix + '/flavors?minDisk=20')
flavor = self.controller.index(req)
expected = {
"flavors": [
{
"id": fakes.FLAVORS['2'].flavorid,
"name": fakes.FLAVORS['2'].name,
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
def test_get_flavor_list_filter_invalid_min_disk(self):
# Ensure you cannot list flavors with invalid minDisk param.
req = self.fake_request.blank(self._prefix + '/flavors?minDisk=NaN')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_flavor_list_detail_min_ram_and_min_disk(self):
"""Tests that filtering work on flavor details and that minRam and
minDisk filters can be combined
"""
req = self.fake_request.blank(self._prefix + '/flavors/detail'
'?minRam=256&minDisk=20')
flavor = self.controller.detail(req)
expected = {
"flavors": [
{
"id": fakes.FLAVORS['2'].flavorid,
"name": fakes.FLAVORS['2'].name,
"ram": fakes.FLAVORS['2'].memory_mb,
"disk": fakes.FLAVORS['2'].root_gb,
"vcpus": fakes.FLAVORS['2'].vcpus,
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
self._set_expected_body(expected['flavors'][0], fakes.FLAVORS['2'])
self.assertEqual(expected, flavor)
class DisabledFlavorsWithRealDBTestV21(test.TestCase):
"""Tests that disabled flavors should not be shown nor listed."""
Controller = flavors_v21.FlavorsController
_prefix = "/v2"
fake_request = fakes.HTTPRequestV21
def setUp(self):
super(DisabledFlavorsWithRealDBTestV21, self).setUp()
# Add a new disabled type to the list of flavors
self.req = self.fake_request.blank(self._prefix + '/flavors')
self.context = self.req.environ['nova.context']
self.admin_context = context.get_admin_context()
self.disabled_type = self._create_disabled_instance_type()
self.addCleanup(self.disabled_type.destroy)
self.inst_types = objects.FlavorList.get_all(self.admin_context)
self.controller = self.Controller()
def _create_disabled_instance_type(self):
flavor = objects.Flavor(context=self.admin_context,
name='foo.disabled', flavorid='10.disabled',
memory_mb=512, vcpus=2, root_gb=1,
ephemeral_gb=0, swap=0, rxtx_factor=1.0,
vcpu_weight=1, disabled=True, is_public=True,
extra_specs={}, projects=[])
flavor.create()
return flavor
def test_index_should_not_list_disabled_flavors_to_user(self):
self.context.is_admin = False
flavor_list = self.controller.index(self.req)['flavors']
api_flavorids = set(f['id'] for f in flavor_list)
db_flavorids = set(i['flavorid'] for i in self.inst_types)
disabled_flavorid = str(self.disabled_type['flavorid'])
self.assertIn(disabled_flavorid, db_flavorids)
self.assertEqual(db_flavorids - set([disabled_flavorid]),
api_flavorids)
def test_index_should_list_disabled_flavors_to_admin(self):
self.context.is_admin = True
flavor_list = self.controller.index(self.req)['flavors']
api_flavorids = set(f['id'] for f in flavor_list)
db_flavorids = set(i['flavorid'] for i in self.inst_types)
disabled_flavorid = str(self.disabled_type['flavorid'])
self.assertIn(disabled_flavorid, db_flavorids)
self.assertEqual(db_flavorids, api_flavorids)
def test_show_should_include_disabled_flavor_for_user(self):
"""Counterintuitively we should show disabled flavors to all users and
not just admins. The reason is that, when a user performs a server-show
request, we want to be able to display the pretty flavor name ('512 MB
Instance') and not just the flavor-id even if the flavor id has been
marked disabled.
"""
self.context.is_admin = False
flavor = self.controller.show(
self.req, self.disabled_type['flavorid'])['flavor']
self.assertEqual(flavor['name'], self.disabled_type['name'])
def test_show_should_include_disabled_flavor_for_admin(self):
self.context.is_admin = True
flavor = self.controller.show(
self.req, self.disabled_type['flavorid'])['flavor']
self.assertEqual(flavor['name'], self.disabled_type['name'])
class ParseIsPublicTestV21(test.TestCase):
Controller = flavors_v21.FlavorsController
def setUp(self):
super(ParseIsPublicTestV21, self).setUp()
self.controller = self.Controller()
def assertPublic(self, expected, is_public):
self.assertIs(expected, self.controller._parse_is_public(is_public),
'%s did not return %s' % (is_public, expected))
def test_None(self):
self.assertPublic(True, None)
def test_truthy(self):
self.assertPublic(True, True)
self.assertPublic(True, 't')
self.assertPublic(True, 'true')
self.assertPublic(True, 'yes')
self.assertPublic(True, '1')
def test_falsey(self):
self.assertPublic(False, False)
self.assertPublic(False, 'f')
self.assertPublic(False, 'false')
self.assertPublic(False, 'no')
self.assertPublic(False, '0')
def test_string_none(self):
self.assertPublic(None, 'none')
self.assertPublic(None, 'None')
def test_other(self):
self.assertRaises(
webob.exc.HTTPBadRequest, self.assertPublic, None, 'other')
| {
"content_hash": "2a9a0c2dd0b4372a9747b548525756c8",
"timestamp": "",
"source": "github",
"line_count": 582,
"max_line_length": 79,
"avg_line_length": 38.7147766323024,
"alnum_prop": 0.4610331972306054,
"repo_name": "sebrandon1/nova",
"id": "46481b5eb913d2b8809398d0064874230ebc06cc",
"size": "23168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/unit/api/openstack/compute/test_flavors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "3325"
},
{
"name": "Python",
"bytes": "18265203"
},
{
"name": "Shell",
"bytes": "37074"
},
{
"name": "Smarty",
"bytes": "299657"
}
],
"symlink_target": ""
} |
import selectors
import socket
mysel = selectors.DefaultSelector()
keep_running = True
def read(connection, mask):
"Callback for read events"
global keep_running
client_address = connection.getpeername()
print('read({})'.format(client_address))
data = connection.recv(1024)
if data:
# A readable client socket has data
print(' received {!r}'.format(data))
connection.sendall(data)
else:
# Interpret empty result as closed connection
print(' closing')
mysel.unregister(connection)
connection.close()
# Uncomment the next line to stop the main loop when a client disconnects
# keep_running = False
def accept(sock, mask):
"Callback for new connections"
new_connection, addr = sock.accept()
print('accept({})'.format(addr))
new_connection.setblocking(False)
mysel.register(new_connection, selectors.EVENT_READ, read)
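# A minimal client sketch (an assumption, not part of this script) to exercise
# this echo server: connect to the address configured below, send a payload and
# read the echo back.
#
#   import socket
#   with socket.create_connection(('localhost', 10001)) as client:
#       client.sendall(b'ping')
#       print(client.recv(1024))  # expected: b'ping'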
server_address = ('localhost', 10001)
print('starting up on {} port {}'.format(*server_address))
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setblocking(False)
server.bind(server_address)
server.listen(5)
mysel.register(server, selectors.EVENT_READ, accept)
while keep_running:
print('waiting for I/O')
for key, mask in mysel.select(timeout=1):
callback = key.data
callback(key.fileobj, mask)
print('shutting down')
mysel.close()
| {
"content_hash": "4d731bbe53c2af6d0ccc56772a976774",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 62,
"avg_line_length": 26.48076923076923,
"alnum_prop": 0.6775599128540305,
"repo_name": "zaqwes8811/micro-apps",
"id": "2b4ca9a49c73200280c49701043ec678ce1f7fb9",
"size": "1404",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "selectors_echo_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "309556"
},
{
"name": "Assembly",
"bytes": "570069"
},
{
"name": "Batchfile",
"bytes": "56007"
},
{
"name": "C",
"bytes": "53062"
},
{
"name": "C#",
"bytes": "32208"
},
{
"name": "C++",
"bytes": "1108629"
},
{
"name": "CMake",
"bytes": "23718"
},
{
"name": "CSS",
"bytes": "186903"
},
{
"name": "Cuda",
"bytes": "9680"
},
{
"name": "Dart",
"bytes": "1158"
},
{
"name": "Dockerfile",
"bytes": "20181"
},
{
"name": "Go",
"bytes": "6640"
},
{
"name": "HTML",
"bytes": "2215958"
},
{
"name": "Haskell",
"bytes": "383"
},
{
"name": "Java",
"bytes": "140401"
},
{
"name": "JavaScript",
"bytes": "714877"
},
{
"name": "Jupyter Notebook",
"bytes": "25399728"
},
{
"name": "Kotlin",
"bytes": "713"
},
{
"name": "Lua",
"bytes": "2253"
},
{
"name": "MATLAB",
"bytes": "103"
},
{
"name": "Makefile",
"bytes": "33566"
},
{
"name": "Mako",
"bytes": "824"
},
{
"name": "NSIS",
"bytes": "7481"
},
{
"name": "PHP",
"bytes": "59915"
},
{
"name": "Pascal",
"bytes": "2492"
},
{
"name": "Pawn",
"bytes": "3337"
},
{
"name": "Python",
"bytes": "1836093"
},
{
"name": "QML",
"bytes": "58517"
},
{
"name": "QMake",
"bytes": "4042"
},
{
"name": "R",
"bytes": "13753"
},
{
"name": "Ruby",
"bytes": "522"
},
{
"name": "Rust",
"bytes": "210"
},
{
"name": "Scheme",
"bytes": "113588"
},
{
"name": "Scilab",
"bytes": "1348"
},
{
"name": "Shell",
"bytes": "16112"
},
{
"name": "SourcePawn",
"bytes": "3316"
},
{
"name": "VBScript",
"bytes": "9376"
},
{
"name": "XSLT",
"bytes": "24926"
}
],
"symlink_target": ""
} |
import sys
from BeautifulSoup import BeautifulSoup
"""
Utility program to list AMIs for ubuntu cloud server releases:
$ UDISTRO=precise; curl -o $UDISTRO.txt http://cloud-images.ubuntu.com/releases/$UDISTRO/release/
$ echo " $UDISTRO:"; ./ubuntu_cloud_images.py $UDISTRO.txt
"""
Arch = {'32-bit': 'i386', '64-bit': 'x86_64'}
def ami_tuples(data):
"""Return ubuntu server release info as a list of named tuples
"""
soup = BeautifulSoup(data)
table = soup.find('table')
rows = table.findAll('tr')
headings = [td.find(text=True).strip() for td in rows[0].findAll('td')]
ami_list = []
for row in rows[1:]:
r = [p.text for p in [td for td in row.findAll('td')]]
ami = dict(zip(headings, r))
if not ami['root store'] == 'ebs':
continue
ami['architecture'] = Arch[ami['arch']]
ami['id'] = ami['ami'].replace('Launch', '')
ami_list.append(ami)
return ami_list
def ami_yaml(data):
yaml_list = []
region = None
for ami in ami_tuples(data):
if not ami['Region'] == region:
yaml_list.append(' {0}:'.format(ami['Region']))
yaml_list.append(' {0}: {1}'.format(ami['architecture'], ami['id']))
region = ami['Region']
return yaml_list
if __name__ == '__main__':
datafile = sys.argv[1]
data = open(datafile).read()
for y in ami_yaml(data):
print y
| {
"content_hash": "30c848d3f64f018191f7b5eb5835c63b",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 97,
"avg_line_length": 30.76086956521739,
"alnum_prop": 0.5844522968197879,
"repo_name": "rubic/shaker",
"id": "08b987c3296fbd1d69fcf44a705b4d21a94a4084",
"size": "1437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/ubuntu_cloud_images.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "46584"
}
],
"symlink_target": ""
} |
thumbSize = 200
| {
"content_hash": "2687c0966daeb43326678b2389f1658d",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 15,
"avg_line_length": 15,
"alnum_prop": 0.8,
"repo_name": "bhautikj/imageCatapult",
"id": "dc44418f456cdc9baeacb534d2976b2f5e587ed9",
"size": "1149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coreOpts.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7281"
},
{
"name": "JavaScript",
"bytes": "47479"
},
{
"name": "Python",
"bytes": "87261"
}
],
"symlink_target": ""
} |
import time
import numpy as np
import liblo as lo
import grovepi
import grove_oled
from adxl345 import ADXL345
from optparse import OptionParser
usage = "python sensor-osc.py --oscip 127.0.0.1 --oscport 7878 --with_accel 1 --with_oled 0 --with_pulse 1"
parser = OptionParser(usage=usage)
parser.add_option("-o", "--oscip",
dest="oscip", type='string', default="127.0.0.1",
help="IP address to send OSC message to")
parser.add_option("-p", "--oscport",
dest="oscport", type='int', default=7878,
help="host port")
parser.add_option("-a", "--with_accel",
dest="withAccel", type='int', default=1,
help="Is a Grove Accelerometer ADXL345 connected via I2C?")
parser.add_option("-P", "--with_pulse",
dest="withPulse", type='int', default=1,
help="Is a pulse sensor connected to A0?")
parser.add_option("-d", "--debug",
dest="printDebug", type='int', default=0,
help="Print the sensor values to stdout?")
(options, args) = parser.parse_args()
outputAddress = lo.Address(options.oscip, options.oscport)
global axes
if options.withAccel:
adxl345 = ADXL345()
axes = adxl345.getAxes(True)
analogdata = np.zeros(3)
digitaldata = np.zeros(1)
timestamps = np.zeros(3)
while True:
# this is specific to GrovePi Zero. Additional pins may be used.
# See https://www.dexterindustries.com/GrovePi/engineering/port-description/
# for unlocking more I/O
analogdata[0] = grovepi.analogRead(0)
analogdata[1] = grovepi.analogRead(1)
analogdata[2] = grovepi.analogRead(2)
timestamps[0] = time.time() # let's take a timestamp, in case we want to use it someday
analogMessage = lo.Message('/grove/analog', analogdata[0], analogdata[1], analogdata[2])
lo.send(outputAddress, analogMessage)
digitaldata[0] = grovepi.digitalRead(3)
timestamps[1] = time.time()
digitalMessage = lo.Message('/grove/digital', digitaldata[0])
lo.send(outputAddress, digitalMessage)
if options.printDebug:
print("analog: A0 %.3f, A1 %.3f, A2 %.3f" % (analogdata[0], analogdata[1], analogdata[2]))
print("digital: D3 %d" % (digitaldata[0]))
if options.withPulse:
pulseMessage = lo.Message('/grove/pulsesensor', analogdata[0])
lo.send(outputAddress, pulseMessage)
if options.withAccel:
timestamps[2] = time.time()
axes = adxl345.getAxes(True)
accelMessage = lo.Message('/grove/accel', axes['x'], axes['y'], axes['z'])
lo.send(outputAddress, accelMessage)
if options.printDebug:
print("accel: x = %.3fG, y = %.3fG, z = %.3fG" % ( axes['x'], axes['y'], axes['z']))
| {
"content_hash": "14d9a69968d84cdb219046894e3b2a2f",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 107,
"avg_line_length": 30.738636363636363,
"alnum_prop": 0.6428835489833642,
"repo_name": "treeoftenere/Interactivity",
"id": "14e22149543f2569181c416a5017e6c484342704",
"size": "2705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sensors/grove-osc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80776"
},
{
"name": "Shell",
"bytes": "577"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import time
import datetime
__all__ = ["timestamp", "now"]
def timestamp():
"""Generate a timestamp using the current date and time.
Returns
-------
str
The timestamp.
Examples
--------
>>> type(timestamp()) == type('')
True
"""
return datetime.datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S")
def now():
"""Generate a timestamp using the current date and time.
Returns
-------
str
The timestamp.
Examples
--------
>>> type(now()) == type('')
True
"""
return timestamp()
| {
"content_hash": "7913e1e251962e24918cc01fb39741b6",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 80,
"avg_line_length": 16.302325581395348,
"alnum_prop": 0.5649072753209701,
"repo_name": "compas-dev/compas",
"id": "6895cf347d5045df3b5268953970919e9c2a7a48",
"size": "701",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/compas/utilities/datetime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3181804"
}
],
"symlink_target": ""
} |
import os.path
from matplotlib import pyplot as plt
import numpy as np
import tables as tb
from scipy.optimize import curve_fit, leastsq
import progressbar
from pyLandau import landau
from pybar.run_manager import RunManager # importing run manager
from pybar.scans.scan_iv import IVScan
from pybar.scans.test_register import RegisterTest
from pybar.scans.scan_digital import DigitalScan
from pybar.scans.scan_analog import AnalogScan
from pybar.scans.tune_fei4 import Fei4Tuning
from pybar.scans.tune_stuck_pixel import StuckPixelTuning
from pybar.scans.scan_threshold_fast import FastThresholdScan
from pybar.scans.tune_noise_occupancy import NoiseOccupancyTuning
from pybar.scans.calibrate_plsr_dac import PlsrDacScan
from pybar.scans.calibrate_hit_or import HitOrCalibration
from pybar.scans.scan_ext_trigger import ExtTriggerScan
from pybar.fei4.register_utils import make_box_pixel_mask_from_col_row
import pybar.scans.analyze_source_scan_tdc_data as tdc_analysis
def analyze_tdc(source_scan_filename, calibration_filename, col_span, row_span):
# Data files
calibation_file = calibration_filename
raw_data_file = source_scan_filename
hit_file = os.path.splitext(raw_data_file)[0] + r'_interpreted.h5'
# Selection criterions, change this to your needs
hit_selection = '(column > %d) & (column < %d) & (row > %d) & (row < %d)' % (col_span[0] + 1, col_span[1] - 1, row_span[0] + 5, row_span[1] - 5) # deselect edge pixels for better cluster size cut
hit_selection_conditions = ['(n_cluster==1)', '(n_cluster==1) & (cluster_size == 1)', '(n_cluster==1) & (cluster_size == 1) & (relative_BCID > 1) & (relative_BCID < 4) & ((tot > 12) | ((TDC * 1.5625 - tot * 25 < 100) & (tot * 25 - TDC * 1.5625 < 100))) & %s' % hit_selection]
event_status_select_mask = 0b0000111111011111
event_status_condition = 0b0000000100000000 # trigger, one in-time tdc word and perfect event structure required
# Interpret raw data and create hit table
tdc_analysis.analyze_raw_data(input_files=raw_data_file,
output_file_hits=hit_file,
interpreter_plots=True,
overwrite_output_files=True,
pdf_filename=raw_data_file,
align_at_trigger=True,
use_tdc_trigger_time_stamp=True,
max_tdc_delay=253)
# Select TDC histograms for different cut criterions, use the charge calibrations
tdc_analysis.histogram_tdc_hits(hit_file,
hit_selection_conditions,
event_status_select_mask,
event_status_condition,
calibation_file,
max_tdc=1500,
n_bins=350)
return os.path.splitext(hit_file)[0] + '_tdc_hists.h5'
def plsr_dac_to_charge(source_scan_filename, plsr_dac):
with tb.open_file(source_scan_filename, 'r') as in_file_h5:
vcal_c0 = float(in_file_h5.root.configuration.calibration_parameters[:][np.where(in_file_h5.root.configuration.calibration_parameters[:]['name'] == 'Vcal_Coeff_0')]['value'][0])
vcal_c1 = float(in_file_h5.root.configuration.calibration_parameters[:][np.where(in_file_h5.root.configuration.calibration_parameters[:]['name'] == 'Vcal_Coeff_1')]['value'][0])
c_high = float(in_file_h5.root.configuration.calibration_parameters[:][np.where(in_file_h5.root.configuration.calibration_parameters[:]['name'] == 'C_Inj_High')]['value'][0])
voltage = vcal_c0 + vcal_c1 * plsr_dac
return voltage * c_high / 0.16022
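# Rough worked example (assuming Vcal_Coeff_* is stored in mV and C_Inj_High in fF,
# which is an assumption, not taken from the data file itself): with vcal_c0 = 0,
# vcal_c1 = 1.5 and c_high = 4.1, a plsr_dac of 100 gives
#   voltage = 0 + 1.5 * 100 = 150 (mV)
#   charge  = 150 * 4.1 / 0.16022 ~ 3840 electrons
# i.e. the 0.16022 divisor converts mV * fF into electrons (1 e ~ 0.16022 aC).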
def fit_landau_bootstrap(x, y, p0, n_sigma=1, n_iterations=500, **kwargs): # fit the landau with bootstrap to give reasonable fit errors
def errfunc(p, x, y): # langau errorfunktion to minimize in fit
return landau.langau(x, *p) - y
yerr = kwargs.get('yerr', None)
pfit, _ = curve_fit(landau.langau, x, y, p0=p0)
residuals = errfunc(pfit, x, y)
s_res = np.std(residuals)
ps = []
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=n_iterations, term_width=80)
progress_bar.start()
for i in range(n_iterations):
if yerr is None:
randomDelta = np.random.normal(0.0, s_res, len(y))
randomdataY = y + randomDelta
else:
randomDelta = np.array([np.random.normal(0.0, derr, 1)[0] for derr in yerr])
randomdataY = y + randomDelta
randomfit, _ = leastsq(errfunc, p0, args=(x, randomdataY), full_output=0)
ps.append(randomfit)
progress_bar.update(i)
progress_bar.finish()
mean_pfit, err_pfit = np.mean(ps, 0), n_sigma * np.std(ps, 0)
return mean_pfit, err_pfit
def plot_landau(source_scan_filename, tdc_hists, target_threshold, fit_range=(13000, 30000)):
with tb.open_file(tdc_hists, 'r') as in_file_h5:
x, count, count_error = in_file_h5.root.HistTdcCalibratedCondition_2[:]['charge'], in_file_h5.root.HistTdcCalibratedCondition_2[:]['count'], in_file_h5.root.HistTdcCalibratedCondition_2[:]['count_error']
charge = plsr_dac_to_charge(source_scan_filename, x)
target_threshold_charge = plsr_dac_to_charge(source_scan_filename, target_threshold)
plt.clf()
plt.grid()
x_fit_range = np.logical_and(charge > fit_range[0], charge < fit_range[1])
coeff, err = fit_landau_bootstrap(charge[x_fit_range], count[x_fit_range], p0=(7000, np.std(charge[x_fit_range]), 150, np.amax(count[x_fit_range])), yerr=count_error[x_fit_range], n_iterations=100)
plt.bar(charge, count, width=charge[1] - charge[0], color='blue', label='data')
plt.plot(charge[x_fit_range], landau.langau(charge[x_fit_range], *coeff), 'r-')
plt.plot([target_threshold_charge, target_threshold_charge], [plt.ylim()[0], plt.ylim()[1]], 'b--', linewidth=2, label='Threshold $%d$ e' % target_threshold_charge)
plt.plot([coeff[0], coeff[0]], [plt.ylim()[0], plt.ylim()[1]], 'r--', linewidth=2, label='MPV $%d\pm%d$ e' % (coeff[0], err[0]))
plt.title('Landau, -30 C')
plt.legend(loc=0)
plt.show()
if __name__ == "__main__":
# Settings
bias_voltage = -80
max_iv_voltage = -100
# Tuning
cref = 12
target_threshold = 34
target_charge = 300
target_tot = 9
# TDC measurements
plsr_dacs = [target_threshold, 40, 50, 60, 80, 100, 120, 150, 200, 250, 300, 350, 400, 500, 600, 700, 800] # PlsrDAC range for TDC calibration, should start at threshold
col_span = [55, 75] # [50, 78] # pixel column range to use in TDC scans
row_span = [125, 225] # [20, 315] # pixel row range to use in TDC scans
tdc_pixel = make_box_pixel_mask_from_col_row(column=[col_span[0], col_span[1]], row=[row_span[0], row_span[1]]) # edge pixel are not used in analysis
runmngr = RunManager('configuration.yaml')
# IV scan
runmngr.run_run(run=IVScan, run_conf={"voltages": np.arange(-1, max_iv_voltage - 1, -1), "max_voltage": max_iv_voltage, "bias_voltage": bias_voltage, "minimum_delay": 0.5})
# FE check and complete tuning
runmngr.run_run(run=RegisterTest)
runmngr.run_run(run=DigitalScan) # digital scan with std. settings
if runmngr.current_run.register.flavor == 'fei4a': # FEI4 A related config changes, Deactivate noisy edge columns if FE-I4A
runmngr.current_run.register.set_global_register_value("DisableColumnCnfg", 549755813891) # Disable noisy columns
runmngr.current_run.register.set_global_register_value("Cref", cref) # Set correct cref
runmngr.current_run.register.save_configuration(runmngr.current_run.register.configuration_file)
runmngr.run_run(run=DigitalScan) # repeat digital scan with specific settings
runmngr.run_run(run=Fei4Tuning, run_conf={'target_threshold': target_threshold, 'target_tot': target_tot, 'target_charge': target_charge}, catch_exception=False)
runmngr.run_run(run=AnalogScan, run_conf={'scan_parameters': [('PlsrDAC', target_charge)]})
runmngr.run_run(run=FastThresholdScan)
runmngr.run_run(run=StuckPixelTuning)
runmngr.run_run(run=NoiseOccupancyTuning, run_conf={'occupancy_limit': 1000, 'n_triggers': 10000000}) # high occupancy limit to work with strong Sr-90 source
runmngr.run_run(run=PlsrDacScan, run_conf={"colpr_address": range(25, 39)})
# TDC calibration
runmngr.run_run(run=HitOrCalibration, run_conf={
'reset_rx_on_error': True,
"pixels": (np.dstack(np.where(tdc_pixel == 1)) + 1).tolist()[0],
"scan_parameters": [('column', None),
('row', None),
('PlsrDAC', plsr_dacs)]
})
calibration_filename = runmngr.current_run.output_filename + '_calibration.h5'
# Scintillator trigger source scan
imon_mask = tdc_pixel ^ 1 # imon mask = not enable mask
runmngr.current_run.register.set_pixel_register_value("Imon", imon_mask) # remember: for the selection later index 0 == colum/row 1
runmngr.run_run(run=ExtTriggerScan, run_conf={'comment': 'Strong Sr-90 source',
'col_span': col_span,
'row_span': row_span,
"use_enable_mask_for_imon": False,
"enable_tdc": True,
"trigger_delay": 8,
"trigger_rate_limit": 1000,
"trigger_latency": 232,
"trig_count": 0,
"scan_timeout": 45 * 60,
"no_data_timeout": 20,
'reset_rx_on_error': True,
"max_triggers": 1000000000})
source_scan_filename = runmngr.current_run.output_filename + '.h5'
tdc_hists = analyze_tdc(source_scan_filename, calibration_filename, col_span, row_span)
plot_landau(source_scan_filename, tdc_hists, target_threshold)
| {
"content_hash": "3131a36a001f1afa47e73e2f7c123088",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 279,
"avg_line_length": 57.202185792349724,
"alnum_prop": 0.6129155521589607,
"repo_name": "SiLab-Bonn/pyBAR",
"id": "9f63e1378fc0fce699fb56a1aa480a82cf3e7b61",
"size": "10622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/example_tdc/example_tdc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "4666"
},
{
"name": "Python",
"bytes": "1117928"
},
{
"name": "SystemVerilog",
"bytes": "23473"
},
{
"name": "Tcl",
"bytes": "5086"
},
{
"name": "Verilog",
"bytes": "246285"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "barpolar.marker"
_path_str = "barpolar.marker.line"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorscale",
"colorsrc",
"reversescale",
"width",
"widthsrc",
}
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is true, the
default palette will be chosen according to whether numbers in
the `color` array are all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.line.color`) or the
bounds set in `marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color` is set to a numerical
array. Defaults to `false` when `marker.line.cmin` and
`marker.line.cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `marker.line.color` is set to a numerical array. Value
should have the same units as in `marker.line.color` and if
set, `marker.line.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be equidistant
to this point. Has an effect only if in `marker.line.color` is
set to a numerical array. Value should have the same units as
in `marker.line.color`. Has no effect when `marker.line.cauto`
is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `marker.line.color` is set to a numerical array. Value
should have the same units as in `marker.line.color` and if
set, `marker.line.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# color
# -----
@property
def color(self):
"""
Sets the marker.line color. It accepts either a specific color
or an array of numbers that are mapped to the colorscale
relative to the max and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A number that will be interpreted as a color
according to barpolar.marker.line.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in
`marker.line.color` is set to a numerical array. The colorscale
must be an array containing arrays mapping a normalized value
to an rgb, rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and highest (1) values
are required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the colorscale in
color space, use `marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,Reds,Viridis,
YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`marker.line.color` is set to a numerical array. If true,
`marker.line.cmin` will correspond to the last color in the
array and `marker.line.cmax` will correspond to the first
color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the lines bounding the marker points.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# widthsrc
# --------
@property
def widthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `width`.
The 'widthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["widthsrc"]
@widthsrc.setter
def widthsrc(self, val):
self["widthsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color` is set to a numerical array. In
case `colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in
`marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has an effect
only if in `marker.line.color` is set to a numerical
array. Defaults to `false` when `marker.line.cmin` and
`marker.line.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.line.color` is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmin` must
be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be
            equidistant to this point. Has an effect only if
`marker.line.color` is set to a numerical array. Value
should have the same units as in `marker.line.color`.
Has no effect when `marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
            only if `marker.line.color` is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmax` must
be set as well.
color
Sets the marker.line color. It accepts either a
specific color or an array of numbers that are mapped
to the colorscale relative to the max and min values of
the array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorscale
            Sets the colorscale. Has an effect only if
`marker.line.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.line.cmin` and `marker.line.cmax`.
            Alternatively, `colorscale` may be a palette name
            string of the following list: Blackbody, Bluered,
            Blues, Cividis, Earth, Electric, Greens, Greys, Hot,
            Jet, Picnic, Portland, Rainbow, RdBu, Reds, Viridis,
            YlGnBu, YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
reversescale
Reverses the color mapping if true. Has an effect only
            if `marker.line.color` is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorscale=None,
colorsrc=None,
reversescale=None,
width=None,
widthsrc=None,
**kwargs,
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.barpolar.marker.Line`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
            `marker.line.colorscale`. Has an effect only if
`marker.line.color` is set to a numerical array. In
case `colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in
`marker.line.color`) or the bounds set in
            `marker.line.cmin` and `marker.line.cmax`. Has an effect
            only if `marker.line.color` is set to a numerical
array. Defaults to `false` when `marker.line.cmin` and
`marker.line.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
            only if `marker.line.color` is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmin` must
be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be
            equidistant to this point. Has an effect only if
`marker.line.color` is set to a numerical array. Value
should have the same units as in `marker.line.color`.
Has no effect when `marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
            only if `marker.line.color` is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmax` must
be set as well.
color
Sets the marker.line color. It accepts either a
specific color or an array of numbers that are mapped
to the colorscale relative to the max and min values of
the array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorscale
            Sets the colorscale. Has an effect only if
`marker.line.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.line.cmin` and `marker.line.cmax`.
            Alternatively, `colorscale` may be a palette name
            string of the following list: Blackbody, Bluered,
            Blues, Cividis, Earth, Electric, Greens, Greys, Hot,
            Jet, Picnic, Portland, Rainbow, RdBu, Reds, Viridis,
            YlGnBu, YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
reversescale
Reverses the color mapping if true. Has an effect only
            if `marker.line.color` is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
Returns
-------
Line
"""
super(Line, self).__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.barpolar.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.barpolar.marker.Line`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
_v = autocolorscale if autocolorscale is not None else _v
if _v is not None:
self["autocolorscale"] = _v
_v = arg.pop("cauto", None)
_v = cauto if cauto is not None else _v
if _v is not None:
self["cauto"] = _v
_v = arg.pop("cmax", None)
_v = cmax if cmax is not None else _v
if _v is not None:
self["cmax"] = _v
_v = arg.pop("cmid", None)
_v = cmid if cmid is not None else _v
if _v is not None:
self["cmid"] = _v
_v = arg.pop("cmin", None)
_v = cmin if cmin is not None else _v
if _v is not None:
self["cmin"] = _v
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("coloraxis", None)
_v = coloraxis if coloraxis is not None else _v
if _v is not None:
self["coloraxis"] = _v
_v = arg.pop("colorscale", None)
_v = colorscale if colorscale is not None else _v
if _v is not None:
self["colorscale"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("reversescale", None)
_v = reversescale if reversescale is not None else _v
if _v is not None:
self["reversescale"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
_v = arg.pop("widthsrc", None)
_v = widthsrc if widthsrc is not None else _v
if _v is not None:
self["widthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
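# Hedged usage sketch (editor addition, not part of the generated module): a
# minimal construction of this Line object, assuming plotly is installed. The
# data values are arbitrary; "viridis_r" demonstrates the "_r" reversal
# convention documented in the `colorscale` property above.
def _demo_barpolar_marker_line():
    line = Line(color=[0.1, 0.5, 0.9], colorscale="viridis_r", width=2)
    # The same object is normally reached through the figure API, e.g.
    # `go.Barpolar(marker=dict(line=line))` with `plotly.graph_objects as go`.
    return line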
| {
"content_hash": "4ade4ed07457c3e1f05211092d9ff4ae",
"timestamp": "",
"source": "github",
"line_count": 658,
"max_line_length": 87,
"avg_line_length": 37.8951367781155,
"alnum_prop": 0.5664327250852216,
"repo_name": "plotly/plotly.py",
"id": "1fc6461876505d4027f5a4265d8ada980129c241",
"size": "24935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/barpolar/marker/_line.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""Positive-Semidefinite Kernel library utilities."""
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import tensorshape_util
__all__ = [
'mask_matrix',
'maybe_get_common_dtype',
'pad_shape_with_ones',
'pairwise_square_distance_matrix',
'pairwise_square_distance_tensor',
'sum_rightmost_ndims_preserving_shape',
]
def pad_shape_with_ones(x, ndims, start=-1):
"""Maybe add `ndims` ones to `x.shape` starting at `start`.
If `ndims` is zero, this is a no-op; otherwise, we will create and return a
new `Tensor` whose shape is that of `x` with `ndims` ones concatenated on the
right side. If the shape of `x` is known statically, the shape of the return
value will be as well.
Args:
x: The `Tensor` we'll return a reshaping of.
ndims: Python `integer` number of ones to pad onto `x.shape`.
start: Python `integer` specifying where to start padding with ones. Must
be a negative integer. For instance, a value of `-1` means to pad at the
end of the shape. Default value: `-1`.
Returns:
If `ndims` is zero, `x`; otherwise, a `Tensor` whose shape is that of `x`
with `ndims` ones concatenated on the right side. If possible, returns a
`Tensor` whose shape is known statically.
Raises:
ValueError: if `ndims` is not a Python `integer` greater than or equal to
zero.
"""
if not (isinstance(ndims, int) and ndims >= 0):
raise ValueError(
        '`ndims` must be a Python `integer` greater than or equal to zero. '
        'Got: {}'.format(ndims))
if not (isinstance(start, int) and start <= -1):
raise ValueError(
'`start` must be a Python `integer` less than zero. Got: {}'
.format(start))
if ndims == 0:
return x
x = tf.convert_to_tensor(value=x)
original_shape = x.shape
rank = ps.rank(x)
first_shape = ps.shape(x)[:rank + start + 1]
second_shape = ps.shape(x)[rank + start + 1:]
new_shape = ps.pad(first_shape, paddings=[[0, ndims]], constant_values=1)
new_shape = ps.concat([new_shape, second_shape], axis=0)
x = tf.reshape(x, new_shape)
if start == -1:
tensorshape_util.set_shape(
x, tensorshape_util.concatenate(original_shape, [1] * ndims))
elif tensorshape_util.rank(original_shape) is not None:
original_ndims = tensorshape_util.rank(original_shape)
new_shape = tensorshape_util.concatenate(
original_shape[:original_ndims + start + 1],
tensorshape_util.concatenate(
[1] * ndims,
original_shape[original_ndims + start + 1:]))
tensorshape_util.set_shape(x, new_shape)
return x
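# Hedged usage sketch (editor addition, not part of the original module):
# illustrates the padding behaviour described in the docstring above, assuming
# eager TF2 execution. The function name is illustrative only.
def _demo_pad_shape_with_ones():
  x = tf.zeros([2, 3])
  # Two ones appended on the right: result shape is [2, 3, 1, 1].
  right_padded = pad_shape_with_ones(x, ndims=2)
  # With start=-2, the ones are inserted before the last dimension: [2, 1, 1, 3].
  inner_padded = pad_shape_with_ones(x, ndims=2, start=-2)
  return right_padded, inner_padded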
def sum_rightmost_ndims_preserving_shape(x, ndims):
"""Return `Tensor` with right-most ndims summed.
Args:
x: the `Tensor` whose right-most `ndims` dimensions to sum
ndims: number of right-most dimensions to sum.
Returns:
A `Tensor` resulting from calling `reduce_sum` on the `ndims` right-most
dimensions. If the shape of `x` is statically known, the result will also
have statically known shape. Otherwise, the resulting shape will only be
known at runtime.
"""
x = tf.convert_to_tensor(x)
x_ndims = ps.rank(x)
return tf.reduce_sum(x, axis=ps.range(x_ndims - ndims, x_ndims))
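# Hedged sketch (editor addition): the rightmost `ndims` dimensions are summed
# while the leading (batch) shape is preserved; assumes eager execution.
def _demo_sum_rightmost_ndims():
  x = tf.ones([2, 3, 4])
  # Sums over the last two axes, giving shape [2] with every entry equal to 12.
  return sum_rightmost_ndims_preserving_shape(x, ndims=2)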
@tf.custom_gradient
def sqrt_with_finite_grads(x, name=None):
"""A sqrt function whose gradient at zero is very large but finite.
Args:
x: a `Tensor` whose sqrt is to be computed.
name: a Python `str` prefixed to all ops created by this function.
Default `None` (i.e., "sqrt_with_finite_grads").
Returns:
sqrt: the square root of `x`, with an overridden gradient at zero
grad: a gradient function, which is the same as sqrt's gradient everywhere
except at zero, where it is given a large finite value, instead of `inf`.
Raises:
TypeError: if `tf.convert_to_tensor(x)` is not a `float` type.
Often in kernel functions, we need to compute the L2 norm of the difference
between two vectors, `x` and `y`: `sqrt(sum_i((x_i - y_i) ** 2))`. In the
case where `x` and `y` are identical, e.g., on the diagonal of a kernel
matrix, we get `NaN`s when we take gradients with respect to the inputs. To
  see this, consider the forward pass:
```
[x_1 ... x_N] --> [x_1 ** 2 ... x_N ** 2] -->
(x_1 ** 2 + ... + x_N ** 2) --> sqrt((x_1 ** 2 + ... + x_N ** 2))
```
When we backprop through this forward pass, the `sqrt` yields an `inf` because
`grad_z(sqrt(z)) = 1 / (2 * sqrt(z))`. Continuing the backprop to the left, at
the `x ** 2` term, we pick up a `2 * x`, and when `x` is zero, we get
`0 * inf`, which is `NaN`.
We'd like to avoid these `NaN`s, since they infect the rest of the connected
computation graph. Practically, when two inputs to a kernel function are
equal, we are in one of two scenarios:
1. We are actually computing k(x, x), in which case norm(x - x) is
identically zero, independent of x. In this case, we'd like the
gradient to reflect this independence: it should be zero.
2. We are computing k(x, y), and x just *happens* to have the same value
as y. The gradient at such inputs is in fact ill-defined (there is a
cusp in the sqrt((x - y) ** 2) surface along the line x = y). There are,
however, an infinite number of sub-gradients, all of which are valid at
all such inputs. By symmetry, there is exactly one which is "special":
zero, and we elect to use that value here. In practice, having two
identical inputs to a kernel matrix is probably a pathological
situation to be avoided, but that is better resolved at a higher level
than this.
To avoid the infinite gradient at zero, we use tf.custom_gradient to redefine
the gradient at zero. We assign it to be a very large value, specifically
the sqrt of the max value of the floating point dtype of the input. We use
the sqrt (as opposed to just using the max floating point value) to avoid
potential overflow when combining this value with others downstream.
"""
with tf.name_scope(name or 'sqrt_with_finite_grads'):
x = tf.convert_to_tensor(value=x, name='x')
if not dtype_util.is_floating(x.dtype):
raise TypeError('Input `x` must be floating type.')
def grad(grad_ys):
large_float_like_x = np.sqrt(
np.finfo(dtype_util.as_numpy_dtype(x.dtype)).max)
safe_grads = tf.where(
tf.equal(x, 0), large_float_like_x, 0.5 * tf.math.rsqrt(x))
return grad_ys * safe_grads
return tf.sqrt(x), grad
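# Hedged sketch (editor addition): demonstrates that the overridden gradient is
# finite (and large) at zero instead of `inf`; assumes eager TF2 execution.
def _demo_sqrt_with_finite_grads():
  x = tf.constant([0., 1., 4.])
  with tf.GradientTape() as tape:
    tape.watch(x)
    y = sqrt_with_finite_grads(x)
  # grads[0] is sqrt(finfo.max) rather than inf; the rest match 1 / (2 sqrt(x)).
  return tape.gradient(y, x)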
def maybe_get_common_dtype(arg_list):
"""Return common dtype of arg_list, or None.
Args:
arg_list: an iterable of items which are either `None` or have a `dtype`
property.
Returns:
dtype: The common dtype of items in `arg_list`, or `None` if the list is
empty or all items are `None`.
"""
# Note that `all` defaults to `True` if `arg_list` is empty.
if all(a is None for a in arg_list):
return None
return dtype_util.common_dtype(arg_list, tf.float32)
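# Hedged sketch (editor addition): `None` entries are tolerated when computing
# the common dtype; the constant below is illustrative only.
def _demo_maybe_get_common_dtype():
  a = tf.constant([1., 2.], dtype=tf.float64)
  return maybe_get_common_dtype([a, None])  # tf.float64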
def pairwise_square_distance_matrix(x1, x2, feature_ndims):
"""Returns pairwise square distance between x1 and x2.
Given `x1` and `x2`, Tensors with shape `[..., N, D1, ... Dk]` and
`[..., M, D1, ... Dk]`, compute the pairwise distance matrix `a_ij` of shape
`[..., N, M]`, where each entry `a_ij` is the square of the euclidean norm of
`x1[..., i, ...] - x2[..., j, ...]`.
  The approach uses the following identity (written for the case k = 1):
```none
a_ij = sum_d (x1[i, d] - x2[j, d]) ** 2 =
sum_d x1[i, d] ** 2 + x2[j, d] ** 2 - 2 * x1[i, d] * x2[j, d]
```
The latter term can be written as a matmul between `x1` and `x2`.
This reduces the memory from the naive approach of computing the
squared difference of `x1` and `x2` by a factor of `prod_k D_k`.
  This comes at the cost of reduced numerical stability.
Args:
x1: Floating point `Tensor` with shape `B1 + [N] + [D1, ..., Dk]`,
where `B1` is a (possibly empty) batch shape.
x2: Floating point `Tensor` with shape `B2 + [M] + [D1, ..., Dk]`,
where `B2` is a (possibly empty) batch shape that broadcasts
with `B1`.
feature_ndims: The number of dimensions to consider for the euclidean
norm. This is `k` from above.
Returns:
`Tensor` of shape `[..., N, M]` representing the pairwise square
distance matrix.
"""
row_norm_x1 = sum_rightmost_ndims_preserving_shape(
tf.square(x1), feature_ndims)[..., tf.newaxis]
row_norm_x2 = sum_rightmost_ndims_preserving_shape(
tf.square(x2), feature_ndims)[..., tf.newaxis, :]
reshaped_x1 = tf.reshape(x1, ps.concat(
[ps.shape(x1)[:-feature_ndims], [
ps.reduce_prod(ps.shape(x1)[-feature_ndims:])]], axis=0))
reshaped_x2 = tf.reshape(x2, ps.concat(
[ps.shape(x2)[:-feature_ndims], [
ps.reduce_prod(ps.shape(x2)[-feature_ndims:])]], axis=0))
pairwise_sq = row_norm_x1 + row_norm_x2 - 2 * tf.linalg.matmul(
reshaped_x1, reshaped_x2, transpose_b=True)
return tf.clip_by_value(pairwise_sq, 0., np.inf)
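# Hedged sketch (editor addition): pairwise squared distances between two small
# point sets, following the shape convention in the docstring above; assumes
# eager execution and uses arbitrary random data.
def _demo_pairwise_square_distance_matrix():
  x1 = tf.random.normal([5, 3])  # N = 5 points, D1 = 3 features
  x2 = tf.random.normal([7, 3])  # M = 7 points, D1 = 3 features
  # Result has shape [5, 7]; entry [i, j] is ||x1[i] - x2[j]||^2.
  return pairwise_square_distance_matrix(x1, x2, feature_ndims=1)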
def pairwise_square_distance_tensor(
    x1, x2, feature_ndims, x1_example_ndims=1, x2_example_ndims=1):
  """Returns pairwise square distance between x1 and x2.
  This method is a generalization of `pairwise_square_distance_matrix`.
Given `x1` and `x2`, Tensors with shape `[..., N1, ... Nm, D1, ... Dk]` and
`[..., M1, ... Ml, D1, ... Dk]`, compute the pairwise distance tensor `A` of
shape `[..., N1, ... Nm, M1, ... Ml]`, where `m` is `x1_example_ndims` and
`l` is `x2_example_ndims`.
Args:
x1: Floating point `Tensor` with shape `B1 + E1 + [D1, ..., Dk]`,
where `B1` is a (possibly empty) batch shape, and `E1` is a list
of `x1_example_ndims` values.
    x2: Floating point `Tensor` with shape `B2 + E2 + [D1, ..., Dk]`,
      where `B2` is a (possibly empty) batch shape that broadcasts
      with `B1`, and `E2` is a list of `x2_example_ndims` values.
feature_ndims: The number of dimensions to consider for the euclidean
norm. This is `k` from above.
x1_example_ndims: Integer for number of example dimensions in `x1`. This is
`len(E1)`.
x2_example_ndims: Integer for number of example dimensions in `x2`. This is
`len(E2)`.
Returns:
`Tensor` of shape `bc(B1, B2) + E1 + E2` representing the pairwise square
distance tensor.
"""
# Collapse all the example dimensions and then expand after.
x1_shape = tf.shape(x1)
x1_example_shape = x1_shape[
-(feature_ndims + x1_example_ndims):-feature_ndims]
x2_shape = tf.shape(x2)
x2_example_shape = x2_shape[
-(feature_ndims + x2_example_ndims):-feature_ndims]
x1 = tf.reshape(x1, tf.concat(
[x1_shape[:-(feature_ndims + x1_example_ndims)],
[-1],
x1_shape[-feature_ndims:]], axis=0))
x2 = tf.reshape(x2, tf.concat(
[x2_shape[:-(feature_ndims + x2_example_ndims)],
[-1],
x2_shape[-feature_ndims:]], axis=0))
pairwise = pairwise_square_distance_matrix(
x1, x2, feature_ndims=feature_ndims)
# Now we need to undo the transformation.
return tf.reshape(pairwise, tf.concat([
tf.shape(pairwise)[:-2], x1_example_shape, x2_example_shape], axis=0))
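# Hedged sketch (editor addition): the tensor variant with two example
# dimensions on `x1` and one on `x2`; shapes follow the docstring above.
def _demo_pairwise_square_distance_tensor():
  x1 = tf.random.normal([2, 3, 4])  # E1 = [2, 3], feature dim D1 = 4
  x2 = tf.random.normal([5, 4])     # E2 = [5],    feature dim D1 = 4
  # Result has shape [2, 3, 5].
  return pairwise_square_distance_tensor(
      x1, x2, feature_ndims=1, x1_example_ndims=2, x2_example_ndims=1)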
def mask_matrix(x, is_missing=None):
  """Copies a matrix, replacing masked-out rows/cols with identity rows/cols.
Args:
x: A Tensor of shape `[..., n, n]`, representing a batch of n-by-n matrices.
is_missing: A boolean Tensor of shape `[..., n]`, representing a batch of
masks. If `is_missing` is None, `x` is returned.
Returns:
A Tensor of shape `[..., n, n]`, representing a batch of n-by-n matrices.
For each batch member `r`, element `r[i, j]` equals `eye(n)[i, j]` if
dimension `i` or `j` is True in the corresponding input mask. Otherwise,
`r[i, j]` equals the corresponding element from `x`.
"""
if is_missing is None:
return x
x = tf.convert_to_tensor(x)
is_missing = tf.convert_to_tensor(is_missing, dtype=tf.bool)
n = ps.dimension_size(x, -1)
return tf.where(is_missing[..., tf.newaxis] | is_missing[..., tf.newaxis, :],
tf.eye(n, dtype=x.dtype),
x)
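# Hedged sketch (editor addition): masked rows/columns are replaced by the
# corresponding rows/columns of the identity matrix; assumes eager execution.
def _demo_mask_matrix():
  x = 2. * tf.ones([3, 3])
  is_missing = tf.constant([False, True, False])
  # Row 1 and column 1 come from eye(3); every other entry keeps the value 2.
  return mask_matrix(x, is_missing)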
| {
"content_hash": "bd5126ba8d55da5a0185ad58ca5d5a9b",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 80,
"avg_line_length": 41.25167785234899,
"alnum_prop": 0.6494753111526885,
"repo_name": "tensorflow/probability",
"id": "5df436b56ea006d8d274fa588828e3702bef0ebf",
"size": "12971",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_probability/python/math/psd_kernels/internal/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "55552121"
},
{
"name": "Python",
"bytes": "17339674"
},
{
"name": "Shell",
"bytes": "24852"
},
{
"name": "Starlark",
"bytes": "663851"
}
],
"symlink_target": ""
} |
"""
Slack connections.
TODO: Replace all of this with the slackclient library?
"""
import json
import logging
import requests
import asyncio
import websockets
log = logging.getLogger(__name__)
def is_direct_channel(channel_id):
if channel_id is None:
return False
if not channel_id:
return False
if channel_id[0] == 'D':
return True
return False
class SlackUser(object):
def __init__(self, data, connection):
self.data = data
for attr, val in data.items():
setattr(self, attr, val)
self.connection = connection
# This gets populated when channels are initialized.
self.im_channel = None
@asyncio.coroutine
def send_message(self, text, **kwargs):
if self.im_channel is None:
raise Exception('Unable to send IM to user %s' % self.data['name'])
yield from self.im_channel.send_message(text, **kwargs)
class SlackChannel(object):
user_id = None
name = None
id = None
def __init__(self, data, connection):
self.data = data
for attr, val in data.items():
if attr == 'user':
attr = 'user_id'
setattr(self, attr, val)
self.connection = connection
if self.get_type() == 'im':
self.user = self.connection.find_user(self.user_id)
self.user.im_channel = self
def get_type(self):
for type_name in ('im', 'group', 'channel'):
if getattr(self, 'is_' + type_name, False):
return type_name
raise Exception("Invalid type for channel %s" % self.name)
@asyncio.coroutine
def send_message(self, text, **kwargs):
msg = {'type': 'message', 'channel': self.id, 'text': text}
msg.update(**kwargs)
yield from self.connection.send_event(msg)
class SlackMessage(object):
def __init__(self, data, connection):
self.data = data
self.text = data['text']
self.user = connection.find_user(data['user'])
class SlackRTMConnection(object):
data = None
users = {}
channels = {}
user_id = None
socket = None
def __init__(self, bot, token):
self.bot = bot
self.token = token
self.api_url = 'https://slack.com/api/'
self.event_loop = bot.event_loop
self.last_message_id = 1
def api_connect(self):
params = {'token': self.token}
log.info('retrieving RTM connection URL')
resp = requests.get(self.api_url + 'rtm.start', params)
assert resp.status_code == 200
data = resp.json()
self.data = data
# pprint(self.data['ims'])
        all_channels = data['channels'] + data['ims'] + data['groups']
        self.users = {user['id']: SlackUser(user, self) for user in data['users']}
        self.channels = {ch['id']: SlackChannel(ch, self) for ch in all_channels}
        self.user_id = data['self']['id']
return data['url']
def find_user(self, user_id):
return self.users.get(user_id)
def find_user_by_name(self, name):
for user in self.users.values():
if user.name == name:
return user
return None
def find_channel_by_name(self, name):
for ch in self.channels.values():
if ch.name == name:
return ch
return None
def find_channel(self, channel_id):
return self.channels.get(channel_id)
@asyncio.coroutine
def receive_event(self):
data = yield from self.socket.recv()
if data is None:
return None
return json.loads(data)
@asyncio.coroutine
def send_event(self, msg):
msg = msg.copy()
msg['id'] = self.last_message_id
self.last_message_id += 1
log.debug('Sending event: {}.'.format(json.dumps(msg)))
yield from self.socket.send(json.dumps(msg))
@asyncio.coroutine
def handle_message(self, msg):
if msg['user'] == self.data['self']['id']:
return
user = self.find_user(msg['user'])
if msg['channel'][0] == 'D':
# Direct message
if msg['text'].strip().lower() == 'ping':
                yield from user.send_message('pong')
@asyncio.coroutine
def handle_im_created(self, msg):
channel = SlackChannel(msg['channel'], self)
self.channels[channel.id] = channel
@asyncio.coroutine
def connect(self):
rtm_url = yield from self.event_loop.run_in_executor(None, self.api_connect)
log.info('connecting to %s' % rtm_url)
self.socket = yield from websockets.connect(rtm_url)
hello = yield from self.receive_event()
assert hello['type'] == 'hello'
@asyncio.coroutine
def poll(self):
while True:
event = yield from self.receive_event()
if event is None:
break
print(event)
# First handle Slack system events
event_type = event.get('type')
if event_type == 'im_created':
yield from self.handle_im_created(event)
# Then pass the event to the bot
yield from self.bot.handle_slack_event(event)
@asyncio.coroutine
def close(self):
yield from self.socket.close()
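# Hedged usage sketch (editor addition, not part of the original module): a
# minimal bot stub that drives the RTM connection. `_DemoBot` and the `token`
# argument are illustrative only; a real bot would implement richer event
# handling in `handle_slack_event`.
class _DemoBot(object):

    def __init__(self, event_loop):
        self.event_loop = event_loop

    @asyncio.coroutine
    def handle_slack_event(self, event):
        log.debug('unhandled event: %r', event)


def _demo_run(token):
    loop = asyncio.get_event_loop()
    connection = SlackRTMConnection(_DemoBot(loop), token)
    loop.run_until_complete(connection.connect())
    try:
        loop.run_until_complete(connection.poll())
    finally:
        loop.run_until_complete(connection.close())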
| {
"content_hash": "8b39f1545c4463f32903748afa3bab4d",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 86,
"avg_line_length": 28.629032258064516,
"alnum_prop": 0.5712676056338029,
"repo_name": "PythonSanSebastian/pyper_the_bot",
"id": "ec93bd0be11a14a6e7489cf29363595a38073706",
"size": "5325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helbot/slack.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1645"
},
{
"name": "Python",
"bytes": "125591"
},
{
"name": "Shell",
"bytes": "1867"
},
{
"name": "TeX",
"bytes": "10667"
}
],
"symlink_target": ""
} |
"""Part of the Keras training engine related to plain array data.
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import errors
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils.generic_utils import make_batches
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.keras.utils.generic_utils import slice_arrays
from tensorflow.python.platform import tf_logging as logging
try:
from scipy.sparse import issparse # pylint: disable=g-import-not-at-top
except ImportError:
issparse = None
def fit_loop(model,
inputs,
targets,
sample_weights=None,
batch_size=None,
epochs=100,
verbose=1,
callbacks=None,
val_inputs=None,
val_targets=None,
val_sample_weights=None,
shuffle=True,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None):
"""Abstract fit function for arrays of data.
Arguments:
model: Keras Model instance.
inputs: List of input arrays.
targets: List of target arrays.
sample_weights: Optional list of sample weight arrays.
batch_size: Integer batch size or None if unknown.
epochs: Number of times to iterate over the data
verbose: Verbosity mode, 0, 1 or 2
callbacks: List of callbacks to be called during training
val_inputs: List of input arrays.
val_targets: List of target arrays.
val_sample_weights: Optional list of sample weight arrays.
      shuffle: Whether to shuffle the data at the beginning of each epoch.
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
steps_per_epoch: Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. Ignored with the default value of `None`.
validation_steps: Number of steps to run validation for
(only if doing validation from data tensors).
Ignored with the default value of `None`.
Returns:
`History` object.
Raises:
ValueError: in case of invalid arguments.
"""
model._make_train_function()
f = model.train_function
sample_weights = sample_weights or []
val_sample_weights = val_sample_weights or []
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = inputs + targets + sample_weights + [1]
else:
ins = inputs + targets + sample_weights
do_validation = False
if val_inputs:
do_validation = True
if (steps_per_epoch is None and verbose and inputs and
hasattr(inputs[0], 'shape') and hasattr(val_inputs[0], 'shape')):
print('Train on %d samples, validate on %d samples' %
(inputs[0].shape[0], val_inputs[0].shape[0]))
if validation_steps:
do_validation = True
if steps_per_epoch is None:
raise ValueError('Can only use `validation_steps` '
'when doing step-wise '
'training, i.e. `steps_per_epoch` '
'must be set.')
num_train_samples = training_utils.check_num_samples(
ins, batch_size, steps_per_epoch, 'steps_per_epoch')
count_mode = 'steps' if steps_per_epoch else 'samples'
callbacks = cbks.configure_callbacks(
callbacks,
model,
do_validation=do_validation,
val_inputs=val_inputs,
val_targets=val_targets,
val_sample_weights=val_sample_weights,
batch_size=batch_size,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
samples=num_train_samples,
validation_steps=validation_steps,
verbose=verbose,
count_mode=count_mode)
if num_train_samples is not None:
index_array = np.arange(num_train_samples)
# To prevent a slowdown, we find beforehand the arrays that need conversion.
feed = model._feed_inputs + model._feed_targets + model._feed_sample_weights
indices_for_conversion_to_dense = []
for i in range(len(feed)):
if issparse is not None and issparse(ins[i]) and not K.is_sparse(feed[i]):
indices_for_conversion_to_dense.append(i)
callbacks.on_train_begin()
for epoch in range(initial_epoch, epochs):
# Reset stateful metrics
for m in model.stateful_metric_functions:
m.reset_states()
# Update callbacks
callbacks.on_epoch_begin(epoch)
epoch_logs = {}
if steps_per_epoch is not None:
# Step-wise fit loop.
for step_index in range(steps_per_epoch):
batch_logs = {'batch': step_index, 'size': 1}
callbacks.on_batch_begin(step_index, batch_logs)
try:
outs = f(ins)
except errors.OutOfRangeError:
logging.warning('Your dataset iterator ran out of data; '
'interrupting training. Make sure that your dataset '
'can generate at least `steps_per_epoch * epochs` '
                          'batches (in this case, %d batches). You may need to '
                          'use the repeat() function when building your '
                          'dataset.' %
                          (steps_per_epoch * epochs))
break
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(model.metrics_names, outs):
batch_logs[l] = o
callbacks.on_batch_end(step_index, batch_logs)
if callbacks.model.stop_training:
break
if do_validation:
val_outs = test_loop(
model,
val_inputs,
val_targets,
sample_weights=val_sample_weights,
steps=validation_steps,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(model.metrics_names, val_outs):
epoch_logs['val_' + l] = o
else:
# Sample-wise fit loop.
if shuffle == 'batch':
index_array = training_utils.batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = make_batches(num_train_samples, batch_size)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
if isinstance(ins[-1], int):
# Do not slice the training phase flag.
ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_arrays(ins, batch_ids)
except TypeError:
raise TypeError('TypeError while preparing batch. '
'If using HDF5 input data, '
'pass shuffle="batch".')
batch_logs = {}
batch_logs['batch'] = batch_index
batch_logs['size'] = len(batch_ids)
callbacks.on_batch_begin(batch_index, batch_logs)
for i in indices_for_conversion_to_dense:
ins_batch[i] = ins_batch[i].toarray()
outs = f(ins_batch)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(model.metrics_names, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
if callbacks.model.stop_training:
break
if batch_index == len(batches) - 1: # Last batch.
if do_validation:
val_outs = test_loop(
model,
val_inputs,
val_targets,
sample_weights=val_sample_weights,
batch_size=batch_size,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(model.metrics_names, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if callbacks.model.stop_training:
break
callbacks.on_train_end()
return model.history
def predict_loop(model, inputs, batch_size=32, verbose=0, steps=None):
"""Abstract method to loop over some data in batches.
Arguments:
model: Keras Model instance.
inputs: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
steps: Total number of steps (batches of samples)
before declaring `_predict_loop` finished.
Ignored with the default value of `None`.
Returns:
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
"""
model._make_predict_function()
f = model.predict_function
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = inputs + [0]
else:
ins = inputs
num_samples = training_utils.check_num_samples(
inputs, batch_size, steps, 'steps')
if verbose == 1:
if steps is not None:
progbar = Progbar(target=steps)
else:
progbar = Progbar(target=num_samples)
indices_for_conversion_to_dense = []
for i in range(len(model._feed_inputs)):
if (issparse is not None and issparse(inputs[i]) and
not K.is_sparse(model._feed_inputs[i])):
indices_for_conversion_to_dense.append(i)
if steps is not None:
# Step-based predictions.
# Since we do not know how many samples
# we will see, we cannot pre-allocate
# the returned Numpy arrays.
# Instead, we store one array per batch seen
# and concatenate them upon returning.
unconcatenated_outs = []
for step in range(steps):
batch_outs = f(ins)
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
if step == 0:
for batch_out in batch_outs:
unconcatenated_outs.append([])
for i, batch_out in enumerate(batch_outs):
unconcatenated_outs[i].append(batch_out)
if verbose == 1:
progbar.update(step + 1)
if len(unconcatenated_outs) == 1:
return np.concatenate(unconcatenated_outs[0], axis=0)
return [
np.concatenate(unconcatenated_outs[i], axis=0)
for i in range(len(unconcatenated_outs))
]
else:
# Sample-based predictions.
outs = []
batches = make_batches(num_samples, batch_size)
index_array = np.arange(num_samples)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if ins and isinstance(ins[-1], int):
# Do not slice the training phase flag.
ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_arrays(ins, batch_ids)
for i in indices_for_conversion_to_dense:
ins_batch[i] = ins_batch[i].toarray()
batch_outs = f(ins_batch)
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
if batch_index == 0:
# Pre-allocate the results arrays.
for batch_out in batch_outs:
shape = (num_samples,) + batch_out.shape[1:]
outs.append(np.zeros(shape, dtype=batch_out.dtype))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_start:batch_end] = batch_out
if verbose == 1:
progbar.update(batch_end)
if len(outs) == 1:
return outs[0]
return outs
def test_loop(model,
inputs,
targets,
sample_weights=None,
batch_size=None,
verbose=0,
steps=None):
"""Abstract method to loop over some data in batches.
Arguments:
model: Keras Model instance.
inputs: List of input arrays.
targets: List of target arrays.
sample_weights: Optional list of sample weight arrays.
batch_size: integer batch size or `None`.
verbose: verbosity mode.
steps: Total number of steps (batches of samples)
before declaring predictions finished.
Ignored with the default value of `None`.
Returns:
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
model._make_test_function()
f = model.test_function
sample_weights = sample_weights or []
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = inputs + targets + sample_weights + [0]
else:
ins = inputs + targets + sample_weights
if hasattr(model, 'metrics'):
for m in model.stateful_metric_functions:
m.reset_states()
stateful_metric_indices = [
i for i, name in enumerate(model.metrics_names)
if str(name) in model.stateful_metric_names
]
else:
stateful_metric_indices = []
num_samples = training_utils.check_num_samples(
ins, batch_size, steps, 'steps')
outs = []
if verbose == 1:
if steps is not None:
progbar = Progbar(target=steps)
else:
progbar = Progbar(target=num_samples)
# To prevent a slowdown, we find beforehand the arrays that need conversion.
feed = model._feed_inputs + model._feed_targets + model._feed_sample_weights
indices_for_conversion_to_dense = []
for i in range(len(feed)):
if issparse is not None and issparse(ins[i]) and not K.is_sparse(feed[i]):
indices_for_conversion_to_dense.append(i)
if steps is not None:
for step in range(steps):
batch_outs = f(ins)
if isinstance(batch_outs, list):
if step == 0:
for _ in enumerate(batch_outs):
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
if i in stateful_metric_indices:
outs[i] = batch_out
else:
outs[i] += batch_out
else:
if step == 0:
outs.append(0.)
outs[0] += batch_outs
if verbose == 1:
progbar.update(step + 1)
for i in range(len(outs)):
if i not in stateful_metric_indices:
outs[i] /= steps
else:
batches = make_batches(num_samples, batch_size)
index_array = np.arange(num_samples)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if isinstance(ins[-1], int):
# Do not slice the training phase flag.
ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_arrays(ins, batch_ids)
for i in indices_for_conversion_to_dense:
ins_batch[i] = ins_batch[i].toarray()
batch_outs = f(ins_batch)
if isinstance(batch_outs, list):
if batch_index == 0:
outs.extend([0.] * len(batch_outs))
for i, batch_out in enumerate(batch_outs):
if i in stateful_metric_indices:
outs[i] = batch_out
else:
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i in range(len(outs)):
if i not in stateful_metric_indices:
outs[i] /= num_samples
if len(outs) == 1:
return outs[0]
return outs
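# Hedged sketch (editor addition, not part of the original module): the three
# loops above share the same sample-wise batching pattern; this standalone
# illustration of `make_batches` and `slice_arrays` assumes NumPy inputs and is
# not tied to any particular model.
def _demo_batching(num_samples=10, batch_size=4):
  index_array = np.arange(num_samples)
  ins = [np.random.rand(num_samples, 3)]
  batches = make_batches(num_samples, batch_size)  # [(0, 4), (4, 8), (8, 10)]
  sliced = []
  for batch_start, batch_end in batches:
    batch_ids = index_array[batch_start:batch_end]
    # Each slice has shape (len(batch_ids), 3); the final batch may be smaller.
    sliced.append(slice_arrays(ins, batch_ids))
  return batches, sliced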
| {
"content_hash": "9f51a0e3f0a526a5b6dfb85e7321be44",
"timestamp": "",
"source": "github",
"line_count": 443,
"max_line_length": 79,
"avg_line_length": 35.25056433408578,
"alnum_prop": 0.6141777663934426,
"repo_name": "ZhangXinNan/tensorflow",
"id": "e2c458c65f27c5802acd9186e9bcedd4062e5a2a",
"size": "16305",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/engine/training_arrays.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "327005"
},
{
"name": "C#",
"bytes": "8215"
},
{
"name": "C++",
"bytes": "46648068"
},
{
"name": "CMake",
"bytes": "206720"
},
{
"name": "Dockerfile",
"bytes": "6978"
},
{
"name": "Go",
"bytes": "1210133"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "830576"
},
{
"name": "Jupyter Notebook",
"bytes": "2632421"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "51309"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40046802"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "455624"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |