repo_name (string, 6-61 chars) | path (string, 4-230 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | text (string, 1.01k-850k chars) | license (15 classes) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
naturalness/sensibility | sensibility/__main__.py | 1 | 3308 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright 2017 Eddie Antonio Santos <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Allows one to set the language prior to running any of the scripts:
Usage:
sensibility [-l LANGUAGE] <command> [<args>]
"""
import os
import sys
from pathlib import Path
from types import SimpleNamespace
from typing import Dict, List, Tuple
from sensibility._paths import REPOSITORY_ROOT
bin_dir = REPOSITORY_ROOT / 'bin'
def main() -> None:
assert bin_dir.is_dir()
args = parse_args()
# Set up the environment
env: Dict[str, str] = {}
env.update(os.environ)
# Set the language if defined.
if args.language is not None:
env.update(SENSIBILITY_LANGUAGE=args.language)
if args.subcommand:
run_subcommand(args.subcommand, env)
else:
list_commands()
sys.exit(-1)
def run_subcommand(command, env) -> None:
bin, args = get_bin_and_argv(command)
if not bin.exists():
usage_error("Unknown executable:", bin)
os.execve(str(bin.absolute()), args, env)
def list_commands() -> None:
print("Please specify a subcommand:\n", file=sys.stderr)
for bin in bin_dir.rglob('*'):
if bin.is_dir() or not is_executable(bin):
continue
bin = bin.relative_to(bin_dir)
subcommand = ' '.join(bin.parts)
print(f"\t{subcommand}", file=sys.stderr)
def get_bin_and_argv(command: List[str]) -> Tuple[Path, List[str]]:
"""
Returns the absolute path to the binary, AND the argument vector,
including argv[0] (the command name).
"""
first_comp, = command[:1]
# XXX: Only supports one-level subcommands
if (bin_dir / first_comp).is_dir():
return bin_dir / first_comp / command[1], command[1:]
else:
return bin_dir / first_comp, command
def is_executable(path: Path) -> bool:
# access() is discouraged for this kind of check (race-prone), but we're using it anyway!
return os.access(path, os.X_OK)
def parse_args(argv=sys.argv):
"""
Roll my own parser because argparse will swallow up arguments that don't
belong to it.
"""
argv = argv[1:]
args = SimpleNamespace()
args.language = None
args.subcommand = None
# Parse options one by one.
while argv:
arg = argv.pop(0)
if arg in ('-l', '--language'):
args.language = argv.pop(0)
elif arg.startswith('--language='):
_, args.language = arg.split('=', 1)
elif arg.startswith('-'):
usage_error(f"Unknown argument {arg!r}")
else:
args.subcommand = [arg] + argv[:]
break
return args
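# Illustrative example (not part of the original module): given
#   parse_args(['sensibility', '-l', 'python', 'train', '--fold', '0'])
# the result has args.language == 'python' and
# args.subcommand == ['train', '--fold', '0']; everything after the first
# non-option token is passed through untouched to the subcommand.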
def usage_error(*args):
print(f"{sys.argv[0]}:", *args, file=sys.stderr)
sys.exit(2)
if __name__ == '__main__':
main()
| apache-2.0 | -7,140,425,813,645,337,000 | 25.894309 | 75 | 0.635127 | false | 3.564655 | false | false | false |
lyft/incubator-airflow | airflow/providers/google/cloud/example_dags/example_automl_video_intelligence_classification.py | 5 | 3497 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that uses Google AutoML services.
"""
import os
from airflow import models
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from airflow.providers.google.cloud.operators.automl import (
AutoMLCreateDatasetOperator, AutoMLDeleteDatasetOperator, AutoMLDeleteModelOperator,
AutoMLImportDataOperator, AutoMLTrainModelOperator,
)
from airflow.utils.dates import days_ago
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "your-project-id")
GCP_AUTOML_LOCATION = os.environ.get("GCP_AUTOML_LOCATION", "us-central1")
GCP_AUTOML_VIDEO_BUCKET = os.environ.get(
"GCP_AUTOML_VIDEO_BUCKET", "gs://automl-video-demo-data/hmdb_split1.csv"
)
# Example values
DATASET_ID = "VCN123455678"
# Example model
MODEL = {
"display_name": "auto_model_1",
"dataset_id": DATASET_ID,
"video_classification_model_metadata": {},
}
# Example dataset
DATASET = {
"display_name": "test_video_dataset",
"video_classification_dataset_metadata": {},
}
IMPORT_INPUT_CONFIG = {"gcs_source": {"input_uris": [GCP_AUTOML_VIDEO_BUCKET]}}
default_args = {"start_date": days_ago(1)}
extract_object_id = CloudAutoMLHook.extract_object_id
# Example DAG for AutoML Video Intelligence Classification
with models.DAG(
"example_automl_video",
default_args=default_args,
schedule_interval=None, # Override to match your needs
user_defined_macros={"extract_object_id": extract_object_id},
tags=['example'],
) as example_dag:
create_dataset_task = AutoMLCreateDatasetOperator(
task_id="create_dataset_task", dataset=DATASET, location=GCP_AUTOML_LOCATION
)
dataset_id = (
'{{ task_instance.xcom_pull("create_dataset_task", key="dataset_id") }}'
)
import_dataset_task = AutoMLImportDataOperator(
task_id="import_dataset_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
input_config=IMPORT_INPUT_CONFIG,
)
MODEL["dataset_id"] = dataset_id
create_model = AutoMLTrainModelOperator(
task_id="create_model", model=MODEL, location=GCP_AUTOML_LOCATION
)
model_id = "{{ task_instance.xcom_pull('create_model', key='model_id') }}"
delete_model_task = AutoMLDeleteModelOperator(
task_id="delete_model_task",
model_id=model_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
delete_datasets_task = AutoMLDeleteDatasetOperator(
task_id="delete_datasets_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
create_dataset_task >> import_dataset_task >> create_model >> \
delete_model_task >> delete_datasets_task
| apache-2.0 | -2,503,223,521,123,196,000 | 31.990566 | 88 | 0.711467 | false | 3.511044 | false | false | false |
Mchakravartula/rockstor-core | src/rockstor/cli/iscsi_console.py | 6 | 1777 | """
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from base_console import BaseConsole
from share_iscsi_console import ShareIscsiConsole
from rest_util import api_call
class IscsiConsole(BaseConsole):
def __init__(self, prompt):
BaseConsole.__init__(self)
self.prompt = prompt + ' Iscsi>'
self.url = BaseConsole.url + 'sm/services/iscsi/'
def do_status(self, args):
iscsi_info = api_call(self.url)
print iscsi_info
def put_wrapper(self, args, command):
input_data = {'command': command,}
iscsi_info = api_call(self.url, data=input_data, calltype='put')
print iscsi_info
def do_start(self, args):
return self.put_wrapper(args, 'start')
def do_stop(self, args):
return self.put_wrapper(args, 'stop')
def do_share(self, args):
input_share = args.split()
if (len(input_share) > 0):
si_console = ShareIscsiConsole(input_share[0])
if (len(input_share) > 1):
si_console.onecmd(' '.join(input_share[1:]))
else:
si_console.cmdloop()
| gpl-3.0 | 1,379,702,315,604,901,400 | 31.907407 | 72 | 0.667417 | false | 3.709812 | false | false | false |
vincentbernat/QCss-3 | qcss3/collector/database.py | 1 | 7154 | """
Database dumping of in-memory datastore for QCss3
This module writes to database the content of a memory datastore using
adapters. For example, if we want to write an object representing an a
load balancer (implementing C{ILoadBalancer} interface), we will use
C{IDatabaseWriter(lb).write(txn)} where C{lb} is the load balancer and
C{txn} a transaction to use.
"""
from zope.interface import Interface, implements
from twisted.python import components
from qcss3.collector.datastore import ILoadBalancer, IVirtualServer, IRealServer, ISorryServer
class IDatabaseWriter(Interface):
"""Interface to write an entity to the database"""
def write(txn, id=None):
"""
Dump the current entity to database using the given transaction.
@param txn: transaction to use to dump to the database
@param id: unique id to use for the entity (if needed)
"""
class ActionWriterMixIn:
def write_actions(self, txn, actions, lb, vs=None, rs=None):
"""Write actions to `action' table.
@param txn: transaction to use to write actions to database
@param actions: actions to write
@param lb: loadbalancer
@param vs: virtual server
@param rs: real server
"""
txn.execute("DELETE FROM action WHERE lb=%(lb)s AND vs=%(vs)s AND rs=%(rs)s",
{'lb': lb, 'vs': vs, 'rs': rs})
for key in actions:
txn.execute("INSERT INTO action VALUES "
"(%(lb)s, %(vs)s, %(rs)s, %(key)s, %(value)s)",
{ 'lb': lb, 'vs': vs, 'rs': rs, 'key': key,
'value': actions[key] })
class LoadBalancerWriter(ActionWriterMixIn):
implements(IDatabaseWriter)
def __init__(self, loadbalancer):
self.loadbalancer = loadbalancer
def write(self, txn, id=None):
"""Dump the loadbalancer to the database"""
# Remove existing information
txn.execute("UPDATE loadbalancer SET deleted=CURRENT_TIMESTAMP "
"WHERE name=%(name)s AND deleted='infinity'",
{'name': self.loadbalancer.name})
# Insert new information
txn.execute("INSERT INTO loadbalancer "
"(name, type, description) VALUES "
"(%(name)s, %(kind)s, %(description)s)",
{ 'name': self.loadbalancer.name,
'kind': self.loadbalancer.kind,
'description': self.loadbalancer.description })
# Then write virtual servers information
virtualservers = self.loadbalancer.virtualservers
for virtualserver in virtualservers:
IDatabaseWriter(
virtualservers[virtualserver]).write(txn,
(self.loadbalancer.name,
virtualserver))
self.write_actions(txn, self.loadbalancer.actions, self.loadbalancer.name)
class VirtualServerWriter(ActionWriterMixIn):
implements(IDatabaseWriter)
def __init__(self, virtualserver):
self.virtualserver = virtualserver
def write(self, txn, id=None):
"""
Dump the virtual server to the database.
@param id: (name of loadbalancer, ID of the virtual server)
"""
lb, vs = id
# Remove existing information
txn.execute("UPDATE virtualserver SET deleted=CURRENT_TIMESTAMP "
"WHERE lb=%(lb)s AND vs=%(vs)s AND deleted='infinity'",
{'lb': lb, 'vs': vs})
# Insert new information
txn.execute("INSERT INTO virtualserver "
"(lb, vs, name, vip, protocol, mode) VALUES "
"(%(lb)s, %(vs)s, %(name)s, %(vip)s, %(protocol)s, %(mode)s)",
{'lb': lb, 'vs': vs,
'name': self.virtualserver.name,
'vip': self.virtualserver.vip,
'protocol': self.virtualserver.protocol,
'mode': self.virtualserver.mode})
# Insert extra information
for key in self.virtualserver.extra:
txn.execute("INSERT INTO virtualserver_extra "
"(lb, vs, key, value) VALUES "
"(%(lb)s, %(vs)s, %(key)s, %(value)s)",
{ 'lb': lb, 'vs': vs, 'key': key,
'value': self.virtualserver.extra[key] })
# Insert real servers
realservers = self.virtualserver.realservers
for realserver in realservers:
IDatabaseWriter(
realservers[realserver]).write(txn,
(lb, vs, realserver))
self.write_actions(txn, self.virtualserver.actions, lb, vs)
class RealOrSorryServerWriter(ActionWriterMixIn):
implements(IDatabaseWriter)
def __init__(self, realserver):
self.realserver = realserver
def write(self, txn, id=None):
"""
Dump the real/sorry server to the database.
@param id: (name of load balancer,
ID of the virtualserver, ID of the real server)
"""
lb, vs, rs = id
# Remove existing information
txn.execute("UPDATE realserver SET deleted=CURRENT_TIMESTAMP "
"WHERE lb=%(lb)s AND vs=%(vs)s AND rs=%(rs)s "
"AND deleted='infinity'",
{'lb': lb, 'vs': vs, 'rs': rs})
# Insert new information
weight = None
if IRealServer.providedBy(self.realserver):
weight = self.realserver.weight
txn.execute("INSERT INTO realserver "
"(lb, vs, rs, name, rip, port, protocol, weight, rstate, sorry) "
"VALUES "
"(%(lb)s, %(vs)s, %(rs)s, %(name)s, %(rip)s, "
"%(port)s, %(protocol)s, %(weight)s, %(rstate)s, %(sorry)s)",
{'lb': lb, 'vs': vs, 'rs': rs,
'name': self.realserver.name,
'rip': self.realserver.rip,
'port': self.realserver.rport,
'protocol': self.realserver.protocol,
'weight': weight,
'rstate': self.realserver.state,
'sorry': ISorryServer.providedBy(self.realserver) })
# Insert extra information
for key in self.realserver.extra:
txn.execute("INSERT INTO realserver_extra VALUES "
"(%(lb)s, %(vs)s, %(rs)s, %(key)s, %(value)s)",
{ 'lb': lb, 'vs': vs, 'rs': rs, 'key': key,
'value': self.realserver.extra[key] })
self.write_actions(txn, self.realserver.actions, lb, vs, rs)
components.registerAdapter(
LoadBalancerWriter,
ILoadBalancer,
IDatabaseWriter)
components.registerAdapter(
VirtualServerWriter,
IVirtualServer,
IDatabaseWriter)
components.registerAdapter(
RealOrSorryServerWriter,
IRealServer,
IDatabaseWriter)
components.registerAdapter(
RealOrSorryServerWriter,
ISorryServer,
IDatabaseWriter)
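# Minimal usage sketch (illustrative; assumes a populated datastore entity
# `lb` implementing ILoadBalancer and a database transaction `txn`):
#   writer = IDatabaseWriter(lb)   # adapter lookup via the registrations above
#   writer.write(txn)              # recursively dumps the LB, virtual and real servers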
| gpl-3.0 | -3,035,962,816,937,538,000 | 40.114943 | 94 | 0.558708 | false | 4.283832 | false | false | false |
waipu/bakawipe | lib/beon/regexp.py | 1 | 2519 | ''' '''
# targetregexp = r'\/{forum}\/(\d*)-(\d*)\-[\w|\-]*([vb]-?i*-?r-?t|se(?:x|ks|kas)|eb(?:at|i)|t-?r-?a-?h|(?:-ja|ischu)-(?:m\-|j\-|zh\-|devushk|par(?:en|nja)|hozja)|ots[o|\-]s|rolevit|-sis[\-|e][kc]|v(?:-pop|du(?:i\-|va))|rabyn|droch|[ob]?liz(?:at\-|va[it])|hentai|shlju(?:hu|shk)|kisk[au]-(?:vsja|mokr)|do-orgazm|shali|min-?et|nakaz(?:iva|hi|at)|(?:parni|devushki)-kto-hochet|hoch(?:u|esh)-tak-)[\w|\-]*\-read\.shtml'
s_id = r'(\d+)\-(\d+)\-[\w\-]*(?:\-read)?\.[sz]?html'
s_topic = r'(\d+-\d+\-[\w|\-]*(?:\-read)?\.[sz]?html)'
s_uni = r'((\d+)-(\d+)\-[\w|\-]*(?:\-read)?\.[sz]?html)'
ud_prefix = r'http:\/\/(?:(\w+)\.)?(\w+\.\w+)\/(?:[\w._-]+\/)?'
udf_prefix = r'http:\/\/(?:(\w+)\.)?(\w+\.\w+)\/(?:([\w._-]+)\/)?'
sub_prefix = r'http:\/\/(?:{0}\.)?{1}\/(?:{2}\/)?'
ds_u_prefix = r'http:\/\/(?:(\w+)\.)?{0}\/(?:[\w._-]+\/)?'
f_udi = ud_prefix + s_id # -> (user, domain, (id1, id2))
f_udfi = udf_prefix + s_id # -> (user, domain, forum, (id1, id2))
f_udft = udf_prefix + s_topic # -> (user, domain, forum, topic)
f_udfti = udf_prefix + s_uni # -> (user, domain, forum, topic, (id1, id2))
f_sub_id = sub_prefix + s_id # -> (id1, id2)
f_sub_topic = sub_prefix + s_topic # -> (topic)
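# Illustrative example (assumes `import re`; the URL below is made up):
#   re.match(f_udfti, 'http://user.beon.ru/forum/123-456-some-topic-read.html').groups()
#   -> ('user', 'beon.ru', 'forum', '123-456-some-topic-read.html', '123', '456')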
picregexp = r'(http\:\/\/i\d+\.{0}\/\d+\/\d+\/\d+\/\d+\/\d+\/Picture\d*\.jpe?g)'
chashregexp = r'value\=\'?(\w+)\'?.*?name\=\'?cahash\'?' # Regexp for captcha hash.
wait5min_register = r'Пожалуйста, подождите 5 минут и попробуйте зарегистрировать пользователя снова.'  # "Please wait 5 minutes and try to register the user again."
wait5min_uni = r'<font color=ff0000>' # Stupid one, isn't it?
aregexp = r'http:\/\/a(\d)\.{0}\/i\/captcha\/' # Regexp for auth server.
var_login = r'var user_login = \'(\w*)\';' # Parse current login from js var.
imgregexp = r'\[image-\w*-\w*-http:\/\/a{0}.{1}\/i\/temp\/\d*\/[\w.]*\]' # wtf is this?
captchaurl = 'http://a{0}.{1}/i/captcha/{2}.png'
hashinmail = r'http:\/\/{0}\/p\/validate_user_email\.cgi\?p(?:=|=)(\w+)'
show_link_options = r"showLinksOptions\(this,\s*?'\w+?',\s*?'(\d+?)',\s*?\d,\s*?\d,\s*?\d\)"
img_codes = r'(\[image-original-none-[\w:\.\/]+?\])'
deobfuscate_html = r'<script.*?>.*?dеobfuscate_html\s*?\(.*?\).*?<\/script>'
r302_found = r'302 Found'
r502_bad_gateway = r'502 Bad Gateway'
class getposts:
addcomment = r"AddComment\s*?\(\s*?[\'\"](.+?)[\'\"]\s*?\)\s*?;"
setlastcomment = r"setLastComment\s*?\(\s*?[\'\"]?(\d+?)[\'\"]?\s*?\)\s*?;"
cookie = r"document.cookie\s*?=\s*?[\'\"](.*)[\'\"]\s*?;"
runchecker = r"runChecker\s*?\(\s*?\)\s*?;"
| gpl-3.0 | -8,834,536,637,641,971,000 | 60.25 | 416 | 0.509796 | false | 2.106621 | false | false | false |
sgongar/Herschel-PACS-Toolbox-Red-Leak | pipeline_tests/case_FFRange_Sel.py | 1 | 8434 | # coding = utf-8
# specflatfielding range
# excludeleak = false
# range = [198.0, 203.0]
print "Processing second case"
lineSpec = isLineSpec(slicedCubes)
shortRange = isShortRange(obs)
slicedFrames = specFlatFieldRange(slicedFrames, useSplinesModel=True,
excludeLeaks=False, calTree=calTree,
copy=copyCube, selectedRange=[198.0, 203.0],
wlIntervalKnots={1:2.0, 2:3.0, 3:2.0})
copyCube = False
maskNotFF = True
slicedCubes = specFrames2PacsCube(slicedFrames)
slicedCubes = centerRaDecMetaData(slicedCubes)
# Building the wavelength grids for each slice
# Used cal file: wavelengthGrid
upsample = getUpsample(obs)
waveGrid = wavelengthGrid(slicedCubes, oversample = 2, upsample = upsample,
calTree = calTree)
# Active masks
slicedCubes = activateMasks(slicedCubes,
String1d(["GLITCH", "UNCLEANCHOP", "SATURATION",
"GRATMOVE", "BADFITPIX",
"BADPIXELS"]), exclusive = True,
copy = copyCube)
# Flag the remaining outliers (sigma-clipping in wavelength domain),
# with default parameters here
slicedCubes = specFlagOutliers(slicedCubes, waveGrid)
# Rebin all cubes on consistent wavelength grids
masksForRebinning = String1d(["OUTOFBAND", "GLITCH", "UNCLEANCHOP",
"SATURATION", "GRATMOVE", "BADFITPIX",
"OUTLIERS", "BADPIXELS"])
masksForRebinning.append("NOTFFED")
slicedCubes = activateMasks(slicedCubes, masksForRebinning, exclusive = True)
slicedRebinnedCubes = specWaveRebin(slicedCubes, waveGrid)
print slicedRebinnedCubes.refs.size()
# Only continue if there is at least one slice leftover after red-leak filtering
if slicedRebinnedCubes.refs.size() > 0:
# Select only the slices in the PACS cube which are also in the rebinned cube
slicedCubes = selectSlices(slicedCubes, refContext = slicedRebinnedCubes)
# Combine the nod-A & nod-B rebinned cubes.
# All cubes at the same raster position are averaged.
# This is the final science-grade product for spatially undersampled
# rasters and single pointings
slicedRebinnedCubes = specAddNodCubes(slicedRebinnedCubes)
# compute ra/dec meta keywords
slicedRebinnedCubes = centerRaDecMetaData(slicedRebinnedCubes)
# convert the cubes to a table
slicedTable = pacsSpecCubeToTable(slicedRebinnedCubes)
# Compute equidistant wavelength grid for equidistant regridding
equidistantWaveGrid = wavelengthGrid(slicedCubes, oversample = 2,
upsample = upsample, calTree = calTree,
regularGrid = True,
fracMinBinSize = 0.35)
# determine mapping algorithm and parameters
driz, pixelSize, interpolatePixelSize, oversampleSpace, upsampleSpace, pixFrac, source, mapType = determineMappingAlgorithm(slicedRebinnedCubes,camera)
# Mosaic, per wavelength range, all raster pointings into a single cube
slicedDrizzledCubes = None
slicedDrizzledEquidistantCubes = None
slicedInterpolatedCubes = None
slicedInterpolatedEquidistantCubes = None
slicedProjectedEquidistantCubes = None
if driz:
oversampleWave = 2
upsampleWave = upsample
waveGridForDrizzle = wavelengthGrid(slicedCubes,
oversample = oversampleWave,
upsample = upsampleWave,
calTree = calTree)
equidistantWaveGridForDrizzle = wavelengthGrid(slicedCubes,
oversample = oversampleWave,
upsample = upsampleWave,
calTree = calTree,
regularGrid = True,
fracMinBinSize = 0.35)
spaceGrid = spatialGrid(slicedCubes,
wavelengthGrid = waveGridForDrizzle,
oversample = oversampleSpace,
upsample = upsampleSpace, pixfrac = pixFrac,
calTree = calTree)
slicedDrizzledCubes = drizzle(slicedCubes,
wavelengthGrid = waveGridForDrizzle,
spatialGrid = spaceGrid)[0]
slicedDrizzledCubes = centerRaDecMetaData(slicedDrizzledCubes)
sink.saveWhenMemoryShort(slicedDrizzledCubes)
slicedDrizzledEquidistantCubes = specRegridWavelength(slicedDrizzledCubes,
equidistantWaveGridForDrizzle)
sink.saveWhenMemoryShort(slicedDrizzledEquidistantCubes)
slicedProjectedCubes = specProject(slicedRebinnedCubes,
cubeWithOutputGrid = slicedDrizzledCubes)
del spaceGrid, waveGridForDrizzle, equidistantWaveGridForDrizzle
del oversampleWave, upsampleWave
else:
slicedProjectedCubes = specProject(slicedRebinnedCubes,
outputPixelsize = pixelSize)
if mapType != "oversampled":
slicedInterpolatedCubes = specInterpolate(slicedRebinnedCubes,
outputPixelsize = interpolatePixelSize)
slicedInterpolatedCubes = centerRaDecMetaData(slicedInterpolatedCubes)
if (mapType=="nyquist" or mapType=="oversampled"):
slicedProjectedEquidistantCubes = specRegridWavelength(slicedProjectedCubes,
equidistantWaveGrid)
else:
slicedInterpolatedEquidistantCubes = specRegridWavelength(slicedInterpolatedCubes,
equidistantWaveGrid)
slicedProjectedCubes = centerRaDecMetaData(slicedProjectedCubes)
sink.saveWhenMemoryShort(slicedProjectedCubes)
# do a pointsource extraction for the pointed observations only
# should a rangeSpec or lineSpec process be applied??
spectra1d = None
if source=='point':
if isRangeSpec(obs):
c1_2nd, c9_2nd, c129_2nd = extractCentralSpectrum(slicedRebinnedCubes,
smoothing = 'filter',
width = 50,
preFilterWidth = 15,
calTree = calTree)
else:
c1_2nd, c9_2nd, c129_2nd = extractCentralSpectrum(slicedRebinnedCubes,
smoothing = 'median',
calTree = calTree)
spectra1d = fillPacsCentralSpectra(slicedRebinnedCubes,
ptSrcSpec = c1_2nd, ptSrc3x3Spec = c9_2nd)
# del c1_2nd, c9_2nd, c129_2nd
slicedRebinnedCubes.meta.set("sanitycheck",StringParameter("test2"))
# update the level 2 of the ObservationContext
obs = updatePacsObservation(obs, 2.0, [slicedCubes, slicedRebinnedCubes, slicedProjectedCubes, slicedDrizzledCubes,
slicedTable, slicedInterpolatedCubes, slicedDrizzledEquidistantCubes, slicedInterpolatedEquidistantCubes,
slicedProjectedEquidistantCubes])
# remove variables to cleanup memory
del slicedTable, equidistantWaveGrid, driz, pixelSize, interpolatePixelSize
del oversampleSpace, upsampleSpace, pixFrac, source, mapType
del slicedDrizzledCubes, slicedDrizzledEquidistantCubes
del slicedInterpolatedCubes, slicedInterpolatedEquidistantCubes
del slicedProjectedCubes, slicedProjectedEquidistantCubes
else:
LOGGER.warning("No slices left anymore after filtering red-leak and out-of-band slices.")
# Delete some variables (memory clean-up)
del slicedFrames
del maskNotFF, upsample, waveGrid, masksForRebinning, slicedRebinnedCubes
# restore default sink state
restoreOldSinkState()
| lgpl-3.0 | 892,825,352,011,702,500 | 49.807229 | 155 | 0.602798 | false | 3.935604 | false | false | false |
fretboardfreak/space | lib/model/update.py | 1 | 4960 | # Copyright 2015 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import functools
from logging import debug
from .resources import Resources
# Using a global to hold the delayed events for now.
# Also, delayed events are currently executed through the delayed_event_trigger
# rather than a separate event thread.
DELAYED_EVENTS = []
# TODO:
# - make engine queries threadsafe
# - ensure that only queries are used to interact with the model
# - write a class that will run its own thread for managing
# - Move event handling from engine into the event manager class
# - remove all uses of delayed_event_trigger
def update_trigger(func):
"""Decorator to trigger an update before given method is called."""
@functools.wraps(func)
def new_function(*args, **kwargs):
if len(args) > 0 and hasattr(args[0], 'update'):
args[0].update()
return func(*args, **kwargs)
return new_function
def delayed_event_trigger(func):
"""Decorator to trigger delayed events before calling a method."""
@functools.wraps(func)
def new_function(*args, **kwargs):
if hasattr(delayed_event_trigger, 'CALLABLE'):
debug('Performing Delayed Actions...')
delayed_event_trigger.CALLABLE()
return func(*args, **kwargs)
return new_function
def calculate_update_increments(last_update, new_time=None):
"""Determine the number of updated increments between last_update and now.
"""
if not new_time:
new_t = time.time()
else:
new_t = new_time
return new_t - last_update
class ResourceUpdater(object):
"""Helper class to handle updating a Planet's resources based on income.
Attributes "new_time", "difference", "resources" will be unset until the
update() method is called.
"""
def __init__(self, last_update, resources, rates, max_resources=None):
self.last_update = last_update
self.original_resources = resources
self.rates = rates
self.max_resources = max_resources
self.new_time, self.difference, self.resources = [None, None, None]
def update(self):
"""Calculate the new value of resources for planet."""
self.new_time = time.time()
increments = calculate_update_increments(self.last_update,
new_time=self.new_time)
self.difference = Resources()
self.resources = self.original_resources.copy()
for res in self.original_resources:
self.difference[res] = self.rates[res] * increments
if self.max_resources:
new_val = min(self.resources[res] + self.difference[res],
self.max_resources[res])
else:
new_val = self.resources[res] + self.difference[res]
self.resources[res] = new_val
return self.resources, self.new_time
class DelayedEvent(object):
"""Perform an action after some delay.
:descriptor: A string describing the event.
:delay: A number representing the number of seconds to delay.
:action: A callable to be executed after the delay.
When triggered, if the period of delay has passed, the provided action
callable will be executed. If the event triggered it will return True
otherwise it will return None. When triggered the attribute "triggered"
will change from False to True unless an exception was thrown by the action
callable. Once the "triggered" attribute is set to True the event cannot
be re-triggered.
When triggering events, the trigger time can be passed in as the keyword
"_time" otherwise time.time() will be used.
"""
def __init__(self, descriptor, delay, action, *args, **kwargs):
self.descriptor = descriptor
self.delay = delay
self.action = action
self.args = args
self.kwargs = kwargs
self.trigger_time = time.time() + delay
self.triggered = False
def is_delay_over(self, _time=None):
if not _time:
_time = time.time()
return _time >= self.trigger_time
def __call__(self, _time=None):
if not self.is_delay_over(_time):
return
if not self.triggered:
debug('Triggering event "{}"...'.format(self.descriptor))
self.action(*self.args, **self.kwargs)
self.triggered = True
return True
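# Minimal usage sketch (names are illustrative, not part of the module):
#   event = DelayedEvent('finish mine', 5.0, planet.complete_building, 'mine')
#   DELAYED_EVENTS.append(event)
#   ...
#   event()   # returns None until 5 seconds have elapsed, then runs the
#             # action once, sets event.triggered, and returns True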
| apache-2.0 | -6,945,588,662,346,634,000 | 34.177305 | 79 | 0.65625 | false | 4.206955 | false | false | false |
abdoosh00/edraak | common/lib/xmodule/xmodule/modulestore/inheritance.py | 2 | 8205 | """
Support for inheritance of fields down an XBlock hierarchy.
"""
from datetime import datetime
from pytz import UTC
from xmodule.partitions.partitions import UserPartition
from xblock.fields import Scope, Boolean, String, Float, XBlockMixin, Dict, Integer, List
from xblock.runtime import KeyValueStore, KvsFieldData
from xmodule.fields import Date, Timedelta
class UserPartitionList(List):
"""Special List class for listing UserPartitions"""
def from_json(self, values):
return [UserPartition.from_json(v) for v in values]
def to_json(self, values):
return [user_partition.to_json()
for user_partition in values]
class InheritanceMixin(XBlockMixin):
"""Field definitions for inheritable fields."""
graded = Boolean(
help="Whether this module contributes to the final course grade",
scope=Scope.settings,
default=False,
)
start = Date(
help="Start time when this module is visible",
default=datetime(2030, 1, 1, tzinfo=UTC),
scope=Scope.settings
)
due = Date(
help="Date that this problem is due by",
scope=Scope.settings,
)
extended_due = Date(
help="Date that this problem is due by for a particular student. This "
"can be set by an instructor, and will override the global due "
"date if it is set to a date that is later than the global due "
"date.",
default=None,
scope=Scope.user_state,
)
course_edit_method = String(
help="Method with which this course is edited.",
default="Studio", scope=Scope.settings
)
giturl = String(
help="url root for course data git repository",
scope=Scope.settings,
)
xqa_key = String(help="DO NOT USE", scope=Scope.settings)
annotation_storage_url = String(help="Location of Annotation backend", scope=Scope.settings, default="http://your_annotation_storage.com", display_name="Url for Annotation Storage")
annotation_token_secret = String(help="Secret string for annotation storage", scope=Scope.settings, default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", display_name="Secret Token String for Annotation")
graceperiod = Timedelta(
help="Amount of time after the due date that submissions will be accepted",
scope=Scope.settings,
)
showanswer = String(
help="When to show the problem answer to the student",
scope=Scope.settings,
default="finished",
)
rerandomize = String(
help="When to rerandomize the problem",
scope=Scope.settings,
default="never",
)
days_early_for_beta = Float(
help="Number of days early to show content to beta users",
scope=Scope.settings,
default=None,
)
static_asset_path = String(
help="Path to use for static assets - overrides Studio c4x://",
scope=Scope.settings,
default='',
)
text_customization = Dict(
help="String customization substitutions for particular locations",
scope=Scope.settings,
)
use_latex_compiler = Boolean(
help="Enable LaTeX templates?",
default=False,
scope=Scope.settings
)
max_attempts = Integer(
display_name="Maximum Attempts",
help=("Defines the number of times a student can try to answer this problem. "
"If the value is not set, infinite attempts are allowed."),
values={"min": 0}, scope=Scope.settings
)
matlab_api_key = String(
display_name="Matlab API key",
help="Enter the API key provided by MathWorks for accessing the MATLAB Hosted Service. "
"This key is granted for exclusive use by this course for the specified duration. "
"Please do not share the API key with other courses and notify MathWorks immediately "
"if you believe the key is exposed or compromised. To obtain a key for your course, "
"or to report and issue, please contact [email protected]",
scope=Scope.settings
)
# This is should be scoped to content, but since it's defined in the policy
# file, it is currently scoped to settings.
user_partitions = UserPartitionList(
help="The list of group configurations for partitioning students in content experiments.",
default=[],
scope=Scope.settings
)
def compute_inherited_metadata(descriptor):
"""Given a descriptor, traverse all of its descendants and do metadata
inheritance. Should be called on a CourseDescriptor after importing a
course.
NOTE: This means that there is no such thing as lazy loading at the
moment--this accesses all the children."""
if descriptor.has_children:
parent_metadata = descriptor.xblock_kvs.inherited_settings.copy()
# add any of descriptor's explicitly set fields to the inheriting list
for field in InheritanceMixin.fields.values():
if field.is_set_on(descriptor):
# inherited_settings values are json repr
parent_metadata[field.name] = field.read_json(descriptor)
for child in descriptor.get_children():
inherit_metadata(child, parent_metadata)
compute_inherited_metadata(child)
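# Illustrative usage sketch (the course accessor below is hypothetical):
#   course = modulestore().get_course(course_key)
#   compute_inherited_metadata(course)   # pushes settings such as `graded`,
#                                        # `start` and `due` down to descendants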
def inherit_metadata(descriptor, inherited_data):
"""
Updates this module with metadata inherited from a containing module.
Only metadata specified in self.inheritable_metadata will
be inherited
`inherited_data`: A dictionary mapping field names to the values that
they should inherit
"""
try:
descriptor.xblock_kvs.inherited_settings = inherited_data
except AttributeError: # the kvs doesn't have inherited_settings probably b/c it's an error module
pass
def own_metadata(module):
"""
Return a dictionary that contains only non-inherited field keys,
mapped to their serialized values
"""
return module.get_explicitly_set_fields_by_scope(Scope.settings)
class InheritingFieldData(KvsFieldData):
"""A `FieldData` implementation that can inherit value from parents to children."""
def __init__(self, inheritable_names, **kwargs):
"""
`inheritable_names` is a list of names that can be inherited from
parents.
"""
super(InheritingFieldData, self).__init__(**kwargs)
self.inheritable_names = set(inheritable_names)
def default(self, block, name):
"""
The default for an inheritable name is found on a parent.
"""
if name in self.inheritable_names and block.parent is not None:
parent = block.get_parent()
if parent:
return getattr(parent, name)
return super(InheritingFieldData, self).default(block, name)
def inheriting_field_data(kvs):
"""Create an InheritanceFieldData that inherits the names in InheritanceMixin."""
return InheritingFieldData(
inheritable_names=InheritanceMixin.fields.keys(),
kvs=kvs,
)
class InheritanceKeyValueStore(KeyValueStore):
"""
Common superclass for kvs's which know about inheritance of settings. Offers simple
dict-based storage of fields and lookup of inherited values.
Note: inherited_settings is a dict of key to json values (internal xblock field repr)
"""
def __init__(self, initial_values=None, inherited_settings=None):
super(InheritanceKeyValueStore, self).__init__()
self.inherited_settings = inherited_settings or {}
self._fields = initial_values or {}
def get(self, key):
return self._fields[key.field_name]
def set(self, key, value):
# xml backed courses are read-only, but they do have some computed fields
self._fields[key.field_name] = value
def delete(self, key):
del self._fields[key.field_name]
def has(self, key):
return key.field_name in self._fields
def default(self, key):
"""
Check to see if the default should be from inheritance rather than from the field's global default
"""
return self.inherited_settings[key.field_name]
| agpl-3.0 | 2,126,278,373,199,145,500 | 36.126697 | 202 | 0.664595 | false | 4.423181 | false | false | false |
cprov/snapcraft | snapcraft/internal/build_providers/_multipass/_multipass.py | 1 | 4486 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import shlex
from .._base_provider import Provider
from ._instance_info import InstanceInfo
from ._multipass_command import MultipassCommand
class Multipass(Provider):
"""A multipass provider for snapcraft to execute its lifecycle."""
def _run(self, command) -> None:
self._multipass_cmd.execute(instance_name=self.instance_name, command=command)
def _launch(self) -> None:
self._multipass_cmd.launch(instance_name=self.instance_name, image="16.04")
def _mount(self, *, mountpoint: str, dev_or_path: str) -> None:
target = "{}:{}".format(self.instance_name, mountpoint)
self._multipass_cmd.mount(source=dev_or_path, target=target)
def _mount_snaps_directory(self) -> None:
# https://github.com/snapcore/snapd/blob/master/dirs/dirs.go
# CoreLibExecDir
path = os.path.join(os.path.sep, "var", "lib", "snapd", "snaps")
self._mount(mountpoint=self._SNAPS_MOUNTPOINT, dev_or_path=path)
def _push_file(self, *, source: str, destination: str) -> None:
destination = "{}:{}".format(self.instance_name, destination)
self._multipass_cmd.copy_files(source=source, destination=destination)
def __init__(self, *, project, echoer) -> None:
super().__init__(project=project, echoer=echoer)
self._multipass_cmd = MultipassCommand()
self._instance_info = None # type: InstanceInfo
def create(self) -> None:
"""Create the multipass instance and setup the build environment."""
self.launch_instance()
self._instance_info = self._get_instance_info()
self.setup_snapcraft()
def destroy(self) -> None:
"""Destroy the instance, trying to stop it first."""
if self._instance_info is None:
return
if not self._instance_info.is_stopped():
self._multipass_cmd.stop(instance_name=self.instance_name)
self._multipass_cmd.delete(instance_name=self.instance_name)
def provision_project(self, tarball: str) -> None:
"""Provision the multipass instance with the project to work with."""
# TODO add instance check.
# Step 0, sanitize the input
tarball = shlex.quote(tarball)
# First create a working directory
self._multipass_cmd.execute(
command=["mkdir", self.project_dir], instance_name=self.instance_name
)
# Then copy the tarball over
destination = "{}:{}".format(self.instance_name, tarball)
self._multipass_cmd.copy_files(source=tarball, destination=destination)
# Finally extract it into project_dir.
extract_cmd = ["tar", "-xvf", tarball, "-C", self.project_dir]
self._multipass_cmd.execute(
command=extract_cmd, instance_name=self.instance_name
)
def build_project(self) -> None:
# TODO add instance check.
# Use the full path as /snap/bin is not in PATH.
snapcraft_cmd = "cd {}; /snap/bin/snapcraft snap --output {}".format(
self.project_dir, self.snap_filename
)
self._multipass_cmd.execute(
command=["sh", "-c", snapcraft_cmd], instance_name=self.instance_name
)
def retrieve_snap(self) -> str:
# TODO add instance check.
source = "{}:{}/{}".format(
self.instance_name, self.project_dir, self.snap_filename
)
self._multipass_cmd.copy_files(source=source, destination=self.snap_filename)
return self.snap_filename
def _get_instance_info(self):
instance_info_raw = self._multipass_cmd.info(
instance_name=self.instance_name, output_format="json"
)
return InstanceInfo.from_json(
instance_name=self.instance_name, json_info=instance_info_raw.decode()
)
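# Typical lifecycle sketch (illustrative; argument values are made up):
#   provider = Multipass(project=project, echoer=echoer)
#   provider.create()                          # launch the VM and set up snapcraft
#   provider.provision_project('/tmp/src.tar')
#   provider.build_project()
#   snap = provider.retrieve_snap()
#   provider.destroy()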
| gpl-3.0 | -473,096,161,636,848,000 | 38.699115 | 86 | 0.649354 | false | 3.844045 | false | false | false |
box-samples/user-management | box_manage_users/scripts/script.py | 1 | 2088 | # coding: utf-8
from __future__ import unicode_literals
import os
from py.io import TerminalWriter
from box_manage_users.tc_client import TCClient
from box_manage_users.util import setup_logging
class Script(object):
"""
Script base class.
Configures logging and outputs duck ascii art.
"""
_title = 'Base Script'
_message = 'Instructions'
_verbose_log_filename = os.path.join('logs', 'verbose.log')
_failure_log_filename = os.path.join('logs', 'failure.log')
_overview_log_filename = os.path.join('logs', 'overview.log')
def __init__(self):
self._logger = setup_logging(name='console')
self._tw = TerminalWriter()
self._tw.sep('#', self._title, green=True, bold=True)
self._client = TCClient()
self._logger.info(
'Great! Let\'s get this going!\n'
' _ _ _\n'
' >(.)__ <(.)__ =(.)__\n'
' (___/ (___/ (___/ \n'
)
self._verbose_logger = setup_logging(self._verbose_log_filename, debug=True)
self._fail_logger = setup_logging(self._failure_log_filename, name='failures')
self._overview_logger = setup_logging(self._overview_log_filename, name='overview')
def run(self):
"""
Runs the script. Intended to be overridden by subclasses.
"""
self._tw.sep('#', 'Process Complete!', green=True, bold=True)
def get_user_id_from_email_address(self, email):
"""
Given an email address, find the user in the enterprise that has that email address and return that user's id.
:param email: User's email address for which to retrieve the user ID.
:return: The user ID of the user with the given email address.
"""
user = self._client.get_user_by_email(email)
if user is None:
self._fail_logger.warning('No user with login %s. Could not deprovision.', email)
self._overview_logger.warning('No user with login %s. Could not deprovision.', email)
return None
return user.id
| apache-2.0 | 3,649,809,080,674,029,000 | 38.396226 | 118 | 0.598659 | false | 3.715302 | false | false | false |
tbenthompson/quadracheer | quadracheer/piessens.py | 1 | 2265 | import numpy as np
from map import map_pts_wts
from gaussian_quad import gaussxw
def piessens(N, x0, nonsingular_N = -1):
"""
Quadrature points and weights for integrating a function with form
f(x) / (x - x0)
on the interval [-1, 1]
Uses the 2N point Gauss rule derived in Piessens (1970). Almost certainly
suboptimal, but it's very simple and it works. Exact for polynomials of
order 4N.
"""
if nonsingular_N == -1:
nonsingular_N = N
# Split the interval into two sections. One is properly integrable.
# The other is symmetric about the singularity point and must be
# computed using as a cauchy principal value integral.
if x0 < 0.0:
pv_start = -1.0
pv_length = 2 * (x0 + 1)
proper_length = 2.0 - pv_length
proper_start = pv_start + pv_length
else:
pv_length = 2 * (-x0 + 1)
pv_start = 1.0 - pv_length
proper_start = -1.0
proper_length = 2.0 - pv_length
# the interval without the singularity
gx, gw = gaussxw(nonsingular_N)
x, w = map_pts_wts(gx, gw, proper_start, proper_start + proper_length)
# Get the points for the singular part using Piessen's method
x_sing, w_sing = piessen_method(N, pv_start, pv_length, x0)
# Finished!
x = np.append(x, x_sing)
w = np.append(w, w_sing)
return x,w
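# Minimal usage sketch (illustrative): approximate the Cauchy principal value
# of integral_{-1}^{1} exp(x) / (x - x0) dx with x0 = 0.25. The returned
# weights already absorb the 1/(x - x0) cancellation, so the full integrand
# is evaluated directly:
#   x, w = piessens(8, 0.25)
#   approx = np.sum(w * np.exp(x) / (x - 0.25))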
def piessen_method(N, pv_start, pv_length, x0, add_singularity = True):
x_base, w_base = piessen_neg_one_to_one_nodes(N)
# Convert to the interval from [pv_start, pv_start + pv_length]
x = (pv_length / 2) * x_base + \
(2 * pv_start + pv_length) / 2.0
# No need to scale the weights because the linear factor in the 1/r
# exactly cancels the jacobian.
w = w_base
# If we don't factor the 1 / (x - x0) out of the integrand,
# we must account for it here.
if add_singularity:
w *= x - x0
return x, w
def piessen_neg_one_to_one_nodes(N):
"""Piessen nodes and weights for [-1, 1]"""
if N % 2 == 1:
raise Exception("Piessens method requires an even quadrature " +
"order")
gx, gw = gaussxw(2 * N)
x = gx
w = gw / gx
return x, w
| mit | 3,262,751,747,561,430,000 | 31.357143 | 76 | 0.607947 | false | 3.10274 | false | false | false |
msullivan/advent-of-code | 2020/12a.py | 1 | 1105 | #!/usr/bin/env python3
import sys
import re
def extract(s):
return [int(x) for x in re.findall(r'-?\d+', s)]
UP = (0, -1)
DOWN = (0, 1)
LEFT = (-1, 0)
RIGHT = (1, 0)
DIRS = { 'E': RIGHT, 'W': LEFT, 'N': UP, 'S': DOWN }
ROT = 'NESW'
def add(v1, v2):
return tuple(x + y for x, y in zip(v1, v2))
def mul(k, v2):
return tuple(k * y for y in v2)
def main(args):
# data = [x.split('\n') for x in sys.stdin.read().split('\n\n')]
data = [s.strip() for s in sys.stdin]
pos = (0, 0)
dir = 'E'
for line in data:
cmd = line[0]
arg = int(line[1:])
if cmd in DIRS:
pos = add(pos, mul(arg, DIRS[cmd]))
elif cmd in 'LR':
arg //= 90
i = ROT.index(dir)
m = 1 if cmd == 'R' else -1
i += m*arg
dir = ROT[i % 4]
elif cmd == 'F':
pos = add(pos, mul(arg, DIRS[dir]))
print(pos)
# My original solution had this and worked, lol
# print(abs(pos[0] + pos[1]))
print(abs(pos[0]) + abs(pos[1]))
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mit | 3,101,922,182,427,441,000 | 21.1 | 68 | 0.478733 | false | 2.662651 | false | false | false |
ibhubs/sketch-components | sketch_components/engines/react/native/components/image.py | 1 | 5573 | """
.. code-block:: javascript
import { Image } from 'react-native'
A Layer Group with Bitmap as child or a Shape Layer with an Image as a fill
can be tagged as **Image**. The name of the layer will be the name of the
image source generated.
"""
from sketch_components.utils import combine_styles, small_camel_case
from .commons import StyleSheet
from .component import Component
from ..props import Prop, Props, PropTypes, EdgeInsetsPropType, \
StyleSheetPropType, ImageSourcePropType, \
ImageStylePropTypes
class Image(Component):
def __init__(self, source_name=None, source_path=None, props=None,
parent=None, layer=None):
super(self.__class__, self).__init__(parent=parent, layer=layer)
self.name = 'Image'
self.path = 'react-native'
self.is_default = False
self.props = Props({
'style': StyleSheetPropType(ImageStylePropTypes),
'source': ImageSourcePropType,
'defaultSource': PropTypes.oneOfType([PropTypes.shape({
'uri': PropTypes.string, 'width': PropTypes.number,
'height': PropTypes.number,
'scale': PropTypes.number}), PropTypes.number]),
'accessible': PropTypes.bool,
'accessibilityLabel': PropTypes.node,
'blurRadius': PropTypes.number,
'capInsets': EdgeInsetsPropType,
'resizeMethod': PropTypes.oneOf(['auto', 'resize', 'scale']),
'resizeMode': PropTypes.oneOf(
['cover', 'contain', 'stretch', 'repeat', 'center']),
'testID': PropTypes.string,
'onLayout': PropTypes.func,
'onLoadStart': PropTypes.func,
'onProgress': PropTypes.func,
'onError': PropTypes.func,
'onPartialLoad': PropTypes.func,
'onLoad': PropTypes.func,
'onLoadEnd': PropTypes.func
})
self.update_props(props)
self.is_self_closing = True
self.source_name = source_name
self.source_path = source_path
def update_dependencies(self):
for child in self.children:
self.dependencies.add(child.import_statement())
if not child.is_exportable_component():
child.update_dependencies()
self.dependencies.update(child.dependencies)
from mako.template import Template
if self.source_name and self.source_path:
image_source_dependency = Template(
"""import ${name} from '${path}'""").render(
name=self.source_name,
path=self.source_path)
self.dependencies.add(image_source_dependency)
@classmethod
def create_component(cls, sketch_layer, parent=None):
if sketch_layer.component:
props = sketch_layer.component.get_react_native_props()
else:
props = dict()
style = props.get('style', Prop(dict())).value
source_path = ''
source_name = ''
if sketch_layer.name is not None and sketch_layer.image is not None:
source_name = small_camel_case(sketch_layer.name)
sketch_layer.image.set_image_source(source_name)
source_path = sketch_layer.image.get_image_source()
elif sketch_layer.is_layer_group():
image_layer = None
for layer in sketch_layer.layers:
if layer.is_image_layer():
image_layer = layer
if (image_layer is not None and
image_layer.name is not None and
image_layer.image is not None):
source_name = small_camel_case(image_layer.name)
image_layer.image.set_image_source(source_name)
source_path = image_layer.image.get_image_source()
elif sketch_layer.is_shape_group():
if (sketch_layer.style and sketch_layer.style.fills and
sketch_layer.style.fills[0].image):
source_name = small_camel_case(sketch_layer.name)
sketch_layer.style.fills[0].image.set_image_source(source_name)
source_path = sketch_layer.style.fills[
0].image.get_image_source()
if props.get('source', None) is None and source_name != '':
props['source'] = Prop(source_name, is_literal=True)
elif props.get('source') is not None:
source_path = ''
source_name = ''
props.update(
{'style': StyleSheet(
combine_styles(sketch_layer.get_css_view_styles(), style),
name=sketch_layer.name)})
component = Image(source_name=source_name, source_path=source_path,
props=props, parent=parent,
layer=sketch_layer)
for layer in sketch_layer.layers:
if (layer.is_shape_group() and
layer.hasClippingMask or
layer.is_rectangle_shape() or
layer.is_oval_shape()):
child = layer.get_react_component(pass_styles=True,
parent=component)
if child:
component.add_child(child)
# TODO: Remove after bug in react native is fixed
if (component.props['style'].styles['borderRadius']) is not None and (
component.props['style'].styles['borderRadius'] > 0):
component.props['style'].styles['backgroundColor'] = None
return component
| mit | 5,057,059,149,163,134,000 | 42.539063 | 79 | 0.575991 | false | 4.053091 | false | false | false |
konrads/spudblog-app-engine | spudblog/views.py | 1 | 3956 | """Views, both for UI presentation and for api calls.
UI views have no prefix, api calls are prefixed with 'api'
"""
from functools import wraps
from django.shortcuts import render
from django.http import (HttpResponse, HttpResponseNotAllowed,
HttpResponseRedirect, HttpResponseForbidden,
HttpResponseNotFound)
from django.contrib.auth import logout as django_logout
from django.contrib.auth.views import login as django_login
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
import json
import db
from django.core.exceptions import ObjectDoesNotExist
def api_call(methods=None, needs_login=False):
"""Enforces valid http method has been used, if `needs_login`
- validates the user is authenticated, converts KeyError into Http404.
:param methods: valid http methods ('GET', 'POST', 'PUT', 'DELETE')
:param needs_login: if authenticated user session needs to be present
:returns: decorated view
"""
if not methods:
methods = ['GET']
def decorator(f):
@wraps(f)
def wrapper(request, *args, **kwds):
if needs_login and not request.user.is_authenticated():
return HttpResponseForbidden('Unauthorized/timed out user')
if request.method not in methods:
return HttpResponseNotAllowed(methods)
res = None
try:
res = f(request, *args, **kwds)
except (KeyError, ObjectDoesNotExist):
pass
if not res:
return HttpResponseNotFound('Resource not found')
return HttpResponse(
json.dumps(res, indent=4),
mimetype='application/json')
# do not need csfr for REST api...?
return csrf_exempt(wrapper)
return decorator
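# Illustrative example (not one of the views below): a decorated view returns
# a JSON-serializable value and the wrapper handles method checks, auth and
# serialization:
#   @api_call(methods=['GET'], needs_login=True)
#   def ping(request):
#       return {'ok': True}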
##### html views
def logout(request):
django_logout(request)
return HttpResponseRedirect('/')
def blog_explorer(request):
return render(
request,
'spudblog/blog_explorer.html',
{'blogs': db.get_blogs()})
@login_required
def my_blogs(request):
return render(
request,
'spudblog/my_blogs.html',
{'blogs': db.get_blogs(request.user.id),
'user_name': request.user.username})
##### API
@api_call()
def all(request):
"""Debug api call, lists all users, their blogs and posts."""
return db.all_as_json()
@api_call()
def full_blog(request, blog_id):
"""Gets full blog, with title, id, posts."""
blog_id = long(blog_id)
return db.get_full_blog(blog_id).as_json()
@api_call(methods=['POST', 'PUT', 'DELETE'], needs_login=True)
def blog(request, blog_id):
"""CRUD operations on blog, ie. create, update and delete
(no fetch, that's done within :func:`views.full-blog`)."""
if request.method == 'POST':
blog = json.loads(request.body)
return db.create_blog(request.user.id, blog).as_json()
elif request.method == 'PUT':
blog_id = long(blog_id)
blog = json.loads(request.body)
blog['id'] = blog_id # whether id's set or not...
return db.update_blog(blog).as_json()
elif request.method == 'DELETE':
blog_id = long(blog_id)
return db.del_blog(blog_id)
@api_call(methods=['POST', 'PUT', 'DELETE'], needs_login=True)
def post(request, post_id):
"""CRUD operations on post, ie. create, update and delete
(no fetch, that's done within :func:`views.full-blog`."""
if request.method == 'POST':
post = json.loads(request.body)
blog_id = post.pop('blog_id')
return db.create_post(blog_id, post).as_json()
elif request.method == 'PUT':
post_id = long(post_id)
post = json.loads(request.body)
return db.update_post(post).as_json()
elif request.method == 'DELETE':
post_id = long(post_id)
return db.del_post(post_id)
| bsd-3-clause | 5,498,022,839,492,439,000 | 31.966667 | 75 | 0.628665 | false | 3.874633 | false | false | false |
orgads/msys2-runtime | newlib/doc/chapter-texi2docbook.py | 9 | 1510 | #!/usr/bin/env python3
#
# python script to convert the handwritten chapter .texi files, which include
# the generated files for each function, to DocBook XML
#
# all we care about is the content of the refentries, so all this needs to do is
# convert the @include of the makedoc generated .def files to xi:include of the
# makedocbook generated .xml files.
#
from __future__ import print_function
import sys
import re
def main():
first_node = True
print ('<?xml version="1.0" encoding="UTF-8"?>')
print ('<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd">')
for l in sys.stdin.readlines():
l = l.rstrip()
# transform @file{foo} to <filename>foo</filename>
l = re.sub("@file{(.*?)}", "<filename>\\1</filename>", l)
if l.startswith("@node"):
l = l.replace("@node", "", 1)
l = l.strip()
l = l.lower()
if first_node:
print ('<chapter id="%s" xmlns:xi="http://www.w3.org/2001/XInclude">' % l.replace(' ', '_'))
first_node = False
elif l.startswith("@chapter "):
l = l.replace("@chapter ", "", 1)
print ('<title>%s</title>' % l)
elif l.startswith("@include "):
l = l.replace("@include ", "", 1)
l = l.replace(".def", ".xml", 1)
print ('<xi:include href="%s"/>' % l.strip())
print ('</chapter>')
if __name__ == "__main__" :
main()
| gpl-2.0 | 1,336,451,028,471,773,200 | 32.555556 | 129 | 0.554967 | false | 3.519814 | false | false | false |
darcyfdu/findlicense | src/formattedcode/format.py | 1 | 12904 | #
# Copyright (c) 2016 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
# -*- coding: UTF-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import codecs
import copy
import sqlite3
from collections import OrderedDict
from operator import itemgetter
from os.path import abspath
from os.path import basename
from os.path import dirname
from os.path import exists
from os.path import expanduser
from os.path import isfile
from os.path import join
import os
import simplejson as json
import time
from commoncode import fileutils
"""
Format scan outputs.
"""
def get_template(templates_dir, template_name='template.html'): # @ReservedAssignment
"""
Given a template directory, load and return the template from the template_name
file found in that directory.
"""
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader(templates_dir))
template = env.get_template(template_name)
return template
def get_template_dir(format): # @ReservedAssignment
"""
Given a format string return the corresponding standard template directory.
"""
return join(dirname(__file__), 'templates', format)
def as_html_app(scanned_path, output_file):
"""
Return an HTML string built from a list of results and the html-app template.
"""
template = get_template(get_template_dir('html-app'))
_, assets_dir = get_html_app_files_dirs(output_file)
return template.render(assets_dir=assets_dir, scanned_path=scanned_path)
def get_html_app_help(output_filename):
"""
Return an HTML string containing html-app help page with a reference back
to the main app
"""
template = get_template(get_template_dir('html-app'), template_name='help_template.html')
return template.render(main_app=output_filename)
class HtmlAppAssetCopyWarning(Exception):
pass
class HtmlAppAssetCopyError(Exception):
pass
def is_stdout(output_file):
return output_file.name == '<stdout>'
def get_html_app_files_dirs(output_file):
"""
    Return a tuple of (parent_dir, dir_name) where dir_name is named after the
    `output_file` file object's base name (stripped of its extension) with a
    `_files` suffix. Return empty strings if output is to stdout.
"""
if is_stdout(output_file):
return '', ''
file_name = output_file.name
parent_dir = dirname(file_name)
dir_name = fileutils.file_base_name(file_name) + '_files'
return parent_dir, dir_name
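# Illustrative example (assumption, not from the original code): for an output file
# named "scan.html" in the current directory this returns ('', 'scan_files'), so the
# html-app assets are written to a sibling "scan_files" directory next to the report.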
def create_html_app_assets(results, output_file):
"""
Given an html-app output_file, create the corresponding `_files` directory
and copy the assets to this directory. The target directory is deleted if it
exists.
Raise HtmlAppAssetCopyWarning if the output_file is <stdout> or
HtmlAppAssetCopyError if the copy was not possible.
"""
try:
if is_stdout(output_file):
raise HtmlAppAssetCopyWarning()
assets_dir = join(get_template_dir('html-app'), 'assets')
# delete old assets
tgt_dirs = get_html_app_files_dirs(output_file)
target_dir = join(*tgt_dirs)
if exists(target_dir):
fileutils.delete(target_dir)
# copy assets
fileutils.copytree(assets_dir, target_dir)
# write json data
root_path, assets_dir = get_html_app_files_dirs(output_file)
with codecs.open(join(root_path, assets_dir, 'data.json'), 'wb', encoding='utf-8') as f:
f.write('data=')
json.dump(results, f, iterable_as_array=True)
# create help file
with codecs.open(join(root_path, assets_dir, 'help.html'), 'wb', encoding='utf-8') as f:
f.write(get_html_app_help(basename(output_file.name)))
except HtmlAppAssetCopyWarning, w:
raise w
except Exception, e:
raise HtmlAppAssetCopyError(e)
def isNetdis(liceses,conn):
netdisliceses = []
netdisLi = []
cursor = conn.execute("SELECT NAME from NETDISTRIBUTIONLICENSE")
for row in cursor:
netdisLi.append(row[0])
for licese in liceses:
if(licese in netdisLi):
netdisliceses.append(licese)
return netdisliceses
sameLi = []
def isSameLi(liceses,conn):
sameLi = []
cursor = conn.execute("SELECT NAME from sameLicense")
for row in cursor:
sameLi.append(row[0])
sameliceses = []
for licese in liceses:
if(licese in sameLi):
sameliceses.append(licese)
return sameliceses
openLi = []
def isOpensource(liceses,conn):
openLi = []
cursor = conn.execute("SELECT NAME from OPENSOURCELICENSE")
for row in cursor:
openLi.append(row[0])
openliceses = []
for licese in liceses:
if(licese in openLi):
openliceses.append(licese)
return openliceses
notPatLi = []
def isNotPatent(liceses,conn):
notPatLi = []
cursor = conn.execute("SELECT NAME from NOTPATENTLICENSE")
for row in cursor:
notPatLi.append(row[0])
notPatliceses = []
for licese in liceses:
if(licese in notPatLi):
notPatliceses.append(licese)
return notPatliceses
def isModified(liceses,conn):
ModLi = []
cursor = conn.execute("SELECT NAME from ModIFYLICENSE")
for row in cursor:
ModLi.append(row[0])
modliceses = []
for licese in liceses:
if(licese in ModLi):
modliceses.append(licese)
return modliceses
def isTrademark(liceses,conn):
TMLi = []
cursor = conn.execute("SELECT NAME from TRADEMARKLICENSE")
for row in cursor:
TMLi.append(row[0])
TMliceses = []
for licese in liceses:
if(licese in TMLi):
TMliceses.append(licese)
return TMliceses
def mayConflict(liceses,conn):
maycficeses = []
for licese in liceses:
if(licese in sameLi):
temp = []
for li in sameLi:
if (li != licese):
temp.append(li)
maycficeses.append({'licenses':licese,'maycf':temp})
return maycficeses
def isConflict(liceses,conn):
confliceses = []
for i in range(len(liceses)):
for j in range(i+1,len(liceses)):
isflag = False
if((liceses[i] in sameLi) and (liceses[j] in sameLi) and not(('gpl' in liceses[j]) and ('gpl' in liceses[i]))):
isflag = True
if(isflag):
confliceses.append([liceses[i],liceses[j]])
return confliceses
def printre(liceselist,loacllist):
templist = []
for item in liceselist:
templocal1 = []
for local in loacllist:
if (local.has_key(item)):
templocal1.append(local[item])
templocal1.sort()
templist.append({'licenses':item,'loacal':templocal1})
return templist
def countlicense(liceselist,loacllist):
templist = []
for item in liceselist:
tempcount = 0
for local in loacllist:
if (local.has_key(item)):
tempcount +=1
templist.append({'licenses':item,'count':tempcount})
return templist
def printconf(liceselist,loacllist):
templist = []
for item in liceselist:
templocal1 = []
templocal2 = []
for local in loacllist:
if (local.has_key(item[0])):
templocal1.append(local[item[0]])
if (local.has_key(item[1])):
templocal2.append(local[item[1]])
templist.append({'licenses1':item[0],'loacal1':templocal1,'licenses2':item[1],'loacal2':templocal2})
return templist
def as_template(scanned_files, files_count,output_file, template='html',):
"""
    Return a string built from a list of results and the provided template.
The template defaults to the standard HTML template format or can point to
the path of a custom template file.
"""
from licensedcode.models import get_licenses
conn = sqlite3.connect('data.db')
if template == 'html':
template = get_template(get_template_dir('html'))
else:
# load a custom template
tpath = fileutils.as_posixpath(abspath(expanduser(template)))
assert isfile(tpath)
tdir = fileutils.parent_directory(tpath)
tfile = fileutils.file_name(tpath)
template = get_template(tdir, tfile)
converted = OrderedDict()
converted_infos = OrderedDict()
converted_packages = OrderedDict()
licenses = {}
LICENSES = 'licenses'
COPYRIGHTS = 'copyrights'
PACKAGES = 'packages'
URLS = 'urls'
EMAILS = 'emails'
liceses1 = []
licessloacl = []
# Create a flattened data dict keyed by path
for scanned_file in scanned_files:
path = scanned_file['path']
results = []
if COPYRIGHTS in scanned_file:
for entry in scanned_file[COPYRIGHTS]:
results.append({
'start': entry['start_line'],
'end': entry['end_line'],
'what': 'copyright',
# NOTE: we display one statement per line.
'value': '\n'.join(entry['statements']),
})
if LICENSES in scanned_file:
for entry in scanned_file[LICENSES]:
results.append({
'start': entry['start_line'],
'end': entry['end_line'],
'what': 'license',
'value': entry['key'],
})
if entry['key'] not in licenses:
licenses[entry['key']] = entry
entry['object'] = get_licenses().get(entry['key'])
if results:
converted[path] = sorted(results, key=itemgetter('start'))
for k in converted[path]:
if(k['what']=='license'):
licessloacl.append({k['value']:path})
if(not (k['value'] in liceses1)):
liceses1.append(k['value'])
# this is klunky: we need to drop templates entirely
converted_infos[path] = OrderedDict()
for name, value in scanned_file.items():
if name in (LICENSES, PACKAGES, COPYRIGHTS, EMAILS, URLS):
continue
converted_infos[path][name] = value
if PACKAGES in scanned_file:
converted_packages[path] = scanned_file[PACKAGES]
licenses = OrderedDict(sorted(licenses.items()))
ISOTIMEFORMAT='-%Y-%m-'
scantime = time.strftime(ISOTIMEFORMAT,time.localtime())
filename = os.path.basename(output_file.name).rsplit(scantime,1)[0]
files = {
'filename':filename,
'filecount':files_count,
'scantime':os.path.basename(output_file.name).rsplit(filename,1)[1][1:-5],
'license_length':len(licenses),
'license_count':countlicense(licenses.keys(),licessloacl),
'isSameLi':printre(isSameLi(liceses1,conn),licessloacl),
'isNetdis':printre(isNetdis(liceses1,conn),licessloacl),
'isOpensource':printre(isOpensource(liceses1,conn),licessloacl),
'isNotPatent':printre(isNotPatent(liceses1,conn),licessloacl),
'isModified':printre(isModified(liceses1,conn),licessloacl),
'isTrademark':printre(isTrademark(liceses1,conn),licessloacl),
'isConflict':printconf(isConflict(liceses1,conn),licessloacl),
'mayConflict':mayConflict(liceses1,conn),
'license_copyright': converted,
'infos': converted_infos,
'packages': converted_packages
}
return template.generate(files=files, licenses=licenses)
| apache-2.0 | -3,150,960,963,537,207,000 | 34.844444 | 124 | 0.640809 | false | 3.744631 | false | false | false |
StryKaizer/Brew | djangoproject/brew/migrations/0015_auto__del_field_mashlog_status__add_field_mashlog_active_mashing_step_.py | 1 | 3182 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'MashLog.status'
db.delete_column('brew_mashlog', 'status')
# Adding field 'MashLog.active_mashing_step_state'
db.add_column('brew_mashlog', 'active_mashing_step_state',
self.gf('django.db.models.fields.CharField')(default=None, max_length=1),
keep_default=False)
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'MashLog.status'
raise RuntimeError("Cannot reverse this migration. 'MashLog.status' and its values cannot be restored.")
# Deleting field 'MashLog.active_mashing_step_state'
db.delete_column('brew_mashlog', 'active_mashing_step_state')
models = {
'brew.batch': {
'Meta': {'object_name': 'Batch'},
'brewing_date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mashing_scheme': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['brew.MashingScheme']"}),
'number': ('django.db.models.fields.IntegerField', [], {'max_length': '3'})
},
'brew.mashingscheme': {
'Meta': {'object_name': 'MashingScheme'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'brew.mashingstep': {
'Meta': {'object_name': 'MashingStep'},
'degrees': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mashing_scheme': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['brew.MashingScheme']"}),
'minutes': ('django.db.models.fields.CharField', [], {'max_length': '3'})
},
'brew.mashlog': {
'Meta': {'object_name': 'MashLog'},
'active_mashing_step': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['brew.MashingStep']"}),
'active_mashing_step_state': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['brew.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'degrees': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'brew.variable': {
'Meta': {'object_name': 'Variable'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '127'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['brew'] | isc | -7,723,776,558,796,714,000 | 47.969231 | 121 | 0.561596 | false | 3.620023 | false | false | false |
kmike/django-generic-images | generic_images/models.py | 1 | 8243 | #coding: utf-8
import os
#import random
from django.db import models
from django.contrib.auth.models import User
from django.core.files.storage import default_storage
from django.db.models import Max
from django.utils.translation import ugettext_lazy as _
from generic_images.signals import image_saved, image_deleted
from generic_images.managers import AttachedImageManager
from generic_utils.models import GenericModelBase
class BaseImageModel(models.Model):
''' Simple abstract Model class with image field.
.. attribute:: image
``models.ImageField``
'''
def get_upload_path(self, filename):
''' Override this to customize upload path '''
raise NotImplementedError
def _upload_path_wrapper(self, filename):
return self.get_upload_path(filename)
image = models.ImageField(_('Image'), upload_to=_upload_path_wrapper)
class Meta:
abstract = True
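# Hedged usage sketch (not part of the original app); the subclass name and path
# layout are assumptions. A concrete subclass only needs to supply get_upload_path():
#
#   class ScreenshotImage(BaseImageModel):
#       def get_upload_path(self, filename):
#           return os.path.join('screenshots', filename)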
class ReplaceOldImageModel(BaseImageModel):
'''
Abstract Model class with image field.
If the file for image is re-uploaded, old file is deleted.
'''
def _replace_old_image(self):
''' Override this in subclass if you don't want
image replacing or want to customize image replacing
'''
try:
old_obj = self.__class__.objects.get(pk=self.pk)
if old_obj.image.path != self.image.path:
path = old_obj.image.path
default_storage.delete(path)
except self.__class__.DoesNotExist:
pass
def save(self, *args, **kwargs):
if self.pk:
self._replace_old_image()
super(ReplaceOldImageModel, self).save(*args, **kwargs)
class Meta:
abstract = True
class AbstractAttachedImage(ReplaceOldImageModel, GenericModelBase):
'''
Abstract Image model that can be attached to any other Django model
using generic relations.
.. attribute:: is_main
        BooleanField. Whether the image is the main image for the object.
        This field is automatically set to False for all other images attached
        to the same object when an image with is_main=True is saved, ensuring
        there is only one main image per object.
.. attribute:: order
IntegerField to support ordered image sets.
On creation it is set to max(id)+1.
'''
user = models.ForeignKey(User, blank=True, null=True,
verbose_name=_('User'))
'''A ForeignKey to associated user, for example user who uploaded image.
Can be empty.'''
caption = models.TextField(_('Caption'), null=True, blank=True)
'TextField caption for image'
is_main = models.BooleanField(_('Main image'), default=False)
order = models.IntegerField(_('Order'), default=0)
objects = AttachedImageManager()
'''Default manager of :class:`~generic_images.managers.AttachedImageManager`
type.'''
def next(self):
''' Returns next image for same content_object and None if image is
the last. '''
try:
return self.__class__.objects.for_model(self.content_object,
self.content_type).\
filter(order__lt=self.order).order_by('-order')[0]
except IndexError:
return None
def previous(self):
''' Returns previous image for same content_object and None if image
is the first. '''
try:
return self.__class__.objects.for_model(self.content_object,
self.content_type).\
filter(order__gt=self.order).order_by('order')[0]
except IndexError:
return None
def get_order_in_album(self, reversed_ordering=True):
''' Returns image order number. It is calculated as (number+1) of images
attached to the same content_object whose order is greater
(if 'reverse_ordering' is True) or lesser (if 'reverse_ordering' is
False) than image's order.
'''
lookup = 'order__gt' if reversed_ordering else 'order__lt'
return self.__class__.objects.\
for_model(self.content_object, self.content_type).\
filter(**{lookup: self.order}).count() + 1
def _get_next_pk(self):
max_pk = self.__class__.objects.aggregate(m=Max('pk'))['m'] or 0
return max_pk+1
# def put_as_last(self):
# """ Sets order to max(order)+1 for self.content_object
# """
# last = self.__class__.objects.exclude(id=self.id).\
# filter(
# object_id = self.object_id,
# content_type = self.content_type,
# ).aggregate(max_order=Max('order'))['max_order'] or 0
# self.order = last+1
def get_file_name(self, filename):
        ''' Returns the file name (without path and extension)
            for the uploaded image. Default is 'max(pk)+1'.
            Override this in a subclass or assign another function per-instance
            if you want different file names (ex: random string).
'''
# alphabet = "1234567890abcdefghijklmnopqrstuvwxyz"
# # 1e25 variants
# return ''.join([random.choice(alphabet) for i in xrange(16)])
# anyway _get_next_pk is needed for setting `order` field
return str(self._get_next_pk())
def get_upload_path(self, filename):
''' Override this in proxy subclass to customize upload path.
Default upload path is
:file:`/media/images/<user.id>/<filename>.<ext>`
or :file:`/media/images/common/<filename>.<ext>` if user is not set.
``<filename>`` is returned by
:meth:`~generic_images.models.AbstractAttachedImage.get_file_name`
        method. By default it is the probable id of the new image (predicted,
        since the actual id is unknown at this stage).
'''
user_folder = str(self.user.pk) if self.user else 'common'
root, ext = os.path.splitext(filename)
return os.path.join('media', 'images', user_folder,
self.get_file_name(filename) + ext)
def save(self, *args, **kwargs):
send_signal = getattr(self, 'send_signal', True)
if self.is_main:
related_images = self.__class__.objects.filter(
content_type=self.content_type,
object_id=self.object_id
)
related_images.update(is_main=False)
if not self.pk: # object is created
if not self.order: # order is not set
self.order = self._get_next_pk() # let it be max(pk)+1
super(AbstractAttachedImage, self).save(*args, **kwargs)
if send_signal:
image_saved.send(sender = self.content_type.model_class(),
instance = self)
def delete(self, *args, **kwargs):
send_signal = getattr(self, 'send_signal', True)
super(AbstractAttachedImage, self).delete(*args, **kwargs)
if send_signal:
image_deleted.send(sender = self.content_type.model_class(),
instance = self)
def __unicode__(self):
try:
if self.user:
return u"AttachedImage #%d for [%s] by [%s]" % (
self.pk, self.content_object, self.user)
else:
return u"AttachedImage #%d for [%s]" % (
self.pk, self.content_object,)
except:
try:
return u"AttachedImage #%d" % (self.pk)
except TypeError:
return u"new AttachedImage"
class Meta:
abstract=True
class AttachedImage(AbstractAttachedImage):
'''
Image model that can be attached to any other Django model using
generic relations. It is simply non-abstract subclass of
:class:`~generic_images.models.AbstractAttachedImage`
'''
class Meta:
ordering = ['-order']
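# Hedged usage sketch (illustrative, not from the original file); the owning object
# and field values are assumptions. Thanks to the generic relation, any model
# instance can own images:
#
#   image = AttachedImage(content_object=some_article, user=request.user,
#                         caption='Cover photo', is_main=True)
#   image.save()   # demotes any previous main image attached to some_article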
| mit | 1,550,361,170,257,904,400 | 33.927966 | 80 | 0.576247 | false | 4.389244 | false | false | false |
DataBassDroppers/291ProjectOne | Patient_Info_Update.py | 1 | 18073 | import cx_Oracle
import getpass #gets password without echoing
import random
import datetime
class Patient_Info_Update():
def __init__(self):
pass
def main(self, credentials):
self.con = cx_Oracle.connect(credentials[0] + '/' + \
credentials[1] + '@gwynne.cs.ualberta.ca:1521/CRS')
state = self.getInputs()
if state == 0:
return 1
self.executeStatement(state)
self.con.close()
return 1
def printOptions(self):
print()
print("[1] Enter new Patient")
print("[2] Edit Existing Patient")
print("[3] Return to main menu.")
def getInputs(self):
while 1:
self.name_update = False
self.address_update = False
self.birth_update = False
self.phone_update = False
self.printOptions()
ans = input("Enter a choice: ")
if ans == "1":
self.HCN = self.getUniqueHCN()
self.printSeparator()
self.name = self.getName()
go=True
self.printSeparator()
while go:
self.address,go = self.getAddress()
go=True
self.printSeparator()
while go:
self.birth,go = self.getBirthDate()
self.printSeparator()
self.phone = self.getPhone()
self.printSeparator()
print("Patient Name: " + self.name)
print("Patient Address: " + self.address)
print("Patient Birth Date: " + self.birth)
print("Patient Phone Number: " + self.phone)
print()
while 1:
conf = input("Confirm information (y/n): ")
if conf == "y":
print("Information confirmed.")
return 1
elif conf == "n":
print("Information not confirmed, returning to start.")
break
else:
print("Invalid choice, pick 'y' or 'n'")
elif ans == "2":
go=True
self.printSeparator()
while go:
self.patient,go = self.getPatient()
not_done = True
while not_done:
curs = self.con.cursor()
curs.execute("select * from patient where health_care_no=" + str(self.patient))
rows = curs.fetchall()
print()
for row in rows:
list1=[]
counter=0
for x in row:
if counter == 3:
if x is not None:
x=(x.strftime("%Y-%m-%d %H:%M:%S"))
x=x[:-9]
counter+=1
list1.append(x)
print("Current Information: " + str(tuple(list1)))
print("[1] Update patient name.")
print("[2] Update patient address.")
print("[3] Update patient birth date.")
print("[4] Update patient phone number.")
print("[5] Return to menu.")
check = input("Enter an option: ")
if check == "1":
self.printSeparator()
self.name = self.getName()
self.name_update = True
ask = input("Update another value? (y/n): ")
while 1:
if ask == "y":
break
elif ask == "n":
not_done = False
break
else:
print("Invalid input. ")
print()
elif check == "2":
go=True
self.printSeparator()
while go:
self.address,go = self.getAddress()
self.address_update = True
ask = input("Update another value? (y/n): ")
while 1:
if ask == "y":
break
elif ask == "n":
not_done = False
break
else:
print("Invalid input. ")
print()
elif check == "3":
go=True
self.printSeparator()
while go:
self.birth,go = self.getBirthDate()
self.birth_update = True
ask = input("Update another value? (y/n): ")
while 1:
if ask == "y":
break
elif ask == "n":
not_done = False
break
else:
print("Invalid input. ")
print()
elif check == "4":
self.printSeparator()
self.phone = self.getPhone()
self.phone_update = True
ask = input("Update another value? (y/n): ")
while 1:
if ask == "y":
break
elif ask == "n":
not_done = False
break
else:
print("Invalid input. ")
print()
elif check == "5":
break
else:
print("Invalid input.")
print()
self.printSeparator()
if self.name_update:
print("Patient Name: " + self.name)
if self.address_update:
print("Patient Address: " + self.address)
if self.birth_update:
print("Patient Birth Date: " + self.birth)
if self.phone_update:
print("Patient Phone Number: " + self.phone)
print()
while 1:
conf = input("Confirm updates (y/n): ")
if conf == "y":
print("Information confirmed.")
return 2
elif conf == "n":
print("Information not confirmed, returning to start.")
break
else:
print("Invalid choice, pick 'y' or 'n'")
elif ans == "3":
return 0
else:
print("Invalid choice.")
    def input_check(self, value):
        # Validates a menu choice: returns the choice if it is 1-5, otherwise 0.
        try:
            check = int(value)
            if check not in [1,2,3,4,5]:
                return 0
            else:
                return check
        except ValueError:
            return 0
def getPhone(self):
ans = True
while ans:
print()
phone = input("Input Patient Phone Number (10-digits): ")
if phone.isdigit() and len(phone) == 10:
reply = input("Confirm patient number :: " + phone + " :: (y/n): ")
if reply == "y":
ans = False
elif reply == "n":
print("Phone incorrect, returning to start.")
else:
print("Invalid input, returning to start.")
else:
print("Invalid input. Enter phone as a single number without spaces or dashes.")
print()
return phone
def getName(self):
ans = True
while ans:
print()
name = input("Input Patient Name: ")
reply = input("Confirm patient name :: " + name + " :: (y/n): ")
if reply == "y":
ans = False
elif reply == "n":
print("Name incorrect, enter again.")
else:
print("Invalid input, enter again.")
return name
def getAddress(self):
not_allowed = [chr(34), chr(39)]
ans = True
while ans:
print()
address = input("Enter Address: ")
reply = input("Confirm patient address :: " + address + " :: (y/n): ")
if reply == "y":
for each in address:
if each in not_allowed:
print("Apostrophe and Quotation characters are disallowed.")
return False, True
if len(address) > 200:
print("Address entry exceeds character limit of 200.")
return False, True
else:
return address, False
elif reply == "n":
print("Address incorrect, enter again.")
else:
print("Invalid input, enter again.")
def getBirthDate(self):
ans = True
while ans:
print()
string = input('Enter Birth Date "yyyy/mm/dd": ')
if len(string) != 10:
print("Invalid input.")
return False, True
else:
year = string[0:4]
month = string[5:7]
day = string[8:]
correctDate = None
if self.isNumber(year) and self.isNumber(month) and self.isNumber(day) and string[4] == "/" and string[7] == "/":
try:
newDate = datetime.datetime(int(year),int(month),int(day))
correctDate = True
except ValueError:
correctDate = False
if correctDate:
reply = input("Confirm patient birth date :: " + string + " :: (y/n): ")
if reply == "y":
return string,False
elif reply == "n":
print("Birth date incorrect, enter again.")
else:
print("Invalid input, enter again.")
else:
print("Invalid date.")
return False, True
def goodNumber(self,string,case):
if case == "D":
curs = self.con.cursor()
curs.execute("select * from doctor where employee_no like'"+string+"'")
rows = curs.fetchall()
if len(rows) == 0:
return False
else:
return True
elif case == "T":
curs = self.con.cursor()
curs.execute("select * from test_record where test_id like '"+string+"'")
rows = curs.fetchall()
if len(rows) ==0:
return False
else:
return True
else:
curs = self.con.cursor()
curs.execute("select * from patient where health_care_no like'"+string+"'")
rows = curs.fetchall()
if len(rows) == 0:
return False
else:
return True
def isReal(self,string,case):
if case == "D":
curs = self.con.cursor()
curs.execute("select * from doctor d, patient p where d.health_care_no=p.health_care_no and p.name like'"+string+"'")
rows = curs.fetchall()
if len(rows) == 0:
return False
else:
return True
elif case == "T":
curs = self.con.cursor()
curs.execute("select * from test_type where test_name like'"+string+"'")
rows = curs.fetchall()
if len(rows) == 0:
return False
else:
return True
elif case == "L":
curs = self.con.cursor()
curs.execute("select * from medical_lab where lab_name like '"+string+"'")
rows = curs.fetchall()
if len(rows) == 0:
return False
else:
return True
elif case == "R":
curs = self.con.cursor()
curs.execute("select * from test_record where test_id like '"+string+"'")
rows = curs.fetchall()
if len(rows) == 0:
return False
else:
return True
else:
curs = self.con.cursor()
curs.execute("select * from patient where name like'"+string+"'")
rows = curs.fetchall()
if len(rows) == 0:
return False
else:
return True
def isNumber(self, string):
return string.isdigit()
# returns the patient_no on success
def getPatient(self):
curs = self.con.cursor()
curs.execute("select name,health_care_no from patient p")
rows = curs.fetchall()
for row in rows:
print(row)
string = input('Enter Patient name or number: ')
if self.isNumber(string):
if self.goodNumber(string,"P"):
return int(string),False
else:
print("Invalid health care number.")
print()
return False,True
else:
if self.isReal(string,"P"):
return self.getPatientNumber(string),False
else:
print(string,"is not a real patient, try again")
return False,True
def getPatientNumber(self,string):
curs = self.con.cursor()
curs.execute("select * from patient p where p.name like '"+string+"'")
rows = curs.fetchall()
tmp = []
if len(rows) > 1:
while 1:
print()
print("Health Care Number | Name | Address | Date of Birth | Phone number")
for row in rows:
print(row)
tmp.append(str(row[0]))
pick = input("Enter ID of correct patient: ")
if pick in tmp:
return pick
else:
print("Incorrect value, enter valid ID of correct patient.")
else:
return rows[0][0]
def printSeparator(self):
print("")
print("-----------------------")
print("")
def getUniqueHCN(self):
curs = self.con.cursor()
curs.execute("select health_care_no from patient")
rows = curs.fetchall()
while (True):
health_care_no = random.randint(0, 10**3)
if all(health_care_no != row[0] for row in rows):
return health_care_no
def executeStatement(self, state):
print("******EXECUTING STATEMENT******")
curs = self.con.cursor()
if state == 1:
try:
curs.execute("insert into patient values (" + str(self.HCN) + ", '" + str(self.name) + "', '" + str(self.address) + "', TO_DATE('" + str(self.birth) + "', 'YYYY-MM-DD'), '" + str(self.phone) + "')")
except:
self.printSeparator()
print("SQL Database Violation. Remember, Name and Address are a unique key.")
elif state == 2:
if self.name_update and self.address_update:
curs.execute("select name, address from patient")
rows = curs.fetchall()
for row in rows:
if row[0] == self.name and row[1] == self.address:
self.printSeparator()
print("SQL Database Violation. Name and Address are a unique key.")
self.printSeparator()
return 0
if self.name_update:
try:
curs.execute("update patient set name='" + str(self.name) + "' where health_care_no=" + str(self.patient))
except:
self.printSeparator()
print("SQL Database Violation. Remember, Name and Address are a unique key.")
self.printSeparator()
if self.address_update:
try:
curs.execute("update patient set address='" + str(self.address) + "' where health_care_no=" + str(self.patient))
except:
self.printSeparator()
print("SQL Database Violation. Remember, Name and Address are a unique key.")
self.printSeparator()
if self.birth_update:
curs.execute("update patient set birth_day=TO_DATE('" + str(self.birth) + "', 'YYYY-MM-DD') where health_care_no=" + str(self.patient))
if self.phone_update:
curs.execute("update patient set phone='" + str(self.phone) + "' where health_care_no=" + str(self.patient))
self.printSeparator()
self.con.commit()
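# A safer variant of the inserts above would use cx_Oracle bind variables instead of
# string concatenation (illustrative sketch; the variable names are assumptions):
#   curs.execute("insert into patient values (:1, :2, :3, TO_DATE(:4, 'YYYY-MM-DD'), :5)",
#                (hcn, name, address, birth, phone))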
| apache-2.0 | -2,980,646,853,998,649,300 | 35.73374 | 214 | 0.409893 | false | 5.175544 | true | false | false |
our-city-app/oca-backend | src/solutions/flex/handlers.py | 1 | 18390 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import json
import os
import jinja2
import webapp2
from babel import dates, Locale
from jinja2 import StrictUndefined, Undefined
from mcfw.rpc import serialize_complex_value
from rogerthat.bizz import channel
from rogerthat.bizz.communities.communities import get_community
from rogerthat.bizz.communities.models import AppFeatures
from rogerthat.bizz.registration import get_headers_for_consent
from rogerthat.bizz.session import set_service_identity
from rogerthat.consts import DEBUG
from rogerthat.dal.profile import get_service_profile
from rogerthat.models import ServiceIdentity
from rogerthat.pages.legal import get_version_content, DOC_TERMS_SERVICE, get_current_document_version
from rogerthat.pages.login import SessionHandler
from rogerthat.rpc import users
from rogerthat.translations import DEFAULT_LANGUAGE
from rogerthat.utils.channel import send_message_to_session
from shop.bizz import get_organization_types, update_customer_consents
from shop.dal import get_customer
from solutions import translate, translations, COMMON_JS_KEYS
from solutions.common.bizz import OrganizationType, SolutionModule
from solutions.common.bizz.functionalities import get_functionalities
from solutions.common.bizz.settings import get_service_info
from solutions.common.consts import UNITS, UNIT_SYMBOLS, UNIT_PIECE, UNIT_LITER, UNIT_KG, UNIT_GRAM, UNIT_HOUR, \
UNIT_MINUTE, ORDER_TYPE_SIMPLE, ORDER_TYPE_ADVANCED, UNIT_PLATTER, UNIT_SESSION, UNIT_PERSON, UNIT_DAY, CURRENCIES
from solutions.common.dal import get_solution_settings, get_restaurant_menu, get_solution_email_settings, \
get_solution_settings_or_identity_settings
from solutions.common.models import SolutionQR, SolutionServiceConsent
from solutions.common.models.properties import MenuItemTO
from solutions.common.to import SolutionEmailSettingsTO
from solutions.flex import SOLUTION_FLEX
from solutions.jinja_extensions import TranslateExtension
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader([os.path.join(os.path.dirname(__file__), 'templates'),
os.path.join(os.path.dirname(__file__), '..', 'common', 'templates')]),
extensions=[TranslateExtension],
undefined=StrictUndefined if DEBUG else Undefined)
DEFAULT_JS_TEMPLATES = [
'inbox_messages',
'inbox_detail_messages',
'inbox_send_message_to_services',
'qanda_question_table',
'qanda_question_modules',
'qanda_question_detail',
'settings/settings_branding',
'settings/settings_branding_preview',
'settings/app_user_roles',
'settings/app_user_admins',
'settings/app_user_add_roles',
'settings/try_publish_changes',
'functionalities/functionality',
]
MODULES_JS_TEMPLATE_MAPPING = {
SolutionModule.AGENDA: [
'events_add',
'events_add_dates',
'events',
'events_events',
'events_settings',
'events_calendar_settings',
'events_uitcalendar_settings'
],
SolutionModule.APPOINTMENT: [
'timeframe_template'
],
SolutionModule.CITY_APP: [
'services/service',
'services/service_form',
'services/modules_list',
'services/service_search',
'services/service_export',
'settings/app_settings',
'settings/paddle'
],
SolutionModule.DISCUSSION_GROUPS: [
'discussion_groups/discussion_groups_list',
'discussion_groups/discussion_groups_put'
],
SolutionModule.GROUP_PURCHASE: [
'group_purchase',
'group_purchase_subscriptions'
],
SolutionModule.LOYALTY: [
'loyalty_slides',
'loyalty_slide_add',
'loyalty_tablets',
'loyalty_tablet_modal',
'loyalty_scans',
'loyalty_scans_redeem_stamps_modal',
'loyalty_lottery_add_modal',
'loyalty_customer_visits_detail_modal',
'loyalty_customer_visits_detail',
'loyalty_customer_visit',
'loyalty_lottery_history',
'loyalty_export'
],
SolutionModule.MENU: [
'menu',
'menu_additem',
'menu_editdescription',
'menu_edit_image',
'menu_import'
],
SolutionModule.ORDER: [
'order',
'order_list',
'pause_orders_modal',
'timeframe_template',
'menu',
'menu_import',
'menu_additem',
'menu_editdescription',
'menu_edit_image',
'payments',
'payconiq_nl',
],
SolutionModule.PHARMACY_ORDER: [
'pharmacy_order',
'pharmacy_order_list'
],
SolutionModule.RESTAURANT_RESERVATION: [
'reservation_addshift',
'reservation_addtable',
'reservation_broken_reservations',
'reservation_delete_table_confirmation',
'reservation_editreservation',
'reservation_edittables',
'reservation_no_shift_found',
'reservation_shiftcontents',
'reservation_tablecontents',
'reservation_update_reservation_tables',
'reservations'
],
SolutionModule.REPAIR: [
'repair_order'
],
SolutionModule.SANDWICH_BAR: [
'sandwiches_order_inbox_detail',
'sandwiches_list_item'
],
SolutionModule.STATIC_CONTENT: [
'static_content/static_content_select_icon',
'static_content/static_content'
],
SolutionModule.HIDDEN_CITY_WIDE_LOTTERY: [
'loyalty_lottery_add_modal',
'loyalty_customer_visits_detail_modal',
'loyalty_customer_visits_detail',
'loyalty_customer_visit',
'loyalty_lottery_history',
'loyalty_slides',
'loyalty_slide_add'
],
}
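# Illustrative note (added): for a service with only the ORDER module enabled,
# _get_templates() below renders DEFAULT_JS_TEMPLATES plus the
# MODULES_JS_TEMPLATE_MAPPING[SolutionModule.ORDER] entries ('order', 'menu',
# 'payments', ...) and returns them as a single JSON object keyed by template name.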
class FlexHomeHandler(webapp2.RequestHandler):
def _get_location_templates(self, service_user, language):
tmpl_params = {'language': language,
'debug': DEBUG,
'service_user_email': service_user}
templates = {}
templates_to_get = {'location'}
for tmpl in templates_to_get:
templates[tmpl] = JINJA_ENVIRONMENT.get_template(tmpl + '.html').render(tmpl_params)
templates = json.dumps(templates)
return templates
def _get_templates(self, lang, currency, modules):
# type: (str, str, list[str]) -> str
tmpl_params = {
'language': lang or DEFAULT_LANGUAGE,
'debug': DEBUG,
'currency': currency,
}
templates = {}
templates_to_get = set(DEFAULT_JS_TEMPLATES)
for module in modules:
for tmpl in MODULES_JS_TEMPLATE_MAPPING.get(module, []):
templates_to_get.add(tmpl)
for tmpl in templates_to_get:
templates[tmpl] = JINJA_ENVIRONMENT.get_template(tmpl + '.html').render(tmpl_params)
templates = json.dumps(templates)
return templates
def _get_qr_codes(self, sln_settings, service_identity):
if SolutionModule.QR_CODES in sln_settings.modules:
return SolutionQR.list_by_user(sln_settings.service_user, service_identity, sln_settings.solution)
else:
return []
def _get_days(self, language):
return [(k, v.capitalize()) for k, v in dates.get_day_names('wide', locale=language).items()]
def _get_months(self, language, width):
return [v.capitalize() for _, v in dates.get_month_names(width, locale=language).items()]
def _get_day_str(self, language, day):
return dates.get_day_names('wide', locale=language)[day].capitalize()
def _get_week_days(self, language):
return [self._get_day_str(language, day) for day in [6, 0, 1, 2, 3, 4, 5]]
def get(self):
service_user = users.get_current_user()
if not service_user:
self.redirect("/ourcityapp")
return
sln_settings = get_solution_settings(service_user)
if not sln_settings or sln_settings.solution != SOLUTION_FLEX:
self.redirect("/ourcityapp")
return
session_ = users.get_current_session()
lang = sln_settings.main_language or DEFAULT_LANGUAGE
all_translations = {key: translate(lang, key) for key in translations[DEFAULT_LANGUAGE]}
for other_key, key in COMMON_JS_KEYS.iteritems():
all_translations[other_key] = all_translations[key]
service_identity = session_.service_identity if session_.service_identity else ServiceIdentity.DEFAULT
service_info = get_service_info(service_user, service_identity)
if sln_settings.identities:
if not session_.service_identity:
jinja_template = JINJA_ENVIRONMENT.get_template('locations.html')
params = {
'language': lang,
'debug': DEBUG,
'templates': self._get_location_templates(service_user, lang),
'service_name': service_info.name,
'service_user_email': service_user.email().encode("utf-8"),
'currency': service_info.currency,
'translations': json.dumps(all_translations)
}
channel.append_firebase_params(params)
self.response.out.write(jinja_template.render(params))
return
elif session_.service_identity:
session_ = set_service_identity(session_, None)
        # Don't require terms of use for:
# - shop users (admins)
# - cities logging in on other services their dashboard (layout_only)
# - cirklo-only customers
must_check_tos = not session_.layout_only and not session_.shop and not sln_settings.ciklo_vouchers_only()
service_profile = get_service_profile(service_user)
if must_check_tos:
lastest_tos_version = get_current_document_version(DOC_TERMS_SERVICE)
if service_profile.tos_version != lastest_tos_version:
self.redirect('/terms')
return
sln_i_settings = get_solution_settings_or_identity_settings(sln_settings, service_identity)
customer = get_customer(service_user)
jinja_template = JINJA_ENVIRONMENT.get_template('index.html')
days = self._get_days(lang)
day_flags = [(pow(2, day_num), day_name) for day_num, day_name in days]
months = self._get_months(lang, 'wide')
months_short = self._get_months(lang, 'abbreviated')
week_days = self._get_week_days(lang)
loyalty_version = self.request.get("loyalty")
community = get_community(service_profile.community_id)
locale = Locale.parse(lang)
currency_symbols = {currency: locale.currency_symbols.get(currency, currency) for currency in CURRENCIES}
consts = {
'UNIT_PIECE': UNIT_PIECE,
'UNIT_LITER': UNIT_LITER,
'UNIT_KG': UNIT_KG,
'UNIT_GRAM': UNIT_GRAM,
'UNIT_HOUR': UNIT_HOUR,
'UNIT_MINUTE': UNIT_MINUTE,
'UNIT_DAY': UNIT_DAY,
'UNIT_PERSON': UNIT_PERSON,
'UNIT_SESSION': UNIT_SESSION,
'UNIT_PLATTER': UNIT_PLATTER,
'ORDER_TYPE_SIMPLE': ORDER_TYPE_SIMPLE,
'ORDER_TYPE_ADVANCED': ORDER_TYPE_ADVANCED,
'ORDER_ITEM_VISIBLE_IN_MENU': MenuItemTO.VISIBLE_IN_MENU,
'ORDER_ITEM_VISIBLE_IN_ORDER': MenuItemTO.VISIBLE_IN_ORDER,
'ORGANIZATION_TYPES': {
'CITY': OrganizationType.CITY,
'EMERGENCY': OrganizationType.EMERGENCY,
'PROFIT': OrganizationType.PROFIT,
'NON_PROFIT': OrganizationType.NON_PROFIT,
},
'CURRENCY_SYMBOLS': currency_symbols
}
functionality_modules = functionality_info = None
if community.signup_enabled:
functionality_modules, functionality_info = map(json.dumps, get_functionalities(
lang, sln_settings.modules, sln_settings.get_activated_modules(), community))
is_city = service_user == community.main_service_user
news_review_enabled = AppFeatures.NEWS_REVIEW in community.features
default_router_location = u'#/functionalities'
if sln_settings.ciklo_vouchers_only():
default_router_location = u'#/vouchers'
elif not functionality_modules:
default_router_location = u'#/news'
organization_types = get_organization_types(customer, community.default_app, lang, include_all=True)
currency = service_info.currency
params = {'language': lang,
'sln_settings': sln_settings,
'sln_i_settings': sln_i_settings,
'hidden_by_city': sln_settings.hidden_by_city,
'debug': DEBUG,
'templates': self._get_templates(lang, currency, sln_settings.modules),
'service_name': service_info.name,
'service_user_email': service_user.email().encode("utf-8"),
'service_identity': service_identity,
'qr_codes': self._get_qr_codes(sln_settings, service_identity),
'SolutionModule': SolutionModule,
'days': days,
'day_flags': day_flags,
'months': months,
'months_short': months_short,
'week_days': week_days,
'customer': customer,
'loyalty': True if loyalty_version else False,
'functionality_modules': functionality_modules,
'functionality_info': functionality_info,
'email_settings': json.dumps(serialize_complex_value(
SolutionEmailSettingsTO.fromModel(get_solution_email_settings(), service_user),
SolutionEmailSettingsTO, False)),
'currency': currency,
'is_layout_user': session_.layout_only if session_ else False,
'UNITS': json.dumps(UNITS),
'UNIT_SYMBOLS': json.dumps(UNIT_SYMBOLS),
'CONSTS': consts,
'CONSTS_JSON': json.dumps(consts),
'modules': json.dumps(sln_settings.modules),
'provisioned_modules': json.dumps(sln_settings.provisioned_modules),
'translations': json.dumps(all_translations),
'organization_types': organization_types,
'organization_types_json': json.dumps(dict(organization_types)),
'is_city': is_city,
'news_review_enabled': news_review_enabled,
'can_edit_paddle': is_city and session_.shop,
'default_router_location': default_router_location
}
if SolutionModule.BULK_INVITE in sln_settings.modules:
params['bulk_invite_message'] = translate(lang, "settings-bulk-invite-message", app_name=community.name)
params['menu'] = get_restaurant_menu(service_user) if SolutionModule.MENU in sln_settings.modules else None
channel.append_firebase_params(params)
self.response.out.write(jinja_template.render(params))
class FlexLogoutHandler(SessionHandler):
def get(self):
service_user = users.get_current_user()
sln_settings = get_solution_settings(service_user)
if not sln_settings or sln_settings.solution != SOLUTION_FLEX or not sln_settings.identities:
self.stop_session()
return self.redirect('/ourcityapp')
session_ = users.get_current_session()
if session_.service_identity:
session_ = set_service_identity(session_, None)
send_message_to_session(service_user, session_, u"solutions.common.locations.update", si=None)
self.redirect('/ourcityapp')
class TermsAndConditionsHandler(SessionHandler):
def get(self):
service_user = users.get_current_user()
if not service_user:
self.redirect('/ourcityapp')
return
sln_settings = get_solution_settings(service_user)
if not sln_settings:
self.stop_session()
return self.redirect('/ourcityapp')
lang = sln_settings.main_language
version = get_current_document_version(DOC_TERMS_SERVICE)
params = {
'tac': get_version_content(lang, DOC_TERMS_SERVICE, version),
'tac_version': version,
'language': lang,
'show_email_checkboxes': get_customer(service_user) is not None,
}
jinja_template = JINJA_ENVIRONMENT.get_template('terms.html')
self.response.out.write(jinja_template.render(params))
def post(self):
service_user = users.get_current_user()
if not service_user:
self.redirect('/ourcityapp')
return
sln_settings = get_solution_settings(service_user)
if not sln_settings:
self.stop_session()
return self.redirect('/ourcityapp')
version = long(self.request.get('version')) or get_current_document_version(DOC_TERMS_SERVICE)
customer = get_customer(service_user)
if customer:
context = u'User terms'
update_customer_consents(customer.user_email, {
SolutionServiceConsent.TYPE_NEWSLETTER: self.request.get(
SolutionServiceConsent.TYPE_NEWSLETTER) == 'on',
SolutionServiceConsent.TYPE_EMAIL_MARKETING: self.request.get(
SolutionServiceConsent.TYPE_EMAIL_MARKETING) == 'on'
}, get_headers_for_consent(self.request), context)
service_profile = get_service_profile(service_user)
service_profile.tos_version = version
service_profile.put()
self.redirect('/')
| apache-2.0 | -8,495,946,109,372,370,000 | 40.418919 | 118 | 0.627569 | false | 3.914432 | false | false | false |
danielfreeman11/convex-nets | LaunchScripts/ConnectedCompsCalcWide_Debug.py | 1 | 22575 | import numpy as np
import sys
#Usage:
#python thisprog.py threshold numofnetworks
#Will randomly initialize numofnetworks neural networks and train them until the error on a training set is less than threshold
#Will then try to interpolate between these networks while keeping error below that of threshold.
#Will tabulate the number of connected components found in this way
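#Hypothetical invocation (the argument values are placeholders, not from the original repo):
#    python ConnectedCompsCalcWide_Debug.py 42 4
#picks thresh = threshrange[42 % 100] and trains 4 networks before probing connectivity.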
#Simple data generator: given three coefficients a,b,c, draws a random x-value in [0,1) and evaluates
#the quadratic function a*x^2 + b*x + c at that value.
def func(x,a,b,c):
return x*x*a + x*b + c
def generatecandidate3(a,b,c):
candidate = [np.random.random() for x in xrange(1)]
candidatesolutions = [func(x,a,b,c) for x in candidate]
return candidate, candidatesolutions
import copy
alpha,hidden_dim,hidden_dim2 = (.001,12,4)
threshrange = np.linspace(.03,.1,101)
thresh = threshrange[int(sys.argv[1])%100]
synapses = []
#Testing starting in the same place
#synapse0 = 2*np.random.random((1,hidden_dim)) - 1
#synapse1 = 2*np.random.random((hidden_dim,hidden_dim2)) - 1
#synapse2 = 2*np.random.random((hidden_dim2,1)) - 1
for i in xrange(int(sys.argv[2])):
synapse_0 = 2*np.random.random((1,hidden_dim)) - 1
synapse_1 = 2*np.random.random((hidden_dim,1)) - 1
#synapse_2 = 2*np.random.random((hidden_dim2,1)) - 1
#synapse_0 = copy.deepcopy(synapse0)
#synapse_1 = copy.deepcopy(synapse1)
#synapse_2 = copy.deepcopy(synapse2)
#remove the comment to get random initialization
stopcond = True
while stopcond:
#print 'epoch:' + str(e)
X = []
y = []
for i in xrange(10000):
a,b = generatecandidate3(.5,.25,.1)
X.append(a)
y.append(b)
X= np.array(X)
y=np.array(y)
j = 0
while stopcond:
#if j%5000 == 0: print j
layer_1 = 1/(1+np.exp(-(np.dot(X,synapse_0))))
#if(False):
# dropout_percent = .1
# layer_1 *= np.random.binomial([np.ones((len(X),hidden_dim))],1-dropout_percent)[0] * (1.0/(1-dropout_percent))
layer_2 = 1/(1+np.exp(-(np.dot(layer_1,synapse_1))))
#if(True):
# dropout_percent = .2
# layer_2 *= np.random.binomial([np.ones((len(layer_1),hidden_dim2))],1-dropout_percent)[0] * (1.0/(1-dropout_percent))
#layer_3 = 1/(1+np.exp(-(np.dot(layer_2,synapse_2))))
#if(False):
# dropout_percent = .25
# layer_2 *= np.random.binomial([np.ones((len(layer_2),2))],1-dropout_percent)[0] * (1.0/(1-dropout_percent))
layer_2_delta = (layer_2- y)*(layer_2*(1-layer_2))
#layer_2_delta = layer_3_delta.dot(synapse_2.T) * (layer_2 * (1-layer_2))
layer_1_delta = layer_2_delta.dot(synapse_1.T) * (layer_1 * (1-layer_1))
#synapse_2 -= (alpha * layer_2.T.dot(layer_3_delta))
synapse_1 -= (alpha * layer_1.T.dot(layer_2_delta))
synapse_0 -= (alpha * X.T.dot(layer_1_delta))
# how much did we miss the target value?
layer_2_error = layer_2 - y
if (j%50) == 0:
print "Error after "+str(j)+" iterations:" + str(np.mean(np.abs(layer_2_error)))
if np.mean(np.abs(layer_2_error)) < thresh:
#print "Changing stopcond!"
stopcond = False
j+=1
#remove the comment to get random initialization
synapses.append([synapse_0,synapse_1])#,synapse_2])
#Idea: Take two networks as input. Construct a string connecting the two networks with "beads" along the string.
#Stochastically (monte carlo? simulated annealing?) wiggle the beads until the max on the beads is minimized
from random import gauss
import copy
def make_rand_vector(dims):
vec = [gauss(0, 1) for i in range(dims)]
mag = sum(x**2 for x in vec) ** .5
return [x/mag for x in vec]
#Definition for test set:
'''X = []
y = []
for i in xrange(100):
j = i/100.
a,b = [[j],[func(j,.5,.25,.1)]]
X.append(a)
y.append(b)
X= np.array(X)
y=np.array(y)'''
#returns a later thats t-between synapse1 and synapse2 (t ranges from 0 to 1)
def synapse_interpolate(synapse1, synapse2, t):
return (synapse2-synapse1)*t + synapse1
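#Quick sanity check (illustrative comment, not in the original script):
#synapse_interpolate(s1, s2, 0.) returns s1, synapse_interpolate(s1, s2, 1.) returns s2,
#and t=0.5 gives the elementwise midpoint (s1 + s2)/2 for each weight matrix.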
X = []
y = []
def GenTest(X, y):
X = []
y = []
for i in xrange(1000):
a,b = generatecandidate3(.5,.25,.1)
X.append(a)
y.append(b)
return np.array(X), np.array(y)
X, y = GenTest(X,y)
#Simple container to hold the weights defined on the beads
class WeightString:
def __init__(self, w1, w2, numbeads, threshold, springk):
self.w1 = w1
self.w2 = w2
self.beads = []
self.velocity = []
self.threshold = threshold
self.springk = springk
for n in xrange(numbeads):
beaddata = []
for k in xrange(len(self.w1)):
beaddata.append(synapse_interpolate(self.w1[k],self.w2[k], (n + 1.)/(numbeads+1.)))
self.beads.append(beaddata)
self.velocity = copy.deepcopy(self.beads)
for b in self.velocity:
for v in b:
v = 0.*v
#self.beads.reverse()
self.InitialEnergy = self.SpringEnergy()
self.AllBeads = copy.deepcopy(self.beads)
self.AllBeads.insert(0,self.w1)
self.AllBeads.append(self.w2)
self.ConvergedList = [False for f in xrange(len(self.AllBeads))]
self.ConvergedList[0] = True
self.ConvergedList[-1] = True
def SpringNorm(self, order):
total = 0.
#Energy between mobile beads
for i,b in enumerate(self.AllBeads):
if i < len(self.AllBeads)-1:
#print "Tallying energy between bead " + str(i) + " and bead " + str(i+1)
subtotal = 0.
for j in xrange(len(b)):
subtotal += np.linalg.norm(np.subtract(self.AllBeads[i][j],self.AllBeads[i+1][j]),ord=order)#/len(self.beads[0][j])
total+=subtotal
return total#/len(self.beads)
def SpringEnergy(self):
total = 0.
#Energy between the pinned, immobile weight and the first bead
subtotal = 0.
for j in xrange(len(self.beads[0])):
subtotal += np.linalg.norm(np.subtract(self.w1[j],self.beads[0][j]),ord=2)/len(self.beads[0][j])
total+=subtotal
#Energy between mobile beads
for i,b in enumerate(self.beads):
if i < len(self.beads)-1:
#print "Tallying energy between bead " + str(i) + " and bead " + str(i+1)
subtotal = 0.
for j in xrange(len(b)):
subtotal += np.linalg.norm(np.subtract(self.beads[i][j],self.beads[i+1][j]),ord=2)/len(self.beads[0][j])
total+=subtotal
#Energy between pinned, immobile final weights, and the last bead
subtotal = 0.
for j in xrange(len(self.beads[-1])):
subtotal += np.linalg.norm(np.subtract(self.w2[j],self.beads[-1][j]),ord=2)/len(self.beads[0][j])
total+=subtotal
return total/len(self.beads)
def SGDBead(self, bead, X, y):
layers = []
l1 = 1/(1+np.exp(-(np.dot(X,self.AllBeads[bead][0]))))
layers.append(l1)
for i,b in enumerate(self.AllBeads[bead][1:]):
l = 1/(1+np.exp(-(np.dot(layers[-1],b))))
layers.append(l)
layersdelta = []
l3 = (layers[-1] - y)*(layers[-1]*(1-layers[-1])) #+ (1./regparam)*OldSpringEnergy*np.ones(np.shape(y))
layersdelta.append(l3)
for i,l in enumerate(layers[:-1]):
ldelta = layersdelta[-1].dot(self.AllBeads[bead][-1-i].T) * (layers[:-1][-1-i]) * (1- (layers[:-1][-1-i]))
layersdelta.append(ldelta)
for i in xrange(len(layers)-1):
if -i-1 != 0:
self.AllBeads[bead][-i-1] -= .001*layers[-i-2].T.dot(layersdelta[i])
else:
self.AllBeads[bead][0] -= .001*X.T.dot(layersdelta[-1])
finalerror = (layers[-1] - y)
return np.mean(np.abs(finalerror))
#monte carlo update step
def UpdateBead(self, temperature, bead, X, y):
regparam = 100.
OldSpringEnergy = self.SpringEnergy()
OldMax = [EvalNet(b,X)-y for b in self.beads]
OldMaxError = max([np.mean(np.abs(om)) for om in OldMax])
oe = OldSpringEnergy/100000. + OldMaxError
#print "Old SE: " + str(OldSpringEnergy)
#print "Old Max: " + str(OldMax)
####print "Oldmaxerror: " + str(OldMaxError)
oldweight = copy.deepcopy(self.beads[bead])
layers = []
#print bead[0]
l1 = 1/(1+np.exp(-(np.dot(X,self.beads[bead][0]))))
layers.append(l1)
for i,b in enumerate(self.beads[bead][1:]):
l = 1/(1+np.exp(-(np.dot(layers[-1],b))))
layers.append(l)
#layer_3_delta = (layer_3- y)*(layer_3*(1-layer_3))
#layer_2_delta = layer_3_delta.dot(synapse_2.T) * (layer_2 * (1-layer_2))
#layer_1_delta = layer_2_delta.dot(synapse_1.T) * (layer_1 * (1-layer_1))
#layersdelta = []
layersdelta = []
l3 = (layers[-1] - y)*(layers[-1]*(1-layers[-1])) #+ (1./regparam)*OldSpringEnergy*np.ones(np.shape(y))
layersdelta.append(l3)
for i,l in enumerate(layers[:-1]):
ldelta = layersdelta[-1].dot(self.beads[bead][-1-i].T) * (layers[:-1][-1-i]) * (1- (layers[:-1][-1-i]))
layersdelta.append(ldelta)
for i in xrange(len(layers)-1):
#print i
#print self.beads[bead][-i-1]
#rint layers[-i-2].T
#print layersdelta[-i-1]
#print layers[-i-2].T.dot(layersdelta[-i-1])
if -i-1 != 0:
self.beads[bead][-i-1] -= .1*layers[-i-2].T.dot(layersdelta[i])
else:
self.beads[bead][0] -= .1*X.T.dot(layersdelta[-1])
#The code below regularizes the network so that they stay near each other in weight space
'''if bead == 0:
self.beads[bead][-i-1] -= (np.subtract(self.beads[bead][-i-1],self.w1[-i-1]) + np.subtract(self.beads[bead+1][-i-1],self.beads[bead][-i-1]))/regparam
if bead == len(self.beads)-1:
self.beads[bead][-i-1] -= (np.subtract(self.w2[-i-1],self.beads[bead][-i-1]) + np.subtract(self.beads[bead][-i-1],self.beads[bead-1][-i-1]))/regparam
if (bead > 0 and bead < len(self.beads)-1):
self.beads[bead][-i-1] -= (np.subtract(self.beads[bead+1][-i-1],self.beads[bead][-i-1]) + \
np.subtract(self.beads[bead][-i-1],self.beads[bead-1][-i-1]))/regparam'''
#layers.reverse()
# how much did we miss the target value?
NewSpringEnergy = self.SpringEnergy()
finalerror = (layers[-1] - y) #(1./regparam)*NewSpringEnergy*np.ones(np.shape(y))
NewMaxError = np.mean(np.abs(finalerror))
#print "New SE: " + str(NewSpringEnergy)
#print "Old Max: " + str(OldMax)
####print "Newmaxerror: " + str(NewMaxError)
ne = NewSpringEnergy/100000. + NewMaxError
#print "Newtotal: " + str(ne)
####print "\n"
myrand = np.random.rand()
####print "rand is: " + str(myrand) + " and boltzmann weight is " + str(np.exp(-(NewSpringEnergy - OldSpringEnergy)/temperature))
if NewSpringEnergy > OldSpringEnergy:
#if NewSpringEnergy > self.InitialEnergy:
if NewMaxError > OldMaxError:
self.beads[bead]=oldweight
else:
if myrand > np.exp(-(NewSpringEnergy - OldSpringEnergy)/temperature):
#if myrand > np.exp(-(NewSpringEnergy - self.InitialEnergy)/temperature):
#print "Rejecting proposal"
self.beads[bead]=oldweight
return True
#def JUST MAKE A PURE KINETIC EVOLVER, SWAP BETWEEN KINETIC EVOLUTION AND GRADIENT DESCENT
def UpdateKinetic(self, dt, k):
for bead in xrange(len(self.beads)):
for i in xrange(len(self.beads[bead])):
self.beads[bead][i] += dt*self.velocity[bead][i]
for bead in xrange(len(self.beads)):
for i in xrange(len(self.beads[bead])):
if bead == 0:
self.velocity[bead][i] += -dt*k*(np.subtract(self.beads[bead][i],self.w1[i]) + np.subtract(self.beads[bead+1][i],self.beads[bead][i]))
if bead == len(self.beads)-1:
self.velocity[bead][i] += -dt*k*(np.subtract(self.w2[i],self.beads[bead][i]) + np.subtract(self.beads[bead][i],self.beads[bead-1][i]))
if (bead > 0 and bead < len(self.beads)-1):
self.velocity[bead][i] += -dt*k*(np.subtract(self.beads[bead+1][i],self.beads[bead][i]) + \
np.subtract(self.beads[bead][i],self.beads[bead-1][i]))
#self.velocity[bead][i] -= .1*self.velocity[bead][i]
#monte carlo update step
def UpdateBeadPureKinetic(self, temperature, bead):
OldSpringEnergy = self.SpringEnergy()
#OldMax = [EvalNet(b,X)-y for b in self.beads]
#OldMaxError = max([np.mean(np.abs(om)) for om in OldMax])
#oe = OldSpringEnergy/100000. + OldMaxError
##print "Old SE: " + str(OldSpringEnergy)
#print "Old Max: " + str(OldMax)
#print "Oldmaxerror: " + str(OldMaxError)
#print "Oldtotal: " + str(oe)
oldweight = copy.deepcopy(self.beads[bead])
randupdates = []
for i,syn in enumerate(self.beads[bead]):
#create random perturbation to weight matrix with correct shape
addtobead = np.reshape(make_rand_vector(syn.size),syn.shape)
#add it to this particular bead
self.beads[bead][i]+=.1*addtobead
NewSpringEnergy = self.SpringEnergy()
#NewMax = [EvalNet(b,X)-y for b in self.beads]
#NewMaxError = max([np.mean(np.abs(om)) for om in OldMax])
##print "New SE: " + str(OldSpringEnergy)
#print "Old Max: " + str(OldMax)
#print "Newmaxerror: " + str(OldMaxError)
#ne = NewSpringEnergy/100000. + NewMaxError
#print "Newtotal: " + str(ne)
##print "\n"
#Gibbs sampling
#if OldSpringError/100. + OldMaxError < NewSpringError/100. + NewMaxError:
myrand = np.random.rand()
##print "rand is: " + str(myrand) + " and boltzmann weight is " + str(np.exp(-(NewSpringEnergy - OldSpringEnergy)/temperature))
if NewSpringEnergy > OldSpringEnergy:
if myrand > np.exp(-(NewSpringEnergy - OldSpringEnergy)/temperature):
##print "Rejecting proposal"
self.beads[bead]=oldweight
return True
test = WeightString(synapses[0],synapses[1],5,1,1)
#Simple function to evaluate network
def EvalNet(net, X):
layer_1 = 1/(1+np.exp(-(np.dot(X,net[0]))))
layer_2 = 1/(1+np.exp(-(np.dot(layer_1,net[1]))))
#layer_3 = 1/(1+np.exp(-(np.dot(layer_2,net[2]))))
# how much did we miss the target value?
#layer_3_error = layer_3 - y
return layer_2
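#Illustrative use (added comment): EvalNet(net, X) is a forward pass through the
#two-layer sigmoid network, e.g. predictions = EvalNet(synapses[0], X) once training
#has driven the error below thresh.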
def BeadError(X, y, bead):
X= np.array(X)
y=np.array(y)
layer_1 = 1/(1+np.exp(-(np.dot(X,bead[0]))))
layer_2 = 1/(1+np.exp(-(np.dot(layer_1,bead[1]))))
#layer_3 = 1/(1+np.exp(-(np.dot(layer_2,bead[2]))))
# how much did we miss the target value?
layer_2_error = layer_2 - y
return np.mean(np.abs(layer_2_error))
def InterpBeadError(X, y, bead1, bead2, write = False, name = "00"):
'''X = []
y = []
for i in xrange(1000):
a,b = generatecandidate3(.5,.25,.1)
X.append(a)
y.append(b)'''
X= np.array(X)
y=np.array(y)
errors = []
for tt in xrange(100):
#Should make this architecture independent at some point
t = tt/100.
layer_1 = 1/(1+np.exp(-(np.dot(X,synapse_interpolate(bead1[0],bead2[0],t)))))
layer_2 = 1/(1+np.exp(-(np.dot(layer_1,synapse_interpolate(bead1[1],bead2[1],t)))))
#layer_3 = 1/(1+np.exp(-(np.dot(layer_2,synapse_interpolate(bead1[2],bead2[2],t)))))
# how much did we miss the target value?
layer_2_error = layer_2 - y
errors.append(np.mean(np.abs(layer_2_error)))
if write == True:
with open("f" + str(name) + ".out",'w+') as f:
for e in errors:
f.write(str(e) + "\n")
return max(errors)
results = []
connecteddict = {}
for i1 in xrange(len(synapses)):
connecteddict[i1] = 'not connected'
for i1 in xrange(len(synapses)):
#print i1
for i2 in xrange(len(synapses)):
if i2 > i1 and ((connecteddict[i1] != connecteddict[i2]) or (connecteddict[i1] == 'not connected' or connecteddict[i2] == 'not connected')) :
test = WeightString(synapses[i1],synapses[i2],1,1,1)
training_threshold = thresh
depth = 0
d_max = 10
#Check error between beads
#Alg: for each bead at depth i, SGD until converged.
#For beads with max error along path too large, add another bead between them, repeat
while (depth < d_max):
X, y = GenTest(X,y)
counter = 0
for i,c in enumerate(test.ConvergedList):
if c == False:
error = BeadError(X, y, test.AllBeads[i])
#print error
while error > .5 * training_threshold and counter < 40000:
counter += 1
error = test.SGDBead(i, X, y)
if counter%5000==0:
print counter
print error
test.ConvergedList[i] = True
#print test.ConvergedList
interperrors = []
for b in xrange(len(test.AllBeads)-1):
e = InterpBeadError(X,y,test.AllBeads[b],test.AllBeads[b+1])
interperrors.append(e)
#print interperrors
if max(interperrors) < training_threshold:
depth = 2*d_max
#print test.ConvergedList
#print test.SpringNorm(2)
#print "Done!"
else:
#Interperrors stores the maximum error on the path between beads
#shift index to account for added beads
shift = 0
for i, ie in enumerate(interperrors):
if ie > training_threshold:
beaddata = []
for k in xrange(len(test.w1)):
beaddata.append(synapse_interpolate(test.AllBeads[i+shift][k],test.AllBeads[i+shift+1][k], .5))
test.AllBeads.insert(i+shift+1,beaddata)
test.ConvergedList.insert(i+shift+1, False)
shift+=1
#print test.ConvergedList
#print test.SpringNorm(2)
#print d_max
depth += 1
if depth == 2*d_max:
results.append([i1,i2,test.SpringNorm(2),"Connected"])
if connecteddict[i1] == 'not connected' and connecteddict[i2] == 'not connected':
connecteddict[i1] = i1
connecteddict[i2] = i1
if connecteddict[i1] == 'not connected':
connecteddict[i1] = connecteddict[i2]
else:
if connecteddict[i2] == 'not connected':
connecteddict[i2] = connecteddict[i1]
else:
if connecteddict[i1] != 'not connected' and connecteddict[i2] != 'not connected':
hold = connecteddict[i2]
connecteddict[i2] = connecteddict[i1]
for h in xrange(len(synapses)):
if connecteddict[h] == hold:
connecteddict[h] = connecteddict[i1]
else:
results.append([i1,i2,test.SpringNorm(2),"Disconnected"])
#print results[-1]
uniquecomps = []
totalcomps = 0
for i in xrange(len(synapses)):
if not (connecteddict[i] in uniquecomps):
uniquecomps.append(connecteddict[i])
if connecteddict[i] == 'not connected':
totalcomps += 1
#print i,connecteddict[i]
notconoffset = 0
if 'not connected' in uniquecomps:
notconoffset = -1
print "Thresh: " + str(thresh)
print "Comps: " + str(len(uniquecomps) + notconoffset + totalcomps)
#for i in xrange(len(synapses)):
# print connecteddict[i]
connsum = []
for r in results:
if r[3] == "Connected":
connsum.append(r[2])
#print r[2]
print "***"
print np.average(connsum)
print np.std(connsum)
| mit | 6,229,132,581,789,846,000 | 31.896396 | 165 | 0.51247 | false | 3.336043 | true | false | false |
jephdo/grigri | grigri/tools.py | 1 | 3758 | # -*- coding: utf-8 -*-
"""
grigri.tools
~~~~~~~~~~~~~~~~~~
    Miscellaneous helper functions for type checking and small data manipulations.
"""
from datetime import datetime, date
from math import pi, sin, cos, atan2, sqrt, floor, ceil
import pandas as pd
from dateutil.parser import parse
def is_null(*args):
"""
Returns the first non-null value. Similar to T-SQL COALESCE() function.
>>> is_null(None, float('nan'), 'hello')
'hello'
"""
for k in args:
if not pd.isnull(k):
return k
# if everything is null then return the last argument
return args[-1]
def is_numeric(n):
"""
Tests if an object is interpretable as a number.
>>> is_numeric('1')
True
"""
try:
float(n)
return True
except (ValueError, TypeError):
return False
def is_date(dt, strict=True):
"""
Tests if an object is interpretable as a datetime object.
:param dt: object to test as a datetime
:param strict: If set to `False` will also try to interpret strings as
dates.
"""
if isinstance(dt, (datetime, date)):
return True
if not strict:
try:
if dt not in (' ', '-', ''):
parse(dt)
return True
except (AttributeError, ValueError):
pass
return False
def is_empty(data):
"""
Checks if an object, particularly :class:`Series` and :class:`DataFrame`,
contains any values.
.. note::
``DataFrame`` objects have an ``empty`` attribute, but ``Series`` don't.
This function allows you to check both data structures using only one
function.
"""
try:
return not bool(data)
except ValueError:
pass
try:
return data.empty
# Series objects do not have an empty method, so check
# if there are any values
except AttributeError:
if data.tolist():
return False
return True
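# Example sketch (illustrative values):
#     is_empty(pd.DataFrame())     -> True
#     is_empty(pd.Series([1, 2]))  -> False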
def percent_change(before, after):
"""
Return percent change increase or decrease between two numbers.
>>> percent_change(100, 110)
0.1
"""
try:
return (1. * after - before) / before
except ZeroDivisionError:
return float('nan')
def find_column_name(frame, column_name):
"""
Searches for the desired column in a DataFrame and returns its name.
This situation arises when you pull data from a data source (e.g. SQL)
and you know the column name is installationid, but case-sensitivity may be
an issue.
"""
column_match = column_name.lower()
for col in frame.columns:
if col.lower() == column_match:
return col
raise KeyError("Cannot find column in DataFrame: %s" % column_name)
def split_sequence(data, n):
"""
Splits a Series or DataFrame (or any list-like object) into chunks.
:param data: List-like data-structure (list, DataFrame, Series,...) to
be split.
:param n: Number of chunks to split `data` into. Chunks will be as equal
size as possible.
.. warning::
Data is not sorted by ``split_sequence`` and will be split as given.
You must pre-sort if necessary.
"""
L = len(data)
split_size = int(ceil(1. * L / n))
return (data[i:i+ split_size] for i in range(0, L, split_size))
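# Example sketch: list(split_sequence(list(range(10)), 3)) -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
# (chunk size is ceil(10 / 3) == 4, so the final chunk is smaller)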
def ditto(frames, meth, *args, **kwargs):
"""
Applies the same method to one or more DataFrames. Any args or kwargs necessary
to call `meth` should also be passed in.
:param frames: List of DataFrames.
:param meth: Name of DataFrame method to call on each DataFrame in `frames`.
"""
return [getattr(frame, meth)(*args, **kwargs) for frame in frames]
| mit | -1,809,279,128,360,851,200 | 23.887417 | 84 | 0.600585 | false | 4.089227 | false | false | false |
vimilimiv/weibo-popularity_judge-and-content_optimization | 数据处理/get_keyword_feature.py | 1 | 3446 | #-------------------------------------------------------------------------------
# coding=utf8
# Name:        Module 1
# Purpose:
#
# Author: zhx
#
# Created: 10/05/2016
# Copyright: (c) zhx 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
import openpyxl
import jieba
threshold = 2140
popular = 0
def main():
cctv_data = openpyxl.load_workbook("cctv.xlsx")
cctv_keywords = openpyxl.load_workbook("cctv_keywords.xlsx")
cctv_new = openpyxl.Workbook()
new_sheet = cctv_new.active
#print cctv_data.get_sheet_names()
sheet1 = cctv_keywords["Sheet"]
sheet2 = cctv_data["Sheet"]
words = {}
for r in xrange(1,36003):
word = sheet1.cell(row=r,column=1).value
word_min = sheet1.cell(row=r,column=2).value
word_max = sheet1.cell(row=r,column=3).value
word_mean = sheet1.cell(row=r,column=4).value
words[word] = [word_min,word_max,word_mean]
for r in xrange(2,4749):
print r
content = sheet2.cell(row=r,column=3).value
time = sheet2.cell(row=r,column=11).value
like = sheet2.cell(row=r,column=5).value
repost = sheet2.cell(row=r,column=6).value
        # '赞' ("Like") and '转发' ("Repost") are the placeholder labels Weibo
        # shows when a count is zero, so treat them as 0
        if like == '赞':
            like = '0'
        if repost == '转发':
            repost = '0'
like_repost = int(like)+int(repost)
if like_repost>threshold:
popular =1
else:
popular =0
hour = int(time[1:3])
minute =int (time[4:])
time = hour*60 + minute
new_sheet.cell(row=r,column=10).value = time
new_sheet.cell(row=r,column=11).value = like_repost
if content ==None:
continue
print r
seg_list = jieba.cut(content, cut_all = True)
wordsplite = ' '.join(seg_list)
wordsplite = wordsplite.split(' ')
maxlike = 0
max_word =''
min_word =''
mean_word=''
minlike = 9999999
tmplist = []
tmpdic ={}
for w in wordsplite:
if words.has_key(w):
tmpdic[w] =int(words[w][2])
tmplist.append(int(words[w][2]))
likes = int(words[w][2])
if likes<minlike:
minlike = likes
min_word = w
if likes>maxlike:
maxlike = likes
max_word = w
else:
continue
if len(tmplist)!=0:
tmplist.sort()
mean = tmplist[int(len(tmplist)/2)]
for w in tmpdic:
if tmpdic[w]==mean:
mean_word =w
if min_word!='':
new_sheet.cell(row=r,column=1).value = words[min_word][0]
new_sheet.cell(row=r,column=2).value = words[min_word][1]
new_sheet.cell(row=r,column=3).value = words[min_word][2]
if max_word!='':
new_sheet.cell(row=r,column=4).value = words[max_word][0]
new_sheet.cell(row=r,column=5).value = words[max_word][1]
new_sheet.cell(row=r,column=6).value = words[max_word][2]
if mean_word!='':
new_sheet.cell(row=r,column=7).value = words[mean_word][0]
new_sheet.cell(row=r,column=8).value = words[mean_word][1]
new_sheet.cell(row=r,column=9).value = words[mean_word][2]
cctv_new.save("train_feature_keyword_reg.xlsx")
main()
| mit | 5,607,186,483,168,549,000 | 33.707071 | 80 | 0.502619 | false | 3.26616 | false | false | false |
digmore/pypushed | pushed/pushed.py | 1 | 5114 | # encoding: utf-8
import json
import requests
BASE_URL = 'https://api.pushed.co'
API_VERSION = '1'
PUSH = 'push'
OAUTH = 'oauth'
ACCESS_TOKEN = 'oauth/access_token'
USER_AGENT = 'python-pushed/0.1.4'
class Pushed(object):
'''Pushed.co API client class.
Param: app_key -> A Pushed.co application key
app_secret -> The secret authorizing the application key
'''
def __init__(self, app_key, app_secret):
self.app_key, self.app_secret = app_key, app_secret
def push_app(self, content, content_url=None):
'''Push a notification to a Pushed application.
Param: content -> content of Pushed notification message
content_url (optional) -> enrich message with URL
Returns Shipment ID as string
'''
parameters = {
'app_key': self.app_key,
'app_secret': self.app_secret
}
return self._push(content, 'app', parameters, content_url)
def push_channel(self, content, channel, content_url=None):
'''Push a notification to a Pushed channel.
Param: content -> content of Pushed notification message
channel -> string identifying a Pushed channel
content_url (optional) -> enrich message with URL
Returns Shipment ID as string
'''
parameters = {
'app_key': self.app_key,
'app_secret': self.app_secret,
'target_alias': channel
}
return self._push(content, 'channel', parameters, content_url)
def push_user(self, content, access_token, content_url=None):
'''Push a notification to a specific pushed user.
Param: content -> content of Pushed notification message
access_token -> OAuth access token
content_url (optional) -> enrich message with URL
Returns Shipment ID as string
'''
parameters = {
'app_key': self.app_key,
'app_secret': self.app_secret,
'access_token': access_token
}
return self._push(content, 'user', parameters, content_url)
def push_pushed_id(self, content, pushed_id, content_url=None):
'''Push a notification to a specific pushed user by Pushed ID.
Param: content -> content of Pushed notification message
pushed_id -> user's pushed ID
content_url (optional) -> enrich message with URL
Returns Shipment ID as string
'''
parameters = {
'app_key': self.app_key,
'app_secret': self.app_secret,
'pushed_id': pushed_id,
'target_alias': 'Nothing' # Required, but seems unused
}
return self._push(content, 'pushed_id', parameters, content_url)
def _push(self, content, target_type, parameters={}, content_url=None):
parameters.update(
{
'content': content,
'target_type': target_type
}
)
if content_url is not None:
parameters.update(
{
'content_type': 'url',
'content_extra': content_url
}
)
push_uri = "/".join([BASE_URL, API_VERSION, PUSH])
success, response = self._request(push_uri, parameters)
if success:
return response['response']['data']['shipment']
else:
raise PushedAPIError(
response['error']['type'],
response['error']['message']
)
def access_token(self, code):
'''Exchange a temporary OAuth2 code for an access token.
Param: code -> temporary OAuth2 code from a Pushed callback
Returns access token as string
'''
parameters = {"code": code}
access_uri = "/".join([BASE_URL, API_VERSION, ACCESS_TOKEN])
# RFC non-compliant response prevents use of standard OAuth modules
success, response = self._request(access_uri, parameters)
if success:
return response['response']['data']['access_token']
else:
raise PushedAPIError(
response['error']['type'],
response['error']['message']
)
def _request(self, url, parameters):
headers = {
'Content-Type': 'application/json',
'User-Agent': USER_AGENT
}
r = requests.post(url, data=json.dumps(parameters), headers=headers)
return (
str(r.status_code).startswith('2'),
r.json()
)
def authorization_link(self, redirect_uri):
'''Construct OAuth2 authorization link.
Params: redirect_uri -> URI for receiving callback with token
Returns authorization URL as string
'''
args = '?client_id=%s&redirect_uri=%s' % (
self.app_key,
redirect_uri
)
uri = "/".join([BASE_URL, API_VERSION, OAUTH, args])
return uri
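    # Usage sketch (the key, secret and channel below are placeholders, not
    # real credentials):
    #     pushed = Pushed('app_key', 'app_secret')
    #     shipment = pushed.push_app('Hello from python-pushed!')
    #     shipment = pushed.push_channel('New release is out', 'my_channel')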
class PushedAPIError(Exception):
'''Raise when an API request does not return a success status'''
| mit | -2,296,525,638,792,235,800 | 32.86755 | 76 | 0.564333 | false | 4.319257 | false | false | false |
schluedj/bayleaf | bayleaf/likelihoods.py | 1 | 12712 | ### Likelihood storage for bayleaf
### Author: David Schlueter
### Vanderbilt University Department of Biostatistics
### July 10, 2017
import theano.tensor as tt
import numpy as np
from pymc3.distributions import Continuous, draw_values, generate_samples, Bound, transforms
## Base class from pymc3
class PositiveContinuous(Continuous):
"""Base class for positive continuous distributions"""
def __init__(self, transform=transforms.log, *args, **kwargs):
super(PositiveContinuous, self).__init__(
transform=transform, *args, **kwargs)
################################################################################
###################### Univariate Parametric Models ############################
################################################################################
class Exponential_Censored(PositiveContinuous):
"""
Exponential censored log-likelihood.
.. math::
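        \log L(t \mid \delta) = \delta \log\lambda - \lambda t,
        \qquad \lambda = \exp(X\beta)
    (censored exponential log-likelihood as implemented in ``logp`` below;
    :math:`\delta` is the event indicator)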
======== ====================================================
======== ====================================================
Parameters
----------
alpha : float
        For the exponential model this is fixed at 1 (accepted for interface
        consistency but not used in ``logp``).
"""
def __init__(self, alpha, indep, *args, **kwargs):
super(Exponential_Censored, self).__init__(*args, **kwargs)
self.indep = indep = tt.as_tensor_variable(indep)
def logp(self, value, event):
indep = self.indep
indep = tt.exp(indep)
return event * tt.log(indep) - indep * value
class Weibull_Censored(PositiveContinuous):
"""
Weibull censored log-likelihood.
.. math::
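        \log L(t \mid \delta) = \delta\left[\log\alpha + \log\lambda + (\alpha - 1)\log t\right] - \lambda t^{\alpha},
        \qquad \lambda = \exp(X\beta)
    (censored Weibull log-likelihood as implemented in ``logp`` below;
    :math:`\delta` is the event indicator)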
======== ====================================================
======== ====================================================
Parameters
----------
alpha : float
Shape parameter (alpha > 0).
"""
def __init__(self, alpha, indep, *args, **kwargs):
super(Weibull_Censored, self).__init__(*args, **kwargs)
self.alpha = alpha = tt.as_tensor_variable(alpha)
self.indep = indep = tt.as_tensor_variable(indep)
def logp(self, value, event):
indep = self.indep
alpha = self.alpha
indep = tt.exp(indep)
return event*(tt.log(alpha) + tt.log(indep) + (alpha-1)*tt.log(value))- (indep * value**alpha)
## CoxPH w/ weibull baseline hazard
class WeibullPH(PositiveContinuous):
"""
    Cox PH censored log-likelihood with a Weibull baseline hazard
.. math::
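        \log L(t \mid \delta) = \delta\left[\log\alpha + \log\lambda + X\beta + (\alpha - 1)\log t\right] - \lambda e^{X\beta} t^{\alpha}
    (Cox PH with Weibull baseline hazard, as implemented in ``logp`` below;
    :math:`\delta` is the event indicator)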
======== ====================================================
======== ====================================================
Parameters
----------
alpha : float
Shape parameter (alpha > 0).
"""
def __init__(self, alpha, lam, indep, *args, **kwargs):
super(WeibullPH, self).__init__(*args, **kwargs)
self.alpha = alpha = tt.as_tensor_variable(alpha)
self.lam = lam = tt.as_tensor_variable(lam)
self.indep = indep = tt.as_tensor_variable(indep)
# Weibull survival likelihood, accounting for censoring
def logp(self, value, event):
indep = self.indep
alpha = self.alpha
lam = self.lam
indep = tt.exp(indep)
return event*(tt.log(alpha) + tt.log(lam) + tt.log(indep) + (alpha-1)*tt.log(value)) - (lam*indep * value**alpha)
#class ExtremeValue_Censored(PositiveContinuous):
# """
# Extreme Value censored log-likelihood.
# .. math::
# ======== ====================================================
# ======== ====================================================
# Parameters
## ----------
# alpha : float
# Shape parameter (alpha > 0).
# """
# def __init__(self, alpha, indep, *args, **kwargs):
# super(ExtremeValue_Censored, self).__init__(*args, **kwargs)
# self.alpha = alpha = tt.as_tensor_variable(alpha)
# self.indep = indep = tt.as_tensor_variable(indep)
# Extreme Value survival likelihood, accounting for censoring
# def logp(self, value, event):
# indep = self.indep
# alpha = self.alpha
# return event*(tt.log(alpha)+(alpha*value)+indep) - tt.exp(indep+alpha*value)
#### TO ADD: Gamma, Log-Normal
################################################################################
###################### Univariate Semi-Parametric Models ############################
################################################################################
#### To Add, Piecewise exponential
###############################################################################
###################### Multivariate Parametric Models ############################
################################################################################
#### To Add, Gamma frailty with Weibull Baseline hazard
###############################################################################
###################### Multivariate Parametric Models ##########################
################################################################################
############################# Copula Likelihoods ###############################
class Clayton_Censored(PositiveContinuous):
"""
    ## we will modify this to include flexible specification of the baseline hazard; for now we just assume a Weibull form in each dimension
Bivariate Clayton Censored Model
.. math::
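        C_{\alpha}(u, v) = \left(u^{-\alpha} + v^{-\alpha} - 1\right)^{-1/\alpha}
    (Clayton copula over the two Weibull-PH survival margins, matching the
    ``log_clayton_copula`` terms in ``logp`` below)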
======== ====================================================
======== ====================================================
Parameters
----------
"""
def __init__(self, alpha, indep_1, indep_2, rho_1, lam_1, rho_2, lam_2,
*args, **kwargs):
super(Clayton_Censored, self).__init__(*args, **kwargs)
self.alpha = alpha = tt.as_tensor_variable(alpha)
self.indep_1 = indep_1 = tt.as_tensor_variable(indep_1)
self.lam_1 = lam_1 = tt.as_tensor_variable(lam_1)
self.rho_1 = rho_1 = tt.as_tensor_variable(rho_1)
self.indep_2 = indep_2 = tt.as_tensor_variable(indep_2)
self.lam_2 = lam_2 = tt.as_tensor_variable(lam_2)
self.rho_2 = rho_2 = tt.as_tensor_variable(rho_2)
def logp(self, time_1, time_2, delta_1, delta_2):
"""
time_1: array
time in the first dimension.
time_2: array
time in the second dimension.
delta_1: array
event indicator in the first dimension.
delta_2: array
event indicator in the second dimension.
"""
## define local instances of the globally initiated variables
alpha = self.alpha
indep_1 = self.indep_1
lam_1 = self.lam_1
rho_1 = self.rho_1
indep_2 = self.indep_2
lam_2 = self.lam_2
rho_2 = self.rho_2
### Now define survival quantities
### Baseline quantities
# H(t) = lam*t^{rho}
base_cum_hazard_1 = lam_1*time_1**(rho_1)
base_cum_hazard_2 = lam_2*time_2**(rho_2)
# h(t) = lam*rho*t^{rho-1}
base_hazard_1 = lam_1*rho_1*time_1**(rho_1-1)
base_hazard_2 = lam_2*rho_2*time_2**(rho_2-1)
# h(t|X) = h(t)*exp(X'β)
conditional_hazard_1 = base_hazard_1 * tt.exp(indep_1)
conditional_hazard_2 = base_hazard_2 * tt.exp(indep_2)
# H(t|X) = H(t)*exp(X'β)
conditional_cum_hazard_1 = base_cum_hazard_1 * tt.exp(indep_1)
conditional_cum_hazard_2 = base_cum_hazard_2 * tt.exp(indep_2)
# S(t|X) = exp(-H(t|X))
surv_1 = tt.exp(-conditional_cum_hazard_1)
surv_2 = tt.exp(-conditional_cum_hazard_2)
## f(t|X) = S(t|X)*h(t|X)
density_1 = conditional_hazard_1 * surv_1
density_2 = conditional_hazard_2 * surv_2
### Copula derivatives:
### Copula derivatives:
log_clayton_copula = (-alpha)**(-1)*tt.log(surv_1**(-alpha)+surv_2**(-alpha)-1)
log_d_clayton_copula_s1 = -(alpha+1)*tt.log(surv_1)-((alpha+1)/alpha)*tt.log(surv_1**(-alpha)+surv_2**(-alpha)-1)
log_d_clayton_copula_s2 = -(alpha+1)*tt.log(surv_2)-((alpha+1)/alpha)*tt.log(surv_1**(-alpha)+surv_2**(-alpha)-1)
log_d2_clayton_copula_s1_s2 = tt.log(alpha+1)+(-(alpha+1))*tt.log(surv_1*surv_2)-((2*alpha+1)/alpha)*tt.log(surv_1**(-alpha)+surv_2**(-alpha)-1)
### different parts of log likelihood
first = delta_1*delta_2*(log_d2_clayton_copula_s1_s2+tt.log(density_1)+tt.log(density_2))
second = delta_1*(1-delta_2)*(log_d_clayton_copula_s1+tt.log(density_1))
third = delta_2*(1-delta_1)*(log_d_clayton_copula_s2+tt.log(density_2))
fourth = (1-delta_1)*(1-delta_2)*log_clayton_copula
return first + second + third + fourth
class Clayton_Censored_Trans(PositiveContinuous):
"""
    ## we will modify this to include flexible specification of the baseline hazard; for now we just assume a Weibull form in each dimension
    Bivariate Clayton Censored Model with transformed baseline cumulative hazards
.. math::
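        C_{\alpha}(u, v) = \left(u^{-\alpha} + v^{-\alpha} - 1\right)^{-1/\alpha},
        \qquad
        S_k(t \mid X) = \exp\!\left[-\tfrac{1}{r_k}\log\left(1 + r_k \Lambda_k(t) e^{X\beta_k}\right)\right]
    (same Clayton copula as above, applied to transformed marginal survival
    functions; :math:`\Lambda_k` is the Weibull baseline cumulative hazard,
    matching ``logp`` below)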
======== ====================================================
======== ====================================================
Parameters
----------
"""
def __init__(self, alpha, indep_1, indep_2, rho_1, lam_1, rho_2, lam_2, r_1, r_2,
*args, **kwargs):
super(Clayton_Censored_Trans, self).__init__(*args, **kwargs)
self.alpha = alpha = tt.as_tensor_variable(alpha)
## Parameters for first dimension
self.indep_1 = indep_1 = tt.as_tensor_variable(indep_1)
self.lam_1 = lam_1 = tt.as_tensor_variable(lam_1)
self.rho_1 = rho_1 = tt.as_tensor_variable(rho_1)
self.r_1 = r_1 = tt.as_tensor_variable(r_1)
## Parameters for second dimension
self.indep_2 = indep_2 = tt.as_tensor_variable(indep_2)
self.lam_2 = lam_2 = tt.as_tensor_variable(lam_2)
self.rho_2 = rho_2 = tt.as_tensor_variable(rho_2)
self.r_2 = r_2 = tt.as_tensor_variable(r_2)
def logp(self, time_1, time_2, delta_1, delta_2):
"""
time_1: array
time in the first dimension.
time_2: array
time in the second dimension.
delta_1: array
event indicator in the first dimension.
delta_2: array
event indicator in the second dimension.
"""
## define local instances of the globally initiated variables
alpha = self.alpha
indep_1 = self.indep_1
lam_1 = self.lam_1
rho_1 = self.rho_1
r_1 = self.r_1
indep_2 = self.indep_2
lam_2 = self.lam_2
rho_2 = self.rho_2
r_2 = self.r_2
### Now define survival quantities
### Baseline quantities
# H(t) = lam*t^{rho}
base_cum_hazard_1 = lam_1*time_1**(rho_1)
base_cum_hazard_2 = lam_2*time_2**(rho_2)
# h(t) = lam*rho*t^{rho-1}
base_hazard_1 = lam_1*rho_1*time_1**(rho_1-1)
base_hazard_2 = lam_2*rho_2*time_2**(rho_2-1)
# h(t|X) = h(t)*exp(X'β)
#conditional_hazard_1 = base_hazard_1 * tt.exp(indep_1)
#conditional_hazard_2 = base_hazard_2 * tt.exp(indep_2)
# H(t|X) = log(1+r*H(t)*exp(X'β))/r
conditional_cum_hazard_1 = tt.log(1 + r_1 * base_cum_hazard_1 * tt.exp(indep_1))/r_1
conditional_cum_hazard_2 = tt.log(1 + r_2 * base_cum_hazard_2 * tt.exp(indep_2))/r_2
# S(t|X) = exp(-H(t|X))
surv_1 = tt.exp(-conditional_cum_hazard_1)
surv_2 = tt.exp(-conditional_cum_hazard_2)
## f(t|X) = S(t|X)*h(t|X)
density_1 = base_hazard_1*tt.exp(indep_1)*(1+r_1*base_cum_hazard_1*tt.exp(indep_1))**-(1+r_1**(-1))
density_2 = base_hazard_2*tt.exp(indep_2)*(1+r_2*base_cum_hazard_2*tt.exp(indep_2))**-(1+r_2**(-1))
### Copula derivatives:
log_clayton_copula = (-alpha)**(-1)*tt.log(surv_1**(-alpha)+surv_2**(-alpha)-1)
log_d_clayton_copula_s1 = -(alpha+1)*tt.log(surv_1)-((alpha+1)/alpha)*tt.log(surv_1**(-alpha)+surv_2**(-alpha)-1)
log_d_clayton_copula_s2 = -(alpha+1)*tt.log(surv_2)-((alpha+1)/alpha)*tt.log(surv_1**(-alpha)+surv_2**(-alpha)-1)
log_d2_clayton_copula_s1_s2 = tt.log(alpha+1)+(-(alpha+1))*tt.log(surv_1*surv_2)-((2*alpha+1)/alpha)*tt.log(surv_1**(-alpha)+surv_2**(-alpha)-1)
### different parts of log likelihood
first = delta_1*delta_2*(log_d2_clayton_copula_s1_s2+tt.log(density_1)+tt.log(density_2))
second = delta_1*(1-delta_2)*(log_d_clayton_copula_s1+tt.log(density_1))
third = delta_2*(1-delta_1)*(log_d_clayton_copula_s2+tt.log(density_2))
fourth = (1-delta_1)*(1-delta_2)*log_clayton_copula
return first + second + third + fourth
#class ExtremeValue_Censored(PositiveContinuous):
| mit | 1,360,070,279,587,922,400 | 38.962264 | 152 | 0.509443 | false | 3.299922 | false | false | false |
willmcgugan/rich | rich/live_render.py | 1 | 3658 | import sys
from typing import Optional, Tuple
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal # pragma: no cover
from ._loop import loop_last
from .console import Console, ConsoleOptions, RenderableType, RenderResult
from .control import Control
from .segment import ControlType, Segment
from .style import StyleType
from .text import Text
VerticalOverflowMethod = Literal["crop", "ellipsis", "visible"]
class LiveRender:
"""Creates a renderable that may be updated.
Args:
renderable (RenderableType): Any renderable object.
style (StyleType, optional): An optional style to apply to the renderable. Defaults to "".
"""
def __init__(
self,
renderable: RenderableType,
style: StyleType = "",
vertical_overflow: VerticalOverflowMethod = "ellipsis",
) -> None:
self.renderable = renderable
self.style = style
self.vertical_overflow = vertical_overflow
self._shape: Optional[Tuple[int, int]] = None
def set_renderable(self, renderable: RenderableType) -> None:
"""Set a new renderable.
Args:
renderable (RenderableType): Any renderable object, including str.
"""
self.renderable = renderable
def position_cursor(self) -> Control:
"""Get control codes to move cursor to beginning of live render.
Returns:
Control: A control instance that may be printed.
"""
if self._shape is not None:
_, height = self._shape
return Control(
ControlType.CARRIAGE_RETURN,
(ControlType.ERASE_IN_LINE, 2),
*(
(
(ControlType.CURSOR_UP, 1),
(ControlType.ERASE_IN_LINE, 2),
)
* (height - 1)
)
)
return Control()
def restore_cursor(self) -> Control:
"""Get control codes to clear the render and restore the cursor to its previous position.
Returns:
Control: A Control instance that may be printed.
"""
if self._shape is not None:
_, height = self._shape
return Control(
ControlType.CARRIAGE_RETURN,
*((ControlType.CURSOR_UP, 1), (ControlType.ERASE_IN_LINE, 2)) * height
)
return Control()
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
renderable = self.renderable
_Segment = Segment
style = console.get_style(self.style)
lines = console.render_lines(renderable, options, style=style, pad=False)
shape = _Segment.get_shape(lines)
_, height = shape
if height > options.size.height:
if self.vertical_overflow == "crop":
lines = lines[: options.size.height]
shape = _Segment.get_shape(lines)
elif self.vertical_overflow == "ellipsis":
lines = lines[: (options.size.height - 1)]
overflow_text = Text(
"...",
overflow="crop",
justify="center",
end="",
style="live.ellipsis",
)
lines.append(list(console.render(overflow_text)))
shape = _Segment.get_shape(lines)
self._shape = shape
for last, line in loop_last(lines):
yield from line
if not last:
yield _Segment.line()
| mit | 6,486,358,692,883,061,000 | 31.371681 | 98 | 0.556862 | false | 4.444714 | false | false | false |
micfan/dinner | src/public/migrations/0004_auto_20160124_0351.py | 1 | 2313 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-23 19:51
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('public', '0003_auto_20150518_1613'),
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(max_length=5000, verbose_name=b'\xe5\x86\x85\xe5\xae\xb9')),
('unread', models.BooleanField(default=True, verbose_name=b'\xe6\x9c\xaa\xe8\xaf\xbb')),
('is_spam', models.BooleanField(default=False, verbose_name=b'\xe5\x9e\x83\xe5\x9c\xbe')),
('trashed', models.BooleanField(default=False, verbose_name=b'\xe5\x9b\x9e\xe6\x94\xb6\xe7\xab\x99')),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.AlterField(
model_name='user',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AlterField(
model_name='user',
name='last_login',
field=models.DateTimeField(blank=True, null=True, verbose_name='last login'),
),
migrations.AddField(
model_name='message',
name='come',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='message',
name='reply',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='public.Message'),
),
migrations.AddField(
model_name='message',
name='to',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
),
]
| mit | 5,651,005,434,688,837,000 | 42.641509 | 256 | 0.60441 | false | 3.810544 | false | false | false |
esatterly/splunk-cassandra | bin/extern.py | 1 | 1279 | #!/usr/bin/env python
#
# Copyright 2011 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# UNDONE: Need to locate installed Python on Windows
"""Common 'wrapper' script used to invoke an 'external' Python scripts. This
module is intended to be invoked using Splunk's internal Python stack and
uses the subprocess module to execute another Python script using the
platform's installed Python stack."""
from os import path
from subprocess import Popen, STDOUT
import sys
def extern(fname):
"""Invoke the given 'external' python script."""
run([fname] + sys.argv[1:])
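# Example sketch (hypothetical file name; a Splunk-side stub would typically do):
#     extern(path.join(path.dirname(__file__), "my_external_script.py"))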
def run(argv):
process = Popen(["/usr/bin/python"] + argv, env={}, stderr=STDOUT)
process.communicate()
process.wait()
if __name__ == "__main__":
run(sys.argv[1:])
| apache-2.0 | 4,965,296,529,155,139,000 | 32.657895 | 76 | 0.723221 | false | 3.97205 | false | false | false |
FactomProject/Testing | examples/python/libs/mnemonic.py | 1 | 4220 | #
# Copyright (c) 2013 Pavol Rusnak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import binascii
import hashlib
import hmac
import os
import sys
import unicodedata
from pbkdf2 import PBKDF2
PBKDF2_ROUNDS = 2048
class Mnemonic(object):
def __init__(self, language):
self.radix = 2048
with open('%s/%s.txt' % (self._get_directory(), language), 'r') as f:
self.wordlist = [w.strip() for w in f.readlines()]
if len(self.wordlist) != self.radix:
raise Exception('Wordlist should contain %d words, but it contains %d words.' % (self.radix, len(self.wordlist)))
@classmethod
def _get_directory(cls):
return os.path.join(os.path.dirname(__file__), 'wordlist')
@classmethod
def list_languages(cls):
return [ f.split('.')[0] for f in os.listdir(cls._get_directory()) if f.endswith('.txt') ]
@classmethod
def normalize_string(cls, txt):
if isinstance(txt, str if sys.version < '3' else bytes):
utxt = txt.decode('utf8')
elif isinstance(txt, unicode if sys.version < '3' else str):
utxt = txt
else:
raise Exception("String value expected")
return unicodedata.normalize('NFKD', utxt)
@classmethod
def detect_language(cls, code):
first = code.split(' ')[0]
languages = cls.list_languages()
for lang in languages:
mnemo = cls(lang)
if first in mnemo.wordlist:
return lang
raise Exception("Language not detected")
def generate(self, strength = 128):
if strength % 32 > 0:
raise Exception('Strength should be divisible by 32, but it is not (%d).' % strength)
return self.to_mnemonic(os.urandom(strength // 8))
def to_mnemonic(self, data):
if len(data) % 4 > 0:
raise Exception('Data length in bits should be divisible by 32, but it is not (%d bytes = %d bits).' % (len(data), len(data) * 8))
h = hashlib.sha256(data).hexdigest()
b = bin(int(binascii.hexlify(data), 16))[2:].zfill(len(data) * 8) + \
bin(int(h, 16))[2:].zfill(256)[:len(data) * 8 // 32]
result = []
for i in range(len(b) // 11):
idx = int(b[i * 11:(i + 1) * 11], 2)
result.append(self.wordlist[idx])
if self.detect_language(' '.join(result)) == 'japanese': # Japanese must be joined by ideographic space.
result_phrase = '\xe3\x80\x80'.join(result)
else:
result_phrase = ' '.join(result)
return result_phrase
def check(self, mnemonic):
if self.detect_language(mnemonic.replace('\xe3\x80\x80', ' ')) == 'japanese':
mnemonic = mnemonic.replace('\xe3\x80\x80', ' ') # Japanese will likely input with ideographic space.
mnemonic = mnemonic.split(' ')
if len(mnemonic) % 3 > 0:
return False
try:
idx = map(lambda x: bin(self.wordlist.index(x))[2:].zfill(11), mnemonic)
b = ''.join(idx)
except:
return False
l = len(b)
d = b[:l // 33 * 32]
h = b[-l // 33:]
nd = binascii.unhexlify(hex(int(d, 2))[2:].rstrip('L').zfill(l // 33 * 8))
nh = bin(int(hashlib.sha256(nd).hexdigest(), 16))[2:].zfill(256)[:l // 33]
return h == nh
@classmethod
def to_seed(cls, mnemonic, passphrase = ''):
mnemonic = cls.normalize_string(mnemonic)
passphrase = cls.normalize_string(passphrase)
return PBKDF2(mnemonic, u'mnemonic' + passphrase, iterations=PBKDF2_ROUNDS, macmodule=hmac, digestmodule=hashlib.sha512).read(64)
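# Usage sketch (assumes the bundled 'english' wordlist file is present):
#     m = Mnemonic('english')
#     words = m.generate(strength=128)   # 12-word phrase (128 bits + 4-bit checksum)
#     assert m.check(words)
#     seed = Mnemonic.to_seed(words, passphrase='')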
| mit | 7,408,913,559,512,287,000 | 35.37931 | 133 | 0.690521 | false | 3.187311 | false | false | false |
DroneSimon/inteligencia_visual | generarImagenes.py | 1 | 2005 |
import cv2
from Perceptron1 import predict
import numpy as np
import subsampling
import Blur
def generarImagenesVideo(video,tamx,tamy):
cap = cv2.VideoCapture(video)
i = 1
while True:
ret, frame = cap.read()
i += 1
        # Try changing the sampling interval
        if i % 5 == 0:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
smallImage = Blur.aplicarDiferenciasGaussianas(gray, 64, 64, 9, 9, 4, 9, 9, 8)
            # ==== matrix to vector ================================================================
vector = np.resize(smallImage,(1,smallImage.size))
vectorImage = np.concatenate(([[1]], vector), axis=1)
pred = predict(vectorImage)
lista = pred.tolist()
peak = pred.max()
            neuron = lista.index(peak) + 1  # index of the activated neuron, looked up in the list
rojo = 0
azul = 0
verde = 0
if peak > 0.7:
if neuron == 1:
titulo = "FUEGO"
rojo = 255
else:
if neuron == 2:
titulo = "ALERTA"
verde = 255
else:
titulo = ""
azul = 255
else:
titulo = " "
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame, titulo ,(19,90), font, 2, (azul,verde,rojo),5)
#cv2.imshow("nitidaGris", gray)
cv2.imshow("original", frame)
            k = cv2.waitKey(90)
            # waitKey returns the pressed key code; 90 is 'Z', so pressing Z exits the loop
            if k == 90:
                break
cv2.destroyAllWindows()
def pintarFuego(frame, font):
cv2.putText(frame, "FUEGO", (19, 90), font, 2, (0, 0, 255), 5)
def pintarAlerta(frame, font):
cv2.putText(frame, "ALERTA", (19, 90), font, 2, (0, 255, 0), 5)
def pintarAnomalia(frame, font):
cv2.putText(frame, "", (19, 90), font, 2, (255, 0, 0), 5)
dir = 'fuego.mp4'
generarImagenesVideo(dir,64,64)
| mit | -252,583,283,261,075,620 | 27.642857 | 98 | 0.47581 | false | 3.325041 | false | false | false |
fmichea/srddl | srddl/core/frontends/fe_curses.py | 1 | 6148 | import sys
import time
import string
CURSES_ON = True
try:
import urwid
except ImportError:
CURSES_ON = False
import srddl.core.frontend_loader as scf
import srddl.core.helpers as sch
import srddl.data as sd
LOGS = open('/tmp/curses_logs.txt', 'w')
def _write(*args):
LOGS.write(' '.join(str(s) for s in args) + '\n')
LOGS.flush()
class Curses(scf.Frontend):
class Meta:
name = 'curses'
help = 'awesome curses ui!'
enabled = CURSES_ON and sys.stdin.isatty()
def process(self, args):
cw = CursesMainWindow()
cw.main()
class KeyBinding(sch.NamedRecord):
class Meta:
fields = ['keys', 'function']
_KEYS = [('Ctrl', 'ctrl'), ('Alt', 'meta'), ('+', ' ')]
def _verbose_to_urwid_keys(keys):
def sub(key):
for verb, uverb in _KEYS:
key = key.replace(verb, uverb)
return key
return [sub(key) for key in keys]
def _urwid_to_verbose(key):
for verb, uverb in _KEYS:
key = key.replace(uverb, verb)
return key
if CURSES_ON:
class StatusBar(urwid.AttrMap):
def __init__(self, mw):
self.wtext = urwid.Text('')
super().__init__(self.wtext, 'footer')
self.msgs, self.mw = [], mw
self.add_text('Welcome!')
def add_text(self, txt, timeout=0):
if 0 < timeout:
self.msgs.append((txt, time.time() + timeout))
self.mw.loop.set_alarm_in(timeout, self._reload_text)
else:
self.msgs.append((txt, 0))
self._set_text(txt)
def _reload_text(self, obj, user_data):
count, t0 = 0, time.time()
for it in range(len(self.msgs)):
idx = it - count
if self.msgs[idx][1] and self.msgs[idx][1] < t0:
del self.msgs[idx]
count += 1
if count:
self._set_text(self.msgs[-1][0])
def _set_text(self, markup):
if isinstance(markup, str):
markup = [markup]
self.wtext.set_text([' '] + markup)
class StatusBarAsker(urwid.Edit, metaclass=urwid.signals.MetaSignals):
signals = ['ask_done']
def __init__(self, *args, **kwargs):
self.validator = kwargs.pop('validator', None)
super().__init__(*args, **kwargs)
def keypress(self, size, key):
if key == 'enter':
urwid.emit_signal(self, 'ask_done', self.get_edit_text())
elif key == 'esc':
urwid.emit_signal(self, 'ask_done', None)
elif len(key) != 1 or self.validator is None or self.validator(key):
super().keypress(size, key)
class HexView(urwid.ListWalker):
def __init__(self, data):
self.focus = (0, 0)
self.view = sd.DataView(data)
def __getitem__(self, position):
line, _ = position
_write('position =', position)
if 0 <= line and line < self.view.max_lines():
addr, data = list(self.view(line, 1).items())[0]
# Widgets for columns
widgets = [('pack', urwid.Text([('addr', addr)]))]
data = [[('pack', urwid.Text(b)) for b in d] for d in data['data']]
widgets.extend([urwid.Columns(d, dividechars=1) for d in data])
return urwid.Columns(widgets, dividechars=2, min_width=len(addr))
raise IndexError
def next_position(self, position):
if position[0] < self.view.max_lines():
return (position[0] + 1, position[1])
raise IndexError
def prev_position(self, position):
if position[0] != 0:
return (position[0] - 1, position[1])
raise IndexError
class CursesMainWindow:
def __init__(self):
# Non-UI data.
self.data = sd.FileData('/bin/ls')
# Palette of colors.
self.palette = [
('footer', 'black', 'light gray'),
('addr', 'white', 'black'),
]
self.loop = None
# Build main view.
## Body
self.body = urwid.ListBox(HexView(self.data))
## Footer
self.status_bar = StatusBar(self)
## Main view
self.view = urwid.Frame(self.body, footer=self.status_bar)
# Main loop
self.loop = urwid.MainLoop(self.view, palette=self.palette,
unhandled_input=self.unhandled_input)
def unhandled_input(self, key):
def exit_program(key):
'''quit the program'''
raise urwid.ExitMainLoop()
def goto_offset(key):
def validator(key):
return key in string.hexdigits
def done(offset):
_write('offset select =', int(offset, 16))
self.ask('Go to offset 0x', done, validator=validator)
KEYBINDINGS = [
('General features:', 1, [
KeyBinding(['q', 'Q'], exit_program),
]),
('Move arround:', 1, [
KeyBinding(['g'], goto_offset),
]),
]
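        # e.g. a new binding would be declared as
        #     KeyBinding(['Ctrl+g'], goto_offset)
        # with 'Ctrl+' mapped to urwid's 'ctrl ' prefix by _verbose_to_urwid_keys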
for _, _, bindings in KEYBINDINGS:
for binding in bindings:
if key in _verbose_to_urwid_keys(binding.keys):
binding.function(key)
return True
txt = 'Unknwon key binding \'{}\', try \'h\' to see them all.'
self.status_bar.add_text(txt.format(_urwid_to_verbose(key)), timeout=2)
def ask(self, prompt, callback, validator=None):
edit = StatusBarAsker(' ' + prompt, validator=validator)
def ask_done(content):
urwid.disconnect_signal(self, edit, 'ask_done', ask_done)
self.view.set_focus('body')
self.view.set_footer(self.status_bar)
if content is not None:
callback(content)
self.view.set_footer(urwid.AttrMap(edit, 'footer'))
self.view.set_focus('footer')
urwid.connect_signal(edit, 'ask_done', ask_done)
def main(self):
self.loop.run()
| bsd-3-clause | 1,060,420,255,881,414,000 | 30.367347 | 83 | 0.523585 | false | 3.757946 | false | false | false |
necrolyte2/getshipmass | getshipstat.py | 1 | 1198 | import re
import sys
import requests
from bs4 import BeautifulSoup
URL = 'https://wiki.eveonline.com/en/wiki'
STEELPLATEMASS = 2812500
MWDMASS = 50000000
def fetch_stat(statname, shipname):
r = requests.get(URL + '/' + shipname)
soup = BeautifulSoup(r.text)
paragraphs = soup.find_all('p')
p = '{0}\s+([\d\s]+)(.*)$'.format(statname)
    for paragraph in paragraphs:
        # membership on a bs4 Tag only inspects direct children, so match
        # against the paragraph's text instead
        if statname in paragraph.get_text():
            return paragraph
def parse_mass(massstr):
massstr = massstr.encode('utf-8')
p = '([\d,]+)'
mass = re.search(p, massstr).groups()[0].replace(',','')
return mass
def main():
ship = sys.argv[1]
m = fetch_stat('Mass', ship)
shipmass = int(parse_mass(m))
print '---' + ship + '---'
h = fetch_stat('Low Slots', ship)
lowslots = int(parse_mass(h))
print '\tShip Mass: {0}'.format(shipmass)
print '\tLow Slots: {0}'.format(lowslots)
platemass = STEELPLATEMASS * lowslots
print '\tPlate Mass: {0}'.format(platemass)
total = shipmass + platemass + MWDMASS
print '\tTotal Mass(ship+plate+MWD): {0}'.format(total)
print '\tMass With Higgs Anchor: {0}'.format(total*2)
if __name__ == '__main__':
main()
| mit | -5,794,873,514,477,937,000 | 26.860465 | 60 | 0.614357 | false | 2.995 | false | false | false |
onedata/web-client | bamboos/docker/environment/s3.py | 2 | 1219 | # coding=utf-8
"""Author: Krzysztof Trzepla
Copyright (C) 2015 ACK CYFRONET AGH
This software is released under the MIT license cited in 'LICENSE.txt'
Brings up an S3 storage.
"""
from boto.s3.connection import S3Connection, OrdinaryCallingFormat
from . import common, docker
def _node_up(image, buckets, name, uid):
hostname = common.format_hostname([name, 's3'], uid)
container = docker.run(
image=image,
hostname=hostname,
name=hostname,
detach=True)
settings = docker.inspect(container)
ip = settings['NetworkSettings']['IPAddress']
port = 4569
host_name = '{0}:{1}'.format(ip, port)
access_key = 'AccessKey'
secret_key = 'SecretKey'
for bucket in buckets:
connection = S3Connection(access_key, secret_key,
host=ip, port=port, is_secure=False,
calling_format=OrdinaryCallingFormat())
connection.create_bucket(bucket)
return {
'docker_ids': [container],
'host_name': host_name,
'access_key': access_key,
'secret_key': secret_key,
}
def up(image, buckets, name, uid):
return _node_up(image, buckets, name, uid)
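# Example sketch (hypothetical image name and uid):
#     output = up('fake-s3-image', ['bucket1'], 'storage', 'uid123')
#     # output['host_name'] -> '<container ip>:4569', plus the access/secret keys above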
| mit | -3,361,023,189,771,634,000 | 26.088889 | 73 | 0.61936 | false | 3.627976 | false | false | false |
idlesign/django-firebird | firebird/base.py | 1 | 14281 | """
Firebird database backend for Django.
Requires KInterbasDB 3.3+:
http://www.firebirdsql.org/index.php?op=devel&sub=python
"""
import re
import sys
import base64
try:
import kinterbasdb as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Unable to load KInterbasDB module: %s" % e)
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from firebird.creation import DatabaseCreation
from firebird.introspection import DatabaseIntrospection
from firebird.client import DatabaseClient
from django.conf import settings
import django.utils.encoding as utils_encoding
import kinterbasdb.typeconv_datetime_stdlib as typeconv_datetime
import kinterbasdb.typeconv_fixed_decimal as typeconv_fixeddecimal
import kinterbasdb.typeconv_text_unicode as typeconv_textunicode
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
OperationalError = Database.OperationalError
class CursorWrapper(object):
"""
A thin wrapper around kinterbasdb cursor class so that we can catch
particular exception instances and reraise them with the right types.
Django uses "format" style placeholders, but firebird uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
We need to do some data translation too.
See: http://kinterbasdb.sourceforge.net/dist_docs/usage.html for Dynamic Type Translation
"""
def __init__(self, cursor):
self.cursor = cursor
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
def execute(self, query, args=None):
# This is a workaround for KInterbasDB locks
if query.find('DROP') != -1:
# self.cursor.close()
# someday will recreate cursor here
pass
try:
#print query, args
if not args:
args = ()
return self.cursor.execute(query)
else:
query = self.convert_query(query, len(args))
return self.cursor.execute(query, args)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)+('sql: '+query,)+args), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)+('sql: '+query,)+args), sys.exc_info()[2]
def executemany(self, query, args):
try:
#print query, args
if not args:
args = ()
return self.cursor.executemany(query)
else:
query = self.convert_query(query, len(args[0]))
return self.cursor.executemany(query, args)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)+('sql: '+query,)+args), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)+('sql: '+query,)+args), sys.exc_info()[2]
def convert_query(self, query, num_params):
return query % tuple("?" * num_params)
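        # e.g. convert_query("UPDATE t SET a = %s WHERE b = %s", 2)
        #      -> "UPDATE t SET a = ? WHERE b = ?"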
def fetchone(self):
return self.cursor.fetchone()
def fetchmany(self, size=None):
return self.cursor.fetchmany(size)
def fetchall(self):
return self.cursor.fetchall()
class DatabaseFeatures(BaseDatabaseFeatures):
"""
    This class defines db-specific features.
    - can_return_id_from_insert
        return the insert id directly from INSERT ... RETURNING statements,
        as described at http://firebirdfaq.org/faq243/
        (Firebird 2+ only)
"""
can_return_id_from_insert = False
class DatabaseOperations(BaseDatabaseOperations):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = 'firebird.compiler'
def __init__(self, connection, dialect=3):
super(DatabaseOperations, self).__init__(connection)
self.dialect = dialect
self._cache = None
self._engine_version = None
self.FB_CHARSET_CODE = 3 #UNICODE_FSS
def autoinc_sql(self, table, column):
# To simulate auto-incrementing primary keys in Firebird, we have to create a generator and a trigger.
gn_name = self.quote_name(self.get_generator_name(table))
tr_name = self.quote_name(self.get_trigger_name(table))
tbl_name = self.quote_name(table)
col_name = self.quote_name(column)
generator_sql = """CREATE GENERATOR %(gn_name)s""" % locals()
trigger_sql = """
CREATE TRIGGER %(tr_name)s FOR %(tbl_name)s
BEFORE INSERT
AS
BEGIN
IF (NEW.%(col_name)s IS NULL) THEN
NEW.%(col_name)s = GEN_ID(%(gn_name)s, 1);
END""" % locals()
return generator_sql, trigger_sql
def date_extract_sql(self, lookup_type, field_name):
# Firebird uses WEEKDAY keyword.
lkp_type = lookup_type
if lkp_type == 'week_day':
lkp_type = 'weekday'
return "EXTRACT(%s FROM %s)" % (lkp_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
if lookup_type == 'year':
sql = "EXTRACT(year FROM %s)||'-01-01 00:00:00'" % field_name
elif lookup_type == 'month':
sql = "EXTRACT(year FROM %s)||'-'||EXTRACT(month FROM %s)||'-01 00:00:00'" % (field_name, field_name)
elif lookup_type == 'day':
sql = "EXTRACT(year FROM %s)||'-'||EXTRACT(month FROM %s)||'-'||EXTRACT(day FROM %s)||' 00:00:00'" % (field_name, field_name, field_name)
return "CAST(%s AS TIMESTAMP)" % sql
def lookup_cast(self, lookup_type):
if lookup_type in ('iexact', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def fulltext_search_sql(self, field_name):
# We use varchar for TextFields so this is possible
# Look at http://www.volny.cz/iprenosil/interbase/ip_ib_strings.htm
return '%%s CONTAINING %s' % self.quote_name(field_name)
def return_insert_id(self):
return 'RETURNING %s', ()
def last_insert_id(self, cursor, table_name, pk_name):
        # Method used for Firebird prior to 2. It is unreliable, but nothing better is available
cursor.execute('SELECT GEN_ID(%s, 0) FROM rdb$database' % (self.get_generator_name(table_name),))
return cursor.fetchone()[0]
def max_name_length(self):
return 31
def convert_values(self, value, field):
return super(DatabaseOperations, self).convert_values(value, field)
def query_class(self, DefaultQueryClass):
return query.query_class(DefaultQueryClass)
def quote_name(self, name):
# Dialect differences as described in http://mc-computing.com/databases/Firebird/SQL_Dialect.html
if self.dialect==1:
name = name.upper()
else:
if not name.startswith('"') and not name.endswith('"'):
name = '"%s"' % util.truncate_name(name, self.max_name_length())
# Handle RDB$DB_KEY calls
if name.find('RDB$DB_KEY') > -1:
name = name.strip('"')
return name
def get_generator_name(self, table_name):
return '%s_GN' % util.truncate_name(table_name, self.max_name_length() - 3).upper()
def get_trigger_name(self, table_name):
return '%s_TR' % util.truncate_name(table_name, self.max_name_length() - 3).upper()
def year_lookup_bounds(self, value):
first = '%s-01-01'
second = self.conv_in_date('%s-12-31 23:59:59.999999' % value)
return [first % value, second]
def conv_in_ascii(self, text):
if text is not None:
# Handle binary data from RDB$DB_KEY calls
if text.startswith('base64'):
return base64.b64decode(text.lstrip('base64'))
return utils_encoding.smart_str(text, 'ascii')
def conv_in_blob(self, text):
return typeconv_textunicode.unicode_conv_in((utils_encoding.smart_unicode(text), self.FB_CHARSET_CODE))
def conv_in_fixed(self, (val, scale)):
if val is not None:
if isinstance(val, basestring):
val = decimal.Decimal(val)
# fixed_conv_in_precise produces weird numbers
# return typeconv_fixeddecimal.fixed_conv_in_precise((val, scale))
return int(val.to_integral())
def conv_in_timestamp(self, timestamp):
if isinstance(timestamp, basestring):
# Replaces 6 digits microseconds to 4 digits allowed in Firebird
timestamp = timestamp[:24]
return typeconv_datetime.timestamp_conv_in(timestamp)
def conv_in_time(self, value):
import datetime
if isinstance(value, datetime.datetime):
value = datetime.time(value.hour, value.minute, value.second, value.microsecond)
return typeconv_datetime.time_conv_in(value)
def conv_in_date(self, value):
if isinstance(value, basestring):
if self.dialect==1:
# Replaces 6 digits microseconds to 4 digits allowed in Firebird dialect 1
value = value[:24]
else:
# Time portion is not stored in dialect 3
value = value[:10]
return typeconv_datetime.date_conv_in(value)
def conv_in_unicode(self, text):
if text[0] is not None:
return typeconv_textunicode.unicode_conv_in((utils_encoding.smart_unicode(text[0]), self.FB_CHARSET_CODE))
def conv_out_ascii(self, text):
if text is not None:
# Handle binary data from RDB$DB_KEY calls
if "\0" in text:
return 'base64'+base64.b64encode(text)
return utils_encoding.smart_unicode(text, strings_only=True)
def conv_out_blob(self, text):
return typeconv_textunicode.unicode_conv_out((text, self.FB_CHARSET_CODE))
class DatabaseWrapper(BaseDatabaseWrapper):
"""
Represents a database connection.
"""
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': "LIKE %s ESCAPE'\\'",
'icontains': 'CONTAINING %s', #case is ignored
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'STARTING WITH %s', #looks to be faster then LIKE
'endswith': "LIKE %s ESCAPE'\\'",
'istartswith': 'STARTING WITH UPPER(%s)',
'iendswith': "LIKE UPPER(%s) ESCAPE'\\'",
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
settings_dict = self.settings_dict
self.settings = {
'charset': 'UNICODE_FSS',
'dialect': 3,
}
if settings_dict['HOST']:
self.settings['host'] = settings_dict['HOST']
if settings_dict['NAME']:
self.settings['database'] = settings_dict['NAME']
if settings_dict['USER']:
self.settings['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
self.settings['password'] = settings_dict['PASSWORD']
self.settings.update(settings_dict['OPTIONS'])
self.dialect = self.settings['dialect']
if 'init_params' in self.settings:
Database.init(**self.settings['init_params'])
self.server_version = None
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self, dialect=self.dialect)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _cursor(self):
new_connection = False
if self.connection is None:
new_connection = True
self.connection = Database.connect(**self.settings)
connection_created.send(sender=self.__class__)
cursor = self.connection.cursor()
if new_connection:
if self.connection.charset == 'UTF8':
self.ops.FB_CHARSET_CODE = 4 # UTF-8 with Firebird 2.0+
self.connection.set_type_trans_in({
'DATE': self.ops.conv_in_date,
'TIME': self.ops.conv_in_time,
'TIMESTAMP': self.ops.conv_in_timestamp,
'FIXED': self.ops.conv_in_fixed,
'TEXT': self.ops.conv_in_ascii,
'TEXT_UNICODE': self.ops.conv_in_unicode,
'BLOB': self.ops.conv_in_blob
})
self.connection.set_type_trans_out({
'DATE': typeconv_datetime.date_conv_out,
'TIME': typeconv_datetime.time_conv_out,
'TIMESTAMP': typeconv_datetime.timestamp_conv_out,
'FIXED': typeconv_fixeddecimal.fixed_conv_out_precise,
'TEXT': self.ops.conv_out_ascii,
'TEXT_UNICODE': typeconv_textunicode.unicode_conv_out,
'BLOB': self.ops.conv_out_blob
})
version = re.search(r'\s(\d{1,2})\.(\d{1,2})', self.connection.server_version)
self.server_version = tuple([int(x) for x in version.groups()])
# feature for Firebird version 2 and above
if self.server_version[0] >=2:
self.features.can_return_id_from_insert = True
return CursorWrapper(cursor)
def get_server_version(self):
return self.server_version | bsd-3-clause | -4,105,446,912,383,823,400 | 37.6 | 149 | 0.589735 | false | 3.994685 | false | false | false |
area3001/ares | sw/games/test_game_1/test1.py | 1 | 6624 | import pyglet
from pyglet.window import key
import cocos
from cocos import actions, layer, sprite, scene
from cocos.director import director
import cocos.euclid as eu
import cocos.collision_model as cm
import math
import paho.mqtt.client as mqtt
import json
MAP_SIZE = (600, 600)
VELOCITY_MAX = 400
VELOCITY_INERTIA = 3 # smaller means more inertia
VELOCITY_BRAKE_VS_SPEED = 3
VELOCITY_IMPACT_ON_TURNING = 0.0025
TURNING_SPEED = 3
VELOCITY_DECLINE = 0.995 # not touching controls means the velocity will go to zero
class CollidableSprite(cocos.sprite.Sprite):
def __init__(self, image, cx, cy, radius):
super(CollidableSprite, self).__init__(image)
self.position = (cx, cy)
        # note: the radius argument is ignored; a fixed 25 px collision circle is used
        self.cshape = cm.CircleShape(eu.Vector2(cx, cy), 25)
def update_in_collision_manager(self):
collision_manager.remove_tricky(self)
self.cshape = cm.CircleShape(eu.Vector2(self.position[0], self.position[1]), 25)
collision_manager.add(self)
def maybe_impact(self):
if collision_manager.any_near(self, 1):
self.velocity = (- self.velocity[0], - self.velocity[1])
#self.velocity = (0, 0)
# check if out of map
self.position = (max(0, min(self.position[0], MAP_SIZE[0])), \
max(0, min(self.position[1], MAP_SIZE[1])))
# How to handle collisions
#mapcollider = mapcolliders.RectMapCollider("bounce")
# Car Actions class
class Car(actions.Move):
def step(self, dt):
super(Car, self).step(dt)
rl = keyboard[key.RIGHT] - keyboard[key.LEFT]
speed_or_brake = keyboard[key.UP] - keyboard[key.DOWN]
radians = self.target.rotation * math.pi / 180
# Update the speed from the perspective of the car
try:
speed_or_brake = keyboard[key.UP] - VELOCITY_BRAKE_VS_SPEED * keyboard[key.DOWN] \
if self.target.speed > 0 else \
VELOCITY_BRAKE_VS_SPEED * keyboard[key.UP] - keyboard[key.DOWN]
self.target.speed = VELOCITY_DECLINE * (min(VELOCITY_INERTIA * speed_or_brake + self.target.speed, VELOCITY_MAX))
except AttributeError:
self.target.speed = math.sqrt(self.target.velocity[0]**2 + self.target.velocity[1]**2)
velocity_x = self.target.speed * math.sin(radians)
velocity_y = self.target.speed * math.cos(radians)
self.target.velocity = (velocity_x, velocity_y)
# turn the car
rl = TURNING_SPEED * rl * VELOCITY_IMPACT_ON_TURNING * abs(self.target.speed)
rl = rl if self.target.speed > 0 else - rl
action = actions.interval_actions.RotateBy(rl, 0)
self.target.do(action)
self.target.update_in_collision_manager()
self.target.maybe_impact()
class Mqtt_layer(layer.Layer):
def __init__(self, collision_mgr):
super(Mqtt_layer, self).__init__()
self.collision_mgr = collision_mgr
# MQTT part
def on_marker(client, userdata, msg):
print("marker: '" + str(msg.payload))
payload = json.loads(msg.payload)
print payload["position"][0]
print payload["position"][1]
# create an obstacle and add to layer
# obstacle3 = CollidableSprite('sprites/obstacle.png', 200, 200, 0)
# player_layer.add(obstacle3)
# obstacle3.velocity = (0, 0)
# collision_manager.add(obstacle3)
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
# client.message_callback_add("ares/video/markers", on_marker)
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("ares/video/markers")
client.subscribe("ares/video/edges")
client.subscribe("ares/video/objects")
client.subscribe("ares/mgt/features/add")
client.subscribe("ares/mgt/features/remove")
# The callback for when a PUBLISH message is received from the server which is not handled in other handlers
def on_message(client, userdata, msg):
print("Received message '" + str(msg.payload) + "' on topic '" \
+ msg.topic + "' with QoS " + str(msg.qos))
payload = json.loads(msg.payload)
x = payload["position"][0]
y = payload["position"][1]
# create an obstacle and add to layer
obstacle3 = CollidableSprite('sprites/obstacle.png', x, y, 0)
self.add(obstacle3)
# obstacle3.velocity = (0, 0)
self.collision_mgr.add(obstacle3)
self.client = mqtt.Client()
self.client.on_connect = on_connect
self.client.on_message = on_message
self.client.connect("localhost", 1883, 60)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
def draw(self):
self.client.loop(0)
# Main class
def main():
global keyboard
global collision_manager
collision_manager = cm.CollisionManagerBruteForce()
director.init(width=MAP_SIZE[0], height=MAP_SIZE[1], autoscale=True, resizable=True)
# Create a layer
player_layer = Mqtt_layer(collision_manager)
# create an obstacle and add to layer
obstacle1 = CollidableSprite('sprites/obstacle.png', 200, 200, 0)
player_layer.add(obstacle1)
obstacle1.velocity = (0, 0)
collision_manager.add(obstacle1)
# create an obstacle and add to layer
obstacle2 = CollidableSprite('sprites/obstacle.png', 320, 240, 0)
player_layer.add(obstacle2)
obstacle2.velocity = (0, 0)
collision_manager.add(obstacle2)
# create an obstacle and add to layer
obstacle4 = CollidableSprite('sprites/obstacle.png', 490, 490, 0)
player_layer.add(obstacle4)
obstacle4.velocity = (0, 0)
collision_manager.add(obstacle4)
# create the car and add to layer
car = CollidableSprite('sprites/Black_viper.png', 100, 100, 10)
action = actions.interval_actions.ScaleBy(0.25, 0)
car.do(action)
player_layer.add(car)
car.velocity = (0, 0)
# Set the sprite's movement class.
car.do(Car())
# Create a scene and set its initial layer.
main_scene = scene.Scene(player_layer)
# collisions
collision_manager.add(car)
# Attach a KeyStateHandler to the keyboard object.
keyboard = key.KeyStateHandler()
director.window.push_handlers(keyboard)
# Play the scene in the window.
director.run(main_scene)
if __name__ == '__main__':
main() | mit | 6,703,829,673,079,058,000 | 32.80102 | 119 | 0.647796 | false | 3.393443 | false | false | false |
kevgraham7/toolbox | python/restdriver/restdriver/rest_driver.py | 1 | 6408 | #!/usr/bin/env python3
# Dependent packages:
import json
import logging as log
import os
import requests
import yaml
import sys
from restdriver import cli
__author__ = 'kgraham'
__version__ = '0.0.0-0'
def init_defaults():
cwd = os.getcwd()
root = os.path.abspath('/')
defaults = dict(config_file=os.path.join(cwd, 'environments.yaml'),
suite_file=os.path.join(cwd, 'suites', 'default.tcs'),
results_file=os.path.join(cwd, 'results', 'default.rslt'),
log_file=os.path.join(root, 'var', 'log', 'restdriver', 'restdriver.log'))
return defaults
def load_config_file(config):
# Load Config
try:
with open(config['config_file'], 'r') as yamlfile:
yamlconfig = yaml.load(yamlfile)
config.update(yamlconfig)
except FileNotFoundError:
log.error("Error: Config file %s not found" % config['config_file'])
sys.exit(1)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
log.error("Error: Invalid config file %s" % config['config_file'])
log.error("Error: Location (%s line %d)" % (config['config_filename'], mark.line+1))
else:
log.error("Error: ", err)
sys.exit(1)
# Log the configuration
log.info('------------------------------------------')
log.info(" Config file : %s" % config['config_file'])
log.info(" Log file : %s" % config['log_file'])
log.info(" Results file : %s" % config['results_file'])
log.info(" Test suite : %s" % config['suite_file'])
log.debug(" Configuration : %s" % config)
log.info('------------------------------------------')
return config
def initialize():
defaults = init_defaults()
parser = cli.get_parser(defaults)
options, args = parser.parse_args()
# Setup log
log_level = getattr(log, options.debug_level.upper())
debug_enabled = (log_level == log.DEBUG)
try:
log.basicConfig(format='%(asctime)s %(levelname)-5s [(%(thread)s) %(module)s:%(lineno)s] %(message)s',
filemode='w', level=log_level, filename=options.log_file)
log.getLogger().addHandler(log.StreamHandler())
log.info('------------------------------------------')
log.info('Starting %s' % __file__)
except (FileNotFoundError, IOError) as err:
print("Failed to open log file '%s', check permissions" % options.log_file, file=sys.stderr)
print(err, file=sys.stderr)
sys.exit(1)
# Normalize file names to absolute paths
config_dir, config_filename, config_file = normalize_filename(options.config_file)
log_dir, log_filename, log_file = normalize_filename(options.log_file)
results_dir, results_filename, results_file = normalize_filename(options.results_file)
suite_dir, suite_filename, suite_file = normalize_filename(options.suite_file)
# Save initial configuration
config = dict(config_file=config_file, config_filepath=config_dir, config_filename=config_filename,
log_file=log_file, log_filepath=log_dir, log_filename=log_filename,
results_file=results_file, results_filepath=results_dir, results_filename=results_filename,
suite_file=suite_file, suite_filepath=suite_dir, suite_filename=suite_filename,
log_level=log_level, debug_enabled=debug_enabled)
# Add configuration from config file
config = load_config_file(config)
return config
def normalize_filename(file_path):
file_dir, file_name = os.path.split(file_path)
file_dir = os.path.abspath(file_dir)
absolute_filename = os.path.join(file_dir, file_name)
return file_dir, file_name, absolute_filename
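# Illustrative note (not part of the original script): normalize_filename() resolves a
# possibly-relative path against the current working directory, so with a hypothetical cwd
# of /home/user/restdriver, normalize_filename("suites/default.tcs") returns
# ("/home/user/restdriver/suites", "default.tcs", "/home/user/restdriver/suites/default.tcs").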
def load_suite_templates(suite, config):
message_templates = {}
if 'message_t' in suite:
message_templates = suite['message_t']
return message_templates
def load_suite_testcases(suite, config):
testcases = {}
if 'testcase' in suite:
testcases = suite['testcase']
return testcases
def load_suite(suite_file, config):
try:
with open(suite_file, 'r') as yamlfile:
suite = yaml.load(yamlfile)
message_templates = load_suite_templates(suite, config)
log.debug("Message Templates:\n%s" % json.dumps(message_templates, indent=2))
testcases = load_suite_testcases(suite, config)
log.debug("Test Cases:\n%s" % json.dumps(testcases, indent=2))
except FileNotFoundError:
log.error("Error: Config file %s not found" % config['config_file'])
sys.exit(1)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
log.error("Error: Invalid config file %s" % config['config_file'])
log.error("Error: Location (%s line %d)" % (config['config_filename'], mark.line+1))
else:
log.error("Error: ", err)
sys.exit(1)
return message_templates, testcases
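# Assumed suite-file layout (a sketch inferred from run_testcase()/run_command() below,
# not an official schema): a YAML document with an optional 'message_t' mapping of message
# templates and a 'testcase' list, where each test case maps a name to a list of commands,
# for example:
#
#   testcase:
#     - smoke_test:
#         - send:
#             method: GET
#             baseurl: http://localhost:8080
#             resource: status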
def run_command(command):
for cmd_name, cmd_data in command.items():
print('%s' % type(cmd_data))
print('%s' % cmd_data)
if cmd_name == 'send':
method = cmd_data['method']
url = cmd_data['baseurl'] + '/' + cmd_data['resource']
if method.upper() == "GET":
response = requests.get(url)
headers = ""
for key, val in response.headers.items():
headers += "%s: %s\n" % (key, val)
# if response.headers['Content-Type'] == 'application/json':
content_string = json.dumps(response.json(), indent=2)
print("RESPONSE %s %s\n%s\n%s" % (response.status_code, response.reason, headers, content_string))
break
def run_testcase(testcase, message_templates):
for tc_name, tc_data in testcase.items():
print("1>>> %s" % tc_name)
print("1>>> %s" % tc_data)
for cmd in tc_data:
run_command(cmd)
def run_suite(config):
message_templates, testcases = load_suite(config['suite_file'], config)
for testcase in testcases:
print("2>>> %s" % testcase)
run_testcase(testcase, message_templates)
def main():
config = initialize()
run_suite(config)
if __name__ == '__main__':
main()
| apache-2.0 | -7,151,217,311,790,520,000 | 34.403315 | 114 | 0.59535 | false | 3.689119 | true | false | false |
maxamillion/dnf | dnf/query.py | 5 | 1587 | # query.py
# Implements Query.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import hawkey
from hawkey import Query
from dnf.i18n import ucd
from dnf.pycomp import basestring
def _by_provides(sack, patterns, ignore_case=False, get_query=False):
if isinstance(patterns, basestring):
patterns = [patterns]
q = sack.query()
flags = []
if ignore_case:
flags.append(hawkey.ICASE)
q.filterm(*flags, provides__glob=patterns)
if get_query:
return q
return q.run()
def _per_nevra_dict(pkg_list):
return {ucd(pkg):pkg for pkg in pkg_list}
| gpl-2.0 | -8,465,437,484,022,581,000 | 33.5 | 77 | 0.73472 | false | 3.805755 | false | false | false |
cbentes/texta | task_manager/language_model_manager/language_model_manager.py | 1 | 5898 | import json
import logging
import os
import platform
from datetime import datetime
from gensim.models import word2vec
from searcher.models import Search
from task_manager.models import Task
from texta.settings import ERROR_LOGGER, INFO_LOGGER, MODELS_DIR
from utils.datasets import Datasets
from utils.es_manager import ES_Manager
if platform.system() == 'Windows':
from threading import Thread as Process
else:
from multiprocessing import Process
class LanguageModel:
def __init__(self):
self.id = None
self.model = None
self.model_name = None
def train(self, task_id):
self.id = task_id
# Process(target=self._training_worker).start()
self._training_worker() # Apache wsgi multiprocessing problem
# self._training_worker()
return True
def _training_worker(self):
logging.getLogger(INFO_LOGGER).info(json.dumps({'process': 'CREATE MODEL', 'event': 'model_training_started', 'data': {'task_id': self.id}}))
num_passes = 5
# Number of word2vec passes + one pass to vocabulary building
total_passes = num_passes + 1
show_progress = ShowProgress(self.id, multiplier=total_passes)
show_progress.update_view(0)
model = word2vec.Word2Vec()
task_params = json.loads(Task.objects.get(pk=self.id).parameters)
try:
sentences = EsIterator(task_params, callback_progress=show_progress)
model = word2vec.Word2Vec(
sentences,
min_count=int(task_params['min_freq']),
size=int(task_params['num_dimensions']),
workers=int(task_params['num_workers']),
iter=int(num_passes),
max_vocab_size=int(task_params['max_vocab']) if task_params['max_vocab'] else None
)
self.model = model
self.save()
# declare the job done
logging.getLogger(INFO_LOGGER).info(json.dumps({'process': 'CREATE MODEL', 'event': 'model_training_completed', 'data': {'task_id': self.id}}))
r = Task.objects.get(pk=self.id)
r.time_completed = datetime.now()
r.status = 'Completed'
r.result = json.dumps({"model_type": "word2vec", "lexicon_size": len(self.model.wv.vocab)})
r.save()
except Exception as e:
logging.getLogger(ERROR_LOGGER).error(json.dumps({'process': 'CREATE MODEL', 'event': 'model_training_failed', 'data': {'task_id': self.id}}), exc_info=True)
print('--- Error: {0}'.format(e))
# declare the job as failed
r = Task.objects.get(pk=self.id)
r.time_completed = datetime.now()
r.status = 'Failed'
r.save()
print('done')
def delete(self):
pass
def save(self):
try:
model_name = 'model_' + str(self.id)
self.model_name = model_name
output_model_file = os.path.join(MODELS_DIR, model_name)
self.model.save(output_model_file)
return True
except Exception as e:
model_name = 'model_' + str(self.id)
filepath = os.path.join(MODELS_DIR, model_name)
logging.getLogger(ERROR_LOGGER).error('Failed to save model pickle to filesystem.', exc_info=True, extra={'filepath': filepath, 'modelname': model_name})
class ShowProgress(object):
""" Show model training progress
"""
def __init__(self, task_pk, multiplier=None):
self.n_total = None
self.n_count = 0
self.task_pk = task_pk
self.multiplier = multiplier
def set_total(self, total):
self.n_total = total
if self.multiplier:
self.n_total = self.multiplier * total
def update(self, amount):
if amount == 0:
return
self.n_count += amount
percentage = (100.0 * self.n_count) / self.n_total
self.update_view(percentage)
def update_view(self, percentage):
r = Task.objects.get(pk=self.task_pk)
r.status = 'Running [{0:3.0f} %]'.format(percentage)
r.save()
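# Worked example (illustrative, not from the original module): for the word2vec training
# above, multiplier = num_passes + 1 = 6, so set_total(1000) stores n_total = 6000; once
# update() has accumulated n_count = 1500 processed documents, the task status reads
# 'Running [ 25 %]' because 100.0 * 1500 / 6000 = 25.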
class EsIteratorError(Exception):
""" EsIterator Exception
"""
pass
class EsIterator(object):
""" ElasticSearch Iterator
"""
def __init__(self, parameters, callback_progress=None):
ds = Datasets().activate_dataset_by_id(parameters['dataset'])
query = self._parse_query(parameters)
self.field = json.loads(parameters['field'])['path']
self.es_m = ds.build_manager(ES_Manager)
self.es_m.load_combined_query(query)
self.callback_progress = callback_progress
if self.callback_progress:
total_elements = self.get_total_documents()
callback_progress.set_total(total_elements)
@staticmethod
def _parse_query(parameters):
search = parameters['search']
# select search
if search == 'all_docs':
query = {"main": {"query": {"bool": {"minimum_should_match": 0, "must": [], "must_not": [], "should": []}}}}
else:
query = json.loads(Search.objects.get(pk=int(search)).query)
return query
def __iter__(self):
self.es_m.set_query_parameter('size', 500)
response = self.es_m.scroll()
scroll_id = response['_scroll_id']
l = response['hits']['total']
while l > 0:
response = self.es_m.scroll(scroll_id=scroll_id)
l = len(response['hits']['hits'])
scroll_id = response['_scroll_id']
# Check errors in the database request
if (response['_shards']['total'] > 0 and response['_shards']['successful'] == 0) or response['timed_out']:
msg = 'Elasticsearch failed to retrieve documents: ' \
'*** Shards: {0} *** Timeout: {1} *** Took: {2}'.format(response['_shards'], response['timed_out'], response['took'])
raise EsIteratorError(msg)
for hit in response['hits']['hits']:
try:
# Take into account nested fields encoded as: 'field.sub_field'
decoded_text = hit['_source']
for k in self.field.split('.'):
decoded_text = decoded_text[k]
sentences = decoded_text.split('\n')
for sentence in sentences:
yield [word.strip().lower() for word in sentence.split(' ')]
except KeyError:
# If the field is missing from the document
logging.getLogger(ERROR_LOGGER).error('Key does not exist.', exc_info=True, extra={'hit': hit, 'scroll_response': response})
if self.callback_progress:
self.callback_progress.update(l)
def get_total_documents(self):
return self.es_m.get_total_documents()
| gpl-3.0 | -5,002,534,108,965,422,000 | 29.246154 | 160 | 0.678535 | false | 3.147279 | false | false | false |
PyCQA/pylint | tests/input/similar_lines_b.py | 2 | 1172 | """ The sister file of similar_lines_a, another file designed to have lines of
similarity when compared to its sister file
As with the sister file, we use lorm-ipsum to generate 'random' code. """
# Copyright (c) 2020 Frank Harrison <[email protected]>
class Nulla:
tortor = "ultrices quis porta in"
sagittis = "ut tellus"
def pulvinar(self, blandit, metus):
egestas = [mauris for mauris in zip(blandit, metus)]
neque = (egestas, blandit)
def similar_function_5_lines(self, similar): # line same #1
some_var = 10 # line same #2
someother_var *= 300 # line same #3
fusce = "sit" # line same #4
amet = "tortor" # line same #5
iaculis = "dolor" # line diff
return some_var, someother_var, fusce, amet, iaculis, iaculis # line diff
def tortor(self):
ultrices = 2
quis = ultricies * "porta"
return ultricies, quis
class Commodo:
def similar_function_3_lines(self, tellus): # line same #1
agittis = 10 # line same #2
tellus *= 300 # line same #3
laoreet = "commodo " # line diff
return agittis, tellus, laoreet # line diff
| gpl-2.0 | 5,301,449,111,242,282,000 | 31.555556 | 82 | 0.629693 | false | 2.997442 | false | false | false |
dmdm/PySite | pysite/scripts/pysite_sassc.py | 1 | 1541 | # -*- coding: utf-8 -*-
import transaction
import argparse
from pprint import pprint
import datetime
import sys
import os
import pysite.lib
import pysite.cli
import pysite.authmgr.const
import pysite.vmailmgr.manager as vmailmanager
from pysite.exc import PySiteError
class PySiteSasscCli(pysite.cli.Cli):
def __init__(self):
super().__init__()
def compile(self, site):
site_dir = os.path.join(self._rc.g('sites_dir'), site)
rc = pysite.lib.load_site_config(site_dir, 'rc.yaml')
resp = pysite.lib.compile_sass(site_dir, rc)
resp.print()
def main(argv=sys.argv):
cli = PySiteSasscCli()
# Main parser
parser = argparse.ArgumentParser(description="""PySite-Sassc command-line
interface.""",
epilog="""
Samples:
pysite-sassc -c production.ini www.default.local
""")
parser.add_argument('-c', '--config', required=True,
help="""Path to INI file with configuration,
e.g. 'production.ini'""")
parser.add_argument('-l', '--locale', help="""Set the desired locale.
If omitted and output goes directly to console, we automatically use
the console's locale.""")
parser.add_argument('site',
help="Name of a site, e.g. 'www.default.local'")
# Parse args and run command
args = parser.parse_args()
###pprint(args); sys.exit()
pysite.lib.init_cli_locale(args.locale, print_info=True)
cli.init_app(args)
cli.compile(args.site)
print("Done.", file=sys.stderr)
| agpl-3.0 | 4,563,246,747,042,835,000 | 25.568966 | 77 | 0.641142 | false | 3.534404 | false | false | false |
DMPwerkzeug/DMPwerkzeug | rdmo/options/migrations/0006_refactoring.py | 2 | 4475 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-25 11:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('options', '0005_refactoring'),
]
operations = [
migrations.AlterModelOptions(
name='optionset',
options={'ordering': ('uri',), 'verbose_name': 'OptionSet', 'verbose_name_plural': 'OptionSets'},
),
migrations.AddField(
model_name='option',
name='comment',
field=models.TextField(blank=True, help_text='Additional information about this option.', null=True, verbose_name='Comment'),
),
migrations.AddField(
model_name='option',
name='uri',
field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this option (auto-generated).', max_length=640, null=True, verbose_name='URI'),
),
migrations.AddField(
model_name='option',
name='uri_prefix',
field=models.URLField(blank=True, help_text='The prefix for the URI of this option.', max_length=256, null=True, verbose_name='URI Prefix'),
),
migrations.AddField(
model_name='optionset',
name='comment',
field=models.TextField(blank=True, help_text='Additional information about this option set.', null=True, verbose_name='Comment'),
),
migrations.AddField(
model_name='optionset',
name='uri',
field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this option set (auto-generated).', max_length=640, null=True, verbose_name='URI'),
),
migrations.AddField(
model_name='optionset',
name='uri_prefix',
field=models.URLField(blank=True, help_text='The prefix for the URI of this option set.', max_length=256, null=True, verbose_name='URI Prefix'),
),
migrations.AlterField(
model_name='option',
name='additional_input',
field=models.BooleanField(default=False, help_text='Designates whether an additional input is possible for this option.', verbose_name='Additional input'),
),
migrations.AlterField(
model_name='option',
name='key',
field=models.SlugField(blank=True, help_text='The internal identifier of this option. The URI will be generated from this key.', max_length=128, null=True, verbose_name='Key'),
),
migrations.AlterField(
model_name='option',
name='optionset',
field=models.ForeignKey(blank=True, help_text='The option set this option belongs to.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='options', to='options.OptionSet', verbose_name='Option set'),
),
migrations.AlterField(
model_name='option',
name='order',
field=models.IntegerField(default=0, help_text='The position of this option in lists.', verbose_name='Order'),
),
migrations.AlterField(
model_name='option',
name='text_de',
field=models.CharField(help_text='The German text displayed for this option.', max_length=256, verbose_name='Text (de)'),
),
migrations.AlterField(
model_name='option',
name='text_en',
field=models.CharField(help_text='The English text displayed for this option.', max_length=256, verbose_name='Text (en)'),
),
migrations.AlterField(
model_name='optionset',
name='conditions',
field=models.ManyToManyField(blank=True, help_text='The list of conditions evaluated for this option set.', to='conditions.Condition', verbose_name='Conditions'),
),
migrations.AlterField(
model_name='optionset',
name='key',
field=models.SlugField(blank=True, help_text='The internal identifier of this option set. The URI will be generated from this key.', max_length=128, null=True, verbose_name='Key'),
),
migrations.AlterField(
model_name='optionset',
name='order',
field=models.IntegerField(default=0, help_text='The position of this option set in lists.', verbose_name='Order'),
),
]
| apache-2.0 | 9,142,807,684,476,936,000 | 46.105263 | 231 | 0.610503 | false | 4.382958 | false | false | false |
alexykot/bitfund | bitfund/project/views/charts.py | 1 | 9451 | from decimal import Decimal
import os
import re
import cairoplot
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from bitfund.core.settings_split.project import (ARGB_DONUT_CHART_PLEDGES,
ARGB_DONUT_CHART_REDONATIONS,
ARGB_DONUT_CHART_OTHER_SOURCES,
ARGB_DONUT_CHART_BACKGROUND,
TOTAL_DEGREES,
CHART_RADIUS_LIST,
CHART_IMAGE_TYPE,
CHART_PARAMS,
MINIMAL_DEFAULT_PLEDGES_DEGREES,
MINIMAL_DEFAULT_OTHER_SOURCES_DEGREES,
MINIMAL_DEFAULT_REDONATIONS_DEGREES, CHART_INNER_RADIUS, CHART_PLEDGES_RGB, CHART_REDONATIONS_RGB, CHART_OTHER_SOURCES_RGB, CHART_BACKGROUND_RGB, CHART_PLEDGES_STYLE, CHART_PLEDGES_ALPHA)
from bitfund.core.settings_split.server import MEDIA_ROOT
from bitfund.project.decorators import disallow_not_public_unless_maintainer
from bitfund.project.models import Project, ProjectGoal, ProjectNeed
from bitfund.project.template_helpers import _get_chart_relative_filename, hex_to_rgb, is_number, _parse_request_chart_params
@disallow_not_public_unless_maintainer
def chart_image_project(request, project_key):
project = get_object_or_404(Project, key=project_key)
chart_size, pledges_rgbas, redonations_rgbas, other_sources_rgbas, background_rgbas = _parse_request_chart_params(request)
chart_colors = [pledges_rgbas, redonations_rgbas, other_sources_rgbas, background_rgbas]
if chart_size in CHART_PARAMS['project']:
chart_image_width = CHART_PARAMS['project'][chart_size]['w']
chart_image_height = CHART_PARAMS['project'][chart_size]['h']
else:
chart_image_width = chart_image_height = int(chart_size)
chart_relpathname = _get_chart_relative_filename(project_key, chart_size)
chart_abspathname = MEDIA_ROOT+chart_relpathname
project_monthly_budget = project.getTotalMonthlyBudget()
pledges_needs_total_sum, pledges_goals_total_sum = project.getTotalMonthlyPledges()
redonations_total_sum = project.getTotalMonthlyRedonations()
other_sources_needs_total_sum, other_sources_goals_total_sum = project.getTotalMonthlyOtherSources()
other_sources_total_sum = other_sources_needs_total_sum + other_sources_goals_total_sum
    # donut chart slice sizes, in degrees
if project_monthly_budget > 0 :
pledges_degrees = min(TOTAL_DEGREES,
round(TOTAL_DEGREES * (pledges_needs_total_sum / project_monthly_budget)))
redonations_degrees = min((TOTAL_DEGREES-pledges_degrees),
round(TOTAL_DEGREES * (redonations_total_sum / project_monthly_budget)))
other_sources_degrees = min((TOTAL_DEGREES-pledges_degrees-redonations_degrees),
round(TOTAL_DEGREES * (other_sources_total_sum / project_monthly_budget)))
else :
pledges_degrees = 0
redonations_degrees = 0
other_sources_degrees = 0
if pledges_needs_total_sum > 0 :
pledges_degrees = TOTAL_DEGREES
elif redonations_total_sum > 0 :
redonations_degrees = TOTAL_DEGREES
elif other_sources_total_sum > 0 :
other_sources_degrees = TOTAL_DEGREES
if pledges_needs_total_sum == 0 and redonations_degrees == 0 and other_sources_degrees == 0 :
pledges_degrees = MINIMAL_DEFAULT_PLEDGES_DEGREES
redonations_degrees = MINIMAL_DEFAULT_REDONATIONS_DEGREES
other_sources_degrees = MINIMAL_DEFAULT_OTHER_SOURCES_DEGREES
chart_data = {'1' : pledges_degrees,
'2' : redonations_degrees,
'3' : other_sources_degrees,
'4' : max(0, (TOTAL_DEGREES-(pledges_degrees+redonations_degrees+other_sources_degrees))),
}
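    # Worked example (illustrative, assuming TOTAL_DEGREES is 360): with a monthly budget
    # of 100.00, pledges of 25.00, redonations of 10.00 and other sources of 5.00, the
    # slices above are 360 * 0.25 = 90, 360 * 0.10 = 36 and 360 * 0.05 = 18 degrees,
    # leaving 360 - (90 + 36 + 18) = 216 degrees for the background wedge ('4').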
cairoplot.donut_plot(name=chart_abspathname,
data=chart_data,
width=chart_image_width, height=chart_image_height,
background='transparent',
inner_radius=CHART_INNER_RADIUS,
radius_list=CHART_RADIUS_LIST,
colors=chart_colors
)
response = HttpResponse(mimetype='image/'+CHART_IMAGE_TYPE)
response['Content-Length'] = os.path.getsize(chart_abspathname)
response.write(open(chart_abspathname, 'r').read())
return response
def chart_image_need(request, project_key, need_id):
need = get_object_or_404(ProjectNeed, pk=need_id)
chart_size, pledges_rgbas, redonations_rgbas, other_sources_rgbas, background_rgbas = _parse_request_chart_params(request)
chart_colors = [pledges_rgbas, redonations_rgbas, other_sources_rgbas, background_rgbas]
if chart_size in CHART_PARAMS['need']:
chart_image_width = CHART_PARAMS['need'][chart_size]['w']
chart_image_height = CHART_PARAMS['need'][chart_size]['h']
else:
chart_image_width = chart_image_height = int(chart_size)
chart_relpathname = _get_chart_relative_filename(project_key, chart_size, need_id=need_id)
chart_abspathname = MEDIA_ROOT+chart_relpathname
pledges_degrees = min(TOTAL_DEGREES, Decimal(TOTAL_DEGREES * ((need.getPledgesMonthlyTotal()) / need.amount)).quantize(Decimal('1') ))
redonations_degrees = min((TOTAL_DEGREES-pledges_degrees),
Decimal(TOTAL_DEGREES * ((need.getRedonationsMonthlyTotal()) / need.amount)).quantize(Decimal('1')) )
other_sources_degrees = min((TOTAL_DEGREES-(pledges_degrees+redonations_degrees)),
Decimal(TOTAL_DEGREES * ((need.getOtherSourcesMonthlyTotal()) / need.amount)).quantize(Decimal('1')) )
if pledges_degrees == 0 and redonations_degrees == 0 and other_sources_degrees == 0 :
pledges_degrees = MINIMAL_DEFAULT_PLEDGES_DEGREES
redonations_degrees = MINIMAL_DEFAULT_REDONATIONS_DEGREES
other_sources_degrees = MINIMAL_DEFAULT_OTHER_SOURCES_DEGREES
chart_data = {'1' : pledges_degrees,
'2' : redonations_degrees,
'3' : other_sources_degrees,
'4' : max(0, (TOTAL_DEGREES-(pledges_degrees+other_sources_degrees+redonations_degrees)))
}
cairoplot.donut_plot(name=chart_abspathname,
data=chart_data,
width=chart_image_width, height=chart_image_height,
background='transparent',
inner_radius=CHART_INNER_RADIUS,
colors=chart_colors,
radius_list=CHART_RADIUS_LIST)
response = HttpResponse(mimetype='image/'+CHART_IMAGE_TYPE)
response['Content-Length'] = os.path.getsize(chart_abspathname)
response.write(open(chart_abspathname, 'r').read())
return response
def chart_image_goal(request, project_key, goal_key):
project = get_object_or_404(Project, key=project_key)
goal = get_object_or_404(ProjectGoal, project_id=project.id, key=goal_key)
chart_size, pledges_rgbas, redonations_rgbas, other_sources_rgbas, background_rgbas = _parse_request_chart_params(request)
chart_colors = [pledges_rgbas, redonations_rgbas, other_sources_rgbas, background_rgbas]
if chart_size in CHART_PARAMS['goal']:
chart_image_width = CHART_PARAMS['goal'][chart_size]['w']
chart_image_height = CHART_PARAMS['goal'][chart_size]['h']
else:
chart_image_width = chart_image_height = int(chart_size)
chart_relpathname = _get_chart_relative_filename(project_key, chart_size, goal_id=goal.id)
chart_abspathname = MEDIA_ROOT+chart_relpathname
if goal.amount > 0:
pledges_degrees = min(TOTAL_DEGREES,
Decimal(TOTAL_DEGREES * ((goal.getTotalPledges()) / goal.amount)).quantize(Decimal('1')) )
other_sources_degrees = min((TOTAL_DEGREES-pledges_degrees),
Decimal(TOTAL_DEGREES * ((goal.getTotalOtherSources()) / goal.amount)).quantize(Decimal('1')) )
else:
pledges_degrees = 0
other_sources_degrees = 0
if pledges_degrees == 0 and other_sources_degrees == 0 :
pledges_degrees = MINIMAL_DEFAULT_PLEDGES_DEGREES
other_sources_degrees = MINIMAL_DEFAULT_OTHER_SOURCES_DEGREES
chart_data = {'1' : pledges_degrees,
'2' : 0, # redonations never apply to goals
'3' : other_sources_degrees,
'4' : max(0, (TOTAL_DEGREES-(pledges_degrees+other_sources_degrees)))
}
cairoplot.donut_plot(name=chart_abspathname,
data=chart_data,
width=chart_image_width, height=chart_image_height,
background='transparent',
inner_radius=CHART_INNER_RADIUS,
colors=chart_colors,
radius_list=CHART_RADIUS_LIST)
response = HttpResponse(mimetype='image/'+CHART_IMAGE_TYPE)
response['Content-Length'] = os.path.getsize(chart_abspathname)
response.write(open(chart_abspathname, 'r').read())
return response
| gpl-3.0 | -2,881,084,951,697,107,000 | 47.968912 | 230 | 0.624061 | false | 3.628023 | false | false | false |
mn1del/rpi_cnc_img | helpy.py | 1 | 2984 | #!/usr/bin/env python
# helpy.py
# utility helper functions
import re
import shutil
import os
import subprocess
# chginfile() - replaces strFrom with strTo in file strFilePath
# takes simple string input for strFrom
def chginfile(strFilePath, strFrom, strTo):
"replaces strFrom with strTo in file strFilePath"
fin = open(strFilePath, "r") # input file
fout = open("temp.txt", "w") # temporary output file
patt = re.compile(strFrom) # pattern
for line in fin:
newline = patt.sub(strTo, line) # replace strFrom with strTo
print(newline)
fout.write(newline)
fin.close()
fin = None
fout.close()
fout = None
shutil.move("temp.txt", strFilePath) # replace original with temp.txt
patt = None
os.remove("temp.txt")
# chginfile_re() - replaces regexFrom with strTo in file strFilePath
# for example chginfile_re(fp,"\d.\d","1.2")
# regexFrom argument needs to be passed in quotes
# see http://www.rexegg.com/regex-quickstart.html for regex cheatsheet
def chginfile_re(strFilePath, regexFrom, strTo):
"replaces regexFrom with strTo in file strFilePath"
fin = open(strFilePath, "r") # input file
fout = open("temp.txt", "w") # temporary output file
patt = re.compile(regexFrom) # pattern
for line in fin: # loop through each line in fin
newline = patt.sub(strTo, line) # replace strFrom with strTo
print(newline) # print to console, not necessary
fout.write(newline) # write to temporary file fout
fin.close()
fin = None
fout.close()
fout = None
shutil.move("temp.txt", strFilePath) # replace original with temp.txt
patt = None
os.remove("temp.txt")
# cmdcall() - takes a string and passes it into subprocess.call()
# effectively mimics entering a command directly into the shell
# deprecated - calling subprocess.call() directly gave more control, e.g. when directory changes were needed
# with the cwd argument
def cmdcall(commandString):
"calls command ""commandString"", as if entered in the CLI"
subprocess.call(commandString.split())
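# Hypothetical usage sketch (command is illustrative): cmdcall("ls -l /tmp") behaves like
# typing the same command in a shell, minus shell features such as pipes, globs or quoting.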
def filestuff(origfilepath):
    "quick example of how python can be used to replace subprocess.call('sed')"
    lines = open(origfilepath, "r").readlines() # returns a list of lines
    new = open("tempfile.txt","w") # this is where the processed lines will get dumped
    for line in lines:
        # do some clever regex stuff here; as a placeholder, keep the line unchanged
        newvalue = line
        lines[lines.index(line)] = newvalue # change line to result of clever regex processing
        # lines.insert(lines.index(line), newvalue) # example: insert newvalue on a new line above line
        # lines.append(newvalue) # example: append to end of list
        new.write(newvalue) # write processed line to new
    new.close()
    new = open("tempfile.txt", "r").read() # returns string
    orig = open(origfilepath, "w") # original file, ready for overwriting
    orig.write(new) # overwrite with contents of newly created tempfile.txt
    orig.close() # all done!
| gpl-3.0 | -6,394,695,533,283,493,000 | 39.876712 | 102 | 0.688673 | false | 3.683951 | false | false | false |
sudkannan/xen-hv | dist/install/usr/lib64/python2.6/site-packages/xen/xend/server/BlktapController.py | 13 | 10561 | # Copyright (c) 2005, XenSource Ltd.
import string, re, os
from xen.xend.server.blkif import BlkifController
from xen.xend.XendLogging import log
from xen.util.xpopen import xPopen3
phantomDev = 0;
phantomId = 0;
blktap1_disk_types = [
'aio',
'sync',
'vmdk',
'ram',
'qcow',
'qcow2',
'ioemu',
]
blktap2_disk_types = [
'aio',
'ram',
'qcow',
'vhd',
'remus',
]
blktap_disk_types = blktap1_disk_types + blktap2_disk_types
def doexec(args, inputtext=None):
"""Execute a subprocess, then return its return code, stdout and stderr"""
proc = xPopen3(args, True)
if inputtext != None:
proc.tochild.write(inputtext)
stdout = proc.fromchild
stderr = proc.childerr
rc = proc.wait()
return (rc,stdout,stderr)
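# Hypothetical usage sketch (not from the original module, assumes the xen utility modules
# are importable):
#   rc, out, err = doexec(['/bin/ls', '-l'])
# 'out' and 'err' are file-like objects still to be read; 'rc' is the child's exit status.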
# blktap1 device controller
class BlktapController(BlkifController):
def __init__(self, vm):
BlkifController.__init__(self, vm)
def frontendRoot(self):
"""@see DevController#frontendRoot"""
return "%s/device/vbd" % self.vm.getDomainPath()
def getDeviceDetails(self, config):
(devid, back, front) = BlkifController.getDeviceDetails(self, config)
phantomDevid = 0
wrapped = False
try:
imagetype = self.vm.info['image']['type']
except:
imagetype = ""
if imagetype == 'hvm':
tdevname = back['dev']
index = ['c', 'd', 'e', 'f', 'g', 'h', 'i', \
'j', 'l', 'm', 'n', 'o', 'p']
while True:
global phantomDev
global phantomId
import os, stat
phantomId = phantomId + 1
if phantomId == 16:
if index[phantomDev] == index[-1]:
if wrapped:
raise VmError(" No loopback block \
devices are available. ")
wrapped = True
phantomDev = 0
else:
phantomDev = phantomDev + 1
phantomId = 1
devname = 'xvd%s%d' % (index[phantomDev], phantomId)
try:
info = os.stat('/dev/%s' % devname)
except:
break
vbd = { 'mode': 'w', 'device': devname }
fn = 'tap:%s' % back['params']
# recurse ... by creating the vbd, then fallthrough
# and finish creating the original device
from xen.xend import XendDomain
dom0 = XendDomain.instance().privilegedDomain()
phantomDevid = dom0.create_phantom_vbd_with_vdi(vbd, fn)
# we need to wait for this device at a higher level
# the vbd that gets created will have a link to us
# and will let them do it there
# add a hook to point to the phantom device,
# root path is always the same (dom0 tap)
if phantomDevid != 0:
front['phantom_vbd'] = '/local/domain/0/backend/tap/0/%s' \
% str(phantomDevid)
return (devid, back, front)
class Blktap2Controller(BlktapController):
def __init__(self, vm):
BlktapController.__init__(self, vm)
def backendPath(self, backdom, devid):
if self.deviceClass == 'tap2':
deviceClass = 'vbd'
else:
deviceClass = 'tap'
return "%s/backend/%s/%s/%d" % (backdom.getDomainPath(),
deviceClass,
self.vm.getDomid(), devid)
def getDeviceDetails(self, config):
(devid, back, front) = BlktapController.getDeviceDetails(self, config)
if self.deviceClass == 'tap2':
            # since blktap2 uses blkback as a backend the 'params' field contains
# the path to the blktap2 device (/dev/xen/blktap-2/tapdev*). As well,
# we need to store the params used to create the blktap2 device
# (tap:tapdisk:<driver>:/<image-path>)
tapdisk_uname = config.get('tapdisk_uname', '')
(_, tapdisk_params) = string.split(tapdisk_uname, ':', 1)
back['tapdisk-params'] = tapdisk_params
return (devid, back, front)
def getDeviceConfiguration(self, devid, transaction = None):
        # this is a blktap2 device, so we need to overwrite the 'params' field
# with the actual blktap2 parameters. (the vbd parameters are of little
# use to us)
config = BlktapController.getDeviceConfiguration(self, devid, transaction)
if transaction is None:
tapdisk_params = self.readBackend(devid, 'tapdisk-params')
else:
tapdisk_params = self.readBackendTxn(transaction, devid, 'tapdisk-params')
if tapdisk_params:
config['uname'] = 'tap:' + tapdisk_params
return config
def createDevice(self, config):
uname = config.get('uname', '')
try:
(typ, subtyp, params, file) = string.split(uname, ':', 3)
if subtyp not in ('tapdisk', 'ioemu'):
raise ValueError('invalid subtype')
except:
(typ, params, file) = string.split(uname, ':', 2)
subtyp = 'tapdisk'
if typ in ('tap'):
if subtyp in ('tapdisk', 'ioemu'):
if params not in blktap2_disk_types or \
TapdiskController.check():
# pass this device off to BlktapController
log.warn('WARNING: using deprecated blktap module')
self.deviceClass = 'tap'
devid = BlktapController.createDevice(self, config)
self.deviceClass = 'tap2'
return devid
device = TapdiskController.create(params, file)
        # modify the configuration to create a blkback for the underlying
# blktap2 device. Note: we need to preserve the original tapdisk uname
# (it is used during save/restore and for managed domains).
config.update({'tapdisk_uname' : uname})
config.update({'uname' : 'phy:' + device.rstrip()})
devid = BlkifController.createDevice(self, config)
config.update({'uname' : uname})
config.pop('tapdisk_uname')
return devid
# This function is called from a thread when the
# domain is detached from the disk.
def finishDeviceCleanup(self, backpath, path):
"""Perform any device specific cleanup
@backpath backend xenstore path.
@path frontend device path
"""
#Figure out what we're going to wait on.
self.waitForBackend_destroy(backpath)
TapdiskController.destroy(path)
class TapdiskException(Exception):
pass
class TapdiskController(object):
'''class which encapsulates all tapdisk control operations'''
TAP_CTL = 'tap-ctl'
TAP_DEV = '/dev/xen/blktap-2/tapdev'
class Tapdisk(object):
def __init__(self, pid=None, minor=-1, state=None,
dtype='', image=None, device=None):
self.pid = pid
self.minor = minor
self.state = state
self.dtype = dtype
self.image = image
self.device = device
def __str__(self):
return 'image=%s pid=%s minor=%s state=%s type=%s device=%s' \
% (self.image, self.pid, self.minor, self.state, self.dtype,
self.device)
@staticmethod
def exc(*args):
rc, stdout, stderr = doexec([TapdiskController.TAP_CTL] + list(args))
out, err = stdout.read().strip(), stderr.read().strip()
stdout.close()
stderr.close()
if rc:
raise TapdiskException('%s failed (%s %s %s)' % \
(args, rc, out, err))
return out
@staticmethod
def check():
try:
TapdiskController.exc('check')
return 0
except Exception, e:
log.warn("tapdisk2 check failed: %s" % e)
return -1
@staticmethod
def list():
tapdisks = []
_list = TapdiskController.exc('list')
if not _list: return []
for line in _list.split('\n'):
tapdisk = TapdiskController.Tapdisk()
for pair in line.split():
key, value = pair.split('=')
if key == 'pid':
tapdisk.pid = value
elif key == 'minor':
tapdisk.minor = int(value)
if tapdisk.minor >= 0:
tapdisk.device = '%s%s' % \
(TapdiskController.TAP_DEV, tapdisk.minor)
elif key == 'state':
tapdisk.state = value
elif key == 'args' and value.find(':') != -1:
tapdisk.dtype, tapdisk.image = value.split(':')
tapdisks.append(tapdisk)
return tapdisks
@staticmethod
def fromDevice(device):
if device.startswith(TapdiskController.TAP_DEV):
minor = os.minor(os.stat(device).st_rdev)
tapdisks = filter(lambda x: x.minor == minor,
TapdiskController.list())
if len(tapdisks) == 1:
return tapdisks[0]
return None
@staticmethod
def create(dtype, image):
return TapdiskController.exc('create', '-a%s:%s' % (dtype, image))
@staticmethod
def destroy(device):
tapdisk = TapdiskController.fromDevice(device)
if tapdisk:
if tapdisk.pid:
TapdiskController.exc('destroy',
'-p%s' % tapdisk.pid,
'-m%s' % tapdisk.minor)
else:
TapdiskController.exc('free', '-m%s' % tapdisk.minor)
@staticmethod
def pause(device):
tapdisk = TapdiskController.fromDevice(device)
if tapdisk and tapdisk.pid:
TapdiskController.exc('pause',
'-p%s' % tapdisk.pid,
'-m%s' % tapdisk.minor)
@staticmethod
def unpause(device):
tapdisk = TapdiskController.fromDevice(device)
if tapdisk and tapdisk.pid:
TapdiskController.exc('unpause',
'-p%s' % tapdisk.pid,
'-m%s' % tapdisk.minor)
| gpl-2.0 | 952,386,985,070,820,100 | 32.849359 | 86 | 0.528832 | false | 4.003412 | true | false | false |
flavour/eden | modules/templates/BRCMS/idcards.py | 9 | 11359 | # -*- coding: utf-8 -*-
import os
from reportlab.lib.colors import HexColor
from reportlab.platypus import Paragraph
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.enums import TA_CENTER
from gluon import current
from s3.codecs.card import S3PDFCardLayout
from s3 import s3_format_fullname, s3_str
# Fonts we use in this layout
NORMAL = "Helvetica"
BOLD = "Helvetica-Bold"
# =============================================================================
class IDCardLayout(S3PDFCardLayout):
"""
Layout for printable beneficiary ID cards
"""
# -------------------------------------------------------------------------
@classmethod
def fields(cls, resource):
"""
The layout-specific list of fields to look up from the resource
@param resource: the resource
@returns: list of field selectors
"""
return ["id",
"pe_id",
"pe_label",
"first_name",
"middle_name",
"last_name",
"case.organisation_id$root_organisation",
]
# -------------------------------------------------------------------------
@classmethod
def lookup(cls, resource, items):
"""
Look up layout-specific common data for all cards
@param resource: the resource
@param items: the items
@returns: a dict with common data
"""
db = current.db
s3db = current.s3db
defaultpath = os.path.join(current.request.folder, 'uploads')
# Get all root organisations
root_orgs = set(item["_row"]["org_organisation.root_organisation"]
for item in items)
# Get localized root organisation names
ctable = s3db.br_case
represent = ctable.organisation_id.represent
if represent.bulk:
root_org_names = represent.bulk(list(root_orgs), show_link=False)
else:
root_org_names = None
# Get all PE IDs
pe_ids = set(item["_row"]["pr_person.pe_id"] for item in items)
# Look up all profile pictures
itable = s3db.pr_image
query = (itable.pe_id.belongs(pe_ids)) & \
(itable.profile == True) & \
(itable.deleted == False)
rows = db(query).select(itable.pe_id, itable.image)
field = itable.image
path = field.uploadfolder if field.uploadfolder else defaultpath
pictures = {row.pe_id: os.path.join(path, row.image) for row in rows if row.image}
return {"pictures": pictures,
"root_org_names": root_org_names,
}
# -------------------------------------------------------------------------
def draw(self):
"""
Draw the card (one side)
Instance attributes (NB draw-function should not modify them):
- self.canv...............the canvas (provides the drawing methods)
- self.resource...........the resource
- self.item...............the data item (dict)
- self.labels.............the field labels (dict)
- self.backside...........this instance should render the backside
of a card
- self.multiple...........there are multiple cards per page
- self.width..............the width of the card (in points)
- self.height.............the height of the card (in points)
NB Canvas coordinates are relative to the lower left corner of the
card's frame, drawing must not overshoot self.width/self.height
"""
T = current.T
c = self.canv
w = self.width
#h = self.height
common = self.common
blue = HexColor(0x27548F)
item = self.item
raw = item["_row"]
root_org = raw["org_organisation.root_organisation"]
# Get the localized root org name
org_names = common.get("root_org_names")
if org_names:
root_org_name = org_names.get(root_org)
#draw_field = self.draw_field
draw_value = self.draw_value
draw_label = self.draw_label
code = raw["pr_person.pe_label"]
if not self.backside:
# Horizontal alignments
LEFT = w / 4 - 5
CENTER = w / 2 - 5
RIGHT = w * 3 / 4 - 5
# Vertical alignments
TOP = 200
#LOWER = [76, 58, 40]
BOTTOM = 16
# Organisation name
if root_org_name:
draw_value(LEFT, TOP, root_org_name,
width = 55,
height = 55,
size = 10,
valign = "middle",
)
# Get the profile picture
pictures = common.get("pictures")
if pictures:
picture = pictures.get(raw["pr_person.pe_id"])
if picture:
self.draw_image(picture, RIGHT, TOP,
width = 60,
height = 55,
valign = "middle",
halign = "center",
)
# Center fields in reverse order so that vertical positions
# can be adjusted for very long and hence wrapping strings
y = 98
# ID
ah = draw_value(CENTER, y, code, height=24, size=8)
draw_label(CENTER, y, None, T("ID Number"))
# Name
y += ah + 12
name = s3_format_fullname(fname = raw["pr_person.first_name"],
mname = raw["pr_person.middle_name"],
lname = raw["pr_person.last_name"],
truncate = False,
)
draw_value(CENTER, y, name, height=24, size=10)
draw_label(CENTER, y, None, T("Name"))
# Barcode
if code:
self.draw_barcode(s3_str(code), CENTER, BOTTOM,
height = 12,
halign = "center",
maxwidth = w - 15,
)
# Graphics
c.setFillColor(blue)
c.rect(0, 0, w, 12, fill=1, stroke=0)
c.rect(w - 12, 0, 12, 154, fill=1, stroke=0)
            # Add a cutting line when printing multiple cards per page
if self.multiple:
c.setDash(1, 2)
self.draw_outline()
else:
# Horizontal alignments
CENTER = w / 2
# Vertical alignments
TOP = 200
MIDDLE = 85
BOTTOM = 16
# QR Code
if code:
identity = "%s//%s:%s:%s" % (code,
raw["pr_person.first_name"] or "",
raw["pr_person.middle_name"] or "",
raw["pr_person.last_name"] or "",
)
self.draw_qrcode(identity, CENTER, MIDDLE,
size=60, halign="center", valign="center")
# Barcode
if code:
self.draw_barcode(s3_str(code), CENTER, BOTTOM,
height = 12,
halign = "center",
maxwidth = w - 15
)
# Graphics
c.setFillColor(blue)
c.rect(0, 0, w, 10, fill=1, stroke=0)
# -------------------------------------------------------------------------
def draw_field(self, x, y, colname, size=7, bold=True):
"""
Helper function to draw a centered field value of self.item above
position (x, y)
@param x: drawing position
@param y: drawing position
@param colname: the column name of the field to look up the value
@param size: the font size (points)
@param bold: use bold font
"""
c = self.canv
font = BOLD if bold else NORMAL
value = self.item.get(colname)
if value:
c.setFont(font, size)
c.drawCentredString(x, y, s3_str(value))
# -------------------------------------------------------------------------
def draw_value(self, x, y, value, width=120, height=40, size=7, bold=True, valign=None):
"""
Helper function to draw a centered text above position (x, y);
allows the text to wrap if it would otherwise exceed the given
width
@param x: drawing position
@param y: drawing position
@param value: the text to render
@param width: the maximum available width (points)
@param height: the maximum available height (points)
@param size: the font size (points)
@param bold: use bold font
@param valign: vertical alignment ("top"|"middle"|"bottom"),
default "bottom"
@returns: the actual height of the text element drawn
"""
# Preserve line breaks by replacing them with <br/> tags
value = s3_str(value).strip("\n").replace('\n','<br />\n')
stylesheet = getSampleStyleSheet()
style = stylesheet["Normal"]
style.fontName = BOLD if bold else NORMAL
style.fontSize = size
style.leading = size + 2
style.splitLongWords = False
style.alignment = TA_CENTER
para = Paragraph(value, style)
aw, ah = para.wrap(width, height)
while((ah > height or aw > width) and style.fontSize > 4):
# Reduce font size to make fit
style.fontSize -= 1
style.leading = style.fontSize + 2
para = Paragraph(value, style)
aw, ah = para.wrap(width, height)
if valign == "top":
vshift = ah
elif valign == "middle":
vshift = ah / 2.0
else:
vshift = 0
para.drawOn(self.canv, x - para.width / 2, y - vshift)
return ah
# -------------------------------------------------------------------------
def draw_label(self, x, y, colname, default=""):
"""
Helper function to draw a centered label below position (x, y)
@param x: drawing position
@param y: drawing position
@param colname: the column name of the field to look up the label
@param default: the default label (if label cannot be looked up),
pass colname=None to enforce using the default
"""
if colname:
label = self.labels.get(colname, default)
else:
label = default
c = self.canv
c.setFont(NORMAL, 5)
c.drawCentredString(x, y - 6, s3_str(label))
# END =========================================================================
| mit | 5,628,638,435,310,537,000 | 33.525836 | 92 | 0.46122 | false | 4.634435 | false | false | false |
apocalyptech/pyinvedit | pyinveditlib/minecraft.py | 1 | 8671 | #!/usr/bin/env python
# vim: set expandtab tabstop=4 shiftwidth=4:
#
# Copyright (c) 2012, Christopher J. Kucera
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the PyInvEdit team nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL VINCENT VOLLERS OR CJ KUCERA BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from pymclevel import nbt
# This file primarily contains classes which represent the actual
# on-disk data for a given savefile, without our abstractions on
# top of it.
class EnchantmentSlot(object):
"""
Holds information about a particular enchantment inside a particular inventory slot
"""
def __init__(self, nbtobj=None, num=None, lvl=None):
"""
Initializes a new object. Either pass in 'nbtobj' or
both 'num' and 'lvl'
"""
if nbtobj is None:
self.num = num
self.lvl = lvl
self.extratags = {}
else:
self.num = nbtobj['id'].value
self.lvl = nbtobj['lvl'].value
self.extratags = {}
for tagname in nbtobj:
if tagname not in ['id', 'lvl']:
self.extratags[tagname] = nbtobj[tagname]
def copy(self):
"""
Returns a fresh object with our data
"""
newench = EnchantmentSlot(num=self.num, lvl=self.lvl)
newench.extratags = self.extratags
return newench
def export_nbt(self):
"""
Exports ourself as an NBT object
"""
nbtobj = nbt.TAG_Compound()
nbtobj['id'] = nbt.TAG_Short(self.num)
nbtobj['lvl'] = nbt.TAG_Short(self.lvl)
for tagname, tagval in self.extratags.iteritems():
nbtobj[tagname] = tagval
return nbtobj
def has_extra_info(self):
"""
Returns whether or not we have any extra information
"""
return (len(self.extratags) > 0)
class InventorySlot(object):
"""
Holds information about a particular inventory slot. We make an effort to
never lose any data that we don't explicitly understand, and so you'll see
two extra dicts in here with the names extratags and extratagtags. The
first holds extra tag information stored right at the "Slot" level of
the NBT structure. Before we enabled explicit support for enchantments,
this is the variable which held and saved enchantment information.
Since adding in Enchantments explicitly, extratagtags is used to store
extra tag information found alongside enchantments. The enchantments
themselves are found in an "ench" tag which itself lives inside a tag
helpfully labeled "tag," hence the odd naming of "extratagtags." Alas!
"""
def __init__(self, nbtobj=None, other=None, num=None, damage=None, count=None, slot=None):
"""
Initializes a new object. There are a few different valid ways of doing so:
1) Pass in only nbtobj, as loaded from level.dat. Everything will be populated
from that one object. Used on initial loads.
2) Pass in other and slot, which is another InventorySlot object from which to
copy all of our data.
3) Only pass in "slot" - this will create an empty object.
4) Pass in num, damage, count, and slot.
"""
if nbtobj is None:
if other is None:
self.slot = slot
self.num = num
self.damage = damage
self.count = count
self.extratags = {}
self.extratagtags = {}
self.enchantments = []
else:
self.slot = other.slot
self.num = other.num
self.damage = other.damage
self.count = other.count
self.extratags = other.extratags
self.extratagtags = other.extratagtags
self.enchantments = []
for ench in other.enchantments:
self.enchantments.append(ench.copy())
else:
self.num = nbtobj['id'].value
self.damage = nbtobj['Damage'].value
self.count = nbtobj['Count'].value
self.slot = nbtobj['Slot'].value
self.enchantments = []
self.extratagtags = {}
if 'tag' in nbtobj:
if 'ench' in nbtobj['tag']:
for enchtag in nbtobj['tag']['ench']:
self.enchantments.append(EnchantmentSlot(nbtobj=enchtag))
for tagname in nbtobj['tag']:
if tagname not in ['ench']:
                        self.extratagtags[tagname] = nbtobj['tag'][tagname]
self.extratags = {}
for tagname in nbtobj:
if tagname not in ['id', 'Damage', 'Count', 'Slot', 'tag']:
self.extratags[tagname] = nbtobj[tagname]
# Check to see if we're supposed to override the "slot" value
if slot is not None:
self.slot = slot
# Doublecheck that we have some vars
if self.extratags is None:
self.extratags = {}
if self.extratagtags is None:
self.extratagtags = {}
if self.enchantments is None:
self.enchantments = []
def __cmp__(self, other):
"""
Comparator object for sorting
"""
return cmp(self.num, other.num)
def export_nbt(self):
"""
Exports ourself as an NBT object
"""
item_nbt = nbt.TAG_Compound()
item_nbt['Count'] = nbt.TAG_Byte(self.count)
item_nbt['Slot'] = nbt.TAG_Byte(self.slot)
item_nbt['id'] = nbt.TAG_Short(self.num)
item_nbt['Damage'] = nbt.TAG_Short(self.damage)
for tagname, tagval in self.extratags.iteritems():
item_nbt[tagname] = tagval
if len(self.enchantments) > 0 or len(self.extratagtags) > 0:
tag_nbt = nbt.TAG_Compound()
if len(self.enchantments) > 0:
ench_tag = nbt.TAG_List()
for ench in self.enchantments:
ench_tag.append(ench.export_nbt())
tag_nbt['ench'] = ench_tag
for tagname, tagval in self.extratagtags.iteritems():
tag_nbt[tagname] = tagval
item_nbt['tag'] = tag_nbt
return item_nbt
def has_extra_info(self):
"""
Returns whether or not we have any extra info in our tags
"""
if len(self.extratags) > 0:
return True
if len(self.extratagtags) > 0:
return True
for ench in self.enchantments:
if ench.has_extra_info():
return True
return False
class Inventory(object):
"""
Holds Information about our inventory as a whole
"""
def __init__(self, data):
"""
        Loads the inventory into memory from the given NBT object
"""
self.inventory = {}
for item in data:
self._import_item(item)
def _import_item(self, item):
"""
Imports an item from the given NBT Object
"""
slot = item['Slot'].value
self.inventory[slot] = InventorySlot(nbtobj=item)
def get_items(self):
"""
Gets a list of all items in this inventory set
"""
return self.inventory.values()
| isc | -6,467,950,144,855,455,000 | 37.198238 | 94 | 0.597855 | false | 4.001384 | false | false | false |
ryosuzuki/crowdsource-platform | crowdsourcing/serializers/worker.py | 2 | 8045 | __author__ = 'elsabakiu, dmorina, neilthemathguy, megha, asmita'
from crowdsourcing import models
from rest_framework import serializers
from crowdsourcing.serializers.template import TemplateItemSerializer
from crowdsourcing.serializers.dynamic import DynamicFieldsModelSerializer
from rest_framework.exceptions import ValidationError
from django.db import transaction
class SkillSerializer(serializers.ModelSerializer):
class Meta:
model = models.Skill
fields = ('name', 'description', 'verified', 'deleted', 'created_timestamp', 'last_updated', 'id')
read_only_fields = ('created_timestamp', 'last_updated')
def create(self, validated_data):
skill = models.Skill.objects.create(deleted=False, **validated_data)
return skill
def update(self, instance, validated_data):
instance.name = validated_data.get('name', instance.name)
instance.description = validated_data.get('description', instance.description)
# TODO(megha.agarwal): Define method to verify the skill added
instance.verified = True
instance.save()
return instance
def delete(self, instance):
instance.deleted = True
instance.save()
return instance
class WorkerSerializer(DynamicFieldsModelSerializer):
'''
Good Lord, this needs cleanup :D
'''
num_tasks = serializers.SerializerMethodField()
task_status_det = serializers.SerializerMethodField()
task_category_det = serializers.SerializerMethodField()
task_price_time = serializers.SerializerMethodField()
total_balance = serializers.SerializerMethodField()
class Meta:
model = models.Worker
fields = ('profile', 'skills', 'num_tasks', 'task_status_det', 'task_category_det', 'task_price_time', 'id','total_balance')
read_only_fields = ('num_tasks', 'task_status_det', 'task_category_det', 'task_price_time','total_balance')
def create(self, validated_data):
worker = models.Worker.objects.create(**validated_data)
return worker
def delete(self, instance):
instance.deleted = True
instance.save()
return instance
# Returns number of tasks the worker has/had worked on
def get_num_tasks(self, instance):
# response_data = models.Worker.objects.filter(taskworker__worker = instance).count()
response_data = models.TaskWorker.objects.filter(worker=instance).count()
return response_data
# Returns tasks grouped by task status that the worker has undertaken
# Also returns the number of tasks within each task status
def get_task_status_det(self, instance):
task_status = dict()
number_task_per_status = dict()
task_set = models.TaskWorker.objects.filter(worker=instance)
# e.g. task_status = {'Accepted': ['Task1', 'Task2', 'Task3']}
for task_worker in task_set:
key = task_worker.task.module.status
value = task_worker.task.module.description
task_status.setdefault(key, [])
task_status[key].append(value)
# e.g. number_task_per_status = ['Accepted' : 3]
for key_status in task_status:
number_task_per_status[key_status] = len(task_status[key_status])
return task_status, number_task_per_status
# Returns the task grouped by Category that the worker has undertaken
# Also returns the number of tasks within each category
def get_task_category_det(self, instance):
task_categories = dict()
number_task_per_category = dict()
task_set = models.TaskWorker.objects.filter(worker=instance)
# e.g. task_categories = {'Image': ['Task1', 'Task2', 'Task3']}
for task_worker in task_set:
key = task_worker.task.module.categories.name
value = task_worker.task.module.description
task_categories.setdefault(key, [])
task_categories[key].append(value)
# e.g. number_task_per_category = ['Image' : 3]
for key_category in task_categories:
number_task_per_category[key_category] = len(task_categories[key_category])
return task_categories, number_task_per_category
# Returns the number of hours spent by a worker on the task and corresponding price
def get_task_price_time(self, instance):
task_det = []
task_set = models.TaskWorker.objects.filter(worker=instance)
# e.g. task_det = [{description: 'Task1', price: '50$', time_spent_in_hrs: '2', deadline: '2015-06-11'}]
for task_worker in task_set:
task_info = dict()
deadline = task_worker.task.module.project.end_date
# TODO(megha.agarwal): Refine duration spent on a task
date1 = task_worker.task.created_timestamp
date2 = task_worker.task.last_updated
time_spent = (((date2 - date1).total_seconds()) / 3600)
task_info['description'] = task_worker.task.module.description
task_info['deadline'] = deadline
task_info['price'] = task_worker.task.price
task_info['time_spent_in_hrs'] = time_spent
task_det.append(task_info)
return task_det
def get_total_balance(self,instance):
        acceptedresults = models.TaskWorkerResult.objects.filter(status=2, task_worker__worker=instance)
balance = 0
for eachresult in acceptedresults:
balance = balance + eachresult.task_worker.task.price
return balance
class WorkerSkillSerializer(serializers.ModelSerializer):
class Meta:
model = models.WorkerSkill
fields = ('worker', 'skill', 'level', 'verified', 'created_timestamp', 'last_updated')
read_only_fields = ('worker', 'created_timestamp', 'last_updated', 'verified')
def create(self, **kwargs):
worker_skill = models.WorkerSkill.objects.get_or_create(worker=kwargs['worker'], **self.validated_data)
return worker_skill
class TaskWorkerResultSerializer (serializers.ModelSerializer):
#task_worker = TaskWorkerSerializer()
template_item = TemplateItemSerializer()
class Meta:
model = models.TaskWorkerResult
fields = ('id', 'template_item', 'result', 'status', 'created_timestamp', 'last_updated')
read_only_fields = ('template_item', 'created_timestamp', 'last_updated')
class TaskWorkerSerializer (serializers.ModelSerializer):
module = serializers.ModelField(model_field=models.Task()._meta.get_field('module'), write_only=True)
task_worker_results = TaskWorkerResultSerializer(many=True, read_only=True)
worker_alias = serializers.SerializerMethodField()
class Meta:
model = models.TaskWorker
fields = ('task', 'worker', 'created_timestamp', 'last_updated', 'module', 'task_worker_results', 'worker_alias')
read_only_fields = ('task', 'worker', 'created_timestamp', 'last_updated')
def create(self, **kwargs):
module = self.validated_data.pop('module')
module_instance = models.Module.objects.get(id=module)
repetition = module_instance.repetition
with transaction.atomic():
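            # Lock a candidate task row (select_for_update) inside the transaction so
            # concurrent workers cannot claim the same task; tasks with status > 2 and
            # tasks this worker already holds are excluded.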
tasks = models.Task.objects.select_for_update(nowait=False).filter(module=module).exclude(status__gt=2).exclude(task_workers__worker=kwargs['worker']).first()
if tasks:
task_worker = models.TaskWorker.objects.create(worker=kwargs['worker'], task=tasks)
tasks.status = 2
tasks.save()
return task_worker
else:
raise ValidationError('No tasks left for this module')
def get_worker_alias(self, obj):
return obj.worker.profile.worker_alias
class WorkerModuleApplicationSerializer(serializers.ModelSerializer):
class Meta:
model = models.WorkerModuleApplication
fields = ('worker', 'module', 'status', 'created_timestamp', 'last_updated')
read_only_fields = ('worker', 'module', 'created_timestamp', 'last_updated')
| mit | 3,688,710,145,697,652,000 | 42.486486 | 170 | 0.661653 | false | 4.028543 | false | false | false |
inonit/wagtail | wagtail/wagtailadmin/tests/test_userbar.py | 3 | 5225 | from django.test import TestCase
from django.test.client import RequestFactory
from django.core.urlresolvers import reverse
from django.template import Template, Context
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore.models import Page, PAGE_TEMPLATE_VAR
from wagtail.tests.testapp.models import BusinessIndex, BusinessChild
class TestUserbarTag(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_superuser(
username='test',
email='[email protected]',
password='password'
)
self.homepage = Page.objects.get(id=2)
def dummy_request(self, user=None):
request = RequestFactory().get('/')
request.user = user or AnonymousUser()
return request
def test_userbar_tag(self):
template = Template("{% load wagtailuserbar %}{% wagtailuserbar %}")
content = template.render(Context({
PAGE_TEMPLATE_VAR: self.homepage,
'request': self.dummy_request(self.user),
}))
self.assertIn("<!-- Wagtail user bar embed code -->", content)
def test_userbar_tag_self(self):
"""
Ensure the userbar renders with `self` instead of `PAGE_TEMPLATE_VAR`
"""
template = Template("{% load wagtailuserbar %}{% wagtailuserbar %}")
content = template.render(Context({
'self': self.homepage,
'request': self.dummy_request(self.user),
}))
self.assertIn("<!-- Wagtail user bar embed code -->", content)
def test_userbar_tag_anonymous_user(self):
template = Template("{% load wagtailuserbar %}{% wagtailuserbar %}")
content = template.render(Context({
PAGE_TEMPLATE_VAR: self.homepage,
'request': self.dummy_request(),
}))
# Make sure nothing was rendered
self.assertEqual(content, '')
class TestUserbarFrontend(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
self.homepage = Page.objects.get(id=2)
def test_userbar_frontend(self):
response = self.client.get(reverse('wagtailadmin_userbar_frontend', args=(self.homepage.id, )))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/userbar/base.html')
def test_userbar_frontend_anonymous_user_cannot_see(self):
# Logout
self.client.logout()
response = self.client.get(reverse('wagtailadmin_userbar_frontend', args=(self.homepage.id, )))
        # Check that the user received a forbidden message
self.assertEqual(response.status_code, 403)
class TestUserbarAddLink(TestCase, WagtailTestUtils):
fixtures = ['test.json']
def setUp(self):
self.login()
self.homepage = Page.objects.get(url_path='/home/')
self.event_index = Page.objects.get(url_path='/home/events/')
self.business_index = BusinessIndex(title='Business', slug='business', live=True)
self.homepage.add_child(instance=self.business_index)
self.business_child = BusinessChild(title='Business Child', slug='child', live=True)
self.business_index.add_child(instance=self.business_child)
def test_page_allowing_subpages(self):
response = self.client.get(reverse('wagtailadmin_userbar_frontend', args=(self.event_index.id, )))
# page allows subpages, so the 'add page' button should show
expected_url = reverse('wagtailadmin_pages:add_subpage', args=(self.event_index.id, ))
expected_link = '<a href="%s" target="_parent" class="action icon icon-plus" title="Add a child page">Add</a>' \
% expected_url
self.assertContains(response, expected_link)
def test_page_disallowing_subpages(self):
response = self.client.get(reverse('wagtailadmin_userbar_frontend', args=(self.business_child.id, )))
# page disallows subpages, so the 'add page' button shouldn't show
expected_url = reverse('wagtailadmin_pages:add_subpage', args=(self.business_index.id, ))
expected_link = '<a href="%s" target="_parent" class="action icon icon-plus" title="Add a child page">Add</a>' \
% expected_url
self.assertNotContains(response, expected_link)
class TestUserbarModeration(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
self.homepage = Page.objects.get(id=2)
self.homepage.save_revision()
self.revision = self.homepage.get_latest_revision()
def test_userbar_moderation(self):
response = self.client.get(reverse('wagtailadmin_userbar_moderation', args=(self.revision.id, )))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/userbar/base.html')
def test_userbar_moderation_anonymous_user_cannot_see(self):
# Logout
self.client.logout()
response = self.client.get(reverse('wagtailadmin_userbar_moderation', args=(self.revision.id, )))
        # Check that the user received a forbidden message
self.assertEqual(response.status_code, 403)
| bsd-3-clause | 7,109,178,911,511,505,000 | 38.285714 | 120 | 0.664115 | false | 3.994648 | true | false | false |
mnickey/emails-with-attachments | log_reader.py | 1 | 4019 | # coding=utf-8
__author__ = 'Michael Nickey'
"""
REQUEST:
On a host that is connected to a LAN, you have a log file that contains a list of users who have
logged onto some of the machines on the network in the past 24 hours.
Write a script that searches for computers on the network that are currently online,
and then sends a text file to the appropriate users on the online computers.
At the end of the run, the script should mark in the log file the computers to which the file has been transmitted.
In the log file, it should also add computers that have been discovered in the current traversal,
which were not listed originally.
Please specify any assumptions you make and explain how you’d test your code.
Assumptions:
The log file is in csv form.
The log file contains
a date/time stamp
username
user-email address
computer-id
online status (online vs offline)
script will be in the same directory as the logfile or the logfile will be copied to the same directory
"""
# Imports
import csv
import datetime
import logging
# SET GLOBAL VARIABLES
# This is the time delta. To change this time, change the number to the days that you want to search the log for.
DAY = datetime.timedelta(days=4)
# Format of the dates that are being compared
FORMAT = "%Y-%m-%d"
# Create a date variable for the current date
TODAY = datetime.date.today()
# Set the log output file, and the log level
logging.basicConfig(filename="output.txt", level=logging.DEBUG)
#----------------------------------------------------------------------
def csv_dict_reader(file_obj):
"""
Read a CSV file using csv.DictReader
:param file_obj: This is the logfile that is to be read. The log file needs to be in CSV format
:return: info from the CSV log file of those users that have a time delta greater than what is set in DAY.
In this example the time delta is set to 4 days.
"""
reader = csv.DictReader(file_obj, delimiter=',')
for line in reader:
line["Date"] = datetime.date(*(int(x) for x in line["Date"].split("-")))
if (TODAY - (line["Date"] )) < DAY:
# Log the computers that have been accessed within the last day
logging.info("{} -- User: {} at {} accessed {}".format(TODAY, line["Username"], line["User Email"], line["Computer ID"]))
print line["Username"], line["User Email"]
send_the_Mail(line["User Email"])
def send_the_Mail(recipient):
"""
This function takes in recipient and will send the email to that email address with an attachment.
:param recipient: the email of the person to get the text file attachment
"""
# Import the needed email libraries
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from smtplib import SMTP
# Set the server and the message details
send_from = '[email protected]'
send_to = recipient
subject = "Computer Access text file"
# Create the multipart
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = send_from
msg['To'] = send_to
    # msg preamble for those that do not have an email reader
msg.preamble = 'Multipart message.\n'
# Text part of the message
part = MIMEText("This is an automated message. No need to reply... it won't be answered anyway :) ")
msg.attach(part)
# The attachment part of the message
part = MIMEApplication(open("output.txt", "rb").read())
part.add_header('Content-Disposition', 'attachment', filename="output.txt")
msg.attach(part)
    # Create an instance of an SMTP server connection
    smtp = SMTP(host='smtp.gmail.com', port=587)
# Start the server
smtp.ehlo()
smtp.starttls()
smtp.login('ENTER YOUR EMAIL LOGIN USERNAME', 'ENTER YOUR PASSWORD HERE')
# Send the email
smtp.sendmail(msg['From'], msg['To'], msg.as_string() )
smtp.quit()
if __name__ == "__main__":
with open("ComputerLog.csv",) as f_obj:
csv_dict_reader(f_obj)
| mit | -7,436,426,573,632,817,000 | 35.853211 | 133 | 0.680607 | false | 3.911392 | false | false | false |
conda/kapsel | conda_kapsel/commands/test/test_activate.py | 1 | 7491 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
try:
from shlex import quote
except ImportError:
from pipes import quote
import platform
from conda_kapsel.commands.main import _parse_args_and_run_subcommand
from conda_kapsel.commands.activate import activate, main
from conda_kapsel.commands.prepare_with_mode import UI_MODE_TEXT_ASSUME_YES_DEVELOPMENT
from conda_kapsel.internal.test.tmpfile_utils import with_directory_contents_completing_project_file
from conda_kapsel.project_file import DEFAULT_PROJECT_FILENAME
from conda_kapsel.local_state_file import DEFAULT_LOCAL_STATE_FILENAME
from conda_kapsel.test.project_utils import project_dir_disable_dedicated_env
class Args(object):
def __init__(self, **kwargs):
self.directory = "."
self.env_spec = None
self.mode = UI_MODE_TEXT_ASSUME_YES_DEVELOPMENT
self.command = None
for key in kwargs:
setattr(self, key, kwargs[key])
def _monkeypatch_can_connect_to_socket_to_succeed(monkeypatch):
can_connect_args = dict()
def mock_can_connect_to_socket(host, port, timeout_seconds=0.5):
can_connect_args['host'] = host
can_connect_args['port'] = port
can_connect_args['timeout_seconds'] = timeout_seconds
return True
monkeypatch.setattr("conda_kapsel.plugins.network_util.can_connect_to_socket", mock_can_connect_to_socket)
return can_connect_args
def test_activate(monkeypatch):
can_connect_args = _monkeypatch_can_connect_to_socket_to_succeed(monkeypatch)
def activate_redis_url(dirname):
project_dir_disable_dedicated_env(dirname)
result = activate(dirname, UI_MODE_TEXT_ASSUME_YES_DEVELOPMENT, conda_environment=None, command_name=None)
assert can_connect_args['port'] == 6379
assert result is not None
if platform.system() == 'Windows':
result = [line for line in result if not line.startswith("export PATH")]
print("activate changed PATH on Windows and ideally it would not.")
if len(result) > 2:
import os
print("os.environ=" + repr(os.environ))
print("result=" + repr(result))
assert ['export PROJECT_DIR=' + quote(dirname), 'export REDIS_URL=redis://localhost:6379'] == result
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
services:
REDIS_URL: redis
"""}, activate_redis_url)
def test_activate_quoting(monkeypatch):
def activate_foo(dirname):
project_dir_disable_dedicated_env(dirname)
result = activate(dirname, UI_MODE_TEXT_ASSUME_YES_DEVELOPMENT, conda_environment=None, command_name=None)
assert result is not None
if platform.system() == 'Windows':
result = [line for line in result if not line.startswith("export PATH")]
print("activate changed PATH on Windows and ideally it would not.")
assert ["export FOO='$! boo'", 'export PROJECT_DIR=' + quote(dirname)] == result
with_directory_contents_completing_project_file(
{
DEFAULT_PROJECT_FILENAME: """
variables:
FOO: {}
""",
DEFAULT_LOCAL_STATE_FILENAME: """
variables:
FOO: $! boo
"""
}, activate_foo)
def test_main(monkeypatch, capsys):
def mock_conda_create(prefix, pkgs, channels):
raise RuntimeError("this test should not create an environment in %s with pkgs %r" % (prefix, pkgs))
monkeypatch.setattr('conda_kapsel.internal.conda_api.create', mock_conda_create)
can_connect_args = _monkeypatch_can_connect_to_socket_to_succeed(monkeypatch)
def main_redis_url(dirname):
project_dir_disable_dedicated_env(dirname)
main(Args(directory=dirname))
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
services:
REDIS_URL: redis
"""}, main_redis_url)
assert can_connect_args['port'] == 6379
out, err = capsys.readouterr()
assert "export REDIS_URL=redis://localhost:6379\n" in out
assert "" == err
def test_main_dirname_not_provided_use_pwd(monkeypatch, capsys):
can_connect_args = _monkeypatch_can_connect_to_socket_to_succeed(monkeypatch)
def main_redis_url(dirname):
from os.path import abspath as real_abspath
def mock_abspath(path):
if path == ".":
return dirname
else:
return real_abspath(path)
monkeypatch.setattr('os.path.abspath', mock_abspath)
project_dir_disable_dedicated_env(dirname)
code = _parse_args_and_run_subcommand(['conda-kapsel', 'activate'])
assert code == 0
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
services:
REDIS_URL: redis
"""}, main_redis_url)
assert can_connect_args['port'] == 6379
out, err = capsys.readouterr()
assert "export PROJECT_DIR" in out
assert "export REDIS_URL=redis://localhost:6379\n" in out
assert "" == err
def test_main_dirname_provided_use_it(monkeypatch, capsys):
can_connect_args = _monkeypatch_can_connect_to_socket_to_succeed(monkeypatch)
def main_redis_url(dirname):
project_dir_disable_dedicated_env(dirname)
code = _parse_args_and_run_subcommand(['conda-kapsel', 'activate', '--directory', dirname])
assert code == 0
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
services:
REDIS_URL: redis
"""}, main_redis_url)
assert can_connect_args['port'] == 6379
out, err = capsys.readouterr()
assert "export PROJECT_DIR" in out
assert "export REDIS_URL=redis://localhost:6379\n" in out
assert "" == err
def test_main_bad_command_provided(capsys):
def check(dirname):
project_dir_disable_dedicated_env(dirname)
code = _parse_args_and_run_subcommand(['conda-kapsel', 'activate', '--directory', dirname, '--command', 'nope'])
assert code == 1
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
services:
REDIS_URL: redis
"""}, check)
out, err = capsys.readouterr()
assert err.startswith("Command name 'nope' is not in")
def _monkeypatch_can_connect_to_socket_to_fail_to_find_redis(monkeypatch):
def mock_can_connect_to_socket(host, port, timeout_seconds=0.5):
if port == 6379:
return False # default Redis not there
else:
return True # can't start a custom Redis here
monkeypatch.setattr("conda_kapsel.plugins.network_util.can_connect_to_socket", mock_can_connect_to_socket)
def test_main_fails_to_redis(monkeypatch, capsys):
_monkeypatch_can_connect_to_socket_to_fail_to_find_redis(monkeypatch)
def main_redis_url(dirname):
project_dir_disable_dedicated_env(dirname)
code = main(Args(directory=dirname))
assert 1 == code
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
services:
REDIS_URL: redis
"""}, main_redis_url)
out, err = capsys.readouterr()
assert "missing requirement" in err
assert "All ports from 6380 to 6449 were in use" in err
| bsd-3-clause | -3,643,867,649,094,397,000 | 33.516129 | 120 | 0.657944 | false | 3.64832 | true | false | false |
yuanagain/seniorthesis | src/4by4viz2.py | 2 | 1518 | # Plot of the Lorenz Attractor based on Edward Lorenz's 1963 "Deterministic
# Nonperiodic Flow" publication.
# http://journals.ametsoc.org/doi/abs/10.1175/1520-0469%281963%29020%3C0130%3ADNF%3E2.0.CO%3B2
#
# Note: Because this is a simple non-linear ODE, it would be more easily
# done using SciPy's ode solver, but this approach depends only
# upon NumPy.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def lorenz(x, y, z, s=10, r=13, b=2.667):
x_dot = s*(y - x)
y_dot = r*x - y - x*z
z_dot = x*y - b*z
return x_dot, y_dot, z_dot
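# The system integrated below with explicit Euler steps of size dt; these are
# the Lorenz (1963) equations implemented in lorenz() above:
#   dx/dt = s*(y - x)
#   dy/dt = r*x - y - x*z
#   dz/dt = x*y - b*z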
dt = 0.01
stepCnt = 10000
# Need one more for the initial values
xs = np.empty((stepCnt + 1,))
ys = np.empty((stepCnt + 1,))
zs = np.empty((stepCnt + 1,))
ws = np.empty((stepCnt + 1,))
# Setting initial values
xs[0], ys[0], zs[0], ws[0]= (0., 1., 1.05, 0.)
# Stepping through "time".
for i in range(stepCnt):
# Derivatives of the X, Y, Z state
x_dot, y_dot, z_dot = lorenz(xs[i], ys[i], zs[i])
xs[i + 1] = xs[i] + (x_dot * dt)
ys[i + 1] = ys[i] + (y_dot * dt)
zs[i + 1] = zs[i] + (z_dot * dt)
    ws[i + 1] = (i + 1) * dt
#fig = plt.figure()
#ax = fig.gca(projection='3d')
plt.figure(1)
plt.subplot(2, 1, 1)
plt.plot(xs, ys)
plt.yscale('linear')
plt.title('xy')
plt.grid(True)
plt.gca().set_aspect('equal')
plt.subplot(2, 1, 2)
plt.plot(ws, zs)
plt.yscale('linear')
plt.title('wz')
plt.grid(True)
plt.gca().set_aspect('equal')
plt.show()
print(ws[0:10])
print(ys)
print(ws)
#plt.show()
| mit | -1,375,077,003,969,293,000 | 21.656716 | 94 | 0.620553 | false | 2.324655 | false | false | false |
krosenfeld/scatterbrane | scatterbrane/brane.py | 1 | 19596 | """
.. module:: brane
:platform: Unix
:synopsis: Simulate effect of anisotropic scattering.
.. moduleauthor:: Katherine Rosenfeld <[email protected]>
.. moduleauthor:: Michael Johnson
Default settings are appropriate for Sgr A*
Resources:
Bower et al. (2004, 2006)
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import RectBivariateSpline
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import zoom,rotate
from numpy import (pi,sqrt,log,sin,cos,exp,ceil,arange,
min,abs,ones,radians,dot,transpose,
                   zeros_like,clip,empty,empty_like,reshape)
from numpy.fft import fft2,fftfreq
from astropy.constants import au,pc
from astropy import units
import logging
from scatterbrane import utilities
__all__ = ["Brane"]
class Brane(object):
"""
Scattering simulation object.
:param model: ``(n, n)``
Numpy array of the source image.
:param dx: scalar
Resolution element of model in uas.
:param nphi: (optional) ``(2, )`` or scalar
Number of pixels in a screen. This may be a tuple specifying the dimensions of a rectangular screen.
:param screen_res: (optional) scalar
Size of a screen pixel in units of :math:`R_0`.
:param wavelength: (optional) scalar
Observing wavelength in meters.
:param dpc: (optional) scalar
Observer-Source distance in parsecs.
:param rpc: (optional) scalar
Observer-Scatterer distance in parsecs.
:param r0: (optional) scalar
Phase coherence length along major axis as preset string or in km.
:param r_outer: (optional) scalar
Outer scale of turbulence in units of :math:`R_0`.
:param r_inner: (optional) scalar
Inner scale of turbulence in units of :math:`R_0`.
:param alpha: (optional) string or scalar
        Preset string or float to set power-law index for turbulence scaling (e.g., Kolmogorov has :math:`\\alpha= 5/3`)
:param anisotropy: (optional) scalar
Anisotropy for screen is the EW / NS elongation.
:param pa: (optional) scalar
Orientation of kernel's major axis, East of North, in degrees.
:param live_dangerously: (optional) bool
Skip the parameter checks?
:param think_positive: (optional) bool
Should we enforce that the source image has no negative pixel values?
:returns: An instance of a scattering simulation.
:Example:
.. code-block:: python
s = Brane(m,dx,nphi=2**12,screen_res=5.,wavelength=3.0e-3,dpc=8.4e3,rpc=5.8e3)
where ``s`` is the class instance, ``m`` is the image array, ``nphi`` is the number of screen pixels,
``wavelength`` is the observing wavelength.
.. note:: :math:`R_0` is the phase coherence length and Sgr A* defaults are from Bower et al. (2006).
"""
def __init__(self,model,dx,nphi=2**12,screen_res=2,\
wavelength=1.3e-3,dpc=8400,rpc=5800,r0 = 'sgra',\
r_outer=10000000,r_inner=12,alpha='kolmogorov',\
anisotropy=2.045,pa=78,match_screen_res=False,live_dangerously=False,think_positive=False):
# set initial members
self.logger = logging.getLogger(self.__class__.__name__)
self.live_dangerously = live_dangerously
self.think_positive = think_positive
self.wavelength = wavelength*1e-3 # observing wavelength in km
self.dpc = float(dpc) # earth-source distance in pc
self.rpc = float(rpc) # R: source-scatterer distance in pc
self.d = self.dpc - self.rpc # D: earth-scatterer distance in pc
self.m = self.d/self.rpc # magnification factor (D/R)
if r0 == 'sgra':
# major axis (EW) phase coherence length in km
self.r0 = 3136.67*(1.3e-6/self.wavelength)
else:
try:
self.r0 = float(r0)
except:
raise ValueError('Bad value for r0')
self.anisotropy = anisotropy # anisotropy for screen = (EW / NS elongation)
self.pa = pa # orientation of major axis, E of N (or CCW of +y)
# Fresnel scale in km
self.rf = sqrt(self.dpc*pc.to(units.km).value / (2*pi / self.wavelength) * self.m / (1+self.m)**2)
# compute pixel scale for image
if match_screen_res:
self.ips = 1
self.screen_dx = screen_res * self.r0
else:
self.screen_dx = screen_res * self.r0 # size of a screen pixel in km
self.ips = int(ceil(1e-6*dx*self.d*au.to(units.km).value/self.screen_dx)) # image pixel / screen pixel
# image arrays
self.dx = 1e6 * self.ips * (self.screen_dx / (au.to(units.km).value * self.d)) # image pixel scale in uas
self.nx = int(ceil(model.shape[-1] * dx / self.dx)) # number of image pixels
self.model = model # source model
self.model_dx = dx # source model resolution
self.iss = np.array([],dtype=np.float64) # scattered image
self.isrc = np.array([],dtype=np.float64) # source image at same scale as scattered image
# screen parameters
if type(nphi) == int:
self.nphi = (nphi,nphi) # size of screen array
else:
self.nphi = nphi
self.nphi = np.asarray(self.nphi)
self.r_inner = r_inner # inner turbulence scale in r0
self.r_outer = r_outer # outer turbulence scale in r0
#self.qmax = 1.*screen_res/r_inner # 1 / inner turbulence scale in pix
#self.qmin = 1.*screen_res/r_outer # 1 / outer tubulence scale in pix
if alpha == 'kolmogorov':
self.alpha = 5./3
else:
try:
self.alpha = float(alpha)
except:
raise ValueError('Bad value for alpha')
# use logger to report
self.chatter()
# includes sanity check
self.setModel(self.model,self.model_dx,think_positive=self.think_positive)
def _checkSanity(self):
'''
Check that screen parameters are consistent.
'''
# sanity check: is screen large enough?
assert np.ceil(self.nx*self.ips)+2 < np.min(self.nphi), \
"screen is not large enough: {0} > {1}".\
format(int(np.ceil(self.ips*self.nx)+2),np.min(self.nphi))
# sanity check: is image square?
assert self.model.shape[-1] == self.model.shape[-2], \
'source image must be square'
# sanity check: integer number of screen pixels / image pixel?
assert self.ips % 1 == 0, 'image/screen pixels should be an integer'
# is inner turbulence scale larger than r0?
#assert 1./self.qmax > self.r0/self.screen_dx, 'turbulence inner scale < r0'
assert self.r_inner > 1., 'turbulence inner scale < r0'
# check image smoothness
V = fft2(self.isrc)
freq = fftfreq(self.nx,d=self.dx*radians(1.)/(3600*1e6))
u = dot(transpose([np.ones(self.nx)]),[freq])
v = dot(transpose([freq]),[ones(self.nx)])
try:
if max(abs(V[sqrt(u*u+v*v) > (1.+self.m)*self.r_inner*self.r0/self.wavelength])) / self.isrc.sum() > 0.01:
self.logger.warning('image is not smooth enough: {0:g} > 0.01'.format(max(abs(V[sqrt(u*u+v*v) > (1.+self.m)*self.r_inner*self.r0/self.wavelength])) / self.isrc.sum()))
except ValueError:
self.logger.warning('r_inner is too large to test smoothness')
# is screen pixel smaller than inner turbulence scale?
#assert 1./self.qmax >= 1, 'screen pixel > turbulence inner scale'
assert self.r_inner*self.r0/self.screen_dx >= 1, 'screen pixel > turbulence inner scale'
if (self.rf*self.rf/self.r0/(self.ips*self.screen_dx) < 3):
self.logger.warning('WARNING: image resolution is approaching Refractive scale')
def chatter(self):
'''
Print information about the current scattering simulation where many parameters are cast as integers.
'''
fmt = "{0:32s} :: "
self.logger.info( (fmt + "{1:g}").format('Observing wavelength [mm]',1e6*self.wavelength) )
self.logger.info( (fmt + "{1:d}").format('Phase coherence length [km]',int(self.r0)) )
self.logger.info( (fmt + "{1:d}").format('Fresnel scale [km]',int(self.rf)) )
self.logger.info( (fmt + "{1:d}").format('Refractive scale [km]',int(self.rf**2/self.r0)) )
#self.logger.info( (fmt + "{1:d}").format('Inner turbulence scale [km]',int(self.screen_dx/self.qmax)))
self.logger.info( (fmt + "{1:d}").format('Inner turbulence scale [km]',int(self.r_inner*self.r0)))
self.logger.info( (fmt + "{1:d}").format('Screen resolution [km]',int(self.screen_dx)))
self.logger.info( (fmt + "{1:d} {2:d}").format('Linear filling factor [%,%]',*map(int,100.*self.nx*self.ips/self.nphi)) )
self.logger.info( (fmt + "{1:g}").format('Image resolution [uas]',self.dx))
self.logger.info( (fmt + "{1:d}").format('Image size',int(self.nx)))
def _generateEmptyScreen(self):
'''
Create an empty screen.
'''
if not self.live_dangerously: self._checkSanity()
self.phi = np.zeros(self.nphi)
def setModel(self,model,dx,think_positive=False):
'''
Set new model for the source.
:param model: ``(n, n)``
Numpy image array.
:param dx: scalar
Pixel size in microarcseconds.
:param think_positive: (optional) bool
Should we enforce that the source image has no negative pixel values?
'''
self.nx = int(ceil(model.shape[-1] * dx / self.dx)) # number of image pixels
self.model = model # source model
self.model_dx = dx # source model resolution
# load source image that has size and resolution compatible with the screen.
self.isrc = np.empty(2*(self.nx,))
self.think_positive = think_positive
M = self.model.shape[1] # size of original image array
f_img = RectBivariateSpline(self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\
self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\
self.model)
xx_,yy_ = np.meshgrid((np.arange(self.nx) - 0.5*(self.nx-1)),\
(np.arange(self.nx) - 0.5*(self.nx-1)),indexing='xy')
m = f_img.ev(yy_.flatten(),xx_.flatten()).reshape(2*(self.nx,))
self.isrc = m * (self.dx/self.model_dx)**2 # rescale for change in pixel size
if self.think_positive:
self.isrc[self.isrc < 0] = 0
if not self.live_dangerously: self._checkSanity()
def generatePhases(self,seed=None,save_phi=None):
'''
Generate screen phases.
:param seed: (optional) scalar
Seed for random number generator
:param save_phi: (optional) string
To save the screen, set this to the filename.
'''
# check setup
if not self.live_dangerously: self._checkSanity()
# seed the generator
if seed != None:
np.random.seed(seed=seed)
# include anisotropy
qx2 = dot(transpose([np.ones(self.nphi[0])]),[np.fft.rfftfreq(self.nphi[1])**2])
qy2 = dot(transpose([np.fft.fftfreq(self.nphi[0])**2*self.anisotropy**2]),[np.ones(self.nphi[1]//2+1)])
rr = qx2+qy2
rr[0,0] = 0.02 # arbitrary normalization
# generating phases with given power spectrum
size = rr.shape
qmax2 = (self.r_inner*self.r0/self.screen_dx)**-2
qmin2 = (self.r_outer*self.r0/self.screen_dx)**-2
phi_t = (1/sqrt(2) * sqrt(exp(-1./qmax2*rr) * (rr + qmin2)**(-0.5*(self.alpha+2.)))) \
* (np.random.normal(size=size) + 1j * np.random.normal(size=size))
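        # i.e., each Fourier mode amplitude is the square root of a power spectrum of
        # the form (q in inverse screen pixels):
        #   Q(q) ~ exp(-q^2 / qmax^2) * (q^2 + qmin^2)**(-(alpha + 2) / 2)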
# calculate phi
self.phi = np.fft.irfft2(phi_t)
# normalize structure function
nrm = self.screen_dx/(self.r0*sqrt(self._getPhi(1,0)))
self.phi *= nrm
# save screen
if save_phi != None:
np.save(save_phi,self.phi)
def _checkDPhi(self,nLag=5):
'''
Report the phase structure function for various lags.
:param nLag: (optional) int
Number of lags to report starting with 0.
'''
self.logger.info( "\nEstimates of the phase structure function at various lags:")
for i in np.arange(nLag):
self.logger.info( "lag ",i, self._getPhi(i,0), self._getPhi(0,i), self._getPhi(i,i))
def _getPhi(self,lag_x,lag_y):
'''
Empirical estimate for phase structure function
:param lag_x: int
Screen pixels to lag in x direction.
:param lag_y: int
Screen pixels to lag in y direction.
'''
        assert (lag_x < self.nphi[1]) and (lag_y < self.nphi[0]), "lag choice larger than screen array"
# Y,X
if (lag_x == 0 and lag_y == 0):
return 0.
if (lag_x == 0):
return 1.*((self.phi[:-1*lag_y,:] - self.phi[lag_y:,:])**2).sum()/(self.nphi[0]*(self.nphi[1]-lag_y))
if (lag_y == 0):
return 1.*((self.phi[:,:-1*lag_x] - self.phi[:,lag_x:])**2).sum()/((self.nphi[0]-lag_x)*self.nphi[1])
else:
return (1.*((self.phi[:-1*lag_y,:-1*lag_x] - self.phi[lag_y:,lag_x:])**2).sum()/((self.nphi[1]-lag_y)*(self.nphi[0]-lag_x)))
def readScreen(self,filename):
'''
Read in screen phases from a file.
:param filename: string
File containing the screen phases.
'''
self.phi = np.fromfile(filename,dtype=np.float64).reshape(self.nphi)
def _calculate_dphi(self,move_pix=0):
'''
Calculate the screen gradient.
:param move_pix: (optional) int
Number of pixels to roll the screen (for time evolution).
:returns: ``(nx, nx)``, ``(nx, nx)``
numpy arrays containing the dx,dy components of the gradient vector.
.. note:: Includes factors of the Fresnel scale and the result is in units of the source image.
'''
ips = self.ips # number of screen pixels per image pixel
# -- when this != 1, some sinusoidal signal
# over time with period of image_resolution
nx = self.nx # number of image pixels
rf = self.rf / self.screen_dx # Fresnel scale in screen pixels
assert move_pix < (self.nphi[1] - self.nx*self.ips), 'screen is not large enough'
dphi_x = (0.5 * rf * rf / ips ) * \
(self.phi[0:ips*nx:ips,2+move_pix:ips*nx+2+move_pix:ips] -
self.phi[0:ips*nx:ips,0+move_pix:ips*nx+move_pix :ips])
dphi_y = (0.5 * rf * rf / ips ) * \
(self.phi[2:ips*nx+2:ips,0+move_pix:ips*nx+move_pix:ips] -
self.phi[0:ips*nx :ips,0+move_pix:ips*nx+move_pix:ips])
self.logger.debug('{0:d},{1:d}'.format(*dphi_x.shape))
return dphi_x,dphi_y
def scatter(self,move_pix=0,scale=1):
'''
Generate the scattered image which is stored in the ``iss`` member.
:param move_pix: (optional) int
Number of pixels to roll the screen (for time evolution).
:param scale: (optional) scalar
Scale factor for gradient. To simulate the scattering effect at another
wavelength this is (lambda_new/lambda_old)**2
'''
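        # Illustrative usage sketch (assumes a configured Brane instance `b` whose
        # screen phases were generated with generatePhases()); per the docstring,
        # doubling the observing wavelength corresponds to scale = 2.0**2 = 4:
        #   b.generatePhases(seed=42)
        #   b.scatter(scale=4.)
        #   scattered = b.iss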
M = self.model.shape[-1] # size of original image array
N = self.nx # size of output image array
#if not self.live_dangerously: self._checkSanity()
# calculate phase gradient
dphi_x,dphi_y = self._calculate_dphi(move_pix=move_pix)
if scale != 1:
dphi_x *= scale
dphi_y *= scale
xx_,yy = np.meshgrid((np.arange(N) - 0.5*(N-1)),\
(np.arange(N) - 0.5*(N-1)),indexing='xy')
# check whether we care about PA of scattering kernel
if self.pa != None:
f_model = RectBivariateSpline(self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\
self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\
self.model)
# apply rotation
theta = -(90 * pi / 180) + np.radians(self.pa) # rotate CW 90 deg, then CCW by PA
xx_ += dphi_x
yy += dphi_y
xx = cos(theta)*xx_ - sin(theta)*yy
yy = sin(theta)*xx_ + cos(theta)*yy
self.iss = f_model.ev(yy.flatten(),xx.flatten()).reshape((self.nx,self.nx))
# rotate back and clip for positive values for I
if self.think_positive:
self.iss = clip(rotate(self.iss,-1*theta/np.pi*180,reshape=False),a_min=0,a_max=1e30) * (self.dx/self.model_dx)**2
else:
self.iss = rotate(self.iss,-1*theta/np.pi*180,reshape=False) * (self.dx/self.model_dx)**2
# otherwise do a faster lookup rather than the expensive interpolation.
else:
yyi = np.rint((yy+dphi_y+self.nx/2)).astype(np.int) % self.nx
xxi = np.rint((xx_+dphi_x+self.nx/2)).astype(np.int) % self.nx
if self.think_positive:
self.iss = clip(self.isrc[yyi,xxi],a_min=0,a_max=1e30)
else:
self.iss = self.isrc[yyi,xxi]
def _load_src(self,stokes=(0,),think_positive=True):
'''
Load the source image from model (changes size and resolution to match the screen).
:param stokes: (optional) tuple
Stokes parameters to consider.
:param think_positive: (optional) bool
Should we enforce that the source image has no negative pixel values?
'''
M = self.model.shape[1] # size of original image array
N = self.nx # size of output image array
if len(self.model.shape) > 2:
self.isrc = np.empty((self.model.shape[-1],N,N))
else:
self.isrc = np.empty((1,N,N))
self.model = np.reshape(self.model,(1,M,M))
for s in stokes:
f_img = RectBivariateSpline(self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\
self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\
self.model[s,:,:])
xx_,yy_ = np.meshgrid((np.arange(N) - 0.5*(N-1)),\
(np.arange(N) - 0.5*(N-1)),indexing='xy')
m = f_img.ev(yy_.flatten(),xx_.flatten()).reshape((self.nx,self.nx))
res = m * (self.dx/self.model_dx)**2 # rescale for change in pixel size
if s == 0 and think_positive:
res[res < 0] = 0
self.isrc[s,:,:] = res
self.model = np.squeeze(self.model)
self.isrc = np.squeeze(self.isrc)
def saveSettings(self,filename):
'''
Save screen settings to a file.
:param filename: string
settings filename
'''
f = open(filename,"w")
f.write("wavelength \t {0}\n".format(self.wavelength))
f.write("dpc \t {0}\n".format(self.dpc))
f.write("rpc \t {0}\n".format(self.rpc))
f.write("d \t {0}\n".format(self.d))
f.write("m \t {0}\n".format(self.m))
f.write("r0 \t {0}\n".format(self.r0))
f.write("anisotropy \t {0}\n".format(self.anisotropy))
f.write("pa \t {0}\n".format(self.pa))
f.write("nphix \t {0}\n".format(self.nphi[0])) # size of phase screen
f.write("nphiy \t {0}\n".format(self.nphi[1])) # size of phase screen
f.write("screen_dx \t {0}\n".format(self.screen_dx))
f.write("rf \t {0}\n".format(self.rf))
f.write("ips \t {0}\n".format(self.ips))
f.write("dx \t {0}\n".format(self.dx))
f.write("nx \t {0}\n".format(self.nx))
f.write("qmax \t {0}\n".format(self.r_inner)) # inner turbulence scale in r0
f.write("qmin \t {0}\n".format(self.r_outer)) # outer turbulence scale in r0
#f.write("qmax \t {0}\n".format(self.qmax)) # 1./inner turbulence scale in screen pixels
#f.write("qmin \t {0}\n".format(self.qmin)) # 1./inner turbulence scale in screen pixels
f.close()
| mit | 3,471,398,328,925,831,000 | 39.740125 | 177 | 0.597928 | false | 3.167286 | false | false | false |
jopohl/urh | src/urh/awre/Preprocessor.py | 1 | 11419 | import itertools
import math
import os
import time
from collections import defaultdict
import numpy as np
from urh.cythonext import awre_util
from urh.signalprocessing.FieldType import FieldType
class Preprocessor(object):
"""
This class preprocesses the messages in the following ways
1) Identify preamble / length of preamble
2) Identify sync word(s)
3) Align all given messages on the identified preamble information
"""
_DEBUG_ = False
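    # Illustrative usage sketch (assumes `bitvectors` is a list of 1-D numpy bit
    # arrays, one per message, as expected by __init__ below):
    #   pre = Preprocessor(bitvectors)
    #   preamble_starts, preamble_lengths, sync_len = pre.preprocess()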
def __init__(self, bitvectors: list, existing_message_types: dict = None):
self.bitvectors = bitvectors # type: list[np.ndarray]
self.existing_message_types = existing_message_types if existing_message_types is not None else dict()
    def preprocess(self) -> (np.ndarray, np.ndarray, int):
raw_preamble_positions = self.get_raw_preamble_positions()
existing_sync_words = self.__get_existing_sync_words()
if len(existing_sync_words) == 0:
sync_words = self.find_possible_syncs(raw_preamble_positions)
else:
            # NOTE: This does not cover the case where the protocol has multiple sync words and not all of them were labeled
sync_words = existing_sync_words
preamble_starts = raw_preamble_positions[:, 0]
preamble_lengths = self.get_preamble_lengths_from_sync_words(sync_words, preamble_starts=preamble_starts)
sync_len = len(sync_words[0]) if len(sync_words) > 0 else 0
return preamble_starts, preamble_lengths, sync_len
def get_preamble_lengths_from_sync_words(self, sync_words: list, preamble_starts: np.ndarray):
"""
Get the preamble lengths based on the found sync words for all messages.
If there should be more than one sync word in a message, use the first one.
:param sync_words:
:param preamble_starts:
:return:
"""
# If there should be varying sync word lengths we need to return an array of sync lengths per message
assert all(len(sync_word) == len(sync_words[0]) for sync_word in sync_words)
byte_sync_words = [bytes(map(int, sync_word)) for sync_word in sync_words]
result = np.zeros(len(self.bitvectors), dtype=np.uint32)
for i, bitvector in enumerate(self.bitvectors):
preamble_lengths = []
bits = bitvector.tobytes()
for sync_word in byte_sync_words:
sync_start = bits.find(sync_word)
if sync_start != -1:
if sync_start - preamble_starts[i] >= 2:
preamble_lengths.append(sync_start - preamble_starts[i])
# Consider case where sync word starts with preamble pattern
sync_start = bits.find(sync_word, sync_start + 1, sync_start + 2 * len(sync_word))
if sync_start != -1:
if sync_start - preamble_starts[i] >= 2:
preamble_lengths.append(sync_start - preamble_starts[i])
preamble_lengths.sort()
if len(preamble_lengths) == 0:
result[i] = 0
elif len(preamble_lengths) == 1:
result[i] = preamble_lengths[0]
else:
# consider all indices not more than one byte before first one
preamble_lengths = list(filter(lambda x: x < preamble_lengths[0] + 7, preamble_lengths))
# take the smallest preamble_length, but prefer a greater one if it is divisible by 8 (or 4)
preamble_length = next((pl for pl in preamble_lengths if pl % 8 == 0), None)
if preamble_length is None:
preamble_length = next((pl for pl in preamble_lengths if pl % 4 == 0), None)
if preamble_length is None:
preamble_length = preamble_lengths[0]
result[i] = preamble_length
return result
def find_possible_syncs(self, raw_preamble_positions=None):
difference_matrix = self.get_difference_matrix()
if raw_preamble_positions is None:
raw_preamble_positions = self.get_raw_preamble_positions()
return self.determine_sync_candidates(raw_preamble_positions, difference_matrix, n_gram_length=4)
@staticmethod
def merge_possible_sync_words(possible_sync_words: dict, n_gram_length: int):
"""
Merge possible sync words by looking for common prefixes
:param possible_sync_words: dict of possible sync words and their frequencies
:return:
"""
result = defaultdict(int)
if len(possible_sync_words) < 2:
return possible_sync_words.copy()
for sync1, sync2 in itertools.combinations(possible_sync_words, 2):
common_prefix = os.path.commonprefix([sync1, sync2])
if len(common_prefix) > n_gram_length:
result[common_prefix] += possible_sync_words[sync1] + possible_sync_words[sync2]
else:
result[sync1] += possible_sync_words[sync1]
result[sync2] += possible_sync_words[sync2]
return result
def determine_sync_candidates(self,
raw_preamble_positions: np.ndarray,
difference_matrix: np.ndarray,
n_gram_length=4) -> list:
possible_sync_words = awre_util.find_possible_sync_words(difference_matrix, raw_preamble_positions,
self.bitvectors, n_gram_length)
self.__debug("Possible sync words", possible_sync_words)
if len(possible_sync_words) == 0:
return []
possible_sync_words = self.merge_possible_sync_words(possible_sync_words, n_gram_length)
self.__debug("Merged sync words", possible_sync_words)
scores = self.__score_sync_lengths(possible_sync_words)
sorted_scores = sorted(scores, reverse=True, key=scores.get)
estimated_sync_length = sorted_scores[0]
if estimated_sync_length % 8 != 0:
for other in filter(lambda x: 0 < estimated_sync_length-x < 7, sorted_scores):
if other % 8 == 0:
estimated_sync_length = other
break
# Now we look at all possible sync words with this length
sync_words = {word: frequency for word, frequency in possible_sync_words.items()
if len(word) == estimated_sync_length}
self.__debug("Sync words", sync_words)
additional_syncs = self.__find_additional_sync_words(estimated_sync_length, sync_words, possible_sync_words)
if additional_syncs:
self.__debug("Found additional sync words", additional_syncs)
sync_words.update(additional_syncs)
result = []
for sync_word in sorted(sync_words, key=sync_words.get, reverse=True):
# Convert bytes back to string
result.append("".join(str(c) for c in sync_word))
return result
def __find_additional_sync_words(self, sync_length: int, present_sync_words, possible_sync_words) -> dict:
"""
Look for additional sync words, in case we had varying preamble lengths and multiple sync words
(see test_with_three_syncs_different_preamble_lengths for an example)
:param sync_length:
:type present_sync_words: dict
:type possible_sync_words: dict
:return:
"""
np_syn = [np.fromiter(map(int, sync_word), dtype=np.uint8, count=len(sync_word))
for sync_word in present_sync_words]
messages_without_sync = [i for i, bv in enumerate(self.bitvectors)
if not any(awre_util.find_occurrences(bv, s, return_after_first=True) for s in np_syn)]
result = dict()
if len(messages_without_sync) == 0:
return result
# Is there another sync word that applies to all messages without sync?
additional_candidates = {word: score for word, score in possible_sync_words.items()
if len(word) > sync_length and not any(s in word for s in present_sync_words)}
for sync in sorted(additional_candidates, key=additional_candidates.get, reverse=True):
if len(messages_without_sync) == 0:
break
score = additional_candidates[sync]
s = sync[:sync_length]
np_s = np.fromiter(s, dtype=np.uint8, count=len(s))
matching = [i for i in messages_without_sync
if awre_util.find_occurrences(self.bitvectors[i], np_s, return_after_first=True)]
if matching:
result[s] = score
for m in matching:
messages_without_sync.remove(m)
return result
def get_raw_preamble_positions(self) -> np.ndarray:
"""
Return a 2D numpy array where first column is the start of preamble
second and third columns are lower and upper bound for preamble length by message, respectively
"""
result = np.zeros((len(self.bitvectors), 3), dtype=np.uint32)
for i, bitvector in enumerate(self.bitvectors):
if i in self.existing_message_types:
preamble_label = self.existing_message_types[i].get_first_label_with_type(FieldType.Function.PREAMBLE)
else:
preamble_label = None
if preamble_label is None:
start, lower, upper = awre_util.get_raw_preamble_position(bitvector)
else:
                # If this message is already labeled with a preamble we just use its values
start, lower, upper = preamble_label.start, preamble_label.end, preamble_label.end
result[i, 0] = start
result[i, 1] = lower - start
result[i, 2] = upper - start
return result
def get_difference_matrix(self) -> np.ndarray:
"""
Return a matrix of the first difference index between all messages
:return:
"""
return awre_util.get_difference_matrix(self.bitvectors)
def __score_sync_lengths(self, possible_sync_words: dict):
sync_lengths = defaultdict(int)
for sync_word, score in possible_sync_words.items():
sync_lengths[len(sync_word)] += score
self.__debug("Sync lengths", sync_lengths)
return sync_lengths
def __get_existing_sync_words(self) -> list:
result = []
for i, bitvector in enumerate(self.bitvectors):
if i in self.existing_message_types:
sync_label = self.existing_message_types[i].get_first_label_with_type(FieldType.Function.SYNC)
else:
sync_label = None
if sync_label is not None:
result.append("".join(map(str, bitvector[sync_label.start:sync_label.end])))
return result
def __debug(self, *args):
if self._DEBUG_:
print("[PREPROCESSOR]", *args)
@staticmethod
def get_next_multiple_of_n(number: int, n: int):
return n * int(math.ceil(number / n))
@staticmethod
def lower_multiple_of_n(number: int, n: int):
return n * int(math.floor(number / n))
@staticmethod
def get_next_lower_multiple_of_two(number: int):
return number if number % 2 == 0 else number - 1
| gpl-3.0 | -6,106,701,050,303,925,000 | 41.136531 | 120 | 0.602154 | false | 4.116438 | false | false | false |
ksu-svt/svt-event-web-app | event_manager/models.py | 1 | 1860 | from django.db import models
from django.contrib.auth.models import User
from django.contrib import admin
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin as AuthUserAdmin
from django.http import HttpRequest
class Member(models.Model):
user = models.ForeignKey(User,on_delete=models.CASCADE)
netid = models.IntegerField(default=0)
role = models.CharField(max_length=200)
major = models.CharField(max_length=200)
points = models.IntegerField(default=0)
def __str__(self):
return self.user.get_full_name()
class Team(models.Model):
title = models.CharField(max_length=200)
lead = models.ForeignKey(User, related_name="team_lead",null=True, on_delete=models.SET_NULL)
members = models.ManyToManyField(User, related_name='team_members', blank=True)
def __str__(self):
return self.title
class Event(models.Model):
team = models.ForeignKey(Team,on_delete=models.CASCADE)
title = models.CharField(max_length=200)
location = models.CharField(max_length=200)
dateTime = models.DateTimeField()
description = models.CharField(max_length=200)
members = models.ManyToManyField(User, related_name='members', blank=True)
def __str__(self):
return self.title
class TeamAdmin(admin.ModelAdmin):
model=Team
filter_vertical = ('members',)
class EventAdmin(admin.ModelAdmin):
model=Event
filter_vertical = ('members',)
class TeamUserInline(admin.StackedInline):
model = Team.members.through
max_num = 1
class MemberInline(admin.StackedInline):
model=Member
max_num=1
class UserAdmin(AuthUserAdmin):
inlines=(MemberInline, TeamUserInline)
def get_inline_instances(self, request, obj=None):
if not obj:
return list()
return super().get_inline_instances(request, obj)
| mit | -8,321,106,513,214,914,000 | 30.525424 | 97 | 0.715591 | false | 3.757576 | false | false | false |
macarthur-lab/xbrowse | breakpoint_search/models.py | 1 | 2451 |
from django.db import models
from xbrowse_server.base.models import Project, Individual
from xbrowse.core import genomeloc
class BreakpointFile(models.Model):
project = models.ForeignKey(Project, blank=True)
file_path = models.CharField(max_length=500, default="", blank=True)
class Meta:
db_table="base_breakpointfile"
class Breakpoint(models.Model):
project = models.ForeignKey(Project, null=False)
individual = models.ForeignKey(Individual, null=False)
xpos = models.BigIntegerField(db_index=True)
# depth cscore partner genes cdsdist
obs = models.IntegerField(db_index=True)
sample_count = models.IntegerField(db_index=True)
consensus = models.FloatField()
partner = models.TextField(blank=True, null=True)
class Meta:
db_table="base_breakpoint"
def toList(self):
genes = [{ 'gene' : bg.gene_symbol, 'cds_dist': bg.cds_dist } for bg in self.breakpointgene_set.all()]
chr,pos = genomeloc.get_chr_pos(self.xpos)
return [
self.xpos,
chr,
pos,
self.obs,
self.sample_count,
self.consensus,
self.partner,
self.individual.indiv_id,
genes,
]
def toDict(self):
genes = [{ 'gene' : bg.gene_symbol, 'cds_dist': bg.cds_dist } for bg in self.breakpointgene_set.all()]
chr,pos = genomeloc.get_chr_pos(self.xpos)
return {
'xpos' : self.xpos,
'chr' : chr,
'pos' : pos,
'obs' : self.obs,
'sample_count' : self.sample_count,
'consensus' : self.consensus,
'indiv_id' : self.individual.indiv_id,
'genes' : genes,
}
class BreakpointMetaData(models.Model):
breakpoint = models.ForeignKey(Breakpoint, null=False)
type = models.TextField(blank=True, default="")
tags = models.TextField(blank=True, default="")
class Meta:
db_table="base_breakpointmetadata"
def toDict(self):
return {
'breakpoint_id' : self.breakpoint.xpos,
'type' : self.type,
'tags' : self.tags
}
class BreakpointGene(models.Model):
breakpoint = models.ForeignKey(Breakpoint, null=False)
gene_symbol = models.CharField(db_index=True,max_length=20) # HGNC symbol
cds_dist = models.IntegerField()
class Meta:
db_table="base_breakpointgene"
| agpl-3.0 | -226,880,252,465,269,630 | 29.259259 | 110 | 0.609955 | false | 3.641902 | false | false | false |
lamby/python-social-auth | examples/pyramid_example/example/__init__.py | 54 | 1248 | import sys
sys.path.append('../..')
from pyramid.config import Configurator
from pyramid.session import UnencryptedCookieSessionFactoryConfig
from sqlalchemy import engine_from_config
from social.apps.pyramid_app.models import init_social
from .models import DBSession, Base
def main(global_config, **settings):
"""This function returns a Pyramid WSGI application."""
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.bind = engine
session_factory = UnencryptedCookieSessionFactoryConfig('thisisasecret')
config = Configurator(settings=settings,
session_factory=session_factory,
autocommit=True)
config.include('pyramid_chameleon')
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_request_method('example.auth.get_user', 'user', reify=True)
config.add_route('home', '/')
config.add_route('done', '/done')
config.include('example.settings')
config.include('example.local_settings')
config.include('social.apps.pyramid_app')
init_social(config, Base, DBSession)
config.scan()
config.scan('social.apps.pyramid_app')
return config.make_wsgi_app()
| bsd-3-clause | -2,521,684,883,802,776,600 | 35.705882 | 76 | 0.706731 | false | 3.863777 | true | false | false |
Legilibre/SedLex | sedlex/AddGitLabIssueVisitor.py | 1 | 2028 | # -*- coding: utf-8 -*-
from duralex.AbstractVisitor import AbstractVisitor
from . import template
from duralex.alinea_parser import *
import gitlab
class AddGitLabIssueVisitor(AbstractVisitor):
def __init__(self, args):
self.gitlab = gitlab.Gitlab('https://gitlab.com', args.gitlab_token)
self.repo_name = args.gitlab_repository
self.repo = self.gitlab.projects.get(self.repo_name)
self.issues = self.repo.issues.list(state='opened')
self.current_issue_number = -1
self.current_issue_link = None
super(AddGitLabIssueVisitor, self).__init__()
def visit_edit_node(self, node, post):
if post:
return
node['gitlabIssue'] = self.current_issue_link
node['commitMessage'] = template.template_string('gitlab/commit_message.j2', {'edit': node})
def visit_node(self, node):
if 'type' in node and node['type'] == 'article':
title = template.template_string('gitlab/issue_title.j2', {'article': node})
description = template.template_string('gitlab/issue_description.j2', {'article': node})
found = False
for issue in self.issues:
if issue.title == title:
found = True
self.current_issue_number = issue.iid
if issue.description != description:
issue.save(title=title, description=description)
if not found:
issue = self.gitlab.project_issues.create(
{
'title': title,
'description': description
},
project_id=self.repo.id
)
self.current_issue_number = issue.iid
self.current_issue_link = 'https://gitlab.com/' + self.repo_name + '/issues/' + str(self.current_issue_number)
node['gitlabIssue'] = self.current_issue_link
super(AddGitLabIssueVisitor, self).visit_node(node)
| agpl-3.0 | 8,291,171,328,780,894,000 | 39.56 | 122 | 0.57643 | false | 4.155738 | false | false | false |
tridc/django_local_library | locallibrary/settings.py | 1 | 3964 | """
Django settings for locallibrary project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = '2!o+4zkrcvvwhj65wph4bb=dkloys+l5br)m8^ih_xp52^1^6i'
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '2!o+4zkrcvvwhj65wph4bb=dkloys+l5br)m8^ih_xp52^1^6i')
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
DEBUG = bool(os.environ.get('DJANGO_DEBUG', True))
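# Note: bool() of any non-empty string is True, so e.g. DJANGO_DEBUG='False'
# still enables debug mode; export DJANGO_DEBUG='' (empty) to turn debug off.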
ALLOWED_HOSTS = ['secret-reef-21077.herokuapp.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'catalog.apps.CatalogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'locallibrary.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['./templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'locallibrary.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL = '/'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
| gpl-3.0 | 8,768,230,256,608,004,000 | 27.314286 | 102 | 0.7056 | false | 3.396744 | false | false | false |
ryfeus/lambda-packs | Keras_tensorflow/source/keras/regularizers.py | 9 | 2034 | from __future__ import absolute_import
import six
from . import backend as K
from .utils.generic_utils import serialize_keras_object
from .utils.generic_utils import deserialize_keras_object
class Regularizer(object):
"""Regularizer base class.
"""
def __call__(self, x):
return 0.
@classmethod
def from_config(cls, config):
return cls(**config)
class L1L2(Regularizer):
"""Regularizer for L1 and L2 regularization.
# Arguments
l1: Float; L1 regularization factor.
l2: Float; L2 regularization factor.
"""
def __init__(self, l1=0., l2=0.):
self.l1 = K.cast_to_floatx(l1)
self.l2 = K.cast_to_floatx(l2)
def __call__(self, x):
regularization = 0.
if self.l1:
regularization += K.sum(self.l1 * K.abs(x))
if self.l2:
regularization += K.sum(self.l2 * K.square(x))
return regularization
def get_config(self):
return {'l1': float(self.l1),
'l2': float(self.l2)}
# Aliases.
def l1(l=0.01):
return L1L2(l1=l)
def l2(l=0.01):
return L1L2(l2=l)
def l1_l2(l1=0.01, l2=0.01):
return L1L2(l1=l1, l2=l2)
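# Usage sketch (assumes a Dense layer; any layer exposing kernel_regularizer /
# activity_regularizer accepts these factories):
#
#     from keras.layers import Dense
#     layer = Dense(64, kernel_regularizer=l2(0.01),
#                   activity_regularizer=l1(0.001))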
def serialize(regularizer):
return serialize_keras_object(regularizer)
def deserialize(config, custom_objects=None):
return deserialize_keras_object(config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='regularizer')
def get(identifier):
if identifier is None:
return None
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
config = {'class_name': str(identifier), 'config': {}}
return deserialize(config)
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret regularizer identifier:',
identifier)
| mit | -123,960,795,711,041,810 | 23.506024 | 72 | 0.593412 | false | 3.698182 | true | false | false |
NLHEALTHCARE/PYELT | pyelt/mappings/transformations.py | 1 | 5537 | from pyelt.datalayers.dwh import Dwh
__author__ = 'hvreenen'
# class FieldTransformationType():
# INLINE = 'INLINE' #type: str
# FUNCTION = 'FUNCTION'#type: str
#
# class FunctionLanguage():
# PLSQL = 'PLSQL'#type: str
# PLPYTHON = 'PLPYTHON'#type: str
class FieldTransformation():
def __init__(self, name: str = '', sql: str = '') -> None:
self.name = name #type: str
self.field_name = 'id' #type: str
self.table = ''
self.descr = '' #type: str
self.filter = '' #type: str
# self.type = FieldTransformationType.INLINE #type: str
self.steps = {} #type: Dict[int, FieldTransformStep]
if sql:
# self.parse_sql(sql)
step = self.new_step(sql)
def get_table(self):
return self.table
def parse_sql(self, sql: str):
pos_start = sql.find('(')
pos_end = sql.rfind(')')
func_name = sql[:pos_start]
func_inner = sql[pos_start + 1:pos_end]
step = self.new_step(func_inner)
self.steps[step.sort_order] = step
def new_step(self, sql: str) -> 'FieldTransformStep':
step = FieldTransformStep(sql=sql)
step.sort_order = len(self.steps) + 1
self.steps[step.sort_order] = step
return step
def get_sql(self, alias: str='')->str:
sql = ''
index = 0
steps = sorted(self.steps.values(), key = lambda x: x.sort_order)
for step in steps:
# step_sql = step.sql
step_sql = step.get_sql(alias)
step_sql = step_sql.replace(self.field_name, "{fld}")
if (index > 0):
if '{fld}' in step_sql:
sql = step_sql.replace("{fld}", sql)
else:
sql = step_sql.replace("{step" + str(index) + "}", sql)
else:
sql = step_sql
sql = sql.replace("{fld}", self.field_name)
index += 1
return sql
def __repr__(self):
return self.get_sql('')
# def create_function_at_db(self, dwh: 'Dwh') -> None:
# #todo afmaken
# params = {} #type: Dict[str, str]
# sql = """CREATE OR REPLACE FUNCTION {schema}.{name}({params})
# RETURNS {return_type} AS
# $BODY$
# {body}
# $BODY$
# LANGUAGE {lang} VOLATILE;""".format(**params)
# dwh.execute(sql)
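    # Chaining sketch for get_sql() above (the column name 'naam' is illustrative):
    #
    #     tr = FieldTransformation(name='clean_name', sql='lower(naam)')
    #     tr.field_name = 'naam'
    #     tr.new_step('trim({fld})')
    #     tr.get_sql()   # -> "trim(lower(naam))"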
class FieldTransformStep(FieldTransformation):
def __init__(self, sortorder: int = 0, name: str = '', sql: str = '') -> None:
FieldTransformation.__init__(self, name)
self.sql = sql
self.sort_order = sortorder
# self.parse_sql(sql)
def parse_sql(self, sql: str) -> None:
func_name = ''
func_params = []
pos_start = sql.find('(')
pos_end = sql.rfind(')')
func_name = sql[:pos_start]
func_params_sql = sql[pos_start + 1:pos_end]
func_param_names = func_params_sql.split(',')
for func_param_name in func_param_names:
if not func_param_name: continue
func_param = FuncParam(func_param_name.strip())
func_params.append(func_param)
self.func_name = func_name
self.func_params = func_params
def get_sql(self, alias: str='') -> str:
return self.sql
# func_params_sql = ''
# for func_param in self.func_params:
# if func_param.is_db_field and alias:
# func_params_sql += '{}.{}, '.format(alias, func_param)
# else:
# func_params_sql += '{}, '.format(func_param)
# func_params_sql = func_params_sql[:-2]
# sql = "{}({})".format(self.func_name, func_params_sql)
# return sql
# class FuncParam():
# def __init__(self, name: str = '') -> None:
# self.name = name #type: str
# self.is_db_field = "'" not in name #type: bool
# if self.is_digit(name):
# self.is_db_field = False
#
#
# def __str__(self) -> str:
# return self.name
#
# def is_digit(self, s: str) -> bool:
# try:
# f = float(s)
# return True
# except:
# return False
class Lookup(FieldTransformation):
    def __init__(self, name, dict=None, sor=None):
        super().__init__(name=name)
        self.dict = dict or {}
        self.sor = sor
        self.new_step("(select ref_code_doel from {}.{} where ref_code_doel = '{}')".format(sor, name, '{fld}'))
    def get_ddl(self):
        sql = """
        CREATE TABLE {}.{}_ref_mappings
        (ref_code_bron text,
        ref_code_doel text)
        """.format(self.sor, self.name)
        return sql
def get_etl(self):
values = ''
for code, descr in self.dict.items():
values += "('{}', '{}'),\r\n".format(code, descr)
values = values[:-3]
        params = {}
        params['values'] = values
        params['sor'] = self.sor
        params['name'] = self.name
sql = """
CREATE TEMP TABLE {sor}.{name}_ref_mappings_temp
(ref_code_bron text,
ref_code_doel text);
INSERT INTO {sor}.{name}_ref_mappings_temp (ref_code_bron, ref_code_doel)
VALUES {values};
INSERT INTO {sor}.{name}_ref_mappings (ref_code_bron, ref_code_doel)
SELECT ref_code_bron, ref_code_doel
FROM {sor}.{name}_ref_mappings_temp
        WHERE NOT EXISTS (SELECT 1 FROM {sor}.{name}_ref_mappings maps WHERE maps.ref_code_bron = {name}_ref_mappings_temp.ref_code_bron);
        DROP TABLE {sor}.{name}_ref_mappings_temp;
""".format(**params)
| gpl-3.0 | -7,576,310,073,809,948,000 | 32.762195 | 112 | 0.513274 | false | 3.511097 | false | false | false |
arruda/cloudfuzzy | fuzzy_modeling/models/defuzzifys.py | 1 | 3626 | # -*- coding: utf-8 -*-
import inspect
from django.db import models
from django.contrib.contenttypes import generic
from django.utils.translation import ugettext_lazy as _
from fuzzy_modeling.utils import get_class_by_python_path, get_choices_from_python_path_listing
from fuzzy_modeling.models.norms import NormModel
from fuzzy_modeling.models.utils import PyFuzzyMixin
from fuzzy_modeling.models.parameters import ParameterModel
class DefuzzifyModel(models.Model, PyFuzzyMixin):
"""
A Fuzzy defuzzify base model
"""
class Meta:
app_label = 'fuzzy_modeling'
DEFUZZIFY_CHOICES = get_choices_from_python_path_listing(
'fuzzy.defuzzify',
ignores=['Base', ]
)
# (
# ('fuzzy.defuzzify.COG.COG', _("COG")),
# ('fuzzy.defuzzify.Dict.Dict', _("Dict")),
# ('fuzzy.defuzzify.COGS.COGS', _("COGS")),
# ('fuzzy.defuzzify.LM.LM', _("LM")),
# ('fuzzy.defuzzify.MaxLeft.MaxLeft', _("MaxLeft")),
# ('fuzzy.defuzzify.MaxRight.MaxRight', _("MaxRight")),
# ('fuzzy.defuzzify.RM.RM', _("RM")),
# )
defuzzify = models.CharField(
_("Defuzzify"),
choices=DEFUZZIFY_CHOICES,
max_length=250,
blank=False, null=False,
default=DEFUZZIFY_CHOICES[0][0]
)
inf = models.ForeignKey(NormModel, related_name="defuzzify_inf_set", blank=True, null=True)
acc = models.ForeignKey(NormModel, related_name="defuzzify_acc_set", blank=True, null=True)
parameters = generic.GenericRelation(ParameterModel)
def get_pyfuzzy(self):
"""
Return the Pyfuzzy class of this model
"""
DefuzzifyClass = get_class_by_python_path(self.defuzzify)
inf = self.inf.get_pyfuzzy() if self.inf else None
acc = self.acc.get_pyfuzzy() if self.acc else None
# parameters =
parameters_dict = {
'INF': inf,
'ACC': acc
}
for p in self.parameters.all():
if p.name != 'INF' and p.name != 'ACC':
parameters_dict[p.name] = p.get_value()
defuzzify = DefuzzifyClass(**parameters_dict)
return defuzzify
@classmethod
def from_pyfuzzy(cls, pyfuzzy):
"""
Return the model representation of an instance of the pyfuzzy attr
"""
defuzz_model = cls()
defuzzify = 'fuzzy.defuzzify.%s.%s' % (
pyfuzzy.__class__.__name__,
pyfuzzy.__class__.__name__
)
defuzz_model.defuzzify = defuzzify
# INF
inf_model = None
if pyfuzzy.INF:
inf_model = cls.inf.field.related.parent_model.from_pyfuzzy(pyfuzzy.INF)
defuzz_model.inf = inf_model
# ACC
acc_model = None
if pyfuzzy.ACC:
acc_model = cls.acc.field.related.parent_model.from_pyfuzzy(pyfuzzy.ACC)
defuzz_model.acc = acc_model
defuzz_model.save()
# parameters
for arg in inspect.getargspec(pyfuzzy.__init__).args:
if arg != 'self':
arg_value = getattr(pyfuzzy, arg)
if arg_value is not None:
arg_type = ParameterModel.get_type_from_python_type(arg_value)
defuzz_model.parameters.create(
name=arg,
value=arg_value,
value_type=arg_type
)
defuzz_model.save()
return defuzz_model
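    # Round-trip sketch (assumes a stored DefuzzifyModel row and pyfuzzy installed):
    #
    #     fuzzy_obj = defuzz_model.get_pyfuzzy()           # model -> pyfuzzy instance
    #     clone = DefuzzifyModel.from_pyfuzzy(fuzzy_obj)   # pyfuzzy instance -> new row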
def __unicode__(self):
return self.get_defuzzify_display()
| mit | -7,038,179,230,675,926,000 | 29.728814 | 95 | 0.566189 | false | 3.633267 | false | false | false |
carletes/vagrant-environment | vagrant/core.py | 1 | 4786 | # Copyright (c) 2015 Carlos Valiente
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Utilities for working with Vagrant environments."""
import os
import logging
import subprocess
__all__ = [
"destroy",
"up",
]
class up(object):
"""Context manager that brings up a Vagrant environment on start.
"""
log = logging.getLogger("vagrant")
def __init__(self, dname=None):
"""Constructor.
Parameters:
dname
Path to the directory containing the Vagrantfile. Defaults to
the current working directory if not given.
"""
self._dname = os.getcwd() if dname is None else dname
self._vagrantfile = os.path.join(self._dname, "Vagrantfile")
if not os.access(self._vagrantfile, os.F_OK):
raise Exception("%s: Not found" % (self._vagrantfile,))
self._hosts = None
def __enter__(self):
for (host, status) in self._status():
if status != "running":
self._vagrant("up", host)
return self
def __exit__(self, *exc_info):
pass
@property
def hosts(self):
"""Tuple of Vagrant nodes in this Vagrant environment.
"""
if self._hosts is None:
self._hosts = []
for line in self._vagrant("status --machine-readable"):
bits = line.split(",")
if bits[2] == "state":
self._hosts.append(bits[1])
self._hosts = tuple(self._hosts)
return self._hosts
def provision(self):
"""Provisions all nodes in this Vagrant environment.
"""
return self._vagrant("provision")
def ssh(self, node, cmd):
"""Executes the given command in the given hosts.
Raises an error if the return code of ``vagrant ssh`` is non-zero.
Returns a list containing the output of ``vagrant ssh`` (both stdout
and stderr).
"""
return self._vagrant('ssh -c "%s"' % (cmd,), node)
def _status(self):
ret = []
for line in self._vagrant("status --machine-readable"):
bits = line.split(",")
if bits[2] == "state":
ret.append((bits[1], bits[3]))
if self._hosts is None:
self._hosts = tuple(h for (h, _) in ret)
return ret
def _vagrant(self, *args):
cmdline = ["vagrant"]
cmdline.extend(args)
cmdline = " ".join(cmdline)
self.log.debug("Executing: %s", cmdline)
p = subprocess.Popen(cmdline,
shell=True,
cwd=self._dname,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, _ = p.communicate()
if p.returncode:
raise Exception(stdout)
return stdout.strip().split("\n")
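# Usage sketch (assumes a Vagrantfile in the working directory and the
# `vagrant` binary on PATH):
#
#     with up() as env:
#         for host in env.hosts:
#             print(env.ssh(host, "hostname"))
#     destroy()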
LOG = logging.getLogger("vagrant")
def destroy(dname=None):
"""Destroys the Vagrant environment.
Arguments:
dname
Path to the directory containing the Vagrantfile. Defaults to the
current working directory if not given.
"""
dname = os.getcwd() if dname is None else dname
vagrantfile = os.path.join(dname, "Vagrantfile")
if not os.access(vagrantfile, os.F_OK):
raise Exception("%s: Not found" % (vagrantfile,))
cmdline = "vagrant destroy --force"
LOG.debug("Executing: %s", cmdline)
p = subprocess.Popen(cmdline,
shell=True,
cwd=dname,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, _ = p.communicate()
if p.returncode:
raise Exception(stdout)
LOG.debug(stdout)
| mit | 2,155,889,464,656,060,200 | 30.486842 | 79 | 0.59486 | false | 4.187227 | false | false | false |
Karspexet/Karspexet | karspexet/venue/models.py | 1 | 1209 | from django.contrib.postgres.fields import HStoreField
from django.db import models
class Venue(models.Model):
name = models.CharField(max_length=100)
description = models.TextField(blank=True)
address = models.TextField(blank=True)
map_address = models.CharField(blank=True, max_length=255)
seat_map_dimensions = HStoreField(null=False, default=dict)
def __str__(self):
return self.name
class SeatingGroup(models.Model):
venue = models.ForeignKey(Venue, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
def __str__(self):
return self.name
def active_pricing_model(self, timestamp=None):
return self.pricingmodel_set.active(timestamp).filter(seating_group_id=self.id).first()
class Seat(models.Model):
group = models.ForeignKey(SeatingGroup, on_delete=models.CASCADE)
name = models.CharField(max_length=40, help_text='Till exempel "Rad 17, Stol 5011"')
x_pos = models.IntegerField()
y_pos = models.IntegerField()
def __str__(self):
return self.name
def price_for_type(self, ticket_type, timestamp=None):
return self.group.active_pricing_model(timestamp).price_for(ticket_type)
| mit | 216,008,215,363,029,280 | 31.675676 | 95 | 0.705542 | false | 3.464183 | false | false | false |
ProkopHapala/SimpleSimulationEngine | cpp/apps/OrbitalWar/py/RocktGun.py | 1 | 1577 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
def linearPowerLimitedRocket_dist( t, F, ve=10000, m0=1.0 ):
A = m0*ve
B = np.log( F*t - A )
return F*ve*( t/F + A*B/F**2 ) - t*ve*B
def getAccFT( F=None, ve=10000, P=1e+9, m0=1.0, fmass=0.1 ):
if F is None:
F = P/ve ;print "F[N]" , F
tend = (1-fmass)*ve*m0/F ;print "tend[s]", tend
return tend, F
def linearPowerLimitedRocket( t, ve=10000, P=1e+9, m0=1.0 ):
#F = P/ve ;print "F[N]" , F
#tend = m0*ve*(fmass - 1)/(F*fmass) ;print "tend[s]", tend
#tend = (1-fmass)*ve*m0/F ;print "tend[s]", tend
tend, F = getAccFT( ve=ve, P=P, m0=m0 )
    a = F/( m0 -F*t/ve )    #;print "a[G]", a/9.81
v0 = ve*np.log( m0*ve )
v = -ve*np.log( np.abs( m0*ve - F*t ) ) + v0
#s = ( ve*t + t*v - v*(m0*ve/F) )
s = ve*t + v * ( t - m0*ve/F )
#s = linearPowerLimitedRocket_dist( t, F, ve=ve, m0=m0 )
return s,v,a
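# Reference relations used above (constant thrust F, exhaust velocity ve):
#   m(t) = m0 - (F/ve)*t
#   a(t) = F / m(t)
#   v(t) = ve * ln( m0 / m(t) )   (Tsiolkovsky with constant mass flow)
#   s(t) = integral of v(t) dt, written in closed form in the function above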
P = 10e+9
ve = 10e+3
fmass = 0.1
m0 = 1.0
tend, F = getAccFT( ve=ve, P=P, m0=m0, fmass=fmass )
ts = np.linspace(0,tend,1000)
s,v,a = linearPowerLimitedRocket( ts, ve=ve, P=P, m0=m0 )
plt.figure( figsize=(5,9) )
plt.subplot(3,1,1); plt.plot( ts, a ); plt.ylabel('a'); plt.xlabel('t[s]'); plt.grid()
plt.axvline( tend, ls="--")
plt.subplot(3,1,2); plt.plot( ts, v ); plt.ylabel('v [m/s]'); plt.xlabel('t[s]') ; plt.grid()
plt.axvline( tend, ls="--")
plt.subplot(3,1,3); plt.plot( ts, s ); plt.ylabel('s [m] '); plt.xlabel('t[s]') ; plt.grid()
plt.show() | mit | -2,502,383,001,833,996,300 | 29.941176 | 93 | 0.521877 | false | 2.119624 | false | false | false |
noba3/KoTos | addons/plugin.video.ardmediathek_de/resources/lib/subtitle.py | 1 | 5962 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import xbmc
import re
import resources.lib.utils as utils
import xbmcaddon
import HTMLParser
import xbmcvfs
addonID = 'plugin.video.ardmediathek_de'
addon = xbmcaddon.Addon(id=addonID)
subFile = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo('profile')+'/sub.srt').decode('utf-8')
baseUrl = "http://www.ardmediathek.de"
coloredSubtitles = addon.getSetting("coloredSubtitles") == "true"
def setSubtitle(uri,offset=0):
if offset != 0:
print offset
print baseUrl+uri
if uri.startswith('/subtitle'):
_newSubtitle(baseUrl+uri)
else:
_oldSubtitle(baseUrl+uri)
def _newSubtitle(url):
#if os.path.exists(subFile):
# os.remove(subFile)
if xbmcvfs.exists(subFile):
xbmcvfs.delete(subFile)
try:
content = utils.getUrl(url)
except:
content = ""
if content:
dict = _stylesSetup(re.compile('<tt:styling>(.+?)</tt:styling>', re.DOTALL).findall(content)[0])
div = re.compile('<tt:div.+?>(.+?)</tt:div>', re.DOTALL).findall(content)[0]
p = re.compile('<tt:p(.+?)</tt:p>', re.DOTALL).findall(div)
i = 1
buffer = ''
for part in p:
if '<tt:span' in part:
part = part.replace('begin="1','begin="0').replace('end="1','end="0').replace('\n','').replace('<tt:br/>','\n')
begin = re.compile('begin="(.+?)"').findall(part)[0]
begin = begin.replace(".",",")[:-1]
end = re.compile('end="(.+?)"').findall(part)[0]
end = end.replace(".",",")[:-1]
s = part.split('>')[0]
part = part.replace(s+'>','')
if 'style=' in s:
style = re.compile('style="(.+?)"').findall(s)[0]
if dict[style]:
part = '<font color="'+dict[style]+'">'+part+'</font>'
match = re.compile('<(.+?)>').findall(part)
for entry in match:
if entry.startswith('tt:span'):
if 'style' in entry:
style = re.compile('style="(.+?)"').findall(entry)[0]
part = part.replace('<'+entry+'>','<font color="'+dict[style]+'">')
else:
part = part.replace('<'+entry+'>','')
elif entry.startswith('tt:/span'):
part = part.replace('</tt:span>','</font>')
else:
part = part.replace('<'+entry+'>','')
buffer += str(i) + '\n'
buffer += begin+" --> "+end+"\n"
buffer += part + '\n\n'
i+=1
f = xbmcvfs.File(subFile, 'w')
f.write(buffer)
f.close()
xbmc.sleep(1000)
xbmc.Player().setSubtitles(subFile)
def _oldSubtitle(url):
if os.path.exists(subFile):
os.remove(subFile)
try:
content = utils.getUrl(url)
except:
content = ""
if content:
dict = _stylesSetup(re.compile('<styling>(.+?)</styling>', re.DOTALL).findall(content)[0])
matchLine = re.compile('<p id=".+?" begin="1(.+?)" end="1(.+?)".+?style="(.+?)">(.+?)</p>', re.DOTALL).findall(content)
#fh = open(subFile, 'a')
f = xbmcvfs.File(subFile, 'w')
count = 1
for begin, end, style, line in matchLine:
begin = "0"+begin.replace(".",",")[:-1]
end = "0"+end.replace(".",",")[:-1]
text = ''
line = line.replace('\n','').strip()
line = line.replace("<br />","\n")
if dict[style]:
line = '<font color="'+dict[style]+'">'+line+'</font>'
s = line.split('<')
for entry in s:
if entry.startswith('span'):
if 'tts:color' in entry.split('>')[0]:
color = re.compile('tts:color="(.+?)"', re.DOTALL).findall(entry.split('>')[0])[0]
line = line.replace('<'+entry.split('>')[0]+'>','<font color="'+color+'">')
line = line.replace('</span>','</font>')
            while '  ' in line:
                line = line.replace('  ', ' ')
line = line.replace(' \n','\n').replace(' </font>\n','</font>\n')
#fh.write(str(count)+"\n"+begin+" --> "+end+"\n"+_cleanTitle(line)+"\n\n")
f.write(str(count)+"\n"+begin+" --> "+end+"\n"+_cleanTitle(line)+"\n\n")
count+=1
f.close()
xbmc.sleep(1000)
xbmc.Player().setSubtitles(subFile)
"""
def _oldSubtitle(url):
if os.path.exists(subFile):
os.remove(subFile)
try:
content = utils.getUrl(url)
except:
content = ""
if content:
dict = _stylesSetup(re.compile('<styling>(.+?)</styling>', re.DOTALL).findall(content)[0])
matchLine = re.compile('<p id=".+?" begin="1(.+?)" end="1(.+?)".+?style="(.+?)">(.+?)</p>', re.DOTALL).findall(content)
fh = open(subFile, 'a')
count = 1
for begin, end, style, line in matchLine:
begin = "0"+begin.replace(".",",")[:-1]
end = "0"+end.replace(".",",")[:-1]
match = re.compile('<span(.+?)>', re.DOTALL).findall(line)
for span in match:
line = line.replace("<span"+span+">","")
line = line.replace("<br />","\n").replace("</span>","").strip()
if dict[style]:
line = '<font color="'+dict[style]+'">'+line+'</font>'
fh.write(str(count)+"\n"+begin+" --> "+end+"\n"+_cleanTitle(line)+"\n\n")
count+=1
fh.close()
xbmc.sleep(1000)
xbmc.Player().setSubtitles(subFile)
"""
def _stylesSetup(styles):
dict = {}
styles = styles.replace('tt:','').replace('xml:','')
match_styles = re.compile('<style(.+?)>', re.DOTALL).findall(styles)
for style in match_styles:
id = re.compile('id="(.+?)"', re.DOTALL).findall(style)[0]
if 'color=' in style and coloredSubtitles:
color = re.compile('color="(.+?)"', re.DOTALL).findall(style)[0]
else:
color = False
dict[id] = color
return dict
def _cleanTitle(title,html=True):
if html:
title = HTMLParser.HTMLParser().unescape(title)
return title.encode("utf-8")
else:
title = title.replace("<", "<").replace(">", ">").replace("&", "&").replace(""", "\"").replace("'", "'").replace(""", "\"").replace("ß", "ß").replace("–", "-")
title = title.replace("Ä", "Ä").replace("Ü", "Ü").replace("Ö", "Ö").replace("ä", "ä").replace("ü", "ü").replace("ö", "ö").replace("é", "é").replace("è", "è")
title = title.replace("Ä","Ä").replace("ä","ä").replace("Ö","Ö").replace("ö","ö").replace("Ü","Ü").replace("ü","ü").replace("ß","ß")
title = title.replace("'","'").strip()
return title | gpl-2.0 | 6,460,534,141,782,706,000 | 33.871345 | 203 | 0.575646 | false | 2.796435 | false | false | false |
buaabarty/dpark | dpark/decorator.py | 2 | 1107 | # -*- coding: utf-8 -*-
class LazyJIT(object):
this = None
def __init__(self, decorator, f, *args, **kwargs):
self.f = f
self.args = args
self.kwargs = kwargs
self.decorator = decorator
def __call__(self, *args, **kwargs):
if self.this is None:
try:
mod = __import__('numba', fromlist=[self.decorator])
d = getattr(mod, self.decorator)
self.this = d(*self.args, **self.kwargs)(self.f)
except ImportError, e:
self.this = self.f
return getattr(self.this, '__call__')(*args, **kwargs)
def jit(signature, **kwargs):
if not isinstance(signature, (str, unicode)):
raise ValueError('First argument should be signature')
def _(f):
return LazyJIT('jit', f, signature, **kwargs)
return _
def autojit(*args, **kwargs):
if len(args) ==1 and not kwargs and callable(args[0]):
f = args[0]
return LazyJIT('autojit', f)
else:
def _(f):
return LazyJIT('autojit', f, *args, **kwargs)
return _
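# Usage sketch (numba is optional; if it is missing the functions simply run
# uncompiled, as handled by LazyJIT above):
#
#     @autojit
#     def add(a, b):
#         return a + b
#
#     @jit('f8(f8, f8)')
#     def mul(a, b):
#         return a * b
#
#     print add(2, 3)   # compiled lazily on the first call when numba exists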
| bsd-3-clause | 4,109,995,176,437,829,000 | 30.628571 | 68 | 0.534779 | false | 3.778157 | false | false | false |
ox-it/humfrey | humfrey/pingback/extraction.py | 1 | 2807 | """
Contains functions which extract triples from external resources.
"""
from __future__ import with_statement
import functools
import urlparse
import lxml.etree
import pytz
import rdflib
from django.conf import settings
from humfrey.utils.namespaces import NS
__all__ = ['extractors']
class NoLinkFoundError(Exception):
pass
class InvalidPingback(Exception):
def __init__(self, reason):
self.reason = reason
def _extract_from_rdf(graph, response, source, target, format):
    pass
def _extract_from_html(graph, response, source, target):
try:
html = lxml.etree.parse(response, parser=lxml.etree.HTMLParser())
except:
raise InvalidPingback('invalid-html')
url = response['content-location']
for anchor in html.xpath(".//a"):
href = urlparse.urlparse(urlparse.urljoin(url, anchor.get('href')))
if not href[2]:
href = href[:2] + ('/',) + href[3:]
href = urlparse.urlunparse(href)
if href == target:
break
else:
raise NoLinkFoundError
title = html.xpath('.//head/title')
if title and title[0].text:
graph.add((rdflib.URIRef(url), NS.dcterms['title'], rdflib.Literal(title[0].text)))
extractors = {'application/rdf+xml': functools.partial(_extract_from_rdf, format='xml'),
'text/n3': functools.partial(_extract_from_rdf, format='n3'),
'text/turtle': functools.partial(_extract_from_rdf, format='n3'),
'text/plain': functools.partial(_extract_from_rdf, format='nt'),
'application/xhtml+xml': _extract_from_html,
'text/html': _extract_from_html,
}
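# extract() below dispatches on the response Content-Type: HTML/XHTML goes
# through _extract_from_html, while RDF serialisations go through
# _extract_from_rdf with the matching rdflib parser format.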
def extract(pingback, response):
content_type = response.get('content-type', '').split(';')[0].lower()
graph = rdflib.ConjunctiveGraph()
graph_name = pingback.graph_name
date = lambda x: rdflib.Literal(pytz.timezone(settings.TIME_ZONE).localize(x))
url = response['content-location']
uri = rdflib.URIRef(url)
graph += (
(uri, NS.sioc.links_to, rdflib.URIRef(pingback.target)),
(graph_name, NS.dcterms.created, date(pingback.created)),
(graph_name, NS.dcterms.modified, date(pingback.updated)),
(graph_name, NS.dcterms.source, uri),
(graph_name, NS.void.inDataset, settings.PINGBACK_DATASET),
(graph_name, NS.dcterms['title'], rdflib.Literal(u'Pingback from %s to %s' % (unicode(pingback.source), unicode(pingback.target)))),
)
try:
extractor = extractors[content_type]
except KeyError:
raise InvalidPingback('unexpected-media-type')
try:
extractor(graph, response, pingback.source, pingback.target)
except NoLinkFoundError:
raise InvalidPingback('no-link-found')
return graph
| bsd-3-clause | -1,017,957,257,831,237,400 | 29.846154 | 140 | 0.644817 | false | 3.650195 | false | false | false |
stvstnfrd/edx-platform | common/lib/xmodule/xmodule/modulestore/tests/test_assetstore.py | 1 | 34490 | """
Tests for assetstore using any of the modulestores for metadata. May extend to testing the storage options
too.
"""
import unittest
from datetime import datetime, timedelta
import pytest
import ddt
import pytz
import six
from django.test import TestCase
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import CourseLocator
from six.moves import range, zip
from openedx.core.lib.tests import attr
from xmodule.assetstore import AssetMetadata
from xmodule.modulestore import IncorrectlySortedList, ModuleStoreEnum, SortedAssetList
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.utils import (
MIXED_MODULESTORE_BOTH_SETUP,
MODULESTORE_SETUPS,
MixedModulestoreBuilder,
XmlModulestoreBuilder
)
class AssetStoreTestData(object):
"""
Shared data for constructing test assets.
"""
now = datetime.now(pytz.utc)
user_id = 144
if six.PY2:
user_id_long = long(user_id) # lint-amnesty, pylint: disable=undefined-variable
else:
user_id_long = int(user_id)
user_email = "[email protected]"
asset_fields = (
AssetMetadata.ASSET_BASENAME_ATTR, 'internal_name', 'pathname', 'locked',
'edited_by', 'edited_by_email', 'edited_on', 'created_by', 'created_by_email', 'created_on',
'curr_version', 'prev_version'
)
all_asset_data = (
('pic1.jpg', 'EKMND332DDBK', 'pix/archive', False,
user_id_long, user_email, now + timedelta(seconds=10 * 1), user_id_long, user_email, now, '14', '13'),
('shout.ogg', 'KFMDONSKF39K', 'sounds', True,
user_id, user_email, now + timedelta(seconds=10 * 2), user_id, user_email, now, '1', None),
('code.tgz', 'ZZB2333YBDMW', 'exercises/14', False,
user_id * 2, user_email, now + timedelta(seconds=10 * 3), user_id * 2, user_email, now, 'AB', 'AA'),
('dog.png', 'PUPY4242X', 'pictures/animals', True,
user_id_long * 3, user_email, now + timedelta(seconds=10 * 4), user_id_long * 3, user_email, now, '5', '4'),
('not_here.txt', 'JJJCCC747', '/dev/null', False,
user_id * 4, user_email, now + timedelta(seconds=10 * 5), user_id * 4, user_email, now, '50', '49'),
('asset.txt', 'JJJCCC747858', '/dev/null', False,
user_id * 4, user_email, now + timedelta(seconds=10 * 6), user_id * 4, user_email, now, '50', '49'),
('roman_history.pdf', 'JASDUNSADK', 'texts/italy', True,
user_id * 7, user_email, now + timedelta(seconds=10 * 7), user_id * 7, user_email, now, '1.1', '1.01'),
('weather_patterns.bmp', '928SJXX2EB', 'science', False,
user_id * 8, user_email, now + timedelta(seconds=10 * 8), user_id * 8, user_email, now, '52', '51'),
('demo.swf', 'DFDFGGGG14', 'demos/easy', False,
user_id * 9, user_email, now + timedelta(seconds=10 * 9), user_id * 9, user_email, now, '5', '4'),
)
class TestSortedAssetList(unittest.TestCase):
"""
Tests the SortedAssetList class.
"""
def setUp(self):
super(TestSortedAssetList, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
asset_list = [dict(list(zip(AssetStoreTestData.asset_fields, asset))) for asset in AssetStoreTestData.all_asset_data] # lint-amnesty, pylint: disable=line-too-long
self.sorted_asset_list_by_filename = SortedAssetList(iterable=asset_list)
self.sorted_asset_list_by_last_edit = SortedAssetList(iterable=asset_list, key=lambda x: x['edited_on'])
self.course_key = CourseLocator('org', 'course', 'run')
def test_exception_on_bad_sort(self):
asset_key = self.course_key.make_asset_key('asset', 'pic1.jpg')
with pytest.raises(IncorrectlySortedList):
__ = self.sorted_asset_list_by_last_edit.find(asset_key)
def test_find(self):
asset_key = self.course_key.make_asset_key('asset', 'asset.txt')
assert self.sorted_asset_list_by_filename.find(asset_key) == 0
asset_key_last = self.course_key.make_asset_key('asset', 'weather_patterns.bmp')
assert self.sorted_asset_list_by_filename.find(asset_key_last) == (len(AssetStoreTestData.all_asset_data) - 1)
@attr('mongo')
@ddt.ddt
class TestMongoAssetMetadataStorage(TestCase):
"""
Tests for storing/querying course asset metadata.
"""
XML_MODULESTORE_MAP = {
'XML_MODULESTORE_BUILDER': XmlModulestoreBuilder(),
'MIXED_MODULESTORE_BUILDER': MixedModulestoreBuilder([('xml', XmlModulestoreBuilder())])
}
def setUp(self):
super(TestMongoAssetMetadataStorage, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.addTypeEqualityFunc(datetime, self._compare_datetimes)
self.addTypeEqualityFunc(AssetMetadata, self._compare_metadata)
self.differents = (('different', 'burn.jpg'),)
self.vrmls = (
('vrml', 'olympus_mons.vrml'),
('vrml', 'ponte_vecchio.vrml'),
)
self.regular_assets = (('asset', 'zippy.png'),)
self.alls = self.differents + self.vrmls + self.regular_assets
def _compare_metadata(self, mdata1, mdata2, msg=None):
"""
So we can use the below date comparison
"""
if type(mdata1) != type(mdata2): # lint-amnesty, pylint: disable=unidiomatic-typecheck
self.fail(self._formatMessage(msg, u"{} is not same type as {}".format(mdata1, mdata2)))
for attr in mdata1.ATTRS_ALLOWED_TO_UPDATE: # lint-amnesty, pylint: disable=redefined-outer-name
assert getattr(mdata1, attr) == getattr(mdata2, attr), msg
def _compare_datetimes(self, datetime1, datetime2, msg=None):
"""
Don't compare microseconds as mongo doesn't encode below milliseconds
"""
if not timedelta(seconds=-1) < datetime1 - datetime2 < timedelta(seconds=1):
self.fail(self._formatMessage(msg, u"{} != {}".format(datetime1, datetime2)))
def _make_asset_metadata(self, asset_loc):
"""
Make a single test asset metadata.
"""
now = datetime.now(pytz.utc)
return AssetMetadata(
asset_loc, internal_name='EKMND332DDBK',
pathname='pictures/historical', contenttype='image/jpeg',
locked=False, fields={'md5': '77631ca4f0e08419b70726a447333ab6'},
edited_by=ModuleStoreEnum.UserID.test, edited_on=now,
created_by=ModuleStoreEnum.UserID.test, created_on=now,
curr_version='v1.0', prev_version='v0.95'
)
def _make_asset_thumbnail_metadata(self, asset_md):
"""
Add thumbnail to the asset_md
"""
asset_md.thumbnail = 'ABC39XJUDN2'
return asset_md
def setup_assets(self, course1_key, course2_key, store=None):
"""
Setup assets. Save in store if given
"""
for i, asset in enumerate(AssetStoreTestData.all_asset_data):
asset_dict = dict(list(zip(AssetStoreTestData.asset_fields[1:], asset[1:])))
if i in (0, 1) and course1_key:
asset_key = course1_key.make_asset_key('asset', asset[0])
asset_md = AssetMetadata(asset_key, **asset_dict)
if store is not None:
store.save_asset_metadata(asset_md, asset[4])
elif course2_key:
asset_key = course2_key.make_asset_key('asset', asset[0])
asset_md = AssetMetadata(asset_key, **asset_dict)
# Don't save assets 5 and 6.
if store is not None and i not in (4, 5):
store.save_asset_metadata(asset_md, asset[4])
@ddt.data(*MODULESTORE_SETUPS)
def test_save_one_and_confirm(self, storebuilder):
"""
Save the metadata in each store and retrieve it singularly, as all assets, and after deleting all.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
asset_filename = 'burnside.jpg'
new_asset_loc = course.id.make_asset_key('asset', asset_filename)
# Save the asset's metadata.
new_asset_md = self._make_asset_metadata(new_asset_loc)
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
# Find the asset's metadata and confirm it's the same.
found_asset_md = store.find_asset_metadata(new_asset_loc)
assert found_asset_md is not None
assert new_asset_md == found_asset_md
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 1
@ddt.data(*MODULESTORE_SETUPS)
def test_delete(self, storebuilder):
"""
Delete non-existent and existent metadata
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
# Attempt to delete an asset that doesn't exist.
assert store.delete_asset_metadata(new_asset_loc, ModuleStoreEnum.UserID.test) == 0
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 0
new_asset_md = self._make_asset_metadata(new_asset_loc)
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
assert store.delete_asset_metadata(new_asset_loc, ModuleStoreEnum.UserID.test) == 1
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 0
@ddt.data(*MODULESTORE_SETUPS)
def test_find_non_existing_assets(self, storebuilder):
"""
Find a non-existent asset in an existing course.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
# Find existing asset metadata.
asset_md = store.find_asset_metadata(new_asset_loc)
assert asset_md is None
@ddt.data(*MODULESTORE_SETUPS)
def test_get_all_non_existing_assets(self, storebuilder):
"""
Get all assets in an existing course when no assets exist.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
# Find existing asset metadata.
asset_md = store.get_all_asset_metadata(course.id, 'asset')
assert asset_md == []
@ddt.data(*MODULESTORE_SETUPS)
def test_find_assets_in_non_existent_course(self, storebuilder):
"""
Find asset metadata from a non-existent course.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
fake_course_id = CourseKey.from_string("{}nothere/{}nothere/{}nothere".format(
course.id.org, course.id.course, course.id.run
))
new_asset_loc = fake_course_id.make_asset_key('asset', 'burnside.jpg')
# Find asset metadata from non-existent course.
with pytest.raises(ItemNotFoundError):
store.find_asset_metadata(new_asset_loc)
with pytest.raises(ItemNotFoundError):
store.get_all_asset_metadata(fake_course_id, 'asset')
@ddt.data(*MODULESTORE_SETUPS)
def test_add_same_asset_twice(self, storebuilder):
"""
Add an asset's metadata, then add it again.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
new_asset_md = self._make_asset_metadata(new_asset_loc)
# Add asset metadata.
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 1
# Add *the same* asset metadata.
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
# Still one here?
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 1
@ddt.data(*MODULESTORE_SETUPS)
def test_different_asset_types(self, storebuilder):
"""
Test saving assets with other asset types.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('vrml', 'pyramid.vrml')
new_asset_md = self._make_asset_metadata(new_asset_loc)
# Add asset metadata.
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
assert len(store.get_all_asset_metadata(course.id, 'vrml')) == 1
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 0
@ddt.data(*MODULESTORE_SETUPS)
def test_asset_types_with_other_field_names(self, storebuilder):
"""
Test saving assets using an asset type of 'course_id'.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('course_id', 'just_to_see_if_it_still_works.jpg')
new_asset_md = self._make_asset_metadata(new_asset_loc)
# Add asset metadata.
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
assert len(store.get_all_asset_metadata(course.id, 'course_id')) == 1
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 0
all_assets = store.get_all_asset_metadata(course.id, 'course_id')
assert all_assets[0].asset_id.path == new_asset_loc.path
@ddt.data(*MODULESTORE_SETUPS)
def test_lock_unlock_assets(self, storebuilder):
"""
Save multiple metadata in each store and retrieve it singularly, as all assets, and after deleting all.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
new_asset_md = self._make_asset_metadata(new_asset_loc)
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
locked_state = new_asset_md.locked
# Flip the course asset's locked status.
store.set_asset_metadata_attr(new_asset_loc, "locked", not locked_state, ModuleStoreEnum.UserID.test)
# Find the same course and check its locked status.
updated_asset_md = store.find_asset_metadata(new_asset_loc)
assert updated_asset_md is not None
assert updated_asset_md.locked == (not locked_state)
# Now flip it back.
store.set_asset_metadata_attr(new_asset_loc, "locked", locked_state, ModuleStoreEnum.UserID.test)
reupdated_asset_md = store.find_asset_metadata(new_asset_loc)
assert reupdated_asset_md is not None
assert reupdated_asset_md.locked == locked_state
ALLOWED_ATTRS = (
('pathname', '/new/path'),
('internal_name', 'new_filename.txt'),
('locked', True),
('contenttype', 'image/png'),
('thumbnail', 'new_filename_thumb.jpg'),
('fields', {'md5': '5346682d948cc3f683635b6918f9b3d0'}),
('curr_version', 'v1.01'),
('prev_version', 'v1.0'),
('edited_by', 'Mork'),
('edited_on', datetime(1969, 1, 1, tzinfo=pytz.utc)),
)
DISALLOWED_ATTRS = (
('asset_id', 'IAmBogus'),
('created_by', 'Smith'),
('created_on', datetime.now(pytz.utc)),
)
UNKNOWN_ATTRS = (
('lunch_order', 'burger_and_fries'),
('villain', 'Khan')
)
@ddt.data(*MODULESTORE_SETUPS)
def test_set_all_attrs(self, storebuilder):
"""
Save setting each attr one at a time
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
new_asset_md = self._make_asset_metadata(new_asset_loc)
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
for attribute, value in self.ALLOWED_ATTRS:
# Set the course asset's attribute.
store.set_asset_metadata_attr(new_asset_loc, attribute, value, ModuleStoreEnum.UserID.test)
# Find the same course asset and check its changed attribute.
updated_asset_md = store.find_asset_metadata(new_asset_loc)
assert updated_asset_md is not None
assert getattr(updated_asset_md, attribute, None) is not None
assert getattr(updated_asset_md, attribute, None) == value
@ddt.data(*MODULESTORE_SETUPS)
def test_set_disallowed_attrs(self, storebuilder):
"""
setting disallowed attrs should fail
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
new_asset_md = self._make_asset_metadata(new_asset_loc)
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
for attribute, value in self.DISALLOWED_ATTRS:
original_attr_val = getattr(new_asset_md, attribute)
# Set the course asset's attribute.
store.set_asset_metadata_attr(new_asset_loc, attribute, value, ModuleStoreEnum.UserID.test)
# Find the same course and check its changed attribute.
updated_asset_md = store.find_asset_metadata(new_asset_loc)
assert updated_asset_md is not None
assert getattr(updated_asset_md, attribute, None) is not None
# Make sure that the attribute is unchanged from its original value.
assert getattr(updated_asset_md, attribute, None) == original_attr_val
@ddt.data(*MODULESTORE_SETUPS)
def test_set_unknown_attrs(self, storebuilder):
"""
setting unknown attrs should fail
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
new_asset_md = self._make_asset_metadata(new_asset_loc)
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
for attribute, value in self.UNKNOWN_ATTRS:
# Set the course asset's attribute.
store.set_asset_metadata_attr(new_asset_loc, attribute, value, ModuleStoreEnum.UserID.test)
# Find the same course and check its changed attribute.
updated_asset_md = store.find_asset_metadata(new_asset_loc)
assert updated_asset_md is not None
# Make sure the unknown field was *not* added.
with pytest.raises(AttributeError):
assert getattr(updated_asset_md, attribute) == value
@ddt.data(*MODULESTORE_SETUPS)
def test_save_one_different_asset(self, storebuilder):
"""
saving and deleting things which are not 'asset'
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
asset_key = course.id.make_asset_key('different', 'burn.jpg')
new_asset_thumbnail = self._make_asset_thumbnail_metadata(
self._make_asset_metadata(asset_key)
)
store.save_asset_metadata(new_asset_thumbnail, ModuleStoreEnum.UserID.test)
assert len(store.get_all_asset_metadata(course.id, 'different')) == 1
assert store.delete_asset_metadata(asset_key, ModuleStoreEnum.UserID.test) == 1
assert len(store.get_all_asset_metadata(course.id, 'different')) == 0
@ddt.data(*MODULESTORE_SETUPS)
def test_find_different(self, storebuilder):
"""
finding things which are of type other than 'asset'
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
asset_key = course.id.make_asset_key('different', 'burn.jpg')
new_asset_thumbnail = self._make_asset_thumbnail_metadata(
self._make_asset_metadata(asset_key)
)
store.save_asset_metadata(new_asset_thumbnail, ModuleStoreEnum.UserID.test)
assert store.find_asset_metadata(asset_key) is not None
unknown_asset_key = course.id.make_asset_key('different', 'nosuchfile.jpg')
assert store.find_asset_metadata(unknown_asset_key) is None
def _check_asset_values(self, assets, orig):
"""
Check asset type/path values.
"""
for idx, asset in enumerate(orig):
assert assets[idx].asset_id.asset_type == asset[0]
assert assets[idx].asset_id.path == asset[1]
@ddt.data(*MODULESTORE_SETUPS)
def test_get_multiple_types(self, storebuilder):
"""
getting all things which are of type other than 'asset'
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
# Save 'em.
for asset_type, filename in self.alls:
asset_key = course.id.make_asset_key(asset_type, filename)
new_asset = self._make_asset_thumbnail_metadata(
self._make_asset_metadata(asset_key)
)
store.save_asset_metadata(new_asset, ModuleStoreEnum.UserID.test)
# Check 'em.
for asset_type, asset_list in (
('different', self.differents),
('vrml', self.vrmls),
('asset', self.regular_assets),
):
assets = store.get_all_asset_metadata(course.id, asset_type)
assert len(assets) == len(asset_list)
self._check_asset_values(assets, asset_list)
assert len(store.get_all_asset_metadata(course.id, 'not_here')) == 0
assert len(store.get_all_asset_metadata(course.id, None)) == 4
assets = store.get_all_asset_metadata(
course.id, None, start=0, maxresults=-1,
sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(assets) == len(self.alls)
self._check_asset_values(assets, self.alls)
@ddt.data(*MODULESTORE_SETUPS)
def test_save_metadata_list(self, storebuilder):
"""
Save a list of asset metadata all at once.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
# Make a list of AssetMetadata objects.
md_list = []
for asset_type, filename in self.alls:
asset_key = course.id.make_asset_key(asset_type, filename)
md_list.append(self._make_asset_thumbnail_metadata(
self._make_asset_metadata(asset_key)
))
# Save 'em.
store.save_asset_metadata_list(md_list, ModuleStoreEnum.UserID.test)
# Check 'em.
for asset_type, asset_list in (
('different', self.differents),
('vrml', self.vrmls),
('asset', self.regular_assets),
):
assets = store.get_all_asset_metadata(course.id, asset_type)
assert len(assets) == len(asset_list)
self._check_asset_values(assets, asset_list)
assert len(store.get_all_asset_metadata(course.id, 'not_here')) == 0
assert len(store.get_all_asset_metadata(course.id, None)) == 4
assets = store.get_all_asset_metadata(
course.id, None, start=0, maxresults=-1,
sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(assets) == len(self.alls)
self._check_asset_values(assets, self.alls)
@ddt.data(*MODULESTORE_SETUPS)
def test_save_metadata_list_with_mismatched_asset(self, storebuilder):
"""
Save a list of asset metadata all at once - but with one asset's metadata from a different course.
"""
with storebuilder.build() as (__, store):
course1 = CourseFactory.create(modulestore=store)
course2 = CourseFactory.create(modulestore=store)
# Make a list of AssetMetadata objects.
md_list = []
for asset_type, filename in self.alls:
if asset_type == 'asset':
asset_key = course2.id.make_asset_key(asset_type, filename)
else:
asset_key = course1.id.make_asset_key(asset_type, filename)
md_list.append(self._make_asset_thumbnail_metadata(
self._make_asset_metadata(asset_key)
))
# Save 'em.
store.save_asset_metadata_list(md_list, ModuleStoreEnum.UserID.test)
# Check 'em.
for asset_type, asset_list in (
('different', self.differents),
('vrml', self.vrmls),
):
assets = store.get_all_asset_metadata(course1.id, asset_type)
assert len(assets) == len(asset_list)
self._check_asset_values(assets, asset_list)
assert len(store.get_all_asset_metadata(course1.id, 'asset')) == 0
assert len(store.get_all_asset_metadata(course1.id, None)) == 3
assets = store.get_all_asset_metadata(
course1.id, None, start=0, maxresults=-1,
sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(assets) == len((self.differents + self.vrmls))
self._check_asset_values(assets, self.differents + self.vrmls)
@ddt.data(*MODULESTORE_SETUPS)
def test_delete_all_different_type(self, storebuilder):
"""
deleting all assets of a given but not 'asset' type
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
asset_key = course.id.make_asset_key('different', 'burn_thumb.jpg')
new_asset_thumbnail = self._make_asset_thumbnail_metadata(
self._make_asset_metadata(asset_key)
)
store.save_asset_metadata(new_asset_thumbnail, ModuleStoreEnum.UserID.test)
assert len(store.get_all_asset_metadata(course.id, 'different')) == 1
@ddt.data(*MODULESTORE_SETUPS)
def test_get_all_assets_with_paging(self, storebuilder):
"""
Save multiple metadata in each store and retrieve it singularly, as all assets, and after deleting all.
"""
with storebuilder.build() as (__, store):
course1 = CourseFactory.create(modulestore=store)
course2 = CourseFactory.create(modulestore=store)
self.setup_assets(course1.id, course2.id, store)
expected_sorts_by_2 = (
(
('displayname', ModuleStoreEnum.SortOrder.ascending),
('code.tgz', 'demo.swf', 'dog.png', 'roman_history.pdf', 'weather_patterns.bmp'),
(2, 2, 1)
),
(
('displayname', ModuleStoreEnum.SortOrder.descending),
('weather_patterns.bmp', 'roman_history.pdf', 'dog.png', 'demo.swf', 'code.tgz'),
(2, 2, 1)
),
(
('uploadDate', ModuleStoreEnum.SortOrder.ascending),
('code.tgz', 'dog.png', 'roman_history.pdf', 'weather_patterns.bmp', 'demo.swf'),
(2, 2, 1)
),
(
('uploadDate', ModuleStoreEnum.SortOrder.descending),
('demo.swf', 'weather_patterns.bmp', 'roman_history.pdf', 'dog.png', 'code.tgz'),
(2, 2, 1)
),
)
# First, with paging across all sorts.
for sort_test in expected_sorts_by_2:
for i in range(3):
asset_page = store.get_all_asset_metadata(
course2.id, 'asset', start=2 * i, maxresults=2, sort=sort_test[0]
)
num_expected_results = sort_test[2][i]
expected_filename = sort_test[1][2 * i]
assert len(asset_page) == num_expected_results
assert asset_page[0].asset_id.path == expected_filename
if num_expected_results == 2:
expected_filename = sort_test[1][(2 * i) + 1]
assert asset_page[1].asset_id.path == expected_filename
# Now fetch everything.
asset_page = store.get_all_asset_metadata(
course2.id, 'asset', start=0, sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(asset_page) == 5
assert asset_page[0].asset_id.path == 'code.tgz'
assert asset_page[1].asset_id.path == 'demo.swf'
assert asset_page[2].asset_id.path == 'dog.png'
assert asset_page[3].asset_id.path == 'roman_history.pdf'
assert asset_page[4].asset_id.path == 'weather_patterns.bmp'
# Some odd conditions.
asset_page = store.get_all_asset_metadata(
course2.id, 'asset', start=100, sort=('uploadDate', ModuleStoreEnum.SortOrder.ascending)
)
assert len(asset_page) == 0
asset_page = store.get_all_asset_metadata(
course2.id, 'asset', start=3, maxresults=0,
sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(asset_page) == 0
asset_page = store.get_all_asset_metadata(
course2.id, 'asset', start=3, maxresults=-12345,
sort=('displayname', ModuleStoreEnum.SortOrder.descending)
)
assert len(asset_page) == 2
@ddt.data('XML_MODULESTORE_BUILDER', 'MIXED_MODULESTORE_BUILDER')
def test_xml_not_yet_implemented(self, storebuilderName):
"""
Test coverage which shows that for now xml read operations are not implemented
"""
storebuilder = self.XML_MODULESTORE_MAP[storebuilderName]
with storebuilder.build(contentstore=None) as (__, store):
course_key = store.make_course_key("org", "course", "run")
asset_key = course_key.make_asset_key('asset', 'foo.jpg')
assert store.find_asset_metadata(asset_key) is None
assert store.get_all_asset_metadata(course_key, 'asset') == []
@ddt.data(*MODULESTORE_SETUPS)
def test_copy_all_assets_same_modulestore(self, storebuilder):
"""
Create a course with assets, copy them all to another course in the same modulestore, and check on it.
"""
with storebuilder.build() as (__, store):
course1 = CourseFactory.create(modulestore=store)
course2 = CourseFactory.create(modulestore=store)
self.setup_assets(course1.id, None, store)
assert len(store.get_all_asset_metadata(course1.id, 'asset')) == 2
assert len(store.get_all_asset_metadata(course2.id, 'asset')) == 0
store.copy_all_asset_metadata(course1.id, course2.id, ModuleStoreEnum.UserID.test * 101)
assert len(store.get_all_asset_metadata(course1.id, 'asset')) == 2
all_assets = store.get_all_asset_metadata(
course2.id, 'asset', sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(all_assets) == 2
assert all_assets[0].asset_id.path == 'pic1.jpg'
assert all_assets[1].asset_id.path == 'shout.ogg'
@ddt.data(*MODULESTORE_SETUPS)
def test_copy_all_assets_from_course_with_no_assets(self, storebuilder):
"""
Create a course with *no* assets, and try copy them all to another course in the same modulestore.
"""
with storebuilder.build() as (__, store):
course1 = CourseFactory.create(modulestore=store)
course2 = CourseFactory.create(modulestore=store)
store.copy_all_asset_metadata(course1.id, course2.id, ModuleStoreEnum.UserID.test * 101)
assert len(store.get_all_asset_metadata(course1.id, 'asset')) == 0
assert len(store.get_all_asset_metadata(course2.id, 'asset')) == 0
all_assets = store.get_all_asset_metadata(
course2.id, 'asset', sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(all_assets) == 0
@ddt.data(
('mongo', 'split'),
('split', 'mongo'),
)
@ddt.unpack
def test_copy_all_assets_cross_modulestore(self, from_store, to_store):
"""
Create a course with assets, copy them all to another course in a different modulestore, and check on it.
"""
mixed_builder = MIXED_MODULESTORE_BOTH_SETUP
with mixed_builder.build() as (__, mixed_store):
with mixed_store.default_store(from_store):
course1 = CourseFactory.create(modulestore=mixed_store)
with mixed_store.default_store(to_store):
course2 = CourseFactory.create(modulestore=mixed_store)
self.setup_assets(course1.id, None, mixed_store)
assert len(mixed_store.get_all_asset_metadata(course1.id, 'asset')) == 2
assert len(mixed_store.get_all_asset_metadata(course2.id, 'asset')) == 0
mixed_store.copy_all_asset_metadata(course1.id, course2.id, ModuleStoreEnum.UserID.test * 102)
all_assets = mixed_store.get_all_asset_metadata(
course2.id, 'asset', sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(all_assets) == 2
assert all_assets[0].asset_id.path == 'pic1.jpg'
assert all_assets[1].asset_id.path == 'shout.ogg'
| agpl-3.0 | -2,249,754,757,599,794,000 | 46.704011 | 172 | 0.597478 | false | 3.848471 | true | false | false |
dbarbier/privot | python/test/t_FunctionalChaos_ishigami.py | 1 | 6841 | #! /usr/bin/env python
from openturns import *
from math import *
TESTPREAMBLE()
RandomGenerator().SetSeed(0)
try :
# Problem parameters
dimension = 3
a = 7.0
b = 0.1
# Reference analytical values
meanTh = a/2
covTh = (b**2 * pi**8) / 18.0 + (b * pi**4) / 5.0 + (a**2) / 8.0 + 1.0 / 2.0
sob_1 = [(b * pi**4 / 5.0 + b**2 * pi**8 / 50.0 + 1.0/2.0) / covTh, (a**2 / 8.0) / covTh, 0.0]
sob_2 = [0.0, (b**2 * pi**8 / 18.0 - b**2 * pi**8 / 50.0) / covTh, 0.0]
sob_3 = [0.0]
sob_T1 = [sob_1[0] + sob_2[0] + sob_2[1] + sob_3[0], sob_1[1] + sob_2[0] + sob_2[2] + sob_3[0], sob_1[2] + sob_2[1] + sob_2[2] + sob_3[0]]
sob_T2 = [sob_2[0] + sob_2[1] + sob_3[0], sob_2[0] + sob_2[2] + sob_3[0], sob_2[1] + sob_2[2] + sob_3[0]]
sob_T3 = [sob_3[0]]
# Create the Ishigami function
inputVariables = Description(dimension)
inputVariables[0] = "xi1"
inputVariables[1] = "xi2"
inputVariables[2] = "xi3"
outputVariables = Description(1)
outputVariables[0] = "y"
formula = Description(1)
formula[0] = "sin(xi1) + (" + str(a) + ") * (sin(xi2)) ^ 2 + (" + str(b) + ") * xi3^4 * sin(xi1)"
model = NumericalMathFunction(inputVariables, outputVariables, formula)
# Create the input distribution
marginals = DistributionCollection(dimension)
marginals[0] = Uniform(-pi, pi)
marginals[1] = Uniform(-pi, pi)
marginals[2] = Uniform(-pi, pi)
distribution = ComposedDistribution(marginals)
# Create the orthogonal basis
polynomialCollection = PolynomialFamilyCollection(dimension)
polynomialCollection[0] = OrthogonalUniVariatePolynomialFamily(LegendreFactory())
polynomialCollection[1] = OrthogonalUniVariatePolynomialFamily(LegendreFactory())
polynomialCollection[2] = OrthogonalUniVariatePolynomialFamily(LegendreFactory())
enumerateFunction = EnumerateFunction(dimension)
productBasis = OrthogonalBasis(OrthogonalProductPolynomialFactory(polynomialCollection, enumerateFunction))
# Create the adaptive strategy
# We can choose amongst several strategies
# First, the most efficient (but more complex!) strategy
listAdaptiveStrategy = list()
degree = 6
indexMax = enumerateFunction.getStrataCumulatedCardinal(degree)
basisDimension = enumerateFunction.getStrataCumulatedCardinal(divmod(degree, 2)[0])
threshold = 1.0e-6
listAdaptiveStrategy.append(CleaningStrategy(productBasis, indexMax, basisDimension, threshold, False))
# Second, the most used (and most basic!) strategy
listAdaptiveStrategy.append(FixedStrategy(productBasis, enumerateFunction.getStrataCumulatedCardinal(degree)))
# Third, a slight enhancement with respect to the basic strategy
listAdaptiveStrategy.append(SequentialStrategy(productBasis, enumerateFunction.getStrataCumulatedCardinal(divmod(degree, 2)[0]), False))
for adaptiveStrategyIndex in range(len(listAdaptiveStrategy)):
adaptiveStrategy = listAdaptiveStrategy[adaptiveStrategyIndex]
# Create the projection strategy
samplingSize = 250
listProjectionStrategy = list()
# We have only the LeastSquaresStrategy up to now (0.13.0) but we can choose several sampling schemes
# Monte Carlo sampling
listProjectionStrategy.append(LeastSquaresStrategy(MonteCarloExperiment(samplingSize)))
# LHS sampling
listProjectionStrategy.append(LeastSquaresStrategy(LHSExperiment(samplingSize)))
# Low Discrepancy sequence
listProjectionStrategy.append(LeastSquaresStrategy(LowDiscrepancyExperiment(LowDiscrepancySequence(SobolSequence()),samplingSize)))
for projectionStrategyIndex in range(len(listProjectionStrategy)):
projectionStrategy = listProjectionStrategy[projectionStrategyIndex]
# Create the polynomial chaos algorithm
maximumResidual = 1.0e-10
algo = FunctionalChaosAlgorithm(model, distribution, adaptiveStrategy, projectionStrategy)
algo.setMaximumResidual(maximumResidual)
RandomGenerator.SetSeed(0)
algo.run()
# Examine the results
result = FunctionalChaosResult(algo.getResult())
print "###################################"
print AdaptiveStrategy(adaptiveStrategy)
print ProjectionStrategy(projectionStrategy)
#print "coefficients=", result.getCoefficients()
residuals = result.getResiduals()
print "residuals=", residuals
relativeErrors = result.getRelativeErrors()
print "relativeErrors=", relativeErrors
# Post-process the results
vector = FunctionalChaosRandomVector(result)
mean = vector.getMean()[0]
print "mean=%.8f" % mean, "absolute error=%.10f" % fabs(mean - meanTh)
variance = vector.getCovariance()[0, 0]
print "variance=%.8f" % variance, "absolute error=%.10f" % fabs(variance - covTh)
for i in range(dimension):
value = vector.getSobolIndex(i)
print "Sobol index", i, "= %.8f" % value, "absolute error=%.10f" % fabs(value - sob_1[i])
indices = Indices(2)
k = 0
for i in range(dimension):
indices[0] = i
for j in range(i+1, dimension):
indices[1] = j
value = vector.getSobolIndex(indices)
print "Sobol index", indices, "=%.8f" % value, "absolute error=%.10f" % fabs(value - sob_2[k])
k = k+1
indices = Indices(3)
indices[0] = 0
indices[1] = 1
indices[2] = 2
value = vector.getSobolIndex(indices)
print "Sobol index", indices, "=%.8f" % value, "absolute error=%.10f" % fabs(value - sob_3[0])
for i in range(dimension):
value = vector.getSobolTotalIndex(i)
print "Sobol total index", i, "=%.8f" % value, "absolute error=%.10f" % fabs(value - sob_T1[i])
indices = Indices(2)
k = 0
for i in range(dimension):
indices[0] = i
for j in range(i+1, dimension):
indices[1] = j
value = vector.getSobolIndex(indices)
print "Sobol total index", indices, "=%.8f" % value, "absolute error=%.10f" % fabs(value - sob_2[k])
k = k+1
indices = Indices(3)
indices[0] = 0
indices[1] = 1
indices[2] = 2
value = vector.getSobolTotalIndex(indices)
print "Sobol total index ", indices, "=%.8f" % value, "absolute error=%.10f" % fabs(value - sob_3[0])
except :
import sys
print "t_FunctionalChaos_ishigami.py", sys.exc_type, sys.exc_value
| lgpl-3.0 | -7,455,507,249,196,285,000 | 47.864286 | 142 | 0.618769 | false | 3.479654 | false | false | false |
kynikos/outspline | src/outspline/extensions/copypaste/queries.py | 1 | 1459 | # Outspline - A highly modular and extensible outliner.
# Copyright (C) 2011 Dario Giovannetti <[email protected]>
#
# This file is part of Outspline.
#
# Outspline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Outspline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outspline. If not, see <http://www.gnu.org/licenses/>.
copy_create = ("CREATE TABLE Copy (C_id INTEGER, "
"C_parent INTEGER, "
"C_previous INTEGER, "
"C_text TEXT)")
copy_select_check = 'SELECT C_id FROM Copy LIMIT 1'
copy_select_parent = ('SELECT C_id, C_text FROM Copy WHERE C_parent=? '
'AND C_previous=? LIMIT 1')
copy_select_parent_roots = ('SELECT C_id, C_text FROM Copy '
'WHERE C_parent NOT IN (SELECT C_id FROM Copy)')
copy_insert = ('INSERT INTO Copy (C_id, C_parent, C_previous, C_text) '
'VALUES (?, ?, ?, ?)')
copy_delete = 'DELETE FROM Copy'
| gpl-3.0 | 3,803,032,165,900,533,000 | 40.685714 | 76 | 0.641535 | false | 3.78961 | false | false | false |
aptivate/ckanext-mapactionevent | ckanext/mapactionevent/controllers/event_groupcontroller.py | 1 | 1738 | import re
import ckan.lib.base as base
import ckan.controllers.group as group
import ckan.model as model
import ckan.logic as logic
from ckan.common import c, _
abort = base.abort
NotAuthorized = logic.NotAuthorized
NotFound = logic.NotFound
class EventGroupController(group.GroupController):
group_types = ['event']
def _action(self, action_name):
''' select the correct group/org action '''
if action_name == 'group_create':
action_name = 'event_create'
elif action_name == 'group_list':
action_name = 'event_list'
return super(EventGroupController, self)._action(action_name)
def _render_template(self, template_name, group_type):
''' render the correct group/org template '''
import sys; print >>sys.stderr, template_name, group_type
return super(EventGroupController, self)._render_template(template_name, group_type)
# TODO: overridden as no hook for changing template in base controller.
def members(self, id):
group_type = self._ensure_controller_matches_group_type(id)
context = {'model': model, 'session': model.Session,
'user': c.user}
try:
c.members = self._action('member_list')(
context, {'id': id, 'object_type': 'user'}
)
data_dict = {'id': id}
data_dict['include_datasets'] = False
c.group_dict = self._action('group_show')(context, data_dict)
except NotAuthorized:
abort(401, _('Unauthorized to delete group %s') % '')
except NotFound:
abort(404, _('Group not found'))
return self._render_template('mapactionevent/members.html', group_type)
| agpl-3.0 | 6,376,609,578,868,277,000 | 33.078431 | 92 | 0.623705 | false | 4.004608 | false | false | false |
mapr/hue | desktop/core/ext-py/pysaml2-2.4.0/src/saml2/cache.py | 32 | 5624 | #!/usr/bin/env python
import shelve
from saml2.ident import code, decode
from saml2 import time_util, SAMLError
import logging
logger = logging.getLogger(__name__)
# The assumption is that any subject may consist of data
# gathered from several different sources, all with their own
# timeout time.
class ToOld(SAMLError):
pass
class CacheError(SAMLError):
pass
class Cache(object):
def __init__(self, filename=None):
if filename:
self._db = shelve.open(filename, writeback=True)
self._sync = True
else:
self._db = {}
self._sync = False
def delete(self, name_id):
"""
:param name_id: The subject identifier, a NameID instance
"""
del self._db[code(name_id)]
if self._sync:
try:
self._db.sync()
except AttributeError:
pass
def get_identity(self, name_id, entities=None,
check_not_on_or_after=True):
""" Get all the identity information that has been received and
        is still valid about the subject.
:param name_id: The subject identifier, a NameID instance
        :param entities: The identifiers of the entities whose assertions are
interesting. If the list is empty all entities are interesting.
:return: A 2-tuple consisting of the identity information (a
dictionary of attributes and values) and the list of entities
            whose information has timed out.
"""
if not entities:
try:
cni = code(name_id)
entities = self._db[cni].keys()
except KeyError:
return {}, []
res = {}
oldees = []
for entity_id in entities:
try:
info = self.get(name_id, entity_id, check_not_on_or_after)
except ToOld:
oldees.append(entity_id)
continue
if not info:
oldees.append(entity_id)
continue
for key, vals in info["ava"].items():
try:
tmp = set(res[key]).union(set(vals))
res[key] = list(tmp)
except KeyError:
res[key] = vals
return res, oldees
def get(self, name_id, entity_id, check_not_on_or_after=True):
""" Get session information about a subject gotten from a
specified IdP/AA.
:param name_id: The subject identifier, a NameID instance
:param entity_id: The identifier of the entity_id
:param check_not_on_or_after: if True it will check if this
subject is still valid or if it is too old. Otherwise it
will not check this. True by default.
:return: The session information
"""
cni = code(name_id)
(timestamp, info) = self._db[cni][entity_id]
if check_not_on_or_after and time_util.after(timestamp):
raise ToOld("past %s" % timestamp)
return info or None
def set(self, name_id, entity_id, info, not_on_or_after=0):
""" Stores session information in the cache. Assumes that the name_id
is unique within the context of the Service Provider.
:param name_id: The subject identifier, a NameID instance
:param entity_id: The identifier of the entity_id/receiver of an
assertion
:param info: The session info, the assertion is part of this
:param not_on_or_after: A time after which the assertion is not valid.
"""
cni = code(name_id)
if cni not in self._db:
self._db[cni] = {}
self._db[cni][entity_id] = (not_on_or_after, info)
if self._sync:
try:
self._db.sync()
except AttributeError:
pass
def reset(self, name_id, entity_id):
""" Scrap the assertions received from a IdP or an AA about a special
subject.
:param name_id: The subject identifier, a NameID instance
:param entity_id: The identifier of the entity_id of the assertion
:return:
"""
self.set(name_id, entity_id, {}, 0)
def entities(self, name_id):
""" Returns all the entities of assertions for a subject, disregarding
whether the assertion still is valid or not.
:param name_id: The subject identifier, a NameID instance
:return: A possibly empty list of entity identifiers
"""
cni = code(name_id)
return self._db[cni].keys()
def receivers(self, name_id):
""" Another name for entities() just to make it more logic in the IdP
scenario """
return self.entities(name_id)
def active(self, name_id, entity_id):
""" Returns the status of assertions from a specific entity_id.
:param name_id: The ID of the subject
:param entity_id: The entity ID of the entity_id of the assertion
:return: True or False depending on if the assertion is still
valid or not.
"""
try:
cni = code(name_id)
(timestamp, info) = self._db[cni][entity_id]
except KeyError:
return False
if not info:
return False
else:
return time_util.not_on_or_after(timestamp)
def subjects(self):
""" Return identifiers for all the subjects that are in the cache.
:return: list of subject identifiers
"""
return [decode(c) for c in self._db.keys()]
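# Rough usage sketch (illustrative only; assumes name_id is a saml2 NameID instance and
# that the stored info dict carries an "ava" attribute/value mapping):
#   cache = Cache()
#   cache.set(name_id, "https://idp.example.org", {"ava": {"mail": ["[email protected]"]}}, not_on_or_after)
#   ava, expired_entities = cache.get_identity(name_id)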
| apache-2.0 | -6,052,578,660,948,278,000 | 31.508671 | 78 | 0.573257 | false | 4.23494 | false | false | false |
paulfurley/unstructured-data-parser | extract_tags/test_hostname.py | 1 | 2191 |
import re
def main():
TLD_GROUP = (
r'(XN--CLCHC0EA0B2G2A9GCD|XN--HGBK6AJ7F53BBA|XN--HLCJ6AYA9ESC7A|'
'XN--11B5BS3A9AJ6G|XN--MGBERP4A5D4AR|XN--XKC2DL3A5EE0H|'
'XN--80AKHBYKNJ4F|XN--XKC2AL3HYE2A|XN--LGBBAT1AD8J|XN--MGBC0A9AZCG|'
'XN--9T4B11YI5A|XN--MGBAAM7A8H|XN--MGBAYH7GPA|XN--MGBBH1A71E|'
'XN--FPCRJ9C3D|XN--FZC2C9E2C|XN--YFRO4I67O|XN--YGBI2AMMX|'
'XN--3E0B707E|XN--JXALPDLP|XN--KGBECHTV|XN--OGBPF8FL|XN--0ZWM56D|'
'XN--45BRJ9C|XN--80AO21A|XN--DEBA0AD|XN--G6W251D|XN--GECRJ9C|'
'XN--H2BRJ9C|XN--J6W193G|XN--KPRW13D|XN--KPRY57D|XN--PGBS0DH|'
'XN--S9BRJ9C|XN--90A3AC|XN--FIQS8S|XN--FIQZ9S|XN--O3CW4H|'
'XN--WGBH1C|XN--WGBL6A|XN--ZCKZAH|XN--P1AI|MUSEUM|TRAVEL|AERO|ARPA|'
'ASIA|COOP|INFO|JOBS|MOBI|NAME|BIZ|CAT|COM|EDU|GOV|INT|MIL|NET|ORG|'
'PRO|TEL|XXX|AC|AD|AE|AF|AG|AI|AL|AM|AN|AO|AQ|AR|AS|AT|AU|AW|AX|AZ|'
'BA|BB|BD|BE|BF|BG|BH|BI|BJ|BM|BN|BO|BR|BS|BT|BV|BW|BY|BZ|CA|CC|CD|'
'CF|CG|CH|CI|CK|CL|CM|CN|CO|CR|CU|CV|CW|CX|CY|CZ|DE|DJ|DK|DM|DO|DZ|'
'EC|EE|EG|ER|ES|ET|EU|FI|FJ|FK|FM|FO|FR|GA|GB|GD|GE|GF|GG|GH|GI|GL|'
'GM|GN|GP|GQ|GR|GS|GT|GU|GW|GY|HK|HM|HN|HR|HT|HU|ID|IE|IL|IM|IN|IO|'
'IQ|IR|IS|IT|JE|JM|JO|JP|KE|KG|KH|KI|KM|KN|KP|KR|KW|KY|KZ|LA|LB|LC|'
'LI|LK|LR|LS|LT|LU|LV|LY|MA|MC|MD|ME|MG|MH|MK|ML|MM|MN|MO|MP|MQ|MR|'
'MS|MT|MU|MV|MW|MX|MY|MZ|NA|NC|NE|NF|NG|NI|NL|NO|NP|NR|NU|NZ|OM|PA|'
'PE|PF|PG|PH|PK|PL|PM|PN|PR|PS|PT|PW|PY|QA|RE|RO|RS|RU|RW|SA|SB|SC|'
'SD|SE|SG|SH|SI|SJ|SK|SL|SM|SN|SO|SR|ST|SU|SV|SX|SY|SZ|TC|TD|TF|TG|'
'TH|TJ|TK|TL|TM|TN|TO|TP|TR|TT|TV|TW|TZ|UA|UG|UK|US|UY|UZ|VA|VC|VE|'
'VG|VI|VN|VU|WF|WS|YE|YT|ZA|ZM|ZW)')
url_pattern = r'([A-Z]+\:\/\/[A-Z0-9\-\.]+\.' + TLD_GROUP + r')\b'
#pattern = r'\.(.*)'
text = 'blah blah 00ftp://www.domain.com/foo/bar blah blah'
my_re = re.compile(url_pattern, re.IGNORECASE)
match = my_re.search(text)
print match
if match:
print(match.group())
matches = my_re.findall(text)
print(matches)
main()
| mit | -1,720,534,985,402,122,800 | 45.617021 | 80 | 0.573711 | false | 1.815244 | false | false | false |
zouppen/simulavr | regress/timertest/timer_16bit_normal.py | 5 | 2481 | from vcdtestutil import VCDTestCase, VCDTestLoader, mSec
class TestCase(VCDTestCase):
p2irq = {
"atmega128": "IRQ.VECTOR14",
"at90s4433": "IRQ.VECTOR5",
"at90s8515": "IRQ.VECTOR6",
"atmega48": "IRQ.VECTOR13",
}
def setUp(self):
self.getVCD()
self.setClock(4000000)
self.processor = self.getProcessorType()
self.tov1 = self.p2irq[self.processor]
def test_00(self):
"""simulation time [0..40ms]"""
self.assertVCD()
self.assertEqual(self.vcd.starttime, 0)
self.assertEqual(self.vcd.endtime, 40 * mSec)
def test_01(self):
"""init counter"""
self.assertVCD()
p = self.getVariable("TIMER1.TCNTH")
self.assertEqual(p.firstedge.intValue, 0)
p = self.getVariable("TIMER1.TCNTL")
self.assertEqual(p.firstedge.intValue, 0)
def test_02(self):
"""counter period = 0,25us"""
self.assertVCD()
c = self.getVariable("TIMER1.Counter")
c1 = c.firstedge
tp = self.tClock
t0 = c1.internalTime - tp
dtc = tp * 65536
self.assertEqual(c1.intValue, 1)
c2 = c.getNextEdge(c1)
self.assertEqual(c2.intValue, 2)
self.assertEqual(c2.internalTime - c1.internalTime, tp)
def test_03(self):
"""counter mode: count 0xffff, then 0"""
self.assertVCD()
c = self.getVariable("TIMER1.Counter")
c1 = c.firstedge
tp = self.tClock
t0 = c1.internalTime - tp
dtc = tp * 65536
c2 = c.getNextEdge(t0 + dtc)
self.assertEqual(c2.intValue, 0)
def test_04(self):
"""check occurence of TOV1 interrupt"""
self.assertVCD()
ctr = self.getVariable("TIMER1.Counter")
tp = self.tClock
t0 = ctr.firstedge.internalTime - tp
dtc = tp * 65536
idelay = 6 * self.tClock
irq = self.getVariable(self.tov1)
# first overflow
t = t0 + dtc
ce = ctr.getNextEdge(t)
self.assertEqual(ce.internalTime, t)
self.assertEqual(ce.intValue, 0)
# check, when interrupt occurs
ie = irq.getNextEdge(t)
self.assertEqual(ie.intValue, 1)
        self.assertTrue(ie.internalTime <= (t + idelay), "TOV1 occurred too late")
# seek next TOV1
ie = irq.getNextEdge(irq.getNextEdge(ie))
        self.assertTrue(ie.internalTime <= (t + dtc + idelay), "second TOV1 occurred too late")
if __name__ == '__main__':
from unittest import TestLoader, TextTestRunner
tests = VCDTestLoader("timer_16bit_normal_atmega128.vcd").loadTestsFromTestCase(TestCase)
TextTestRunner(verbosity = 2).run(tests)
# EOF
| gpl-2.0 | -4,561,736,661,851,272,700 | 28.535714 | 91 | 0.646514 | false | 2.981971 | true | false | false |
schinckel/django-boardinghouse | boardinghouse/contrib/demo/models.py | 1 | 3666 | import datetime
from django.conf import settings
from django.db import models
from django.utils import six, timezone
from django.utils.functional import cached_property
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import ugettext as _
import pytz
from boardinghouse.base import SharedSchemaMixin
from boardinghouse.exceptions import Forbidden
from boardinghouse.schema import activate_schema, deactivate_schema
class ExpiringObjectsQuerySet(models.query.QuerySet):
def expired(self):
"Expired demos"
return self.filter(expires_at__lt=timezone.now().replace(tzinfo=pytz.utc))
def active(self):
"Non-expired demos"
return self.filter(expires_at__gte=timezone.now().replace(tzinfo=pytz.utc))
@six.python_2_unicode_compatible
class DemoSchema(SharedSchemaMixin, models.Model):
"""A User's demo setup.
Each user may only have at most one DemoSchema object, which will have an
expiry date.
We retain a reference to the template from which it was cloned, so we can
easily reset it.
"""
user = models.OneToOneField(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
primary_key=True,
related_name='demo_schema')
expires_at = models.DateTimeField()
from_template = models.ForeignKey('template.SchemaTemplate',
on_delete=models.CASCADE,
related_name='demo_schemata',
limit_choices_to=~models.Q(use_for_demo=None))
objects = ExpiringObjectsQuerySet.as_manager()
class Meta:
verbose_name = 'user demo'
verbose_name_plural = 'user demos'
def __str__(self):
if self.expired:
return u'Expired demo for {0} (expired {1} ago)'.format(self.user, timesince(self.expires_at))
return u'Demo for {0}: expires at {1} ({2} from now)'.format(
self.user, self.expires_at, timeuntil(self.expires_at))
@cached_property
def schema(self):
return '{0}{1}'.format(settings.BOARDINGHOUSE_DEMO_PREFIX, self.user_id)
@property
def expired(self):
return self.expires_at < timezone.now().replace(tzinfo=pytz.utc)
@property
def name(self):
return _('Demo schema ({template_name})').format(template_name=self.from_template.name)
@property
def _clone(self):
return self.from_template.schema
def save(self, *args, **kwargs):
if not self.expires_at:
self.expires_at = datetime.datetime.utcnow().replace(tzinfo=pytz.utc) + settings.BOARDINGHOUSE_DEMO_PERIOD
return super(DemoSchema, self).save(*args, **kwargs)
def activate(self):
if self.expired:
raise DemoSchemaExpired()
activate_schema(self.schema)
def deactivate(self):
deactivate_schema()
class DemoSchemaExpired(Forbidden):
pass
class ValidDemoTemplateManager(models.Manager):
def get_queryset(self):
return super(ValidDemoTemplateManager, self).get_queryset().filter(template_schema__is_active=True)
class ValidDemoTemplate(SharedSchemaMixin, models.Model):
template_schema = models.OneToOneField('template.SchemaTemplate',
primary_key=True,
on_delete=models.CASCADE,
related_name='use_for_demo')
objects = ValidDemoTemplateManager()
def __str__(self):
return '{0} is valid as a demo source'.format(self.template_schema)
| bsd-3-clause | -4,153,517,286,524,997,600 | 32.944444 | 118 | 0.639116 | false | 4.189714 | false | false | false |
jeremiak/regulations-site | regulations/generator/layers/formatting.py | 2 | 3033 | from itertools import dropwhile, takewhile
import re
from django.template import loader, Context
class FormattingLayer(object):
shorthand = 'formatting'
def __init__(self, layer_data):
self.layer_data = layer_data
self.table_tpl = loader.get_template('regulations/layers/table.html')
self.note_tpl = loader.get_template('regulations/layers/note.html')
self.code_tpl = loader.get_template('regulations/layers/code.html')
self.subscript_tpl = loader.get_template(
'regulations/layers/subscript.html')
def render_table(self, table):
max_width = 0
for header_row in table['header']:
width = sum(cell['colspan'] for cell in header_row)
max_width = max(max_width, width)
# Just in case a row is longer than the header
row_max = max(len(row) for row in table['rows'])
max_width = max(max_width, row_max)
# Now pad rows if needed
for row in table['rows']:
row.extend([''] * (max_width - len(row)))
context = Context(table)
# Remove new lines so that they don't get escaped on display
return self.table_tpl.render(context).replace('\n', '')
def render_note(self, fence_data):
lines = fence_data.get('lines', [])
lines = [l for l in lines
if l.replace('Note:', '').replace('Notes:', '').strip()]
context = Context({'lines': lines})
return self.note_tpl.render(context).replace('\n', '')
def render_code(self, fence_data):
"""Generic code rendering. Not language specific"""
lines = fence_data.get('lines', [])
context = Context({'lines': lines})
return self.code_tpl.render(context)
def apply_layer(self, text_index):
"""Convert all plaintext tables into html tables"""
layer_pairs = []
if text_index in self.layer_data:
for data in self.layer_data[text_index]:
if 'table_data' in data:
layer_pairs.append((data['text'],
self.render_table(data['table_data']),
data['locations']))
if data.get('fence_data', {}).get('type') == 'note':
layer_pairs.append((data['text'],
self.render_note(data['fence_data']),
data['locations']))
elif 'fence_data' in data:
layer_pairs.append((data['text'],
self.render_code(data['fence_data']),
data['locations']))
if 'subscript_data' in data:
layer_pairs.append((
data['text'],
self.subscript_tpl.render(Context(
data['subscript_data'])).replace('\n', ''),
data['locations']))
return layer_pairs
| cc0-1.0 | 1,645,014,395,746,222,300 | 40.547945 | 78 | 0.521925 | false | 4.382948 | false | false | false |
yuzhangcmu/Python-Study | Concept_Implement/MapReduce.py | 2 | 4345 | #word_count.py
import string
import map_reduce
def mapper(input_key,input_value):
return [(word,1) for word in
remove_punctuation(input_value.lower()).split()]
"""
After Mapper, we have this
[('the', 1), ('quick', 1), ('brown', 1), ('fox', 1),
('jumped', 1), ('over', 1), ('the', 1), ('lazy', 1), ('grey', 1),
('dogs', 1), ('mary', 1), ('had', 1), ('a', 1), ('little', 1),
('lamb', 1), ('its', 1), ('fleece', 1), ('was', 1), ('white', 1),
('as', 1), ('snow', 1), ('and', 1), ('everywhere', 1),
('that', 1), ('mary', 1), ('went', 1), ('the', 1), ('lamb', 1),
('was', 1), ('sure', 1), ('to', 1), ('go', 1), ('thats', 1),
('one', 1), ('small', 1), ('step', 1), ('for', 1), ('a', 1),
('man', 1), ('one', 1), ('giant', 1), ('leap', 1), ('for', 1),
('mankind', 1)]
"""
# Used to remove ','
def remove_punctuation(s):
return s.translate(string.maketrans("",""), string.punctuation)
def reducer(intermediate_key,intermediate_value_list):
return (intermediate_key,sum(intermediate_value_list))
"""
After Reducer, we have this
{'and': [1], 'fox': [1], 'over': [1], 'one': [1, 1], 'as': [1],
'go': [1], 'its': [1], 'lamb': [1, 1], 'giant': [1],
'for': [1, 1], 'jumped': [1], 'had': [1], 'snow': [1],
'to': [1], 'leap': [1], 'white': [1], 'was': [1, 1],
'mary': [1, 1], 'brown': [1], 'lazy': [1], 'sure': [1],
'that': [1], 'little': [1], 'small': [1], 'step': [1],
'everywhere': [1], 'mankind': [1], 'went': [1], 'man': [1],
'a': [1, 1], 'fleece': [1], 'grey': [1], 'dogs': [1],
'quick': [1], 'the': [1, 1, 1], 'thats': [1]}
"""
filenames = ["text\\a.txt","text\\b.txt","text\\c.txt"]
i = {}
for filename in filenames:
f = open(filename)
i[filename] = f.read()
f.close()
print map_reduce.map_reduce(i,mapper,reducer)
"""
The map_reduce module imported by this program implements MapReduce in pretty much the simplest possible way, using some useful functions from the itertools library:
"""
# map_reduce.py
"""Defines a single function, map_reduce, which takes an input
dictionary i and applies the user-defined function mapper to each
(input_key,input_value) pair, producing a list of intermediate
keys and intermediate values. Repeated intermediate keys then
have their values grouped into a list, and the user-defined
function reducer is applied to the intermediate key and list of
intermediate values. The results are returned as a list."""
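# Illustrative example (not part of the original module): with
#   i = {"doc1": "a b a"}
# and the word-count mapper/reducer shown above, map_reduce(i, mapper, reducer)
# returns [('a', 2), ('b', 1)] (the ordering depends on dict iteration order).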
import itertools
def map_reduce(i,mapper,reducer):
intermediate = []
# This is processing the mapper, combine all the mapper to the same list
for (key,value) in i.iteritems():
intermediate.extend(mapper(key,value))
groups = {}
# very important step.
    # 1. lambda simply yields the first argument in the intermediate pair, which is the key.
    # That is used to decide what to group by
# 2. sorted is used to get the result grouped. See the later comment
    # 3. the list comprehension below is used to get the value, which could also use x[1]
for key, group in itertools.groupby(sorted(intermediate),lambda x: x[0]):
groups[key] = list([y for x, y in group])
# And finally apply reducer function to each item
return [reducer(intermediate_key,groups[intermediate_key]) for intermediate_key in groups]
"""
from itertools import groupby
def groupby_even_odd(items):
f = lambda x: 'even' if x % 2 == 0 else 'odd'
gb = groupby(items, f)
for k, items in gb:
print '%s: %s' % (k, ','.join(map(str, items)))
>>> groupby_even_odd([1, 3, 4, 5, 6, 8, 9, 11])
odd: 1,3
even: 4
odd: 5
even: 6,8
odd: 9,11
Which is not what we want. To improve, simply do the following:
def groupby_even_odd(items):
f = lambda x: 'even' if x % 2 == 0 else 'odd'
gb = groupby(sorted(items, key=f), f)
for k, items in gb:
print '%s: %s' % (k, ','.join(map(str, items)))
>>> groupby_even_odd([1, 3, 4, 5, 6, 8, 9, 11])
even: 4,6,8
odd: 1,3,5,9,11
"""
def map_reduce(i, mapper, reducer):
intermediate = []
for key, value in i.iteritems():
intermediate.extend(mapper(key, value))
groups = {}
for key, group in itertools.groupby(sorted(intermediate), key=lambda x: x[0]):
        # Another way to do this
for inter_key, value in group:
groups.setdefault(key, []).append(value)
    return [reducer(k, v) for k, v in groups.iteritems()]
| mit | 856,955,800,107,694,100 | 34.909091 | 165 | 0.599079 | false | 2.99862 | false | false | false |
msfrank/cifparser | cifparser/namespace.py | 1 | 6514 | # Copyright 2015 Michael Frank <[email protected]>
#
# This file is part of cifparser. cifparser is BSD-licensed software;
# for copyright information see the LICENSE file.
from cifparser.converters import *
def or_default(default, fn, *args, **kwargs):
try:
return fn(*args, **kwargs)
except KeyError:
return default
class Namespace(object):
"""
"""
def __init__(self, values):
"""
:param values:
:type values: cifparser.valuetree.ValueTree
"""
self.values = values
def get_container(self, path):
return self.values.get_container(path)
def get_container_or_default(self, path, default=None):
return or_default(default, self.get_container, path)
def contains_container(self, path):
"""
"""
return self.values.contains_container(path)
def get_raw(self, path, name):
return self.values.get_field(path, name)
def get_raw_or_default(self, path, name, default=None):
return or_default(default, self.get_raw, path, name)
def get_raw_list(self, path, name):
return self.values.get_field_list(path, name)
def get_raw_list_or_default(self, path, name, default=None):
return or_default(default, self.get_raw_list, path, name)
def get_str(self, path, name):
return str_to_stripped(self.get_raw(path, name))
def get_str_or_default(self, path, name, default=None):
return or_default(default, self.get_str, path, name)
def get_str_list(self, path, name):
return map(lambda x: str_to_stripped(x), self.values.get_field_list(path, name))
def get_str_list_or_default(self, path, name, default=None):
return or_default(default, self.get_str_list, path, name)
def get_flattened(self, path, name):
return str_to_flattened(self.get_raw(path, name))
def get_flattened_or_default(self, path, name, default=None):
return or_default(default, self.get_str, path, name)
def get_flattened_list(self, path, name):
return map(lambda x: str_to_flattened(x), self.values.get_field_list(path, name))
def get_flattened_list_or_default(self, path, name, default=None):
return or_default(default, self.get_flattened_list, path, name)
def get_int(self, path, name):
return str_to_int(self.get_flattened(path, name))
def get_int_or_default(self, path, name, default=None):
return or_default(default, self.get_int, path, name)
def get_int_list(self, path, name):
return map(lambda x: str_to_int(x), self.get_flattened_list(path, name))
def get_int_list_or_default(self, path, name, default=None):
return or_default(default, self.get_int_list, path, name)
def get_bool(self, path, name):
return str_to_bool(self.get_flattened(path, name))
def get_bool_or_default(self, path, name, default=None):
return or_default(default, self.get_bool, path, name)
def get_bool_list(self, path, name):
return map(lambda x: str_to_bool(x), self.get_flattened_list(path, name))
def get_bool_list_or_default(self, path, name, default=None):
return or_default(default, self.get_bool_list, path, name)
def get_float(self, path, name):
return str_to_float(self.get_flattened(path, name))
def get_float_or_default(self, path, name, default=None):
return or_default(default, self.get_float, path, name)
def get_float_list(self, path, name):
return map(lambda x: str_to_float(x), self.get_flattened_list(path, name))
def get_float_list_or_default(self, path, name, default=None):
return or_default(default, self.get_float_list, path, name)
def get_timedelta(self, path, name):
return str_to_timedelta(self.get_flattened(path, name))
def get_timedelta_or_default(self, path, name, default=None):
return or_default(default, self.get_timedelta, path, name)
def get_timedelta_list(self, path, name):
return map(lambda x: str_to_timedelta(x), self.get_flattened_list(path, name))
def get_timedelta_list_or_default(self, path, name, default=None):
return or_default(default, self.get_timedelta_list, path, name)
def get_size(self, path, name):
return str_to_size(self.get_flattened(path, name))
def get_size_or_default(self, path, name, default=None):
return or_default(default, self.get_size, path, name)
def get_size_list(self, path, name):
return map(lambda x: str_to_size(x), self.get_flattened_list(path, name))
def get_size_list_or_default(self, path, name, default=None):
return or_default(default, self.get_size_list, path, name)
def get_percentage(self, path, name):
return str_to_percentage(self.get_flattened(path, name))
def get_percentage_or_default(self, path, name, default=None):
return or_default(default, self.get_percentage, path, name)
def get_percentage_list(self, path, name):
return map(lambda x: str_to_percentage(x), self.get_flattened_list(path, name))
def get_percentage_list_or_default(self, path, name, default=None):
return or_default(default, self.get_percentage_list, path, name)
def get_throughput(self, path, name):
return str_to_throughput(self.get_flattened(path, name))
def get_throughput_or_default(self, path, name, default=None):
return or_default(default, self.get_throughput, path, name)
def get_throughput_list(self, path, name):
return map(lambda x: str_to_throughput(x), self.get_flattened_list(path, name))
def get_throughput_list_or_default(self, path, name, default=None):
return or_default(default, self.get_throughput_list, path, name)
def contains_field(self, path, name):
"""
Returns True if the specified name exists, otherwise False.
:param name: The name.
:type name: str
:returns: True or False.
:rtype: [bool]
"""
return self.values.contains_field(path, name)
def contains_field_list(self, path, name):
"""
Returns True if the specified name exists, otherwise False.
:param name: The name.
:type name: str
:returns: True or False.
:rtype: [bool]
"""
return self.values.contains_field_list(path, name)
def contains(self, path, name):
return self.contains_field(path, name) or self.contains_field_list(path, name)
| bsd-2-clause | -6,926,594,170,818,973,000 | 35.595506 | 89 | 0.655204 | false | 3.398018 | false | false | false |
StanczakDominik/PythonPIC | pythonpic/tests/test_charge_interpolation.py | 1 | 3565 | # # coding=utf-8
# import matplotlib.pyplot as plt
# import numpy as np
# import pytest
#
# from ..algorithms.field_interpolation import PeriodicInterpolateField
# from ..classes import Species, PeriodicGrid
#
# @pytest.mark.parametrize("power", range(6))
# def test_poly(power, plotting=False):
# NG = 16
# NG_plot = 500
# L = 1
#
# x, dx = np.linspace(0, L, NG, retstep=True, endpoint=False)
#
# N = 128
# x_particles = np.linspace(0, L, N, endpoint=False)
#
# def electric_field_function(x):
# return x ** power
#
# electric_field = electric_field_function(x)
#
# interpolated = PeriodicInterpolateField(x_particles, electric_field, x, dx)
# analytical = electric_field_function(x_particles)
#
# region_before_last_point = x_particles < x.max()
#
# def plot():
# x_plot = np.linspace(0, L, NG_plot, endpoint=False)
# electric_field_plot = electric_field_function(x_plot)
# plt.plot(x_plot, electric_field_plot, lw=5)
# plt.plot(x[region_before_last_point], electric_field[region_before_last_point])
# plt.plot(x_particles, interpolated, "go-")
# plt.vlines(x, electric_field.min(), electric_field.max())
# plt.show()
# return "poly test failed for power = {}".format(power)
#
# if plotting:
# plot()
#
# assert np.allclose(analytical[region_before_last_point], interpolated[region_before_last_point], atol=1e-2, rtol=1e-2), plot()
#
#
# @pytest.mark.parametrize("field", [lambda x: np.sin(2 * np.pi * x), lambda x: np.cos(2 * np.pi * x)])
# def test_periodic(field, plotting=False):
# NG = 16
# NG_plot = 500
# L = 1
#
# x, dx = np.linspace(0, L, NG, retstep=True, endpoint=False)
#
# N = 128
# x_particles = np.linspace(0, L, N, endpoint=False)
#
# electric_field = field(x)
# interpolated = PeriodicInterpolateField(x_particles, electric_field, x, dx)
# analytical = field(x_particles)
#
# def plot():
# x_plot = np.linspace(0, L, NG_plot, endpoint=False)
# electric_field_plot = field(x_plot)
# plt.plot(x_plot, electric_field_plot, lw=5)
# plt.plot(x, electric_field)
# plt.plot(x_particles, interpolated, "go-")
# plt.vlines(x, electric_field.min(), electric_field.max())
# plt.show()
# return "periodic test failure"
#
# if plotting:
# plot()
#
# assert np.allclose(interpolated, analytical, atol=1e-2, rtol=1e-2), plot()
#
#
# @pytest.mark.parametrize("power", range(2, 6))
# def test_single_particle(power, plotting=False):
# """tests interpolation of field to particles:
# at cell boundary
#     at half cell
# at 3/4 cell
# at end of simulation region (PBC)
# """
# NG = 16
# L = 1
# g = PeriodicGrid(1, L=L, NG=NG)
# s = Species(1, 1, 4, g)
#
# def electric_field_function(x):
# return x ** power
#
# electric_field = electric_field_function(g.x)
#
# interpolated = PeriodicInterpolateField(s.x, electric_field, g.x, g.dx)
# analytical = electric_field_function(s.x)
# # analytical[-1] = (electric_field[0] + electric_field[-1]) / 2
#
# def plot():
# plt.plot(s.x, interpolated, "go-")
# plt.vlines(g.x, electric_field.min(), electric_field.max())
# plt.show()
# return "poly test failed for power = {}".format(power)
#
# if plotting:
# plot()
#
# assert np.allclose(analytical, interpolated), plot()
#
#
# if __name__ == "__main__":
# test_single_particle()
| bsd-3-clause | 226,414,916,271,088,860 | 30.830357 | 132 | 0.601683 | false | 2.824881 | true | false | false |
forseti-security/forseti-security | google/cloud/forseti/common/gcp_type/cloudsql_instance.py | 1 | 3326 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A CloudSQL Instance Resource.
See: https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/
"""
import json
from google.cloud.forseti.common.gcp_type import resource
class CloudSQLInstanceLifecycleState(resource.LifecycleState):
"""Represents the cloud_sql_instance's LifecycleState."""
pass
class CloudSQLInstance(resource.Resource):
"""CloudSQL Instance resource."""
RESOURCE_NAME_FMT = 'instances/%s'
def __init__(
self,
instance_id,
full_name=None,
data=None,
name=None,
display_name=None,
parent=None,
locations=None,
lifecycle_state=CloudSQLInstanceLifecycleState.UNSPECIFIED):
"""Initialize.
Args:
instance_id (str): The cloud sql instance id.
full_name (str): The full resource name and ancestry.
data (str): Resource representation of the cloud sql instance.
name (str): The cloud_sql_instance's unique GCP name, with the
format "cloud_sql_instances/{id}".
display_name (str): The cloud sql instance's display name.
locations (List[str]): Locations this cloud sql instance resides in.
If set, there should be exactly one element in the list.
parent (Resource): The parent Resource.
lifecycle_state (LifecycleState): The lifecycle state of the
cloud_sql_instance.
"""
super(CloudSQLInstance, self).__init__(
resource_id=instance_id,
resource_type=resource.ResourceType.CLOUD_SQL_INSTANCE,
name=name,
display_name=display_name,
parent=parent,
locations=locations,
lifecycle_state=lifecycle_state)
self.full_name = full_name
self.data = data
@classmethod
def from_json(cls, parent, json_string):
"""Create a cloud_sql_instance from a JSON string.
Args:
parent (Resource): resource this cloud_sql_instance belongs to.
json_string(str): JSON string of a cloud_sql_instance GCP API
response.
Returns:
CloudSQLInstance: cloud sql instance resource.
"""
instance_dict = json.loads(json_string)
instance_id = instance_dict['name']
return cls(
parent=parent,
instance_id=instance_id,
full_name='{}cloudsqlinstance/{}/'.format(parent.full_name,
instance_id),
display_name=instance_id,
locations=[instance_dict['region']],
data=json_string,
)
| apache-2.0 | 7,918,750,074,306,586,000 | 34.763441 | 80 | 0.61816 | false | 4.359109 | false | false | false |
sanglass/sandglass.time | setup.py | 1 | 1300 | from setuptools import find_packages
from setuptools import setup
setup(
name='sandglass.time',
version='0.1.0',
packages=find_packages(),
namespace_packages=['sandglass'],
zip_safe=False,
include_package_data=True,
install_requires=[
'gunicorn',
'pyramid==1.5',
'pyramid_tm==0.7',
'pyramid_mailer==0.13',
'sqlalchemy==0.9.1',
'alembic==0.6.3',
'zope.sqlalchemy',
'zope.component==4.1.0',
# Enum support for python < 3.4
'flufl.enum',
# Forms/data handling
'colander',
# Translations extraction support
'Babel',
'lingua',
# Documentation support
'Sphinx',
# Timezone support
'pytz',
# Command line support
'cement',
'PasteScript',
],
entry_points={
'paste.app_factory': [
'main = sandglass.time.main:make_wsgi_app',
],
'console_scripts': [
'sandglass = sandglass.time.command:main',
],
},
paster_plugins=['pyramid'],
message_extractors={
'sandglass/time': [
('**.py', 'lingua_python', None),
('tests/**', 'ignore', None),
('locales/**', 'ignore', None),
],
},
)
| bsd-3-clause | 2,986,097,353,548,480,500 | 24 | 55 | 0.509231 | false | 3.571429 | false | false | false |
ajaska/code-snippets | fontName.py | 1 | 1040 | import struct
def fontName(path):
tags = {}
ntoffset, offset, records = None, None, None
with open(path, 'rb') as f:
data = f.read()
tables = struct.unpack_from('>H', data, 4)[0]
for i in range(tables):
tag = data[i*16 + 12:i*16 + 16]
if tag == b"name":
ntoffset = struct.unpack_from('>I', data, i*16 + 20)[0]
offset = struct.unpack_from('>H', data, ntoffset + 4)[0]
records = struct.unpack_from('>H', data, ntoffset + 2)[0]
break
if ntoffset is None:
return tags
storage = ntoffset + offset
for i in range(records):
id = struct.unpack_from('>H', data, ntoffset + i*12 + 12)[0]
length = struct.unpack_from('>H', data, ntoffset + i*12 + 14)[0]
offset = struct.unpack_from('>H', data, ntoffset + i*12 + 16)[0]
value = data[storage + offset:storage + offset + length]
value = ''.join([chr(x) for x in value if x != 0])
tags[id] = value
return tags[1] if 1 in tags else None
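# Illustrative usage (hypothetical font path; name ID 1 in the TrueType "name" table is
# the font family name, which is what this function returns):
#   family = fontName("/Library/Fonts/Arial.ttf")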
| mit | 3,033,834,519,379,990,500 | 33.666667 | 72 | 0.548077 | false | 3.25 | false | false | false |
alfredoavanzosc/odoomrp-wip-1 | machine_manager_preventive/models/preventive_operation.py | 12 | 6586 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api, exceptions, _
class PreventiveOperationtype(models.Model):
_name = 'preventive.operation.type'
_description = 'Machinery preventive operation template type'
name = fields.Char('Name')
ref = fields.Char('Operation Reference')
cycles = fields.Integer('Cycles')
basedoncy = fields.Boolean('Based on Cycles')
basedontime = fields.Boolean('Based on Time')
margin_cy1 = fields.Integer(
'Cycles Margin 1', help="A negative number means that the alarm will "
"be activated before the condition is met")
margin_cy2 = fields.Integer('Cycles Margin 2')
frequency = fields.Integer('Frequency', help="Estimated time for the next"
" operation.")
interval_unit = fields.Selection([('day', 'Days'), ('week', 'Weeks'),
('mon', 'Months'), ('year', 'Years')],
'Interval Unit')
margin_fre1 = fields.Integer(
'Frequency Margin 1', help="A negative number means that the alarm "
"will be activated before the compliance date")
interval_unit1 = fields.Selection([('day', 'Days'), ('week', 'Weeks'),
('mon', 'Months'), ('year', 'Years')],
'Interval Unit')
margin_fre2 = fields.Integer(
'Frequency Margin 2', help="A negative number means that the alarm "
"will be activated before the compliance date")
interval_unit2 = fields.Selection([('day', 'Days'), ('week', 'Weeks'),
('mon', 'Months'), ('year', 'Years')],
'Interval Unit')
description = fields.Text('Description')
hours_qty = fields.Float('Quantity Hours', required=False,
help="Expected time for the execution of the "
"operation. hh:mm")
@api.constrains('basedoncy', 'cycles')
def _check_basedoncy(self):
for record in self:
if record.basedoncy and record.cycles <= 0:
raise exceptions.ValidationError(
_("Operations based on cycles must have a positive cycle "
"frequency"))
@api.constrains('basedontime', 'frequency', 'interval_unit')
def _check_basedontime(self):
for record in self:
if record.basedontime and (
record.frequency <= 0 or not record.interval_unit):
raise exceptions.Warning(
_("Operations based on time must have a positive time "
" frequency"))
@api.one
@api.onchange('interval_unit')
def onchange_interval_unit(self):
if self.interval_unit:
self.interval_unit1 = self.interval_unit
self.interval_unit2 = self.interval_unit
@api.constrains('margin_cy1', 'margin_cy2')
def check_cycle_margins(self):
for record in self:
if record.margin_cy1 and record.margin_cy2 and (
record.margin_cy1 > record.margin_cy2):
raise exceptions.ValidationError(
_('First margin must be before second'))
@api.constrains('margin_fre1', 'interval_unit1', 'margin_fre2',
'interval_unit2')
def _check_time_margins(self):
for record in self:
if record.interval_unit1 and record.interval_unit2:
machine_operations = self.env['preventive.machine.operation']
date = fields.Date.today()
margin1 = machine_operations.get_interval_date(
date, record.margin_fre1, record.interval_unit1)
margin2 = machine_operations.get_interval_date(
date, record.margin_fre2, record.interval_unit2)
if margin1 > margin2:
raise exceptions.ValidationError(
_("First margin must be before second"))
class PreventiveOperationMaterial(models.Model):
_name = "preventive.operation.material"
_description = "New material line."
op_machi_mat = fields.Many2one('preventive.operation.matmach', 'Operation')
product_id = fields.Many2one('product.product', 'Product', required=True)
product_uom_qty = fields.Float('Quantity', default='1')
product_uom = fields.Many2one('product.uom', 'Unit of Measure',
required=True)
@api.one
@api.onchange('product_id')
def onchange_product(self):
if self.product_id:
self.product_uom = self.product_id.uom_id.id
class PreventiveOperationMatmach(models.Model):
# operation_machine_materials
_name = 'preventive.operation.matmach'
_description = 'Operation - Material - Machine Relation'
name = fields.Char('Name')
optype_id = fields.Many2one('preventive.operation.type', 'Operation')
opmaster = fields.Many2one('preventive.master', 'Master Operation')
material = fields.One2many('preventive.operation.material', 'op_machi_mat',
'Material')
basedoncy = fields.Boolean(related='optype_id.basedoncy')
basedontime = fields.Boolean(related='optype_id.basedontime')
cycles = fields.Integer(related='optype_id.cycles')
frequency = fields.Integer(related='optype_id.frequency')
interval_unit = fields.Selection(related='optype_id.interval_unit')
hours_qty = fields.Float(related='optype_id.hours_qty')
description = fields.Text('Description')
@api.one
@api.onchange('optype_id')
def onchange_optype_id(self):
if self.optype_id:
self.description = self.optype_id.description
| agpl-3.0 | 4,377,930,631,046,565,400 | 44.42069 | 79 | 0.59505 | false | 4.254522 | false | false | false |
ecustacm/ecustacm.github.io | assets/2017-05-08/a/solution/main.py | 1 | 4094 | from fractions import Fraction
from copy import deepcopy
z_val = {}
def gcd(a, b):
if b == 0:
return a
return gcd(b, a%b)
def lcm(a, b):
return a * b / gcd(a, b)
class PolyTerm(object):
def __init__(self, frac=Fraction(0, 1)):
self.coef = frac
self.a = [] # a_i^j sub & pow [ [1, 2], [2, 1], ... ]
def mul_coef(self, frac):
self.coef *= frac
def inner_sort(self):
self.a = sorted(self.a, reverse=False)
def out(self):
print("coef: %s, term: %s" % (self.coef, self.a))
class Poly(object):
def __init__(self):
self.poly = []
def mul_coef(self, coef):
for term in self.poly:
term.mul_coef(coef)
def mul_ai(self, sub):
new_poly = deepcopy(self.poly)
for term in new_poly:
find = False
for a in term.a:
if a[0] == sub:
find = True
a[1] += 1
break
if not find:
term.a.append([sub, 1])
term.inner_sort()
self.poly = new_poly
def add_poly(self, polyb):
ret_poly = Poly()
all_terms = []
ret_terms = []
for terma in self.poly:
all_terms.append(terma)
for termb in polyb.poly:
all_terms.append(termb)
ll = len(all_terms)
for i in range(ll):
for j in range(i+1, ll):
sta = set([ (s, p) for s, p in all_terms[i].a ] )
stb = set([ (s, p) for s, p in all_terms[j].a ] )
if sta == stb:
all_terms[i].coef = all_terms[i].coef + all_terms[j].coef
all_terms[j].coef = 0
for term in all_terms:
if term.coef != 0:
ret_terms.append(term)
ret_poly.poly = deepcopy(ret_terms)
return ret_poly
def get_poly(self):
ret = deepcopy(self.poly)
return ret
def out(self):
for term in self.poly:
term.out()
print("poly end")
def get_z_val(n):
"""
https://en.wikipedia.org/wiki/Cycle_index
"""
global z_val
if n in z_val:
return deepcopy(z_val[n])
if n == 0:
one = PolyTerm(Fraction(1.0))
poly = Poly()
poly.poly = [one]
z_val[n] = deepcopy(poly)
return z_val[n]
res = Poly()
for i in range(1, n+1):
v1 = get_z_val(n - i)
v = deepcopy(v1)
v.mul_ai(i)
res = res.add_poly(v)
res.mul_coef(Fraction(1, n))
z_val[n] = deepcopy(res)
return z_val[n]
def func(n, m):
poly_n = get_z_val(n)
poly_m = get_z_val(m)
# poly_n.out()
# poly_m.out()
ret_poly = Poly()
for terma in poly_n.poly:
for termb in poly_m.poly:
new_term = PolyTerm()
new_term.coef = terma.coef * termb.coef
for ta in terma.a:
for tb in termb.a:
sa = ta[0]
pa = ta[1]
sb = tb[0]
pb = tb[1]
ll = lcm(sa, sb)
new_term.a.append([ll, (sa * sb * pa * pb / ll)])
ret_poly.poly.append(new_term)
return ret_poly
def subs(term, v):
total = 1
for a in term.a:
total *= v**a[1]
return term.coef * total
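# Note (illustrative): answer(w, h, s) below applies Polya/Burnside counting: substituting
# s for every variable a_i in the combined cycle index func(w, h) counts the w x h grids
# colored with s colors, up to independent row and column permutations.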
def answer(w, h, s):
poly = func(w, h)
total = 0
for term in poly.poly:
total += subs(term, s)
return str(total)
def table():
for i in range(1, 11):
for j in range(1, 11):
if i * j > 25:
continue
ans = answer(i, j, 2)
s = "ans[%s][%s] = %s;" % (i, j, ans)
print(s)
def main():
with open("out", "w") as f:
for i in range(1, 11):
for j in range(1, 11):
if i * j > 25:
continue
ans = answer(i, j, 2)
s = "%s\n" % (ans)
f.write(s)
if __name__ == "__main__":
table()
# main()
| mit | -4,697,172,123,889,366,000 | 19.994872 | 77 | 0.443332 | false | 3.24663 | false | false | false |
haylesr/angr | angr/analyses/cfg_base.py | 1 | 8889 | import networkx
import logging
from ..errors import AngrCFGError
l = logging.getLogger(name="angr.cfg_base")
class CFGBase(object):
"""
The base class for control flow graphs.
"""
def __init__(self, project, context_sensitivity_level):
self._project = project
# Initialization
self._graph = None
self._nodes = None
self._edge_map = None
self._loop_back_edges = None
self._overlapped_loop_headers = None
self._thumb_addrs = set()
if context_sensitivity_level < 0:
raise Exception("Unsupported context sensitivity level %d" % context_sensitivity_level)
self._context_sensitivity_level=context_sensitivity_level
def __contains__(self, cfg_node):
return cfg_node in self._graph
@property
def context_sensitivity_level(self):
return self._context_sensitivity_level
def _initialize_cfg(self):
"""
Re-create the DiGraph
"""
self._graph = networkx.DiGraph()
# pylint: disable=no-self-use
def copy(self):
raise Exception("Not implemented.")
def _construct(self):
raise Exception("Not implemented")
def output(self):
raise Exception("Not implemented")
# TODO: Mark as deprecated
def get_bbl_dict(self):
return self._nodes
def get_predecessors(self, cfgnode, excluding_fakeret=True):
"""
Get predecessors of a node on the control flow graph.
:param CFGNode cfgnode: The node
        :param bool excluding_fakeret: True if you want to exclude all predecessors that are connected to the node with
a fakeret edge.
:return: A list of predecessors
:rtype: list
"""
if not excluding_fakeret:
if cfgnode in self._graph:
return self._graph.predecessors(cfgnode)
else:
return []
else:
predecessors = []
for pred, _, data in self._graph.in_edges_iter([cfgnode], data=True):
jumpkind = data['jumpkind']
if jumpkind != 'Ijk_FakeRet':
predecessors.append(pred)
return predecessors
def get_successors(self, basic_block, excluding_fakeret=True):
if not excluding_fakeret:
if basic_block in self._graph:
return self._graph.successors(basic_block)
else:
return []
else:
successors = []
for _, suc, data in self._graph.out_edges_iter([basic_block], data=True):
jumpkind = data['jumpkind']
if jumpkind != 'Ijk_FakeRet':
successors.append(suc)
return successors
def get_successors_and_jumpkind(self, basic_block, excluding_fakeret=True):
successors = []
for _, suc, data in self._graph.out_edges_iter([basic_block], data=True):
if not excluding_fakeret or data['jumpkind'] != 'Ijk_FakeRet':
successors.append((suc, data['jumpkind']))
return successors
def get_all_predecessors(self, cfgnode):
"""
Get all predecessors of a specific node on the control flow graph.
:param CFGNode cfgnode: The CFGNode object
:return: A list of predecessors in the CFG
:rtype: list
"""
return networkx.dfs_predecessors(self._graph, cfgnode)
def get_all_successors(self, basic_block):
return networkx.dfs_successors(self._graph, basic_block)
def get_node(self, addr_tuple):
"""
Get a single node from node key.
:param addr_tuple: The node key
:return:
"""
if addr_tuple in self._nodes.keys():
return self._nodes[addr_tuple]
else:
return None
def nodes(self):
return self._graph.nodes()
def get_any_node(self, addr, is_syscall=None, anyaddr=False):
"""
        Get an arbitrary CFGNode (without considering their contexts) from our graph.
:param addr: Address of the beginning of the basic block. Set anyaddr to True to support arbitrary address.
:param is_syscall: Whether you want to get the syscall node or any other node. This is due to the fact that
                           syscall SimProcedures have the same address as the target it returns to.
None means get either, True means get a syscall node, False means get something that isn't
a syscall node.
:param anyaddr: If anyaddr is True, then addr doesn't have to be the beginning address of a basic block.
`anyaddr=True` makes more sense after the CFG is normalized.
:return: A CFGNode if there is any that satisfies given conditions, or None otherwise
"""
# TODO: Loop though self._nodes instead of self.graph.nodes()
# TODO: Of course, I should first fix the issue that .normalize() doesn't update self._nodes
for n in self.graph.nodes_iter():
cond = n.looping_times == 0
if anyaddr and n.size is not None:
cond = cond and (addr >= n.addr and addr < n.addr + n.size)
else:
cond = cond and (addr == n.addr)
if cond:
if is_syscall is None:
return n
if n.is_syscall == is_syscall:
return n
return None
def _get_irsb(self, cfg_node):
if cfg_node is None:
return None
if cfg_node.input_state is None:
raise AngrCFGError(
'You should save the input state when generating the CFG if you want to retrieve the SimIRSB later.')
# Recreate the SimIRSB
return self._project.factory.sim_run(cfg_node.input_state)
def irsb_from_node(self, cfg_node):
"""
Create SimRun from a CFGNode object.
"""
return self._get_irsb(cfg_node)
def get_any_irsb(self, addr):
"""
Returns a SimRun of a certain address. If there are many SimRuns with the same address in CFG,
return an arbitrary one.
You should never assume this method returns a specific one.
"""
cfg_node = self.get_any_node(addr)
return self._get_irsb(cfg_node)
def get_all_nodes(self, addr, is_syscall=None):
"""
        Get all CFGNodes whose address is the specified one.
:param addr: Address of the node
:param is_syscall: True returns the syscall node, False returns the normal CFGNode, None returns both
:return: all CFGNodes
"""
results = [ ]
for cfg_node in self._graph.nodes_iter():
if cfg_node.addr == addr:
                if is_syscall is None or is_syscall == cfg_node.is_syscall:
                    results.append(cfg_node)
return results
def get_all_irsbs(self, addr):
"""
Returns all SimRuns of a certain address, without considering contexts.
"""
nodes = self.get_all_nodes(addr)
results = [ ]
for n in nodes:
results.append(self._get_irsb(n))
return results
def get_loop_back_edges(self):
return self._loop_back_edges
def get_irsb_addr_set(self):
irsb_addr_set = set()
for tpl, _ in self._nodes:
irsb_addr_set.add(tpl[-1]) # IRSB address
return irsb_addr_set
def get_branching_nodes(self):
"""
        Returns all nodes that have an out degree >= 2
"""
nodes = set()
for n in self._graph.nodes():
if self._graph.out_degree(n) >= 2:
nodes.add(n)
return nodes
def get_exit_stmt_idx(self, src_block, dst_block):
"""
Get the corresponding exit statement ID for control flow to reach destination block from source block. The exit
statement ID was put on the edge when creating the CFG.
Note that there must be a direct edge between the two blocks, otherwise an exception will be raised.
:return: The exit statement ID
"""
if not self.graph.has_edge(src_block, dst_block):
raise AngrCFGError('Edge (%s, %s) does not exist in CFG' % (src_block, dst_block))
return self.graph[src_block][dst_block]['exit_stmt_idx']
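    # Illustrative usage sketch (``cfg``, ``src`` and ``dst`` are hypothetical; a direct
    # edge between the two nodes is required, otherwise AngrCFGError is raised):
    #   stmt_idx = cfg.get_exit_stmt_idx(src, dst)
    #   # typically an integer statement index, or 'default' for the block's default exit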
@property
def graph(self):
return self._graph
    def remove_edge(self, simrun_from, simrun_to):
        if self._graph.has_edge(simrun_from, simrun_to):
            self._graph.remove_edge(simrun_from, simrun_to)
def is_thumb_addr(self, addr):
return addr in self._thumb_addrs
| bsd-2-clause | 5,258,722,314,853,719,000 | 32.798479 | 119 | 0.58173 | false | 4.144056 | false | false | false |
dandb/hacklog | hacklog/entities.py | 1 | 3730 | from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from datetime import date, datetime
from session import Session
db = None
Base = declarative_base()
def enum(**enums):
return type('Enum', (), enums)
def create_db_engine(server):
global db
db = create_engine('sqlite:///' + server.dbFile)
def create_tables():
Base.metadata.create_all(db)
Session.configure(bind=db)
class EventLog(Base):
__tablename__ = 'eventLog'
date = Column('date', DateTime, primary_key=True)
username = Column('username', String, primary_key=True)
ipAddress = Column('ipAddress', String)
success = Column('success', Boolean)
server = Column('server', String)
def __init__(self, date, username, ipAddress, success, server):
self.date = date
self.username = username
self.ipAddress = ipAddress
self.success = success
self.server = server
class User(Base):
__tablename__ = 'users'
username = Column('username', String, primary_key=True)
date = Column('date', DateTime)
score = Column('score', Integer)
scareCount = Column('scareCount', Integer)
lastScareDate = Column('lastScareDate', DateTime)
def __init__(self, username, date, score):
self.username=username
self.date=date
self.score=score
self.scareCount=0
self.lastScareDate = date.today()
class Days(Base):
__tablename__ = 'days'
date = Column('date', DateTime, primary_key=True)
username = Column('username', String, primary_key=True)
profile = Column('profile', PickleType)
totalCount = Column('totalCount', Integer)
def __init__(self, date, username, profile, totalCount):
self.date=date
self.username=username
self.profile = profile
self.totalCount = totalCount
class Hours(Base):
__tablename__ = 'hours'
date = Column('date', DateTime, primary_key=True)
username = Column('username', String, primary_key=True)
profile = Column('profile', PickleType)
totalCount = Column('totalCount', Integer)
def __init__(self, date, username, profile, totalCount):
self.date=date
self.username=username
self.profile = profile
self.totalCount = totalCount
class Servers(Base):
__tablename__ = 'servers'
date = Column('date', DateTime, primary_key=True)
username = Column('username', String, primary_key=True)
profile = Column('profile', PickleType)
totalCount = Column('totalCount', Integer)
def __init__(self, date, username, profile, totalCount):
self.date=date
self.username=username
self.profile = profile
self.totalCount = totalCount
class IpAddress(Base):
__tablename__ = 'ipAddress'
date = Column('date', DateTime, primary_key=True)
username = Column('username', String, primary_key=True)
profile = Column('profile', PickleType)
totalCount = Column('totalCount', Integer)
def __init__(self, date, username, profile, totalCount):
self.date=date
self.username=username
self.profile = profile
self.totalCount = totalCount
@staticmethod
def checkIpForVpn(ip):
quadrantList = ip.split('.')
if quadrantList[0] == '10' and quadrantList[1] == '42':
return True
return False
@staticmethod
def checkIpForInternal(ip):
quadrantList = ip.split('.')
if quadrantList[0] == '10':
if quadrantList[1] == '24' or quadrantList[1] == '26':
return True
elif quadrantList[0] == '172' and quadrantList[1] == '16':
return True
return False
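    # Illustrative sketch of the classification above (addresses are made-up examples):
    #   IpAddress.checkIpForVpn('10.42.0.7')        -> True
    #   IpAddress.checkIpForInternal('172.16.5.9')  -> True
    #   IpAddress.checkIpForInternal('8.8.8.8')     -> False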
class SyslogMsg():
def __init__(self, data='', host='', port=0):
self.data = data
self.host = host
self.port = port
self.date = datetime.now()
class MailConf():
def __init__(self, emailTest=False):
self.emailTest = emailTest
| gpl-3.0 | 3,588,870,365,465,080,000 | 26.226277 | 64 | 0.680697 | false | 3.400182 | false | false | false |
bchappet/dnfpy | src/dnfpyUtils/optimisation/psoDNFLin.py | 1 | 3303 | from psoDNF import PSODNF
from pso import QtApp
from PyQt4 import QtGui
from dnfpyUtils.models.modelNSpike import ModelNSpike
import numpy as np
from dnfpyUtils.scenarios.scenarioRobustness import ScenarioRobustness
import dnfpy.controller.runner as runner
from dnfpyUtils.scenarios.scenarioSwitch import ScenarioSwitch
from dnfpyUtils.scenarios.scenarioNoise import ScenarioNoise
class PSODNFLin(PSODNF):
def getListParam(self):
#correspond to betaE,betaI,alphaE,alphaI
return ["iExc","iInh","wExc","wInh"]
def getBounds(self):
"""return (lowerBounds,upperBounds"""
z = 10e-6
lowerBounds = np.array([z,z,z,z])
upperBounds = np.array([10,1,1,2])
return (lowerBounds,upperBounds)
def getEvaluationParamsDict(self):
return dict(timeEnd=20,allowedTime=2)
def getConstantParamsDict(self):
return dict(size=49,lateral='dol',activation='step',model='spike')
def evaluate(self,indiv):
#TODO have a list of scenario
scenarioR = ScenarioRobustness()
scenarioS = ScenarioSwitch()
scenarioN = ScenarioNoise()
#indiv.update(self.constantParamsDict)
#print("evaluate %s"%indiv)
model = self.getModel(indiv)
timeEnd = self.evaluationParamsDict['timeEnd']
allowedTime = self.evaluationParamsDict['allowedTime']
(errorR,wellClusterizedR,time,convergenceR,maxNbAct,meanNbAct,elapsedTime,errorShapeR,compEmpty)\
= runner.launch(model, scenarioR, timeEnd,allowedTime)
if errorR < 1 and errorShapeR < 3. and convergenceR <30:
# #print("indiv %s"%indiv)
#print("error %s shape %s convergence %s"%(errorR,errorShapeR,convergenceR))
(errorS,wellClusterizedS,time,convergenceS,maxNbAct,meanNbAct,elapsedTime,errorShapeS,compEmpty)\
= runner.launch(model, scenarioS, 6.,allowedTime)
(errorN,wellClusterizedN,time,convergenceN,maxNbAct,meanNbAct,elapsedTime,errorShapeN,compEmpty)\
= runner.launch(model, scenarioN, timeEnd,allowedTime)
else:
            (errorS, wellClusterizedS, errorShapeS, convergenceS) = (10, 10, 10, 100)
            (errorN, wellClusterizedN, errorShapeN, convergenceN) = (10, 10, 10, 100)
#
if convergenceS == None:
convergenceS = 100
if convergenceR == None:
convergenceR = 100
if convergenceN == None:
convergenceN = 100
#
fitnessError = (errorR + errorS + errorN )/3.
fitnessCluster = (wellClusterizedR + wellClusterizedS + wellClusterizedN)/3.
fitnessShape = (errorShapeR + errorShapeS + wellClusterizedN)/3.
fitnessConv = (convergenceR + convergenceS + convergenceN)/3.
#print("error %s, conv %s, shape %s"%(fitnessError*10,fitnessConv/10.,fitnessShape))
return fitnessShape + fitnessError*10 + fitnessConv/10.
#return errorShapeN + errorN*10 + convergenceN/10.
if __name__ == "__main__":
import sys
app = QtGui.QApplication([""])
view = QtApp()
model = PSODNFLin(view,swarmSize=100,nbEvaluationMax=30000,nbThread=8)
view.setModel(model)
model.start()
sys.exit(app.exec_())
res = (model.bestX,model.bestFitness)
print(res)
| gpl-2.0 | 6,273,942,784,654,214,000 | 38.795181 | 109 | 0.665153 | false | 3.521322 | false | false | false |
oknuutti/visnav-py | visnav/algo/absnet.py | 1 | 6037 | import cv2
import numpy as np
import torch
from torchvision import transforms
from PIL import Image
from visnav.algo import tools
from visnav.algo.base import AlgorithmBase
from visnav.algo.image import ImageProc
from visnav.algo.tools import Stopwatch, PositioningException
from poseilluminet import PoseIllumiNet, PoseIllumiDataset
from visnav.settings import *
class AbsoluteNavigationNN(AlgorithmBase):
DEF_MODEL_NAME = 'rose-mob-v10.tar' # median ~28deg: v6, v8; ~25deg: v9, 27.5deg: v10
DEF_LUMINOSITY_THRESHOLD = 65
DEF_CROP_MARGIN = 10
DEF_MIN_PIXELS = int(np.pi * 50 ** 2 * 0.3)
DEF_ESTIMATE_THRESHOLD = False
def __init__(self, system_model, render_engine, obj_idx, model_name=None, use_cuda=True, verbose=True):
super(AbsoluteNavigationNN, self).__init__(system_model, render_engine, obj_idx)
self.model_name = model_name or AbsoluteNavigationNN.DEF_MODEL_NAME
self.model = None # lazy load
self.verbose = verbose
if use_cuda:
torch.cuda.current_device()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
self.device = torch.device("cpu")
def load_model(self, path=None):
path = path or os.path.join(DATA_DIR, self.model_name)
data = torch.load(path)
name = data.get('name', '')
if len(name) > 0:
assert name.split('-')[0] == self.system_model.mission_id, \
"wrong model loaded (%s) for current mission (%s)" % (name.split('-')[0], self.system_model.mission_id)
if self.verbose:
print("model '%s' loaded (%s: %s, nf=%d, do=%.1f), trained for %d epochs, validation loss %.3f" % (
path, name, data['arch'], data.get('features', 2048), data.get('dropout', 0.5),
data['epoch'], data.get('loss', np.nan),))
# referred from densepose-project
self.model = PoseIllumiNet(arch=data['arch'],
width_mult=data.get('width_mult', 1.0),
num_features=data.get('features', 2048),
dropout=data.get('dropout', 0.5))
for k in ('cost_fn.gamma', 'cost_fn.beta'):
data['model'].pop(k)
self.model.load_state_dict(data['model'])
# optimizer.load_state_dict(data['optimizer'])
self.model.to(self.device)
self.model.eval()
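    # Illustrative usage sketch (``system_model``, ``render_engine`` and the image are
    # hypothetical; the exact setup depends on the surrounding pipeline):
    #   nn = AbsoluteNavigationNN(system_model, render_engine, obj_idx=0)
    #   nn.load_model()                # loads DEF_MODEL_NAME from DATA_DIR by default
    #   nn.process(sce_img, None)      # estimates pose and updates the system model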
def process(self, orig_sce_img, outfile, rotate_sc=False, **kwargs):
# maybe load torch model
if self.model is None:
self.load_model()
if outfile is not None:
self.debug_filebase = outfile + ('n' if isinstance(orig_sce_img, str) else '')
# maybe load scene image
if isinstance(orig_sce_img, str):
orig_sce_img = self.load_target_image(orig_sce_img)
self.timer = Stopwatch()
self.timer.start()
if self.DEF_ESTIMATE_THRESHOLD:
threshold = ImageProc.optimal_threshold(None, orig_sce_img)
else:
threshold = self.DEF_LUMINOSITY_THRESHOLD
# detect target, get bounds
x, y, w, h = ImageProc.single_object_bounds(orig_sce_img, threshold=threshold,
crop_marg=self.DEF_CROP_MARGIN,
min_px=self.DEF_MIN_PIXELS, debug=DEBUG)
if x is None:
raise PositioningException('asteroid not detected in image')
# crop image
img_bw = ImageProc.crop_and_zoom_image(orig_sce_img, x, y, w, h, None, (224, 224))
# save cropped image in log archive
if BATCH_MODE and self.debug_filebase:
self.timer.stop()
cv2.imwrite(self.debug_filebase+'a.png', img_bw)
self.timer.start()
# massage input
input = cv2.cvtColor(img_bw, cv2.COLOR_GRAY2BGR)
input = Image.fromarray(input)
input = PoseIllumiDataset.eval_transform(input)[None, :, :, :].to(self.device, non_blocking=True)
# run model
with torch.no_grad():
output = self.model(input)
# massage output
output = output[0] if isinstance(output, (list, tuple)) else output
output = output.detach().cpu().numpy()
# check if estimated illumination direction is close or not
ill_est = self.model.illumination(output)[0]
r_ini, q_ini, ill_ini = self.system_model.get_cropped_system_scf(x, y, w, h)
if tools.angle_between_v(ill_est, ill_ini) > 10: # max 10 degree discrepancy accepted
print('bad illumination direction estimated, initial=%s, estimated=%s' % (ill_ini, ill_est))
# apply result
r_est = self.model.position(output)[0]
q_est = np.quaternion(*self.model.rotation(output)[0])
self.system_model.set_cropped_system_scf(x, y, w, h, r_est, q_est, rotate_sc=rotate_sc)
self.timer.stop()
if False:
r_est2, q_est2, ill_est2 = self.system_model.get_cropped_system_scf(x, y, w, h)
self.system_model.swap_values_with_real_vals()
r_real, q_real, ill_real = self.system_model.get_cropped_system_scf(x, y, w, h)
self.system_model.swap_values_with_real_vals()
print('compare q_est vs q_est2, q_real vs q_est, q_real vs q_est2')
# save result image
if BATCH_MODE and self.debug_filebase:
# save result in log archive
res_img = self.render(textures=False)
sce_img = cv2.resize(orig_sce_img, tuple(np.flipud(res_img.shape)))
cv2.imwrite(self.debug_filebase+'b.png', np.concatenate((sce_img, res_img), axis=1))
if DEBUG:
cv2.imshow('compare', np.concatenate((sce_img, res_img), axis=1))
cv2.waitKey()
| mit | -4,335,682,116,186,066,400 | 41.121429 | 123 | 0.574458 | false | 3.49971 | false | false | false |
CARocha/mapafinca | encuestas/migrations/0015_auto_20170502_2209.py | 1 | 2971 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('encuestas', '0014_encuesta_estacion'),
]
operations = [
migrations.CreateModel(
name='CostoGanaderia',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('costo', models.FloatField(verbose_name=b'Costo total en Moneda local')),
('encuesta', models.ForeignKey(to='encuestas.Encuesta')),
],
options={
'verbose_name_plural': 'Costo para ganaderia mayor y menor',
},
),
migrations.CreateModel(
name='CostoProcesamiento',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('costo', models.FloatField(verbose_name=b'Costo total en Moneda local')),
('encuesta', models.ForeignKey(to='encuestas.Encuesta')),
],
options={
'verbose_name_plural': 'Costo para productos procesados',
},
),
migrations.AlterModelOptions(
name='costofrutas',
options={'verbose_name_plural': 'Total Mz y costo para frutas familiar'},
),
migrations.AddField(
model_name='cultivostradicionales',
name='precio_consumido',
field=models.FloatField(null=True, blank=True),
),
migrations.AlterField(
model_name='distribuciontierra',
name='tierra',
field=models.IntegerField(verbose_name=b'20.1_Distribuci\xc3\xb3n de la tierra en la finca', choices=[(1, b'Bosque'), (2, b'Tacotal/Guamil/Machorra/Llano'), (3, b'Cultivo anual'), (4, b'Plantaci\xc3\xb3n forestal'), (5, b'Potrero'), (6, b'Pasto en asocio con \xc3\xa1rboles'), (7, b'Frutales'), (8, b'Cultivos en asocio')]),
),
migrations.AlterField(
model_name='duenono',
name='no',
field=models.IntegerField(choices=[(1, b'Arrendada'), (2, b'Promesa de venta'), (3, b'Prestada'), (4, b'Tierra Ind\xc3\xadgena/Comunal'), (5, b'Sin escritura'), (6, b'Colectivo/Cooperativa')]),
),
migrations.AlterField(
model_name='entrevistados',
name='cedula',
field=models.CharField(max_length=50, null=True, verbose_name=b'No. C\xc3\xa9dula/DPI', blank=True),
),
migrations.AlterField(
model_name='respuestano41',
name='agricola',
field=multiselectfield.db.fields.MultiSelectField(blank=True, max_length=7, null=True, choices=[(b'A', b'Falta de semilla'), (b'B', b'Mala calidad de la semilla'), (b'C', b'Falta de riego'), (b'D', b'Poca Tierra')]),
),
]
| mit | 678,143,037,716,315,300 | 44.015152 | 336 | 0.578256 | false | 3.422811 | false | false | false |
Parrot-Developers/alchemy | scripts/host.py | 1 | 1083 | #!/usr/bin/env python3
import sys
import platform
def getinfo(name):
if name == "OS":
val = platform.system().lower()
if "msys" in val or "mingw" in val:
return "windows"
return val
elif name == "ARCH":
is64bit = platform.architecture()[0] == "64bit"
val = platform.machine().lower()
if val.startswith("arm") or val == "aarch64" or val == "arm64":
return "aarch64" if is64bit else "arm"
elif val in ["i386", "i686", "amd64", "x86_64"]:
return "x64" if is64bit else "x86"
else:
sys.stderr.write("Unknown architecture: '%s'\n" % val)
return "unknown"
else:
return None
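# Illustrative sketch of expected values (they depend on the host machine):
#   getinfo("OS")   -> "linux", "darwin" or "windows"
#   getinfo("ARCH") -> "x64", "x86", "aarch64" or "arm"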
if __name__ == "__main__":
def main():
if len(sys.argv) != 2:
sys.stderr.write("Invalid number of arguments: %d\n" % (len(sys.argv) - 1))
sys.exit(1)
val = getinfo(sys.argv[1])
if val is None:
sys.stderr.write("Invalid argument '%s'\n" % sys.argv[1])
sys.exit(1)
print(val)
main()
| bsd-3-clause | -1,814,906,651,461,402,000 | 29.942857 | 87 | 0.519852 | false | 3.416404 | false | false | false |
mrocklin/streams | streamz/core.py | 1 | 56658 | from __future__ import absolute_import, division, print_function
from collections import deque, defaultdict
from datetime import timedelta
import functools
import logging
import six
import sys
import threading
from time import time
import weakref
import toolz
from tornado import gen
from tornado.locks import Condition
from tornado.ioloop import IOLoop
from tornado.queues import Queue
try:
from tornado.ioloop import PollIOLoop
except ImportError:
PollIOLoop = None # dropped in tornado 6.0
from collections.abc import Iterable
from .compatibility import get_thread_identity
from .orderedweakset import OrderedWeakrefSet
no_default = '--no-default--'
_global_sinks = set()
_html_update_streams = set()
thread_state = threading.local()
logger = logging.getLogger(__name__)
_io_loops = []
def get_io_loop(asynchronous=None):
if asynchronous:
return IOLoop.current()
if not _io_loops:
loop = IOLoop()
thread = threading.Thread(target=loop.start)
thread.daemon = True
thread.start()
_io_loops.append(loop)
return _io_loops[-1]
def identity(x):
return x
class RefCounter:
""" A counter to track references to data
This class is used to track how many nodes in the DAG are referencing
a particular element in the pipeline. When the count reaches zero,
then parties interested in knowing if data is done being processed are
notified
Parameters
----------
initial: int, optional
The initial value of the reference counter
cb: callable
The function to use a callback when the reference count reaches zero
loop: tornado.ioloop.IOLoop
The loop on which to create a callback when the reference count
reaches zero
"""
def __init__(self, initial=0, cb=None, loop=None):
self.loop = loop if loop else get_io_loop()
self.count = initial
self.cb = cb
def retain(self, n=1):
"""Retain the reference
Parameters
----------
n: The number of times to retain the reference
"""
self.count += n
def release(self, n=1):
"""Release the reference
If the reference count is equal to or less than zero, the callback, if
provided will added to the provided loop or default loop
Parameters
----------
n: The number of references to release
"""
self.count -= n
if self.count <= 0 and self.cb:
self.loop.add_callback(self.cb)
def __str__(self):
return '<RefCounter count={}>'.format(self.count)
__repr__ = __str__
class Stream(object):
""" A Stream is an infinite sequence of data.
Streams subscribe to each other passing and transforming data between them.
A Stream object listens for updates from upstream, reacts to these updates,
and then emits more data to flow downstream to all Stream objects that
subscribe to it. Downstream Stream objects may connect at any point of a
Stream graph to get a full view of the data coming off of that point to do
with as they will.
Parameters
----------
stream_name: str or None
This is the name of the stream.
asynchronous: boolean or None
Whether or not this stream will be used in asynchronous functions or
normal Python functions. Leave as None if you don't know.
True will cause operations like emit to return awaitable Futures
False will use an Event loop in another thread (starts it if necessary)
ensure_io_loop: boolean
Ensure that some IOLoop will be created. If asynchronous is None or
False then this will be in a separate thread, otherwise it will be
IOLoop.current
Examples
--------
>>> def inc(x):
... return x + 1
>>> source = Stream() # Create a stream object
>>> s = source.map(inc).map(str) # Subscribe to make new streams
>>> s.sink(print) # take an action whenever an element reaches the end
>>> L = list()
>>> s.sink(L.append) # or take multiple actions (streams can branch)
>>> for i in range(5):
... source.emit(i) # push data in at the source
'1'
'2'
'3'
'4'
'5'
>>> L # and the actions happen at the sinks
['1', '2', '3', '4', '5']
"""
_graphviz_shape = 'ellipse'
_graphviz_style = 'rounded,filled'
_graphviz_fillcolor = 'white'
_graphviz_orientation = 0
str_list = ['func', 'predicate', 'n', 'interval']
def __init__(self, upstream=None, upstreams=None, stream_name=None,
loop=None, asynchronous=None, ensure_io_loop=False):
self.downstreams = OrderedWeakrefSet()
if upstreams is not None:
self.upstreams = list(upstreams)
else:
self.upstreams = [upstream]
self._set_asynchronous(asynchronous)
self._set_loop(loop)
if ensure_io_loop and not self.loop:
self._set_asynchronous(False)
if self.loop is None and self.asynchronous is not None:
self._set_loop(get_io_loop(self.asynchronous))
for upstream in self.upstreams:
if upstream:
upstream.downstreams.add(self)
self.name = stream_name
def _set_loop(self, loop):
self.loop = None
if loop is not None:
self._inform_loop(loop)
else:
for upstream in self.upstreams:
if upstream and upstream.loop:
self.loop = upstream.loop
break
def _inform_loop(self, loop):
"""
Percolate information about an event loop to the rest of the stream
"""
if self.loop is not None:
if self.loop is not loop:
raise ValueError("Two different event loops active")
else:
self.loop = loop
for upstream in self.upstreams:
if upstream:
upstream._inform_loop(loop)
for downstream in self.downstreams:
if downstream:
downstream._inform_loop(loop)
def _set_asynchronous(self, asynchronous):
self.asynchronous = None
if asynchronous is not None:
self._inform_asynchronous(asynchronous)
else:
for upstream in self.upstreams:
if upstream and upstream.asynchronous:
self.asynchronous = upstream.asynchronous
break
def _inform_asynchronous(self, asynchronous):
"""
Percolate information about an event loop to the rest of the stream
"""
if self.asynchronous is not None:
if self.asynchronous is not asynchronous:
raise ValueError("Stream has both asynchronous and synchronous elements")
else:
self.asynchronous = asynchronous
for upstream in self.upstreams:
if upstream:
upstream._inform_asynchronous(asynchronous)
for downstream in self.downstreams:
if downstream:
downstream._inform_asynchronous(asynchronous)
def _add_upstream(self, upstream):
"""Add upstream to current upstreams, this method is overridden for
classes which handle stream specific buffers/caches"""
if self.upstreams == [None]:
self.upstreams[0] = upstream
else:
self.upstreams.append(upstream)
def _add_downstream(self, downstream):
"""Add downstream to current downstreams"""
self.downstreams.add(downstream)
def _remove_downstream(self, downstream):
"""Remove downstream from current downstreams"""
self.downstreams.remove(downstream)
def _remove_upstream(self, upstream):
"""Remove upstream from current upstreams, this method is overridden for
classes which handle stream specific buffers/caches"""
if len(self.upstreams) == 1:
            self.upstreams[0] = None
else:
self.upstreams.remove(upstream)
@classmethod
def register_api(cls, modifier=identity, attribute_name=None):
""" Add callable to Stream API
This allows you to register a new method onto this class. You can use
it as a decorator.::
>>> @Stream.register_api()
... class foo(Stream):
... ...
>>> Stream().foo(...) # this works now
It attaches the callable as a normal attribute to the class object. In
doing so it respects inheritance (all subclasses of Stream will also
get the foo attribute).
By default callables are assumed to be instance methods. If you like
you can include modifiers to apply before attaching to the class as in
the following case where we construct a ``staticmethod``.
>>> @Stream.register_api(staticmethod)
... class foo(Stream):
... ...
>>> Stream.foo(...) # Foo operates as a static method
You can also provide an optional ``attribute_name`` argument to control
the name of the attribute your callable will be attached as.
>>> @Stream.register_api(attribute_name="bar")
... class foo(Stream):
... ...
        >>> Stream().bar(...) # foo was actually attached as bar
"""
def _(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
name = attribute_name if attribute_name else func.__name__
setattr(cls, name, modifier(wrapped))
return func
return _
@classmethod
def register_plugin_entry_point(cls, entry_point, modifier=identity):
if hasattr(cls, entry_point.name):
raise ValueError(
f"Can't add {entry_point.name} from {entry_point.module_name} "
f"to {cls.__name__}: duplicate method name."
)
def stub(*args, **kwargs):
""" Entrypoints-based streamz plugin. Will be loaded on first call. """
node = entry_point.load()
if not issubclass(node, Stream):
raise TypeError(
f"Error loading {entry_point.name} "
f"from module {entry_point.module_name}: "
f"{node.__class__.__name__} must be a subclass of Stream"
)
if getattr(cls, entry_point.name).__name__ == "stub":
cls.register_api(
modifier=modifier, attribute_name=entry_point.name
)(node)
return node(*args, **kwargs)
cls.register_api(modifier=modifier, attribute_name=entry_point.name)(stub)
def start(self):
""" Start any upstream sources """
for upstream in self.upstreams:
upstream.start()
def __str__(self):
s_list = []
if self.name:
s_list.append('{}; {}'.format(self.name, self.__class__.__name__))
else:
s_list.append(self.__class__.__name__)
for m in self.str_list:
s = ''
at = getattr(self, m, None)
if at:
if not callable(at):
s = str(at)
elif hasattr(at, '__name__'):
s = getattr(self, m).__name__
elif hasattr(at.__class__, '__name__'):
s = getattr(self, m).__class__.__name__
else:
s = None
if s:
s_list.append('{}={}'.format(m, s))
if len(s_list) <= 2:
s_list = [term.split('=')[-1] for term in s_list]
text = "<"
text += s_list[0]
if len(s_list) > 1:
text += ': '
text += ', '.join(s_list[1:])
text += '>'
return text
__repr__ = __str__
def _ipython_display_(self, **kwargs): # pragma: no cover
try:
from ipywidgets import Output
from IPython.core.interactiveshell import InteractiveShell
except ImportError:
if hasattr(self, '_repr_html_'):
return self._repr_html_()
else:
return self.__repr__()
output = Output(_view_count=0)
output_ref = weakref.ref(output)
def update_cell(val):
output = output_ref()
if output is None:
return
with output:
content, *_ = InteractiveShell.instance().display_formatter.format(val)
output.outputs = ({'output_type': 'display_data',
'data': content,
'metadata': {}},)
s = self.map(update_cell)
_html_update_streams.add(s)
self.output_ref = output_ref
s_ref = weakref.ref(s)
def remove_stream(change):
output = output_ref()
if output is None:
return
if output._view_count == 0:
ss = s_ref()
ss.destroy()
_html_update_streams.remove(ss) # trigger gc
output.observe(remove_stream, '_view_count')
return output._ipython_display_(**kwargs)
def _emit(self, x, metadata=None):
"""
Push data into the stream at this point
Parameters
----------
x: any
an element of data
metadata: list[dict], optional
Various types of metadata associated with the data element in `x`.
ref: RefCounter
A reference counter used to check when data is done
"""
if metadata:
self._retain_refs(metadata, len(self.downstreams))
else:
metadata = []
result = []
for downstream in list(self.downstreams):
r = downstream.update(x, who=self, metadata=metadata)
if type(r) is list:
result.extend(r)
else:
result.append(r)
self._release_refs(metadata)
return [element for element in result if element is not None]
def emit(self, x, asynchronous=False, metadata=None):
""" Push data into the stream at this point
This is typically done only at source Streams but can theoretically be
done at any point
Parameters
----------
x: any
an element of data
asynchronous:
emit asynchronously
metadata: list[dict], optional
Various types of metadata associated with the data element in `x`.
ref: RefCounter
A reference counter used to check when data is done
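        Examples
        --------
        A minimal sketch, pushing one element through a small pipeline
        >>> source = Stream()
        >>> source.sink(print)
        >>> source.emit(123)
        123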
"""
ts_async = getattr(thread_state, 'asynchronous', False)
if self.loop is None or asynchronous or self.asynchronous or ts_async:
if not ts_async:
thread_state.asynchronous = True
try:
result = self._emit(x, metadata=metadata)
if self.loop:
return gen.convert_yielded(result)
finally:
thread_state.asynchronous = ts_async
else:
@gen.coroutine
def _():
thread_state.asynchronous = True
try:
result = yield self._emit(x, metadata=metadata)
finally:
del thread_state.asynchronous
raise gen.Return(result)
sync(self.loop, _)
def update(self, x, who=None, metadata=None):
return self._emit(x, metadata=metadata)
def gather(self):
""" This is a no-op for core streamz
This allows gather to be used in both dask and core streams
"""
return self
def connect(self, downstream):
""" Connect this stream to a downstream element.
Parameters
----------
downstream: Stream
The downstream stream to connect to
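        Examples
        --------
        A minimal sketch connecting two existing streams
        >>> source = Stream()
        >>> printer = Stream()
        >>> printer.sink(print)
        >>> source.connect(printer)
        >>> source.emit(123)
        123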
"""
self._add_downstream(downstream)
downstream._add_upstream(self)
def disconnect(self, downstream):
""" Disconnect this stream to a downstream element.
Parameters
----------
downstream: Stream
The downstream stream to disconnect from
"""
self._remove_downstream(downstream)
downstream._remove_upstream(self)
@property
def upstream(self):
if len(self.upstreams) != 1:
raise ValueError("Stream has multiple upstreams")
else:
return self.upstreams[0]
def destroy(self, streams=None):
"""
Disconnect this stream from any upstream sources
"""
if streams is None:
streams = self.upstreams
for upstream in list(streams):
upstream.downstreams.remove(self)
self.upstreams.remove(upstream)
def scatter(self, **kwargs):
from .dask import scatter
return scatter(self, **kwargs)
def remove(self, predicate):
""" Only pass through elements for which the predicate returns False """
return self.filter(lambda x: not predicate(x))
@property
def scan(self):
return self.accumulate
@property
def concat(self):
return self.flatten
def sink_to_list(self):
""" Append all elements of a stream to a list as they come in
Examples
--------
>>> source = Stream()
>>> L = source.map(lambda x: 10 * x).sink_to_list()
>>> for i in range(5):
... source.emit(i)
>>> L
[0, 10, 20, 30, 40]
"""
L = []
self.sink(L.append)
return L
def frequencies(self, **kwargs):
""" Count occurrences of elements """
def update_frequencies(last, x):
return toolz.assoc(last, x, last.get(x, 0) + 1)
return self.scan(update_frequencies, start={}, **kwargs)
def visualize(self, filename='mystream.png', **kwargs):
"""Render the computation of this object's task graph using graphviz.
Requires ``graphviz`` and ``networkx`` to be installed.
Parameters
----------
filename : str, optional
The name of the file to write to disk.
kwargs:
Graph attributes to pass to graphviz like ``rankdir="LR"``
"""
from .graph import visualize
return visualize(self, filename, **kwargs)
def to_dataframe(self, example):
""" Convert a stream of Pandas dataframes to a DataFrame
Examples
--------
>>> source = Stream()
>>> sdf = source.to_dataframe()
>>> L = sdf.groupby(sdf.x).y.mean().stream.sink_to_list()
>>> source.emit(pd.DataFrame(...)) # doctest: +SKIP
>>> source.emit(pd.DataFrame(...)) # doctest: +SKIP
>>> source.emit(pd.DataFrame(...)) # doctest: +SKIP
"""
from .dataframe import DataFrame
return DataFrame(stream=self, example=example)
def to_batch(self, **kwargs):
""" Convert a stream of lists to a Batch
All elements of the stream are assumed to be lists or tuples
Examples
--------
>>> source = Stream()
>>> batches = source.to_batch()
>>> L = batches.pluck('value').map(inc).sum().stream.sink_to_list()
>>> source.emit([{'name': 'Alice', 'value': 1},
... {'name': 'Bob', 'value': 2},
... {'name': 'Charlie', 'value': 3}])
>>> source.emit([{'name': 'Alice', 'value': 4},
... {'name': 'Bob', 'value': 5},
... {'name': 'Charlie', 'value': 6}])
"""
from .batch import Batch
return Batch(stream=self, **kwargs)
def _retain_refs(self, metadata, n=1):
""" Retain all references in the provided metadata `n` number of times
Parameters
----------
metadata: list[dict], optional
Various types of metadata associated with the data element in `x`.
ref: RefCounter
A reference counter used to check when data is done
n: The number of times to retain the provided references
"""
for m in metadata:
if 'ref' in m:
m['ref'].retain(n)
def _release_refs(self, metadata, n=1):
""" Release all references in the provided metadata `n` number of times
Parameters
----------
metadata: list[dict], optional
Various types of metadata associated with the data element in `x`.
ref: RefCounter
A reference counter used to check when data is done
n: The number of times to retain the provided references
"""
for m in metadata:
if 'ref' in m:
m['ref'].release(n)
@Stream.register_api()
class sink(Stream):
""" Apply a function on every element
Examples
--------
>>> source = Stream()
>>> L = list()
>>> source.sink(L.append)
>>> source.sink(print)
>>> source.sink(print)
>>> source.emit(123)
123
123
>>> L
[123]
See Also
--------
map
Stream.sink_to_list
"""
_graphviz_shape = 'trapezium'
def __init__(self, upstream, func, *args, **kwargs):
self.func = func
# take the stream specific kwargs out
stream_name = kwargs.pop("stream_name", None)
self.kwargs = kwargs
self.args = args
Stream.__init__(self, upstream, stream_name=stream_name)
_global_sinks.add(self)
def update(self, x, who=None, metadata=None):
result = self.func(x, *self.args, **self.kwargs)
if gen.isawaitable(result):
return result
else:
return []
@Stream.register_api()
class map(Stream):
""" Apply a function to every element in the stream
Parameters
----------
func: callable
*args :
The arguments to pass to the function.
**kwargs:
Keyword arguments to pass to func
Examples
--------
>>> source = Stream()
>>> source.map(lambda x: 2*x).sink(print)
>>> for i in range(5):
... source.emit(i)
0
2
4
6
8
"""
def __init__(self, upstream, func, *args, **kwargs):
self.func = func
# this is one of a few stream specific kwargs
stream_name = kwargs.pop('stream_name', None)
self.kwargs = kwargs
self.args = args
Stream.__init__(self, upstream, stream_name=stream_name)
def update(self, x, who=None, metadata=None):
try:
result = self.func(x, *self.args, **self.kwargs)
except Exception as e:
logger.exception(e)
raise
else:
return self._emit(result, metadata=metadata)
@Stream.register_api()
class starmap(Stream):
""" Apply a function to every element in the stream, splayed out
See ``itertools.starmap``
Parameters
----------
func: callable
*args :
The arguments to pass to the function.
**kwargs:
Keyword arguments to pass to func
Examples
--------
>>> source = Stream()
>>> source.starmap(lambda a, b: a + b).sink(print)
>>> for i in range(5):
... source.emit((i, i))
0
2
4
6
8
"""
def __init__(self, upstream, func, *args, **kwargs):
self.func = func
# this is one of a few stream specific kwargs
stream_name = kwargs.pop('stream_name', None)
self.kwargs = kwargs
self.args = args
Stream.__init__(self, upstream, stream_name=stream_name)
def update(self, x, who=None, metadata=None):
y = x + self.args
try:
result = self.func(*y, **self.kwargs)
except Exception as e:
logger.exception(e)
raise
else:
return self._emit(result, metadata=metadata)
def _truthy(x):
return not not x
@Stream.register_api()
class filter(Stream):
""" Only pass through elements that satisfy the predicate
Parameters
----------
predicate : function
The predicate. Should return True or False, where
True means that the predicate is satisfied.
*args :
The arguments to pass to the predicate.
**kwargs:
Keyword arguments to pass to predicate
Examples
--------
>>> source = Stream()
>>> source.filter(lambda x: x % 2 == 0).sink(print)
>>> for i in range(5):
... source.emit(i)
0
2
4
"""
def __init__(self, upstream, predicate, *args, **kwargs):
if predicate is None:
predicate = _truthy
self.predicate = predicate
stream_name = kwargs.pop("stream_name", None)
self.kwargs = kwargs
self.args = args
Stream.__init__(self, upstream, stream_name=stream_name)
def update(self, x, who=None, metadata=None):
if self.predicate(x, *self.args, **self.kwargs):
return self._emit(x, metadata=metadata)
@Stream.register_api()
class accumulate(Stream):
""" Accumulate results with previous state
This performs running or cumulative reductions, applying the function
to the previous total and the new element. The function should take
two arguments, the previous accumulated state and the next element and
it should return a new accumulated state,
- ``state = func(previous_state, new_value)`` (returns_state=False)
- ``state, result = func(previous_state, new_value)`` (returns_state=True)
where the new_state is passed to the next invocation. The state or result
is emitted downstream for the two cases.
Parameters
----------
func: callable
start: object
Initial value, passed as the value of ``previous_state`` on the first
invocation. Defaults to the first submitted element
returns_state: boolean
If true then func should return both the state and the value to emit
If false then both values are the same, and func returns one value
**kwargs:
Keyword arguments to pass to func
Examples
--------
A running total, producing triangular numbers
>>> source = Stream()
>>> source.accumulate(lambda acc, x: acc + x).sink(print)
>>> for i in range(5):
... source.emit(i)
0
1
3
6
10
A count of number of events (including the current one)
>>> source = Stream()
>>> source.accumulate(lambda acc, x: acc + 1, start=0).sink(print)
>>> for _ in range(5):
... source.emit(0)
1
2
3
4
5
Like the builtin "enumerate".
>>> source = Stream()
>>> source.accumulate(lambda acc, x: ((acc[0] + 1, x), (acc[0], x)),
... start=(0, 0), returns_state=True
... ).sink(print)
>>> for i in range(3):
... source.emit(0)
(0, 0)
(1, 0)
(2, 0)
"""
_graphviz_shape = 'box'
def __init__(self, upstream, func, start=no_default, returns_state=False,
**kwargs):
self.func = func
self.kwargs = kwargs
self.state = start
self.returns_state = returns_state
# this is one of a few stream specific kwargs
stream_name = kwargs.pop('stream_name', None)
self.with_state = kwargs.pop('with_state', False)
Stream.__init__(self, upstream, stream_name=stream_name)
def update(self, x, who=None, metadata=None):
if self.state is no_default:
self.state = x
if self.with_state:
return self._emit((self.state, x), metadata=metadata)
else:
return self._emit(x, metadata=metadata)
else:
try:
result = self.func(self.state, x, **self.kwargs)
except Exception as e:
logger.exception(e)
raise
if self.returns_state:
state, result = result
else:
state = result
self.state = state
if self.with_state:
return self._emit((self.state, result), metadata=metadata)
else:
return self._emit(result, metadata=metadata)
@Stream.register_api()
class slice(Stream):
"""
Get only some events in a stream by position. Works like list[] syntax.
Parameters
----------
start : int
        First event to use. If None, start from the beginning
end : int
Last event to use (non-inclusive). If None, continue without stopping.
Does not support negative indexing.
step : int
Pass on every Nth event. If None, pass every one.
Examples
--------
>>> source = Stream()
>>> source.slice(2, 6, 2).sink(print)
>>> for i in range(5):
    ...     source.emit(i)
2
4
"""
def __init__(self, upstream, start=None, end=None, step=None, **kwargs):
self.state = 0
self.star = start or 0
self.end = end
self.step = step or 1
if any((_ or 0) < 0 for _ in [start, end, step]):
raise ValueError("Negative indices not supported by slice")
stream_name = kwargs.pop('stream_name', None)
Stream.__init__(self, upstream, stream_name=stream_name)
self._check_end()
def update(self, x, who=None, metadata=None):
if self.state >= self.star and self.state % self.step == 0:
self.emit(x, metadata=metadata)
self.state += 1
self._check_end()
def _check_end(self):
if self.end and self.state >= self.end:
# we're done
for upstream in self.upstreams:
upstream._remove_downstream(self)
@Stream.register_api()
class partition(Stream):
""" Partition stream into tuples of equal size
Parameters
----------
n: int
Maximum partition size
timeout: int or float, optional
Number of seconds after which a partition will be emitted,
even if its size is less than ``n``. If ``None`` (default),
a partition will be emitted only when its size reaches ``n``.
key: hashable or callable, optional
Emit items with the same key together as a separate partition.
If ``key`` is callable, partition will be identified by ``key(x)``,
otherwise by ``x[key]``. Defaults to ``None``.
Examples
--------
>>> source = Stream()
>>> source.partition(3).sink(print)
>>> for i in range(10):
... source.emit(i)
(0, 1, 2)
(3, 4, 5)
(6, 7, 8)
>>> source = Stream()
>>> source.partition(2, key=lambda x: x % 2).sink(print)
>>> for i in range(4):
... source.emit(i)
(0, 2)
(1, 3)
>>> from time import sleep
>>> source = Stream()
>>> source.partition(5, timeout=1).sink(print)
>>> for i in range(3):
... source.emit(i)
>>> sleep(1)
(0, 1, 2)
"""
_graphviz_shape = 'diamond'
def __init__(self, upstream, n, timeout=None, key=None, **kwargs):
self.n = n
self._timeout = timeout
self._key = key
self._buffer = defaultdict(lambda: [])
self._metadata_buffer = defaultdict(lambda: [])
self._callbacks = {}
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
def _get_key(self, x):
if self._key is None:
return None
if callable(self._key):
return self._key(x)
return x[self._key]
@gen.coroutine
def _flush(self, key):
result, self._buffer[key] = self._buffer[key], []
metadata_result, self._metadata_buffer[key] = self._metadata_buffer[key], []
yield self._emit(tuple(result), list(metadata_result))
self._release_refs(metadata_result)
@gen.coroutine
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
key = self._get_key(x)
buffer = self._buffer[key]
metadata_buffer = self._metadata_buffer[key]
buffer.append(x)
if isinstance(metadata, list):
metadata_buffer.extend(metadata)
else:
metadata_buffer.append(metadata)
if len(buffer) == self.n:
if self._timeout is not None and self.n > 1:
self._callbacks[key].cancel()
yield self._flush(key)
return
if len(buffer) == 1 and self._timeout is not None:
self._callbacks[key] = self.loop.call_later(
self._timeout, self._flush, key
)
@Stream.register_api()
class sliding_window(Stream):
""" Produce overlapping tuples of size n
Parameters
----------
return_partial : bool
If True, yield tuples as soon as any events come in, each tuple being
smaller or equal to the window size. If False, only start yielding
tuples once a full window has accrued.
Examples
--------
>>> source = Stream()
>>> source.sliding_window(3, return_partial=False).sink(print)
>>> for i in range(8):
... source.emit(i)
(0, 1, 2)
(1, 2, 3)
(2, 3, 4)
(3, 4, 5)
(4, 5, 6)
(5, 6, 7)
"""
_graphviz_shape = 'diamond'
def __init__(self, upstream, n, return_partial=True, **kwargs):
self.n = n
self._buffer = deque(maxlen=n)
self.metadata_buffer = deque(maxlen=n)
self.partial = return_partial
Stream.__init__(self, upstream, **kwargs)
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
self._buffer.append(x)
if not isinstance(metadata, list):
metadata = [metadata]
self.metadata_buffer.append(metadata)
if self.partial or len(self._buffer) == self.n:
flat_metadata = [m for ml in self.metadata_buffer for m in ml]
ret = self._emit(tuple(self._buffer), flat_metadata)
if len(self.metadata_buffer) == self.n:
completed = self.metadata_buffer.popleft()
self._release_refs(completed)
return ret
else:
return []
def convert_interval(interval):
if isinstance(interval, str):
import pandas as pd
interval = pd.Timedelta(interval).total_seconds()
return interval
@Stream.register_api()
class timed_window(Stream):
""" Emit a tuple of collected results every interval
Every ``interval`` seconds this emits a tuple of all of the results
seen so far. This can help to batch data coming off of a high-volume
stream.
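    Examples
    --------
    An illustrative sketch; emissions depend on wall-clock timing, so this is
    not run as a doctest
    >>> source = Stream()
    >>> source.timed_window(0.1).sink(print)  # doctest: +SKIP
    >>> for i in range(4):
    ...     source.emit(i)  # doctest: +SKIP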
"""
_graphviz_shape = 'octagon'
def __init__(self, upstream, interval, **kwargs):
self.interval = convert_interval(interval)
self._buffer = []
self.metadata_buffer = []
self.last = gen.moment
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
self.loop.add_callback(self.cb)
def update(self, x, who=None, metadata=None):
self._buffer.append(x)
self._retain_refs(metadata)
self.metadata_buffer.append(metadata)
return self.last
@gen.coroutine
def cb(self):
while True:
L, self._buffer = self._buffer, []
metadata, self.metadata_buffer = self.metadata_buffer, []
m = [m for ml in metadata for m in ml]
self.last = self._emit(L, m)
self._release_refs(m)
yield self.last
yield gen.sleep(self.interval)
@Stream.register_api()
class delay(Stream):
""" Add a time delay to results """
_graphviz_shape = 'octagon'
def __init__(self, upstream, interval, **kwargs):
self.interval = convert_interval(interval)
self.queue = Queue()
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
self.loop.add_callback(self.cb)
@gen.coroutine
def cb(self):
while True:
last = time()
x, metadata = yield self.queue.get()
yield self._emit(x, metadata=metadata)
self._release_refs(metadata)
duration = self.interval - (time() - last)
if duration > 0:
yield gen.sleep(duration)
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
return self.queue.put((x, metadata))
@Stream.register_api()
class rate_limit(Stream):
""" Limit the flow of data
    This stops two elements from streaming through in an interval shorter
than the provided value.
Parameters
----------
interval: float
Time in seconds
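    Examples
    --------
    An illustrative sketch; the values pass through unchanged, only their
    timing changes, so this is not run as a doctest
    >>> source = Stream()
    >>> source.rate_limit(0.5).sink(print)  # doctest: +SKIP
    >>> for i in range(3):
    ...     source.emit(i)  # doctest: +SKIP
    0
    1
    2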
"""
_graphviz_shape = 'octagon'
def __init__(self, upstream, interval, **kwargs):
self.interval = convert_interval(interval)
self.next = 0
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
@gen.coroutine
def update(self, x, who=None, metadata=None):
now = time()
old_next = self.next
self.next = max(now, self.next) + self.interval
if now < old_next:
yield gen.sleep(old_next - now)
yield self._emit(x, metadata=metadata)
@Stream.register_api()
class buffer(Stream):
""" Allow results to pile up at this point in the stream
This allows results to buffer in place at various points in the stream.
This can help to smooth flow through the system when backpressure is
applied.
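    Examples
    --------
    An illustrative sketch; ``slow_sink`` is a hypothetical stand-in for any
    slow downstream callable
    >>> source = Stream()
    >>> source.buffer(100).sink(slow_sink)  # doctest: +SKIP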
"""
_graphviz_shape = 'diamond'
def __init__(self, upstream, n, **kwargs):
self.queue = Queue(maxsize=n)
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
self.loop.add_callback(self.cb)
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
return self.queue.put((x, metadata))
@gen.coroutine
def cb(self):
while True:
x, metadata = yield self.queue.get()
yield self._emit(x, metadata=metadata)
self._release_refs(metadata)
@Stream.register_api()
class zip(Stream):
""" Combine streams together into a stream of tuples
    We emit a new tuple once all streams have produced a new tuple.
See also
--------
combine_latest
zip_latest
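    Examples
    --------
    A minimal sketch
    >>> a = Stream()
    >>> b = Stream()
    >>> a.zip(b).sink(print)
    >>> a.emit(1)
    >>> b.emit('x')
    (1, 'x')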
"""
_graphviz_orientation = 270
_graphviz_shape = 'triangle'
def __init__(self, *upstreams, **kwargs):
self.maxsize = kwargs.pop('maxsize', 10)
self.condition = Condition()
self.literals = [(i, val) for i, val in enumerate(upstreams)
if not isinstance(val, Stream)]
self.buffers = {upstream: deque()
for upstream in upstreams
if isinstance(upstream, Stream)}
upstreams2 = [upstream for upstream in upstreams if isinstance(upstream, Stream)]
Stream.__init__(self, upstreams=upstreams2, **kwargs)
def _add_upstream(self, upstream):
# Override method to handle setup of buffer for new stream
self.buffers[upstream] = deque()
super(zip, self)._add_upstream(upstream)
def _remove_upstream(self, upstream):
# Override method to handle removal of buffer for stream
self.buffers.pop(upstream)
super(zip, self)._remove_upstream(upstream)
def pack_literals(self, tup):
""" Fill buffers for literals whenever we empty them """
inp = list(tup)[::-1]
out = []
for i, val in self.literals:
while len(out) < i:
out.append(inp.pop())
out.append(val)
while inp:
out.append(inp.pop())
return tuple(out)
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
L = self.buffers[who] # get buffer for stream
L.append((x, metadata))
if len(L) == 1 and all(self.buffers.values()):
vals = [self.buffers[up][0] for up in self.upstreams]
tup, md = __builtins__['zip'](*vals)
for buf in self.buffers.values():
buf.popleft()
self.condition.notify_all()
if self.literals:
tup = self.pack_literals(tup)
md = [m for ml in md for m in ml]
ret = self._emit(tup, md)
self._release_refs(md)
return ret
elif len(L) > self.maxsize:
return self.condition.wait()
@Stream.register_api()
class combine_latest(Stream):
""" Combine multiple streams together to a stream of tuples
This will emit a new tuple of all of the most recent elements seen from
any stream.
Parameters
----------
emit_on : stream or list of streams or None
only emit upon update of the streams listed.
If None, emit on update from any stream
See Also
--------
zip
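    Examples
    --------
    A minimal sketch
    >>> a = Stream()
    >>> b = Stream()
    >>> a.combine_latest(b).sink(print)
    >>> a.emit(1)
    >>> b.emit('x')
    (1, 'x')
    >>> a.emit(2)
    (2, 'x')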
"""
_graphviz_orientation = 270
_graphviz_shape = 'triangle'
def __init__(self, *upstreams, **kwargs):
emit_on = kwargs.pop('emit_on', None)
self._initial_emit_on = emit_on
self.last = [None for _ in upstreams]
self.metadata = [None for _ in upstreams]
self.missing = set(upstreams)
if emit_on is not None:
if not isinstance(emit_on, Iterable):
emit_on = (emit_on, )
emit_on = tuple(
upstreams[x] if isinstance(x, int) else x for x in emit_on)
self.emit_on = emit_on
else:
self.emit_on = upstreams
Stream.__init__(self, upstreams=upstreams, **kwargs)
def _add_upstream(self, upstream):
# Override method to handle setup of last and missing for new stream
self.last.append(None)
self.metadata.append(None)
self.missing.update([upstream])
super(combine_latest, self)._add_upstream(upstream)
if self._initial_emit_on is None:
self.emit_on = self.upstreams
def _remove_upstream(self, upstream):
# Override method to handle removal of last and missing for stream
if self.emit_on == upstream:
            raise RuntimeError("Can't remove the ``emit_on`` stream since that "
"would cause no data to be emitted. "
"Consider adding an ``emit_on`` first by "
"running ``node.emit_on=(upstream,)`` to add "
"a new ``emit_on`` or running "
"``node.emit_on=tuple(node.upstreams)`` to "
"emit on all incoming data")
self.last.pop(self.upstreams.index(upstream))
self.metadata.pop(self.upstreams.index(upstream))
self.missing.remove(upstream)
super(combine_latest, self)._remove_upstream(upstream)
if self._initial_emit_on is None:
self.emit_on = self.upstreams
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
idx = self.upstreams.index(who)
if self.metadata[idx]:
self._release_refs(self.metadata[idx])
self.metadata[idx] = metadata
if self.missing and who in self.missing:
self.missing.remove(who)
self.last[idx] = x
if not self.missing and who in self.emit_on:
tup = tuple(self.last)
md = [m for ml in self.metadata for m in ml]
return self._emit(tup, md)
@Stream.register_api()
class flatten(Stream):
""" Flatten streams of lists or iterables into a stream of elements
Examples
--------
>>> source = Stream()
>>> source.flatten().sink(print)
>>> for x in [[1, 2, 3], [4, 5], [6, 7, 7]]:
... source.emit(x)
1
2
3
4
5
6
    7
    7
See Also
--------
partition
"""
def update(self, x, who=None, metadata=None):
L = []
for i, item in enumerate(x):
if i == len(x) - 1:
y = self._emit(item, metadata=metadata)
else:
y = self._emit(item)
if type(y) is list:
L.extend(y)
else:
L.append(y)
return L
@Stream.register_api()
class unique(Stream):
""" Avoid sending through repeated elements
This deduplicates a stream so that only new elements pass through.
You can control how much of a history is stored with the ``maxsize=``
parameter. For example setting ``maxsize=1`` avoids sending through
elements when one is repeated right after the other.
Parameters
----------
maxsize: int or None, optional
number of stored unique values to check against
key : function, optional
Function which returns a representation of the incoming data.
For example ``key=lambda x: x['a']`` could be used to allow only
pieces of data with unique ``'a'`` values to pass through.
hashable : bool, optional
If True then data is assumed to be hashable, else it is not. This is
used for determining how to cache the history, if hashable then
either dicts or LRU caches are used, otherwise a deque is used.
Defaults to True.
Examples
--------
>>> source = Stream()
>>> source.unique(maxsize=1).sink(print)
>>> for x in [1, 1, 2, 2, 2, 1, 3]:
... source.emit(x)
1
2
1
3
"""
def __init__(self, upstream, maxsize=None, key=identity, hashable=True,
**kwargs):
self.key = key
self.maxsize = maxsize
if hashable:
self.seen = dict()
if self.maxsize:
from zict import LRU
self.seen = LRU(self.maxsize, self.seen)
else:
self.seen = []
Stream.__init__(self, upstream, **kwargs)
def update(self, x, who=None, metadata=None):
y = self.key(x)
emit = True
if isinstance(self.seen, list):
if y in self.seen:
self.seen.remove(y)
emit = False
self.seen.insert(0, y)
if self.maxsize:
del self.seen[self.maxsize:]
if emit:
return self._emit(x, metadata=metadata)
else:
if self.seen.get(y, '~~not_seen~~') == '~~not_seen~~':
self.seen[y] = 1
return self._emit(x, metadata=metadata)
@Stream.register_api()
class union(Stream):
""" Combine multiple streams into one
Every element from any of the upstreams streams will immediately flow
into the output stream. They will not be combined with elements from
other streams.
See also
--------
Stream.zip
Stream.combine_latest
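    Examples
    --------
    A minimal sketch
    >>> a = Stream()
    >>> b = Stream()
    >>> a.union(b).sink(print)
    >>> a.emit(1)
    1
    >>> b.emit(2)
    2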
"""
def __init__(self, *upstreams, **kwargs):
super(union, self).__init__(upstreams=upstreams, **kwargs)
def update(self, x, who=None, metadata=None):
return self._emit(x, metadata=metadata)
@Stream.register_api()
class pluck(Stream):
""" Select elements from elements in the stream.
Parameters
----------
    pick : object, list
The element(s) to pick from the incoming element in the stream
If an instance of list, will pick multiple elements.
Examples
--------
>>> source = Stream()
>>> source.pluck([0, 3]).sink(print)
>>> for x in [[1, 2, 3, 4], [4, 5, 6, 7], [8, 9, 10, 11]]:
... source.emit(x)
(1, 4)
(4, 7)
(8, 11)
>>> source = Stream()
>>> source.pluck('name').sink(print)
>>> for x in [{'name': 'Alice', 'x': 123}, {'name': 'Bob', 'x': 456}]:
... source.emit(x)
'Alice'
'Bob'
"""
def __init__(self, upstream, pick, **kwargs):
self.pick = pick
super(pluck, self).__init__(upstream, **kwargs)
def update(self, x, who=None, metadata=None):
if isinstance(self.pick, list):
return self._emit(tuple([x[ind] for ind in self.pick]),
metadata=metadata)
else:
return self._emit(x[self.pick], metadata=metadata)
@Stream.register_api()
class collect(Stream):
"""
Hold elements in a cache and emit them as a collection when flushed.
Examples
--------
>>> source1 = Stream()
>>> source2 = Stream()
>>> collector = collect(source1)
>>> collector.sink(print)
>>> source2.sink(collector.flush)
>>> source1.emit(1)
>>> source1.emit(2)
>>> source2.emit('anything') # flushes collector
...
[1, 2]
"""
def __init__(self, upstream, cache=None, metadata_cache=None, **kwargs):
if cache is None:
cache = deque()
self.cache = cache
if metadata_cache is None:
metadata_cache = deque()
self.metadata_cache = metadata_cache
Stream.__init__(self, upstream, **kwargs)
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
self.cache.append(x)
if metadata:
if isinstance(metadata, list):
self.metadata_cache.extend(metadata)
else:
self.metadata_cache.append(metadata)
def flush(self, _=None):
out = tuple(self.cache)
metadata = list(self.metadata_cache)
self._emit(out, metadata)
self._release_refs(metadata)
self.cache.clear()
self.metadata_cache.clear()
@Stream.register_api()
class zip_latest(Stream):
"""Combine multiple streams together to a stream of tuples
The stream which this is called from is lossless. All elements from
    the lossless stream are emitted regardless of when they came in.
This will emit a new tuple consisting of an element from the lossless
stream paired with the latest elements from the other streams.
    Elements are only emitted when an element on the lossless stream is
received, similar to ``combine_latest`` with the ``emit_on`` flag.
See Also
--------
Stream.combine_latest
Stream.zip
"""
def __init__(self, lossless, *upstreams, **kwargs):
upstreams = (lossless,) + upstreams
self.last = [None for _ in upstreams]
self.metadata = [None for _ in upstreams]
self.missing = set(upstreams)
self.lossless = lossless
self.lossless_buffer = deque()
Stream.__init__(self, upstreams=upstreams, **kwargs)
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
idx = self.upstreams.index(who)
if who is self.lossless:
self.lossless_buffer.append((x, metadata))
elif self.metadata[idx]:
self._release_refs(self.metadata[idx])
self.metadata[idx] = metadata
self.last[idx] = x
if self.missing and who in self.missing:
self.missing.remove(who)
if not self.missing:
L = []
while self.lossless_buffer:
self.last[0], self.metadata[0] = self.lossless_buffer.popleft()
md = [m for ml in self.metadata for m in ml]
L.append(self._emit(tuple(self.last), md))
self._release_refs(self.metadata[0])
return L
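# Usage sketch for ``zip_latest`` (illustrative only; assumes the streamz-style
# ``Stream`` API used throughout this module). Lossless elements are buffered
# until every upstream has produced at least one value, then each buffered
# element is paired with the latest values from the other streams:
#
#     lossless = Stream()
#     other = Stream()
#     lossless.zip_latest(other).sink(print)
#     lossless.emit(1)    # buffered; ``other`` has not emitted yet
#     other.emit('a')     # -> (1, 'a')
#     lossless.emit(2)    # -> (2, 'a')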
@Stream.register_api()
class latest(Stream):
""" Drop held-up data and emit the latest result
This allows you to skip intermediate elements in the stream if there is
some back pressure causing a slowdown. Use this when you only care about
the latest elements, and are willing to lose older data.
This passes through values without modification otherwise.
Examples
--------
>>> source.map(f).latest().map(g) # doctest: +SKIP
"""
_graphviz_shape = 'octagon'
def __init__(self, upstream, **kwargs):
self.condition = Condition()
self.next = []
self.next_metadata = None
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
self.loop.add_callback(self.cb)
def update(self, x, who=None, metadata=None):
if self.next_metadata:
self._release_refs(self.next_metadata)
self._retain_refs(metadata)
self.next = [x]
self.next_metadata = metadata
self.loop.add_callback(self.condition.notify)
@gen.coroutine
def cb(self):
while True:
yield self.condition.wait()
[x] = self.next
yield self._emit(x, self.next_metadata)
@Stream.register_api()
class to_kafka(Stream):
""" Writes data in the stream to Kafka
This stream accepts a string or bytes object. Call ``flush`` to ensure all
messages are pushed. Responses from Kafka are pushed downstream.
Parameters
----------
topic : string
        The topic to which to write
producer_config : dict
Settings to set up the stream, see
https://docs.confluent.io/current/clients/confluent-kafka-python/#configuration
https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
Examples:
bootstrap.servers: Connection string (host:port) to Kafka
Examples
--------
>>> from streamz import Stream
>>> ARGS = {'bootstrap.servers': 'localhost:9092'}
>>> source = Stream()
>>> kafka = source.map(lambda x: str(x)).to_kafka('test', ARGS)
<to_kafka>
>>> for i in range(10):
... source.emit(i)
>>> kafka.flush()
"""
def __init__(self, upstream, topic, producer_config, **kwargs):
import confluent_kafka as ck
self.topic = topic
self.producer = ck.Producer(producer_config)
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
self.stopped = False
self.polltime = 0.2
self.loop.add_callback(self.poll)
self.futures = []
@gen.coroutine
def poll(self):
while not self.stopped:
# executes callbacks for any delivered data, in this thread
# if no messages were sent, nothing happens
self.producer.poll(0)
yield gen.sleep(self.polltime)
def update(self, x, who=None, metadata=None):
future = gen.Future()
self.futures.append(future)
@gen.coroutine
def _():
while True:
try:
# this runs asynchronously, in C-K's thread
self.producer.produce(self.topic, x, callback=self.cb)
return
except BufferError:
yield gen.sleep(self.polltime)
except Exception as e:
future.set_exception(e)
return
self.loop.add_callback(_)
return future
@gen.coroutine
def cb(self, err, msg):
future = self.futures.pop(0)
if msg is not None and msg.value() is not None:
future.set_result(None)
yield self._emit(msg.value())
else:
future.set_exception(err or msg.error())
def flush(self, timeout=-1):
self.producer.flush(timeout)
def sync(loop, func, *args, **kwargs):
"""
    Run a coroutine in an IOLoop running in a separate thread and block until it completes.
"""
    # This was taken from distributed/utils.py
    # Tornado's PollIOLoop doesn't raise when used after being closed, so do it ourselves
if PollIOLoop and ((isinstance(loop, PollIOLoop) and getattr(loop, '_closing', False))
or (hasattr(loop, 'asyncio_loop') and loop.asyncio_loop._closed)):
raise RuntimeError("IOLoop is closed")
timeout = kwargs.pop('callback_timeout', None)
e = threading.Event()
main_tid = get_thread_identity()
result = [None]
error = [False]
@gen.coroutine
def f():
try:
if main_tid == get_thread_identity():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
thread_state.asynchronous = True
future = func(*args, **kwargs)
if timeout is not None:
future = gen.with_timeout(timedelta(seconds=timeout), future)
result[0] = yield future
except Exception:
error[0] = sys.exc_info()
finally:
thread_state.asynchronous = False
e.set()
loop.add_callback(f)
if timeout is not None:
if not e.wait(timeout):
raise gen.TimeoutError("timed out after %s s." % (timeout,))
else:
while not e.is_set():
e.wait(10)
if error[0]:
six.reraise(*error[0])
else:
return result[0]
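# Usage sketch for ``sync`` (illustrative only; assumes ``loop`` is a Tornado IOLoop
# already running in a background thread, as this library arranges elsewhere):
#
#     @gen.coroutine
#     def get_value():
#         yield gen.sleep(0.1)
#         raise gen.Return(42)
#
#     result = sync(loop, get_value)   # blocks the calling thread, returns 42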
| bsd-3-clause | -9,161,140,651,931,675,000 | 29.926856 | 90 | 0.566434 | false | 4.155945 | false | false | false |
TouK/vumi | vumi/blinkenlights/tests/test_metrics.py | 3 | 17835 | import time
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, Deferred
from vumi.blinkenlights import metrics
from vumi.tests.utils import get_stubbed_channel
from vumi.message import Message
from vumi.service import Worker
from vumi.tests.helpers import VumiTestCase, WorkerHelper
class TestMetricPublisher(VumiTestCase):
def setUp(self):
self.worker_helper = self.add_helper(WorkerHelper())
@inlineCallbacks
def start_publisher(self, publisher):
channel = yield get_stubbed_channel(self.worker_helper.broker)
publisher.start(channel)
def _sleep(self, delay):
d = Deferred()
reactor.callLater(delay, lambda: d.callback(None))
return d
def _check_msg(self, prefix, metric, values):
msgs = self.worker_helper.get_dispatched_metrics()
if values is None:
self.assertEqual(msgs, [])
return
[datapoint] = msgs[-1]
self.assertEqual(datapoint[0], prefix + metric.name)
self.assertEqual(datapoint[1], list(metric.aggs))
# check datapoints within 2s of now -- the truncating of
# time.time() to an int for timestamps can cause a 1s
# difference by itself
now = time.time()
self.assertTrue(all(abs(p[0] - now) < 2.0
for p in datapoint[2]),
"Not all datapoints near now (%f): %r"
% (now, datapoint))
self.assertEqual([dp[1] for dp in datapoint[2]], values)
@inlineCallbacks
def test_publish_single_metric(self):
publisher = metrics.MetricPublisher()
yield self.start_publisher(publisher)
msg = metrics.MetricMessage()
cnt = metrics.Count("my.count")
msg.append(
("vumi.test.%s" % (cnt.name,), cnt.aggs, [(time.time(), 1)]))
publisher.publish_message(msg)
self._check_msg("vumi.test.", cnt, [1])
def test_publisher_provides_interface(self):
publisher = metrics.MetricPublisher()
self.assertTrue(metrics.IMetricPublisher.providedBy(publisher))
class TestMetricManager(VumiTestCase):
def setUp(self):
self._next_publish = Deferred()
self.add_cleanup(lambda: self._next_publish.callback(None))
self.worker_helper = self.add_helper(WorkerHelper())
def on_publish(self, mm):
d, self._next_publish = self._next_publish, Deferred()
d.callback(mm)
def wait_publish(self):
return self._next_publish
@inlineCallbacks
def start_manager_as_publisher(self, manager):
channel = yield get_stubbed_channel(self.worker_helper.broker)
manager.start(channel)
self.add_cleanup(manager.stop)
def _sleep(self, delay):
d = Deferred()
reactor.callLater(delay, lambda: d.callback(None))
return d
def _check_msg(self, manager, metric, values):
msgs = self.worker_helper.get_dispatched_metrics()
if values is None:
self.assertEqual(msgs, [])
return
[datapoint] = msgs[-1]
self.assertEqual(datapoint[0], manager.prefix + metric.name)
self.assertEqual(datapoint[1], list(metric.aggs))
# check datapoints within 2s of now -- the truncating of
# time.time() to an int for timestamps can cause a 1s
# difference by itself
now = time.time()
self.assertTrue(all(abs(p[0] - now) < 2.0
for p in datapoint[2]),
"Not all datapoints near now (%f): %r"
% (now, datapoint))
self.assertEqual([dp[1] for dp in datapoint[2]], values)
@inlineCallbacks
def test_start_manager_no_publisher(self):
mm = metrics.MetricManager("vumi.test.")
self.assertEqual(mm._publisher, None)
self.assertEqual(mm._task, None)
channel = yield get_stubbed_channel(self.worker_helper.broker)
mm.start(channel)
self.add_cleanup(mm.stop)
self.assertIsInstance(mm._publisher, metrics.MetricPublisher)
self.assertNotEqual(mm._task, None)
@inlineCallbacks
def test_start_manager_publisher_and_channel(self):
publisher = metrics.MetricPublisher()
mm = metrics.MetricManager("vumi.test.", publisher=publisher)
self.assertEqual(mm._publisher, publisher)
self.assertEqual(mm._task, None)
channel = yield get_stubbed_channel(self.worker_helper.broker)
self.assertRaises(RuntimeError, mm.start, channel)
def test_start_polling_no_publisher(self):
mm = metrics.MetricManager("vumi.test.")
self.assertEqual(mm._publisher, None)
self.assertEqual(mm._task, None)
mm.start_polling()
self.add_cleanup(mm.stop_polling)
self.assertEqual(mm._publisher, None)
self.assertNotEqual(mm._task, None)
def test_start_polling_with_publisher(self):
publisher = metrics.MetricPublisher()
mm = metrics.MetricManager("vumi.test.", publisher=publisher)
self.assertEqual(mm._publisher, publisher)
self.assertEqual(mm._task, None)
mm.start_polling()
self.add_cleanup(mm.stop_polling)
self.assertEqual(mm._publisher, publisher)
self.assertNotEqual(mm._task, None)
def test_oneshot(self):
self.patch(time, "time", lambda: 12345)
mm = metrics.MetricManager("vumi.test.")
cnt = metrics.Count("my.count")
mm.oneshot(cnt, 3)
self.assertEqual(cnt.name, "my.count")
self.assertEqual(mm._oneshot_msgs, [
(cnt, [(12345, 3)]),
])
def test_register(self):
mm = metrics.MetricManager("vumi.test.")
cnt = mm.register(metrics.Count("my.count"))
self.assertEqual(cnt.name, "my.count")
self.assertEqual(mm._metrics, [cnt])
def test_double_register(self):
mm = metrics.MetricManager("vumi.test.")
mm.register(metrics.Count("my.count"))
self.assertRaises(metrics.MetricRegistrationError,
mm.register, metrics.Count("my.count"))
def test_lookup(self):
mm = metrics.MetricManager("vumi.test.")
cnt = mm.register(metrics.Count("my.count"))
self.assertTrue("my.count" in mm)
self.assertTrue(mm["my.count"] is cnt)
self.assertEqual(mm["my.count"].name, "my.count")
@inlineCallbacks
def test_publish_metrics_poll(self):
mm = metrics.MetricManager("vumi.test.", 0.1, self.on_publish)
cnt = mm.register(metrics.Count("my.count"))
yield self.start_manager_as_publisher(mm)
cnt.inc()
mm.publish_metrics()
self._check_msg(mm, cnt, [1])
@inlineCallbacks
def test_publish_metrics_oneshot(self):
mm = metrics.MetricManager("vumi.test.", 0.1, self.on_publish)
cnt = metrics.Count("my.count")
yield self.start_manager_as_publisher(mm)
mm.oneshot(cnt, 1)
mm.publish_metrics()
self._check_msg(mm, cnt, [1])
@inlineCallbacks
def test_start(self):
mm = metrics.MetricManager("vumi.test.", 0.1, self.on_publish)
cnt = mm.register(metrics.Count("my.count"))
yield self.start_manager_as_publisher(mm)
self.assertTrue(mm._task is not None)
self._check_msg(mm, cnt, None)
cnt.inc()
yield self.wait_publish()
self._check_msg(mm, cnt, [1])
cnt.inc()
cnt.inc()
yield self.wait_publish()
self._check_msg(mm, cnt, [1, 1])
@inlineCallbacks
def test_publish_metrics(self):
mm = metrics.MetricManager("vumi.test.", 0.1, self.on_publish)
cnt = metrics.Count("my.count")
yield self.start_manager_as_publisher(mm)
mm.oneshot(cnt, 1)
self.assertEqual(len(mm._oneshot_msgs), 1)
mm.publish_metrics()
self.assertEqual(mm._oneshot_msgs, [])
self._check_msg(mm, cnt, [1])
def test_publish_metrics_not_started_no_publisher(self):
mm = metrics.MetricManager("vumi.test.")
self.assertEqual(mm._publisher, None)
mm.oneshot(metrics.Count("my.count"), 1)
self.assertRaises(ValueError, mm.publish_metrics)
def test_stop_unstarted(self):
mm = metrics.MetricManager("vumi.test.", 0.1, self.on_publish)
mm.stop()
mm.stop() # Check that .stop() is idempotent.
@inlineCallbacks
def test_in_worker(self):
worker = yield self.worker_helper.get_worker(Worker, {}, start=False)
mm = yield worker.start_publisher(metrics.MetricManager,
"vumi.test.", 0.1, self.on_publish)
acc = mm.register(metrics.Metric("my.acc"))
try:
self.assertTrue(mm._task is not None)
self._check_msg(mm, acc, None)
acc.set(1.5)
acc.set(1.0)
yield self.wait_publish()
self._check_msg(mm, acc, [1.5, 1.0])
finally:
mm.stop()
@inlineCallbacks
def test_task_failure(self):
channel = yield get_stubbed_channel()
mm = metrics.MetricManager("vumi.test.", 0.1)
wait_error = Deferred()
class BadMetricError(Exception):
pass
class BadMetric(metrics.Metric):
def poll(self):
wait_error.callback(None)
raise BadMetricError("bad metric")
mm.register(BadMetric("bad"))
mm.start(channel)
yield wait_error
yield self._sleep(0) # allow log message to be processed
error, = self.flushLoggedErrors(BadMetricError)
self.assertTrue(error.type is BadMetricError)
class TestAggregators(VumiTestCase):
def test_sum(self):
self.assertEqual(metrics.SUM([]), 0.0)
self.assertEqual(metrics.SUM([1.0, 2.0]), 3.0)
self.assertEqual(metrics.SUM([2.0, 1.0]), 3.0)
self.assertEqual(metrics.SUM.name, "sum")
self.assertEqual(metrics.Aggregator.from_name("sum"), metrics.SUM)
def test_avg(self):
self.assertEqual(metrics.AVG([]), 0.0)
self.assertEqual(metrics.AVG([1.0, 2.0]), 1.5)
self.assertEqual(metrics.AVG([2.0, 1.0]), 1.5)
self.assertEqual(metrics.AVG.name, "avg")
self.assertEqual(metrics.Aggregator.from_name("avg"), metrics.AVG)
def test_min(self):
self.assertEqual(metrics.MIN([]), 0.0)
self.assertEqual(metrics.MIN([1.0, 2.0]), 1.0)
self.assertEqual(metrics.MIN([2.0, 1.0]), 1.0)
self.assertEqual(metrics.MIN.name, "min")
self.assertEqual(metrics.Aggregator.from_name("min"), metrics.MIN)
def test_max(self):
self.assertEqual(metrics.MAX([]), 0.0)
self.assertEqual(metrics.MAX([1.0, 2.0]), 2.0)
self.assertEqual(metrics.MAX([2.0, 1.0]), 2.0)
self.assertEqual(metrics.MAX.name, "max")
self.assertEqual(metrics.Aggregator.from_name("max"), metrics.MAX)
def test_last(self):
self.assertEqual(metrics.LAST([]), 0.0)
self.assertEqual(metrics.LAST([1.0, 2.0]), 2.0)
self.assertEqual(metrics.LAST([2.0, 1.0]), 1.0)
self.assertEqual(metrics.LAST.name, "last")
self.assertEqual(metrics.Aggregator.from_name("last"), metrics.LAST)
def test_already_registered(self):
self.assertRaises(metrics.AggregatorAlreadyDefinedError,
metrics.Aggregator, "sum", sum)
class CheckValuesMixin(object):
def _check_poll_base(self, metric, n):
datapoints = metric.poll()
# check datapoints within 2s of now -- the truncating of
# time.time() to an int for timestamps can cause a 1s
# difference by itself
now = time.time()
self.assertTrue(all(abs(d[0] - now) <= 2.0
for d in datapoints),
"Not all datapoints near now (%f): %r"
% (now, datapoints))
self.assertTrue(all(isinstance(d[0], (int, long)) for d in datapoints))
actual_values = [dp[1] for dp in datapoints]
return actual_values
def check_poll_func(self, metric, n, test):
actual_values = self._check_poll_base(metric, n)
self.assertEqual([test(v) for v in actual_values], [True] * n)
def check_poll(self, metric, expected_values):
n = len(expected_values)
actual_values = self._check_poll_base(metric, n)
self.assertEqual(actual_values, expected_values)
class TestMetric(VumiTestCase, CheckValuesMixin):
def test_manage(self):
mm = metrics.MetricManager("vumi.test.")
metric = metrics.Metric("foo")
metric.manage(mm)
self.assertEqual(metric.name, "foo")
mm2 = metrics.MetricManager("vumi.othertest.")
self.assertRaises(metrics.MetricRegistrationError, metric.manage,
mm2)
def test_managed(self):
metric = metrics.Metric("foo")
self.assertFalse(metric.managed)
mm = metrics.MetricManager("vumi.test.")
metric.manage(mm)
self.assertTrue(metric.managed)
def test_poll(self):
metric = metrics.Metric("foo")
self.check_poll(metric, [])
metric.set(1.0)
metric.set(2.0)
self.check_poll(metric, [1.0, 2.0])
class TestCount(VumiTestCase, CheckValuesMixin):
def test_inc_and_poll(self):
metric = metrics.Count("foo")
self.check_poll(metric, [])
metric.inc()
self.check_poll(metric, [1.0])
self.check_poll(metric, [])
metric.inc()
metric.inc()
self.check_poll(metric, [1.0, 1.0])
class TestTimer(VumiTestCase, CheckValuesMixin):
def patch_time(self, starting_value):
def fake_time():
return self._fake_time
self.patch(time, 'time', fake_time)
self._fake_time = starting_value
def incr_fake_time(self, value):
self._fake_time += value
def test_start_and_stop(self):
timer = metrics.Timer("foo")
self.patch_time(12345.0)
timer.start()
self.incr_fake_time(0.1)
timer.stop()
self.check_poll_func(timer, 1, lambda x: 0.09 < x < 0.11)
self.check_poll(timer, [])
def test_already_started(self):
timer = metrics.Timer("foo")
timer.start()
self.assertRaises(metrics.TimerAlreadyStartedError, timer.start)
def test_not_started(self):
timer = metrics.Timer("foo")
self.assertRaises(metrics.TimerNotStartedError, timer.stop)
def test_stop_and_stop(self):
timer = metrics.Timer("foo")
timer.start()
timer.stop()
self.assertRaises(metrics.TimerNotStartedError, timer.stop)
def test_double_start_and_stop(self):
timer = metrics.Timer("foo")
self.patch_time(12345.0)
timer.start()
self.incr_fake_time(0.1)
timer.stop()
timer.start()
self.incr_fake_time(0.1)
timer.stop()
self.check_poll_func(timer, 2, lambda x: 0.09 < x < 0.11)
self.check_poll(timer, [])
def test_context_manager(self):
timer = metrics.Timer("foo")
self.patch_time(12345.0)
with timer:
self.incr_fake_time(0.1) # feign sleep
self.check_poll_func(timer, 1, lambda x: 0.09 < x < 0.11)
self.check_poll(timer, [])
def test_accumulate_times(self):
timer = metrics.Timer("foo")
self.patch_time(12345.0)
with timer:
self.incr_fake_time(0.1) # feign sleep
with timer:
self.incr_fake_time(0.1) # feign sleep
self.check_poll_func(timer, 2, lambda x: 0.09 < x < 0.11)
self.check_poll(timer, [])
def test_timeit(self):
timer = metrics.Timer("foo")
self.patch_time(12345.0)
with timer.timeit():
self.incr_fake_time(0.1)
self.check_poll_func(timer, 1, lambda x: 0.09 < x < 0.11)
self.check_poll(timer, [])
def test_timeit_start_and_stop(self):
timer = metrics.Timer("foo")
self.patch_time(12345.0)
event_timer = timer.timeit()
event_timer.start()
self.incr_fake_time(0.1)
event_timer.stop()
self.check_poll_func(timer, 1, lambda x: 0.09 < x < 0.11)
self.check_poll(timer, [])
def test_timeit_start_and_start(self):
event_timer = metrics.Timer("foo").timeit()
event_timer.start()
self.assertRaises(metrics.TimerAlreadyStartedError, event_timer.start)
def test_timeit_stop_without_start(self):
event_timer = metrics.Timer("foo").timeit()
self.assertRaises(metrics.TimerNotStartedError, event_timer.stop)
def test_timeit_stop_and_stop(self):
event_timer = metrics.Timer("foo").timeit()
event_timer.start()
event_timer.stop()
self.assertRaises(metrics.TimerAlreadyStoppedError, event_timer.stop)
def test_timeit_autostart(self):
timer = metrics.Timer("foo")
self.patch_time(12345.0)
event_timer = timer.timeit(start=True)
self.incr_fake_time(0.1)
event_timer.stop()
self.check_poll_func(timer, 1, lambda x: 0.09 < x < 0.11)
self.check_poll(timer, [])
class TestMetricsConsumer(VumiTestCase):
def test_consume_message(self):
expected_datapoints = [
("vumi.test.v1", 1234, 1.0),
("vumi.test.v2", 3456, 2.0),
]
datapoints = []
callback = lambda *v: datapoints.append(v)
consumer = metrics.MetricsConsumer(None, callback)
msg = metrics.MetricMessage()
msg.extend(expected_datapoints)
vumi_msg = Message.from_json(msg.to_json())
consumer.consume_message(vumi_msg)
self.assertEqual(datapoints, expected_datapoints)
| bsd-3-clause | -3,814,275,998,903,326,700 | 34.67 | 79 | 0.605495 | false | 3.617647 | true | false | false |
MRtrix3/mrtrix3 | lib/mrtrix3/dwi2response/tax.py | 1 | 7374 | # Copyright (c) 2008-2021 the MRtrix3 contributors.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Covered Software is provided under this License on an "as is"
# basis, without warranty of any kind, either expressed, implied, or
# statutory, including, without limitation, warranties that the
# Covered Software is free of defects, merchantable, fit for a
# particular purpose or non-infringing.
# See the Mozilla Public License v. 2.0 for more details.
#
# For more details, see http://www.mrtrix.org/.
import math, os, shutil
from mrtrix3 import MRtrixError
from mrtrix3 import app, image, matrix, path, run
def usage(base_parser, subparsers): #pylint: disable=unused-variable
parser = subparsers.add_parser('tax', parents=[base_parser])
parser.set_author('Robert E. Smith ([email protected])')
parser.set_synopsis('Use the Tax et al. (2014) recursive calibration algorithm for single-fibre voxel selection and response function estimation')
parser.add_citation('Tax, C. M.; Jeurissen, B.; Vos, S. B.; Viergever, M. A. & Leemans, A. Recursive calibration of the fiber response function for spherical deconvolution of diffusion MRI data. NeuroImage, 2014, 86, 67-80')
parser.add_argument('input', help='The input DWI')
parser.add_argument('output', help='The output response function text file')
options = parser.add_argument_group('Options specific to the \'tax\' algorithm')
options.add_argument('-peak_ratio', type=float, default=0.1, help='Second-to-first-peak amplitude ratio threshold')
options.add_argument('-max_iters', type=int, default=20, help='Maximum number of iterations')
options.add_argument('-convergence', type=float, default=0.5, help='Percentile change in any RF coefficient required to continue iterating')
def check_output_paths(): #pylint: disable=unused-variable
app.check_output_path(app.ARGS.output)
def get_inputs(): #pylint: disable=unused-variable
pass
def needs_single_shell(): #pylint: disable=unused-variable
return True
def execute(): #pylint: disable=unused-variable
lmax_option = ''
if app.ARGS.lmax:
lmax_option = ' -lmax ' + app.ARGS.lmax
convergence_change = 0.01 * app.ARGS.convergence
progress = app.ProgressBar('Optimising')
iteration = 0
while iteration < app.ARGS.max_iters:
prefix = 'iter' + str(iteration) + '_'
# How to initialise response function?
# old dwi2response command used mean & standard deviation of DWI data; however
# this may force the output FODs to lmax=2 at the first iteration
# Chantal used a tensor with low FA, but it'd be preferable to get the scaling right
# Other option is to do as before, but get the ratio between l=0 and l=2, and
# generate l=4,6,... using that amplitude ratio
if iteration == 0:
rf_in_path = 'init_RF.txt'
mask_in_path = 'mask.mif'
# Grab the mean and standard deviation across all volumes in a single mrstats call
# Also scale them to reflect the fact that we're moving to the SH basis
image_stats = image.statistics('dwi.mif', mask='mask.mif', allvolumes=True)
mean = image_stats.mean * math.sqrt(4.0 * math.pi)
std = image_stats.std * math.sqrt(4.0 * math.pi)
# Now produce the initial response function
# Let's only do it to lmax 4
init_rf = [ str(mean), str(-0.5*std), str(0.25*std*std/mean) ]
with open('init_RF.txt', 'w') as init_rf_file:
init_rf_file.write(' '.join(init_rf))
else:
rf_in_path = 'iter' + str(iteration-1) + '_RF.txt'
mask_in_path = 'iter' + str(iteration-1) + '_SF.mif'
# Run CSD
run.command('dwi2fod csd dwi.mif ' + rf_in_path + ' ' + prefix + 'FOD.mif -mask ' + mask_in_path)
# Get amplitudes of two largest peaks, and directions of largest
run.command('fod2fixel ' + prefix + 'FOD.mif ' + prefix + 'fixel -peak peaks.mif -mask ' + mask_in_path + ' -fmls_no_thresholds')
app.cleanup(prefix + 'FOD.mif')
run.command('fixel2voxel ' + prefix + 'fixel/peaks.mif none ' + prefix + 'amps.mif')
run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'first_peaks.mif -coord 3 0 -axes 0,1,2')
run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'second_peaks.mif -coord 3 1 -axes 0,1,2')
app.cleanup(prefix + 'amps.mif')
run.command('fixel2peaks ' + prefix + 'fixel/directions.mif ' + prefix + 'first_dir.mif -number 1')
app.cleanup(prefix + 'fixel')
# Revise single-fibre voxel selection based on ratio of tallest to second-tallest peak
run.command('mrcalc ' + prefix + 'second_peaks.mif ' + prefix + 'first_peaks.mif -div ' + prefix + 'peak_ratio.mif')
app.cleanup(prefix + 'first_peaks.mif')
app.cleanup(prefix + 'second_peaks.mif')
run.command('mrcalc ' + prefix + 'peak_ratio.mif ' + str(app.ARGS.peak_ratio) + ' -lt ' + mask_in_path + ' -mult ' + prefix + 'SF.mif -datatype bit')
app.cleanup(prefix + 'peak_ratio.mif')
# Make sure image isn't empty
sf_voxel_count = image.statistics(prefix + 'SF.mif', mask=prefix+'SF.mif').count
if not sf_voxel_count:
raise MRtrixError('Aborting: All voxels have been excluded from single-fibre selection')
# Generate a new response function
run.command('amp2response dwi.mif ' + prefix + 'SF.mif ' + prefix + 'first_dir.mif ' + prefix + 'RF.txt' + lmax_option)
app.cleanup(prefix + 'first_dir.mif')
new_rf = matrix.load_vector(prefix + 'RF.txt')
progress.increment('Optimising (' + str(iteration+1) + ' iterations, ' + str(sf_voxel_count) + ' voxels, RF: [ ' + ', '.join('{:.3f}'.format(n) for n in new_rf) + '] )')
# Detect convergence
# Look for a change > some percentage - don't bother looking at the masks
if iteration > 0:
old_rf = matrix.load_vector(rf_in_path)
reiterate = False
for old_value, new_value in zip(old_rf, new_rf):
mean = 0.5 * (old_value + new_value)
diff = math.fabs(0.5 * (old_value - new_value))
ratio = diff / mean
if ratio > convergence_change:
reiterate = True
if not reiterate:
run.function(shutil.copyfile, prefix + 'RF.txt', 'response.txt')
run.function(shutil.copyfile, prefix + 'SF.mif', 'voxels.mif')
break
app.cleanup(rf_in_path)
app.cleanup(mask_in_path)
iteration += 1
progress.done()
# If we've terminated due to hitting the iteration limiter, we still need to copy the output file(s) to the correct location
if os.path.exists('response.txt'):
app.console('Exited at iteration ' + str(iteration+1) + ' with ' + str(sf_voxel_count) + ' SF voxels due to unchanged RF coefficients')
else:
app.console('Exited after maximum ' + str(app.ARGS.max_iters) + ' iterations with ' + str(sf_voxel_count) + ' SF voxels')
run.function(shutil.copyfile, 'iter' + str(app.ARGS.max_iters-1) + '_RF.txt', 'response.txt')
run.function(shutil.copyfile, 'iter' + str(app.ARGS.max_iters-1) + '_SF.mif', 'voxels.mif')
run.function(shutil.copyfile, 'response.txt', path.from_user(app.ARGS.output, False))
if app.ARGS.voxels:
run.command('mrconvert voxels.mif ' + path.from_user(app.ARGS.voxels), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE)
| mpl-2.0 | 183,440,786,461,445,730 | 48.16 | 226 | 0.676024 | false | 3.167526 | false | false | false |
MartinThoma/LaTeX-examples | documents/Programmierparadigmen/scripts/python/n-damen.py | 1 | 1249 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def get_next(n, i, damen_pos):
for i in range(n):
candidates = set(list(range(n)))
candidates -= set(damen_pos)
candidates -= set(list(range(damen_pos[i]+1)))
candidates = list(candidates)
if len(candidates) > 0:
damen_pos[i] = candidates[0]
return i, damen_pos
else:
damen_pos = damen_pos[0:i] + [0]*(n-i)
i -= 1
def is_attacked(damen, x, y):
""" Wird das Feld (x,y) von einer der Damen angegriffen? """
for dy, dx in enumerate(damen[:y]):
if dx == x or dy == y or abs(x-dx) == abs(y-dy):
return True
return False
def finde_loesung(n):
""" Platziere n Damen so auf einem n×n Feld,
sodass sich keine Damen schlagen.
"""
    # damen[i] is the x-position of queen i in row i
damen = [0]*n
i = 1
solutions = []
while 0 <= i < n:
while not is_attacked(damen, damen[i], i):
if i == n-1:
yield damen
break
i += 1
i, damen = get_next(n, i, damen)
def alle_loesungen(n):
generator = finde_loesung(n)
return list(generator)
print(len(alle_loesungen(11)))
| mit | 5,815,788,792,761,971,000 | 25 | 64 | 0.52484 | false | 2.791946 | false | false | false |
valdergallo/pyconst | pyconst/const.py | 1 | 2021 | # encoding: utf-8
from __future__ import unicode_literals, absolute_import
from .slug import slugify as s
from .slug import slugify_attr as s_attr
import enum
class PyConstString(str):
def __new__(cls, name=None, value=None):
if not value:
value = name
else:
value = s(value)
obj = str.__new__(cls, value)
obj.name = name
obj.label = name
obj.value = value
return obj
class Const(object):
def __init__(self, *args, **kwargs):
self.__data = ()
for value in args:
self.add(value)
for name, attr in kwargs.items():
self.add(name, attr)
def __set_iter_value(self, iter_value):
attr, value, name = (None,) * 3
if len(iter_value) == 1:
attr = iter_value[0]
elif len(iter_value) == 2:
attr, value = iter_value
elif len(iter_value) == 3:
attr, value, name = iter_value
elif len(iter_value) > 3:
name = iter_value[2]
value = iter_value[1]
attr = iter_value[0]
return attr, value, name
def to_enum(self):
return enum.Enum('DynamicEnum', {i[0]:i[0] for i in self})
def add(self, attr, value=None, name=None):
"Set values in constant"
if isinstance(attr, tuple) or isinstance(attr, list):
attr, value, name = self.__set_iter_value(attr)
if attr is None:
attr = name
if value is None:
value = attr
if name is None:
name = attr
self.__data += (PyConstString(name=name, value=value),)
# set attribute as slugfiy
self.__dict__[s_attr(attr)] = self.__data[-1]
def __getitem__(self, index):
"Get index item"
return (self.__data[index], self.__data[index].name)
def __iter__(self):
"Lazy return"
return ((i, i.name) for i in self.__data)
def __len__(self):
return len(self.__data)
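# Usage sketch (illustrative only), showing the forms accepted by ``Const``:
# a bare attribute name, an (attr, value) pair, or a keyword argument.
#
#     colors = Const('red', ('blue', 'Blue color'), green='Green color')
#     colors.red       # -> 'red'
#     colors.blue      # -> slugified form of 'Blue color'
#     for value, name in colors:
#         ...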
| gpl-3.0 | -344,034,119,650,024,000 | 25.246753 | 66 | 0.528946 | false | 3.715074 | false | false | false |
Kwpolska/pkgbuilder | pkgbuilder/package.py | 1 | 5090 | # -*- encoding: utf-8 -*-
# PKGBUILDer v4.3.0
# An AUR helper (and library) in Python 3.
# Copyright © 2011-2021, Chris Warrick.
# See /LICENSE for licensing information.
"""
The Package class, the most important class in PKGBUILDer.
:Copyright: © 2011-2021, Chris Warrick.
:License: BSD (see /LICENSE).
"""
from . import UTC, DS
from .exceptions import SanityError
import datetime
__all__ = ('Package', 'AURPackage', 'ABSPackage')
def mktime(ts):
return datetime.datetime.utcfromtimestamp(ts).replace(tzinfo=UTC)
class Package(object):
"""The base class for packages."""
is_abs = None
name = ''
version = ''
description = ''
repo = ''
url = ''
licenses = []
human = ''
depends = []
optdepends = []
conflicts = []
provides = []
replaces = []
groups = []
def __init__(self, **kwargs):
"""Initialize the class."""
for k, v in kwargs.items():
setattr(self, k, v)
def __str__(self):
"""Return something nice for people wanting a string."""
return '-'.join((self.name, self.version))
def __repr__(self):
"""Return something nice for people wanting a repr."""
        if self.is_abs is True:
            return '<Repository Package {0}-{1}>'.format(self.name, self.version)
        elif self.is_abs is False:
            return '<AUR Package {0}-{1}>'.format(self.name, self.version)
        elif self.is_abs is None:
            return '<??? Package {0}-{1}>'.format(self.name, self.version)
        else:
            raise SanityError('is_abs is invalid ({0})'.format(self.is_abs),
                              'Package.__repr__()', is_abs=self.is_abs)
class AURPackage(Package):
"""An AUR package."""
repo = 'aur'
id = None
packagebase = ''
packagebaseid = None
makedepends = []
checkdepends = []
is_abs = False
is_outdated = None
outdated_since = None
added = None
modified = None
votes = None
urlpath = ''
popularity = None
keywords = []
@classmethod
def from_aurdict(cls, aurdict):
"""Create an instance of AURPackage from AUR RPC data."""
bindings = {'Description': 'description',
'ID': 'id',
'Maintainer': 'human',
'Name': 'name',
'NumVotes': 'votes',
'URL': 'url',
'Version': 'version',
'PackageBase': 'packagebase',
'PackageBaseID': 'packagebaseid',
'Depends': 'depends',
'MakeDepends': 'makedepends',
'CheckDepends': 'checkdepends',
'OptDepends': 'optdepends',
'Conflicts': 'conflicts',
'Provides': 'provides',
'Replaces': 'replaces',
'Groups': 'groups',
'License': 'licenses',
'URLPath': 'urlpath',
'Popularity': 'popularity',
'Keywords': 'keywords',
}
ignore = ['OutOfDate', 'FirstSubmitted', 'LastModified']
p = cls()
for k, v in aurdict.items():
try:
if v is not None:
setattr(p, bindings[k], v)
except KeyError:
if k not in ignore:
DS.log.warning('AURDict has an unknown %s key: %s',
k, aurdict)
# Manual overrides.
p.is_outdated = aurdict['OutOfDate'] is not None
if p.is_outdated:
p.outdated_since = mktime(aurdict['OutOfDate'])
else:
p.outdated_since = None
p.added = mktime(aurdict['FirstSubmitted'])
p.modified = mktime(aurdict['LastModified'])
return p
class ABSPackage(Package):
"""A repository package (formerly ABS)."""
is_abs = True
# Most of those aren’t necessary, but I am copying them over because I can.
arch = ''
backup = []
base64_sig = None
builddate = None
deltas = []
download_size = None
filename = ''
files = []
has_scriptlet = None
installdate = None
isize = None
md5sum = ''
reason = []
sha256sum = ''
size = None
@classmethod
def from_pyalpm(cls, alpmpkg):
"""Transform a pyalpm.Package into a pkgbuilder.package.ABSPackage."""
copy = ['arch', 'backup', 'base64_sig', 'conflicts', 'deltas',
'depends', 'download_size', 'filename', 'files', 'groups',
'has_scriptlet', 'isize', 'licenses', 'md5sum', 'name',
'optdepends', 'provides', 'reason', 'replaces', 'sha256sum',
'size', 'url', 'version']
p = cls()
for i in copy:
setattr(p, i, getattr(alpmpkg, i))
p.repo = alpmpkg.db.name
p.description = alpmpkg.desc
p.human = alpmpkg.packager
p.builddate = mktime(alpmpkg.builddate)
p.installdate = mktime(alpmpkg.installdate)
return p
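# Illustrative sketch (hypothetical values): building an AURPackage from an
# AUR-RPC-style dict. Only keys actually read by ``from_aurdict`` are shown;
# 'OutOfDate', 'FirstSubmitted' and 'LastModified' must be present.
#
#     pkg = AURPackage.from_aurdict({
#         'Name': 'somepkg', 'Version': '1.0.0', 'Description': 'Example.',
#         'Maintainer': 'someone', 'NumVotes': 10, 'URL': 'https://example.com',
#         'OutOfDate': None, 'FirstSubmitted': 1500000000,
#         'LastModified': 1600000000,
#     })
#     repr(pkg)   # -> '<AUR Package somepkg-1.0.0>'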
| bsd-3-clause | -633,215,422,135,197,200 | 28.229885 | 81 | 0.518482 | false | 3.9033 | false | false | false |
mila-udem/blocks-extras | tests/bricks/test_attention.py | 1 | 4696 | import numpy
import theano
from numpy.testing import assert_allclose
from theano import tensor
from blocks.bricks import Identity
from blocks.bricks.recurrent import SimpleRecurrent
from blocks.bricks.attention import SequenceContentAttention
from blocks.initialization import IsotropicGaussian, Constant
from blocks_extras.bricks.attention2 import AttentionRecurrent
from blocks.graph import ComputationGraph
from blocks.select import Selector
def test_attention_recurrent():
rng = numpy.random.RandomState(1234)
dim = 5
batch_size = 4
input_length = 20
attended_dim = 10
attended_length = 15
wrapped = SimpleRecurrent(dim, Identity())
attention = SequenceContentAttention(
state_names=wrapped.apply.states,
attended_dim=attended_dim, match_dim=attended_dim)
recurrent = AttentionRecurrent(wrapped, attention, seed=1234)
recurrent.weights_init = IsotropicGaussian(0.5)
recurrent.biases_init = Constant(0)
recurrent.initialize()
attended = tensor.tensor3("attended")
attended_mask = tensor.matrix("attended_mask")
inputs = tensor.tensor3("inputs")
inputs_mask = tensor.matrix("inputs_mask")
outputs = recurrent.apply(
inputs=inputs, mask=inputs_mask,
attended=attended, attended_mask=attended_mask)
states, glimpses, weights = outputs
assert states.ndim == 3
assert glimpses.ndim == 3
assert weights.ndim == 3
# For values.
def rand(size):
return rng.uniform(size=size).astype(theano.config.floatX)
# For masks.
def generate_mask(length, batch_size):
mask = numpy.ones((length, batch_size), dtype=theano.config.floatX)
# To make it look like read data
for i in range(batch_size):
mask[1 + rng.randint(0, length - 1):, i] = 0.0
return mask
input_vals = rand((input_length, batch_size, dim))
input_mask_vals = generate_mask(input_length, batch_size)
attended_vals = rand((attended_length, batch_size, attended_dim))
attended_mask_vals = generate_mask(attended_length, batch_size)
func = theano.function([inputs, inputs_mask, attended, attended_mask],
[states, glimpses, weights])
states_vals, glimpses_vals, weight_vals = func(
input_vals, input_mask_vals,
attended_vals, attended_mask_vals)
assert states_vals.shape == (input_length, batch_size, dim)
assert glimpses_vals.shape == (input_length, batch_size, attended_dim)
assert (len(ComputationGraph(outputs).shared_variables) ==
len(Selector(recurrent).get_parameters()))
# Manual reimplementation
inputs2d = tensor.matrix()
states2d = tensor.matrix()
mask1d = tensor.vector()
weighted_averages = tensor.matrix()
distribute_func = theano.function(
[inputs2d, weighted_averages],
recurrent.distribute.apply(
inputs=inputs2d,
weighted_averages=weighted_averages))
wrapped_apply_func = theano.function(
[states2d, inputs2d, mask1d], wrapped.apply(
states=states2d, inputs=inputs2d, mask=mask1d, iterate=False))
attention_func = theano.function(
[states2d, attended, attended_mask],
attention.take_glimpses(
attended=attended, attended_mask=attended_mask,
states=states2d))
states_man = wrapped.initial_states(batch_size).eval()
glimpses_man = numpy.zeros((batch_size, attended_dim),
dtype=theano.config.floatX)
for i in range(input_length):
inputs_man = distribute_func(input_vals[i], glimpses_man)
states_man = wrapped_apply_func(states_man, inputs_man,
input_mask_vals[i])
glimpses_man, weights_man = attention_func(
states_man, attended_vals, attended_mask_vals)
assert_allclose(states_man, states_vals[i], rtol=1e-5)
assert_allclose(glimpses_man, glimpses_vals[i], rtol=1e-5)
assert_allclose(weights_man, weight_vals[i], rtol=1e-5)
    # weights at positions where the attended mask is zero must be zero
assert numpy.all(weight_vals * (1 - attended_mask_vals.T) == 0)
    # weights at positions where the attended mask is one must be non-zero
assert numpy.all(abs(weight_vals + (1 - attended_mask_vals.T)) > 1e-5)
# weights from different steps should be noticeably different
assert (abs(weight_vals[0] - weight_vals[1])).sum() > 1e-2
    # weights for all states after the last masked position should be the same
for i in range(batch_size):
last = int(input_mask_vals[:, i].sum())
for j in range(last, input_length):
assert_allclose(weight_vals[last, i], weight_vals[j, i], 1e-5)
| mit | -8,633,155,055,716,963,000 | 39.834783 | 75 | 0.670358 | false | 3.601227 | false | false | false |
google-research/google-research | tf3d/object_detection/data_preparation_utils.py | 1 | 25222 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for preparing data to be compatible with object detection pipeline.
Functions to prepare Waymo, scannet and kitti datasets.
"""
import enum
import gin
import gin.tf
import tensorflow as tf
import tensorflow_datasets as tfds
from tf3d import standard_fields
# TODO(alirezafathi): Remove internal mark when dataset files are moved to tf3d.
from tf3d.datasets.specs import waymo_frames
from tf3d.utils import projections
class ObjectDifficulty(enum.IntEnum):
SUPER_HARD = 0
HARD = 1
MODERATE = 2
EASY = 3
def _random_string_generator(num_numbers=5, max_number_value=100000):
string_tensors = []
for _ in range(num_numbers):
random_number = tf.random.uniform([],
minval=0,
maxval=max_number_value,
dtype=tf.int32)
string_tensors.append(tf.strings.as_string(random_number))
return tf.strings.join(string_tensors)
@gin.configurable
def prepare_scannet_scene_dataset(inputs, valid_object_classes=None):
"""Maps the fields from loaded input to standard fields.
Args:
inputs: A dictionary of input tensors.
valid_object_classes: List of valid object classes. if None, it is ignored.
Returns:
A dictionary of input tensors with standard field names.
"""
prepared_inputs = {}
if 'mesh/vertices/positions' in inputs:
prepared_inputs[standard_fields.InputDataFields
.point_positions] = inputs['mesh/vertices/positions']
if 'mesh/vertices/normals' in inputs:
prepared_inputs[standard_fields.InputDataFields
.point_normals] = inputs['mesh/vertices/normals']
prepared_inputs[standard_fields.InputDataFields.point_normals] = tf.where(
tf.math.is_nan(
prepared_inputs[standard_fields.InputDataFields.point_normals]),
tf.zeros_like(
prepared_inputs[standard_fields.InputDataFields.point_normals]),
prepared_inputs[standard_fields.InputDataFields.point_normals])
if 'mesh/vertices/colors' in inputs:
prepared_inputs[standard_fields.InputDataFields
.point_colors] = inputs['mesh/vertices/colors'][:, 0:3]
prepared_inputs[standard_fields.InputDataFields.point_colors] = tf.cast(
prepared_inputs[standard_fields.InputDataFields.point_colors],
dtype=tf.float32)
prepared_inputs[standard_fields.InputDataFields.point_colors] *= (2.0 /
255.0)
prepared_inputs[standard_fields.InputDataFields.point_colors] -= 1.0
if 'scene_name' in inputs:
prepared_inputs[standard_fields.InputDataFields
.camera_image_name] = inputs['scene_name']
if 'mesh/vertices/semantic_labels' in inputs:
prepared_inputs[
standard_fields.InputDataFields
.object_class_points] = inputs['mesh/vertices/semantic_labels']
if 'mesh/vertices/instance_labels' in inputs:
prepared_inputs[
standard_fields.InputDataFields.object_instance_id_points] = tf.reshape(
inputs['mesh/vertices/instance_labels'], [-1])
if valid_object_classes is not None:
valid_objects_mask = tf.cast(
tf.zeros_like(
prepared_inputs[
standard_fields.InputDataFields.object_class_points],
dtype=tf.int32),
dtype=tf.bool)
for object_class in valid_object_classes:
valid_objects_mask = tf.logical_or(
valid_objects_mask,
tf.equal(
prepared_inputs[
standard_fields.InputDataFields.object_class_points],
object_class))
valid_objects_mask = tf.cast(
valid_objects_mask,
dtype=prepared_inputs[
standard_fields.InputDataFields.object_class_points].dtype)
prepared_inputs[standard_fields.InputDataFields
.object_class_points] *= valid_objects_mask
return prepared_inputs
@gin.configurable
def prepare_scannet_frame_dataset(inputs,
min_pixel_depth=0.3,
max_pixel_depth=6.0,
valid_object_classes=None):
"""Maps the fields from loaded input to standard fields.
Args:
inputs: A dictionary of input tensors.
min_pixel_depth: Pixels with depth values less than this are pruned.
max_pixel_depth: Pixels with depth values more than this are pruned.
valid_object_classes: List of valid object classes. if None, it is ignored.
Returns:
A dictionary of input tensors with standard field names.
"""
prepared_inputs = {}
if 'cameras/rgbd_camera/intrinsics/K' not in inputs:
raise ValueError('Intrinsic matrix is missing.')
if 'cameras/rgbd_camera/extrinsics/R' not in inputs:
raise ValueError('Extrinsic rotation matrix is missing.')
if 'cameras/rgbd_camera/extrinsics/t' not in inputs:
raise ValueError('Extrinsics translation is missing.')
if 'cameras/rgbd_camera/depth_image' not in inputs:
raise ValueError('Depth image is missing.')
if 'cameras/rgbd_camera/color_image' not in inputs:
raise ValueError('Color image is missing.')
if 'frame_name' in inputs:
prepared_inputs[standard_fields.InputDataFields
.camera_image_name] = inputs['frame_name']
camera_intrinsics = inputs['cameras/rgbd_camera/intrinsics/K']
depth_image = inputs['cameras/rgbd_camera/depth_image']
image_height = tf.shape(depth_image)[0]
image_width = tf.shape(depth_image)[1]
x, y = tf.meshgrid(
tf.range(image_width), tf.range(image_height), indexing='xy')
x = tf.reshape(tf.cast(x, dtype=tf.float32) + 0.5, [-1, 1])
y = tf.reshape(tf.cast(y, dtype=tf.float32) + 0.5, [-1, 1])
point_positions = projections.image_frame_to_camera_frame(
image_frame=tf.concat([x, y], axis=1),
camera_intrinsics=camera_intrinsics)
rotate_world_to_camera = inputs['cameras/rgbd_camera/extrinsics/R']
translate_world_to_camera = inputs['cameras/rgbd_camera/extrinsics/t']
point_positions = projections.to_world_frame(
camera_frame_points=point_positions,
rotate_world_to_camera=rotate_world_to_camera,
translate_world_to_camera=translate_world_to_camera)
prepared_inputs[standard_fields.InputDataFields
.point_positions] = point_positions * tf.reshape(
depth_image, [-1, 1])
depth_values = tf.reshape(depth_image, [-1])
valid_depth_mask = tf.logical_and(
tf.greater_equal(depth_values, min_pixel_depth),
tf.less_equal(depth_values, max_pixel_depth))
prepared_inputs[standard_fields.InputDataFields.point_colors] = tf.reshape(
tf.cast(inputs['cameras/rgbd_camera/color_image'], dtype=tf.float32),
[-1, 3])
prepared_inputs[standard_fields.InputDataFields.point_colors] *= (2.0 / 255.0)
prepared_inputs[standard_fields.InputDataFields.point_colors] -= 1.0
prepared_inputs[
standard_fields.InputDataFields.point_positions] = tf.boolean_mask(
prepared_inputs[standard_fields.InputDataFields.point_positions],
valid_depth_mask)
prepared_inputs[
standard_fields.InputDataFields.point_colors] = tf.boolean_mask(
prepared_inputs[standard_fields.InputDataFields.point_colors],
valid_depth_mask)
if 'cameras/rgbd_camera/semantic_image' in inputs:
prepared_inputs[
standard_fields.InputDataFields.object_class_points] = tf.cast(
tf.reshape(inputs['cameras/rgbd_camera/semantic_image'], [-1, 1]),
dtype=tf.int32)
prepared_inputs[
standard_fields.InputDataFields.object_class_points] = tf.boolean_mask(
prepared_inputs[
standard_fields.InputDataFields.object_class_points],
valid_depth_mask)
if 'cameras/rgbd_camera/instance_image' in inputs:
prepared_inputs[
standard_fields.InputDataFields.object_instance_id_points] = tf.cast(
tf.reshape(inputs['cameras/rgbd_camera/instance_image'], [-1]),
dtype=tf.int32)
prepared_inputs[standard_fields.InputDataFields
.object_instance_id_points] = tf.boolean_mask(
prepared_inputs[standard_fields.InputDataFields
.object_instance_id_points],
valid_depth_mask)
if valid_object_classes is not None:
valid_objects_mask = tf.cast(
tf.zeros_like(
prepared_inputs[
standard_fields.InputDataFields.object_class_points],
dtype=tf.int32),
dtype=tf.bool)
for object_class in valid_object_classes:
valid_objects_mask = tf.logical_or(
valid_objects_mask,
tf.equal(
prepared_inputs[
standard_fields.InputDataFields.object_class_points],
object_class))
valid_objects_mask = tf.cast(
valid_objects_mask,
dtype=prepared_inputs[
standard_fields.InputDataFields.object_class_points].dtype)
prepared_inputs[standard_fields.InputDataFields
.object_class_points] *= valid_objects_mask
return prepared_inputs
@gin.configurable
def prepare_waymo_open_dataset(inputs,
valid_object_classes=None,
max_object_distance_from_source=74.88):
"""Maps the fields from loaded input to standard fields.
Args:
inputs: A dictionary of input tensors.
valid_object_classes: List of valid object classes. if None, it is ignored.
max_object_distance_from_source: Maximum distance of objects from source. It
will be ignored if None.
Returns:
A dictionary of input tensors with standard field names.
"""
prepared_inputs = {}
if standard_fields.InputDataFields.point_positions in inputs:
prepared_inputs[standard_fields.InputDataFields.point_positions] = inputs[
standard_fields.InputDataFields.point_positions]
if standard_fields.InputDataFields.point_intensities in inputs:
prepared_inputs[standard_fields.InputDataFields.point_intensities] = inputs[
standard_fields.InputDataFields.point_intensities]
if standard_fields.InputDataFields.point_elongations in inputs:
prepared_inputs[standard_fields.InputDataFields.point_elongations] = inputs[
standard_fields.InputDataFields.point_elongations]
if standard_fields.InputDataFields.point_normals in inputs:
prepared_inputs[standard_fields.InputDataFields.point_normals] = inputs[
standard_fields.InputDataFields.point_normals]
if 'cameras/front/intrinsics/K' in inputs:
prepared_inputs[standard_fields.InputDataFields
.camera_intrinsics] = inputs['cameras/front/intrinsics/K']
if 'cameras/front/extrinsics/R' in inputs:
prepared_inputs[
standard_fields.InputDataFields
.camera_rotation_matrix] = inputs['cameras/front/extrinsics/R']
if 'cameras/front/extrinsics/t' in inputs:
prepared_inputs[standard_fields.InputDataFields
.camera_translation] = inputs['cameras/front/extrinsics/t']
if 'cameras/front/image' in inputs:
prepared_inputs[standard_fields.InputDataFields
.camera_image] = inputs['cameras/front/image']
prepared_inputs[standard_fields.InputDataFields
.camera_raw_image] = inputs['cameras/front/image']
prepared_inputs[standard_fields.InputDataFields
.camera_original_image] = inputs['cameras/front/image']
if 'scene_name' in inputs and 'frame_name' in inputs:
prepared_inputs[
standard_fields.InputDataFields.camera_image_name] = tf.strings.join(
[inputs['scene_name'], inputs['frame_name']], separator='_')
if 'objects/pose/R' in inputs:
prepared_inputs[standard_fields.InputDataFields
.objects_rotation_matrix] = inputs['objects/pose/R']
if 'objects/pose/t' in inputs:
prepared_inputs[standard_fields.InputDataFields
.objects_center] = inputs['objects/pose/t']
if 'objects/shape/dimension' in inputs:
prepared_inputs[
standard_fields.InputDataFields.objects_length] = tf.reshape(
inputs['objects/shape/dimension'][:, 0], [-1, 1])
prepared_inputs[standard_fields.InputDataFields.objects_width] = tf.reshape(
inputs['objects/shape/dimension'][:, 1], [-1, 1])
prepared_inputs[
standard_fields.InputDataFields.objects_height] = tf.reshape(
inputs['objects/shape/dimension'][:, 2], [-1, 1])
if 'objects/category/label' in inputs:
prepared_inputs[standard_fields.InputDataFields.objects_class] = tf.reshape(
inputs['objects/category/label'], [-1, 1])
if valid_object_classes is not None:
valid_objects_mask = tf.cast(
tf.zeros_like(
prepared_inputs[standard_fields.InputDataFields.objects_class],
dtype=tf.int32),
dtype=tf.bool)
for object_class in valid_object_classes:
valid_objects_mask = tf.logical_or(
valid_objects_mask,
tf.equal(
prepared_inputs[standard_fields.InputDataFields.objects_class],
object_class))
valid_objects_mask = tf.reshape(valid_objects_mask, [-1])
for key in standard_fields.get_input_object_fields():
if key in prepared_inputs:
prepared_inputs[key] = tf.boolean_mask(prepared_inputs[key],
valid_objects_mask)
if max_object_distance_from_source is not None:
if standard_fields.InputDataFields.objects_center in prepared_inputs:
object_distances = tf.norm(
prepared_inputs[standard_fields.InputDataFields.objects_center][:,
0:2],
axis=1)
valid_mask = tf.less(object_distances, max_object_distance_from_source)
for key in standard_fields.get_input_object_fields():
if key in prepared_inputs:
prepared_inputs[key] = tf.boolean_mask(prepared_inputs[key],
valid_mask)
return prepared_inputs
@gin.configurable
def prepare_kitti_dataset(inputs, valid_object_classes=None):
"""Maps the fields from loaded input to standard fields.
Args:
inputs: A dictionary of input tensors.
valid_object_classes: List of valid object classes. if None, it is ignored.
Returns:
A dictionary of input tensors with standard field names.
"""
prepared_inputs = {}
prepared_inputs[standard_fields.InputDataFields.point_positions] = inputs[
standard_fields.InputDataFields.point_positions]
prepared_inputs[standard_fields.InputDataFields.point_intensities] = inputs[
standard_fields.InputDataFields.point_intensities]
prepared_inputs[standard_fields.InputDataFields
.camera_intrinsics] = inputs['cameras/cam02/intrinsics/K']
prepared_inputs[standard_fields.InputDataFields.
camera_rotation_matrix] = inputs['cameras/cam02/extrinsics/R']
prepared_inputs[standard_fields.InputDataFields
.camera_translation] = inputs['cameras/cam02/extrinsics/t']
prepared_inputs[standard_fields.InputDataFields
.camera_image] = inputs['cameras/cam02/image']
prepared_inputs[standard_fields.InputDataFields
.camera_raw_image] = inputs['cameras/cam02/image']
prepared_inputs[standard_fields.InputDataFields
.camera_original_image] = inputs['cameras/cam02/image']
if 'scene_name' in inputs and 'frame_name' in inputs:
prepared_inputs[
standard_fields.InputDataFields.camera_image_name] = tf.strings.join(
[inputs['scene_name'], inputs['frame_name']], separator='_')
if 'objects/pose/R' in inputs:
prepared_inputs[standard_fields.InputDataFields
.objects_rotation_matrix] = inputs['objects/pose/R']
if 'objects/pose/t' in inputs:
prepared_inputs[standard_fields.InputDataFields
.objects_center] = inputs['objects/pose/t']
if 'objects/shape/dimension' in inputs:
prepared_inputs[
standard_fields.InputDataFields.objects_length] = tf.reshape(
inputs['objects/shape/dimension'][:, 0], [-1, 1])
prepared_inputs[standard_fields.InputDataFields.objects_width] = tf.reshape(
inputs['objects/shape/dimension'][:, 1], [-1, 1])
prepared_inputs[
standard_fields.InputDataFields.objects_height] = tf.reshape(
inputs['objects/shape/dimension'][:, 2], [-1, 1])
if 'objects/category/label' in inputs:
prepared_inputs[standard_fields.InputDataFields.objects_class] = tf.reshape(
inputs['objects/category/label'], [-1, 1])
if valid_object_classes is not None:
valid_objects_mask = tf.cast(
tf.zeros_like(
prepared_inputs[standard_fields.InputDataFields.objects_class],
dtype=tf.int32),
dtype=tf.bool)
for object_class in valid_object_classes:
valid_objects_mask = tf.logical_or(
valid_objects_mask,
tf.equal(
prepared_inputs[standard_fields.InputDataFields.objects_class],
object_class))
valid_objects_mask = tf.reshape(valid_objects_mask, [-1])
for key in standard_fields.get_input_object_fields():
if key in prepared_inputs:
prepared_inputs[key] = tf.boolean_mask(prepared_inputs[key],
valid_objects_mask)
return prepared_inputs
@gin.configurable
def prepare_proxy_dataset(inputs):
"""Maps the fields from loaded input to standard fields.
Args:
inputs: A dictionary of input tensors.
Returns:
A dictionary of input tensors with standard field names.
"""
prepared_inputs = {}
# Points
prepared_inputs[standard_fields.InputDataFields.point_positions] = inputs[
standard_fields.InputDataFields.point_positions]
prepared_inputs[standard_fields.InputDataFields.point_intensities] = inputs[
standard_fields.InputDataFields.point_intensities]
# Camera
prepared_inputs[
standard_fields.InputDataFields.camera_intrinsics] = tf.reshape(
inputs['camera_intrinsics'], [3, 3])
prepared_inputs[
standard_fields.InputDataFields.camera_rotation_matrix] = tf.reshape(
inputs['camera_rotation_matrix'], [3, 3])
prepared_inputs[
standard_fields.InputDataFields.camera_translation] = tf.reshape(
inputs['camera_translation'], [3])
prepared_inputs[
standard_fields.InputDataFields.camera_image] = inputs['image']
prepared_inputs[
standard_fields.InputDataFields.camera_raw_image] = inputs['image']
prepared_inputs[
standard_fields.InputDataFields.camera_original_image] = inputs['image']
prepared_inputs[standard_fields.InputDataFields
.camera_image_name] = _random_string_generator()
# objects pose
prepared_inputs[
standard_fields.InputDataFields.objects_rotation_matrix] = tf.reshape(
inputs['objects_rotation'], [-1, 3, 3])
prepared_inputs[standard_fields.InputDataFields.objects_center] = tf.reshape(
inputs['objects_center'], [-1, 3])
# objects size
prepared_inputs[standard_fields.InputDataFields.objects_length] = tf.reshape(
inputs['objects_length'], [-1, 1])
prepared_inputs[standard_fields.InputDataFields.objects_width] = tf.reshape(
inputs['objects_width'], [-1, 1])
prepared_inputs[standard_fields.InputDataFields.objects_height] = tf.reshape(
inputs['objects_height'], [-1, 1])
# labels
prepared_inputs[standard_fields.InputDataFields.objects_class] = tf.reshape(
inputs['objects_class'], [-1, 1])
return prepared_inputs
def compute_kitti_difficulty(boxes, occlusions, truncations, image_height):
"""Computes box difficulty as Hard(1), Moderate(2), Easy(3) or 0 (Super hard).
Easy: height >=40 Px, occlusion <= 0, truncation <= 0.15
Moderate: height >=25 Px, occlusion <= 1, truncation <= 0.30
Hard: height >=25 Px, occlusion <= 2, truncation <= 0.50
Note that 'Hard' box is also 'Moderate' and 'Easy'.
Returns a (N, 1) tensor containing object difficulty with following labelmap:
0: SuperHard
1: Hard
2: Moderate
3: Easy
TODO(abhijitkundu): Since difficulty level is very specific to kitti, this
function should be in kitti evaluation rather than detection preprocessor.
Args:
boxes: (N, 4) tensor of 2d boxes with [ymin, xmin, ymax, xmax] each row.
occlusions: (N, 1) tensor containing box occlusion level
truncations: (N, 1) tensor containing box truncation level
image_height: Image height.
Returns:
A (N, 1) int32 tensor containing per box difficulty labels with 0 (SuperHard),
1 (Hard), 2 (Moderate) and 3 (Easy).
"""
# box heights in pixels
heights = tf.reshape((boxes[:, 2] - boxes[:, 0]), [-1, 1]) * tf.cast(
image_height, dtype=tf.float32)
# compute binary masks for each difficulty level
is_easy = (heights >= 40.0) & (occlusions <= 0) & (truncations <= 0.15)
is_moderate = (heights >= 25.0) & (occlusions <= 1) & (truncations <= 0.30)
is_hard = (heights >= 25.0) & (occlusions <= 2) & (truncations <= 0.50)
# set difficulty map
difficulty = tf.maximum(
tf.maximum(
tf.cast(is_hard, dtype=tf.int32) * ObjectDifficulty.HARD,
tf.cast(is_moderate, dtype=tf.int32) * ObjectDifficulty.MODERATE),
tf.cast(is_easy, dtype=tf.int32) * ObjectDifficulty.EASY)
return difficulty
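# Editor's note: hedged usage sketch, not part of the original file. The
# boxes below use normalized [ymin, xmin, ymax, xmax] coordinates, which is an
# assumption consistent with the multiplication by `image_height` above;
# `import tensorflow as tf` is assumed as elsewhere in this file.
def _demo_compute_kitti_difficulty():
  """Labels one tall, unoccluded box and one short, heavily occluded box."""
  image_height = 100
  boxes = tf.constant([[0.1, 0.1, 0.6, 0.3],   # 0.5 * 100 = 50 px tall
                       [0.1, 0.5, 0.3, 0.6]])  # 0.2 * 100 = 20 px tall
  occlusions = tf.constant([[0], [2]])
  truncations = tf.constant([[0.0], [0.6]])
  difficulty = compute_kitti_difficulty(boxes, occlusions, truncations,
                                        image_height)
  # Expected labels: [[3], [0]], i.e. Easy and SuperHard.
  return difficulty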
def get_waymo_per_frame_with_prediction_feature_spec(
num_object_classes,
encoded_features_dimension,
include_encoded_features=True):
"""Returns a tfds feature spec with regular per frame entries and predictions.
Args:
num_object_classes: Number of object classes.
encoded_features_dimension: Encoded features dimension.
include_encoded_features: If True, it will include encoded features.
Otherwise, it will not include them.
Returns:
A tfds feature spec.
"""
prediction_feature_dict = {
standard_fields.DetectionResultFields.object_rotation_matrix_points:
tfds.features.Tensor(shape=(None, 3, 3), dtype=tf.float32),
standard_fields.DetectionResultFields.object_length_points:
tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
standard_fields.DetectionResultFields.object_height_points:
tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
standard_fields.DetectionResultFields.object_width_points:
tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
standard_fields.DetectionResultFields.object_center_points:
tfds.features.Tensor(shape=(None, 3), dtype=tf.float32),
standard_fields.DetectionResultFields.object_semantic_points:
tfds.features.Tensor(
shape=(None, num_object_classes), dtype=tf.float32),
standard_fields.DetectionResultFields.objects_rotation_matrix:
tfds.features.Tensor(shape=(None, 3, 3), dtype=tf.float32),
standard_fields.DetectionResultFields.objects_length:
tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
standard_fields.DetectionResultFields.objects_height:
tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
standard_fields.DetectionResultFields.objects_width:
tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
standard_fields.DetectionResultFields.objects_center:
tfds.features.Tensor(shape=(None, 3), dtype=tf.float32),
standard_fields.DetectionResultFields.objects_class:
tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
standard_fields.DetectionResultFields.objects_score:
tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
}
if include_encoded_features:
prediction_feature_dict[standard_fields.DetectionResultFields
.encoded_features_points] = tfds.features.Tensor(
shape=(None, encoded_features_dimension),
dtype=tf.float32)
prediction_feature_dict[standard_fields.DetectionResultFields
.objects_encoded_features] = tfds.features.Tensor(
shape=(None, encoded_features_dimension),
dtype=tf.float32)
prediction_feature_spec = tfds.features.FeaturesDict(prediction_feature_dict)
output_feature_spec_dict = {
k: v for k, v in waymo_frames.FRAME_FEATURE_SPEC.items()
}
output_feature_spec_dict['predictions'] = prediction_feature_spec
return tfds.features.FeaturesDict(output_feature_spec_dict)
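# Editor's note: hedged usage sketch, not part of the original file. The class
# count and feature dimension below are arbitrary example values.
def _demo_waymo_prediction_feature_spec():
  """Builds the combined per-frame + predictions tfds feature spec."""
  feature_spec = get_waymo_per_frame_with_prediction_feature_spec(
      num_object_classes=10,
      encoded_features_dimension=64,
      include_encoded_features=False)
  # The result is a tfds.features.FeaturesDict containing all regular
  # per-frame Waymo fields plus a nested 'predictions' FeaturesDict.
  return feature_spec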
| apache-2.0 | -7,792,968,965,668,784,000 | 43.879004 | 80 | 0.672667 | false | 3.791642 | false | false | false |
jezdez-archive/django-ticker | ticker/views.py | 1 | 2240 | from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from ticker.models import Entry
from tagging.models import Tag, TaggedItem
def overview(request, num_latest=10, template_name='ticker/overview.html', extra_context={}):
"""Show the 10 latest entries"""
entry_list = Entry.objects.public()[:num_latest]
template_context = {
'entry_list': entry_list,
}
template_context.update(extra_context)
return render_to_response(template_name, template_context,
RequestContext(request))
def archive(request, template_name='ticker/archive.html', extra_context={}):
"""Shows a archive page and a list of tags"""
entry_list = Entry.objects.public()
tag_list = Tag.objects.cloud_for_model(Entry, steps=9,
filters={'status': Entry.STATUS_OPEN })
template_context = {
'entry_list': entry_list,
'tag_list': tag_list,
}
template_context.update(extra_context)
return render_to_response(template_name, template_context,
RequestContext(request))
def archive_by_tag(request, tag, template_name='ticker/archive_by_tag.html', extra_context={}):
"""Shows a list of entries related with a specific `tag`"""
get_object_or_404(Tag, name=tag)
entry_list = TaggedItem.objects.get_by_model(Entry.objects.public(), [tag])
related_tags = Tag.objects.related_for_model([tag], Entry)
template_context = {
'the_tag': tag,
'related_tags': related_tags,
'entry_list': entry_list,
}
template_context.update(extra_context)
return render_to_response(template_name, template_context,
context_instance=RequestContext(request))
def details(request, slug, template_name='ticker/details.html', extra_context={}):
"""Shows a details page for the given entry"""
entry = get_object_or_404(Entry.objects.public(), slug=slug)
template_context = {
'entry': entry,
}
template_context.update(extra_context)
return render_to_response(template_name, template_context,
RequestContext(request))
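# Editor's note: hedged usage sketch, not part of the original app. It shows
# how one of these views can be exercised directly (e.g. from a test) with
# Django's RequestFactory; the URL path and tag value are illustrative only,
# and the tag must exist in the database or get_object_or_404 raises Http404.
def _demo_archive_by_tag_view():
    from django.test import RequestFactory
    request = RequestFactory().get('/ticker/archive/tag/django/')
    # Renders ticker/archive_by_tag.html for entries tagged "django".
    return archive_by_tag(request, tag='django')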
| bsd-3-clause | 8,860,472,621,819,965,000 | 42.921569 | 95 | 0.647768 | false | 3.950617 | false | false | false |
haamoon/tensorpack | tensorpack/RL/envbase.py | 1 | 3524 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: envbase.py
# Author: Yuxin Wu <[email protected]>
from abc import abstractmethod, ABCMeta
from collections import defaultdict
import six
from ..utils import get_rng
__all__ = ['RLEnvironment', 'ProxyPlayer',
'DiscreteActionSpace']
@six.add_metaclass(ABCMeta)
class RLEnvironment(object):
""" Base class of RL environment. """
def __init__(self):
self.reset_stat()
@abstractmethod
def current_state(self):
"""
        Observe the environment and return a state representation.
"""
@abstractmethod
def action(self, act):
"""
Perform an action. Will automatically start a new episode if isOver==True
Args:
act: the action
Returns:
tuple: (reward, isOver)
"""
def restart_episode(self):
""" Start a new episode, even if the current hasn't ended """
raise NotImplementedError()
def finish_episode(self):
""" Get called when an episode finished"""
pass
def get_action_space(self):
""" Returns:
:class:`ActionSpace` """
raise NotImplementedError()
def reset_stat(self):
""" Reset all statistics counter"""
self.stats = defaultdict(list)
def play_one_episode(self, func, stat='score'):
""" Play one episode for eval.
Args:
func: the policy function. Takes a state and returns an action.
stat: a key or list of keys in stats to return.
Returns:
the stat(s) after running this episode
"""
if not isinstance(stat, list):
stat = [stat]
while True:
s = self.current_state()
act = func(s)
r, isOver = self.action(act)
# print r
if isOver:
s = [self.stats[k] for k in stat]
self.reset_stat()
return s if len(s) > 1 else s[0]
class ActionSpace(object):
def __init__(self):
self.rng = get_rng(self)
@abstractmethod
def sample(self):
pass
def num_actions(self):
raise NotImplementedError()
class DiscreteActionSpace(ActionSpace):
def __init__(self, num):
super(DiscreteActionSpace, self).__init__()
self.num = num
def sample(self):
return self.rng.randint(self.num)
def num_actions(self):
return self.num
def __repr__(self):
return "DiscreteActionSpace({})".format(self.num)
def __str__(self):
return "DiscreteActionSpace({})".format(self.num)
class NaiveRLEnvironment(RLEnvironment):
""" For testing only"""
def __init__(self):
self.k = 0
def current_state(self):
self.k += 1
return self.k
def action(self, act):
self.k = act
return (self.k, self.k > 10)
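# Editor's note: hedged usage sketch, not part of the original file. A tiny
# interaction loop against the toy environment above; the "policy" simply
# echoes the incremented state, so the episode ends once the state exceeds 10.
def _demo_naive_player():
    player = NaiveRLEnvironment()
    while True:
        state = player.current_state()
        reward, is_over = player.action(state + 1)
        if is_over:
            return reward  # the last reward once the episode is over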
class ProxyPlayer(RLEnvironment):
""" Serve as a proxy to another player """
def __init__(self, player):
self.player = player
def reset_stat(self):
self.player.reset_stat()
def current_state(self):
return self.player.current_state()
def action(self, act):
return self.player.action(act)
@property
def stats(self):
return self.player.stats
def restart_episode(self):
self.player.restart_episode()
def finish_episode(self):
self.player.finish_episode()
def get_action_space(self):
return self.player.get_action_space()
| apache-2.0 | 1,491,384,573,628,866,300 | 22.184211 | 81 | 0.575199 | false | 4.027429 | false | false | false |