# === ApproxEng/approxeng.input :: src/python/approxeng/input/gui/profiler.py (apache-2.0) ===
import curses
import curses.textpad
import re
import signal
from math import floor
import yaml
from evdev import InputDevice
from approxeng.input.controllers import get_valid_devices
from approxeng.input.profiling import Profiler, ProfilerThread, Profile, BUTTON_NAMES, AXIS_NAMES
DEFAULT_AXIS_KEYS = ('z', 'x', 'c', 'v', 'b', 'n', 'm', ',')
DEFAULT_BUTTON_KEYS = ('1', '2', '3', '4', '5', '6', '7', '8', 'q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p', 'a')
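# The key tuples above map positionally onto BUTTON_NAMES and AXIS_NAMES: pressing
# the n-th key selects the n-th button or axis for assignment in the profiler UI
# (see the key handling in build_profiler_gui below).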
def profiler_main():
devices = list(get_valid_devices())
if devices:
print('Available input devices:\n')
for index, device in enumerate(devices):
print(f'[{index}] : {device.name} (vendor={device.info.vendor}, product={device.info.product})')
if devices:
i = input('\nEnter a device number to continue (0): ')
try:
if i == '':
i = 0
i = int(i)
if not len(devices) > i >= 0:
print('Device number must be one of the ones above, exiting.')
exit(0)
run_profiler_gui(devices[i])
except ValueError:
print('Input must be a number, exiting.')
exit(0)
else:
        print('No valid devices found. Is your controller connected?')
def run_profiler_gui(device: InputDevice, button_keys=DEFAULT_BUTTON_KEYS, axis_keys=DEFAULT_AXIS_KEYS):
curses.wrapper(build_profiler_gui(device=device, button_keys=button_keys, axis_keys=axis_keys))
def build_profiler_gui(device: InputDevice, button_keys=DEFAULT_BUTTON_KEYS, axis_keys=DEFAULT_AXIS_KEYS,
filename=None):
profiler = Profiler(device=device)
profiler_thread = ProfilerThread(profiler=profiler)
profiler_thread.start()
profile = Profile()
profile.name = device.name
profile.vendor_id = device.info.vendor
profile.product_id = device.info.product
def convert_device_name():
return re.sub(r'\s+', '_', re.sub(r'[^\w\s]', '', device.name.lower()))
if filename is None:
filename = f'{convert_device_name()}_v{device.info.vendor}_p{device.info.product}.yaml'
def signal_handler(sig, frame):
with open(filename, 'w') as outfile:
yaml.dump(profile.dict, outfile)
profiler_thread.stop()
exit(0)
signal.signal(signal.SIGINT, signal_handler)
def curses_main(screen):
try:
display = DisplayState(screen=screen, profile=profile, profiler=profiler, axis_keys=axis_keys,
button_keys=button_keys)
curses.cbreak()
curses.halfdelay(1)
while True:
display.start()
display.println('Approxeng.input controller profiling tool')
display.println('Select axis or button and activate corresponding control')
display.println(f'File : {filename}')
display.println(f'CTRL-C to exit and save YAML definition file')
# display.println(f'{profiler.axis_changes}')
display.newline()
display.print_header('Buttons')
for index, button in enumerate(BUTTON_NAMES):
row, col = divmod(index, 4)
display.show_button(display.line + row, col * 20, button)
display.line += floor((len(BUTTON_NAMES) - 1) / 4) + 1
display.newline()
display.print_header('Axes')
for index, axis in enumerate(AXIS_NAMES):
row, col = divmod(index, 2)
display.show_axis(display.line + row, col * 40, axis)
display.line += floor((len(AXIS_NAMES) - 1) / 2) + 1
display.newline()
if display.control_is_button:
display.println('Button selected - press control to assign or BACKSPACE to clear')
elif display.control_is_axis:
if display.control[0] == 'd':
display.println('Binary axis, press both corresponding buttons to assign')
else:
display.println('Analogue axis, move control to full extent to assign')
display.println('SPACE to toggle inversion, BACKSPACE to toggle enable / disable')
try:
key = screen.getkey()
if key in button_keys and button_keys.index(key) < len(BUTTON_NAMES):
profiler.reset()
display.control = BUTTON_NAMES[button_keys.index(key)]
elif key in axis_keys and axis_keys.index(key) < len(AXIS_NAMES):
profiler.reset()
display.control = AXIS_NAMES[axis_keys.index(key)]
elif key == ' ' and display.control_is_axis:
profile.toggle_axis_invert(name=display.control)
elif key == 'KEY_BACKSPACE':
profiler.reset()
if display.control_is_button:
profile.set_button(name=display.control, code=None)
elif display.control_is_axis:
profile.toggle_axis_enable(name=display.control)
elif key == 'KEY_LEFT':
profiler.reset()
display.select_previous_control()
elif key == 'KEY_RIGHT':
profiler.reset()
display.select_next_control()
except curses.error:
# Expect this when the key check times out
pass
except KeyboardInterrupt:
profiler_thread.stop()
pass
return curses_main
class DisplayState:
def __init__(self, screen, profile, profiler, axis_keys, button_keys):
self.screen = screen
self.profile = profile
self.profiler = profiler
self.axis_keys = axis_keys
self.button_keys = button_keys
self.line = 0
self.all_controls = [*BUTTON_NAMES, *AXIS_NAMES]
self.control = self.all_controls[0]
# Disable echo to terminal
curses.noecho()
# Hide the cursor
curses.curs_set(0)
# Contrast colour for UI
curses.init_pair(1, curses.COLOR_YELLOW, curses.COLOR_BLACK)
# Highlight
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_WHITE)
# Enable colour
curses.start_color()
# Clear the screen
screen.clear()
# Enable key events for special keys i.e. arrows, backspace
screen.keypad(True)
def start(self):
self.screen.clear()
self.line = 0
def println(self, string, contrast=False):
try:
if contrast:
self.screen.addstr(self.line, 0, string, curses.color_pair(1))
else:
self.screen.addstr(self.line, 0, string)
except curses.error:
pass
self.line += 1
def newline(self):
self.line += 1
def print_header(self, string):
s = '——' + string
s += '—' * (80 - len(s))
self.println(s, True)
@property
def control_is_button(self):
return self.control in BUTTON_NAMES
@property
def control_is_axis(self):
return self.control in AXIS_NAMES
def select_next_control(self):
control_index = self.all_controls.index(self.control)
self.control = self.all_controls[(control_index + 1) % len(self.all_controls)]
def select_previous_control(self):
control_index = self.all_controls.index(self.control)
self.control = self.all_controls[(control_index - 1) % len(self.all_controls)]
def select_next_row(self):
pass
def select_previous_row(self):
pass
def show_axis(self, row, col, axis):
control = self.axis_keys[AXIS_NAMES.index(axis)]
try:
if self.control == axis:
# Pick up either all changes, or binary changes only if the axis starts with 'd'
changes = self.profiler.axis_changes if axis[0] != 'd' else self.profiler.binary_axis_changes
if changes:
# Currently editing this axis, show live information if available
code, min_value, max_value, current_value = changes[0]
self.profile.set_axis_range(axis, code, min_value, max_value)
rep = self.profile.axes[axis].build_repr(axis=axis, control=control, current_value=current_value)
else:
rep = self.profile.axes[axis].build_repr(axis=axis, control=control)
self.screen.addstr(row, col, rep, curses.color_pair(2))
else:
self.screen.addstr(row, col, self.profile.axes[axis].build_repr(axis=axis, control=control))
except curses.error:
pass
def show_button(self, row, col, button):
control = self.button_keys[BUTTON_NAMES.index(button)]
try:
if self.control == button:
if self.profiler.last_button_pressed:
self.profile.set_button(button, self.profiler.last_button_pressed)
rep = f'[{control}] {button} : {self.profile.buttons[button] or "---"}'
self.screen.addstr(row, col, rep, curses.color_pair(2))
else:
rep = f'[{control}] {button} : {self.profile.buttons[button] or "---"}'
self.screen.addstr(row, col, rep)
except curses.error:
pass
# === Seanld/Remagined-Software :: Transfer/get.py (mit) ===
import socket
import sys
import os
HOST = sys.argv[1]
PORT = int(sys.argv[2])
s = socket.socket()
try:
s.connect((HOST, PORT))
except:
raise Exception("Device not available to download from.")
STAY = True
def ben(string):
return bytes(string.encode("utf-8"))
def bde(binary):
return str(binary.decode("utf-8"))
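# Protocol implemented by the loop below: the client sends a filename, the server
# answers "EXISTS<size>" if the file is available, the client confirms with "OK"
# and then reads the file back in 1024-byte chunks until <size> bytes have arrived.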
while STAY == True:
FILENAME = input("[SERVER]: Enter filename => ")
s.send(ben(FILENAME))
response = bde(s.recv(1024))
if response[:6] == "EXISTS":
filesize = float(response[6:])
confirm = input("[SERVER]: "+FILENAME+" is "+str(filesize)+" bytes, download? (Y/N) => ").upper()
if confirm == "Y":
s.send(ben("OK"))
newname = input("[DEVICE]: Name new file => ")
f = open(newname, "wb")
data = s.recv(1024)
totalRecv = len(data)
f.write(data)
while totalRecv < filesize:
data = s.recv(1024)
totalRecv += len(data)
f.write(data)
print(str((totalRecv/float(filesize))*100)+"% complete")
print("Download complete!")
f.close()
elif confirm == "N":
pass
else:
print("[SERVER]: Does not exist!")
# === malexer/meteocalc :: meteocalc/windchill.py (mit) ===
"""Module for calculation of Wind chill.
Wind-chill or windchill (popularly wind chill factor) is the lowering of
body temperature due to the passing-flow of lower-temperature air.
Wind chill numbers are always lower than the air temperature for values
where the formula is valid.
When the apparent temperature is higher than the air temperature,
the heat index is used instead.
Check wikipedia for more info:
https://en.wikipedia.org/wiki/Wind_chill
Formula details:
https://www.wpc.ncep.noaa.gov/html/windchill.shtml
"""
from .temperature import Temp, F
def wind_chill(temperature, wind_speed):
"""Calculate Wind Chill (feels like temperature) based on NOAA.
Default unit for resulting Temp value is Fahrenheit and it will be used
in case of casting to int/float. Use Temp properties to convert result to
Celsius (Temp.c) or Kelvin (Temp.k).
Wind Chill Temperature is only defined for temperatures at or below
50 F and wind speeds above 3 mph.
:param temperature: temperature value in Fahrenheit or Temp instance.
:type temperature: int, float, Temp
:param wind_speed: wind speed in mph
:type wind_speed: int, float
:returns: Wind chill value
:rtype: Temp
"""
T = temperature.f if isinstance(temperature, Temp) else temperature
V = wind_speed
if T > 50 or V <= 3:
raise ValueError(
"Wind Chill Temperature is only defined for temperatures at"
" or below 50 F and wind speeds above 3 mph.")
WINDCHILL = 35.74 + (0.6215 * T) - 35.75 * V**0.16 + 0.4275 * T * V**0.16
return Temp(WINDCHILL, unit=F)
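# Example usage (a sketch; assumes wind_chill is importable from the meteocalc
# package as in its public API):
#
#     wc = wind_chill(10, 25)   # 10 F air temperature, 25 mph wind
#     print(wc.f, wc.c)         # wind chill in Fahrenheit and Celsius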
# === dhrod5/link :: fabfile.py (apache-2.0) ===
"""
Fabfile for deploying and setting up code that looks like the production
environment. it also makes it easy to start up the servers
If you want to run on the localhost you may need to first do::
rm -rf ~/.ssh/known_hosts
"""
from __future__ import with_statement
import os
import re
from fabric.api import local, settings, abort, run , cd, env, lcd, sudo, prompt
from fabric.contrib.console import confirm
from fabric.contrib import files
env.roledefs = {'local':['localhost']}
env.use_ssh_config=True
TAG_REGEX = re.compile('^[0-9]+\.[0-9]+\.[0-9]+')
STABLE_MSG = '**stable**'
LINK_CODE_DIR = os.path.split(os.path.abspath(__file__))[0]
def dir_code_base():
"""
If you are using any localhost then it will use the current directory.
Otherwise you will use the code_dir
"""
if 'localhost' in env.host_string:
return os.getcwd()
return code_dir
def dir_scripts():
"""
The directory where you house all the scripts
"""
return '%s/scripts' % (dir_code_base())
config_dir = '~/.link'
def test_install():
import os
#set the link dir to something silly
os.environ['LNK_DIR']='saodusah'
#create a virtual environment
local('echo $LNK_DIR')
local('virtualenv env')
#remove everything from the build directory
local('rm -rf build')
#run this and see that it works
local('source env/bin/activate && python setup.py install')
def configure():
"""
Create the base configuration so that you can change it. Might want to
include the configuration in a different repo
"""
if not files.exists(config_dir):
run('mkdir %s' % config_dir)
lnk_config = '%s/link.config' % config_dir
if not files.exists(lnk_config):
run('touch %s' % lnk_config)
def script(script_name, command = 'python', **args):
"""
Will run the script that is in the scripts folder. you can pass in a
dictionory of args and it will pass it through to the script as command line
args in this format
fab -R local script:example.py,arg1=value1,arg2=value2
that will result in running this command
<command> <scripts_directory>/<scriptname> --arg1=value1 --arg2=value2
"""
with cd(dir_scripts()):
parameters = ''
if args:
parameters = ' '.join(['--%s=%s' % (key, value) for key,value in
args.iteritems()])
run("%s %s %s" % (command , script_name, parameters))
def commit(msg=None):
"""
Commit your changes to git
:msg: @todo
:returns: @todo
"""
    print '---Committing---'
print
msg = msg or prompt('Commit message: ')
commit = False
commit = prompt('Confirm commit? [y/n]') == 'y'
if commit:
with settings(warn_only=True):
_commit = not local('git commit -a -m "%s"' % msg).failed
if not _commit:
#nothing was committed
commit = False
print "Nothing to commit"
else:
abort('commit aborted')
print
print '---Done---'
return commit
def tag_names(number = 10, stable=False):
number = int(number)
print "fetching tags first"
local('git fetch --tags ')
print "Showing latest tags for reference"
tags = local('git tag -n1 ', capture = True)
tags = [x for x in tags.split('\n') if TAG_REGEX.findall(x) and
(not stable or STABLE_MSG in x)]
tags.sort(reverse=True)
#take the first <number> things in the list
tags = tags[0:min(len(tags), number)]
print '\n'.join(tags)
print
return tags
def check_tag_format(tag):
"""
Checks the tag format and returns the component parts
"""
parsed = tag.split('.')
try:
#allow for at most 2 minor decimals...i mean comeon
major = int(parsed[0])
minor = int(parsed[1])
build = int(parsed[2][0:2])
return (major, minor, build)
except Exception as e:
print e
abort("""Must be of the form <major_version>.<minor>.<maintence>, like
0.0.1. Only integers allowed""")
def write_version(version):
"""
Write out the version python file to the link directory before installing
version needs to be a list or tuple of the form (<major>, <minor>, <build>)
or a string in the format <major>.<minor>.<build> all ints
"""
file_name ='link/__init__.py'
init = open(file_name)
init_read = init.readlines()
init.close()
version_line = [idx for idx, x in enumerate(init_read) if '__version__ = ' in x]
if len(version_line)>1:
raise Exception('version is in there more than once')
if isinstance(version, str):
try:
version_split = map(int, version.split('.'))
except:
raise Exception("Version string must be in the format <major>.<minor>.<build>")
if not isinstance(version_split, (list, tuple)) or len(version_split)!=3:
raise Exception('invalid version %s' % version)
init_read[version_line[0]] = "__version__ = '%s'\n" % version
init = open(file_name, 'w')
try:
init.write(''.join(init_read))
finally:
init.close()
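# Example (sketch): write_version('1.2.3') rewrites the __version__ line of
# link/__init__.py to read __version__ = '1.2.3'.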
def prompt_for_tag(default_offset=1, stable_only = False):
"""
Prompt for the tag you want to use, offset for the default by input
"""
tags = tag_names(10, stable_only)
print "Showing latest tags for reference"
default = '0.0.1'
if tags:
default = tags[0]
(major, minor, build) = check_tag_format(default)
build = build+default_offset
new_default = '%s.%s.%s' % (major, minor, build)
tag = prompt('Tag name [in format x.xx] (default: %s) ? ' % new_default)
tag = tag or new_default
return tag
def push_to_pypi():
"""
Will push the code to pypi
"""
if prompt('would you like to tag a new version first [y/n]') == 'y':
tag()
local('python setup.py sdist upload')
def prompt_commit():
"""
prompts if you would like to commit
"""
local('git status')
print
print
_commit = prompt('Do you want to commit? [y/n]') == 'y'
if _commit:
msg = prompt('Commit message: ')
return commit(msg)
def tag(mark_stable=False):
"""
Tag a release, will prompt you for the tag version. You can mark it as
stable here as well
"""
tag = prompt_for_tag()
print "writing this tag version to version.py before commiting"
write_version(tag)
print
_commit = prompt_commit()
print
if not _commit and not tag:
print
print "Nothing commited, using default tag %s" % default
print
tag = default
else:
msg = ''
if mark_stable:
msg = STABLE_MSG + ' '
msg += prompt("enter msg for tag: ")
local('git tag %(ref)s -m "%(msg)s"' % { 'ref': tag, 'msg':msg})
local('git push --tags')
return tag
def merge(branch=None, merge_to = 'master'):
"""
Merge your changes and delete the old branch
"""
if not branch:
print "no branch specified, using current"
branch = current_branch()
    if prompt('confirm merge of branch %s to %s [y/N]' % (branch, merge_to)) == 'y':
prompt_commit()
local('git checkout %s ' % merge_to)
local('git merge %s ' % branch)
if prompt('delete the old branch locally and remotely? [y/N]') == 'y':
local('git branch -d %s' % branch)
local('git push origin :%s' % branch)
else:
print "leaving branch where it is"
if prompt('push results [y/N]' ) == 'y':
local('git push')
def tag_deploy(mark_stable=False):
"""
Asks you to tag this release and Figures out what branch you are on.
It then calls the deploy function
"""
local('git fetch --tags')
branch = local('git branch | grep "^*" | cut -d" " -f2', capture=True)
_tag = tag(mark_stable=mark_stable)
deploy(_tag, branch)
def retag(tag, msg):
"""
Retag a tag with a new message
"""
local('git tag %s %s -f -m "%s"' % (tag, tag, msg))
local('git push --tags')
def mark_stable(tag, msg = None):
"""
Mark a previous tag as stable
"""
retag(tag, '%s %s' % (STABLE_MSG, msg) )
def current_branch():
current_branch = local('git branch | grep "^*"', capture=True).lstrip('* ')
print "Current branch is %s" % current_branch
return current_branch
def deploy(tag=None, branch=None, stable_only=False):
"""
This is only for deployment on a dev box where everything can be owned by
this user. This is NOT for production deployment. Put's the code in
code_dir
"""
if not tag:
tag = prompt_for_tag(0, stable_only = stable_only)
configure()
setup_environment()
#check out all the code in the right place
with cd(code_dir):
# i **THINK** you have to have the branch checked out before you can
# checkout the tag
if branch:
#then you haven't even checkout this branch
branches = run('git branch')
if branch not in branches:
run('git checkout -b %s' % branch)
_current_branch = current_branch()
if "* %s" % branch != _current_branch:
run('git checkout %s' % branch)
#pull the latest
run('git pull origin %s' % branch)
else:
run("git pull origin master")
#check out a specific tag
if tag:
run("git fetch --tags")
run("git checkout %s" % tag)
#hacky
if env.user == 'root':
#make sure everything is still owned by the deployer
run('chown -R %s %s' % (deploy_user, code_dir))
###
# How to setup a fresh box. You probably have to run this as root for it to
# work
###
def install_easy_install():
"""
Installs setup tool, this should also go into an RPM
"""
run('wget http://pypi.python.org/packages/2.7/s/setuptools/setuptools-0.6c11-py2.7.egg#md5=fe1f997bc722265116870bc7919059ea')
run('sh setuptools-0.6c11-py2.7.egg')
def install_python():
"""
Installs python, I should be able to create an RPM eventually
"""
run('wget http://python.org/ftp/python/2.7.2/Python-2.7.2.tgz')
run('tar -xvf Python-2.7.2.tgz')
with cd('Python-2.7.2'):
run('./configure')
run('make')
run('make install')
###
# This isn't really necessary but I'll keep it for now
###
def install_python_dependancies():
"""
Easy install all the packages we need
"""
run('easy_install requests')
run('easy_install numpy')
run('easy_install pandas')
run('easy_install happybase')
run('easy_install flask')
run('easy_install ipython')
run('easy_install gunicorn')
run('easy_install link')
run('easy_install pymongo')
run('easy_install mysql-python')
run('easy_install docutils')
def install_box_libraries():
"""
Installs the libs you need like readlines and libsqlite. This will only
run on a ubuntu machine with apt-get
"""
with settings(warn_only=True):
has_apt = run('which apt-get')
if has_apt:
run('apt-get install make')
run('apt-get install libsqlite3-dev')
run('apt-get install libreadline6 libreadline6-dev')
run('apt-get install libmysqlclient-dev')
else:
print "this is not an ubuntu system...skipping"
def setup_box():
"""
Will install python and all libs needed to set up this box to run the
examjam code. Eventually this needs to be more RPM based
"""
#place_pub_key()
install_box_libraries()
install_python()
install_easy_install()
install_python_dependancies()
# === FLHerne/mapgen :: hackystuff.py (gpl-2.0) ===
import numpy
class TerrainType:
DEEPW = 0
WATER = 9
ROCKS = 10
BOGGY = 11
GRASS = 1
SANDY = 2
SNOWY = 3
TREES = 4
PLANK = 5
FLOOR = 6
ROOFD = 12
WALLS = 7
GLASS = 8
BuildCosts = {
TerrainType.DEEPW: 80,
TerrainType.WATER: 40,
TerrainType.ROCKS: 8,
TerrainType.BOGGY: 24,
TerrainType.GRASS: 4,
TerrainType.SANDY: 6,
TerrainType.SNOWY: 10,
TerrainType.TREES: 8,
TerrainType.PLANK: 1,
TerrainType.FLOOR: 1,
TerrainType.ROOFD: 1,
TerrainType.WALLS: 20,
TerrainType.GLASS: 20
}
ABuildCosts = [BuildCosts[i] if i in BuildCosts else 0 for i in range(max(BuildCosts.keys())+1)]
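# ABuildCosts flattens BuildCosts into a list indexed by terrain value, so e.g.
# ABuildCosts[TerrainType.WATER] == 40; values missing from the dict default to 0.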
TypeColors = {
TerrainType.DEEPW: (0 , 0 ,255),
TerrainType.WATER: (0 ,127,255),
TerrainType.ROCKS: (127,127,127),
TerrainType.BOGGY: (64 ,127,127),
TerrainType.GRASS: (0 ,255, 0),
TerrainType.SANDY: (127,127, 0),
TerrainType.SNOWY: (255,255,255),
TerrainType.TREES: (64 ,127, 64),
TerrainType.PLANK: (127, 64, 0),
TerrainType.FLOOR: (255,255,127),
TerrainType.ROOFD: (128, 0 ,128),
TerrainType.WALLS: (0 , 0 , 0),
TerrainType.GLASS: (0 ,255,255)
}
colordtype = numpy.dtype([('r', numpy.uint8), ('g', numpy.uint8), ('b', numpy.uint8)])
ATypeColors = numpy.array([TypeColors[i] if i in TypeColors else 0 for i in range(max(TypeColors.keys())+1)], dtype=colordtype)
| gpl-2.0 | -3,909,303,735,207,333,400 | 25.490566 | 127 | 0.621083 | false | 2.480565 | false | false | false |
# === manxueitp/cozmo-test :: cozmo_sdk_examples/tutorials/03_vision/01_light_when_face.py (mit) ===
#!/usr/bin/env python3
# Copyright (c) 2016 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Wait for Cozmo to see a face, and then turn on his backpack light.
This is a script to show off faces, and how they are easy to use.
It waits for a face, and then will light up his backpack when that face is visible.
'''
import asyncio
import time
import cozmo
def light_when_face(robot: cozmo.robot.Robot):
'''The core of the light_when_face program'''
# Move lift down and tilt the head up
robot.move_lift(-3)
robot.set_head_angle(cozmo.robot.MAX_HEAD_ANGLE).wait_for_completed()
face = None
print("Press CTRL-C to quit")
while True:
if face and face.is_visible:
robot.set_all_backpack_lights(cozmo.lights.blue_light)
else:
robot.set_backpack_lights_off()
        # Wait until we can see another face
try:
face = robot.world.wait_for_observed_face(timeout=30)
except asyncio.TimeoutError:
print("Didn't find a face.")
return
time.sleep(.1)
cozmo.run_program(light_when_face, use_viewer=True, force_viewer_on_top=True)
# === googleinterns/inventory-visualizer :: backend/authentication/token_controller.py (apache-2.0) ===
import random
import string
import config
import os
class TokenControllerSingleton:
secret_key = None
@staticmethod
def get_secret_key():
if TokenControllerSingleton.secret_key is None:
TokenControllerSingleton()
return TokenControllerSingleton.secret_key
def get_server_secret_key(self):
with open(config.SECRET_KEY_FILE) as file:
key = file.read()
return key
def secret_key_exists(self):
return os.path.isfile(config.SECRET_KEY_FILE) and os.stat(config.SECRET_KEY_FILE).st_size != 0
def generate_secret_key(self):
letters_and_digits = string.ascii_letters + string.digits
key = ''.join((random.choice(letters_and_digits) for i in range(config.secret_key_length)))
with open(config.SECRET_KEY_FILE, 'w+') as file:
file.write(key)
return key
def __init__(self):
if TokenControllerSingleton.secret_key is None:
if self.secret_key_exists():
TokenControllerSingleton.secret_key = self.get_server_secret_key()
else:
TokenControllerSingleton.secret_key = self.generate_secret_key()
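# Example usage (sketch; requires config.SECRET_KEY_FILE and config.secret_key_length
# to be defined in the project's config module):
#
#     secret = TokenControllerSingleton.get_secret_key()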
# === automl/SpySMAC :: cave/analyzer/feature_analysis/feature_clustering.py (bsd-3-clause) ===
from cave.analyzer.base_analyzer import BaseAnalyzer
from cave.analyzer.feature_analysis.feature_analysis import FeatureAnalysis
from cave.utils.helpers import check_for_features
from cave.utils.hpbandster_helpers import format_budgets
class FeatureClustering(BaseAnalyzer):
""" Clustering instances in 2d; the color encodes the cluster assigned to each cluster. Similar to ISAC, we use
a k-means to cluster the instances in the feature space. As pre-processing, we use standard scaling and a PCA to
2 dimensions. To guess the number of clusters, we use the silhouette score on the range of 2 to 12 in the number
of clusters"""
def __init__(self, runscontainer):
super().__init__(runscontainer)
check_for_features(runscontainer.scenario)
formatted_budgets = format_budgets(self.runscontainer.get_budgets())
for budget, run in zip(self.runscontainer.get_budgets(),
self.runscontainer.get_aggregated(keep_budgets=True, keep_folders=False)):
imp = run.share_information['feature_importance']
self.result[formatted_budgets[budget]] = self.feat_analysis(
output_dir=run.output_dir,
scenario=run.scenario,
feat_names=run.feature_names,
feat_importance=imp,
)
def get_name(self):
return "Feature Clustering"
def feat_analysis(self,
output_dir,
scenario,
feat_names,
feat_importance,
):
feat_analysis = FeatureAnalysis(output_dn=output_dir,
scenario=scenario,
feat_names=feat_names,
feat_importance=feat_importance)
return {'figure': feat_analysis.cluster_instances()}
# === polyaxon/polyaxon :: sdks/python/http_client/v1/polyaxon_sdk/models/v1_project.py (apache-2.0) ===
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.10.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1Project(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'uuid': 'str',
'owner': 'str',
'name': 'str',
'description': 'str',
'tags': 'list[str]',
'created_at': 'datetime',
'updated_at': 'datetime',
'is_public': 'bool',
'bookmarked': 'bool',
'readme': 'str',
'excluded_runtimes': 'list[str]',
'settings': 'V1ProjectSettings',
'role': 'str',
'live_state': 'int'
}
attribute_map = {
'uuid': 'uuid',
'owner': 'owner',
'name': 'name',
'description': 'description',
'tags': 'tags',
'created_at': 'created_at',
'updated_at': 'updated_at',
'is_public': 'is_public',
'bookmarked': 'bookmarked',
'readme': 'readme',
'excluded_runtimes': 'excluded_runtimes',
'settings': 'settings',
'role': 'role',
'live_state': 'live_state'
}
def __init__(self, uuid=None, owner=None, name=None, description=None, tags=None, created_at=None, updated_at=None, is_public=None, bookmarked=None, readme=None, excluded_runtimes=None, settings=None, role=None, live_state=None, local_vars_configuration=None): # noqa: E501
"""V1Project - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._uuid = None
self._owner = None
self._name = None
self._description = None
self._tags = None
self._created_at = None
self._updated_at = None
self._is_public = None
self._bookmarked = None
self._readme = None
self._excluded_runtimes = None
self._settings = None
self._role = None
self._live_state = None
self.discriminator = None
if uuid is not None:
self.uuid = uuid
if owner is not None:
self.owner = owner
if name is not None:
self.name = name
if description is not None:
self.description = description
if tags is not None:
self.tags = tags
if created_at is not None:
self.created_at = created_at
if updated_at is not None:
self.updated_at = updated_at
if is_public is not None:
self.is_public = is_public
if bookmarked is not None:
self.bookmarked = bookmarked
if readme is not None:
self.readme = readme
if excluded_runtimes is not None:
self.excluded_runtimes = excluded_runtimes
if settings is not None:
self.settings = settings
if role is not None:
self.role = role
if live_state is not None:
self.live_state = live_state
@property
def uuid(self):
"""Gets the uuid of this V1Project. # noqa: E501
:return: The uuid of this V1Project. # noqa: E501
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this V1Project.
:param uuid: The uuid of this V1Project. # noqa: E501
:type: str
"""
self._uuid = uuid
@property
def owner(self):
"""Gets the owner of this V1Project. # noqa: E501
:return: The owner of this V1Project. # noqa: E501
:rtype: str
"""
return self._owner
@owner.setter
def owner(self, owner):
"""Sets the owner of this V1Project.
:param owner: The owner of this V1Project. # noqa: E501
:type: str
"""
self._owner = owner
@property
def name(self):
"""Gets the name of this V1Project. # noqa: E501
:return: The name of this V1Project. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1Project.
:param name: The name of this V1Project. # noqa: E501
:type: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this V1Project. # noqa: E501
:return: The description of this V1Project. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this V1Project.
:param description: The description of this V1Project. # noqa: E501
:type: str
"""
self._description = description
@property
def tags(self):
"""Gets the tags of this V1Project. # noqa: E501
:return: The tags of this V1Project. # noqa: E501
:rtype: list[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this V1Project.
:param tags: The tags of this V1Project. # noqa: E501
:type: list[str]
"""
self._tags = tags
@property
def created_at(self):
"""Gets the created_at of this V1Project. # noqa: E501
:return: The created_at of this V1Project. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this V1Project.
:param created_at: The created_at of this V1Project. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def updated_at(self):
"""Gets the updated_at of this V1Project. # noqa: E501
:return: The updated_at of this V1Project. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this V1Project.
:param updated_at: The updated_at of this V1Project. # noqa: E501
:type: datetime
"""
self._updated_at = updated_at
@property
def is_public(self):
"""Gets the is_public of this V1Project. # noqa: E501
:return: The is_public of this V1Project. # noqa: E501
:rtype: bool
"""
return self._is_public
@is_public.setter
def is_public(self, is_public):
"""Sets the is_public of this V1Project.
:param is_public: The is_public of this V1Project. # noqa: E501
:type: bool
"""
self._is_public = is_public
@property
def bookmarked(self):
"""Gets the bookmarked of this V1Project. # noqa: E501
:return: The bookmarked of this V1Project. # noqa: E501
:rtype: bool
"""
return self._bookmarked
@bookmarked.setter
def bookmarked(self, bookmarked):
"""Sets the bookmarked of this V1Project.
:param bookmarked: The bookmarked of this V1Project. # noqa: E501
:type: bool
"""
self._bookmarked = bookmarked
@property
def readme(self):
"""Gets the readme of this V1Project. # noqa: E501
:return: The readme of this V1Project. # noqa: E501
:rtype: str
"""
return self._readme
@readme.setter
def readme(self, readme):
"""Sets the readme of this V1Project.
:param readme: The readme of this V1Project. # noqa: E501
:type: str
"""
self._readme = readme
@property
def excluded_runtimes(self):
"""Gets the excluded_runtimes of this V1Project. # noqa: E501
:return: The excluded_runtimes of this V1Project. # noqa: E501
:rtype: list[str]
"""
return self._excluded_runtimes
@excluded_runtimes.setter
def excluded_runtimes(self, excluded_runtimes):
"""Sets the excluded_runtimes of this V1Project.
:param excluded_runtimes: The excluded_runtimes of this V1Project. # noqa: E501
:type: list[str]
"""
self._excluded_runtimes = excluded_runtimes
@property
def settings(self):
"""Gets the settings of this V1Project. # noqa: E501
:return: The settings of this V1Project. # noqa: E501
:rtype: V1ProjectSettings
"""
return self._settings
@settings.setter
def settings(self, settings):
"""Sets the settings of this V1Project.
:param settings: The settings of this V1Project. # noqa: E501
:type: V1ProjectSettings
"""
self._settings = settings
@property
def role(self):
"""Gets the role of this V1Project. # noqa: E501
:return: The role of this V1Project. # noqa: E501
:rtype: str
"""
return self._role
@role.setter
def role(self, role):
"""Sets the role of this V1Project.
:param role: The role of this V1Project. # noqa: E501
:type: str
"""
self._role = role
@property
def live_state(self):
"""Gets the live_state of this V1Project. # noqa: E501
:return: The live_state of this V1Project. # noqa: E501
:rtype: int
"""
return self._live_state
@live_state.setter
def live_state(self, live_state):
"""Sets the live_state of this V1Project.
:param live_state: The live_state of this V1Project. # noqa: E501
:type: int
"""
self._live_state = live_state
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Project):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Project):
return True
return self.to_dict() != other.to_dict()
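# Example usage (sketch):
#
#     project = V1Project(name="demo", description="example project", tags=["test"])
#     project.to_dict()   # serialize the model to a plain dict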
# === lneuhaus/pyrpl :: pyrpl/widgets/module_widgets/acquisition_module_widget.py (gpl-3.0) ===
"""
Acquisition modules are the modules used to acquire data from the Red Pitaya.
At the moment, they include the
* :mod:`~pyrpl.hardware_modules.scope`,
* :mod:`~pyrpl.software_modules.network_analyzer`,
* :mod:`~pyrpl.software_modules.spectrum_analyzer`.
All the acquisition modules have in common a plot area where the data is
displayed and a control panel BELOW the plot for changing acquisition
settings. Widgets for specialized acquisition modules
(e.g. :class:`~pyrpl.hardware_modules.scope.Scope`) have an additional control
panel ABOVE the plot area for settings that are only available for that module.
The different buttons in the acquisition module control panel below the plot are:
- :attr:`~.AcquisitionModule.trace_average` chooses the number of successive traces to average together.
- :attr:`~.AcquisitionModule.curve_name` is the name for the next curve that is saved.
- :code:`Run single` starts a single acquisition of :code:`trace_average` traces (calls :meth:`.AcquisitionModule.single`).
- :code:`Run continuous` starts a continuous acquisition with a running
average filter, where :code:`trace_average` is the decay constant of the
running average filter (calls :meth:`.AcquisitionModule.continuous`).
- :code:`Restart average` resets trace averages to zero to start a new
measurement from scratch.
- :code:`Save curve` saves the current measurement data to a new
:class:`pyrpl.curvedb.CurveDB` object under the name
:attr:`~.AcquisitionModule.curve_name`.
"""
from . import ModuleWidget
from qtpy import QtCore, QtWidgets
class CurrentAvgLabel(QtWidgets.QWidget):
def __init__(self, parent=None):
super(CurrentAvgLabel, self).__init__(parent)
self.main_lay = QtWidgets.QVBoxLayout()
self.setLayout(self.main_lay)
self.label = QtWidgets.QLabel("current_avg")
self.main_lay.addWidget(self.label)
self.value_label = QtWidgets.QLabel("0 /")
self.main_lay.addWidget(self.value_label)
self.main_lay.addStretch(1)
self.value_label.setAlignment(QtCore.Qt.AlignCenter)
self.main_lay.setContentsMargins(0,0,0,0)
def set_value(self, val):
self.value_label.setText(str(val) + ' /')
class AcquisitionModuleWidget(ModuleWidget):
def init_gui(self):
self.button_single = QtWidgets.QPushButton("Run single")
self.button_single.clicked.connect(self.run_single_clicked)
self.button_continuous = QtWidgets.QPushButton("Run continuous")
self.button_continuous.clicked.connect(self.run_continuous_clicked)
self.button_restart_averaging = QtWidgets.QPushButton(
'Restart averaging')
self.button_restart_averaging.clicked.connect(self.restart_clicked)
self.button_save = QtWidgets.QPushButton("Save curve")
self.button_save.clicked.connect(self.module.save_curve)
self.current_avg_label = CurrentAvgLabel()
aws = self.attribute_widgets
self.attribute_layout.removeWidget(aws["trace_average"])
self.attribute_layout.removeWidget(aws["curve_name"])
self.button_layout.addWidget(self.current_avg_label)
self.button_layout.addWidget(aws["trace_average"])
self.button_layout.addWidget(aws["curve_name"])
self.button_layout.addWidget(self.button_single)
self.button_layout.addWidget(self.button_continuous)
self.button_layout.addWidget(self.button_restart_averaging)
self.button_layout.addWidget(self.button_save)
self.main_layout.addLayout(self.button_layout)
self.button_layout.setStretchFactor(self.button_single, 1)
self.button_layout.setStretchFactor(self.button_continuous, 1)
self.button_layout.setStretchFactor(self.button_restart_averaging, 1)
self.button_layout.setStretchFactor(self.button_save, 1)
self.button_layout.addStretch(1)
self.attribute_layout.setStretch(0, 0) # since widgets are all removed
# and re-added, the stretch ends up on the left, so better cancel it
# and make a new one at the end
def run_single_clicked(self):
if str(self.button_single.text()).startswith("Run single"):
self.module.single_async()
else:
self.module.stop()
def run_continuous_clicked(self):
"""
Toggles the button run_continuous to stop or vice versa and starts
he acquisition timer
"""
if str(self.button_continuous.text()).startswith("Run continuous"):
self.module.continuous()
else:
self.module.pause()
def restart_clicked(self):
old_running_state = self.module.running_state
self.module.stop()
if old_running_state in ["running_single", "running_continuous"]:
self.module.running_state = old_running_state
self.update_current_average()
def update_current_average(self):
self.current_avg_label.set_value(self.module.current_avg)
def update_running_buttons(self):
"""
Change text of Run continuous button and visibility of run single button
according to module.running_continuous
"""
self.update_current_average()
if self.module.current_avg>0:
number_str = ' (' + str(self.module.current_avg) + ")"
else:
number_str = ""
if self.module.running_state == 'running_continuous':
#if self.module.current_avg >= self.module.trace_average:
# # shows a plus sign when number of averages is available
# number_str = number_str[:-1] + '+)'
self.button_continuous.setText("Pause")# + number_str)
self.button_single.setText("Run single")
self.button_single.setEnabled(False)
else:
if self.module.running_state == "running_single":
self.button_continuous.setText("Run continuous")
self.button_single.setText("Stop")# + number_str)
self.button_single.setEnabled(True)
else:
self.button_continuous.setText("Run continuous")# + number_str)
self.button_single.setText("Run single")
self.button_single.setEnabled(True)
# === sn6uv/gmpy_cffi :: gmpy_cffi/cache.py (bsd-3-clause) ===
import sys
from gmpy_cffi.interface import ffi, gmp
if sys.version > '3':
long = int
xrange = range
cache_size = 100
cache_obsize = 128
def get_cache():
"""
get_cache() -> (cache_size, object_size)
Return the current cache size (number of objects) and maximum size
per object (number of limbs) for all GMPY2 objects.
"""
return cache_size, cache_obsize
def set_cache(size, obsize):
"""
set_cache(cache_size, object_size)
Set the current cache size (number of objects) and the maximum size
per object (number of limbs). Raises ValueError if cache size exceeds
1000 or object size exceeds 16384
"""
global cache_size, cache_obsize
if not isinstance(size, (int, long)):
raise TypeError("integer argument expected, got %s" % type(size))
if not isinstance(obsize, (int, long)):
raise TypeError("integer argument expected, got %s" % type(obsize))
if size < 0 or size > 1000:
raise ValueError("cache size must between 0 and 1000")
if obsize < 0 or obsize > 16384:
raise ValueError("object size must between 0 and 16384")
cache_size = size
cache_obsize = obsize
_init_mpz_cache()
_init_mpq_cache()
_init_mpfr_cache()
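# Example (sketch): set_cache(200, 256) keeps up to 200 cached objects of at most
# 256 limbs each, after which get_cache() returns (200, 256).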
# MPZ
def _init_mpz_cache():
global mpz_cache, in_mpz_cache
mpz_cache = []
in_mpz_cache = cache_size
for _ in xrange(cache_size):
mpz = ffi.new("mpz_t")
gmp.mpz_init(mpz)
mpz_cache.append(mpz)
_init_mpz_cache()
def _new_mpz():
"""Return an initialized mpz_t."""
global in_mpz_cache
if in_mpz_cache:
in_mpz_cache -= 1
return mpz_cache[in_mpz_cache]
else:
mpz = ffi.new("mpz_t")
gmp.mpz_init(mpz)
return mpz
def _del_mpz(mpz):
global in_mpz_cache
if in_mpz_cache < cache_size:
if ffi.sizeof(mpz[0]) <= cache_obsize:
mpz_cache[in_mpz_cache] = mpz
else:
mpz_cache[in_mpz_cache] = ffi.new('mpz_t')
in_mpz_cache += 1
else:
gmp.mpz_clear(mpz)
# MPQ
def _init_mpq_cache():
global mpq_cache, in_mpq_cache
mpq_cache = []
in_mpq_cache = cache_size
for _ in xrange(cache_size):
mpq = ffi.new("mpq_t")
gmp.mpq_init(mpq)
mpq_cache.append(mpq)
_init_mpq_cache()
def _new_mpq():
"""Return an initialized mpq_t."""
global in_mpq_cache
if in_mpq_cache:
in_mpq_cache -= 1
return mpq_cache[in_mpq_cache]
else:
mpq = ffi.new("mpq_t")
gmp.mpq_init(mpq)
return mpq
def _del_mpq(mpq):
global in_mpq_cache
if in_mpq_cache < cache_size:
if ffi.sizeof(mpq[0]) <= cache_obsize:
mpq_cache[in_mpq_cache] = mpq
else:
mpq_cache[in_mpq_cache] = ffi.new('mpq_t')
in_mpq_cache += 1
else:
gmp.mpq_clear(mpq)
# MPFR
def _init_mpfr_cache():
global mpfr_cache, in_mpfr_cache
mpfr_cache = []
in_mpfr_cache = cache_size
for _ in xrange(cache_size):
mpfr = ffi.new("mpfr_t")
gmp.mpfr_init(mpfr)
mpfr_cache.append(mpfr)
_init_mpfr_cache()
def _new_mpfr(prec=0):
"""Return an initialized mpfr_t."""
global in_mpfr_cache
if isinstance(prec, (int, long)):
if not (prec == 0 or gmp.MPFR_PREC_MIN <= prec <= gmp.MPFR_PREC_MAX):
raise ValueError("invalid prec %i (wanted %s <= prec <= %s)" % (
prec, gmp.MPFR_PREC_MIN, gmp.MPFR_PREC_MAX))
else:
raise TypeError('an integer is required')
if in_mpfr_cache:
in_mpfr_cache -= 1
# Set default precision
if prec == 0:
gmp.mpfr_set_prec(mpfr_cache[in_mpfr_cache], gmp.mpfr_get_default_prec())
else:
gmp.mpfr_set_prec(mpfr_cache[in_mpfr_cache], prec)
return mpfr_cache[in_mpfr_cache]
else:
mpfr = ffi.new("mpfr_t")
if prec == 0:
gmp.mpfr_init(mpfr)
else:
gmp.mpfr_init2(mpfr, prec)
return mpfr
def _del_mpfr(mpfr):
global in_mpfr_cache
if in_mpfr_cache < cache_size:
if ffi.sizeof(mpfr[0]) <= cache_obsize:
mpfr_cache[in_mpfr_cache] = mpfr
else:
mpfr_cache[in_mpfr_cache] = ffi.new('mpfr_t')
in_mpfr_cache += 1
else:
gmp.mpfr_clear(mpfr)
# MPC
def _init_mpc_cache():
global mpc_cache, in_mpc_cache
mpc_cache = []
in_mpc_cache = cache_size
for _ in xrange(cache_size):
mpc = ffi.new("mpc_t")
gmp.mpc_init2(mpc, gmp.mpfr_get_default_prec())
mpc_cache.append(mpc)
_init_mpc_cache()
def _new_mpc(prec=(0,0)):
"""Return an initialized mpc_t."""
global in_mpc_cache
# prec is assumed to be checked already
rprec, iprec = prec
if not all(p == 0 or gmp.MPFR_PREC_MIN <= p <= gmp.MPFR_PREC_MAX
for p in prec):
raise ValueError(
"invalid prec (wanted prec == 0 or %s <= prec <= %s)" % (
gmp.MPFR_PREC_MIN, gmp.MPFR_PREC_MAX))
if in_mpc_cache:
in_mpc_cache -= 1
# Set default precision
if rprec == iprec:
if rprec == 0:
gmp.mpc_set_prec(mpc_cache[in_mpc_cache], gmp.mpfr_get_default_prec())
else:
gmp.mpc_set_prec(mpc_cache[in_mpc_cache], rprec)
else:
if rprec == 0:
rprec = gmp.mpfr_get_default_prec()
if iprec == 0:
iprec = gmp.mpfr_get_default_prec()
gmp.mpc_clear(mpc_cache[in_mpc_cache])
gmp.mpc_init3(mpc_cache[in_mpc_cache], rprec, iprec)
return mpc_cache[in_mpc_cache]
else:
mpc = ffi.new("mpc_t")
if rprec == 0:
rprec = gmp.mpfr_get_default_prec()
if iprec == 0:
iprec = gmp.mpfr_get_default_prec()
if rprec == iprec:
gmp.mpc_init2(mpc, rprec)
else:
gmp.mpc_init3(mpc, rprec, iprec)
return mpc
def _del_mpc(mpc):
global in_mpc_cache
if in_mpc_cache < cache_size:
mpc_cache[in_mpc_cache] = mpc
# FIXME This doesn't seem to be working properly
if ffi.sizeof(mpc[0]) <= cache_obsize:
mpc_cache[in_mpc_cache] = mpc
else:
mpc_cache[in_mpc_cache] = ffi.new('mpc_t')
# === WholeGrainGoats/servo :: tests/wpt/web-platform-tests/annotation-vocab/tools/vocab_tester.py (mpl-2.0) ===
# Author: Rob Sanderson ([email protected])
# License: Apache2
# Last Modified: 2016-09-02
import json
from rdflib import ConjunctiveGraph, URIRef
from pyld import jsonld
from pyld.jsonld import compact, expand, frame, from_rdf, to_rdf, JsonLdProcessor
import urllib
# Stop code from looking up the contexts online for every operation
docCache = {}
def fetch(url):
fh = urllib.urlopen(url)
data = fh.read()
fh.close()
return data
def load_document_and_cache(url):
if docCache.has_key(url):
return docCache[url]
doc = {
'contextUrl': None,
'documentUrl': None,
'document': ''
}
data = fetch(url)
doc['document'] = data;
docCache[url] = doc
return doc
jsonld.set_document_loader(load_document_and_cache)
class Validator(object):
def __init__(self):
self.rdflib_class_map = {
"Annotation": "oa:Annotation",
"Dataset": "dctypes:Dataset",
"Image": "dctypes:StillImage",
"Video": "dctypes:MovingImage",
"Audio": "dctypes:Sound",
"Text": "dctypes:Text",
"TextualBody": "oa:TextualBody",
"ResourceSelection": "oa:ResourceSelection",
"SpecificResource": "oa:SpecificResource",
"FragmentSelector": "oa:FragmentSelector",
"CssSelector": "oa:CssSelector",
"XPathSelector": "oa:XPathSelector",
"TextQuoteSelector": "oa:TextQuoteSelector",
"TextPositionSelector": "oa:TextPositionSelector",
"DataPositionSelector": "oa:DataPositionSelector",
"SvgSelector": "oa:SvgSelector",
"RangeSelector": "oa:RangeSelector",
"TimeState": "oa:TimeState",
"HttpState": "oa:HttpRequestState",
"CssStylesheet": "oa:CssStyle",
"Choice": "oa:Choice",
"Composite": "oa:Composite",
"List": "oa:List",
"Independents": "oa:Independents",
"Person": "foaf:Person",
"Software": "as:Application",
"Organization": "foaf:Organization",
"AnnotationCollection": "as:OrderedCollection",
"AnnotationPage": "as:OrderedCollectionPage",
"Audience": "schema:Audience"
}
def _clean_bnode_ids(self, js):
new = {}
for (k,v) in js.items():
if k == 'id' and v.startswith("_:"):
continue
elif type(v) == dict:
# recurse
res = self._clean_bnode_ids(v)
new[k] = res
else:
new[k] = v
return new
def _mk_rdflib_jsonld(self, js):
# rdflib's json-ld implementation sucks
# Pre-process to make it work
# recurse the structure looking for types, and replacing them.
new = {}
for (k,v) in js.items():
if k == 'type':
if type(v) == list:
nl = []
for i in v:
if self.rdflib_class_map.has_key(i):
nl.append(self.rdflib_class_map[i])
new['type'] = nl
else:
if self.rdflib_class_map.has_key(v):
new['type'] = self.rdflib_class_map[v]
elif type(v) == dict:
# recurse
res = self._mk_rdflib_jsonld(v)
new[k] = res
else:
new[k] = v
return new
def json_to_rdf(self, js, fmt=None):
d2 = self._mk_rdflib_jsonld(js)
js = json.dumps(d2)
g = ConjunctiveGraph()
g.parse(data=js, format='json-ld')
if fmt:
out = g.serialize(format=fmt)
return out
else:
return g
def rdf_to_jsonld(self, rdf, fmt):
g = ConjunctiveGraph()
g.parse(data=rdf, format=fmt)
out = g.serialize(format='json-ld')
j2 = json.loads(out)
j2 = {"@context": context_js, "@graph": j2}
framed = frame(j2, frame_js)
out = compact(framed, context_js)
# recursively clean blank node ids
#out = self._clean_bnode_ids(out)
return out
def compact_and_clean(self, js):
newjs = compact(js, context_js)
newjs['@context'] = context
if newjs.has_key("@graph"):
for k,v in newjs['@graph'].items():
newjs[k] = v
del newjs['@graph']
return newjs
validator = Validator()
example = "https://raw.githubusercontent.com/w3c/web-annotation/gh-pages/model/wd2/examples/correct/anno4.json"
example_ttl = "https://raw.githubusercontent.com/w3c/web-annotation/gh-pages/vocab/wd/examples/correct/anno1.ttl"
context = "http://www.w3.org/ns/anno.jsonld"
frameURI = "https://raw.githubusercontent.com/w3c/web-annotation/gh-pages/jsonld/annotation_frame.jsonld"
# ontology = "https://www.w3.org/ns/oa.ttl"
ontology = "https://raw.githubusercontent.com/w3c/web-annotation/gh-pages/vocab/wd/ontology/oa.ttl"
data = fetch(context)
context_js = json.loads(data)
data = fetch(example)
example_js = json.loads(data)
data = fetch(frameURI)
frame_js = json.loads(data)
# Test1: JSON-LD context document can be parsed without errors by JSON-LD validators
# Context document is parsable if it can be loaded and used to expand the example
try:
expanded = expand(example_js, context_js)
except:
print "Context is invalid, failed Test 1"
# Test2: JSON-LD context document can be used to convert JSON-LD serialized Annotations into RDF triples.
try:
jsonld_nq = to_rdf(example_js, {"base": "http://example.org/", "format": "application/nquads"})
except:
print "Cannot use context to convert JSON-LD to NQuads"
# Test3: Graphs produced are isomorphic
try:
rl_g = validator.json_to_rdf(example_js)
g = ConjunctiveGraph()
js_g = g.parse(data=jsonld_nq, format="nt")
rl_g_nq = rl_g.serialize(format="nquads")
assert(len(rl_g.store) == len(js_g.store))
assert(rl_g.isomorphic(js_g))
except:
print "Different triples from two parsers, or non-isomorphic graphs"
# Test4: The graphs produced can be converted back into JSON-LD without loss of information
try:
js = validator.rdf_to_jsonld(jsonld_nq, "nt")
js2 = validator.compact_and_clean(js)
assert(js2 == example_js)
except:
print "Failed to recompact parsed data"
raise
# Test5: ontology documents can be parsed without errors by validators
try:
g = ConjunctiveGraph().parse(ontology, format="turtle")
except:
raise
# Test6: ontology is internally consistent with respect to domains, ranges, etc
# step 1: find all the classes.
rdftype = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#type")
rdfsdomain = URIRef("http://www.w3.org/2000/01/rdf-schema#domain")
rdfsrange = URIRef("http://www.w3.org/2000/01/rdf-schema#range")
rdfsresource = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#Resource")
rdfssco = URIRef("http://www.w3.org/2000/01/rdf-schema#subClassOf")
asColl = URIRef("http://www.w3.org/ns/activitystreams#OrderedCollection")
skosConcept = URIRef("http://www.w3.org/2004/02/skos/core#Concept")
otherClasses = [asColl, skosConcept]
classes = list(g.subjects(rdftype, URIRef("http://www.w3.org/2000/01/rdf-schema#Class")))
props = list(g.subjects(rdftype, URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#Property")))
for p in props:
domains = list(g.objects(p, rdfsdomain))
for d in domains:
assert(d in classes)
for p in props:
ranges = list(g.objects(p, rdfsrange))
for r in ranges:
if not r in classes and not str(r).startswith("http://www.w3.org/2001/XMLSchema#") and \
not r == rdfsresource:
print "Found inconsistent property: %s has unknown range" % p
for c in classes:
parents = list(g.objects(c, rdfssco))
for p in parents:
if not p in classes and not p in otherClasses:
print "Found inconsistent class: %s has unknown superClass" % c
print "Done."
# === ngsutils/ngsutils :: ngsutils/bam/pair.py ===
#!/usr/bin/env python
## category General
## desc Given two separately mapped paired files, re-pair the files
"""
Given two separately mapped paired-end files, re-pair the files, selecting
the most likely pairing partners based upon strand, insert distance, and
maximizing alignment scores.
It is very important that the files are either in the same order with each
read present in both files or sorted in name order.
The value of the attribute/tag given will be used to determine which reads
should be kept and which should be discarded. The tag should be a numeric
(int/float) type. More than one tag can be used. The default is 'AS+, NM-'.
The BEST pair will be kept that maximizes the tag values and otherwise
satisfies strand and distance values.
"""
import os
import sys
import pysam
import ngsutils.bam
def usage(msg=None):
if msg:
print msg
print __doc__
print """
Usage: bamutils pair {opts} out.bam read1.bam read2.bam
Options
-tag VAL Tag to use to determine from which file reads will be
taken. (must be type :i or :f) You may have more than
one of these, in which case they will be sorted in
order. You can add a +/- at the end of the name to
signify sort order (asc/desc).
Default: AS+, NM-
-size low-high The minimum/maximum insert size to accept. By default,
this will attempt to minimize the distance between
reads, upto the lower-bound. Any pair over the upper
bound will be discarded. Note: for RNA, because it is
impossible to detect junctions that are between the
reads, this should be a very big range (ex: 50-1000000)
Default: 50-10000
-fail1 fname.bam Write all failed mappings from read1 to this file
    -fail2 fname.bam Write all failed mappings from read2 to this file
(Note: -fail1 and -fail2 can be the same file.)
-reason tag Write the reason for failure to this tag (only for
failed reads/mappings) Must be a valid two char name.
"""
sys.exit(1)
def is_valid_pair(read1, read2):
if read1.is_unmapped or read2.is_unmapped:
# both must be mapped
return False, 'unmapped'
if read1.tid != read2.tid:
# to the same chromosome/reference
return False, 'chromosome'
if read1.is_reverse == read2.is_reverse:
# in opposite orientations
return False, 'orientation'
# sequenced towards each other
if read1.pos < read2.pos and read1.is_reverse:
return False, 'direction'
if read2.pos < read1.pos and read2.is_reverse:
return False, 'direction'
return True, ''
def find_pairs(reads1, reads2, min_size, max_size, tags):
'''
returns pairs, fail1, fail2
'''
possible = []
fail1 = []
fail2 = []
valid = set()
reasons = {}
for r1 in reads1:
for r2 in reads2:
is_valid, reason = is_valid_pair(r1, r2)
if is_valid:
# there can be some strange edge cases for insert size, so we'll just look
# for the biggest
ins_size = max(r2.aend - r1.pos, r1.aend - r2.pos)
# This doesn't work for RNA reads - you can still have hidden introns
# between the two reads. I'm leaving this here so that when I'm tempted
# to add this check again, I'll remember why it's a bad idea.
# junctionstarts = set()
# pos = r1.pos
# for op, size in r1.cigar:
# if op == 0 or op == 2:
# pos += size
# elif op == 3:
# junctionstarts.add(pos)
# ins_size -= size
# pos = r2.pos
# for op, size in r2.cigar:
# if op == 0 or op == 2:
# pos += size
# elif op == 3:
# if not pos in junctionstarts:
# ins_size -= size
if ins_size < min_size or ins_size > max_size:
if not (1, r1.tid, r1.pos) in reasons:
reasons[(1, r1.tid, r1.pos)] = set()
if not (2, r2.tid, r2.pos) in reasons:
reasons[(2, r2.tid, r2.pos)] = set()
reasons[(1, r1.tid, r1.pos)].add('size')
reasons[(2, r2.tid, r2.pos)].add('size')
continue
tag_val = []
for tag in tags:
val = float(r1.opt(tag[:2]))
val += float(r2.opt(tag[:2]))
if tag[-1] == '+':
# we will sort ascending to minimize size, so + tags (AS) need to be reversed
val = -val
tag_val.append(val)
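                # e.g. with tags=['AS+', 'NM-'], combined AS of 30+25 and NM of 1+0,
                # tag_val becomes [-55.0, 1.0]; the later ascending pairs.sort() thus
                # prefers the highest combined AS and, on ties, the lowest combined NM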
possible.append((tag_val, ins_size, r1, r2))
valid.add((1, r1.tid, r1.pos))
valid.add((2, r2.tid, r2.pos))
else:
if not (1, r1.tid, r1.pos) in reasons:
reasons[(1, r1.tid, r1.pos)] = set()
if not (2, r2.tid, r2.pos) in reasons:
reasons[(2, r2.tid, r2.pos)] = set()
reasons[(1, r1.tid, r1.pos)].add(reason)
reasons[(2, r2.tid, r2.pos)].add(reason)
for r1 in reads1:
if not (1, r1.tid, r1.pos) in valid:
fail1.append((r1, reasons[(1, r1.tid, r1.pos)]))
for r2 in reads2:
if not (2, r2.tid, r2.pos) in valid:
fail2.append((r2, reasons[(2, r2.tid, r2.pos)]))
return possible, fail1, fail2
def bam_pair(out_fname, read1_fname, read2_fname, tags=['AS+', 'NM-'], min_size=50, max_size=1000, fail1_fname=None, fail2_fname=None, reason_tag=None, quiet=False):
bam1 = pysam.Samfile(read1_fname, "rb")
bam2 = pysam.Samfile(read2_fname, "rb")
out = pysam.Samfile('%s.tmp' % out_fname, "wb", template=bam1)
fail1 = None
fail2 = None
if fail1_fname:
fail1 = pysam.Samfile('%s.tmp' % fail1_fname, "wb", template=bam1)
if fail2_fname:
if fail2_fname == fail1_fname:
fail2 = fail1
else:
fail2 = pysam.Samfile('%s.tmp' % fail2_fname, "wb", template=bam1)
gen1 = ngsutils.bam.bam_batch_reads(bam1, quiet=quiet)
gen2 = ngsutils.bam.bam_batch_reads(bam2, quiet=True)
reads1 = None
reads2 = None
while True:
try:
if not reads1:
reads1 = gen1.next()
if not reads2:
reads2 = gen2.next()
except StopIteration:
break
if reads1[0].qname != reads2[0].qname:
if reads1[0].qname < reads2[0].qname:
reads1 = None
else:
reads2 = None
continue
pairs, failed_reads1, failed_reads2 = find_pairs(reads1, reads2, min_size, max_size, tags)
written = set()
if pairs:
pairs.sort() # default: max AS, min NM, min size
tag_val, size, r1, r2 = pairs[0]
best_val = (tag_val, size)
best_pairs = []
for tag_val, size, r1, r2 in pairs:
if (tag_val, size) == best_val:
best_pairs.append((size, r1, r2))
for size, r1, r2 in best_pairs:
# good match! set the flags and write them out
r1.is_paired = True
r2.is_paired = True
r1.is_proper_pair = True
r2.is_proper_pair = True
r1.is_read1 = True
r2.is_read2 = True
if r1.pos < r2.pos:
r1.tlen = size
r2.tlen = -size
else:
r1.tlen = -size
r2.tlen = size
r1.mate_is_reverse = r2.is_reverse
r2.mate_is_reverse = r1.is_reverse
r1.mate_is_unmapped = False
r2.mate_is_unmapped = False
r1.rnext = r2.tid
r2.rnext = r1.tid
r1.pnext = r2.pos
r2.pnext = r1.pos
r1.tags = r1.tags + [('NH', len(best_pairs))]
r2.tags = r2.tags + [('NH', len(best_pairs))]
out.write(r1)
out.write(r2)
written.add((1, r1.tid, r1.pos))
written.add((2, r2.tid, r2.pos))
for tag_val, size, r1, r2 in pairs[1:]:
if fail1:
if (1,r1.tid, r1.pos) not in written:
written.add((1,r1.tid, r1.pos))
r1.is_paired = True
r1.is_proper_pair = False
r1.is_read1 = True
if reason_tag:
r1.tags = r1.tags + [(reason_tag, 'suboptimal')]
fail1.write(r1)
if fail2:
if (2,r2.tid, r2.pos) not in written:
written.add((2,r2.tid, r2.pos))
r2.is_paired = True
r2.is_proper_pair = False
r2.is_read2 = True
if reason_tag:
r2.tags = r2.tags + [(reason_tag, 'suboptimal')]
fail2.write(r2)
if failed_reads1 and fail1:
for r1, reasons in failed_reads1:
r1.is_paired = True
r1.is_proper_pair = False
r1.is_read1 = True
if reason_tag:
r1.tags = r1.tags + [(reason_tag, ','.join(reasons))]
fail1.write(r1)
if failed_reads2 and fail2:
for r2, reasons in failed_reads2:
r2.is_paired = True
r2.is_proper_pair = False
                r2.is_read2 = True
if reason_tag:
r2.tags = r2.tags + [(reason_tag, ','.join(reasons))]
fail2.write(r2)
reads1 = None
reads2 = None
bam1.close()
bam2.close()
out.close()
os.rename('%s.tmp' % out_fname, out_fname)
if fail1:
fail1.close()
os.rename('%s.tmp' % fail1_fname, fail1_fname)
if fail2:
if fail2_fname != fail1_fname:
fail2.close()
os.rename('%s.tmp' % fail2_fname, fail2_fname)
if __name__ == '__main__':
out_fname = None
read1_fname = None
read2_fname = None
fail1_fname = None
fail2_fname = None
min_size = 50
max_size = 10000
reason_tag = None
tags = []
last = None
for arg in sys.argv[1:]:
if arg == '-h':
usage()
elif last == '-fail1':
fail1_fname = arg
last = None
elif last == '-fail2':
fail2_fname = arg
last = None
elif last == '-size':
min_size, max_size = [int(x) for x in arg.split('-')]
last = None
elif last == '-tag':
tags.append(arg)
last = None
elif last == '-reason':
reason_tag = arg
last = None
elif arg in ['-tag', '-fail1', '-fail2', '-size', '-reason']:
last = arg
elif not out_fname:
out_fname = arg
elif not read1_fname and os.path.exists(arg):
read1_fname = arg
elif not read2_fname and os.path.exists(arg):
read2_fname = arg
else:
usage('Unknown option: %s' % arg)
if not tags:
tags = ['AS+', 'NM-']
if not read1_fname or not read2_fname or not out_fname:
usage()
else:
bam_pair(out_fname, read1_fname, read2_fname, tags, min_size, max_size, fail1_fname, fail2_fname, reason_tag)
| bsd-3-clause | 6,956,855,958,349,268,000 | 31.944904 | 165 | 0.497617 | false | 3.63606 | false | false | false |
UMN-Hydro/GSFLOW_pre-processor | python_scripts/Create_hydcond_array.py | 1 | 5456 | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 7 18:33:11 2017
@author: gcng
"""
# Create_hydcond_array
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt # matlab-like plots
from readSettings import Settings
import platform
import sys
# Set input file
if len(sys.argv) < 2:
settings_input_file = 'settings.ini'
print 'Using default input file: ' + settings_input_file
else:
settings_input_file = sys.argv[1]
print 'Using specified input file: ' + settings_input_file
Settings = Settings(settings_input_file)
if platform.system() == 'Linux':
slashstr = '/'
else:
slashstr = '\\'
# ***SET FOLLOWING BASED ON SITE **********************************************
sw_scheme = 2 # 1: based on elev, 2: based on streams, 3: based on both
hydcond_default = 0.1
if sw_scheme == 1:
elev_thresh = [4500, 4200, 4000] # elev thresholds for different K, high to low
hydcond_by_elev = [0.1, 0.25, 0.4, 0.5] # K for the different elevation intervals
if sw_scheme >= 2:
# settings for hydraulic conductivity based on distance (in pixels) from stream
npix_stream_buffer = 1 # number of different buffers, based on pixels from stream
buffer_dist_pix = np.arange(1,npix_stream_buffer+1) # up to buffer_dist_pix pixels from stream
buffer_hydcond = np.array([0.4, 0.2]) # will use the first npix_stream_buffer values
strm_hydcond = 0.6 # for stream pixels
# *****************************************************************************
# %%
# Only run this script if using spatially distributed K
try:
float(Settings.hydcond0)
fl_runscript = 0 # don't run this script, set constant K
except ValueError:
fl_runscript = 1 # run this script to generate spatially variable K
hydcond_fil = Settings.hydcond0
surfz_fil = Settings.GISinput_dir + slashstr + 'DEM.asc'
NLAY = Settings.NLAY
if fl_runscript == 1:
f = open(surfz_fil, 'r')
sdata = {}
for i in range(6):
line = f.readline()
line = line.rstrip() # remove newline characters
key, value = line.split(': ')
try:
value = int(value)
        except ValueError:
value = float(value)
sdata[key] = value
f.close()
NSEW = [sdata['north'], sdata['south'], sdata['east'], sdata['west']]
NROW = sdata['rows']
NCOL = sdata['cols']
# - space discretization
DELR = (NSEW[2]-NSEW[3])/NCOL # width of column [m]
DELC = (NSEW[0]-NSEW[1])/NROW # height of row [m]
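    # NOTE: under Python 2 this is integer division whenever the header values are
    # all read as ints (e.g. north-south = 100 and rows = 30 gives DELC = 3 rather
    # than 3.33); cast to float if fractional cell sizes are expected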
TOP = np.genfromtxt(surfz_fil, skip_header=6, delimiter=' ', dtype=float)
hydcond = np.ones([NROW, NCOL, NLAY]) * hydcond_default # default
#%%
# ----- Based on elevation -----
if sw_scheme == 1 or sw_scheme == 3:
# - domain dimensions, maybe already in surfz_fil and botm_fil{}?
# NLAY = 2;
# surfz_fil = '/home/gcng/workspace/ProjectFiles/AndesWaterResources/Data/GIS/topo.asc';
hydcond0 = np.copy(hydcond[:,:,0])
hydcond0[TOP>=elev_thresh[0]] = hydcond_by_elev[0]
for ii in range(len(elev_thresh)):
hydcond0[TOP<elev_thresh[ii]] = hydcond_by_elev[ii+1]
hydcond[:,:,0] = np.copy(hydcond0)
#%%
# ----- Based on stream channel -----
if sw_scheme == 2 or sw_scheme == 3:
reach_fil = Settings.GISinput_dir + slashstr + 'reaches.txt'
reach_data_all = pd.read_csv(reach_fil) #
# Set stream cell only
# hydcond[reach_data_all.loc[:,'IRCH']-1, reach_data_all.loc[:,'JRCH']-1, 0] = 0.6
# hydcond depends on distance from stream
row_ind = reach_data_all.loc[:,'IRCH']-1
col_ind = reach_data_all.loc[:,'JRCH']-1
xcoord = DELR * np.arange(NCOL)
ycoord = DELC * np.arange(NROW)
xstrm = xcoord[col_ind]
ystrm = ycoord[row_ind]
xcoord_ar = np.kron(np.ones((NROW,1)), xcoord)
ycoord_ar = np.kron(np.ones((NCOL,1)), ycoord)
ycoord_ar = ycoord_ar.transpose()
dx = np.ceil(np.maximum(DELR, DELC))
buffer_dist = buffer_dist_pix * dx # up to npix pixels from stream
ind = np.argsort(buffer_dist)[::-1]
buffer_dist = np.copy(buffer_dist[ind])
buffer_hydcond = np.copy(buffer_hydcond[ind])
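        # buffers are applied from the widest ring inward, so cells nearest the
        # stream are overwritten last and keep the near-stream K value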
hydcond0 = np.copy(hydcond[:,:,0])
# buffer distances from stream:
for d_i in range(np.size(buffer_dist)):
for strm_i in range(np.size(xstrm)):
dist = ((xcoord_ar-xstrm[strm_i])**2 + (ycoord_ar-ystrm[strm_i])**2)**0.5
hydcond0[dist <= buffer_dist[d_i]] = buffer_hydcond[d_i]
hydcond0[row_ind, col_ind] = strm_hydcond # stream
hydcond[:,:,0] = hydcond0
# %% Plot
#ax = fig.add_subplot(2,2,1)
#im = ax.imshow(TOP_to_plot)
for ilay in range(NLAY):
fig = plt.figure(figsize=(12,12))
# plt.subplot(2,2,ilay+1)
im = plt.imshow(hydcond[:,:,ilay])
# im.set_clim(3800, 6200)
fig.colorbar(im, orientation='horizontal')
plt.title('BOTM lay' + str(ilay+1));
#%% Write to File
fobj = open(hydcond_fil, 'w+')
for ii in range(NLAY):
fobj.write('Layer %d \n' % (ii+1))
np.savetxt(fobj, hydcond[:,:,ii], delimiter=' ', fmt='%10g')
fobj.close()
| gpl-3.0 | -4,137,545,378,533,147,000 | 30.356322 | 99 | 0.562317 | false | 3.157407 | false | false | false |
osin-vladimir/ms-thesis-skoltech | mxnet-ssd/symbol/common.py | 1 | 6808 | import mxnet as mx
import numpy as np
def bn_act_conv_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), stride=(1,1)):
bn = mx.symbol.BatchNorm(data=from_layer, name="bn{}".format(name))
relu = mx.symbol.Activation(data=bn, act_type='relu')
conv = mx.symbol.Convolution(data=relu, kernel=kernel, pad=pad, stride=stride, num_filter=num_filter, name="conv{}".format(name))
return conv, relu
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), stride=(1,1), act_type="relu"):
relu = mx.symbol.Activation(data=from_layer, act_type=act_type, name="{}{}".format(act_type, name))
conv = mx.symbol.Convolution(data=relu, kernel=kernel, pad=pad, stride=stride, num_filter=num_filter, name="conv{}".format(name))
return conv, relu
def multibox_layer(from_layers, num_classes, sizes=[.2, .95], ratios=[1], normalization=-1, num_channels=[], clip=True, interm_layer=0):
"""
    The basic aggregation module for SSD detection. Takes in multiple layers and
    generates multiple object detection targets via customized layers.
Parameters:
----------
from_layers : list of mx.symbol
generate multibox detection from layers
num_classes : int
number of classes excluding background, will automatically handle
background in this function
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
    normalization : int or list of int
        use one normalization value for all layers or [...] for specific layers,
        -1 indicates no normalization or scaling
num_channels : list of int
number of input layer channels, used when normalization is enabled, the
length of list should equals to number of normalization layers
clip : bool
whether to clip out-of-image boxes
interm_layer : int
if > 0, will add a intermediate Convolution layer
Returns:
----------
list of outputs, as [loc_preds, cls_preds, anchor_boxes]
loc_preds : localization regression prediction
cls_preds : classification prediction
anchor_boxes : generated anchor boxes
"""
assert len(from_layers) > 0, "from_layers must not be empty list"
assert num_classes > 0, "num_classes {} must be larger than 0".format(num_classes)
assert len(ratios) > 0, "aspect ratios must not be empty list"
if not isinstance(ratios[0], list):
# provided only one ratio list, broadcast to all from_layers
ratios = [ratios] * len(from_layers)
assert len(ratios) == len(from_layers), \
"ratios and from_layers must have same length"
assert len(sizes) > 0, "sizes must not be empty list"
if len(sizes) == 2 and not isinstance(sizes[0], list):
# provided size range, we need to compute the sizes for each layer
assert sizes[0] > 0 and sizes[0] < 1
assert sizes[1] > 0 and sizes[1] < 1 and sizes[1] > sizes[0]
        start_offset = 0.1  # assumed smallest anchor scale; this variable is otherwise undefined in this copy
        tmp = np.linspace(sizes[0], sizes[1], num=(len(from_layers)-1))
        min_sizes = [start_offset] + tmp.tolist()
        max_sizes = tmp.tolist() + [tmp[-1]+start_offset]
sizes = zip(min_sizes, max_sizes)
assert len(sizes) == len(from_layers), \
"sizes and from_layers must have same length"
if not isinstance(normalization, list):
normalization = [normalization] * len(from_layers)
assert len(normalization) == len(from_layers)
assert sum(x > 0 for x in normalization) == len(num_channels), \
"must provide number of channels for each normalized layer"
loc_pred_layers = []
cls_pred_layers = []
anchor_layers = []
num_classes += 1 # always use background as label 0
for k, from_layer in enumerate(from_layers):
from_name = from_layer.name
# normalize
if normalization[k] > 0:
from_layer = mx.symbol.L2Normalization(data=from_layer, mode="channel", name="{}_norm".format(from_name))
scale = mx.symbol.Variable(name="{}_scale".format(from_name),shape=(1, num_channels.pop(0), 1, 1))
from_layer = normalization[k] * mx.symbol.broadcast_mul(lhs=scale, rhs=from_layer)
if interm_layer > 0:
from_layer = mx.symbol.Convolution(data=from_layer, kernel=(3,3), stride=(1,1), pad=(1,1), num_filter=interm_layer, name="{}_inter_conv".format(from_name))
from_layer = mx.symbol.Activation(data=from_layer, act_type="relu", name="{}_inter_relu".format(from_name))
# estimate number of anchors per location
# here I follow the original version in caffe
# TODO: better way to shape the anchors??
size = sizes[k]
assert len(size) > 0, "must provide at least one size"
size_str = "(" + ",".join([str(x) for x in size]) + ")"
ratio = ratios[k]
assert len(ratio) > 0, "must provide at least one ratio"
ratio_str = "(" + ",".join([str(x) for x in ratio]) + ")"
num_anchors = len(size) -1 + len(ratio)
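        # MultiBoxPrior generates len(sizes) + len(ratios) - 1 boxes per location,
        # e.g. 2 sizes and 3 aspect ratios give 4 anchors here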
# create location prediction layer
num_loc_pred = num_anchors * 4
loc_pred = mx.symbol.Convolution(data=from_layer, kernel=(3,3), stride=(1,1), pad=(1,1), num_filter=num_loc_pred, name="{}_loc_pred_conv".format(from_name))
loc_pred = mx.symbol.transpose(loc_pred, axes=(0,2,3,1))
loc_pred = mx.symbol.Flatten(data=loc_pred)
loc_pred_layers.append(loc_pred)
# create class prediction layer
num_cls_pred = num_anchors * num_classes
cls_pred = mx.symbol.Convolution(data=from_layer, kernel=(3,3), stride=(1,1), pad=(1,1), num_filter=num_cls_pred, name="{}_cls_pred_conv".format(from_name))
cls_pred = mx.symbol.transpose(cls_pred, axes=(0,2,3,1))
cls_pred = mx.symbol.Flatten(data=cls_pred)
cls_pred_layers.append(cls_pred)
# create anchor generation layer
anchors = mx.contrib.symbol.MultiBoxPrior(from_layer, sizes=size_str, ratios=ratio_str, clip=clip, name="{}_anchors".format(from_name))
anchors = mx.symbol.Flatten(data=anchors)
anchor_layers.append(anchors)
loc_preds = mx.symbol.Concat(*loc_pred_layers, num_args=len(loc_pred_layers), dim=1, name="multibox_loc_pred")
cls_preds = mx.symbol.Concat(*cls_pred_layers, num_args=len(cls_pred_layers), dim=1)
cls_preds = mx.symbol.Reshape(data=cls_preds, shape=(0, -1, num_classes))
cls_preds = mx.symbol.transpose(cls_preds, axes=(0, 2, 1), name="multibox_cls_pred")
anchor_boxes = mx.symbol.Concat(*anchor_layers, num_args=len(anchor_layers), dim=1)
anchor_boxes = mx.symbol.Reshape(data=anchor_boxes, shape=(0, -1, 4), name="multibox_anchors")
return [loc_preds, cls_preds, anchor_boxes]
| mit | 7,502,084,082,711,160,000 | 51.369231 | 167 | 0.645858 | false | 3.502058 | false | false | false |
Bobypalcka/coinwatcher | lib/details.py | 1 | 6887 |
import datetime
import os
import pickle
import time
from colorama import Fore, init
from .databaseCore import database
class owerview(object):
def __init__(self):
init()
self.p = database()
self.userid = -1
self.coin_table = self.p.load_all_coins()
        # load the whole updates table, then run a function to group it by day (keeping the last update of each day)
self.updates_data = self.p.load_update_all()
self.__dates_normalization()
self.user_data_wall_bal = {}
# update time normalization (save index of last update of a day)
# saved back in self.update_data
def __dates_normalization(self):
try:
with open("data/tmp/updatesid_last_of_day.pickle", "rb") as f:
updates_sorted = pickle.load(f)
except FileNotFoundError:
updates_sorted = {}
for x in self.updates_data:
tmp_date = datetime.datetime.strptime(x[1], "%Y-%m-%d %H:%M:%S")
tp_day = tmp_date.date()
str_day = str(tp_day)
updates_sorted[str_day] = {"date": tmp_date, "id": x[0]}
self.updates_data = updates_sorted
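        # resulting shape (dates are illustrative):
        #   {'2017-06-01': {'date': datetime(...), 'id': 42}, ...}
        # i.e. one entry per calendar day, keeping only that day's last update id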
# print(str(self.updates_data))
with open("data/tmp/updatesid_last_of_day.pickle", "wb") as f:
pickle.dump(updates_sorted, f)
# user detail view
def user_view(self, userid, history=False):
user_balance = self.p.load_user_balance(userid)
user_wallets = self.p.load_user_wallet(userid)
self.user_data_wall_bal[0] = user_balance
self.user_data_wall_bal[1] = user_wallets
tmp_coinId_list = []
tmp_coin_price = {}
# Check if coins and wallets != empty
if user_balance == [] and user_wallets == []:
print(
"No coin balances avaliable.\nEnter coin in balance or\nEnter wallet publicKey")
time.sleep(5)
os.system('cls' if os.name == 'nt' else 'clear')
return 0
# get all coin id you will need for price
try:
for x in user_balance:
if x[1] in tmp_coinId_list:
pass
else:
tmp_coinId_list.append(x[1])
except:
pass
try:
for x in user_wallets:
if x[2] in tmp_coinId_list:
pass
else:
tmp_coinId_list.append(x[2])
except:
pass
        # Quick view if history == False
        # Pull the last update and save it as tmp_coin_price[updateID][coinID] = { price data }
if history == False:
lastupdate = self.p.check_last_update_id()
coin_price = {}
# pull only newest update
for elem in tmp_coinId_list:
tmp = self.p.load_coin_price_with_mc(elem, lastupdate)
coin_price[elem] = {"usd": tmp[0], "btc": tmp[1], "mc": tmp[2]}
tmp_coin_price[lastupdate] = coin_price
self.__display(tmp_coin_price)
self.pause()
return 0
elif history == True:
coin_price = {}
            # pull the last update of each day
for x in self.updates_data.keys():
tmp_id = self.updates_data[x]['id']
tmp_tmp_coin = {}
for elem in tmp_coinId_list:
tmp = self.p.load_coin_price_with_mc(elem, tmp_id)
tmp_tmp_coin[elem] = {"usd": tmp[0],
"btc": tmp[1], "mc": tmp[2]}
coin_price[tmp_id] = tmp_tmp_coin
# call general view
self.__display(coin_price)
self.pause()
return 0
def pause(self):
sss = input("Press ENTER to continue...")
time.sleep(1)
os.system('cls' if os.name == 'nt' else 'clear')
def __display(self, data_dic):
# you get nested dict if history is true
# dict[updateID][coinID] = {"usd":? , "btc": ? , "mc": ?}
usr_bal = self.user_data_wall_bal[0]
usr_wal = self.user_data_wall_bal[1]
for i in data_dic:
tmp_update = data_dic[i]
date_u = self.p.check_update_date(i)
print(Fore.YELLOW + "\nWith prices from: " + date_u)
# Balance outprint
tot_update_value = 0
if usr_bal != []:
print(Fore.MAGENTA + "\nBalance: \n")
for x in range(len(usr_bal)):
tmp_coinid = int(usr_bal[x][1])
tmp_name = usr_bal[x][2]
tmp_bal_amount = float(usr_bal[x][4])
pri_usd = tmp_update[tmp_coinid]['usd']
pri_btc = tmp_update[tmp_coinid]['btc']
val_usd = tmp_bal_amount * pri_usd
val_btc = tmp_bal_amount * pri_btc
tot_update_value += val_usd
print(Fore.BLUE + "Coin: " + Fore.GREEN + tmp_name + Fore.BLUE + " -balance: " + Fore.GREEN + str(tmp_bal_amount) +
Fore.BLUE + " - btc: " + Fore.GREEN + str(pri_btc) + Fore.BLUE + " - usd: " + Fore.GREEN + str(pri_usd))
print(Fore.BLUE + " Total USD/BTC : " + Fore.GREEN + str(val_usd) +
Fore.MAGENTA + "$ / " + Fore.GREEN + str(val_btc) + Fore.MAGENTA + " BTCs")
else:
pass
# Wallets out print
if usr_wal != []:
print(Fore.MAGENTA + "\nWallets: \n")
for x in range(len(usr_wal)):
tmp_coinid = int(usr_wal[x][2])
tmp_name = usr_wal[x][3]
tmp_wal_name = usr_wal[x][7]
tmp_wal_amount = float(usr_wal[x][5])
tmp_wall_addr = usr_wal[x][4]
pri_usd = tmp_update[tmp_coinid]['usd']
pri_btc = tmp_update[tmp_coinid]['btc']
val_usd = tmp_wal_amount * pri_usd
val_btc = tmp_wal_amount * pri_btc
tot_update_value += val_usd
print(Fore.BLUE + "Wallet: " + Fore.GREEN + tmp_wal_name + Fore.BLUE + " - " + Fore.GREEN + tmp_name + Fore.BLUE + " - balance: " + Fore.GREEN + str(
tmp_wal_amount) + Fore.BLUE + " - btc: " + Fore.GREEN + str(pri_btc) + Fore.BLUE + " - usd: " + Fore.GREEN + str(pri_usd), end=' ')
print(Fore.BLUE + " address: " + Fore.GREEN + tmp_wall_addr + Fore.BLUE + "\n Total USD / BTC : " +
str(val_usd) + Fore.MAGENTA + "$ / " + str(val_btc) + Fore.MAGENTA + " BTCs")
else:
pass
print(Fore.RED + "\nTotal value on date: " + Fore.CYAN + date_u + " " + Fore.GREEN +
str(tot_update_value) + " $\n\n")
time.sleep(0.8)
| unlicense | 1,554,088,555,193,229,000 | 33.782828 | 169 | 0.484391 | false | 3.5647 | false | false | false |
proximate/proximate | pic_choose_dlg.py | 1 | 7610 | #
# Proximate - Peer-to-peer social networking
#
# Copyright (c) 2008-2011 Nokia Corporation
#
# All rights reserved.
#
# This software is licensed under The Clear BSD license.
# See the LICENSE file for more details.
#
import gtk
import gobject
import tempfile
import os
from plugins import get_plugin_by_type
from file_chooser_dlg import File_Chooser, FILE_CHOOSER_TYPE_FILE
from camera import Camera, Camera_Exception, DEFAULT_RESOLUTION
from support import warning, debug
from ossupport import xclose, xremove
from proximateprotocol import PLUGIN_TYPE_NOTIFICATION, MAX_FACE_DIMENSION, \
TP_FACE_SIZE
from guiutils import scale_image, compress_jpeg
class Picture_Choose_Dialog:
""" This class is used for previewing and selecting the profile picture.
Uses File_Chooser to select the picture. """
def __init__(self, gui, got_picture_cb):
self.notify = get_plugin_by_type(PLUGIN_TYPE_NOTIFICATION).notify
self.filename = None
self.gui = gui
self.tempfile = None # file to be removed when dialog is closed
self.got_picture_cb = got_picture_cb
self.dialog = gtk.Dialog("Select Profile Picture",
gui.get_main_window(),
gtk.DIALOG_DESTROY_WITH_PARENT | gtk.DIALOG_MODAL,
(gtk.STOCK_OK, gtk.RESPONSE_OK,
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
self.dialog.set_border_width(5)
self.dialog.vbox.set_spacing(2)
self.dialog.action_area.set_layout(gtk.BUTTONBOX_END)
self.dialog.set_position(gtk.WIN_POS_CENTER)
self.initialize_widgets()
self.dialog.connect("response", self.response_handler)
self.dialog.connect("delete-event", self.dialog_deleted)
def initialize_widgets(self):
self.profile_image = gtk.Image()
self.profile_image.set_size_request(300, 300)
self.profile_image.set_from_stock(gtk.STOCK_ORIENTATION_PORTRAIT, 4)
self.browse_button = gtk.Button("Browse")
self.take_photo = gtk.Button("Take photo")
self.clear_image = gtk.Button('Clear image')
self.vbox1 = gtk.VBox()
self.vbox1.pack_start(self.profile_image)
self.vbox1.pack_start(self.browse_button, False, True)
self.vbox1.pack_start(self.take_photo, False, True)
self.vbox1.pack_start(self.clear_image, False, True)
self.dialog.vbox.pack_start(self.vbox1)
self.browse_button.connect("clicked", self.browse_button_clicked)
self.take_photo.connect("clicked", self.take_photo_clicked)
self.clear_image.connect('clicked', self.clear_image_clicked)
def response_handler(self, widget, response_id, *args):
""" Handles dialog responses """
if response_id == gtk.RESPONSE_OK:
self.got_picture_cb(self.filename)
self.dialog.hide()
return True
def dialog_deleted(self, dialog, event):
return True
def show(self):
self.dialog.show_all()
def close(self):
self.remove_temp()
self.dialog.destroy()
def browse_button_clicked(self, widget):
file_dlg = File_Chooser(self.gui.main_window, FILE_CHOOSER_TYPE_FILE, False, self.browse_chooser_cb)
file_dlg.add_supported_pixbuf_formats()
#self.dialog.hide()
def browse_chooser_cb(self, filename, ctx):
#self.dialog.show()
if filename == None:
return
# checking if we have to scale the picture down
# also checking if it even is a picture
try:
pixbuf = gtk.gdk.pixbuf_new_from_file(filename)
except gobject.GError:
self.notify("Error: Invalid image file", True)
return
larger_dimension = max((pixbuf.get_width(), pixbuf.get_height()))
if os.path.getsize(filename) <= TP_FACE_SIZE and \
larger_dimension <= MAX_FACE_DIMENSION:
# use the picture directly without recompression
self.remove_temp()
self.set_picture(filename)
else:
# need to recompress the picture
pixbuf = scale_image(pixbuf, MAX_FACE_DIMENSION)
if not self.compress_jpeg(pixbuf):
self.notify("Error: Unable to compress JPEG picture", True)
def remove_temp(self):
if self.tempfile != None:
if not xremove(self.tempfile):
warning("Unable to remove a scaled picture\n")
self.tempfile = None
def take_photo_clicked(self, widget):
self.camera_dialog = Camera_Dialog(self.dialog, DEFAULT_RESOLUTION,
self.got_photo)
def got_photo(self, pixbuf):
if pixbuf:
pixbuf = scale_image(pixbuf, MAX_FACE_DIMENSION)
if not self.compress_jpeg(pixbuf):
self.notify("Error: Unable to compress JPEG picture", True)
self.camera_dialog = None
def clear_image_clicked(self, widget):
self.remove_temp()
self.set_picture(None)
def set_picture(self, fname):
self.filename = fname
self.profile_image.set_from_file(fname)
def compress_jpeg(self, pixbuf):
(fd, filename) = tempfile.mkstemp(prefix = 'proximate-tmp-profile-pic-')
xclose(fd)
if not compress_jpeg(pixbuf, filename, TP_FACE_SIZE):
return False
self.remove_temp()
self.tempfile = filename
self.set_picture(filename)
return True
class Camera_Dialog:
def __init__(self, profile_dialog, resolution, got_photo_cb):
self.cb = got_photo_cb
self.dialog = gtk.Dialog('Camera', profile_dialog,
gtk.DIALOG_DESTROY_WITH_PARENT | gtk.DIALOG_MODAL)
self.dialog.set_has_separator(False)
self.image = gtk.DrawingArea()
self.image.set_size_request(resolution[0], resolution[1])
self.help_text = gtk.Label('Click to take picture')
try:
self.camera = Camera(resolution, self.image)
except Camera_Exception:
debug('profile dialog: Unable to initialize camera\n')
self.camera = None
self.help_text.set_label('No camera found')
self.image_hbox = gtk.HBox()
self.image_hbox.pack_start(gtk.HBox())
self.image_hbox.pack_start(self.image, False, False)
self.image_hbox.pack_start(gtk.HBox())
if self.camera != None:
self.dialog.vbox.pack_start(self.image_hbox)
self.dialog.vbox.pack_start(self.help_text, False, True)
self.close_button = gtk.Button('Close')
self.dialog.vbox.pack_start(self.close_button, False, True)
self.close_button.connect('clicked', self.close_clicked)
self.dialog.connect('response', self.dialog_response)
self.image.add_events(gtk.gdk.BUTTON_PRESS_MASK)
self.image.connect('button-press-event', self.image_clicked)
self.dialog.show_all()
def close_clicked(self, widget):
self.close()
def dialog_response(self, widget, response_id):
self.close()
def close(self):
if self.camera:
self.camera.stop()
if self.camera.buffer:
pixbuf = gtk.gdk.pixbuf_new_from_data(self.camera.buffer,
gtk.gdk.COLORSPACE_RGB, False, 8, self.camera.width,
self.camera.height, 3*self.camera.width)
self.cb(pixbuf)
else:
self.cb(None)
self.dialog.destroy()
def image_clicked(self, widget, data=None):
if self.camera:
self.camera.take_photo()
| bsd-3-clause | -8,045,648,536,350,154,000 | 35.066351 | 108 | 0.625361 | false | 3.752465 | false | false | false |
lsaffre/timtools | timtools/sdoc/macros.py | 1 | 1215 | ## Copyright 2003-2009 Luc Saffre
## This file is part of the TimTools project.
## TimTools is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## TimTools is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with TimTools; if not, see <http://www.gnu.org/licenses/>.
import os
localRoot = ""
targetRoot = ""
SRC_ROOT = os.path.join(os.path.dirname(__file__),"..","..","..")
SRC_ROOT = os.path.abspath(SRC_ROOT)
print "SRC_ROOT =", SRC_ROOT
def fileref(filename):
href = "../../"+filename
return url(href,filename)
def url(url,label=None,title=None):
if label is None:
label = url
if title is None:
title = ""
r= """
.. raw:: html
<a href="%(url)s" title="%(title)s">%(label)s</a>
""" % locals()
# print r
return r
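# e.g. url("https://example.com", "Example") expands to a reST "raw:: html"
# directive containing <a href="https://example.com" title="">Example</a>
# (the URL above is purely illustrative)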
| bsd-2-clause | 2,663,268,168,025,918,500 | 29.973684 | 71 | 0.670782 | false | 3.347107 | false | false | false |
li-xirong/jingwei | preprocess/count_tags.py | 1 | 2257 | import sys
import os
from basic.constant import ROOT_PATH
from basic.common import checkToSkip, printStatus
INFO = __file__
def process(options, collection):
rootpath = options.rootpath
tpp = options.tpp
tagfile = os.path.join(rootpath, collection, "TextData", "id.userid.%stags.txt" % tpp)
resultfile = os.path.join(rootpath, collection, "TextData", "%stag.userfreq.imagefreq.txt" % tpp)
if checkToSkip(resultfile, options.overwrite):
return 0
printStatus(INFO, "parsing " + tagfile)
tag2imfreq = {}
tag2users = {}
for line in open(tagfile):
elems = str.split(line.strip())
photoid = elems[0]
userid = elems[1]
tagset = set(elems[2:])
for tag in tagset:
tag2imfreq[tag] = tag2imfreq.get(tag, 0) + 1
tag2users.setdefault(tag,[]).append(userid)
printStatus(INFO, "collecting user-freq and image-freq")
results = []
for tag,users in tag2users.iteritems():
userfreq = len(set(users))
imfreq = tag2imfreq[tag]
results.append((tag, userfreq, imfreq))
printStatus(INFO, "sorting in descending order (user-freq as primary key)")
results.sort(key=lambda v:(v[1],v[2]), reverse=True)
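    # e.g. results now looks like [('beach', 120, 340), ('sunset', 80, 95), ...]
    # (hypothetical tags), ordered by user frequency and then image frequency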
printStatus(INFO, "-> %s" % resultfile)
with open(resultfile, 'w') as fw:
fw.write(''.join(['%s %d %d\n' % (tag, userfreq, imfreq) for (tag, userfreq, imfreq) in results]))
fw.close()
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
from optparse import OptionParser
parser = OptionParser(usage="""usage: %prog [options] collection""")
parser.add_option("--overwrite", default=0, type="int", help="overwrite existing file (default: 0)")
parser.add_option("--tpp", default='lemm', type="string", help="tag preprocess (default: lemm)")
parser.add_option("--rootpath", default=ROOT_PATH, type="string", help="rootpath where the train and test collections are stored (default: %s)" % ROOT_PATH)
(options, args) = parser.parse_args(argv)
if len(args) < 1:
parser.print_help()
return 1
return process(options, args[0])
if __name__ == "__main__":
sys.exit(main())
| mit | 2,156,322,188,466,346,800 | 30.788732 | 160 | 0.61852 | false | 3.499225 | false | false | false |
chrisseto/Breeze | breeze/resource.py | 1 | 1480 | import re
import abc
from tornado.web import RequestHandler
from breeze.filter import FilterOptions
class _ResourceMeta(abc.ABCMeta):
def __init__(cls, name, bases, nmspc):
super().__init__(name, bases, nmspc)
if bases == (RequestHandler, ):
return
options = getattr(cls, 'Meta', object())
cls.name = getattr(options, 'name', cls.__name__.lower() + 's')
cls.url_regex = r'/{}(?:/(?P<pk>[^/]+))?/?$'.format(re.escape(cls.name))
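        # e.g. a subclass named User without an explicit Meta.name is exposed as
        # "users" and its regex matches /users, /users/ and /users/<pk>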
class Resource(RequestHandler, metaclass=_ResourceMeta):
# def __init__(self, model, name=None):
# self._model = model
# self._name = name or model.default_resource_name()
# @abc.abstractclassmethod
def load(self, pk):
pass
# @abc.abstractclassmethod
def list(self, filter_options):
pass
# @abc.abstractclassmethod
def delete(self, instance):
pass
# @abc.abstractclassmethod
def update(self, instance, **data):
pass
# @abc.abstractclassmethod
def create(self, **data):
pass
def get(self, pk=None):
if pk is not None:
self.write(self.load(pk).to_json())
return
self.write({
'data': [
x.to_json() for x in
self.list(self._parse_filter_options())
]
})
def post(self, pk=None):
assert pk is None
def _parse_filter_options(self):
return FilterOptions()
| mit | 1,949,619,977,083,950,300 | 22.870968 | 80 | 0.562838 | false | 3.86423 | false | false | false |
auvsi-suas/interop | client/tools/interop_cli.py | 1 | 5988 | #!/usr/bin/env python3
# CLI for interacting with interop server.
from __future__ import print_function
import argparse
import datetime
import getpass
import logging
import sys
import time
from auvsi_suas.client.client import AsyncClient
from auvsi_suas.proto.interop_api_pb2 import Telemetry
from google.protobuf import json_format
from mavlink_proxy import MavlinkProxy
from upload_odlcs import upload_odlcs
logger = logging.getLogger(__name__)
def teams(args, client):
teams = client.get_teams().result()
for team in teams:
print(json_format.MessageToJson(team))
def mission(args, client):
mission = client.get_mission(args.mission_id).result()
print(json_format.MessageToJson(mission))
def odlcs(args, client):
if args.odlc_dir:
upload_odlcs(client, args.odlc_dir)
else:
odlcs = client.get_odlcs(args.mission_id).result()
for odlc in odlcs:
print(json_format.MessageToJson(odlc))
def maps(args, client):
if args.map_filepath:
with open(args.map_filepath, 'rb') as img:
logger.info('Uploading map %s', args.map_filepath)
client.put_map_image(args.mission_id, img.read()).result()
else:
print(client.get_map_image(args.mission_id).result())
def probe(args, client):
while True:
start_time = datetime.datetime.now()
telemetry = Telemetry()
telemetry.latitude = 0
telemetry.longitude = 0
telemetry.altitude = 0
telemetry.heading = 0
client.post_telemetry(telemetry).result()
end_time = datetime.datetime.now()
elapsed_time = (end_time - start_time).total_seconds()
logger.info('Executed interop. Total latency: %f', elapsed_time)
delay_time = args.interop_time - elapsed_time
if delay_time > 0:
try:
time.sleep(delay_time)
except KeyboardInterrupt:
sys.exit(0)
def mavlink(args, client):
proxy = MavlinkProxy(args.device, client)
proxy.proxy()
def main():
# Setup logging
logging.basicConfig(
level=logging.INFO,
stream=sys.stdout,
format='%(asctime)s: %(name)s: %(levelname)s: %(message)s')
# Parse command line args.
parser = argparse.ArgumentParser(description='AUVSI SUAS Interop CLI.')
parser.add_argument('--url',
required=True,
help='URL for interoperability.')
parser.add_argument('--username',
required=True,
help='Username for interoperability.')
parser.add_argument('--password', help='Password for interoperability.')
subparsers = parser.add_subparsers(help='Sub-command help.')
subparser = subparsers.add_parser('teams', help='Get the status of teams.')
subparser.set_defaults(func=teams)
subparser = subparsers.add_parser('mission', help='Get mission details.')
subparser.set_defaults(func=mission)
subparser.add_argument('--mission_id',
type=int,
required=True,
help='ID of the mission to get.')
subparser = subparsers.add_parser(
'odlcs',
help='Upload odlcs.',
description='''Download or upload odlcs to/from the interoperability
server.
Without extra arguments, this prints all odlcs that have been uploaded to the
server.
With --odlc_dir, this uploads new odlcs to the server.
This tool searches for odlc JSON and images files within --odlc_dir
conforming to the 2017 Object File Format and uploads the odlc
characteristics and thumbnails to the interoperability server.
There is no deduplication logic. Odlcs will be uploaded multiple times, as
unique odlcs, if the tool is run multiple times.''',
formatter_class=argparse.RawDescriptionHelpFormatter)
subparser.set_defaults(func=odlcs)
subparser.add_argument('--mission_id',
type=int,
help='Mission ID to restrict ODLCs retrieved.',
default=None)
subparser.add_argument(
'--odlc_dir',
help='Enables odlc upload. Directory containing odlc data.')
subparser = subparsers.add_parser(
'map',
help='Upload maps.',
description='''Download or upload map images to/from the server.
With just the mission specified it prints the imagery data. With a image
filepath specified, it uploads the map to the server.''',
formatter_class=argparse.RawDescriptionHelpFormatter)
subparser.set_defaults(func=maps)
subparser.add_argument('--mission_id',
type=int,
help='Mission ID for the map.',
required=True)
subparser.add_argument('--map_filepath',
type=str,
help='Filepath to the image to upload.')
subparser = subparsers.add_parser('probe', help='Send dummy requests.')
subparser.set_defaults(func=probe)
subparser.add_argument('--interop_time',
type=float,
default=1.0,
help='Time between sent requests (sec).')
subparser = subparsers.add_parser(
'mavlink',
help='''Receive MAVLink GLOBAL_POSITION_INT packets and
forward as telemetry to interop server.''')
subparser.set_defaults(func=mavlink)
subparser.add_argument(
'--device',
type=str,
help='pymavlink device name to read from. E.g. tcp:localhost:8080.')
# Parse args, get password if not provided.
args = parser.parse_args()
if args.password:
password = args.password
else:
password = getpass.getpass('Interoperability Password: ')
# Create client and dispatch subcommand.
client = AsyncClient(args.url, args.username, password)
args.func(args, client)
if __name__ == '__main__':
main()
| apache-2.0 | 723,693,729,749,748,200 | 32.082873 | 79 | 0.628925 | false | 4.043214 | false | false | false |
duk3luk3/client | src/chat/chatlineedit.py | 2 | 4552 |
"""
Created on Dec 8, 2011
@author: thygrrr
"""
from PyQt5 import QtCore, QtWidgets
class ChatLineEdit(QtWidgets.QLineEdit):
"""
A special promoted QLineEdit that is used in channel.ui to provide a mirc-style editing experience
with completion and history.
LATER: History and tab completion support
"""
def __init__(self, parent):
QtWidgets.QLineEdit.__init__(self, parent)
self.returnPressed.connect(self.on_line_entered)
self.history = []
self.currentHistoryIndex = None
self.historyShown = False
self.completionStarted = False
self.chatters = {}
self.LocalChatterNameList = []
self.currenLocalChatter = None
def set_chatters(self, chatters):
self.chatters = chatters
def event(self, event):
if event.type() == QtCore.QEvent.KeyPress:
# Swallow a selection of keypresses that we want for our history support.
if event.key() == QtCore.Qt.Key_Tab:
self.try_completion()
return True
elif event.key() == QtCore.Qt.Key_Space:
self.accept_completion()
return QtWidgets.QLineEdit.event(self, event)
elif event.key() == QtCore.Qt.Key_Up:
self.cancel_completion()
self.prev_history()
return True
elif event.key() == QtCore.Qt.Key_Down:
self.cancel_completion()
self.next_history()
return True
else:
self.cancel_completion()
return QtWidgets.QLineEdit.event(self, event)
# All other events (non-keypress)
return QtWidgets.QLineEdit.event(self, event)
@QtCore.pyqtSlot()
def on_line_entered(self):
self.history.append(self.text())
self.currentHistoryIndex = len(self.history) - 1
def showEvent(self, event):
self.setFocus(True)
return QtWidgets.QLineEdit.showEvent(self, event)
def try_completion(self):
if not self.completionStarted:
# no completion on empty line
if self.text() == "":
return
# no completion if last character is a space
if self.text().rfind(" ") == (len(self.text()) - 1):
return
self.completionStarted = True
self.LocalChatterNameList = []
self.completionText = self.text().split()[-1] # take last word from line
            self.completionLine = self.text()[:-len(self.completionText)]  # line to be completed, without the completion word (rstrip would strip matching characters, not the suffix)
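            # e.g. for the line "hello Bo", completionText is "Bo" and completionLine
            # is "hello "; candidates are all chatters whose name starts with "bo"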
# make a copy of users because the list might change frequently giving all kind of problems
for chatter in self.chatters:
if chatter.name.lower().startswith(self.completionText.lower()):
self.LocalChatterNameList.append(chatter.name)
if len(self.LocalChatterNameList) > 0:
self.LocalChatterNameList.sort(key=lambda chatter: chatter.lower())
self.currenLocalChatter = 0
self.setText(self.completionLine + self.LocalChatterNameList[self.currenLocalChatter])
else:
self.currenLocalChatter = None
else:
if self.currenLocalChatter is not None:
self.currenLocalChatter = (self.currenLocalChatter + 1) % len(self.LocalChatterNameList)
self.setText(self.completionLine + self.LocalChatterNameList[self.currenLocalChatter])
def accept_completion(self):
self.completionStarted = False
def cancel_completion(self):
self.completionStarted = False
def prev_history(self):
        if self.currentHistoryIndex is not None:  # no history, nothing to do
            if self.currentHistoryIndex > 0 and self.historyShown:  # check boundaries; only move the index if the history is already shown
self.currentHistoryIndex -= 1
self.historyShown = True
self.setText(self.history[self.currentHistoryIndex])
def next_history(self):
if self.currentHistoryIndex is not None:
            if self.currentHistoryIndex < len(self.history)-1 and self.historyShown:  # check boundaries; only move the index if the history is already shown
self.currentHistoryIndex += 1
self.historyShown = True
self.setText(self.history[self.currentHistoryIndex])
| gpl-3.0 | 2,154,726,780,059,968,000 | 40.009009 | 157 | 0.607645 | false | 4.335238 | false | false | false |
softwaresaved/fat | lowfat/migrations/0001_initial.py | 2 | 1362 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-13 13:17
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('url', models.CharField(max_length=120, primary_key=True, serialize=False, unique=True)),
('name', models.CharField(max_length=120)),
('start_date', models.DateField()),
('end_date', models.DateField()),
('description', models.TextField()),
('budget_request', models.DecimalField(decimal_places=2, max_digits=10)),
],
),
migrations.CreateModel(
name='Fellow',
fields=[
('email', models.CharField(max_length=120, primary_key=True, serialize=False, unique=True)),
('full_name', models.CharField(max_length=120, unique=True)),
('year', models.IntegerField()),
],
),
migrations.AddField(
model_name='event',
name='fellow',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lowfat.Fellow'),
),
]
| bsd-3-clause | 6,138,034,044,976,768,000 | 32.219512 | 108 | 0.553598 | false | 4.351438 | false | false | false |
ntim/g4sipm | sample/plots/sqlite/trace.py | 1 | 1206 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, glob
import matplotlib.pyplot as plt
import numpy as np
import sqlite3
millivolt = 1e-9
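# presumably the CLHEP/Geant4 unit system, in which megavolt == 1 and therefore
# millivolt == 1e-9; dividing a stored voltage by this constant converts it to mV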
if __name__ == "__main__":
filename = glob.glob('results/*.sqlite')[-1]
if len(sys.argv) > 1:
filename = sys.argv[1]
# Plot range from arguments
start = 0
stop = 1000
if len(sys.argv) > 3:
start = int(sys.argv[2])
stop = start + int(sys.argv[3])
# Open file.
con = sqlite3.connect(filename)
cur = con.cursor()
# Extract trace.
voltages = []
times = []
for row in cur.execute("SELECT time, voltage FROM `g4sipmVoltageTraceDigis-0` WHERE time BETWEEN %g AND %g;" % (start, stop)):
voltages.append(row[1] / millivolt)
times.append(row[0] / 1000)
# Plot traces superposed.
plt.plot(times, voltages, '-')
#
name, pitch, cellPitch, numberOfCells = cur.execute("SELECT name, pitch, cellPitch, numberOfCells FROM sipmModel;").fetchone()
plt.text(0.025, 0.975, "%s\n%d x %d mm, %d $\mu$m pitch\n%d cells" % (name, pitch, pitch, cellPitch * 1000, numberOfCells),
ha="left", va="top", fontsize="medium", transform=plt.axes().transAxes)
plt.ylabel('voltage / mV')
plt.xlabel(u'time / µs')
plt.savefig("trace.pdf")
plt.show()
| gpl-3.0 | -2,763,521,255,873,647,000 | 29.897436 | 127 | 0.660581 | false | 2.677778 | false | false | false |
dbcls/dbcls-galaxy | tools/taxonomy/gi2taxonomy.py | 2 | 5229 | import sys
import string
import tempfile
import subprocess
from os import path
# -----------------------------------------------------------------------------------
def stop_err(msg):
sys.stderr.write(msg)
sys.exit()
# -----------------------------------------------------------------------------------
def gi_name_to_sorted_list(file_name, gi_col, name_col):
""" Suppose input file looks like this:
a 2
b 4
c 5
d 5
where column 1 is gi_col and column 0 is name_col
output of this function will look like this:
[[2, 'a'], [4, 'b'], [5, 'c'], [5, 'd']]
"""
result = []
try:
F = open( file_name, 'r' )
        for line in F:
            try:
                file_cols = string.split(line.rstrip(), '\t')
                file_cols[gi_col] = int( file_cols[gi_col] )
                result.append( [ file_cols[gi_col], file_cols[name_col] ] )
            except (ValueError, IndexError):
                # skip individual lines without a numeric GI in the expected column
                print >>sys.stderr, 'Non numeric GI field...skipping'
except Exception, e:
stop_err('%s\n' % e)
F.close()
result.sort()
return result
# -----------------------------------------------------------------------------------
def collapse_repeating_gis( L ):
""" Accepts 2-d array of gi-key pairs such as this
L = [
[gi1, 'key1'],
[gi1, 'key2'],
[gi2','key3']
]
Returns this:
[ [gi1, 'key1', 'key2'],
[gi2, 'key3' ]
]
The first value in each sublist MUST be int
"""
gi = []
i = 0
result = []
try:
for item in L:
if i == 0:
prev = item[0]
if prev != item[0]:
prev_L = []
prev_L.append( prev )
result.append( prev_L + gi )
prev = item[0]
gi =[]
gi.append( item[1] )
i += 1
except Exception, e:
stop_err('%s\n' % e)
prev_L = []
prev_L.append( prev )
result.append( prev_L + gi )
del(L)
return result
# -----------------------------------------------------------------------------------
def get_taxId( gi2tax_file, gi_name_list, out_file ):
""" Maps GI numbers from gi_name_list to TaxId identifiers from gi2tax_file and
prints result to out_file
gi2tax_file MUST be sorted on GI column
gi_name_list is a list that look slike this:
[[1,'a'], [2,'b','x'], [7,'c'], [10,'d'], [90,'f']]
where the first element of each sublist is a GI number
this list MUST also be sorted on GI
This function searches through 117,000,000 rows of gi2taxId file from NCBI
in approximately 4 minutes. This time is not dependent on the length of
gi_name_list
"""
L = gi_name_list.pop(0)
my_gi = L[0]
F = open( out_file, 'w' )
gi = 0
for line in file( gi2tax_file ):
line = line.rstrip()
gi, taxId = string.split( line, '\t' )
gi = int( gi )
if gi > my_gi:
try:
while ( my_gi < gi ):
L = gi_name_list.pop(0)
my_gi = L[0]
except:
break
if gi == my_gi:
for i in range( 1,len( L ) ):
print >>F, '%s\t%s\t%d' % (L[i], taxId, gi)
try:
L = gi_name_list.pop(0)
my_gi = L[0]
except:
break
# -----------------------------------------------------------------------------------
try:
in_f = sys.argv[1] # input file with GIs
gi_col = int( sys.argv[2] ) - 1 # column in input containing GIs
name_col = int( sys.argv[3] ) - 1 # column containing sequence names
out_f = sys.argv[4] # output file
tool_data = sys.argv[5]
except:
stop_err('Check arguments\n')
# GI2TAX point to a file produced by concatenation of:
# ftp://ftp.ncbi.nih.gov/pub/taxonomy/gi_taxid_nucl.zip
# and
# ftp://ftp.ncbi.nih.gov/pub/taxonomy/gi_taxid_prot.zip
# a sorting using this command:
# sort -n -k 1
GI2TAX = path.join( tool_data, 'taxonomy', 'gi_taxid_sorted.txt' )
# NAME_FILE and NODE_FILE point to names.dmg and nodes.dmg
# files contained within:
# ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz
NAME_FILE = path.join( tool_data, 'taxonomy', 'names.dmp' )
NODE_FILE = path.join( tool_data, 'taxonomy', 'nodes.dmp' )
g2n = gi_name_to_sorted_list(in_f, gi_col, name_col)
if len(g2n) == 0:
stop_err('No valid GI-containing fields. Please, check your column assignments.\n')
tb_F = tempfile.NamedTemporaryFile('w')
get_taxId( GI2TAX, collapse_repeating_gis( g2n ), tb_F.name )
try:
tb_cmd = 'taxBuilder %s %s %s %s' % ( NAME_FILE, NODE_FILE, tb_F.name, out_f )
retcode = subprocess.call( tb_cmd, shell=True )
if retcode < 0:
print >>sys.stderr, "Execution of taxBuilder terminated by signal", -retcode
except OSError, e:
    print >>sys.stderr, "Execution of taxBuilder failed:", e
| mit | 5,218,182,769,841,883,000 | 29.051724 | 87 | 0.468732 | false | 3.502344 | false | false | false |
mtils/ems | ems/qt4/location/geoboundingcircle.py | 1 | 6941 | '''
Created on 24.10.2011
@author: michi
'''
from geoboundingarea import GeoBoundingArea #@UnresolvedImport
from ems.qt4.location.geocoordinate import GeoCoordinate
class GeoBoundingCircle(GeoBoundingArea):
'''
\brief The QGeoBoundingCircle class defines a circular geographic area.
\inmodule QtLocation
\since 1.1
\ingroup maps
The circle is defined in terms of a QGeoCoordinate which specifies the
center of the circle and a qreal which specifies the radius of the circle
in metres.
The circle is considered invalid if the center coordinate is invalid
or if the radius is less than zero.
'''
_center = GeoCoordinate
_radius = 0.0
def __init__(self, centerOrOther=None, radius=None):
'''
Constructs a new, invalid bounding circle.
GeoBoundingCircle(GeoCoordinate center, float radius)
Constructs a new bounding circle centered at \a center and with a radius of \a
radius metres.
GeoBoundingCircle(GeoBoundingCircle other)
Constructs a new bounding circle from the contents of \a other.
@param centerOrOther: GeoCoordinate or GeoBoundingCircle (optional)
@type centerOrOther: GeoCoordinate|GeoBoundingBox
@param radius: Optional radius
@type radius: float
'''
if isinstance(centerOrOther, GeoCoordinate):
self._center = centerOrOther
if not isinstance(radius, (float, int)):
raise TypeError("If you construct with center, pass a radius")
self._radius = float(radius)
if isinstance(centerOrOther, GeoBoundingCircle):
self.__ilshift__(centerOrOther)
def __ilshift__(self, other):
'''
self <<= other
replacement for c++ = operator overloading
@param other: The right operand
        @type other: GeoBoundingCircle
        @rtype: GeoBoundingCircle
'''
self._center = other.center()
self._radius = other.radius()
return self
def __eq__(self, other):
'''
Returns whether this bounding circle is equal to \a other.
self == other
@param other: Right operand
@type other: GeoBoundingCircle
@rtype: bool
'''
return self._center == other.center() and\
self._radius == other.radius()
def __ne__(self, other):
'''
Returns whether this bounding circle is not equal to \a other.
self != other
@param other: Right operand
@type other: GeoBoundingCircle
@rtype: bool
'''
return not self.__eq__(other)
def type_(self):
'''
Returns QGeoBoundingArea::CircleType to identify this as a
QGeoBoundingCircle instance.
This function is provided to help find the specific type of
        a QGeoBoundingArea instance.
@rtype: int
'''
return GeoBoundingArea.CircleType
def isValid(self):
'''
Returns whether this bounding circle is valid.
A valid bounding circle has a valid center coordinate and a radius
greater than or equal to zero.
@rtype: bool
'''
if isinstance(self._center, GeoCoordinate):
return (self._center.isValid() and self._radius >= -1e-7)
return False
def isEmpty(self):
'''
Returns whether this bounding circle has a geometrical area of zero.
Returns true if this bounding circle is invalid.
'''
return (not self.isValid() or (self._radius <= 1e-7))
def setCenter(self, center):
'''
Sets the center coordinate of this bounding circle to \a center.
@param center: GeoCoordinate
@type center: GeoCoordinate
'''
self._center = center
def center(self):
'''
Returns the center coordinate of this bounding circle.
@rtype: GeoCoordinate
'''
return self._center
def setRadius(self, radius):
'''
Sets the radius in metres of this bounding circle to \a radius.
@param radius: the new radius
@type radius: float
'''
self._radius = radius
def radius(self):
'''
Returns the radius in meters of this bounding circle.
@rtype: float
'''
return self._radius
def contains(self, coordinate):
'''
Returns whether the coordinate \a coordinate is contained within this
bounding circle.
@param coordinate: The other coordinate
@type coordinate: GeoCoordinate
@rtype: bool
'''
if not self.isValid() or not coordinate.isValid():
return False
if self._center.distanceTo(coordinate) <= self._radius:
return True
return False
def translate(self, degreesLatitude, degreesLongitude):
'''
Translates this bounding circle by \a degreesLatitude northwards and \a
degreesLongitude eastwards.
Negative values of \a degreesLatitude and \a degreesLongitude correspond to
southward and westward translation respectively.
@param degreesLatitude: north degrees
@type degreesLatitude: float
@param degreesLongitude: east degrees
@type degreesLongitude: float
'''
# TODO handle dlat, dlon larger than 360 degrees
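        # e.g. a center at (80, 10) translated 15 degrees north crosses the pole:
        # the latitude reflects back to 85 and the longitude flips to -170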
lat = self._center.latitude()
lon = self._center.longitude()
lat += degreesLatitude
lon += degreesLongitude
if lon < -180.0:
lon += 360.0
if lon > 180.0:
lon -= 360.0
if lat > 90.0:
lat = 180.0 - lat
if lon < 0.0:
lon = 180.0
else:
lon -= 180
if lat < -90.0:
lat = 180.0 + lat
if lon < 0.0:
lon = 180.0
else:
lon -= 180
self._center = GeoCoordinate(lat, lon)
def translated(self, degreesLatitude, degreesLongitude):
'''
Returns a copy of this bounding circle translated by \a degreesLatitude northwards and \a
degreesLongitude eastwards.
Negative values of \a degreesLatitude and \a degreesLongitude correspond to
southward and westward translation respectively.
@param degreesLatitude: north degrees
@type degreesLatitude: float
@param degreesLongitude: east degrees
@type degreesLongitude: float
@rtype: GeoBoundingCircle
'''
result = GeoBoundingCircle(self)
result.translate(degreesLatitude, degreesLongitude)
return result
| mit | 4,673,550,547,916,429,000 | 28.662393 | 97 | 0.583057 | false | 4.750856 | false | false | false |
iocube/skitza | setup.py | 1 | 1254 | from setuptools import setup, find_packages
from codecs import open
from os import path
from skitza import __version__
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='skitza',
version=__version__,
description='project description',
long_description=long_description,
url='https://github.com/iocube/skitza',
author='Vladimir Zeifman',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Topic :: Software Development :: Code Generators',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7'
],
keywords='code generation utility',
packages=find_packages(),
package_data={
'skitza': ['schema.json'],
},
install_requires=[
'click',
'functools32',
'Jinja2',
'jsonschema',
'MarkupSafe',
'PyYAML'
],
entry_points={
'console_scripts': [
'skitza=skitza.__main__:main'
],
},
) | mit | -2,550,209,727,861,696,500 | 25.145833 | 63 | 0.582935 | false | 3.955836 | false | true | false |
bgschiller/pre-commit-hooks | pre_commit_hooks/end_of_file_fixer.py | 1 | 2578 | from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
import sys
import platform
from pre_commit_hooks.util import entry
def quote_file(fname):
return "'{}'".format(fname)
def file_ends_with_newline(file_obj):
# Test for newline at end of file
# Empty files will throw IOError here
try:
file_obj.seek(-1, os.SEEK_END)
except IOError:
return True
last_character = file_obj.read(1)
# last_character will be '' for an empty file
if last_character != b'\n' and last_character != b'':
return False
return True
def file_ends_with_multiple_newlines(file_obj):
try:
file_obj.seek(-2, os.SEEK_END)
except IOError:
return False
last_two_chars = file_obj.read(2)
if last_two_chars == b'\n\n':
return True
return False
FIX_MISSING_NEWLINE = '''sed -i '' -e s/[[:space:]]*$// {files}'''
FIX_MULTIPLE_NEWLINES = r'''for ff in {files}; do sed -i '' -e :a -e '/^\n*$/{{$d;N;ba' -e '}}' $ff; done'''
if platform.system() != 'Darwin':
FIX_MISSING_NEWLINE = FIX_MISSING_NEWLINE.replace("-i ''", "-i")
FIX_MULTIPLE_NEWLINES = FIX_MULTIPLE_NEWLINES.replace("-i ''", "-i")
@entry
def end_of_file_fixer(argv):
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='Filenames to check')
args = parser.parse_args(argv)
multiple_newline_files = []
missing_newline_files = []
for filename in args.filenames:
# Read as binary so we can read byte-by-byte
with open(filename, 'rb+') as file_obj:
if not file_ends_with_newline(file_obj):
missing_newline_files.append(filename)
if file_ends_with_multiple_newlines(file_obj):
multiple_newline_files.append(filename)
if missing_newline_files:
print("These files are missing a newline at the end:", ", ".join(missing_newline_files))
print("You can fix this with the following:")
print(" ", FIX_MISSING_NEWLINE.format(files=' '.join(map(quote_file, missing_newline_files))))
print()
if multiple_newline_files:
print("These files have extra newlines at the end:", ", ".join(multiple_newline_files))
print("You can fix this with the following:")
print(" ", FIX_MULTIPLE_NEWLINES.format(files=' '.join(map(quote_file, multiple_newline_files))))
print()
return 1 if missing_newline_files or multiple_newline_files else 0
if __name__ == '__main__':
sys.exit(end_of_file_fixer())
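# Hypothetical usage sketch (added for illustration): besides running as a
# pre-commit hook, the checker can be pointed at files directly, e.g.
#
#   python -m pre_commit_hooks.end_of_file_fixer README.md setup.py
#
# A non-zero exit status means at least one file is missing its final newline
# or ends with extra blank lines; the printed sed commands are suggested fixes.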
| mit | -5,802,170,592,564,159,000 | 32.051282 | 108 | 0.629946 | false | 3.526676 | false | false | false |
resec/superhero | controller/user.py | 1 | 8217 | #coding=utf-8
__author__ = 'phithon'
import tornado.web, os, base64, pymongo, time
from controller.base import BaseHandler
from tornado import gen
from bson.objectid import ObjectId
from util.function import time_span, hash, intval
class UserHandler(BaseHandler):
def initialize(self):
super(UserHandler, self).initialize()
self.topbar = ""
def get(self, *args, **kwargs):
method = "%s_act" % args[0]
if len(args) == 3 : arg = args[2]
else: arg = None
if hasattr(self, method):
getattr(self, method)(arg)
else:
self.detail_act()
def quit_act(self, arg):
if self.get_cookie("user_info"):
self.clear_cookie("user_info")
if self.get_cookie("download_key"):
self.clear_cookie("download_key")
self.session.delete("current_user")
self.redirect("/login")
@tornado.web.asynchronous
@gen.coroutine
def modify_act(self, arg):
pass
@tornado.web.asynchronous
@gen.coroutine
def detail_act(self, arg):
if not arg : arg = self.current_user["username"]
username = self.get_query_argument("u", default = arg)
user = yield self.db.member.find_one({
"username": username
})
if not user:
self.custom_error("不存在这个用户")
limit = 10
page = intval(self.get_argument("page", default=1))
if not page or page <= 0 : page = 1
cursor = self.db.article.find({
"user": username
})
count = yield cursor.count()
        cursor.sort([('time', pymongo.DESCENDING)]).limit(limit).skip((page - 1) * limit)
posts = yield cursor.to_list(length = limit)
face = "./static/face/%s/180.png" % user["_id"]
if not os.path.exists(face): face = "./static/face/guest.png"
self.render("user.html", user = user, posts = posts, page = page, time_span = time_span, each = limit, count = count)
@tornado.web.asynchronous
@gen.coroutine
def edit_act(self, arg):
user = yield self.db.member.find_one({
"username": self.current_user["username"]
})
self.render("profile.html", user = user, radio = self.radio)
def face_act(self, arg):
self.render("face.html")
@tornado.web.asynchronous
@gen.coroutine
def bookmark_act(self, arg):
limit = 10
page = intval(arg)
if page <= 0 : page = 1
user = yield self.db.member.find_one({
"username": self.current_user["username"]
})
bookmark = user.get("bookmark")
count = len(bookmark)
bookmark = bookmark[(page - 1) * limit:(page - 1) * limit + limit]
bookmark.reverse()
self.render("bookmark.html", bookmark = bookmark, page = page, count = count, each = limit)
@tornado.web.asynchronous
@gen.coroutine
def like_act(self, arg):
limit = 10
page = intval(arg)
if page <= 0 : page = 1
cursor = self.db.article.find({
"like": self.current_user["username"]
})
count = yield cursor.count()
        cursor.sort([('_id', pymongo.DESCENDING)]).limit(limit).skip((page - 1) * limit)
posts = yield cursor.to_list(length = limit)
self.render("like.html", posts = posts, page = page, count = count, each = limit)
@tornado.web.asynchronous
@gen.coroutine
    def download_act(self, arg):
        # arg is unused here, but get() always passes one positional argument
key = self.get_query_argument("key")
task = yield self.db.task.find_one({
"_id": ObjectId(key),
"owner": self.current_user.get("username")
})
if task and os.path.exists(task["savepath"]):
self.set_secure_cookie("download_key", task["savepath"])
relpath = os.path.relpath(task["savepath"])
self.redirect("/" + relpath)
else:
self.custom_error("File Not Found", status_code = 404)
def post(self, *args, **kwargs):
method = "_post_%s" % args[0]
if hasattr(self, method):
getattr(self, method)()
else:
self.custom_error("参数错误")
@tornado.web.asynchronous
@gen.coroutine
def _post_edit(self):
profile = {}
profile["email"] = self.get_body_argument("email", default=None)
profile["website"] = self.get_body_argument("website", default=None)
profile["qq"] = self.get_body_argument("qq", default=None)
profile["address"] = self.get_body_argument("address", default=None)
profile["signal"] = self.get_body_argument("signal", default=None)
orgpass = self.get_body_argument("orgpass", default=None)
if orgpass:
password = self.get_body_argument("password")
repassword = self.get_body_argument("repassword")
            if not password or len(password) < 5:
                self.custom_error("The new password is too short")
            if password != repassword:
                self.custom_error("The two passwords do not match")
            user = yield self.db.member.find_one({"username": self.current_user["username"]})
            check = yield self.backend.submit(hash.verify, orgpass, user["password"])
            if not check:
                self.custom_error("The original password is incorrect")
profile["password"] = yield self.backend.submit(hash.get, password)
# check email
ufemail = yield self.db.member.find_one({
"email": profile["email"]
})
        if ufemail:
            self.custom_error("This email address is already in use")
# check user profile
yield self.db.member.update({
"username": self.current_user["username"]
}, {
"$set": profile
})
self.redirect("/user/edit")
@tornado.web.asynchronous
@gen.coroutine
def _post_upface(self):
img = self.get_body_argument("img", default = None)
try:
img = base64.b64decode(img)
uid = self.current_user["_id"]
face = "./static/face/%s/" % uid
if not os.path.isdir(face):
os.makedirs(face)
face += "180.png"
with open(face, "wb") as f:
f.write(img)
self.write("success")
except:
self.write("fail")
@tornado.web.asynchronous
@gen.coroutine
def _post_message(self):
openwebsite = intval(self.get_body_argument("openwebsite", default=1))
openqq = intval(self.get_body_argument("openqq", default=1))
openemail = intval(self.get_body_argument("openemail", default=1))
allowemail = intval(self.get_body_argument("allowemail", default=1))
yield self.db.member.find_and_modify({
"username": self.current_user["username"]
}, {
"$set": {
"openwebsite": openwebsite,
"openqq": openqq,
"openemail": openemail,
"allowemail": allowemail
}
})
self.redirect("/user/edit")
@tornado.web.asynchronous
@gen.coroutine
def _post_like(self):
id = self.get_body_argument("postid")
yield self.db.article.find_and_modify({
"_id": ObjectId(id)
}, {
"$pull": {"like": self.current_user["username"]}
})
self.redirect("/user/like")
@tornado.web.asynchronous
@gen.coroutine
def _post_bookmark(self):
id = self.get_body_argument("postid")
yield self.db.member.find_and_modify({
"username": self.current_user["username"]
}, {
"$pull": {"bookmark": {"id": id}}
})
self.redirect("/user/bookmark")
@gen.coroutine
def __get_tag(self, id):
tag = yield self.db.tag.find_one({
"_id": ObjectId(id)
})
raise gen.Return(tag)
def radio(self, user, key, tr = 1):
check = ""
if key in user:
if user[key] and tr == 1:
check = "checked"
if not user[key] and tr == 0:
check = "checked"
return '<input type="radio" name="%s" value="%d" %s>' % (key, tr, check) | mit | 7,163,133,643,337,519,000 | 33.892704 | 125 | 0.554066 | false | 3.732323 | false | false | false |
CERT-Solucom/certitude | components/scanner/resources/python_source/getfiles.py | 2 | 1403 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
CERTitude: the seeker of IOC
Copyright (c) 2016 CERT-W
Contact: [email protected]
Contributors: @iansus, @nervous, @fschwebel
CERTitude is under licence GPL-2.0:
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
import os, re, win32api, win32file, sys
def getFiles():
ret = []
# List logical drives
drives = win32api.GetLogicalDriveStrings().split('\x00')
drives.pop()
    # Only get local drives
drives = [ d for d in drives if win32file.GetDriveType(d)==win32file.DRIVE_FIXED ]
# List files
for drive in drives:
print os.popen('dir /s /b '+drive).read()
def main():
getFiles()
if __name__=='__main__':
main() | gpl-2.0 | 7,535,693,596,363,145,000 | 28.25 | 83 | 0.68995 | false | 3.682415 | false | false | false |
gingi99/research_dr | python/MLEM2/rules_stat.py | 1 | 2688 | # coding: utf-8
from collections import defaultdict
import numpy as np
# =====================================
# Rules のうち 指定したConsequentを持つ / 持たないRuleの数
# =====================================
def getNumRulesClass(list_rules, consequent, judge=True):
if judge : rules = [r for r in list_rules if r.getConsequent() == consequent]
else : rules = [r for r in list_rules if r.getConsequent() != consequent]
return(len(rules))
# =====================================
# Mean support size of the rules
# =====================================
def getMeanSupport(list_rules, only_avg = True) :
supports = [len(r.getSupport()) for r in list_rules]
if only_avg :
ans = np.mean(supports)
else :
ans = '{mean}±{std}'.format(mean=('%.3f' % round(np.mean(supports),3)), std=('%.3f' % round(np.std(supports),3)))
return(ans)
# =====================================
# Minimum support size of the rules
# =====================================
def getMinSupport(list_rules) :
supports = [len(r.getSupport()) for r in list_rules]
ans = np.min(supports)
return(ans)
# =====================================
# Mean rule length
# =====================================
def getMeanLength(list_rules, only_avg = True) :
lengths = [len(r.getKey()) for r in list_rules]
if only_avg :
ans = np.mean(lengths)
else :
ans = '{mean}±{std}'.format(mean=('%.3f' % round(np.mean(lengths),3)), std=('%.3f' % round(np.std(lengths),3)))
return(ans)
# =====================================
# Fraction of rules whose support size is at least k
# =====================================
def getPerKRules(list_rules, k) :
k_rules = [r for r in list_rules if len(r.getSupport()) >= k]
    ans = len(k_rules) / len(list_rules)
return(ans)
# =====================================
# Fraction of rules whose support size equals n
# =====================================
def getPerNSupport(list_rules, n) :
n_rules = [r for r in list_rules if len(r.getSupport()) == n]
ans = len(n_rules) / len(list_rules)
return(ans)
# =====================================
# Frequency of the elementary conditions appearing in the rules
# =====================================
def getRulesValueCount(list_rules) :
rules_stat_value_count = defaultdict(dict)
for r in list_rules:
attrs = r.getKey()
for attr in attrs:
value = r.getValue(attr)
if not attr in rules_stat_value_count or not value in rules_stat_value_count[attr]:
rules_stat_value_count[attr].update({value : 1})
else :
rules_stat_value_count[attr][value] += 1
return(rules_stat_value_count)
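if __name__ == '__main__':
    # Hypothetical self-test (added for illustration): _DemoRule is a stand-in
    # for the project's real Rule class, exposing only the methods used above.
    class _DemoRule(object):
        def __init__(self, key, values, support, consequent):
            self._key, self._values = key, values
            self._support, self._consequent = support, consequent
        def getKey(self): return self._key
        def getValue(self, attr): return self._values[attr]
        def getSupport(self): return self._support
        def getConsequent(self): return self._consequent
    demo = [_DemoRule(('color',), {'color': 'red'}, [1, 2, 3], 'yes'),
            _DemoRule(('size',), {'size': 'big'}, [4], 'no')]
    print(getNumRulesClass(demo, 'yes'))   # -> 1
    print(getMeanSupport(demo))            # -> 2.0
    print(getRulesValueCount(demo))        # per-attribute value frequencies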
| mit | 3,592,265,433,694,491,600 | 35.112676 | 121 | 0.49181 | false | 3.257942 | false | false | false |
volpino/Yeps-EURAC | scripts/scramble/scripts/DRMAA_python-macosx.py | 1 | 1938 | import os, sys, shutil
if "SGE_ROOT" not in os.environ:
print "scramble(): Please set SGE_ROOT to the path of your SGE installation"
print "scramble(): before scrambling DRMAA_python"
sys.exit(1)
# change back to the build dir
if os.path.dirname( sys.argv[0] ) != "":
os.chdir( os.path.dirname( sys.argv[0] ) )
# find setuptools
scramble_lib = os.path.join( "..", "..", "..", "lib" )
sys.path.append( scramble_lib )
import get_platform # fixes fat python 2.5
try:
from setuptools import *
import pkg_resources
except:
from ez_setup import use_setuptools
use_setuptools( download_delay=8, to_dir=scramble_lib )
from setuptools import *
import pkg_resources
# clean, in case you're running this by hand from a dirty module source dir
for dir in [ "build", "dist", "gridengine" ]:
if os.access( dir, os.F_OK ):
print "scramble_it.py: removing dir:", dir
shutil.rmtree( dir )
# patch
file = "setup.py"
print "scramble(): Patching", file
if not os.access( "%s.orig" %file, os.F_OK ):
shutil.copyfile( file, "%s.orig" %file )
i = open( "%s.orig" %file, "r" )
o = open( file, "w" )
for line in i.readlines():
if line == 'SGE6_ROOT="/scratch_test02/SGE6"\n':
line = 'SGE6_ROOT="%s"\n' % os.environ["SGE_ROOT"]
if line.startswith('link_args ='):
line = 'link_args = [ "-L%s" % os.path.join(SGE6_ROOT, "lib", SGE6_ARCH), "-ldrmaa" ]\n'
print >>o, line,
i.close()
o.close()
# go
me = sys.argv[0]
sys.argv = [ me ]
sys.argv.append( "build" )
execfile( "setup.py", globals(), locals() )
# fix _cDRMAA.so rpath
so = "build/lib.%s-%s/_cDRMAA.so" % ( pkg_resources.get_platform(), sys.version[:3] )
libdrmaa = os.path.join(SGE6_ROOT, "lib", SGE6_ARCH, "libdrmaa.dylib.1.0" )
os.system( "install_name_tool -change libdrmaa.dylib.1.0 %s %s" % ( libdrmaa, so ) )
sys.argv = [ me ]
sys.argv.append( "bdist_egg" )
execfile( "setup.py", globals(), locals() )
| mit | 814,807,746,194,055,800 | 31.3 | 97 | 0.631063 | false | 2.684211 | false | false | false |
CodethinkLabs/online-atomic-update | migratelib/findmnt.py | 2 | 2345 | # Copyright (C) 2014 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'Find information about mounts'
import shlex
import subprocess
from .mount_commands import mount_cmd, umount_cmd, findmnt_cmd
__all__ = ('search_fields', 'find_mounts')
search_fields = [
'SOURCE', # source device
'TARGET', # mountpoint
'FSTYPE', # filesystem type
'OPTIONS', # all mount options
'VFS-OPTIONS', # VFS specific mount options
'FS-OPTIONS', # FS specific mount options
'LABEL', # filesystem label
'UUID', # filesystem UUID
'PARTLABEL', # partition label
'PARTUUID', # partition UUID
'MAJ:MIN', # major:minor device number
'FSROOT', # filesystem root
'TID', # task ID
'ID', # mount ID
'OPT-FIELDS', # optional mount fields
'PROPAGATION', # VFS propagation flags
]
def find_mounts(root=None, tab_file=None, task=None, fields=None,
recurse=False, runcmd=findmnt_cmd):
argv = ['--pairs', '--nofsroot']
if task is not None:
argv.extend(('--task', str(task)))
if tab_file is not None:
argv.extend(('--tab-file', str(tab_file)))
if fields is not None:
argv.extend(('--output', ','.join(fields)))
if recurse:
if root is None:
raise ValueError('recurse passed without root')
argv.append('--submounts')
if root is not None:
argv.append(root)
o = runcmd(argv)
mount_list = []
for line in o.splitlines():
matches = dict()
for pair in shlex.split(line):
key, value = pair.split('=', 1)
matches[key] = value.decode('string_escape')
mount_list.append(matches)
return mount_list
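# Hypothetical usage sketch (added for illustration; needs util-linux's findmnt
# available through the imported findmnt_cmd wrapper):
#
#   mounts = find_mounts(fields=['SOURCE', 'TARGET', 'FSTYPE'])
#   for mount in mounts:
#       print(mount['TARGET'], mount['FSTYPE'])
#
# Each entry is a dict built from one KEY="value" pairs line of findmnt output.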
| lgpl-2.1 | -322,575,308,686,660,300 | 31.123288 | 73 | 0.645203 | false | 3.934564 | false | false | false |
redhat-openstack/heat | heat/tests/test_clients.py | 1 | 18603 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometerclient import exc as ceil_exc
from ceilometerclient.openstack.common.apiclient import exceptions as c_a_exc
from cinderclient import exceptions as cinder_exc
from glanceclient import exc as glance_exc
from heatclient import exc as heat_exc
from keystoneclient import exceptions as keystone_exc
from neutronclient.common import exceptions as neutron_exc
from swiftclient import exceptions as swift_exc
from troveclient.client import exceptions as trove_exc
from heatclient import client as heatclient
import mock
from oslo.config import cfg
from testtools.testcase import skip
from heat.engine import clients
from heat.engine.clients import client_plugin
from heat.tests.common import HeatTestCase
from heat.tests.v1_1 import fakes
class ClientsTest(HeatTestCase):
def test_clients_get_heat_url(self):
con = mock.Mock()
con.tenant_id = "b363706f891f48019483f8bd6503c54b"
c = clients.Clients(con)
con.clients = c
obj = c.client_plugin('heat')
obj._get_client_option = mock.Mock()
obj._get_client_option.return_value = None
obj.url_for = mock.Mock(name="url_for")
obj.url_for.return_value = "url_from_keystone"
self.assertEqual("url_from_keystone", obj.get_heat_url())
heat_url = "http://0.0.0.0:8004/v1/%(tenant_id)s"
obj._get_client_option.return_value = heat_url
tenant_id = "b363706f891f48019483f8bd6503c54b"
result = heat_url % {"tenant_id": tenant_id}
self.assertEqual(result, obj.get_heat_url())
obj._get_client_option.return_value = result
self.assertEqual(result, obj.get_heat_url())
@mock.patch.object(heatclient, 'Client')
def test_clients_heat(self, mock_call):
self.stub_keystoneclient()
con = mock.Mock()
con.auth_url = "http://auth.example.com:5000/v2.0"
con.tenant_id = "b363706f891f48019483f8bd6503c54b"
con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
c = clients.Clients(con)
con.clients = c
obj = c.client_plugin('heat')
obj.url_for = mock.Mock(name="url_for")
obj.url_for.return_value = "url_from_keystone"
obj.client()
self.assertEqual('url_from_keystone', obj.get_heat_url())
@mock.patch.object(heatclient, 'Client')
def test_clients_heat_no_auth_token(self, mock_call):
self.stub_keystoneclient(auth_token='anewtoken')
con = mock.Mock()
con.auth_url = "http://auth.example.com:5000/v2.0"
con.tenant_id = "b363706f891f48019483f8bd6503c54b"
con.auth_token = None
c = clients.Clients(con)
con.clients = c
obj = c.client_plugin('heat')
obj.url_for = mock.Mock(name="url_for")
obj.url_for.return_value = "url_from_keystone"
self.assertEqual('anewtoken', c.client('keystone').auth_token)
@mock.patch.object(heatclient, 'Client')
def test_clients_heat_cached(self, mock_call):
self.stub_keystoneclient()
con = mock.Mock()
con.auth_url = "http://auth.example.com:5000/v2.0"
con.tenant_id = "b363706f891f48019483f8bd6503c54b"
con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
c = clients.Clients(con)
con.clients = c
obj = c.client_plugin('heat')
obj.get_heat_url = mock.Mock(name="get_heat_url")
obj.get_heat_url.return_value = None
obj.url_for = mock.Mock(name="url_for")
obj.url_for.return_value = "url_from_keystone"
obj._client = None
heat = obj.client()
heat_cached = obj.client()
self.assertEqual(heat, heat_cached)
def test_clients_auth_token_update(self):
fkc = self.stub_keystoneclient(auth_token='token1')
con = mock.Mock()
con.auth_url = "http://auth.example.com:5000/v2.0"
con.trust_id = "b363706f891f48019483f8bd6503c54b"
con.username = 'heat'
con.password = 'verysecret'
con.auth_token = None
obj = clients.Clients(con)
con.clients = obj
self.assertIsNotNone(obj.client('heat'))
self.assertEqual('token1', obj.auth_token)
fkc.auth_token = 'token2'
self.assertEqual('token2', obj.auth_token)
class FooClientsPlugin(client_plugin.ClientPlugin):
def _create(self):
pass
class ClientPluginTest(HeatTestCase):
def test_get_client_option(self):
con = mock.Mock()
con.auth_url = "http://auth.example.com:5000/v2.0"
con.tenant_id = "b363706f891f48019483f8bd6503c54b"
con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
c = clients.Clients(con)
con.clients = c
plugin = FooClientsPlugin(con)
cfg.CONF.set_override('ca_file', '/tmp/bar',
group='clients_heat')
cfg.CONF.set_override('ca_file', '/tmp/foo',
group='clients')
cfg.CONF.set_override('endpoint_type', 'internalURL',
group='clients')
# check heat group
self.assertEqual('/tmp/bar',
plugin._get_client_option('heat', 'ca_file'))
# check fallback clients group for known client
self.assertEqual('internalURL',
plugin._get_client_option('glance', 'endpoint_type'))
# check fallback clients group for unknown client foo
self.assertEqual('/tmp/foo',
plugin._get_client_option('foo', 'ca_file'))
def test_auth_token(self):
con = mock.Mock()
con.auth_token = "1234"
c = clients.Clients(con)
con.clients = c
c.client = mock.Mock(name="client")
mock_keystone = mock.Mock()
c.client.return_value = mock_keystone
mock_keystone.auth_token = '5678'
plugin = FooClientsPlugin(con)
# assert token is from keystone rather than context
# even though both are set
self.assertEqual('5678', plugin.auth_token)
c.client.assert_called_with('keystone')
def test_url_for(self):
con = mock.Mock()
con.auth_token = "1234"
c = clients.Clients(con)
con.clients = c
c.client = mock.Mock(name="client")
mock_keystone = mock.Mock()
c.client.return_value = mock_keystone
mock_keystone.url_for.return_value = 'http://192.0.2.1/foo'
plugin = FooClientsPlugin(con)
self.assertEqual('http://192.0.2.1/foo',
plugin.url_for(service_type='foo'))
c.client.assert_called_with('keystone')
def test_abstract_create(self):
con = mock.Mock()
c = clients.Clients(con)
con.clients = c
self.assertRaises(TypeError, client_plugin.ClientPlugin, c)
class TestClientPluginsInitialise(HeatTestCase):
@skip('skipped until keystone can read context auth_ref')
def test_create_all_clients(self):
con = mock.Mock()
con.auth_url = "http://auth.example.com:5000/v2.0"
con.tenant_id = "b363706f891f48019483f8bd6503c54b"
con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
c = clients.Clients(con)
con.clients = c
for plugin_name in clients._mgr.names():
self.assertTrue(clients.has_client(plugin_name))
c.client(plugin_name)
def test_create_all_client_plugins(self):
plugin_types = clients._mgr.names()
self.assertIsNotNone(plugin_types)
con = mock.Mock()
c = clients.Clients(con)
con.clients = c
for plugin_name in plugin_types:
plugin = c.client_plugin(plugin_name)
self.assertIsNotNone(plugin)
self.assertEqual(c, plugin.clients)
self.assertEqual(con, plugin.context)
self.assertIsNone(plugin._client)
self.assertTrue(clients.has_client(plugin_name))
class TestIsNotFound(HeatTestCase):
scenarios = [
('ceilometer_not_found', dict(
is_not_found=True,
is_over_limit=False,
is_client_exception=True,
plugin='ceilometer',
exception=lambda: ceil_exc.HTTPNotFound(details='gone'),
)),
('ceilometer_not_found_apiclient', dict(
is_not_found=True,
is_over_limit=False,
is_client_exception=True,
plugin='ceilometer',
exception=lambda: c_a_exc.NotFound(details='gone'),
)),
('ceilometer_exception', dict(
is_not_found=False,
is_over_limit=False,
is_client_exception=False,
plugin='ceilometer',
exception=lambda: Exception()
)),
('ceilometer_overlimit', dict(
is_not_found=False,
is_over_limit=True,
is_client_exception=True,
plugin='ceilometer',
exception=lambda: ceil_exc.HTTPOverLimit(details='over'),
)),
('cinder_not_found', dict(
is_not_found=True,
is_over_limit=False,
is_client_exception=True,
plugin='cinder',
exception=lambda: cinder_exc.NotFound(code=404),
)),
('cinder_exception', dict(
is_not_found=False,
is_over_limit=False,
is_client_exception=False,
plugin='cinder',
exception=lambda: Exception()
)),
('cinder_overlimit', dict(
is_not_found=False,
is_over_limit=True,
is_client_exception=True,
plugin='cinder',
exception=lambda: cinder_exc.OverLimit(code=413),
)),
('glance_not_found', dict(
is_not_found=True,
is_over_limit=False,
is_client_exception=True,
plugin='glance',
exception=lambda: glance_exc.HTTPNotFound(details='gone'),
)),
('glance_exception', dict(
is_not_found=False,
is_over_limit=False,
is_client_exception=False,
plugin='glance',
exception=lambda: Exception()
)),
('glance_overlimit', dict(
is_not_found=False,
is_over_limit=True,
is_client_exception=True,
plugin='glance',
exception=lambda: glance_exc.HTTPOverLimit(details='over'),
)),
('heat_not_found', dict(
is_not_found=True,
is_over_limit=False,
is_client_exception=True,
plugin='heat',
exception=lambda: heat_exc.HTTPNotFound(message='gone'),
)),
('heat_exception', dict(
is_not_found=False,
is_over_limit=False,
is_client_exception=False,
plugin='heat',
exception=lambda: Exception()
)),
('heat_overlimit', dict(
is_not_found=False,
is_over_limit=True,
is_client_exception=True,
plugin='heat',
exception=lambda: heat_exc.HTTPOverLimit(message='over'),
)),
('keystone_not_found', dict(
is_not_found=True,
is_over_limit=False,
is_client_exception=True,
plugin='keystone',
exception=lambda: keystone_exc.NotFound(details='gone'),
)),
('keystone_exception', dict(
is_not_found=False,
is_over_limit=False,
is_client_exception=False,
plugin='keystone',
exception=lambda: Exception()
)),
('keystone_overlimit', dict(
is_not_found=False,
is_over_limit=True,
is_client_exception=True,
plugin='keystone',
exception=lambda: keystone_exc.RequestEntityTooLarge(
details='over'),
)),
('neutron_not_found', dict(
is_not_found=True,
is_over_limit=False,
is_client_exception=True,
plugin='neutron',
exception=lambda: neutron_exc.NotFound,
)),
('neutron_network_not_found', dict(
is_not_found=True,
is_over_limit=False,
is_client_exception=True,
plugin='neutron',
exception=lambda: neutron_exc.NetworkNotFoundClient(),
)),
('neutron_port_not_found', dict(
is_not_found=True,
is_over_limit=False,
is_client_exception=True,
plugin='neutron',
exception=lambda: neutron_exc.PortNotFoundClient(),
)),
('neutron_status_not_found', dict(
is_not_found=True,
is_over_limit=False,
is_client_exception=True,
plugin='neutron',
exception=lambda: neutron_exc.NeutronClientException(
status_code=404),
)),
('neutron_exception', dict(
is_not_found=False,
is_over_limit=False,
is_client_exception=False,
plugin='neutron',
exception=lambda: Exception()
)),
('neutron_overlimit', dict(
is_not_found=False,
is_over_limit=True,
is_client_exception=True,
plugin='neutron',
exception=lambda: neutron_exc.NeutronClientException(
status_code=413),
)),
('nova_not_found', dict(
is_not_found=True,
is_over_limit=False,
is_client_exception=True,
is_unprocessable_entity=False,
plugin='nova',
exception=lambda: fakes.fake_exception(),
)),
('nova_exception', dict(
is_not_found=False,
is_over_limit=False,
is_client_exception=False,
is_unprocessable_entity=False,
plugin='nova',
exception=lambda: Exception()
)),
('nova_overlimit', dict(
is_not_found=False,
is_over_limit=True,
is_client_exception=True,
is_unprocessable_entity=False,
plugin='nova',
exception=lambda: fakes.fake_exception(413),
)),
('nova_unprocessable_entity', dict(
is_not_found=False,
is_over_limit=False,
is_client_exception=True,
is_unprocessable_entity=True,
plugin='nova',
exception=lambda: fakes.fake_exception(422),
)),
('swift_not_found', dict(
is_not_found=True,
is_over_limit=False,
is_client_exception=True,
plugin='swift',
exception=lambda: swift_exc.ClientException(
msg='gone', http_status=404),
)),
('swift_exception', dict(
is_not_found=False,
is_over_limit=False,
is_client_exception=False,
plugin='swift',
exception=lambda: Exception()
)),
('swift_overlimit', dict(
is_not_found=False,
is_over_limit=True,
is_client_exception=True,
plugin='swift',
exception=lambda: swift_exc.ClientException(
msg='ouch', http_status=413),
)),
('trove_not_found', dict(
is_not_found=True,
is_over_limit=False,
is_client_exception=True,
plugin='trove',
exception=lambda: trove_exc.NotFound(message='gone'),
)),
('trove_exception', dict(
is_not_found=False,
is_over_limit=False,
is_client_exception=False,
plugin='trove',
exception=lambda: Exception()
)),
('trove_overlimit', dict(
is_not_found=False,
is_over_limit=True,
is_client_exception=True,
plugin='trove',
exception=lambda: trove_exc.RequestEntityTooLarge(
message='over'),
)),
]
def test_is_not_found(self):
con = mock.Mock()
c = clients.Clients(con)
client_plugin = c.client_plugin(self.plugin)
try:
raise self.exception()
except Exception as e:
if self.is_not_found != client_plugin.is_not_found(e):
raise
def test_ignore_not_found(self):
con = mock.Mock()
c = clients.Clients(con)
client_plugin = c.client_plugin(self.plugin)
try:
exp = self.exception()
exp_class = exp.__class__
raise exp
except Exception as e:
if self.is_not_found:
client_plugin.ignore_not_found(e)
else:
self.assertRaises(exp_class,
client_plugin.ignore_not_found,
e)
def test_is_over_limit(self):
con = mock.Mock()
c = clients.Clients(con)
client_plugin = c.client_plugin(self.plugin)
try:
raise self.exception()
except Exception as e:
if self.is_over_limit != client_plugin.is_over_limit(e):
raise
def test_is_client_exception(self):
con = mock.Mock()
c = clients.Clients(con)
client_plugin = c.client_plugin(self.plugin)
try:
raise self.exception()
except Exception as e:
ice = self.is_client_exception
actual = client_plugin.is_client_exception(e)
if ice != actual:
raise
def test_is_unprocessable_entity(self):
con = mock.Mock()
c = clients.Clients(con)
# only 'nova' client plugin need to check this exception
if self.plugin == 'nova':
client_plugin = c.client_plugin(self.plugin)
try:
raise self.exception()
except Exception as e:
iue = self.is_unprocessable_entity
if iue != client_plugin.is_unprocessable_entity(e):
raise
| apache-2.0 | 8,988,158,018,115,488,000 | 33.771963 | 78 | 0.565339 | false | 3.843595 | true | false | false |
dstftw/youtube-dl | youtube_dl/extractor/linuxacademy.py | 9 | 6559 | from __future__ import unicode_literals
import json
import random
import re
from .common import InfoExtractor
from ..compat import (
compat_b64decode,
compat_HTTPError,
compat_str,
)
from ..utils import (
ExtractorError,
orderedSet,
unescapeHTML,
urlencode_postdata,
urljoin,
)
class LinuxAcademyIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
(?:www\.)?linuxacademy\.com/cp/
(?:
courses/lesson/course/(?P<chapter_id>\d+)/lesson/(?P<lesson_id>\d+)|
modules/view/id/(?P<course_id>\d+)
)
'''
_TESTS = [{
'url': 'https://linuxacademy.com/cp/courses/lesson/course/1498/lesson/2/module/154',
'info_dict': {
'id': '1498-2',
'ext': 'mp4',
'title': "Introduction to the Practitioner's Brief",
},
'params': {
'skip_download': True,
},
'skip': 'Requires Linux Academy account credentials',
}, {
'url': 'https://linuxacademy.com/cp/courses/lesson/course/1498/lesson/2',
'only_matching': True,
}, {
'url': 'https://linuxacademy.com/cp/modules/view/id/154',
'info_dict': {
'id': '154',
'title': 'AWS Certified Cloud Practitioner',
'description': 'md5:039db7e60e4aac9cf43630e0a75fa834',
},
'playlist_count': 41,
'skip': 'Requires Linux Academy account credentials',
}]
_AUTHORIZE_URL = 'https://login.linuxacademy.com/authorize'
_ORIGIN_URL = 'https://linuxacademy.com'
_CLIENT_ID = 'KaWxNn1C2Gc7n83W9OFeXltd8Utb5vvx'
_NETRC_MACHINE = 'linuxacademy'
def _real_initialize(self):
self._login()
def _login(self):
username, password = self._get_login_info()
if username is None:
return
def random_string():
return ''.join([
random.choice('0123456789ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvwxyz-._~')
for _ in range(32)])
webpage, urlh = self._download_webpage_handle(
self._AUTHORIZE_URL, None, 'Downloading authorize page', query={
'client_id': self._CLIENT_ID,
'response_type': 'token id_token',
'redirect_uri': self._ORIGIN_URL,
'scope': 'openid email user_impersonation profile',
'audience': self._ORIGIN_URL,
'state': random_string(),
'nonce': random_string(),
})
login_data = self._parse_json(
self._search_regex(
r'atob\(\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
'login info', group='value'), None,
transform_source=lambda x: compat_b64decode(x).decode('utf-8')
)['extraParams']
login_data.update({
'client_id': self._CLIENT_ID,
'redirect_uri': self._ORIGIN_URL,
'tenant': 'lacausers',
'connection': 'Username-Password-Authentication',
'username': username,
'password': password,
'sso': 'true',
})
login_state_url = compat_str(urlh.geturl())
try:
login_page = self._download_webpage(
'https://login.linuxacademy.com/usernamepassword/login', None,
'Downloading login page', data=json.dumps(login_data).encode(),
headers={
'Content-Type': 'application/json',
'Origin': 'https://login.linuxacademy.com',
'Referer': login_state_url,
})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
error = self._parse_json(e.cause.read(), None)
message = error.get('description') or error['code']
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, message), expected=True)
raise
callback_page, urlh = self._download_webpage_handle(
'https://login.linuxacademy.com/login/callback', None,
'Downloading callback page',
data=urlencode_postdata(self._hidden_inputs(login_page)),
headers={
'Content-Type': 'application/x-www-form-urlencoded',
'Origin': 'https://login.linuxacademy.com',
'Referer': login_state_url,
})
access_token = self._search_regex(
r'access_token=([^=&]+)', compat_str(urlh.geturl()),
'access token')
self._download_webpage(
'https://linuxacademy.com/cp/login/tokenValidateLogin/token/%s'
% access_token, None, 'Downloading token validation page')
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
chapter_id, lecture_id, course_id = mobj.group('chapter_id', 'lesson_id', 'course_id')
item_id = course_id if course_id else '%s-%s' % (chapter_id, lecture_id)
webpage = self._download_webpage(url, item_id)
# course path
if course_id:
entries = [
self.url_result(
urljoin(url, lesson_url), ie=LinuxAcademyIE.ie_key())
for lesson_url in orderedSet(re.findall(
r'<a[^>]+\bhref=["\'](/cp/courses/lesson/course/\d+/lesson/\d+/module/\d+)',
webpage))]
title = unescapeHTML(self._html_search_regex(
(r'class=["\']course-title["\'][^>]*>(?P<value>[^<]+)',
r'var\s+title\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1'),
webpage, 'title', default=None, group='value'))
description = unescapeHTML(self._html_search_regex(
r'var\s+description\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
webpage, 'description', default=None, group='value'))
return self.playlist_result(entries, course_id, title, description)
# single video path
info = self._extract_jwplayer_data(
webpage, item_id, require_title=False, m3u8_id='hls',)
title = self._search_regex(
(r'>Lecture\s*:\s*(?P<value>[^<]+)',
r'lessonName\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1'), webpage,
'title', group='value')
info.update({
'id': item_id,
'title': title,
})
return info
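# Hypothetical usage note (added for illustration, not part of the original
# extractor): Linux Academy requires an account, so youtube-dl is normally run
# with credentials, either on the command line or via a .netrc entry for the
# machine "linuxacademy", e.g.
#
#   youtube-dl --username [email protected] --password hunter2 \
#       https://linuxacademy.com/cp/courses/lesson/course/1498/lesson/2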
| unlicense | -3,457,482,050,541,847,000 | 36.695402 | 98 | 0.518524 | false | 3.820035 | false | false | false |
jdodds/feather | feather/plugin.py | 1 | 2692 | from multiprocessing import Process, Queue
class InvalidArguments(ValueError):
pass
class Plugin(Process):
"""A Plugin is a self-contained bit of functionality that runs in it's own
process, and runs via listening for messages and sending messages through
Queues.
"""
listeners = set(['SHUTDOWN'])
messengers = set([])
name = 'Base Plugin'
def __new__(cls, *args, **kwargs):
plug = super(Plugin, cls).__new__(cls, *args, **kwargs)
plug.listeners.update(Plugin.listeners)
return plug
def __init__(self):
"""Set us up to run as a separate process, initialze our listener Queue,
and set our runnable attribute.
"""
super(Plugin, self).__init__()
self.listener = Queue()
self.runnable = True
def send(self, message, payload=None):
"""Send a message through our messenger Queue.
Messages are presumably descriptions of a task that just got completed,
or a notification of status, or whatnot.
"""
self.messenger.put((message, payload))
def recieve(self, message, payload=None):
"""Get a message from our listener Queue.
This should currently be used in a subclasses self.run loop.
"""
self.listener.put((message, payload))
def SHUTDOWN(self, payload):
"""Set self.runnable to false.
This should cause a subclass to break out of it's run loop.
"""
self.runnable=False
def pre_run(self):
"""Code to be run before our run loop starts"""
pass
def pre_call_message(self):
"""Code to be run before calling a message handler"""
pass
def pre_first_call_message(self):
"""Code to be run before calling the first message handler"""
def post_first_call_message(self):
"""Code to be run after the first message has been handled"""
pass
def post_call_message(self):
"""Code to be run after a message has been handled"""
pass
def post_run(self):
"""Code to be run after our run loop terminates"""
pass
def run(self):
"""Run our loop, and any defined hooks...
"""
self.pre_run()
first = True
while self.runnable:
self.pre_call_message()
if first:
self.pre_first_call_message()
message, payload = self.listener.get()
getattr(self, message)(payload)
if first:
first = False
self.post_first_call_message()
self.post_call_message()
self.post_run()
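# ---------------------------------------------------------------------------
# Hypothetical example (added for illustration, not part of the original
# module): a minimal subclass that answers a single PING message.  The
# 'messenger' queue is assumed to be attached by whatever hosts the plugins,
# so the demo below wires one up by hand.
# ---------------------------------------------------------------------------
class EchoPlugin(Plugin):
    """Illustrative plugin that replies to PING with PONG."""
    listeners = set(['PING'])
    messengers = set(['PONG'])
    name = 'Echo Plugin'
    def PING(self, payload):
        self.send('PONG', payload)
if __name__ == '__main__':
    echo = EchoPlugin()
    echo.messenger = Queue()           # normally provided by the host application
    echo.start()
    echo.recieve('PING', 'hello')      # deliver a message to the running plugin
    print(echo.messenger.get())        # -> ('PONG', 'hello')
    echo.recieve('SHUTDOWN')           # ask the run loop to exit
    echo.join()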
| bsd-3-clause | 357,535,382,207,170,900 | 28.582418 | 80 | 0.581352 | false | 4.434926 | false | false | false |
christydennison/ResumeStripClub | convert.py | 1 | 3312 | #!/usr/bin/env python
import sys
import os
import pdf2txt
import re
REDACTED_TEXT = 'REDACTED'
LINK_REGEX = re.compile('(https?:\/\/)?([a-zA-Z0-9]{2,4}\.)?(linkedin.com|lnkd\.in|github.com)\/.+')
EMAIL_REGEX = re.compile('([\w\.]+@(?:[\w]+\.)+[a-zA-Z]{2,})')
BLACKLIST_FILE = "bad_words.txt"
def get_blacklist_words():
blacklist = []
try:
with open(BLACKLIST_FILE) as f:
lines = f.read().splitlines()
for line in lines:
if line:
blacklist.append(line.lower().strip())
except Exception as e:
print "Unable to read bad words from {0}. Error: {1}".format(BLACKLIST_FILE, e)
return set(blacklist)
def join_newlines(array):
return '\n'.join(array)
def redact_initial(file_lines, lastname):
processed_file_lines = []
fullname = ''
firstname = ''
for index, line in enumerate(file_lines):
newline = line
links = LINK_REGEX.search(newline.replace(" ", ""))
if links:
matching_text = links.group()
# print 'links!', matching_text
newline = newline.replace(" ", "").replace(matching_text, REDACTED_TEXT + ' PROFILE')
# print newline
emails = EMAIL_REGEX.search(newline.replace(" ", ""))
if emails:
matching_text = emails.group(1)
# print 'emails!', matching_text
newline = newline.replace(" ", "").replace(matching_text, REDACTED_TEXT + ' EMAIL')
# print newline
if lastname.lower() in newline.lower() or lastname.lower() in newline.lower().replace(" ", ""):
fullname = newline.replace(" ", "")
firstname = re.split(lastname, fullname, flags=re.IGNORECASE)[0]
print fullname
print firstname
newline = newline.replace(" ", "").replace(firstname, firstname[0] + '. ')
# print 'name',firstname
# print newline
processed_file_lines.append(newline)
return processed_file_lines
def redact(list_of_lines):
output = []
blacklist = get_blacklist_words()
for line in list_of_lines:
newline = line
for word in blacklist:
to_replace = re.compile("[^\w]{0}[^\w]".format(word), re.IGNORECASE)
newline = to_replace.sub(" {} ".format(REDACTED_TEXT), newline)
# print newline
output.append(newline)
return output
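# Hypothetical example (added for illustration), assuming bad_words.txt holds
# the single word "confidential":
#
#   redact(["quarterly confidential report", "nothing to hide"])
#   -> ["quarterly REDACTED report", "nothing to hide"]
#
# Note the word must have a non-word character on both sides to be matched.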
def process(fname):
lastname = '.'.join(os.path.basename(fname).split(".")[:-1])
print 'Using name', lastname
pathname = os.path.dirname(fname)
file_path = os.path.join(pathname, lastname)
txt_file_path = file_path + '.txt'
redacted_file_path = file_path + '_redacted.txt'
# os.remove(redacted_file_path)
pdf2txt.main(['', '-o', txt_file_path, fname])
with open(txt_file_path) as f:
lines = f.read().splitlines()
names_redacted = redact_initial(lines, lastname)
output = redact(names_redacted)
with open(redacted_file_path, 'w') as ofile:
ofile.write(join_newlines(output))
if __name__ == "__main__":
filenames = []
if len(sys.argv) > 1:
filenames = sys.argv[1:]
else:
print "You must give at least one file to process"
sys.exit(1)
for filename in filenames:
process(filename)
| mit | -276,878,482,242,925,820 | 30.245283 | 103 | 0.583635 | false | 3.580541 | false | false | false |
zzcclp/carbondata | python/pycarbon/tests/hello_world/dataset_with_unischema/pyspark_hello_world_carbon.py | 3 | 1851 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Minimal example of how to read samples from a dataset generated by `generate_pycarbon_dataset.py`
using pyspark"""
from __future__ import print_function
from pyspark.sql import SparkSession
def pyspark_hello_world(dataset_url='file:///tmp/carbon_pycarbon_dataset'):
spark = SparkSession \
.builder \
.master('local[1]') \
.getOrCreate()
dataset_path = dataset_url[7:]
# Create a dataframe object from carbon files
spark.sql("create table readcarbon using carbon location '" + str(dataset_path) + "'")
dataframe = spark.sql("select * from readcarbon")
# Show a schema
dataframe.printSchema()
# Count all
dataframe.count()
# Show just some columns
dataframe.select('id').show()
# This is how you can use a standard SQL to query a dataset. Note that the data is not decoded in this case.
number_of_rows = spark.sql(
'SELECT count(id) '
'from carbon.`{}` '.format(dataset_url)).collect()
print('Number of rows in the dataset: {}'.format(number_of_rows[0][0]))
if __name__ == '__main__':
pyspark_hello_world()
| apache-2.0 | -1,427,326,797,503,931,100 | 33.924528 | 110 | 0.723933 | false | 3.905063 | false | false | false |
genialis/resolwe-bio | resolwe_bio/processes/import_data/bam_scseq.py | 1 | 2434 | """Upload single cell BAM."""
from resolwe.process import (
Cmd,
DataField,
FileField,
Process,
SchedulingClass,
StringField,
)
class ImportScBam(Process):
"""Import scSeq BAM file and index."""
slug = "upload-bam-scseq-indexed"
name = "Single cell BAM file and index"
process_type = "data:alignment:bam:scseq"
version = "1.2.1"
category = "Import"
scheduling_class = SchedulingClass.BATCH
entity = {"type": "sample"}
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:2.3.1"}
},
}
data_name = '{{ reads|sample_name|default("?") }}'
class Input:
"""Input fields to process Import ScBam."""
src = FileField(
description="A mapping file in BAM format.",
label="Mapping (BAM)",
)
src2 = FileField(
description="An index file of a BAM mapping file (ending with bam.bai).",
label="BAM index (*.bam.bai file)",
)
reads = DataField(
data_type="screads:",
label="Single cell fastq reads",
)
species = StringField(
label="Species",
description="Species latin name.",
)
build = StringField(
label="Build",
)
class Output:
"""Output fields to process Import ScBam."""
bam = FileField(label="Uploaded BAM")
bai = FileField(label="Index BAI")
stats = FileField(label="Alignment statistics")
build = StringField(label="Build")
species = StringField(label="Species")
def run(self, inputs, outputs):
"""Run the analysis."""
bam_path = inputs.src.import_file(imported_format="extracted")
bai_path = inputs.src2.import_file(imported_format="extracted")
assert bam_path.endswith(".bam")
assert bai_path.endswith(".bam.bai")
bam_name = bam_path[:-4]
bai_name = bai_path[:-8]
if bam_name != bai_name:
self.error("BAM and BAI files should have the same name.")
stats = "{}_stats.txt".format(bam_name)
(Cmd["samtools"]["flagstat"][bam_path] > stats)()
outputs.bam = bam_path
outputs.bai = bai_path
outputs.stats = stats
outputs.species = inputs.species
outputs.build = inputs.build
| apache-2.0 | 2,919,631,471,827,741,000 | 29.425 | 85 | 0.5682 | false | 3.815047 | false | false | false |
gnsiva/Amphitrite | lib/RawFileProcessor.py | 1 | 6313 | """File for running cppapplication.exe to create Amphitrite
data files from MassLynx raw files.
Deprecated - Use RawFileProcessor_v2.py
"""
__author__ = "Ganesh N. Sivalingam <[email protected]"
import os
import shutil
import re
import subprocess
import cPickle as pickle
import numpy as np
import time
import utils
class RawFileProcessor():
def __init__(self,rawPath):
self.path = rawPath
self.rawfolder = os.path.basename(self.path.rstrip('/'))
if not self.path.rstrip('/')[-2:] == '.a':
self.outputfolder = self.path.rstrip('.raw') + '.a'
else:
self.outputfolder = self.path
def setOutputFolder(self,outputFolderPath):
rawFileName = os.path.basename(self.rawfolder)
rawFileName = rawFileName.rstrip('.raw/') + '.a'
self.outputfolder = os.path.join(outputFolderPath,rawFileName)
def processFolder(self,grain=2):
'''1. Copy raw file to working directory
2. Run CppApplication
3. Read text files and make imObj
4. Delete text files, remove raw file
5. Make new folder for processed data
6. Dump pickles there'''
if not self._checkIfProcessed():
# 1
if not os.path.isdir(os.path.join('.',self.rawfolder)):
shutil.copytree(self.path,os.path.join('.',self.rawfolder))
# 2
#print 'raw folder', self.rawfolder
#print 'path', self.path
print \
'''=================================
Arguments passed
================================='''
print ['cppapplication.exe',self.rawfolder,"0","1",str(grain),"0"]
print \
'''================================='''
p = subprocess.call(['cppapplication.exe',self.rawfolder,"0","1",str(grain),"0"])
#print p
#print 'cwd', os.getcwd()
for file in ['MassMobility.txt','MassMobilityXaxis.txt','MassMobilityYaxis.txt']:
try:
os.rename(file, os.path.join(self.rawfolder,file))
except:
print 'waiting for cppapplication'
subprocess.call(['cppapplication.exe',str(self.rawfolder),"0",str(grain),"0"])
time.sleep(5)
try:
os.rename(file, os.path.join(self.rawfolder,file))
except:
print 'still waiting'
time.sleep(10)
try:
os.rename(file, os.path.join(self.rawfolder,file))
except:
print 'Couldnt open file: %s' %self.rawfolder
shutil.rmtree(self.rawfolder)
if not os.path.isdir(self.outputfolder):
os.mkdir(self.outputfolder)
self._processAxisX()
self._processAxisY()
            self._processMassMobility()
shutil.rmtree(path=self.rawfolder)
print 'File processed: %s' %self.rawfolder
def _checkIfProcessed(self):
processed = False
amphiFns = ['MassMobilityXaxis.amphi','MassMobilityYaxis.amphi','MassMobility.amphi']
if os.path.isdir(self.outputfolder):
if utils.isInDir(self.outputfolder, amphiFns):
processed = True
# Legacy support for text files (TO BE REMOVED)
textFns = ['MassMobilityXaxis.txt','MassMobilityYaxis.txt','MassMobility.txt']
if utils.isInDir(self.path,textFns):
processed = True
return processed
def getAxisX(self):
return self._unPickle(os.path.join(self.outputfolder,'MassMobilityXaxis.amphi'))
def getAxisY(self):
return self._unPickle(os.path.join(self.outputfolder,'MassMobilityYaxis.amphi'))
def getMassMobility(self):
return self._unPickle(os.path.join(self.outputfolder,'MassMobility.amphi'))
def _processMassMobililty(self,removeTxt=1):
path = os.path.join(self.rawfolder, 'MassMobility.txt')
text = open(path,'r').readlines()
if removeTxt:
os.remove(path)
lines = len(text)
file = open('temp.xsg','w')
for i,line in enumerate(text):
if i != (lines-1):
print>> file, line.rstrip('\n')
else:
print>> file, line.rstrip(',\n')
file.close()
ifile = open('temp.xsg','r')
temp = np.fromfile(ifile,dtype=np.float64,sep=',')
ifile.close()
os.remove('temp.xsg')
temp = np.array_split(temp,200)
massMobility = np.flipud(temp)
self._pickle(massMobility, os.path.join(self.outputfolder,'MassMobility.amphi'))
def _processAxisX(self,removeTxt=1):
path = os.path.join(self.rawfolder,'MassMobilityXaxis.txt')
ifile = open(path,'r')
xAxis = np.fromfile(ifile,dtype='float64',sep=',')
ifile.close()
if removeTxt:
os.remove(path)
self._pickle(xAxis[:-2], os.path.join(self.outputfolder,'MassMobilityXaxis.amphi'))
def _processAxisY(self,removeTxt=1):
path = os.path.join(self.rawfolder,'MassMobilityYaxis.txt')
ifile = open(path,'r')
yAxis = np.fromfile(path,sep='\n')
ifile.close()
if removeTxt:
os.remove(path)
yAxis = yAxis[::-1]
self._pickle(yAxis, os.path.join(self.outputfolder,'MassMobilityYaxis.amphi'))
def _pickle(self,obj,filename):
obj.dump(filename)
def _unPickle(self,filename):
ifile = open(os.path.join(filename),'rb')
obj = pickle.load(ifile)
ifile.close()
return obj
def makePreview(self):
import fast_driftscope_image as fdi
image = fdi.Driftscope_image()
image.load_folder(self.outputfolder)
image.normalise_mobility()
image.driftscope()
imagename = self.rawfolder.rstrip('/')[:-4] + '_preview.png'
image.savefig(os.path.dirname(self.outputfolder),imagename)
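# ---------------------------------------------------------------------------
# Hypothetical usage sketch (added for illustration).  It assumes a MassLynx
# acquisition called 'protein.raw' in the working directory and that
# cppapplication.exe is reachable, as processFolder() expects.
#
#   processor = RawFileProcessor('protein.raw')
#   processor.processFolder(grain=2)         # writes protein.a/*.amphi pickles
#   xaxis = processor.getAxisX()             # m/z axis (numpy array)
#   yaxis = processor.getAxisY()             # arrival time axis (numpy array)
#   matrix = processor.getMassMobility()     # 200-row intensity matrix
# ---------------------------------------------------------------------------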
| gpl-2.0 | 8,928,408,720,065,607,000 | 34.869318 | 98 | 0.546175 | false | 3.913825 | false | false | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyKDE4/kdeui/KShortcutsEditor.py | 1 | 1963 | # encoding: utf-8
# module PyKDE4.kdeui
# from /usr/lib/python3/dist-packages/PyKDE4/kdeui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdecore as __PyKDE4_kdecore
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
import PyQt4.QtSvg as __PyQt4_QtSvg
class KShortcutsEditor(__PyQt4_QtGui.QWidget):
# no doc
def addCollection(self, *args, **kwargs): # real signature unknown
pass
def allDefault(self, *args, **kwargs): # real signature unknown
pass
def clearCollections(self, *args, **kwargs): # real signature unknown
pass
def clearConfiguration(self, *args, **kwargs): # real signature unknown
pass
def commit(self, *args, **kwargs): # real signature unknown
pass
def exportConfiguration(self, *args, **kwargs): # real signature unknown
pass
def importConfiguration(self, *args, **kwargs): # real signature unknown
pass
def isModified(self, *args, **kwargs): # real signature unknown
pass
def keyChange(self, *args, **kwargs): # real signature unknown
pass
def printShortcuts(self, *args, **kwargs): # real signature unknown
pass
def resizeColumns(self, *args, **kwargs): # real signature unknown
pass
def save(self, *args, **kwargs): # real signature unknown
pass
def undoChanges(self, *args, **kwargs): # real signature unknown
pass
def writeConfiguration(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
ActionType = None # (!) real value is ''
ActionTypes = None # (!) real value is ''
AllActions = -1
ApplicationAction = 2
GlobalAction = 4
LetterShortcuts = None # (!) real value is ''
LetterShortcutsAllowed = 1
LetterShortcutsDisallowed = 0
WidgetAction = 0
WindowAction = 1
| gpl-2.0 | -8,009,713,668,091,190,000 | 26.263889 | 82 | 0.652573 | false | 3.819066 | false | false | false |
jh23453/privacyidea | migrations/versions/5402fd96fbca_.py | 2 | 2021 | """Add smsgateway table
Revision ID: 5402fd96fbca
Revises: 50adc980d625
Create Date: 2016-06-19 17:25:05.152889
"""
# revision identifiers, used by Alembic.
revision = '5402fd96fbca'
down_revision = '50adc980d625'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.exc import OperationalError, ProgrammingError, InternalError
def upgrade():
try:
op.create_table('smsgateway',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('identifier', sa.Unicode(length=255), nullable=False),
sa.Column('description', sa.Unicode(length=1024), nullable=True),
sa.Column('providermodule', sa.Unicode(length=1024), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('identifier')
)
op.create_table('smsgatewayoption',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('Key', sa.Unicode(length=255), nullable=False),
sa.Column('Value', sa.UnicodeText(), nullable=True),
sa.Column('Type', sa.Unicode(length=100), nullable=True),
sa.Column('gateway_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['gateway_id'], ['smsgateway.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('gateway_id', 'Key', name='sgix_1')
)
op.create_index(op.f('ix_smsgatewayoption_gateway_id'), 'smsgatewayoption', ['gateway_id'], unique=False)
except (OperationalError, ProgrammingError, InternalError) as exx:
if exx.orig.message.lower().startswith("duplicate column name"):
print("Good. Table smsgateway already exists.")
else:
print("Table already exists")
print(exx)
except Exception as exx:
print("Could not add Table smsgateway")
print (exx)
### end Alembic commands ###
def downgrade():
op.drop_index(op.f('ix_smsgatewayoption_gateway_id'), table_name='smsgatewayoption')
op.drop_table('smsgatewayoption')
op.drop_table('smsgateway')
| agpl-3.0 | 1,564,364,602,555,797,000 | 35.089286 | 113 | 0.65908 | false | 3.634892 | false | false | false |
MartinThoma/algorithms | daemon_rpc/call_daemon.py | 1 | 1082 | #!/usr/bin/env python
"""Talk with the daemon."""
import logging
import sys
import Pyro.core
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
stream=sys.stdout)
def main(up):
"""Do something with bartimaeus - who lives in another realm."""
# you have to change the URI below to match your own host/port.
logging.info("Send up: %i", up)
bartimaeus = Pyro.core.getProxyForURI("PYROLOC://localhost:7766/bartid")
print(bartimaeus.count(up))
def get_parser():
"""Get parser object for call_demon.py."""
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-n",
dest="up",
default=1,
type=int,
help="count up")
return parser
if __name__ == "__main__":
args = get_parser().parse_args()
main(args.up)
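# Hypothetical usage note (added for illustration): with the matching daemon
# already running and exposing the "bartid" object on localhost:7766, counting
# up by five looks like
#
#   python call_daemon.py -n 5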
| mit | -7,298,478,674,700,020,000 | 27.473684 | 76 | 0.588725 | false | 4.022305 | false | false | false |
zhuyue1314/archinfo | archinfo/arch.py | 1 | 9301 | ''' This class is responsible for architecture-specific things such as call emulation and so forth. '''
import capstone as _capstone
import struct as _struct
import pyvex as _pyvex
import logging
l = logging.getLogger('arch.Arch')
class Arch(object):
def __init__(self, endness):
if endness not in ('Iend_LE', 'Iend_BE'):
raise ArchError('Must pass a valid VEX endness: "Iend_LE" or "Iend_BE"')
if endness == 'Iend_BE':
self.vex_endness = "VexEndnessBE"
self.memory_endness = 'Iend_BE'
self.register_endness = 'Iend_BE'
self.cs_mode -= _capstone.CS_MODE_LITTLE_ENDIAN
self.cs_mode += _capstone.CS_MODE_BIG_ENDIAN
self.ret_instruction = reverse_ends(self.ret_instruction)
self.nop_instruction = reverse_ends(self.nop_instruction)
def __repr__(self):
return '<Arch %s (%s)>' % (self.name, self.memory_endness[-2:])
def __eq__(self, other):
return self.name == other.name and \
self.bits == other.bits and \
self.memory_endness == other.memory_endness
def __ne__(self, other):
return not self == other
def __getstate__(self):
self._cs = None
return self.__dict__
def __setstate__(self, data):
self.__dict__.update(data)
def gather_info_from_state(self, state):
info = {}
for reg in self.persistent_regs:
info[reg] = state.registers.load(reg)
return info
def prepare_state(self, state, info=None):
if info is not None:
# TODO: Only do this for PIC!
for reg in self.persistent_regs:
if reg in info:
state.registers.store(reg, info[reg])
return state
def get_default_reg_value(self, register):
if register == 'sp':
# Convert it to the corresponding register name
registers = [r for r, v in self.registers.items() if v[0] == self.sp_offset]
if len(registers) > 0:
register = registers[0]
else:
return None
for reg, val, _, _ in self.default_register_values:
if reg == register:
return val
return None
def struct_fmt(self, size=None):
fmt = ""
if size is None:
size = self.bits
if self.memory_endness == "Iend_BE":
fmt += ">"
else:
fmt += "<"
if size == 64:
fmt += "Q"
elif size == 32:
fmt += "I"
elif size == 16:
fmt += "H"
elif size == 8:
fmt += "B"
else:
raise ValueError("Invalid size: Must be a muliple of 8")
return fmt
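    # Hypothetical example (added for illustration): on a little-endian 32-bit
    # arch struct_fmt() returns "<I", so a register-sized value can be packed
    # with the struct module as struct.pack(arch.struct_fmt(), 0xdeadbeef),
    # giving '\xef\xbe\xad\xde'.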
@property
def bytes(self):
return self.bits/8
@property
def capstone(self):
if self.cs_arch is None:
raise ArchError("Arch %s does not support disassembly with capstone" % self.name)
if self._cs is None:
self._cs = _capstone.Cs(self.cs_arch, self.cs_mode)
self._cs.detail = True
return self._cs
def translate_dynamic_tag(self, tag):
try:
return self.dynamic_tag_translation[tag]
except KeyError:
if isinstance(tag, (int, long)):
l.error("Please look up and add dynamic tag type %#x for %s", tag, self.name)
return tag
def translate_symbol_type(self, tag):
try:
return self.symbol_type_translation[tag]
except KeyError:
if isinstance(tag, (int, long)):
l.error("Please look up and add symbol type %#x for %s", tag, self.name)
return tag
def translate_register_name(self, offset):
try:
return self.register_names[offset]
except KeyError:
return str(offset)
def disassemble_vex(self, string, **kwargs):
if self.vex_arch is None:
raise ArchError("Arch %s does not support VEX lifting" % self.name)
return _pyvex.IRSB(bytes=string, arch=self, **kwargs)
# Determined by watching the output of strace ld-linux.so.2 --list --inhibit-cache
def library_search_path(self, pedantic=False):
subfunc = lambda x: x.replace('${TRIPLET}', self.triplet).replace('${ARCH}', self.linux_name)
path = ['/lib/${TRIPLET}/', '/usr/lib/${TRIPLET}/', '/lib/', '/usr/lib', '/usr/${TRIPLET}/lib/']
if self.bits == 64:
path.append('/usr/${TRIPLET}/lib64/')
elif self.bits == 32:
path.append('/usr/${TRIPLET}/lib32/')
if pedantic:
path = sum([[x + 'tls/${ARCH}/', x + 'tls/', x + '${ARCH}/', x] for x in path], [])
return map(subfunc, path)
# various names
name = None
vex_arch = None
qemu_name = None
ida_processor = None
linux_name = None
triplet = None
# instruction stuff
max_inst_bytes = None
ret_instruction = ''
nop_instruction = ''
instruction_alignment = None
# register ofsets
ip_offset = None
sp_offset = None
bp_offset = None
ret_offset = None
# memory stuff
bits = None
vex_endness = 'VexEndnessLE'
memory_endness = 'Iend_LE'
register_endness = 'Iend_LE'
stack_change = None
# is it safe to cache IRSBs?
cache_irsb = True
function_prologs = set()
function_epilogs = set()
# Capstone stuff
cs_arch = None
cs_mode = None
_cs = None
call_pushes_ret = False
initial_sp = 0x7fff0000
# Difference of the stack pointer after a call instruction (or its equivalent) is executed
call_sp_fix = 0
stack_size = 0x8000000
# Register information
default_register_values = [ ]
entry_register_values = { }
default_symbolic_registers = [ ]
registers = { }
register_names = { }
argument_registers = { }
persistent_regs = [ ]
concretize_unique_registers = set() # this is a list of registers that should be concretized, if unique, at the end of each block
lib_paths = []
reloc_s_a = []
reloc_b_a = []
reloc_s = []
reloc_copy = []
reloc_tls_mod_id = []
reloc_tls_doffset = []
reloc_tls_offset = []
dynamic_tag_translation = {}
symbol_type_translation = {}
got_section_name = ''
def arch_from_id(ident, endness='', bits=''):
if bits == 64 or (isinstance(bits, str) and '64' in bits):
bits = 64
else:
bits = 32
endness = endness.lower()
endness_unsure = False
if 'lit' in endness:
endness = 'Iend_LE'
elif 'big' in endness:
endness = 'Iend_BE'
elif 'lsb' in endness:
endness = 'Iend_LE'
elif 'msb' in endness:
endness = 'Iend_BE'
elif 'le' in endness:
endness = 'Iend_LE'
elif 'be' in endness:
endness = 'Iend_BE'
elif 'l' in endness:
endness = 'Iend_LE'
endness_unsure = True
elif 'b' in endness:
endness = 'Iend_BE'
endness_unsure = True
else:
endness = 'Iend_LE'
endness_unsure = True
ident = ident.lower()
if 'ppc64' in ident or 'powerpc64' in ident:
if endness_unsure:
endness = 'Iend_BE'
return ArchPPC64(endness)
elif 'ppc' in ident or 'powerpc' in ident:
if endness_unsure:
endness = 'Iend_BE'
if bits == 64:
return ArchPPC64(endness)
return ArchPPC32(endness)
elif 'mips' in ident:
if 'mipsel' in ident:
if bits == 64:
return ArchMIPS64('Iend_LE')
return ArchMIPS32('Iend_LE')
if endness_unsure:
if bits == 64:
return ArchMIPS64('Iend_BE')
return ArchMIPS32('Iend_BE')
if bits == 64:
return ArchMIPS64(endness)
return ArchMIPS32(endness)
elif 'arm' in ident or 'thumb' in ident:
if bits == 64:
return ArchAArch64(endness)
return ArchARM(endness)
elif 'aarch' in ident:
return ArchAArch64(endness)
elif 'amd64' in ident or ('x86' in ident and '64' in ident) or 'x64' in ident:
return ArchAMD64('Iend_LE')
elif '386' in ident or 'x86' in ident or 'metapc' in ident:
if bits == 64:
return ArchAMD64('Iend_LE')
return ArchX86('Iend_LE')
raise ArchError("Could not parse out arch!")
def reverse_ends(string):
ise = 'I'*(len(string)/4)
return _struct.pack('>' + ise, *_struct.unpack('<' + ise, string))
# pylint: disable=unused-import
from .arch_amd64 import ArchAMD64
from .arch_x86 import ArchX86
from .arch_arm import ArchARM, ArchARMEL, ArchARMHF
from .arch_aarch64 import ArchAArch64
from .arch_ppc32 import ArchPPC32
from .arch_ppc64 import ArchPPC64
from .arch_mips32 import ArchMIPS32
from .arch_mips64 import ArchMIPS64
from .archerror import ArchError
all_arches = [
ArchAMD64(), ArchX86(),
ArchARM('Iend_LE'), ArchARM('Iend_BE'),
ArchAArch64('Iend_LE'), ArchAArch64('Iend_BE'),
ArchPPC32('Iend_LE'), ArchPPC32('Iend_BE'),
ArchPPC64('Iend_LE'), ArchPPC64('Iend_BE'),
ArchMIPS32('Iend_LE'), ArchMIPS32('Iend_BE'),
ArchMIPS64('Iend_LE'), ArchMIPS64('Iend_BE')
]
| bsd-2-clause | 7,537,236,573,429,404,000 | 29.69637 | 133 | 0.567466 | false | 3.46535 | false | false | false |
IntersectAustralia/asvo-tao | core/sageimport_mpi/BSPTree.py | 1 | 8898 | import pg
import getpass
import math
import string
import sys
import settingReader
import numpy
import matplotlib.pyplot as plt
import matplotlib
from MySQLdb.constants.FLAG import NUM
class BSPTree(object):
def __init__(self,Options):
'''
Constructor
'''
self.Options=Options
self.serverip=self.Options['PGDB:serverip']
self.username=self.Options['PGDB:user']
self.password=self.Options['PGDB:password']
self.port=int(self.Options['PGDB:port'])
self.DBName=self.Options['PGDB:NewDBName']
if self.password==None:
print('Password for user:'+self.username+' is not defined')
self.password=getpass.getpass('Please enter password:')
# Take care that the connection will be opened to standard DB 'master'
# This is temp. until the actual database is created
self.CurrentConnection=pg.connect(host=self.serverip,user=self.username,passwd=self.password,port=self.port,dbname=self.DBName)
print('Connection to DB is open...Start Creating Tables')
def ExecuteNoQuerySQLStatment(self,SQLStatment):
try:
SQLStatment=string.lower(SQLStatment)
self.CurrentConnection.query(SQLStatment)
except Exception as Exp:
print(">>>>>Error While creating New Table")
print(type(Exp))
print(Exp.args)
print(Exp)
print("Current SQL Statement =\n"+SQLStatment)
raw_input("PLease press enter to continue.....")
def ExecuteQuerySQLStatment(self,SQLStatment):
try:
SQLStatment=string.lower(SQLStatment)
resultsList=self.CurrentConnection.query(SQLStatment).getresult()
return resultsList
except Exception as Exp:
print(">>>>>Error While creating New Table")
print(type(Exp))
print(Exp.args)
print(Exp)
print("Current SQL Statement =\n"+SQLStatment)
raw_input("PLease press enter to continue.....")
def GenerateRectangles(self):
GetBoundryBox="select min(MinX), min(MinY), min(MinZ), max(MaxX), max(MaxY), max(MaxZ) from TreeSummary;"
GlobalSummary=self.ExecuteQuerySQLStatment(GetBoundryBox)[0]
MinX=int(math.floor(GlobalSummary[0]))
MinY=int(math.floor(GlobalSummary[1]))
MinZ=int(math.floor(GlobalSummary[2]))
MaxX=int(math.ceil(GlobalSummary[3]))
MaxY=int(math.ceil(GlobalSummary[4]))
MaxZ=int(math.ceil(GlobalSummary[5]))
XLocation=-1
YLocation=-1
StepSize=20
self.RectArr=numpy.zeros((0,6))
### Intersection between two Rectangles
### http://silentmatt.com/rectangle-intersection/
for X in range(MinX,MaxX,StepSize):
XLocation=XLocation+1
YLocation=-1
for Y in range(MinY,MaxY,StepSize):
YLocation=YLocation+1
BX1=X;
BX2=X+StepSize
BY1=Y
BY2=Y+StepSize
self.RectArr=numpy.vstack([self.RectArr,[BX1,BX2,BY1,BY2,XLocation,YLocation]])
def GetRectIds(self,PolyPoints):
BoundingRect=self.GetBoundingRect(PolyPoints)
LocationsMatrix=numpy.zeros([0,2])
for Rect in self.RectArr:
color='yellow'
#if self.InsidePolygon(Rect[0],Rect[2] , PolyPoints):
# color='blue'
#if self.InsidePolygon(Rect[1],Rect[2] , PolyPoints):
# color='blue'
#if self.InsidePolygon(Rect[0],Rect[3] , PolyPoints):
# color='blue'
#if self.InsidePolygon(Rect[1],Rect[3] , PolyPoints):
# color='blue'
if self.IntersectPolyRect(PolyPoints,BoundingRect,Rect):
color='blue'
LocationsMatrix=numpy.vstack([LocationsMatrix,Rect[4:6]])
plt.gca().add_patch(matplotlib.patches.Rectangle((Rect[0],Rect[2]), Rect[1]-Rect[0], Rect[3]-Rect[2],fc=color))
#plt.gca().add_patch(matplotlib.patches.Rectangle((BoundingRect[0],BoundingRect[2]), BoundingRect[1]-BoundingRect[0], BoundingRect[3]-BoundingRect[2],fc='white'))
plt.gca().add_patch(matplotlib.patches.Polygon(PolyPoints,fc='red'))
plt.gca().autoscale_view()
plt.draw()
plt.show()
return LocationsMatrix
def IntersectTwoRect(self,RectA,RectB):
## Rect=[X1,X2,Y1,Y2]
if (RectA[0] < RectB[1] and RectA[1] > RectB[0] and RectA[2] < RectB[3] and RectA[3] > RectB[2]):
return True;
else:
return False;
def GetBoundingRect(self,PolyPoints):
PolyMinX=PolyMaxX=PolyPoints[0][0]
PolyMinY=PolyMaxY=PolyPoints[0][1]
for P in PolyPoints:
if P[0]<PolyMinX:
PolyMinX=P[0]
if P[0]>PolyMaxX:
PolyMaxX=P[0]
if P[1]<PolyMinY:
PolyMinY=P[1]
if P[1]>PolyMaxY:
PolyMaxY=P[1]
return [PolyMinX,PolyMaxX,PolyMinY,PolyMaxY]
def IntersectPolyRect(self,PolyPoints,PolygonBoundingRect,Rect):
PolyPoints= numpy.vstack([PolyPoints,PolyPoints[0]])
if self.IntersectTwoRect(Rect, PolygonBoundingRect):
IntersectionResults=False
for i in range(0,len(PolyPoints)-1):
IntersectionResults= IntersectionResults or self.seg_intersect(PolyPoints[i],PolyPoints[i+1],[Rect[0],Rect[2]],[Rect[0],Rect[3]])
IntersectionResults= IntersectionResults or self.seg_intersect(PolyPoints[i],PolyPoints[i+1],[Rect[0],Rect[3]],[Rect[1],Rect[3]])
IntersectionResults= IntersectionResults or self.seg_intersect(PolyPoints[i],PolyPoints[i+1],[Rect[1],Rect[3]],[Rect[1],Rect[2]])
IntersectionResults= IntersectionResults or self.seg_intersect(PolyPoints[i],PolyPoints[i+1],[Rect[1],Rect[2]],[Rect[0],Rect[2]])
return IntersectionResults
else:
return False
def ccw(self,A,B,C):
return (C[1]-A[1])*(B[0]-A[0]) > (B[1]-A[1])*(C[0]-A[0])
def seg_intersect(self,A,B,C,D):
return self.ccw(A,C,D) != self.ccw(B,C,D) and self.ccw(A,B,C) != self.ccw(A,B,D)
def InsidePolygon(self,x,y,points):
n = len(points)
inside = False
p1x, p1y = points[0]
for i in range(1, n + 1):
p2x, p2y = points[i % n]
if y > min(p1y, p2y):
if y <= max(p1y, p2y):
if x <= max(p1x, p2x):
if p1y != p2y:
xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x, p1y = p2x, p2y
return inside
if __name__ == '__main__':
[CurrentSAGEStruct,Options]=settingReader.ParseParams("settings.xml")
BSPTreeObj=BSPTree(Options)
BSPTreeObj.GenerateRectangles()
#PolyPoints=[(250,90),(400,300),(250,400),(150,250)]
PolyPoints=[(0,0),(500,0),(500,50)]
LocationsMatrix=BSPTreeObj.GetRectIds(PolyPoints)
GridXLocationsstr=''
GridYLocationsstr=''
GridXLocations=numpy.unique(LocationsMatrix[:,0])
GridYLocations=numpy.unique(LocationsMatrix[:,1])
for r in GridXLocations:
GridXLocationsstr=GridXLocationsstr+','+str(int(r))
for r in GridYLocations:
GridYLocationsstr=GridYLocationsstr+','+str(int(r))
GridXLocationsstr=GridXLocationsstr[1:]
GridYLocationsstr=GridYLocationsstr[1:]
Query='select distinct tablename from TreeSummary where globaltreeid in (Select globaltreeid from TreeMapping where gridx in ('+GridXLocationsstr+') and gridy in ('+GridYLocationsstr+'));'
print Query
TablesList=BSPTreeObj.ExecuteQuerySQLStatment(Query)
for table in TablesList:
print(table)
#GridData=BSPTreeObj.ExecuteQuerySQLStatment("select gridx,gridy,count(*) from TreeMapping group by gridx,gridy;")
#Arr=numpy.zeros((25,25))
#for GridPoint in GridData:
# Arr[GridPoint[0],GridPoint[1]]=GridPoint[2]
#print Arr
#plt.contourf(Arr)
#plt.colorbar()
#plt.show()
| gpl-3.0 | -237,638,071,895,796,670 | 34.313492 | 193 | 0.560351 | false | 3.673823 | false | false | false |
horiajurcut/quantum-app | api/controllers/login.py | 1 | 1523 | from flask import Flask
from flask import session
from flask import Response
from flask import request
from flask import redirect, url_for
from flask import render_template
from api.core import app, db
from api.models.user import User
from api.models.page import Page
import urllib
import json
import datetime
@app.route('/')
def landing():
return render_template('landing.html')
@app.route('/auth')
def login():
return render_template('login.html')
@app.route('/auth/<token>')
def get_token(token):
params = {
'access_token': token
}
params = urllib.urlencode(params)
me = json.loads(urllib.urlopen('https://graph.facebook.com/me?%s' % params).read())
fb_user = {
'first_name': me['first_name'],
'last_name': me['last_name'],
'fb_id': me['id'],
'email': me['email'],
'created': datetime.datetime.now()
}
db_user = db.session.query(User).filter(
User.fb_id == fb_user['fb_id']
).first()
if not db_user:
db_user = User(**fb_user)
db.session.add(db_user)
db.session.commit()
session['USER_ID'] = db_user.id
accounts = json.loads(urllib.urlopen('https://graph.facebook.com/me/accounts?%s' % params).read())
if 'data' not in accounts:
return redirect('/')
if len(accounts) > 1:
return render_template('chooser.html', pages=accounts['data'])
return Response(json.dumps({
'accounts': accounts
}), mimetype='application/json')
| apache-2.0 | 8,134,327,485,484,134,000 | 22.430769 | 102 | 0.622456 | false | 3.583529 | false | false | false |
kofrasa/pyutils | pyutils/algorithms.py | 1 | 1875 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
__author__ = 'francis'
def permute(xs):
"""Returns a generator for the permutations of elements in the sequence
:param xs: a sequence of elements
"""
if len(xs) == 1:
yield xs
else:
for i in xrange(0, len(xs)):
for p in permute(xs[0:i] + xs[i + 1:]):
yield [xs[i]] + p
def longest_inc_seq(xs):
"""Finds the longest increasing sequences in the given sequence
:param xs: a sortable sequence of elements
"""
seq = [] # all increasing sequences
indices = [] # indices of longest increasing sequences
size = 0 # current longest size
for i in xrange(0, len(xs)):
for j in xrange(0, len(seq)):
if xs[i] > seq[j][-1]:
t = seq[j] + [xs[i]]
if len(t) > size:
indices = [len(seq)]
size = len(t)
elif len(t) == size:
indices.append(len(seq))
seq.append(t)
seq.append([xs[i]])
return [seq[k] for k in indices]
def longest_common_seq(first, second):
"""Find the longest common sequence of the given sequences
:param first: the first sequence
:param second: the second sequence
"""
res = []
for i in range(0, len(first)):
for j in range(0, len(second)):
if second[j] == first[i]:
t = [first[i]] + longest_common_seq(first[i + 1:], second[j + 1:])
if len(t) > len(res):
res = t
return res
def fib(n):
"""Computes the fibonacci number for the given term
:param n: the term of the fibonacci sequence
"""
if not n:
return 0
previous = 0
current = 1
while n - 1:
current, previous = (previous + current), current
n -= 1
return current | mit | 2,186,507,648,000,325,600 | 27.424242 | 82 | 0.523733 | false | 3.712871 | false | false | false |
osrf/osrf_hw | kicad_scripts/generate_BGA_footprint.py | 1 | 4360 | #!/usr/bin/env python
from __future__ import print_function
import os
import sys
from collections import OrderedDict
#FIXME Best way to configur pad size? right now we use half the pitch
if len(sys.argv) >1:
infile = sys.argv[1]
else:
print('please provide a configuration file')
sys.exit()
dictParameters=OrderedDict([
('outLibrary',''),
('name',''),
('keywords',''),
('Description','_'),
('3dModelPath','_') ])
with open(infile,'r+') as inf:
while True:
line = inf.readline()
line = line.replace('\n','')
if not line: break
lsplit=[]
lsplit.append(line[0:line.find('=')])
lsplit.append(line[line.find('=')+1:])
if lsplit[0] in dictParameters:
if (lsplit[1] != '' and lsplit[1]!= dictParameters[lsplit[0]]):
dictParameters[lsplit[0]]=lsplit[1]
#retrieve BGA package parameters
string = dictParameters['name']
idx = string.find('P')
pitch = float(string[string.find('C')+1:idx])/100.0
str2 = string[idx + 1 :]
idx = str2.find('X')
nBallx = int(str2[:idx])
str2 = str2[idx+1:]
idx = str2.find('_')
nBally = int(str2[:idx])
str2 = str2[idx+1:]
idx = str2.find('X')
lenx = float(str2[:idx])/100.0
str2 = str2[idx+1:]
idx = str2.find('X')
leny = float(str2[:idx])/100.0
def drawRect(x,y,layer):
print(layer)
print(x)
print(y)
width = 0.15
if layer.find('CrtYd') != -1:
width = 0.05
string = ' (fp_line (start -{} -{}) (end -{} {}) (layer {}) (width {}))\n'.format(x,y,x,y,layer,width)
string += ' (fp_line (start -{} -{}) (end {} -{}) (layer {}) (width {}))\n'.format(x,y,x,y,layer,width)
string += ' (fp_line (start {} {}) (end -{} {}) (layer {}) (width {}))\n'.format(x,y,x,y,layer,width)
string += ' (fp_line (start {} {}) (end {} -{}) (layer {}) (width {}))\n'.format(x,y,x,y,layer,width)
return string
def createPinList(nBallx,nBally):
letterBGA= ['A','B','C','D','E','F','G','H','J','K','L','M','N','P','R','T','U','V','W','Y']
pinlist = []
for i in range(nBallx):
for j in range(nBally):
firstletter = j/len(letterBGA)
defstr = ''
if(firstletter != 0):
defstr = letterBGA[firstletter-1]
pinlist.append(defstr+letterBGA[j-firstletter*len(letterBGA)]+str(i+1))
return pinlist
outstring = "(module " + dictParameters['name'] + ' (layer F.Cu)\n' # module name
outstring += ' (descr "'+dictParameters['Description'] + '")\n' # Description
outstring += ' (tags "'+dictParameters['keywords'] + '")\n' # keywords
outstring += ' (attr smd)\n' # attribute
outstring += ' (fp_text reference REF** (at 0 {0}) (layer F.SilkS)\n'.format(int(leny/2.+2)) # reference
outstring += ' (effects (font (size 1 1) (thickness 0.15)))\n'
outstring += ' )\n'
outstring += ' (fp_text value {} (at 0 -{}) (layer F.Fab)\n'.format(dictParameters['name'],int(leny/2.+2)) # value
outstring += ' (effects (font (size 1 1) (thickness 0.15)))\n'
outstring += ' )\n'
outstring += drawRect(lenx/2.,leny/2.,'F.SilkS') # silkscreen rectangle
outstring += drawRect(lenx/2.+0.2,leny/2.+0.2,'F.CrtYd') # courtyard rectangle
outstring += ' (fp_circle (center -{} -{}) (end -{} -{}) (layer F.SilkS) (width 0.15))\n'.format(lenx/2.+0.5,leny/2.+0.5,lenx/2.+1,leny/2.+0.5)#silkscreen circle
pinlist = createPinList(nBallx,nBally)
minx = (nBallx-1)*pitch/2.; miny = (nBally-1)*pitch/2.
pn = 0 ; posx = -minx ; posy = -miny ; bsize = pitch/2.
for pin in pinlist:
if pn % nBallx == 0 and pn / nBallx != 0: # if we start a new column
posx += pitch
posy = -miny
if abs(posx)<0.001: #avoid python precision issue
posx = 0
if abs(posy)<0.001: #avoid python precision issue
posy = 0
outstring += ' (pad {} smd circle (at {} {}) (size {} {}) (layers F.Cu F.Paste F.Mask))\n'.format(pin,posx,posy,bsize,bsize)
posy += pitch
pn += 1
outstring += ' (model '+str(os.path.join(dictParameters['3dModelPath'],dictParameters['name']+'.wrl'))+'\n (at (xyz 0 0 0))\n (scale (xyz 1 1 1))\n (rotate (xyz 0 0 0))\n )\n'
outstring += ')'
outfilepath = os.path.join(dictParameters['outLibrary'],dictParameters['name']+'.kicad_mod')
print(outfilepath)
with open(outfilepath,'w+') as outfile:
outfile.write(outstring)
| apache-2.0 | 1,672,750,590,756,509,200 | 39 | 186 | 0.577982 | false | 2.77884 | false | false | false |
evansde77/cirrus | src/cirrus/plugins/uploaders/fabric_put.py | 1 | 1114 | #!/usr/bin/env python
"""
_fabric_put_
Uploader plugin that uses fabric to do a remote put
"""
from cirrus.logger import get_logger
from cirrus.upload_plugins import Uploader
from cirrus.configuration import get_pypi_auth
from cirrus.scp import put
LOGGER = get_logger()
class Pypi(Uploader):
PLUGGAGE_OBJECT_NAME = 'fabric'
def upload(self, opts, build_artifact):
"""
upload to pypi via fabric over ssh
"""
pypi_conf = self.package_conf.pypi_config()
pypi_auth = get_pypi_auth()
if opts.pypi_url:
pypi_url = opts.pypi_url
else:
pypi_url = pypi_conf['pypi_url']
if pypi_auth['ssh_username'] is not None:
pypi_user = pypi_auth['ssh_username']
else:
pypi_user = pypi_auth['username']
package_dir = pypi_conf['pypi_upload_path']
LOGGER.info("Uploading {0} to {1}".format(build_artifact, pypi_url))
put(build_artifact,
package_dir,
pypi_url,
ssh_username=pypi_user,
ssh_keyfile=pypi_auth['ssh_key']
)
| apache-2.0 | 5,033,704,428,203,177,000 | 24.906977 | 76 | 0.593357 | false | 3.406728 | false | false | false |
beni55/dipy | dipy/sims/voxel.py | 3 | 18837 | from __future__ import division
import numpy as np
from numpy import dot
from dipy.core.geometry import sphere2cart
from dipy.core.geometry import vec2vec_rotmat
# Diffusion coefficients for white matter tracts, in mm^2/s
#
# Based roughly on values from:
#
# Pierpaoli, Basser, "Towards a Quantitative Assessment of Diffusion
# Anisotropy", Magnetic Resonance in Medicine, 1996; 36(6):893-906.
#
diffusion_evals = np.array([1500e-6, 400e-6, 400e-6])
def _add_gaussian(sig, noise1, noise2):
"""
Helper function to add_noise
This one simply adds one of the Gaussians to the sig and ignores the other
one.
"""
return sig + noise1
def _add_rician(sig, noise1, noise2):
"""
Helper function to add_noise.
This does the same as abs(sig + complex(noise1, noise2))
"""
return np.sqrt((sig + noise1) ** 2 + noise2 ** 2)
def _add_rayleigh(sig, noise1, noise2):
"""
Helper function to add_noise
The Rayleigh distribution is $\sqrt\{Gauss_1^2 + Gauss_2^2}$.
"""
return sig + np.sqrt(noise1 ** 2 + noise2 ** 2)
def add_noise(signal, snr, S0, noise_type='rician'):
r""" Add noise of specified distribution to the signal from a single voxel.
Parameters
-----------
signal : 1-d ndarray
The signal in the voxel.
snr : float
The desired signal-to-noise ratio. (See notes below.)
If `snr` is None, return the signal as-is.
S0 : float
Reference signal for specifying `snr`.
noise_type : string, optional
The distribution of noise added. Can be either 'gaussian' for Gaussian
distributed noise, 'rician' for Rice-distributed noise (default) or
'rayleigh' for a Rayleigh distribution.
Returns
--------
signal : array, same shape as the input
Signal with added noise.
Notes
-----
SNR is defined here, following [1]_, as ``S0 / sigma``, where ``sigma`` is
the standard deviation of the two Gaussian distributions forming the real
and imaginary components of the Rician noise distribution (see [2]_).
References
----------
.. [1] Descoteaux, Angelino, Fitzgibbons and Deriche (2007) Regularized,
fast and robust q-ball imaging. MRM, 58: 497-510
.. [2] Gudbjartson and Patz (2008). The Rician distribution of noisy MRI
data. MRM 34: 910-914.
Examples
--------
>>> signal = np.arange(800).reshape(2, 2, 2, 100)
>>> signal_w_noise = add_noise(signal, 10., 100., noise_type='rician')
"""
if snr is None:
return signal
sigma = S0 / snr
noise_adder = {'gaussian': _add_gaussian,
'rician': _add_rician,
'rayleigh': _add_rayleigh}
noise1 = np.random.normal(0, sigma, size=signal.shape)
if noise_type == 'gaussian':
noise2 = None
else:
noise2 = np.random.normal(0, sigma, size=signal.shape)
return noise_adder[noise_type](signal, noise1, noise2)
def sticks_and_ball(gtab, d=0.0015, S0=100, angles=[(0, 0), (90, 0)],
fractions=[35, 35], snr=20):
""" Simulate the signal for a Sticks & Ball model.
Parameters
-----------
gtab : GradientTable
Signal measurement directions.
d : float
Diffusivity value.
S0 : float
Unweighted signal value.
angles : array (K,2) or (K, 3)
List of K polar angles (in degrees) for the sticks or array of K
sticks as unit vectors.
fractions : float
Percentage of each stick. Remainder to 100 specifies isotropic
component.
snr : float
Signal to noise ratio, assuming Rician noise. If set to None, no
noise is added.
Returns
--------
S : (N,) ndarray
Simulated signal.
sticks : (M,3)
Sticks in cartesian coordinates.
References
----------
.. [1] Behrens et al., "Probabilistic diffusion
tractography with multiple fiber orientations: what can we gain?",
Neuroimage, 2007.
"""
fractions = [f / 100. for f in fractions]
f0 = 1 - np.sum(fractions)
S = np.zeros(len(gtab.bvals))
angles = np.array(angles)
if angles.shape[-1] == 3:
sticks = angles
else:
sticks = [sphere2cart(1, np.deg2rad(pair[0]), np.deg2rad(pair[1]))
for pair in angles]
sticks = np.array(sticks)
for (i, g) in enumerate(gtab.bvecs[1:]):
S[i + 1] = f0 * np.exp(-gtab.bvals[i + 1] * d) + \
np.sum([fractions[j] * np.exp(-gtab.bvals[i + 1] * d * np.dot(s, g) ** 2)
for (j, s) in enumerate(sticks)])
S[i + 1] = S0 * S[i + 1]
S[gtab.b0s_mask] = S0
S = add_noise(S, snr, S0)
return S, sticks
def single_tensor(gtab, S0=1, evals=None, evecs=None, snr=None):
""" Simulated Q-space signal with a single tensor.
Parameters
-----------
gtab : GradientTable
Measurement directions.
S0 : double,
Strength of signal in the presence of no diffusion gradient (also
called the ``b=0`` value).
evals : (3,) ndarray
Eigenvalues of the diffusion tensor. By default, values typical for
prolate white matter are used.
evecs : (3, 3) ndarray
Eigenvectors of the tensor. You can also think of this as a rotation
matrix that transforms the direction of the tensor. The eigenvectors
needs to be column wise.
snr : float
Signal to noise ratio, assuming Rician noise. None implies no noise.
Returns
--------
S : (N,) ndarray
Simulated signal: ``S(q, tau) = S_0 e^(-b g^T R D R.T g)``.
References
----------
.. [1] M. Descoteaux, "High Angular Resolution Diffusion MRI: from Local
Estimation to Segmentation and Tractography", PhD thesis,
University of Nice-Sophia Antipolis, p. 42, 2008.
.. [2] E. Stejskal and J. Tanner, "Spin diffusion measurements: spin echos
in the presence of a time-dependent field gradient", Journal of
Chemical Physics, nr. 42, pp. 288--292, 1965.
"""
if evals is None:
evals = diffusion_evals
if evecs is None:
evecs = np.eye(3)
out_shape = gtab.bvecs.shape[:gtab.bvecs.ndim - 1]
gradients = gtab.bvecs.reshape(-1, 3)
R = np.asarray(evecs)
S = np.zeros(len(gradients))
D = dot(dot(R, np.diag(evals)), R.T)
for (i, g) in enumerate(gradients):
S[i] = S0 * np.exp(-gtab.bvals[i] * dot(dot(g.T, D), g))
S = add_noise(S, snr, S0)
return S.reshape(out_shape)
def multi_tensor(gtab, mevals, S0=100, angles=[(0, 0), (90, 0)],
fractions=[50, 50], snr=20):
r"""Simulate a Multi-Tensor signal.
Parameters
-----------
gtab : GradientTable
mevals : array (K, 3)
each tensor's eigenvalues in each row
S0 : float
Unweighted signal value (b0 signal).
angles : array (K,2) or (K,3)
List of K tensor directions in polar angles (in degrees) or unit vectors
fractions : float
Percentage of the contribution of each tensor. The sum of fractions
should be equal to 100%.
snr : float
Signal to noise ratio, assuming Rician noise. If set to None, no
noise is added.
Returns
--------
S : (N,) ndarray
Simulated signal.
sticks : (M,3)
Sticks in cartesian coordinates.
Examples
--------
>>> import numpy as np
>>> from dipy.sims.voxel import multi_tensor
>>> from dipy.data import get_data
>>> from dipy.core.gradients import gradient_table
>>> from dipy.io.gradients import read_bvals_bvecs
>>> fimg, fbvals, fbvecs = get_data('small_101D')
>>> bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
>>> gtab = gradient_table(bvals, bvecs)
>>> mevals=np.array(([0.0015, 0.0003, 0.0003],[0.0015, 0.0003, 0.0003]))
>>> e0 = np.array([1, 0, 0.])
>>> e1 = np.array([0., 1, 0])
>>> S = multi_tensor(gtab, mevals)
"""
if np.round(np.sum(fractions), 2) != 100.0:
raise ValueError('Fractions should sum to 100')
fractions = [f / 100. for f in fractions]
S = np.zeros(len(gtab.bvals))
angles = np.array(angles)
if angles.shape[-1] == 3:
sticks = angles
else:
sticks = [sphere2cart(1, np.deg2rad(pair[0]), np.deg2rad(pair[1]))
for pair in angles]
sticks = np.array(sticks)
for i in range(len(fractions)):
S = S + fractions[i] * single_tensor(gtab, S0=S0, evals=mevals[i],
evecs=all_tensor_evecs(
sticks[i]).T,
snr=None)
return add_noise(S, snr, S0), sticks
def single_tensor_odf(r, evals=None, evecs=None):
"""Simulated ODF with a single tensor.
Parameters
----------
r : (N,3) or (M,N,3) ndarray
Measurement positions in (x, y, z), either as a list or on a grid.
evals : (3,)
Eigenvalues of diffusion tensor. By default, use values typical for
prolate white matter.
evecs : (3, 3) ndarray
Eigenvectors of the tensor. You can also think of these as the
rotation matrix that determines the orientation of the diffusion
tensor.
Returns
-------
ODF : (N,) ndarray
The diffusion probability at ``r`` after time ``tau``.
References
----------
.. [1] Aganj et al., "Reconstruction of the Orientation Distribution
Function in Single- and Multiple-Shell q-Ball Imaging Within
Constant Solid Angle", Magnetic Resonance in Medicine, nr. 64,
pp. 554--566, 2010.
"""
if evals is None:
evals = diffusion_evals
if evecs is None:
evecs = np.eye(3)
out_shape = r.shape[:r.ndim - 1]
R = np.asarray(evecs)
D = dot(dot(R, np.diag(evals)), R.T)
Di = np.linalg.inv(D)
r = r.reshape(-1, 3)
P = np.zeros(len(r))
for (i, u) in enumerate(r):
P[i] = (dot(dot(u.T, Di), u)) ** (3 / 2)
return (1 / (4 * np.pi * np.prod(evals) ** (1 / 2) * P)).reshape(out_shape)
def all_tensor_evecs(e0):
"""Given the principle tensor axis, return the array of all
eigenvectors (or, the rotation matrix that orientates the tensor).
Parameters
----------
e0 : (3,) ndarray
Principle tensor axis.
Returns
-------
evecs : (3,3) ndarray
Tensor eigenvectors.
"""
axes = np.eye(3)
mat = vec2vec_rotmat(axes[0], e0)
e1 = np.dot(mat, axes[1])
e2 = np.dot(mat, axes[2])
return np.array([e0, e1, e2])
def multi_tensor_odf(odf_verts, mevals, angles, fractions):
r'''Simulate a Multi-Tensor ODF.
Parameters
----------
odf_verts : (N,3) ndarray
Vertices of the reconstruction sphere.
mevals : sequence of 1D arrays,
Eigen-values for each tensor.
angles : sequence of 2d tuples,
Sequence of principal directions for each tensor in polar angles
or cartesian unit coordinates.
fractions : sequence of floats,
Percentages of the fractions for each tensor.
Returns
-------
ODF : (N,) ndarray
Orientation distribution function.
Examples
--------
Simulate a MultiTensor ODF with two peaks and calculate its exact ODF.
>>> import numpy as np
>>> from dipy.sims.voxel import multi_tensor_odf, all_tensor_evecs
>>> from dipy.data import get_sphere
>>> sphere = get_sphere('symmetric724')
>>> vertices, faces = sphere.vertices, sphere.faces
>>> mevals = np.array(([0.0015, 0.0003, 0.0003],[0.0015, 0.0003, 0.0003]))
>>> angles = [(0, 0), (90, 0)]
>>> odf = multi_tensor_odf(vertices, mevals, angles, [50, 50])
'''
mf = [f / 100. for f in fractions]
angles = np.array(angles)
if angles.shape[-1] == 3:
sticks = angles
else:
sticks = [sphere2cart(1, np.deg2rad(pair[0]), np.deg2rad(pair[1]))
for pair in angles]
sticks = np.array(sticks)
odf = np.zeros(len(odf_verts))
mevecs = []
for s in sticks:
mevecs += [all_tensor_evecs(s).T]
for (j, f) in enumerate(mf):
odf += f * single_tensor_odf(odf_verts,
evals=mevals[j], evecs=mevecs[j])
return odf
def single_tensor_rtop(evals=None, tau=1.0 / (4 * np.pi ** 2)):
r'''Simulate a Multi-Tensor rtop.
Parameters
----------
evals : 1D arrays,
Eigen-values for the tensor. By default, values typical for prolate
white matter are used.
tau : float,
diffusion time. By default the value that makes q=sqrt(b).
Returns
-------
rtop : float,
Return to origin probability.
References
----------
.. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator and
Its Features in Diffusion MRI", PhD Thesis, 2012.
'''
if evals is None:
evals = diffusion_evals
rtop = 1.0 / np.sqrt((4 * np.pi * tau) ** 3 * np.prod(evals))
return rtop
def multi_tensor_rtop(mf, mevals=None, tau=1 / (4 * np.pi ** 2)):
r'''Simulate a Multi-Tensor rtop.
Parameters
----------
mf : sequence of floats, bounded [0,1]
Percentages of the fractions for each tensor.
mevals : sequence of 1D arrays,
Eigen-values for each tensor. By default, values typical for prolate
white matter are used.
tau : float,
diffusion time. By default the value that makes q=sqrt(b).
Returns
-------
rtop : float,
Return to origin probability.
References
----------
.. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator and
Its Features in Diffusion MRI", PhD Thesis, 2012.
'''
rtop = 0
if mevals is None:
mevals = [None, ] * len(mf)
for j, f in enumerate(mf):
rtop += f * single_tensor_rtop(mevals[j], tau=tau)
return rtop
def single_tensor_pdf(r, evals=None, evecs=None, tau=1 / (4 * np.pi ** 2)):
"""Simulated ODF with a single tensor.
Parameters
----------
r : (N,3) or (M,N,3) ndarray
Measurement positions in (x, y, z), either as a list or on a grid.
evals : (3,)
Eigenvalues of diffusion tensor. By default, use values typical for
prolate white matter.
evecs : (3, 3) ndarray
Eigenvectors of the tensor. You can also think of these as the
rotation matrix that determines the orientation of the diffusion
tensor.
tau : float,
diffusion time. By default the value that makes q=sqrt(b).
Returns
-------
pdf : (N,) ndarray
The diffusion probability at ``r`` after time ``tau``.
References
----------
.. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator and
Its Features in Diffusion MRI", PhD Thesis, 2012.
"""
if evals is None:
evals = diffusion_evals
if evecs is None:
evecs = np.eye(3)
out_shape = r.shape[:r.ndim - 1]
R = np.asarray(evecs)
D = dot(dot(R, np.diag(evals)), R.T)
Di = np.linalg.inv(D)
r = r.reshape(-1, 3)
P = np.zeros(len(r))
for (i, u) in enumerate(r):
P[i] = (-dot(dot(u.T, Di), u)) / (4 * tau)
pdf = (1 / np.sqrt((4 * np.pi * tau) ** 3 * np.prod(evals))) * np.exp(P)
return pdf.reshape(out_shape)
def multi_tensor_pdf(pdf_points, mevals, angles, fractions,
tau=1 / (4 * np.pi ** 2)):
r'''Simulate a Multi-Tensor ODF.
Parameters
----------
pdf_points : (N, 3) ndarray
Points to evaluate the PDF.
mevals : sequence of 1D arrays,
Eigen-values for each tensor. By default, values typical for prolate
white matter are used.
angles : sequence,
Sequence of principal directions for each tensor in polar angles
or cartesian unit coordinates.
fractions : sequence of floats,
Percentages of the fractions for each tensor.
tau : float,
diffusion time. By default the value that makes q=sqrt(b).
Returns
-------
pdf : (N,) ndarray,
Probability density function of the water displacement.
References
----------
.. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator
and its Features in Diffusion MRI", PhD Thesis, 2012.
'''
mf = [f / 100. for f in fractions]
angles = np.array(angles)
if angles.shape[-1] == 3:
sticks = angles
else:
sticks = [sphere2cart(1, np.deg2rad(pair[0]), np.deg2rad(pair[1]))
for pair in angles]
sticks = np.array(sticks)
pdf = np.zeros(len(pdf_points))
mevecs = []
for s in sticks:
mevecs += [all_tensor_evecs(s).T]
for j, f in enumerate(mf):
pdf += f * single_tensor_pdf(pdf_points,
evals=mevals[j], evecs=mevecs[j], tau=tau)
return pdf
def single_tensor_msd(evals=None, tau=1 / (4 * np.pi ** 2)):
r'''Simulate a Multi-Tensor rtop.
Parameters
----------
evals : 1D arrays,
Eigen-values for the tensor. By default, values typical for prolate
white matter are used.
tau : float,
diffusion time. By default the value that makes q=sqrt(b).
Returns
-------
msd : float,
Mean square displacement.
References
----------
.. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator and
Its Features in Diffusion MRI", PhD Thesis, 2012.
'''
if evals is None:
evals = diffusion_evals
msd = 2 * tau * np.sum(evals)
return msd
def multi_tensor_msd(mf, mevals=None, tau=1 / (4 * np.pi ** 2)):
r'''Simulate a Multi-Tensor rtop.
Parameters
----------
mf : sequence of floats, bounded [0,1]
Percentages of the fractions for each tensor.
mevals : sequence of 1D arrays,
Eigen-values for each tensor. By default, values typical for prolate
white matter are used.
tau : float,
diffusion time. By default the value that makes q=sqrt(b).
Returns
-------
msd : float,
Mean square displacement.
References
----------
.. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator and
Its Features in Diffusion MRI", PhD Thesis, 2012.
'''
msd = 0
if mevals is None:
mevals = [None, ] * len(mf)
for j, f in enumerate(mf):
msd += f * single_tensor_msd(mevals[j], tau=tau)
return msd
# Use standard naming convention, but keep old names
# for backward compatibility
SticksAndBall = sticks_and_ball
SingleTensor = single_tensor
MultiTensor = multi_tensor
| bsd-3-clause | -2,654,405,943,853,423,000 | 27.98 | 85 | 0.585178 | false | 3.426155 | false | false | false |
anchore/anchore-engine | anchore_engine/analyzers/modules/20_file_list.py | 1 | 1814 | #!/usr/bin/env python3
import sys
import os
import re
import json
import subprocess
import stat
import anchore_engine.analyzers.utils
analyzer_name = "file_list"
try:
config = anchore_engine.analyzers.utils.init_analyzer_cmdline(
sys.argv, analyzer_name
)
except Exception as err:
print(str(err))
sys.exit(1)
imgname = config["imgid"]
imgid = config["imgid_full"]
outputdir = config["dirs"]["outputdir"]
unpackdir = config["dirs"]["unpackdir"]
meta = anchore_engine.analyzers.utils.get_distro_from_squashtar(
os.path.join(unpackdir, "squashed.tar"), unpackdir=unpackdir
)
distrodict = anchore_engine.analyzers.utils.get_distro_flavor(
meta["DISTRO"], meta["DISTROVERS"], likedistro=meta["LIKEDISTRO"]
)
simplefiles = {}
outfiles = {}
try:
allfiles = {}
fmap = {}
if os.path.exists(unpackdir + "/anchore_allfiles.json"):
with open(unpackdir + "/anchore_allfiles.json", "r") as FH:
allfiles = json.loads(FH.read())
else:
fmap, allfiles = anchore_engine.analyzers.utils.get_files_from_squashtar(
os.path.join(unpackdir, "squashed.tar")
)
with open(unpackdir + "/anchore_allfiles.json", "w") as OFH:
OFH.write(json.dumps(allfiles))
# fileinfo
for name in list(allfiles.keys()):
outfiles[name] = json.dumps(allfiles[name])
simplefiles[name] = oct(stat.S_IMODE(allfiles[name]["mode"]))
except Exception as err:
import traceback
traceback.print_exc()
raise err
if simplefiles:
ofile = os.path.join(outputdir, "files.all")
anchore_engine.analyzers.utils.write_kvfile_fromdict(ofile, simplefiles)
if outfiles:
ofile = os.path.join(outputdir, "files.allinfo")
anchore_engine.analyzers.utils.write_kvfile_fromdict(ofile, outfiles)
sys.exit(0)
| apache-2.0 | -6,814,669,524,152,885,000 | 25.289855 | 81 | 0.677508 | false | 2.968903 | false | false | false |
henrykironde/deletedret | retriever/lib/load_json.py | 1 | 2818 | import json
from collections import OrderedDict
from retriever.lib.templates import TEMPLATES
from retriever.lib.models import myTables
from retriever.lib.tools import open_fr
def read_json(json_file):
"""Read Json dataset package files
Load each json and get the appropriate encoding for the dataset
Reload the json using the encoding to ensure correct character sets
"""
json_object = OrderedDict()
json_file_encoding = None
json_file = str(json_file) + ".json"
try:
file_obj = open_fr(json_file)
json_object = json.load(file_obj)
if "encoding" in json_object:
json_file_encoding = json_object['encoding']
file_obj.close()
except ValueError:
return None
# Reload json using encoding if available
try:
if json_file_encoding:
file_obj = open_fr(json_file, encoding=json_file_encoding)
else:
file_obj = open_fr(json_file)
json_object = json.load(file_obj)
file_obj.close()
except ValueError:
return None
if isinstance(json_object, dict) and "resources" in json_object.keys():
# Note::formats described by frictionless data may need to change
tabular_exts = {"csv", "tab"}
vector_exts = {"shp", "kmz"}
raster_exts = {"tif", "tiff", "bil", "hdr", "h5", "hdf5", "hr", "image"}
for resource_item in json_object["resources"]:
if "format" not in resource_item:
if "format" in json_object:
resource_item["format"] = json_object["format"]
else:
resource_item["format"] = "tabular"
if "extensions" in resource_item:
exts = set(resource_item["extensions"])
if exts <= tabular_exts:
resource_item["format"] = "tabular"
elif exts <= vector_exts:
resource_item["format"] = "vector"
elif exts <= raster_exts:
resource_item["format"] = "raster"
if "url" in resource_item:
if "urls" in json_object:
json_object["urls"][resource_item["name"]] = resource_item["url"]
json_object["tables"] = OrderedDict()
temp_tables = {}
table_names = [item["name"] for item in json_object["resources"]]
temp_tables["tables"] = OrderedDict(zip(table_names, json_object["resources"]))
for table_name, table_spec in temp_tables["tables"].items():
json_object["tables"][table_name] = myTables[temp_tables["tables"][table_name]
["format"]](**table_spec)
json_object.pop("resources", None)
return TEMPLATES["default"](**json_object)
return None
| mit | -5,715,605,298,331,527,000 | 38.690141 | 90 | 0.572037 | false | 4.150221 | false | false | false |
bbc/kamaelia | Code/Python/Kamaelia/Kamaelia/File/MaxSpeedFileReader.py | 6 | 5340 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
==================================
Reading a file as fast as possible
==================================
MaxSpeedFileReader reads a file in bytes mode as fast as it can; limited only
by any size limit on the inbox it is sending the data to.
This component is therefore useful for building systems that are self rate
limiting - systems that are just trying to process data as fast as they can and
are limited by the speed of the slowest part of the chain.
Example Usage
-------------
Read "myfile" in in chunks of 1024 bytes. The rate is limited by the rate at
which the consumer component can consume the chunks, since its inbox has a size
limit of 5 items of data::
consumer = Consumer()
consumer.inboxes["inbox"].setSize(5)
Pipeline( MaxSpeedFileReader("myfile", chunksize=1024),
consumer,
).run()
More details
------------
Specify a filename and chunksize and MaxSpeedFileReader will read bytes from
the file in the chunksize you specified and send them out of its "outbox"
outbox.
If the destination inbox it is sending chunks to is size limited, then
MaxSpeedFileReader will pause until space becomes available. This is how the
speed at which the file is ingested is regulated - by the rate at which it is
consumed.
When the whole file has been read, this component will terminate and send a
producerFinished() message out of its "signal" outbox.
If a producerFinished message is received on the "control" inbox, this component
will complete sending any data that may be waiting. It will then send the
producerFinished message on out of its "signal" outbox and terminate.
If a shutdownMicroprocess message is received on the "control" inbox, this
component will immediately send it on out of its "signal" outbox and immediately
terminate. It will not complete sending on any pending data.
"""
from Axon.Component import component
from Axon.Ipc import producerFinished, shutdownMicroprocess
from Axon.AxonExceptions import noSpaceInBox
class MaxSpeedFileReader(component):
"""\
MaxSpeedFileReader(filename[,chunksize]) -> new MaxSpeedFileReader component.
Reads the contents of a file in bytes mode; sending it out as fast as it can
in chunks from the "outbox" outbox. The rate of reading is only limited by
any size limit of the destination inbox to which the data is being sent.
Keyword arguments:
- filename -- The filename of the file to read
- chunksize -- Optional. The maximum number of bytes in each chunk of data read from the file and sent out of the "outbox" outbox (default=32768)
"""
def __init__(self, filename, chunksize=32768):
super(MaxSpeedFileReader,self).__init__()
self.filename=filename
self.chunksize=chunksize
def handleControl(self):
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) and not isinstance(self.shutdownMsg, shutdownMicroprocess):
self.shutdownMsg = msg
elif isinstance(msg, shutdownMicroprocess):
self.shutdownMsg = msg
def canStop(self):
self.handleControl()
return isinstance(self.shutdownMsg, (producerFinished,shutdownMicroprocess))
def mustStop(self):
self.handleControl()
return isinstance(self.shutdownMsg, shutdownMicroprocess)
def waitSend(self,data,boxname):
while 1:
try:
self.send(data,boxname)
return
except noSpaceInBox:
if self.mustStop():
raise UserWarning( "STOP" )
self.pause()
yield 1
if self.mustStop():
raise UserWarning( "STOP" )
def main(self):
self.shutdownMsg=""
fh = open(self.filename,"rb")
try:
while 1:
data = fh.read(self.chunksize)
if data=="":
self.shutdownMsg=producerFinished(self)
raise UserWarning( "STOP" )
for _ in self.waitSend(data,"outbox"):
yield _
if self.mustStop():
raise UserWarning( "STOP" )
except UserWarning( "STOP") :
self.send(self.shutdownMsg, "signal")
__kamaelia_components__ = ( MaxSpeedFileReader, )
| apache-2.0 | -237,693,519,546,439,580 | 33.901961 | 150 | 0.652809 | false | 4.431535 | false | false | false |
metaborg/spoofax-deploy | releng/metaborg/util/git.py | 1 | 7900 | import datetime
import os
import re
import time
from enum import Enum, unique
def LatestDate(repo):
date = 0
for submodule in repo.submodules:
subrepo = submodule.module()
head = subrepo.head
if head.is_detached:
commitDate = head.commit.committed_date
else:
commitDate = head.ref.commit.committed_date
if commitDate > date:
date = commitDate
return datetime.datetime.fromtimestamp(date)
def Branch(repo):
head = repo.head
if head.is_detached:
return "DETACHED"
return head.reference.name
def Fetch(submodule):
if not submodule.module_exists():
return
print('Fetching {}'.format(submodule.name))
subrepo = submodule.module()
subrepo.git.fetch()
def FetchAll(repo):
for submodule in repo.submodules:
Fetch(submodule)
def Update(repo, submodule, remote=True, recursive=True, depth=None):
args = ['update', '--init']
if recursive:
args.append('--recursive')
if remote:
args.append('--remote')
if depth:
args.append('--depth')
args.append(depth)
if not submodule.module_exists():
print('Initializing {}'.format(submodule.name))
else:
subrepo = submodule.module()
remote = subrepo.remote()
head = subrepo.head
if head.is_detached:
print('Updating {}'.format(submodule.name))
else:
args.append('--rebase')
print('Updating {} from {}/{}'.format(submodule.name, remote.name, head.reference.name))
args.append('--')
args.append(submodule.name)
repo.git.submodule(args)
def UpdateAll(repo, remote=True, recursive=True, depth=None):
for submodule in repo.submodules:
Update(repo, submodule, remote=remote, recursive=recursive, depth=depth)
def Checkout(repo, submodule):
if not submodule.module_exists():
Update(repo, submodule)
branch = submodule.branch
print('Switching {} to {}'.format(submodule.name, branch.name))
branch.checkout()
if not submodule.module_exists():
print('Cannot recursively checkout, {} has not been initialized yet.'.format(submodule.name))
return
subrepo = submodule.module()
for submodule in subrepo.submodules:
Checkout(subrepo, submodule)
def CheckoutAll(repo):
for submodule in repo.submodules:
Checkout(repo, submodule)
def Clean(submodule):
if not submodule.module_exists():
print('Cannot clean, {} has not been initialized yet.'.format(submodule.name))
return
subrepo = submodule.module()
print('Cleaning {}'.format(submodule.name))
subrepo.git.clean('-dfx', '-e', '.project', '-e', '.classpath', '-e', '.settings', '-e', 'META-INF')
def CleanAll(repo):
for submodule in repo.submodules:
Clean(submodule)
def Reset(submodule, toRemote):
if not submodule.module_exists():
print('Cannot reset, {} has not been initialized yet.'.format(submodule.name))
return
subrepo = submodule.module()
if toRemote:
head = subrepo.head
if head.is_detached:
print('Cannot reset, {} has a DETACHED HEAD.'.format(submodule.name))
return
remote = subrepo.remote()
branchName = '{}/{}'.format(remote.name, head.reference.name)
print('Resetting {} to {}'.format(submodule.name, branchName))
subrepo.git.reset('--hard', branchName)
else:
print('Resetting {}'.format(submodule.name))
subrepo.git.reset('--hard')
def ResetAll(repo, toRemote):
for submodule in repo.submodules:
Reset(submodule, toRemote)
def Merge(submodule, branchName):
if not submodule.module_exists():
print('Cannot merge, {} has not been initialized yet.'.format(submodule.name))
return
subrepo = submodule.module()
subrepo.git.merge(branchName)
def MergeAll(repo, branchName):
for submodule in repo.submodules:
Merge(submodule, branchName)
def Tag(submodule, tagName, tagDescription):
if not submodule.module_exists():
print('Cannot tag, {} has not been initialized yet.'.format(submodule.name))
return
print('Creating tag {} in {}'.format(tagName, submodule.name))
subrepo = submodule.module()
subrepo.create_tag(path=tagName, message=tagDescription)
def TagAll(repo, tagName, tagDescription):
for submodule in repo.submodules:
Tag(submodule, tagName, tagDescription)
def Push(submodule, **kwargs):
if not submodule.module_exists():
print('Cannot push, {} has not been initialized yet.'.format(submodule.name))
return
print('Pushing {}'.format(submodule.name))
subrepo = submodule.module()
remote = subrepo.remote()
remote.push(**kwargs)
def PushAll(repo, **kwargs):
for submodule in repo.submodules:
Push(submodule, **kwargs)
def Track(submodule):
if not submodule.module_exists():
print('Cannot set tracking branch, {} has not been initialized yet.'.format(submodule.name))
return
subrepo = submodule.module()
head = subrepo.head
remote = subrepo.remote()
localBranchName = head.reference.name
remoteBranchName = '{}/{}'.format(remote.name, localBranchName)
print('Setting tracking branch for {} to {}'.format(localBranchName, remoteBranchName))
subrepo.git.branch('-u', remoteBranchName, localBranchName)
def TrackAll(repo):
for submodule in repo.submodules:
Track(submodule)
@unique
class RemoteType(Enum):
SSH = 1
HTTP = 2
def SetRemoteAll(repo, toType=RemoteType.SSH):
for submodule in repo.submodules:
SetRemote(submodule, toType)
def SetRemote(submodule, toType):
if not submodule.module_exists():
print('Cannot set remote, {} has not been initialized yet.'.format(submodule.name))
return
name = submodule.name
subrepo = submodule.module()
origin = subrepo.remote()
currentUrl = origin.config_reader.get('url')
httpMatch = re.match('https?://([\w\.@:\-~]+)/(.+)', currentUrl)
sshMatch = re.match('(?:ssh://)?([\w\.@\-~]+)@([\w\.@\-~]+)[:/](.+)', currentUrl)
if httpMatch:
user = 'git'
host = httpMatch.group(1)
path = httpMatch.group(2)
elif sshMatch:
user = sshMatch.group(1)
host = sshMatch.group(2)
path = sshMatch.group(3)
else:
raise RuntimeError('Cannot set remote for {}, unknown URL format {}.'.format(name, currentUrl))
if toType is RemoteType.SSH:
newUrl = '{}@{}:{}'.format(user, host, path)
elif toType is RemoteType.HTTP:
newUrl = 'https://{}/{}'.format(host, path)
else:
raise RuntimeError('Cannot set remote for {}, unknown URL type {}.'.format(name, str(toType)))
print('Setting remote for {} to {}'.format(name, newUrl))
origin.config_writer.set('url', newUrl)
def create_qualifier(repo, branch=None):
timestamp = LatestDate(repo)
if not branch:
branch = Branch(repo)
return _format_qualifier(timestamp, branch)
def create_now_qualifier(repo, branch=None):
timestamp = datetime.datetime.now()
if not branch:
branch = Branch(repo)
return _format_qualifier(timestamp, branch)
def _format_qualifier(timestamp, branch):
return '{}-{}'.format(timestamp.strftime('%Y%m%d-%H%M%S'), branch.replace('/', '_'))
def repo_changed(repo, qualifierLocation):
timestamp = LatestDate(repo)
branch = Branch(repo)
changed = False
if not os.path.isfile(qualifierLocation):
changed = True
else:
with open(qualifierLocation, mode='r') as qualifierFile:
storedTimestampStr = qualifierFile.readline().replace('\n', '')
storedBranch = qualifierFile.readline().replace('\n', '')
if not storedTimestampStr or not storedBranch:
raise RuntimeError('Invalid qualifier file {}, please delete this file and retry'.format(qualifierLocation))
storedTimestamp = datetime.datetime.fromtimestamp(int(storedTimestampStr))
changed = (timestamp > storedTimestamp) or (branch != storedBranch)
with open(qualifierLocation, mode='w') as timestampFile:
timestampStr = str(int(time.mktime(timestamp.timetuple())))
timestampFile.write('{}\n{}\n'.format(timestampStr, branch))
return changed, _format_qualifier(timestamp, branch)
| apache-2.0 | 4,397,770,428,864,752,600 | 26.915194 | 116 | 0.690506 | false | 3.717647 | false | false | false |
quimaguirre/diana | scripts/old_scripts/classify_drug_combinations.py | 1 | 22397 | import argparse
import cPickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pylab
import scipy
import time
import sys, os, re
from context import diana
import diana.classes.comparison as diana_comparison
import diana.classes.analysis as diana_analysis
def main():
options = parse_user_arguments()
analysis_results(options)
def parse_user_arguments(*args, **kwds):
"""
Parses the arguments of the program
"""
parser = argparse.ArgumentParser(
description = "Generate the profiles of the input drug",
epilog = "@oliva's lab 2017")
parser.add_argument('-th','--threshold_list',dest='threshold_list',action = 'store',
help = """List of percentages that will be used as cut-offs to define the profiles of the drugs. It has to be a file containing:
- Different numbers that will be the threshold values separated by newline characters.
For example, a file called "top_threshold.list" containing:
0.1
0.5
1
5
10
""")
parser.add_argument('-f','--formula',dest='formula',action = 'store',default='simpson',
help = """Define the formula used to classify. It can be: simpson, jaccard""")
parser.add_argument('-se','--consider_se',dest='consider_se',action = 'store_true',
help = """" Consider Side Effects / ATCs. """)
    parser.add_argument('-ws','--workspace',dest='workspace',action = 'store',default=os.path.join(os.path.dirname(__file__), '..', 'workspace'),
help = """Define the workspace directory where the data directory and the results directory will be created""")
options=parser.parse_args()
return options
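# Example invocation (file names and workspace path are illustrative):
#   python classify_drug_combinations.py -th top_threshold.list -f simpson -se -ws /path/to/workspace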
#################
#################
# MAIN FUNCTION #
#################
#################
def analysis_results(options):
"""
Analyzes the results of the comparisons
"""
# Start marker for time measure
start = time.time()
print("\n\t\t-------------------------------------------------------------------------------------------------------------------------------\n")
print("\t\tStarting Drug Interactions ANAlysis (DIANA), a program created by @OLIVA'S LAB. Analysis of results: Classify drug combinations\n")
print("\t\t-------------------------------------------------------------------------------------------------------------------------------\n")
# Get the script path
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
toolbox_dir = os.path.join(main_path, 'diana/toolbox')
# Check the directory of the profiles, comparisons and analysis
data_dir = os.path.join(options.workspace, "profiles")
check_directory(data_dir)
results_dir = os.path.join(options.workspace, "comparisons")
check_directory(results_dir)
analysis_dir = os.path.join(options.workspace, "analysis")
check_directory(analysis_dir)
# Get the list of thresholds to create the profiles
if options.threshold_list and fileExist(options.threshold_list):
threshold_list = get_values_from_threshold_file(options.threshold_list)
else:
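        # Default top-percentage cut-offs used when no threshold file is provided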
threshold_list = [1, 5, 10, 20, 50]
# Do we consider Side Effects/ATC?
if options.consider_se:
consider_se = True
else:
consider_se = False
# Get the names of the columns
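    # (similarity fields for the different profile types and thresholds, plus the 'combination' label)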
columns = diana_analysis.obtain_columns(threshold_list, ATC_SE=consider_se)
#-----------------------------------------------------#
# PARSE THE RESULTS AND CREATE A PANDAS DATAFRAME #
#-----------------------------------------------------#
pair2comb_file = os.path.join(toolbox_dir, 'pair2comb.pcl')
pair2comb = cPickle.load(open(pair2comb_file))
diana_id_to_drugbank_file = os.path.join(toolbox_dir, 'diana_id_to_drugbank.pcl')
diana_id_to_drugbank = cPickle.load(open(diana_id_to_drugbank_file))
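    # pair2comb maps 'DRUG1---DRUG2' (DrugBank names, upper case) to 1 (known drug combination) or 0 (non-combination);
    # diana_id_to_drugbank maps the internal DIANA identifiers used in the comparison folders back to DrugBank names.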
ddi = sum(1 for x in pair2comb.values() if x == 1)
non_ddi = sum(1 for x in pair2comb.values() if x == 0)
print('NUMBER OF DRUG COMBINATIONS:\t\t{}\n'.format(ddi))
print('NUMBER OF NON-DRUG COMBINATIONS:\t{}\n'.format(non_ddi))
output_dataframe = os.path.join(analysis_dir, 'dcdb_comparisons.csv')
if not fileExist(output_dataframe):
# Create a data frame to store the results
df = pd.DataFrame(columns=columns)
# Obtain all the results subfolders of the results main folder
results_dir_list = [f for f in os.listdir(results_dir) if os.path.isdir(os.path.join(results_dir, f))]
for comparison in results_dir_list:
drug_id1, drug_id2 = comparison.split('---')
comparison_dir = os.path.join(results_dir, comparison)
results_table = os.path.join(comparison_dir, 'results_table.tsv')
# Add the Comb field (if it is drug combination or not)
drug1 = diana_id_to_drugbank[drug_id1].upper()
drug2 = diana_id_to_drugbank[drug_id2].upper()
comparison_without_id = '{}---{}'.format(drug1, drug2)
if comparison_without_id in pair2comb:
combination_field = pair2comb[comparison_without_id]
else:
print('The comparison {} is not in the pair2comb dictionary!\n'.format(comparison_without_id))
print(pair2comb)
sys.exit(10)
if not fileExist(results_table):
print('The comparison {} has not been executed properly!\n'.format(comparison))
sys.exit(10)
results = diana_analysis.get_results_from_table(results_table, columns, combination_field)
df2 = pd.DataFrame([results], columns=columns, index=[comparison])
# Add the information to the main data frame
df = df.append(df2)
# Output the Pandas dataframe in a CSV file
df.to_csv(output_dataframe)
else:
df = pd.read_csv(output_dataframe, index_col=0)
#---------------------------#
# REMOVE MISSING VALUES #
#---------------------------#
# Replace the None values in dcstructure by nan
    if (df['dcstructure'] == 'None').any():
df = df.replace(to_replace={'dcstructure':{'None':np.nan}})
# Remove the nan values in dcstructure
df = df.dropna()
# Count the number of drug combinations / non-drug combinations
dc_data = df[df['combination'] == 1]
ndc_data = df[df['combination'] == 0]
num_dc = len(dc_data.index)
num_ndc = len(ndc_data.index)
print('Number of drug combinations after removing missing values:\t{}\n'.format(num_dc))
print('Number of non-drug combinations after removing missing values:\t{}\n'.format(num_ndc))
#---------------------------#
# IDENTIFY ME-TOO DRUGS #
#---------------------------#
me_too_dir = os.path.join(analysis_dir, 'me_too_drugs')
create_directory(me_too_dir)
me_too_drugs_table = os.path.join(me_too_dir, 'me_too_drugs.tsv')
me_too_drug_combs_table = os.path.join(me_too_dir, 'me_too_drug_combinations.tsv')
me_too_drug_pairs_file = os.path.join(me_too_dir, 'me_too_drug_pairs.pcl')
me_too_drug_comb_pairs_file = os.path.join(me_too_dir, 'me_too_drug_comb_pairs.pcl')
if not fileExist(me_too_drug_pairs_file) or not fileExist(me_too_drug_comb_pairs_file):
df_struc = df[['dcstructure']]
df_struc = df_struc.astype(float)
me_too_drug_pairs, me_too_drug_comb_pairs = diana_analysis.obtain_me_too_drugs_and_combinations(df_struc, columns, me_too_drugs_table, me_too_drug_combs_table)
cPickle.dump(me_too_drug_pairs, open(me_too_drug_pairs_file, 'w'))
cPickle.dump(me_too_drug_comb_pairs, open(me_too_drug_comb_pairs_file, 'w'))
else:
me_too_drug_pairs = cPickle.load(open(me_too_drug_pairs_file))
me_too_drug_comb_pairs = cPickle.load(open(me_too_drug_comb_pairs_file))
# Process me-too drug combination pairs
me_too_drug_combinations = set()
drug_pair_to_me_too_times = {}
for pair in me_too_drug_comb_pairs:
drug_comb1, drug_comb2 = pair.split('___')
me_too_drug_combinations.add(frozenset([drug_comb1, drug_comb2]))
drug_pair_to_me_too_times.setdefault(drug_comb1, 0)
drug_pair_to_me_too_times.setdefault(drug_comb2, 0)
drug_pair_to_me_too_times[drug_comb1] += 1
drug_pair_to_me_too_times[drug_comb2] += 1
removed_drug_pairs = set()
for pair in me_too_drug_comb_pairs:
drug_comb1, drug_comb2 = pair.split('___')
if drug_comb1 in removed_drug_pairs or drug_comb2 in removed_drug_pairs:
continue
if drug_pair_to_me_too_times[drug_comb1] > drug_pair_to_me_too_times[drug_comb2]:
removed_drug_pairs.add(drug_comb1)
else:
removed_drug_pairs.add(drug_comb2)
# Remove the drug pairs which appear in me-too pairs of drug pairs more times
df = df.loc[~df.index.isin(list(removed_drug_pairs))]
# Count the number of drug combinations / non-drug combinations
dc_data = df[df['combination'] == 1]
ndc_data = df[df['combination'] == 0]
num_dc = len(dc_data.index)
num_ndc = len(ndc_data.index)
print('Number of drug combinations after removing me-too conflictive drug pairs:\t{}\n'.format(num_dc))
print('Number of non-drug combinations after removing me-too conflictive drug pairs:\t{}\n'.format(num_ndc))
img_dir = os.path.join(analysis_dir, 'figures')
create_directory(img_dir)
fig_format = 'png'
#-----------------------------------------------------#
# PLOT DISTRIBUTION OF NUMBER OF TARGETS PER DRUG #
#-----------------------------------------------------#
# Plot distribution of comparisons of targets
drugbank2targets_file = os.path.join(toolbox_dir, 'drugbank_to_targets.pcl')
drugbank_to_targets = cPickle.load(open(drugbank2targets_file))
plot_distribution_targets = os.path.join(img_dir, 'distribution_number_targets.{}'.format(fig_format))
targets = [len(x) for x in drugbank_to_targets.values()]
n, bins, patches = plt.hist(np.array(targets), bins=50, weights=np.zeros_like(np.array(targets)) + 1. / np.array(targets).size, facecolor='r')
plt.xlabel('Number of targets per drug')
plt.ylabel('Relative frequency')
plt.title('Distribution of the number of targets per drug')
plt.savefig(plot_distribution_targets, format=fig_format, dpi=300)
plt.clf()
#----------------------------------------------------------------------------------------------#
# EVALUATE OVERLAP BETWEEN TARGETS, BIOLOGICAL PROCESSES AND PATHWAYS IN DRUG COMBINATIONS #
#----------------------------------------------------------------------------------------------#
tables_dir = os.path.join(analysis_dir, 'tables')
create_directory(tables_dir)
if options.formula != 'jaccard' and options.formula != 'simpson':
print('Please, introduce a correct formula to classify drug combinations: jaccard or simpson!\n')
sys.exit(10)
# Plot of distribution of comparisons of Targets
plot_ji_targets = os.path.join(img_dir, 'distribution_{}_index_targets.{}'.format(options.formula, fig_format))
# Plot of distribution of comparisons of Biological Processes
plot_ji_bp = os.path.join(img_dir, 'distribution_{}_index_biological_processes.{}'.format(options.formula, fig_format))
# Plot of distribution of comparisons of Pathways
plot_ji_pathways = os.path.join(img_dir, 'distribution_{}_index_pathways.{}'.format(options.formula, fig_format))
# Output pickle file of the classification
classification_targets_bp_file = os.path.join(toolbox_dir, 'classification_targets_bp.pcl')
classification_targets_pathways_file = os.path.join(toolbox_dir, 'classification_targets_pathways.pcl')
# Get the classification files
drug_int_2_drugs_file = os.path.join(toolbox_dir, 'drug_int_2_drugs.pcl')
drug_int_2_drugs = cPickle.load(open(drug_int_2_drugs_file))
drug_int_2_info_file = os.path.join(toolbox_dir, 'drug_int_2_info.pcl')
drug_int_2_info = cPickle.load(open(drug_int_2_info_file))
drugbank_to_dcdb_file = os.path.join(toolbox_dir, 'drugbank_to_dcdb.pcl')
drugbank_to_dcdb = cPickle.load(open(drugbank_to_dcdb_file))
bio_processes_file = os.path.join(toolbox_dir, 'target_to_bio_processes.pcl')
target_to_bio_processes = cPickle.load(open(bio_processes_file))
pathways_file = os.path.join(toolbox_dir, 'target_to_pathways.pcl')
target_to_pathways = cPickle.load(open(pathways_file))
target_comparisons = []
bp_comparisons = []
pathway_comparisons = []
dc_to_target_ji = {}
dc_to_bp_ji = {}
dc_to_pathway_ji = {}
all_drugs = set()
for index, row in dc_data.iterrows():
(drug_id1, drug_id2) = index.split('---')
drug1 = diana_id_to_drugbank[drug_id1].upper()
drug2 = diana_id_to_drugbank[drug_id2].upper()
all_drugs.add(drug1)
all_drugs.add(drug2)
if drug1 in drugbank_to_targets and drug2 in drugbank_to_targets:
targets1 = drugbank_to_targets[drug1]
targets2 = drugbank_to_targets[drug2]
if options.formula == 'jaccard':
result_targets = diana_comparison.calculate_jaccard_index(targets1, targets2)
elif options.formula == 'simpson':
result_targets = diana_comparison.calculate_simpson_index(targets1, targets2)
target_comparisons.append(result_targets)
dc_to_target_ji[index] = result_targets
bio_proc1 = get_results_from_dict_of_sets(targets1, target_to_bio_processes)
bio_proc2 = get_results_from_dict_of_sets(targets2, target_to_bio_processes)
if options.formula == 'jaccard':
result_bp = diana_comparison.calculate_jaccard_index(bio_proc1, bio_proc2)
elif options.formula == 'simpson':
result_bp = diana_comparison.calculate_simpson_index(bio_proc1, bio_proc2)
bp_comparisons.append(result_bp)
dc_to_bp_ji[index] = result_bp
pathways1 = get_results_from_dict_of_sets(targets1, target_to_pathways)
pathways2 = get_results_from_dict_of_sets(targets2, target_to_pathways)
if options.formula == 'jaccard':
result_pathways = diana_comparison.calculate_jaccard_index(pathways1, pathways2)
elif options.formula == 'simpson':
result_pathways = diana_comparison.calculate_simpson_index(pathways1, pathways2)
pathway_comparisons.append(result_pathways)
dc_to_pathway_ji[index] = result_pathways
# Plot distribution of comparisons of targets
n, bins, patches = plt.hist(np.array(target_comparisons), bins=50, weights=np.zeros_like(np.array(target_comparisons)) + 1. / np.array(target_comparisons).size, facecolor='r')
plt.xlabel('{} Index of Targets'.format(options.formula.capitalize()))
plt.ylabel('Relative frequency')
plt.title('Distribution of {} Index of Targets in drug combinations'.format(options.formula.capitalize()))
plt.savefig(plot_ji_targets, format=fig_format, dpi=300)
plt.clf()
# Plot distribution of comparisons of biological processes
n, bins, patches = plt.hist(np.array(bp_comparisons), bins=50, weights=np.zeros_like(np.array(bp_comparisons)) + 1. / np.array(bp_comparisons).size, facecolor='b')
plt.xlabel('{} Index of Biological Processes'.format(options.formula.capitalize()))
plt.ylabel('Relative frequency')
plt.title('Distribution of {} Index of Biological Processes in drug combinations'.format(options.formula.capitalize()))
plt.savefig(plot_ji_bp, format=fig_format, dpi=300)
plt.clf()
# Plot distribution of comparisons of pathways
n, bins, patches = plt.hist(np.array(pathway_comparisons), bins=50, weights=np.zeros_like(np.array(pathway_comparisons)) + 1. / np.array(pathway_comparisons).size, facecolor='g')
plt.xlabel('{} Index of Pathways'.format(options.formula.capitalize()))
plt.ylabel('Relative frequency')
plt.title('Distribution of {} Index of Pathways in drug combinations'.format(options.formula.capitalize()))
plt.savefig(plot_ji_pathways, format=fig_format, dpi=300)
plt.clf()
#------------------------------------#
# CLASSIFY THE DRUG COMBINATIONS #
#------------------------------------#
    # Similar targets --> ji > 0.5
    # Different targets --> ji <= 0.5
    target_cut_off = 0.5
    # Similar biological processes --> ji > 0.5
    # Different biological processes --> ji <= 0.5
    bp_cut_off = 0.5
    # Similar pathways --> ji > 0.5
    # Different pathways --> ji <= 0.5
pathway_cut_off = 0.5
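    # Worked example of the classification below (illustrative values only):
    # a pair with ji_tar = 0.7 is tagged 'similar_targets' regardless of ji_bp;
    # a pair with ji_tar = 0.2 and ji_bp = 0.6 becomes 'different_targets_similar_bp';
    # a pair with ji_tar = 0.2 and ji_bp = 0.1 becomes 'different_targets_different_bp'.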
classification_tar_bp = {}
st = 0
dt = 0
st_sbp = 0
st_dbp = 0
dt_sbp = 0
dt_dbp = 0
for dc in dc_to_target_ji:
# Classify by targets and biological processes
if dc in dc_to_bp_ji:
ji_tar = dc_to_target_ji[dc]
ji_bp = dc_to_bp_ji[dc]
if ji_tar > target_cut_off:
classification_tar_bp[dc] = 'similar_targets'
st += 1
if ji_bp > bp_cut_off:
st_sbp += 1
elif ji_bp <= bp_cut_off:
st_dbp += 1
elif ji_tar <= target_cut_off:
dt += 1
if ji_bp > bp_cut_off:
dt_sbp += 1
classification_tar_bp[dc] = 'different_targets_similar_bp'
elif ji_bp <= bp_cut_off:
dt_dbp += 1
classification_tar_bp[dc] = 'different_targets_different_bp'
print('Similar targets {}: similar bp {}, diff bp {}\n'.format(st, st_sbp, st_dbp))
print('Different targets {}: similar bp {}, diff bp {}\n'.format(dt, dt_sbp, dt_dbp))
cPickle.dump(classification_tar_bp, open(classification_targets_bp_file, 'w'))
classification_tar_pathway = {}
st = 0
dt = 0
st_spath = 0
st_dpath = 0
dt_spath = 0
dt_dpath = 0
for dc in dc_to_target_ji:
        # Classify by targets and pathways
if dc in dc_to_pathway_ji:
ji_tar = dc_to_target_ji[dc]
ji_path = dc_to_pathway_ji[dc]
if ji_tar > target_cut_off:
classification_tar_pathway[dc] = 'similar_targets'
st += 1
if ji_path > pathway_cut_off:
st_spath += 1
elif ji_path <= pathway_cut_off:
st_dpath += 1
elif ji_tar <= target_cut_off:
dt += 1
if ji_path > pathway_cut_off:
dt_spath += 1
classification_tar_pathway[dc] = 'different_targets_similar_pathways'
elif ji_path <= pathway_cut_off:
dt_dpath += 1
classification_tar_pathway[dc] = 'different_targets_different_pathways'
print('Similar targets {}: similar pathways {}, diff pathways {}\n'.format(st, st_spath, st_dpath))
print('Different targets {}: similar pathways {}, diff pathways {}\n'.format(dt, dt_spath, dt_dpath))
cPickle.dump(classification_tar_pathway, open(classification_targets_pathways_file, 'w'))
# Get number of drugs in drug combinations per number of targets
targets = [len(drugbank_to_targets[drug]) for drug in drugbank_to_targets if drug in all_drugs]
numtargets_to_numdrugs = {}
for target in targets:
numtargets_to_numdrugs.setdefault(target, 0)
numtargets_to_numdrugs[target] += 1
print('Number of drugs in drug combination: {}. Divided by four: {}'.format(len(all_drugs), len(all_drugs)/4))
for numtar, numdrug in sorted(numtargets_to_numdrugs.iteritems(), key=lambda (x, y): x, reverse = True):
print(numtar, numdrug)
# End marker for time
end = time.time()
print('\n DIANA INFO:\tTIME OF EXECUTION: {:.3f} seconds or {:.3f} minutes.\n'.format(end - start, (end - start) / 60))
return
#######################
#######################
# SECONDARY FUNCTIONS #
#######################
#######################
def fileExist(file):
"""
Checks if a file exists AND is a file
"""
return os.path.exists(file) and os.path.isfile(file)
def check_file(file):
"""
Checks if a file exists and if not, raises FileNotFound exception
"""
if not fileExist(file):
raise FileNotFound(file)
def create_directory(directory):
"""
Checks if a directory exists and if not, creates it
"""
try:
os.stat(directory)
except:
os.mkdir(directory)
return
def check_directory(directory):
"""
Checks if a directory exists and if not, raises DirNotFound exception
"""
try:
os.stat(directory)
except:
raise DirNotFound(directory)
class FileNotFound(Exception):
"""
Exception raised when a file is not found.
"""
def __init__(self, file):
self.file = file
def __str__(self):
return 'The file {} has not been found.\nTherefore, the comparison cannot be performed. Please, check that all the profiles have been correctly generated.\n'.format(self.file)
class DirNotFound(Exception):
"""
Exception raised when a directory is not found.
"""
def __init__(self, directory):
self.directory = directory
def __str__(self):
return 'The directory {} has not been found.\nTherefore, the comparison cannot be performed. Please, check that all the parameters have been correctly introduced and the profiles have been correctly generated.\n'.format(self.directory)
def get_results_from_dict_of_sets(list_of_elements, dict_of_sets):
"""
    Given a list of elements and a dict mapping each element to a set of results,
    return the union of the result sets for the elements of the list.
"""
results = set()
for element in list_of_elements:
if element in dict_of_sets:
for result in dict_of_sets[element]:
results.add(result)
return results
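# Illustrative example for the helper above (hypothetical identifiers, not real data):
#   get_results_from_dict_of_sets(['P1', 'P2'],
#                                 {'P1': set(['GO:1']), 'P2': set(['GO:1', 'GO:2'])})
#   returns set(['GO:1', 'GO:2'])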
if __name__ == "__main__":
main()
| mit | 5,589,249,258,767,770,000 | 38.431338 | 243 | 0.605394 | false | 3.503363 | false | false | false |
quru/qis | src/imageserver/template_manager.py | 1 | 10243 | #
# Quru Image Server
#
# Document: template_manager.py
# Date started: 22 Sep 2015
# By: Matt Fozard
# Purpose: Provides a managed interface to the image templates
# Requires:
# Copyright: Quru Ltd (www.quru.com)
# Licence:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
#
# Last Changed: $Date$ $Rev$ by $Author$
#
# Notable modifications:
# Date By Details
# ========= ==== ============================================================
# 19Aug2016 Matt Added system default template
#
from datetime import datetime, timedelta
import threading
from .models import ImageTemplate, Property
from .template_attrs import TemplateAttrs
from .util import KeyValueCache
class ImageTemplateManager(object):
"""
Provides access to image templates (used mostly by the image manager),
in a variety of formats, backed by a cache for performance.
The cache is invalidated and refreshed automatically.
Rather than having to monitor object change times and search for added and
deleted rows, we'll just use a simple counter for detecting database changes.
This is the same mechanism as used for PermissionsManager.
"""
TEMPLATE_CACHE_SYNC_INTERVAL = 60
_TEMPLATE_LIST_KEY = '__template_info_list__'
_TEMPLATE_NAMES_KEY = '__template_names__'
_TEMPLATE_NAMES_LOWER_KEY = '__template_names_lower__'
def __init__(self, data_manager, logger):
self._db = data_manager
self._logger = logger
self._default_template_name = ''
self._data_version = 0
self._template_cache = KeyValueCache()
self._update_lock = threading.Lock()
self._last_check = datetime.min
self._useable = threading.Event()
self._useable.set()
def get_template_list(self):
"""
Returns a list of {id, name, description, is_default} dictionaries
representing the available templates, sorted by name.
"""
self._check_data_version()
cached_list = self._template_cache.get(ImageTemplateManager._TEMPLATE_LIST_KEY)
if cached_list is None:
db_obj_list = [
tdata['db_obj'] for tdata in self._template_cache.values()
if isinstance(tdata, dict)
]
cached_list = [{
'id': dbo.id,
'name': dbo.name,
'description': dbo.description,
'is_default': (dbo.name.lower() == self._default_template_name)
} for dbo in db_obj_list
]
cached_list.sort(key=lambda o: o['name'])
self._template_cache.set(ImageTemplateManager._TEMPLATE_LIST_KEY, cached_list)
return cached_list
def get_template_names(self, lowercase=False):
"""
Returns a sorted list of available template names - those names that
are valid for use with get_template() and when generating an image.
"""
self._check_data_version()
if lowercase:
cached_list = self._template_cache.get(ImageTemplateManager._TEMPLATE_NAMES_LOWER_KEY)
if cached_list is None:
names_list = self._get_cached_names_list(True)
cached_list = [name.lower() for name in names_list]
self._template_cache.set(
ImageTemplateManager._TEMPLATE_NAMES_LOWER_KEY, cached_list
)
return cached_list
else:
cached_list = self._template_cache.get(ImageTemplateManager._TEMPLATE_NAMES_KEY)
if cached_list is None:
cached_list = self._get_cached_names_list(True)
self._template_cache.set(
ImageTemplateManager._TEMPLATE_NAMES_KEY, cached_list
)
return cached_list
def get_template(self, name):
"""
Returns the TemplateAttrs object matching the given name (case insensitive),
or None if no template matches the name.
"""
self._check_data_version()
tdata = self._template_cache.get(name.lower())
return tdata['attr_obj'] if tdata is not None else None
def get_default_template(self):
"""
Returns the TemplateAttrs object for the system's default image template.
"""
self._check_data_version()
tdata = self._template_cache.get(self._default_template_name)
if tdata is None:
raise ValueError(
'System default template \'%s\' was not found' % self._default_template_name
)
return tdata['attr_obj']
def get_template_db_obj(self, name):
"""
Returns the ImageTemplate database object matching the given name
(case insensitive), or None if no template matches the name.
"""
self._check_data_version()
tdata = self._template_cache.get(name.lower())
return tdata['db_obj'] if tdata is not None else None
def reset(self):
"""
Invalidates the cached template data by incrementing the database data
version number. This change will be detected on the next call to this
object, and within the SYNC_INTERVAL by all other processes.
"""
with self._update_lock:
new_ver = self._db.increment_property(Property.IMAGE_TEMPLATES_VERSION)
self._last_check = datetime.min
self._logger.info('Image templates setting new version ' + new_ver)
def _load_data(self):
"""
Re-populates the internal caches with the latest template data from the database.
The internal update lock must be held while this method is being called.
"""
# Reset the caches
self._template_cache.clear()
db_ver = self._db.get_object(Property, Property.IMAGE_TEMPLATES_VERSION)
self._data_version = int(db_ver.value)
# Refresh default template setting
db_def_t = self._db.get_object(Property, Property.DEFAULT_TEMPLATE)
self._default_template_name = db_def_t.value.lower()
# Load the templates
db_templates = self._db.list_objects(ImageTemplate)
for db_template in db_templates:
try:
# Create a TemplateAttrs (this also validates the template values)
template_attrs = TemplateAttrs(
db_template.name,
db_template.template
)
# If here it's valid, so add to cache
self._template_cache.set(
db_template.name.lower(),
{'db_obj': db_template, 'attr_obj': template_attrs}
)
except Exception as e:
self._logger.error(
'Unable to load \'%s\' template configuration: %s' % (
db_template.name, str(e)
)
)
self._logger.info('Loaded templates: %s at version %d' % (
', '.join(self._template_cache.keys()), self._data_version
))
def _check_data_version(self, _force=False):
"""
Periodically checks for changes in the template data, sets the
internal data version number, and resets the caches if necessary.
Uses an internal lock for thread safety.
"""
check_secs = ImageTemplateManager.TEMPLATE_CACHE_SYNC_INTERVAL
if _force or (self._data_version == 0) or (
self._last_check < (datetime.utcnow() - timedelta(seconds=check_secs))
):
# Check for newer data version
if self._update_lock.acquire(0): # 0 = nonblocking
try:
old_ver = self._data_version
db_ver = self._db.get_object(Property, Property.IMAGE_TEMPLATES_VERSION)
if int(db_ver.value) != old_ver:
action = 'initialising with' if old_ver == 0 else 'detected new'
self._logger.info('Image templates %s version %s' % (action, db_ver.value))
self._useable.clear()
self._load_data()
finally:
self._last_check = datetime.utcnow()
self._useable.set()
self._update_lock.release()
else:
# Another thread is checking or updating. When the server is busy,
# because the update is time based, many threads get here at the
# same time.
# v4.1 It is safe to carry on if a check is in place but an update
# is not. If an update is in place then the template cache is empty
# and we should wait for it to load.
if not self._useable.is_set() or self._data_version == 0:
self._logger.debug('Another thread is loading image templates, waiting for it')
if not self._useable.wait(10.0):
self._logger.warning('Timed out waiting for image template data')
else:
self._logger.debug('Got new image template data, continuing')
def _get_cached_names_list(self, sort=False):
"""
Returns a list of all the template names currently in the internal cache.
"""
db_obj_list = [
tdata['db_obj'] for tdata in self._template_cache.values()
if isinstance(tdata, dict)
]
names_list = [dbo.name for dbo in db_obj_list]
if sort:
names_list.sort()
return names_list
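# Typical usage sketch (illustrative only -- the real application supplies its
# own data manager and logger; the template name below is an assumption):
#
#   manager = ImageTemplateManager(data_manager, logger)
#   attrs = manager.get_template('smalljpeg')        # TemplateAttrs or None
#   default_attrs = manager.get_default_template()   # system default template
#   manager.reset()  # force all processes to reload within SYNC_INTERVAL seconds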
| agpl-3.0 | -4,422,943,304,084,045,300 | 41.152263 | 99 | 0.586156 | false | 4.358723 | false | false | false |
macbre/3pc | scripts/sources/ghostery.py | 1 | 1251 | import json
import re
from . import ThirdPCSource
# Generate trackers.json using Ghostery data (issue #1)
class GhosterySource(ThirdPCSource):
SOURCE = 'https://raw.githubusercontent.com/jonpierce/ghostery/master/' + \
'firefox/ghostery-statusbar/ghostery/chrome/content/ghostery-bugs.js'
FILENAME = 'trackers.json'
def _generate(self):
content = self._fetch_url(self.SOURCE).strip(';')
rules = json.loads(content)
self._data['by_regexp'] = {}
self._data['by_url'] = {}
self._logger.info('Parsing {} rules'.format(len(rules)))
for entry in rules:
pattern = entry['pattern']
if re.search(r'[\(\|\*\?]', pattern):
# regexp rule: "/google-analytics\\.com\\/(urchin\\.js|ga\\.js)/i"
pattern = re.sub(r'^/|/i$', '', pattern) # remove wrapping /
self._data['by_regexp'][pattern] = entry['name']
self._count += 1
else:
# strpos rule: "/\\/piwik\\.js/i"
pattern = re.sub(r'^/|/i$', '', pattern)
pattern = re.sub(r'\\', '', pattern)
self._data['by_url'][pattern] = entry['name']
self._count += 1
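    # Resulting structure (illustrative -- the tracker names are assumptions):
    #   self._data['by_regexp'] maps regexp patterns to tracker names, e.g.
    #       'google-analytics\\.com\\/(urchin\\.js|ga\\.js)' -> 'Google Analytics'
    #   self._data['by_url'] maps plain substring patterns to tracker names, e.g.
    #       '/piwik.js' -> 'Piwik'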
| bsd-2-clause | 3,909,223,640,555,136,500 | 32.810811 | 82 | 0.529976 | false | 3.564103 | false | false | false |
pyinvoke/invoke | tests/_util.py | 1 | 9860 | import os
import sys
try:
import termios
except ImportError:
# Not available on Windows
termios = None
from contextlib import contextmanager
from invoke.vendor.six import BytesIO, b, wraps
from mock import patch, Mock
from pytest import skip
from pytest_relaxed import trap
from invoke import Program, Runner
from invoke.terminals import WINDOWS
support = os.path.join(os.path.dirname(__file__), "_support")
ROOT = os.path.abspath(os.path.sep)
def skip_if_windows(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if WINDOWS:
skip()
return fn(*args, **kwargs)
return wrapper
@contextmanager
def support_path():
sys.path.insert(0, support)
try:
yield
finally:
sys.path.pop(0)
def load(name):
with support_path():
imported = __import__(name)
return imported
def support_file(subpath):
with open(os.path.join(support, subpath)) as fd:
return fd.read()
@trap
def run(invocation, program=None, invoke=True):
"""
Run ``invocation`` via ``program``, returning output stream captures.
``program`` defaults to ``Program()``.
To skip automatically assuming the argv under test starts with ``"invoke
"``, say ``invoke=False``.
:returns: Two-tuple of ``stdout, stderr`` strings.
"""
if program is None:
program = Program()
if invoke:
invocation = "invoke {}".format(invocation)
program.run(invocation, exit=False)
return sys.stdout.getvalue(), sys.stderr.getvalue()
def expect(
invocation, out=None, err=None, program=None, invoke=True, test=None
):
"""
Run ``invocation`` via ``program`` and expect resulting output to match.
May give one or both of ``out``/``err`` (but not neither).
``program`` defaults to ``Program()``.
To skip automatically assuming the argv under test starts with ``"invoke
"``, say ``invoke=False``.
To customize the operator used for testing (default: equality), use
``test`` (which should be an assertion wrapper of some kind).
"""
stdout, stderr = run(invocation, program, invoke)
# Perform tests
if out is not None:
if test:
test(stdout, out)
else:
assert out == stdout
if err is not None:
if test:
test(stderr, err)
else:
assert err == stderr
# Guard against silent failures; since we say exit=False this is the only
# real way to tell if stuff died in a manner we didn't expect.
elif stderr:
assert False, "Unexpected stderr: {}".format(stderr)
return stdout, stderr
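# Hedged example of how tests typically drive the helper above; the task names
# and expected outputs are made up for illustration, not real fixtures:
#
#   expect("--list", out="Available tasks:\n\n  build\n")
#   expect("--help nope", err="No idea what 'nope' is!\n")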
class MockSubprocess(object):
def __init__(self, out="", err="", exit=0, isatty=None, autostart=True):
self.out_file = BytesIO(b(out))
self.err_file = BytesIO(b(err))
self.exit = exit
self.isatty = isatty
if autostart:
self.start()
def start(self):
# Start patchin'
self.popen = patch("invoke.runners.Popen")
Popen = self.popen.start()
self.read = patch("os.read")
read = self.read.start()
self.sys_stdin = patch("sys.stdin", new_callable=BytesIO)
sys_stdin = self.sys_stdin.start()
# Setup mocks
process = Popen.return_value
process.returncode = self.exit
process.stdout.fileno.return_value = 1
process.stderr.fileno.return_value = 2
# If requested, mock isatty to fake out pty detection
if self.isatty is not None:
sys_stdin.isatty = Mock(return_value=self.isatty)
def fakeread(fileno, count):
fd = {1: self.out_file, 2: self.err_file}[fileno]
return fd.read(count)
read.side_effect = fakeread
# Return the Popen mock as it's sometimes wanted inside tests
return Popen
def stop(self):
self.popen.stop()
self.read.stop()
self.sys_stdin.stop()
def mock_subprocess(out="", err="", exit=0, isatty=None, insert_Popen=False):
def decorator(f):
@wraps(f)
# We have to include a @patch here to trick pytest into ignoring
# the wrapped test's sometimes-there, sometimes-not mock_Popen arg. (It
# explicitly "skips ahead" past what it perceives as patch args, even
# though in our case those are not applying to the test function!)
# Doesn't matter what we patch as long as it doesn't
# actually get in our way.
@patch("invoke.runners.pty")
def wrapper(*args, **kwargs):
proc = MockSubprocess(
out=out, err=err, exit=exit, isatty=isatty, autostart=False
)
Popen = proc.start()
args = list(args)
args.pop() # Pop the dummy patch
if insert_Popen:
args.append(Popen)
try:
f(*args, **kwargs)
finally:
proc.stop()
return wrapper
return decorator
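# Hedged usage sketch for the decorator above (the test body is illustrative):
#
#   @mock_subprocess(out="hello\n", exit=0)
#   def test_captures_stdout():
#       assert Context().run("anything").stdout == "hello\n"
#
# With insert_Popen=True the wrapped test also receives the Popen mock as its
# final positional argument.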
def mock_pty(
out="",
err="",
exit=0,
isatty=None,
trailing_error=None,
skip_asserts=False,
insert_os=False,
be_childish=False,
os_close_error=False,
):
# Windows doesn't have ptys, so all the pty tests should be
# skipped anyway...
if WINDOWS:
return skip_if_windows
def decorator(f):
import fcntl
ioctl_patch = patch("invoke.runners.fcntl.ioctl", wraps=fcntl.ioctl)
@wraps(f)
@patch("invoke.runners.pty")
@patch("invoke.runners.os")
@ioctl_patch
def wrapper(*args, **kwargs):
args = list(args)
pty, os, ioctl = args.pop(), args.pop(), args.pop()
# Don't actually fork, but pretend we did (with "our" pid differing
# depending on be_childish) & give 'parent fd' of 3 (typically,
# first allocated non-stdin/out/err FD)
pty.fork.return_value = (12345 if be_childish else 0), 3
# We don't really need to care about waiting since not truly
# forking/etc, so here we just return a nonzero "pid" + sentinel
# wait-status value (used in some tests about WIFEXITED etc)
os.waitpid.return_value = None, Mock(name="exitstatus")
# Either or both of these may get called, depending...
os.WEXITSTATUS.return_value = exit
os.WTERMSIG.return_value = exit
# If requested, mock isatty to fake out pty detection
if isatty is not None:
os.isatty.return_value = isatty
out_file = BytesIO(b(out))
err_file = BytesIO(b(err))
def fakeread(fileno, count):
fd = {3: out_file, 2: err_file}[fileno]
ret = fd.read(count)
# If asked, fake a Linux-platform trailing I/O error.
if not ret and trailing_error:
raise trailing_error
return ret
os.read.side_effect = fakeread
if os_close_error:
os.close.side_effect = IOError
if insert_os:
args.append(os)
# Do the thing!!!
f(*args, **kwargs)
# Short-circuit if we raised an error in fakeread()
if trailing_error:
return
# Sanity checks to make sure the stuff we mocked, actually got ran!
pty.fork.assert_called_with()
# Skip rest of asserts if we pretended to be the child
if be_childish:
return
# Expect a get, and then later set, of terminal window size
assert ioctl.call_args_list[0][0][1] == termios.TIOCGWINSZ
assert ioctl.call_args_list[1][0][1] == termios.TIOCSWINSZ
if not skip_asserts:
for name in ("execve", "waitpid"):
assert getattr(os, name).called
# Ensure at least one of the exit status getters was called
assert os.WEXITSTATUS.called or os.WTERMSIG.called
# Ensure something closed the pty FD
os.close.assert_called_once_with(3)
return wrapper
return decorator
class _Dummy(Runner):
"""
Dummy runner subclass that does minimum work required to execute run().
It also serves as a convenient basic API checker; failure to update it to
match the current Runner API will cause TypeErrors, NotImplementedErrors,
and similar.
"""
# Neuter the input loop sleep, so tests aren't slow (at the expense of CPU,
# which isn't a problem for testing).
input_sleep = 0
def start(self, command, shell, env, timeout=None):
pass
def read_proc_stdout(self, num_bytes):
return ""
def read_proc_stderr(self, num_bytes):
return ""
def _write_proc_stdin(self, data):
pass
def close_proc_stdin(self):
pass
@property
def process_is_finished(self):
return True
def returncode(self):
return 0
def stop(self):
pass
@property
def timed_out(self):
return False
# Dummy command that will blow up if it ever truly hits a real shell.
_ = "nope"
# Runner that fakes ^C during subprocess exec
class _KeyboardInterruptingRunner(_Dummy):
def __init__(self, *args, **kwargs):
super(_KeyboardInterruptingRunner, self).__init__(*args, **kwargs)
self._interrupted = False
# Trigger KeyboardInterrupt during wait()
def wait(self):
if not self._interrupted:
self._interrupted = True
raise KeyboardInterrupt
# But also, after that has been done, pretend subprocess shutdown happened
# (or we will loop forever).
def process_is_finished(self):
return self._interrupted
class OhNoz(Exception):
pass
| bsd-2-clause | 2,217,632,598,268,301,600 | 28.78852 | 79 | 0.593915 | false | 4.069336 | true | false | false |
berkeley-stat159/project-beta | code/utils/tests/test_regression.py | 4 | 1054 | # import numpy as np
# import os
# import sys
# from numpy.testing import assert_almost_equal, assert_array_equal
# __file__ = os.getcwd()
# sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),"../utils/")))
# from regression import *
# n_splits = 10 # number of subdivisions of validation data for cross validation of ridge parameter (alpha)
# n_resamps = 10 # number of times to compute regression & prediction within training data (can be <= n_splits)
# chunk_sz = 6000 # number of voxels to fit at once. Memory-saving.
# pthr = 0.005 # Ridge parameter is chosen based on how many voxels are predicted above a correlation threshold
# # for each alpha value (technically it's slightly more complicated than that, see the code).
# # This p value sets that correlation threshold.
# def test_ridge_cv():
# out = regression.ridge_cv(efs,data_est_masked,val_fs=vfs,
# val_data=data_val_masked,alphas=alpha,n_resamps=n_resamps,
# n_splits=n_splits,chunk_sz=chunk_sz,pthr=pthr,is_verbose=True)
# | bsd-3-clause | 5,637,090,201,098,224,000 | 46.954545 | 112 | 0.704934 | false | 3.273292 | false | false | false |
fretboardfreak/space | lib/model/building.py | 1 | 6403 | # Copyright 2015 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import log10
from logging import debug
from lib.error import ObjectNotFound
from .resources import Resources
def get_all_building_names():
return [cls.name for cls in ALL_BUILDINGS]
def get_all_building_abbr():
return [cls.abbr for cls in ALL_BUILDINGS]
def get_building(building_name, level=None):
if isinstance(building_name, type):
building_name = building_name.__name__
debug('getting building type %s, lvl %s' % (building_name, level))
for building in ALL_BUILDINGS:
if (building.__name__.lower() == building_name.lower() or
building.name.lower() == building_name.lower() or
building.abbr.lower() == building_name.lower()):
if level is None:
return building
else:
return building(level)
else:
raise ObjectNotFound(name=building_name)
class BuildingRequirements(object):
def __init__(self, resources=None, research=None, buildings=None):
self.resources = resources
if not self.resources:
self.resources = Resources()
# research dict: not defined yet
self.research = research
if self.research is None:
self.research = dict()
# buildings dict: key=str, building name; value=int, building level
self.buildings = buildings
if self.buildings is None:
self.buildings = dict()
def __repr__(self):
return ("{}(Resources: {}, Research: {}, Buildings: {})".format(
self.__class__.__name__, repr(self.resources), repr(self.research),
repr(self.buildings)))
def __str__(self):
# remove 1st and last char from resources repr string, "(", ")"
ret_val = repr(self.resources)[1:-1]
if self.research:
ret_val += "\nResearch: {}".format(self.research)
if self.buildings:
ret_val += "\nBuildings: {}".format(self.buildings)
return ret_val
def __getstate__(self):
return (self.resources, self.research, self.buildings)
def __setstate__(self, state):
(self.resources, self.research, self.buildings) = state
class Building(object):
name = 'Building'
abbr = 'BLDNG'
def __init__(self, level=None):
if level is None:
self.level = 1
else:
self.level = level
self.under_construction = False
def _modifier(self):
"""The building's per time unit resource production."""
return Resources(ore=self.level)
def _construction_modifier(self):
"""The building's production capacity while under construction."""
return Resources()
@property
def modifier(self):
if self.under_construction:
return self._construction_modifier()
return self._modifier()
def electricity(self, sun_energy):
"""The building's per time unit electricity production/consumption."""
return 0
@property
def requirements(self):
return BuildingRequirements()
def __repr__(self):
return ("{}(level: {}, modifier: {}, under construction: {}"
"requirements: {})".format(
self.__class__.__name__, self.level, repr(self.modifier),
self.under_construction, repr(self.requirements)))
def __str__(self):
return ("{}: level: {}\n - modifier: {}\n - under construction:{}\n"
" - requirements: {})".format(
self.__class__.__name__, self.level,
repr(self.modifier)[1:-1], self.under_construction,
str(self.requirements).replace('\n', '\n' + ' ' * 8)))
def __eq__(self, other):
return (self.modifier == other.modifier and
self.level == other.level)
def __ne__(self, other):
return not self.__eq__(other)
def _compare(self, other):
"""Calculate an evenly weighted average of the atributes."""
mod = self.modifier.trade_value - other.modifier.trade_value
lev = self.level - other.level
avg = (lev + mod) / 2.0
return avg
def __lt__(self, other):
return self._compare(other) < 0
def __gt__(self, other):
return self._compare(other) > 0
def __le__(self, other):
return self._compare(other) <= 0
def __ge__(self, other):
return self._compare(other) >= 0
def __hash__(self):
return hash("%s%s" % (self.__class__, self.level))
@classmethod
def are_requirements_met(cls, build_site, level=None):
reqs = cls(level).requirements
if reqs.resources > build_site.resources:
return False
for bldng in reqs.buildings:
if (bldng not in build_site.buildings or
reqs.buildings[bldng] > build_site.buildings[bldng]):
return False
# TODO: implement research requirements here
return True
class Mine(Building):
name = 'Mine'
abbr = 'Mn'
def _modifier(self):
return Resources(ore=0.2*self.level, metal=0.025*self.level)
def electricity(self, sun_energy):
return -1 * pow(self.level, 2)
@property
def requirements(self):
return BuildingRequirements(resources=Resources(
ore=10+(2*(-1+self.level)), metal=-10+(5*(1+self.level))))
class SolarPowerPlant(Building):
name = 'Solar Power Plant'
abbr = 'SPP'
def _modifier(self):
return Resources()
def electricity(self, sun_energy):
return 10 * abs(log10(sun_energy)) * self.level
@property
def requirements(self):
return BuildingRequirements(resources=Resources(
ore=10+(5*self.level), metal=50+(6*self.level)))
ALL_BUILDINGS = [Mine, SolarPowerPlant]
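# Illustrative sketch of the formulas above (values follow directly from the
# class definitions; the sun_energy figure is an arbitrary example):
#
#   mine = get_building('Mn', level=2)       # -> Mine(2)
#   mine.modifier                            # Resources(ore=0.4, metal=0.05)
#   mine.electricity(sun_energy=100)         # -4 (mines consume power)
#   SolarPowerPlant(1).electricity(100)      # 10 * |log10(100)| * 1 = 20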
| apache-2.0 | 1,765,995,792,540,239,400 | 30.387255 | 79 | 0.601124 | false | 4.042298 | false | false | false |
CarlosCorreiaM16e/bismarck | bismarck_cli/utils/term.py | 1 | 4577 | import inspect
import os
import sys
#----------------------------------------------------------------------
class AnsiPrint( object ):
FG_BLACK = "\033[30m"
FG_BLUE = "\033[34m"
FG_GREEN = "\033[32m"
FG_CYAN = "\033[36m"
FG_RED = "\033[31m"
FG_MAGENTA = "\033[35m"
FG_YELLOW = "\033[33m"
FG_DARK_GRAY = "\033[1;30m"
RESET = "\033[0m"
def __init__( self, color ):
self.color = color
#----------------------------------------------------------------------
def getColoredText( self, text ):
return self.color + text + self.RESET
#----------------------------------------------------------------------
def getTerminalSize():
rows, columns = os.popen( 'stty size', 'r' ).read().split()
return ( int( rows ), int( columns ) )
#----------------------------------------------------------------------
def printLine( text, color = None ):
if color:
text = color + text + AnsiPrint.RESET
print text
#----------------------------------------------------------------------
def printLog( text ):
cwd = os.getcwd()
stack = inspect.stack()[1]
dir = stack[1]
if dir.startswith( os.getenv( 'HOME' ) ):
dir = dir[ len( cwd ) + 1 : ]
text = "*** in %s:%d:\n%s\n" % (dir, stack[2], text)
print text
#----------------------------------------------------------------------
def printDebug( text ):
cwd = os.getcwd()
stack = inspect.stack()[1]
dir = stack[1]
if dir.startswith( os.getenv( 'HOME' ) ):
dir = dir[ len( cwd ) + 1 : ]
text = "*** in %s:%d:\n%s\n" % (dir, stack[2], text )
# text = "%s*** in %s:%d:\n%s%s\n" % (
# AnsiPrint.FG_MAGENTA, dir, stack[2], text, AnsiPrint.RESET )
print text
#----------------------------------------------------------------------
def printWarn( text ):
cwd = os.getcwd()
stack = inspect.stack()[2]
dir = stack[1]
if dir.startswith( os.getenv( 'HOME' ) ):
dir = dir[ len( cwd ) + 1 : ]
text = "%s*** in %s:%d:\n%s%s" % (
AnsiPrint.FG_RED, dir, stack[2], text, AnsiPrint.RESET )
print text
#----------------------------------------------------------------------
def printDeprecated( text ):
cwd = os.getcwd()
stack = inspect.stack()[2]
dir = stack[1]
if dir.startswith( os.getenv( 'HOME' ) ):
dir = dir[ len( cwd ) + 1 : ]
text = "%s!!! DEPRECATION in %s:%d: %s%s" % (
AnsiPrint.FG_RED, dir, stack[2], text, AnsiPrint.RESET )
print text
#----------------------------------------------------------------------
def formatStorage( st, indent = '' ):
if isinstance( st, dict ):
text = indent + '{\n'
indent += ' '
first = True
for k in st.keys():
v = st[k]
# printLog( 'v:' + repr( v ) )
if v and repr( v ).startswith( '<' ):
continue
if first:
first = False
else:
text += ',\n'
text += indent + k + ': ' + formatStorage( v, indent )
text += '\n'
text += indent + '}\n'
return text
else:
return str( st )
#----------------------------------------------------------------------
def printLogStorage( storage ):
text = formatStorage( storage )
stack = inspect.stack()[1]
text = "*** in %s:%d:\n%s" % (stack[1], stack[2], text)
print text
#----------------------------------------------------------------------
def printLogDict( d, indent = 0, dictName = '' ):
cwd = os.getcwd()
stack = inspect.stack()[1]
dir = stack[1]
if dir.startswith( os.getenv( 'HOME' ) ):
dir = dir[ len( cwd ) + 1 : ]
text = "*** in %s:%d:" % (dir, stack[2] )
print text
print( 'dictName: %s' % dictName )
printDict( d, indent )
#----------------------------------------------------------------------
def printDict( d, indent = 0 ):
iStr = ' ' * indent
kList = d.keys()
for k in kList:
val = d[k]
if isinstance( val, dict ):
print '%s%s:' % (iStr, k, )
printDict( val, indent + 1 )
else:
print '%s%s: %s' % (iStr, k, repr( val ))
#----------------------------------------------------------------------
def printChars( text, color = None ):
if color:
text = color + text + AnsiPrint.RESET
sys.stdout.write( text )
sys.stdout.flush()
#----------------------------------------------------------------------
if __name__ == "__main__":
printDebug( 'ola' )
| gpl-3.0 | -3,800,203,227,403,442,700 | 30.784722 | 75 | 0.392834 | false | 3.80782 | false | false | false |
GPflow/GPflow | doc/source/notebooks/basics/classification.pct.py | 1 | 9219 | # ---
# jupyter:
# jupytext:
# formats: ipynb,.pct.py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Basic (binary) GP classification model
#
#
# This notebook shows how to build a GP classification model using variational inference.
# Here we consider binary (two-class, 0 vs. 1) classification only (there is a separate notebook on [multiclass classification](../advanced/multiclass_classification.ipynb)).
# We first look at a one-dimensional example, and then show how you can adapt this when the input space is two-dimensional.
# %%
import numpy as np
import gpflow
import tensorflow as tf
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rcParams["figure.figsize"] = (8, 4)
# %% [markdown]
# ## One-dimensional example
#
# First of all, let's have a look at the data. `X` and `Y` denote the input and output values.
# **NOTE:** `X` and `Y` must be two-dimensional NumPy arrays of shape $N \times D$ and $N \times 1$ respectively, where $N$ is the number of data points and $D$ is the number of input dimensions/features (so both have one row per data point):
# %%
X = np.genfromtxt("data/classif_1D_X.csv").reshape(-1, 1)
Y = np.genfromtxt("data/classif_1D_Y.csv").reshape(-1, 1)
plt.figure(figsize=(10, 6))
_ = plt.plot(X, Y, "C3x", ms=8, mew=2)
# %% [markdown]
# ### Reminders on GP classification
#
# For a binary classification model using GPs, we can simply use a `Bernoulli` likelihood. The details of the generative model are as follows:
#
# __1. Define the latent GP:__ we start from a Gaussian process $f \sim \mathcal{GP}(0, k(\cdot, \cdot'))$:
# %%
# build the kernel and covariance matrix
k = gpflow.kernels.Matern52(variance=20.0)
x_grid = np.linspace(0, 6, 200).reshape(-1, 1)
K = k(x_grid)
# sample from a multivariate normal
rng = np.random.RandomState(6)
L = np.linalg.cholesky(K)
f_grid = np.dot(L, rng.randn(200, 5))
plt.plot(x_grid, f_grid, "C0", linewidth=1)
_ = plt.plot(x_grid, f_grid[:, 1], "C0", linewidth=2)
# %% [markdown]
# __2. Squash them to $[0, 1]$:__ the samples of the GP are mapped to $[0, 1]$.
# By default, GPflow uses the standard normal cumulative distribution function (inverse probit function): $p(x) = \Phi(f(x)) = \frac{1}{2} \left(1 + \operatorname{erf}\left(f(x) / \sqrt{2}\right)\right)$.
# (This choice has the advantage that predictive mean, variance and density can be computed analytically, but any choice of invlink is possible, e.g. the logit $p(x) = \frac{\exp(f(x))}{1 + \exp(f(x))}$. Simply pass another function as the `invlink` argument to the `Bernoulli` likelihood class.)
# %%
def invlink(f):
return gpflow.likelihoods.Bernoulli().invlink(f).numpy()
p_grid = invlink(f_grid)
plt.plot(x_grid, p_grid, "C1", linewidth=1)
_ = plt.plot(x_grid, p_grid[:, 1], "C1", linewidth=2)
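# %%
# (Added illustration, not part of the original notebook.) As noted above, any
# squashing function can be passed as the `invlink` argument of the Bernoulli
# likelihood -- for example the logit link via tf.sigmoid:
logit_likelihood = gpflow.likelihoods.Bernoulli(invlink=tf.sigmoid)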
# %% [markdown]
# __3. Sample from a Bernoulli:__ for each observation point $X_i$, the class label $Y_i \in \{0, 1\}$ is generated by sampling from a Bernoulli distribution $Y_i \sim \mathcal{B}(g(f(X_i)))$, where $g$ is the inverse link function from step 2.
# %%
# Select some input locations
ind = rng.randint(0, 200, (30,))
X_gen = x_grid[ind]
# evaluate probability and get Bernoulli draws
p = p_grid[ind, 1:2]
Y_gen = rng.binomial(1, p)
# plot
plt.plot(x_grid, p_grid[:, 1], "C1", linewidth=2)
plt.plot(X_gen, p, "C1o", ms=6)
_ = plt.plot(X_gen, Y_gen, "C3x", ms=8, mew=2)
# %% [markdown]
# ### Implementation with GPflow
#
# For the model described above, the posterior $f(x)|Y$ (say $p$) is not Gaussian any more and does not have a closed-form expression.
# A common approach is then to look for the best approximation of this posterior by a tractable distribution (say $q$) such as a Gaussian distribution.
# In variational inference, the quality of an approximation is measured by the Kullback-Leibler divergence $\mathrm{KL}[q \| p]$.
# For more details on this model, see Nickisch and Rasmussen (2008).
#
# The inference problem is thus turned into an optimization problem: finding the best parameters for $q$.
# In our case, we introduce $U \sim \mathcal{N}(q_\mu, q_\Sigma)$, and we choose $q$ to have the same distribution as $f | f(X) = U$.
# The parameters $q_\mu$ and $q_\Sigma$ can be seen as parameters of $q$, which can be optimized in order to minimise $\mathrm{KL}[q \| p]$.
#
# This variational inference model is called `VGP` in GPflow:
# %%
m = gpflow.models.VGP(
(X, Y), likelihood=gpflow.likelihoods.Bernoulli(), kernel=gpflow.kernels.Matern52()
)
opt = gpflow.optimizers.Scipy()
opt.minimize(m.training_loss, variables=m.trainable_variables)
# %% [markdown]
# We can now inspect the result of the optimization with `gpflow.utilities.print_summary(m)`:
# %%
gpflow.utilities.print_summary(m, fmt="notebook")
# %% [markdown]
# In this table, the first two lines are associated with the kernel parameters, and the last two correspond to the variational parameters.
# **NOTE:** In practice, $q_\Sigma$ is actually parameterized by its lower-triangular square root $q_\Sigma = q_\text{sqrt} q_\text{sqrt}^T$ in order to ensure its positive-definiteness.
#
# For more details on how to handle models in GPflow (getting and setting parameters, fixing some of them during optimization, using priors, and so on), see [Manipulating GPflow models](../understanding/models.ipynb).
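# %%
# (Added illustration, not part of the original notebook.) The variational
# parameters live on the model as `q_mu` and `q_sqrt`; `q_sqrt` is the
# lower-triangular square root of $q_\Sigma$ mentioned in the note above.
print(m.q_mu.numpy().shape, m.q_sqrt.numpy().shape)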
# %% [markdown]
# ### Predictions
#
# Finally, we will see how to use model predictions to plot the resulting model.
# We will replicate the figures of the generative model above, but using the approximate posterior distribution given by the model.
# %%
plt.figure(figsize=(12, 8))
# bubble fill the predictions
mu, var = m.predict_f(x_grid)
plt.fill_between(
x_grid.flatten(),
np.ravel(mu + 2 * np.sqrt(var)),
np.ravel(mu - 2 * np.sqrt(var)),
alpha=0.3,
color="C0",
)
# plot samples
tf.random.set_seed(6)
samples = m.predict_f_samples(x_grid, 10).numpy().squeeze().T
plt.plot(x_grid, samples, "C0", lw=1)
# plot p-samples
p = invlink(samples)
plt.plot(x_grid, p, "C1", lw=1)
# plot data
plt.plot(X, Y, "C3x", ms=8, mew=2)
plt.ylim((-3, 3))
# %% [markdown]
# ## Two-dimensional example
#
# In this section we will use the following data:
# %%
X = np.loadtxt("data/banana_X_train", delimiter=",")
Y = np.loadtxt("data/banana_Y_train", delimiter=",").reshape(-1, 1)
mask = Y[:, 0] == 1
plt.figure(figsize=(6, 6))
plt.plot(X[mask, 0], X[mask, 1], "oC0", mew=0, alpha=0.5)
_ = plt.plot(X[np.logical_not(mask), 0], X[np.logical_not(mask), 1], "oC1", mew=0, alpha=0.5)
# %% [markdown]
# The model definition is the same as above; the only important difference is that we now specify that the kernel operates over a two-dimensional input space:
# %%
m = gpflow.models.VGP(
(X, Y), kernel=gpflow.kernels.SquaredExponential(), likelihood=gpflow.likelihoods.Bernoulli()
)
opt = gpflow.optimizers.Scipy()
opt.minimize(
m.training_loss, variables=m.trainable_variables, options=dict(maxiter=25), method="L-BFGS-B"
)
# in practice, the optimization needs around 250 iterations to converge
# %% [markdown]
# We can now plot the predicted decision boundary between the two classes.
# To do so, we can equivalently plot the contour lines $E[f(x)|Y]=0$, or $E[g(f(x))|Y]=0.5$.
# We will do the latter, because it allows us to introduce the `predict_y` function, which returns the mean and variance at test points:
# %%
x_grid = np.linspace(-3, 3, 40)
xx, yy = np.meshgrid(x_grid, x_grid)
Xplot = np.vstack((xx.flatten(), yy.flatten())).T
p, _ = m.predict_y(Xplot) # here we only care about the mean
plt.figure(figsize=(7, 7))
plt.plot(X[mask, 0], X[mask, 1], "oC0", mew=0, alpha=0.5)
plt.plot(X[np.logical_not(mask), 0], X[np.logical_not(mask), 1], "oC1", mew=0, alpha=0.5)
_ = plt.contour(
xx,
yy,
p.numpy().reshape(*xx.shape),
[0.5], # plot the p=0.5 contour line only
colors="k",
linewidths=1.8,
zorder=100,
)
# %% [markdown]
# ## Further reading
#
# There are dedicated notebooks giving more details on how to manipulate [models](../understanding/models.ipynb) and [kernels](../advanced/kernels.ipynb).
#
# This notebook covers only very basic classification models. You might also be interested in:
# * [Multiclass classification](../advanced/multiclass_classification.ipynb) if you have more than two classes.
# * [Sparse models](../advanced/gps_for_big_data.ipynb). The models above have one inducing variable $U_i$ per observation point $X_i$, which does not scale to large datasets. Sparse Variational GP (SVGP) is an efficient alternative where the variables $U_i$ are defined at some inducing input locations $Z_i$ that can also be optimized.
# * [Exact inference](../advanced/mcmc.ipynb). We have seen that variational inference provides an approximation to the posterior. GPflow also supports exact inference using Markov Chain Monte Carlo (MCMC) methods, and the kernel parameters can also be assigned prior distributions in order to avoid point estimates.
#
# ## References
#
# Hannes Nickisch and Carl Edward Rasmussen. 'Approximations for binary Gaussian process classification'. *Journal of Machine Learning Research* 9(Oct):2035--2078, 2008.
| apache-2.0 | 7,272,020,779,500,549,000 | 38.566524 | 341 | 0.694869 | false | 3.019653 | false | false | false |
honzajavorek/tipi | tipi/repl.py | 1 | 2146 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from tipi.compat import unicode
from tipi.html import HTMLFragment
__all__ = ('Replacement', 'replace')
class Replacement(object):
"""Replacement representation."""
skipped_tags = (
'code', 'kbd', 'pre', 'samp', 'script', 'style', 'tt', 'xmp'
)
textflow_tags = (
'b', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'cite',
'dfn', 'em', 'kbd', 'strong', 'samp', 'var', 'a', 'bdo', 'q', 'script',
'span', 'sub', 'sup'
)
def __init__(self, pattern, replacement):
self.pattern = pattern
self.replacement = replacement
def _is_replacement_allowed(self, s):
"""Tests whether replacement is allowed on given piece of HTML text."""
if any(tag in s.parent_tags for tag in self.skipped_tags):
return False
if any(tag not in self.textflow_tags for tag in s.involved_tags):
return False
return True
def replace(self, html):
"""Perform replacements on given HTML fragment."""
self.html = html
text = html.text()
positions = []
def perform_replacement(match):
offset = sum(positions)
start, stop = match.start() + offset, match.end() + offset
s = self.html[start:stop]
if self._is_replacement_allowed(s):
repl = match.expand(self.replacement)
self.html[start:stop] = repl
else:
repl = match.group() # no replacement takes place
positions.append(match.end())
return repl
while True:
if positions:
text = text[positions[-1]:]
text, n = self.pattern.subn(perform_replacement, text, count=1)
if not n: # all is already replaced
break
def replace(html, replacements=None):
"""Performs replacements on given HTML string."""
if not replacements:
return html # no replacements
html = HTMLFragment(html)
for r in replacements:
r.replace(html)
return unicode(html)
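# Minimal usage sketch (illustrative only; the pattern/replacement pair below
# is an assumption, not part of any shipped rule set):
#
#   import re
#   rules = [Replacement(re.compile(r'\.\.\.'), u'\u2026')]
#   replace(u'<p>Wait...</p>', replacements=rules)   # -> u'<p>Wait\u2026</p>'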
| mit | -7,513,477,758,203,943,000 | 28 | 79 | 0.560578 | false | 4.041431 | false | false | false |
abawchen/leetcode | solutions/028_remove_duplicated_from_sorted_array.py | 1 | 1614 | # Given a sorted array, remove the duplicates in place such that each element appear only once and return the new length.
# Do not allocate extra space for another array, you must do this in place with constant memory.
# For example,
# Given input array nums = [1,1,2],
# Your function should return length = 2, with the first two elements of nums being 1 and 2 respectively. It doesn't matter what you leave beyond the new length.
class Solution:
# @param {integer[]} nums
# @return {integer}
def removeDuplicates(self, nums):
        # O(n) time, O(1) extra space: keep a tail pointer and copy each new value forward in place.
if not nums:
return 0
tail = 0
for i in xrange(1, len(nums)):
if nums[i] != nums[tail]:
tail += 1
nums[tail] = nums[i]
return tail + 1
# # O(n?)
# i = 0
# while i < len(nums) - 1:
# if nums[i] == nums[i+1]:
# del nums[i]
# else:
# i += 1
# return len(nums)
# WA
# nums[:] = list(set(nums))
# return len(nums)
import time
start_time = time.time()
s = Solution()
print s.removeDuplicates([])
print s.removeDuplicates([1])
print s.removeDuplicates([1, 1, 1])
print s.removeDuplicates([1, 2])
print s.removeDuplicates([1, 1, 2])
print s.removeDuplicates([1, 1, 2, 2])
print s.removeDuplicates([1, 1, 2, 2, 2, 2, 3])
print s.removeDuplicates([0, 1, 1, 2, 2, 2, 2, 3])
print s.removeDuplicates([0, 0, 1, 1, 2, 2, 2, 2, 3, 8])
print s.removeDuplicates([0, 0, 1, 1, 2, 2, 2, 2, 3, 8, 9, 9, 10, 10])
print("--- %s seconds ---" % (time.time() - start_time)) | mit | -9,218,560,894,084,267,000 | 29.471698 | 162 | 0.566914 | false | 3.215139 | false | false | false |
autodesk-cloud/ochonetes | images/portal/resources/toolset/toolset/commands/info.py | 1 | 2028 | #
# Copyright (c) 2015 Autodesk Inc.
# All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
from toolset.io import fire, run
from toolset.tool import Template
#: Our ochopod logger.
logger = logging.getLogger('ochopod')
def go():
class _Tool(Template):
help = \
'''
            Displays detailed information for the specified cluster(s).
'''
tag = 'info'
def customize(self, parser):
parser.add_argument('clusters', type=str, nargs='*', default='*', help='1+ clusters (can be a glob pattern, e.g foo*)')
def body(self, args, proxy):
for token in args.clusters:
def _query(zk):
replies = fire(zk, token, 'info')
return len(replies), {key: hints for key, (_, hints, code) in replies.items() if code == 200}
total, js = run(proxy, _query)
if not total:
logger.info('\n<%s> -> no pods found' % token)
else:
#
# - justify & format the whole thing in a nice set of columns
#
pct = (len(js) * 100) / total
unrolled = ['%s\n%s\n' % (k, json.dumps(js[k], indent=4, separators=(',', ': '))) for k in sorted(js.keys())]
logger.info('\n<%s> -> %d%% replies (%d pods total) ->\n\n- %s' % (token, pct, total, '\n- '.join(unrolled)))
return _Tool() | apache-2.0 | 1,971,570,965,618,026,000 | 30.215385 | 131 | 0.569034 | false | 3.930233 | false | false | false |
fosstp/fosstp | alembic/versions/e32bf2e2b443_init.py | 1 | 2881 | """init
Revision ID: e32bf2e2b443
Revises:
Create Date: 2016-11-23 14:33:06.308794
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e32bf2e2b443'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('about',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('content', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('forum_categories',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('description', sa.Text(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
users_table = op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('password', sa.String(length=255), nullable=False),
sa.Column('group', sa.String(length=255), nullable=False),
sa.Column('email', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('workshop',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('link', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('forum_topics',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=255), nullable=False),
sa.Column('content', sa.Text(), nullable=False),
sa.Column('category_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['category_id'], ['forum_categories.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('forum_replies',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('content', sa.Text(), nullable=False),
sa.Column('forum_topic_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['forum_topic_id'], ['forum_topics.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
op.bulk_insert(users_table,
[
{'id': 1, 'name': 'admin',
'password': '$2b$12$OUCC1PDBsg305zzY5KaR5uR14./Ohsopd1K2usCb05iewLtY1Bb6S',
'group': 'admin', 'email': '[email protected]'},
]
)
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('forum_replies')
op.drop_table('forum_topics')
op.drop_table('workshop')
op.drop_table('users')
op.drop_table('forum_categories')
op.drop_table('about')
### end Alembic commands ###
| mit | 6,698,585,677,987,855,000 | 34.567901 | 97 | 0.636932 | false | 3.373536 | false | false | false |
BetaRavener/micropython-hw-lib | PCF8574/pcf8574.py | 1 | 1583 | # TODO: Implement input interupts if needed
class PCF8574:
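    # The PCF8574 exposes quasi-bidirectional pins: a pin is treated as an input by writing it
    # high (tracked in _input_mask) before reading the port, while output states live in _output.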
def __init__(self, i2c, address):
self._i2c = i2c
self._address = address
self._input = 0 # Buffers the result of read in memory
self._input_mask = 0 # Mask specifying which pins are set as input
self._output = 0 # The state of pins set for output
self._write()
def _read(self):
self._input = self._i2c.readfrom(self._address, 1)[0] & self._input_mask
def _write(self):
self._i2c.writeto(self._address, bytes([self._output | self._input_mask]))
def read(self, pin):
bit_mask = 1 << pin
self._input_mask |= bit_mask
self._output &= ~bit_mask
self._write() # Update input mask before reading
self._read()
return (self._input & bit_mask) >> pin
def read8(self):
self._input_mask = 0xFF
self._output = 0
self._write() # Update input mask before reading
self._read()
return self._input
def write(self, pin, value):
bit_mask = 1 << pin
self._input_mask &= ~bit_mask
self._output = self._output | bit_mask if value else self._output & (~bit_mask)
self._write()
def write8(self, value):
self._input_mask = 0
self._output = value
self._write()
def set(self):
self.write8(0xFF)
def clear(self):
self.write8(0x0)
def toggle(self, pin):
bit_mask = 1 << pin
self._input_mask &= ~bit_mask
self._output ^= bit_mask
self._write()
| mit | -60,218,181,321,789,170 | 28.867925 | 87 | 0.557802 | false | 3.630734 | false | false | false |
cloudbau/glance | glance/db/simple/api.py | 1 | 25713 | # Copyright 2012 OpenStack, Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import functools
from glance.common import exception
import glance.openstack.common.log as logging
from glance.openstack.common import timeutils
from glance.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
DATA = {
'images': {},
    'members': [],
'tags': {},
'locations': [],
'tasks': {},
}
def log_call(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
LOG.info(_('Calling %(funcname)s: args=%(args)s, kwargs=%(kwargs)s') %
{"funcname": func.__name__,
"args": args,
"kwargs": kwargs})
output = func(*args, **kwargs)
LOG.info(_('Returning %(funcname)s: %(output)s') %
{"funcname": func.__name__,
"output": output})
return output
return wrapped
def reset():
global DATA
DATA = {
'images': {},
'members': [],
'tags': {},
'locations': [],
'tasks': {},
}
def setup_db_env(*args, **kwargs):
"""
Setup global environment configuration variables.
We have no connection-oriented environment variables, so this is a NOOP.
"""
pass
def clear_db_env(*args, **kwargs):
"""
    Clear global environment configuration variables.
We have no connection-oriented environment variables, so this is a NOOP.
"""
pass
def _get_session():
return DATA
def _image_locations_format(image_id, value, meta_data):
dt = timeutils.utcnow()
return {
'id': uuidutils.generate_uuid(),
'image_id': image_id,
'created_at': dt,
'updated_at': dt,
'deleted_at': None,
'deleted': False,
'url': value,
'metadata': meta_data,
}
def _image_property_format(image_id, name, value):
return {
'image_id': image_id,
'name': name,
'value': value,
'deleted': False,
'deleted_at': None,
}
def _image_member_format(image_id, tenant_id, can_share, status='pending'):
dt = timeutils.utcnow()
return {
'id': uuidutils.generate_uuid(),
'image_id': image_id,
'member': tenant_id,
'can_share': can_share,
'status': status,
'created_at': dt,
'updated_at': dt,
}
def _task_format(task_id, **values):
dt = timeutils.utcnow()
task = {
'id': task_id,
'type': 'import',
'status': 'pending',
'input': None,
'result': None,
'owner': None,
'message': None,
'expires_at': None,
'created_at': dt,
'updated_at': dt,
'deleted_at': None,
'deleted': False,
}
task.update(values)
return task
def _image_format(image_id, **values):
dt = timeutils.utcnow()
image = {
'id': image_id,
'name': None,
'owner': None,
'locations': [],
'status': 'queued',
'protected': False,
'is_public': False,
'container_format': None,
'disk_format': None,
'min_ram': 0,
'min_disk': 0,
'size': None,
'checksum': None,
'tags': [],
'created_at': dt,
'updated_at': dt,
'deleted_at': None,
'deleted': False,
}
locations = values.pop('locations', None)
if locations is not None:
locations = [
_image_locations_format(image_id, location['url'],
location['metadata'])
for location in locations
]
image['locations'] = locations
#NOTE(bcwaldon): store properties as a list to match sqlalchemy driver
properties = values.pop('properties', {})
properties = [{'name': k,
'value': v,
'image_id': image_id,
'deleted': False} for k, v in properties.items()]
image['properties'] = properties
image.update(values)
return image
def _filter_images(images, filters, context,
status='accepted', is_public=None,
admin_as_user=False):
filtered_images = []
if 'properties' in filters:
prop_filter = filters.pop('properties')
filters.update(prop_filter)
if status == 'all':
status = None
visibility = filters.pop('visibility', None)
for image in images:
member = image_member_find(context, image_id=image['id'],
member=context.owner, status=status)
is_member = len(member) > 0
has_ownership = context.owner and image['owner'] == context.owner
can_see = (image['is_public'] or has_ownership or is_member or
(context.is_admin and not admin_as_user))
if not can_see:
continue
if visibility:
if visibility == 'public':
if not image['is_public']:
continue
elif visibility == 'private':
if image['is_public']:
continue
if not (has_ownership or (context.is_admin
and not admin_as_user)):
continue
elif visibility == 'shared':
if not is_member:
continue
if is_public is not None:
if not image['is_public'] == is_public:
continue
add = True
for k, value in filters.iteritems():
key = k
if k.endswith('_min') or k.endswith('_max'):
key = key[0:-4]
try:
value = int(value)
except ValueError:
msg = _("Unable to filter on a range "
"with a non-numeric value.")
raise exception.InvalidFilterRangeValue(msg)
if k.endswith('_min'):
add = image.get(key) >= value
elif k.endswith('_max'):
add = image.get(key) <= value
elif k != 'is_public' and image.get(k) is not None:
add = image.get(key) == value
elif k == 'tags':
filter_tags = value
image_tags = image_tag_get_all(context, image['id'])
for tag in filter_tags:
if tag not in image_tags:
add = False
break
else:
properties = {}
for p in image['properties']:
properties = {p['name']: p['value'],
'deleted': p['deleted']}
add = (properties.get(key) == value and
properties.get('deleted') is False)
if not add:
break
if add:
filtered_images.append(image)
return filtered_images
def _do_pagination(context, images, marker, limit, show_deleted,
status='accepted'):
start = 0
end = -1
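    # 'marker' is the id of the last image on the previous page; paging resumes just after it.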
if marker is None:
start = 0
else:
# Check that the image is accessible
_image_get(context, marker, force_show_deleted=show_deleted,
status=status)
for i, image in enumerate(images):
if image['id'] == marker:
start = i + 1
break
else:
raise exception.NotFound()
end = start + limit if limit is not None else None
return images[start:end]
def _sort_images(images, sort_key, sort_dir):
reverse = False
if images and not (sort_key in images[0]):
raise exception.InvalidSortKey()
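    # Sort primarily on sort_key (None sorts as an empty string), with created_at and id as tie-breakers.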
keyfn = lambda x: (x[sort_key] if x[sort_key] is not None else '',
x['created_at'], x['id'])
reverse = sort_dir == 'desc'
images.sort(key=keyfn, reverse=reverse)
return images
def _image_get(context, image_id, force_show_deleted=False, status=None):
try:
image = DATA['images'][image_id]
image['locations'] = _image_location_get_all(image_id)
except KeyError:
LOG.info(_('Could not find image %s') % image_id)
raise exception.NotFound()
if image['deleted'] and not (force_show_deleted or context.show_deleted):
LOG.info(_('Unable to get deleted image'))
raise exception.NotFound()
if not is_image_visible(context, image):
LOG.info(_('Unable to get unowned image'))
raise exception.Forbidden("Image not visible to you")
return image
@log_call
def image_get(context, image_id, session=None, force_show_deleted=False):
image = _image_get(context, image_id, force_show_deleted)
image = _normalize_locations(image)
return copy.deepcopy(image)
@log_call
def image_get_all(context, filters=None, marker=None, limit=None,
sort_key='created_at', sort_dir='desc',
member_status='accepted', is_public=None,
admin_as_user=False):
filters = filters or {}
images = DATA['images'].values()
images = _filter_images(images, filters, context, member_status,
is_public, admin_as_user)
images = _sort_images(images, sort_key, sort_dir)
images = _do_pagination(context, images, marker, limit,
filters.get('deleted'))
for image in images:
image['locations'] = _image_location_get_all(image['id'])
_normalize_locations(image)
return images
@log_call
def image_property_create(context, values):
image = _image_get(context, values['image_id'])
prop = _image_property_format(values['image_id'],
values['name'],
values['value'])
image['properties'].append(prop)
return prop
@log_call
def image_property_delete(context, prop_ref, image_ref, session=None):
prop = None
for p in DATA['images'][image_ref]['properties']:
if p['name'] == prop_ref:
prop = p
if not prop:
raise exception.NotFound()
prop['deleted_at'] = timeutils.utcnow()
prop['deleted'] = True
return prop
@log_call
def image_member_find(context, image_id=None, member=None, status=None):
filters = []
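    # Build a list of predicates; a membership record is returned only if it satisfies all of them.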
images = DATA['images']
members = DATA['members']
def is_visible(member):
return (member['member'] == context.owner or
images[member['image_id']]['owner'] == context.owner)
if not context.is_admin:
filters.append(is_visible)
if image_id is not None:
filters.append(lambda m: m['image_id'] == image_id)
if member is not None:
filters.append(lambda m: m['member'] == member)
if status is not None:
filters.append(lambda m: m['status'] == status)
for f in filters:
members = filter(f, members)
return [copy.deepcopy(m) for m in members]
@log_call
def image_member_create(context, values):
member = _image_member_format(values['image_id'],
values['member'],
values.get('can_share', False),
values.get('status', 'pending'))
global DATA
DATA['members'].append(member)
return copy.deepcopy(member)
@log_call
def image_member_update(context, member_id, values):
global DATA
for member in DATA['members']:
if (member['id'] == member_id):
member.update(values)
member['updated_at'] = timeutils.utcnow()
return copy.deepcopy(member)
else:
raise exception.NotFound()
@log_call
def image_member_delete(context, member_id):
global DATA
for i, member in enumerate(DATA['members']):
if (member['id'] == member_id):
del DATA['members'][i]
break
else:
raise exception.NotFound()
def _image_locations_set(image_id, locations):
global DATA
image = DATA['images'][image_id]
for location in image['locations']:
location['deleted'] = True
location['deleted_at'] = timeutils.utcnow()
for i, location in enumerate(DATA['locations']):
if image_id == location['image_id'] and location['deleted'] is False:
del DATA['locations'][i]
for location in locations:
location_ref = _image_locations_format(image_id, value=location['url'],
meta_data=location['metadata'])
DATA['locations'].append(location_ref)
image['locations'].append(location_ref)
def _normalize_locations(image):
undeleted_locations = filter(lambda x: not x['deleted'],
image['locations'])
image['locations'] = [{'url': loc['url'],
'metadata': loc['metadata']}
for loc in undeleted_locations]
return image
def _image_location_get_all(image_id):
location_data = []
for location in DATA['locations']:
if image_id == location['image_id']:
location_data.append(location)
return location_data
@log_call
def image_create(context, image_values):
global DATA
image_id = image_values.get('id', uuidutils.generate_uuid())
if image_id in DATA['images']:
raise exception.Duplicate()
if 'status' not in image_values:
raise exception.Invalid('status is a required attribute')
allowed_keys = set(['id', 'name', 'status', 'min_ram', 'min_disk', 'size',
'checksum', 'locations', 'owner', 'protected',
'is_public', 'container_format', 'disk_format',
'created_at', 'updated_at', 'deleted_at', 'deleted',
'properties', 'tags'])
incorrect_keys = set(image_values.keys()) - allowed_keys
if incorrect_keys:
raise exception.Invalid(
'The keys %s are not valid' % str(incorrect_keys))
image = _image_format(image_id, **image_values)
DATA['images'][image_id] = image
location_data = image_values.get('locations', None)
if location_data is not None:
_image_locations_set(image_id, location_data)
DATA['tags'][image_id] = image.pop('tags', [])
return _normalize_locations(copy.deepcopy(image))
@log_call
def image_update(context, image_id, image_values, purge_props=False):
global DATA
try:
image = DATA['images'][image_id]
except KeyError:
raise exception.NotFound()
location_data = image_values.pop('locations', None)
if location_data is not None:
_image_locations_set(image_id, location_data)
# replace values for properties that already exist
new_properties = image_values.pop('properties', {})
for prop in image['properties']:
if prop['name'] in new_properties:
prop['value'] = new_properties.pop(prop['name'])
elif purge_props:
# this matches weirdness in the sqlalchemy api
prop['deleted'] = True
    # add in any completely new properties
image['properties'].extend([{'name': k, 'value': v,
'image_id': image_id, 'deleted': False}
for k, v in new_properties.items()])
image['updated_at'] = timeutils.utcnow()
image.update(image_values)
DATA['images'][image_id] = image
return _normalize_locations(image)
@log_call
def image_destroy(context, image_id):
global DATA
try:
DATA['images'][image_id]['deleted'] = True
DATA['images'][image_id]['deleted_at'] = timeutils.utcnow()
_image_locations_set(image_id, [])
for prop in DATA['images'][image_id]['properties']:
image_property_delete(context, prop['name'], image_id)
members = image_member_find(context, image_id=image_id)
for member in members:
image_member_delete(context, member['id'])
tags = image_tag_get_all(context, image_id)
for tag in tags:
image_tag_delete(context, image_id, tag)
_normalize_locations(DATA['images'][image_id])
return copy.deepcopy(DATA['images'][image_id])
except KeyError:
raise exception.NotFound()
@log_call
def image_tag_get_all(context, image_id):
return DATA['tags'].get(image_id, [])
@log_call
def image_tag_get(context, image_id, value):
tags = image_tag_get_all(context, image_id)
if value in tags:
return value
else:
raise exception.NotFound()
@log_call
def image_tag_set_all(context, image_id, values):
global DATA
DATA['tags'][image_id] = values
@log_call
def image_tag_create(context, image_id, value):
global DATA
DATA['tags'][image_id].append(value)
return value
@log_call
def image_tag_delete(context, image_id, value):
global DATA
try:
DATA['tags'][image_id].remove(value)
except ValueError:
raise exception.NotFound()
def is_image_mutable(context, image):
"""Return True if the image is mutable in this context."""
# Is admin == image mutable
if context.is_admin:
return True
# No owner == image not mutable
if image['owner'] is None or context.owner is None:
return False
# Image only mutable by its owner
return image['owner'] == context.owner
def is_image_sharable(context, image, **kwargs):
"""Return True if the image can be shared to others in this context."""
# Is admin == image sharable
if context.is_admin:
return True
# Only allow sharing if we have an owner
if context.owner is None:
return False
# If we own the image, we can share it
if context.owner == image['owner']:
return True
# Let's get the membership association
if 'membership' in kwargs:
member = kwargs['membership']
if member is None:
# Not shared with us anyway
return False
else:
members = image_member_find(context,
image_id=image['id'],
member=context.owner)
if members:
member = members[0]
else:
# Not shared with us anyway
return False
# It's the can_share attribute we're now interested in
return member['can_share']
def is_image_visible(context, image, status=None):
"""Return True if the image is visible in this context."""
# Is admin == image visible
if context.is_admin:
return True
# No owner == image visible
if image['owner'] is None:
return True
# Image is_public == image visible
if image['is_public']:
return True
# Perform tests based on whether we have an owner
if context.owner is not None:
if context.owner == image['owner']:
return True
# Figure out if this image is shared with that tenant
if status == 'all':
status = None
members = image_member_find(context,
image_id=image['id'],
member=context.owner,
status=status)
if members:
return True
# Private image
return False
def user_get_storage_usage(context, owner_id, image_id=None, session=None):
images = image_get_all(context, filters={'owner': owner_id})
total = 0
for image in images:
if image['id'] != image_id:
total = total + (image['size'] * len(image['locations']))
return total
@log_call
def task_create(context, task_values):
"""Create a task object"""
global DATA
task_id = task_values.get('id', uuidutils.generate_uuid())
required_attributes = ['type', 'status', 'input']
allowed_attributes = ['id', 'type', 'status', 'input', 'result', 'owner',
'message', 'expires_at', 'created_at',
'updated_at', 'deleted_at', 'deleted']
if task_id in DATA['tasks']:
raise exception.Duplicate()
for key in required_attributes:
if key not in task_values:
raise exception.Invalid('%s is a required attribute' % key)
incorrect_keys = set(task_values.keys()) - set(allowed_attributes)
if incorrect_keys:
raise exception.Invalid(
'The keys %s are not valid' % str(incorrect_keys))
task = _task_format(task_id, **task_values)
DATA['tasks'][task_id] = task
return copy.deepcopy(task)
@log_call
def task_update(context, task_id, values, purge_props=False):
"""Update a task object"""
global DATA
try:
task = DATA['tasks'][task_id]
except KeyError:
msg = (_("No task found with ID %s") % task_id)
LOG.debug(msg)
raise exception.TaskNotFound(task_id=task_id)
task.update(values)
task['updated_at'] = timeutils.utcnow()
DATA['tasks'][task_id] = task
return task
@log_call
def task_get(context, task_id, force_show_deleted=False):
task = _task_get(context, task_id, force_show_deleted)
return copy.deepcopy(task)
def _task_get(context, task_id, force_show_deleted=False):
try:
task = DATA['tasks'][task_id]
except KeyError:
msg = _('Could not find task %s') % task_id
LOG.info(msg)
raise exception.TaskNotFound(task_id=task_id)
if task['deleted'] and not (force_show_deleted or context.show_deleted):
msg = _('Unable to get deleted task %s') % task_id
LOG.info(msg)
raise exception.TaskNotFound(task_id=task_id)
if not _is_task_visible(context, task):
msg = (_("Forbidding request, task %s is not visible") % task_id)
LOG.debug(msg)
raise exception.Forbidden(msg)
return task
@log_call
def task_delete(context, task_id):
global DATA
try:
DATA['tasks'][task_id]['deleted'] = True
DATA['tasks'][task_id]['deleted_at'] = timeutils.utcnow()
DATA['tasks'][task_id]['updated_at'] = timeutils.utcnow()
return copy.deepcopy(DATA['tasks'][task_id])
except KeyError:
msg = (_("No task found with ID %s") % task_id)
LOG.debug(msg)
raise exception.TaskNotFound(task_id=task_id)
@log_call
def task_get_all(context, filters=None, marker=None, limit=None,
sort_key='created_at', sort_dir='desc'):
"""
Get all tasks that match zero or more filters.
:param filters: dict of filter keys and values.
:param marker: task id after which to start page
:param limit: maximum number of tasks to return
:param sort_key: task attribute by which results should be sorted
:param sort_dir: direction in which results should be sorted (asc, desc)
:return: tasks set
"""
filters = filters or {}
tasks = DATA['tasks'].values()
tasks = _filter_tasks(tasks, filters, context)
tasks = _sort_tasks(tasks, sort_key, sort_dir)
tasks = _paginate_tasks(context, tasks, marker, limit,
filters.get('deleted'))
return tasks
def _is_task_visible(context, task):
"""Return True if the task is visible in this context."""
# Is admin == task visible
if context.is_admin:
return True
# No owner == task visible
if task['owner'] is None:
return True
# Perform tests based on whether we have an owner
if context.owner is not None:
if context.owner == task['owner']:
return True
return False
def _filter_tasks(tasks, filters, context, admin_as_user=False):
filtered_tasks = []
for task in tasks:
has_ownership = context.owner and task['owner'] == context.owner
can_see = (has_ownership or (context.is_admin and not admin_as_user))
if not can_see:
continue
add = True
for k, value in filters.iteritems():
add = task[k] == value and task['deleted'] is False
if not add:
break
if add:
filtered_tasks.append(task)
return filtered_tasks
def _sort_tasks(tasks, sort_key, sort_dir):
reverse = False
if tasks and not (sort_key in tasks[0]):
raise exception.InvalidSortKey()
keyfn = lambda x: (x[sort_key] if x[sort_key] is not None else '',
x['created_at'], x['id'])
reverse = sort_dir == 'desc'
tasks.sort(key=keyfn, reverse=reverse)
return tasks
def _paginate_tasks(context, tasks, marker, limit, show_deleted):
start = 0
end = -1
if marker is None:
start = 0
else:
# Check that the task is accessible
_task_get(context, marker, force_show_deleted=show_deleted)
for i, task in enumerate(tasks):
if task['id'] == marker:
start = i + 1
break
else:
if task:
raise exception.TaskNotFound(task_id=task['id'])
else:
msg = _("Task does not exist")
raise exception.NotFound(message=msg)
end = start + limit if limit is not None else None
return tasks[start:end]
| apache-2.0 | -2,055,230,222,026,911,700 | 28.691686 | 79 | 0.568934 | false | 4.042925 | false | false | false |
ThreeSixes/airSuck | faaIngest.py | 1 | 16742 | #!/usr/bin/python
"""
faaIngest by ThreeSixes (https://github.com/ThreeSixes)
This project is licensed under GPLv3. See COPYING for dtails.
This file is part of the airSuck project (https://github.com/ThreeSixes/airSUck).
"""
import config as config
import csv
import traceback
import datetime
import pymongo
import zipfile
import os
import shutil
import pycurl
from libAirSuck import asLog
from pprint import pprint
class importFaaDb:
def __init__(self):
"""
Handle the FAA aircraft database files.
"""
# Build logger.
self.__logger = asLog(config.ssrRegMongo['logMode'])
self.__logger.log("AirSuck FAA database import starting...")
# Master list of aircraft properties.
self.__acList = {}
# Master list of engine properties.
self.__engList = {}
try:
#MongoDB config
faaRegMongo = pymongo.MongoClient(config.ssrRegMongo['host'], config.ssrRegMongo['port'])
mDB = faaRegMongo[config.ssrRegMongo['dbName']]
tempCollName = "%s_tmp" %config.ssrRegMongo['coll']
            # Set up the temporary collection.
self.__mDBColl = mDB[tempCollName]
# Nuke it if it exists.
try:
self.__mDBColl.drop()
except:
# Probably means it doesn't exist. We DGAF if it blows up.
None
except:
tb = traceback.format_exc()
self.__logger.log("Failed to connect to MongoDB:\n%s" %tb)
raise
def __getFaaData(self):
"""
Download and decompress FAA data.
"""
# Final location the zip file should end up.
fileTarget = "%s%s" %(config.ssrRegMongo['tempPath'], config.ssrRegMongo['tempZip'])
self.__logger.log("Downloading FAA database to %s..." %fileTarget)
try:
try:
# Try to create our directory
os.makedirs(config.ssrRegMongo['tempPath'])
except OSError:
# Already exists. We DGAF.
None
except:
raise
# Open the file and download the FAA DB into it.
with open(fileTarget, 'wb') as outZip:
# Use cURL to snag our database.
crl = pycurl.Curl()
crl.setopt(crl.URL, config.ssrRegMongo['faaDataURL'])
crl.setopt(crl.WRITEDATA, outZip)
crl.perform()
crl.close()
except:
raise
self.__logger.log("Unzipping relevatnt files from %s..." %fileTarget)
try:
# Open our zip file
zipF = zipfile.ZipFile(fileTarget, 'r')
            # Extract master file.
            zipF.extract(config.ssrRegMongo['masterFile'], config.ssrRegMongo['tempPath'])
            # Extract aircraft file.
            zipF.extract(config.ssrRegMongo['acFile'], config.ssrRegMongo['tempPath'])
            # Extract engine file.
            zipF.extract(config.ssrRegMongo['engFile'], config.ssrRegMongo['tempPath'])
except:
raise
finally:
zipF.close()
return
def __nukeFaaData(self):
"""
Delete FAA data files downloaded above.
"""
self.__logger.log("Deleting %s..." %fileTarget)
try:
# Nuke the temporary directory and all files under it.
shutil.rmtree(config.ssrRegMongo['tempPath'])
except:
raise
def __loadAcftRef(self):
"""
        Load aircraft reference data from file.
"""
dataRow = False
targetFile = "%s%s" %(config.ssrRegMongo['tempPath'], config.ssrRegMongo['acFile'])
self.__logger.log("Processing aicraft reference data in %s..." %targetFile)
with open(targetFile, 'rb') as csvFile:
for row in csv.reader(csvFile):
# Blank the row, create template.
thisRow = {}
if dataRow:
# Type-correct our CSV data.
try:
thisRow.update({'mfgName': row[1].strip()})
except:
None
try:
thisRow.update({'modelName': row[2].strip()})
except:
None
try:
thisRow.update({'acType': int(row[3].strip())})
except:
None
try:
thisRow.update({'engType': int(row[4].strip())})
except:
None
try:
thisRow.update({'acCat': int(row[5].strip())})
except:
None
try:
thisRow.update({'buldCert': int(row[6].strip())})
except:
None
try:
thisRow.update({'engCt': int(row[7].strip())})
except:
None
try:
thisRow.update({'seatCt': int(row[8].strip())})
except:
None
try:
thisRow.update({'weight': int(row[9].replace("CLASS ", "").strip())})
except:
None
try:
thisRow.update({'cruiseSpd': int(row[10].strip())})
except:
None
self.__acList.update({row[0].strip(): thisRow})
else:
dataRow = True
return
def __loadEngine(self):
"""
Load engine reference data from file.
"""
dataRow = False
targetFile = "%s%s" %(config.ssrRegMongo['tempPath'], config.ssrRegMongo['engFile'])
self.__logger.log("Processing engine reference data in %s..." %targetFile)
with open(targetFile, 'rb') as csvFile:
for row in csv.reader(csvFile):
# Blank the row, create template.
thisRow = {}
if dataRow:
# Type-correct our CSV data.
try:
thisRow.update({'mfgName': row[1].strip()})
except:
None
try:
thisRow.update({'modelName': row[2].strip()})
except:
None
try:
thisRow.update({'engType': int(row[3].strip())})
except:
None
try:
thisRow.update({'engHP': int(row[4].strip())})
except:
None
try:
thisRow.update({'thrust': int(row[5].strip())})
except:
None
# Tack our row on.
self.__engList.update({row[0].strip(): thisRow})
else:
dataRow = True
return
def __processMaster(self):
"""
Load master aircraft data from file. This should be called AFTER __loadAcftRef and __loadEngine.
"""
dataRow = False
targetFile = "%s%s" %(config.ssrRegMongo['tempPath'], config.ssrRegMongo['masterFile'])
self.__logger.log("Processing master aicraft data in %s..." %targetFile)
with open(targetFile, 'rb') as csvFile:
for row in csv.reader(csvFile):
# Blank the row, create template.
thisRow = {}
if dataRow:
# Type-correct our CSV data.
try:
thisRow.update({'nNumber': "N%s" %row[0].strip()})
except:
None
try:
thisRow.update({'serial': row[1].strip()})
except:
None
try:
thisRow.update({'acMfg': self.__acList[row[2].strip()]})
except:
None
try:
thisRow.update({'engMfg': self.__engList[row[3].strip()]})
except:
None
try:
thisRow.update({'yearMfg': int(row[4].strip())})
except:
None
try:
thisRow.update({'regType': int(row[5].strip())})
except:
None
try:
thisRow.update({'regName': row[6].strip()})
except:
None
try:
thisRow.update({'street1': row[7].strip()})
except:
None
try:
thisRow.update({'street2': row[8].strip()})
except:
None
try:
thisRow.update({'city': row[9].strip()})
except:
None
try:
thisRow.update({'state': row[10].strip()})
except:
None
try:
thisRow.update({'zip': row[11].strip()})
except:
None
try:
thisRow.update({'region': row[12].strip()})
except:
None
try:
thisRow.update({'countyCode': row[13].strip()})
except:
None
try:
thisRow.update({'countryCode': row[14].strip()})
except:
None
try:
thisRow.update({'lastActDate': datetime.datetime.strptime(row[15].strip(), '%Y%m%d')})
except:
None
try:
thisRow.update({'certIssDate': datetime.datetime.strptime(row[16].strip(), '%Y%m%d')})
except:
None
try:
thisRow.update({'airWorthClass': row[17].strip()})
except:
None
try:
thisRow.update({'acType': int(row[18].strip())})
except:
None
try:
thisRow.update({'engType': int(row[19].strip())})
except:
None
try:
thisRow.update({'statCode': row[20].strip()})
except:
None
try:
thisRow.update({'modeSInt': int(row[21].strip())})
except:
None
try:
thisRow.update({'fractOwner': row[22].strip()})
except:
None
try:
thisRow.update({'airWorthyDate': datetime.datetime.strptime(row[23].strip(), '%Y%m%d')})
except:
None
try:
thisRow.update({'otherName1': row[24].strip()})
except:
None
try:
thisRow.update({'otherName2': row[25].strip()})
except:
None
try:
thisRow.update({'otherName3': row[26].strip()})
except:
None
try:
thisRow.update({'otherName4': row[27].strip()})
except:
None
try:
thisRow.update({'otherName5': row[28].strip()})
except:
None
try:
thisRow.update({'expireDate': datetime.datetime.strptime(row[29].strip(), '%Y%m%d')})
except:
None
try:
thisRow.update({'uid': row[30].strip()})
except:
None
try:
thisRow.update({'kitMfr': row[31].strip()})
except:
None
try:
thisRow.update({'kitMdl': row[32].strip()})
except:
None
try:
thisRow.update({'modeSHex': row[33].strip().lower()})
except:
None
# Insert the row.
try:
self.__mDBColl.insert(thisRow)
except:
raise
else:
dataRow = True
return
    def __migrateDb(self):
        """
        Swap out the old database for the new.
        """
        self.__logger.log("Migrating newly processed aircraft data to live data...")
        try:
            # Try to overwrite the main collection.
            self.__mDBColl.rename(config.ssrRegMongo['coll'], dropTarget=True)
except:
raise
return
def run(self):
"""
Do all the work in sequence.
"""
try:
# Grab and decompress file.
self.__getFaaData()
# Pull aircraft reference data.
self.__loadAcftRef()
# Pull aircraft engine data.
self.__loadEngine()
            # Insert master aircraft records combined with records from the engine and aircraft reference data.
self.__processMaster()
# Swap the database.
self.__migrateDb()
except:
tb = traceback.format_exc()
print("Unhandled exception:\n%s" %tb)
finally:
try:
# Drop the temporary collection.
self.__mDBColl.drop()
except:
# We DGAF it this doesn't work.
None
try:
# Drop the temporary collection.
self.__nukeFaaData()
except:
# We DGAF it this doesn't work.
None
ifdb = importFaaDb()
ifdb.run()
| gpl-3.0 | 8,160,541,461,397,076,000 | 31.571984 | 122 | 0.369191 | false | 5.575092 | true | false | false |
terkkila/cgml | test/test_optimizers.py | 1 | 2318 |
import theano
import theano.tensor as T
import cgml.types
from cgml.optimizers import Momentum,AdaDelta
from cgml.graph import ComputationalGraph
from cgml.layers.base import _make_shared
import numpy as np
from nose.tools import assert_true,assert_equals,assert_almost_equals
def test_momentum():
schema = {'description':'logreg',
'type':'classification',
'supervised-cost':
{'type':'negative-log-likelihood',
'name':'class-out'},
'graph':
[{'activation':'softmax',
'n_in':10,
'n_out':2,
'dropout':0.0,
'name':'class-out'}]
}
#optimizer = Momentum()
#model = ComputationalGraph(schema = schema,
# optimizer = optimizer)
def test_adadelta_logreg():
x = T.fvector('x')
y = T.fscalar('y')
w = _make_shared([1.0,1.0],name='w')
b = _make_shared([1.0],name='b')
yhat = 1.0 / ( 1.0 + T.exp( - T.dot(x,w) - b ) )
e = y - yhat
cost = T.dot(e,e)
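    # Squared error of a one-sample logistic regression; each AdaDelta update below should reduce it.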
ad = AdaDelta(cost = cost,
params = [w,b])
update = theano.function( inputs = [x,y],
outputs = cost,
updates = ad.updates )
c = update([2,1],0)
assert_almost_equals(c,0.9643510838246173)
c_prev = c
for i in range(100):
c = update([2,1],0)
assert_equals(c,c)
assert_true(c < c_prev)
c_prev = c
def test_adadelta_model():
schema = {'description':'logreg',
'type':'classification',
'supervised-cost':
{'type':'negative-log-likelihood',
'name':'class-out'},
'graph':
[{'activation':'softmax',
'n_in':10,
'n_out':2,
'dropout':0.0,
'name':'class-out'}]
}
model = ComputationalGraph(schema = schema,
seed = 0)
x = np.asarray([[1,2,3,4,5,1,2,3,4,5]]).astype(cgml.types.floatX)
y = np.asarray([0],dtype=cgml.types.intX)
model.setTrainDataOnDevice(x,y)
for i in range(10):
model.supervised_update(0,1)
| apache-2.0 | 3,047,407,276,439,846,000 | 24.195652 | 69 | 0.477998 | false | 3.604977 | false | false | false |
electronicvisions/brick | source/waf/modules.py | 2 | 1179 | import os, subprocess, time, re
from waflib.Configure import conf
def configure(conf):
conf.start_msg('Checking for program module')
# this does not work, since module is exported as a function on valgol:
# conf.find_program('module')
# Therfore:
if os.system('source /usr/local/Modules/current/init/bash && module purge') == 0:
conf.end_msg('module')
else:
conf.end_msg('module not found')
conf.fatal('Could not find the program module')
@conf
def load_modules(self,*k,**kw):
module_string = ''
try:
for module in kw['modules']:
module_string += module+' '
except KeyError:
        self.fatal('You must give modules to function load_modules like this: load_modules(modules=[a,b,c])')
#self.start_msg('Loading modules')
p = subprocess.Popen('source /usr/local/Modules/current/init/bash && module load '+module_string+' && export -p', shell=True, stdout=subprocess.PIPE)
p.wait()
if p.returncode == 0:
for key in os.environ.iterkeys():
os.unsetenv(key)
for line in p.stdout:
m = re.search('(\w+)=(".+")$', line)
if (m):
os.putenv(m.group(1), m.group(2))
#self.end_msg(module_string)
else:
self.fatal('Loading modules did not work')
| bsd-3-clause | -4,127,682,612,295,093,000 | 28.475 | 150 | 0.682782 | false | 3 | false | false | false |
baali/Tweet_De_Feed | feeds/migrations/0003_auto_20170205_1207.py | 2 | 1049 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-05 12:07
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('feeds', '0002_urlshared_cleaned_text'),
]
operations = [
migrations.AddField(
model_name='authtoken',
name='me_json',
field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
),
migrations.AddField(
model_name='twitteraccount',
name='account_json',
field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
),
migrations.AddField(
model_name='urlshared',
name='url_json',
field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
),
migrations.AlterField(
model_name='urlshared',
name='url_shared',
field=models.DateTimeField(),
),
]
| gpl-3.0 | 7,523,856,348,130,655,000 | 28.138889 | 77 | 0.584366 | false | 4.246964 | false | false | false |
hds-lab/textvisdrg | msgvis/apps/enhance/migrations/0003_auto_20150303_2224.py | 1 | 1276 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('enhance', '0002_auto_20150303_2223'),
]
operations = [
migrations.RenameField(
model_name='messagetopic',
old_name='source',
new_name='message',
),
migrations.RenameField(
model_name='messageword',
old_name='source',
new_name='message',
),
migrations.AlterField(
model_name='messagetopic',
name='topic_model',
field=models.ForeignKey(to='enhance.TopicModel', db_index=False),
preserve_default=True,
),
migrations.AlterField(
model_name='messageword',
name='dictionary',
field=models.ForeignKey(to='enhance.Dictionary', db_index=False),
preserve_default=True,
),
migrations.AlterIndexTogether(
name='messagetopic',
index_together=set([('topic_model', 'message')]),
),
migrations.AlterIndexTogether(
name='messageword',
index_together=set([('dictionary', 'message')]),
),
]
| mit | 7,324,922,098,762,494,000 | 28 | 77 | 0.547022 | false | 4.4 | false | false | false |
geotagx/pybossa | pybossa/auth/webhook.py | 1 | 1831 | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2015 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
class WebhookAuth(object):
_specific_actions = []
def __init__(self, project_repo):
self.project_repo = project_repo
@property
def specific_actions(self):
return self._specific_actions
def can(self, user, action, webhook=None, project_id=None):
action = ''.join(['_', action])
return getattr(self, action)(user, webhook, project_id)
def _create(self, user, webhook, project_id=None):
return False
def _read(self, user, webhook=None, project_id=None):
if user.is_anonymous() or (webhook is None and project_id is None):
return False
project = self._get_project(webhook, project_id)
return user.admin or user.id == project.owner_id
def _update(self, user, webhook, project_id=None):
return False
def _delete(self, user, webhook, project_id=None):
return False
def _get_project(self, webhook, project_id):
if webhook is not None:
return self.project_repo.get(webhook.project_id)
return self.project_repo.get(project_id)
| agpl-3.0 | 6,591,285,263,620,765,000 | 33.54717 | 77 | 0.678318 | false | 3.854737 | false | false | false |
nanolearning/edx-platform | common/test/acceptance/tests/test_studio_general.py | 2 | 5023 | """
Acceptance tests for Studio.
"""
from bok_choy.web_app_test import WebAppTest
from ..pages.studio.asset_index import AssetIndexPage
from ..pages.studio.auto_auth import AutoAuthPage
from ..pages.studio.checklists import ChecklistsPage
from ..pages.studio.course_import import ImportPage
from ..pages.studio.course_info import CourseUpdatesPage
from ..pages.studio.edit_tabs import PagesPage
from ..pages.studio.export import ExportPage
from ..pages.studio.howitworks import HowitworksPage
from ..pages.studio.index import DashboardPage
from ..pages.studio.login import LoginPage
from ..pages.studio.manage_users import CourseTeamPage
from ..pages.studio.overview import CourseOutlinePage
from ..pages.studio.settings import SettingsPage
from ..pages.studio.settings_advanced import AdvancedSettingsPage
from ..pages.studio.settings_graders import GradingPage
from ..pages.studio.signup import SignupPage
from ..pages.studio.textbooks import TextbooksPage
from ..fixtures.course import CourseFixture, XBlockFixtureDesc
from .helpers import UniqueCourseTest
class LoggedOutTest(WebAppTest):
"""
Smoke test for pages in Studio that are visible when logged out.
"""
def setUp(self):
super(LoggedOutTest, self).setUp()
self.pages = [LoginPage(self.browser), HowitworksPage(self.browser), SignupPage(self.browser)]
def test_page_existence(self):
"""
Make sure that all the pages are accessible.
Rather than fire up the browser just to check each url,
do them all sequentially in this testcase.
"""
for page in self.pages:
page.visit()
class LoggedInPagesTest(WebAppTest):
"""
Tests that verify the pages in Studio that you can get to when logged
in and do not have a course yet.
"""
def setUp(self):
super(LoggedInPagesTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
def test_dashboard_no_courses(self):
"""
Make sure that you can get to the dashboard page without a course.
"""
self.auth_page.visit()
self.dashboard_page.visit()
class CoursePagesTest(UniqueCourseTest):
"""
Tests that verify the pages in Studio that you can get to when logged
in and have a course.
"""
COURSE_ID_SEPARATOR = "."
def setUp(self):
"""
Install a course with no content using a fixture.
"""
super(CoursePagesTest, self).setUp()
CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
).install()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.pages = [
clz(self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'])
for clz in [
AssetIndexPage, ChecklistsPage, ImportPage, CourseUpdatesPage,
PagesPage, ExportPage, CourseTeamPage, CourseOutlinePage, SettingsPage,
AdvancedSettingsPage, GradingPage, TextbooksPage
]
]
def test_page_existence(self):
"""
Make sure that all these pages are accessible once you have a course.
Rather than fire up the browser just to check each url,
do them all sequentially in this testcase.
"""
# Log in
self.auth_page.visit()
# Verify that each page is available
for page in self.pages:
page.visit()
class DiscussionPreviewTest(UniqueCourseTest):
"""
Tests that Inline Discussions are rendered with a custom preview in Studio
"""
def setUp(self):
super(DiscussionPreviewTest, self).setUp()
CourseFixture(**self.course_info).add_children(
XBlockFixtureDesc("chapter", "Test Section").add_children(
XBlockFixtureDesc("sequential", "Test Subsection").add_children(
XBlockFixtureDesc("vertical", "Test Unit").add_children(
XBlockFixtureDesc(
"discussion",
"Test Discussion",
)
)
)
)
).install()
AutoAuthPage(self.browser, staff=True).visit()
cop = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
cop.visit()
self.unit = cop.section('Test Section').subsection('Test Subsection').toggle_expand().unit('Test Unit')
self.unit.go_to()
def test_is_preview(self):
"""
Ensure that the preview version of the discussion is rendered.
"""
self.assertTrue(self.unit.q(css=".discussion-preview").present)
self.assertFalse(self.unit.q(css=".discussion-show").present)
| agpl-3.0 | -2,325,661,804,641,440,000 | 32.939189 | 111 | 0.637667 | false | 4.274894 | true | false | false |
portnov/sverchok | nodes/analyzer/area.py | 1 | 3597 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import math
from mathutils import Vector, Matrix
import bpy
from bpy.props import BoolProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode
# unit normal vector of plane defined by points a, b, and c
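# (the determinants are a cofactor expansion of the cross product (b - a) x (c - a),
#  which is then normalised to unit length; a zero magnitude falls back to 1)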
def unit_normal(a, b, c):
mat_x = Matrix(((1, a[1], a[2]), (1, b[1], b[2]), (1, c[1], c[2])))
mat_y = Matrix(((a[0], 1, a[2]), (b[0], 1, b[2]), (c[0], 1, c[2])))
mat_z = Matrix(((a[0], a[1], 1), (b[0], b[1], 1), (c[0], c[1], 1)))
x = Matrix.determinant(mat_x)
y = Matrix.determinant(mat_y)
z = Matrix.determinant(mat_z)
magnitude = (x**2 + y**2 + z**2)**.5
if magnitude == 0:
magnitude = 1
return (x/magnitude, y/magnitude, z/magnitude)
# area of polygon poly
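# (sums the cross products of consecutive vertices and projects the total onto the unit
#  normal: a 3D generalisation of the shoelace formula for planar polygons)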
def area_pol(poly):
if len(poly) < 3: # not a plane - no area
return 0
total = Vector((0, 0, 0))
for i in range(len(poly)):
vi1 = Vector(poly[i])
if i is len(poly)-1:
vi2 = Vector(poly[0])
else:
vi2 = Vector(poly[i+1])
prod = vi1.cross(vi2)[:]
total[0] += prod[0]
total[1] += prod[1]
total[2] += prod[2]
result = total.dot(unit_normal(poly[0], poly[1], poly[2]))
return abs(result/2)
def areas(Vertices, Polygons, per_face):
areas = []
for i, obj in enumerate(Polygons):
res = []
for face in obj:
poly = []
for j in face:
poly.append(Vertices[i][j])
res.append(area_pol(poly))
if per_face:
areas.extend(res)
else:
areas.append(math.fsum(res))
return areas
class AreaNode(bpy.types.Node, SverchCustomTreeNode):
''' Area '''
bl_idname = 'AreaNode'
bl_label = 'Area'
bl_icon = 'OUTLINER_OB_EMPTY'
sv_icon = 'SV_AREA'
per_face = BoolProperty(name='per_face',
default=True,
update=updateNode)
def sv_init(self, context):
self.inputs.new('VerticesSocket', "Vertices", "Vertices")
self.inputs.new('StringsSocket', "Polygons", "Polygons")
self.outputs.new('StringsSocket', "Area", "Area")
def draw_buttons(self, context, layout):
layout.prop(self, "per_face", text="Count faces")
def process(self):
# inputs
inputs = self.inputs
outputs = self.outputs
if not 'Area' in outputs:
return
Vertices = inputs["Vertices"].sv_get()
Polygons = inputs["Polygons"].sv_get()
# outputs
if outputs['Area'].is_linked:
outputs['Area'].sv_set([areas(Vertices, Polygons, self.per_face)])
def register():
bpy.utils.register_class(AreaNode)
def unregister():
bpy.utils.unregister_class(AreaNode)
| gpl-3.0 | -4,636,814,415,671,273,000 | 28.008065 | 78 | 0.593828 | false | 3.393396 | false | false | false |
majk1/shellrc | utils/unicode.py | 1 | 3190 | #!/usr/bin/env python
#
# === Search for unicode symbol ===
#
# MIT License
# Copyright (c) 2016 Attila Majoros
#
# Source: https://github.com/majk1/shellrc/blob/master/utils/unicode.py
#
import sys
import os.path
import re
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
UNICODE_SYMBOLS_SOURCE_URL = 'http://www.unicode.org/Public/UNIDATA/UnicodeData.txt'
UNICODE_SYMBOLS_SOURCE_URL_FALLBACK = 'https://static.codelens.io/UnicodeData.txt'
UNICODE_SYMBOLS_LIST = '/tmp/unicode_symbols.lst'
DEBUG = False
def debug(message):
if DEBUG:
sys.stderr.write('DBG: ' + message + '\n')
def fetch_unicode_data(url, target_file):
debug('Fetching unicode symbol list from ' + url)
data = urlopen(url)
fl = open(target_file, 'wt')
line_counter = 0
for desc_line in data:
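        # Keep only "code;name" pairs whose name does not start with '<'
        # (skips <control> entries and range markers in UnicodeData.txt).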
m = re.match('^(.{4};[^<][^;]+);.*', desc_line.decode())
if m:
fl.write(m.group(1).lower())
fl.write('\n')
line_counter = line_counter + 1
fl.close()
debug('Fetched and filtered ' + str(line_counter) + ' symbols into ' + target_file)
def is_unicode_symbols_list_file_usable(file_name):
if os.path.exists(file_name):
if os.path.getsize(file_name) > 0:
return True
return False
if __name__ == '__main__':
args = list(sys.argv[1:])
if len(args) > 0:
if args[0] == '-d':
DEBUG = True
args = args[1:]
debug('Python version: ' + sys.version)
if len(args) == 0:
sys.stderr.write('Usage: unicode.py [-d] <search>\n')
sys.stderr.write('\n')
        sys.stderr.write('  -d       - enable debug messages\n')
sys.stderr.write(' <search> - multiple search patterns separated by space\n')
sys.stderr.write('\n')
sys.stderr.write('Example: ./unicode.py black circle\n')
sys.stderr.write('\n')
sys.exit(1)
if not is_unicode_symbols_list_file_usable(UNICODE_SYMBOLS_LIST):
try:
fetch_unicode_data(UNICODE_SYMBOLS_SOURCE_URL, UNICODE_SYMBOLS_LIST)
except Exception as e:
debug('Could not download unicode symbol list: ' + str(e))
debug('trying fallback url: ' + UNICODE_SYMBOLS_SOURCE_URL_FALLBACK)
try:
fetch_unicode_data(UNICODE_SYMBOLS_SOURCE_URL_FALLBACK, UNICODE_SYMBOLS_LIST)
except Exception as ee:
sys.stderr.write('Could not download unicode symbol list from fallback url: ' + str(ee) + '\n')
sys.exit(2)
search = [s.lower() for s in args]
debug('searching for unicode symbols by: ' + '+'.join(search))
with open(UNICODE_SYMBOLS_LIST, 'r') as f:
for line in f:
if all(s in line for s in search):
code, name = line.rstrip().split(';')
symbol = ('\\u' + code).encode('utf-8').decode('raw_unicode_escape')
try:
print(symbol + '\t\\u' + code + '\t&#x' + code + ';\t' + name)
except UnicodeEncodeError as e:
print((symbol + '\t\\u' + code + '\t&#x' + code + ';\t' + name).encode('utf-8'))
| mit | -8,347,624,188,629,027,000 | 32.93617 | 111 | 0.578683 | false | 3.40812 | false | false | false |
miyyer/qb | validate_annotations.py | 2 | 1075 | from qanta import qlogging
from qanta.ingestion.answer_mapping import read_wiki_titles
from qanta.ingestion.annotated_mapping import PageAssigner
log = qlogging.get("validate_annotations")
def normalize(title):
return title.replace(" ", "_")
def check_page(page, titles):
n_page = normalize(page)
if n_page not in titles:
log.error(f"Title not found: {page}")
def main():
titles = read_wiki_titles()
assigner = PageAssigner()
log.info("Checking direct protobowl mappings...")
for page in assigner.protobowl_direct.values():
check_page(page, titles)
log.info("Checking direct quizdb mappings...")
for page in assigner.quizdb_direct.values():
check_page(page, titles)
log.info("Checking unambiguous mappings...")
for page in assigner.unambiguous.values():
check_page(page, titles)
log.info("Checking ambiguous mappings...")
for entry in assigner.ambiguous.values():
for option in entry:
check_page(option["page"], titles)
if __name__ == "__main__":
main()
| mit | 7,737,588,136,300,409,000 | 25.219512 | 59 | 0.666047 | false | 3.681507 | false | false | false |
picklepete/pyicloud | pyicloud/utils.py | 1 | 1910 | """Utils."""
import getpass
import keyring
from sys import stdout
from .exceptions import PyiCloudNoStoredPasswordAvailableException
KEYRING_SYSTEM = "pyicloud://icloud-password"
def get_password(username, interactive=stdout.isatty()):
"""Get the password from a username."""
try:
return get_password_from_keyring(username)
except PyiCloudNoStoredPasswordAvailableException:
if not interactive:
raise
return getpass.getpass(
"Enter iCloud password for {username}: ".format(username=username)
)
def password_exists_in_keyring(username):
"""Return true if the password of a username exists in the keyring."""
try:
get_password_from_keyring(username)
except PyiCloudNoStoredPasswordAvailableException:
return False
return True
def get_password_from_keyring(username):
"""Get the password from a username."""
result = keyring.get_password(KEYRING_SYSTEM, username)
if result is None:
raise PyiCloudNoStoredPasswordAvailableException(
"No pyicloud password for {username} could be found "
"in the system keychain. Use the `--store-in-keyring` "
"command-line option for storing a password for this "
"username.".format(username=username)
)
return result
def store_password_in_keyring(username, password):
"""Store the password of a username."""
return keyring.set_password(KEYRING_SYSTEM, username, password)
def delete_password_in_keyring(username):
"""Delete the password of a username."""
return keyring.delete_password(KEYRING_SYSTEM, username)
def underscore_to_camelcase(word, initial_capital=False):
"""Transform a word to camelCase."""
words = [x.capitalize() or "_" for x in word.split("_")]
if not initial_capital:
words[0] = words[0].lower()
return "".join(words)
| mit | -5,112,024,701,855,479,000 | 28.384615 | 78 | 0.683246 | false | 4.143167 | false | false | false |
mixman/django-js-utils | django_js_utils/utils.py | 1 | 3286 | import sys
import re
import types
import fnmatch
from collections import OrderedDict
from django.conf import settings
from django.core.urlresolvers import RegexURLPattern, RegexURLResolver
from django_js_utils import conf_jsutils
import six
class PatternsParser(object):
def __init__(self):
self._patterns = OrderedDict()
def parse(self, input):
self.handle_url_module(input)
def handle_url_module(self, module_name, prefix=""):
"""
Load the module and output all of the patterns
Recurse on the included modules
"""
if isinstance(module_name, six.string_types):
__import__(module_name)
root_urls = sys.modules[module_name]
patterns = root_urls.urlpatterns
elif isinstance(module_name, types.ModuleType):
root_urls = module_name
patterns = root_urls.urlpatterns
else:
root_urls = module_name
patterns = root_urls
def match(rule, target):
return re.match(rule, target.strip('^$'))
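        # The URLS_INCLUDE_/URLS_EXCLUDE_ PREFIX and PATTERN settings are regexes matched against
        # the accumulated URL prefix and each pattern, with the '^'/'$' anchors stripped first.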
for pattern in patterns:
if issubclass(pattern.__class__, RegexURLPattern):
if any(match(k, prefix) for k in getattr(settings, 'URLS_EXCLUDE_PREFIX', [])):
continue
if getattr(settings, 'URLS_INCLUDE_PREFIX', []):
if not any(match(k, prefix) for k in getattr(settings, 'URLS_INCLUDE_PREFIX', [])):
continue
val = getattr(pattern, 'name', None) or ''
if any(match(k, pattern.regex.pattern) for k in getattr(settings, 'URLS_EXCLUDE_PATTERN', [])):
continue
if getattr(settings, 'URLS_INCLUDE_PATTERN', []):
if not any(match(k, pattern.regex.pattern) for k in getattr(settings, 'URLS_INCLUDE_PATTERN', [])):
continue
self.parse_pattern(pattern, prefix)
elif issubclass(pattern.__class__, RegexURLResolver):
if pattern.url_patterns:
self.handle_url_module(pattern.url_patterns, prefix=prefix+pattern.regex.pattern)
elif pattern.urlconf_name:
self.handle_url_module(pattern.urlconf_name, prefix=pattern.regex.pattern)
def parse_pattern(self, pattern, prefix):
full_url = prefix + pattern.regex.pattern
for chr in ("^", "$"):
full_url = full_url.replace(chr, "")
#handle kwargs, args
kwarg_matches = conf_jsutils.RE_KWARG.findall(full_url)
if kwarg_matches:
for el in kwarg_matches:
#prepare the output for JS resolver
full_url = full_url.replace(el[0], "<%s>" % el[1])
#after processing all kwargs try args
args_matches = conf_jsutils.RE_ARG.findall(full_url)
if args_matches:
for el in args_matches:
                full_url = full_url.replace(el, "<>")  # replace by an empty parameter name
#unescape escaped chars which are not special sequences
full_url = re.sub(r'\\([^\dAZbBdDsSwW])', r'\1', full_url)
self._patterns[pattern.name] = "/" + full_url
@property
def patterns(self):
return self._patterns
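# A minimal usage sketch, added for illustration only; "myproject.urls" is a
# hypothetical URLconf module name, not part of this file:
#
#   parser = PatternsParser()
#   parser.parse("myproject.urls")
#   for name, url in parser.patterns.items():
#       print("%s -> %s" % (name, url))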
| mit | 171,514,094,672,612,260 | 37.209302 | 119 | 0.584297 | false | 4.261997 | false | false | false |
dfm/twitterick | webapp.py | 1 | 5990 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import tornado.web
import tornado.ioloop
import tornado.httpserver
from tornado.escape import json_encode
from tornado.options import define, options, parse_command_line
from twitterick import emoji
from twitterick.limericker import write
from twitterick.database import get_connection
define("port", default=3058, help="run on the given port", type=int)
define("debug", default=False, help="run in debug mode")
define("xheaders", default=True, help="use X-headers")
define("cookie_secret", default="secret key", help="secure key")
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", IndexHandler),
(r"/new", NewHandler),
(r"/recent", RecentHandler),
(r"/popular", PopularHandler),
(r"/([0-9]+)", TwitterickHandler),
(r"/like/([0-9]+)", LikeHandler),
]
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
xheaders=options.xheaders,
cookie_secret=options.cookie_secret,
debug=options.debug,
)
super(Application, self).__init__(handlers, ui_methods=emoji,
**settings)
self._db = get_connection()
@property
def db(self):
return self._db
class BaseHandler(tornado.web.RequestHandler):
@property
def db(self):
return self.application.db
def get_poems(self, poem_id=None, page=0, per_page=10, popular=False):
q = """
select
twittericks.id, twittericks.votes,
t1.tweet_id, t1.username, t1.body,
t2.tweet_id, t2.username, t2.body,
t3.tweet_id, t3.username, t3.body,
t4.tweet_id, t4.username, t4.body,
t5.tweet_id, t5.username, t5.body
from twittericks
join tweets as t1 on l1=t1.id
join tweets as t2 on l2=t2.id
join tweets as t3 on l3=t3.id
join tweets as t4 on l4=t4.id
join tweets as t5 on l5=t5.id
"""
args = []
if poem_id is not None:
q += "where twittericks.id=%s limit 1\n"
args += [poem_id]
else:
if popular:
q += "where votes > 0 order by votes desc, id desc"
else:
q += "order by id desc"
q += " offset %s limit %s"
args += [page * per_page, per_page]
with self.db as conn:
c = conn.cursor()
c.execute(q, args)
results = c.fetchall()
return [dict(poem_id=r[0], votes=r[1],
lines=[dict(tweet_id=r[2+i*3], username=r[3+i*3],
body=r[4+i*3]) for i in range(5)])
for r in results]
class IndexHandler(BaseHandler):
def get(self):
# Count the number of tweets.
with self.db as conn:
c = conn.cursor()
c.execute("select count(*) from tweets")
count = c.fetchone()
if count is None:
count = "many"
else:
count = count[0]
poems = self.get_poems()
# Get the result of the query.
if not len(poems):
self.render("noresults.html")
return
# Parse the poem and display the results.
self.render("index.html", title="Twitterick", poems=poems, count=count)
class NewHandler(BaseHandler):
def get(self):
with self.db as conn:
poem_id = write(conn.cursor())
self.redirect("/{0}".format(poem_id))
class RecentHandler(BaseHandler):
def get(self):
# Pagination.
page = self.get_argument("page", 0)
page = max([0, int(page)])
poems = self.get_poems(page=page)
# Get the result of the query.
if not len(poems):
self.render("noresults.html")
return
# Parse the poem and display the results.
self.render("poems.html", title="Recent Twittericks", poems=poems,
next_page=page+1, prev_page=page-1)
class PopularHandler(BaseHandler):
def get(self):
# Pagination.
page = self.get_argument("page", 0)
page = max([0, int(page)])
poems = self.get_poems(page=page, popular=True)
# Get the result of the query.
if not len(poems):
self.render("noresults.html")
return
# Parse the poem and display the results.
self.render("poems.html", title="Popular Twittericks", poems=poems,
next_page=page+1, prev_page=page-1)
class TwitterickHandler(BaseHandler):
def get(self, poem_id):
poems = self.get_poems(poem_id=poem_id)
# Get the result of the query.
if not len(poems):
self.render("noresults.html")
return
# Parse the poem and display the results.
self.render("poem.html", title="Twitterick #{0}".format(poem_id),
poem=poems[0])
class LikeHandler(BaseHandler):
def get(self, poem_id):
with self.db as conn:
c = conn.cursor()
c.execute("update twittericks set votes=votes+1 where id=%s "
"returning votes",
(poem_id, ))
votes = c.fetchone()
self.set_header("Content-Type", "application/json")
if votes is None:
self.set_status(404)
self.write(json_encode(dict(message="Failure", votes=0)))
            self.finish()
            return
self.write(json_encode(dict(message="Success", votes=votes[0])))
def main():
parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port, address="0.0.0.0")
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
| mit | -3,771,856,059,757,141,000 | 27.660287 | 79 | 0.564608 | false | 3.470452 | false | false | false |
jnewland/ha-config | custom_components/bhyve/pybhyve/client.py | 1 | 6186 | """Define an object to interact with the REST API."""
import logging
import re
import time
from asyncio import ensure_future
from .const import (
API_HOST,
API_POLL_PERIOD,
DEVICES_PATH,
DEVICE_HISTORY_PATH,
TIMER_PROGRAMS_PATH,
LOGIN_PATH,
WS_HOST,
)
from .errors import RequestError
from .websocket import OrbitWebsocket
_LOGGER = logging.getLogger(__name__)
class Client:
"""Define the API object."""
def __init__(
self, username: str, password: str, loop, session, async_callback
) -> None:
"""Initialize."""
self._username: str = username
        self._password: str = password
self._ws_url: str = WS_HOST
self._token: str = None
self._websocket = None
self._loop = loop
self._session = session
self._async_callback = async_callback
self._devices = []
self._last_poll_devices = 0
self._timer_programs = []
self._last_poll_programs = 0
self._device_histories = dict()
self._last_poll_device_histories = 0
async def _request(
self, method: str, endpoint: str, params: dict = None, json: dict = None
) -> list:
"""Make a request against the API."""
url: str = f"{API_HOST}{endpoint}"
if not params:
params = {}
headers = {
"Accept": "application/json, text/plain, */*",
"Host": re.sub("https?://", "", API_HOST),
"Content-Type": "application/json; charset=utf-8;",
"Referer": API_HOST,
"Orbit-Session-Token": self._token or "",
}
headers["User-Agent"] = (
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/72.0.3626.81 Safari/537.36"
)
async with self._session.request(
method, url, params=params, headers=headers, json=json
) as resp:
try:
resp.raise_for_status()
return await resp.json(content_type=None)
except Exception as err:
raise RequestError(f"Error requesting data from {url}: {err}")
async def _refresh_devices(self, force_update=False):
now = time.time()
if force_update:
_LOGGER.info("Forcing device refresh")
elif now - self._last_poll_devices < API_POLL_PERIOD:
return
self._devices = await self._request(
"get", DEVICES_PATH, params={"t": str(time.time())}
)
self._last_poll_devices = now
async def _refresh_timer_programs(self, force_update=False):
now = time.time()
if force_update:
_LOGGER.debug("Forcing device refresh")
elif now - self._last_poll_programs < API_POLL_PERIOD:
return
self._timer_programs = await self._request(
"get", TIMER_PROGRAMS_PATH, params={"t": str(time.time())}
)
self._last_poll_programs = now
async def _refresh_device_history(self, device_id, force_update=False):
now = time.time()
if force_update:
_LOGGER.info("Forcing refresh of device history %s", device_id)
elif now - self._last_poll_device_histories < API_POLL_PERIOD:
return
device_history = await self._request(
"get",
DEVICE_HISTORY_PATH.format(device_id),
params={"t": str(time.time()), "page": str(1), "per-page": str(10),},
)
self._device_histories.update({device_id: device_history})
self._last_poll_device_histories = now
async def _async_ws_handler(self, data):
"""Process incoming websocket message."""
if self._async_callback:
ensure_future(self._async_callback(data))
async def login(self) -> bool:
"""Log in with username & password and save the token."""
url: str = f"{API_HOST}{LOGIN_PATH}"
json = {"session": {"email": self._username, "password": self._password}}
async with self._session.request("post", url, json=json) as resp:
try:
resp.raise_for_status()
response = await resp.json(content_type=None)
_LOGGER.debug("Logged in")
self._token = response["orbit_session_token"]
except Exception as err:
raise RequestError(f"Error requesting data from {url}: {err}")
if self._token is None:
return False
self._websocket = OrbitWebsocket(
token=self._token,
loop=self._loop,
session=self._session,
url=self._ws_url,
async_callback=self._async_ws_handler,
)
self._websocket.start()
return True
async def stop(self):
"""Stop the websocket."""
if self._websocket is not None:
await self._websocket.stop()
@property
async def devices(self):
"""Get all devices."""
await self._refresh_devices()
return self._devices
@property
async def timer_programs(self):
"""Get timer programs."""
await self._refresh_timer_programs()
return self._timer_programs
async def get_device(self, device_id, force_update=False):
"""Get device by id."""
await self._refresh_devices(force_update=force_update)
for device in self._devices:
if device.get("id") == device_id:
return device
return None
async def get_device_history(self, device_id, force_update=False):
"""Get device watering history by id."""
await self._refresh_device_history(device_id, force_update=force_update)
return self._device_histories.get(device_id)
async def update_program(self, program_id, program):
"""Update the state of a program"""
path = "{0}/{1}".format(TIMER_PROGRAMS_PATH, program_id)
json = {"sprinkler_timer_program": program}
await self._request("put", path, json=json)
async def send_message(self, payload):
"""Send a message via the websocket"""
await self._websocket.send(payload)
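# A minimal usage sketch, added for illustration only. The credentials, the
# aiohttp session handling and the on_event callback are assumptions, not part
# of this module:
#
#   import asyncio
#   import aiohttp
#
#   async def on_event(data):
#       print(data)
#
#   async def main(loop):
#       async with aiohttp.ClientSession() as session:
#           client = Client("user@example.com", "secret", loop, session, on_event)
#           if await client.login():
#               print(await client.devices)
#           await client.stop()
#
#   loop = asyncio.get_event_loop()
#   loop.run_until_complete(main(loop))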
| mit | 2,308,968,507,594,210,000 | 31.051813 | 85 | 0.571613 | false | 4.024723 | false | false | false |
cambridgehackers/connectal | scripts/discover_tcp.py | 2 | 7174 | #!/usr/bin/env python
# Copyright (c) 2013-2015 Quanta Research Cambridge, Inc.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import sys
import os
import socket
import struct
import select
import time
import threading
import argparse
import netifaces
from adb import adb_commands
from adb import common
deviceAddresses = []
def ip2int(addr):
return struct.unpack("!I", socket.inet_aton(addr))[0]
def int2ip(addr):
return socket.inet_ntoa(struct.pack("!I", addr))
def connect_with_adb(ipaddr,port):
global deviceAddresses
device_serial = '%s:%d' % (ipaddr,port)
cnt = 0
while cnt < 5:
try:
connection = adb_commands.AdbCommands.ConnectDevice(serial=device_serial)
except:
#print 'discover_tcp: connection error to', device_serial
pass
else:
if 'hostname.txt' in connection.Shell('ls /mnt/sdcard/'):
name = connection.Shell('cat /mnt/sdcard/hostname.txt').strip()
connection.Close()
print('discover_tcp: ', ipaddr, name)
deviceAddresses[ipaddr] = name
return
else:
print('discover_tcp: ', ipaddr, " /mnt/sdcard/hostname.txt not found")
deviceAddresses[ipaddr] = ipaddr
return
cnt = cnt+1
def open_adb_socket(dest_addr,port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(0)
sock.connect_ex((dest_addr,port))
return sock
# non-Darwin version
def do_work_poll(start, end, port, get_hostname):
print("scanning "+int2ip(start)+" to "+int2ip(end))
connected = []
total = end-start
READ_ONLY = select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR
READ_WRITE = READ_ONLY | select.POLLOUT
poller = select.poll()
while (start <= end):
fd_map = {}
while (start <= end):
try:
s = open_adb_socket(int2ip(start),port)
except:
break
else:
fd_map[s.fileno()] = (start,s)
start = start+1
poller.register(s, READ_WRITE)
time.sleep(0.2)
events = poller.poll(0.1)
for fd,flag in events:
(addr,sock) = fd_map[fd]
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) == 0:
print('ADDCON', fd, int2ip(addr))
connected.append(int2ip(addr))
try:
fd_map_items = fd_map.iteritems()
except AttributeError:
fd_map_items = fd_map.items() # Python 3 compatibility
for fd,t in fd_map_items:
poller.unregister(t[1])
t[1].close()
sys.stdout.write("\r%d/%d" % (total-(end-start),total))
sys.stdout.flush()
print()
if get_hostname:
for c in connected:
connect_with_adb(c,port)
# Darwin version
def do_work_kqueue(start, end, port, get_hostname):
print("kqueue scanning "+int2ip(start)+" to "+int2ip(end))
connected = []
total = end-start
while (start <= end):
kq = select.kqueue()
fd_map = {}
kevents = []
while (start <= end):
try:
s = open_adb_socket(int2ip(start),port)
except:
break
else:
fd_map[s.fileno()] = (start,s)
start = start+1
kevents.append(select.kevent(s,filter=select.KQ_FILTER_WRITE))
kq.control(kevents,0,0)
time.sleep(0.2)
for k in kq.control([],len(kevents),0.1):
w = fd_map[k.ident][1]
addr = fd_map[w.fileno()][0]
if w.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) == 0:
print('ADDCON2', k.ident, w.fileno(), int2ip(addr), fd_map[w.fileno()])
connected.append(int2ip(addr))
try:
fd_map_items = fd_map.iteritems()
except AttributeError:
fd_map_items = fd_map.items() # Python 3 compatibility
for fd,t in fd_map_items:
t[1].close()
sys.stdout.write("\r%d/%d" % (total-(end-start),total))
sys.stdout.flush()
print()
if get_hostname:
for c in connected:
connect_with_adb(c,port)
argparser = argparse.ArgumentParser("Discover Zedboards on a network")
argparser.add_argument('-n', '--network', help='xxx.xxx.xxx.xxx/N')
argparser.add_argument('-p', '--port', default=5555, help='Port to probe')
argparser.add_argument('-g', '--get_hostname', default=True, help='Get hostname with adb')
def do_work(start,end,port,get_hostname):
if sys.platform == 'darwin':
do_work_kqueue(start,end,port,get_hostname)
else:
do_work_poll(start,end,port,get_hostname)
def detect_network(network=None, port=5555, get_hostname=True):
global deviceAddresses
deviceAddresses = {}
if network:
nw = network.split("/")
start = ip2int(nw[0])
if len(nw) != 2:
print('Usage: discover_tcp.py ipaddr/prefix_width')
sys.exit(-1)
end = start + (1 << (32-int(nw[1])) ) - 2
do_work(start+1,end,port,get_hostname)
else:
for ifc in netifaces.interfaces():
ifaddrs = netifaces.ifaddresses(ifc)
if netifaces.AF_INET in ifaddrs.keys():
af_inet = ifaddrs[netifaces.AF_INET]
for i in af_inet:
if i.get('addr') == '127.0.0.1':
print('skipping localhost')
else:
addr = ip2int(i.get('addr'))
netmask = ip2int(i.get('netmask'))
start = addr & netmask
end = start + (netmask ^ 0xffffffff)
start = start+1
end = end-1
print((int2ip(start), int2ip(end)))
do_work(start, end,port,get_hostname)
if __name__ == '__main__':
options = argparser.parse_args()
detect_network(options.network,options.port,options.get_hostname)
| mit | -3,697,450,608,753,739,000 | 34.514851 | 90 | 0.583914 | false | 3.730629 | false | false | false |
Trigition/Village | DataAggregator/AQS_Aggregator.py | 1 | 1036 | #!/usr/bin/env python
import requests
import pandas as pd
from FIPS_Reference import FIPS_Reference
# Load American POSTAL data
columns = ["country", "better_fips", "name", "state", "state_code", "county", "county_code", "subdivision",\
"subdivision_code", "latitude", "longitude", "accuracy"]
#data = pd.read_csv("US.txt", sep="\t", header=None, names=columns)
data = pd.read_table("US.txt", header=None, names=columns)
# print data.loc[data["state_code"] == 'CA']
print data[:1]
def get_data(row, year, param_code):
params = {}
params["Query Type"] = "rawData"
params["Output Format"] = "AQCVS"
params["Parameter Code"] = param_code
params["Begin Date"] = str(year) + "0101"
params["End Date"] = str(year + 1) + "0101"
params["State Code"] = FIPS_Reference[row['state_code']]
params["County Code"] = '%03d' % int(row['county_code'])
print params
r = requests.get("https://aqs.epa.gov/api", auth=("[email protected] ", "saffronfrog61"))
get_data(data[:1],0,0)
#print r.status_code
| apache-2.0 | 8,320,193,534,384,223,000 | 37.37037 | 108 | 0.649614 | false | 2.943182 | false | false | false |
mesheven/pyOCD | pyocd/test/test_regcache.py | 1 | 8907 | """
mbed CMSIS-DAP debugger
Copyright (c) 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pyocd.debug.cache import RegisterCache
from pyocd.debug.context import DebugContext
from pyocd.coresight.cortex_m import (
CortexM,
CORE_REGISTER,
register_name_to_index,
is_psr_subregister,
sysm_to_psr_mask
)
from pyocd.core import memory_map
from pyocd.utility import conversion
from pyocd.utility import mask
import pytest
import logging
@pytest.fixture(scope='function')
def regcache(mockcore):
return RegisterCache(DebugContext(mockcore))
# Copy of the register list without composite registers.
CORE_REGS_NO_COMPOSITES = CORE_REGISTER.copy()
CORE_REGS_NO_COMPOSITES.pop('cfbp')
CORE_REGS_NO_COMPOSITES.pop('xpsr')
CORE_REGS_NO_COMPOSITES.pop('iapsr')
CORE_REGS_NO_COMPOSITES.pop('eapsr')
CORE_REGS_NO_COMPOSITES.pop('iepsr')
# Appropriate modifiers for masked registers - others modified by adding 7
REG_MODIFIER = {
'apsr': 0x30010000,
'epsr': 0x01000C00,
}
def get_modifier(r):
return REG_MODIFIER.get(r, 7)
def get_expected_reg_value(r):
i = register_name_to_index(r)
if is_psr_subregister(i):
return 0x55555555 & sysm_to_psr_mask(i)
if i < 0:
i += 100
return i + 1
def get_expected_cfbp():
return ((get_expected_reg_value('control') << 24) |
(get_expected_reg_value('faultmask') << 16) |
(get_expected_reg_value('basepri') << 8) |
get_expected_reg_value('primask'))
def get_expected_xpsr():
return (get_expected_reg_value('apsr') |
get_expected_reg_value('ipsr') |
get_expected_reg_value('epsr'))
class TestRegisterCache:
def set_core_regs(self, mockcore, modify=False):
for r in CORE_REGS_NO_COMPOSITES:
if modify:
modifier = get_modifier(r)
else:
modifier = 0
mockcore.write_core_registers_raw([r], [get_expected_reg_value(r) + modifier])
assert mockcore.read_core_registers_raw([r]) == [get_expected_reg_value(r) + modifier]
def test_r_1(self, mockcore, regcache):
assert regcache.read_core_registers_raw(['r0']) == [0] # cache initial value of 0
mockcore.write_core_registers_raw(['r0'], [1234]) # modify reg behind the cache's back
assert mockcore.read_core_registers_raw(['r0']) == [1234] # verify modified reg
assert regcache.read_core_registers_raw(['r0']) == [0] # should return cached 0 value
regcache.invalidate() # explicitly invalidate cache
assert mockcore.read_core_registers_raw(['r0']) == [1234] # verify modified reg
assert regcache.read_core_registers_raw(['r0']) == [1234] # now should return updated 1234 value
def test_run_token(self, mockcore, regcache):
assert regcache.read_core_registers_raw(['r0']) == [0] # cache initial value of 0
mockcore.write_core_registers_raw(['r0'], [1234]) # modify reg behind the cache's back
assert mockcore.read_core_registers_raw(['r0']) == [1234] # verify modified reg
assert regcache.read_core_registers_raw(['r0']) == [0] # should return cached 0 value
mockcore.run_token += 1 # bump run token to cause cache to invalidate
assert regcache.read_core_registers_raw(['r0']) == [1234] # now should return updated 1234 value
def test_reading_from_core(self, mockcore, regcache):
self.set_core_regs(mockcore)
for r in CORE_REGS_NO_COMPOSITES:
assert regcache.read_core_registers_raw([r]) == [get_expected_reg_value(r)]
def test_read_cached(self, mockcore, regcache):
self.set_core_regs(mockcore)
# cache all regs
regcache.read_core_registers_raw(CORE_REGS_NO_COMPOSITES.values())
# modify regs in mock core
self.set_core_regs(mockcore, True)
# cache should return original unmodified values
for r in CORE_REGS_NO_COMPOSITES:
assert regcache.read_core_registers_raw([r]) == [get_expected_reg_value(r)]
def test_read_cfbp(self, mockcore, regcache):
self.set_core_regs(mockcore)
assert regcache.read_core_registers_raw(['cfbp', 'control', 'faultmask']) == [
get_expected_cfbp(), get_expected_reg_value('control'), get_expected_reg_value('faultmask')
]
def test_read_xpsr(self, mockcore, regcache):
self.set_core_regs(mockcore)
assert regcache.read_core_registers_raw(['xpsr', 'ipsr', 'apsr', 'eapsr']) == [
get_expected_xpsr(), get_expected_reg_value('ipsr'),
get_expected_reg_value('apsr'), get_expected_reg_value('eapsr')
]
def test_read_cached_cfbp(self, mockcore, regcache):
self.set_core_regs(mockcore)
# cache it
regcache.read_core_registers_raw(['cfbp'])
# modify behind the cache's back
mockcore.write_core_registers_raw(['control', 'primask'], [0x55, 0xaa])
# cache should return original value
assert regcache.read_core_registers_raw(['cfbp']) == [get_expected_cfbp()]
def test_read_cached_xpsr(self, mockcore, regcache):
self.set_core_regs(mockcore)
# cache it
regcache.read_core_registers_raw(['xpsr'])
# modify behind the cache's back
mockcore.write_core_registers_raw(['ipsr', 'apsr'], [0x22, 0x10000000])
# cache should return original value
assert regcache.read_core_registers_raw(['xpsr']) == [get_expected_xpsr()]
def test_write_1(self, mockcore, regcache):
self.set_core_regs(mockcore)
assert mockcore.read_core_registers_raw(['r0']) == [get_expected_reg_value('r0')]
assert regcache.read_core_registers_raw(['r0']) == [get_expected_reg_value('r0')]
regcache.write_core_registers_raw(['r0'], [1234])
assert mockcore.read_core_registers_raw(['r0']) == [1234]
assert regcache.read_core_registers_raw(['r0']) == [1234]
def test_write_regs(self, mockcore, regcache):
self.set_core_regs(mockcore)
for r in CORE_REGS_NO_COMPOSITES:
regcache.write_core_registers_raw([r], [get_expected_reg_value(r) + get_modifier(r)])
for r in CORE_REGS_NO_COMPOSITES:
assert mockcore.read_core_registers_raw([r]) == [get_expected_reg_value(r) + get_modifier(r)]
def test_write_cfbp(self, mockcore, regcache):
self.set_core_regs(mockcore)
assert mockcore.read_core_registers_raw(['cfbp']) == [get_expected_cfbp()]
regcache.write_core_registers_raw(['control', 'primask'], [3, 19])
assert mockcore.read_core_registers_raw(['control', 'primask', 'cfbp']) == [
3, 19,
((3 << 24) | (get_expected_reg_value('faultmask') << 16) |
(get_expected_reg_value('basepri') << 8) | 19)
]
def test_write_xpsr(self, mockcore, regcache):
self.set_core_regs(mockcore)
assert mockcore.read_core_registers_raw(['xpsr']) == [get_expected_xpsr()]
regcache.write_core_registers_raw(['iapsr'], [0x10000022])
assert mockcore.read_core_registers_raw(['ipsr', 'apsr', 'iapsr', 'xpsr']) == [
0x22, 0x10000000, 0x10000022,
0x10000022 | get_expected_reg_value('epsr')
]
def test_write_full_xpsr(self, mockcore, regcache):
self.set_core_regs(mockcore)
assert mockcore.read_core_registers_raw(['xpsr']) == [get_expected_xpsr()]
regcache.write_core_registers_raw(['xpsr'], [0xffffffff])
assert mockcore.read_core_registers_raw(['ipsr', 'apsr', 'epsr', 'xpsr']) == [
CortexM.IPSR_MASK, CortexM.APSR_MASK, CortexM.EPSR_MASK,
0xffffffff
]
def test_invalid_reg_r(self, regcache):
with pytest.raises(ValueError):
regcache.read_core_registers_raw([132423])
def test_invalid_reg_w(self, regcache):
with pytest.raises(ValueError):
regcache.write_core_registers_raw([132423], [1234])
def test_invalid_fpu_reg_r(self, mockcore, regcache):
mockcore.has_fpu = False
with pytest.raises(ValueError):
regcache.read_core_registers_raw(['s1'])
def test_invalid_fpu_reg_w(self, mockcore, regcache):
mockcore.has_fpu = False
with pytest.raises(ValueError):
regcache.write_core_registers_raw(['s1'], [1.234])
| apache-2.0 | -298,325,491,646,307,200 | 41.414286 | 105 | 0.639497 | false | 3.338456 | true | false | false |
ta2-1/pootle | tests/pootle_app/views.py | 1 | 2311 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
from django.utils.translation import get_language
from django.urls import reverse
from pootle.core.delegate import revision
from pootle_app.views.index.index import (
COOKIE_NAME, IndexView, WelcomeView)
from pootle_score.display import TopScoreDisplay
@pytest.mark.django_db
def test_view_index(client, rf, request_users, language0):
user = request_users["user"]
client.login(
username=user.username,
password=request_users["password"])
response = client.get("")
if not user.is_authenticated:
assert response.status_code == 200
assert isinstance(response.context["view"], WelcomeView)
else:
assert response.status_code == 302
assert response["Location"] == reverse("pootle-projects-browse")
request = rf.get("")
request.user = user
request.COOKIES[COOKIE_NAME] = language0.code
response = IndexView.as_view()(request=request)
if not user.is_authenticated:
assert response.status_code == 200
else:
assert response.status_code == 302
assert response["Location"] == reverse(
"pootle-language-browse",
kwargs=dict(language_code=language0.code))
@pytest.mark.django_db
def test_view_welcome(client, member, system, project_set):
response = client.get(reverse('pootle-home'))
assert isinstance(response.context["top_scorers"], TopScoreDisplay)
assert isinstance(response.context["view"], WelcomeView)
assert response.context["view"].request_lang == get_language()
assert (
response.context["view"].project_set.directory
== project_set.directory)
assert (
response.context["view"].revision
== revision.get(project_set.directory.__class__)(
project_set.directory).get(key="stats"))
assert (
response.context["view"].cache_key
== (
"%s.%s.%s"
% (response.wsgi_request.user.username,
response.context["view"].revision,
get_language())))
| gpl-3.0 | -6,459,012,648,656,102,000 | 34.015152 | 77 | 0.667676 | false | 3.99827 | false | false | false |
tiagoprn/experiments | micro/stack_trace_capturing_for_exception.py | 1 | 1729 | import inspect
import sys
import traceback
'''
This prints the full stack trace for the current exception
(from where "inspect.getinnerframes(sys.exc_info()[2])", below, is called).
It can be useful, e.g., to understand code that makes many calls
or to get information for debugging exceptions.
'''
# reference:
# https://gist.github.com/diosmosis/1148066
def get_exception_info():
# this variable is never used. it exists so we can detect if a frame is
# referencing this specific function.
__lgw_marker_local__ = 0
value_to_string = str
frame_template = ' File "%s", line %i, in %s\n %s\n'
log_file = []
# iterate through the frames in reverse order so we print the
# most recent frame first
frames = inspect.getinnerframes(sys.exc_info()[2])
for frame_info in reversed(frames):
f_locals = frame_info[0].f_locals
# if there's a local variable named __lgw_marker_local__, we assume
# the frame is from a call of this function, 'wrapper', and we skip
# it. Printing these frames won't help determine the cause of an
# exception, so skipping it reduces clutter.
if '__lgw_marker_local__' in f_locals:
continue
# log the frame information
log_file.append(frame_template %
(frame_info[1], frame_info[2], frame_info[3], frame_info[4][0].lstrip()))
# log every local variable of the frame
for k, v in f_locals.items():
log_file.append(' %s = %s\n' % (k, value_to_string(v)))
log_file.append('\n')
return ''.join(log_file)
try:
print('Hey!')
raise Exception('forced error!')
except:
exc_info = get_exception_info()
print(exc_info)
| mit | 3,846,056,117,046,317,600 | 25.19697 | 85 | 0.631579 | false | 3.647679 | false | false | false |
pwdyson/inflect.py | tests/test_inflections.py | 2 | 8786 | import os
import io
import six
from nose.tools import eq_, assert_not_equal, raises
import inflect
def is_eq(p, a, b):
return (
p.compare(a, b)
or p.plnounequal(a, b)
or p.plverbequal(a, b)
or p.pladjequal(a, b)
)
def test_many():
p = inflect.engine()
data = get_data()
for line in data:
if "TODO:" in line:
continue
try:
singular, rest = line.split("->", 1)
except ValueError:
continue
singular = singular.strip()
rest = rest.strip()
try:
plural, comment = rest.split("#", 1)
except ValueError:
plural = rest.strip()
comment = ""
try:
mod_plural, class_plural = plural.split("|", 1)
mod_plural = mod_plural.strip()
class_plural = class_plural.strip()
except ValueError:
mod_plural = class_plural = plural.strip()
if "verb" in comment.lower():
is_nv = "_V"
elif "noun" in comment.lower():
is_nv = "_N"
else:
is_nv = ""
p.classical(all=0, names=0)
mod_PL_V = p.plural_verb(singular)
mod_PL_N = p.plural_noun(singular)
mod_PL = p.plural(singular)
if is_nv == "_V":
mod_PL_val = mod_PL_V
elif is_nv == "_N":
mod_PL_val = mod_PL_N
else:
mod_PL_val = mod_PL
p.classical(all=1)
class_PL_V = p.plural_verb(singular)
class_PL_N = p.plural_noun(singular)
class_PL = p.plural(singular)
if is_nv == "_V":
class_PL_val = class_PL_V
elif is_nv == "_N":
class_PL_val = class_PL_N
else:
class_PL_val = class_PL
check_all(
p, is_nv, singular, mod_PL_val, class_PL_val, mod_plural, class_plural
)
def check_all(p, is_nv, singular, mod_PL_val, class_PL_val, mod_plural, class_plural):
eq_(mod_plural, mod_PL_val)
eq_(class_plural, class_PL_val)
eq_(
is_eq(p, singular, mod_plural) in ("s:p", "p:s", "eq"),
True,
msg="is_eq({},{}) == {} != {}".format(
singular, mod_plural, is_eq(p, singular, mod_plural), "s:p, p:s or eq"
),
)
eq_(
is_eq(p, mod_plural, singular) in ("p:s", "s:p", "eq"),
True,
msg="is_eq({},{}) == {} != {}".format(
mod_plural, singular, is_eq(p, mod_plural, singular), "s:p, p:s or eq"
),
)
eq_(is_eq(p, singular, class_plural) in ("s:p", "p:s", "eq"), True)
eq_(is_eq(p, class_plural, singular) in ("p:s", "s:p", "eq"), True)
assert_not_equal(singular, "")
    eq_(mod_PL_val, mod_PL_val if class_PL_val else "%s|%s" % (mod_PL_val, class_PL_val))
if is_nv != "_V":
eq_(
p.singular_noun(mod_plural, 1),
singular,
msg="p.singular_noun({}) == {} != {}".format(
mod_plural, p.singular_noun(mod_plural, 1), singular
),
)
eq_(
p.singular_noun(class_plural, 1),
singular,
msg="p.singular_noun({}) == {} != {}".format(
class_plural, p.singular_noun(class_plural, 1), singular
),
)
def test_def():
p = inflect.engine()
p.defnoun("kin", "kine")
p.defnoun("(.*)x", "$1xen")
p.defverb("foobar", "feebar", "foobar", "feebar", "foobars", "feebar")
p.defadj("red", "red|gules")
eq_(p.no("kin", 0), "no kine", msg="kin -> kine (user defined)...")
eq_(p.no("kin", 1), "1 kin")
eq_(p.no("kin", 2), "2 kine")
eq_(p.no("regex", 0), "no regexen", msg="regex -> regexen (user defined)")
eq_(p.plural("foobar", 2), "feebar", msg="foobar -> feebar (user defined)...")
eq_(p.plural("foobars", 2), "feebar")
eq_(p.plural("red", 0), "red", msg="red -> red...")
eq_(p.plural("red", 1), "red")
eq_(p.plural("red", 2), "red")
p.classical(all=True)
eq_(p.plural("red", 0), "red", msg="red -> gules...")
eq_(p.plural("red", 1), "red")
eq_(p.plural("red", 2), "gules")
def test_ordinal():
p = inflect.engine()
eq_(p.ordinal(0), "0th", msg="0 -> 0th...")
eq_(p.ordinal(1), "1st")
eq_(p.ordinal(2), "2nd")
eq_(p.ordinal(3), "3rd")
eq_(p.ordinal(4), "4th")
eq_(p.ordinal(5), "5th")
eq_(p.ordinal(6), "6th")
eq_(p.ordinal(7), "7th")
eq_(p.ordinal(8), "8th")
eq_(p.ordinal(9), "9th")
eq_(p.ordinal(10), "10th")
eq_(p.ordinal(11), "11th")
eq_(p.ordinal(12), "12th")
eq_(p.ordinal(13), "13th")
eq_(p.ordinal(14), "14th")
eq_(p.ordinal(15), "15th")
eq_(p.ordinal(16), "16th")
eq_(p.ordinal(17), "17th")
eq_(p.ordinal(18), "18th")
eq_(p.ordinal(19), "19th")
eq_(p.ordinal(20), "20th")
eq_(p.ordinal(21), "21st")
eq_(p.ordinal(22), "22nd")
eq_(p.ordinal(23), "23rd")
eq_(p.ordinal(24), "24th")
eq_(p.ordinal(100), "100th")
eq_(p.ordinal(101), "101st")
eq_(p.ordinal(102), "102nd")
eq_(p.ordinal(103), "103rd")
eq_(p.ordinal(104), "104th")
eq_(p.ordinal("zero"), "zeroth", msg="zero -> zeroth...")
eq_(p.ordinal("one"), "first")
eq_(p.ordinal("two"), "second")
eq_(p.ordinal("three"), "third")
eq_(p.ordinal("four"), "fourth")
eq_(p.ordinal("five"), "fifth")
eq_(p.ordinal("six"), "sixth")
eq_(p.ordinal("seven"), "seventh")
eq_(p.ordinal("eight"), "eighth")
eq_(p.ordinal("nine"), "ninth")
eq_(p.ordinal("ten"), "tenth")
eq_(p.ordinal("eleven"), "eleventh")
eq_(p.ordinal("twelve"), "twelfth")
eq_(p.ordinal("thirteen"), "thirteenth")
eq_(p.ordinal("fourteen"), "fourteenth")
eq_(p.ordinal("fifteen"), "fifteenth")
eq_(p.ordinal("sixteen"), "sixteenth")
eq_(p.ordinal("seventeen"), "seventeenth")
eq_(p.ordinal("eighteen"), "eighteenth")
eq_(p.ordinal("nineteen"), "nineteenth")
eq_(p.ordinal("twenty"), "twentieth")
eq_(p.ordinal("twenty-one"), "twenty-first")
eq_(p.ordinal("twenty-two"), "twenty-second")
eq_(p.ordinal("twenty-three"), "twenty-third")
eq_(p.ordinal("twenty-four"), "twenty-fourth")
eq_(p.ordinal("one hundred"), "one hundredth")
eq_(p.ordinal("one hundred and one"), "one hundred and first")
eq_(p.ordinal("one hundred and two"), "one hundred and second")
eq_(p.ordinal("one hundred and three"), "one hundred and third")
eq_(p.ordinal("one hundred and four"), "one hundred and fourth")
def test_prespart():
p = inflect.engine()
eq_(p.present_participle("sees"), "seeing", msg="sees -> seeing...")
eq_(p.present_participle("eats"), "eating")
eq_(p.present_participle("bats"), "batting")
eq_(p.present_participle("hates"), "hating")
eq_(p.present_participle("spies"), "spying")
eq_(p.present_participle("skis"), "skiing")
def test_inflect_on_tuples():
p = inflect.engine()
eq_(p.inflect("plural('egg', ('a', 'b', 'c'))"), "eggs")
eq_(p.inflect("plural('egg', ['a', 'b', 'c'])"), "eggs")
eq_(p.inflect("plural_noun('egg', ('a', 'b', 'c'))"), "eggs")
eq_(p.inflect("plural_adj('a', ('a', 'b', 'c'))"), "some")
eq_(p.inflect("plural_verb('was', ('a', 'b', 'c'))"), "were")
eq_(p.inflect("singular_noun('eggs', ('a', 'b', 'c'))"), "eggs")
eq_(p.inflect("an('error', ('a', 'b', 'c'))"), "('a', 'b', 'c') error")
eq_(p.inflect("This is not a function(name)"), "This is not a function(name)")
def test_inflect_on_builtin_constants():
p = inflect.engine()
eq_(p.inflect("Plural of False is plural('False')"), "Plural of False is Falses")
eq_(p.inflect("num(%d, False) plural('False')" % 10), " Falses")
eq_(p.inflect("plural('True')"), "Trues")
eq_(p.inflect("num(%d, True) plural('False')" % 10), "10 Falses")
eq_(p.inflect("num(%d, %r) plural('False')" % (10, True)), "10 Falses")
eq_(p.inflect("plural('None')"), "Nones")
eq_(p.inflect("num(%d, %r) plural('True')" % (10, None)), "10 Trues")
def test_inflect_keyword_args():
p = inflect.engine()
eq_(
p.inflect("number_to_words(1234, andword='')"),
"one thousand, two hundred thirty-four",
)
eq_(
p.inflect("number_to_words(1234, andword='plus')"),
"one thousand, two hundred plus thirty-four",
)
eq_(
p.inflect("number_to_words('555_1202', group=1, zero='oh')"),
"five, five, five, one, two, oh, two",
)
@raises(NameError)
def test_NameError_in_strings():
p = inflect.engine()
eq_(p.inflect("plural('two')"), "twoes")
p.inflect("plural(two)")
def get_data():
filename = os.path.join(os.path.dirname(__file__), "inflections.txt")
with io.open(filename) as strm:
return list(map(six.text_type.strip, strm))
| agpl-3.0 | -575,220,977,912,059,460 | 31.18315 | 86 | 0.526519 | false | 2.854451 | true | false | false |
hirofumi0810/asr_preprocessing | timit/path.py | 1 | 5615 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Prepare for making dataset (TIMIT corpus)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, basename, splitext
from glob import glob
class Path(object):
"""Prepare for making dataset.
Args:
data_path (string): path to TIMIT corpus
config_path (string): path to config dir
htk_save_path (string, optional): path to htk files
"""
def __init__(self, data_path, config_path, htk_save_path=None):
self.data_path = data_path
self.config_path = config_path
self.htk_save_path = htk_save_path
# Paths to TIMIT data
self.train_data_path = join(data_path, 'train')
self.test_data_path = join(data_path, 'test')
self.__make()
def __make(self):
self._wav_paths = {}
self._text_paths = {}
self._word_paths = {}
self._phone_paths = {}
self._utt2wav = {}
for data_type in ['train', 'dev', 'test']:
self._wav_paths[data_type] = []
self._text_paths[data_type] = []
self._word_paths[data_type] = []
self._phone_paths[data_type] = []
data_path = self.train_data_path if data_type == 'train' else self.test_data_path
if data_type != 'train':
# Load speaker list
speaker_list = []
with open(join(self.config_path, data_type + '_speaker_list.txt'), 'r') as f:
for line in f:
line = line.strip()
speaker_list.append(line)
for file_path in glob(join(data_path, '*/*/*')):
region, speaker, file_name = file_path.split('/')[-3:]
utt_index = basename(file_name)
ext = splitext(file_name)[1]
if data_type == 'train':
# if utt_index[0: 2] in ['sx', 'si', 'sa']:
if utt_index[0: 2] in ['sx', 'si']:
if ext == '.wav':
self._wav_paths[data_type].append(file_path)
self._utt2wav[speaker + '_' +
utt_index] = file_path
elif ext == '.txt':
self._text_paths[data_type].append(file_path)
elif ext == '.wrd':
self._word_paths[data_type].append(file_path)
elif ext == '.phn':
self._phone_paths[data_type].append(file_path)
else:
if speaker not in speaker_list:
continue
if utt_index[0: 2] in ['sx', 'si']:
if ext == '.wav':
self._wav_paths[data_type].append(file_path)
self._utt2wav[speaker + '_' +
utt_index] = file_path
elif ext == '.txt':
self._text_paths[data_type].append(file_path)
elif ext == '.wrd':
self._word_paths[data_type].append(file_path)
elif ext == '.phn':
self._phone_paths[data_type].append(file_path)
def utt2wav(self, utt_name):
return self._utt2wav[utt_name]
def wav(self, data_type):
"""Get paths to wav files.
Args:
data_type (string): train or dev or test
Returns:
list of paths to wav files
"""
return sorted(self._wav_paths[data_type])
def htk(self, data_type):
"""Get paths to htk files.
Args:
data_type (string): train or dev or test
Returns:
htk_paths (list): paths to htk files
"""
if self.htk_save_path is None:
raise ValueError('Set path to htk files.')
return [p for p in glob(join(self.htk_save_path, data_type, '*/*.htk'))]
# NOTE: ex.) timit/htk/data_type/speaker/speaker_utt-index.htk
def trans(self, data_type):
"""Get paths to sentence-level transcription files.
Args:
data_type (string): train or dev or test
Returns:
list of paths to transcription files
"""
return sorted(self._text_paths[data_type])
def word(self, data_type):
"""Get paths to word-level transcription files.
Args:
data_type (string): train or dev or test
Returns:
list of paths to transcription files
"""
return sorted(self._word_paths[data_type])
def phone(self, data_type):
"""Get paths to phone-level transcription files.
Args:
data_type (string): train or dev or test
Returns:
list of paths to transcription files
"""
return sorted(self._phone_paths[data_type])
if __name__ == '__main__':
path = Path(data_path='/n/sd8/inaguma/corpus/timit/data',
config_path='./config',
htk_save_path='/n/sd8/inaguma/corpus/timit/htk')
for data_type in ['train', 'dev', 'test']:
print('===== %s ======' % data_type)
print(len(path.wav(data_type=data_type)))
print(len(path.htk(data_type=data_type)))
print(len(path.trans(data_type=data_type)))
print(len(path.word(data_type=data_type)))
print(len(path.phone(data_type=data_type)))
| mit | 1,845,563,186,827,868,700 | 34.314465 | 93 | 0.495993 | false | 3.8966 | true | false | false |
clintonblackmore/enchanting2 | event_loop.py | 1 | 7932 | """The event loop triggers and runs all the scripts, as appropriate"""
import gevent
import gevent.pool
try:
from gevent.lock import BoundedSemaphore
except:
print "Enchanting2 requires gevent v1.0 or newer"
raise
import factory
import server
import script
port = 8000
def is_trigger_key(top_block, media_and_event):
"""True if user pressed key hat block is waiting for."""
key_name = top_block.arguments[0].as_string()
media_env, event = media_and_event
return media_env.does_key_event_match(key_name, event)
def does_match_broadcast(top_block, message_string):
message_to_start_script = top_block.arguments[0].as_string()
if message_to_start_script == "any message":
return True
else:
return message_string == message_to_start_script
class EventLoop(object):
def __init__(self, media_environment):
self.active_scripts = gevent.pool.Group()
self.sleeping_scripts = []
self.project = None
self.media_environment = media_environment
# Get the script_lock before adding or removing scripts
self.script_lock = BoundedSemaphore(1)
self.clients = []
def queue(self, script, sprite):
"""Queues up a script"""
# Scripts usually start with a hat block and do nothing until it is
# activated
with self.script_lock:
self.sleeping_scripts.append((script, sprite))
def run_forever(self):
"""Runs all the scripts in the project"""
# First, fire up the webserver
server.ClientConnection.event_loop = self
gevent.spawn(server.run_web_servers, port)
# This is the main loop
# It checks for events (from pygame)
# and it updates the screen every so often
while True:
self.media_environment.check_for_events(self)
gevent.sleep(1.0 / 30) # max 30 fps
self.media_environment.draw(self.project)
def trigger_quit_event(self):
"""Anything we need to do before quitting? Do it now!"""
print "Quitting"
pass
def trigger_key_press(self, media_and_event):
"""A key was pressed"""
self.trigger_scripts("receiveKey", is_trigger_key, media_and_event)
def trigger_green_flag(self):
"""The green flag was pressed / the project is starting"""
self.stop_all_scripts()
self.trigger_scripts("receiveGo")
def stop_all_scripts(self):
"""The stop button was pressed -- halt execution of all scripts"""
if self.project:
self.project.stop_all_scripts()
def broadcast_message(self, message_string):
"""A message was broadcast"""
self.trigger_scripts(
"receiveMessage", does_match_broadcast, message_string)
def trigger_scripts(self, function_name_match, callback=None, data=None):
"""Trigger all sleeping scripts that match specified conditions"""
with self.script_lock:
# We can't remove items from the list in-place,
# so we create a new list of sleeping scripts
new_sleeping_scripts = []
# print "sleeping scripts: %s, active scripts: %s" % \
# (len(self.sleeping_scripts), len(self.active_scripts))
for script, sprite in self.sleeping_scripts:
top_block = script.top_block()
if top_block and top_block.function_name == function_name_match \
and (callback is None or callback(top_block, data)):
# activate this script
greenlet = gevent.spawn(self.run_script, script, sprite)
self.active_scripts.add(greenlet)
else:
# return script to sleeping list
new_sleeping_scripts.append((script, sprite))
self.sleeping_scripts = new_sleeping_scripts
def run_script(self, script, sprite):
"""Runs a script, and queues it up to run again if needs be"""
script.run(sprite)
if script.starts_on_trigger():
self.queue(script.from_start(), sprite)
def purge_all_scripts(self):
"""Reset everything -- purge all running and queued scripts"""
self.stop_all_scripts()
with self.script_lock:
self.active_scripts.kill()
self.sleeping_scripts = []
def load_project_from_disk(self, filename):
"""Loads a project from a file, and starts executing it"""
self.purge_all_scripts()
self.project = factory.deserialize_file(filename, self)
self.media_environment.setup_for_project(self.project)
# gevent.spawn(self.trigger_green_flag)
def load_project_from_xml(self, xml):
"""Loads a file from xml"""
self.purge_all_scripts()
self.project = factory.deserialize_xml(xml, self)
self.media_environment.setup_for_project(self.project)
# gevent.spawn(self.trigger_green_flag)
def client_connected(self, client):
self.clients.append(client)
print "Now serving %s clients; %s just connected" \
% (len(self.clients), client)
# Send the client a copy of the current world (if there is one)
if self.project:
message = "load_project %s" % factory.xml_for_object(self.project)
client.ws.send(message)
def client_disconnected(self, client):
self.clients.remove(client)
print "Now serving %s clients; %s just disconnected" \
% (len(self.clients), client)
def message_from_client(self, message, client):
print "message_from_client"
print message
split = message.find(" ")
if split == -1:
print "Unrecognized message: %s" % message
command = message[:split]
if command == "load_project":
xml = message[split + 1:]
self.load_project_from_xml(xml)
self.send_message_to_other_clients(message, client)
elif command == "green_flag_press":
self.trigger_green_flag()
elif command == "stop_sign_press":
self.stop_all_scripts()
elif command == "execute_block":
self.execute_block(message, split, client)
else:
print "Unrecognized command: %s" % command
def send_message_to_other_clients(self, message, source_client=None):
"""Send a message to all web clients, except the source"""
for client in self.clients:
if client != source_client:
client.ws.send(message)
def execute_block(self, message, split, client):
"""Executed block requested by user and return result"""
# payload is the index of the sprite,
# and the xml of the block to run
split2 = message.find(" ", split + 1)
sprite_index = int(message[split + 1:split2])
sprite = self.project.sprite_from_index(sprite_index)
xml = message[split2 + 1:]
# run the block and return the result
greenlet = gevent.spawn(self.execute_block_and_return_result,
sprite, xml, client)
self.active_scripts.add(greenlet)
def execute_block_and_return_result(self, sprite, xml_for_block, client):
"""Runs a block and tells client the result"""
# We seem to get command blocks wrapped up in scripts
# and reporter blocks as blocks
print xml_for_block
obj = factory.deserialize_xml(xml_for_block)
if isinstance(obj, script.Script):
result = obj.run(sprite)
# result is almost certainly 'None'
else:
empty_script = script.Script()
result = obj.evaluate(sprite, empty_script)
if result is not None:
result_xml = factory.xml_for_object(result)
client.ws.send("execute_block_result %s" % result_xml)
| agpl-3.0 | 172,016,229,269,100,580 | 36.415094 | 81 | 0.611321 | false | 4.078149 | false | false | false |
paulproteus/oppia-test-3 | controllers/editor.py | 1 | 16511 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the Oppia editor view."""
__author__ = '[email protected] (Sean Lip)'
import json
from apps.exploration.models import Exploration
from apps.parameter.models import Parameter
from apps.state.models import AnswerHandlerInstance
from apps.state.models import Content
from apps.state.models import Rule
from apps.state.models import State
from apps.statistics.models import Statistics
from apps.statistics.models import STATS_ENUMS
from apps.widget.models import InteractiveWidget
from controllers.base import BaseHandler
from controllers.base import require_editor
from controllers.base import require_user
import feconf
import utils
from google.appengine.api import users
EDITOR_MODE = 'editor'
def get_state_for_frontend(state, exploration):
"""Returns a representation of the given state for the frontend."""
state_repr = state.as_dict()
modified_state_dict = state.internals_as_dict(human_readable_dests=True)
# TODO(sll): The following is for backwards-compatibility and should be
# deleted later.
rules = {}
for handler in state_repr['widget']['handlers']:
rules[handler['name']] = handler['rules']
for item in rules[handler['name']]:
if item['name'] == 'Default':
item['rule'] = 'Default'
else:
item['rule'] = InteractiveWidget.get(
state.widget.widget_id).get_readable_name(
handler['name'], item['name']
)
state_repr['widget']['rules'] = rules
state_repr['widget']['id'] = state_repr['widget']['widget_id']
state_repr['yaml'] = utils.yaml_from_dict(modified_state_dict)
return state_repr
def get_exploration_stats(exploration):
"""Returns a dict with stats for the given exploration."""
num_visits = Statistics.get_exploration_stats(
STATS_ENUMS.exploration_visited, exploration.id)
num_completions = Statistics.get_exploration_stats(
STATS_ENUMS.exploration_completed, exploration.id)
answers = Statistics.get_exploration_stats(
STATS_ENUMS.rule_hit, exploration.id)
state_counts = Statistics.get_exploration_stats(
STATS_ENUMS.state_hit, exploration.id)
state_stats = {}
for state_id in answers.keys():
state_stats[state_id] = {
'name': answers[state_id]['name'],
'count': state_counts[state_id]['count'],
'rule_stats': {},
}
all_rule_count = 0
state_count = state_counts[state_id]['count']
for rule in answers[state_id]['rules'].keys():
state_stats[state_id]['rule_stats'][rule] = answers[state_id]['rules'][rule]
rule_count = 0
for _, count in answers[state_id]['rules'][rule]['answers']:
rule_count += count
all_rule_count += count
state_stats[state_id]['rule_stats'][rule]['chartData'] = [
['', 'This rule', 'Other answers'],
['', rule_count, state_count - rule_count]]
state_stats[state_id]['no_answer_chartdata'] = [
['', 'No answer', 'Answer given'],
['', state_count - all_rule_count, all_rule_count]]
return {
'num_visits': num_visits,
'num_completions': num_completions,
'state_stats': state_stats,
}
class NewExploration(BaseHandler):
"""Creates a new exploration."""
@require_user
def post(self):
"""Handles POST requests."""
payload = json.loads(self.request.get('payload'))
title = payload.get('title')
category = payload.get('category')
if not title:
raise self.InvalidInputException('No title supplied.')
if not category:
raise self.InvalidInputException('No category chosen.')
yaml = self.request.get('yaml')
if yaml and feconf.ALLOW_YAML_FILE_UPLOAD:
exploration = Exploration.create_from_yaml(
yaml_file=yaml, user=self.user, title=title, category=category)
else:
exploration = Exploration.create(
self.user, title=title, category=category)
self.render_json({'explorationId': exploration.id})
class ForkExploration(BaseHandler):
"""Forks an existing exploration."""
@require_user
def post(self):
"""Handles POST requests."""
payload = json.loads(self.request.get('payload'))
exploration_id = payload.get('exploration_id')
forked_exploration = Exploration.get(exploration_id)
if not forked_exploration.is_demo_exploration():
raise self.InvalidInputException('Exploration cannot be forked.')
# Get the demo exploration as a YAML file, so that new states can be
# created.
yaml = forked_exploration.as_yaml()
title = 'Copy of %s' % forked_exploration.title
category = forked_exploration.category
exploration = Exploration.create_from_yaml(
yaml_file=yaml, user=self.user, title=title, category=category)
self.render_json({'explorationId': exploration.id})
class ExplorationPage(BaseHandler):
"""Page describing a single exploration."""
@require_editor
def get(self, unused_exploration):
"""Handles GET requests."""
self.values.update({
'nav_mode': EDITOR_MODE,
})
self.render_template('editor/editor_exploration.html')
class ExplorationHandler(BaseHandler):
"""Page with editor data for a single exploration."""
@require_editor
def get(self, exploration):
"""Gets the question name and state list for a question page."""
state_list = {}
for state_key in exploration.states:
state = state_key.get()
state_list[state.id] = get_state_for_frontend(state, exploration)
parameters = []
for param in exploration.parameters:
parameters.append({
'name': param.name, 'obj_type': param.obj_type,
'description': param.description, 'values': param.values
})
self.values.update({
'exploration_id': exploration.id,
'init_state_id': exploration.init_state.get().id,
'is_public': exploration.is_public,
'image_id': exploration.image_id,
'category': exploration.category,
'title': exploration.title,
'editors': [editor.nickname() for editor in exploration.editors],
'states': state_list,
'parameters': parameters,
})
statistics = get_exploration_stats(exploration)
self.values.update({
'num_visits': statistics['num_visits'],
'num_completions': statistics['num_completions'],
'state_stats': statistics['state_stats'],
})
improvements = Statistics.get_top_ten_improvable_states([exploration.id])
self.values.update({
'imp': improvements,
})
self.render_json(self.values)
@require_editor
def post(self, exploration):
"""Adds a new state to the given exploration."""
payload = json.loads(self.request.get('payload'))
state_name = payload.get('state_name')
if not state_name:
raise self.InvalidInputException('Please specify a state name.')
state = exploration.add_state(state_name)
self.render_json(state.as_dict())
@require_editor
def put(self, exploration):
"""Updates properties of the given exploration."""
payload = json.loads(self.request.get('payload'))
is_public = payload.get('is_public')
category = payload.get('category')
title = payload.get('title')
image_id = payload.get('image_id')
editors = payload.get('editors')
parameters = payload.get('parameters')
if is_public:
exploration.is_public = True
if category:
exploration.category = category
if title:
exploration.title = title
if 'image_id' in payload:
exploration.image_id = None if image_id == 'null' else image_id
if editors:
if exploration.editors and self.user == exploration.editors[0]:
exploration.editors = []
for email in editors:
editor = users.User(email=email)
exploration.editors.append(editor)
else:
raise self.UnauthorizedUserException(
'Only the exploration owner can add new collaborators.')
if parameters:
exploration.parameters = [
Parameter(
name=item['name'], obj_type=item['obj_type'],
description=item['description'], values=item['values']
) for item in parameters
]
exploration.put()
@require_editor
def delete(self, exploration):
"""Deletes the given exploration."""
exploration.delete()
class ExplorationDownloadHandler(BaseHandler):
"""Downloads an exploration as a YAML file."""
@require_editor
def get(self, exploration):
"""Handles GET requests."""
filename = 'oppia-%s' % utils.to_ascii(exploration.title)
if not filename:
filename = feconf.DEFAULT_FILE_NAME
self.response.headers['Content-Type'] = 'text/plain'
self.response.headers['Content-Disposition'] = (
'attachment; filename=%s.txt' % filename)
# TODO(sll): Cache the YAML file.
self.response.write(exploration.as_yaml())
class StateHandler(BaseHandler):
"""Handles state transactions."""
@require_editor
def put(self, exploration, state):
"""Saves updates to a state."""
payload = json.loads(self.request.get('payload'))
yaml_file = payload.get('yaml_file')
if yaml_file and feconf.ALLOW_YAML_FILE_UPLOAD:
# The user has uploaded a YAML file. Process only this action.
state = State.modify_using_dict(
exploration, state,
utils.dict_from_yaml(yaml_file))
self.render_json(get_state_for_frontend(state, exploration))
return
state_name = payload.get('state_name')
param_changes = payload.get('param_changes')
interactive_widget = payload.get('interactive_widget')
interactive_params = payload.get('interactive_params')
interactive_rulesets = payload.get('interactive_rulesets')
sticky_interactive_widget = payload.get('sticky_interactive_widget')
content = payload.get('content')
unresolved_answers = payload.get('unresolved_answers')
if 'state_name' in payload:
# Replace the state name with this one, after checking validity.
if state_name == feconf.END_DEST:
raise self.InvalidInputException('Invalid state name: END')
exploration.rename_state(state, state_name)
if 'param_changes' in payload:
state.param_changes = []
for param_change in param_changes:
instance = exploration.get_param_change_instance(
param_change['name'])
instance.values = param_change['values']
state.param_changes.append(instance)
if interactive_widget:
state.widget.widget_id = interactive_widget
if interactive_params:
state.widget.params = interactive_params
if sticky_interactive_widget is not None:
state.widget.sticky = sticky_interactive_widget
if interactive_rulesets:
ruleset = interactive_rulesets['submit']
utils.recursively_remove_key(ruleset, u'$$hashKey')
state.widget.handlers = [AnswerHandlerInstance(
name='submit', rules=[])]
            # These rules belong to the state's handler, so append them to it below.
state_ruleset = state.widget.handlers[0].rules
# TODO(yanamal): Do additional calculations here to get the
# parameter changes, if necessary.
for rule_ind in range(len(ruleset)):
rule = ruleset[rule_ind]
state_rule = Rule()
state_rule.name = rule.get('name')
state_rule.inputs = rule.get('inputs')
state_rule.dest = rule.get('dest')
state_rule.feedback = rule.get('feedback')
# Generate the code to be executed.
if rule['rule'] == 'Default':
# This is the default rule.
assert rule_ind == len(ruleset) - 1
state_rule.name = 'Default'
state_ruleset.append(state_rule)
continue
# Normalize the params here, then store them.
classifier_func = state_rule.name.replace(' ', '')
first_bracket = classifier_func.find('(')
mutable_rule = rule['rule']
params = classifier_func[first_bracket + 1: -1].split(',')
for index, param in enumerate(params):
if param not in rule['inputs']:
raise self.InvalidInputException(
'Parameter %s could not be replaced.' % param)
typed_object = state.get_typed_object(mutable_rule, param)
# TODO(sll): Make the following check more robust.
if (not isinstance(rule['inputs'][param], basestring) or
'{{' not in rule['inputs'][param] or
'}}' not in rule['inputs'][param]):
normalized_param = typed_object.normalize(
rule['inputs'][param])
else:
normalized_param = rule['inputs'][param]
if normalized_param is None:
raise self.InvalidInputException(
'%s has the wrong type. Please replace it with a '
'%s.' % (rule['inputs'][param],
typed_object.__name__))
state_rule.inputs[param] = normalized_param
state_ruleset.append(state_rule)
if content:
state.content = [Content(type=item['type'], value=item['value'])
for item in content]
if 'unresolved_answers' in payload:
state.unresolved_answers = {}
for answer, count in unresolved_answers.iteritems():
if count > 0:
state.unresolved_answers[answer] = count
state.put()
self.render_json(get_state_for_frontend(state, exploration))
@require_editor
def delete(self, exploration, state):
"""Deletes the state with id state_id."""
# Do not allow deletion of initial states.
if exploration.init_state == state.key:
raise self.InvalidInputException(
'Cannot delete initial state of an exploration.')
# Find all dests in this exploration which equal the state to be
# deleted, and change them to loop back to their containing state.
for state_key in exploration.states:
origin_state = state_key.get()
changed = False
for handler in origin_state.widget.handlers:
for rule in handler.rules:
if rule.dest == state.id:
rule.dest = origin_state.id
changed = True
if changed:
origin_state.put()
# Delete the state with id state_id.
state.key.delete()
exploration.states.remove(state.key)
exploration.put()
| apache-2.0 | 7,582,681,898,049,399,000 | 36.020179 | 88 | 0.589849 | false | 4.442023 | false | false | false |
trondkr/model2roms | IOBry.py | 1 | 45054 | import os
import time
from datetime import datetime
from netCDF4 import Dataset
_author_ = 'Trond Kristiansen'
_email_ = '[email protected]'
_created_ = datetime(2009, 3, 2)
_modified_ = datetime(2014, 4, 7)
_version_ = "0.1.0"
_status_ = "Development"
def help():
"""
    This function generates a BRY file from scratch. The variables are all created
    for East, West, North, and South. Variables include:
    salt, temp, u, v, ubar, vbar, zeta, and time. The time dimension for each variable is
    ocean_time, given in seconds since 1948-01-01 (matching the units written below).
This file is netcdf CF compliant and follows the setup for variable names and units given in the ROMS source
file: Data/ROMS/CDL/bry_unlimit.cdl
(also see: https://www.myroms.org/forum/viewtopic.php?f=30&t=1450&p=5209&hilit=cf+compliant#p5209)
This function is called from clim2bry.py.
To check the BRY file for CF compliancy: http://titania.badc.rl.ac.uk/cgi-bin/cf-checker.pl?cfversion=1.0
"""
def createBryFile(confM2R):
if (confM2R.output_format == 'NETCDF4'):
myzlib = True
else:
myzlib = False
grdROMS = confM2R.grdROMS
if os.path.exists(confM2R.bry_name):
os.remove(confM2R.bry_name)
print(('\n=>Creating initial Boundary (BRY) file {}'.format(confM2R.bry_name)))
f1 = Dataset(confM2R.bry_name, mode='w', format=confM2R.output_format)
f1.title = "Boundary forcing file (BRY) used for forcing of the ROMS model"
f1.description = "Created for the {} grid file".format(confM2R.roms_grid_path)
f1.grdFile = "{}".format(confM2R.roms_grid_path)
f1.history = 'Created ' + time.ctime(time.time())
f1.source = "{} ({})".format(confM2R.author_name, confM2R.author_email)
f1.type = "File in {} format created using MODEL2ROMS".format(confM2R.output_format)
f1.link = "https://github.com/trondkr/model2roms"
f1.Conventions = "CF-1.0"
""" Define dimensions """
f1.createDimension('xi_rho', grdROMS.xi_rho)
f1.createDimension('eta_rho', grdROMS.eta_rho)
f1.createDimension('xi_u', grdROMS.xi_u)
f1.createDimension('eta_u', grdROMS.eta_u)
f1.createDimension('xi_v', grdROMS.xi_v)
f1.createDimension('eta_v', grdROMS.eta_v)
f1.createDimension('xi_psi', grdROMS.xi_psi)
f1.createDimension('eta_psi', grdROMS.eta_psi)
f1.createDimension('ocean_time', None)
f1.createDimension('s_rho', len(grdROMS.s_rho))
f1.createDimension('s_w', len(grdROMS.s_w))
vnc = f1.createVariable('lon_rho', 'd', ('eta_rho', 'xi_rho',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = 'Longitude of RHO-points'
vnc.units = 'degree_east'
vnc.standard_name = 'longitude'
vnc[:, :] = grdROMS.lon_rho
vnc = f1.createVariable('lat_rho', 'd', ('eta_rho', 'xi_rho',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = 'Latitude of RHO-points'
vnc.units = 'degree_north'
vnc.standard_name = 'latitude'
vnc[:, :] = grdROMS.lat_rho
vnc = f1.createVariable('lon_u', 'd', ('eta_u', 'xi_u',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = 'Longitude of U-points'
vnc.units = 'degree_east'
vnc.standard_name = 'longitude'
vnc[:, :] = grdROMS.lon_u
vnc = f1.createVariable('lat_u', 'd', ('eta_u', 'xi_u',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = 'Latitude of U-points'
vnc.units = 'degree_north'
vnc.standard_name = 'latitude'
vnc[:, :] = grdROMS.lat_u
vnc = f1.createVariable('lon_v', 'd', ('eta_v', 'xi_v',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = 'Longitude of V-points'
vnc.units = 'degree_east'
vnc.standard_name = 'longitude'
vnc[:, :] = grdROMS.lon_v
vnc = f1.createVariable('lat_v', 'd', ('eta_v', 'xi_v',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = 'Latitude of V-points'
vnc.units = 'degree_north'
vnc.standard_name = 'latitude'
vnc[:, :] = grdROMS.lat_v
vnc = f1.createVariable('lat_psi', 'd', ('eta_psi', 'xi_psi',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = 'Latitude of PSI-points'
vnc.units = 'degree_north'
vnc.standard_name = 'latitude'
vnc[:, :] = grdROMS.lat_psi
vnc = f1.createVariable('lon_psi', 'd', ('eta_psi', 'xi_psi',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = 'Longitude of PSI-points'
vnc.units = 'degree_east'
vnc.standard_name = 'longitude'
vnc[:, :] = grdROMS.lon_psi
vnc = f1.createVariable('h', 'd', ('eta_rho', 'xi_rho',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = "Bathymetry at RHO-points"
vnc.units = "meter"
vnc.coordinates = "lat_rho lon_rho"
vnc.field = "bath, scalar"
vnc[:, :] = grdROMS.h
vnc = f1.createVariable('s_rho', 'd', ('s_rho',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = "S-coordinate at RHO-points"
vnc.valid_min = -1.
vnc.valid_max = 0.
if grdROMS.vtransform == 2:
vnc.standard_name = "ocean_s_coordinate_g2"
vnc.formula_terms = "s: s_rho C: Cs_r eta: zeta depth: h depth_c: hc"
if grdROMS.vtransform == 1:
vnc.standard_name = "ocean_s_coordinate_g1"
vnc.formula_terms = "s: s_rho C: Cs_r eta: zeta depth: h depth_c: hc"
vnc.field = "s_rho, scalar"
vnc[:] = grdROMS.s_rho
vnc = f1.createVariable('s_w', 'd', ('s_w',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = "S-coordinate at W-points"
vnc.valid_min = -1.
vnc.valid_max = 0.
if grdROMS.vtransform == 2:
vnc.standard_name = "ocean_s_coordinate_g2"
vnc.formula_terms = "s: s_w C: Cs_w eta: zeta depth: h depth_c: hc"
if grdROMS.vtransform == 1:
vnc.standard_name = "ocean_s_coordinate_g1"
vnc.formula_terms = "s: s_w C: Cs_w eta: zeta depth: h depth_c: hc"
vnc.field = "s_w, scalar"
vnc[:] = grdROMS.s_w
vnc = f1.createVariable('Cs_r', 'd', ('s_rho',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = "S-coordinate stretching curves at RHO-points"
vnc.valid_min = -1.
vnc.valid_max = 0.
vnc.field = "Cs_rho, scalar"
vnc[:] = grdROMS.Cs_rho
vnc = f1.createVariable('Cs_w', 'd', ('s_w',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = "S-coordinate stretching curves at W-points"
vnc.valid_min = -1.
vnc.valid_max = 0.
vnc.field = "Cs_w, scalar"
vnc[:] = grdROMS.Cs_w
vnc = f1.createVariable('hc', 'd')
    vnc.long_name = "S-coordinate parameter, critical depth"
vnc.units = "meter"
vnc[:] = grdROMS.hc
vnc = f1.createVariable('z_r', 'd', ('s_rho', 'eta_rho', 'xi_rho',), zlib=myzlib, fill_value=grdROMS.fillval)
    vnc.long_name = "Sigma layer to depth matrix"
vnc.units = "meter"
vnc[:, :, :] = grdROMS.z_r
vnc = f1.createVariable('Tcline', 'd')
vnc.long_name = "S-coordinate surface/bottom layer width"
vnc.units = "meter"
vnc[:] = grdROMS.tcline
vnc = f1.createVariable('theta_s', 'd')
vnc.long_name = "S-coordinate surface control parameter"
vnc[:] = grdROMS.theta_s
vnc = f1.createVariable('theta_b', 'd')
vnc.long_name = "S-coordinate bottom control parameter"
vnc[:] = grdROMS.theta_b
vnc = f1.createVariable('angle', 'd', ('eta_rho', 'xi_rho',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = "angle between xi axis and east"
vnc.units = "radian"
v_time = f1.createVariable('ocean_time', 'd', ('ocean_time',), zlib=myzlib, fill_value=grdROMS.fillval)
v_time.long_name = 'seconds since 1948-01-01 00:00:00'
v_time.units = 'seconds since 1948-01-01 00:00:00'
v_time.field = 'time, scalar, series'
if (confM2R.ocean_indata_type == "NORESM"):
v_time.calendar = 'noleap'
else:
v_time.calendar = 'standard'
v_temp_west = f1.createVariable('temp_west', 'f', ('ocean_time', 's_rho', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_temp_west.long_name = "potential temperature western boundary condition"
v_temp_west.units = "Celsius"
v_temp_west.field = "temp_west, scalar, series"
#v_temp_west.missing_value = grdROMS.fillval
v_temp_west.time = "ocean_time"
v_temp_east = f1.createVariable('temp_east', 'f', ('ocean_time', 's_rho', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_temp_east.long_name = "potential temperature eastern boundary condition"
v_temp_east.units = "Celsius"
v_temp_east.field = "temp_east, scalar, series"
#v_temp_east.missing_value = grdROMS.fillval
v_temp_east.time = "ocean_time"
v_temp_south = f1.createVariable('temp_south', 'f', ('ocean_time', 's_rho', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_temp_south.long_name = "potential temperature southern boundary condition"
v_temp_south.units = "Celsius"
v_temp_south.field = "temp_south, scalar, series"
#v_temp_south.missing_value = grdROMS.fillval
v_temp_south.time = "ocean_time"
v_temp_north = f1.createVariable('temp_north', 'f', ('ocean_time', 's_rho', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_temp_north.long_name = "potential temperature northern boundary condition"
v_temp_north.units = "Celsius"
v_temp_north.field = "temp_north, scalar, series"
#v_temp_north.missing_value = grdROMS.fillval
v_temp_north.time = "ocean_time"
v_salt_west = f1.createVariable('salt_west', 'f', ('ocean_time', 's_rho', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_salt_west.long_name = "salinity western boundary condition"
v_salt_west.field = "salt_west, scalar, series"
#v_salt_west.missing_value = grdROMS.fillval
v_salt_west.time = "ocean_time"
v_salt_east = f1.createVariable('salt_east', 'f', ('ocean_time', 's_rho', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_salt_east.long_name = "salinity eastern boundary condition"
v_salt_east.field = "salt_east, scalar, series"
#v_salt_east.missing_value = grdROMS.fillval
v_salt_east.time = "ocean_time"
v_salt_south = f1.createVariable('salt_south', 'f', ('ocean_time', 's_rho', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_salt_south.long_name = "salinity southern boundary condition"
v_salt_south.field = "salt_south, scalar, series"
#v_salt_south.missing_value = grdROMS.fillval
v_salt_south.time = "ocean_time"
v_salt_north = f1.createVariable('salt_north', 'f', ('ocean_time', 's_rho', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_salt_north.long_name = "salinity northern boundary condition"
v_salt_north.field = "salt_north, scalar, series"
#v_salt_north.missing_value = grdROMS.fillval
v_salt_north.time = "ocean_time"
v_ssh_west = f1.createVariable('zeta_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_ssh_west.long_name = "free-surface western boundary condition"
v_ssh_west.units = "meter"
v_ssh_west.field = "zeta_west, scalar, series"
#v_ssh_west.missing_value = grdROMS.fillval
v_ssh_west.time = "ocean_time"
v_ssh_east = f1.createVariable('zeta_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_ssh_east.long_name = "free-surface eastern boundary condition"
v_ssh_east.units = "meter"
v_ssh_east.field = "zeta_east, scalar, series"
#v_ssh_east.missing_value = grdROMS.fillval
v_ssh_east.time = "ocean_time"
v_ssh_south = f1.createVariable('zeta_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_ssh_south.long_name = "free-surface southern boundary condition"
v_ssh_south.units = "meter"
v_ssh_south.field = "zeta_south, scalar, series"
#v_ssh_south.missing_value = grdROMS.fillval
v_ssh_south.time = "ocean_time"
v_ssh_north = f1.createVariable('zeta_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_ssh_north.long_name = "free-surface northern boundary condition"
v_ssh_north.units = "meter"
v_ssh_north.field = "zeta_north, scalar, series"
#v_ssh_north.missing_value = grdROMS.fillval
v_ssh_north.time = "ocean_time"
v_u_west = f1.createVariable('u_west', 'f', ('ocean_time', 's_rho', 'eta_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_u_west.long_name = "3D u-momentum western boundary condition"
v_u_west.units = "meter second-1"
v_u_west.field = "u_west, scalar, series"
#v_u_west.missing_value = grdROMS.fillval
v_u_west.time = "ocean_time"
v_u_east = f1.createVariable('u_east', 'f', ('ocean_time', 's_rho', 'eta_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_u_east.long_name = "3D u-momentum eastern boundary condition"
v_u_east.units = "meter second-1"
v_u_east.field = "u_east, scalar, series"
#v_u_east.missing_value = grdROMS.fillval
v_u_east.time = "ocean_time"
v_u_south = f1.createVariable('u_south', 'f', ('ocean_time', 's_rho', 'xi_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_u_south.long_name = "3D u-momentum southern boundary condition"
v_u_south.units = "meter second-1"
v_u_south.field = "u_south, scalar, series"
#v_u_south.missing_value = grdROMS.fillval
v_u_south.time = "ocean_time"
v_u_north = f1.createVariable('u_north', 'f', ('ocean_time', 's_rho', 'xi_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_u_north.long_name = "3D u-momentum northern boundary condition"
v_u_north.units = "meter second-1"
v_u_north.field = "u_north, scalar, series"
#v_u_north.missing_value = grdROMS.fillval
v_u_north.time = "ocean_time"
v_v_west = f1.createVariable('v_west', 'f', ('ocean_time', 's_rho', 'eta_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_v_west.long_name = "3D v-momentum western boundary condition"
v_v_west.units = "meter second-1"
v_v_west.field = "v_west, scalar, series"
#v_v_west.missing_value = grdROMS.fillval
v_v_west.time = "ocean_time"
v_v_east = f1.createVariable('v_east', 'f', ('ocean_time', 's_rho', 'eta_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_v_east.long_name = "3D v-momentum eastern boundary condition"
v_v_east.units = "meter second-1"
v_v_east.field = "v_east, scalar, series"
#v_v_east.missing_value = grdROMS.fillval
v_v_east.time = "ocean_time"
v_v_south = f1.createVariable('v_south', 'f', ('ocean_time', 's_rho', 'xi_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_v_south.long_name = "3D v-momentum southern boundary condition"
v_v_south.units = "meter second-1"
v_v_south.field = "v_south, scalar, series"
#v_v_south.missing_value = grdROMS.fillval
v_v_south.time = "ocean_time"
v_v_north = f1.createVariable('v_north', 'f', ('ocean_time', 's_rho', 'xi_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_v_north.long_name = "3D v-momentum northern boundary condition"
v_v_north.units = "meter second-1"
v_v_north.field = "v_north, scalar, series"
#v_v_north.missing_value = grdROMS.fillval
v_v_north.time = "ocean_time"
v_vbar_west = f1.createVariable('vbar_west', 'f', ('ocean_time', 'eta_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_vbar_west.long_name = "2D v-momentum western boundary condition"
v_vbar_west.units = "meter second-1"
v_vbar_west.field = "vbar_west, scalar, series"
#v_vbar_west.missing_value = grdROMS.fillval
v_vbar_west.time = "ocean_time"
v_vbar_east = f1.createVariable('vbar_east', 'f', ('ocean_time', 'eta_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_vbar_east.long_name = "2D v-momentum eastern boundary condition"
v_vbar_east.units = "meter second-1"
v_vbar_east.field = "vbar_east, scalar, series"
#v_vbar_east.missing_value = grdROMS.fillval
v_vbar_east.time = "ocean_time"
v_vbar_south = f1.createVariable('vbar_south', 'f', ('ocean_time', 'xi_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_vbar_south.long_name = "2D v-momentum southern boundary condition"
v_vbar_south.units = "meter second-1"
v_vbar_south.field = "vbar_south, scalar, series"
#v_vbar_south.missing_value = grdROMS.fillval
v_vbar_south.time = "ocean_time"
v_vbar_north = f1.createVariable('vbar_north', 'f', ('ocean_time', 'xi_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_vbar_north.long_name = "2D v-momentum northern boundary condition"
v_vbar_north.units = "meter second-1"
v_vbar_north.field = "vbar_north, scalar, series"
#v_vbar_north.missing_value = grdROMS.fillval
v_vbar_north.time = "ocean_time"
v_ubar_west = f1.createVariable('ubar_west', 'f', ('ocean_time', 'eta_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_ubar_west.long_name = "2D u-momentum western boundary condition"
v_ubar_west.units = "meter second-1"
v_ubar_west.field = "ubar_west, scalar, series"
# v_ubar_west.missing_value = grdROMS.fillval
v_ubar_west.time = "ocean_time"
v_ubar_east = f1.createVariable('ubar_east', 'f', ('ocean_time', 'eta_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_ubar_east.long_name = "2D u-momentum eastern boundary condition"
v_ubar_east.units = "meter second-1"
v_ubar_east.field = "ubar_east, scalar, series"
#v_ubar_east.missing_value = grdROMS.fillval
v_ubar_east.time = "ocean_time"
v_ubar_south = f1.createVariable('ubar_south', 'f', ('ocean_time', 'xi_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_ubar_south.long_name = "2D u-momentum southern boundary condition"
v_ubar_south.units = "meter second-1"
v_ubar_south.field = "ubar_south, scalar, series"
#v_ubar_south.missing_value = grdROMS.fillval
v_ubar_south.time = "ocean_time"
v_ubar_north = f1.createVariable('ubar_north', 'f', ('ocean_time', 'xi_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_ubar_north.long_name = "2D u-momentum northern boundary condition"
v_ubar_north.units = "meter second-1"
v_ubar_north.field = "ubar_north, scalar, series"
#v_ubar_north.missing_value = grdROMS.fillval
v_ubar_north.time = "ocean_time"
if confM2R.write_bcg:
directions=['east','west','north','south']
dimens=['eta_rho','eta_rho','xi_rho','xi_rho']
lndirections=['eastern','western','northern','southern']
for currentdir,lndirection,dim in zip(directions, lndirections,dimens):
currentvar='O3_c_'+currentdir
longname="carbonate/total dissolved inorganic carbon {} boundary condition".format(lndirection)
O3_c = f1.createVariable(currentvar, 'f', ('ocean_time', 's_rho', dim,), zlib=myzlib,
fill_value=grdROMS.fillval)
O3_c.long_name = longname
O3_c.field = "{}, scalar, series".format(currentvar)
O3_c.units = "mmol C/m^3"
currentvar='O3_TA_'+currentdir
longname="carbonate/bioalkalinity {} boundary condition".format(lndirection)
O3_ta = f1.createVariable(currentvar, 'f', ('ocean_time', 's_rho', dim,), zlib=myzlib,
fill_value=grdROMS.fillval)
O3_ta.long_name = longname
O3_ta.field = "{}, scalar, series".format(currentvar)
O3_ta.units = "umol/kg"
currentvar='N1_p_'+currentdir
longname="phosphate/phosphorus {} boundary condition".format(lndirection)
N1_p = f1.createVariable(currentvar, 'f', ('ocean_time','s_rho', dim,), zlib=myzlib,
fill_value=grdROMS.fillval)
N1_p.long_name = longname
N1_p.field = "{}, scalar, series".format(currentvar)
N1_p.units = "mmol P/m^3"
currentvar='N3_n_'+currentdir
longname="nitrate/nitrogen {} boundary condition".format(lndirection)
N3_n = f1.createVariable(currentvar, 'f', ('ocean_time','s_rho', dim,), zlib=myzlib,
fill_value=grdROMS.fillval)
N3_n.long_name = longname
N3_n.field = "{}, scalar, series".format(currentvar)
N3_n.units = "mmol N/m^3"
currentvar='N5_s_'+currentdir
longname="silicate/silicate {} boundary condition".format(lndirection)
N5_s = f1.createVariable(currentvar, 'f', ('ocean_time','s_rho', dim,), zlib=myzlib,
fill_value=grdROMS.fillval)
N5_s.long_name = longname
N5_s.field = "{}, scalar, series".format(currentvar)
N5_s.units = "mmol Si/m^3"
currentvar='O2_o_'+currentdir
longname="oxygen/oxygen {} boundary condition".format(lndirection)
O2_o = f1.createVariable(currentvar, 'f', ('ocean_time', 's_rho',dim,), zlib=myzlib,
fill_value=grdROMS.fillval)
O2_o.long_name = longname
O2_o.field = "{}, scalar, series".format(currentvar)
O2_o.units = "mmol O_2/m^3"
if confM2R.write_ice:
ageice_west = f1.createVariable('ageice_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
ageice_west.long_name = "time-averaged age of the ice western boundary conditions"
ageice_west.units = "years"
ageice_west.time = "ocean_time"
ageice_west.field = "ice age, scalar, series"
#ageice_west.missing_value = grdROMS.fillval
ageice_east = f1.createVariable('ageice_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
ageice_east.long_name = "time-averaged age of the ice eastern boundary conditions"
ageice_east.units = "years"
ageice_east.time = "ocean_time"
ageice_east.field = "ice age, scalar, series"
#ageice_east.missing_value = grdROMS.fillval
ageice_south = f1.createVariable('ageice_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
ageice_south.long_name = "time-averaged age of the ice southern boundary conditions"
ageice_south.units = "years"
ageice_south.time = "ocean_time"
ageice_south.field = "ice age, scalar, series"
#ageice_south.missing_value = grdROMS.fillval
ageice_north = f1.createVariable('ageice_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
ageice_north.long_name = "time-averaged age of the ice northern boundary conditions"
ageice_north.units = "years"
ageice_north.time = "ocean_time"
ageice_north.field = "ice age, scalar, series"
#ageice_north.missing_value = grdROMS.fillval
# ----------------------------------------
uice_west = f1.createVariable('uice_west', 'f', ('ocean_time', 'eta_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
uice_west.long_name = "time-averaged age of the u-component of ice velocity western boundary conditions"
uice_west.units = "meter second-1"
uice_west.time = "ocean_time"
uice_west.field = "u-component of ice velocity, scalar, series"
#uice_west.missing_value = grdROMS.fillval
uice_east = f1.createVariable('uice_east', 'f', ('ocean_time', 'eta_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
uice_east.long_name = "time-averaged age of the u-component of ice velocity eastern boundary conditions"
uice_east.units = "meter second-1"
uice_east.time = "ocean_time"
uice_east.field = "u-component of ice velocity, scalar, series"
#uice_east.missing_value = grdROMS.fillval
uice_south = f1.createVariable('uice_south', 'f', ('ocean_time', 'xi_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
uice_south.long_name = "time-averaged age of the u-component of ice velocity southern boundary conditions"
uice_south.units = "meter second-1"
uice_south.time = "ocean_time"
uice_south.field = "u-component of ice velocity, scalar, series"
#uice_south.missing_value = grdROMS.fillval
uice_north = f1.createVariable('uice_north', 'f', ('ocean_time', 'xi_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
uice_north.long_name = "time-averaged age of the u-component of ice velocity northern boundary conditions"
uice_north.units = "meter second-1"
uice_north.time = "ocean_time"
uice_north.field = "u-component of ice velocity, scalar, series"
#uice_north.missing_value = grdROMS.fillval
# ----------------------------------------
vice_west = f1.createVariable('vice_west', 'f', ('ocean_time', 'eta_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
vice_west.long_name = "time-averaged age of the v-component of ice velocity western boundary conditions"
vice_west.units = "meter second-1"
        vice_west.time = "ocean_time"
vice_west.field = "v-component of ice velocity, scalar, series"
#vice_west.missing_value = grdROMS.fillval
vice_east = f1.createVariable('vice_east', 'f', ('ocean_time', 'eta_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
vice_east.long_name = "time-averaged age of the v-component of ice velocity eastern boundary conditions"
vice_east.units = "meter second-1"
vice_east.time = "ocean_time"
vice_east.field = "v-component of ice velocity, scalar, series"
#vice_east.missing_value = grdROMS.fillval
vice_south = f1.createVariable('vice_south', 'f', ('ocean_time', 'xi_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
vice_south.long_name = "time-averaged age of the v-component of ice velocity southern boundary conditions"
vice_south.units = "meter second-1"
vice_south.time = "ocean_time"
vice_south.field = "v-component of ice velocity, scalar, series"
#vice_south.missing_value = grdROMS.fillval
vice_north = f1.createVariable('vice_north', 'f', ('ocean_time', 'xi_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
        vice_north.long_name = "time-averaged age of the v-component of ice velocity northern boundary conditions"
vice_north.units = "meter second-1"
vice_north.time = "ocean_time"
vice_north.field = "v-component of ice velocity, scalar, series"
#vice_north.missing_value = grdROMS.fillval
# ----------------------------------------
aice_west = f1.createVariable('aice_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
aice_west.long_name = "time-averaged fraction of cell covered by ice western boundary conditions"
aice_west.units = "%"
aice_west.time = "ocean_time"
aice_west.field = "ice concentration, scalar, series"
#aice_west.missing_value = grdROMS.fillval
aice_east = f1.createVariable('aice_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
aice_east.long_name = "time-averaged fraction of cell covered by ice eastern boundary conditions"
aice_east.units = "%"
aice_east.time = "ocean_time"
aice_east.field = "ice concentration, scalar, series"
#aice_east.missing_value = grdROMS.fillval
aice_south = f1.createVariable('aice_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
aice_south.long_name = "time-averaged fraction of cell covered by ice southern boundary conditions"
aice_south.units = "%"
aice_south.time = "ocean_time"
aice_south.field = "ice concentration, scalar, series"
#aice_south.missing_value = grdROMS.fillval
aice_north = f1.createVariable('aice_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
aice_north.long_name = "time-averaged fraction of cell covered by ice northern boundary conditions"
aice_north.units = "%"
aice_north.time = "ocean_time"
aice_north.field = "ice concentration, scalar, series"
#aice_north.missing_value = grdROMS.fillval
# ----------------------------------------
hice_west = f1.createVariable('hice_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
hice_west.long_name = "time-averaged ice thickness in cell western boundary conditions"
hice_west.units = "meter"
hice_west.time = "ocean_time"
hice_west.field = "ice thickness, scalar, series"
#hice_west.missing_value = grdROMS.fillval
hice_east = f1.createVariable('hice_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
hice_east.long_name = "time-averaged ice thickness in cell eastern boundary conditions"
hice_east.units = "meter"
hice_east.time = "ocean_time"
hice_east.field = "ice thickness, scalar, series"
#hice_east.missing_value = grdROMS.fillval
hice_south = f1.createVariable('hice_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
hice_south.long_name = "time-averaged ice thickness in cell southern boundary conditions"
hice_south.units = "meter"
hice_south.time = "ocean_time"
hice_south.field = "ice thickness, scalar, series"
#hice_south.missing_value = grdROMS.fillval
hice_north = f1.createVariable('hice_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
hice_north.long_name = "time-averaged ice thickness in cell northern boundary conditions"
hice_north.units = "meter"
hice_north.time = "ocean_time"
hice_north.field = "ice thickness, scalar, series"
#hice_north.missing_value = grdROMS.fillval
# ----------------------------------------
snow_thick_west = f1.createVariable('snow_thick_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
        snow_thick_west.long_name = "time-averaged snow thickness in cell western boundary conditions"
snow_thick_west.units = "meter"
snow_thick_west.time = "ocean_time"
snow_thick_west.field = "snow thickness, scalar, series"
#snow_thick_west.missing_value = grdROMS.fillval
snow_thick_east = f1.createVariable('snow_thick_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
        snow_thick_east.long_name = "time-averaged snow thickness in cell eastern boundary conditions"
snow_thick_east.units = "meter"
snow_thick_east.time = "ocean_time"
snow_thick_east.field = "snow thickness, scalar, series"
#snow_thick_east.missing_value = grdROMS.fillval
snow_thick_south = f1.createVariable('snow_thick_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
        snow_thick_south.long_name = "time-averaged snow thickness in cell southern boundary conditions"
snow_thick_south.units = "meter"
snow_thick_south.time = "ocean_time"
snow_thick_south.field = "snow thickness, scalar, series"
#snow_thick_south.missing_value = grdROMS.fillval
snow_thick_north = f1.createVariable('snow_thick_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
        snow_thick_north.long_name = "time-averaged snow thickness in cell northern boundary conditions"
snow_thick_north.units = "meter"
snow_thick_north.time = "ocean_time"
snow_thick_north.field = "snow thickness, scalar, series"
#snow_thick_north.missing_value = grdROMS.fillval
# ----------------------------------------
ti_west = f1.createVariable('ti_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
ti_west.long_name = "time-averaged interior ice temperature cell western boundary conditions"
ti_west.units = "degrees Celcius"
ti_west.time = "ocean_time"
ti_west.field = "interior temperature, scalar, series"
#ti_west.missing_value = grdROMS.fillval
ti_east = f1.createVariable('ti_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
ti_east.long_name = "time-averaged interior ice temperature eastern boundary conditions"
ti_east.units = "degrees Celcius"
ti_east.time = "ocean_time"
ti_east.field = "interior temperature, scalar, series"
#ti_east.missing_value = grdROMS.fillval
ti_south = f1.createVariable('ti_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
ti_south.long_name = "time-averaged interior ice temperature southern boundary conditions"
ti_south.units = "degrees Celcius"
ti_south.time = "ocean_time"
ti_south.field = "interior temperature, scalar, series"
#ti_south.missing_value = grdROMS.fillval
ti_north = f1.createVariable('ti_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
ti_north.long_name = "time-averaged interior ice temperature northern boundary conditions"
ti_north.units = "degrees Celcius"
ti_north.time = "ocean_time"
ti_north.field = "interior temperature, scalar, series"
#ti_north.missing_value = grdROMS.fillval
# ----------------------------------------
sfwat_west = f1.createVariable('sfwat_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sfwat_west.long_name = "time-averaged surface melt water thickness on ice western boundary conditions"
sfwat_west.units = "meter"
sfwat_west.time = "ocean_time"
sfwat_west.field = "melt water thickness, scalar, series"
#sfwat_west.missing_value = grdROMS.fillval
sfwat_east = f1.createVariable('sfwat_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sfwat_east.long_name = "time-averaged surface melt water thickness on ice eastern boundary conditions"
sfwat_east.units = "meter"
sfwat_east.time = "ocean_time"
sfwat_east.field = "melt water thickness, scalar, series"
#sfwat_east.missing_value = grdROMS.fillval
sfwat_south = f1.createVariable('sfwat_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sfwat_south.long_name = "time-averaged surface melt water thickness on ice southern boundary conditions"
sfwat_south.units = "meter"
sfwat_south.time = "ocean_time"
sfwat_south.field = "melt water thickness, scalar, series"
#sfwat_south.missing_value = grdROMS.fillval
sfwat_north = f1.createVariable('sfwat_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sfwat_north.long_name = "time-averaged surface melt water thickness on ice northern boundary conditions"
sfwat_north.units = "meter"
sfwat_north.time = "ocean_time"
sfwat_north.field = "melt water thickness, scalar, series"
#sfwat_north.missing_value = grdROMS.fillval
# ----------------------------------------
tisrf_west = f1.createVariable('tisrf_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
        tisrf_west.long_name = "time-averaged temperature of ice surface western boundary conditions"
tisrf_west.units = "degrees Celcius"
tisrf_west.time = "ocean_time"
tisrf_west.field = "surface temperature, scalar, series"
#tisrf_west.missing_value = grdROMS.fillval
tisrf_east = f1.createVariable('tisrf_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
tisrf_east.long_name = "time-averaged temperature of ice surface eastern boundary conditions"
tisrf_east.units = "degrees Celcius"
tisrf_east.time = "ocean_time"
tisrf_east.field = "surface temperature, scalar, series"
#tisrf_east.missing_value = grdROMS.fillval
tisrf_south = f1.createVariable('tisrf_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
tisrf_south.long_name = "time-averaged temperature of ice surface southern boundary conditions"
tisrf_south.units = "degrees Celcius"
tisrf_south.time = "ocean_time"
tisrf_south.field = "surface temperature, scalar, series"
#tisrf_south.missing_value = grdROMS.fillval
tisrf_north = f1.createVariable('tisrf_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
tisrf_north.long_name = "time-averaged temperature of ice surface northern boundary conditions"
tisrf_north.units = "degrees Celcius"
tisrf_north.time = "ocean_time"
tisrf_north.field = "surface temperature, scalar, series"
#tisrf_north.missing_value = grdROMS.fillval
# ----------------------------------------
sig11_west = f1.createVariable('sig11_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
        sig11_west.long_name = "time-averaged internal ice stress 11 component western boundary conditions"
sig11_west.units = "Newton meter-1"
sig11_west.time = "ocean_time"
sig11_west.field = "ice stress 11, scalar, series"
#sig11_west.missing_value = grdROMS.fillval
sig11_east = f1.createVariable('sig11_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sig11_east.long_name = "time-averaged internal ice stress 11 component eastern boundary conditions"
sig11_east.units = "Newton meter-1"
sig11_east.time = "ocean_time"
sig11_east.field = "ice stress 11, scalar, series"
#sig11_east.missing_value = grdROMS.fillval
sig11_south = f1.createVariable('sig11_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
        sig11_south.long_name = "time-averaged internal ice stress 11 component southern boundary conditions"
sig11_south.units = "Newton meter-1"
sig11_south.time = "ocean_time"
sig11_south.field = "ice stress 11, scalar, series"
#sig11_south.missing_value = grdROMS.fillval
sig11_north = f1.createVariable('sig11_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sig11_north.long_name = "time-averaged internal ice stress 11 component northern boundary conditions"
sig11_north.units = "Newton meter-1"
sig11_north.time = "ocean_time"
sig11_north.field = "ice stress 11, scalar, series"
#sig11_north.missing_value = grdROMS.fillval
# ----------------------------------------
sig12_west = f1.createVariable('sig12_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
        sig12_west.long_name = "time-averaged internal ice stress 12 component western boundary conditions"
sig12_west.units = "Newton meter-1"
sig12_west.time = "ocean_time"
sig12_west.field = "ice stress 12, scalar, series"
#sig12_west.missing_value = grdROMS.fillval
sig12_east = f1.createVariable('sig12_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sig12_east.long_name = "time-averaged internal ice stress 12 component eastern boundary conditions"
sig12_east.units = "Newton meter-1"
sig12_east.time = "ocean_time"
sig12_east.field = "ice stress 12, scalar, series"
#sig12_east.missing_value = grdROMS.fillval
sig12_south = f1.createVariable('sig12_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
        sig12_south.long_name = "time-averaged internal ice stress 12 component southern boundary conditions"
sig12_south.units = "Newton meter-1"
sig12_south.time = "ocean_time"
sig12_south.field = "ice stress 12, scalar, series"
#sig12_south.missing_value = grdROMS.fillval
sig12_north = f1.createVariable('sig12_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sig12_north.long_name = "time-averaged internal ice stress 12 component northern boundary conditions"
sig12_north.units = "Newton meter-1"
sig12_north.time = "ocean_time"
sig12_north.field = "ice stress 12, scalar, series"
#sig12_north.missing_value = grdROMS.fillval
# ----------------------------------------
sig22_west = f1.createVariable('sig22_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
        sig22_west.long_name = "time-averaged internal ice stress 22 component western boundary conditions"
sig22_west.units = "Newton meter-1"
sig22_west.time = "ocean_time"
sig22_west.field = "ice stress 22, scalar, series"
#sig22_west.missing_value = grdROMS.fillval
sig22_east = f1.createVariable('sig22_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sig22_east.long_name = "time-averaged internal ice stress 22 component eastern boundary conditions"
sig22_east.units = "Newton meter-1"
sig22_east.time = "ocean_time"
sig22_east.field = "ice stress 22, scalar, series"
#sig22_east.missing_value = grdROMS.fillval
sig22_south = f1.createVariable('sig22_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
        sig22_south.long_name = "time-averaged internal ice stress 22 component southern boundary conditions"
sig22_south.units = "Newton meter-1"
sig22_south.time = "ocean_time"
sig22_south.field = "ice stress 22, scalar, series"
#sig22_south.missing_value = grdROMS.fillval
sig22_north = f1.createVariable('sig22_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sig22_north.long_name = "time-averaged internal ice stress 22 component northern boundary conditions"
sig22_north.units = "Newton meter-1"
sig22_north.time = "ocean_time"
sig22_north.field = "ice stress 22, scalar, series"
#sig22_north.missing_value = grdROMS.fillval
# ----------------------------------------
f1.close()
| mit | -7,422,311,014,768,623,000 | 49.966063 | 114 | 0.599614 | false | 3.129185 | false | false | false |
yashp241195/OpenCVTutorial | BasicFilters.py | 1 | 8245 | import cv2 as CvObject
import numpy as np
# PART 1 - Introduction
# Topics
# >> Read frames from camera
# >> Basic Filters (smoothing, blurring)
# >> Morphological Transformation
# >> Edge Detection
Selext_WebCam = 0
Select_External_Camera = 1
CamTitle = "MyCam"
# Store the capture object in the 'capture' variable
capture = CvObject.VideoCapture(Selext_WebCam)
# while the camera is open, keep capturing frames
while (capture.isOpened()):
# Start capturing frames/images
'''
    capture.read() returns two values
    >> ret    - whether a frame was captured (True/False)
    >> Camera - the captured frame
'''
# 'Camera' is an identifier for frame which is taken from camera
ret,Camera = capture.read()
# Show the title "myimage" and start showing
# CvObject.imshow(CamTitle,Camera)
# Grayscale Camera
    # OpenCV stores color channels in BGR order, the reverse of RGB (Red, Green, Blue)
GrayScale = CvObject.cvtColor(Camera,CvObject.COLOR_BGR2GRAY)
# CvObject.imshow('GrayScale',GrayScale)
    # Detect BLUE COLOR
    # Threshold BGR to binary (only pixels within a particular range are kept)
    # The bounds below are applied directly to the BGR frame: despite the _HSV suffix
    # in the variable names, no conversion to HSV (Hue, Saturation, Value) is done here
lower_color_bound_HSV = np.array([150,100,0])
upper_color_bound_HSV = np.array([255,180,180])
imgThreshold = CvObject.inRange(Camera,lower_color_bound_HSV,upper_color_bound_HSV)
#CvObject.imshow('Thresholded',imgThreshold)
    # Bitwise AND blacks out (fills with black) the region outside the mask,
    # while the mask keeps the common (in-range) portion, so the result
    # shows only the selected color
MonoColor = CvObject.bitwise_and(Camera,Camera,mask=imgThreshold)
#CvObject.imshow('MonoColor',MonoColor)
# Blurring and Smoothening
#Kernel size
x=5
y=5
    # Kernel is an x*y matrix with all elements "1";
    # the kernel decides the smoothness/blurriness/sharpness
    # according to the kernel definition
    # float32 is the datatype; all elements are divided by (x*y = 25) so the filter averages
    # Kernel definition
kernel = np.ones((x,y),np.float32)/(x*y)
    # The 2D filter takes 3 parameters
    # >> Frame (I choose the GrayScale frame)
    # >> depth = -1 (same depth as the source)
    # >> kernel matrix
    # filter2D helps you build a custom filter
    # through your own kernel definition
smoothed = CvObject.filter2D(GrayScale,-1,kernel)
#CvObject.imshow('Smoothed',smoothed)
# Gaussian Blur :
    # It uses a Gaussian probability distribution in order to remove
    # Gaussian noise from the frame; if the standard deviations (sigmaX, sigmaY)
    # are 0, the deviation is computed from the kernel size (x, y)
sigmaX = 0
sigmaY = 0
gaussianblur = CvObject.GaussianBlur(GrayScale,(x,y),sigmaX,sigmaY)
#CvObject.imshow('Gaussian Blur',gaussianblur)
    # Median Blur - replaces each pixel with the median of its neighbourhood to do the smoothing
kernel_size = x # Only square kernal is allowed
#medianblur = CvObject.medianBlur(GrayScale,kernel_size)
#CvObject.imshow('Median Blur',medianblur)
# Bilateral Filter -
    # It is designed to overcome a drawback of the Gaussian filter:
    # it avoids smoothing edges, but is a slower process
    # the intensity of each pixel is replaced by a weighted average of its neighbours,
    # weighted by both spatial distance and intensity difference
    # (it is related to the anisotropic diffusion partial differential equation)
    diameter = 15 # diameter of the pixel neighbourhood used during filtering
    SigmaColor = 80 # filter sigma in color space: larger values mix more dissimilar colors together
    SigmaSpace = 80 # filter sigma in coordinate space: larger values let more distant pixels influence each other
bilateral = CvObject.bilateralFilter(Camera,diameter,SigmaColor,SigmaSpace)
#CvObject.imshow('BilateralFilter',bilateral)
# Morphological Transformation - Transformation based on shapes
# Erosion -
    # erodes the boundary of the foreground (bright) regions,
    # which makes dark outlines look bolder and thicker
    # by removing foreground around them
    # Kernel definition for erosion and dilation
kernel_Erode_or_Dilate = np.ones((2*x,2*y),np.uint8)
erosion = CvObject.erode(Camera,kernel_Erode_or_Dilate,iterations = 1)
#CvObject.imshow('Erosion',erosion)
    # Dilation - opposite of erosion: expands the bright regions
    # so dark outlines become thinner and more diluted
dilate = CvObject.dilate(Camera,kernel_Erode_or_Dilate,iterations = 1)
#CvObject.imshow('Dilation',dilate)
    # Opening - erosion followed by dilation, so the dilation compensates for the erosion (somehow it cartoonizes)
    # lightens stray outlines first, then restores the required outline
opening = CvObject.morphologyEx(Camera,CvObject.MORPH_OPEN,kernel_Erode_or_Dilate)
#CvObject.imshow('Opening',opening)
    # Closing - dilation followed by erosion, so the erosion compensates for the dilation (Make You Ugliest person :P )
    # darkens the outline first, then removes the other unnecessary darker outlines
closing = CvObject.morphologyEx(Camera,CvObject.MORPH_CLOSE,kernel_Erode_or_Dilate)
#CvObject.imshow('Closing',closing)
    # Morphological Gradient - difference between dilation and erosion
gradient = CvObject.morphologyEx(Camera,CvObject.MORPH_GRADIENT,kernel_Erode_or_Dilate)
#CvObject.imshow('Gradient',gradient)
    # Top Hat - difference between the original image and its Opening
topHat = CvObject.morphologyEx(Camera,CvObject.MORPH_TOPHAT,kernel_Erode_or_Dilate)
#CvObject.imshow('TopHat',topHat)
    # Black Hat - difference between the Closing of the image and the original
BlackHat = CvObject.morphologyEx(Camera,CvObject.MORPH_BLACKHAT,kernel_Erode_or_Dilate)
#CvObject.imshow('BlackHat',BlackHat)
# Edge Detection
    # Laplacian - a second-derivative approach
    # At edges there is a high variation in intensity between the edge and the rest
    # Using the first derivative we find the values which can be considered an edge
    # Using the second derivative we can confirm whether it is an edge or not
    # The Laplacian is the way to compute the second derivative of a real-valued image function
    # Where the Laplacian crosses zero, the first derivative is at an extremum, i.e. the
    # intensity change is strongest; those zero-crossing positions are where we are
    # supposed to find the edges
laplacian = CvObject.Laplacian(Camera,CvObject.CV_64F)
#CvObject.imshow('Laplacian',laplacian)
    # First Order Derivative Approaches
    # Sobel - directional (1D) edge scan
    SobelKernelSize = 5
    # For SobelX set activateX = 1 and activateY = 0: the image is differentiated along X,
    # just as dI/dX locates maxima/minima of the intensity at a given point along X,
    # so edges running along the Y axis (vertical edges) are detected
activateX = 1
activateY = 0
#sobelX = CvObject.Sobel(Camera,CvObject.CV_64F,activateX ,activateY,ksize = SobelKernelSize)
#CvObject.imshow('SobelX',sobelX)
    # For SobelY the image is differentiated along Y, so edges running along the X axis are detected
    # activateX = 0 and activateY = 1
activateX = 0
activateY = 1
#sobelY = CvObject.Sobel(Camera,CvObject.CV_64F,activateX ,activateY,ksize = SobelKernelSize)
#CvObject.imshow('SobelY',sobelY)
    # Canny - a multi-stage detector: noise reduction, gradient computation,
    # non-maximum suppression and hysteresis thresholding
    Reduce_noiseX = 200 # lower hysteresis threshold: gradients below this are rejected as edges
    Reduce_noiseY = 250 # upper hysteresis threshold: gradients above this are accepted as strong edges
canny = CvObject.Canny(Camera,Reduce_noiseX,Reduce_noiseY)
CvObject.imshow('Canny',canny)
# Closing Camera
time = 10 #10 millisec
key = CvObject.waitKey(time)
    # if the key is ESC (ASCII = 27) then exit the capture loop
# press hard
if key == 27:
break
capture.release()
CvObject.destroyAllWindows()
| apache-2.0 | -7,397,239,752,318,426,000 | 26.529412 | 122 | 0.670224 | false | 3.487733 | false | false | false |
netscaler/neutron | neutron/db/migration/cli.py | 9 | 4487 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import os
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import util as alembic_util
from oslo.config import cfg
from neutron.common import legacy
_core_opts = [
cfg.StrOpt('core_plugin',
default='',
help=_('Neutron plugin provider module')),
cfg.ListOpt('service_plugins',
default=[],
help=_("The service plugins Neutron will use")),
]
_quota_opts = [
cfg.StrOpt('quota_driver',
default='',
help=_('Neutron quota driver class')),
]
_db_opts = [
cfg.StrOpt('connection',
deprecated_name='sql_connection',
default='',
help=_('URL to database')),
]
CONF = cfg.ConfigOpts()
CONF.register_opts(_core_opts)
CONF.register_opts(_db_opts, 'database')
CONF.register_opts(_quota_opts, 'QUOTAS')
def do_alembic_command(config, cmd, *args, **kwargs):
try:
getattr(alembic_command, cmd)(config, *args, **kwargs)
except alembic_util.CommandError as e:
alembic_util.err(str(e))
def do_check_migration(config, cmd):
do_alembic_command(config, 'branches')
def do_upgrade_downgrade(config, cmd):
if not CONF.command.revision and not CONF.command.delta:
raise SystemExit(_('You must provide a revision or relative delta'))
    if CONF.command.delta:
        sign = '+' if CONF.command.name == 'upgrade' else '-'
        revision = sign + str(CONF.command.delta)
    else:
        revision = CONF.command.revision
do_alembic_command(config, cmd, revision, sql=CONF.command.sql)
def do_stamp(config, cmd):
do_alembic_command(config, cmd,
CONF.command.revision,
sql=CONF.command.sql)
def do_revision(config, cmd):
do_alembic_command(config, cmd,
message=CONF.command.message,
autogenerate=CONF.command.autogenerate,
sql=CONF.command.sql)
def add_command_parsers(subparsers):
for name in ['current', 'history', 'branches']:
parser = subparsers.add_parser(name)
parser.set_defaults(func=do_alembic_command)
parser = subparsers.add_parser('check_migration')
parser.set_defaults(func=do_check_migration)
for name in ['upgrade', 'downgrade']:
parser = subparsers.add_parser(name)
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision', nargs='?')
parser.set_defaults(func=do_upgrade_downgrade)
parser = subparsers.add_parser('stamp')
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision')
parser.set_defaults(func=do_stamp)
parser = subparsers.add_parser('revision')
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
parser.add_argument('--sql', action='store_true')
parser.set_defaults(func=do_revision)
command_opt = cfg.SubCommandOpt('command',
title='Command',
help=_('Available commands'),
handler=add_command_parsers)
CONF.register_cli_opt(command_opt)
def main():
config = alembic_config.Config(
os.path.join(os.path.dirname(__file__), 'alembic.ini')
)
config.set_main_option('script_location',
'neutron.db.migration:alembic_migrations')
# attach the Neutron conf to the Alembic conf
config.neutron_config = CONF
CONF()
#TODO(gongysh) enable logging
legacy.modernize_quantum_config(CONF)
CONF.command.func(config, CONF.command.name)
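# Usage sketch (assumptions: the project's packaging exposes main() as a console script,
# typically "neutron-db-manage", and the config file supplies database.connection):
#     neutron-db-manage --config-file /etc/neutron/neutron.conf upgrade head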
| apache-2.0 | -1,401,840,322,780,305,200 | 30.377622 | 78 | 0.636728 | false | 3.881488 | true | false | false |
TheTrueTom/HP1100SeriesConverter | src/converter.py | 1 | 2445 | # -*- coding: utf-8 -*-
import struct
import csv
import os
import sys  # needed for sys._MEIPASS below
import tkinter as tk
from decrypter import *
from tkinter.filedialog import askdirectory
ext = "ch"
class Application(tk.Frame):
def __init__(self, master=None):
tk.Frame.__init__(self, master)
self.pack()
self.createWidgets()
def createWidgets(self):
self.instructionLabel = tk.Label(self)
self.instructionLabel.config(text="Welcome ! Please select a folder to start a conversion")
self.instructionLabel.grid(row=0, column=0, columnspan=4, ipadx=5, ipady=5)
self.selectButton = tk.Button(self)
self.selectButton["text"] = "Select folder"
self.selectButton["command"] = self.selectFolder
self.selectButton.grid(row=1, column=0, sticky='W')
self.convertButton = tk.Button(self)
self.convertButton["text"] = "Convert files"
self.convertButton["command"] = self.convert
self.convertButton.grid(row=1, column=1, sticky='W')
self.convertButton.config(state='disabled')
self.statusText = tk.Label(self)
self.statusText.config(text="No job in progress")
self.statusText.config(width=40)
self.statusText.grid(row=1, column=2)
self.quit = tk.Button(self, text="Quit", fg="red", command=root.destroy)
self.quit.grid(row=1, column=3, sticky='E')
def convert(self):
#print("Converting")
self.convertButton.config(state='disabled')
if self.selectedPath != None:
decrypter = Decrypter(self.selectedPath, self)
def selectFolder(self):
#print("Selecting folder")
self.selectedPath = askdirectory(parent=root, title='Choose a folder')
if self.selectedPath != None:
self.convertButton.config(state='normal')
experimentList = []
for fRoot, subFolders, files in os.walk(self.selectedPath):
for fichier in files:
if fichier[-2:] == '.B':
# New experiment
experimentList.append(fRoot)
text = "Found " + str(len(experimentList)) + " experiments"
self.instructionLabel.config(text="Click on the 'Convert files' button to transform files into CSV files or select another folder")
self.statusText.config(text=text)
# Create windows reference
root = tk.Tk()
# Change app icon
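# Assumption: the app runs as a PyInstaller one-file build; sys._MEIPASS is the temporary
# directory PyInstaller unpacks bundled resources into at runtime, so running this script
# unfrozen would need a fallback path (e.g. the script's own directory) instead.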
basePath = sys._MEIPASS
prepath = os.path.join(basePath, 'icon')
path = os.path.join(prepath, 'icon.ico')
root.iconbitmap(default=path)
# Create application
app = Application(master=root)
app.master.title("Agilent Data Converter")
app.master.minsize(500,60)
app.master.maxsize(650,60)
# Run Application
app.mainloop()
| mit | 7,359,510,089,815,305,000 | 28.107143 | 134 | 0.721881 | false | 3.102792 | true | false | false |
xuerenlv/PaperWork | my_version/Main_Run.py | 1 | 17033 | # -*- coding: utf-8 -*-
'''
Created on 2015-08-21
@author: xhj
'''
from craw_page_parse import crawl_real_time_with_keyword, \
crawl_set_time_with_keyword, crawl_set_time_with_keyword_and_nickname
# from craw_page_parse import crawl_set_time_with_only_keyword
import os
import logging.config
import random
import datetime
from crawl_comment_from_db import crawl_comment, crawl_repost
from craw_page_parse_2 import crawl_uid_from_nickname, \
crawl_userinfo_from_uname_or_uid, crawl_userinfo_2_from_uid
from store_model import UserInfo_store, Single_weibo_with_more_info_store, \
Bie_Ming_store, Weibo_url_to_Comment_url, Single_comment, \
Single_comment_store, Weibo_url_to_repost_url, Single_repost_store,\
UserInfo_for_regester_time_store
from craw_page_parse_2 import crawl_userinfo_3_for_regester_time
from urllib import quote_plus
from mongoengine.context_managers import switch_collection
from mongoengine.queryset.visitor import Q
if not os.path.exists('logs/'):
os.mkdir('logs')
if os.path.exists('logs/scheduler.log'):
open('logs/scheduler.log', 'w').truncate()
curpath = os.path.normpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
logging.config.fileConfig(curpath + '/runtime_infor_log.conf')
if not os.path.exists('data/'):
os.mkdir('data')
if not os.path.exists('cookies/'):
os.mkdir('cookies')
# Crawl real-time weibo posts for the given keywords; not needed at the moment.
def crawl_real_time_main(key_words_list):
thrads_list = []
for i in range(len(key_words_list)):
thrads_list.append(crawl_real_time_with_keyword(key_words_list[i], 'real_time_' + str(i)))
return thrads_list
# Build a separate start URL for each block of days.
# For a keyword that matches many weibo posts, crawl day by day.
def crawl_set_time_main_many(key_word, start_time, end_time, how_many_days_one_thread, how_many_days_crawl_once):
thrads_list = []
while start_time <= end_time:
end_2 = start_time + datetime.timedelta(days=how_many_days_one_thread-1)
thrads_list.append(crawl_set_time_with_keyword(key_word, start_time, end_2, how_many_days_crawl_once, 'crawl_settime_thread' + str(start_time) + " to " + str(end_2)))
start_time = end_2+datetime.timedelta(days=1)
return thrads_list
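# A minimal usage sketch for the function above (values are illustrative only):
#
#   threads = crawl_set_time_main_many('keyword', datetime.datetime(2016, 2, 16),
#                                      datetime.datetime(2016, 2, 26),
#                                      how_many_days_one_thread=1, how_many_days_crawl_once=1)
#   # -> 11 threads, one per day, each covering a single-day window before start_time advances.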
# Crawl everything in one pass instead of day by day.
# Given: keyword, start time, end time, and a list of user nicknames.
def crawl_set_time_main_little(key_word, start_time, end_time, nickname_list):
thrads_list = []
for nickname in nickname_list:
thrads_list.append(crawl_set_time_with_keyword_and_nickname(key_word, start_time, end_time, nickname, nickname + "_thread"))
return thrads_list
# Pull the already-converted comment URLs from the database, then crawl them.
def crawl_comment_from_fie():
    # Read the information stored for the individual weibo posts
all_thrads_list = []
    # Read out the data
list_contains_set_weibourl_and_commenturl = []
global Weibo_url_to_Comment_url
for one_entry in Weibo_url_to_Comment_url.objects:
list_contains_set_weibourl_and_commenturl.append((one_entry['weibo_url'], one_entry['comment_url']))
one_piece = len(list_contains_set_weibourl_and_commenturl) / 12
for i in range(12):
all_thrads_list.append(crawl_comment(list_contains_set_weibourl_and_commenturl[i * one_piece:(i + 1) * one_piece], 'crawl_comment___' + str(i)))
for thread in all_thrads_list:
thread.start()
for thread in all_thrads_list:
thread.join()
# Crawl user reposts.
def crawl_repost_from_db():
all_thrads_list = []
    # Read out the data
list_contains_set_weibourl_and_reposturl = []
global Weibo_url_to_repost_url
for one_entry in Weibo_url_to_repost_url.objects:
list_contains_set_weibourl_and_reposturl.append((one_entry['weibo_url'], one_entry['repost_url']))
random.shuffle(list_contains_set_weibourl_and_reposturl)
one_piece = len(list_contains_set_weibourl_and_reposturl) / 12
for i in range(12):
all_thrads_list.append(crawl_repost(list_contains_set_weibourl_and_reposturl[i * one_piece:(i + 1) * one_piece], 'crawl_repost___' + str(i)))
for thread in all_thrads_list:
thread.start()
for thread in all_thrads_list:
thread.join()
pass
# Crawl all weibo posts for one keyword; a hashtag can be crawled the same way, but the corresponding start URL must be changed.
def crawl_one_keyword():
all_thrads_list = []
key_word = '转基因'
start_time = datetime.datetime(2016, 2, 16)
end_time = datetime.datetime(2016, 2, 26)
all_thrads_list.extend(crawl_set_time_main_many(key_word, start_time, end_time, how_many_days_one_thread=1, how_many_days_crawl_once=1))
for thread in all_thrads_list:
thread.start()
for thread in all_thrads_list:
thread.join()
# Crawl all weibo posts under one hashtag.
# def crawl_hash_tag():
# all_thrads_list = []
# key_word = '四六级成绩'
# start_time = datetime.datetime(2015, 12, 10)
# end_time = datetime.datetime(2015, 12, 31)
#
# how_many_days_one_thread = 5
# while start_time + datetime.timedelta(days=how_many_days_one_thread) < end_time:
# end_2 = start_time + datetime.timedelta(days=how_many_days_one_thread)
# all_thrads_list.append(crawl_set_time_with_only_keyword(key_word, start_time, end_2, 'crawl_settime_thread' + str(start_time) + " to " + str(end_2)))
# start_time = end_2
# if start_time < end_time:
# all_thrads_list.append(crawl_set_time_with_only_keyword(key_word, start_time, end_time, 'crawl_settime_thread' + str(start_time) + " to " + str(end_time)))
# for thread in all_thrads_list:
# thread.start()
# for thread in all_thrads_list:
# thread.join()
# Crawl weibo posts of specific users, i.e. posts by particular media accounts about a given keyword.
def crawl_set_user_weibo_about_keyword():
all_thrads_list = []
key_word = '扶老人'
start_time = datetime.datetime(2011, 1, 1)
end_time = datetime.datetime(2015, 9, 6)
nickname_list = ["新闻晨报", "南方都市报", "广州日报", "南方日报", "环球时报", "扬子晚报", "新京报", "每日经济新闻", "楚天都市报"]
all_thrads_list.extend(crawl_set_time_main_little(key_word, start_time, end_time, nickname_list))
for thread in all_thrads_list:
thread.start()
for thread in all_thrads_list:
thread.join()
####################################################################################### crawl userinfo start
# Crawl user info by uid; one of the required crawl tasks.
def chuli_nickname_crawl_userinfo():
uid_or_uname_list = []
# uid_or_uname_list = read_data_from_database_for___uid_or_uname_list()
with open("test_nolabels.txt") as file_r:
for one_line in file_r.readlines():
uid_or_uname_list.append(one_line[:-2])
print len(uid_or_uname_list)
how_many_uids_one_thread = len(uid_or_uname_list) / 10
all_thrads_list = []
start = 0
end = how_many_uids_one_thread
count = 0
while end < len(uid_or_uname_list):
all_thrads_list.append(crawl_userinfo_from_uname_or_uid(uid_or_uname_list[start:end], "crawl_userinfo_from_uname_or_uid_" + str(count)))
start = start + how_many_uids_one_thread
end = end + how_many_uids_one_thread
count = count + 1
if start < len(uid_or_uname_list):
all_thrads_list.append(crawl_userinfo_from_uname_or_uid(uid_or_uname_list[start:len(uid_or_uname_list)], "crawl_userinfo_from_uname_or_uid_" + str(count)))
for thread in all_thrads_list:
thread.start()
for thread in all_thrads_list:
thread.join()
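# A possible refactor (sketch only; not wired into the functions above): the start/end chunking
# arithmetic repeated in this module could be replaced by a small helper like the one below.
# The name split_into_chunks is illustrative.
def split_into_chunks(items, n_chunks):
    """Split `items` into roughly equal consecutive slices."""
    size = max(1, len(items) / n_chunks)  # Python 2 integer division, matching the rest of this file
    return [items[i:i + size] for i in range(0, len(items), size)]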
# Crawl user info from the web pages, mainly to obtain the registration time.
def crawl_userinfo_for_regester_time():
uid_crawl_list = []
count = 1
for one_user in UserInfo_store.objects:
uid = one_user['uid_or_uname']
if len(UserInfo_for_regester_time_store.objects(uid=uid))==0:
uid_crawl_list.append(uid)
print count
count += 1
print len(uid_crawl_list)
how_many_uids_one_thread = len(uid_crawl_list) / 10
all_thrads_list = []
start = 0
end = how_many_uids_one_thread
count = 0
while end < len(uid_crawl_list):
all_thrads_list.append(crawl_userinfo_3_for_regester_time(uid_crawl_list[start:end], "crawl_userinfo_for_regestertime_" + str(count)))
start = start + how_many_uids_one_thread
end = end + how_many_uids_one_thread
count = count + 1
if start < len(uid_crawl_list):
all_thrads_list.append(crawl_userinfo_3_for_regester_time(uid_crawl_list[start:len(uid_crawl_list)], "crawl_userinfo_for_regestertime_" + str(count)))
for thread in all_thrads_list:
thread.start()
for thread in all_thrads_list:
thread.join()
pass
# Fetch the uid and nickname of already-crawled users from the database.
def read_data_from_database_uids_and_nicknames():
uids_and_nicknames = []
for one_user_info in UserInfo_store.objects:
uids_and_nicknames.append(one_user_info["uid_or_uname"])
uids_and_nicknames.append(one_user_info["nickname"])
return uids_and_nicknames
# Parse the at_info field stored in the database.
def chuli_at_info(at_info):
nickname_list = []
for one in at_info.split("[fen_ge]"):
nickname_list.append(one[:one.find(":")])
return nickname_list
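# Illustrative example (the at_info layout is inferred from the parsing above, not documented elsewhere):
#   chuli_at_info("nicknameA:uidA[fen_ge]nicknameB:uidB")  ->  ['nicknameA', 'nicknameB']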
# def read_alread_crawled_uids_or_nicknames():
# alread_crawled_uids_or_nicknames = []
# fr = open("data/already_crawled_uids_or_nicknames.txt","r")
# for one_line in fr.readlines():
# # alread_crawled_uids_or_nicknames.append(one_line[:-1])
# pass
# fr.close()
# return alread_crawled_uids_or_nicknames
# def write_alread_crawled_uids_or_nicknames(alread_crawled_uids_or_nicknames):
# fw = open("data/already_crawled_uids_or_nicknames.txt","a")
# for one_thing in alread_crawled_uids_or_nicknames:
# fw.write(one_thing+"\n")
# fw.close()
# Read data from the database to build uid_or_uname_list.
def read_data_from_database_for___uid_or_uname_list():
uid_or_uname_list = []
this_uid_list = []
this_nickname_list = []
weibo_collection_name = []
# weibo_collection_name = ["zhuanjiyin_nohashtag_original_2014_03_01_to_2014_03_10_detmine_1", \
# "zhuanjiyin_nohashtag_original_2014_03_10_to_2014_03_20_detmine_2", \
# "zhuanjiyin_nohashtag_original_2014_03_20_to_2014_04_01_detmine_3"]
    # Process user info appearing in weibo posts
print "start single weibo"
global Single_weibo_with_more_info_store
for one_collection in weibo_collection_name:
with switch_collection(Single_weibo_with_more_info_store, one_collection) as Single_weibo_with_more_info_store:
for one_weibo in Single_weibo_with_more_info_store.objects:
this_uid_list.append(one_weibo["uid"])
this_uid_list.append(one_weibo["come_from_user_id"])
this_nickname_list.extend(chuli_at_info(one_weibo["at_info"]))
this_nickname_list.extend(chuli_at_info(one_weibo["retweet_reason_at_info"]))
    # Process user info appearing in comments
# 'zhuanjiyin_nohashtag_original_single_comment_2016_with_more_info'
print "start comment"
comment_collections = []
# comment_collections.append('zhuanjiyin_nohashtag_original_single_comment_2014_with_more_info_repair')
global Single_comment_store
for one_collection in comment_collections:
with switch_collection(Single_comment_store, one_collection) as Single_comment_store:
for one_comment in Single_comment_store.objects:
this_uid_list.append(one_comment["uid"])
this_nickname_list.extend(chuli_at_info(one_comment["at_info"]))
print "start repost"
repost_collections = []
repost_collections.append("zhuanjiyin_nohashtag_original_single_repost_2016_with_more_info_repair")
global Single_repost_store
for one_collection in repost_collections:
with switch_collection(Single_repost_store, one_collection) as Single_repost_store:
for one_comment in Single_repost_store.objects:
this_uid_list.append(one_comment["uid"])
this_nickname_list.extend(chuli_at_info(one_comment["at_info"]))
uid_or_uname_list.extend(list(set(this_uid_list)))
uid_or_uname_list.extend(list(set(this_nickname_list)))
uid_or_uname_list = list(set(uid_or_uname_list))
# print "start filter"
# for uid_or_nickname in set(this_uid_list):
# if len(UserInfo_store.objects(Q(uid_or_uname=str(uid_or_nickname)) | Q(nickname=str(uid_or_nickname)))) == 0 or\
# len(Bie_Ming_store.objects(Q(uid_or_uname=str(uid_or_nickname)) | Q(bie_ming=str(uid_or_nickname)))) == 0:
# uid_or_uname_list.append(uid_or_nickname)
#
# for uid_or_nickname in set(this_nickname_list) :
# if len(UserInfo_store.objects(Q(uid_or_uname=str(uid_or_nickname)) | Q(nickname=str(uid_or_nickname)))) == 0 or\
# len(Bie_Ming_store.objects(Q(uid_or_uname=str(uid_or_nickname)) | Q(bie_ming=str(uid_or_nickname)))) == 0:
# uid_or_uname_list.append(uid_or_nickname)
random.shuffle(uid_or_uname_list)
print len(uid_or_uname_list)
return uid_or_uname_list
####################################################################################### crawl userinfo end
# Convert nicknames into uids (or the identifiers weibo uses) by crawling pages; an intermediate step.
def main_2_just_tran_nickname_to_uidoruname():
file_r = open("100_atname_file.txt", 'r')
nickname_list = []
for line in file_r.readlines():
op_nickname = line[line.find('nickname:'):]
nickname = op_nickname[op_nickname.find(':') + 1:op_nickname.rfind(']')]
nickname_list.append(nickname)
all_thrads_list = []
start = 0
end = 10
count = 1
while end < len(nickname_list):
all_thrads_list.append(crawl_uid_from_nickname(nickname_list[start:end], "crawl_uid_from_nickname_" + str(count)))
start += 10
end += 10
count += 1
if(start < len(nickname_list)):
all_thrads_list.append(crawl_uid_from_nickname(nickname_list[start:len(nickname_list)], "crawl_uid_from_nickname_" + str(count)))
for thread in all_thrads_list:
thread.start()
for thread in all_thrads_list:
thread.join()
###################################################################################### start 1
# Crawl weibo posts for the terms produced by query expansion.
# key_word_list : keywords produced by query expansion
# start_time : datetime object, start of the range     end_time : datetime object, end of the range
def crawl_keywords_list(key_word_list, start_time, end_time):
all_thrads_list = []
for key_word in key_word_list:
        # NOTE: the original call passed only four arguments, which no longer matches the
        # five-argument signature of crawl_set_time_main_many(); how_many_days_crawl_once=110
        # below is an assumed value that keeps a single crawl window per thread.
        all_thrads_list.extend(crawl_set_time_main_many(key_word, start_time, end_time,
                                                        how_many_days_one_thread=110,
                                                        how_many_days_crawl_once=110))
for thread in all_thrads_list:
thread.start()
for thread in all_thrads_list:
thread.join()
# Read a file and build the keywords list; this drives the query-expansion crawl.
def gen_keywords_list():
    # Files already processed: 1
file_r = open('./query_expansion_three_word/result_three_word_0.txt', 'r')
start_time = ""
end_time = ""
count = 1
key_words_list = []
for line in file_r.readlines():
if count == 1:
line = line[:-1].split(' ')
start_time = datetime.datetime(int(line[0]), int(line[1]), int(line[2]))
elif count == 2:
line = line[:-1].split(' ')
end_time = datetime.datetime(int(line[0]), int(line[1]), int(line[2]))
else:
key_words_list.append(line[:line.find('-')])
count += 1
return (key_words_list, start_time, end_time)
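# Assumed layout of the keyword file, inferred from the parsing above (contents are illustrative):
#   line 1: "2014 3 1"             -> start date (year month day, space separated)
#   line 2: "2014 4 1"             -> end date
#   line 3+: "keyword-<anything>"  -> one expanded keyword per line; the text before the first '-' is kept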
###################################################################################### end 1
if __name__ == '__main__':
# key_words_list,start_time,end_time=gen_keywords_list()
# crawl_keywords_list(key_words_list, start_time, end_time)
    # For a single keyword, crawl weibo posts in a given time range (a hashtag works too)
# crawl_one_keyword()
    # Crawl user comments via the already-converted comment URLs
# crawl_comment_from_fie()
    # Crawl user reposts via the already-converted repost URLs
# crawl_repost_from_db()
    # Read uids/nicknames from the database, then crawl the user info
# chuli_nickname_crawl_userinfo()
    # Extract uids from the database, then crawl their registration info from the web pages
crawl_userinfo_for_regester_time()
    # Crawl specific users' weibo posts about a specific keyword
# crawl_set_user_weibo_about_keyword()
pass
| apache-2.0 | -6,991,902,831,607,894,000 | 36.545882 | 174 | 0.628878 | false | 2.700457 | false | false | false |
fabriziocosta/pyMotif | dataset_generator.py | 1 | 2970 | """Dataset generation module."""
import random
import numpy as np
def random_string(length, alphabet_list):
"""Generate a random string."""
rand_str = ''.join(random.choice(alphabet_list) for i in range(length))
return rand_str
def perturb(seed, alphabet_list, p=0.5):
"""Randomize a string."""
seq = ''
for c in seed:
if random.random() < p:
c = random.choice(alphabet_list)
seq += c
return seq
def inflate_normalize(pwms=None, exp=2):
"""Inflate and normalize PWM to utilize noise level."""
num_motives = pwms.shape[0]
for j in range(num_motives): # inflate
pwms[j] = pwms[j] ** exp
for j in range(num_motives): # normalize
pwms[j] = pwms[j] / pwms[j].sum(axis=0)
return pwms
def get_pwms(alphabet='ACGT', num=2, length=6, exp=2):
"""Generate PWMs for every motif."""
letters = len(alphabet)
pwms = []
for i in range(num):
i_pwm = np.random.random_sample((letters, length))
i_pwm = i_pwm / i_pwm.sum(axis=0) # normalize
pwms.append(i_pwm)
pwms = np.array(pwms)
pwms = inflate_normalize(pwms=pwms, exp=exp)
return pwms
def motif_from_pwm(alphabet_list, pwm):
"""Create motif string from the PWM."""
seq = ""
length = pwm.shape[1]
for i in range(length):
alphabet_dist = pwm[:, i]
c = np.random.choice(a=alphabet_list, p=alphabet_dist)
seq += c
return seq
def make_artificial_dataset(alphabet='ACGT', motif_length=6, sequence_length=100,
n_sequences=1000, n_motives=2, p=0.2, random_state=1):
"""Generate artificial dataset.
Returns: motives - list of motives used in sequences
seq - dataset as list of sequences
binary_seq - a sequence of 0's & 1's which can be used for computing ROC score.
"""
    random.seed(random_state)
    np.random.seed(random_state)  # numpy's RNG is also used below; seed it too so results are reproducible
alphabet_list = [c for c in alphabet]
pwms = get_pwms(alphabet=alphabet, num=n_motives, length=motif_length, exp=2 - p)
    # integer division keeps the downstream string lengths integral (also correct under Python 3)
    sequence_length = sequence_length // n_motives
    flanking_length = (sequence_length - motif_length) // 2
n_seq_per_motif = n_sequences
counter = 0
seqs = []
motives = []
for i in range(n_seq_per_motif):
total_seq = ''
motif = []
for j in range(n_motives):
left_flanking = random_string(flanking_length, alphabet_list)
right_flanking = random_string(flanking_length, alphabet_list)
noisy_motif = motif_from_pwm(alphabet_list, pwms[j])
seq = left_flanking + noisy_motif + right_flanking
total_seq += seq
motif.append(noisy_motif)
seqs.append(('ID%d' % counter, total_seq))
motives.append(motif)
counter += 1
binary_skeleton = '0' * flanking_length + \
'1' * motif_length + '0' * flanking_length
binary_seq = binary_skeleton * n_motives
return motives, seqs, binary_seq
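# A minimal usage sketch (not part of the original module); parameter values are illustrative.
if __name__ == '__main__':
    motives, seqs, binary_seq = make_artificial_dataset(
        alphabet='ACGT', motif_length=6, sequence_length=100,
        n_sequences=5, n_motives=2, p=0.2, random_state=1)
    # seqs is a list of (id, sequence) tuples; motives[i] holds the noisy motifs embedded in seqs[i][1];
    # binary_seq marks motif positions with '1' and flanking positions with '0'.
    print(seqs[0])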
| mit | -1,912,691,036,445,324,300 | 29.618557 | 92 | 0.601684 | false | 3.253012 | false | false | false |
knowmetools/km-api | km_api/know_me/tests/views/test_accepted_accessor_list_view.py | 1 | 1214 | from unittest import mock
from know_me import serializers, views
@mock.patch("know_me.views.DRYPermissions.has_permission", autospec=True)
def test_check_permissions(mock_dry_permissions):
"""
The view should check for model permissions.
"""
view = views.AcceptedAccessorListView()
view.check_permissions(None)
assert mock_dry_permissions.call_count == 1
def test_get_queryset(api_rf, km_user_accessor_factory, user_factory):
"""
The view should operate on the accessors owned by the requesting user.
"""
user = user_factory()
api_rf.user = user
km_user_accessor_factory(is_accepted=True)
km_user_accessor_factory(is_accepted=True, user_with_access=user)
km_user_accessor_factory(is_accepted=False, user_with_access=user)
view = views.AcceptedAccessorListView()
view.request = api_rf.get("/")
expected = user.km_user_accessors.filter(is_accepted=True)
assert list(view.get_queryset()) == list(expected)
def test_get_serializer_class():
"""
Test getting the serializer class the view uses.
"""
view = views.AcceptedAccessorListView()
assert view.get_serializer_class() == serializers.KMUserAccessorSerializer
| apache-2.0 | 8,413,144,891,487,430,000 | 27.232558 | 78 | 0.710049 | false | 3.712538 | false | false | false |
ksurya/SciPyCentral | scipy_central/comments/views/moderation.py | 4 | 2181 | import simplejson
from django.conf import settings
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404
from django.contrib import comments
from django.contrib.comments.views.moderation import perform_flag, perform_delete
from django.views.decorators.csrf import csrf_protect
@csrf_protect
def flag(request, comment_id):
"""
    This view overrides the built-in view django.contrib.comments.views.moderation.flag()
    Reasons to override:
    1) The built-in view is not compatible with Ajax calls.
        @login_required is not used because it redirects to the login page, so Ajax
        calls cannot complete as expected.
    2) Error handling cannot be done using response attributes.
"""
if not request.user.is_authenticated():
return HttpResponse('Unauthorized', status=401)
if request.method == "POST" and \
request.is_ajax():
comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
perform_flag(request, comment)
data = {"success": True}
data = simplejson.dumps(data)
return HttpResponse(data, mimetype="application/json")
else:
raise Http404
@csrf_protect
def delete_my_comment(request, comment_id):
"""
    Allow a user to delete their own comment.
    Note: we are not removing the comment from the database!
    We only set the comment field `comment.is_removed = True`.
    Thus, if anyone re-posts the exact same comment that was posted previously,
    it will not be saved!
    django.contrib.comments.views.moderation.delete()
    allows Moderators to perform this operation.
"""
if not request.user.is_authenticated():
return HttpResponse(status=401)
if request.method == 'POST' and \
request.is_ajax():
data = {}
comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
if request.user == comment.user:
perform_delete(request, comment)
data['success'] = True
else:
raise Http404
return HttpResponse(simplejson.dumps(data), mimetype="application/json")
else:
raise Http404
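# A minimal urls.py sketch showing how these views could be wired up (illustrative only; the
# actual URL names and patterns used by the project may differ):
#
#   from django.conf.urls import patterns, url
#   from scipy_central.comments.views import moderation
#
#   urlpatterns = patterns('',
#       url(r'^flag/(?P<comment_id>\d+)/$', moderation.flag, name='spc-comment-flag'),
#       url(r'^delete-my-comment/(?P<comment_id>\d+)/$', moderation.delete_my_comment,
#           name='spc-comment-delete'),
#   )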
| bsd-3-clause | -7,130,026,666,514,306,000 | 34.754098 | 99 | 0.681339 | false | 4.178161 | false | false | false |
jyejare/robottelo | tests/foreman/cli/test_product.py | 1 | 12864 | # -*- encoding: utf-8 -*-
"""Test class for Product CLI
:Requirement: Product
:CaseAutomation: Automated
:CaseLevel: Component
:CaseComponent: ContentManagement
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from fauxfactory import gen_alphanumeric
from fauxfactory import gen_string
from robottelo import ssh
from robottelo.api.utils import wait_for_tasks
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.defaults import Defaults
from robottelo.cli.factory import CLIFactoryError
from robottelo.cli.factory import make_gpg_key
from robottelo.cli.factory import make_org
from robottelo.cli.factory import make_product
from robottelo.cli.factory import make_repository
from robottelo.cli.factory import make_sync_plan
from robottelo.cli.http_proxy import HttpProxy
from robottelo.cli.package import Package
from robottelo.cli.product import Product
from robottelo.cli.repository import Repository
from robottelo.config import settings
from robottelo.constants import FAKE_0_YUM_REPO
from robottelo.constants import FAKE_0_YUM_REPO_PACKAGES_COUNT
from robottelo.datafactory import invalid_values_list
from robottelo.datafactory import valid_data_list
from robottelo.datafactory import valid_labels_list
from robottelo.decorators import run_in_one_thread
from robottelo.decorators import tier1
from robottelo.decorators import tier2
from robottelo.decorators import upgrade
from robottelo.test import CLITestCase
class ProductTestCase(CLITestCase):
"""Product CLI tests."""
org = None
def setUp(self):
"""Tests for Lifecycle Environment via Hammer CLI"""
super(ProductTestCase, self).setUp()
if ProductTestCase.org is None:
ProductTestCase.org = make_org(cached=True)
@tier1
@upgrade
def test_positive_CRUD(self):
"""Check if product can be created, updated, synchronized and deleted
:id: 9d7b5ec8-59d0-4371-b5d2-d43145e4e2db
:expectedresults: Product is created, updated, synchronized and deleted
:BZ: 1422552
:CaseImportance: Critical
"""
desc = list(valid_data_list().values())[0]
gpg_key = make_gpg_key({'organization-id': self.org['id']})
name = list(valid_data_list().values())[0]
label = valid_labels_list()[0]
sync_plan = make_sync_plan({'organization-id': self.org['id']})
product = make_product(
{
'description': desc,
'gpg-key-id': gpg_key['id'],
'label': label,
'name': name,
'organization-id': self.org['id'],
'sync-plan-id': sync_plan['id'],
}
)
self.assertEqual(product['name'], name)
self.assertGreater(len(product['label']), 0)
self.assertEqual(product['label'], label)
self.assertEqual(product['description'], desc)
self.assertEqual(product['gpg']['gpg-key-id'], gpg_key['id'])
self.assertEqual(product['sync-plan-id'], sync_plan['id'])
# update
desc = list(valid_data_list().values())[0]
new_gpg_key = make_gpg_key({'organization-id': self.org['id']})
new_sync_plan = make_sync_plan({'organization-id': self.org['id']})
new_prod_name = gen_string('alpha', 8)
Product.update(
{
'description': desc,
'id': product['id'],
'gpg-key-id': new_gpg_key['id'],
'sync-plan-id': new_sync_plan['id'],
'name': new_prod_name,
}
)
product = Product.info({'id': product['id'], 'organization-id': self.org['id']})
self.assertEqual(product['name'], new_prod_name)
self.assertEqual(product['description'], desc)
self.assertEqual(product['gpg']['gpg-key-id'], new_gpg_key['id'])
self.assertNotEqual(product['gpg']['gpg-key-id'], gpg_key['id'])
self.assertEqual(product['sync-plan-id'], new_sync_plan['id'])
self.assertNotEqual(product['sync-plan-id'], sync_plan['id'])
# synchronize
repo = make_repository({'product-id': product['id'], 'url': FAKE_0_YUM_REPO})
Product.synchronize({'id': product['id'], 'organization-id': self.org['id']})
packages = Package.list({'product-id': product['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(int(repo['content-counts']['packages']), len(packages))
self.assertEqual(len(packages), FAKE_0_YUM_REPO_PACKAGES_COUNT)
# delete
Product.remove_sync_plan({'id': product['id']})
product = Product.info({'id': product['id'], 'organization-id': self.org['id']})
self.assertEqual(len(product['sync-plan-id']), 0)
Product.delete({'id': product['id']})
wait_for_tasks(
search_query='label = Actions::Katello::Product::Destroy'
' and resource_id = {}'.format(product['id']),
max_tries=10,
)
with self.assertRaises(CLIReturnCodeError):
Product.info({'id': product['id'], 'organization-id': self.org['id']})
@tier2
def test_negative_create_with_name(self):
"""Check that only valid names can be used
:id: 2da26ab2-8d79-47ea-b4d2-defcd98a0649
:expectedresults: Product is not created
:CaseImportance: High
"""
for invalid_name in invalid_values_list():
with self.subTest(invalid_name):
with self.assertRaises(CLIFactoryError):
make_product({'name': invalid_name, 'organization-id': self.org['id']})
@tier2
def test_negative_create_with_label(self):
"""Check that only valid labels can be used
:id: 7cf970aa-48dc-425b-ae37-1e15dfab0626
:expectedresults: Product is not created
:CaseImportance: High
"""
product_name = gen_alphanumeric()
for invalid_label in (
gen_string('latin1', 15),
gen_string('utf8', 15),
gen_string('html', 15),
):
with self.subTest(invalid_label):
with self.assertRaises(CLIFactoryError):
make_product(
{
'label': invalid_label,
'name': product_name,
'organization-id': self.org['id'],
}
)
@run_in_one_thread
@tier2
def test_product_list_with_default_settings(self):
"""Listing product of an organization apart from default organization using hammer
does not return output if a defaults settings are applied on org.
:id: d5c5edac-b19c-4277-92fe-28d9b9fa43ef
:BZ: 1745575
        :expectedresults: product/repository list should work as expected.
"""
default_product_name = gen_string('alpha')
non_default_product_name = gen_string('alpha')
default_org = self.org
non_default_org = make_org()
default_product = make_product(
{'name': default_product_name, 'organization-id': default_org['id']}
)
non_default_product = make_product(
{'name': non_default_product_name, 'organization-id': non_default_org['id']}
)
for product in (default_product, non_default_product):
make_repository({'product-id': product['id'], 'url': FAKE_0_YUM_REPO})
Defaults.add({'param-name': 'organization_id', 'param-value': default_org['id']})
result = ssh.command('hammer defaults list')
self.assertTrue(default_org['id'] in "".join(result.stdout))
try:
# Verify --organization-id is not required to pass if defaults are set
result = ssh.command('hammer product list')
self.assertTrue(default_product_name in "".join(result.stdout))
result = ssh.command('hammer repository list')
self.assertTrue(default_product_name in "".join(result.stdout))
# verify that defaults setting should not affect other entities
product_list = Product.list({'organization-id': non_default_org['id']})
self.assertEquals(non_default_product_name, product_list[0]['name'])
repository_list = Repository.list({'organization-id': non_default_org['id']})
self.assertEquals(non_default_product_name, repository_list[0]['product'])
finally:
Defaults.delete({'param-name': 'organization_id'})
result = ssh.command('hammer defaults list')
self.assertTrue(default_org['id'] not in "".join(result.stdout))
@tier2
def test_positive_assign_http_proxy_to_products(self):
"""Assign http_proxy to Products and perform product sync.
:id: 6af7b2b8-15d5-4d9f-9f87-e76b404a966f
:expectedresults: HTTP Proxy is assigned to all repos present
in Products and sync operation performed successfully.
:CaseImportance: Critical
"""
# create HTTP proxies
http_proxy_a = HttpProxy.create(
{
'name': gen_string('alpha', 15),
'url': settings.http_proxy.un_auth_proxy_url,
'organization-id': self.org['id'],
}
)
http_proxy_b = HttpProxy.create(
{
'name': gen_string('alpha', 15),
'url': settings.http_proxy.auth_proxy_url,
'username': settings.http_proxy.username,
'password': settings.http_proxy.password,
'organization-id': self.org['id'],
}
)
# Create products and repositories
product_a = make_product({'organization-id': self.org['id']})
product_b = make_product({'organization-id': self.org['id']})
repo_a1 = make_repository(
{'product-id': product_a['id'], 'url': FAKE_0_YUM_REPO, 'http-proxy-policy': 'none'}
)
repo_a2 = make_repository(
{
'product-id': product_a['id'],
'url': FAKE_0_YUM_REPO,
'http-proxy-policy': 'use_selected_http_proxy',
'http-proxy-id': http_proxy_a['id'],
}
)
repo_b1 = make_repository(
{'product-id': product_b['id'], 'url': FAKE_0_YUM_REPO, 'http-proxy-policy': 'none'}
)
repo_b2 = make_repository({'product-id': product_b['id'], 'url': FAKE_0_YUM_REPO})
# Add http_proxy to products
Product.update_proxy(
{
'ids': '{},{}'.format(product_a['id'], product_b['id']),
'http-proxy-policy': 'use_selected_http_proxy',
'http-proxy-id': http_proxy_b['id'],
}
)
# Perform sync and verify packages count
Product.synchronize({'id': product_a['id'], 'organization-id': self.org['id']})
Product.synchronize({'id': product_b['id'], 'organization-id': self.org['id']})
repo_a1 = Repository.info({'id': repo_a1['id']})
repo_a2 = Repository.info({'id': repo_a2['id']})
repo_b1 = Repository.info({'id': repo_b1['id']})
repo_b2 = Repository.info({'id': repo_b2['id']})
assert repo_a1['http-proxy']['http-proxy-policy'] == "use_selected_http_proxy"
assert repo_a2['http-proxy']['http-proxy-policy'] == "use_selected_http_proxy"
assert repo_b1['http-proxy']['http-proxy-policy'] == "use_selected_http_proxy"
assert repo_b2['http-proxy']['http-proxy-policy'] == "use_selected_http_proxy"
assert repo_a1['http-proxy']['id'] == http_proxy_b['id']
assert repo_a2['http-proxy']['id'] == http_proxy_b['id']
assert repo_b1['http-proxy']['id'] == http_proxy_b['id']
assert repo_b2['http-proxy']['id'] == http_proxy_b['id']
assert int(repo_a1['content-counts']['packages']) == FAKE_0_YUM_REPO_PACKAGES_COUNT
assert int(repo_a2['content-counts']['packages']) == FAKE_0_YUM_REPO_PACKAGES_COUNT
assert int(repo_b1['content-counts']['packages']) == FAKE_0_YUM_REPO_PACKAGES_COUNT
assert int(repo_b2['content-counts']['packages']) == FAKE_0_YUM_REPO_PACKAGES_COUNT
Product.update_proxy(
{'ids': '{},{}'.format(product_a['id'], product_b['id']), 'http-proxy-policy': 'none'}
)
repo_a1 = Repository.info({'id': repo_a1['id']})
repo_a2 = Repository.info({'id': repo_a2['id']})
repo_b1 = Repository.info({'id': repo_b1['id']})
repo_b2 = Repository.info({'id': repo_b2['id']})
assert repo_a1['http-proxy']['http-proxy-policy'] == "none"
assert repo_a2['http-proxy']['http-proxy-policy'] == "none"
assert repo_b1['http-proxy']['http-proxy-policy'] == "none"
assert repo_b2['http-proxy']['http-proxy-policy'] == "none"
| gpl-3.0 | -2,658,068,404,343,697,400 | 40.496774 | 98 | 0.593284 | false | 3.749344 | true | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.